text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(args): """ cdstarcat add SPEC Add metadata about objects (specified by SPEC) in CDSTAR to the catalog. SPEC: Either a CDSTAR object ID or a query. """
def add(args):
    """cdstarcat add SPEC

    Add metadata about CDSTAR objects (specified by SPEC) to the catalog.

    SPEC: either a CDSTAR object ID or a query.
    """
    spec = args.args[0]
    with _catalog(args) as cat:
        size_before = len(cat)
        if OBJID_PATTERN.match(spec):
            # SPEC is a plain object ID.
            cat.add_objids(spec)
        else:
            # SPEC is a query expression.
            hits = cat.add_query(spec)
            args.log.info('{0} hits for query {1}'.format(hits, spec))
        added = len(cat) - size_before
        args.log.info('{0} objects added'.format(added))
        return added
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(args): """ cdstarcat create PATH Create objects in CDSTAR specified by PATH. When PATH is a file, a single object (possibly with multiple bitstreams) is created; When PATH is a directory, an object will be created for each file in the directory (recursing into subdirectories). """
def create(args):
    """cdstarcat create PATH

    Create objects in CDSTAR specified by PATH.  A file yields a single
    object (possibly with multiple bitstreams); a directory yields one
    object per contained file (recursing into subdirectories).
    """
    with _catalog(args) as cat:
        for fname, created, obj in cat.create(args.args[0], {}):
            status = 'new' if created else 'existing'
            args.log.info('{0} -> {1} object {2.id}'.format(fname, status, obj))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(args): """ cdstarcat delete OID Delete an object specified by OID from CDSTAR. """
def delete(args):
    """cdstarcat delete OID

    Delete the object specified by OID from CDSTAR and report how many
    catalog entries disappeared.
    """
    with _catalog(args) as cat:
        size_before = len(cat)
        cat.delete(args.args[0])
        removed = size_before - len(cat)
        args.log.info('{0} objects deleted'.format(removed))
        return removed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_continuously(self, interval=1): """Continuously run, while executing pending jobs at each elapsed time interval. @return cease_continuous_run: threading.Event which can be set to cease continuous run. Please note that it is *intended behavior that run_continuously() does not run missed jobs*. For example, if you've registered a job that should run every minute and you set a continuous run interval of one hour then your job won't be run 60 times at each interval but only once. """
def run_continuously(self, interval=1):
    """Continuously execute pending jobs at each elapsed *interval*.

    @return cease_continuous_run: threading.Event which can be set to
    cease the continuous run.

    Note it is *intended behavior that run_continuously() does not run
    missed jobs*: a job registered to run every minute combined with a
    one-hour run interval fires once per hour, not 60 times.
    """
    cease_continuous_run = threading.Event()
    scheduler = self

    class _ContinuousThread(threading.Thread):
        def run(self):
            # Poll until the caller sets the stop event.
            while not cease_continuous_run.is_set():
                scheduler.run_pending()
                time.sleep(interval)

    _ContinuousThread().start()
    return cease_continuous_run
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def at(self, time_str): """ Schedule the job every day at a specific time. Calling this is only valid for jobs scheduled to run every N day(s). :param time_str: A string in `XX:YY` format. :return: The invoked job instance """
def at(self, time_str):
    """Schedule the job every day at a specific time.

    Only valid for jobs scheduled to run every N day(s) or hour(s).

    :param time_str: A string in `XX:YY` format.
    :return: The invoked job instance
    """
    assert self.unit in ('days', 'hours') or self.start_day
    hour_part, minute_part = time_str.split(':')
    minute = int(minute_part)
    if self.unit == 'days' or self.start_day:
        hour = int(hour_part)
        assert 0 <= hour <= 23
    elif self.unit == 'hours':
        # Hourly jobs only honour the minute component.
        hour = 0
    assert 0 <= minute <= 59
    self.at_time = datetime.time(hour, minute)
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_status(self): """ returns True if status has changed """
def update_status(self):
    """Refresh ``self.status`` from marker files in the job's run directory.

    Returns True if the status changed, False otherwise.
    """
    # this function should be part of the server
    if (self.status is not None) and self.status > 0:
        # status is already final
        return False
    old_status = self.status
    # BUG FIX: the original tested an undefined name ``run_dir``; the
    # computed ``job_dir`` is what was intended.
    job_dir = self.run_dir + os.sep + self.id
    if os.path.isfile(job_dir + os.sep + 'FAILURE'):
        self.status = self.FAILED
    elif os.path.isfile(job_dir + os.sep + 'FINISHED'):
        self.status = self.FINISHED
    else:
        self.status = self.RUNNING
    return self.status != old_status
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_fasta(filename): """Check if filename is FASTA based on extension Return: Boolean """
def is_fasta(filename):
    """Check whether *filename* looks like FASTA, based on its extension.

    Return: Boolean
    """
    # Match .fasta/.fas/.fst-style suffixes, or a bare .fa suffix.
    for pattern in (r"\.fa*s[ta]*$", r"\.fa$"):
        if re.search(pattern, filename, flags=re.I):
            return True
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def plotGene(self):
    '''Plot the gene's sampled points as a dotted scatter.'''
    pl.plot(self.x, self.y, '.')
    pl.grid(True)
    pl.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def plotIndividual(self):
    '''Plot the individual's interpolated curve.'''
    pl.plot(self.x_int, self.y_int)
    pl.grid(True)
    pl.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def plot(self):
    '''Plot both the gene (dots) and the individual (curve) together.'''
    pl.plot(self.x, self.y, '.')
    pl.plot(self.x_int, self.y_int)
    pl.grid(True)
    pl.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def mutation(self, strength=0.1):
    '''Single gene mutation.

    Scales a random contiguous window of the gene's real part up or down
    by a random fraction of *strength*, then recomputes the individual.
    '''
    mut_strength = strength
    # Floor division keeps the bound integral on Python 3 as well
    # (plain '/' yields a float there).
    max_size = self.gLength // 2
    # BUG FIX: numpy.random.random_integers was deprecated and removed;
    # randint with an exclusive upper bound draws from the same range.
    mut_size = int(numpy.random.randint(1, max_size + 1))
    position = int(numpy.random.randint(mut_size - 1, self.y.shape[0] - mut_size))
    sign = pl.rand()
    amount = pl.rand()
    scale = mut_strength * amount if sign > 0.5 else -mut_strength * amount
    # Window bounds reproduce Python 2 floor division of the original:
    # range(-size/2, size/2) spans exactly `size` indices.
    for i in range(-((mut_size + 1) // 2), mut_size // 2):
        self.y.real[position + i] = self.y.real[position + i] + scale * self.y.real[position + i]
    self.birth()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def mutations(self, nbr, strength):
    '''Apply *nbr* successive single-gene mutations of the given strength.'''
    for _ in range(nbr):
        self.mutation(strength)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def mutation(self, strength=0.1):
    '''Single gene mutation - complex version.

    Applies one independent random-window mutation to the real part and
    one to the imaginary part, then recomputes the individual.
    '''
    def _mutate(component):
        # *component* is a mutable real-valued view (y.real or y.imag).
        # Floor division keeps bounds integral on Python 3 too.
        max_size = self.gLength // 2
        # BUG FIX: numpy.random.random_integers was deprecated/removed;
        # randint with exclusive upper bound covers the same range.
        size = int(numpy.random.randint(1, max_size + 1))
        position = int(numpy.random.randint(size - 1, self.y.shape[0] - size))
        sign = pl.rand()
        amount = pl.rand()
        scale = strength * amount if sign > 0.5 else -strength * amount
        # Window bounds reproduce Python 2 floor division of the original.
        for i in range(-((size + 1) // 2), size // 2):
            component[position + i] = component[position + i] + scale * component[position + i]

    _mutate(self.y.real)
    _mutate(self.y.imag)
    # Compute the individual
    self.birth()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def rankingEval(self):
    '''Rank the population by the result of each individual's fitnessEval.

    Returns [raw fitness array, indices sorted best-first, linear-ranking
    selection probabilities (pressure S = 2)].
    '''
    fitness_all = numpy.zeros(self.length)
    for idx in range(self.length):
        self.Ind[idx].fitnessEval()
        fitness_all[idx] = self.Ind[idx].fitness
    max_fitness = fitness_all.max()
    # Normalise so that the best (largest raw fitness) maps to 0.
    fitness_norm = (max_fitness - fitness_all) / max_fitness
    order = fitness_norm.argsort()
    # Linear ranking selection probabilities with pressure S = 2.
    pressure = 2.0
    probability = numpy.zeros(self.length)
    for rank in range(self.length):
        probability[order[rank]] = ((2 - pressure) / self.length) + \
            (2 * rank * (pressure - 1)) / (self.length * (self.length - 1))
    self.rankingComputed = 1
    self.fitness = fitness_all
    return [fitness_all, order[::-1], probability]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def sortedbyAge(self):
    '''Return population indices sorted by age, oldest first.'''
    ages = numpy.array([self.Ind[i].age for i in range(self.length)], dtype=float)
    return ages.argsort()[::-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def RWSelection(self, mating_pool_size):
    '''Select a mating pool with the roulette-wheel algorithm.

    Spins the wheel until *mating_pool_size* distinct slots have been
    drawn; returns an array of the selected (ranked) indices.
    '''
    [fitness_all, ranking, prob] = self.rankingEval()
    # Reorder probabilities by rank, then accumulate into a wheel.
    prob_sorted = numpy.zeros(self.length)
    for idx in range(self.length):
        prob_sorted[idx] = prob[ranking[idx]]
    wheel = numpy.cumsum(prob_sorted)
    mating_pool = numpy.zeros(mating_pool_size)
    filled = 0
    while filled < mating_pool_size:
        spin = numpy.random.random()
        slot = 0
        while wheel[slot] < spin:
            slot += 1
        # Only accept slots not already present in the pool.
        if numpy.shape(numpy.where(mating_pool == slot))[1] == 0:
            mating_pool[filled] = ranking[slot]
            filled += 1
    return mating_pool
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def SUSSelection(self, mating_pool_size):
    '''Select a mating pool via stochastic universal sampling.

    A single random offset places ``mating_pool_size`` equidistant
    pointers over the cumulative probability wheel; returns an array of
    the selected (ranked) indices.
    '''
    pointer = numpy.random.random() / float(mating_pool_size)
    step = 1 / float(mating_pool_size)
    [fitness_all, ranking, prob] = self.rankingEval()
    # Reorder probabilities by rank, then accumulate into a wheel.
    prob_sorted = numpy.zeros(self.length)
    for idx in range(self.length):
        prob_sorted[idx] = prob[ranking[idx]]
    wheel = numpy.cumsum(prob_sorted)
    mating_pool = numpy.zeros(mating_pool_size)
    for j in range(mating_pool_size):
        slot = 0
        while wheel[slot] <= pointer:
            slot += 1
        mating_pool[j] = ranking[slot]
        pointer += step
    return mating_pool
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def safe_join(base, *paths): """ Joins one or more path components to the base path component intelligently. Returns a normalized, absolute version of the final path. The final path must be located inside of the base path component (otherwise a ValueError is raised). """
def safe_join(base, *paths):
    """Join one or more path components to *base* intelligently.

    Returns a normalized, absolute version of the final path.  Raises
    ValueError if the final path is not located inside *base*.
    """
    # (Removed the original's no-op lines ``base = base`` and
    # ``paths = [p for p in paths]``.)
    final_path = os.path.abspath(os.path.join(base, *paths))
    base_path = os.path.abspath(base)
    base_path_len = len(base_path)
    # Ensure final_path starts with base_path (normcase avoids false
    # negatives on case-insensitive systems such as Windows) and that the
    # character right after the base is a separator (or nothing, in which
    # case final_path equals base_path).
    if not os.path.normcase(final_path).startswith(os.path.normcase(base_path)) \
            or final_path[base_path_len:base_path_len + 1] not in ("", os.path.sep):
        raise ValueError("The joined path (%s) is located outside of the base "
                         "path component (%s)" % (final_path, base_path))
    return final_path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filepath_to_uri(path): """ Convert an file system path to a URI portion that is suitable for inclusion in a URL. We are assuming input is either UTF-8 or unicode already. This method will encode certain chars that would normally be recognized as special chars for URIs. Note that this method does not encode the ' character, as it is a valid character within URIs. See encodeURIComponent() JavaScript function for more details. Returns an ASCII string containing the encoded result. """
def filepath_to_uri(path):
    """Convert a file-system path to a URI portion suitable for a URL.

    Assumes the input is UTF-8 or unicode already.  Encodes characters
    that are special in URIs but leaves ' untouched, as it is valid
    within URIs.  Returns an ASCII string with the encoded result.
    """
    if path is None:
        return path
    # BUG FIX: ``urllib.quote`` is Python 2-only; import the right name
    # lazily so the function works on both major versions.
    try:
        from urllib.parse import quote  # Python 3
    except ImportError:
        from urllib import quote  # Python 2
    # I know about `os.sep` and `os.altsep` but I want to leave
    # some flexibility for hardcoding separators.
    return quote(path.replace("\\", "/"), safe=b"/~!*()'")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def schema_file(self): """ Gets the full path to the file in which to load configuration schema. """
def schema_file(self):
    """Return the full path of the configuration schema file."""
    folder = os.getcwd() + '/' + self.lazy_folder
    return folder + self.schema_filename
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_ignore(self): """ Writes a .gitignore file to ignore the generated data file. """
def add_ignore(self):
    """Write a .gitignore file that ignores the generated data file.

    Does nothing if the ignore file already exists.
    """
    path = self.lazy_folder + self.ignore_filename
    # If the file exists, return.
    if os.path.isfile(os.path.realpath(path)):
        return None
    _, data_name = os.path.split(self.data_file)
    # BUG FIX: a context manager guarantees the handle is closed even if
    # the write fails (the original leaked it in that case).
    with open(path, 'w') as handle:
        handle.write(data_name + '\n')
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def choose_schema(self, out_file): """ Finds all schema templates and prompts to choose one. Copies the file to self.lazy_folder. """
def choose_schema(self, out_file):
    """Find all schema templates and prompt the user to choose one.

    Copies the chosen template to *out_file* inside ``self.lazy_folder``
    and returns it.
    """
    path = os.path.dirname(lazyconf.__file__) + '/schema/'
    self.prompt.header('Choose a template for your config file: ')
    count = 0
    choices = []
    for filename in os.listdir(path):
        if not filename.endswith('.json'):
            continue
        try:
            template = self._load(path + filename)
            description = template.get('_meta.description')
            prompt_string = str(count + 1) + '. ' + filename
            count += 1
            if description:
                self.prompt.notice(prompt_string + ': ' + description)
            else:
                self.prompt.notice(prompt_string)
            choices.append(template)
        except IOError as e:
            # BUG FIX: the original used a Python 2 'print' statement here;
            # reporting through the prompt object is what was intended.
            self.prompt.error(str(e))
    val = 0
    # BUG FIX: compare ints with '==' instead of identity ('is').
    while val == 0 or val > count:
        val = self.prompt.int('Choice', default=1)
        if val == 0 or val > count:
            self.prompt.error('Please choose a value between 1 and ' + str(count) + '.')
    schema = choices[val - 1]
    # Strip template metadata before saving the user's schema.
    if '_meta' in schema.data.keys():
        del schema.data['_meta']
    schema.save(out_file, as_schema=True)
    _, out_name = os.path.split(out_file)
    self.prompt.success('Saved to ' + self.lazy_folder + out_name + '.')
    return schema
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure_data(self, data, key_string = ''): """ Goes through all the options in `data`, and prompts new values. This function calls itself recursively if it finds an inner dictionary. Arguments: data -- The dictionary to loop through. key_string -- The dot-notated key of the dictionary being checked through. """
def configure_data(self, data, key_string=''):
    """Walk all the options in *data* and prompt new values.

    Calls itself recursively when it finds a nested dictionary.

    Arguments:
    data -- the dictionary to loop through.
    key_string -- the dot-notated key of the dictionary being checked.
    """
    # If there's no keys in this dictionary, we have nothing to do.
    if len(data.keys()) == 0:
        return
    # Depth in the key path determines the indentation prefix.
    key_parts = key_string.rsplit('.')
    prefix = ' ' * (len(key_parts) - 1)
    # Attempt to get a label for this key string.
    label = self.data.get_label(key_string)
    # Write the header for this section if it has a label.
    if label:
        p = prefix
        if len(p) > 0:
            p += ' '
        self.prompt.header(p + '[' + label + ']')
    # Add to the prefix to indicate options on this level.
    prefix = prefix + ' '
    # '_enabled' is processed first: it can switch the whole section off.
    if '_enabled' in data.keys():
        s = self.data.get_key_string(key_string, '_enabled')
        # Prompt whether to enable this section, defaulting to the
        # existing value.
        data['_enabled'] = self.prompt.bool(prefix + self.data.get_label(s), None, data['_enabled'])
        # Return if this section is now disabled.
        if data['_enabled'] is False:
            return
    # BUG FIX: iteritems() is Python 2-only; items() works on both.
    for k, v in data.items():
        # '_enabled' was already processed above; keep it for saving.
        if k == '_enabled':
            continue
        t = type(v)
        s = self.data.get_key_string(key_string, k)
        # Recurse into nested sections; otherwise parse the value.
        if t is dict:
            self.configure_data(v, s)
        else:
            label = prefix + self.data.get_label(s)
            self.parse_value(data, label, s, None, v)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure(self): """ The main configure function. Uses a schema file and an optional data file, and combines them with user prompts to write a new data file. """
def configure(self):
    """Main configure routine.

    Combines the schema file, an optional existing data file and user
    prompts to write a new data file.
    """
    # Make the lazy folder if it doesn't already exist.
    path = os.getcwd() + '/' + self.lazy_folder
    if not os.path.exists(path):
        os.makedirs(path)
    schema_file = self.schema_file
    data_file = self.data_file
    # Initialise the schema and data objects.
    schema, data = Schema(), Schema()
    # Load the schema, falling back to bundled templates when missing.
    try:
        schema.load(schema_file)
    except IOError:
        self.prompt.error("Could not find schema in " + schema_file + " - Choosing from default templates...")
        schema = self.choose_schema(schema_file)
    except Exception as e:
        # BUG FIX: the original caught (Exception, ValueError) - a
        # redundant tuple, since ValueError is an Exception subclass.
        self.prompt.error("Error: " + str(e) + " - Aborting...")
        return False
    else:
        sp, sf = os.path.split(schema_file)
        self.prompt.success('Loaded schema from ' + self.lazy_folder + sf)
    # Load the data file; a missing one just means we copy the schema.
    try:
        data.load(data_file)
    except Exception:
        self.prompt.error('Could not find data file. Copying from schema...')
    else:
        sp, sf = os.path.split(data_file)
        self.prompt.success('Loaded data from ' + self.lazy_folder + sf)
    # Store the internals of the schema (labels, selects, etc.) in data.
    data.internal = schema.internal
    # If we have data from a data file, merge the schema file into it.
    if data.data:
        m = Merge(schema.data, data.data)
        mods = m.merge()
        for a in mods['added']:
            self.prompt.success('Added ' + a + ' to data.')
        for r in mods['removed']:
            self.prompt.error('Removed ' + r + ' from data.')
        for k, change in mods['modified']:
            self.prompt.notice('Modified ' + k + ': ' + change[0] + ' became ' + change[1] + '.')
    # Otherwise, reference the data from the schema file verbatim.
    else:
        data.data = schema.data
    self.data = data
    # Configure the data interactively, then persist it.
    self.configure_data(data.data)
    self.data.save(self.data_file)
    self.add_ignore()
    sp, sf = os.path.split(self.data_file)
    self.prompt.success('Saved to ' + self.lazy_folder + sf + '.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_value(self, inner_dict, label, key, value, default): """ Parses a single value and sets it in an inner dictionary. Arguments: inner_dict -- The dictionary containing the value to set label -- The label to show for the prompt. key -- The key in the dictionary to set the value for. value -- The value to set. If there is a value, don't prompt for one. default -- The default value in the prompt. This is taken from the schema and defines the type of the value. """
def parse_value(self, inner_dict, label, key, value, default):
    """Parse a single value and store it in *inner_dict*.

    Arguments:
    inner_dict -- the dictionary receiving the value.
    label -- label shown by the prompt.
    key -- dot-notated key; only its final segment indexes inner_dict.
    value -- preset value; when given, no prompt is shown.
    default -- schema default; its type decides which prompt to use.
    """
    value_type = type(default)
    # Nested sections are handled elsewhere.
    if value_type is dict:
        return
    select = self.data.get_select(key)
    leaf = key.split('.')[-1]
    if select:
        inner_dict[leaf] = self.prompt.select(label, select, value, default=default)
    elif value_type is bool:
        inner_dict[leaf] = self.prompt.bool(label, value, default=default)
    elif value_type is int:
        inner_dict[leaf] = self.prompt.int(label, value, default=default)
    elif value_type is list:
        # Lists found in data fall back to a plain string prompt with an
        # empty default (schema lists arrive here already as strings).
        inner_dict[leaf] = self.prompt.prompt(label + ':', value, default='')
    else:
        # Everything else is treated as a string.
        inner_dict[leaf] = self.prompt.prompt(label + ':', value, default=default)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, key, value): """ Sets a single value in a preconfigured data file. Arguments: key -- The full dot-notated key to set the value for. value -- The value to set. """
def set(self, key, value):
    """Set a single value in a preconfigured data file.

    Arguments:
    key -- the full dot-notated key to set the value for.
    value -- the value to set.
    """
    target = self.data.data
    parts = key.split('.')
    # Drop the leaf segment; the loop walks only the parent sections.
    parts.pop()
    for part in parts:
        target = target.setdefault(part, {})
    schema = Schema().load(self.schema_file)
    self.data.internal = schema.internal
    self.parse_value(target, '', key, value, schema.get(key))
    self.data.save(self.data_file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load(self, data_file): """ Internal load function. Creates the object and returns it. Arguments: data_file -- The filename to load. """
def _load(self, data_file):
    """Load *data_file* into a new Schema object and return it.

    Arguments:
    data_file -- the filename to load.
    """
    # The original wrapped this call in a try/except that immediately
    # re-raised the exception - a no-op that also loses the traceback on
    # Python 2 - so errors now simply propagate.
    return Schema().load(data_file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, data_file = None): """ Loads a data file and sets it to self.data. Arguments: data_file -- The filename to load. """
def load(self, data_file=None):
    """Load a data file and store the result on ``self.data``.

    Arguments:
    data_file -- directory to load from; the lazy folder and the data
    filename are appended as needed.
    """
    if not data_file:
        data_file = ''
    elif data_file[-1] != '/':
        data_file += '/'
    # BUG FIX: the original compared data_file[-6:], silently assuming
    # the lazy folder name is exactly six characters long.
    if not data_file.endswith(self.lazy_folder):
        data_file += self.lazy_folder
    data_file += self.data_filename
    self.data = self._load(data_file)
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_for(self, query_type, value): """ Create a query and run it for the given arg if it doesn't exist, and return the tweets for the query. """
def get_for(self, query_type, value):
    """Get or create the Query for the given type/value pair, run it when
    newly created, and return its tweets.
    """
    from yacms.twitter.models import Query
    query, created = Query.objects.get_or_create(type=query_type, value=value)
    if created:
        query.run()
    elif not query.interested:
        # Re-flag an existing query we stopped caring about.
        query.interested = True
        query.save()
    return query.tweets.all()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def untar(original_tarball, output_directory): """Untar given tarball file into directory. Here we decide if our file is actually a tarball, then we untar it and return a list of extracted files. :param: tarball (string): the name of the tar file from arXiv :param: output_directory (string): the directory to untar in :return: list of absolute file paths """
def untar(original_tarball, output_directory):
    """Untar *original_tarball* into *output_directory*.

    :param: original_tarball (string): the name of the tar file
    :param: output_directory (string): the directory to untar in
    :return: list of absolute file paths of the extracted files
    :raises: InvalidTarball when the file is not actually a tarball
    """
    if not tarfile.is_tarfile(original_tarball):
        raise InvalidTarball
    tarball = tarfile.open(original_tarball)
    # BUG FIX: close the archive when done; the original leaked the
    # handle.
    try:
        # Set member mtimes to "now" so extracted files are not stale.
        epochsecs = int(time())
        for member in tarball.getmembers():
            member.mtime = epochsecs
        tarball.extractall(output_directory)
        names = tarball.getnames()
    finally:
        tarball.close()
    file_list = []
    for extracted_file in names:
        if extracted_file == '':
            break
        if extracted_file.startswith('./'):
            extracted_file = extracted_file[2:]
        # Record the absolute location of the extracted file.
        file_list.append(os.path.join(output_directory, extracted_file))
    return file_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def detect_images_and_tex( file_list, allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'), timeout=20): """Detect from a list of files which are TeX or images. :param: file_list (list): list of absolute file paths :param: allowed_image_types (list): list of allows image formats :param: timeout (int): the timeout value on shell commands. list of images in the tarball and the name of the TeX file in the tarball. """
def detect_images_and_tex(
        file_list,
        allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),
        timeout=20):
    """Split *file_list* into image files and candidate TeX files.

    :param: file_list (list): list of absolute file paths
    :param: allowed_image_types (list): list of allowed image formats
    :param: timeout (int): the timeout value on shell commands.
    :return: (image_list, might_be_tex) lists of paths
    """
    tex_file_extension = 'tex'
    image_list = []
    might_be_tex = []
    for extracted_file in file_list:
        # Ignore directories and hidden (metadata) files.
        if os.path.isdir(extracted_file) \
                or os.path.basename(extracted_file).startswith('.'):
            continue
        magic_str = magic.from_file(extracted_file, mime=True)
        if magic_str == "application/x-tex":
            might_be_tex.append(extracted_file)
            continue
        if magic_str.startswith('image/') \
                or magic_str == "application/postscript":
            image_list.append(extracted_file)
            continue
        # libmagic was inconclusive: fall back to the file extension;
        # anything still unrecognised is ignored.
        _, dotted_file_extension = os.path.splitext(extracted_file)
        file_extension = dotted_file_extension[1:]
        if file_extension == tex_file_extension:
            might_be_tex.append(extracted_file)
        elif file_extension in allowed_image_types:
            image_list.append(extracted_file)
    return image_list, might_be_tex
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_images(image_list, image_format="png", timeout=20): """Convert images from list of images to given format, if needed. Figure out the types of the images that were extracted from the tarball and determine how to convert them into PNG. extracted from the tarball in step 1 :param: image_format (string): which image format to convert to. (PNG by default) :param: timeout (int): the timeout value on shell commands. image files when all have been converted to PNG format. """
def convert_images(image_list, image_format="png", timeout=20):
    """Convert images from *image_list* to *image_format* when needed.

    :param: image_format (string): target image format (PNG by default)
    :param: timeout (int): the timeout value on shell commands.
    :return: dict mapping the (possibly converted) file to its source
    """
    png_output_contains = 'PNG image'
    image_mapping = {}
    for image_file in image_list:
        if os.path.isdir(image_file) or not os.path.exists(image_file):
            continue
        cmd_out = check_output(['file', image_file], timeout=timeout)
        if cmd_out.find(png_output_contains) > -1:
            # Already PNG - nothing to convert.
            image_mapping[image_file] = image_file
            continue
        # we're just going to assume that ImageMagick can convert all
        # the image types that we may be faced with
        # for sure it can do EPS->PNG and JPG->PNG and PS->PNG
        # and PSTEX->PNG
        converted_image_file = get_converted_image_name(image_file)
        try:
            convert_image(image_file, converted_image_file, image_format)
        except (MissingDelegateError, ResourceLimitError):
            # Too bad, cannot convert image format.
            continue
        if os.path.exists(converted_image_file):
            image_mapping[converted_image_file] = image_file
    return image_mapping
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_image(from_file, to_file, image_format): """Convert an image to given format."""
def convert_image(from_file, to_file, image_format):
    """Convert *from_file* to *image_format* and save it as *to_file*."""
    with Image(filename=from_file) as original, \
            original.convert(image_format) as converted:
        converted.save(filename=to_file)
    return to_file
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rotate_image(filename, line, sdir, image_list): """Rotate a image. Given a filename and a line, figure out what it is that the author wanted to do wrt changing the rotation of the image and convert the file so that this rotation is reflected in its presentation. :param: filename (string): the name of the file as specified in the TeX :param: line (string): the line where the rotate command was found :output: the image file rotated in accordance with the rotate command :return: True if something was rotated """
def rotate_image(filename, line, sdir, image_list):
    """Rotate an image according to an angle/rotate option found in a TeX line.

    :param: filename (string): the image name as specified in the TeX
    :param: line (string): the line where the rotate command was found
    :output: the image file rotated according to the command
    :return: True if something was rotated
    """
    file_loc = get_image_location(filename, sdir, image_list)
    matches = re.findall('(angle=[-\\d]+|rotate=[-\\d]+)', line)
    if len(matches) < 1:
        return False
    degrees = matches[0].split('=')[-1].strip()
    if file_loc is None or file_loc == 'ERROR' \
            or not re.match('-*\\d+', degrees):
        return False
    if degrees:
        try:
            degrees = int(degrees)
        except (ValueError, TypeError):
            return False
        if not os.path.exists(file_loc):
            return False
        # Rotate a clone and overwrite the original file in place.
        with Image(filename=file_loc) as image:
            with image.clone() as rotated:
                rotated.rotate(degrees)
                rotated.save(filename=file_loc)
        return True
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_by_owner(cls, owner): """ get all entities owned by specified owner """
def get_by_owner(cls, owner):
    """Return a query for all entities owned by *owner*."""
    owner_key = cls._get_key(owner)
    return cls.query(cls.owner == owner_key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean(self): """ Cleans the data and throws ValidationError on failure """
def clean(self):
    """Validate the instance data against ``validate_schema``.

    Returns the cleaned data dict; raises ValidationError on failure.
    """
    errors = {}
    cleaned = {}
    for name, validator in self.validate_schema.items():
        val = getattr(self, name, None)
        try:
            cleaned[name] = validator.to_python(val)
        # BUG FIX: 'except X, err' is Python 2-only syntax; 'as' works
        # on both major versions.
        except formencode.api.Invalid as err:
            errors[name] = err
    if errors:
        raise ValidationError('Invalid data', errors)
    return cleaned
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_defaults(self, **defaults): """ Add all keyword arguments to self.args args: **defaults: key and value represents dictionary key and value """
def set_defaults(self, **defaults):
    """Add keyword arguments to ``self.args`` without overriding
    existing keys.

    args:
        **defaults: key and value represents dictionary key and value
    """
    # dict.setdefault replaces both the Python 2/3 iteration fallback
    # and the 'key not in self.args.keys()' membership test.
    for key, val in defaults.items():
        self.args.setdefault(key, val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_args(self, **kwargs): """ Set more arguments to self.args args: **kwargs: key and value represents dictionary key and value """
def set_args(self, **kwargs):
    """Set (or overwrite) arguments in ``self.args``.

    args:
        **kwargs: key and value represents dictionary key and value
    """
    # dict.update replaces the manual Python 2/3 iteration fallback.
    self.args.update(kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_important_variables(self): """ Check all the variables needed are defined """
def check_important_variables(self):
    """Raise TypeError unless every required variable is present in
    ``self.args``.
    """
    # Set difference is truthy when non-empty; no need for len().
    missing = self.important_variables - set(self.args.keys())
    if missing:
        # Preserve the original generic message (callers may match on it).
        raise TypeError("Some important variables are not set")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_bestfit_line(self, x_min=None, x_max=None, resolution=None): """ Method to get bestfit line using the defined self.bestfit_func method args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 how many steps between x_min and x_max returns: [bestfit_x, bestfit_y] """
def get_bestfit_line(self, x_min=None, x_max=None, resolution=None):
    """Return [xs, ys] of the best-fit line computed by self.bestfit_func.

    args:
        x_min: scalar, default=min(x)
            minimum x value of the line
        x_max: scalar, default=max(x)
            maximum x value of the line
        resolution: int, default=1000
            how many steps between x_min and x_max
    returns:
        [bestfit_x, bestfit_y]
    """
    data_x = self.args["x"]
    if x_min is None:
        x_min = min(data_x)
    if x_max is None:
        x_max = max(data_x)
    if resolution is None:
        resolution = self.args.get("resolution", 1000)
    line_x = np.linspace(x_min, x_max, resolution)
    return [line_x, self.bestfit_func(line_x)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_rmse(self, data_x=None, data_y=None): """ Get Root Mean Square Error using self.bestfit_func args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 how many steps between x_min and x_max """
def get_rmse(self, data_x=None, data_y=None):
    """Return the Root Mean Square Error of self.bestfit_func.

    args:
        data_x: array_like, default=self.args["x"]
            x values to evaluate; pass a subset to restrict the check
        data_y: array_like, default=self.args["y"]
            observed y values corresponding to data_x
    """
    # (Docstring fixed: the original described unrelated x_min/x_max
    # parameters copied from another method.)
    if data_x is None:
        data_x = np.array(self.args["x"])
    if data_y is None:
        data_y = np.array(self.args["y"])
    if len(data_x) != len(data_y):
        raise ValueError("Lengths of data_x and data_y are different")
    predicted = self.bestfit_func(data_x)
    return np.sqrt(np.mean((predicted - data_y) ** 2))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mae(self, data_x=None, data_y=None): """ Get Mean Absolute Error using self.bestfit_func args: data_x: array_like, default=x x value used to determine mae, used if only a section of x is to be calculated data_y: array_like, default=y y value used to determine mae, used if only a section of y is to be calculated """
def get_mae(self, data_x=None, data_y=None):
    """Return the mean absolute error of ``self.bestfit_func``.

    Args:
        data_x: x values to evaluate; defaults to the fitted x data.
        data_y: observed y values; defaults to the fitted y data.

    Raises:
        ValueError: if ``data_x`` and ``data_y`` differ in length.
    """
    if data_x is None:
        data_x = np.array(self.args["x"])
    if data_y is None:
        data_y = np.array(self.args["y"])
    if len(data_x) != len(data_y):
        raise ValueError("Lengths of data_x and data_y are different")
    predicted = self.bestfit_func(data_x)
    return np.mean(abs(predicted - data_y))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _backup_bytes(target, offset, length): """ Read bytes from one file and write it to a backup file with the .bytes_backup suffix """
def _backup_bytes(target, offset, length):
    """Copy ``length`` bytes at ``offset`` of ``target`` into a backup file.

    The backup is written next to the original as ``target + '.bytes_backup'``
    so the span can later be put back by ``restore``.
    """
    # Fixed message typo: "byes" -> "bytes".
    click.echo('Backup {l} bytes at position {offset} on file {file} to .bytes_backup'.format(
        l=length, offset=offset, file=target))
    with open(target, 'r+b') as source:
        source.seek(offset)
        # Read the whole span in one call; the former byte-at-a-time loop
        # used the Python-2-only xrange() and was needlessly slow.
        data = source.read(length)
    with open(target + '.bytes_backup', 'w+b') as backup:
        backup.write(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _smudge_bytes(target, offset, magic_bytes): """ Write magic bytes to a file relative from offset """
def _smudge_bytes(target, offset, magic_bytes):
    """Overwrite the bytes of ``target`` at ``offset`` with ``magic_bytes``."""
    # Fixed message typo: "byes" -> "bytes".
    click.echo('Writing {c} magic bytes at position {offset} on file {file}'.format(
        c=len(magic_bytes), offset=offset, file=target))
    with open(target, 'r+b') as f:
        f.seek(offset)
        f.write(magic_bytes)
        f.flush()
    click.echo('Changes written')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smudge(newtype, target): """ Smudge magic bytes with a known type """
def smudge(newtype, target):
    """Replace the magic bytes of ``target`` with those of a known type.

    Looks up ``newtype`` in the smudge database, backs up the affected
    span, then writes the new magic bytes in place.
    """
    entry = smudge_db.get()[newtype]
    magic = entry['magic']
    offset = entry['offset']
    _backup_bytes(target, offset, len(magic))
    _smudge_bytes(target, offset, magic)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smudgeraw(target, offset, magicbytes): """ Smudge magic bytes with raw bytes """
def smudgeraw(target, offset, magicbytes):
    """Write user-supplied raw magic bytes into ``target`` at ``offset``.

    Args:
        target: path of the file to modify.
        offset: byte offset at which to write.
        magicbytes: hex string, optionally with ``\\x`` separators.
    """
    import binascii
    # binascii.unhexlify works on both Python 2 and 3; the former
    # str.decode('hex') was Python-2 only.
    raw = binascii.unhexlify(magicbytes.replace('\\x', ''))
    _backup_bytes(target, offset, len(raw))
    _smudge_bytes(target, offset, raw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def restore(source, offset): """ Restore a smudged file from .bytes_backup """
def restore(source, offset):
    """Write the ``.bytes_backup`` contents back into ``source`` at ``offset``."""
    backup_location = os.path.join(
        os.path.dirname(os.path.abspath(source)), source + '.bytes_backup')
    click.echo('Reading backup from: {location}'.format(location=backup_location))
    if not os.path.isfile(backup_location):
        click.echo('No backup found for: {source}'.format(source=source))
        return
    with open(backup_location, 'r+b') as backup:
        data = backup.read()
    click.echo('Restoring {c} bytes from offset {o}'.format(c=len(data), o=offset))
    with open(source, 'r+b') as target:
        target.seek(offset)
        target.write(data)
        target.flush()
    click.echo('Changes written')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def available(): """ List available types for 'smudge' """
def available():
    """Print a table of the file types known to the smudge database."""
    import binascii
    db = smudge_db.get()
    click.echo('{:<6} {:<6} {:<50}'.format('Type', 'Offset', 'Magic'))
    for name, entry in db.items():
        # hexlify + decode is portable; bytes.encode('hex') was Python-2 only.
        click.echo('{type:<6} {offset:<6} {magic}'.format(
            type=name,
            magic=binascii.hexlify(entry['magic']).decode('ascii'),
            offset=entry['offset']))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upcoming_releases(self, product): """ Get upcoming releases for this product. Specifically we search for releases with a GA date greater-than or equal to today's date. :param product: str, eg. "ceph" :returns: deferred that when fired returns a list of Munch (dict-like) objects representing all releases, sorted by shortname. """
def upcoming_releases(self, product):
    """Fire (deferred) with the upcoming releases for ``product``.

    Queries the API for releases whose GA date is today or later, ordered
    by shortname, and returns a list of Munch (dict-like) objects.
    """
    today = date.today().strftime('%Y-%m-%d')
    url = ('api/v6/releases/'
           '?product__shortname=' + product +
           '&ga_date__gte=' + today +
           '&ordering=shortname_sort')
    releases = yield self._get(url)
    defer.returnValue(munchify(releases))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def newest_release(self, product): """ Get the shortname of the newest upcoming release for a product. :param product: str, eg. "ceph" :returns: deferred that when fired returns the shortname of the newest release. """
def newest_release(self, product):
    """Fire (deferred) with the shortname of the next upcoming release.

    Raises:
        ProductPagesException: when the product has no upcoming releases.
    """
    upcoming = yield self.upcoming_releases(product)
    if not upcoming:
        raise ProductPagesException('no upcoming releases')
    defer.returnValue(upcoming[0].shortname)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def product_url(self, product): """ Return a human-friendly URL for this product. :param product: str, eg. "ceph" :returns: str, URL """
def product_url(self, product):
    """Return the human-friendly URL for ``product`` on this server."""
    return posixpath.join(self.url, 'product/%s' % product)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def release(self, shortname): """ Get a specific release by its shortname. :param shortname: str, eg. "ceph-3-0" :returns: deferred that when fired returns a Release (Munch, dict-like) object representing this release. :raises: ReleaseNotFoundException if this release does not exist. """
def release(self, shortname):
    """Fire (deferred) with the Release object matching ``shortname``.

    Raises:
        ReleaseNotFoundException: if no release has this shortname.
    """
    matches = yield self._get('api/v6/releases/?shortname=%s' % shortname)
    # Even for an unknown shortname the API answers 200 with an empty
    # list rather than an error, so the emptiness check happens here.
    if not matches:
        raise ReleaseNotFoundException('no release %s' % shortname)
    found = Release.fromDict(matches[0])
    found.connection = self
    defer.returnValue(found)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def schedule_url(self, release): """ Return a human-friendly URL for this release. :param release: str, release shortname eg. "ceph-3-0" :returns: str, URL """
def schedule_url(self, release):
    """Return the human-friendly schedule URL for a release shortname.

    Args:
        release: release shortname, e.g. "ceph-3-0" (the product name is
            the part before the first dash).
    """
    product = release.split('-', 1)[0]
    path = 'product/%s/release/%s/schedule/tasks' % (product, release)
    return posixpath.join(self.url, path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get(self, url, headers={}): """ Get a JSON API endpoint and return the parsed data. :param url: str, *relative* URL (relative to pp-admin/ api endpoint) :param headers: dict (optional) :returns: deferred that when fired returns the parsed data from JSON or errbacks with ProductPagesException """
def _get(self, url, headers=None):
    """GET a JSON API endpoint and fire (deferred) with the parsed body.

    Args:
        url: relative URL, joined onto ``self.url``.
        headers: optional extra request headers (dict).

    Raises:
        ProductPagesException: on a non-200 response or any transport /
        JSON-parsing error.
    """
    # Avoid the former mutable default argument; copy before mutating.
    headers = dict(headers) if headers else {}
    headers['Accept'] = 'application/json'
    url = posixpath.join(self.url, url)
    try:
        response = yield treq.get(url, headers=headers, timeout=5)
        if response.code != 200:
            raise ProductPagesException('%s returned %s' % (url, response.code))
        content = yield treq.json_content(response)
    except ProductPagesException:
        # Don't re-wrap our own status-code error as a "treq error".
        raise
    except Exception as e:
        # e.g. treq timeout, or json_content() failing to parse.
        # str(e) is portable, unlike the Python-2-only e.message.
        raise ProductPagesException('treq error: %s' % e)
    # returnValue() lives outside the try so the control-flow exception it
    # raises can never be swallowed by the blanket except above.
    defer.returnValue(content)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_datetime(secs): """ Return a UTC date from a timestamp. @type secs: C{long} @param secs: Seconds since 1970. @return: UTC timestamp. @rtype: C{datetime.datetime} """
def get_datetime(secs):
    """Return a UTC ``datetime`` for a POSIX timestamp.

    On platforms where negative timestamps are broken (module flag
    ``negative_timestamp_broken``), negative values are computed by
    timedelta arithmetic from the epoch instead.
    """
    if negative_timestamp_broken and secs < 0:
        epoch = datetime.datetime(1970, 1, 1)
        return epoch + datetime.timedelta(seconds=secs)
    return datetime.datetime.utcfromtimestamp(secs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_class_sealed(klass): """ Whether or not the supplied class can accept dynamic properties. @rtype: C{bool} @since: 0.5 """
def is_class_sealed(klass):
    """Return True if ``klass`` cannot accept dynamic attributes.

    A class is sealed only when every class in its MRO defines
    ``__slots__`` and (for new-style classes) none of them carries a
    ``__dict__`` entry.
    """
    mro = inspect.getmro(klass)
    is_new_style = mro[-1] is object
    if is_new_style:
        mro = mro[:-1]
    for base in mro:
        if is_new_style and '__dict__' in base.__dict__:
            return False
        if not hasattr(base, '__slots__'):
            return False
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lower_underscore(string, prefix='', suffix=''): """ Generate an underscore-separated lower-case identifier, given English text, a prefix, and an optional suffix. Useful for function names and variable names. `prefix` can be set to `''`, though be careful - without a prefix, the function will throw `InvalidIdentifier` when your string starts with a number. Example: 'this_is_an_identifier' """
def lower_underscore(string, prefix='', suffix=''):
    """Return a lower_snake_case identifier built from English text.

    Raises (via require_valid) when the result is not a legal identifier,
    e.g. when it starts with a digit and no prefix is given.
    Example: 'this_is_an_identifier'
    """
    words = en.words(' '.join([prefix, string, suffix]))
    candidate = '_'.join(word.lower() for word in words)
    return require_valid(append_underscore_if_keyword(candidate))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upper_underscore(string, prefix='', suffix=''): """ Generate an underscore-separated upper-case identifier. Useful for constants. Takes a string, prefix, and optional suffix. `prefix` can be set to `''`, though be careful - without a prefix, the function will throw `InvalidIdentifier` when your string starts with a number. Example: 'THIS_IS_A_CONSTANT' """
def upper_underscore(string, prefix='', suffix=''):
    """Return an UPPER_SNAKE_CASE identifier, useful for constants.

    Raises (via require_valid) when the result is not a legal identifier,
    e.g. when it starts with a digit and no prefix is given.
    Example: 'THIS_IS_A_CONSTANT'
    """
    words = en.words(' '.join([prefix, string, suffix]))
    candidate = '_'.join(word.upper() for word in words)
    return require_valid(append_underscore_if_keyword(candidate))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upper_camel(string, prefix='', suffix=''): """ Generate a camel-case identifier with the first word capitalised. Useful for class names. Takes a string, prefix, and optional suffix. `prefix` can be set to `''`, though be careful - without a prefix, the function will throw `InvalidIdentifier` when your string starts with a number. Example: 'IAmAClass' """
def upper_camel(string, prefix='', suffix=''):
    """Return an UpperCamelCase identifier, useful for class names.

    Raises (via require_valid) when the result is not a legal identifier,
    e.g. when it starts with a digit and no prefix is given.
    Example: 'IAmAClass'
    """
    words = en.words(' '.join([prefix, string, suffix]))
    candidate = ''.join(upper_case_first_char(word) for word in words)
    return require_valid(append_underscore_if_keyword(candidate))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lower_camel(string, prefix='', suffix=''): """ Generate a camel-case identifier. Useful for unit test methods. Takes a string, prefix, and optional suffix. `prefix` can be set to `''`, though be careful - without a prefix, the function will throw `InvalidIdentifier` when your string starts with a number. Example: 'testUserCanLogin' """
def lower_camel(string, prefix='', suffix=''):
    """Return a lowerCamelCase identifier, useful for unit test methods.

    Raises (via require_valid) when the result is not a legal identifier,
    e.g. when it starts with a digit and no prefix is given.
    Example: 'testUserCanLogin'
    """
    words = en.words(' '.join([prefix, string, suffix]))
    parts = [word.lower() if index == 0 else upper_case_first_char(word)
             for index, word in enumerate(words)]
    return require_valid(append_underscore_if_keyword(''.join(parts)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_valid(identifier): """ If the identifier is valid for Python, return True, otherwise False. """
def is_valid(identifier):
    """Return True when ``identifier`` is a legal, non-keyword Python name."""
    if not isinstance(identifier, six.string_types):
        return False
    if not NAME_RE.search(identifier):
        return False
    return not keyword.iskeyword(identifier)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def receiver(url, **kwargs): """ Return receiver instance from connection url string url <str> connection url eg. 'tcp://0.0.0.0:8080' """
def receiver(url, **kwargs):
    """Instantiate the receiver registered for a connection url.

    Args:
        url (str): connection url, e.g. 'tcp://0.0.0.0:8080'
    """
    resources = url_to_resources(url)
    make_receiver = resources["receiver"]
    return make_receiver(resources.get("url"), **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sender(url, **kwargs): """ Return sender instance from connection url string url <str> connection url eg. 'tcp://0.0.0.0:8080' """
def sender(url, **kwargs):
    """Instantiate the sender registered for a connection url.

    Args:
        url (str): connection url, e.g. 'tcp://0.0.0.0:8080'
    """
    resources = url_to_resources(url)
    make_sender = resources["sender"]
    return make_sender(resources.get("url"), **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def listen(url, prefix=None, **kwargs): """ bind and return a connection instance from url arguments: - url (str): xbahn connection url """
def listen(url, prefix=None, **kwargs):
    """Bind and return a connection instance for ``url``.

    Args:
        url (str): xbahn connection url
        prefix: optional channel prefix, normalized via get_prefix().
    """
    normalized = get_prefix(prefix)
    return listener(url, prefix=normalized, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(url, prefix=None, **kwargs): """ connect and return a connection instance from url arguments: - url (str): xbahn connection url """
def connect(url, prefix=None, **kwargs):
    """Connect and return a connection instance for ``url``.

    Args:
        url (str): xbahn connection url
        prefix: optional channel prefix, normalized via get_prefix().
    """
    normalized = get_prefix(prefix)
    return connection(url, prefix=normalized, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_data(self, message): """ make data string from message according to transport_content_type Returns: str: message data """
def make_data(self, message):
    """Serialize ``message`` according to ``self.transport_content_type``.

    Values that are not Message instances pass through untouched.

    Returns:
        str: message data
    """
    if isinstance(message, Message):
        return message.export(self.transport_content_type)
    return message
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_message(self, data): """ Create a Message instance from data, data will be loaded via munge according to the codec specified in the transport_content_type attribute Returns: Message: message object """
def make_message(self, data):
    """Deserialize ``data`` (via ``self.codec``) into a Message instance.

    The decoded payload may carry "data", "args", "kwargs" and "meta"
    entries; all of them are optional.

    Returns:
        Message: the reconstructed message object
    """
    payload = self.codec.loads(data)
    msg = Message(
        payload.get("data"),
        *payload.get("args", []),
        **payload.get("kwargs", {})
    )
    # Default to {} so a payload without "meta" does not crash update().
    msg.meta.update(payload.get("meta") or {})
    self.trigger("make_message", payload, msg)
    return msg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_permissions(): """ Checks if current user can access docker """
def check_permissions():
    """Exit with a help message unless the current user can talk to docker.

    Access requires either membership in the 'docker' group or euid 0.
    """
    in_docker_group = grp.getgrnam('docker').gr_gid in os.getgroups()
    if not (in_docker_group or os.geteuid() == 0):
        exitStr = """ User doesn't have permission to use docker. You can do either of the following, 1. Add user to the 'docker' group (preferred) 2. Run command as superuser using either 'sudo' or 'su -c' """
        exit(exitStr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_base_image_cmd(self, force): """ Build the glusterbase image """
def build_base_image_cmd(self, force):
    """Build the glusterbase docker image.

    An existing image is reused unless ``force`` is set, in which case it
    is removed and rebuilt. Returns the image record, or None on failure.
    """
    check_permissions()
    basetag = self.conf.basetag
    basedir = self.conf.basedir
    verbose = self.conf.verbose
    if self.image_exists(tag=basetag):
        if force:
            self.remove_image(basetag)
        else:
            echo("Image with tag '{0}' already exists".format(basetag))
            return self.image_by_tag(basetag)
    echo("Building base image")
    stream = self.build(path=basedir, rm=True, tag=basetag)
    err = self.__handle_build_stream(stream, verbose)
    if err:
        echo("Building base image failed with following error:")
        echo(err)
        return None
    image = self.image_by_tag(basetag)
    echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
    return image
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_main_image_cmd(self, srcdir, force): """ Build the main image to be used for launching containers """
def build_main_image_cmd(self, srcdir, force):
    """Build the main image used for launching gluster containers.

    Requires the base image (rebuilt when ``force`` is given). The sources
    in ``srcdir`` are compiled inside a throwaway container, which is then
    committed as the main image.
    """
    check_permissions()
    basetag = self.conf.basetag
    basedir = self.conf.basedir
    maintag = self.conf.maintag
    if not self.image_exists(tag=basetag):
        if not force:
            exit("Base image with tag {0} does not exist".format(basetag))
        echo("FORCE given. Forcefully building the base image.")
        self.build_base_image_cmd(force)
    if self.image_exists(tag=maintag):
        self.remove_image(tag=maintag)
    build_command = "/build/make-install-gluster.sh"
    ctr = self.create_container(image=basetag, command=build_command,
                                volumes=["/build", "/src"])
    self.start(ctr, binds={basedir: "/build", srcdir: "/src"})
    echo('Building main image')
    # Poll until the build container finishes.
    while self.inspect_container(ctr)["State"]["Running"]:
        time.sleep(5)
    if self.inspect_container(ctr)["State"]["ExitCode"] != 0:
        echo("Build failed")
        echo("Dumping logs")
        echo(self.logs(ctr))
        exit()
    # The docker remote api expects the repository and tag to be separate
    # items for commit.
    repo = maintag.split(':')[0]
    tag = maintag.split(':')[1]
    image = self.commit(ctr['Id'], repository=repo, tag=tag)
    echo("Built main image {0} (Id: {1})".format(maintag, image['Id']))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def launch_cmd(self, n, force): """ Launch the specified docker containers using the main image """
def launch_cmd(self, n, force):
    """Launch ``n`` containers ('<prefix>-1' .. '<prefix>-n') from the main image.

    An existing container with a colliding name aborts the run unless
    ``force`` is given, in which case it is stopped and removed first.
    """
    check_permissions()
    prefix = self.conf.prefix
    maintag = self.conf.maintag
    commandStr = "supervisord -c /etc/supervisor/conf.d/supervisord.conf"
    for i in range(1, n + 1):
        cName = "{0}-{1}".format(prefix, i)
        if self.container_exists(name=cName):
            if not force:
                exit("Container with name {0} already "
                     "exists.".format(cName))
            if self.container_running(name=cName):
                self.stop(cName)
            self.remove_container(cName, v=True)
        c = self.create_container(image=maintag, name=cName,
                                  command=commandStr, volumes=["/bricks"])
        self.start(c['Id'], privileged=True)
        time.sleep(2)  # Wait for container to startup
        echo("Launched {0} (Id: {1})".format(cName, c['Id']))
        c = None
        cName = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_cmd(self, name, force): """ Stop the specified or all docker containers launched by us """
def stop_cmd(self, name, force):
    """Stop the named container, or all launched containers (placeholder).

    Currently only prints what would happen; the real work is left to the
    docker CLI.
    """
    check_permissions()
    if name:
        echo("Would stop container {0}".format(name))
    else:
        echo("Would stop all containers")
    echo("For now use 'docker stop' to stop the containers")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ssh_cmd(self, name, ssh_command): """ SSH into given container and execute command if given """
def ssh_cmd(self, name, ssh_command):
    """SSH into the named container; run ``ssh_command`` or open a shell."""
    if not self.container_exists(name=name):
        exit("Unknown container {0}".format(name))
    if not self.container_running(name=name):
        exit("Container {0} is not running".format(name))
    addr = self.get_container_ip(name)
    if not addr:
        exit("Failed to get network address for "
             "container {0}".format(name))
    if ssh_command:
        ssh.do_cmd('root', addr, 'password', " ".join(ssh_command))
    else:
        ssh.launch_shell('root', addr, 'password')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ip_cmd(self, name): """ Print ip of given container """
def ip_cmd(self, name):
    """Print the network address of the named container."""
    if not self.container_exists(name=name):
        exit('Unknown container {0}'.format(name))
    addr = self.get_container_ip(name)
    if addr:
        echo(addr)
    else:
        exit("Failed to get network address for"
             " container {0}".format(name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pack(self, message_type, client_id, client_storage, args, kwargs): """ Packs a message """
def pack(self, message_type, client_id, client_storage, args, kwargs):
    """Serialize a message tuple with pickle (protocol 2 for interop)."""
    payload = (message_type, client_id, client_storage, args, kwargs)
    return pickle.dumps(payload, protocol=2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dispatch_message(self, message_type, client_id, client_storage, args, kwargs): """ Calls callback functions """
def dispatch_message(self, message_type, client_id, client_storage, args, kwargs):
    """Dispatch a backend message to the registered callback functions.

    Open/close/receive messages are resolved to a connected client (or a
    FakeClient stand-in when the client lives elsewhere); send messages go
    to the sender callbacks. All callbacks run via the reactor thread pool.
    """
    logger.debug("Backend message ({message_type}) : {args} {kwargs}".format(
        message_type=dict(MESSAGES_TYPES)[message_type], args=args, kwargs=kwargs))

    if message_type in (ON_OPEN, ON_CLOSE, ON_RECEIVE):
        # Find if client exists in clients_list; create a fake client if
        # it doesn't.
        client = next(
            (c for c in self.factory.clients_list if c._client_id == client_id),
            None)
        if not client:
            client = FakeClient(storage=client_storage, factory=self.factory)

        if message_type == ON_OPEN:
            reactor.callInThread(
                self.factory.mease.call_openers,
                client, self.factory.clients_list)
        elif message_type == ON_CLOSE:
            reactor.callInThread(
                self.factory.mease.call_closers,
                client, self.factory.clients_list)
        else:
            reactor.callInThread(
                self.factory.mease.call_receivers,
                client, self.factory.clients_list,
                kwargs.get('message', ''))
    elif message_type == ON_SEND:
        routing = kwargs.pop('routing')
        reactor.callInThread(
            self.factory.mease.call_senders,
            routing, self.factory.clients_list, *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _loop(self, *args, **kwargs): """Loops the target function :param args: The args specified on initiation :param kwargs: The kwargs specified on initiation """
def _loop(self, *args, **kwargs):
    """Run ``self.target`` repeatedly until a stop signal arrives.

    Args:
        args: positional arguments for the target, as given on initiation.
        kwargs: keyword arguments for the target, as given on initiation.
    """
    self.on_start(*self.on_start_args, **self.on_start_kwargs)
    try:
        while True:
            if self._stop_signal:
                break
            self.target(*args, **kwargs)
    finally:
        # Always run the stop hook and release waiters, even if the
        # target raised.
        self.on_stop(*self.on_stop_args, **self.on_stop_kwargs)
        self._stop_signal = False
        self._lock.set()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self, subthread=True): """Starts the loop Tries to start the loop. Raises RuntimeError if the loop is currently running. :param subthread: True/False value that specifies whether or not to start the loop within a subthread. If True the threading.Thread object is found in Loop._loop_thread. """
def start(self, subthread=True):
    """Start the loop.

    Args:
        subthread: when True, run the loop in a new threading.Thread
            (stored on ``self._loop_thread``); otherwise run it in the
            calling thread.

    Raises:
        RuntimeError: if the loop is already running.
    """
    if self.is_running():
        raise RuntimeError('Loop is currently running')
    self._lock.clear()
    self._stop_signal = False  # just in case
    self._in_subthread = subthread
    if subthread:
        worker = threading.Thread(target=self._loop,
                                  args=self.args, kwargs=self.kwargs)
        self._loop_thread = worker
        worker.start()
    else:
        self._loop(*self.args, **self.kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self, silent=False): """Sends a stop signal to the loop thread and waits until it stops A stop signal is sent using Loop.send_stop_signal(silent) (see docs for Loop.send_stop_signal) :param silent: True/False same parameter as in Loop.send_stop_signal(silent) """
def stop(self, silent=False):
    """Signal the loop thread to stop, then block until it has finished.

    Args:
        silent: passed through to send_stop_signal(); when True, no error
            is raised if the loop is not currently running.
    """
    self.send_stop_signal(silent)
    self._lock.wait()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_stop_signal(self, silent=False): """Sends a stop signal to the loop thread :param silent: True/False value that specifies whether or not to raise RuntimeError if the loop is currently not running :return: """
def send_stop_signal(self, silent=False):
    """Ask the loop thread to stop.

    Args:
        silent: when True, do nothing if the loop is not running instead
            of raising.

    Raises:
        RuntimeError: if the loop is not running and ``silent`` is False.
    """
    if self.is_running():
        self._stop_signal = True
        return
    if not silent:
        raise RuntimeError('Loop is currently not running')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def restart(self, subthread=None): """Restarts the loop function Tries to restart the loop thread using the current thread. Raises RuntimeError if a previous call to Loop.start was not made. :param subthread: True/False value used when calling Loop.start(subthread=subthread). If set to None it uses the same value as the last call to Loop.start. """
def restart(self, subthread=None):
    """Stop the loop (if running), reinitialize, and start it again.

    Args:
        subthread: whether to restart in a subthread; None reuses the
            value from the previous call to start().

    Raises:
        RuntimeError: if start() was never called before.
    """
    if self._in_subthread is None:
        raise RuntimeError('A call to start must first be placed before restart')
    self.stop(silent=True)
    if subthread is None:
        subthread = self._in_subthread
    # Re-run __init__ to reset internal state before starting again.
    self.__init__(self.target, self.args, self.kwargs, self.on_stop)
    self.start(subthread=subthread)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_property(prop_defs, prop_name, cls_names=[], hierarchy=[]): """ Generates a property class from the defintion dictionary args: prop_defs: the dictionary defining the property prop_name: the base name of the property cls_name: the name of the rdf_class with which the property is associated """
def make_property(prop_defs, prop_name, cls_names=[], hierarchy=[]):
    """Generate a property class from a definition dictionary.

    Args:
        prop_defs: the dictionary defining the property
        prop_name: the base name of the property
        cls_names: names of the rdf classes the property is tied to
        hierarchy: class hierarchy for the property

    Returns:
        the newly created property class
    """
    register = False
    try:
        cls_names.remove('RdfClassBase')
    except ValueError:
        pass
    if cls_names:
        new_name = "%s_%s" % (prop_name.pyuri, "_".join(cls_names))
        prop_defs['kds_appliesToClass'] = cls_names
    else:
        # No class restriction: register globally under AllClasses.
        # (The former unreachable trailing `else` branch was removed;
        # if/else now covers both cases.)
        cls_names = [Uri('kdr_AllClasses')]
        register = True
        new_name = prop_name
    new_prop = types.new_class(new_name,
                               (RdfPropertyBase, list,),
                               {'metaclass': RdfPropertyMeta,
                                'prop_defs': prop_defs,
                                'class_names': cls_names,
                                'prop_name': prop_name,
                                'hierarchy': hierarchy})
    if register:
        global properties
        global domain_props
        properties[new_name] = new_prop
        for domain in new_prop.rdfs_domain:
            try:
                domain_props[domain][prop_name] = prop_defs
            except KeyError:
                domain_props[domain] = {}
                domain_props[domain][prop_name] = prop_defs
            except TypeError:
                pass
    return new_prop
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def link_property(prop, cls_object): """ Generates a property class linked to the rdfclass args: prop: unlinked property class cls_name: the name of the rdf_class with which the property is associated cls_object: the rdf_class """
def link_property(prop, cls_object):
    """Generate a copy of ``prop`` linked to the rdf class ``cls_object``.

    Args:
        prop: unlinked property class
        cls_object: the rdf class the property is associated with

    Returns:
        the new, linked property class
    """
    # The former unused local `register` was removed.
    cls_name = cls_object.__name__
    if cls_name and cls_name != 'RdfBaseClass':
        new_name = "%s_%s" % (prop._prop_name, cls_name)
    else:
        new_name = prop._prop_name
    return types.new_class(new_name,
                           (prop,),
                           {'metaclass': RdfLinkedPropertyMeta,
                            'cls_name': cls_name,
                            'prop_name': prop._prop_name,
                            'linked_cls': cls_object})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_properties(cls_def): """ cycles through the class definiton and returns all properties """
def get_properties(cls_def):
    """Return the entries of a class definition that describe properties.

    An entry counts as a property when its rdf_type mentions
    'rdf_Property' or it declares an rdfs_domain.
    """
    found = {}
    for prop, value in cls_def.items():
        if 'rdf_Property' in value.get('rdf_type', "") or value.get('rdfs_domain'):
            found[prop] = value
    return found
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_prop_defs(prop_defs, hierarchy, cls_names): """ Reads through the prop_defs and returns a dictionary filtered by the current class args: prop_defs: the defintions from the rdf vocabulary defintion cls_object: the class object to tie the property cls_names: the name of the classes """
def _is_valid(test_list, valid_list):
    """Return True if any class in ``test_list`` appears in ``valid_list``.

    args:
        test_list: the classes listed in a kds_appliesToClass entry
        valid_list: the acceptable class URIs for the current context
    """
    for test in test_list:
        if test in valid_list:
            return True
    return False

new_dict = {}
# A definition item applies when it targets the wildcard class, one of the
# supplied class names, or any ancestor in the class hierarchy.
valid_classes = [Uri('kdr_AllClasses')] + cls_names + hierarchy
for def_name, value in prop_defs.items():
    new_dict[def_name] = []
    empty_def = []
    try:
        for item in value:
            if item.get('kds_appliesToClass'):
                if _is_valid(item['kds_appliesToClass'], valid_classes):
                    new_dict[def_name].append(item)
            else:
                # Item carries no class restriction: keep it aside as a
                # fallback in case nothing class-specific matches.
                empty_def.append(item)
        # Fall back to the unrestricted items only when no restricted
        # item matched this context.
        if not new_dict[def_name]:
            new_dict[def_name] = empty_def
    except AttributeError:
        # Entries are not dicts (e.g. a plain literal value) -- pass the
        # definition through unchanged.
        new_dict[def_name] = value
return new_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_processors(processor_cat, prop_defs, data_attr=None): """ reads the prop defs and adds applicable processors for the property Args: processor_cat(str): The category of processors to retreive prop_defs: property defintions as defined by the rdf defintions data_attr: the attr to manipulate during processing. Returns: list: a list of processors """
processor_defs = prop_defs.get(processor_cat,[]) processor_list = [] for processor in processor_defs: proc_class = PropertyProcessor[processor['rdf_type'][0]] processor_list.append(proc_class(processor.get('kds_params', [{}]), data_attr)) return processor_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_rdf_list(rdf_list): """ takes an rdf list and merges it into a python list args: rdf_list: the RdfDataset object with the list values returns: list of values """
# pdb.set_trace() if isinstance(rdf_list, list): rdf_list = rdf_list[0] rtn_list = [] # for item in rdf_list: item = rdf_list if item.get('rdf_rest') and item.get('rdf_rest',[1])[0] != 'rdf_nil': rtn_list += merge_rdf_list(item['rdf_rest'][0]) if item.get('rdf_first'): rtn_list += item['rdf_first'] rtn_list.reverse() return rtn_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def es_json(self, **kwargs): """ Returns a JSON object of the property for insertion into es """
rtn_list = []
rng_defs = get_prop_range_defs(self.class_names, self.kds_rangeDef)
rng_def = get_prop_range_def(rng_defs)
idx_types = rng_def.get('kds_esIndexType', []).copy()
if 'es_Ignore' in idx_types:
    # Property is explicitly excluded from the search index.
    return rtn_list
ranges = self.rdfs_range # pylint: disable=no-member
# copy the current data into the es_values attribute then run
# the es_processors to manipulate that data
self.es_values = self.copy()
# determine if using inverseOf object
if rng_def.get('kds_esLookup'):
    # Pull additional values via a dataset query keyed on the lookup
    # property, then de-duplicate.
    self.es_values += self.dataset.json_qry("%s.$" %
            getattr(self,
                    rng_def['kds_esLookup'][0])[0].pyuri,
            {'$': self.bound_class.subject})
    self.es_values = list(set(self.es_values))
self._run_processors(self._es_processors)
if not idx_types:
    # No explicit index type declared: treat the property as nested when
    # either its declared range or its actual values are RDF class
    # instances.
    nested = False
    for rng in ranges:
        if range_is_obj(rng, MODULE.rdfclass):
            nested = True
            break
    value_class = [value.__class__ for value in self.es_values
                   if isinstance(value, MODULE.rdfclass.RdfClassBase)]
    if value_class or nested:
        nested = True
    else:
        nested = False
    if nested:
        idx_types.append('es_Nested')
rtn_obj = {}
if 'es_Nested' in idx_types:
    if kwargs.get('depth', 0) > 6:
        # Recursion guard: beyond depth 6 emit bare URIs instead of
        # nesting further.
        return [val.subject.sparql_uri for val in self]
    for value in self.es_values:
        try:
            new_value = value.es_json('es_Nested', **kwargs)
        except AttributeError:
            # Value is not an RdfClassBase: convert it directly, flagging
            # the missing object.
            new_value = convert_value_to_es(value,
                                            ranges,
                                            self,
                                            "missing_obj")
        rtn_list.append(new_value)
        if rng_def.get("kds_esField"):
            # Group the nested values by the URI found in the configured
            # es_field, accumulating the es_value field's contents.
            es_value_fld = rng_def['kds_esValue'][0] \
                    if rng_def['kds_esValue'] else None
            es_field = rng_def['kds_esField'][0]
            for item in value.get(es_field):
                if new_value.get(es_value_fld):
                    val = new_value.get(es_value_fld , [])
                    try:
                        rtn_obj[item.pyuri] += val
                    except KeyError:
                        rtn_obj[item.pyuri] = val
else:
    # Non-nested: convert each value to its plain ES representation.
    for value in self.es_values:
        rtn_list.append(convert_value_to_es(value, ranges, self))
if rtn_obj:
    return rtn_obj
return rtn_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_in_batches(cmd_array): """Find patterns that match to `in_batches_pat` and replace them into `STDIN` or `TMPFILE`. :param cmd_array: `shlex.split`-ed command :returns: Modified `cmd_array` and tuple to show how each IN_BATCH is instantiated (TMPFILE or STDIN). Returned `cmd_array` drops IN_BATCH related tokens. :raises: `IndexError` if IN_BATCHes don't have sequential ID starting from 0 """
res_cmd_array = cmd_array[:] res_batch_to_file_s = [] in_batches_cmdidx = BatchCommand._in_batches_cmdidx(cmd_array) for batch_id, cmdidx in enumerate(in_batches_cmdidx): if cmdidx > 0 and cmd_array[cmdidx - 1] == '<': # e.g. `< IN_BATCH0` res_batch_to_file_s.append(BatchToFile('STDIN')) del res_cmd_array[cmdidx], res_cmd_array[cmdidx - 1] else: # IN_BATCHx is TMPFILE batch_to_file = BatchToFile('TMPFILE') res_batch_to_file_s.append(batch_to_file) res_cmd_array[cmdidx] = batch_to_file.tmpfile_path() return (res_cmd_array, tuple(res_batch_to_file_s))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_out_batch(cmd_array): """Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`. :param cmd_array: `shlex.split`-ed command :rtype: ([cmd_array], batch_from_file) :returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT). Returned `cmd_array` drops OUT_BATCH related tokens. :raises: `IndexError` if multiple OUT_BATCH are found """
res_cmd_array = cmd_array[:] res_batch_from_file = None out_batch_cmdidx = BatchCommand._out_batch_cmdidx(cmd_array) if out_batch_cmdidx is None: return (res_cmd_array, res_batch_from_file) if out_batch_cmdidx > 0 and cmd_array[out_batch_cmdidx - 1] == '>': # e.g. `> OUT_BATCH` res_batch_from_file = BatchFromFile('STDOUT') del res_cmd_array[out_batch_cmdidx], res_cmd_array[out_batch_cmdidx - 1] else: # OUT_BATCH is TMPFILE res_batch_from_file = BatchFromFile('TMPFILE') res_cmd_array[out_batch_cmdidx] = res_batch_from_file.tmpfile_path() return (res_cmd_array, res_batch_from_file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _in_batches_cmdidx(cmd_array): """Raise `IndexError` if IN_BATCH0 - IN_BATCHx is not used sequentially in `cmd_array` $ cat a.txt IN_BATCH1 IN_BATCH0 b.txt c.txt IN_BATCH2 => (3, 2, 5) """
in_batches_cmdidx_dict = {}
# First pass: map each batch id (the <n> in IN_BATCH<n>) to its position
# in cmd_array, rejecting duplicate ids.
for cmdidx, tok in enumerate(cmd_array):
    mat = BatchCommand.in_batches_pat.match(tok)
    if mat:
        batch_idx = int(mat.group(1))
        if batch_idx in in_batches_cmdidx_dict:
            raise IndexError(
                'IN_BATCH%d is used multiple times in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' % (batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
        in_batches_cmdidx_dict[batch_idx] = cmdidx
# Second pass: the ids must be exactly 0..n-1; a KeyError here means some
# IN_BATCH<k> in that range is missing (a gap in the sequence).
in_batches_cmdidx = []
for batch_idx in range(len(in_batches_cmdidx_dict)):
    try:
        cmdidx = in_batches_cmdidx_dict[batch_idx]
        in_batches_cmdidx.append(cmdidx)
    except KeyError:
        raise IndexError('IN_BATCH%d is not found in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' % (batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
return tuple(in_batches_cmdidx)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _out_batch_cmdidx(cmd_array): """Raise `IndexError` if OUT_BATCH is used multiple time :returns: OUT_BATCH cmdidx (None if OUT_BATCH is not in `cmd_array`) $ cat a.txt > OUT_BATCH => 3 """
out_batch_cmdidx = None for cmdidx, tok in enumerate(cmd_array): mat = BatchCommand.out_batch_pat.match(tok) if mat: if out_batch_cmdidx: raise IndexError( 'OUT_BATCH is used multiple times in command below:%s$ %s' % (os.linesep, list2cmdline(cmd_array))) out_batch_cmdidx = cmdidx return out_batch_cmdidx
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_builder(self, corpus):
    ''' Construct a WordBuilder primed with the supplied corpus. '''
    word_builder = WordBuilder(chunk_size=self.chunk_size)
    word_builder.ingest(corpus)
    return word_builder
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_common(self, filename):
    ''' Load common-name words from *filename*, one word per line.

    Returns the whitespace-stripped lines as a list of strings.
    '''
    # Use a context manager so the file handle is always closed; the
    # original called open() and never closed the file (descriptor leak).
    word_list = []
    with open(filename) as words:
        # Iterate the file object directly instead of readlines() to
        # avoid materializing the whole file in memory.
        for word in words:
            word_list.append(word.strip())
    return word_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_scientific_name(self):
    ''' Return a generated Latin binomial ("Genus species") flower name. '''
    # Genus is drawn before species, matching the original call order.
    parts = (self.genus_builder.get_word(), self.species_builder.get_word())
    return '%s %s' % parts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_common_name(self): ''' Get a flower's common name ''' name = random.choice(self.common_first) if random.randint(0, 1) == 1: name += ' ' + random.choice(self.common_first).lower() name += ' ' + random.choice(self.common_second).lower() return name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def certify_dict_schema( value, schema=None, key_certifier=None, value_certifier=None, required=None, allow_extra=None, ): """ Certify the dictionary schema. :param dict|Mapping|MutableMapping value: The mapping value to certify against the schema. :param object schema: The schema to validate with. :param callable key_certifier: A certifier to use on the dictionary's keys. :param callable value_certifier: A certifier to use on the dictionary's values. :param bool required: Whether the value can't be `None`. Defaults to True. :param bool allow_extra: Set to `True` to ignore extra keys. :return: The certified mapping :rtype: dict|Mapping|MutableMapping """
# Apply per-key / per-value certifiers across the whole mapping first.
if key_certifier is not None or value_certifier is not None:
    for key, val in value.items():
        if key_certifier is not None:
            key_certifier(key)
        if value_certifier is not None:
            value_certifier(val)

if schema:
    # The schema itself must be a dict mapping key -> certifier callable.
    if not isinstance(schema, dict):
        raise CertifierParamError(
            name='schema',
            value=schema,
        )

    # Every schema key must be present; its certifier validates the value.
    for key, certifier in schema.items():
        if key not in value:
            raise CertifierValueError(
                message="key \'{key}\' missing from dictionary".format(
                    key=key),
                required=required,
            )
        val = value[key]
        certifier(value=val)

    # At this point value contains every schema key, so any key-set
    # mismatch means extra keys -- rejected unless allow_extra is set.
    if not allow_extra and set(schema) != set(value):
        values = set(value) - set(schema)
        raise CertifierValueError(
            message="encountered unexpected keys: {unexpected!r}".format(
                unexpected=values),
            value=values,
            required=required,
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def certify_dict( value, schema=None, allow_extra=False, required=True, key_certifier=None, value_certifier=None, include_collections=False, ): """ Certifies a dictionary, checking it against an optional schema. The schema should be a dictionary, with keys corresponding to the expected keys in `value`, but with the values replaced by functions which will be called to with the corresponding value in the input. A simple example: :param dict|Mapping|MutableMapping value: The value to be certified. :param dict schema: The schema against which the value should be checked. :param bool allow_extra: Set to `True` to ignore extra keys. :param bool required: Whether the value can't be `None`. Defaults to True. :param callable key_certifier: callable that receives the key to certify (ignoring schema keys). :param callable value_certifier: callable that receives the value to certify (ignoring schema values). :param bool include_collections: Include types from collections. :return: The certified dict. :rtype: dict|Mapping|MutableMapping :raises CertifierTypeError: The type is invalid :raises CertifierValueError: The value is invalid """
cls = dict # Certify our kwargs: certify_params( (certify_bool, 'allow_extra', allow_extra), (certify_bool, 'include_collections', include_collections), ) if certify_required( value=value, required=required, ): return # Check the type(s): types = [cls] if include_collections: types.extend([Mapping, MutableMapping]) types = tuple(types) if not isinstance(value, types): raise CertifierTypeError( message="Expected {t} but the type is {cls!r}".format( cls=cls, t=value.__class__.__name__, ), value=value, required=required, ) certify_dict_schema( value=value, schema=schema, key_certifier=key_certifier, value_certifier=value_certifier, required=required, allow_extra=allow_extra, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def certify_iterable_schema(value, schema=None, required=True): """ Certify an iterable against a schema. :param iterable value: The iterable to certify against the schema. :param iterable schema: The schema to use :param bool required: Whether the value can't be `None`. Defaults to True. :return: The validated iterable. :rtype: iterable """
if schema is not None: if len(schema) != len(value): raise CertifierValueError( "encountered {extra} extra items".format( extra=len(value) - len(schema)), value=value, required=required, ) for index, certifier in enumerate(schema): try: certifier(value=value[index]) except CertifierError as exc: six.raise_from( CertifierValueError( message="invalid value {value!r} for item {index}".format( index=index, value=value[index]), value=value, required=required, ), exc, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def certify_iterable( value, types, certifier=None, min_len=None, max_len=None, schema=None, required=True ): """ Validates an iterable sequence, checking it against an optional schema. The schema should be a list of expected values replaced by functions which will be called to with the corresponding value in the input. :param iterable value: The value to be certified. :param tuple(object) types: A tuple of types of the expected iterable. :param func|None certifier: A function to be called on each value in the iterable to check that it is valid. :param int|None min_len: The minimum acceptable length for the iterable. If None, the minimum length is not checked. :param int|None max_len: The maximum acceptable length for the iterable. If None, the maximum length is not checked. :param tuple|None schema: The schema against which the value should be checked. For single-item tuple make sure to add comma at the end of schema tuple, that is, for example: schema=(certify_int(),) :param bool required: Whether the value can't be `None`. Defaults to True. :return: The certified iterable. :rtype: iterable :raises CertifierTypeError: The type is invalid :raises CertifierValueError: The valid is invalid. """
certify_required( value=value, required=required, ) certify_params( (_certify_int_param, 'max_len', max_len, dict(negative=False, required=False)), (_certify_int_param, 'min_len', min_len, dict(negative=False, required=False)), ) # Check the type(s): if types and not isinstance(value, types): raise CertifierTypeError( message="value is not an expected type ({value_type!r})".format( value_type=value.__class__.__name__ ), value=value, required=required, ) if min_len is not None and len(value) < min_len: raise CertifierValueError( message="expected at least {expected} elements, " "but set is of length {actual}".format(expected=min_len, actual=len(value)), value=value, required=required, ) if max_len is not None and len(value) > max_len: raise CertifierValueError( message=("expected at most {expected} elements, " "but {cls} is of length {actual}").format( expected=max_len, actual=len(value), cls=types, ), value=value, required=required, ) # Apply the certifier to all values: if certifier is not None: map(certifier, value) certify_iterable_schema( value=value, schema=schema, required=required, )