text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_property_names(obj): """ Recursively gets names of all properties implemented in specified object and its subobjects. The object can be a user defined object, map or array. Returned property name correspondently are object properties, map keys or array indexes. :param obj: an object to introspect. :return: a list with property names. """
def get_property_names(obj):
    """
    Recursively gets names of all properties implemented in the specified
    object and its subobjects. The object can be a user-defined object, map
    or array; returned names are correspondingly object properties, map keys
    or array indexes.

    :param obj: an object to introspect.
    :return: a list with property names (empty for None input).
    """
    property_names = []
    if obj is not None:  # idiom fix: identity test instead of != None
        cycle_detect = []  # guards against infinite recursion on cyclic object graphs
        RecursiveObjectReader._perform_get_property_names(
            obj, None, property_names, cycle_detect)
    return property_names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_properties(obj): """ Get values of all properties in specified object and its subobjects and returns them as a map. The object can be a user defined object, map or array. Returned properties correspondently are object properties, map key-pairs or array elements with their indexes. :param obj: an object to get properties from. :return: a map, containing the names of the object's properties and their values. """
def get_properties(obj):
    """
    Gets values of all properties in the specified object and its subobjects
    and returns them as a map. Returned properties are correspondingly object
    properties, map key-pairs or array elements with their indexes.

    :param obj: an object to get properties from.
    :return: a map of property names to values (empty for None input).
    """
    properties = {}
    if obj is not None:  # idiom fix: identity test instead of != None
        cycle_detect = []  # guards against infinite recursion on cyclic object graphs
        RecursiveObjectReader._perform_get_properties(
            obj, None, properties, cycle_detect)
    return properties
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hookable(cls): """ Initialise hookery in a class that declares hooks by decorating it with this decorator. This replaces the class with another one which has the same name, but also inherits Hookable which has HookableMeta set as metaclass so that sub-classes of cls will have hook descriptors initialised properly. When you say: @hookable class My: before = Hook() then @hookable changes My.before to be a HookDescriptor which is then changed into Hook if anyone accesses it. There is no need to decorate sub-classes of cls with @hookable. """
assert isinstance(cls, type) # For classes that won't have descriptors initialised by metaclass, need to do it here. hook_definitions = [] if not issubclass(cls, Hookable): for k, v in list(cls.__dict__.items()): if isinstance(v, (ClassHook, InstanceHook)): delattr(cls, k) if v.name is None: v.name = k hook_definitions.append((k, v)) hookable_cls = type(cls.__name__, (cls, Hookable), {}) for k, v in hook_definitions: setattr(hookable_cls, k, HookDescriptor(defining_hook=v, defining_class=hookable_cls)) return hookable_cls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _triggering_ctx(self): """ Context manager that ensures that a hook is not re-triggered by one of its handlers. """
# NOTE(review): presumably decorated with @contextlib.contextmanager at the
# definition site (it yields once) — confirm in the full source.
if self._is_triggering:
    # Re-triggering from inside one of this hook's own handlers would
    # recurse indefinitely; forbid it explicitly.
    raise RuntimeError('{} cannot be triggered while it is being handled'.format(self))
self._is_triggering = True
try:
    yield self
finally:
    # Always clear the flag, even if a handler raised.
    self._is_triggering = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unregister_handler(self, handler_or_func): """ Remove the handler from this hook's list of handlers. This does not give up until the handler is found in the class hierarchy. """
# Search this hook's own (direct) handlers first.
index = -1
for i, handler in enumerate(self._direct_handlers):
    # Match either the registered handler object itself or the original
    # function it wraps.
    if handler is handler_or_func or handler._original_func is handler_or_func:
        index = i
        break
if index >= 0:
    self._direct_handlers.pop(index)
    self._cached_handlers = None  # invalidate the handler cache
elif self.parent_class_hook is not None and self.parent_class_hook.has_handler(handler_or_func):
    # Not found locally: delegate up to the parent class hook.
    self.parent_class_hook.unregister_handler(handler_or_func)
    self._cached_handlers = None
elif self.instance_class_hook is not None and self.instance_class_hook.has_handler(handler_or_func):
    # Instance hooks also consult the class-level hook they derive from.
    self.instance_class_hook.unregister_handler(handler_or_func)
    self._cached_handlers = None
else:
    raise ValueError('{} is not a registered handler of {}'.format(handler_or_func, self))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def notifySolved(self, identifier, title): """Notifies the user that a particular exercise has been solved. """
def notifySolved(self, identifier, title):
    """Notify the user that the exercise named by `title` has been solved.

    Returns an empty dict as the (unused) command result.
    """
    message = u"Congratulations! You have completed the '{title}' exercise.".format(title=title)
    notify(self.workbench, u"Congratulations", message)
    return {}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepend_urls(self): """ Add the following array of urls to the Tileset base urls """
def prepend_urls(self):
    """Add the generate/download/status/stop endpoints to the Tileset base urls."""
    base = r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/"
    patterns = []
    # All four endpoints share the same URL shape; build them in one pass.
    for action in ("generate", "download", "status", "stop"):
        regex = (base + action + "%s$") % (self._meta.resource_name, trailing_slash())
        patterns.append(url(regex,
                            self.wrap_view(action),
                            name="api_tileset_" + action))
    return patterns
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate(self, request, **kwargs): """ proxy for the tileset.generate method """
def generate(self, request, **kwargs):
    """Proxy for the tileset.generate method.

    Looks up the tileset by the primary key in the URL and returns whatever
    tileset.generate() produces; tastypie handles serialization.
    """
    # Reject anything that is not a GET early.
    self.method_check(request, allowed=['get'])
    # A bare bundle is enough for the cached object lookup.
    bundle = self.build_bundle(request=request)
    tileset = self.cached_obj_get(
        bundle=bundle, **self.remove_api_resource_names(kwargs))
    return self.create_response(request, tileset.generate())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download(self, request, **kwargs): """ proxy for the helpers.tileset_download method """
def download(self, request, **kwargs):
    """Proxy for the helpers.tileset_download method.

    Serves the generated tileset file as an attachment, or reports
    {'status': 'not generated'} when the file does not exist yet.
    """
    # Reject anything that is not a GET early.
    self.method_check(request, allowed=['get'])
    bundle = self.build_bundle(request=request)
    tileset = self.cached_obj_get(
        bundle=bundle, **self.remove_api_resource_names(kwargs))
    path = os.path.abspath(helpers.get_tileset_filename(tileset))
    if not os.path.isfile(path):
        return self.create_response(request, {'status': 'not generated'})
    response = serve(request, os.path.basename(path), os.path.dirname(path))
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(os.path.basename(path))
    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure(self, config): """ Configures the component with specified parameters. :param config: configuration parameters to set. """
def configure(self, config):
    """
    Configures the component with specified parameters.

    Reads the "dependencies" section and stores each entry either as a
    parsed Descriptor or, if parsing fails, as the raw locator value.

    :param config: configuration parameters to set.
    """
    dependencies = config.get_section("dependencies")
    for name in dependencies.get_key_names():
        locator = dependencies.get(name)
        if locator is None:  # idiom fix: identity test instead of == None
            continue
        try:
            descriptor = Descriptor.from_string(locator)
            # Prefer the parsed descriptor; fall back to the raw locator.
            self._dependencies[name] = descriptor if descriptor is not None else locator
        except Exception:
            # Unparsable locators are stored verbatim as a best-effort fallback.
            self._dependencies[name] = locator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _locate(self, name): """ Gets a dependency locator by its name. :param name: the name of the dependency to locate. :return: the dependency locator or null if locator was not configured. """
if name == None: raise Exception("Dependency name cannot be null") if self._references == None: raise Exception("References shall be set") return self._dependencies.get(name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_optional(self, name): """ Gets all optional dependencies by their name. :param name: the dependency name to locate. :return: a list with found dependencies or empty list of no dependencies was found. """
def get_optional(self, name):
    """
    Gets all optional dependencies by their name.

    :param name: the dependency name to locate.
    :return: a list with found dependencies, or None when no locator is
        configured for the name (NOTE(review): the original docstring
        promised an empty list here — behavior kept as-is).
    """
    locator = self._locate(name)
    if locator is None:  # idiom fix: identity test instead of != None
        return None
    return self._references.get_optional(locator)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_one_optional(self, name): """ Gets one optional dependency by its name. :param name: the dependency name to locate. :return: a dependency reference or null of the dependency was not found """
def get_one_optional(self, name):
    """
    Gets one optional dependency by its name.

    :param name: the dependency name to locate.
    :return: a dependency reference, or None if the dependency was not found.
    """
    locator = self._locate(name)
    if locator is None:  # idiom fix: identity test instead of != None
        return None
    return self._references.get_one_optional(locator)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(self, name, required): """ Finds all matching dependencies by their name. :param name: the dependency name to locate. :param required: true to raise an exception when no dependencies are found. :return: a list of found dependencies """
def find(self, name, required):
    """
    Finds all matching dependencies by their name.

    :param name: the dependency name to locate.
    :param required: True to raise an exception when no dependencies are found.
    :return: a list of found dependencies, or None when nothing matched and
        required is False.
    :raises ReferenceException: when required is True and no locator exists.
    """
    if name is None:  # idiom fix: identity tests instead of == None
        raise Exception("Name cannot be null")
    locator = self._locate(name)
    if locator is None:
        if required:
            raise ReferenceException(None, name)
        return None
    return self._references.find(locator, required)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def obtain_to(filename): """ Return the digital elevation map projected to the lat lon matrix coordenates. Keyword arguments: filename -- the name of a netcdf file. """
def obtain_to(filename):
    """Return the digital elevation map projected onto the lat/lon matrix
    coordinates read from the given netcdf file.

    Keyword arguments:
    filename -- the name of a netcdf file.
    """
    root, _ = nc.open(filename)
    latitudes = nc.getvar(root, 'lat')[0, :]
    longitudes = nc.getvar(root, 'lon')[0, :]
    nc.close(root)
    return obtain(latitudes, longitudes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resolve(config, config_as_default = False): """ Resolves an "options" configuration section from component configuration parameters. :param config: configuration parameters :param config_as_default: (optional) When set true the method returns the entire parameter set when "options" section is not found. Default: false :return: configuration parameters from "options" section """
def resolve(config, config_as_default=False):
    """Resolve the "options" configuration section from component parameters.

    :param config: configuration parameters.
    :param config_as_default: when True, return the entire parameter set
        if the "options" section is empty or missing.
    :return: configuration parameters from the "options" section.
    """
    options = config.get_section("options")
    # Fall back to the whole config only when explicitly requested.
    if config_as_default and len(options) == 0:
        options = config
    return options
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_form_view(self, view, opts): """Checks if the form referenced in the view exists or attempts to create it by parsing the template """
# Resolve the form name: explicit "name" option, else the "form" option.
name = opts.get("name", opts.get("form"))
if isinstance(name, Form):
    # Already a concrete Form instance — nothing to create or register.
    return
template = opts.get("template", getattr(view, "template", None))
if not template:
    if not name:
        raise NoFormError("No form name specified in the form action and no template")
    # A named form without a template is assumed to exist already.
    return
try:
    # Variable name under which the form is referenced inside the template.
    as_ = opts.get("var_name", getattr(self.form, "as_", "form"))
    form_class = create_from_template(current_app, template, var_name=as_)
except NoFormError:
    # Parsing failed: fatal only when no fallback name was given.
    if not name:
        raise
    return
if not name:
    name = view.name  # default the registry key to the view's name
self.forms[name] = form_class
self.form_created_from_view_signal.send(self, view=view, form_class=form_class)
return form_class
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populate_obj(self, obj=None, form=None): """Populates an object with the form's data """
# Default to the form stored on the current request context when none given.
if not form:
    form = current_context.data.form
if obj is None:
    # AttrDict gives attribute-style access to the populated values.
    obj = AttrDict()
form.populate_obj(obj)
return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def overlap(self, feature, stranded: bool=False): """Determine if a feature's position overlaps with the entry Args: feature (class): GFF3Entry object stranded (bool): allow features to overlap on different strands if True [default: False] Returns: bool: True if features overlap, else False """
def overlap(self, feature, stranded=False):
    """Determine if a feature's position overlaps with this entry.

    Args:
        feature: GFF3Entry-like object exposing start, end, strand.
        stranded (bool): when True, features on incompatible strands are
            treated as non-overlapping [default: False].

    Returns:
        bool: True if the features overlap, else False.
    """
    if stranded:
        strand = self.strand
        other = feature.strand
        # Unknown ('.') or opposite strands never overlap in stranded mode.
        if (strand == '.'
                or (strand == '+' and other in ('-', '.'))
                or (strand == '-' and other in ('+', '.'))):
            return False
    # Inclusive intervals [start, end] overlap iff each starts no later than
    # the other ends. Replaces the original O(n)-memory set-of-range
    # intersection with an O(1) comparison.
    # Assumes start <= end on both entries (GFF3 invariant) — TODO confirm.
    return feature.start <= self.end and self.start <= feature.end
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self): """Restore GFF3 entry to original format Returns: str: properly formatted string containing the GFF3 entry """
def write(self):
    """Restore the GFF3 entry to its original tab-separated format.

    Unset (None) field values are replaced with the GFF3 placeholder '.'.

    Returns:
        str: properly formatted string containing the GFF3 entry
    """
    # Format attributes for writing.
    attrs = self.attribute_string()
    # BUG FIX: the original iterated self.__dict__.keys() and compared each
    # *name* (a str) against NoneType, which never matched, so None values
    # were never replaced. Compare the stored values instead.
    for attr_name, value in list(self.__dict__.items()):
        if value is None:
            setattr(self, attr_name, '.')
    # Format entry for writing.
    return '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}{9}'.format(
        self.seqid, self.source, self.type, str(self.start), str(self.end),
        self._score_str, self.strand, self.phase, attrs, os.linesep)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attribute_string(self): """Restore an entries attributes in original format, escaping reserved characters when necessary Returns: str: escaped attributes as tag=value pairs, separated by semi-colon """
def attribute_string(self):
    """Restore an entry's attributes in original format, escaping reserved
    characters when necessary.

    Returns:
        str: escaped attributes as tag=value pairs, separated by semicolons
    """
    # Percent-encode the characters that GFF3 reserves in column 9.
    escape_map = {ord('='): '%3D', ord(','): '%2C', ord(';'): '%3B',
                  ord('&'): '%26', ord('\t'): '%09', }
    attrs = self.attributes
    if type(attrs) is not OrderedDict:
        # Attributes were never parsed — return the raw string unchanged.
        return attrs
    reserved = []
    others = []
    for tag, value in attrs.items():
        tag = tag.translate(escape_map)
        if type(value) is list:
            value = ','.join(item.translate(escape_map) for item in value)
        else:
            value = value.translate(escape_map)
        pair = '{0}={1}'.format(tag, value)
        # Reserved (capitalised) tags are emitted before all other tags.
        (reserved if tag[0].isupper() else others).append(pair)
    return ';'.join(reserved + others)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iterate(self, start_line=None, parse_attr=True, headers=False, comments=False): """Iterate over GFF3 file, returning GFF3 entries Args: start_line (str): Next GFF3 entry. If 'handle' has been partially read and you want to start iterating at the next entry, read the next GFF3 entry and pass it to this variable when calling gff3_iter. See 'Examples' for proper usage. parse_attr (bool): Parse attributes column into a dictionary such that the string "tag1=value1;tag2=value2" becomes: tag1: value1 tag2: value2 headers (bool): Yields headers if True, else skips lines starting with "##" comments (bool): Yields comments if True, else skips lines starting with "#" Yields: GFF3Entry: class containing all GFF3 data, yields str for headers if headers options is True then yields GFF3Entry for entries Examples: The following three examples demonstrate how to use gff3_iter. Note: These doctests will not pass, examples are only in doctest format as per convention. bio_utils uses pytests for testing. """
handle = self.handle
# Speed tricks: bind method lookups to locals to reduce function calls.
split = str.split
strip = str.strip
if start_line is None:
    line = next(handle)  # Read first GFF3 line
else:
    line = start_line  # Resume from the caller-supplied line
# Check if input is text or a bytestream and decode accordingly.
if (isinstance(line, bytes)):
    def next_line(i):
        return next(i).decode('utf-8')
    line = strip(line.decode('utf-8'))
else:
    next_line = next
    line = strip(line)
# Manual 'for' loop isn't needed to read the file properly and quickly,
# unlike fasta_iter and fastq_iter, but it is necessary to begin iterating
# partway through a file when the user gives a starting line.
try:
    # Manually constructed loop using 'next' for speed.
    while True:  # Loop until a StopIteration exception is raised
        self.current_line += 1
        data = GFF3Entry()  # Initialize early to prevent access error
        if line.startswith('##FASTA'):  # Stop at embedded FASTA section
            raise FastaFound
        if line.startswith('##') and not headers:
            line = strip(next_line(handle))
            continue
        elif line.startswith('##') and headers:
            yield line
            line = strip(next_line(handle))
            continue
        if line.startswith('#') and not comments:
            line = strip(next_line(handle))
            continue
        elif line.startswith('#') and comments:
            yield line
            line = strip(next_line(handle))
            continue
        split_line = split(line, '\t')
        data.origline = line
        data.seqid = split_line[0]
        data.source = split_line[1]
        data.type = split_line[2]
        data.start = int(split_line[3])
        data.end = int(split_line[4])
        try:  # Score becomes a float unless it is the '.' placeholder
            data.score = float(split_line[5])
        except ValueError:
            data.score = split_line[5]
        data._score_str = split_line[5]  # keep original text for round-tripping
        data.strand = split_line[6]
        try:  # Phase becomes an int unless not given ('.')
            data.phase = int(split_line[7])
        except ValueError:
            data.phase = split_line[7]
        data.attributes = split_line[8]
        if parse_attr:
            # Parse "tag1=v1;tag2=v2" into an ordered mapping; comma-separated
            # values become lists.
            attributes = split(data.attributes, ';')
            data.attributes = OrderedDict()
            for attribute in attributes:
                split_attribute = attribute.split('=')
                key = split_attribute[0]
                value = split_attribute[-1].split(',') if ',' in \
                    split_attribute[-1] else split_attribute[-1]
                if not key == '':  # Avoid empty key from a trailing semicolon
                    data.attributes[key] = value
        line = strip(next_line(handle))  # Raises StopIteration at EOF
        yield data
except StopIteration:
    # Yield the last GFF3 entry (the loop yields only after reading ahead).
    if data.origline:
        yield data
    else:  # handle case where the GFF ends in a comment
        pass
except FastaFound:  # When FASTA found, last entry is a repeat, so pass
    pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put(self, locator = None, component = None): """ Puts a new reference into this reference map. :param locator: a component reference to be added. :param component: a locator to find the reference by. """
def put(self, locator=None, component=None):
    """
    Puts a new reference into this reference map.

    :param locator: a locator to find the reference by.
    :param component: a component reference to be added.
    :raises Exception: if component is None.
    """
    if component is None:  # idiom fix: identity test instead of == None
        raise Exception("Component cannot be null")
    # Lock as a context manager replaces the manual acquire/try/finally.
    with self._lock:
        self._references.append(Reference(locator, component))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_all(self, locator): """ Removes all component references that match the specified locator. :param locator: a locator to remove reference by. :return: a list, containing all removed references. """
def remove_all(self, locator):
    """
    Removes all component references that match the specified locator.

    :param locator: a locator to remove references by.
    :return: a list containing all removed components.
    """
    components = []
    if locator is None:  # idiom fix: identity test instead of == None
        return components
    with self._lock:  # context manager replaces manual acquire/release
        # Reversed iteration keeps removal safe: already-visited indices
        # are not shifted by the removals.
        for reference in reversed(self._references):
            if reference.match(locator):
                self._references.remove(reference)
                components.append(reference.get_component())
    return components
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_locators(self): """ Gets locators for all registered component references in this reference map. :return: a list with component locators. """
def get_all_locators(self):
    """
    Gets locators for all registered component references in this map.

    :return: a list with component locators.
    """
    # Context-managed lock and a comprehension replace the manual
    # acquire/try/finally append loop.
    with self._lock:
        return [reference.get_locator() for reference in self._references]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all(self): """ Gets all component references registered in this reference map. :return: a list with component references. """
def get_all(self):
    """
    Gets all component references registered in this reference map.

    :return: a list with component references.
    """
    # Context-managed lock and a comprehension replace the manual
    # acquire/try/finally append loop.
    with self._lock:
        return [reference.get_component() for reference in self._references]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_one_optional(self, locator): """ Gets an optional component reference that matches specified locator. :param locator: the locator to find references by. :return: a matching component reference or null if nothing was found. """
def get_one_optional(self, locator):
    """
    Gets an optional component reference that matches the specified locator.

    :param locator: the locator to find references by.
    :return: a matching component reference, or None if nothing was found
        (lookup failures are swallowed by design — "optional").
    """
    try:
        matches = self.find(locator, False)
    except Exception:
        return None
    return matches[0] if len(matches) > 0 else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_one_required(self, locator): """ Gets a required component reference that matches specified locator. :param locator: the locator to find a reference by. :return: a matching component reference. :raises: a [[ReferenceException]] when no references found. """
def get_one_required(self, locator):
    """
    Gets a required component reference that matches the specified locator.

    :param locator: the locator to find a reference by.
    :return: a matching component reference.
    :raises ReferenceException: when no references are found (raised by find).
    """
    matches = self.find(locator, True)
    return matches[0] if len(matches) > 0 else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initialize(**kwargs): """ Loads the globally shared YAML configuration """
# Loads the globally shared YAML configuration.
global config
config_opts = kwargs.setdefault('config', {})
# A bare string is shorthand for the config filename.
# NOTE(review): `basestring` is Python 2 only — this module presumably
# targets Python 2; confirm before porting.
if isinstance(config_opts, basestring):
    config_opts = {'config_filename': config_opts}
kwargs['config'] = config_opts
if 'environment' in kwargs:
    config_opts['environment'] = kwargs['environment']
config.load_config(**config_opts)
# Overlay the named subconfig (if any), then the application config.
if kwargs.get('name'):
    subconfig = config.get(kwargs.get('name'), {})
    config.overlay_add(subconfig)
config.overlay_add(app_config)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_amend_key_(self,key,value): """ This will take a stringified key representation and value and load it into the configuration file for furthur usage. The good part about this method is that it doesn't clobber, only appends when keys are missing. """
def config_amend_key_(self, key, value):
    """
    Load a dotted-key/value pair into the configuration, creating
    intermediate dicts as needed. Existing values are never clobbered —
    the value is only set when the leaf key is missing.

    :param key: dotted key path, e.g. 'a.b.c'.
    :param value: the value to install at the leaf (if absent).
    :raises Exception: if an intermediate key already holds a scalar.
    """
    parts = key.split('.')
    leaf = parts.pop()
    node = self._cfg
    crumbs = []
    for part in parts:
        node = node.setdefault(part, {})
        crumbs.append(part)
        if not isinstance(node, dict):
            raise Exception('.'.join(crumbs) + ' has conflicting dict/scalar types!')
    node.setdefault(leaf, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_amend_(self,config_amend): """ This will take a YAML or dict configuration and load it into the configuration file for furthur usage. The good part about this method is that it doesn't clobber, only appends when keys are missing. This should provide a value in dictionary format like: { 'default': { 'togglsync': { 'dsn': 'sqlite:///zerp-toggl.db', 'default': { 'username': 'abced', 'toggl_api_key': 'arfarfarf', }, 'dev': { 'cache': False } } } OR at user's preference can also use yaml format: default: togglsync: dsn: 'sqlite:///zerp-toggl.db' default: username: 'abced' toggl_api_key: 'arfarfarf' dev: cache: False Then the code will append the key/values where they may be missing. If there is a conflict between a dict key and a value, this function will throw an exception. IMPORTANT: after making the change to the configuration, remember to save the changes with cfg.save_() """
# Accept either a dict or a YAML string describing the amendments.
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# constructors on untrusted input — confirm the input is trusted or switch
# to yaml.safe_load.
if not isinstance(config_amend, dict):
    config_amend = yaml.load(config_amend)
def merge_dicts(source, target, breadcrumbs=None):
    """ Function to update the configuration if required.
        Returns True if a change was made.
    """
    changed = False
    if breadcrumbs is None:
        breadcrumbs = []
    # Don't descend if we're not a dict
    if not isinstance(source, dict):
        return source
    # Merge each source key into the target, without clobbering.
    for k, v in source.items():
        # New key, simply add.
        if k not in target:
            target[k] = v
            changed = True
            continue
        # Not a new key... so is the existing value a dict?
        elif isinstance(target[k], dict):
            trail = breadcrumbs + [k]
            if isinstance(v, dict):
                # Both sides are dicts: recurse.
                if merge_dicts(v, target[k], trail):
                    changed = True
            else:
                raise Exception('.'.join(trail) + ' has conflicting dict/scalar types!')
        else:
            trail = breadcrumbs + [k]
            if isinstance(v, dict):
                # Existing scalar vs incoming dict is a structural conflict.
                raise Exception('.'.join(trail) + ' has conflicting dict/scalar types!')
    return changed
# Reload overlays only when the merge actually changed something.
if merge_dicts(config_amend, self._cfg):
    self.overlay_load()
return self._cfg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_string(self, input_string): """ Return string type user input """
def get_string(self, input_string):
    """
    Return the value supplied for a string-type command-line flag.

    :param input_string: one of '--input', '--outname' or '--framework'.
    :return: the flag's value (absolute path for '--input'), or None when
        an optional flag was not set. Exits with a short help message when
        a required flag is missing or a flag has no value.
    """
    if input_string not in ('--input', '--outname', '--framework'):
        return None
    # Was the flag set?
    try:
        index = self.args.index(input_string) + 1
    except ValueError:
        # It wasn't: a missing required flag is fatal; optional returns default.
        if input_string in self.required:
            # BUG FIX: the original called .format(input_string) with a
            # *named* {flag} placeholder, which raised KeyError at runtime.
            print("\n {flag} is required".format(flag=input_string))
            print_short_help()
            sys.exit(1)
        return None
    # The flag was set, so check that a value follows it.
    try:
        if self.args[index] in self.flags:
            print("\n {flag} was set but a value was not specified".format(flag=input_string))
            print_short_help()
            sys.exit(1)
    except IndexError:
        # BUG FIX: same positional-vs-named .format() error as above.
        print("\n {flag} was set but a value was not specified".format(flag=input_string))
        print_short_help()
        sys.exit(1)
    # A value was set: return the appropriate representation.
    if input_string == '--input':
        return os.path.abspath(self.args[index])
    elif input_string == '--outname':
        return format(self.args[index])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select_executor(elem, doc): """Determines the executor for the code in `elem.text`. The elem attributes and classes select the executor in this order (highest to lowest): - first element class (.class) determines language and thus executor Args: elem The AST element. doc The document. Returns: The command to execute code. """
def select_executor(elem, doc):
    """Determine the executor for the code in `elem.text`.

    Precedence (highest first): the 'cmd' attribute (used verbatim), the
    'runas' attribute (looked up in EXECUTORS), the first element class
    (unless it is 'exec'), and finally the default executor.

    Args:
        elem: The AST element.
        doc: The document.

    Returns:
        The command used to execute the code.
    """
    # Default is resolved eagerly, matching the original lookup order.
    executor = EXECUTORS['default']
    attrs = elem.attributes
    if 'cmd' in attrs:
        executor = attrs['cmd']
    elif 'runas' in attrs:
        executor = EXECUTORS[attrs['runas']]
    elif elem.classes[0] != 'exec':
        executor = EXECUTORS[elem.classes[0]]
    return executor
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute_code_block(elem, doc): """Executes a code block by passing it to the executor. Args: elem The AST element. doc The document. Returns: The output of the command. """
def execute_code_block(elem, doc):
    """Execute a code block by handing it to the selected executor.

    Args:
        elem: The AST element.
        doc: The document.

    Returns:
        The combined stdout/stderr of the executed command.
    """
    cmd = select_executor(elem, doc).split(' ')
    source = elem.text
    # Plot blocks get wrapped so they emit tikz instead of opening a window.
    if 'plt' in elem.attributes or 'plt' in elem.classes:
        source = save_plot(source, elem)
    cmd.append(source)
    if 'args' in elem.attributes:
        cmd.extend(elem.attributes['args'].split())
    workdir = None
    if 'wd' in elem.attributes:
        workdir = elem.attributes['wd']
    return subprocess.run(cmd, encoding='utf8', stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, cwd=workdir).stdout
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute_interactive_code(elem, doc): """Executes code blocks for a python shell. Parses the code in `elem.text` into blocks and executes them. Args: elem The AST element. doc The document. Return: The code with inline results. """
# Strip the leading 4 characters (the '>>> '/indent prefix) from each line.
code_lines = [l[4:] for l in elem.text.split('\n')]
# Group the lines into blocks: a new block starts at every line that is
# neither indented nor empty (i.e. a new top-level statement).
code_blocks = [[code_lines[0]]]
for line in code_lines[1:]:
    if line.startswith(' ') or line == '':
        code_blocks[-1].append(line)
    else:
        code_blocks.append([line])
final_code = []
try:
    # pexpect's replwrap drives an interactive python session.
    child = replwrap.REPLWrapper("python", ">>> ", None)
except NameError:
    # pexpect (and therefore replwrap) is not importable.
    pf.debug('Can not run interactive session. No output produced ' +
             '(Code was:\n{!s}\n)'.format(elem))
    pf.debug('Please pip install pexpect.')
    return ''
for code_block in code_blocks:
    result = child.run_command('\n'.join(code_block) + '\n').rstrip('\r\n')
    # Re-emit the input with shell-style prompts ('>>> ' then '... ').
    final_code += [('>>> ' if i == 0 else '... ') + l
                   for i, l in enumerate(code_block)]
    if result:
        # Append interpreter output, skipping echoed input lines.
        final_code += [r for r in result.split('\n')
                       if r.strip() not in code_block]
return '\n'.join(final_code)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_file(filename): """Reads a file which matches the pattern `filename`. Args: filename The filename pattern Returns: The file content or the empty string, if the file is not found. """
def read_file(filename):
    """Read a file that matches the glob pattern `filename`.

    Args:
        filename: The filename pattern (searched recursively).

    Returns:
        The file content, or '' when no match is found. If several files
        match, the first is used.
    """
    matches = glob.glob('**/{}'.format(filename), recursive=True)
    if not len(matches):
        pf.debug('No file "{}" found.'.format(filename))
        return ''
    if len(matches) > 1:
        pf.debug('File pattern "{}" ambiguous. Using first.'.format(filename))
    with open(matches[0], 'r') as handle:
        return handle.read()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_lines(code, line_spec): """Removes all lines not matching the line_spec. Args: code The code to filter line_spec The line specification. This should be a comma-separated string of lines or line ranges, e.g. 1,2,5-12,15 If a line range starts with -, all lines up to this line are included. If a line range ends with -, all lines from this line on are included. All lines mentioned (ranges are inclusive) are used. Returns: Only the specified lines. """
def filter_lines(code, line_spec):
    """Keep only the lines of `code` selected by `line_spec`.

    Args:
        code: The code to filter.
        line_spec: Comma-separated line numbers and inclusive ranges, e.g.
            "1,2,5-12,15". A range with a missing start begins at line 1;
            a missing end extends to the last line.

    Returns:
        Only the specified lines, joined with newlines.
    """
    lines = code.splitlines()
    specs = [part.strip() for part in line_spec.split(',')]
    # Plain numbers go straight into the selection set.
    selected = {int(part) for part in specs if '-' not in part}
    # Expand each range, defaulting open ends to the file boundaries.
    for rng in (part for part in specs if '-' in part):
        lo, hi = rng.split('-')
        lo = int(lo) if lo else 1
        hi = int(hi) if hi else len(lines)
        selected.update(range(lo, hi + 1))
    return '\n'.join(line for num, line in enumerate(lines, 1)
                     if num in selected)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_import_statements(code): """Removes lines with import statements from the code. Args: code: The code to be stripped. Returns: The code without import statements. """
def remove_import_statements(code):
    """Remove lines with import statements from the code.

    Args:
        code: The code to be stripped.

    Returns:
        The code without import statements, with leading and trailing
        blank lines trimmed.
    """
    kept = [line for line in code.splitlines()
            if not line.lstrip().startswith(('import ', 'from '))]
    # Trim blank lines left at either end by removed imports.
    while kept and kept[0] == '':
        kept.pop(0)
    while kept and kept[-1] == '':
        kept.pop()
    return '\n'.join(kept)
def save_plot(code, elem):
    """Wrap matplotlib *code* so its figure is emitted as tikz code.

    The figure size comes from the ``plt`` attribute
    (``plt=width,height``) or from separate ``width``/``height``
    attributes; without either, 6cm x 4cm is used.

    Returns:
        The code plus the boilerplate invoking matplotlib2tikz.
    """
    attrs = elem.attributes
    if 'plt' in attrs:
        figurewidth, figureheight = attrs['plt'].split(',')
    else:
        figureheight = attrs.get('height', '4cm')
        figurewidth = attrs.get('width', '6cm')
    return f"""import matplotlib
matplotlib.use('TkAgg')
{code}
from matplotlib2tikz import get_tikz_code
tikz = get_tikz_code(figureheight='{figureheight}', figurewidth='{figurewidth}') # noqa
print(tikz)"""
def trimpath(attributes):
    """Return a shortened form of the ``file`` attribute's path.

    With ``pathdepth=full`` the path is returned untouched; with a
    numeric ``pathdepth`` only that many trailing components are kept;
    without ``pathdepth`` only the file name is returned.
    """
    full_path = attributes['file']
    if 'pathdepth' not in attributes:
        return os.path.basename(full_path)
    depth = attributes['pathdepth']
    if depth == 'full':
        return full_path
    components = []
    remainder = full_path
    while len(components) < int(depth) and remainder:
        remainder, tail = os.path.split(remainder)
        components.insert(0, tail)
    return os.path.join(*components)
def prepare(doc):
    """Initialise the per-document filter state.

    Resets the caption/plot detection flags and the listings counter.
    """
    doc.caption_found = doc.plot_found = False
    doc.listings_counter = 0
def maybe_center_plot(result):
    """Embed tikz output in a ``center`` environment.

    The matplotlib2tikz banner comment is used to detect tikz pictures;
    *result* is returned unchanged when no banner is found.
    """
    marker = re.search('(% .* matplotlib2tikz v.*)', result)
    if marker is None:
        return result
    return '\\begin{center}\n' + result[marker.end():] + '\n\\end{center}'
def action(elem, doc):  # noqa
    """Process ``pf.CodeBlock`` elements.

    Handles file inclusion, code execution (plain and interactive), plot
    conversion, line filtering and caption/listing wrapping.

    Args:
        elem: The element to process.
        doc: The document.

    Returns:
        A changed element list, or None for other element types.
    """
    if isinstance(elem, pf.CodeBlock):
        doc.listings_counter += 1
        elems = [elem] if 'hide' not in elem.classes else []
        if 'file' in elem.attributes:
            elem.text = read_file(elem.attributes['file'])
            filename = trimpath(elem.attributes)
            prefix = pf.Emph(pf.Str('File:'))
        if 'exec' in elem.classes:
            if 'interactive' in elem.classes or elem.text[:4] == '>>> ':
                elem.text = execute_interactive_code(elem, doc)
            else:
                result = execute_code_block(elem, doc)
                if 'hideimports' in elem.classes:
                    elem.text = remove_import_statements(elem.text)
                if 'plt' in elem.attributes or 'plt' in elem.classes:
                    doc.plot_found = True
                    result = maybe_center_plot(result)
                    block = pf.RawBlock(result, format='latex')
                else:
                    block = pf.CodeBlock(result, classes=['changelog'])
                elems += [pf.Para(pf.Emph(pf.Str('Output:'))), block]
        if 'lines' in elem.attributes:
            elem.text = filter_lines(elem.text, elem.attributes['lines'])
        label = elem.attributes.get('label', f'cl:{doc.listings_counter}')
        if 'caption' in elem.attributes.keys():
            doc.caption_found = True
            cap = pf.convert_text(elem.attributes['caption'], output_format='latex')  # noqa
            if 'shortcaption' in elem.attributes.keys():
                shortcap = pf.convert_text(elem.attributes['shortcaption'], output_format='latex')  # noqa
            else:
                shortcap = cap
            if 'file' in elem.attributes.keys():
                # BUGFIX: interpolate the trimmed file name; the caption
                # previously contained a literal placeholder string.
                cap += pf.convert_text(f'&nbsp;(`{filename}`)', output_format='latex')  # noqa
            elems = make_codelisting(elems, cap, label, shortcaption=shortcap,
                                     above='capbelow' not in elem.classes)
        elif 'caption' in elem.classes:
            doc.caption_found = True
            cap = ''
            if 'file' in elem.attributes.keys():
                # BUGFIX: use the real file name here as well.
                cap = pf.convert_text(f'`{filename}`', output_format='latex')
            elems = make_codelisting(elems, cap, label,
                                     above='capbelow' not in elem.classes)
        else:
            if 'file' in elem.attributes.keys():
                elems.insert(0, pf.Para(prefix, pf.Space, pf.Code(filename)))
        return elems
def finalize(doc):
    """Append the pgfplots and caption packages to ``header-includes``.

    Only the LaTeX preamble snippets that are actually needed are added,
    based on the ``plot_found`` / ``caption_found`` flags set while
    processing the document.
    """
    if doc.plot_found:
        # Load pgfplots only if the document does not already do so.
        pgfplots_inline = pf.MetaInlines(pf.RawInline(
            r'''%
\makeatletter
\@ifpackageloaded{pgfplots}{}{\usepackage{pgfplots}}
\makeatother
\usepgfplotslibrary{groupplots}
''', format='tex'))
        try:
            doc.metadata['header-includes'].append(pgfplots_inline)
        except KeyError:
            # No header-includes metadata yet; create the list.
            doc.metadata['header-includes'] = pf.MetaList(pgfplots_inline)
    if doc.caption_found:
        # caption + cleveref, plus a codelisting caption type (guarded so
        # a document that already defines it is left alone).
        caption_inline = pf.MetaInlines(pf.RawInline(
            r'''%
\makeatletter
\@ifpackageloaded{caption}{}{\usepackage{caption}}
\@ifpackageloaded{cleveref}{}{\usepackage{cleveref}}
\@ifundefined{codelisting}{%
\DeclareCaptionType{codelisting}[Code Listing][List of Code Listings]
\crefname{codelisting}{code listing}{code listings}
\Crefname{codelisting}{Code Listing}{Code Listings}
\captionsetup[codelisting]{position=bottom}
}{}
\makeatother
''', format='tex'))
        try:
            doc.metadata['header-includes'].append(caption_inline)
        except KeyError:
            # No header-includes metadata yet; create the list.
            doc.metadata['header-includes'] = pf.MetaList(caption_inline)
def rescue(f, on_success, on_error=reraise, on_complete=nop):
    ''' Functional try-except-finally.

    :param function f: guarded function
    :param function on_success: called with f's result when f (and the
        success handler) executed without error
    :param function on_error: called with the `error` when f failed
    :param function on_complete: called as the finally block
    :returns function: call signature is equal to f's signature
    '''
    def guarded(*args, **kwargs):
        try:
            # on_success is invoked inside the try so its own failures
            # are also routed through on_error.
            return on_success(f(*args, **kwargs))
        except Exception as error:
            return on_error(error)
        finally:
            on_complete()
    return guarded
def read_file(self, location):
    """Read a yaml file and return the parsed python object.

    :param location: path of the yaml file to read.
    :raises BadFileErrorKls: when the file is not valid yaml.
    """
    try:
        # BUGFIX: use a context manager so the file handle is always
        # closed (the previous version leaked the open file object).
        with open(location) as fh:
            # NOTE(security): yaml.load can construct arbitrary Python
            # objects; if these files can come from untrusted sources,
            # switch to yaml.safe_load.
            return yaml.load(fh)
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error:
        raise self.BadFileErrorKls("Failed to read yaml",
                                   location=location,
                                   error_type=error.__class__.__name__,
                                   error="{0}{1}".format(error.problem, error.problem_mark))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _trigger_job(job): """ trigger a job """
if job.api_instance().is_running(): return "{0}, {1} is already running".format(job.host, job.name) else: requests.get(job.api_instance().get_build_triggerurl()) return "triggering {0}, {1}...".format(job.host, job.name)
def observed(cls, _func):
    """Decorate a method so its calls are observable.

    When the method is invoked on an instance stored in a property, the
    model emits a before-notification, runs the method, then emits an
    after-notification carrying the result.
    """
    from functools import wraps

    # IMPROVEMENT: preserve the wrapped method's __name__/__doc__ so the
    # notifications and introspection see the real method identity.
    @wraps(_func)
    def wrapper(*args, **kwargs):
        self = args[0]
        assert isinstance(self, Observable)
        self._notify_method_before(self, _func.__name__, args, kwargs)
        res = _func(*args, **kwargs)
        self._notify_method_after(self, _func.__name__, res, args, kwargs)
        return res
    return wrapper
def emit(self, arg=None):
    """Emit this signal on every observing model, forwarding *arg*."""
    for model, signal_name in self.__get_models__():
        model.notify_signal_emit(signal_name, arg)
def _linearize(cls, inst_list):
    """Yield every instruction in *inst_list*, depth-first.

    ``Instructions`` containers are expanded recursively; plain
    instructions are yielded as-is.

    :param inst_list: A list (or other sequence) of instructions.
    :returns: An iterator over all instructions.
    """
    for entry in inst_list:
        if isinstance(entry, Instructions):
            # Recurse into nested instruction containers.
            for nested in cls._linearize(entry.instructions):
                yield nested
        else:
            yield entry
def add_to_known_hosts(self, hosts, known_hosts=DEFAULT_KNOWN_HOSTS, dry=False):
    """ Add the remote hosts' SSH public keys to the `known_hosts` file.

    :param hosts: the list of the remote `Host` objects.
    :param known_hosts: the `known_hosts` file to store the SSH public keys.
    :param dry: perform a dry run (scan the keys but do not write).
    """
    to_add = []
    # Load the existing entries once so duplicates can be skipped below.
    with open(known_hosts) as fh:
        known_hosts_set = set(line.strip() for line in fh.readlines())
    cmd = ['ssh-keyscan'] + [host.hostname for host in hosts]
    logger.debug('Call: %s', ' '.join(cmd))
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # NOTE(review): on Python 3, `stdout` is bytes while the file above
    # yields str, so the membership test below would never match -- this
    # looks Python-2 era; confirm the target interpreter before porting.
    for line in stdout.splitlines():
        line = line.strip()
        logger.info('[%s] Add the remote host SSH public key to [%s]...',
                    line.split(' ', 1)[0], known_hosts)
        if line not in known_hosts_set:
            known_hosts_set.add(line)
            to_add.append('{0}\n'.format(line))
    if not dry:
        # Append only the new keys, preserving the existing file content.
        with open(known_hosts, 'a') as fh:
            fh.writelines(to_add)
def remove_from_known_hosts(self, hosts, known_hosts=DEFAULT_KNOWN_HOSTS, dry=False):
    """ Remove the remote hosts' SSH public keys from the `known_hosts` file.

    :param hosts: the list of the remote `Host` objects.
    :param known_hosts: the `known_hosts` file storing the SSH public keys.
    :param dry: perform a dry run.
    """
    for host in hosts:
        logger.info('[%s] Removing the remote host SSH public key from [%s]...',
                    host.hostname, known_hosts)
        removal_cmd = ['ssh-keygen', '-f', known_hosts, '-R', host.hostname]
        logger.debug('Call: %s', ' '.join(removal_cmd))
        if dry:
            continue
        try:
            subprocess.check_call(removal_cmd)
        except subprocess.CalledProcessError as ex:
            logger.error(format_error(format_exception(ex)))
def process_requests(self):
    """ Worker loop, run in a dedicated thread, that serves queued
    requests synchronously, one at a time.

    Each queue entry is ``(id, args, kwargs)``; the response -- or the
    raised exception object itself -- is stored in ``self.results[id]``
    for the requesting side to pick up.
    """
    while True:
        id, args, kwargs = self.request_queue.get()
        try:
            response = self._make_request(*args, **kwargs)
        except Exception as e:
            # Hand the exception object back to the caller instead of
            # letting it kill the worker thread.
            response = e
        self.results[id] = response
def default_from_address(self):
    """ Return the coinbase address, cached so that we don't make two
    requests for every single transaction.

    ``_coinbase_cache_til`` holds the timestamp of the cached value; a
    value older than 30 seconds invalidates the cache.
    """
    if self._coinbase_cache_til is not None:
        # BUGFIX: time.time is a function -- the original compared the
        # function object itself (`time.time - ...`), which raises a
        # TypeError whenever the expiry timestamp was set.
        if time.time() - self._coinbase_cache_til > 30:
            self._coinbase_cache_til = None
            self._coinbase_cache = None
    if self._coinbase_cache is None:
        self._coinbase_cache = self.get_coinbase()
    return self._coinbase_cache
def find_pulls(self, testpulls=None):
    """Find the open pull requests that still need to be processed.

    :arg testpulls: a list of tserver.FakePull instances so we can test
      the code functionality without making live requests to github.
    :returns: dict mapping repository name to a list of PullRequest
      objects awaiting processing.
    """
    # We check all the repositories installed for new (open) pull requests.
    # If any exist, we check the pull request number against our archive to
    # see if we have to do anything for it.
    result = {}
    for lname, repo in self.repositories.items():
        if lname not in self.archive:
            raise ValueError("Trying to find pull requests for a repository "
                             "that hasn't been installed. Use server.install().")
        if self.runnable is not None and lname not in self.runnable:
            # We just ignore this repository completely and don't even bother
            # performing a live check on github.
            continue
        pulls = testpulls if testpulls is not None else repo.repo.get_pulls("open")
        result[lname] = []
        for pull in pulls:
            newpull = True
            if pull.snumber in self.archive[lname]:
                # Check the status of that pull request processing. If it was
                # successful, we just ignore this open pull request; it is
                # obviously waiting to be merged in.
                if self.archive[lname][pull.snumber]["completed"] == True:
                    newpull = False
            if newpull:
                # Queue the pull request for processing. It is only added to
                # the archive once the processing step has actually run.
                result[lname].append(PullRequest(self, repo, pull, testpulls is not None))
    return result
def _save_archive(self):
    """Serialize the archive of processed pull requests to disk as JSON."""
    import json
    from utility import json_serial
    with open(self.archpath, 'w') as archive_file:
        json.dump(self.archive, archive_file, default=json_serial)
def _get_repos(self):
    """Build the map of installed repositories, keyed by lowercased name."""
    settings_objects = (RepositorySettings(self, xmlpath)
                        for xmlpath in self.installed)
    return {settings.name.lower(): settings
            for settings in settings_objects}
def _get_installed(self):
    """Return the list of repo XML settings file paths monitored by the
    CI server.

    The data file also holds the script's interaction database, so only
    the "installed" entry is extracted (defaulting to an empty list).
    """
    from utility import get_json
    fulldata = get_json(self.instpath, {})
    return fulldata.get("installed", [])
def uninstall(self, xmlpath):
    """Remove the repository whose XML settings file is *xmlpath* from
    the server's monitored set.
    """
    from os import path
    fullpath = path.abspath(path.expanduser(xmlpath))
    if fullpath not in self.installed:
        warn("The repository at {} was not installed to begin with.".format(fullpath))
        return
    repo = RepositorySettings(self, fullpath)
    repo_key = repo.name.lower()
    if repo_key in self.repositories:
        del self.repositories[repo_key]
    if repo_key in self.archive:
        # Drop the processing history and persist the trimmed archive.
        del self.archive[repo_key]
        self._save_archive()
    self.installed.remove(fullpath)
    self._save_installed()
def install(self, xmlpath):
    """Register the repository described by the XML settings file at
    *xmlpath* so its pull requests are monitored.
    """
    from os import path
    fullpath = path.abspath(path.expanduser(xmlpath))
    if not path.isfile(fullpath):
        warn("The file {} does not exist; install aborted.".format(fullpath))
        return
    # Make sure none of the already-installed settings files point at
    # the same repository before registering it.
    repo = RepositorySettings(self, fullpath)
    repo_key = repo.name.lower()
    if repo_key in self.repositories:
        return
    self.installed.append(fullpath)
    self._save_installed()
    self.archive[repo_key] = {}
    self._save_archive()
    self.repositories[repo_key] = repo
def _save_installed(self):
    """Persist the list of installed repo XML settings files.

    The data file also contains the script's interaction database, so
    the existing contents are loaded first and only the "installed"
    entry is overwritten.
    """
    import json
    from utility import json_serial, get_json
    fulldata = get_json(self.instpath, {})
    fulldata["installed"] = self.installed
    with open(self.instpath, 'w') as data_file:
        json.dump(fulldata, data_file, default=json_serial)
def init(self, archive):
    """Create the local staging repo for unit testing.

    Copies the locally available static files/folders, initializes the
    repo with git so it has the correct remote origin, syncs with master
    and checks out the pull request's changes on a testing branch.

    :arg archive: this pull request's archive entry; a previous staging
      directory recorded under its "stage" key is cleaned up first.
    """
    from os import makedirs, path, chdir, system, getcwd
    self.repodir = path.abspath(path.expanduser(self.repo.staging))
    if ("stage" in archive and archive["stage"] is not None and
            path.isdir(archive["stage"]) and self.repodir != archive["stage"]):
        # We have a previous attempt in a different staging directory to clean.
        from shutil import rmtree
        rmtree(archive["stage"])
    if not path.isdir(self.repodir):
        makedirs(self.repodir)
    # Copy across all the static files so that we don't have to download them
    # again and chew up the bandwidth. Files that already exist locally are
    # not copied again.
    self.repo.static.copy(self.repodir)
    cwd = getcwd()
    chdir(self.repodir)
    if not self._is_gitted():
        # Initialize the git repo and track the static files and folders so
        # that a later pull from origin master merges without re-downloading.
        system("git init")
        if not self.testmode:
            system("git remote add origin {}.git".format(self.repo.repo.html_url))
        # The 2:: strips the leading "./" added by convention in the config
        # files for repository-root-relative paths.
        for static_file in self.repo.static.files:
            system("git add {}".format(static_file["target"][2::]))
        for static_folder in self.repo.static.folders:
            # BUGFIX: the folder loop previously re-added the last *file*
            # target instead of each folder's own target.
            system("git add {}".format(static_folder["target"][2::]))
    # Sync with master to get everything that isn't static, then fetch the
    # pull request head into its own branch for unit testing.
    if not self.testmode:
        system("git pull origin master")
        system("git fetch origin pull/{0}/head:testing_{0}".format(self.pull.number))
        # BUGFIX: this referenced the undefined name `pull` and ran even in
        # test mode; it now uses self.pull and stays with the fetch above.
        system("git checkout testing_{}".format(self.pull.number))
    # The local repo now has the pull request's proposed changes and is
    # ready to be unit tested.
    chdir(cwd)
def _fields_common(self):
    """Return the dictionary of template fields common to every event
    notification: repository metadata plus owner (user or organization)
    details.
    """
    result = {}
    if not self.testmode:
        # Live repository metadata; skipped in test mode to avoid hitting
        # the github API.
        result["__reponame__"] = self.repo.repo.full_name
        result["__repodesc__"] = self.repo.repo.description
        result["__repourl__"] = self.repo.repo.html_url
    result["__repodir__"] = self.repodir
    # NOTE(review): this checks self.organization but then reads
    # self.repo.organization below -- confirm which attribute actually
    # holds the organization; they look like they should be the same.
    if self.organization is not None:
        owner = self.repo.organization
    else:
        owner = self.repo.user
    # NOTE(review): the owner lookups below are not guarded by testmode
    # even though they may hit the remote API -- confirm intentional.
    result["__username__"] = owner.name
    result["__userurl__"] = owner.html_url
    result["__useravatar__"] = owner.avatar_url
    result["__useremail__"] = owner.email
    return result
def wiki(self):
    """Return wiki markup describing this github pull request, with a
    link back to the details page on github.
    """
    created = self.pull.created_at.strftime("%m/%d/%Y %H:%M")
    template = "{} {} ({} [{} github])\n"
    return template.format(self.pull.avatar_url, self.pull.body,
                           created, self.pull.html_url)
def fields_general(self, event):
    """Return the template fields for *event*: the common fields plus
    the event-specific unit-test report fields.
    """
    result = self._fields_common()
    summary = {
        "__test_html__": self.repo.testing.html(False),
        "__test_text__": self.repo.testing.text(False)}
    detailed = {
        "__test_html__": self.repo.testing.html(),
        "__test_text__": self.repo.testing.text()}
    if event in ["finish", "success"]:
        # Completed runs also report the success rate and status line.
        detailed["__percent__"] = "{0:.2%}".format(self.percent)
        detailed["__status__"] = self.message
    per_event = {
        "start": summary,
        "error": summary,
        "finish": detailed,
        "success": detailed,
        "timeout": summary
    }
    if event in per_event:
        result.update(per_event[event])
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_site(self): """Returns the mwclient.Site for accessing and editing the wiki pages. """
import mwclient parts = self.server.settings.wiki.replace("http", "").replace("://", "").split("/") self.url = parts[0] if len(parts) > 1 and parts[1].strip() != "": self.relpath = '/' + '/'.join(parts[1:len(parts)]) #The API expects us to have a trailing forward-slash. if self.relpath[-1] != "/": self.relpath += "/" if not self.testmode: self.site = mwclient.Site(self.url, path=self.relpath) else: if not self.testmode: self.site = mwclient.Site(self.url)
def _site_login(self, repo):
    """Log the repo's configured wiki user into the wiki site and record
    its base page.

    :arg repo: an instance of config.RepositorySettings with wiki
      credentials ("user", "password", "basepage").
    """
    try:
        if not self.testmode:
            self.site.login(repo.wiki["user"], repo.wiki["password"])
    except LoginError as e:
        # NOTE(review): indexing an exception (e[1]) is Python-2 style
        # and fails on Python 3; confirm the target interpreter.
        print(e[1]['result'])
    self.basepage = repo.wiki["basepage"]
def create(self, request):
    """Create the wiki page housing this pull request's test details.

    The page is initialized with basic information about the pull
    request and the tests that will be run, and is linked from the
    repo's main wiki page.

    :arg request: the PullRequest instance with testing information.
    :returns: the result of creating the new page.
    """
    self._site_login(request.repo)
    self.prefix = "{}_Pull_Request_{}".format(request.repo.name,
                                              request.pull.number)
    # Link the new page from the main repo page, then build the full
    # unit-test report page itself.
    self._edit_main(request)
    return self._create_new(request)
def update(self, request):
    """Update the wiki page with the results of the unit tests run for
    the pull request, first uploading each test's stdout file.

    :arg request: the PullRequest instance carrying the test results.
    """
    from os import path
    self._site_login(request.repo)
    self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number)
    # Before we can update the results from stdout, we first need to upload
    # them to the server. The files can be quite big sometimes.
    for i, test in enumerate(request.repo.testing.tests):
        test["remote_file"] = "{}_{}.txt".format(self.prefix, i)
        if test["result"] is not None and path.isfile(test["result"]):
            # Over here, we might consider doing something different if the
            # wiki server is the same physical machine as the CI server; we
            # needn't use the network protocols for the copy then. However,
            # the machine knows already if an address it is accessing is its
            # own; the copy, at worst, would be through the named pipes over
            # TCP. It is wasteful compared to a HDD copy, but simplifies the
            # uploading (which must also make an entry in the wiki database).
            if not self.testmode:
                # NOTE(review): the file handle passed to upload() is never
                # explicitly closed.
                self.site.upload(open(test["result"]), test["remote_file"],
                                 '`stdout` from `{}`'.format(test["command"]))
    # Now we can just overwrite the page with the additional test results,
    # including the links to the stdout files we uploaded.
    head = list(self._newpage_head)
    # Add a link to the details page that points back to the github pull
    # request URL.
    head.append("==Github Pull Request Info==\n")
    head.append(request.wiki())
    head.append("==Commands Run for Unit Testing==\n")
    head.append(request.repo.testing.wiki())
    if not self.testmode:
        page = self.site.Pages[self.newpage]
        result = page.save('\n'.join(head),
                           summary='Edited by CI bot with uploaded unit test details.',
                           minor=True, bot=True)
        return result[u'result'] == u'Success'
    else:
        return '\n'.join(head)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_new(self, request): """Creates the new wiki page that houses the details of the unit testing runs. """
self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number) head = list(self._newpage_head) head.append(request.repo.testing.wiki(False)) if not self.testmode: page = self.site.Pages[self.newpage] result = page.save('\n'.join(head), summary='Created by CI bot for unit test details.', bot=True) return result[u'result'] == u'Success' else: return '\n'.join(head)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _edit_main(self, request): """Adds the link to the new unit testing results on the repo's main wiki page. """
self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number) if not self.testmode: page = site.pages[self.basepage] text = page.text() else: text = "This is a fake wiki page.\n\n<!--@CI:Placeholder-->" self.newpage = self.prefix link = "Pull Request #{}".format(request.pull.number) text = text.replace("<!--@CI:Placeholder-->", "* [[{}|{}]]\n<!--@CI:Placeholder-->".format(self.newpage, link)) if not self.testmode: result = page.save(text, summary="Added {} unit test link.".format(link), minor=True, bot=True) return result[u'result'] == u'Success' else: return text
def email(self, repo, event, fields, dryrun=False):
    """Build an Email for *event* addressed to the configured recipients.

    :arg repo: the name of the repository to include in the email subject.
    :arg event: one of ["start", "success", "failure", "timeout", "error"].
    :arg fields: field values substituted into the email templates.
    :arg dryrun: when true, the email object and contents are built but
      the request is never sent to the SMTP server.
    :returns: the Email, or None when either template is unavailable.
    """
    text_body = self._get_template(event, "txt", fields)
    html_body = self._get_template(event, "html", fields)
    if text_body is None or html_body is None:
        return None
    return Email(self.server, repo, self.settings[repo],
                 text_body, html_body, dryrun)
def detect_sys():
    """Identify the running python platform.

    :returns: a dict with the gathered information, keyed by
        'system', 'bit', 'compiler' and 'python_version_tuple'
    :rtype: dict
    :raises: None
    """
    return {
        'system': platform.system(),
        'bit': platform.architecture()[0],
        'compiler': platform.python_compiler(),
        'python_version_tuple': platform.python_version_tuple(),
    }
def get_maya_location(self, ):
    """ Return the installation path to maya.

    Each supported version's registry key is tried in turn; the first
    registered installation wins.

    :returns: path to maya
    :rtype: str
    :raises: errors.SoftwareNotFoundError when no version is registered
    """
    import _winreg
    # KEY_WOW64_64KEY is needed so 32-bit python can read the 64-bit
    # Maya registry key.
    for ver in MAYA_VERSIONS:
        try:
            key = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE,
                MAYA_REG_KEY.format(mayaversion=ver),
                0, _winreg.KEY_READ | _winreg.KEY_WOW64_64KEY)
        except WindowsError:
            # BUGFIX: keep trying the remaining versions instead of
            # falling through and reading an unbound `value`.
            log.debug('Maya %s installation not found in registry!' % ver)
            continue
        value = _winreg.QueryValueEx(key, "MAYA_INSTALL_LOCATION")[0]
        if value:
            # BUGFIX: the original returned inside the loop on the first
            # iteration regardless of whether the lookup succeeded.
            return value
    raise errors.SoftwareNotFoundError(
        'Maya %s installation not found in registry!' % (MAYA_VERSIONS,))
def get_maya_envpath(self):
    """Return the PYTHONPATH necessary for running mayapy.

    Native mayapy sets these paths itself when started; prepend this to
    your path when running from an external interpreter.

    :returns: the PYTHONPATH that is used for running mayapy
    :rtype: str
    :raises: None
    """
    maya_location = self.get_maya_location()
    maya_bin = self.get_maya_bin()
    site_packages = self.get_maya_sitepackage_dir()
    python_dir = os.path.join(maya_location, "Python")
    python_lib = os.path.join(python_dir, "lib")
    entries = (
        os.path.join(maya_bin, "python27.zip"),
        os.path.join(python_dir, "DLLs"),
        python_lib,
        os.path.join(python_lib, "plat-win"),
        os.path.join(python_lib, "lib-tk"),
        maya_bin,
        python_dir,
        site_packages,
    )
    return os.pathsep.join(entries)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _sort(self): """ Sort the response dictionaries priority levels for ordered iteration """
self._log.debug('Sorting responses by priority') self._responses = OrderedDict(sorted(list(self._responses.items()), reverse=True)) self.sorted = True
def register_blueprints(app, application_package_name=None, blueprint_directory=None):
    """Register every child package's ``blueprint_api`` on the Flask app.

    Defaults: package name "app"; blueprint directory
    ``<cwd>/<application_package_name>``.
    """
    application_package_name = application_package_name or 'app'
    if not blueprint_directory:
        blueprint_directory = os.path.join(os.getcwd(), application_package_name)
    for child in get_child_directories(blueprint_directory):
        module_path = '{}.{}'.format(application_package_name, child)
        service = importlib.import_module(module_path)
        app.register_blueprint(service.blueprint_api, url_prefix='')
def get_child_directories(path):
    """Return the names of *path*'s immediate child directories.

    :raises exceptions.InvalidDirectory: when *path* itself is not a
        valid directory.
    """
    if not _is_valid_directory(path):
        raise exceptions.InvalidDirectory
    return [entry for entry in os.listdir(path)
            if _is_valid_directory(os.path.join(path, entry))]
def delete(self):
    """Delete all of this object's keys from redis and empty the
    internal ``_data`` cache.
    """
    pattern = ":".join([self.namespace, self.key, "*"])
    for key in self.conn.keys(pattern):
        part = key.split(":")[-1]
        self._data.pop(part)
        # BUGFIX: delete the full redis key; the original passed only the
        # trailing part, so the actual keys were never removed.
        self.conn.delete(key)
    # NOTE: the original ended with `del self`, which only unbinds the
    # local name and has no effect on the object; it was removed.
def get(self, part):
    """Fetch one part of the model from redis and cache it in ``_data``.

    :param part: The part of the model to retrieve.
    :raises RedisORMException: for redis value types other than string
        or list (the only two supported types at this time).
    """
    redis_key = ':'.join([self.namespace, self.key, part])
    value_type = self.conn.type(redis_key)
    if value_type == "string":
        self._data[part] = self.conn.get(redis_key)
    elif value_type == "list":
        self._data[part] = RedisList(redis_key, self.conn)
    else:
        raise RedisORMException("Other types besides string and list are unsupported at this time.")
def upload(client, source_dir):
    """Upload inappproducts to play store.

    Reads each JSON product file under ``<source_dir>/products`` and
    creates the product when its SKU is new, or updates it otherwise.
    """
    print('')
    print('upload inappproducs')
    print('---------------------')
    products_folder = os.path.join(source_dir, 'products')
    product_files = filter(os.path.isfile, list_dir_abspath(products_folder))
    # BUGFIX: materialize the SKUs as a list. On Python 3 map() returns a
    # one-shot iterator, so every membership test after the first silently
    # missed existing products and re-created them.
    current_product_skus = [existing['sku']
                            for existing in client.list_inappproducts()]
    print(current_product_skus)
    for product_file in product_files:
        # Renamed the handle so it no longer shadows the loop variable.
        with open(product_file) as fh:
            product = json.load(fh)
        # Check whether the product is new on the store.
        sku = product['sku']
        product['packageName'] = client.package_name
        print(sku)
        if sku in current_product_skus:
            print("update product {0}".format(sku))
            client.update_inappproduct(product, sku)
        else:
            print("create product {0}".format(sku))
            client.insert_inappproduct(product)
def download(client, target_dir):
    """Download inappproducts from play store.

    Each product is written to ``<target_dir>/products/<sku>.json`` with
    its ``packageName`` entry stripped.
    """
    print('')
    print("download inappproducts")
    print('---------------------')
    path = os.path.join(target_dir, 'products')
    for product in client.list_inappproducts():
        del product['packageName']
        mkdir_p(path)
        sku = product['sku']
        with open(os.path.join(path, sku + '.json'), 'w') as outfile:
            print("save product for {0}".format(sku))
            json.dump(product, outfile, sort_keys=True, indent=4,
                      separators=(',', ': '))
def dataset_exists(dataset_name):
    '''If a dataset with the given name exists, return its absolute
    path; otherwise return None.'''
    candidate = os.path.join(LIB_DIR, 'datasets', dataset_name)
    return candidate if os.path.isdir(candidate) else None
def run_script(self, args, event_writer, input_stream):
    """Handles all the specifics of running a modular input.

    :param args: List of command line arguments passed to this script.
    :param event_writer: An ``EventWriter`` object for writing events.
    :param input_stream: An input stream for reading inputs.
    :returns: An integer to be used as the exit value of this program
        (``None`` for the unrecognized-arguments branch, as before).
    """
    try:
        if len(args) == 1:
            # This script is running as an input. Input definitions will be
            # passed on stdin as XML, and the script will write events on
            # stdout and log entries on stderr.
            self._input_definition = InputDefinition.parse(input_stream)
            self.stream_events(self._input_definition, event_writer)
            event_writer.close()
            return 0
        elif str(args[1]).lower() == "--scheme":
            # Splunk has requested XML specifying the scheme for this
            # modular input. Return it and exit.
            scheme = self.get_scheme()
            if scheme is None:
                event_writer.log(
                    EventWriter.FATAL,
                    "Modular input script returned a null scheme.")
                return 1
            else:
                event_writer.write_xml_document(scheme.to_xml())
                return 0
        elif str(args[1]).lower() == "--validate-arguments":
            validation_definition = ValidationDefinition.parse(input_stream)
            try:
                self.validate_input(validation_definition)
                return 0
            except Exception as e:
                root = ET.Element("error")
                # BUG FIX: Exception.message does not exist on Python 3;
                # str(e) works on both Python 2 and Python 3.
                ET.SubElement(root, "message").text = str(e)
                event_writer.write_xml_document(root)
                return 1
        else:
            err_string = "ERROR Invalid arguments to modular input script:" + ' '.join(
                args)
            event_writer._err.write(err_string)
    except Exception as e:
        # BUG FIX: e.message -> str(e) (Python 3 compatibility).
        err_string = EventWriter.ERROR + str(e)
        event_writer._err.write(err_string)
        return 1
def money(s, thousand_sep=".", decimal_sep=","):
    """Converts money amount in string to a Decimal object.

    With the default arguments, the format is expected to be ``-38.500,00``,
    where dots separate thousands and comma the decimals.

    Args:
        thousand_sep: Separator for thousands.
        decimal_sep: Separator for decimals.

    Returns:
        A ``Decimal`` object of the string encoded money amount.
    """
    normalized = s.replace(thousand_sep, "").replace(decimal_sep, ".")
    return Decimal(normalized)
def csv_row_to_transaction(index, row, source_encoding="latin1",
                           date_format="%d-%m-%Y", thousand_sep=".",
                           decimal_sep=","):
    """Parses a row of strings to a ``Transaction`` object.

    Args:
        index: The index of this row in the original CSV file. Used for
            sorting ``Transaction``s by their order of appearance.
        row: The row containing strings for [transfer_date, posted_date,
            message, money_amount, money_total].
        source_encoding: The encoding that will be used to decode strings
            to UTF-8.
        date_format: The format of dates in this row.
        thousand_sep: The thousand separator in money amounts.
        decimal_sep: The decimal separator in money amounts.

    Returns:
        A ``Transaction`` object.
    """
    xfer, posted, message, amount, total = row
    # NOTE(review): date_format is accepted but Parse.date's signature is
    # not visible here; confirm whether it takes a format argument and
    # forward it if so.
    xfer = Parse.date(xfer)
    posted = Parse.date(posted)
    message = Parse.to_utf8(message, source_encoding)
    # BUG FIX: forward the separators; the original dropped them, so money
    # amounts using non-default separators were parsed incorrectly.
    amount = Parse.money(amount, thousand_sep, decimal_sep)
    total = Parse.money(total, thousand_sep, decimal_sep)
    return Transaction(index, xfer, posted, message, amount, total)
def csv_to_transactions(handle, source_encoding="latin1",
                        date_format="%d-%m-%Y", thousand_sep=".",
                        decimal_sep=","):
    """Parses CSV data from stream and returns ``Transactions``.

    Args:
        handle: An open stream of ';'-delimited, '"'-quoted CSV rows.
        source_encoding: The encoding that will be used to decode strings
            to UTF-8.
        date_format: The format of dates in each row.
        thousand_sep: The thousand separator in money amounts.
        decimal_sep: The decimal separator in money amounts.

    Returns:
        A ``Transactions`` object.
    """
    trans = Transactions()
    rows = csv.reader(handle, delimiter=";", quotechar="\"")
    for index, row in enumerate(rows):
        # BUG FIX: forward the parsing options; the original silently
        # dropped them, so non-default encodings/formats/separators passed
        # by the caller were ignored.
        trans.append(Parse.csv_row_to_transaction(
            index, row,
            source_encoding=source_encoding,
            date_format=date_format,
            thousand_sep=thousand_sep,
            decimal_sep=decimal_sep))
    return trans
def to_sqlite3(self, location=":memory:"):
    """Returns an SQLITE3 connection to a database containing the
    transactions.

    Money amounts are stored as integer cents (via a Decimal adapter) to
    avoid float rounding, and converted back to ``Decimal`` on read through
    the declared-type/colname converter.

    :param location: sqlite3 database location (default in-memory).
    :returns: an open ``sqlite3.Connection`` with a populated, committed
        ``transactions`` table.
    """
    def decimal_to_sqlite3(n):
        # Store as integer cents.
        return int(100 * n)

    def sqlite3_to_decimal(s):
        # BUG FIX: sqlite3 hands converters raw bytes on Python 3;
        # Decimal(bytes) raises TypeError, so decode first.
        return Decimal(s.decode("ascii")) / 100

    sqlite3.register_adapter(Decimal, decimal_to_sqlite3)
    sqlite3.register_converter("decimal", sqlite3_to_decimal)

    con = sqlite3.connect(
        location,
        detect_types=sqlite3.PARSE_COLNAMES | sqlite3.PARSE_DECLTYPES)
    cur = con.cursor()
    cur.execute("""create table transactions(
            id primary key,
            xfer date,
            posted date,
            message text,
            amount decimal,
            total decimal)""")
    cur.executemany(
        "INSERT INTO transactions values(?,?,?,?,?,?)",
        ((t.index, t.xfer, t.posted, t.message, t.amount, t.total)
         for t in self))
    # BUG FIX: commit before handing the connection out; otherwise the rows
    # are lost if the caller closes a file-backed connection without
    # committing.
    con.commit()
    return con
def group_by(self, key, field=lambda x: x.xfer):
    """Returns all transactions whose given ``field`` matches ``key``.

    Returns:
        A ``Transactions`` object.
    """
    matching = []
    for transaction in self.trans:
        if field(transaction) == key:
            matching.append(transaction)
    return Transactions(matching)
def range(self, start_date=None, stop_date=None, field=lambda x: x.xfer):
    """Return a ``Transactions`` object in an inclusive date range.

    Args:
        start_date: A ``datetime.Date`` object that marks the inclusive
            start date for the range, or None for no lower bound.
        stop_date: A ``datetime.Date`` object that marks the inclusive
            end date for the range, or None for no upper bound.
        field: The field to compare start and end dates to. Default is
            the ``xfer`` field.

    Returns:
        A ``Transactions`` object.

    Raises:
        ValueError: If both bounds are given and start_date > stop_date.
    """
    # BUG FIX: the original asserted ``start_date <= stop_date``
    # unconditionally, which raises TypeError on Python 3 whenever either
    # bound is its documented default of None. Validate only when both
    # bounds exist, and raise instead of assert (asserts vanish under -O).
    if start_date is not None and stop_date is not None \
            and start_date > stop_date:
        raise ValueError("Start date must be earlier than end date.")
    out = Transactions()
    for t in self.trans:
        date = field(t)
        if start_date is not None and date < start_date:
            continue
        if stop_date is not None and date > stop_date:
            continue
        out.append(t)
    return out
def agg_grid(grid, agg=None):
    """
    Summarize each cell of a 2d grid with a single value.

    Many functions return a 2d list with a complex data type in each cell
    (e.g. a set of resources, or a list of values from multiple files).
    Calling the given aggregation function ``agg`` on every cell collapses
    those to something plottable: ``len`` counts resources, ``mode`` picks
    the most common phenotype, etc.

    agg - A function indicating how to summarize grid contents. When None,
          ``string_avg`` is used for grids of string lists and ``mode``
          otherwise.
    """
    result = deepcopy(grid)
    if agg is None:
        # Pick a sensible default based on the cell contents.
        if type(result[0][0]) is list and type(result[0][0][0]) is str:
            agg = string_avg
        else:
            agg = mode
    for row in result:
        for col, cell in enumerate(row):
            row[col] = agg(cell)
    return result
def flatten_array(grid):
    """
    Takes a multi-dimensional array and returns a 1 dimensional array with
    the same contents.

    Returns [] for an empty input.
    """
    # BUG FIX: the original indexed grid[0] unconditionally, crashing with
    # an IndexError on an empty (or fully-flattened-to-empty) grid.
    if not grid:
        return []
    flat = [cell for row in grid for cell in row]
    # Recurse while elements are still lists (checks the first element,
    # as the original did).
    if flat and type(flat[0]) is list:
        return flatten_array(flat)
    return flat
def prepend_zeros_to_lists(ls):
    """
    Takes a list of lists and, in place, prepends "0" strings to each
    sub-list until they are all the same length. Used for sign-extending
    binary numbers.
    """
    # BUG FIX: guard against an empty outer list — max() of an empty
    # sequence raises ValueError.
    if not ls:
        return
    longest = max(len(l) for l in ls)
    for sub in ls:
        pad = longest - len(sub)
        if pad > 0:
            # Single slice-assign instead of repeated O(n) front inserts.
            sub[0:0] = ["0"] * pad
def squared_toroidal_dist(p1, p2, world_size=(60, 60)):
    """
    Squared distance between p1 and p2 on a toroidal world of the given
    size. Separated out because sqrt has a lot of overhead.
    """
    half_x = world_size[0] / 2.0
    half_y = half_x if world_size[0] == world_size[1] else world_size[1] / 2.0
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    # Wrap each axis so the delta measures the short way around the torus.
    if dx < -half_x:
        dx += world_size[0]
    elif dx > half_x:
        dx -= world_size[0]
    if dy < -half_y:
        dy += world_size[1]
    elif dy > half_y:
        dy -= world_size[1]
    return dx * dx + dy * dy
def phenotype_to_res_set(phenotype, resources):
    """
    Converts a binary string to a set containing the resources indicated by
    the bits in the string.

    Inputs: phenotype - a binary string ("0b...")
            resources - a list of strings indicating which resources
                        correspond to which indices of the phenotype

    returns: A set of strings indicating resources
    """
    assert phenotype.startswith("0b")
    # Sign-extend with leading zeroes so indices line up with resources.
    bits = phenotype[2:].zfill(len(resources))
    res_set = {resources[i] for i, bit in enumerate(bits) if bit == "1"}
    assert bits.count("1") == len(res_set)
    return res_set
def res_set_to_phenotype(res_set, full_list):
    """
    Converts a set of strings indicating resources to a binary string where
    the positions of 1s indicate which resources are present.

    Inputs: res_set - a set of strings indicating which resources are present
            full_list - a list of strings indicating all resources which
                        could be present, and the order in which they should
                        map to bits in the phenotype

    returns: A binary string ("0b...") with redundant leading zeros removed
    """
    ordered = list(full_list)
    bits = ["1" if res in res_set else "0" for res in ordered]
    assert bits.count("1") == len(res_set)
    # Strip redundant leading zeros, but keep at least one digit.
    while bits[0] == "0" and len(bits) > 1:
        bits.pop(0)
    return "0b" + "".join(bits)
def weighted_hamming(b1, b2):
    """
    Hamming distance that emphasizes differences earlier in strings.
    """
    assert len(b1) == len(b2)
    total = 0
    for position, (x, y) in enumerate(zip(b1, b2)):
        if x == y:
            continue
        # Differences at more significant (leftward) positions weigh more;
        # the 1 + 1/i weighting is completely arbitrary.
        # NOTE(review): a mismatch at position 0 adds nothing — confirm that
        # is intended rather than just an artifact of avoiding 1/0.
        if position > 0:
            total += 1 + 1.0 / position
    return total
def n_tasks(dec_num):
    """
    Takes a number as input and returns the number of ones in its binary
    representation — i.e. the number of tasks being done by an organism
    whose phenotype is represented as that number.

    Accepts an int, a decimal string ("7"), or a binary string ("0b111").
    """
    text = str(dec_num)
    if text.startswith("0b"):
        bits = text[2:]
    else:
        # BUG FIX: the original blindly sliced strings with [2:], so a plain
        # decimal string such as "7" lost its digits and counted 0 tasks;
        # it also hid real errors behind a bare except. Convert explicitly.
        bits = bin(int(dec_num))[2:]
    return bits.count("1")
def convert_to_pysal(data):
    """
    Pysal expects a distance matrix, and data formatted in a numpy array.
    This function takes a data grid and returns those things: a lattice
    spatial-weights object and the grid flattened to an (n, 1) array.
    """
    rows = len(data)
    cols = len(data[0])
    weights = pysal.lat2W(cols, rows)
    flat = np.reshape(np.array(data), (rows * cols, 1))
    return weights, flat
def median(ls):
    """
    Takes a non-empty list of numbers and returns the median.

    For odd-length lists this is the middle element; for even-length lists
    it is the mean of the two middle elements.

    Raises IndexError on an empty list (as the original did).
    """
    ordered = sorted(ls)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    # BUG FIX: the original returned the upper-middle element for
    # even-length lists, which is not the median the docstring promises.
    return (ordered[mid - 1] + ordered[mid]) / 2.0