text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_object(self, filename, img_properties=None): """Create an image object on local disk from the given file. The file is copied to a new local directory that is created for the image object. The optional list of image properties will be associated with the new object together with the set of default properties for images. Parameters filename : string Path to file on disk img_properties : Dictionary, optional Set of image properties. Returns ------- ImageHandle Handle for created image object """
def create_object(self, filename, img_properties=None):
    """Create an image object on local disk from the given file.

    The file is copied into a newly created directory for the image
    object. Caller-supplied properties are merged with the defaults;
    defaults win on conflict.

    Parameters
    ----------
    filename : string
        Path to file on disk
    img_properties : Dictionary, optional
        Set of image properties.

    Returns
    -------
    ImageHandle
        Handle for created image object
    """
    # Last component of the absolute path is the image name.
    prop_name = os.path.basename(os.path.normpath(filename))
    # Map the file suffix to a Mime type. Only the suffix is checked;
    # the file content itself is not validated as an image.
    prop_mime = None
    dot_pos = prop_name.rfind('.')
    if dot_pos >= 0:
        prop_mime = VALID_IMGFILE_SUFFIXES.get(prop_name[dot_pos:].lower())
    if not prop_mime:
        raise ValueError('unsupported image type: ' + prop_name)
    # New unique identifier; the image sub-folder is derived from it
    # (first two characters, see get_directory).
    identifier = str(uuid.uuid4()).replace('-', '')
    image_dir = self.get_directory(identifier)
    if not os.access(image_dir, os.F_OK):
        os.makedirs(image_dir)
    # Default properties for the new image object.
    properties = {
        datastore.PROPERTY_NAME: prop_name,
        datastore.PROPERTY_FILENAME: prop_name,
        datastore.PROPERTY_FILESIZE: os.path.getsize(filename),
        datastore.PROPERTY_MIMETYPE: prop_mime
    }
    # Merge additional properties without overriding the defaults.
    if img_properties is not None:
        for prop in img_properties:
            properties.setdefault(prop, img_properties[prop])
    # Copy original file into the object's directory.
    shutil.copyfile(filename, os.path.join(image_dir, prop_name))
    # Persist the handle in the database before returning it.
    obj = ImageHandle(identifier, properties, image_dir)
    self.insert_object(obj)
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_dict(self, document): """Create image object from JSON document retrieved from database. Parameters document : JSON Json document in database Returns ------- ImageHandle Handle for image object """
def from_dict(self, document):
    """Create an image handle from its JSON database document.

    Parameters
    ----------
    document : JSON
        Json document in database

    Returns
    -------
    ImageHandle
        Handle for image object
    """
    # Database object id doubles as the image identifier.
    identifier = str(document['_id'])
    # Timestamps are stored in ISO format with microseconds.
    created_at = datetime.datetime.strptime(
        document['timestamp'], '%Y-%m-%dT%H:%M:%S.%f'
    )
    # The directory is derived (not stored) so the base directory can be
    # moved without updating the database.
    return ImageHandle(
        identifier,
        document['properties'],
        self.get_directory(identifier),
        timestamp=created_at,
        is_active=document['active']
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_directory(self, identifier): """Implements the policy for naming directories for image objects. Image object directories are name by their identifier. In addition, these directories are grouped in parent directories named by the first two characters of the identifier. The aim is to avoid having too many sub-folders in a single directory. Parameters identifier : string Unique object identifier Returns ------- string Path to image objects data directory """
def get_directory(self, identifier):
    """Return the data directory for the image object with the given
    identifier.

    Objects are grouped under a parent folder named by the first two
    characters of the identifier, to avoid over-crowded directories.

    Parameters
    ----------
    identifier : string
        Unique object identifier

    Returns
    -------
    string
        Path to image object's data directory
    """
    # os.path.join takes any number of components; no nested calls needed.
    return os.path.join(self.directory, identifier[:2], identifier)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_object(self, name, images, filename, options=None, object_identifier=None, read_only=False): """Create an image group object with the given list of images. The file name specifies the location on local disk where the tar-file containing the image group files is located. The file will be copied to the image groups data directory. Parameters name : string User-provided name for the image group images : List(GroupImage) List of objects describing images in the group filename : string Location of local file containing all images in the group List of image group options. If None, default values will be used. object_identifier : string Unique object identifier, optional read_only : boolean, optional Optional value for the read-only property Returns ------- ImageGroupHandle Object handle for created image group """
def create_object(self, name, images, filename, options=None, object_identifier=None, read_only=False):
    """Create an image group object with the given list of images.

    The tar-file at ``filename`` containing the image group files is
    copied into the image group's data directory.

    Parameters
    ----------
    name : string
        User-provided name for the image group
    images : List(GroupImage)
        List of objects describing images in the group
    filename : string
        Location of local file containing all images in the group
    options : list, optional
        List of image group options. If None, default values will be used.
    object_identifier : string, optional
        Unique object identifier
    read_only : boolean, optional
        Optional value for the read-only property

    Returns
    -------
    ImageGroupHandle
        Object handle for created image group
    """
    # Raise an exception if given image group is not valid.
    self.validate_group(images)
    # Create a new object identifier if none is given.
    if object_identifier is None:
        identifier = str(uuid.uuid4()).replace('-', '')
    else:
        identifier = object_identifier
    # Initial set of properties. Mime type is inferred from the file
    # suffix: plain tar vs. gzip-compressed archive.
    prop_filename = os.path.basename(os.path.normpath(filename))
    if filename.endswith('.tar'):
        prop_mime = 'application/x-tar'
    else:
        prop_mime = 'application/x-gzip'
    properties = {
        datastore.PROPERTY_NAME: name,
        datastore.PROPERTY_FILENAME: prop_filename,
        datastore.PROPERTY_FILESIZE: os.path.getsize(filename),
        datastore.PROPERTY_MIMETYPE: prop_mime
    }
    if read_only:
        properties[datastore.PROPERTY_READONLY] = True
    # Directories are simply named by object identifier.
    directory = os.path.join(self.directory, identifier)
    if not os.access(directory, os.F_OK):
        os.makedirs(directory)
    # Copy (not move) the original file into the object directory so the
    # caller's file is left untouched.
    shutil.copyfile(filename, os.path.join(directory, prop_filename))
    # Convert the given options into a dictionary (empty if None);
    # raises an exception on duplicate attribute names.
    opts = attribute.to_dict(options, self.attribute_defs)
    # Store the handle in the database before returning it.
    obj = ImageGroupHandle(identifier, properties, directory, images, opts)
    self.insert_object(obj)
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_dict(self, document): """Create image group object from JSON document retrieved from database. Parameters document : JSON Json document in database Returns ------- ImageGroupHandle Handle for image group object """
def from_dict(self, document):
    """Create an image group handle from its JSON database document.

    Parameters
    ----------
    document : JSON
        Json document in database

    Returns
    -------
    ImageGroupHandle
        Handle for image group object
    """
    identifier = str(document['_id'])
    # Rebuild group images; the physical file path of each image is
    # derived from the image manager's directory layout.
    group_docs = document['images']
    images = [
        GroupImage(
            img['identifier'],
            img['folder'],
            img['name'],
            os.path.join(
                self.image_manager.get_directory(img['identifier']),
                img['name']
            )
        )
        for img in group_docs
    ]
    # Group size is a derived property, not stored in the database.
    properties = document['properties']
    properties[PROPERTY_GROUPSIZE] = len(group_docs)
    # Directories are simply named by object identifier.
    return ImageGroupHandle(
        identifier,
        properties,
        os.path.join(self.directory, identifier),
        images,
        attribute.attributes_from_dict(document['options']),
        timestamp=datetime.datetime.strptime(
            document['timestamp'], '%Y-%m-%dT%H:%M:%S.%f'
        ),
        is_active=document['active']
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_collections_for_image(self, image_id): """Get identifier of all collections that contain a given image. Parameters image_id : string Unique identifierof image object Returns ------- List(string) List of image collection identifier """
def get_collections_for_image(self, image_id):
    """Get identifiers of all active collections that contain the given
    image.

    Parameters
    ----------
    image_id : string
        Unique identifier of image object

    Returns
    -------
    List(string)
        List of image collection identifier
    """
    # Only active collections referencing the image are considered.
    query = {'active': True, 'images.identifier': image_id}
    return [str(doc['_id']) for doc in self.collection.find(query)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self, img_coll): """Create a Json-like dictionary for image group. Extends the basic object with an array of image identifiers. Parameters img_coll : ImageGroupHandle Returns ------- (JSON) Json-like object, i.e., dictionary. """
def to_dict(self, img_coll):
    """Create a Json-like dictionary for the image group. Extends the
    basic object with an array of image descriptors and the serialized
    options set.

    Parameters
    ----------
    img_coll : ImageGroupHandle

    Returns
    -------
    (JSON)
        Json-like object, i.e., dictionary.
    """
    # Start from the base serialization of the super class.
    json_obj = super(DefaultImageGroupManager, self).to_dict(img_coll)
    # One compact descriptor per group image.
    json_obj['images'] = [
        {
            'identifier': img.identifier,
            'folder': img.folder,
            'name': img.name
        }
        for img in img_coll.images
    ]
    # Typed option attributes are flattened into a list of elements.
    json_obj['options'] = attribute.attributes_to_dict(img_coll.options)
    return json_obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_group(images): """Validates that the combination of folder and name for all images in a group is unique. Raises a ValueError exception if uniqueness constraint is violated. Parameters images : List(GroupImage) List of images in group """
def validate_group(images):
    """Validate that the combination of folder and name is unique for
    every image in the group.

    Raises a ValueError exception if the uniqueness constraint is
    violated.

    Parameters
    ----------
    images : List(GroupImage)
        List of images in group
    """
    seen = set()
    for image in images:
        # Use a (folder, name) tuple as key. Plain string concatenation
        # would make distinct pairs collide, e.g. ('ab', 'c') and
        # ('a', 'bc') both concatenate to 'abc'.
        key = (image.folder, image.name)
        if key in seen:
            raise ValueError(
                'Duplicate images in group: ' + image.folder + image.name
            )
        seen.add(key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_object(self, name, image_sets): """Create a prediction image set list. Parameters name : string User-provided name for the image group. image_sets : list(PredictionImageSet) List of prediction image sets Returns ------- PredictionImageSetHandle Object handle for created prediction image set """
def create_object(self, name, image_sets):
    """Create a prediction image set list.

    Parameters
    ----------
    name : string
        User-provided name for the image group.
    image_sets : list(PredictionImageSet)
        List of prediction image sets

    Returns
    -------
    PredictionImageSetHandle
        Object handle for created prediction image set
    """
    # New unique identifier for the image set listing.
    identifier = str(uuid.uuid4()).replace('-', '')
    obj = PredictionImageSetHandle(
        identifier,
        {datastore.PROPERTY_NAME: name},
        image_sets
    )
    # Persist the handle before returning it.
    self.insert_object(obj)
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_dict(self, document): """Create a prediction image set resource from a dictionary serialization. Parameters document : dict Dictionary serialization of the resource Returns ------- PredictionImageSetHandle Handle for prediction image sets """
def from_dict(self, document):
    """Create a prediction image set resource from a dictionary
    serialization.

    Parameters
    ----------
    document : dict
        Dictionary serialization of the resource

    Returns
    -------
    PredictionImageSetHandle
        Handle for prediction image sets
    """
    # Deserialize the nested image sets first.
    image_sets = [
        PredictionImageSet.from_dict(img) for img in document['images']
    ]
    created_at = datetime.datetime.strptime(
        document['timestamp'], '%Y-%m-%dT%H:%M:%S.%f'
    )
    return PredictionImageSetHandle(
        str(document['_id']),
        document['properties'],
        image_sets,
        timestamp=created_at,
        is_active=document['active']
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self, img_sets): """Create a dictionary serialization for a prediction image set handle. Parameters img_sets : PredictionImageSetHandle Returns ------- dict Dictionary serialization of the resource """
def to_dict(self, img_sets):
    """Create a dictionary serialization for a prediction image set
    handle.

    Parameters
    ----------
    img_sets : PredictionImageSetHandle

    Returns
    -------
    dict
        Dictionary serialization of the resource
    """
    # Base serialization from the super class, extended with the
    # serialized image sets.
    serialized = super(DefaultPredictionImageSetManager, self).to_dict(img_sets)
    serialized['images'] = [item.to_dict() for item in img_sets.images]
    return serialized
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _load_tasks(self, tasks, vars=None, additional_conditions=None):
    ''' handle task and handler include statements '''
    # FIX: the defaults were mutable objects ({} / []) shared across all
    # calls; use None sentinels and allocate fresh objects per call.
    if vars is None:
        vars = {}
    if additional_conditions is None:
        additional_conditions = []
    results = []
    if tasks is None:
        # support empty handler files, and the like.
        tasks = []
    for x in tasks:
        task_vars = self.vars.copy()
        task_vars.update(vars)
        if 'include' in x:
            tokens = shlex.split(x['include'])
            items = ['']
            included_additional_conditions = list(additional_conditions)
            for k in x:
                if k.startswith("with_"):
                    # lookup plugin drives iteration over the include
                    plugin_name = k[5:]
                    if plugin_name not in utils.plugins.lookup_loader:
                        raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
                    terms = utils.template_ds(self.basedir, x[k], task_vars)
                    items = utils.plugins.lookup_loader.get(plugin_name, basedir=self.basedir, runner=None).run(terms, inject=task_vars)
                elif k.startswith("when_"):
                    included_additional_conditions.append(utils.compile_when_to_only_if("%s %s" % (k[5:], x[k])))
                elif k in ("include", "vars", "only_if"):
                    pass
                else:
                    raise errors.AnsibleError("parse error: task includes cannot be used with other directives: %s" % k)
            if 'vars' in x:
                task_vars.update(x['vars'])
            if 'only_if' in x:
                included_additional_conditions.append(x['only_if'])
            for item in items:
                mv = task_vars.copy()
                mv['item'] = item
                # key=value tokens after the include path become extra vars
                for t in tokens[1:]:
                    (k, v) = t.split("=", 1)
                    mv[k] = utils.template_ds(self.basedir, v, mv)
                include_file = utils.template(self.basedir, tokens[0], mv)
                data = utils.parse_yaml_from_file(utils.path_dwim(self.basedir, include_file))
                # recurse into the included file
                results += self._load_tasks(data, mv, included_additional_conditions)
        elif type(x) == dict:
            results.append(Task(self, x, module_vars=task_vars, additional_conditions=additional_conditions))
        else:
            raise Exception("unexpected task type")
    # apply play-level tags to every resulting task
    for x in results:
        if self.tags is not None:
            x.tags.extend(self.tags)
    return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_vars(self):
    ''' load the vars section from a play, accounting for all sorts of variable features
    including loading from yaml files, prompting, and conditional includes of the
    first file found in a list. '''
    # NOTE(review): uses Python 2 APIs (dict.iteritems, items()[0]) — this
    # file predates Python 3.
    # Normalize a missing vars section to an empty dict.
    if self.vars is None:
        self.vars = {}
    if type(self.vars) not in [dict, list]:
        raise errors.AnsibleError("'vars' section must contain only key/value pairs")
    vars = {}
    # translate a list of vars into a dict
    if type(self.vars) == list:
        for item in self.vars:
            # each list entry must be a one-key mapping
            if getattr(item, 'items', None) is None:
                raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
            k, v = item.items()[0]
            vars[k] = v
    else:
        vars.update(self.vars)
    # command-line extra vars override play vars
    if type(self.playbook.extra_vars) == dict:
        vars.update(self.playbook.extra_vars)
    if type(self.vars_prompt) == list:
        # long form: list of dicts with name/prompt/default/... entries
        for var in self.vars_prompt:
            if not 'name' in var:
                raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
            vname = var['name']
            prompt = var.get("prompt", vname)
            default = var.get("default", None)
            private = var.get("private", True)
            confirm = var.get("confirm", False)
            encrypt = var.get("encrypt", None)
            salt_size = var.get("salt_size", None)
            salt = var.get("salt", None)
            # extra vars take precedence: never prompt for a var that was
            # already supplied on the command line
            if vname not in self.playbook.extra_vars:
                vars[vname] = self.playbook.callbacks.on_vars_prompt(
                    vname, private, prompt, encrypt, confirm, salt_size, salt, default
                )
    elif type(self.vars_prompt) == dict:
        # short form: {varname: prompt}
        for (vname, prompt) in self.vars_prompt.iteritems():
            prompt_msg = "%s: " % prompt
            if vname not in self.playbook.extra_vars:
                vars[vname] = self.playbook.callbacks.on_vars_prompt(
                    varname=vname, private=False, prompt=prompt_msg, default=None
                )
    else:
        raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
    # extra vars win again in the final merge
    results = self.playbook.extra_vars.copy()
    results.update(vars)
    return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def key_event_to_name(event): """ Converts a keystroke event into a corresponding key name. """
def key_event_to_name(event):
    """ Converts a keystroke event into a corresponding key name. """
    key_code = event.key()
    modifiers = event.modifiers()
    # Keypad keys have their own lookup table; fall back to the general
    # key map when the keypad table has no entry.
    key = None
    if modifiers & QtCore.Qt.KeypadModifier:
        key = keypad_map.get(key_code)
    if key is None:
        key = key_map.get(key_code)
    # Collect modifier names in canonical order, joined with '-'.
    parts = []
    if modifiers & QtCore.Qt.ControlModifier:
        parts.append('Ctrl')
    if modifiers & QtCore.Qt.AltModifier:
        parts.append('Alt')
    if modifiers & QtCore.Qt.MetaModifier:
        parts.append('Meta')
    # Shift is only reported alongside another modifier or for
    # multi-character key names (e.g. 'Home'), not for plain characters.
    if modifiers & QtCore.Qt.ShiftModifier and (parts or (key is not None and len(key) > 1)):
        parts.append('Shift')
    if key:
        parts.append(key)
    return '-'.join(parts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def readmodule(module, path=None):
    '''Backwards compatible interface. Call readmodule_ex() and then only keep
    Class objects from the resulting dictionary.'''
    tree = _readmodule(module, path or [])
    # Filter out anything that is not a Class entry.
    return {name: entry for name, entry in tree.items() if isinstance(entry, Class)}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _basic_field_data(field, obj): """Returns ``obj.field`` data as a dict"""
def _basic_field_data(field, obj):
    """Returns ``obj.field`` data as a dict"""
    # Plain value field: type marker plus the raw field value.
    return {
        Field.TYPE: FieldType.VAL,
        Field.VALUE: field.value_from_object(obj),
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _related_field_data(field, obj): """Returns relation ``field`` as a dict. Dict contains related pk info and some meta information for reconstructing objects. """
def _related_field_data(field, obj):
    """Returns relation ``field`` as a dict.

    Dict contains related pk info and some meta information for
    reconstructing objects.
    """
    remote_model = field.rel.to
    data = _basic_field_data(field, obj)
    # Upgrade the basic value entry to a relation entry with the meta
    # information needed to locate the related model later.
    data[Field.TYPE] = FieldType.REL
    data[Field.REL] = {
        Field.REL_DB_TABLE: remote_model._meta.db_table,
        Field.REL_APP: remote_model._meta.app_label,
        Field.REL_MODEL: remote_model.__name__,
    }
    return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _m2m_field_data(field, obj): """Returns m2m ``field`` as a dict. Value is an array of related primary keys and some meta information for reconstructing objects. """
def _m2m_field_data(field, obj):
    """Returns m2m ``field`` as a dict.

    Value is an array of related primary keys and some meta information
    for reconstructing objects.
    """
    data = _basic_field_data(field, obj)
    data[Field.TYPE] = FieldType.M2M
    remote_model = field.rel.to
    data[Field.REL] = {
        Field.REL_DB_TABLE: remote_model._meta.db_table,
        Field.REL_APP: remote_model._meta.app_label,
        Field.REL_MODEL: remote_model.__name__,
    }
    # Replace the queryset value with a plain list of related pks.
    data[Field.VALUE] = [row[0] for row in data[Field.VALUE].values_list('pk')]
    return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dump_model(obj): """Returns ``obj`` as a dict. Returnded dic has a form of: { 'field_name': { 'type': `FieldType`, 'value': field value, # if field is a relation, it also has: 'rel': { 'db_table': model db table, 'app_label': model app label, 'model_name': model name } } } """
def dump_model(obj):
    """Returns ``obj`` as a dict.

    Returned dict has the form:
    {
        'field_name': {
            'type': `FieldType`,
            'value': field value,
            # if field is a relation, it also has:
            'rel': {
                'db_table': model db table,
                'app_label': model app label,
                'model_name': model name
            }
        }
    }
    """
    data = {}
    for field in obj._meta.fields:
        if isinstance(field, RELATED_FIELDS):
            data[field.name] = _related_field_data(field, obj)
        else:
            data[field.name] = _basic_field_data(field, obj)
    # m2m rows only exist once the object has a primary key.
    if obj.pk:
        for m2m in obj._meta.many_to_many:
            data[m2m.name] = _m2m_field_data(m2m, obj)
    return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def restore_model(cls, data): """Returns instance of ``cls`` with attributed loaded from ``data`` dict. """
def restore_model(cls, data):
    """Returns instance of ``cls`` with attributes loaded from the
    ``data`` dict.
    """
    obj = cls()
    # Each entry maps an attribute name to a {type, value, ...} record.
    for name, record in data.items():
        setattr(obj, name, record[Field.VALUE])
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retrieve_object(cache, template, indexes): """Retrieve an object from Redis using a pipeline. Arguments: template: a dictionary containg the keys for the object and template strings for the corresponding redis keys. The template string uses named string interpolation format. Example: { 'username': 'user:$(id)s:username', 'email': 'user:$(id)s:email', 'phone': 'user:$(id)s:phone' } indexes: a dictionary containing the values to use to cosntruct the redis keys: Example: { 'id': 342 } Returns: a dictionary with the same keys as template, but containing the values retrieved from redis, if all the values are retrieved. If any value is missing, returns None. Example: { 'username': 'bob', 'email': 'bob@example.com', 'phone': '555-555-5555' } """
def retrieve_object(cache, template, indexes):
    """Retrieve an object from Redis using a pipeline.

    Arguments:
        template: dict mapping result keys to redis key templates using
            named string interpolation, e.g.
            {'username': 'user:%(id)s:username'}
        indexes: dict of values used to fill the templates, e.g.
            {'id': 342}

    Returns:
        a dict with the same keys as ``template`` holding the values
        read from redis, or None if any value is missing.
    """
    field_names = list(template)
    with cache as redis_connection:
        # Queue one GET per template entry and issue them in one round trip.
        pipe = redis_connection.pipeline()
        for field in field_names:
            pipe.get(template[field] % indexes)
        values = pipe.execute()
    # All-or-nothing: a single missing key invalidates the whole object.
    if None in values:
        return None
    return dict(zip(field_names, values))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_object(cache, template, indexes, data): """Set an object in Redis using a pipeline. Only sets the fields that are present in both the template and the data. Arguments: template: a dictionary containg the keys for the object and template strings for the corresponding redis keys. The template string uses named string interpolation format. Example: { 'username': 'user:%(id)s:username', 'email': 'user:%(id)s:email', 'phone': 'user:%(id)s:phone' } indexes: a dictionary containing the values to use to cosntruct the redis keys: Example: { 'id': 342 } data: a dictionary returning the data to store. Example: { 'username': 'bob', 'email': 'bob@example.com', 'phone': '555-555-5555' } """
def set_object(cache, template, indexes, data):
    """Set an object in Redis using a pipeline.

    Only the fields present in both ``template`` and ``data`` are
    written.

    Arguments:
        template: dict mapping field names to redis key templates using
            named string interpolation, e.g. {'username': 'user:%(id)s:username'}
        indexes: dict of values used to fill the templates, e.g. {'id': 342}
        data: dict of field values to store, e.g. {'username': 'bob'}
    """
    # TODO(mattmillr): Handle expiration times
    shared_fields = set(template) & set(data)
    with cache as redis_connection:
        pipe = redis_connection.pipeline()
        # Values are stringified before being written.
        for field in shared_fields:
            pipe.set(template[field] % indexes, str(data[field]))
        pipe.execute()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_object(cache, template, indexes): """Delete an object in Redis using a pipeline. Deletes all fields defined by the template. Arguments: template: a dictionary containg the keys for the object and template strings for the corresponding redis keys. The template string uses named string interpolation format. Example: { 'username': 'user:%(id)s:username', 'email': 'user:%(id)s:email', 'phone': 'user:%(id)s:phone' } indexes: a dictionary containing the values to use to construct the redis keys: Example: { 'id': 342 } """
def delete_object(cache, template, indexes):
    """Delete an object in Redis using a pipeline.

    Deletes every field defined by ``template``.

    Arguments:
        template: dict mapping field names to redis key templates using
            named string interpolation, e.g. {'username': 'user:%(id)s:username'}
        indexes: dict of values used to fill the templates, e.g. {'id': 342}
    """
    with cache as redis_connection:
        pipe = redis_connection.pipeline()
        # Queue one DELETE per template entry, executed in one round trip.
        for redis_key_template in template.values():
            pipe.delete(redis_key_template % indexes)
        pipe.execute()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_value(cache, key, value): """Set a value by key. Arguments: cache: instance of Cache key: 'user:342:username', """
def set_value(cache, key, value):
    """Set a single value by key.

    Arguments:
        cache: instance of Cache
        key: full redis key, e.g. 'user:342:username'
        value: value to store

    Returns the result of the underlying SET call.
    """
    with cache as conn:
        return conn.set(key, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(url, filename, properties): """Create new subject at given SCO-API by uploading local file. Expects an tar-archive containing FreeSurfer archive file. Allows to update properties of created resource. Parameters url : string Url to POST image group create request filename : string Path to tar-archive on local disk properties : Dictionary Set of additional properties for subject (may be None) Returns ------- string Url of created subject resource """
def create(url, filename, properties):
    """Create new subject at given SCO-API by uploading local file.

    Expects a tar-archive containing a FreeSurfer archive file. Allows
    updating properties of the created resource.

    Parameters
    ----------
    url : string
        Url to POST subject create request
    filename : string
        Path to tar-archive on local disk
    properties : Dictionary
        Set of additional properties for subject (may be None)

    Returns
    -------
    string
        Url of created subject resource
    """
    # Ensure that the file has valid suffix
    if not has_tar_suffix(filename):
        raise ValueError('invalid file suffix: ' + filename)
    # Upload file to create subject. If response is not 201 the uploaded
    # file is not a valid FreeSurfer archive. FIX: use a context manager
    # so the file handle is always closed (it previously leaked).
    with open(filename, 'rb') as upload_file:
        response = requests.post(url, files={'file': upload_file})
    if response.status_code != 201:
        raise ValueError('invalid file: ' + filename)
    # Get subject HATEOAS references from successful response
    links = references_to_dict(response.json()['links'])
    resource_url = links[REF_SELF]
    # Update subject properties if given
    if properties is not None:
        obj_props = []
        # Catch TypeErrors if properties is not a list.
        try:
            for key in properties:
                obj_props.append({'key': key, 'value': properties[key]})
        except TypeError:
            raise ValueError('invalid property set')
        try:
            req = urllib2.Request(links[REF_UPSERT_PROPERTIES])
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(
                req, json.dumps({'properties': obj_props})
            )
        except urllib2.URLError as ex:
            raise ValueError(str(ex))
    return resource_url
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getdata(inputfile, argnum=None, close=False): """ Get data from the .dat files args: inputfile: file Input File close: bool, default=False Closes inputfile if True inputfile (File): Input file close (boolean): Closes inputfile if True (default: False) returns: dictionary: data: list of parsed data variables: dictionary of errors and other additional variables """
def getdata(inputfile, argnum=None, close=False):
    """Parse data lines from a .dat file.

    Lines containing '#' are treated as comments/metadata. '#f' lines
    declare an additional float variable ('#f name=1.5'); '#l' lines a
    list of floats ('#l name=1,2,3').

    args:
        inputfile: file
            Input File
        argnum: int, optional
            Expected number of whitespace-separated fields per data line
        close: bool, default=False
            Closes inputfile if True

    returns:
        dictionary:
            data: numpy array of parsed data
            variables: dictionary of additional variables
    """
    output = []
    add_data = {}
    line_num = 0
    for line in inputfile:
        line_num += 1
        # FIX: lines read from a file keep their trailing newline, so the
        # old test `line != ""` never excluded blank lines; strip before
        # testing so blank/whitespace-only lines are skipped instead of
        # producing empty records or spurious argnum errors.
        if ("#" not in line) and (line.strip() != ""):
            linesplit = line.split()
            if argnum is not None and len(linesplit) != int(argnum):
                raise ValueError(
                    "Line {:d} has {:d} arguments (need {:d})".format(
                        line_num, len(linesplit), argnum))
            output.append(linesplit)
        # additional float variable
        if "#f" in line:
            data = line.split()[1].split("=")
            add_data[data[0]] = float(data[1])
        # additional list float variable
        if "#l" in line:
            data = line.split()[1].split("=")
            add_data[data[0]] = [float(e) for e in data[1].split(",")]
    if close:
        inputfile.close()
    output = cleandata(output)
    return {
        "data": np.array(output),
        "variables": add_data,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_supervisor(self, update=False): """ install supervisor config for redis """
def install_supervisor(self, update=False):
    """ install supervisor config for redis """
    # Assemble the supervisor program options; the command line is
    # rendered from the command template with this part's config/prefix.
    options = {
        'user': self.options.get('user'),
        'program': self.options.get('program'),
        'command': templ_cmd.render(config=self.conf_filename, prefix=self.prefix),
        'stopwaitsecs': '30',
        'killasgroup': 'true',
    }
    recipe = supervisor.Recipe(self.buildout, self.name, options)
    return recipe.install(update)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _format(val, valtype, floatfmt, missingval="", has_invisible=True): """Format a value accoding to its type. Unicode is supported: tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ tabulate(tbl, headers=hrow) == good_result True """
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.

    Unicode is supported:

    tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
    good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
    tabulate(tbl, headers=hrow) == good_result
    True
    """
    if val is None:
        return missingval
    if valtype in (int, _long_type, _text_type):
        return "{0}".format(val)
    if valtype is _binary_type:
        # Decode bytes as ASCII when possible; otherwise fall back to
        # the default text conversion.
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    if valtype is float:
        # A "colored" number is a string carrying invisible (ANSI)
        # sequences around the digits: format the stripped value and
        # splice it back into the original string.
        if has_invisible and isinstance(val, (_text_type, _binary_type)):
            raw_val = _strip_invisible(val)
            return val.replace(raw_val, format(float(raw_val), floatfmt))
        return format(float(val), floatfmt)
    return "{0}".format(val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_view_model(cls): """ Get the model to use in the filter_class by inspecting the queryset or by using a declared auto_filters_model """
def get_view_model(cls):
    """Get the model to use in the filter_class by inspecting the
    queryset or by using a declared auto_filters_model.
    """
    # A queryset, when present, always wins.
    if cls.queryset is not None:
        return cls.queryset.model
    msg = 'When using get_queryset you must set a auto_filters_model field in the viewset'
    assert hasattr(cls, 'auto_filters_model'), msg
    return cls.auto_filters_model
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def auto_filters(cls): """ Adds a dynamic filterclass to a viewset with all auto filters available for the field type that are declared in a tuple auto_filter_fields @auto_filters auto_filters_fields('id', 'location', 'category') """
def auto_filters(cls):
    """Adds a dynamic filter class to a viewset with all auto filters
    available for the field types declared in the auto_filters_fields
    tuple.

    @auto_filters
    auto_filters_fields('id', 'location', 'category')
    """
    msg = 'Viewset must have auto_filters_fields or auto_filters_exclude attribute when using auto_filters decorator'
    if not hasattr(cls, 'auto_filters_fields') and not hasattr(cls, 'auto_filters_exclude'):
        raise AssertionError(msg)
    view_model = get_view_model(cls)
    # One AutoFilters entry per declared field.
    attrs = {
        field_name: AutoFilters(name=field_name)
        for field_name in get_auto_filters_fields(cls, view_model)
    }
    # Create the inner Meta class, then the dynamic FilterSet subclass.
    attrs['Meta'] = type('Meta', (object,), {'model': view_model, 'fields': ()})
    cls.filter_class = type('DynamicFilterClass', (FilterSet,), attrs)
    return cls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def env(*_vars, **kwargs): """Search for the first defined of possibly many env vars. Returns the first environment variable defined in vars, or returns the default defined in kwargs. """
def env(*_vars, **kwargs):
    """Search for the first defined of possibly many env vars.

    Returns the first environment variable that is set to a non-empty
    value, or the 'default' keyword argument (empty string if omitted).
    """
    # Empty-string values count as unset, matching truthiness semantics.
    candidates = (os.environ.get(name) for name in _vars)
    return next((value for value in candidates if value), kwargs.get('default', ''))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_option_parser(self, description, version): """Return an argparse option parser for this application. Subclasses may override this method to extend the parser with more global options. :param description: full description of the application :paramtype description: str :param version: version number for the application :paramtype version: str """
# Build the top-level argparse parser. add_help=False because a custom
# HelpAction is registered for -h/--help below.
parser = argparse.ArgumentParser(
    description=description,
    add_help=False,
)
parser.add_argument(
    '--version',
    action='version',
    version=__version__,
)
# -v may be repeated; each occurrence raises the verbosity count.
parser.add_argument(
    '-v', '--verbose', '--debug',
    action='count',
    dest='verbose_level',
    default=self.DEFAULT_VERBOSE_LEVEL,
    help=_('Increase verbosity of output and show tracebacks on'
           ' errors. You can repeat this option.'))
# -q forces verbosity to 0 (warnings and errors only).
parser.add_argument(
    '-q', '--quiet',
    action='store_const',
    dest='verbose_level',
    const=0,
    help=_('Suppress output except warnings and errors.'))
parser.add_argument(
    '-h', '--help',
    action=HelpAction,
    nargs=0,
    default=self,  # tricky
    help=_("Show this help message and exit."))
# Retries must be non-negative; enforced by check_non_negative_int.
parser.add_argument(
    '-r', '--retries',
    metavar="NUM",
    type=check_non_negative_int,
    default=0,
    help=_("How many times the request to the Neutron server should "
           "be retried if it fails."))
# FIXME(bklei): this method should come from keystoneauth1
self._append_global_identity_args(parser)
return parser
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bash_completion(self): """Prints all of the commands and options for bash-completion."""
def _bash_completion(self):
    """Prints all of the commands and options for bash-completion."""
    # Global option strings from the application's own parser.
    options = set(self.parser._option_string_actions)
    commands = set()
    # Add every registered command name plus that command's options.
    for name, command in self.command_manager:
        commands.add(name)
        cmd_factory = command.load()
        cmd = cmd_factory(self, None)
        sub_parser = cmd.get_parser('')
        options.update(sub_parser._option_string_actions)
    print(' '.join(commands | options))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, argv): """Equivalent to the main program for the application. :param argv: input arguments and options :paramtype argv: list of str """
# Pre-scan argv to decide how 'help'/-h should be dispatched before
# argparse ever sees the arguments.
try:
    index = 0
    command_pos = -1        # position of the first recognized subcommand
    help_pos = -1           # position of the first -h/--help flag
    help_command_pos = -1   # position of the literal 'help' command
    for arg in argv:
        if arg == 'bash-completion' and help_command_pos == -1:
            self._bash_completion()
            return 0
        if arg in self.commands[self.api_version]:
            if command_pos == -1:
                command_pos = index
        elif arg in ('-h', '--help'):
            if help_pos == -1:
                help_pos = index
        elif arg == 'help':
            if help_command_pos == -1:
                help_command_pos = index
        index = index + 1
    # '<command> --help' is rewritten to 'help <command>'.
    if command_pos > -1 and help_pos > command_pos:
        argv = ['help', argv[command_pos]]
    # A bare 'help' with no command becomes global '--help'.
    if help_command_pos > -1 and command_pos == -1:
        argv[help_command_pos] = '--help'
    self.options, remainder = self.parser.parse_known_args(argv)
    self.configure_logging()
    # Nothing left to run means we drop into the interactive shell.
    self.interactive_mode = not remainder
    self.initialize_app(remainder)
except Exception as err:
    # At debug verbosity re-raise with full traceback; otherwise log
    # the error and signal failure with a non-zero exit status.
    if self.options.verbose_level >= self.DEBUG_LEVEL:
        self.log.exception(err)
        raise
    else:
        self.log.error(err)
    return 1
if self.interactive_mode:
    # Strip argv down to the program name before entering the shell.
    _argv = [sys.argv[0]]
    sys.argv = _argv
    return self.interact()
return self.run_subcommand(remainder)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def authenticate_user(self): """Confirm user authentication Make sure the user has provided all of the authentication info we need. """
# Resolve cloud configuration (CLI args layered over clouds.yaml etc.).
cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
    cloud=self.options.os_cloud, argparse=self.options,
    network_api_version=self.api_version,
    verify=not self.options.insecure)
verify, cert = cloud_config.get_requests_verify_args()

# TODO(singhj): Remove dependancy on HTTPClient
# for the case of token-endpoint authentication

# When using token-endpoint authentication legacy
# HTTPClient will be used, otherwise SessionClient
# will be used.
if self.options.os_token and self.options.os_url:
    # Token + endpoint given directly: no keystone session needed.
    auth = None
    auth_session = None
else:
    auth = cloud_config.get_auth()
    auth_session = session.Session(
        auth=auth, verify=verify, cert=cert,
        timeout=self.options.http_timeout)

# Normalize interface names like 'publicURL' to 'public'.
interface = self.options.os_endpoint_type or self.endpoint_type
if interface.endswith('URL'):
    interface = interface[:-3]
self.client_manager = clientmanager.ClientManager(
    retries=self.options.retries,
    raise_errors=False,
    session=auth_session,
    url=self.options.os_url,
    token=self.options.os_token,
    region_name=cloud_config.get_region_name(),
    api_version=cloud_config.get_api_version('network'),
    service_type=cloud_config.get_service_type('network'),
    service_name=cloud_config.get_service_name('network'),
    endpoint_type=interface,
    auth=auth,
    insecure=not verify,
    log_credentials=True)
return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure_logging(self): """Create logging handlers for any log output."""
root_logger = logging.getLogger('') # Set up logging to a file root_logger.setLevel(logging.DEBUG) # Send higher-level messages to the console via stderr console = logging.StreamHandler(self.stderr) console_level = {self.WARNING_LEVEL: logging.WARNING, self.INFO_LEVEL: logging.INFO, self.DEBUG_LEVEL: logging.DEBUG, }.get(self.options.verbose_level, logging.DEBUG) # The default log level is INFO, in this situation, set the # log level of the console to WARNING, to avoid displaying # useless messages. This equals using "--quiet" if console_level == logging.INFO: console.setLevel(logging.WARNING) else: console.setLevel(console_level) if logging.DEBUG == console_level: formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT) else: formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT) logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING) logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING) console.setFormatter(formatter) root_logger.addHandler(console) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def max_substring(words, position=0, _last_letter=''): """Finds max substring shared by all strings starting at position Args: words (list): list of unicode of all words to compare position (int): starting position in each word to begin analyzing for substring _last_letter (unicode): last common letter, only for use internally unless you really know what you are doing Returns: unicode: max str common to all words Examples: .. code-block:: Python 'aaa' 'bbb' '' """
# If end of word is reached, begin reconstructing the substring try: letter = [word[position] for word in words] except IndexError: return _last_letter # Recurse if position matches, else begin reconstructing the substring if all(l == letter[0] for l in letter) is True: _last_letter += max_substring(words, position=position + 1, _last_letter=letter[0]) return _last_letter else: return _last_letter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _map_generator(f, generator): """Apply ``f`` to the results of the given bi-directional generator. Unfortunately, generator comprehension (``f(x) for x in gen``) does not work for as expected for bi-directional generators. It won't send exceptions and results back. This function implements a map function for generators that sends values and exceptions back and forth as expected. """
# Prime the wrapped generator to obtain its first yielded item.
item = next(generator)
while True:
    try:
        # Yield the mapped item to our consumer; whatever the consumer
        # sends (or throws) must be relayed into the inner generator.
        result = yield f(item)
    except Exception:
        # Propagate the consumer's exception into the wrapped generator
        # and continue with whatever it yields in response.
        item = generator.throw(*sys.exc_info())
    else:
        # Forward the consumer's sent value and take the next item.
        item = generator.send(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def compile_markdown_file(source_file):
    '''Compiles a single markdown file to a remark.js slideshow.

    Reads *source_file*, renders it into the bundled
    ``slideshow.mustache`` template and writes ``<name>.html`` into a
    ``md2remark_build`` directory under the current working directory
    (creating the directory if needed).
    '''
    template = pkg_resources.resource_string('md2remark.resources.templates', 'slideshow.mustache')
    renderer = pystache.Renderer(search_dirs='./templates')

    # Context managers ensure the file handles are released even if
    # reading or rendering raises (the original leaked handles on error).
    with open(source_file, 'r') as f:
        slideshow_md = f.read()

    # Slideshow name: the source file's basename without its extension.
    slideshow_name = os.path.split(source_file)[1].split('.')[0]

    rendered_text = renderer.render(template, {'title': slideshow_name, 'slideshow': slideshow_md})

    if not os.path.exists('md2remark_build'):
        os.makedirs('md2remark_build')

    with open(os.path.join('md2remark_build', slideshow_name + '.html'), 'w') as f:
        f.write(rendered_text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def compile_slides(source):
    '''Compiles the source to a remark.js slideshow.

    When *source* is a directory, every ``*.md`` file (case-insensitive
    extension) directly inside it is compiled; otherwise *source* itself
    is compiled as a single markdown file.
    '''
    # Single-file case: compile it directly and bail out early.
    if not os.path.isdir(source):
        compile_markdown_file(source)
        return
    # Directory case: compile each markdown file it contains.
    for entry in os.listdir(source):
        if entry.lower().endswith('.md'):
            compile_markdown_file(os.path.join(source, entry))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_cl_args(arg_vector):
    '''Parses the command line arguments.

    Returns an ``argparse.Namespace`` with a single ``source``
    attribute naming the file or directory to compile.
    '''
    source_help = ('the source to compile. If a directory is provided, '
                   'all markdown files in that directory are compiled. '
                   'Output is saved in the current working directory '
                   'under a md2remark_build subdirectory.')
    parser = argparse.ArgumentParser(
        description='Compiles markdown files into html files for remark.js')
    parser.add_argument('source', metavar='source', help=source_help)
    return parser.parse_args(arg_vector)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_filter_qobj(self, keys=None): """ Return a copy of this Query object with additional where clauses for the keys in the argument """
# only care about columns in aggregates right? cols = set() for agg in self.select.aggregates: cols.update(agg.cols) sels = [SelectExpr(col, [col], col, None) for col in cols] select = Select(sels) where = list(self.where) if keys: keys = list(keys) keys = map(sqlize, list(keys)) expr = self.select.nonaggs[0].expr clause = [] if None in keys: clause.append("%s is null" % expr) if len([k for k in keys if k is not None]) > 0: clause.append("%s in %%s" % expr) clause = " or ".join(clause) where.append(clause) else: where.append( '%s = %%s' % (self.select.nonaggs[0].expr ) ) q = Query(self.db, select, self.fr, where) return q
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def total_build_duration_for_chain(self, build_chain_id): """Returns the total duration for one specific build chain run"""
return sum([ int(self.__build_duration_for_id(id)) for id in self.__build_ids_of_chain(build_chain_id) ])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_cycle_time(self, build_id): """Returns a BuildCycleTime object for the given build"""
json_form = self.__retrieve_as_json(self.builds_path % build_id) return BuildCycleTime( build_id, json_form[u'buildTypeId'], as_date(json_form, u'startDate'), (as_date(json_form, u'finishDate') - as_date(json_form, u'queuedDate')).seconds * 1000 )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_stats_for_chain(self, build_chain_id): """Returns a list of Build tuples for all elements in the build chain. This method allows insight into the runtime of each configuration inside the build chain. """
json_form = self.__retrieve_as_json(self.build_chain_path % build_chain_id) builds = [{'build_id': build[u'id'], 'configuration_id': build[u'buildTypeId']} for build in json_form[u'build']] return [ BuildStat( build['build_id'], build['configuration_id'], self.__build_duration_for_id(build['build_id']), self.__build_start_date_for_id(build['build_id'])) for build in builds ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def quadratic_2d(data): """ Compute the quadratic estimate of the centroid in a 2d-array. Args: data (2darray): two dimensional data array Returns center (tuple): centroid estimate on the row and column directions, respectively """
# Locate the integer-pixel maximum; the 3x3 neighbourhood around it is
# then fit with a 2-D quadratic to refine the peak position.
arg_data_max = np.argmax(data)
i, j = np.unravel_index(arg_data_max, data.shape)
z_ = data[i-1:i+2, j-1:j+2]
# our quadratic function is defined as
# f(x, y | a, b, c, d, e, f) := a + b * x + c * y + d * x^2 + e * xy + f * y^2
# therefore, the best fit coeffiecients are given as
# note that they are unique and the uncertainty in each of them (#TODO) can be
# computed following the derivations done by Vakili & Hogg (2016) and
# Teague & Foreman-Mackey (2018)
try:
    a = (-z_[0,0] + 2*z_[0,1] - z_[0,2] + 2*z_[1,0] + 5*z_[1,1] + 2*z_[1,2] -
         z_[2,0] + 2*z_[2,1] - z_[2,2]) / 9
    b = (-z_[0,0] - z_[0,1] - z_[0,2] + z_[2,0] + z_[2,1] + z_[2,2]) / 6
    c = (-z_[0,0] + z_[0,2] - z_[1,0] + z_[1,2] - z_[2,0] + z_[2,2]) / 6
    d = (z_[0,0] + z_[0,1] + z_[0,2] - z_[1,0]*2 - z_[1,1]*2 - z_[1,2]*2 +
         z_[2,0] + z_[2,1] + z_[2,2])/6
    e = (z_[0,0] - z_[0,2] - z_[2,0] + z_[2,2]) * .25
    f = (z_[0,0] - 2 * z_[0,1] + z_[0,2] + z_[1,0] - 2 * z_[1,1] + z_[1,2] +
         z_[2,0] - 2 * z_[2,1] + z_[2,2]) / 6
except IndexError:
    # Peak on the array border: the 3x3 window is truncated by the
    # slice above and the z_[r,c] lookups fail, so fall back to the
    # integer-pixel estimate.
    return (i, j)

# see https://en.wikipedia.org/wiki/Quadratic_function
# Stationary point of the fitted quadratic, offset from the pixel peak.
det = 4 * d * f - e ** 2
xm = - (2 * f * b - c * e) / det
ym = - (2 * d * c - b * e) / det
return (i+xm, j+ym)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def depsOf_of_mirteFile_instance_definition(man, insts): """ Returns a function that returns the dependencies of an instance definition by its name, where insts is a dictionary of instance definitions from a mirteFile """
# Given an instance-definition name x, return the values of those
# settings whose names match a declared dependency of the instance's
# module (looked up via man.modules).  Instances without a 'module'
# key contribute no dependencies.
return lambda x: [a[1] for a in six.iteritems(insts[x])
                  if a[0] in [dn for dn, d in (
                      six.iteritems(man.modules[insts[x]['module']].deps)
                      if 'module' in insts[x] else [])]]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def depsOf_of_mirteFile_module_definition(defs): """ Returns a function that returns the dependencies of a module definition by its name, where defs is a dictionary of module definitions from a mirteFile """
# Given a module-definition name x, return its dependencies: the 'type'
# of every setting that names another definition in defs, plus any
# modules listed under 'inherits'.
return lambda x: (list(filter(lambda z: z is not None and z in defs,
                              map(lambda y: y[1].get('type'),
                                  six.iteritems(defs[x]['settings'])
                                  if 'settings' in defs[x] else [])))) + \
    (list(defs[x]['inherits']) if 'inherits' in defs[x] else [])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _generate(self, size=None):
    """Generate a new word by walking the letter-transition vectors."""
    known = list(self.vectors.keys())
    current = random.choice(known)
    # Draw a length from the corpus distribution unless one was given.
    if size is None:
        size = int(random.normalvariate(self.avg, self.std_dev))
    word = [current]
    for _ in range(size):
        # Dead end: the chain reached a letter with no outgoing data.
        if current not in known:
            break
        current = self.vectors[current].choose()
        word.append(current)
    return ''.join(word)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _print_tasks(env, tasks, mark_active=False): """ Prints task information using io stream. `env` ``Environment`` object. `tasks` List of tuples (task_name, options, block_options). `mark_active` Set to ``True`` to mark active task. """
# Determine which task (if any) should be highlighted as active.
if env.task.active and mark_active:
    active_task = env.task.name
else:
    active_task = None

for task, options, blocks in tasks:
    # print heading
    # Output channel: 'success' for the active task, 'error' for an
    # invalid one (no parsed options/blocks), plain 'write' otherwise.
    invalid = False
    if task == active_task:
        method = 'success'
    else:
        if options is None and blocks is None:
            method = 'error'
            invalid = True
        else:
            method = 'write'

    opts = list(options or [])
    blks = list(blocks or [])

    write = getattr(env.io, method)
    write('~' * 80)
    write(' ' + task)
    write('~' * 80)
    env.io.write('')

    # non-block options
    if opts:
        for opt, values in opts:
            env.io.write(' {0}: {1}'
                         .format(opt, ', '.join(str(v) for v in values)))
        env.io.write('')

    # block options
    if blks:
        had_options = False
        for block, options in blks:
            if options:
                had_options = True
                env.io.write(' {{ {0} }}'.format(block))
                for opt, values in options:
                    env.io.write(' {0}: {1}'
                                 .format(opt,
                                         ', '.join(str(v) for v in values)))
                env.io.write('')
        # Blocks existed but none carried options: treat as empty below.
        if not had_options:
            blks = None

    # Nothing printed for this task: say why (invalid vs. just empty).
    if not opts and not blks:
        if invalid:
            env.io.write('   Invalid task.')
        else:
            env.io.write('   Empty task.')
        env.io.write('')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _edit_task_config(env, task_config, confirm): """ Launches text editor to edit provided task configuration file. `env` Runtime ``Environment`` instance. `task_config` Path to task configuration file. `confirm` If task config is invalid after edit, prompt to re-edit. Return boolean. * Raises ``InvalidTaskConfig`` if edited task config fails to parse and `confirm` is ``False``. """
# get editor program if common.IS_MACOSX: def_editor = 'open' else: def_editor = 'vi' editor = os.environ.get('EDITOR', def_editor) def _edit_file(filename): """ Launches editor for given filename. """ proc = subprocess.Popen('{0} {1}'.format(editor, filename), shell=True) proc.communicate() if proc.returncode == 0: try: # parse temp configuration file parser_ = parser.parse_config(filename, 'task') registration.run_option_hooks(parser_, disable_missing=False) except (parser.ParseError, errors.InvalidTaskConfig) as exc: reason = unicode(getattr(exc, 'reason', exc)) raise errors.InvalidTaskConfig(task_config, reason=reason) return True else: return False try: # create temp copy of task config fd, tmpname = tempfile.mkstemp(suffix='.cfg', prefix='focus_') with open(task_config, 'r') as file_: os.write(fd, file_.read()) os.close(fd) while True: try: # launch editor if not _edit_file(tmpname): return False # overwrite original with temp with open(tmpname, 'r') as temp: with open(task_config, 'w', 0) as config: config.write(temp.read()) return True except errors.InvalidTaskConfig as exc: if not confirm: raise # reraise # prompt to re-edit env.io.error(unicode(exc)) while True: try: resp = env.io.prompt('Would you like to retry? (y/n) ') resp = resp.strip().lower() except KeyboardInterrupt: return True if resp == 'y': break elif resp == 'n': return True except OSError: return False finally: common.safe_remove_file(tmpname)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self, env, args): """ Starts a new task. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser. """
# Attempt to start the named task; report success to the user only
# when the task manager confirms it was loaded.
started = env.task.start(args.task_name)
if started:
    env.io.success(u'Task Loaded.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup_parser(self, parser): """ Setup the argument parser. `parser` ``FocusArgParser`` object. """
# Positional: the name of the task to create.
parser.add_argument('task_name', help='task to create')
# Optional positional: an existing task whose settings are copied.
parser.add_argument('clone_task', nargs='?',
                    help='existing task to clone')
# Flag: create the task without opening its config in an editor.
parser.add_argument('--skip-edit', action='store_true',
                    help='skip editing of task configuration')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self, env, args): """ Creates a new task. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser. """
task_name = args.task_name
clone_task = args.clone_task

# Create the task (optionally cloning an existing one); abort loudly
# if the task manager refuses.
if not env.task.create(task_name, clone_task):
    raise errors.FocusError(u'Could not create task "{0}"'
                            .format(task_name))

# open in task config in editor
if not args.skip_edit:
    task_config = env.task.get_config_path(task_name)
    if not _edit_task_config(env, task_config, confirm=True):
        raise errors.FocusError(u'Could not open task config: {0}'
                                .format(task_config))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self, env, args): """ Edits task configuration. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser. """
name = args.task_name

# Guard clauses: the task must exist and must not be the active one.
if not env.task.exists(name):
    raise errors.TaskNotFound(name)
if env.task.active and env.task.name == name:
    raise errors.ActiveTask

# Hand the task's config file to the user's editor, re-prompting on
# invalid edits; fail loudly if the editor could not be launched.
config_path = env.task.get_config_path(name)
if not _edit_task_config(env, config_path, confirm=True):
    raise errors.FocusError(u'Could not open task config: {0}'
                            .format(config_path))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self, env, args): """ Lists all valid tasks. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser. """
tasks = env.task.get_list_info() if not tasks: env.io.write("No tasks found.") else: if args.verbose: _print_tasks(env, tasks, mark_active=True) else: if env.task.active: active_task = env.task.name else: active_task = None for task, options, blocks in tasks: if task == active_task: env.io.success(task + ' *') else: if options is None and blocks is None: env.io.error(task + ' ~') else: env.io.write(task)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def no_content_response(response):
    """Cautiously decide whether the response body carries no content.

    True when the response has no ``_container`` attribute, when the
    container is None, or when it is a one-element list/tuple whose
    sole element is falsy; False in every other case.
    """
    container = getattr(response, '_container', None)
    if container is None:
        return True
    if isinstance(container, (list, tuple)) and len(container) == 1:
        return not bool(container[0])
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_template_file(file_name, context): """ Renders and overrides Jinja2 template files """
# Open for in-place rewrite: read the template, render it with the
# given context, then write the result back over the same file.
with open(file_name, 'r+') as f:
    template = Template(f.read())
    output = template.render(context)
    f.seek(0)      # rewind before overwriting
    f.write(output)
    f.truncate()   # drop any trailing bytes left from the old content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(name, output, font): """ Easily bootstrap an OS project to fool HR departments and pad your resume. """
# The path of the directory where the final files will end up in bootstrapped_directory = os.getcwd() + os.sep + name.lower().replace(' ', '-') + os.sep # Copy the template files to the target directory copy_tree(get_real_path(os.sep + 'my-cool-os-template'), bootstrapped_directory) # Create the necessary assembly mov instructions for printing out the output on boot start_byte = int('0xb8000', 16) instructions_list = [] for c in output: char_as_hex = '0x02'+ c.encode('hex') instructions_list.append('\tmov word [{0}], {1} ; {2}'.format(hex(start_byte), char_as_hex, c)) start_byte += 2 # Render the ASCII banner to be displayed in the README (A must for any serious hobby OS project!) banner = Figlet(font=font).renderText(name) render_template_file(bootstrapped_directory + 'README.md', {'name' : name, 'banner' : banner}) render_template_file(bootstrapped_directory + 'grub.cfg' , {'name' : name}) render_template_file(bootstrapped_directory + 'boot.asm' , {'instructions_list' : instructions_list}) print('finished bootstrapping project into directory ' + bootstrapped_directory)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dbfreader(f): """Returns an iterator over records in a Xbase DBF file. The first row returned contains the field names. The second row contains field specs: (type, size, decimal places). Subsequent rows contain the data records. If a record is marked as deleted, it is skipped. File should be opened for binary reads. """
# See DBF format spec at: # http://www.pgts.com.au/download/public/xbase.htm#DBF_STRUCT numrec, lenheader = struct.unpack('<xxxxLH22x', f.read(32)) numfields = (lenheader - 33) // 32 fields = [] for fieldno in xrange(numfields): name, typ, size, deci = struct.unpack('<11sc4xBB14x', f.read(32)) name = name.replace('\0', '') # eliminate NULs from string fields.append((name, typ, size, deci)) yield [field[0] for field in fields] yield [tuple(field[1:]) for field in fields] # replacing missing values with np.NaN. trade-off to make integers as # floats. See # http://stackoverflow.com/questions/11548005/numpy-or-pandas-keeping-array-type-as-integer-while-having-a-nan-value # The limitation is not solved it seems. (Numpy). terminator = f.read(1) assert terminator == '\r' fields.insert(0, ('DeletionFlag', 'C', 1, 0)) fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in fields]) fmtsiz = struct.calcsize(fmt) for i in xrange(numrec): record = struct.unpack(fmt, f.read(fmtsiz)) if record[0] != ' ': continue # deleted record result = [] for (name, typ, size, deci), value in itertools.izip(fields, record): if name == 'DeletionFlag': continue if typ == "N": value = value.replace('\0', '').lstrip() if value == '': # value = 0 value = np.NaN # 0 is a value. elif deci: value = float(value) # value = decimal.Decimal(value) Not necessary. else: value = int(value) elif typ == 'D': y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8]) value = datetime.date(y, m, d) elif typ == 'L': value = ((value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?') elif typ == 'F': # Can this type not be null? value = float(value) result.append(value) yield result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dbf_asdict(fn, usecols=None, keystyle='ints'): """Return data from dbf file fn as a dict. fn: str The filename string. usecols: seqence The columns to use, 0-based. keystyle: str 'ints' or 'names' accepted. Should be 'ints' (default) when this function is given to a ChannelPack as loadfunc. If 'names' is used, keys will be the field names from the dbf file. """
# NOTE: Python 2 only -- relies on iterator .next() and zip() returning
# a list.
if keystyle not in ['ints', 'names']:
    raise ValueError('Unknown keyword: ' + str(keystyle))

with open(fn, 'rb') as fo:
    rit = dbfreader(fo)
    names = rit.next()    # first yielded row: the field names
    specs = rit.next()    # NOQA  second row: field specs (unused)
    R = [tuple(r) for r in rit]

def getkey(i):
    # Column key is either the 0-based index or the dbf field name.
    if keystyle == 'ints':
        return i
    else:
        return names[i]

# Transpose records into per-column tuples.
R = zip(*R)
d = dict()
for i in usecols or range(len(names)):
    # d[getkey(i)] = R['f' + str(i)]  # Default numpy fieldname
    d[getkey(i)] = np.array(R[i])
return d
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check(self): """ Check that this table is complete, that is, every character of this table can be followed by a new character. :return: True if the table is complete, False otherwise. """
for character, followers in self.items(): for follower in followers: if follower not in self: return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_word(self, length, prefix=0, start=False, end=False, flatten=False): """ Generate a random word of length from this table. :param length: the length of the generated word; >= 1; :param prefix: if greater than 0, the maximum length of the prefix to consider to choose the next character; :param start: if True, the generated word starts as a word of table; :param end: if True, the generated word ends as a word of table; :param flatten: whether or not consider the table as flattened; :return: a random word of length generated from table. :raises GenerationError: if no word of length can be generated. """
if start:
    # Anchor generation at the start-of-word marker; request one extra
    # character and strip the marker from the result.
    word = ">"
    length += 1
    return self._extend_word(word, length, prefix=prefix, end=end,
                             flatten=flatten)[1:]
else:
    # Candidate seeds: every single-character key except the marker.
    first_letters = list(k for k in self if len(k) == 1 and k != ">")
    while True:
        word = random.choice(first_letters)
        try:
            word = self._extend_word(word, length, prefix=prefix,
                                     end=end, flatten=flatten)
            return word
        except GenerationError:
            # This seed can never reach the requested length; discard
            # it and retry with another.
            # NOTE(review): if every seed fails, first_letters empties
            # and random.choice raises IndexError instead of
            # GenerationError -- confirm intended behaviour.
            first_letters.remove(word[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _extend_word(self, word, length, prefix=0, end=False, flatten=False): """ Extend the given word with a random suffix up to length. :param length: the length of the extended word; >= len(word); :param prefix: if greater than 0, the maximum length of the prefix to consider to choose the next character; :param end: if True, the generated word ends as a word of table; :param flatten: whether or not consider the table as flattened; :return: a random word of length generated from table, extending word. :raises GenerationError: if the generated word cannot be extended to length. """
if len(word) == length:
    # Base case: target length reached.  When `end` is requested the
    # final character must allow the end-of-word marker "<".
    if end and "<" not in self[word[-1]]:
        raise GenerationError(word + " cannot be extended")
    else:
        return word
else:  # len(word) < length
    # Never pick the end-of-word marker while still extending.
    exclude = {"<"}
    while True:
        # Key the lookup on the last `prefix` characters, or on the
        # whole word when prefix <= 0.
        choices = self.weighted_choices(word[-prefix if prefix > 0 else 0:],
                                        exclude=exclude, flatten=flatten)
        if not choices:
            raise GenerationError(word + " cannot be extended")
        # Extend with the weighted choice
        character = random_weighted_choice(choices)
        word += character
        try:
            word = self._extend_word(word, length, prefix=prefix, end=end,
                                     flatten=flatten)
            return word
        except GenerationError:
            # Dead end downstream: ban this character at this position
            # and backtrack one step.
            exclude.add(character)
            word = word[:-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_module(module_path): """ Try to import and return the given module, if it exists, None if it doesn't exist :raises ImportError: When imported module contains errors """
if six.PY2:
    try:
        return importlib.import_module(module_path)
    except ImportError:
        tb = sys.exc_info()[2]
        # Inspect the traceback depth to distinguish "module does not
        # exist" (shallow traceback) from an ImportError raised inside
        # the imported module itself (deeper), which must propagate.
        stack = traceback.extract_tb(tb, 3)
        if len(stack) > 2:
            raise
        # Otherwise fall through and implicitly return None.
else:
    # Python 3: probe for a loader first; implicitly returns None when
    # the module does not exist.
    from importlib import find_loader
    if find_loader(module_path):
        return importlib.import_module(module_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_password(length, chars=string.letters + string.digits + '#$%&!'): """ Generate and return a random password :param length: Desired length :param chars: Character set to use """
# Delegate to get_random_string, which presumably draws `length`
# characters from `chars` -- see that helper's documentation for its
# randomness guarantees.
return get_random_string(length, chars)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __load_section(self, section_key): """ Reads the set of article links for a section if they are not cached. """
# NOTE: Python 2 era code (str/bytes conflation in the `in` test and
# .decode('string_escape')).
# Cached already? Each section is fetched at most once.
if self._sections[section_key] is not None:
    return

articles = []

for page in count(1):
    # Safety valve: the site never had 50+ pages; bail instead of
    # scraping forever if last-page detection silently breaks.
    if page > 50:
        raise Exception('Last page detection is probably broken')

    url = '{domain}{section}&iMenuID=1&iSubMenuID={page}'.format(
        domain = DOMAIN,
        section = SECTIONS[section_key],
        page = page
    )
    body = self._session.get(url).content

    # This is a very hacky way of detecting the last page
    # that will probably break again in the future
    if "알수 없는 주소" in body:  # "Unknown Address"
        break

    # Parse out all the article links
    root = html.fromstring(body)
    title_lines = root.find_class('ListNewsLineTitle')
    for title_line in title_lines:
        title_link = title_line.find('a')

        # The links do a JS open in a new window, so we need to parse
        # it out using this ugly, brittle junk
        href = title_link.get('href')
        match = re.match("javascript:article_open\('(.+)'\)", href)
        if not match:
            raise Exception("The site's link format has changed and is not compatible")
        path = match.group(1).decode('string_escape')

        articles.append(Article(
            self._session,
            title_link.text_content().strip(),
            DOMAIN + '/en/' + path
        ))

self._sections[section_key] = articles
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __load(self): """ Loads text and photos if they are not cached. """
# Cached already? Text is fetched at most once per article.
if self._text is not None:
    return

body = self._session.get(self.url).content
root = html.fromstring(body)

# Article body: join the text of every justified ArticleContent <p>.
self._text = "\n".join((
    p_tag.text_content()
    for p_tag in root.findall('.//p[@class="ArticleContent"]')
    if 'justify' in p_tag.get('style', '')
))

# TODO fix this
self._photos = []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_whitelisted(self, req): """Return True if one of the requester's roles is whitelisted; return False when the whitelist is empty or the roles cannot be determined."""
# An empty whitelist disables the bypass entirely; likewise a request
# whose roles cannot be determined is NOT whitelisted.
if not self.roles_whitelist:
    return False
if not hasattr(req, 'context'):
    self.log.info("No context found.")
    return False
if not hasattr(req.context, 'roles'):
    self.log.info("No roles found in context")
    return False
roles = req.context.roles
self.log.debug("Received request from user with roles: %s",
               ' '.join(roles))
# Whitelisted if any whitelist entry appears among the user's roles.
for key in self.roles_whitelist:
    if key in roles:
        self.log.debug("User role (%s) is whitelisted.", key)
        return True
return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_topic(request, forum_slug=None): """ Adds a topic to a given forum """
forum = Forum.objects.get(slug=forum_slug)
# `or None` keeps the form unbound on a plain GET request.
form = AddTopicForm(request.POST or None, request.FILES or None,
                    initial={'forum': forum})
current_time = time.time()
user = request.user

if form.is_valid():
    instance = form.save(commit=False)
    instance.forum = forum
    # Sanitize the name first, then derive the slug from the clean name.
    instance.name = strip_tags(instance.name)
    instance.slug = slugify(instance.name)
    instance.user = user
    instance.author = user.display_name
    instance.lastpost_author = user.display_name
    instance.created_int = current_time
    instance.modified_int = current_time
    instance.save()

    # and now add the child post
    post = Post(
        topic = instance,
        text = request.POST['text'],
        user = user,
        post_date_int = current_time
    )
    if request.FILES:
        post.image = request.FILES['image']
    post.save()
    return HttpResponseRedirect("/forum/%s/?new_topic=%s" % (forum_slug, instance.id))
# Invalid form (or GET): render the add/edit template with errors.
return render(request, 'fretboard/add_edit.html', {
    'form': form,
    'form_title': 'Add a topic',
    'FORUM_BASE_NAME': FORUM_BASE_NAME
})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_post(request, t_slug, t_id, p_id = False): # topic slug, topic id, post id """ Creates a new post and attaches it to a topic """
# Body of add_post(request, t_slug, t_id, p_id=False): append a post to an
# existing topic, optionally quoting an earlier post (p_id).
topic = get_object_or_404(Topic, id=t_id)
# Land the user on the last page of the topic after posting.
topic_url = '{0}page{1}/'.format(topic.get_short_url(), topic.page_count)
user = request.user
current_time = time.time()
form_title = 'Add a post'
if topic.is_locked:
    # If we mistakenly allowed reply on locked topic, bail with error msg.
    messages.error(request, 'Sorry, but this topic is closed')
    return HttpResponseRedirect(topic_url)
q = None
if p_id:
    # if there's a post id, it's a quote
    q = Post.objects.get(id=p_id)
    form_title = "Respond to post"
form = PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
    # we're going to save this inital data now,
    # rather than on the model save()
    # because we only want to bother with this stuff one time
    # and it will never update or change.
    instance = form.save(commit=False)
    instance.topic = topic
    instance.user = user
    instance.author_name = user.display_name
    instance.avatar = user.avatar
    instance.post_date_int = current_time
    instance.quote = q
    instance.save()
    # Refresh denormalized counters on the topic/user.
    update_post_relations(user, topic)
    return HttpResponseRedirect('%s?new_post=%s#post-%s' % (topic_url, t_id, instance.id))
return render(request, 'fretboard/add_edit.html', {
    'form': form,
    'form_title': form_title,
    'quote': q,
    'FORUM_BASE_NAME': FORUM_BASE_NAME
})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def edit_post(request, post_id): """ Allows user to edit an existing post. This needs to be rewritten. Badly. """
post = get_object_or_404(Post, id=post_id) user = request.user topic = post.topic # oughta build a get_absolute_url method for this, maybe. post_url = '{0}page{1}/#post{2}'.format(topic.get_short_url(), topic.page_count, post.id) if topic.is_locked: messages.error(request, 'Sorry, but this topic is closed') return HttpResponseRedirect(post_url) if user.is_staff is False and user.id != post.author.id: messages.error(request, "Sorry, but you can't edit this post.") return HttpResponseRedirect(post_url) if request.POST and len(request.POST['text']) > 1: if request.is_ajax and 'body' in request.POST: # AJAX REQUEST post.text = request.POST['body'] post.save(update_fields=['text', 'text_formatted']) return HttpResponse(str(post.text)) post.text = request.POST['text'] post.save(update_fields=['text', 'text_formatted']) if 'name' in request.POST: # updated topic topic.name = request.POST['name'] topic.save(update_fields=['name']) return HttpResponseRedirect(post_url) # this is a get request else: if post == topic.post_set.all()[0]: form = AddTopicForm(instance=topic, initial={'text': post.text}) else: form = PostForm(instance=post) return render(request, 'fretboard/add_edit.html', { 'quote': post.quote, 'form' : form, 'form_title': 'Edit post', })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_post(request, post_id, topic_id): """ Deletes a post, if the user has correct permissions. Also updates topic.post_count """
try: topic = Topic.objects.get(id=topic_id) post = Post.objects.get(id=post_id) except: messages.error(request, 'Sorry, but this post can not be found. It may have been deleted already.') raise Http404 return_url = "/forum/%s/%s/%s/" % (topic.forum.slug, topic.slug, topic_id) if request.user.is_authenticated() and (request.user.is_staff or request.user.id == post.author.id): post.delete() update_post_relations(request.user, topic, deleting=True) topic_posts = topic.post_set.count() pmax = (topic_posts / PAGINATE_BY) + 1 # if no posts are left, delete topic. if topic_posts == 0: topic.delete() return HttpResponseRedirect("/forum/%s/" % topic.forum.slug) return HttpResponseRedirect("%spage%s/" % (return_url, pmax)) else: raise Http404
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def emit(self, record): """ Emit a record. The record is formatted, and then sent to the syslog server. If exception information is present, it is NOT sent to the server. """
# Body of SysLogHandler.emit(self, record): format the record and send it
# to the syslog server, RFC 5424-style (priority prefix + UTF-8 BOM + msg).
msg = self.format(record) + '\000'
"""
We need to convert record level to lowercase, maybe this will change in the future.
"""
# Priority = facility * 8 + severity, wrapped in angle brackets.
prio = '<%d>' % self.encodePriority(self.facility, self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424.
msg = msg.encode('utf-8')
if codecs:
    msg = codecs.BOM_UTF8 + msg
msg = prio + msg
try:
    if self.unixsocket:
        try:
            self.socket.send(msg)
        except socket.error:
            # The datagram socket can go stale; reconnect once and retry.
            self._connect_unixsocket(self.address)
            self.socket.send(msg)
    elif self.socktype == socket.SOCK_DGRAM:
        self.socket.sendto(msg, self.address)
    else:
        self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit):
    raise
except:
    # Per logging convention, never let handler errors propagate.
    self.handleError(record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jackknife_loop(func, data, d=1, combolimit=int(1e6)): """Generic Jackknife Subsampling procedure func : function A function pointer to a python function that - accept an <Observations x Features> matrix as input variable, and - returns an array/list or scalar value as estimate, metric, model parameter, jackknife replicate, etc. data : ndarray A <Observations x Features> numpy array d : int The number of observations to leave out for each Jackknife subsample, i.e. the subsample size is N-d. (The default is d=1 for the "Delete-1 Jackknife" procedure.) combolimit : int Maximum numbers of subsamples for binocoeff(N,d) combinations. (Default combolimit=1e6) Notes: ------ Be aware that binom(N,d) can quickly exceed your computer's capabilities. The "Delete-d Jackknife" approaches are reasonable for small sample sizes, e.g. N=50 and d=3 result in 19600 subsamples to compute. Returns: -------- theta_subs : ndarray The metrics, estimates, parameters, etc. of the model (see "func") for each subsample. It is a <C x M> matrix, i.e. C=binocoeff(N,d) subsamples, and M parameters that are returned by the model. theta_full : ndarray The metrics, estimates, parameters, etc. of the model (see "func") for the full sample. It is a <1 x M> vecotr with the M parameters that are returned by the model. Example: -------- import numpy as np import oxyba as ox from sklearn.datasets import load_boston def myfunc(data): import oxyba as ox return ox.linreg_ols_lu( data[:,0], data[:,1:] ) tmp = load_boston() y = tmp.target X = tmp.data[:,[5,12]] theta_subs, theta_full = ox.jackknife_loop(myfunc, np.c_[y, X], d=1) """
# load modules import scipy.special import warnings import itertools import numpy as np # How many observations contains data? N = data.shape[0] # throw a warning! numcombos = scipy.special.comb(N, d, exact=True) # binocoeff if numcombos > 1e5: warnings.warn(( "N={0:d} and d={1:d} result in {2:d} " "combinations to compute").format(N, d, numcombos)) if numcombos > combolimit: raise Exception("Number of combinations exceeds 'combolimit'.") # list of tuples that contain all combinations of # row indicies to leave out leaveout = list(itertools.combinations(range(N), d)) # store all metrics, estimates, model parameters # as list/array or scalar in one list theta_subsample = [] # loop over all combinations idx = np.arange(0, N) for c in range(numcombos): # create true/false index for the c-th subsample # i.e. all true except the d leaveout indicies subidx = np.isin(idx, leaveout[c], assume_unique=True, invert=True) # compute metrics and store them theta_subsample.append(func(data[subidx, :])) # compute metrics on the full sample theta_fullsample = func(data) # done return np.array(theta_subsample), np.array(theta_fullsample)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_zone(server, token, domain, identifier, dtype, master=None): """Create zone records. Arguments: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name identifier: Template ID dtype: MASTER|SLAVE|NATIVE (default: MASTER) master: master server ip address when dtype is SLAVE (default: None) ContentType: application/json x-authentication-token: token """
# Body of create_zone(server, token, domain, identifier, dtype, master=None):
# build the zone payload from the template and PUT it to the API.
converter = JSONConverter(domain)
converter.generate_zone(domain, identifier, dtype, master)
connect.tonicdns_client('https://' + server + '/zone', 'PUT',
                        token, converter.zone)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_records(server, token, domain, data): """Create records of specific domain. Arguments: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name data: Create records ContentType: application/json x-authentication-token: token """
# Body of create_records(server, token, domain, data): PUT each record
# payload to the zone endpoint, one request per entry.
endpoint = 'https://' + server + '/zone/' + domain
for record in data:
    connect.tonicdns_client(endpoint, 'PUT', token, record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_records(server, token, data): """Delete records of specific domain. Arguments: server: TonicDNS API server token: TonicDNS API authentication token data: Delete records ContentType: application/json x-authentication-token: token """
# Body of delete_records(server, token, data): issue one DELETE request
# per record payload against the zone endpoint.
endpoint = 'https://' + server + '/zone'
for record in data:
    connect.tonicdns_client(endpoint, 'DELETE', token, record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_zone(server, token, domain, keyword='', raw_flag=False): """Retrieve zone records. Argument: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name keyword: Search keyword x-authentication-token: token """
method = 'GET' uri = 'https://' + server + '/zone/' + domain data = connect.tonicdns_client(uri, method, token, data=False, keyword=keyword, raw_flag=raw_flag) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_zone(server, token, domain): """Delete specific zone. Argument: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name x-authentication-token: token """
# Body of delete_zone(server, token, domain): DELETE the whole zone.
connect.tonicdns_client('https://' + server + '/zone/' + domain,
                        'DELETE', token, data=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_template(server, token, identifier, template): """Create template. Argument: server: TonicDNS API server token: TonicDNS API authentication token identifier: Template identifier template: Create template datas ContentType: application/json x-authentication-token: token """
# Body of create_template(server, token, identifier, template):
# PUT the template payload under its identifier.
connect.tonicdns_client('https://' + server + '/template/' + identifier,
                        'PUT', token, data=template)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_templates(server, token): """Retrieve all templates. Argument: server: TonicDNS API server token: TonicDNS API authentication token x-authentication-token: token """
method = 'GET' uri = 'https://' + server + '/template' connect.tonicdns_client(uri, method, token, data=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_soa_serial(server, token, soa_content): """Update SOA serial Argument: server: TonicDNS API server token: TonicDNS API authentication token soa_content: SOA record data x-authentication-token: token Get SOA record `cur_soa` is current SOA record. `new_soa` is incremental serial SOA record. """
# Body of update_soa_serial(server, token, soa_content): bump the zone's
# SOA serial by PUTting the incremented record, then deleting the old one.
method = 'GET'
uri = 'https://' + server + '/zone/' + soa_content.get('domain')
# `cur_soa` is the current SOA record; `new_soa` is the same record with
# an incremented serial (computed by the client when keyword='serial').
cur_soa, new_soa = connect.tonicdns_client(
    uri, method, token, data=False,
    keyword='serial', content=soa_content)

# Wrap each record in the JSON shape the API expects.
domain = soa_content.get('domain')
cur_o = JSONConverter(domain)
new_o = JSONConverter(domain)
cur_o.records = [cur_soa]
new_o.records = [new_soa]
cur_o.generata_data(False)
new_o.generata_data(True)

# Create new SOA record first so the zone is never left without one.
uri = 'https://' + server + '/zone/' + domain
method = 'PUT'
connect.tonicdns_client(uri, method, token, new_o.dict_records[0])

# Delete the current SOA record, because a zone has only one SOA record.
method = 'DELETE'
uri = 'https://' + server + '/zone'
connect.tonicdns_client(uri, method, token, cur_o.dict_records[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decompose_seconds_in_day(seconds): """Decomposes seconds in day into hour, minute and second components. Arguments --------- seconds : int A time of day by the number of seconds passed since midnight. Returns ------- hour : int The hour component of the given time of day. minute : int The minute component of the given time of day. second : int The second component of the given time of day. """
# Body of decompose_seconds_in_day(seconds) -> (hour, minute, second).
# Normalize a value that ran past midnight once. NOTE(review): only a
# single day's wrap is handled; seconds > 2*SECONDS_IN_DAY still yields
# hour >= 24 -- presumably callers never pass that; verify.
if seconds > SECONDS_IN_DAY:
    seconds = seconds - SECONDS_IN_DAY
if seconds < 0:
    raise ValueError("seconds param must be non-negative!")
hour = int(seconds / 3600)
leftover = seconds - hour * 3600  # seconds remaining past the whole hours
minute = int(leftover / 60)
second = leftover - minute * 60
return hour, minute, second
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def seconds_in_day_to_time(seconds): """Decomposes a time of day into hour, minute and second components. Arguments --------- seconds : int A time of day by the number of seconds passed since midnight. Returns ------- datetime.time The corresponding time of day as a datetime.time object. Example ------- datetime.time(6, 30, 30) """
# Body of seconds_in_day_to_time(seconds) -> datetime.time.
try:
    return time(*decompose_seconds_in_day(seconds))
except ValueError:
    # Dump the offending decomposition before re-raising so the bad input
    # is visible (e.g. hour == 24 is rejected by datetime.time).
    print("Seconds = {}".format(seconds))
    print("H = {}, M={}, S={}".format(*decompose_seconds_in_day(seconds)))
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _as_rdf_xml(self, ns): """ Return identity details for the element as XML nodes """
# Body of _as_rdf_xml(self, ns): serialize the element's identity fields
# into a list of ElementTree nodes (SBOL/RDF vocabulary).
self.rdf_identity = self._get_identity(ns)
elements = []
# sbol:persistentIdentity is always emitted.
elements.append(ET.Element(NS('sbol', 'persistentIdentity'), attrib={NS('rdf', 'resource'): self._get_persistent_identitity(ns)}))
# The remaining fields are optional; emit each only when set.
if self.name is not None:
    name = ET.Element(NS('dcterms', 'title'))
    name.text = self.name
    elements.append(name)
if self.display_id is not None:
    display_id = ET.Element(NS('sbol', 'displayId'))
    display_id.text = self.display_id
    elements.append(display_id)
if self.version is not None:
    version = ET.Element(NS('sbol', 'version'))
    version.text = self.version
    elements.append(version)
if self.was_derived_from is not None:
    elements.append(ET.Element(NS('prov', 'wasDerivedFrom'), attrib={NS('rdf', 'resource'): self.was_derived_from}))
if self.description is not None:
    description = ET.Element(NS('dcterms', 'description'))
    description.text = self.description
    elements.append(description)
# Nested annotations serialize themselves recursively.
for a in self.annotations:
    elements.append(a._as_rdf_xml(ns))
return elements
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_requirement_files(args=None): """ Get the "best" requirements file we can find """
if args and args.input_filename: return [args.input_filename] paths = [] for regex in settings.REQUIREMENTS_SOURCE_GLOBS: paths.extend(glob.glob(regex)) return paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_domains(self): """ Return all domains. Domain is a key, so group by them """
self.connect() results = self.server.list_domains(self.session_id) return {i['domain']: i['subdomains'] for i in results}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_websites(self): """ Return all websites, name is not a key """
self.connect() results = self.server.list_websites(self.session_id) return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def website_exists(self, website, websites=None): """ Look for websites matching the one passed """
if websites is None: websites = self.list_websites() if isinstance(website, str): website = {"name": website} ignored_fields = ('id',) # changes in these fields are ignored results = [] for other in websites: different = False for key in website: if key in ignored_fields: continue if other.get(key) != website.get(key): different = True break if different is False: results.append(other) return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_month_namedays(self, month=None): """Return names as a tuple based on given month. If no month given, use current one"""
if month is None: month = datetime.now().month return self.NAMEDAYS[month-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_moderator_permissions(self, request): """ Determine whether the user has a global or per-object permission, checking first on the category instance and, failing that, on the thread instance """
return any(request.user.has_perm(perm) for perm in self.permission_required)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getvar(key, default=None, template='OPENSHIFT_{key}'): """ Get OPENSHIFT envvar """
return os.environ.get(template.format(key=key), default)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(parent, idx, value): """Add a value to a dict."""
# Body of add(parent, idx, value): insert `value` into a dict or list
# container at position `idx`.
if isinstance(parent, dict):
    if idx in parent:
        raise JSONPatchError("Item already exists")
    parent[idx] = value
elif isinstance(parent, list):
    # NOTE(review): RFC 6902 uses "-" as the append token; this code
    # treats "" and "~" as append -- confirm this divergence is intended.
    if idx == "" or idx == "~":
        parent.append(value)
    else:
        parent.insert(int(idx), value)
else:
    # NOTE(review): raises JSONPathError here but JSONPatchError above --
    # verify both exception classes exist and the split is deliberate.
    raise JSONPathError("Invalid path for operation")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove(parent, idx): """Remove a value from a dict."""
# Body of remove(parent, idx): delete the entry at `idx` from a dict or
# list container; any other parent type is an invalid path.
if isinstance(parent, list):
    del parent[int(idx)]
elif isinstance(parent, dict):
    del parent[idx]
else:
    raise JSONPathError("Invalid path for operation")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def replace(parent, idx, value, check_value=_NO_VAL): """Replace a value in a dict."""
# Body of replace(parent, idx, value, check_value=_NO_VAL): overwrite an
# existing entry, optionally verifying the old value first.
if isinstance(parent, dict):
    if idx not in parent:
        raise JSONPatchError("Item does not exist")
elif isinstance(parent, list):
    idx = int(idx)
    if idx < 0 or idx >= len(parent):
        raise JSONPatchError("List index out of range")
# BUG FIX: `check_value` was only honored for list parents; a caller
# supplying it for a dict got no verification at all. Check both.
if check_value is not _NO_VAL:
    if parent[idx] != check_value:
        raise JSONPatchError("Check value did not pass")
parent[idx] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge(parent, idx, value): """Merge a value."""
# Body of merge(parent, idx, value): shallow-merge the mapping `value`
# into the child container located at `idx`.
destination = get_child(parent, idx)
for field, field_value in value.items():
    destination[field] = field_value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(src_parent, src_idx, dest_parent, dest_idx): """Copy an item."""
# Body of copy(src_parent, src_idx, dest_parent, dest_idx): copy the
# source item into the destination container.
# NOTE(review): for a list destination this OVERWRITES the slot rather
# than inserting (RFC 6902 "copy" adds) -- confirm that is intended.
if isinstance(dest_parent, list):
    dest_idx = int(dest_idx)
dest_parent[dest_idx] = get_child(src_parent, src_idx)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move(src_parent, src_idx, dest_parent, dest_idx): """Move an item."""
# Body of move(src_parent, src_idx, dest_parent, dest_idx): copy the item
# to the destination, then delete the original.
# NOTE(review): when src and dest share the same list parent, removing
# after copying shifts indices -- confirm callers never rely on that case.
copy(src_parent, src_idx, dest_parent, dest_idx)
remove(src_parent, src_idx)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_remove(parent, idx, value): """Remove an item from a list."""
# Body of set_remove(parent, idx, value): drop `value` from the container
# at `idx` if present; missing values are silently ignored.
container = get_child(parent, idx)
if value in container:
    container.remove(value)