text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def select(self, name):
    """Return a new PluginSet restricted to the plugins named `name`.

    The returned set shares this set's group; only plugins whose
    ``name`` attribute equals the argument are kept.
    """
    matching = [plugin for plugin in self.plugins if plugin.name == name]
    return PluginSet(self.group, name, matching)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def browse_home_listpage_url(self, state=None, county=None, zipcode=None, street=None, **kwargs): """ Construct an url of home list page by state, county, zipcode, street. Example: - https://www.zillow.com/browse/homes/ca/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ """
url = self.domain_browse_homes for item in [state, county, zipcode, street]: if item: url = url + "/%s" % item url = url + "/" return url
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _render_bundle(bundle_name): """ Renders the HTML for a bundle in place - one HTML tag or many depending on settings.USE_BUNDLES """
try: bundle = get_bundles()[bundle_name] except KeyError: raise ImproperlyConfigured("Bundle '%s' is not defined" % bundle_name) if bundle.use_bundle: return _render_file(bundle.bundle_type, bundle.get_url(), attrs=({'media':bundle.media} if bundle.media else {})) # Render files individually bundle_files = [] for bundle_file in bundle.files: if bundle_file.precompile_in_debug: bundle_files.append(_render_file(bundle_file.bundle_type, bundle_file.precompile_url, attrs=({'media':bundle_file.media} if bundle.media else {}))) else: bundle_files.append(_render_file(bundle_file.file_type, bundle_file.file_url, attrs=({'media':bundle_file.media} if bundle.media else {}))) return '\n'.join(bundle_files)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_string(self, string_representation, resource=None): """ Extracts resource data from the given string and converts them to a new resource or updates the given resource from it. """
stream = NativeIO(string_representation) return self.from_stream(stream, resource=resource)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_string(self, obj): """ Converts the given resource to a string representation and returns it. """
stream = NativeIO() self.to_stream(obj, stream) return text_(stream.getvalue(), encoding=self.encoding)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_from_bytes(self, byte_representation): """ Converts the given bytes representation to resource data. """
text = byte_representation.decode(self.encoding) return self.data_from_string(text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_to_string(self, data_element): """ Converts the given data element into a string representation. :param data_element: object implementing :class:`everest.representers.interfaces.IExplicitDataElement` :returns: string representation (using the MIME content type configured for this representer) """
stream = NativeIO() self.data_to_stream(data_element, stream) return stream.getvalue()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_from_resource_class(cls, resource_class): """ Creates a new representer for the given resource class. The representer obtains a reference to the (freshly created or looked up) mapping for the resource class. """
mp_reg = get_mapping_registry(cls.content_type) mp = mp_reg.find_or_create_mapping(resource_class) return cls(resource_class, mp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_from_stream(self, stream): """ Creates a data element reading a representation from the given stream. :returns: object implementing :class:`everest.representers.interfaces.IExplicitDataElement` """
parser = self._make_representation_parser(stream, self.resource_class, self._mapping) return parser.run()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_to_stream(self, data_element, stream): """ Writes the given data element to the given stream. """
generator = \ self._make_representation_generator(stream, self.resource_class, self._mapping) generator.run(data_element)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resource_from_data(self, data_element, resource=None): """ Converts the given data element to a resource. :param data_element: object implementing :class:`everest.representers.interfaces.IExplicitDataElement` """
return self._mapping.map_to_resource(data_element, resource=resource)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure(self, options=None, attribute_options=None): # pylint: disable=W0221 """ Configures the options and attribute options of the mapping associated with this representer with the given dictionaries. :param dict options: configuration options for the mapping associated with this representer. :param dict attribute_options: attribute options for the mapping associated with this representer. """
self._mapping.update(options=options, attribute_options=attribute_options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def with_updated_configuration(self, options=None, attribute_options=None): """ Returns a context in which this representer is updated with the given options and attribute options. """
return self._mapping.with_updated_configuration(options=options, attribute_options= attribute_options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def jsPath(path):
    """Return a relative path without \\, -, and . so that the string
    will play nicely with javascript.
    """
    # Machine-specific scrape root stripped off first (kept byte-identical).
    prefix = ("C:\\Users\\scheinerbock\\Desktop\\"
              "ideogram\\scrapeSource\\test\\")
    cleaned = path.replace(prefix, "")
    # Replacement order matters: dashes first, then separators, then dots.
    for old, new in (("-", "_dash_"), ("\\", "_slash_"), (".", "_dot_")):
        cleaned = cleaned.replace(old, new)
    return cleaned
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def jsName(path, name):
    """Return a name string without \\, -, and . so that the string
    will play nicely with javascript.

    The result is the javascript-safe form of `path` joined to `name`
    with the literal separator ``_slash_``.
    """
    # Machine-specific scrape root stripped off first (kept byte-identical).
    prefix = ("C:\\Users\\scheinerbock\\Desktop\\"
              "ideogram\\scrapeSource\\test\\")
    cleaned = path.replace(prefix, "")
    for old, new in (("-", "_dash_"), ("\\", "_slash_"), (".", "_dot_")):
        cleaned = cleaned.replace(old, new)
    return cleaned + '_slash_' + name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def getStartNodes(fdefs, calls):
    """Return a list of nodes in fdefs that have no inbound edges.

    A node has an inbound edge when any call in `calls` names it as a
    target (compared with ``==``, matching the original scan).
    """
    targets = [call.target for call in calls]
    return [fn
            for source in fdefs
            for fn in fdefs[source]
            if fn not in targets]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def getChildren(current, calls, blacklist=[]):
    """Return the targets of calls originating at `current` that are
    not in `blacklist`.

    NOTE(review): the mutable default `blacklist=[]` is never mutated
    here, so it is safe; kept for interface compatibility.
    """
    children = []
    for call in calls:
        if call.source == current and call.target not in blacklist:
            children.append(call.target)
    return children
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def tagAttributes(fdef_master_list, node, depth=0):
    """Recursively tag objects with sizes, depths and path names.

    Dict nodes whose 'name' matches a definition in `fdef_master_list`
    get 'path' and 'depth' set, and 'size' when they are leaves.

    Bug fix: the original did ``depth += 1`` inside each loop, so every
    successive sibling was tagged one level deeper than the previous
    one.  All children of a node sit at the same level, so recurse with
    ``depth + 1`` without mutating the local.
    """
    if type(node) == list:
        for child in node:
            tagAttributes(fdef_master_list, child, depth + 1)
    if type(node) == dict:
        for fdef in fdef_master_list:
            if jsName(fdef.path, fdef.name) == node['name']:
                node['path'] = fdef.path
                node['depth'] = depth
                if "children" not in node:
                    # Only leaves carry a size.
                    node["size"] = fdef.weight
        for value in node.values():
            tagAttributes(fdef_master_list, value, depth + 1)
    return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def tagAttributes_while(fdef_master_list, root):
    """Tag each node under root with the appropriate depth (iteratively).

    Bug fixes versus the original:
    - ``if children in current`` / ``for child in children`` referenced
      an undefined name `children` (NameError); the intent was the
      string key ``"children"`` and the list ``current["children"]``.
    - ``if depth not in current`` likewise meant ``"depth" not in current``.
    - depth was incremented once per *popped* node, so levels depended
      on traversal order; a child's depth is its parent's depth + 1.
    """
    root.setdefault("depth", 0)
    untagged_nodes = [root]
    while untagged_nodes:
        current = untagged_nodes.pop()
        for fdef in fdef_master_list:
            if jsName(fdef.path, fdef.name) == current['name']:
                current['path'] = fdef.path
        if "children" in current:
            for child in current["children"]:
                child["depth"] = current["depth"] + 1
                untagged_nodes.append(child)
    return root
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def noEmptyNests(node):
    """Recursively make sure that no dictionaries inside node contain
    empty children lists.

    Bug fix: the original indexed ``node["children"]`` unconditionally,
    raising KeyError for dicts that have no 'children' key at all;
    ``.get`` makes the check safe while preserving the pop-if-empty
    behaviour.
    """
    if type(node) == list:
        for child in node:
            noEmptyNests(child)
    if type(node) == dict:
        for value in node.values():
            noEmptyNests(value)
        if node.get("children") == []:
            node.pop("children")
    return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_old_tmp_files(profiles=None, max_lifetime=(7 * 24)): """ Removes old temp files that is older than expiration_hours. If profiles is None then will be use all profiles. """
assert isinstance(profiles, (list, tuple)) or profiles is None if profiles is None: profiles = dju_settings.DJU_IMG_UPLOAD_PROFILES.keys() profiles = set(('default',) + tuple(profiles)) total = removed = 0 old_dt = datetime.datetime.utcnow() - datetime.timedelta(hours=max_lifetime) for profile in profiles: conf = get_profile_configs(profile=profile) root_path = os.path.join(settings.MEDIA_ROOT, dju_settings.DJU_IMG_UPLOAD_SUBDIR, conf['PATH']) for file_path in get_files_recursive(root_path): m = re_tmp.match(os.path.basename(file_path)) if m is None: continue total += 1 fdt = dtstr_to_datetime(m.group('dtstr')) if fdt and old_dt > fdt: os.remove(file_path) removed += 1 return removed, total
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def next_task(self, item, **kwargs): """Calls import_batch for the next filename in the queue and "archives" the file. The archive folder is typically the folder for the deserializer queue. """
filename = os.path.basename(item) try: self.tx_importer.import_batch(filename=filename) except TransactionImporterError as e: raise TransactionsFileQueueError(e) from e else: self.archive(filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_public_comments_for_model(model): """ Get visible comments for the model. """
if not IS_INSTALLED: # No local comments, return empty queryset. # The project might be using DISQUS or Facebook comments instead. return CommentModelStub.objects.none() else: return CommentModel.objects.for_model(model).filter(is_public=True, is_removed=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_comments_are_open(instance): """ Check if comments are open for the instance """
if not IS_INSTALLED: return False try: # Get the moderator which is installed for this model. mod = moderator._registry[instance.__class__] except KeyError: # No moderator = no restrictions return True # Check the 'enable_field', 'auto_close_field' and 'close_after', # by reusing the basic Django policies. return CommentModerator.allow(mod, None, instance, None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_comments_are_moderated(instance): """ Check if comments are moderated for the instance """
if not IS_INSTALLED: return False try: # Get the moderator which is installed for this model. mod = moderator._registry[instance.__class__] except KeyError: # No moderator = no moderation return False # Check the 'auto_moderate_field', 'moderate_after', # by reusing the basic Django policies. return CommentModerator.moderate(mod, None, instance, None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calc_local_indices(shape, num_partitions, coordinate): """ calculate local indices, return start and stop index per dimension per process for local data field :param shape: global shape of data :param num_partitions: number of partition for each dimension (from MPI.Compute_dims()) :param coordinate: cartesian coordinate descriptor (from CARTESIAN_COMMUNICATOR.Get_coords(rank)) """
dimension = len(shape) # check matching of cartesian communicator and shape assert dimension == len(num_partitions) decomposed_shapes = [] # build shape list for every dimension for idx in range(dimension): local_shape = shape[idx] // num_partitions[idx] temp_shape_list = [] for _ in range(num_partitions[idx]): temp_shape_list.append(local_shape) # expand local partitions to match global shape for j in range(shape[idx] % num_partitions[idx]): temp_shape_list[j] += 1 # decomposed_shapes[dimension][partition] decomposed_shapes.append(temp_shape_list) # calculate indices for partitions indices = [] # TODO: redefine calculation -> first select and calculate for i in range(dimension): temp_index_list = [] start_idx = 0 end_idx = 0 for j in range(num_partitions[i]): end_idx = end_idx + decomposed_shapes[i][j] temp_index_list.append([start_idx, end_idx]) start_idx = end_idx indices.append(temp_index_list) start_index = [] stop_index = [] shape = [] # select partition, start and stop index for idx in range(dimension): start_index.append(indices[idx][coordinate[idx]][0]) stop_index.append(indices[idx][coordinate[idx]][1]) shape.append(decomposed_shapes[idx][coordinate[idx]]) shape = tuple(shape) start_index = tuple(start_index) stop_index = tuple(stop_index) return start_index, stop_index, shape
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_file(self, filename): """Read in file contents and set the current string."""
with open(filename, 'r') as sourcefile: self.set_string(sourcefile.read())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_string(self, string): """Set the working string and its length then reset positions."""
self.string = string self.length = len(string) self.reset_position()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_string(self, string): """Add to the working string and its length and reset eos."""
def add_string(self, string):
    """Add to the working string and its length and reset eos."""
    self.string = self.string + string
    self.length = self.length + len(string)
    # More data means we are no longer at end-of-string.
    self.eos = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_position(self): """Reset all current positions."""
def reset_position(self):
    """Reset all current positions."""
    # Column/offset are zero-based; rows are one-based.
    self.pos, self.col = 0, 0
    self.row = 1
    self.eos = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_space(self, length=1, offset=0): """Returns boolean if self.pos + length < working string length."""
def has_space(self, length=1, offset=0):
    """Returns boolean if self.pos + length < working string length."""
    # Index of the last character we would need; must fall inside string.
    last_needed = self.pos + length + offset - 1
    return last_needed < self.length
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eol_distance_next(self, offset=0): """Return the amount of characters until the next newline."""
def eol_distance_next(self, offset=0):
    """Return the amount of characters until the next newline."""
    tail = self.string[self.pos + offset:]
    newline_at = tail.find('\n')
    # No newline ahead: the whole remainder counts.
    return len(tail) if newline_at == -1 else newline_at
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eol_distance_last(self, offset=0): """Return the ammount of characters until the last newline."""
def eol_distance_last(self, offset=0):
    """Return the ammount of characters until the last newline."""
    head = self.string[:self.pos + offset]
    newline_at = head.rfind('\n')
    # No newline behind us: everything before pos counts.
    return len(head) if newline_at == -1 else len(head) - newline_at - 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spew_length(self, length): """Move current position backwards by length."""
pos = self.pos if not pos or length > pos: return None row = self.row for char in reversed(self.string[pos - length:pos]): pos -= 1 if char == '\n': # handle a newline char row -= 1 self.pos = pos self.col = self.eol_distance_last() self.row = row if self.has_space(): # Set eos if there is no more space left. self.eos = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eat_length(self, length): """Move current position forward by length and sets eos if needed."""
pos = self.pos if self.eos or pos + length > self.length: return None col = self.col row = self.row for char in self.string[pos:pos + length]: col += 1 pos += 1 if char == '\n': # handle a newline char col = 0 row += 1 self.pos = pos self.col = col self.row = row if not self.has_space(): # Set eos if there is no more space left. self.eos = 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eat_string(self, string): """Move current position by length of string and count lines by \n."""
pos = self.pos if self.eos or pos + len(string) > self.length: return None col = self.col row = self.row for char in string: col += 1 pos += 1 if char == '\n': # handle a newline char col = 0 row += 1 self.pos = pos self.col = col self.row = row if not self.has_space(): # Set eos if there is no more space left. self.eos = 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eat_line(self): """Move current position forward until the next line."""
if self.eos: return None eat_length = self.eat_length get_char = self.get_char has_space = self.has_space while has_space() and get_char() != '\n': eat_length(1) eat_length(1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_char(self, offset=0): """Return the current character in the working string."""
if not self.has_space(offset=offset): return '' return self.string[self.pos + offset]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_length(self, length, trim=0, offset=0): """Return string at current position + length. If trim == true then get as much as possible before eos. """
if trim and not self.has_space(offset + length): return self.string[self.pos + offset:] elif self.has_space(offset + length): return self.string[self.pos + offset:self.pos + offset + length] else: return ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_string(self, offset=0): """Return non space chars from current position until a whitespace."""
if not self.has_space(offset=offset): return '' # Get a char for each char in the current string from pos onward # solong as the char is not whitespace. string = self.string pos = self.pos + offset for i, char in enumerate(string[pos:]): if char.isspace(): return string[pos:pos + i] else: return string[pos:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rest_of_string(self, offset=0): """A copy of the current position till the end of the source string."""
if self.has_space(offset=offset): return self.string[self.pos + offset:] else: return ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_current_line(self): """Return a SourceLine of the current line."""
if not self.has_space(): return None pos = self.pos - self.col string = self.string end = self.length output = [] while pos < len(string) and string[pos] != '\n': output.append(string[pos]) pos += 1 if pos == end: break else: output.append(string[pos]) if not output: return None return SourceLine(''.join(output), self.row)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_lines(self, first, last): """Return SourceLines for lines between and including first & last."""
line = 1 linestring = [] linestrings = [] for char in self.string: if line >= first and line <= last: linestring.append(char) if char == '\n': linestrings.append((''.join(linestring), line)) linestring = [] elif line > last: break if char == '\n': line += 1 if linestring: linestrings.append((''.join(linestring), line)) elif not linestrings: return None return [SourceLine(string, lineno) for string, lineno in linestrings]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_surrounding_lines(self, past=1, future=1): """Return the current line and x,y previous and future lines. Returns a list of SourceLine's. """
string = self.string pos = self.pos - self.col end = self.length row = self.row linesback = 0 while linesback > -past: if pos <= 0: break elif string[pos - 2] == '\n': linesback -= 1 pos -= 1 output = [] linestring = [] lines = future + 1 while linesback < lines: if pos >= end: linestring.append(string[pos - 1]) output.append( SourceLine(''.join(linestring[:-1]), row + linesback)) break elif string[pos] == '\n': linestring.append(string[pos]) pos += 1 output.append( SourceLine(''.join(linestring), row + linesback)) linesback += 1 linestring = [] linestring.append(string[pos]) pos += 1 return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_lines(self): """Return all lines of the SourceString as a list of SourceLine's."""
output = [] line = [] lineno = 1 for char in self.string: line.append(char) if char == '\n': output.append(SourceLine(''.join(line), lineno)) line = [] lineno += 1 if line: output.append(SourceLine(''.join(line), lineno)) return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_string(self, string, word=0, offset=0): """Returns 1 if string can be matches against SourceString's current position. If word is >= 1 then it will only match string followed by whitepsace. """
def match_string(self, string, word=0, offset=0):
    """Returns True if string matches at the current position.

    If word is >= 1 then it will only match string followed by
    whitespace.

    Bug fix: the original called ``self.get_length(len(string), offset)``,
    which passed `offset` into get_length's second positional parameter
    `trim` (signature is ``get_length(length, trim=0, offset=0)``), so a
    non-zero offset silently enabled trimming and compared the wrong
    slice.  Pass it by keyword.
    """
    if word:
        return self.get_string(offset) == string
    return self.get_length(len(string), offset=offset) == string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_any_string(self, strings, word=0, offset=0): """Attempts to match each string in strings in order. Will return the string that matches or an empty string if no match. If word arg >= 1 then only match if string is followed by a whitespace which is much higher performance. If word is 0 then you should sort the strings argument yourself by length. """
def match_any_string(self, strings, word=0, offset=0):
    """Attempts to match each string in strings in order.

    Will return the string that matches or an empty string if no match.
    If word arg >= 1 then only match if string is followed by a
    whitespace, which is much higher performance.  If word is 0 then you
    should sort the strings argument yourself by length.

    Bug fixes: `offset` was being passed positionally into get_length's
    `trim` parameter; and `currentlength` was never updated, so the
    same-length caching never took effect.
    """
    if word:
        current = self.get_string(offset)
        return current if current in strings else ''
    current = ''
    currentlength = 0
    for string in strings:
        length = len(string)
        if length != currentlength:
            # Only re-read the source when the candidate length changes.
            current = self.get_length(length, offset=offset)
            currentlength = length
        if string == current:
            return string
    return ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_any_char(self, chars, offset=0): """Match and return the current SourceString char if its in chars."""
if not self.has_space(offset=offset): return '' current = self.string[self.pos + offset] return current if current in chars else ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_function_pattern(self, first, rest=None, least=1, offset=0): """Match each char sequentially from current SourceString position until the pattern doesnt match and return all maches. Integer argument least defines and minimum amount of chars that can be matched. This version takes functions instead of string patterns. Each function must take one argument, a string, and return a value that can be evauluated as True or False. If rest is defined then first is used only to match the first arg and the rest of the chars are matched against rest. """
if not self.has_space(offset=offset): return '' firstchar = self.string[self.pos + offset] if not first(firstchar): return '' output = [firstchar] pattern = first if rest is None else rest for char in self.string[self.pos + offset + 1:]: if pattern(char): output.append(char) else: break if len(output) < least: return '' return ''.join(output)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_indents_last_line(self, spacecount, tabs=0, back=5): """Finds the last meaningful line and returns its indent level. Back specifies the amount of lines to look back for a none whitespace line. """
if not self.has_space(): return 0 lines = self.get_surrounding_lines(back, 0) for line in reversed(lines): if not line.string.isspace(): return line.count_indents(spacecount, tabs) return 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_indents_length_last_line(self, spacecount, tabs=0, back=5): """Finds the last meaningful line and returns its indent level and character length. Back specifies the amount of lines to look back for a none whitespace line. """
def count_indents_length_last_line(self, spacecount, tabs=0, back=5):
    """Finds the last meaningful line and returns its indent level and
    character length as an (indents, length) tuple.

    Back specifies the amount of lines to look back for a none
    whitespace line.

    Bug fix: the no-space early exit returned a bare ``0`` while every
    other path returns a 2-tuple; callers unpacking the result would
    crash.  Return ``(0, 0)`` consistently.
    """
    if not self.has_space():
        return (0, 0)
    lines = self.get_surrounding_lines(back, 0)
    for line in reversed(lines):
        if not line.string.isspace():
            return line.count_indents_length(spacecount, tabs)
    return (0, 0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def skip_whitespace(self, newlines=0): """Moves the position forwards to the next non newline space character. If newlines >= 1 include newlines as spaces. """
if newlines: while not self.eos: if self.get_char().isspace(): self.eat_length(1) else: break else: char = '' while not self.eos: char = self.get_char() if char.isspace() and char != '\n': self.eat_length(1) else: break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pretty_print(self, carrot=False): """Return a string of this line including linenumber. If carrot is True then a line is added under the string with a carrot under the current character position. """
def pretty_print(self, carrot=False):
    """Return a string of this line including linenumber.

    If carrot is True then a line is added under the string with a
    carrot (^) under the current character position.

    Bug fix: the original built the carrot line's leading spaces but
    never appended the '^' character itself, producing a blank line.
    """
    lineno = self.lineno
    # Pad line numbers so the '|' column lines up for numbers < 1000.
    padding = 0
    if lineno < 1000:
        padding = 1
    if lineno < 100:
        padding = 2
    if lineno < 10:
        padding = 3
    string = str(lineno) + (' ' * padding) + '|' + self.string
    if carrot:
        # +5 accounts for the "NNN |" gutter before the line text;
        # TODO(review): gutter is 5 wide only for lineno < 10 — confirm
        # intended alignment for larger line numbers.
        string += '\n' + (' ' * (self.col + 5)) + '^'
    return string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def safe_exit(output): """exit without breaking pipes."""
try: sys.stdout.write(output) sys.stdout.flush() except IOError: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def frag2text(endpoint, stype, selector, clean=False, raw=False, verbose=False): """returns Markdown text of selected fragment. Args: endpoint: URL, file, or HTML string stype: { 'css' | 'xpath' } selector: CSS selector or XPath expression Returns: Markdown text Options: clean: cleans fragment (lxml.html.clean defaults) raw: returns raw HTML fragment verbose: show http status, encoding, headers """
def frag2text(endpoint, stype, selector, clean=False, raw=False,
              verbose=False):
    """returns Markdown text of selected fragment.

    Args:
        endpoint: URL, file, or HTML string
        stype: { 'css' | 'xpath' }
        selector: CSS selector or XPath expression
    Returns:
        Markdown text, or the caught exception on failure
    Options:
        clean: cleans fragment (lxml.html.clean defaults)
        raw: returns raw HTML fragment
        verbose: show http status, encoding, headers

    Bug fix: ``except StandardError`` is Python-2-only (the name was
    removed in Python 3, so the handler itself raises NameError);
    ``Exception`` covers the same non-exit errors on both versions.
    """
    try:
        return main(endpoint, stype, selector, clean, raw, verbose)
    except Exception as err:
        return err
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(self, _file): """return local file contents as endpoint."""
with open(_file) as fh: data = fh.read() if self.verbose: sys.stdout.write("read %d bytes from %s\n" % (fh.tell(), _file)) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GET(self, url): """returns text content of HTTP GET response."""
r = requests.get(url) if self.verbose: sys.stdout.write("%s %s\n" % (r.status_code, r.encoding)) sys.stdout.write(str(r.headers) + "\n") self.encoding = r.encoding return r.text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select(self, html, stype, expression): """returns WHATWG spec HTML fragment from selector expression."""
etree = html5lib.parse(html, treebuilder='lxml', namespaceHTMLElements=False) if stype == 'css': selector = lxml.cssselect.CSSSelector(expression) frag = list(selector(etree)) else: frag = etree.xpath(expression) if not frag: raise RuntimeError("Nothing found for: %s" % expression) return "".join([lxml.etree.tostring(x) for x in frag])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean(self, html): """removes evil HTML per lxml.html.clean defaults."""
return lxml.html.clean.clean_html(unicode(html, self.encoding))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filesystem_repository(_context, name=None, make_default=False, aggregate_class=None, repository_class=None, directory=None, content_type=None): """ Directive for registering a file-system based repository. """
cnf = {} if not directory is None: cnf['directory'] = directory if not content_type is None: cnf['content_type'] = content_type _repository(_context, name, make_default, aggregate_class, repository_class, REPOSITORY_TYPES.FILE_SYSTEM, 'add_filesystem_repository', cnf)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rdb_repository(_context, name=None, make_default=False, aggregate_class=None, repository_class=None, db_string=None, metadata_factory=None): """ Directive for registering a RDBM based repository. """
cnf = {} if not db_string is None: cnf['db_string'] = db_string if not metadata_factory is None: cnf['metadata_factory'] = metadata_factory _repository(_context, name, make_default, aggregate_class, repository_class, REPOSITORY_TYPES.RDB, 'add_rdb_repository', cnf)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def messaging(_context, repository, reset_on_start=False): """ Directive for setting up the user message resource in the appropriate repository. :param str repository: The repository to create the user messages resource in. """
discriminator = ('messaging', repository) reg = get_current_registry() config = Configurator(reg, package=_context.package) _context.action(discriminator=discriminator, # pylint: disable=E1101 callable=config.setup_system_repository, args=(repository,), kw=dict(reset_on_start=reset_on_start))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _filter(self, dict, keep): """ Remove any keys not in 'keep' """
if not keep: return dict result = {} for key, value in dict.iteritems(): if key in keep: result[key] = value return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main( upload='usbasp', core='arduino', replace_existing=True, ): """install custom boards."""
def main(
        upload='usbasp',
        core='arduino',
        replace_existing=True,
):
    """install custom boards.

    Registers a fixed set of ATmega board definitions (MCU, clock
    frequency, flash size) under the given uploader and core.
    """
    def install(mcu, f_cpu, kbyte):
        # Build one board definition and register it with the IDE.
        board = AutoBunch()
        board.name = TEMPL_NAME.format(mcu=mcu,
                                       f_cpu=format_freq(f_cpu),
                                       upload=upload)
        board_id = TEMPL_ID.format(mcu=mcu,
                                   f_cpu=(f_cpu),
                                   upload=upload)
        board.upload.using = upload
        board.upload.maximum_size = kbyte * 1024
        board.build.mcu = mcu
        board.build.f_cpu = str(f_cpu) + 'L'
        board.build.core = core
        # for 1.0
        board.build.variant = 'standard'
        install_board(board_id, board, replace_existing=replace_existing)

    # (mcu, f_cpu in Hz, flash size in KiB) — data-driven instead of the
    # previous eleven copy-pasted calls.
    boards = [
        ('atmega8', 1000000, 8),
        ('atmega8', 8000000, 8),
        ('atmega8', 12000000, 8),
        ('atmega88', 1000000, 8),
        ('atmega88', 8000000, 8),
        ('atmega88', 12000000, 8),
        ('atmega88', 20000000, 8),
        ('atmega328p', 20000000, 32),
        ('atmega328p', 16000000, 32),
        ('atmega328p', 8000000, 32),
        ('atmega328p', 1000000, 32),
    ]
    for mcu, f_cpu, kbyte in boards:
        install(mcu, f_cpu, kbyte)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_county_estimate(self, table, variable, code, datum): """ Creates new estimate from a census series. Data has following signature from API: { 'B00001_001E': '5373', 'NAME': 'Anderson County, Texas', 'county': '001', 'state': '48' } """
def write_county_estimate(self, table, variable, code, datum):
    """
    Creates new estimate from a census series.

    Data has following signature from API: {'B00001_001E': '5373',
    'NAME': 'Anderson County, Texas', 'county': '001', 'state': '48'}
    """
    county_code = "{}{}".format(datum["state"], datum["county"])
    try:
        division = Division.objects.get(
            code=county_code,
            level=self.COUNTY_LEVEL,
        )
        CensusEstimate.objects.update_or_create(
            division=division,
            variable=variable,
            defaults={"estimate": datum[code] or 0},
        )
    except ObjectDoesNotExist:
        print("ERROR: {}, {}".format(datum["NAME"], datum["state"]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_district_estimates_by_state( self, api, table, variable, estimate, state ): """ Calls API for all districts in a state and a given estimate. """
def get_district_estimates_by_state(
    self, api, table, variable, estimate, state
):
    """
    Calls API for all districts in a state and a given estimate.
    """
    state_division = Division.objects.get(
        level=self.STATE_LEVEL, code=state
    )
    geography = {
        "for": "congressional district:*",
        "in": "state:{}".format(state_division.code),
    }
    district_data = api.get(
        ("NAME", estimate), geography, year=int(table.year)
    )
    for datum in district_data:
        self.write_district_estimate(table, variable, estimate, datum)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_county_estimates_by_state( self, api, table, variable, estimate, state ): """ Calls API for all counties in a state and a given estimate. """
def get_county_estimates_by_state(
    self, api, table, variable, estimate, state
):
    """
    Calls API for all counties in a state and a given estimate.
    """
    state_division = Division.objects.get(
        level=self.STATE_LEVEL, code=state
    )
    geography = {
        "for": "county:*",
        "in": "state:{}".format(state_division.code),
    }
    county_data = api.get(
        ("NAME", estimate), geography, year=int(table.year)
    )
    for datum in county_data:
        self.write_county_estimate(table, variable, estimate, datum)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_state_estimates_by_state( self, api, table, variable, estimate, state ): """ Calls API for a state and a given estimate. """
def get_state_estimates_by_state(
    self, api, table, variable, estimate, state
):
    """
    Calls API for a state and a given estimate.
    """
    state_division = Division.objects.get(
        level=self.STATE_LEVEL, code=state
    )
    geography = {"for": "state:{}".format(state_division.code)}
    state_data = api.get(
        ("NAME", estimate), geography, year=int(table.year)
    )
    for datum in state_data:
        self.write_state_estimate(table, variable, estimate, datum)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def aggregate_variable(estimate, id): """ Aggregate census table variables by a custom label. """
def aggregate_variable(estimate, id):
    """
    Aggregate census table variables by a custom label.

    Collects the estimate value of every variable attached to the
    label, then reduces them according to the label's aggregation
    code: "s" = sum, "a" = mean, "m" = median. Unknown codes yield
    None.
    """
    values = [
        variable.estimates.get(division__id=id).estimate
        for variable in estimate.variable.label.variables.all()
    ]
    # Dispatch table instead of the if/elif chain.
    reducers = {
        "s": sum,
        "a": statistics.mean,
        "m": statistics.median,
    }
    reducer = reducers.get(estimate.variable.label.aggregation)
    return reducer(values) if reducer is not None else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def aggregate_national_estimates_by_district(self): """ Aggregates district-level estimates for each table within the country. Creates data structure designed for an export in this format: """
def aggregate_national_estimates_by_district(self):
    """
    Aggregates district-level estimates for each table within the
    country.

    Builds a nested dict keyed as
    ``data[series][year][table][fips][label-or-code]`` where ``fips``
    is the fixed national code "00". Labeled variables are aggregated
    once per (table, label) pair across all districts; unlabeled
    variables accumulate their raw estimates in a list.
    """
    data = {}
    fips = "00"  # national-level pseudo FIPS code
    aggregated_labels = []
    states = Division.objects.filter(level=self.DISTRICT_LEVEL)
    estimates = CensusEstimate.objects.filter(
        division__level=self.DISTRICT_LEVEL
    )
    for estimate in estimates:
        series = estimate.variable.table.series
        year = estimate.variable.table.year
        table = estimate.variable.table.code
        label = estimate.variable.label.label
        table_label = "{}{}".format(table, label)
        code = estimate.variable.code
        # Lazily create the nested dict levels on first sight.
        if series not in data:
            data[series] = {}
        if year not in data[series]:
            data[series][year] = {}
        if table not in data[series][year]:
            data[series][year][table] = {}
        if fips not in data[series][year][table]:
            data[series][year][table][fips] = {}
        if label is not None:
            # Aggregate each (table, label) pair only once.
            if table_label not in aggregated_labels:
                # c= {**a, **b}
                aggregated_labels.append(table_label)
                data[series][year][table][fips][label] = [
                    self.aggregate_variable(estimate, division.id)
                    for division in states
                    if len(
                        CensusEstimate.objects.filter(
                            variable=estimate.variable,
                            division=division.id,
                        )
                    )
                    > 0
                ]
        else:
            # Unlabeled variable: collect raw estimates per code.
            if code in data[series][year][table][fips]:
                data[series][year][table][fips][code].append(
                    estimate.estimate
                )
            else:
                data[series][year][table][fips][code] = [estimate.estimate]
    # print(data)
    return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def aggregate_state_estimates_by_county(self, parent): """ Aggregates county-level estimates for each table within a given state. Creates data structure designed for an export in this format: """
def aggregate_state_estimates_by_county(self, parent):
    """
    Aggregates county-level estimates for each table within a given
    state.

    Builds a nested dict keyed as
    ``data[series][year][table][county-fips][label-or-code]``.
    Labeled variables are aggregated once per (table, label) pair per
    county; unlabeled variables store their raw estimate.

    :param parent: the state Division whose counties are aggregated.
    """
    data = {}
    for division in tqdm(
        Division.objects.filter(level=self.COUNTY_LEVEL, parent=parent)
    ):
        fips = division.code
        id = division.id
        aggregated_labels = []  # Keep track of already agg'ed variables
        for estimate in division.census_estimates.all():
            series = estimate.variable.table.series
            year = estimate.variable.table.year
            table = estimate.variable.table.code
            label = estimate.variable.label.label
            table_label = "{}{}".format(table, label)
            code = estimate.variable.code
            # Lazily create the nested dict levels on first sight.
            if series not in data:
                data[series] = {}
            if year not in data[series]:
                data[series][year] = {}
            if table not in data[series][year]:
                data[series][year][table] = {}
            if fips not in data[series][year][table]:
                data[series][year][table][fips] = {}
            if label is not None:
                # Aggregate each (table, label) pair only once per county.
                if table_label not in aggregated_labels:
                    aggregated_labels.append(table_label)
                    data[series][year][table][fips][
                        label
                    ] = self.aggregate_variable(estimate, id)
            else:
                data[series][year][table][division.code][
                    code
                ] = estimate.estimate
    # print(data)
    return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def xml(self, fn=None, src='word/document.xml', XMLClass=XML, **params):
    """Return the parsed XML for *src* inside the archive, caching the
    result per source path. Returns None when the archive has no member
    named *src*.
    """
    if src in self.xml_cache:
        return self.xml_cache[src]
    if src not in self.zipfile.namelist():
        return
    # Derive a default output filename from the .docx name, if any.
    default_fn = (self.fn and self.fn.replace('.docx', '.xml')) or None
    parsed = XMLClass(fn=fn or default_fn, root=self.zipfile.read(src))
    self.xml_cache[src] = parsed
    return parsed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def endnotemap(self, cache=True): """return the endnotes from the docx, keyed to string id."""
def endnotemap(self, cache=True):
    """return the endnotes from the docx, keyed to string id."""
    if cache == True and self.__endnotemap is not None:
        return self.__endnotemap
    notes = Dict()
    x = self.xml(src='word/endnotes.xml')
    if x is None:
        return notes
    for endnote in x.root.xpath("w:endnote", namespaces=self.NS):
        note_id = endnote.get("{%(w)s}id" % self.NS)
        note_type = endnote.get("{%(w)s}type" % self.NS)
        notes[note_id] = Dict(id=note_id, type=note_type, elem=endnote)
    if cache == True:
        self.__endnotemap = notes
    return notes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def footnotemap(self, cache=True): """return the footnotes from the docx, keyed to string id."""
def footnotemap(self, cache=True):
    """return the footnotes from the docx, keyed to string id."""
    if cache == True and self.__footnotemap is not None:
        return self.__footnotemap
    notes = Dict()
    x = self.xml(src='word/footnotes.xml')
    if x is None:
        return notes
    for footnote in x.root.xpath("w:footnote", namespaces=self.NS):
        note_id = footnote.get("{%(w)s}id" % self.NS)
        note_type = footnote.get("{%(w)s}type" % self.NS)
        notes[note_id] = Dict(id=note_id, type=note_type, elem=footnote)
    if cache == True:
        self.__footnotemap = notes
    return notes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def commentmap(self, cache=True): """return the comments from the docx, keyed to string id."""
def commentmap(self, cache=True):
    """return the comments from the docx, keyed to string id."""
    if cache == True and self.__commentmap is not None:
        return self.__commentmap
    comments = Dict()
    x = self.xml(src='word/comments.xml')
    if x is None:
        return comments
    for comment in x.root.xpath("w:comment", namespaces=self.NS):
        comment_id = comment.get("{%(w)s}id" % self.NS)
        comment_type = comment.get("{%(w)s}type" % self.NS)
        comments[comment_id] = Dict(id=comment_id, type=comment_type,
                                    elem=comment)
    if cache == True:
        self.__commentmap = comments
    return comments
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def selector(C, style): """return the selector for the given stylemap style"""
def selector(C, style):
    """return the selector for the given stylemap style

    Maps a style to a CSS selector "tag.class": paragraph styles become
    h1..h8 (from the outline level) or p, character styles span, table
    styles table, numbering styles ol.

    :raises ValueError: for any other style type (the fall-through
        previously left ``tag`` unbound and crashed with NameError).
    """
    clas = C.classname(style.name)
    if style.type == 'paragraph':
        # heading outline levels are 0..7 internally, indicating h1..h8;
        # a missing outlineLvl defaults to 8, which maps to a plain <p>.
        outlineLvl = int((style.properties.get('outlineLvl') or {}).get('val') or 8) + 1
        if outlineLvl < 9:
            tag = 'h%d' % outlineLvl
        else:
            tag = 'p'
    elif style.type == 'character':
        tag = 'span'
    elif style.type == 'table':
        tag = 'table'
    elif style.type == 'numbering':
        tag = 'ol'
    else:
        # Fail loudly instead of the accidental NameError.
        raise ValueError('unsupported style type: %r' % (style.type,))
    return "%s.%s" % (tag, clas)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_collection_from_stream(resource, stream, content_type): """ Creates a new collection for the registered resource and calls `load_into_collection_from_stream` with it. """
def load_collection_from_stream(resource, stream, content_type):
    """
    Creates a new collection for the registered resource and calls
    `load_into_collection_from_stream` with it.
    """
    collection = create_staging_collection(resource)
    load_into_collection_from_stream(collection, stream, content_type)
    return collection
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_into_collection_from_file(collection, filename, content_type=None): """ Loads resources from the specified file into the given collection resource. If no content type is provided, an attempt is made to look up the extension of the given filename in the MIME content type registry. """
def load_into_collection_from_file(collection, filename, content_type=None):
    """
    Loads resources from the specified file into the given collection
    resource.

    If no content type is provided, an attempt is made to look up the
    extension of the given filename in the MIME content type registry.

    :raises ValueError: if the MIME type can not be inferred from the
        file extension.
    """
    if content_type is None:
        ext = os.path.splitext(filename)[1]
        try:
            content_type = MimeTypeRegistry.get_type_for_extension(ext)
        except KeyError:
            raise ValueError('Could not infer MIME type for file extension '
                             '"%s".' % ext)
    # Context manager ensures the file handle is closed reliably; the
    # previous version leaked the open file object.
    with open(filename, 'rU') as stream:
        load_into_collection_from_stream(collection, stream, content_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_collection_from_file(resource, filename, content_type=None): """ Creates a new collection for the registered resource and calls `load_into_collection_from_file` with it. """
def load_collection_from_file(resource, filename, content_type=None):
    """
    Creates a new collection for the registered resource and calls
    `load_into_collection_from_file` with it.
    """
    collection = create_staging_collection(resource)
    load_into_collection_from_file(collection, filename,
                                   content_type=content_type)
    return collection
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_into_collection_from_url(collection, url, content_type=None): """ Loads resources from the representation contained in the given URL into the given collection resource. :returns: collection resource """
def load_into_collection_from_url(collection, url, content_type=None):
    """
    Loads resources from the representation contained in the given URL
    into the given collection resource.

    :returns: collection resource
    """
    parsed = urlparse.urlparse(url)
    # Guard clause: only file:// URLs (local paths) are supported.
    if parsed.scheme != 'file': # pylint: disable=E1101
        raise ValueError('Unsupported URL scheme "%s".' % parsed.scheme)
    load_into_collection_from_file(collection,
                                   parsed.path, # pylint: disable=E1101
                                   content_type=content_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_collection_from_url(resource, url, content_type=None): """ Creates a new collection for the registered resource and calls `load_into_collection_from_url` with it. """
def load_collection_from_url(resource, url, content_type=None):
    """
    Creates a new collection for the registered resource and calls
    `load_into_collection_from_url` with it.
    """
    collection = create_staging_collection(resource)
    load_into_collection_from_url(collection, url,
                                  content_type=content_type)
    return collection
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_into_collections_from_zipfile(collections, zipfile): """ Loads resources contained in the given ZIP archive into each of the given collections. The ZIP file is expected to contain a list of file names obtained with the :func:`get_collection_filename` function, each pointing to a file of zipped collection resource data. :param collections: sequence of collection resources :param str zipfile: ZIP file name """
def load_into_collections_from_zipfile(collections, zipfile):
    """
    Loads resources contained in the given ZIP archive into each of the
    given collections.

    The ZIP file is expected to contain a list of file names obtained
    with the :func:`get_collection_filename` function, each pointing to
    a file of zipped collection resource data.

    :param collections: sequence of collection resources
    :param str zipfile: ZIP file name
    """
    with ZipFile(zipfile) as zipf:
        names = zipf.namelist()
        # Map archive member base names (without extension) to their
        # index in the name list so extensions can vary per member.
        name_map = dict([(os.path.splitext(name)[0], index)
                         for (index, name) in enumerate(names)])
        for coll in collections:
            coll_name = get_collection_name(coll)
            index = name_map.get(coll_name)
            if index is None:
                # No archive member for this collection; skip it.
                continue
            coll_fn = names[index]
            ext = os.path.splitext(coll_fn)[1]
            try:
                content_type = \
                    MimeTypeRegistry.get_type_for_extension(ext)
            except KeyError:
                raise ValueError('Could not infer MIME type for file '
                                 'extension "%s".' % ext)
            # Strings are always written as UTF-8 encoded byte strings when
            # the zip file is created, so we have to wrap the iterator into
            # a decoding step.
            coll_data = DecodingStream(zipf.open(coll_fn, 'r'))
            load_into_collection_from_stream(coll,
                                             coll_data,
                                             content_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_resource_dependency_graph(resource_classes, include_backrefs=False): """ Builds a graph of dependencies among the given resource classes. The dependency graph is a directed graph with member resource classes as nodes. An edge between two nodes represents a member or collection attribute. :param resource_classes: resource classes to determine interdependencies of. :type resource_classes: sequence of registered resources. :param bool include_backrefs: flag indicating if dependencies introduced by back-references (e.g., a child resource referencing its parent) should be included in the dependency graph. """
def build_resource_dependency_graph(resource_classes,
                                    include_backrefs=False):
    """
    Builds a graph of dependencies among the given resource classes.

    The dependency graph is a directed graph with member resource
    classes as nodes. An edge between two nodes represents a member or
    collection attribute.

    :param resource_classes: resource classes to determine
      interdependencies of.
    :type resource_classes: sequence of registered resources.
    :param bool include_backrefs: flag indicating if dependencies
      introduced by back-references (e.g., a child resource referencing
      its parent) should be included in the dependency graph.
    """
    def visit(mb_cls, grph, path, incl_backrefs):
        # Depth-first traversal over non-terminal resource attributes.
        for attr_name in get_resource_class_attribute_names(mb_cls):
            if is_resource_class_terminal_attribute(mb_cls, attr_name):
                continue
            child_descr = getattr(mb_cls, attr_name)
            child_mb_cls = get_member_class(child_descr.attr_type)
            # We do not follow cyclic references back to a resource class
            # that is last in the path.
            if len(path) > 0 and child_mb_cls is path[-1] \
               and not incl_backrefs:
                continue
            if not grph.has_node(child_mb_cls):
                # Recurse only into classes not seen before.
                grph.add_node(child_mb_cls)
                path.append(mb_cls)
                visit(child_mb_cls, grph, path, incl_backrefs)
                path.pop()
            if not grph.has_edge((mb_cls, child_mb_cls)):
                grph.add_edge((mb_cls, child_mb_cls))
    dep_grph = digraph()
    for resource_class in resource_classes:
        mb_cls = get_member_class(resource_class)
        if not dep_grph.has_node(mb_cls):
            dep_grph.add_node(mb_cls)
            visit(mb_cls, dep_grph, [], include_backrefs)
    return dep_grph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_resource_graph(resource, dependency_graph=None): """ Traverses the graph of resources that is reachable from the given resource. If a resource dependency graph is given, links to other resources are only followed if the dependency graph has an edge connecting the two corresponding resource classes; otherwise, a default graph is built which ignores all direct cyclic resource references. :resource: a :class:`everest.resources.MemberResource` instance. :returns: a :class:`ResourceGraph` instance representing the graph of resources reachable from the given resource. """
def build_resource_graph(resource, dependency_graph=None):
    """
    Traverses the graph of resources that is reachable from the given
    resource.

    If a resource dependency graph is given, links to other resources
    are only followed if the dependency graph has an edge connecting the
    two corresponding resource classes; otherwise, a default graph is
    built which ignores all direct cyclic resource references.

    :resource: a :class:`everest.resources.MemberResource` instance.
    :returns: a :class:`ResourceGraph` instance representing the graph
        of resources reachable from the given resource.
    """
    def visit(rc, grph, dep_grph):
        mb_cls = type(rc)
        attr_map = get_resource_class_attributes(mb_cls)
        for attr_name, attr in iteritems_(attr_map):
            if is_resource_class_terminal_attribute(mb_cls, attr_name):
                continue
            # Only follow the resource attribute if the dependency graph
            # has an edge here.
            child_mb_cls = get_member_class(attr.attr_type)
            if not dep_grph.has_edge((mb_cls, child_mb_cls)):
                continue
            child_rc = getattr(rc, attr_name)
            if is_resource_class_collection_attribute(mb_cls, attr_name):
                # Collection attribute: visit each member.
                for child_mb in child_rc:
                    if not grph.has_node(child_mb): # Ignore cyclic references.
                        grph.add_node(child_mb)
                        grph.add_edge((rc, child_mb))
                        visit(child_mb, grph, dep_grph)
            else: # Member.
                if not grph.has_node(child_rc): # Ignore cyclic references.
                    grph.add_node(child_rc)
                    grph.add_edge((rc, child_rc))
                    visit(child_rc, grph, dep_grph)
    if dependency_graph is None:
        dependency_graph = build_resource_dependency_graph(
                                            [get_member_class(resource)])
    graph = ResourceGraph()
    # Accept either a single member or an iterable of members.
    if provides_member_resource(resource):
        rcs = [resource]
    else:
        rcs = resource
    for rc in rcs:
        graph.add_node(rc)
        visit(rc, graph, dependency_graph)
    return graph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_connected_resources(resource, dependency_graph=None): """ Collects all resources connected to the given resource and returns a dictionary mapping member resource classes to new collections containing the members found. """
def find_connected_resources(resource, dependency_graph=None):
    """
    Collects all resources connected to the given resource and returns a
    dictionary mapping member resource classes to new collections
    containing the members found.
    """
    resource_graph = \
        build_resource_graph(resource, dependency_graph=dependency_graph)
    entity_map = OrderedDict()
    # Group entities by member class in topological order.
    for member in topological_sorting(resource_graph):
        member_cls = get_member_class(member)
        entity_map.setdefault(member_cls, []).append(member.get_entity())
    return entity_map
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_files(self, resource, directory): """ Dumps the given resource and all resources linked to it into a set of representation files in the given directory. """
def to_files(self, resource, directory):
    """
    Dumps the given resource and all resources linked to it into a set
    of representation files in the given directory.
    """
    collections = self.__collect(resource)
    for (mb_cls, coll) in iteritems_(collections):
        fn = get_write_collection_path(mb_cls,
                                       self.__content_type,
                                       directory=directory)
        # NOTE(review): ``directory`` is passed to
        # get_write_collection_path above and then joined onto ``fn``
        # again below — confirm this does not duplicate the path prefix.
        with open_text(os.path.join(directory, fn)) as strm:
            dump_resource(coll, strm, content_type=self.__content_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_zipfile(self, resource, zipfile): """ Dumps the given resource and all resources linked to it into the given ZIP file. """
def to_zipfile(self, resource, zipfile):
    """
    Dumps the given resource and all resources linked to it into the
    given ZIP file.
    """
    representation_map = self.to_strings(resource)
    with ZipFile(zipfile, 'w') as archive:
        for (mb_cls, rpr_string) in iteritems_(representation_map):
            member_fn = get_collection_filename(mb_cls,
                                                self.__content_type)
            archive.writestr(member_fn, rpr_string,
                             compress_type=ZIP_DEFLATED)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(self): """Returns the file contents as validated JSON text. """
def read(self):
    """Returns the file contents as validated JSON text.
    """
    file_path = os.path.join(self.path, self.name)
    try:
        with open(file_path) as f:
            json_text = f.read()
    except FileNotFoundError as e:
        raise JSONFileError(e) from e
    # Validate only; the raw text is what gets returned.
    try:
        json.loads(json_text)
    except (json.JSONDecodeError, TypeError) as e:
        raise JSONFileError(f"{e} Got {file_path}") from e
    return json_text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exists(self, batch_id=None): """Returns True if batch_id exists in the history. """
def exists(self, batch_id=None):
    """Returns True if batch_id exists in the history.
    """
    try:
        self.model.objects.get(batch_id=batch_id)
    except self.model.DoesNotExist:
        return False
    else:
        return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update( self, filename=None, batch_id=None, prev_batch_id=None, producer=None, count=None, ): """Creates a history model instance. """
# TODO: refactor model enforce unique batch_id # TODO: refactor model to not allow NULLs if not filename: raise BatchHistoryError("Invalid filename. Got None") if not batch_id: raise BatchHistoryError("Invalid batch_id. Got None") if not prev_batch_id: raise BatchHistoryError("Invalid prev_batch_id. Got None") if not producer: raise BatchHistoryError("Invalid producer. Got None") if self.exists(batch_id=batch_id): raise IntegrityError("Duplicate batch_id") try: obj = self.model.objects.get(batch_id=batch_id) except self.model.DoesNotExist: obj = self.model( filename=filename, batch_id=batch_id, prev_batch_id=prev_batch_id, producer=producer, total=count, ) obj.transaction_file.name = filename obj.save() return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populate(self, deserialized_txs=None, filename=None, retry=None): """Populates the batch with unsaved model instances from a generator of deserialized objects. """
def populate(self, deserialized_txs=None, filename=None, retry=None):
    """Populates the batch with unsaved model instances
    from a generator of deserialized objects.
    """
    if not deserialized_txs:
        raise BatchError("Failed to populate batch. There are no objects to add.")
    self.filename = filename
    if not self.filename:
        raise BatchError("Invalid filename. Got None")
    try:
        # Peek at the first item to capture/validate batch metadata,
        # keep it, then continue consuming the SAME generator below —
        # the break after one iteration is intentional.
        for deserialized_tx in deserialized_txs:
            self.peek(deserialized_tx)
            self.objects.append(deserialized_tx.object)
            break
        for deserialized_tx in deserialized_txs:
            self.objects.append(deserialized_tx.object)
    except DeserializationError as e:
        raise BatchDeserializationError(e) from e
    except JSONFileError as e:
        raise BatchDeserializationError(e) from e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def peek(self, deserialized_tx): """Peeks into first tx and sets self attrs or raise. """
def peek(self, deserialized_tx):
    """Peeks into first tx and sets self attrs or raise.
    """
    tx = deserialized_tx.object
    self.batch_id = tx.batch_id
    self.prev_batch_id = tx.prev_batch_id
    self.producer = tx.producer
    if self.batch_history.exists(batch_id=self.batch_id):
        raise BatchAlreadyProcessed(
            f"Batch {self.batch_id} has already been processed"
        )
    # A batch referencing a different predecessor requires that
    # predecessor to be in the history already.
    if self.prev_batch_id != self.batch_id and not self.batch_history.exists(
        batch_id=self.prev_batch_id
    ):
        raise InvalidBatchSequence(
            f"Invalid import sequence. History does not exist for prev_batch_id. "
            f"Got file='{self.filename}', prev_batch_id="
            f"{self.prev_batch_id}, batch_id={self.batch_id}."
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """Saves all model instances in the batch as model. """
def save(self):
    """Saves all model instances in the batch as model.
    """
    if not self.objects:
        raise BatchError("Save failed. Batch is empty")
    saved = 0
    field_names = [field.name for field in self.model._meta.get_fields()]
    for deserialized_tx in self.objects:
        try:
            self.model.objects.get(pk=deserialized_tx.pk)
        except self.model.DoesNotExist:
            # New pk: copy every model field present on the
            # deserialized object and create the row.
            data = {}
            for name in field_names:
                try:
                    data[name] = getattr(deserialized_tx, name)
                except AttributeError:
                    pass
            self.model.objects.create(**data)
            saved += 1
    return saved
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_batch(self, filename): """Imports the batch of outgoing transactions into model IncomingTransaction. """
def import_batch(self, filename):
    """Imports the batch of outgoing transactions into
    model IncomingTransaction.
    """
    json_file = self.json_file_cls(name=filename, path=self.path)
    batch = self.batch_cls()
    try:
        deserialized = json_file.deserialized_objects
    except JSONFileError as e:
        raise TransactionImporterError(e) from e
    try:
        batch.populate(deserialized_txs=deserialized,
                       filename=json_file.name)
    except (
        BatchDeserializationError,
        InvalidBatchSequence,
        BatchAlreadyProcessed,
    ) as e:
        raise TransactionImporterError(e) from e
    batch.save()
    batch.update_history()
    return batch
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def timelimit(timeout): """borrowed from web.py"""
def timelimit(timeout):
    """borrowed from web.py

    Decorator factory: runs the wrapped function in a daemon thread and
    raises TimeoutError if it does not finish within *timeout* seconds.
    Exceptions raised inside the worker thread are re-raised in the
    caller with their original traceback.

    Fixes: the original used Python-2-only ``raise X, y`` / three-arg
    raise syntax and the deprecated ``setDaemon``/``isAlive`` methods.
    """
    def _1(function):
        def _2(*args, **kw):
            class Dispatch(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None
                    # Daemon thread so a timed-out call cannot block
                    # interpreter shutdown.
                    self.daemon = True
                    self.start()

                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except BaseException:
                        # Capture full exc_info for the re-raise below.
                        self.error = sys.exc_info()

            c = Dispatch()
            c.join(timeout)
            if c.is_alive():
                raise TimeoutError('took too long')
            if c.error:
                # Re-raise with the worker thread's traceback.
                raise c.error[1].with_traceback(c.error[2])
            return c.result
        return _2
    return _1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _populateBuffer(self, stream, n): """ Iterator that returns N steps of the genshi stream. Found that performance really sucks for n = 1 (0.5 requests/second for the root resources versus 80 requests/second for a blocking algorithm). Hopefully increasing the number of steps per timeslice will significantly improve performance. """
def _populateBuffer(self, stream, n):
    """ Iterator that returns N steps of the genshi stream.
    Found that performance really sucks for n = 1 (0.5 requests/second
    for the root resources versus 80 requests/second for a blocking
    algorithm). Hopefully increasing the number of steps per timeslice
    will significantly improve performance.
    """
    # NOTE: Python 2 / Twisted code (``xrange``, ``stream.next()``,
    # ``except Exc, e`` syntax) — do not run under Python 3 as-is.
    try:
        # Pull up to n chunks from the genshi stream into the buffer.
        for x in xrange(n):
            output = stream.next()
            self._buffer.write(output)
    except StopIteration, e:
        # Stream exhausted: fire the completion deferred.
        self._deferred.callback(None)
    except Exception, e:
        self._deferred.errback(e)
    else:
        # More data remains; reschedule cooperatively so the reactor
        # can service other events between timeslices.
        self.delayedCall = reactor.callLater(CALL_DELAY,
                                             self._populateBuffer,
                                             stream, n)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_message( json_meta, data, data_type=0, version=b'\x00\x01@\x00'): """Create message, containing meta and data in df-envelope format. @json_meta - metadata @data - binary data @data_type - data type code for binary data @version - version of machine header @return - message as bytearray """
def create_message(
        json_meta, data,
        data_type=0,
        version=b'\x00\x01@\x00'):
    """Create message, containing meta and data in df-envelope format.

    @json_meta - metadata
    @data - binary data
    @data_type - data type code for binary data
    @version - version of machine header
    @return - message as bytearray
    """
    __check_data(data)
    prepared_meta = __prepare_meta(json_meta)
    # Compression settings come from the metadata itself.
    compressed_data = __compress(json_meta, data)
    machine_header = __create_machine_header(
        json_meta, compressed_data, data_type, version)
    return machine_header + prepared_meta + compressed_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_from_file(filename, nodata=False): """Parse df message from file. @filename - path to file @nodata - do not load data @return - [binary header, metadata, binary data] """
def parse_from_file(filename, nodata=False):
    """Parse df message from file.

    @filename - path to file
    @nodata - do not load data
    @return - [binary header, metadata, binary data]
    """
    with open(filename, "rb") as stream:
        header = read_machine_header(stream)
        raw_meta = stream.read(header['meta_len'])
        meta = __parse_meta(raw_meta, header)
        if nodata:
            data = b''
        else:
            data = __decompress(meta, stream.read(header['data_len']))
    return header, meta, data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_message(message, nodata=False): """Parse df message from bytearray. @message - message data @nodata - do not load data @return - [binary header, metadata, binary data] """
def parse_message(message, nodata=False):
    """Parse df message from bytearray.

    @message - message data
    @nodata - do not load data
    @return - [binary header, metadata, binary data]
    """
    header = read_machine_header(message)
    meta_begin = __get_machine_header_length(header)
    meta_end = meta_begin + header['meta_len']
    meta = __parse_meta(message[meta_begin:meta_end], header)
    if nodata:
        data = b''
    else:
        data = __decompress(
            meta, message[meta_end:meta_end + header['data_len']])
    return header, meta, data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_machine_header(data): """Parse binary header. @data - bytearray, contains binary header of file opened in 'rb' mode @return - parsed binary header """
def read_machine_header(data):
    """Parse binary header.

    @data - bytearray containing the binary header, or a binary
        file-like object opened in 'rb' mode
    @return - parsed binary header as a dict

    Raises ValueError for unsupported input types and
    NotImplementedError for unknown header signatures.
    """
    if isinstance(data, (bytes, bytearray)):
        stream = io.BytesIO(data)
    elif hasattr(data, 'read'):
        # Generalized: accept any binary file-like object, not only
        # io.BufferedReader (io.BytesIO, sockets' makefile(), etc.).
        stream = data
    else:
        raise ValueError("data should be either bytearray or file 'rb' mode.")
    header = dict()
    header_type = stream.read(6)
    if header_type == b"#!\x00\x01@\x00":
        # Legacy header: five big-endian uint32 fields followed by
        # 4 reserved bytes.
        header['type'] = header_type[2:6]
        (header['time'], header['meta_type'], header['meta_len'],
         header['data_type'], header['data_len']) = struct.unpack(
             '>IIIII', stream.read(20))
        stream.read(4)
    elif header_type == b"#~DF02":
        # DF02 header: 2 raw bytes of meta type, two big-endian uint32
        # lengths, then 4 reserved bytes.
        header['type'] = header_type[2:6]
        header['meta_type'] = stream.read(2)
        header['meta_len'] = struct.unpack('>I', stream.read(4))[0]
        header['data_len'] = struct.unpack('>I', stream.read(4))[0]
        stream.read(4)
    else:
        raise NotImplementedError(
            "Parser for machine header %s not implemented" %
            (header_type.decode()))
    return header