text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_req(name, old_req, config={}): """ Takes a requirement and updates it based on a specific attribute key args: name: the name of the attribute old_req: the requirement definition """
if not name: return old_req new_req = copy.deepcopy(old_req) del_idxs = [] if "req_items" in old_req: req_key = get_req_key(old_req['req_items']) for i, item in enumerate(old_req['req_items']): if name == item[req_key] and item.get("dict_params"): for param, value in item['dict_params'].items(): new_req['item_dict'][param].update(value) if item.get("remove_if"): test_val = get_attr(config, item['remove_if']['attr']) if test_val == item['remove_if']['value']: del_idxs.append(i) for idx in sorted(del_idxs, reverse=True): del new_req['req_items'][idx] return new_req
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_options_from_str(obj_str, **kwargs): """ Returns a list of options from a python object string args: obj_str: python list of options or a python object path Example: "rdfframework.connections.ConnManager[{param1}]" kwargs: * kwargs used to format the 'obj_str' """
if isinstance(obj_str, list): return obj_str try: obj = get_obj_frm_str(obj_str, **kwargs) if obj: return list(obj) except AttributeError: pass return []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def strip_errors(obj): """ Reads through and error object and replaces the error dict with the value args: obj: the error object/dictionary """
rtn_obj = copy.deepcopy(obj) try: del rtn_obj["__error_keys__"] except KeyError: pass for key in obj.get('__error_keys__', []): rtn_obj[key] = rtn_obj[key]['value'] return rtn_obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _peek(self, *types): """Returns the token type for lookahead; if there are any args then the list of args is the set of token types to allow"""
tok = self._scanner.token(self._pos, types) return tok[2]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def last_midnight(): """ return a datetime of last mid-night """
now = datetime.now() return datetime(now.year, now.month, now.day)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handle(self, *args, **options): """ With no arguments, find the first user in the system with the is_superuser or is_staff flag set to true, or just the first user in the system period. With a single argument, look for the user with that value as the USERNAME_FIELD value. When a user is found, print out a URL slug you can paste into your browser to login as the user. """
user_model = get_user_model() if len(args) == 0: # find the first superuser, or staff member or user filters = [{"is_superuser": True}, {"is_staff": True}, {}] user = None for f in filters: try: user = user_model._default_manager.filter(**f).order_by("pk").first() if user: break except FieldError as e: pass if user is None: raise CommandError("No users found!") elif len(args) == 1: # find the user with the USERNAME_FIELD equal to the command line # argument try: user = user_model._default_manager.get_by_natural_key(args[0]) except user_model.DoesNotExist as e: raise CommandError("The user does not exist") else: raise CommandError("You passed me too many arguments") signer = TimestampSigner() signature = signer.sign(str(user.pk)) self.stdout.write(reverse(login, args=(signature,)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_config(configfile): """ Return a dict with configuration from the supplied yaml file """
try: with open(configfile, 'r') as ymlfile: try: config = yaml.load(ymlfile) return config except yaml.parser.ParserError: raise PyYAMLConfigError( 'Could not parse config file: {}'.format(configfile), ) except IOError: raise PyYAMLConfigError( 'Could not open config file: {}'.format(configfile), )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_config(configfile, content): """ Write dict to a file in yaml format """
with open(configfile, 'w+') as ymlfile: yaml.dump( content, ymlfile, default_flow_style=False, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_record(): """ Install an httplib wrapper that records but does not modify calls. """
global record, playback, current if record: raise StateError("Already recording.") if playback: raise StateError("Currently playing back.") record = True current = ReplayData() install(RecordingHTTPConnection, RecordingHTTPSConnection)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_types(func): """ Check if annotated function arguments are of the correct type """
call = PythonCall(func) @wraps(func) def decorator(*args, **kwargs): parameters = call.bind(args, kwargs) for arg_name, expected_type in func.__annotations__.items(): if not isinstance(parameters[arg_name], expected_type): raise TypeError("{} must be a {}".format( arg_name, expected_type)) return call.apply(args, kwargs) return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_absolute_uri(self, uri): """ Return a fully qualified absolute url for the given uri. """
request = self.context.get('request', None) return ( request.build_absolute_uri(uri) if request is not None else uri )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_resource_uri(self, obj): """ Return the uri of the given object. """
url = 'api:%s:%s-detail' % ( self.api_version, getattr( self, 'resource_view_name', self.Meta.model._meta.model_name ) ) return reverse(url, request=self.context.get('request', None), kwargs={ self.lookup_field: getattr(obj, self.lookup_field) })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render(self, template: str, **vars) -> str: """ Render the named template. The current context will be available to the template as the ``ctx`` variable. :param template: name of the template file :param vars: extra template variables :return: the rendered results """
vars.setdefault('ctx', self._ctx) return self._renderer.render(template, **vars)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_string(self, source: str, **vars) -> str: """ Render the template contained in the given string. The current context will be available to the template as the ``ctx`` variable. :param source: content of the template to render :param vars: extra variables made available to the template :return: the rendered results """
vars.setdefault('ctx', self._ctx) return self._renderer.render_string(source, **vars)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _write_header(name, header, required, stream, encoder, strict=False): """ Write AMF message header. @param name: Name of the header. @param header: Header value. @param required: Whether understanding this header is required (?). @param stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} that will receive the encoded header. @param encoder: An encoder capable of encoding C{AMF0}. @param strict: Use strict encoding policy. Default is C{False}. Will write the correct header length after writing the header. """
stream.write_ushort(len(name)) stream.write_utf8_string(name) stream.write_uchar(required) write_pos = stream.tell() stream.write_ulong(0) old_pos = stream.tell() encoder.writeElement(header) new_pos = stream.tell() if strict: stream.seek(write_pos) stream.write_ulong(new_pos - old_pos) stream.seek(new_pos)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_body(stream, decoder, strict=False, logger=None): """ Read an AMF message body from the stream. @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} @param decoder: An AMF0 decoder. @param strict: Use strict decoding policy. Default is `False`. @param logger: Used to log interesting events whilst reading a remoting body. @type logger: A C{logging.Logger} instance or C{None}. @return: A C{tuple} containing the C{id} of the request and the L{Request} or L{Response} """
def _read_args(): # we have to go through this insanity because it seems that amf0 # does not keep the array of args in the object references lookup type_byte = stream.peek(1) if type_byte == '\x11': if not decoder.use_amf3: raise pyamf.DecodeError( "Unexpected AMF3 type with incorrect message type") return decoder.readElement() if type_byte != '\x0a': raise pyamf.DecodeError("Array type required for request body") stream.read(1) x = stream.read_ulong() return [decoder.readElement() for i in xrange(x)] target = stream.read_utf8_string(stream.read_ushort()) response = stream.read_utf8_string(stream.read_ushort()) status = STATUS_OK is_request = True for code, s in STATUS_CODES.iteritems(): if not target.endswith(s): continue is_request = False status = code target = target[:0 - len(s)] if logger: logger.debug('Remoting target: %r' % (target,)) data_len = stream.read_ulong() pos = stream.tell() if is_request: data = _read_args() else: data = decoder.readElement() if strict and pos + data_len != stream.tell(): raise pyamf.DecodeError("Data read from stream does not match body " "length (%d != %d)" % (pos + data_len, stream.tell(),)) if is_request: return response, Request(target, body=data) if status == STATUS_ERROR and isinstance(data, pyamf.ASObject): data = get_fault(data) return target, Response(data, status)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _write_body(name, message, stream, encoder, strict=False): """ Write AMF message body. @param name: The name of the request. @param message: The AMF L{Message} @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} @param encoder: An AMF0 encoder. @param strict: Use strict encoding policy. Default is `False`. """
def _encode_body(message): if isinstance(message, Response): encoder.writeElement(message.body) return stream.write('\x0a') stream.write_ulong(len(message.body)) for x in message.body: encoder.writeElement(x) if not isinstance(message, (Request, Response)): raise TypeError("Unknown message type") target = None if isinstance(message, Request): target = unicode(message.target) else: target = u"%s%s" % (name, _get_status(message.status)) target = target.encode('utf8') stream.write_ushort(len(target)) stream.write_utf8_string(target) response = 'null' if isinstance(message, Request): response = name stream.write_ushort(len(response)) stream.write_utf8_string(response) if not strict: stream.write_ulong(0) _encode_body(message) return write_pos = stream.tell() stream.write_ulong(0) old_pos = stream.tell() _encode_body(message) new_pos = stream.tell() stream.seek(write_pos) stream.write_ulong(new_pos - old_pos) stream.seek(new_pos)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode(stream, strict=False, logger=None, timezone_offset=None): """ Decodes the incoming stream as a remoting message. @type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} @param strict: Enforce strict decoding. Default is `False`. @param logger: Used to log interesting events whilst decoding a remoting message. @type logger: U{logging.Logger<http:// docs.python.org/library/logging.html#loggers>} @param timezone_offset: The difference between the current timezone and UTC. Date/times should always be handled in UTC to avoid confusion but this is required for legacy systems. @type timezone_offset: U{datetime.datetime.timedelta<http:// docs.python.org/library/datetime.html#datetime.timedelta} @return: Message L{envelope<Envelope>}. """
if not isinstance(stream, util.BufferedByteStream): stream = util.BufferedByteStream(stream) if logger: logger.debug('remoting.decode start') msg = Envelope() msg.amfVersion = stream.read_ushort() # see http://osflash.org/documentation/amf/envelopes/remoting#preamble # why we are doing this... if msg.amfVersion > 0x09: raise pyamf.DecodeError("Malformed stream (amfVersion=%d)" % msg.amfVersion) decoder = pyamf.get_decoder(pyamf.AMF0, stream, strict=strict, timezone_offset=timezone_offset) context = decoder.context decoder.use_amf3 = msg.amfVersion == pyamf.AMF3 header_count = stream.read_ushort() for i in xrange(header_count): name, required, data = _read_header(stream, decoder, strict) msg.headers[name] = data if required: msg.headers.set_required(name) body_count = stream.read_short() for i in xrange(body_count): context.clear() target, payload = _read_body(stream, decoder, strict, logger) msg[target] = payload if strict and stream.remaining() > 0: raise RuntimeError("Unable to fully consume the buffer") if logger: logger.debug('remoting.decode end') return msg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def txtpack(fn, **kwargs): """Return a ChannelPack instance loaded with text data file fn. Attempt to read out custom channel names from the file and call instance.set_channel_names(). Then return the pack. This is a lazy function to get a loaded instance, using the cleverness provided by pulltxt module. No delimiter or rows-to-skip and such need to be provided. However, if necessary, `**kwargs` can be used to override clevered items to provide to numpys loadtxt. usecols might be such an item for example. Also, the cleverness is only clever if all data is numerical. Note that the call signature is the same as numpys `loadtxt <http://docs.scipy.org/doc/numpy/reference/generated/numpy.loadtxt.html#numpy-loadtxt>`_, which look like this:: np.loadtxt(fname, dtype=<type 'float'>, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0) But, when using this function as a wrapper, the only meaningful argument to override should be `usecols`. """
loadfunc = pulltxt.loadtxt_asdict cp = ChannelPack(loadfunc) cp.load(fn, **kwargs) names = pulltxt.PP.channel_names(kwargs.get('usecols', None)) cp.set_channel_names(names) cp._patpull = pulltxt.PP # Give a reference to the patternpull. # cp.set_basefilemtime() return cp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dbfpack(fn, usecols=None): """Return a ChannelPack instance loaded with dbf data file fn. This is a lazy function to get a loaded instance, using pulldbf module."""
loadfunc = pulldbf.dbf_asdict cp = ChannelPack(loadfunc) cp.load(fn, usecols) names = pulldbf.channel_names(fn, usecols) cp.set_channel_names(names) # cp.set_basefilemtime() return cp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, *args, **kwargs): """Load data using loadfunc. args, kwargs: forward to the loadfunc. args[0] must be the filename, so it means that loadfunc must take the filename as it's first argument. Set the filename attribute. .. note:: Updates the mask if not no_auto. ChannelPack is assuming a need for loading data from disc. If there is a desire to load some made-up data, a filename pointing to some actual file is nevertheless required. Here is a suggestion:: True {2: 'ch2', 5: 'ch5'} """
D = self.loadfunc(*args, **kwargs) if self.chnames is not None: if set(D) - set(self.chnames): raise ValueError('New data set have different keys') self.D = D self.keys = sorted(self.D.keys()) # If not all the same, there should have been an error already self.rec_cnt = len(self.D[self.keys[0]]) fallnames = _fallback_names(self.keys) self.chnames_0 = dict(zip(self.keys, fallnames)) self._set_filename(args[0]) self.set_basefilemtime() self.args = args self.kwargs = kwargs if not self.no_auto: # Called here if a reload is done on the current instance I guess. self.make_mask()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append_load(self, *args, **kwargs): """Append data using loadfunc. args, kwargs: forward to the loadfunc. args[0] must be the filename, so it means that loadfunc must take the filename as it's first argument. If self is not already a loaded instance, call load and return. Make error if there is a mismatch of channels indexes or channels count. Append the data to selfs existing data. Set filename to the new file. Create new attribute - a dict with meta-data on all files loaded, 'metamulti.' .. note:: Updates the mask if not no_auto. """
if not self.D: self.load(*args, **kwargs) return newD = self.loadfunc(*args, **kwargs) s1, s2 = set(self.D.keys()), set(newD.keys()) offenders = s1 ^ s2 if offenders: mess = ('Those keys (respectively) were in one of the dicts ' + 'but not the other: {}.') offs = ', '.join([str(n) for n in offenders]) raise KeyError(mess.format(offs)) # Append the data early to fail if fail before other actions. for k, a in self.D.iteritems(): self.D[k] = np.append(a, newD.pop(k)) if not hasattr(self, 'metamulti'): self.metamulti = dict(filenames=[], mtimestamps=[], mtimenames=[], slices=[]) self.metamulti['filenames'].append(self.filename) self.metamulti['mtimestamps'].append(self.mtimestamp) self.metamulti['mtimenames'].append(self.mtimefs) self.metamulti['slices'].append(slice(0, self.rec_cnt)) self.rec_cnt = len(self.D[self.keys[0]]) self._set_filename(args[0]) self.set_basefilemtime() start = self.metamulti['slices'][-1].stop stop = self.rec_cnt self.metamulti['filenames'].append(self.filename) self.metamulti['mtimestamps'].append(self.mtimestamp) self.metamulti['mtimenames'].append(self.mtimefs) self.metamulti['slices'].append(slice(start, stop)) if not self.no_auto: self.make_mask()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_samplerate(self, rate): """Set sample rate to rate. rate: int or float rate is given as samples / timeunit. If sample rate is set, it will have an impact on the duration rule conditions. If duration is set to 2.5 and samplerate is 100, a duration of 250 records is required for the logical conditions to be true. .. note:: Updates the mask if not no_auto."""
# Test and set value: float(rate) self.conconf.set_condition('samplerate', rate) if not self.no_auto: self.make_mask()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_condition(self, conkey, cond): """Add a condition, one of the addable ones. conkey: str One of 'cond', startcond' or 'stopcond'. 'start' or 'stop' is accepted as shorts for 'startcond' or 'stopcond'. If the conkey is given with an explicit number (like 'stopcond3') and already exist, it will be over-written, else created. When the trailing number is implicit, the first condition with a value of None is taken. If no None value is found, a new condition is added. cond: str .. note:: Updates the mask if not no_auto. .. seealso:: :meth:`~channelpack.ChannelPack.set_duration` :meth:`~channelpack.ChannelPack.set_samplerate` :meth:`~channelpack.ChannelPack.set_stopextend` :meth:`~channelpack.ChannelPack.clear_conditions` """
# Audit: if conkey == 'start' or conkey == 'stop': conkey += 'cond' if not any(conkey.startswith(addable) for addable in _ADDABLES): raise KeyError(conkey) if not self.conconf.valid_conkey(conkey): raise KeyError(conkey) self._parse_cond(cond) # Checking conkey = self.conconf.next_conkey(conkey) self.conconf.set_condition(conkey, cond) if not self.no_auto: self.make_mask()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spit_config(self, conf_file=None, firstwordonly=False): """Write a config_file based on this instance. conf_file: str (or Falseish) If conf_file is Falseish, write the file to the directory where self.filename sits, if self is not already associated with such a file. If associated, and conf_file is Falseish, use self.conf_file. If conf_file is a file name, write to that file and set self.conf_file to conf_file. firstwordonly: bool or "pattern" Same meaning as in name method, and applies to the channel names spitted. There is no effect on the instance channel names until eat_config is called. Sections in the ini/cfg kind of file can be: [channels] A mapping of self.D integer keys to channel names. Options are numbers corresponding to the keys. Values are the channel names, being the fallback names if custom names are not available (self.chnames). (When spitting that is). [conditions] Options correspond to the keys in self.conditions, values correspond to the values in the same. """
chroot = os.path.dirname(self.filename) chroot = os.path.abspath(chroot) # Figure out file name of conf_file: if hasattr(self, 'conf_file') and not conf_file: cfgfn = self.conf_file elif conf_file: cfgfn = conf_file else: cfgfn = os.path.join(chroot, CONFIG_FILE) with open(cfgfn, 'wb') as fo: self.conconf.spit_config(fo, firstwordonly=firstwordonly) self.conf_file = os.path.abspath(cfgfn)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eat_config(self, conf_file=None): """ Read the the conf_file and update this instance accordingly. conf_file: str or Falseish If conf_file is Falseish, look in the directory where self.filename sits if self is not already associated with a conf_file. If associated, and conf_file arg is Falseish, read self.conf_file. If conf_file arg is a file name, read from that file, but do not update self.conf_file accordingly. An Implicit IOError is raised if no conf_file was found. See spit_config for documentation on the file layout. .. note:: Updates the mask if not no_auto. .. note:: If the config_file exist because of an earlier spit, and custom channel names was not available, channels are listed as the fallback names in the file. Then after this eat, self.chnames will be set to the list in the conf_file section 'channels'. The result can be that self.chnames and self.chnames_0 will be equal. The message then is that, if channel names are updated, you should spit before you eat. """
chroot = os.path.dirname(self.filename) # "channels root dir" chroot = os.path.abspath(chroot) # Figure out file name of conf_file: if hasattr(self, 'conf_file') and not conf_file: cfgfn = self.conf_file elif conf_file: cfgfn = conf_file else: cfgfn = os.path.join(chroot, CONFIG_FILE) with open(cfgfn, 'r') as fo: self.conconf.eat_config(fo) # Update mask: if not self.no_auto: self.make_mask() else: self.make_mask(dry=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_stopextend(self, n): """Extend the True elements by n when setting the conditions based on a 'stopcond' condition. n is an integer >= 0. .. note:: Updates the mask if not no_auto. """
self.conconf.set_condition('stopextend', n) if not self.no_auto: self.make_mask()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_duration(self, rule): """Set the duration according to rule. rule: str The rule operating on the variable ``dur``. rule is an expression like:: setting a duration rule assuming a pack sp:: The identifier ``dur`` must be present or the rule will fail. .. note:: The logical ``or`` and ``and`` operators must be used. ``dur`` is a primitive, not an array. .. note:: Updates the mask if not no_auto. .. seealso:: :meth:`~channelpack.ChannelPack.set_samplerate` :meth:`~channelpack.ChannelPack.add_condition` :meth:`~channelpack.ChannelPack.pprint_conditions` """
self.conconf.set_condition('duration', rule) if not self.no_auto: self.make_mask()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clear_conditions(self, *conkeys, **noclear): """Clear conditions. Clear only the conditions conkeys if specified. Clear only the conditions not specified by conkeys if noclear is True (False default). .. note:: Updates the mask if not no_auto. """
offenders = set(conkeys) - set(self.conconf.conditions.keys()) if offenders: raise KeyError(', '.join([off for off in offenders])) # Valid keywords subtracted offenders = set(noclear) - set({'noclear'}) if offenders: raise KeyError(', '.join([off for off in offenders])) noclear = noclear.get('noclear', False) for ck in self.conconf.conditions: if not conkeys: # self.conconf.set_condition(ck, None) self.conconf.reset() break elif not noclear and ck in conkeys: self.conconf.set_condition(ck, None) elif noclear and ck not in conkeys: self.conconf.set_condition(ck, None) if not self.no_auto: self.make_mask()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_mask(self, clean=True, dry=False): """Set the attribute self.mask to a mask based on the conditions. clean: bool If not True, let the current mask be a condition as well. If True, the mask is set solely on the pack's current conditions dry: bool If True, only try to make a mask, but don't touch self.mask This method is called automatically unless ``no_auto`` is set to True, whenever conditions are updated. .. seealso:: :meth:`~channelpack.ChannelPack.pprint_conditions` """
cc = self.conconf # All True initially. mask = np.ones(self.rec_cnt) == True # NOQA for cond in cc.conditions_list('cond'): try: mask = mask & self._mask_array(cond) except Exception: print cond print 'produced an error:' raise # re-raise mask = mask & datautils.startstop_bool(self) samplerate = cc.get_condition('samplerate') if samplerate is not None: samplerate = float(samplerate) mask = datautils.duration_bool(mask, cc.get_condition('duration'), samplerate) if dry: return if not clean and self.mask is not None: self.mask = self.mask & mask else: self.mask = mask
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_channel_names(self, names): """ Set self.chnames. Custom channel names that can be used in calls on this object and in condition strings. names: list or None It is the callers responsibility to make sure the list is in column order. self.chnames will be a dict with channel integer indexes as keys. If names is None, self.chnames will be None. """
if not names: self.chnames = None return if len(names) != len(self.keys): raise ValueError('len(names) != len(self.D.keys())') self.chnames = dict(zip(self.keys, names))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def counter(self, ch, part=None): """Return a counter on the channel ch. ch: string or integer. The channel index number or channel name. part: int or None The 0-based enumeration of a True part to return. This has an effect whether or not the mask or filter is turned on. Raise IndexError if the part does not exist. See `Counter <https://docs.python.org/2.7/library/collections.html#counter-objects>`_ for the counter object returned. """
return Counter(self(self._key(ch), part=part))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def records(self, part=None, fallback=True): """Return an iterator over the records in the pack. Each record is supplied as a namedtuple with the channel names as field names. This is useful if each record make a meaningful data set on its own. part: int or None Same meaning as in :meth:`~channelpack.ChannelPack.__call__`. fallback: boolean The named tuple requires python-valid naming. If fallback is False, there will be an error if ``self.chnames`` is not valid names and not None. If True, fall back to the ``self.chnames_0`` on error. .. note:: The error produced on invalid names if fallback is False is not produced until iteration start. Here is a good post on stack overflow on the subject `231767 <http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python>`_ """
names_0 = [self.chnames_0[k] for k in sorted(self.chnames_0.keys())] if self.chnames is not None: names = [self.chnames[k] for k in sorted(self.chnames.keys())] try: Record = namedtuple('Record', names) except NameError: # no names Record = namedtuple('Record', names_0) names = names_0 except ValueError: # no good names if fallback: Record = namedtuple('Record', names_0) names = names_0 else: raise for tup in zip(*[self(name, part) for name in names]): yield Record(*tup)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _key(self, ch): """Return the integer key for ch. It is the key for the first value found in chnames and chnames_0, that matches ch. Or if ch is an int, ch is returned if it is a key in self.D"""
if ch in self.D: return ch if isinstance(ch, int): raise KeyError(ch) # dont accept integers as custom names if self.chnames: for item in self.chnames.items(): if item[1] == ch: return item[0] for item in self.chnames_0.items(): if item[1] == ch: return item[0] # If we got here, ch can be an int represented by a string if it comes # from a condition string: try: chint = int(ch) if chint in self.D: return chint except ValueError: pass raise KeyError(ch)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def name(self, ch, firstwordonly=False): """Return channel name for ch. ch is the channel name or the index number for the channel name, 0-based. ch: str or int. The channel name or indexed number. firstwordonly: bool or "pattern". If True, return only the first non-spaced word in the name. If a string, use as a re-pattern to re.findall and return the first element found. There will be error if no match. r'\w+' is good pattern for excluding leading and trailing obscure characters. Returned channel name is the fallback string if "custom" names are not available. """
names = self.chnames or self.chnames_0 i = self._key(ch) if not firstwordonly: return names[i] elif firstwordonly is True or firstwordonly == 1: return names[i].split()[0].strip() # According to user pattern return re.findall(firstwordonly, names[i])[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query_names(self, pat): """pat a shell pattern. See fnmatch.fnmatchcase. Print the results to stdout."""
# Print every (key, name) pair whose name matches the shell pattern.
# print() as a function keeps the module parseable on Python 3 as well;
# for a single tuple argument the Python 2 output is identical.
for item in self.chnames.items():
    if fnmatch.fnmatchcase(item[1], pat):
        print(item)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_condition(self, conkey, val): """Set condition conkey to value val. Convert val to str if not None. conkey: str A valid condition key. val: str, int, float, None Can always be None. Can be number or string depending on conkey. """
# Only keys belonging to a known condition family are accepted.
if not any(conkey.startswith(prefix) for prefix in _COND_PREFIXES):
    raise KeyError(conkey)
# None-like values clear the condition; everything else is stored as str.
self.conditions[conkey] = None if val in NONES else str(val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spit_config(self, conf_file, firstwordonly=False): """conf_file a file opened for writing."""
# Serialize channel names and conditions to conf_file in INI format.
# RawConfigParser avoids '%' interpolation of values.
cfg = ConfigParser.RawConfigParser()
for sec in _CONFIG_SECS:
    cfg.add_section(sec)
sec = 'channels'
for i in sorted(self.pack.D):
    cfg.set(sec, str(i), self.pack.name(i, firstwordonly=firstwordonly))
sec = 'conditions'
# sorted_conkeys() gives a stable, human-friendly ordering in the file.
for k in self.sorted_conkeys():
    cfg.set(sec, k, self.conditions[k])
cfg.write(conf_file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eat_config(self, conf_file): """conf_file a file opened for reading. Update the packs channel names and the conditions, accordingly. """
# Read the file: cfg = ConfigParser.RawConfigParser() cfg.readfp(conf_file) # Update channel names: sec = 'channels' mess = 'missmatch of channel keys' assert(set(self.pack.D.keys()) == set([int(i) for i in cfg.options(sec)])), mess # NOQA if not self.pack.chnames: self.pack.chnames = dict(self.pack.chnames_0) for i in cfg.options(sec): # i is a string. self.pack.chnames[self.pack._key(int(i))] = cfg.get(sec, i) # Update conditions: sec = 'conditions' # conkeys = set(self.conditions.keys()) # conops = set(cfg.options(sec)) # This check should be superfluous: # -------------------------------------------------- # for conkey in conkeys: # if not any([conkey.startswith(c) for c in _COND_PREFIXES]): # raise KeyError(conkey) # -------------------------------------------------- # for con in conkeys - conops: # Removed conditions. # self.set_condition(con, None) conops = cfg.options(sec) self.reset() # Scary for con in conops: self.set_condition(con, cfg.get(sec, con))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cond_int(self, conkey): """Return the trailing number from cond if any, as an int. If no trailing number, return the string conkey as is. This is used for sorting the conditions properly even when passing the number 10. The name of this function could be improved since it might return a string."""
m = re.match(self.numrx, conkey) if not m: return conkey return int(m.group(1))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def valid_conkey(self, conkey): """Check that the conkey is a valid one. Return True if valid. A condition key is valid if it is one in the _COND_PREFIXES list. With the prefix removed, the remaining string must be either a number or the empty string."""
for prefix in _COND_PREFIXES: trailing = conkey.lstrip(prefix) if trailing == '' and conkey: # conkey is not empty return True try: int(trailing) return True except ValueError: pass return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sorted_conkeys(self, prefix=None): """Return all condition keys in self.conditions as a list sorted suitable for print or write to a file. If prefix is given return only the ones prefixed with prefix."""
# Make for defined and sorted output: conkeys = [] for cond in _COND_PREFIXES: conkeys += sorted([key for key in self.conditions if key.startswith(cond)], key=self.cond_int) if not prefix: return conkeys return [key for key in conkeys if key.startswith(prefix)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def isordinal(x): """Checks if a list or array contains ordinal data. Warning: -------- This is not a reliable check for a variable being ordinal. The following criteria are used - There are more observations than unique values. Why? Ordinal means discrete or countable and I just assume that an ordinal sample should have some recurring (countable) values. - Values are integers or strings. Why? Ordinal scale data are usually labels (e.g. strings) or are encoded labels (e.g. as integers). - Ordinal scale data is sortable Integers imply that the Analyst encoded the labels according to the ordinal data's "natural order". Strings would imply that the alphabetic order would be the natual order (what is usually not the case) Usage: ------ obs = 10 np.random.seed(42) x1 = np.random.randint(1,50, (obs,)) x2 = np.random.randint(0,3, (obs,)) x3 = np.random.uniform(0,3, (obs,)) flag, msg = isordinal(x1) if not flag: warnings.warn(msg) """
import numpy as np if len(x) == len(np.unique(x)): return False, ("number of observations equals the " "number of unique values.") if not isinstance(x[0], str): if not np.all(np.equal(np.mod(x, 1), 0)): return False, "elements are not integer or strings." return True, "is ordinal"
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, key, default=NoDefault): """Retrieve a value from its key. Retrieval steps are: 1) Normalize the key 2) For each option group: a) Retrieve the value at that key b) If no value exists, continue c) If the value is an instance of 'Default', continue d) Otherwise, return the value 3) If no option had a non-default value for the key, return the first Default() option for the key (or :arg:`default`). """
key = normalize_key(key)
# Fallback list: the caller-supplied default (if any) followed by any
# Default() placeholders discovered while scanning the option groups.
if default is NoDefault:
    defaults = []
else:
    defaults = [default]
for options in self.options:
    try:
        value = options[key]
    except KeyError:
        continue
    if isinstance(value, Default):
        # Remember the placeholder's value but keep looking for a
        # non-default value in later groups.
        defaults.append(value.value)
        continue
    else:
        return value
if defaults:
    # Earliest default wins: caller default, else first Default() seen.
    return defaults[0]
# No value and no default anywhere: return the sentinel itself.
return NoDefault
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_children(self, include_self=False): """ Return all subsidiaries of this company. """
# Direct subsidiaries first...
ownership = Ownership.objects.filter(parent=self)
subsidiaries = Company.objects.filter(child__in=ownership)
# ...then recurse, OR-ing each child's own subtree into the queryset.
# NOTE(review): assumes the ownership graph is acyclic -- a cycle would
# recurse forever.
for sub in subsidiaries:
    subsidiaries = subsidiaries | sub.get_all_children()
if include_self is True:
    self_company = Company.objects.filter(id=self.id)
    subsidiaries = subsidiaries | self_company
return subsidiaries
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_parents(self): """ Return all parents of this company. """
# Direct parents first, then recurse up the ownership graph.
# NOTE(review): assumes the ownership graph is acyclic -- a cycle would
# recurse forever.
ownership = Ownership.objects.filter(child=self)
parents = Company.objects.filter(parent__in=ownership)
for parent in parents:
    parents = parents | parent.get_all_parents()
return parents
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_related_companies(self, include_self=False): """ Return all parents and subsidiaries of the company Include the company if include_self = True """
# Union of all ancestors and all descendants.
parents = self.get_all_parents()
subsidiaries = self.get_all_children()
related_companies = parents | subsidiaries
if include_self is True:
    company_qs = Company.objects.filter(id=self.id)
    related_companies = related_companies | company_qs
# Deduplicate via a fresh id__in query so the result is a plain,
# duplicate-free queryset rather than a long chain of OR'ed ones.
related_companies_ids = [company.id for company in list(set(related_companies))]
related_companies = Company.objects.filter(id__in=related_companies_ids)
return related_companies
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_immediate_children(self): """ Return all direct subsidiaries of this company. Excludes subsidiaries of subsidiaries """
ownership = Ownership.objects.filter(parent=self) subsidiaries = Company.objects.filter(child__in=ownership).distinct() return subsidiaries
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_immediate_children_ownership(self): """ Return all direct subsidiaries of this company AS OWNERSHIP OBJECTS. Excludes subsidiaries of subsidiaries. """
ownership = Ownership.objects.filter(parent=self).select_related('child', 'child__country') return ownership
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_immediate_parents(self): """ Return all direct parents of this company. Excludes parents of parents """
ownership = Ownership.objects.filter(child=self) parents = Company.objects.filter(parent__in=ownership).distinct() return parents
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_directors(self): """ Return all directors for this company """
directors = Director.objects.filter(company=self, is_current=True).select_related('person') return directors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cache_data(self): """ Cache some basic data such as financial statement metrics """
# Derive the slug from the company name the first time it is needed.
if not self.slug_name:
    self.slug_name = slugify(self.name).strip()
    # Keep within the 255-char column limit.  The original sliced to
    # [0:254], silently dropping one usable character.
    if len(self.slug_name) > 255:
        self.slug_name = self.slug_name[:255]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_name_on_date(self, date): """ Get the name of a company on a given date. This takes into accounts and name changes that may have occurred. """
if date is None: return self.name post_name_changes = CompanyNameChange.objects.filter(company=self, date__gte=date).order_by('date') if post_name_changes.count() == 0: return self.name else: return post_name_changes[0].name_before
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, *args, **kwargs): """ This method autogenerates the auto_generated_description field """
# Cache basic data self.cache_data() # Ensure slug doesn't change if self.id is not None: db_company = Company.objects.get(id=self.id) if self.slug_name != db_company.slug_name: raise ValueError("Cannot reset slug_name") if str(self.trade_name).strip() == "": self.trade_name = None # Short description check if len(str(self.short_description)) > 370: raise AssertionError("Short description must be no more than 370 characters") if self.sub_industry is not None: # Cache GICS self.industry = self.sub_industry.industry self.industry_group = self.sub_industry.industry.industry_group self.sector = self.sub_industry.industry.industry_group.sector # Cache GICS names self.sub_industry_name = self.sub_industry.name self.industry_name = self.industry.name self.industry_group_name = self.industry_group.name self.sector_name = self.sector.name # Call save method super(Company, self).save(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, *args, **kwargs): """ Generate a name, and ensure amount is less than or equal to 100 """
# Human-readable label: "<parent> - <child> - <ownership type>".
self.name = str(self.parent.name) + " - " + str(self.child.name) \
    + " - " + str(self.ownership_type)
# Guard clauses: a percentage must lie in [0, 100].
if self.amount > 100:
    raise ValueError("Ownership amount cannot be more than 100%")
if self.amount < 0:
    raise ValueError("Ownership amount cannot be less than 0%")
super(Ownership, self).save(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tenure(self): """ Calculates board tenure in years """
if self.end_date: return round((date.end_date - self.start_date).days / 365., 2) else: return round((date.today() - self.start_date).days / 365., 2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_toc_tree(title, input, output, content_directory): """ Builds Sphinx documentation table of content tree file. :param title: Package title. :type title: unicode :param input: Input file to convert. :type input: unicode :param output: Output file. :type output: unicode :param content_directory: Directory containing the content to be included in the table of content. :type content_directory: unicode :return: Definition success. :rtype: bool """
LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(build_toc_tree.__name__, output))
file = File(input)
file.cache()
# Basenames (without extension) of the content files actually on disk;
# only headings with a matching file become Toc Tree entries.
existing_files = [foundations.strings.get_splitext_basename(item)
                  for item in glob.glob("{0}/*{1}".format(content_directory, FILES_EXTENSION))]
relative_directory = content_directory.replace("{0}/".format(os.path.dirname(output)), "")
toc_tree = ["\n"]
for line in file.content:
    # Headings look like `Some Title`_ (reST internal links).
    search = re.search(r"`([a-zA-Z_ ]+)`_", line)
    if not search:
        continue
    item = search.groups()[0]
    # camelCase the heading to recover the content file's basename.
    code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])
    if code in existing_files:
        link = "{0}/{1}".format(relative_directory, code)
        # Indentation mirrors the heading's position in the source line.
        data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item, link)
        LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(build_toc_tree.__name__, data.replace("\n", "")))
        toc_tree.append(data)
toc_tree.append("\n")
# NOTE(review): this mutates the module-level template in place, so the
# function is not safe to call twice in one process -- confirm intent.
TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title)
TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format("=" * len(TOCTREE_TEMPLATE_BEGIN[0]))
content = TOCTREE_TEMPLATE_BEGIN
content.extend(toc_tree)
content.extend(TOCTREE_TEMPLATE_END)
file = File(output)
file.content = content
file.write()
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_command_line_arguments(): """ Retrieves command line arguments. :return: Namespace. :rtype: Namespace """
# add_help=False so "-h" can be registered explicitly below.
# NOTE(review): type=unicode is Python-2 only.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-h", "--help", action="help", help="'Displays this help message and exit.'")
parser.add_argument("-t", "--title", type=unicode, dest="title", help="'Package title.'")
parser.add_argument("-i", "--input", type=unicode, dest="input", help="'Input file to convert.'")
parser.add_argument("-o", "--output", type=unicode, dest="output", help="'Output file.'")
parser.add_argument("-c", "--content_directory", type=unicode, dest="content_directory", help="'Content directory.'")
# With no arguments at all, show usage and exit with an error status.
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)
return parser.parse_args()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initCTR(self, iv=0): """Initializes CTR mode of the cypher"""
# CTR mode packs the counter as an unsigned 64-bit word, so it only
# works when the cipher's block size is exactly 8 bytes.
assert struct.calcsize("Q") == self.blocksize()
self.ctr_iv = iv
# Pre-compute the first keystream block.
self._calcCTRBUF()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _calcCTRBUF(self): """Calculates one block of CTR keystream"""
self.ctr_cks = self.encrypt(struct.pack("Q", self.ctr_iv)) # keystream block self.ctr_iv += 1 self.ctr_pos = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _nextCTRByte(self): """Returns one byte of CTR keystream"""
# Python-2 bytes indexing yields a 1-char str; ord() gives its value.
b = ord(self.ctr_cks[self.ctr_pos])
self.ctr_pos += 1
if self.ctr_pos >= len(self.ctr_cks):
    # Keystream block exhausted -- generate the next one.
    self._calcCTRBUF()
return b
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _call(self, method, path, data=None): """ Do the actual HTTP request """
# httplib was renamed http.client in Python 3.
if is_python3():
    conn = http.client.HTTPConnection(API_HOST)
else:
    conn = httplib.HTTPConnection(API_HOST)
headers = {'User-Agent' : USER_AGENT}
if data:
    # Form-encode the payload only when there is a body to send.
    headers.update( {'Content-type': 'application/x-www-form-urlencoded'} )
    conn.request(method, path, self._urlencode(data), headers)
else:
    conn.request(method, path, None, headers)
response = conn.getresponse()
# Return (status code, decoded JSON body).
result = [response.status, self._parse_body(response.read())]
conn.close()
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_body(self, body): """ For just call a deserializer for FORMAT"""
if is_python3(): return json.loads(body.decode('UTF-8')) else: return json.loads(body)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_urls(self): """ Extend the admin urls for the CompetitionEntryAdmin model to be able to invoke a CSV export view on the admin model """
urls = super(CompetitionEntryAdmin, self).get_urls()
# admin_view() wraps the view with the admin permission checks.
csv_urls = patterns('',
    url(
        r'^exportcsv/$',
        self.admin_site.admin_view(self.csv_export),
        name='competition-csv-export'
    )
)
# Custom urls must come first so they match before the admin defaults.
return csv_urls + urls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def csv_export(self, request): """ Return a CSV document of the competition entry and its user details """
# Stream the CSV straight into the HTTP response as an attachment.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=competitionentries.csv'
# create the csv writer with the response as the output file
writer = UnicodeWriter(response)
writer.writerow([
    'Competition ID',
    'Competition',
    'First Name',
    'Last Name',
    'Email Address',
    'Cell Number',
    'Question',
    'Answer File',
    'Answer Option',
    'Answer Text',
    'Has Correct Answer',
    'Winner',
    'Time Stamp'
])
# This sucks big time. get_urls is cached upon first call, which means
# it has no concept of a filter currently being applied to the
# changelist. Grab the querystring from the referrer and re-use
# changelist API to apply the filtering for us.
try:
    dc, qs = request.META.get('HTTP_REFERER', '').split('?')
except ValueError:
    # No '?' in the referrer (or no referrer): export unfiltered.
    qs = ''
request.META['QUERY_STRING'] = qs
queryset = self.get_changelist(request)(
    request, self.model, self.list_display,
    self.list_display_links, self.list_filter, self.date_hierarchy,
    self.search_fields, self.list_select_related, self.list_per_page,
    self.list_max_show_all, self.list_editable, self
).get_query_set(request)
# select_related is too slow, so cache for fast lookups. This will not
# scale indefinitely.
competition_map = {}
ids = queryset.distinct('competition').values_list(
    'competition_id', flat=True
)
for obj in Competition.objects.filter(id__in=ids):
    competition_map[obj.id] = obj
# Looking up individual members is too slow, so cache for fast
# lookups. This will not scale indefinitely.
member_mobile_number_map = {}
ids = queryset.distinct('user').values_list(
    'user_id', flat=True
)
for di in Member.objects.filter(id__in=ids).values(
    'id', 'mobile_number'
):
    member_mobile_number_map[di['id']] = di['mobile_number']
# One CSV row per entry; optional fields fall back to ''.
for entry in queryset:
    competition = competition_map[entry.competition_id]
    entry.competition = competition
    row = [
        entry.competition.id,
        entry.competition.title,
        entry.user.first_name,
        entry.user.last_name,
        entry.user.email,
        member_mobile_number_map.get(entry.user_id, ''),
        entry.competition.question,
        entry.answer_file.name if entry.answer_file else '',
        entry.answer_option.text if entry.answer_option else '',
        entry.answer_text,
        entry.has_correct_answer(),
        entry.winner,
        entry.timestamp
    ]
    writer.writerow(['' if f is None else unicode(f) for f in row])  # '' instead of None
return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def filelist(folderpath, ext=None):
    ''' Return a list of paths (strings) of the files directly contained
    in the folder specified by `folderpath`.

    `ext` is a list of file extensions (each including the leading `.`)
    used to filter the files; with no `ext` given, no files match and an
    empty list is returned.

    If `folderpath` does not exist or is not a directory, a warning is
    logged and None is returned.
    '''
    if not ext:
        ext = []
    if os.path.exists(folderpath) and os.path.isdir(folderpath):
        return [
            os.path.join(folderpath, f) for f in os.listdir(folderpath)
            if os.path.isfile(os.path.join(folderpath, f))
            and os.path.splitext(f)[1] in ext
        ]
    else:
        log.warn('"{}" does not exist or is not a directory'.format(folderpath))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def particles(category=None):
    ''' Return a dict of old greek particles grouped by category, loaded
    from the bundled particles.json.

    If `category` is given and present, only that category's list is
    returned; an unknown category logs a warning and the whole dict is
    returned.  A malformed JSON file logs an error and returns an empty
    dict.
    '''
    filepath = os.path.join(os.path.dirname(__file__), './particles.json')
    with open(filepath) as f:
        try:
            particles = json.load(f)
        except ValueError:
            log.error('Bad json format in "{}"'.format(filepath))
            # The original fell through to `return particles` with the
            # name unbound (NameError); return an empty mapping instead.
            return {}
        else:
            if category:
                if category in particles:
                    return particles[category]
                else:
                    log.warn('Category "{}" not contained in particle dictionary!'.format(category))
            return particles
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parts():
    ''' Return a dict mapping each part of the New Testament to the list
    of book numbers (1-27) it contains. '''
    parts = {
        'Canon': list(range(1, 5)),        # Gospels (1-4)
        'Apostle': [5],                    # Acts
        'Paul': list(range(6, 19)),        # Pauline epistles (6-18)
        # The original used range(19, 26), leaving book 26 unassigned;
        # the General epistles run through book 26.
        'General': list(range(19, 27)),
        'Apocalypse': [27],                # Revelation
    }
    return parts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def component_activated(self, component): """Initialize additional member variables for components. Every component activated through the `Environment` object gets an additional member variable: `env` (the environment object) """
# Attach the environment so components can reach shared services via
# self.env, then continue normal activation.
component.env = self
super(Environment, self).component_activated(component)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def interpret(self, msg): """ Create a slide show """
# Caption location is optional and defaults to the current directory.
self.captions = msg.get('captions', '.')
for slide in msg['slides']:
    self.add(slide)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_duration(self, duration): """ Calculate how long each slide should show """
# Total time already claimed by slides with an explicit 'time'.
fixed = sum(int(x.get('time', 0)) for x in self.slides)
# NOTE(review): 'fixed' is never used below -- the per-slide wait is
# duration/unfixed rather than (duration - fixed)/unfixed; confirm
# whether the fixed time was meant to be subtracted.
nfixed = len([x for x in self.slides if x.get('time', 0) > 0])
unfixed = len(self.slides) - nfixed
# NOTE(review): raises ZeroDivisionError when every slide has a fixed
# time (unfixed == 0).
self.wait = max(1, int(duration / unfixed))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Run the show """
self.show()
if not self.wait:
    # No duration was ever set -- nothing to pace; stop after showing.
    return
for image in self.slides:
    # Each slide waits at least the computed default, longer when the
    # slide carries its own 'time'.
    wait = image.get('time', 0)
    wait = max(self.wait, wait)
    print('waiting %d seconds %s' % (
        wait, image.get('image', '')))
    yield image
    time.sleep(wait)
    self.next()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fastaIterator(fn, useMutableString=False, verbose=False): """ A generator function which yields fastaSequence objects from a fasta-format file or stream. :param fn: a file-like stream or a string; if this is a string, it's treated as a filename, else it's treated it as a file-like object, which must have a readline() method. :param useMustableString: if True, construct sequences from lists of chars, rather than python string objects, to allow more efficient editing. Use with caution. :param verbose: if True, output additional status messages to stderr about progress """
# Accept either a filename or an open file-like object.
# NOTE(review): the type-name check means subclasses of str are not
# recognized as filenames; isinstance would be the usual test.
fh = fn
if type(fh).__name__ == "str":
    fh = open(fh)
if verbose:
    try:
        pind = __build_progress_indicator(fh)
    except ProgressIndicatorError as e:
        # Streams without a knowable size can't show progress; degrade
        # gracefully instead of failing the iteration.
        sys.stderr.write("Warning: unable to show progress for stream. " +
                         "Reason: " + str(e))
        verbose = False
prev_line = None
while True:
    # prev_line carries the header line that terminated the previous
    # record's sequence data into the next iteration.
    seqHeader = __read_seq_header(fh, prev_line)
    name = seqHeader[1:].strip()
    seq_data, prev_line = __read_seq_data(fh)
    if verbose:
        pind.done = fh.tell()
        pind.showProgress(to_strm=sys.stderr)
    yield Sequence(name, seq_data, useMutableString)
    # remember where we stopped for next call, or finish
    if prev_line == "":
        break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_default(self): """ Returns the default value for this field. The default implementation on models.Field calls force_unicode on the default, which means you can't set arbitrary Python objects as the default. To fix this, we just return the value without calling force_unicode on it. Note that if you set a callable as a default, the field will still call it. It will *not* try to pickle and encode it. """
if self.has_default(): if callable(self.default): return self.default() return self.default # If the field doesn't have a default, then we punt to models.Field. return super(PickledObjectField, self).get_default()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_python(self, value): """ B64decode and unpickle the object, optionally decompressing it. If an error is raised in de-pickling and we're sure the value is a definite pickle, the error is allowed to propogate. If we aren't sure if the value is a pickle or not, then we catch the error and return the original value instead. """
if value is not None:
    try:
        value = dbsafe_decode(value, self.compress)
    except:
        # The bare except is deliberate: a decode failure means the
        # value is assumed to be a plain (non-pickled) object and is
        # returned untouched -- unless it is definitely a pickle, in
        # which case the error must surface.
        # If the value is a definite pickle; and an error is raised in
        # de-pickling it should be allowed to propogate.
        if isinstance(value, PickledObject):
            raise
return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_db_prep_value(self, value): """ Pickle and b64encode the object, optionally compressing it. The pickling protocol is specified explicitly (by default 2), rather than as -1 or HIGHEST_PROTOCOL, because we don't want the protocol to change over time. If it did, ``exact`` and ``in`` lookups would likely fail, since pickle would now be generating a different string. """
# Already-encoded PickledObject values pass through untouched so the
# round trip stays idempotent.
if value is not None and not isinstance(value, PickledObject):
    # We call force_unicode here explicitly, so that the encoded string
    # isn't rejected by the postgresql_psycopg2 backend. Alternatively,
    # we could have just registered PickledObject with the psycopg
    # marshaller (telling it to store it like it would a string), but
    # since both of these methods result in the same value being stored,
    # doing things this way is much easier.
    value = force_unicode(dbsafe_encode(value, self.compress))
return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nextComment(self, text, start=0): """Return the next comment found in text starting at start. """
# Candidates: next line comment, next block comment, next empty line.
# min() picks the match starting earliest; a failed search (None) is
# ranked at end-of-text so it never wins while another match exists.
m = min([self.lineComment(text, start),
         self.blockComment(text, start),
         self._emptylineregex.search(text, start)],
        key=lambda m: m.start(0) if m else len(text))
return m
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def isLineComment(self, text): """Return true if the text is a line comment. """
m = self.lineComment(text, 0) return m and m.start(0) == 0 and m.end(0) == len(text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nextValidComment(self, text, start=0): """Return the next actual comment. """
# Earliest of the next line comment or block comment; unlike
# nextComment(), empty lines are not candidates here.
m = min([self.lineComment(text, start), self.blockComment(text, start)],
        key=lambda m: m.start(0) if m else len(text))
return m
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extractContent(self, text): """Extract the content of comment text. """
m = self.nextValidComment(text) return '' if m is None else m.group(1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def chunkComment(self, text, start=0): """Return a list of chunks of comments. """
# Build a list of comments comm, out = self.nextComment(text, start), [] while comm: out.append(comm.group(0)) comm = self.nextComment(text, comm.start(0) + 1) # Collect the comments according to whether they are line # comments or block comments. out = [list(g) for (_, g) in groupby(out, self.isLineComment)] # Filter out seperator lines. out = [i for i in out if i != ['']] return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def code(self, text): """Return the code instead of the comments. """
comm = self.nextValidComment(text) while comm: text = text[:comm.start()] + text[comm.end():] comm = self.nextValidComment(text, comm.end(0)) return text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_object(self, *args, **kwargs): """ Should memoize the object to avoid multiple query if get_object is used many times in the view """
# Stash the category on the view for reuse by other methods.
# NOTE(review): despite the docstring, this does not actually memoize
# -- each call re-queries both objects; confirm intended behaviour.
self.category_instance = get_object_or_404(Category, slug=self.kwargs['category_slug'])
return get_object_or_404(Post,
                         thread__id=self.kwargs['thread_id'],
                         thread__category=self.category_instance,
                         pk=self.kwargs['post_id'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def each_cons(sequence, size): """Iterates lazily through a sequence looking at a sliding window with given size, for each time. each_cons([1, 2, 3, 4], 2) --> [(0, 1), (1, 2), (2, 3), (3, 4)] """
return zip(*(islice(it, start, None) for start, it in enumerate(tee(sequence, size))))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def __filter(self, name): '''Filter out problematic characters. This should become a separate module allowing the user to define filter rules from a bootstrap file and most likely become a separate module. ''' name = name.replace("'", '') name = name.replace('"', '') name = name.replace('!(null)', '') name = name.replace(" ", "_") name = name.replace("/", "_") name = name.replace(".", "_") return name.lower()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check(self): """ Check if pkg has a later version Returns true if later version exists """
current = self._get_current() highest = self._get_highest_version() return highest > current
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bugzscout_app(environ, start_response): """Simple WSGI application that returns 200 OK response with 'Hellow world!' in the body. If an uncaught exception is thrown, it is reported to BugzScout. :param environ: WSGI environ :param start_response: function that accepts status string and headers """
try: start_response('200 OK', [('content-type', 'text/html')]) return ['Hellow world!'] except Exception as ex: # Set the description to a familiar string with the exception # message. Add the stack trace to extra. b.submit_error('An error occurred in MyApp: {0}'.format(ex.message), extra=traceback.extract_tb(*sys.exc_info())) # Reraise the exception. raise ex
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _request(self, base_url, client_id, client_secret, parameters, **kwargs): """Make an API request to get the token"""
logging.debug('Getting an OAuth token for client "%s" with scope "%s"', client_id, parameters.get('scope')) headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'} api = API(base_url, auth_username=client_id, auth_password=client_secret, **kwargs) endpoint = api.auth.token response = yield endpoint.post(body=urllib.urlencode(parameters), request_timeout=60, headers=headers) logging.debug('Received token: %s', response.get('access_token')) raise Return(response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _cached_request(self, base_url, client_id, client_secret, parameters, **kwargs): """Cache the token request and use cached responses if available"""
key = (base_url, client_id, tuple(parameters.items())) cached = self._cache.get(key, {}) if not cached.get('access_token') or self._expired(cached): cached = yield self._request(base_url, client_id, client_secret, parameters, **kwargs) self._cache[key] = cached # Purge cache when adding a new item so it doesn't grow too large # It's assumed the cache size is small enough that it's OK to loop # over the whole cache regularly. If not, could change this to # just pop off the oldest one self.purge_cache() logging.debug('Using a cached token: %s', cached.get('access_token')) raise Return(cached)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def purge_cache(self): """ Purge expired cached tokens and oldest tokens if more than cache_size """
if len(self._cache) > self.max_cache_size: items = sorted(self._cache.items(), key=lambda (k, v): v['expiry']) self._cache = {k: v for k, v in items[self.max_cache_size:] if not self._expired(v)}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_user_agent(self): """ Create the user agent and return it as a string. """
user_agent = '{}/{} {}'.format(pyspacegdn.__title__, pyspacegdn.__version__, default_user_agent()) if self.client_name: user_agent = '{}/{} {}'.format(self.client_name, self.client_version, user_agent) return user_agent
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def load_configuration(app_name): ''' creates a new configuration and loads the appropriate files. ''' if sys.prefix == '/usr': conf_dir = '/etc' share_dir = '/usr/share' else: conf_dir = os.path.join(sys.prefix, 'etc') share_dir = os.path.join(sys.prefix, 'share') # Step 1: try to locate pynlp.yml yml_config = {} for fname in [ '%s.yml'%(app_name,), os.path.expanduser('~/.%s.yml'%(app_name,)), os.path.join(conf_dir, '%s.yml'%(app_name,))]: if os.path.exists(fname): yml_config = yaml.load(open(fname)) break try: data_dir = yml_config['paths']['data_dir'] except KeyError: try: data_dir = os.environ[app_name.upper()] except KeyError: data_dir = os.path.join(share_dir, app_name) return AppContext(yml_config, data_dir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_base_wrappers(method='get', template_name='', predicates=(), wrappers=()): """ basic View Wrappers used by view_config. """
wrappers += (preserve_view(MethodPredicate(method), *predicates),) if template_name: wrappers += (render_template(template_name),) return wrappers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def view_config( method='get', template_name='', predicates=(), wrappers=(), base_wrappers_getter=get_base_wrappers, ): """ Creating Views applied some configurations and store it to _wrapped attribute on each Views. * _wrapped expects to be called by Controller (subclasses of uiro.controller.BaseController) * The original view will not be affected by this decorator. """
wrappers = base_wrappers_getter(method, template_name, predicates, wrappers) def wrapper(view_callable): def _wrapped(*args, **kwargs): return reduce( lambda a, b: b(a), reversed(wrappers + (view_callable,)) )(*args, **kwargs) view_callable._wrapped = _wrapped view_callable._order = next(_counter) return view_callable return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def preserve_view(*predicates): """ Raising ViewNotMatched when applied request was not apposite. preserve_view calls all Predicates and when return values of them was all True it will call a wrapped view. It raises ViewNotMatched if this is not the case. Predicates: This decorator takes Predicates one or more, Predicate is callable to return True or False in response to inputted request. If the request was apposite it should return True. """
def wrapper(view_callable): def _wrapped(self, request, context, *args, **kwargs): if all([predicate(request, context) for predicate in predicates]): return view_callable(self, request, context, *args, **kwargs) else: raise ViewNotMatched return _wrapped return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render_template(template_name, template_getter=get_app_template): """ Decorator to specify which template to use for Wrapped Views. It will return string rendered by specified template and returned dictionary from wrapped views as a context for template. The returned value was not dictionary, it does nothing, just returns the result. """
def wrapper(func): template = template_getter(template_name) def _wraped(self, request, context, *args, **kwargs): res = func(self, request, context, *args, **kwargs) if isinstance(res, dict): return template.render(**res) else: return res return _wraped return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prompt(self, prompt_msg=None, newline=False): """ Writes prompt message to output stream and reads line from standard input stream. `prompt_msg` Message to write. `newline` Append newline character to prompt message before writing. Return string. """
if prompt_msg is not None: self.write(prompt_msg, newline) return self._input.readline().rstrip(os.linesep)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, buf, newline=True): """ Writes buffer to output stream. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """
buf = buf or '' if newline: buf += os.linesep try: self._output.write(buf) if hasattr(self._output, 'flush'): self._output.flush() except IOError as exc: if exc.errno != errno.EPIPE: # silence EPIPE errors raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def success(self, buf, newline=True): """ Same as `write`, but adds success coloring if enabled. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """
if self._colored: buf = self.ESCAPE_GREEN + buf + self.ESCAPE_CLEAR self.write(buf, newline)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def error(self, buf, newline=True): """ Similar to `write`, except it writes buffer to error stream. If coloring enabled, adds error coloring. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """
buf = buf or '' if self._colored: buf = self.ESCAPE_RED + buf + self.ESCAPE_CLEAR if newline: buf += os.linesep try: self._error.write(buf) if hasattr(self._error, 'flush'): self._error.flush() except IOError as exc: if exc.errno != errno.EPIPE: # silence EPIPE errors raise