text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def svalue(self):
    """Get serialized value.

    When no serialized value is cached, fall back to serializing
    ``self.value``; a failing value resolution is silently ignored.

    :rtype: str
    """
    if self._svalue is not None:
        return self._svalue
    try:
        raw = self.value
    except Parameter.Error:
        # value could not be resolved: leave the serialized value unset
        return None
    self._svalue = self.serializer(raw)
    return self._svalue
def svalue(self, value):
    """Change of serialized value. Nonify this value as well.

    :param str value: serialized value to use.
    """
    if value is None:
        return
    # drop the cached parsed value and previous error; the new serialized
    # form becomes the single source of truth
    self._value = None
    self._error = None
    self._svalue = value
def resolve(
        self,
        configurable=None, conf=None, scope=None, ptype=None,
        parser=None, error=True, svalue=None, safe=None, besteffort=None
):
    """Resolve this parameter value related to a configurable and a
    configuration. Save error in this attribute `error` in case of failure.

    :param str svalue: serialized value to resolve. Default is this svalue.
    :param Configurable configurable: configurable for foreign resolution.
    :param Configuration conf: configuration for cross-value resolution.
    :param dict scope: variables for local expression evaluation.
    :param type ptype: return type. Default is this ptype.
    :param parser: specific parser to use. Default this parser.
    :param bool error: raise an error if True.
    :param bool safe: if True (default) resolve without builtins functions.
    :param bool besteffort: best effort flag. Default is this besteffort.
    :return: newly resolved value.
    :raises: Parameter.Error for any raised exception.
    """
    # nothing to do when a value is cached or no serialized value exists
    if self._value is not None or self._svalue is None:
        return self._value
    self._error = None
    # fall back on instance attributes for every unset argument
    ptype = self.ptype if ptype is None else ptype
    parser = self.parser if parser is None else parser
    svalue = self._svalue if svalue is None else svalue
    conf = self.conf if conf is None else conf
    configurable = self.configurable if configurable is None else configurable
    if scope is None:
        scope = self.scope
    else:
        # caller-provided variables override this parameter's own scope
        merged = self.scope.copy()
        merged.update(scope)
        scope = merged
    safe = self.safe if safe is None else safe
    besteffort = self.besteffort if besteffort is None else besteffort
    try:
        self._value = parser(
            svalue=svalue, conf=conf, configurable=configurable,
            ptype=ptype, scope=scope, safe=safe, besteffort=besteffort
        )
    except Exception as ex:
        self._error = ex
        if error:
            msg = 'Impossible to parse value ({0}) with {1}.'
            msg = msg.format(self._svalue, self.parser)
            reraise(Parameter.Error, Parameter.Error(msg))
        return None
    return self._value
def value(self):
    """Get parameter value.

    If this cached value is None and this serialized value is not None,
    calculate the new value from the serialized one.

    :return: parameter value.
    :raises: TypeError if serialized value is not an instance of self ptype.
        ParserError if parsing step raised an error.
    """
    if self._value is None and self._svalue is not None:
        try:
            self._value = self.resolve()
        except Exception:
            reraise(
                Parameter.Error,
                Parameter.Error('Call the method "resolve" first.')
            )
    return self._value
def value(self, value):
    """Change of parameter value.

    If an error occured, it is stored in this error attribute.

    :param value: new value to use. If input value is not an instance of
        self.ptype, self error is updated.
    :raises: TypeError if input value is not an instance of self ptype.
    """
    acceptable = (
        value is None or self.ptype is None or isinstance(value, self.ptype)
    )
    if not acceptable:
        # remember the failure before raising, as documented
        error = TypeError(
            'Wrong value type of {0} ({1}). {2} expected.'.format(
                self.name, value, self.ptype
            )
        )
        self._error = error
        raise error
    self._value = value
def to_cartesian(r, theta, theta_units="radians"):
    """Converts polar r, theta to cartesian x, y.

    :param r: radial distance.
    :param theta: angle, expressed in ``theta_units``.
    :param theta_units: 'radians' (default) or 'degrees'.
    :return: ``(x, y)`` tuple.
    """
    assert theta_units in ['radians', 'degrees'],\
        "kwarg theta_units must specified in radians or degrees"
    # normalize the angle to radians before projecting
    if theta_units == "degrees":
        theta = to_radians(theta)
    theta = to_proper_radians(theta)
    return r * cos(theta), r * sin(theta)
def set_index(data, col_index):
    """Sets the index if the index is not present.

    :param data: pandas table
    :param col_index: column name which will be assigned as a index
    :return: the re-indexed table, or None when ``col_index`` is neither a
        column nor the current index (an error is logged in that case).
    """
    if col_index in data:
        indexed = data.reset_index().set_index(col_index)
        # drop the positional column that reset_index may have left behind
        if 'index' in indexed:
            del indexed['index']
        return indexed
    if data.index.name == col_index:
        # already indexed as requested
        return data
    logging.error("something's wrong with the df")
    df2info(data)
def fhs2data_combo(fhs, cols, index, labels=None, col_sep=': '):
    """Collates data from multiple csv files.

    :param fhs: list of paths to csv files
    :param cols: list of column names to concatenate
    :param index: name of the column to be used as the common index of the
        output pandas table
    :param labels: per-file column prefixes; file basenames by default
    :param col_sep: separator between label and column name
    :return: combined DataFrame, or None when ``fhs`` is empty (logged).
    """
    if labels is None:
        labels = [basename(fh) for fh in fhs]
    if len(fhs) == 0:
        logging.error('no fhs found: len(fhs)=0')
        return None
    data_combo = None
    for label, fh in zip(labels, fhs):
        data = pd.read_csv(fh).set_index(index)
        if data_combo is None:
            # the first file defines the common index
            data_combo = pd.DataFrame(index=data.index)
        # Fix: the original duplicated this column loop in both branches of
        # `if fhi == 0`; one shared loop does the same work.
        for col in cols:
            data_combo.loc[:, '%s%s%s' % (label, col_sep, col)] = data.loc[:, col]
    return data_combo
def __create_session(username=None, password=None):
    """grabs the configuration, and makes the call to Authentise to create the session"""
    config = Config()
    if not username or not password:
        # fall back to configured credentials when either is missing
        username, password = config.username, config.password
    payload = {"username": username, "password": password}
    session_resp = requests.post(
        "https://users.{}/sessions/".format(config.host), json=payload)
    if session_resp.status_code == 403:
        raise errors.ResourceError("bad user credentials")
    return session_resp.cookies["session"]
def create_user(cls, username, password, name, email):
    """utility class method to create a user"""
    config = Config()
    payload = {
        "username": username,
        "email": email,
        "name": name,
        "password": password,
    }
    resp = requests.post(
        "https://users.{}/users/".format(config.host), json=payload)
    # anything other than 201 Created is treated as a failure
    if resp.status_code != 201:
        raise errors.ResourceError("couldnt create user")
def wrap_text(paragraph, line_count, min_char_per_line=0):
    """Wraps the given text to the specified number of lines.

    :param paragraph: source text; whitespace is normalized first.
    :param line_count: desired number of lines.
    :param min_char_per_line: when non-zero, wrap at this width and keep at
        most ``line_count`` lines; otherwise derive the width from the text
        length so the result fits in roughly ``line_count`` lines.
    :return: list of wrapped lines.
    """
    one_string = strip_all_white_space(paragraph)
    if min_char_per_line:
        # Fix: slicing never raises IndexError, so the old try/except was
        # dead code — a short result simply has fewer lines.
        return wrap(one_string, width=min_char_per_line)[:line_count]
    # Fix: use integer division; textwrap requires an int width, and "/"
    # yields a float under Python 3.
    return wrap(one_string, len(one_string) // line_count)
def jsonify(data, pretty=False, **kwargs):
    """Serialize Python objects to JSON with optional 'pretty' formatting

    Raises:
        TypeError: from :mod:`json` lib
        ValueError: from :mod:`json` lib
        JSONDecodeError: from :mod:`json` lib
    """
    params = {'for_json': True, 'default': _complex_encode}
    if pretty:
        params['indent'] = 2
        # keep OrderedDict ordering; sort everything else for stable output
        params['sort_keys'] = not isinstance(data, OrderedDict)
    params.update(kwargs)
    try:
        return json.dumps(data, ensure_ascii=False, **params)
    except UnicodeDecodeError:
        # retry with the library's default ascii handling
        return json.dumps(data, **params)
def get_emit_api(self, action):
    """Build emit api.

    :param action: event action name inserted into the URL.
    :return: fully-formed emit endpoint URL string.
    """
    params = {'action': action}
    # values from the context take precedence over the action default
    params.update(self.context)
    template = (
        '%(scheme)s://%(sender)s:%(token)s@%(domain)s:%(port)d'
        '/event/%(project)s/emit/%(action)s'
    )
    return template % params
def serialize(data, format: str = 'json', pretty: bool = False):
    """Serialize a stellata object to a string format.

    :param data: object or plain structure to serialize.
    :param format: 'json', 'msgpack', or anything else to pass data through.
    :param pretty: pretty-print JSON output with indentation.
    """
    def encode(obj):
        # project models and common scalar types get explicit encodings
        if isinstance(obj, stellata.model.Model):
            return obj.to_dict()
        if isinstance(obj, datetime.datetime):
            return int(obj.timestamp())
        if isinstance(obj, datetime.date):
            return obj.isoformat()
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        if hasattr(obj, 'serialize'):
            return obj.serialize()
        return obj

    if format == 'msgpack':
        return msgpack.packb(data, default=encode)
    if format == 'json':
        return json.dumps(data, default=encode, indent=4 if pretty else None)
    # unknown format: pass the data through untouched
    return data
def pretty_exe_doc(program, parser, stack=1, under='-'):
    """Takes the name of a script and a parser that will give the help
    message for it. The module that called this function will then add a
    header to the docstring of the script, followed immediately by the help
    message generated by the OptionParser.

    :param str program: Name of the program that we want to make the header
    :param optparser.Option parser: Either a parser or a callable with no
        arguments that will give the desired parser
    :param int stack: How far up the stack to get the docstring to change
    :param str under: The character you want for the program underline
    """
    # only active while building docs under Sphinx
    if os.path.basename(sys.argv[0]) != 'sphinx-build':
        return
    # Get the calling module whose docstring will be rewritten
    mod = inspect.getmodule(inspect.stack()[stack][0])
    # Accept either a ready parser or a factory for one
    _parser = parser() if '__call__' in dir(parser) else parser
    # Make the parser use the correct program
    _parser.set_usage(mod.__usage__.replace('%prog', program))
    header = ['', program, under * len(program), '::', '']
    usage = [' %s' % l for l in _parser.format_help().split('\n')]
    mod.__doc__ = '\n'.join(header + usage) + mod.__doc__
def get_key(self, key_id):
    """Returns a restclients.Key object for the given key ID.

    If the key ID isn't found, or if there is an error communicating with
    the KWS, a DataFailureException will be thrown.
    """
    resource = self._get_resource(ENCRYPTION_KEY_URL.format(key_id))
    return self._key_from_json(resource)
def get_current_key(self, resource_name):
    """Returns a restclients.Key object for the given resource.

    If the resource isn't found, or if there is an error communicating with
    the KWS, a DataFailureException will be thrown.
    """
    resource = self._get_resource(ENCRYPTION_CURRENT_KEY_URL.format(resource_name))
    return self._key_from_json(resource)
def _key_from_json(self, data):
    """Internal method, for creating the Key object."""
    key = Key()
    # straight JSON-field to attribute copies
    for attr, json_name in (("algorithm", "Algorithm"),
                            ("cipher_mode", "CipherMode"),
                            ("key_id", "ID"),
                            ("key", "Key"),
                            ("size", "KeySize"),
                            ("url", "KeyUrl")):
        setattr(key, attr, data[json_name])
    # drop fractional seconds before parsing the timestamp
    key.expiration = datetime.strptime(data["Expiration"].split(".")[0],
                                       "%Y-%m-%dT%H:%M:%S")
    return key
def newSession():
    """Returns a new Requests session with pre-loaded default HTTP Headers.

    Consults the Configuration class to determine if a Configuration exists
    and uses the configured HTTP Request headers first. If this fails, it
    attempts to create a new default configuration and use those values.
    Finally, if a configuration cannot be initialized it uses the hard-coded
    Mozilla headers.

    Returns
       request-client - The configured Requests session

    Raises
       HTTPException
    """
    from neolib.config.Configuration import Configuration
    s = requests.session()
    if not Configuration.loaded():
        if not Configuration.initialize():
            # no configuration available: fall back to hard-coded headers
            s.headers.update(Page._defaultVars)
        else:
            s.headers.update(Configuration.getConfig().core.HTTPHeaders.toDict())
    else:
        s.headers.update(Configuration.getConfig().core.HTTPHeaders.toDict())
    # Fix: return the configured session; the original returned a brand-new
    # requests.session(), silently discarding every header set above.
    return s
def get_model_id_constraints(model):
    """Returns constraints to target a specific model."""
    # delegate to the generic helper using the model's own key metadata
    return get_id_constraints(model.primary_key_name, model.primary_key)
def get_id_constraints(pkname, pkey):
    """Returns primary key consraints.

    :pkname: if a string, returns a dict with pkname=pkey. Otherwise pkname
        and pkey must be enumerables of matching length.
    """
    if not isinstance(pkname, str):
        # composite key: pair names with values position by position
        return dict(zip(pkname, pkey))
    return {pkname: pkey}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _find_model(self, constructor, table_name, constraints=None, *, columns=None, order_by=None): """Calls DataAccess.find and passes the results to the given constructor."""
data = self.find(table_name, constraints, columns=columns, order_by=order_by) return constructor(data) if data else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _find_models(self, constructor, table_name, constraints=None, *, columns=None, order_by=None, limiting=None): """Calls DataAccess.find_all and passes the results to the given constructor."""
for record in self.find_all(table_name, constraints, columns=columns, order_by=order_by, limiting=limiting): yield constructor(record)
def find_model(self, constructor, constraints=None, *, columns=None,
               table_name=None, order_by=None):
    """Specialization of DataAccess.find that returns a model instead of
    cursor object."""
    # the constructor's declared table is the default target
    table = table_name or constructor.table_name
    return self._find_model(constructor, table, constraints,
                            columns=columns, order_by=order_by)
def find_models(self, constructor, constraints=None, *, columns=None,
                order_by=None, limiting=None, table_name=None):
    """Specialization of DataAccess.find_all that returns models instead of
    cursor objects."""
    # the constructor's declared table is the default target
    table = table_name or constructor.table_name
    return self._find_models(constructor, table, constraints,
                             columns=columns, order_by=order_by,
                             limiting=limiting)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def page_models(self, constructor, paging, constraints=None, *, columns=None, order_by=None): """Specialization of DataAccess.page that returns models instead of cursor objects."""
records, count = self.page(constructor.table_name, paging, constraints, columns=columns, order_by=order_by) return ([constructor(r) for r in records], count)
def find_model_by_id(self, constructor, id_, *, columns=None):
    """Searches for a model by id, according to its class' primary_key_name.

    If primary_key_name is a tuple, id_ must be a tuple with a matching
    length.
    """
    constraints = get_id_constraints(constructor.primary_key_name, id_)
    return self.find_model(constructor, constraints, columns=columns)
def refresh_model(self, model, *, overwrite=False):
    """Pulls the model's record from the database.

    If overwrite is True, the model values are overwritten and returns the
    model, otherwise a new model instance with the newer record is returned.
    """
    fresh = self.find_model_by_id(model.__class__, model.primary_key)
    if not overwrite:
        return fresh
    # copy every stored attribute (ids included) onto the existing instance
    model.update(fresh.to_dict(use_default_excludes=False))
    return model
def update_model(self, model, *, include_keys=None):
    """Updates a model.

    :include_keys: if given, only updates the given attributes. Otherwise,
        updates all non-id attributes.
    """
    id_constraints = get_model_id_constraints(model)
    if include_keys is None:
        # default: every SQL-persisted attribute except the id columns
        include_keys = set(model.attrs.keys()) \
            .difference(model.exclude_keys_sql) \
            .difference(id_constraints.keys())
    if not include_keys:
        # include_keys given but empty, or nothing left after exclusions
        return model
    values = model.to_dict(include_keys=include_keys)
    returnings = []
    _, updated_ts = model.timestamps if model.timestamps else (None, None)
    if updated_ts and updated_ts not in values:
        # stamp the update time with the appropriate timezone handling
        now = utc_now() if self.core.supports_timezones else local_now()
        values[updated_ts] = now
        returnings.append(updated_ts)
    returning = ", ".join(returnings)
    cr = self.update(model.table_name, values, id_constraints,
                     returning=returning)
    if returning and self.core.supports_returning_syntax:
        # copy DB-computed values back onto the model
        row = cr.fetchone()
        for idx, attr_name in enumerate(returnings):
            setattr(model, attr_name, row[idx])
    return model
def insert_model(self, model, *, upsert=None):
    """Inserts a record for the given model.

    If model's primary key is auto, the primary key will be set
    appropriately.
    """
    pkname = model.primary_key_name
    include_keys = set(model.attrs.keys()).difference(model.exclude_keys_sql)
    if model.primary_key_is_auto:
        # the database generates the key; never send it on insert
        include_keys.discard(pkname)
    elif isinstance(pkname, str):
        include_keys.add(pkname)
    else:
        include_keys.update(set(pkname))
    data = model.to_dict(include_keys=include_keys)
    returnings = []
    if model.primary_key_is_auto:
        returnings.append(pkname)
    if model.timestamps:
        returnings.extend(ts for ts in model.timestamps if ts)
    returning = ", ".join(returnings)
    cr = self.insert(model.table_name, data, returning=returning, upsert=upsert)
    if self.core.supports_returning_syntax:
        # copy DB-generated values (key, timestamps) back onto the model
        row = cr.fetchone() if returning else None
        if row:
            for idx, attr_name in enumerate(returnings):
                setattr(model, attr_name, row[idx])
    elif model.primary_key_is_auto:
        # fall back to the driver-reported last row id
        setattr(model, model.primary_key_name, cr.lastrowid)
    return model
def delete_model(self, model_or_type, id_=None):
    """Deletes a model.

    :model_or_type: if a model, delete that model. If it is a ModelBase
        subclass, id_ must be specified, and the associated record is
        deleted.
    :return: the ``model_or_type`` argument, for chaining.
    """
    # Fix: compare against None explicitly; `if not id_` misrouted valid
    # falsy ids (0, '') into the "model instance" branch.
    if id_ is None:
        constraints = get_model_id_constraints(model_or_type)
    else:
        constraints = get_id_constraints(model_or_type.primary_key_name, id_)
    self.delete(model_or_type.table_name, constraints)
    return model_or_type
def find_or_build(self, constructor, props):
    """Looks for a model that matches the given dictionary constraints.

    If it is not found, a new model of the given type is constructed and
    returned.
    """
    found = self.find_model(constructor, props)
    if found:
        return found
    # not in the database: build (but do not persist) a fresh instance
    return constructor(**props)
def find_or_create(self, constructor, props, *, comp=None):
    """Looks for a model taht matches the given dictionary constraints.

    If it is not found, a new model of the given type is created and saved
    to the database, then returned.
    """
    constraints = comp or props
    model = self.find_model(constructor, constraints)
    if model is not None:
        return model
    # not found: create and persist a new instance
    model = constructor(**props)
    self.insert_model(model)
    return model
def find_or_upsert(self, constructor, props, *, comp=None, return_status=False):
    """Finds or upserts a model with an auto primary key; more flexible
    than find_or_create.

    First looks for the model matching ``comp`` (or ``props`` if comp is
    None). If not found, upserts the model with DO NOTHING; if the upsert
    did not create it (its primary key is still unset there was a
    conflict), a second find retrieves the competing record.

    Note: a record created between the first find and the upsert and then
    deleted before the second find can still yield None; that race is out
    of scope here.

    :param constructor: the model constructor
    :param props: the properties to construct the model with if not found
    :param comp: the properties to search for the model with; ``props``
        when None
    :param return_status: if True, return ``(model, status)`` where status
        is 'found', 'created' or 'duplicate'.
    """
    model = self.find_model(constructor, comp or props)
    status = _UPSERT_STATUS_FOUND
    if model is None:
        model = constructor(**props)
        status = _UPSERT_STATUS_CREATED
        self.insert_model(model, upsert=Upsert(Upsert.DO_NOTHING))
        if model.is_new:
            # conflict: someone else inserted it first — fetch that record
            model = self.find_model(constructor, comp or props)
            status = _UPSERT_STATUS_DUPLICATE
    return (model, status) if return_status else model
def temp_copy(self):
    """Yields a new Vcs object that represents a temporary, disposable
    copy of the current repository. The copy is deleted at the end of the
    context.

    The following are not copied:
    - ignored files
    - easyci private directory (.git/eci for git)

    Yields:
        Vcs
    """
    with contextmanagers.temp_dir() as temp_dir:
        temp_root_path = os.path.join(temp_dir, 'root')
        # trailing slash makes rsync copy the directory contents
        source = os.path.join(self.path, '')
        check_call([
            'rsync', '-r',
            "--exclude={}".format(self.private_dir()),
            "--filter=dir-merge,- {}".format(self.ignore_patterns_file()),
            source, temp_root_path,
        ])
        yield self.__class__(path=temp_root_path)
def published(self, for_user=None, include_login_required=False):
    """Override ``DisplayableManager.published`` to exclude pages with
    ``login_required`` set to ``True`` if the user is unauthenticated and
    the setting ``PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED`` is ``False``.

    The extra ``include_login_required`` arg allows callers to override
    that setting in special cases where they want to deal with the
    ``login_required`` field manually, such as in ``PageMiddleware``.
    """
    qs = super(PageManager, self).published(for_user=for_user)
    unauthenticated = for_user and not for_user.is_authenticated()
    hide = (unauthenticated and not include_login_required
            and not settings.PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED)
    if hide:
        qs = qs.exclude(login_required=True)
    return qs
def atomic_symlink(src, dst):
    """Create or update a symbolic link atomically.

    This function is similar to :py:func:`os.symlink` but will update a
    symlink atomically, by creating a uniquely-named link first and then
    renaming it over ``dst``.

    :param src: target the link will point at.
    :param dst: path of the symlink to create or replace.
    :raises IOError: if no usable temporary file name could be found.
    """
    dst_dir = os.path.dirname(dst)
    tmp = None
    max_tries = getattr(os, 'TMP_MAX', 10000)
    try:
        # Fix: create the directory race-free; the previous
        # `if not os.path.exists(...)` check could fail if another process
        # created the directory in between (TOCTOU).
        try:
            os.makedirs(dst_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        for n in range(max_tries):
            try:
                # mktemp is described as being unsafe. That is not true in
                # this case since symlink is an atomic operation at the file
                # system level; if some other process creates a file with
                # 'our' name then symlink will fail.
                tmp = tempfile.mktemp(dir=dst_dir)
                os.symlink(src, tmp)
                logger.debug('created symlink %s', tmp)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    continue  # Someone else grabbed the temporary name first
                else:
                    raise
            logger.debug('renaming %s to %s', tmp, dst)
            # rename over dst is the atomic update step
            os.rename(tmp, dst)
            return
    except:
        # clean up the orphaned temporary link before re-raising
        if tmp and os.path.exists(tmp):
            os.remove(tmp)
        raise
    raise IOError(errno.EEXIST, 'No usable temporary file name found')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(): #pylint: disable=too-many-locals """Execute the command loop """
store = EventStore(STORE_PATH) with open(CREDENTIAL_PATH, 'r') as cred_file: creds = json.load(cred_file) uname, pword = creds['uname'], creds['pword'] mgr = KindleProgressMgr(store, uname, pword) print 'Detecting updates to Kindle progress:' events = mgr.detect_events() if events is None: print 'Failed to retrieve Kindle progress updates' return elif not events: print ' No updates detected' else: for event in events: print ' ' + str(event) print print 'Finished updating.' print 'Mark new books as \'reading\' or old books as \'read\'? (y/N)' if safe_raw_input('> ') == 'y': _change_state_prompt(mgr) mgr.commit_events()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _change_state_prompt(mgr): """Runs a prompt to change the state of books. Registers `Event`s with `mgr` as they are requested. Args: mgr: A `KindleProgressMgr` object with the `books` and `progress` fields populated. """
cmd = '' book_range = range(1, len(mgr.books) + 1) ind_to_book = dict(zip(book_range, mgr.books)) get_book = lambda cmd_str: ind_to_book[int(cmd_str.split()[1])] while cmd != 'q': print 'Books:' for i in book_range: print '\t%d: %s' % (i, ind_to_book[i]) print 'Commands:' print '| start {#} | Start reading book with index {#}' print '| finish {#} | Finish reading book with index {#}' print '| q | Quit' cmd = safe_raw_input('> ') if cmd is None or cmd == 'q': break elif cmd.startswith('start '): book = get_book(cmd) initial_progress = mgr.progress[book.asin].locs[1] event = SetReadingEvent(book.asin, initial_progress) elif cmd.startswith('finish '): event = SetFinishedEvent(get_book(cmd).asin) else: print 'Invalid command' event = None if event is not None: print print 'REGISTERED EVENT:' print ' ' + str(event) mgr.register_events((event)) print
def change_sheet(self, sheet_name_or_num):
    """Calling this method changes the sheet in anticipation for the next
    time you create an iterator.

    If you change the active sheet while iterating on a UnicodeReader
    instance, it will continue to iterate correctly until completion. The
    next time you iterate through reader, it will begin all over again at
    whichever sheet you most recently changed to.
    """
    if isinstance(sheet_name_or_num, int):
        # numeric index: translate to the workbook's sheet name first
        name = self.__wb.sheetnames[sheet_name_or_num]
        self._sheet = self.__wb[name]
    elif isinstance(sheet_name_or_num, basestring):
        self._sheet = self.__wb[sheet_name_or_num]
    else:
        raise Exception("Must enter either sheet name or sheet number.")
def _os_install(self, package_file):
    """Build a docker RUN directive installing the packages listed in
    ``package_file``.

    take in a dict, return a string of docker build RUN directives —
    one RUN per package type, one package type per JSON key.

    :param package_file: path-like whose ``open()`` yields the JSON
        package list.
    :return: a ``RUN`` directive string, or "" when there is nothing to
        install or no known packager matches the build context.
    """
    packages = " ".join(json.load(package_file.open()))
    if packages:
        for packager, installer in self.pkg_install_cmds.items():
            if packager in self.context.externalbasis:
                return f"RUN {installer} {packages}"
    # Fix: always return a string; previously a non-empty package list with
    # no matching packager fell through and implicitly returned None.
    return ""
async def connect(self):
    """Create new asynchronous connection to the RabbitMQ instance.

    This will connect, declare exchange and bind itself to the configured
    queue. After that, client is ready to publish or consume messages.

    :return: Does not return anything.
    """
    if self.connected or self.is_connecting:
        return
    self._is_connecting = True
    try:
        logger.info("Connecting to RabbitMQ...")
        self._transport, self._protocol = await aioamqp.connect(
            **self._connection_parameters)
        logger.info("Getting channel...")
        self._channel = await self._protocol.channel()
        if self._global_qos is not None:
            logger.info("Setting prefetch count on connection (%s)",
                        self._global_qos)
            await self._channel.basic_qos(0, self._global_qos, 1)
        logger.info("Connecting to exchange '%s (%s)'",
                    self._exchange_name, self._exchange_type)
        await self._channel.exchange(self._exchange_name, self._exchange_type)
    except (aioamqp.AmqpClosedConnection, Exception):
        logger.error("Error initializing RabbitMQ connection", exc_info=True)
        raise exceptions.StreamConnectionError
    finally:
        # the flag is cleared on both the success and failure paths
        self._is_connecting = False
async def consume_queue(self, subscriber: AbstractSubscriber) -> None:
    """Subscribe to the queue consuming.

    :param subscriber: subscriber whose ``name`` is used as the queue name
        and whose ``requested_topics`` are bound as routing keys.
    :raises exceptions.ConsumerError: if the queue is already consumed.
    :return: Does not return anything.
    """
    queue_name = subscriber.name
    topics = subscriber.requested_topics
    if queue_name in self._known_queues:
        raise exceptions.ConsumerError("Queue '%s' already being consumed" % queue_name)
    await self._declare_queue(queue_name)
    # TODO: There is a lot of room to improvement here. Figure out routing done the right way
    for key in topics:
        self._routing.setdefault(key, set())
        if subscriber in self._routing[key]:
            logger.warning("Subscriber '%s' already receiving routing_key '%s'", subscriber, key)
            # Fix: skip only this key; the previous `break` aborted binding
            # of every remaining topic after the first duplicate.
            continue
        await self._bind_key_to_queue(key, queue_name)
        self._routing[key].add(subscriber)
    logger.info("Consuming queue '%s'", queue_name)
    await asyncio.wait_for(
        self._channel.basic_consume(callback=self._on_message, queue_name=queue_name),
        timeout=10
    )
    self._add_to_known_queue(queue_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def _bind_key_to_queue(self, routing_key: AnyStr, queue_name: AnyStr) -> None: """ Bind to queue with specified routing key. :param routing_key: Routing key to bind with. :param queue_name: Name of the queue :return: Does not return anything """
# Delegate the bind to aioamqp's queue_bind on the configured exchange
# and return the broker's bind result unchanged.
logger.info("Binding key='%s'", routing_key)
result = await self._channel.queue_bind(
    exchange_name=self._exchange_name,
    queue_name=queue_name,
    routing_key=routing_key,
)
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def _on_message(self, channel, body, envelope, properties) -> None: """ Fires up when message is received by this consumer. :param channel: Channel, through which message is received :param body: Body of the message (serialized). :param envelope: Envelope object with message meta :type envelope: aioamqp.Envelope :param properties: Properties of the message :return: Coroutine object with result of message handling operation """
# Fan the message out to every subscriber registered for its routing key.
subscribers = self._get_subscribers(envelope.routing_key)
if not subscribers:
    logger.debug("No route for message with key '%s'", envelope.routing_key)
    # NOTE(review): unroutable messages are returned without an ack.
    return
body = self._serializer.deserialize(body)
for subscriber in subscribers:
    # Check later if ensure_future can be applied here
    # Handlers run sequentially; a slow subscriber delays the rest and the ack.
    await subscriber.on_message(body, envelope.routing_key)
# Single ack only after *all* subscribers processed the message.
await self._channel.basic_client_ack(envelope.delivery_tag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_this(func, types, args=None, delimiter_chars=":"): """Create an ArgParser for the given function converting the command line arguments according to the list of types. Args: func: the function for which the command line arguments to be parsed types: a list of types - as accepted by argparse - that will be used to convert the command line arguments args: a list of arguments to be parsed if None sys.argv is used delimiter_chars: characters used to separate the parameters from their help message in the docstring. Defaults to ':' """
_LOG.debug("Creating parser for %s", func.__name__)
# NOTE(review): inspect.getargspec was removed in modern Python 3; this
# code targets the era in which it was still available.
(func_args, dummy_1, dummy_2, defaults) = getargspec(func)
# Align the caller-supplied type list with the function's signature.
types, func_args = _check_types(func.__name__, types, func_args, defaults)
args_and_defaults = _get_args_and_defaults(func_args, defaults)
parser = _get_arg_parser(func, types, args_and_defaults, delimiter_chars)
# Fall back to sys.argv when no explicit argument list is supplied.
arguments = parser.parse_args(_get_args_to_parse(args, sys.argv))
# Invoke the target function with the parsed argument values.
return _call(func, func_args, arguments)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_sub_parsers(self, top_level_parser, methods_to_parse, class_name): """Add all the sub-parsers to the top_level_parser. Args: top_level_parser: the top level parser methods_to_parse: dict of method name pointing to their associated argument parser class_name: name of the decorated class Returns: a dict of registered name of the parser i.e. sub command name pointing to the method real name """
description = "Accessible methods of {}".format(class_name)
# The chosen sub-command is stored in the 'method' namespace attribute.
sub_parsers = top_level_parser.add_subparsers(description=description, dest="method")
# Holds the mapping between the name registered for the parser
# and the method real name. It is used by the 'inner_call'
# method to retrieve the real method.
parser_to_method = {}
for method_name, parser in methods_to_parse.items():
    # We use the name provided in 'create_parser` or the name of the
    # decorated method
    parser_name = parser.get_name() or method_name
    # Make the method name compatible for the argument parsing
    if parser_name.startswith("_"):
        if not self._parse_private:
            # We skip private methods if the caller asked not to
            # parse them
            continue
        # 'Private' methods are exposed without their leading or
        # trailing '_'s. Also works for 'special' methods.
        parser_name = parser_name.strip("_")
    # Sub-command names use dashes instead of underscores.
    parser_name = parser_name.replace("_", "-")
    parser_to_method[parser_name] = method_name
    sub_parsers.add_parser(parser_name, parents=[parser], add_help=False, description=parser.description)
return parser_to_method
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_class_parser(self, init_parser, methods_to_parse, cls): """Creates the complete argument parser for the decorated class. Args: init_parser: argument parser for the __init__ method or None methods_to_parse: dict of method name pointing to their associated argument parser cls: the class we are decorating Returns: The decorated class with an added attribute 'parser' """
# Inherit the __init__ arguments (if any) on the top-level parser.
top_level_parents = [init_parser] if init_parser else []
description = self._description or cls.__doc__
# conflict_handler="resolve" lets the custom -h/--help below win over
# any inherited help option.
top_level_parser = argparse.ArgumentParser(description=description, parents=top_level_parents, add_help=False, conflict_handler="resolve")
top_level_parser.add_argument("-h", "--help", action=FullHelpAction, help="Display this help message")
parser_to_method = self._add_sub_parsers(top_level_parser, methods_to_parse, cls.__name__)
# Update the dict with the __init__ method so we can instantiate
# the decorated class
if init_parser:
    parser_to_method["__init__"] = "__init__"
# Attach the dispatch entry point and expose the parser on the class.
top_level_parser.call = self._get_parser_call_method(parser_to_method)
cls.parser = top_level_parser
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_parser_call_method(self, parser_to_method): """Return the parser special method 'call' that handles sub-command calling. Args: parser_to_method: mapping of the parser registered name to the method it is linked to """
def inner_call(args=None, instance=None):
    """Allows to call the method invoked from the command line or
    provided argument.

    Args:
        args: list of arguments to parse, defaults to command line arguments
        instance: an instance of the decorated class. If instance is None,
        the default, and __init__ is decorated the object will be
        instantiated on the fly from the command line arguments
    """
    parser = self._cls.parser
    namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))
    if instance is None:
        # If the __init__ method is not part of the method to
        # decorate we cannot instantiate the class
        if "__init__" not in parser_to_method:
            raise ParseThisError(("'__init__' method is not decorated. "
                                  "Please provide an instance to "
                                  "'{}.parser.call' or decorate the "
                                  "'__init___' method with "
                                  "'create_parser'"
                                  .format(self._cls.__name__)))
        # We instantiate the class from the command line arguments
        instance = _call_method_from_namespace(self._cls, "__init__", namespace)
    # 'method' holds the registered sub-command name chosen on the
    # command line; map it back to the real method name.
    method_name = parser_to_method[namespace.method]
    return _call_method_from_namespace(instance, method_name, namespace)
return inner_call
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gp_lcltpt(): """ example plot to display linecolors, linetypes and pointtypes .. image:: pics/gp_lcltpt.png :width: 450 px """
inDir, outDir = getWorkDirs()
nSets = len(default_colors)
make_plot(
    # One horizontal two-point segment per color, stacked at y = 0..nSets-1.
    data = [ np.array([ [0,i,0,0,0], [1,i,0,0,0] ]) for i in xrange(nSets) ],
    # Index i doubles as both the linetype and pointtype for each sample row.
    properties = [ 'with linespoints lw 4 lc %s lt %d pt %d' % (col, i, i) for i, col in enumerate(default_colors) ],
    titles = [''] * nSets,
    yr = [-1, 51],
    name = os.path.join(outDir, 'gp_lcltpt'),
    ylabel = 'linecolor / linetype / pointtype',
    xlabel = '',
)
return 'done'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_analyses(prepared_analyses=None,log_dir=default_log_dir): """ If all defaults are ok, this should be the only function needed to run the analyses. """
if prepared_analyses == None: prepared_analyses = prepare_analyses() state_collection = funtool.state_collection.StateCollection([],{}) for analysis in prepared_analyses: state_collection= funtool.analysis.run_analysis(analysis, state_collection, log_dir) return state_collection
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_analysis( named_analysis, prepared_analyses=None,log_dir=default_log_dir): """ Runs just the named analysis. Otherwise just like run_analyses """
if prepared_analyses == None: prepared_analyses = prepare_analyses() state_collection = funtool.state_collection.StateCollection([],{}) for analysis in prepared_analyses: if analysis.name == named_analysis: state_collection= funtool.analysis.run_analysis(analysis, state_collection, log_dir) return state_collection
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def poll(self): """ Poll the job status. Returns the changes in this iteration."""
# Query async job status on all still-pending hosts via the
# 'async_status' module, restricted to hosts_to_poll.
self.runner.module_name = 'async_status'
self.runner.module_args = "jid=%s" % self.jid
self.runner.pattern = "*"
self.runner.background = 0
self.runner.inventory.restrict_to(self.hosts_to_poll)
results = self.runner.run()
self.runner.inventory.lift_restriction()
hosts = []
poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
for (host, res) in results['contacted'].iteritems():
    if res.get('started',False):
        # Job still running on this host: keep polling it.
        hosts.append(host)
        poll_results['polled'][host] = res
    else:
        # Job finished: record the final result and fire callbacks.
        self.results['contacted'][host] = res
        poll_results['contacted'][host] = res
        if 'failed' in res:
            self.runner.callbacks.on_async_failed(host, res, self.jid)
        else:
            self.runner.callbacks.on_async_ok(host, res, self.jid)
for (host, res) in results['dark'].iteritems():
    # Unreachable hosts count as failed and are not polled again.
    self.results['dark'][host] = res
    poll_results['dark'][host] = res
    self.runner.callbacks.on_async_failed(host, res, self.jid)
self.hosts_to_poll = hosts
if len(hosts)==0:
    self.completed = True
return poll_results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait(self, seconds, poll_interval): """ Wait a certain time for job completion, check status every poll_interval. """
# jid is None when all hosts were skipped if self.jid is None: return self.results clock = seconds - poll_interval while (clock >= 0 and not self.completed): time.sleep(poll_interval) poll_results = self.poll() for (host, res) in poll_results['polled'].iteritems(): if res.get('started'): self.runner.callbacks.on_async_poll(host, res, self.jid, clock) clock = clock - poll_interval return self.results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_edit_form(obj, field_names, data=None, files=None): """ Returns the in-line editing form for editing a single model field. """
# Map these form fields to their types defined in the forms app so # we can make use of their custom widgets. from yacms.forms import fields widget_overrides = { forms.DateField: fields.DATE, forms.DateTimeField: fields.DATE_TIME, forms.EmailField: fields.EMAIL, } class EditForm(forms.ModelForm): """ In-line editing form for editing a single model field. """ app = forms.CharField(widget=forms.HiddenInput) model = forms.CharField(widget=forms.HiddenInput) id = forms.CharField(widget=forms.HiddenInput) fields = forms.CharField(widget=forms.HiddenInput) class Meta: model = obj.__class__ fields = field_names.split(",") def __init__(self, *args, **kwargs): super(EditForm, self).__init__(*args, **kwargs) self.uuid = str(uuid4()) for f in self.fields.keys(): field_class = self.fields[f].__class__ try: widget = fields.WIDGETS[widget_overrides[field_class]] except KeyError: pass else: self.fields[f].widget = widget() css_class = self.fields[f].widget.attrs.get("class", "") css_class += " " + field_class.__name__.lower() self.fields[f].widget.attrs["class"] = css_class self.fields[f].widget.attrs["id"] = "%s-%s" % (f, self.uuid) if settings.FORMS_USE_HTML5 and self.fields[f].required: self.fields[f].widget.attrs["required"] = "" initial = {"app": obj._meta.app_label, "id": obj.id, "fields": field_names, "model": obj._meta.object_name.lower()} return EditForm(instance=obj, initial=initial, data=data, files=files)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _filter_disabled_regions(contents): """Filter regions that are contained in back-ticks."""
# Work on a mutable list of characters; blank out everything inside
# triple-backtick fenced regions so later passes ignore it.
contents = list(contents)
in_backticks = False
contents_len = len(contents)
index = 0
while index < contents_len:
    character = contents[index]
    if character == "`":
        # Check to see if we should toggle the in_backticks
        # mode here by looking ahead for another two characters.
        if ((index + 2) < contents_len and "".join(contents[index:index + 3]) == "```"):
            in_backticks = not in_backticks
            # Jump past the fence so its backticks are preserved as-is.
            index += 3
            continue
    if in_backticks:
        # Replace fenced content with spaces, preserving offsets/length.
        contents[index] = " "
    index += 1
return "".join(contents)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spellcheck(contents, technical_terms=None, spellcheck_cache=None): """Run spellcheck on the contents of a file. :technical_terms: is a path to a file containing a list of "technical" terms. These may be symbols as collected from files by using the generic linter or other such symbols. If a symbol-like term is used within contents and it does not appear in :technical_terms: then an error will result. :spellcheck_cache: is a path to a directory where graph files generated by the spellchecking engine should be stored. It is used for caching purposes between invocations, since generating the spellchecking graph is an expensive operation which can take a few seconds to complete. """
# Strip tokens that should never be spellchecked, then blank out
# backtick-fenced regions before splitting into lines.
contents = spelling.filter_nonspellcheckable_tokens(contents)
contents = _filter_disabled_regions(contents)
lines = contents.splitlines(True)
# Build (possibly cached) dictionaries: user words, valid words, and
# the technical-terms dictionary from the provided terms file.
user_words, valid_words = valid_words_dictionary.create(spellcheck_cache)
technical_words = technical_words_dictionary.create(technical_terms, spellcheck_cache)
# Return errors in a deterministic (sorted) order.
return sorted([e for e in spellcheck_region(lines, valid_words, technical_words, user_words)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _report_spelling_error(error, file_path): """Report a spelling error."""
# Convert the 0-based offset to a 1-based line number for display.
line = error.line_offset + 1
code = "file/spelling_error"
description = _SPELLCHECK_MESSAGES[error.error_type].format(error.word)
if error.suggestions is not None:
    description = (description + ", perhaps you meant: " + ", ".join(error.suggestions))
# Emit a "path:line [code] message" diagnostic on stdout.
sys.stdout.write("{0}:{1} [{2}] {3}\n".format(file_path, line, code, description))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(arguments=None): # suppress(unused-function) """Entry point for the spellcheck linter."""
dictionary_path = os.path.abspath("DICTIONARY")
result = _parse_arguments(arguments)
num_errors = 0
for found_filename in result.files:
    file_path = os.path.abspath(found_filename)
    with io.open(file_path, "r+", encoding="utf-8") as found_file:
        # Stamp dependencies: re-run the check when the file itself, the
        # DICTIONARY, or the technical-terms file changes.
        jobstamps_dependencies = [file_path]
        if os.path.exists(dictionary_path):
            jobstamps_dependencies.append(dictionary_path)
        if (result.technical_terms and os.path.exists(result.technical_terms)):
            jobstamps_dependencies.append(result.technical_terms)
        kwargs = {
            "jobstamps_dependencies": jobstamps_dependencies,
            "jobstamps_cache_output_directory": result.stamp_file_path,
        }
        # jobstamp.run caches spellcheck results between invocations.
        errors = jobstamp.run(spellcheck, found_file.read(), result.technical_terms, result.spellcheck_cache, **kwargs)
        for error in errors:
            _report_spelling_error(error, file_path)
        num_errors += len(errors)
# Non-zero error count doubles as the process exit status for the caller.
return num_errors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def recover(self, state):
    """Recompute the actual value from *state* and compare it against the
    stored truth, returning the resulting error object."""
    recomputed = self.f.recover(state)
    return self.errtype(self.value, recomputed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compare_version(value): """ Determines if the provided version value compares with program version. `value` Version comparison string (e.g. ==1.0, <=1.0, >1.1) Supported operators: <, <=, ==, >, >= """
# extract parts from value import re res = re.match(r'(<|<=|==|>|>=)(\d{1,2}\.\d{1,2}(\.\d{1,2})?)$', str(value).strip()) if not res: return False operator, value, _ = res.groups() # break into pieces value = tuple(int(x) for x in str(value).split('.')) if len(value) < 3: value += (0,) version = __version_info__ if operator in ('<', '<='): if version < value: return True if operator != '<=': return False elif operator in ('>=', '>'): if version > value: return True if operator != '>=': return False return value == version
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_publications(return_namedtuples=True): """ Get list publications from all available source. Args: return_namedtuples (bool, default True): Convert :class:`.Publication` structures to namedtuples (used in AMQP communication). Returns: list: List of :class:`.Publication` structures converted to namedtuple. """
sources = [ ben_cz.get_publications, grada_cz.get_publications, cpress_cz.get_publications, zonerpress_cz.get_publications, ] # get data from all scrappers publications = [] for source in sources: publications.extend( filters.filter_publications(source()) ) # convert to namedtuples if return_namedtuples: publications = map(lambda x: x.to_namedtuple(), publications) return publications
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_cell(self, cell, coords, cell_mode=CellMode.cooked): """Parses a cell according to its cell.value_type."""
# pylint: disable=too-many-return-statements if cell_mode == CellMode.cooked: if cell.covered or cell.value_type is None or cell.value is None: return None vtype = cell.value_type if vtype == 'string': return cell.value if vtype == 'float' or vtype == 'percentage' or vtype == 'currency': return cell.value if vtype == 'boolean': return cell.value if vtype == 'date': date_tuple = tuple([int(i) if i is not None else 0 \ for i in _DATE_REGEX.match(cell.value).groups()]) return self.tuple_to_datetime(date_tuple) if vtype == 'time': hour, minute, second = _TIME_REGEX.match(cell.value).groups() # TODO: This kills off the microseconds date_tuple = (0, 0, 0, int(hour), int(minute), round(float(second))) return self.tuple_to_datetime(date_tuple) raise ValueError("Unhandled cell type: {0}".format(vtype)) else: return cell
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_row(self, row_index): """Returns the row at row_index."""
# Lazily snapshot all rows from the backing sheet on first access, then
# serve subsequent lookups from the cached list.
if self._raw_rows is None:
    self._raw_rows = list(self.raw_sheet.rows())
return self._raw_rows[row_index]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_config(sections, section_contents): """Create a config file from the provided sections and key value pairs. Args: sections (List[str]): A list of section keys. key_value_pairs (Dict[str, str]): A list of of dictionaries. Must be as long as the list of sections. That is to say, if there are two sections, there should be two dicts. Returns: configparser.ConfigParser: A ConfigParser. Raises: ValueError """
# The two argument lists must pair up one-to-one.
sections_length, section_contents_length = len(sections), len(section_contents)
if sections_length != section_contents_length:
    raise ValueError("Mismatch between argument lengths.\n"
                     "len(sections) = {}\n"
                     "len(section_contents) = {}"
                     .format(sections_length, section_contents_length))
config = configparser.ConfigParser()
for section, section_content in zip(sections, section_contents):
    config[section] = section_content
return config
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_config(config, config_path=CONFIG_PATH): """Write the config to the output path. Creates the necessary directories if they aren't there. Args: config (configparser.ConfigParser): A ConfigParser. """
# Make sure the parent directory exists before writing. The previous
# check tested the *file* path and then called makedirs(dirname), which
# raised when the file was missing but its directory already existed,
# and failed for bare filenames where dirname() is ''.
config_dir = os.path.dirname(config_path)
if config_dir and not os.path.isdir(config_dir):
    os.makedirs(config_dir)
with open(config_path, 'w', encoding='utf-8') as f:
    config.write(f)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_config(config_path=CONFIG_PATH): """Read the config information from the config file. Args: config_path (str): Relative path to the email config file. Returns: defaultdict: A defaultdict with the config information. Raises: IOError """
if not os.path.isfile(config_path):
    raise IOError("No config file found at %s" % config_path)
config_parser = configparser.ConfigParser()
config_parser.read(config_path)
# Convert to a defaultdict so callers can probe missing sections safely.
config = _config_parser_to_defaultdict(config_parser)
return config
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_config(config): """Check that all sections of the config contain the keys that they should. Args: config (defaultdict): A defaultdict. Raises: ConfigurationError """
# Validate every expected section; raise on the first problem found.
for section, expected_section_keys in SECTION_KEYS.items():
    section_content = config.get(section)
    if not section_content:
        raise ConfigurationError("Config file badly formed! Section {} is missing."
                                 .format(section))
    elif not _section_is_healthy(section_content, expected_section_keys):
        raise ConfigurationError("The {} section of the configuration file is badly formed!"
                                 .format(section))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_config_diagnostics(config_path=CONFIG_PATH): """Run diagnostics on the configuration file. Args: config_path (str): Path to the configuration file. Returns: str, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing sections and a dict that maps each section to the entries that have either missing or empty options. """
config = read_config(config_path)
# Collect problems instead of raising: missing sections on one side,
# sections with missing/empty options on the other.
missing_sections = set()
malformed_entries = defaultdict(set)
for section, expected_section_keys in SECTION_KEYS.items():
    section_content = config.get(section)
    if not section_content:
        missing_sections.add(section)
    else:
        for option in expected_section_keys:
            option_value = section_content.get(option)
            if not option_value:
                # Option is absent or present but empty.
                malformed_entries[section].add(option)
return config_path, missing_sections, malformed_entries
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_attribute_from_config(config, section, attribute): """Try to parse an attribute of the config file. Args: config (defaultdict): A defaultdict. section (str): The section of the config file to get information from. attribute (str): The attribute of the section to fetch. Returns: str: The string corresponding to the section and attribute. Raises: ConfigurationError """
section = config.get(section) if section: option = section.get(attribute) if option: return option raise ConfigurationError("Config file badly formed!\n" "Failed to get attribute '{}' from section '{}'!" .format(attribute, section))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def valid_config_exists(config_path=CONFIG_PATH): """Verify that a valid config file exists. Args: config_path (str): Path to the config file. Returns: boolean: True if there is a valid config file, false if not. """
if os.path.isfile(config_path):
    try:
        # Any read or validation failure means the config is invalid.
        config = read_config(config_path)
        check_config(config)
    except (ConfigurationError, IOError):
        return False
else:
    # No file at all.
    return False
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_to_string(config): """Nice output string for the config, which is a nested defaultdict. Args: config (defaultdict(defaultdict)): The configuration information. Returns: str: A human-readable output string detailing the contents of the config. """
# Render each section as an INI-style "[section]" header followed by
# its "option = value" lines.
output = []
for section, section_content in config.items():
    output.append("[{}]".format(section))
    for option, option_value in section_content.items():
        output.append("{} = {}".format(option, option_value))
return "\n".join(output)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _config_parser_to_defaultdict(config_parser): """Convert a ConfigParser to a defaultdict. Args: config_parser (ConfigParser): A ConfigParser. """
config = defaultdict(defaultdict)
for section, section_content in config_parser.items():
    # Skip ConfigParser's implicit DEFAULT section.
    if section != 'DEFAULT':
        for option, option_value in section_content.items():
            config[section][option] = option_value
return config
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def start(self, *args, **kwargs):
    ''' Launch IPython notebook server in background process. Arguments and keyword arguments are passed on to `Popen` call. By default, notebook server is launched using current working directory as the notebook directory. '''
    if 'stderr' in kwargs:
        raise ValueError('`stderr` must not be specified, since it must be'
                         ' monitored to determine which port the notebook '
                         'server is running on.')
    args_ = ('%s' % sys.executable, '-m', 'IPython', 'notebook') + self.args
    args_ = args_ + tuple(args)
    # stderr must be a pipe: the server announces its address there.
    self.process = Popen(args_, stderr=PIPE, **kwargs)
    self._notebook_dir = os.getcwd()
    # Determine which port the notebook is running on.
    cre_address = re.compile(r'The \w+ Notebook is running at: '
                             r'(?P<address>https?://.*?:'
                             r'(?P<port>\d+)[^\r]*/)\r?$')
    cre_notebook_dir = re.compile(r'Serving notebooks from local '
                                  r'directory:\s+(?P<notebook_dir>[^\r]*)\r?$')
    match = None
    self.stderr_lines = []
    # Popen.poll() is None while the child runs, so this loop reads
    # stderr until the server either announces its address or dies.
    while not self.process.poll() and match is None:
        stderr_line = self.process.stderr.readline()
        self.stderr_lines.append(stderr_line)
        match = cre_address.search(stderr_line)
        dir_match = cre_notebook_dir.search(stderr_line)
        if dir_match:
            self._notebook_dir = dir_match.group('notebook_dir')
    if match:
        # Notebook was started successfully.
        self.address = match.group('address')
        self.port = int(match.group('port'))
    else:
        # Startup failed: surface everything the server printed.
        raise IOError(''.join(self.stderr_lines))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def open(self, filename=None):
    '''
    Open a browser tab for the notebook path specified relative to the
    notebook directory, or for the root of the notebook server when no
    filename is given.
    '''
    if filename is None:
        target = self.address + 'tree'
    else:
        notebook_path = self.resource_filename(filename)
        if not notebook_path.isfile():
            raise IOError('Notebook path not found: %s' % notebook_path)
        target = '%snotebooks/%s' % (self.address, filename)
    webbrowser.open_new_tab(target)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_session(self, notebook_dir=None, no_browser=True, **kwargs):
    '''
    Return handle to IPython session for specified notebook directory.

    If an IPython notebook session has already been launched for the
    notebook directory, reuse it.  Otherwise, launch a new IPython
    notebook session.

    By default, notebook session is launched using current working
    directory as the notebook directory.

    Arguments
    ---------

     - `notebook_dir`: Directory to start IPython notebook session in.
     - `no_browser`: Do not launch new browser tab.
    '''
    if notebook_dir in self.sessions and self.sessions[notebook_dir].is_alive():
        # Notebook process is already running for notebook directory;
        # reuse the existing session rather than spawning another server.
        session = self.sessions[notebook_dir]
        if 'daemon' in kwargs:
            # Override `daemon` setting of existing session.
            session.daemon = kwargs['daemon']
        if not no_browser:
            session.open()
    else:
        # Notebook process is not running for notebook directory, so
        # start new IPython notebook process.

        # Use default `daemon` setting for manager if none is specified.
        daemon = kwargs.pop('daemon', self.daemon)
        if no_browser:
            kwargs['no_browser'] = None
        if notebook_dir is not None:
            kwargs['notebook_dir'] = notebook_dir
        session = Session(daemon=daemon, **kwargs)
        session.start()
        # NOTE(review): sessions are stored keyed by
        # `str(session.notebook_dir)` but looked up above by the raw
        # `notebook_dir` argument -- confirm both always normalise to
        # the same string, otherwise reuse can silently fail.
        self.sessions[str(session.notebook_dir)] = session
    return session
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_blob(profile, sha): """Fetch a blob. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the blob to fetch. Returns: A dict with data about the blob. """
resource = "/blobs/" + sha data = api.get_request(profile, resource) return prepare(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_blob(profile, content): """Create a blob. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. content The (UTF-8 encoded) content to create in the blob. Returns: A dict with data about the newly created blob. """
resource = "/blobs" payload = {"content": content} data = api.post_request(profile, resource, payload) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(self): """ Copy constructor for Sequence objects. """
return Sequence(self.name, self.sequenceData, self.start, self.end, self.strand, self.remaining, self.meta_data, self.mutableString)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def effective_len(self): """ Get the length of the sequence if N's are disregarded. """
if self._effective_len is None: self._effective_len = len([nuc for nuc in self.sequenceData if nuc != "N" and nuc != "n"]) return self._effective_len
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def percentNuc(self, nuc): """ return the percentage of the sequence which is equal to the passed nuc. :param nuc: the nucleotide to compute percentage composition for. There is no check to make sure this is a valid nucleotide. :return: the percentage of the sequence that is <nuc> """
count = reduce(lambda x, y: x + 1 if y == nuc else x, self.sequenceData, 0) return count / float(len(self.sequenceData))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reverseComplement(self, isRNA=None): """ Reverse complement this sequence in-place. :param isRNA: if True, treat this sequence as RNA. If False, treat it as DNA. If None (default), inspect the sequence and make a guess as to whether it is RNA or DNA. """
isRNA_l = self.isRNA() if isRNA is None else isRNA tmp = "" for n in self.sequenceData: if isRNA_l: tmp += RNA_COMPLEMENTS[n] else: tmp += DNA_COMPLEMENTS[n] self.sequenceData = tmp[::-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maskRegion(self, region): """ Replace nucleotides in this sequence in the regions given by Ns :param region: any object with .start and .end attributes. Co-ords are zero based and inclusive of both end points. Any other attributes (e.g. chrom.) are ignored. :raise SequenceError: if region specifies nucleotides not present in this sequence """
# Reject regions that extend outside this sequence.
# NOTE(review): ``region.end == len(self)`` slips through this check
# (it uses ``>`` rather than ``>=``) even though the largest valid
# zero-based inclusive end index is ``len(self) - 1``; the mutable
# branch below would then raise IndexError -- confirm intended bound.
if region.start < 0 or region.end < 0 or \
       region.start > len(self) or region.end > len(self):
      raise SequenceError("cannot mask region " + str(region.start) +
                          " to " + str(region.end) + " in " + self.name +
                          ". " +
                          "Region specifies nucleotides not present in " +
                          "this read. Valid range would have been 0 -- " +
                          str(len(self)))
    if self.mutableString:
        # Mutable backing store: mask in place, end point inclusive.
        for i in range(region.start, region.end + 1):
            self.sequenceData[i] = 'N'
    else:
        # Immutable string: rebuild it from prefix + N-run + suffix.
        self.sequenceData = "".join([self.sequenceData[:region.start],
                                     ("N" * (region.end - region.start + 1)),
                                     self.sequenceData[region.end + 1:]])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maskRegions(self, regions, verbose=False): """ Mask the given regions in this sequence with Ns. :param region: iterable of regions to mask. Each region can be any object with .start and .end attributes. Co-ords are zero based and inclusive of both end points. Any other attributes (e.g. chrom.) are ignored. :param verbose: print status messages to stderr if True """
if verbose: pind = ProgressIndicator(totalToDo=len(regions), messagePrefix="completed", messageSuffix="of masking regions in " + self.name) for region in regions: self.maskRegion(region) if verbose: pind.done += 1 pind.showProgress()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split(self, point=None): """ Split this sequence into two halves and return them. The original sequence remains unmodified. :param point: defines the split point, if None then the centre is used :return: two Sequence objects -- one for each side """
if point is None: point = len(self) / 2 r1 = Sequence(self.name + ".1", self.sequenceData[:point]) r2 = Sequence(self.name + ".2", self.sequenceData[point:]) return r1, r2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maskMatch(self, mask): """ Determine whether this sequence matches the given mask. :param mask: string to match against. Ns in the mask are considered to match anything in the sequence -- all other chars must match exactly. :return: True if the mask matches at all places, otherwise false """
if len(mask) > len(self.sequenceData): return False lim = len(mask) for i in range(0, lim): if mask[i] == "N" or mask[i] == "n": continue if mask[i] != self.sequenceData[i]: return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def walk_recursive(f, data): """ Recursively apply a function to all dicts in a nested dictionary :param f: Function to apply :param data: Dictionary (possibly nested) to recursively apply function to :return: """
# Default result for the dict branch below.
results = {}
    if isinstance(data, list):
        # Recurse into every element of a list.
        return [walk_recursive(f, d) for d in data]
    elif isinstance(data, dict):
        # Apply f to the keys at this level first ...
        results = funcy.walk_keys(f, data)
        # ... then recurse into nested dict/list values, overwriting the
        # shallow values that walk_keys carried over.
        for k, v in data.iteritems():
            if isinstance(v, dict):
                results[f(k)] = walk_recursive(f, v)
            elif isinstance(v, list):
                results[f(k)] = [walk_recursive(f, d) for d in v]
    else:
        # Scalar leaf: f is applied to the value itself.
        # NOTE(review): the binding of this else-branch (outer isinstance
        # chain vs the inner one in the loop) is ambiguous in the source
        # layout -- confirm it belongs to the outer chain as shown here.
        return f(data)
    return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, config, strip_app_name=False, filter_by_app_name=False, key_normalisation_func=default_key_normalisation_func): """ Add a dict of config data. Values from later dicts will take precedence over those added earlier, so the order data is added matters. Note: Double underscores can be used to indicate dict key name boundaries. i.e. if we have a dict like: { 'logging': { 'level': INFO } } we could pass an environment variable LOGGING__LEVEL=DEBUG to override the log level. Note: Key names will be normalised by recursively applying the key_normalisation_func function. By default this will: 1) Convert keys to lowercase 2) Replace hyphens with underscores 3) Strip leading underscores This allows key names from different sources (e.g. CLI args, env vars, etc.) to be able to override each other. :param config dict: config data :param strip_app_name boolean: If True, the configured app_name will stripped from the start of top-level input keys if present. :param filter_by_app_name boolean: If True, keys that don't begin with the app name will be discarded. :return: """
# Normalise every key (recursively) so sources from different origins
# (CLI args, env vars, files) can override one another consistently.
config = walk_recursive(key_normalisation_func, OrderedDict(config))
    if filter_by_app_name:
        # Keep only keys that begin with the app name; funcy.compact also
        # drops entries whose value is falsy after the selection.
        config = funcy.compact(funcy.select_keys(
            lambda k: k.startswith(self._app_name), config))
    if strip_app_name:
        # Remove the leading app-name prefix from top-level keys.
        strip_app_name_regex = re.compile("^%s" % self._app_name)
        config = funcy.walk_keys(
            lambda k: re.sub(strip_app_name_regex, '', k), config)
    # Later-added sources take precedence; return self for chaining.
    self._sources.append(config)
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_param(self, name, value): """Set a GO-PCA Server parameter. Parameters name: str The parameter name. value: ? The parameter value. """
if name not in self.param_names: raise ValueError('No GO-PCA Server parameter named "%s"!' %(param)) self.__params[name] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_params(self, params): """Sets multiple GO-PCA Server parameters using a dictionary. Parameters params: dict Dictionary containing the parameter values. Returns ------- None """
# Delegate each entry to set_param so its per-name validation runs.
for name, value in params.iteritems():
        self.set_param(name, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_params(self): """Reset all parameters to their default values."""
self.__params = dict([p, None] for p in self.param_names) self.set_params(self.param_defaults)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(cli, command, docker_id): """Creates waybill shims from a given command name and docker image"""
content = waybill_template.format(command=command, docker_id=docker_id) waybill_dir = cli.get_waybill_dir() waybill_filename = os.path.join(waybill_dir, command + '.waybill') with open(waybill_filename, 'wb') as filehandle: filehandle.write(content) cli.log.info('Created waybill {0}'.format(waybill_filename))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(cli, yaml_filename):
    """Creates waybill shims from a given yaml file definitions"""
"""Expected Definition: - name: NAME docker_id: IMAGE - name: NAME docker_id: IMAGE """ with open(yaml_filename, 'rb') as filehandle: for waybill in yaml.load(filehandle.read()): cli.create(waybill.name, waybill.docker_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shellinit(cli): """Implements the waybill shims in the active shell"""
output = 'eval echo "Initializing Waybills"' if which('docker') is None: raise ValueError("Unable to find program 'docker'. Please make sure it is installed and setup properly") for waybill in cli.get_waybills(): output += ' && source {0}'.format(waybill) return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def service_data(self): """ Returns all introspected service data. If the data has been previously accessed, a memoized version of the data is returned. :returns: A dict of introspected service data :rtype: dict """
# Lean on the cache first. if self._loaded_service_data is not None: return self._loaded_service_data # We don't have a cache. Build it. self._loaded_service_data = self._introspect_service( # We care about the ``botocore.session`` here, not the # ``kotocore.session``. self.session.core_session, self.service_name ) # Clear out the API version, just in case. self._api_version = None return self._loaded_service_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_version(self): """ Returns API version introspected from the service data. If the data has been previously accessed, a memoized version of the API version is returned. :returns: The service's version :rtype: string """
# Lean on the cache first. if self._api_version is not None: return self._api_version # We don't have a cache. Build it. self._api_version = self._introspect_api_version( self.session.core_session, self.service_name ) return self._api_version
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def construct_for(self, service_name): """ Builds a new, specialized ``Connection`` subclass for a given service. This will introspect a service, determine all the API calls it has & constructs a brand new class with those methods on it. :param service_name: The name of the service to construct a connection for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :returns: A new connection class for that service """
# Construct a new ``ConnectionDetails`` (or similar class) for storing # the relevant details about the service & its operations. details = self.details_class(service_name, self.session) # Make sure the new class gets that ``ConnectionDetails`` instance as a # ``cls._details`` attribute. attrs = { '_details': details, } # Determine what we should call it. klass_name = self._build_class_name(service_name) # Construct what the class ought to have on it. attrs.update(self._build_methods(details)) # Create the class. return type( klass_name, (self.base_connection,), attrs )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getMoviesFromJSON(jsonURL):
    """Main function for this library

    Returns list of Movie classes from apple.com/trailers json URL
    such as: http://trailers.apple.com/trailers/home/feeds/just_added.json

    The Movie classes use lazy loading mechanisms so that data not directly
    available from JSON are loaded on demand. Currently these lazy loaded
    parts are:

    * poster
    * trailerLinks
    * description

    Be warned that accessing these fields can take a long time due to
    network access. Therefore do the loading in a thread separate from the
    UI thread or your users will notice.

    There are optional fields that may or may not be present in every Movie
    instance. These include:

    * actors (list)
    * directors (list)
    * rating (string)
    * genre (string)
    * studio (string)
    * releasedate (string)

    Please take care when trying to access these fields as they may not
    exist.
    """
response = urllib.request.urlopen(jsonURL) jsonData = response.read().decode('utf-8') objects = json.loads(jsonData) # make it work for search urls if jsonURL.find('quickfind') != -1: objects = objects['results'] optionalInfo = ['actors','directors','rating','genre','studio','releasedate'] movies = [] for obj in objects: movie = Movie() movie.title = obj['title'] movie.baseURL = obj['location'] movie.posterURL = obj['poster'] # sometimes posters don't have http part if movie.posterURL.find('http:') == -1: movie.posterURL = "http://apple.com%s" % movie.posterURL movie.trailers = obj['trailers'] for i in optionalInfo: if i in obj: setattr(movie, i, obj[i]) movies.append(movie) return movies
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_description(self): """Returns description text as provided by the studio"""
if self._description: return self._description try: trailerURL= "http://trailers.apple.com%s" % self.baseURL response = urllib.request.urlopen(trailerURL) Reader = codecs.getreader("utf-8") responseReader = Reader(response) trailerHTML = responseReader.read() description = re.search('<meta *name="Description" *content="(.*?)" *[/]*>' ,trailerHTML) if description: self._description = description.group(1) else: self._description = "None" except: self._description = "Error" return self._description
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _unwrap_one_layer(r, L, n): """For a set of points in a 2 dimensional periodic system, extend the set of points to tile the points at a given period. Parameters r: float array, shape (:, 2). Set of points. L: float array, shape (2,) System lengths. n: integer. Period to unwrap. Returns ------- rcu: float array, shape (:, 2). The set of points. tiled at the periods at a distance `n` from the origin. """
try: L[0] except (TypeError, IndexError): L = np.ones([r.shape[1]]) * L if n == 0: return list(r) rcu = [] for x, y in r: for ix in range(-n, n + 1): for iy in range(-n, n + 1): if abs(ix) == n or abs(iy) == n: rcu.append(np.array([x + ix * L[0], y + iy * L[1]])) return rcu