text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit_lsq(self, x, y_obs, params_start=None): """ Fit curve by method of least squares. Parameters x : iterable Independent variable y_obs : iterable Dependent variable (values observed at x) params_start : iterable Optional start values for all parameters. Default 1. Returns ------- array Best fit values of parameters Notes ----- If least squares fit does not converge, ValueError is raised with convergence message. """
# Set up variables x = np.atleast_1d(x) y_obs = np.atleast_1d(y_obs) if not params_start: params_start = np.ones(self.n_parameters) # Error checking if len(x) != len(y_obs): raise ValueError, "x and y_obs must be the same length" if len(params_start) != self.n_parameters: raise ValueError, "Incorrect number of values in params_start" # Calculate fit def residuals(params, x, y_obs): y_pred = self.vals(x, *params) return y_obs - y_pred params_fit, _, _, msg, ier = optimize.leastsq(residuals, params_start, args=(x, y_obs), full_output=True) # Check for convergence if ier > 4: raise ValueError, ("Least squares fit did not converge with " "message %s" % msg) return tuple(params_fit)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit_lsq(self, df): """ Parameterize generic SAR curve from empirical data set Parameters df : DataFrame Result data frame from empirical SAR analysis Notes ----- Simply returns S0 and N0 from empirical SAR output, which are two fixed parameters of METE SAR and EAR. This simply returns n_spp and n_individs from the 1,1 division in the dataframe. An error will be thrown if this division is not present The ``fit_lsq`` is retained for consistency with other curves. """
tdf = df.set_index('div') return tdf.ix['1,1']['n_spp'], tdf.ix['1,1']['n_individs']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def after_insert(mapper, connection, target): """Update reply order cache and send record-after-update signal."""
record_after_update.send(CmtRECORDCOMMENT, recid=target.id_bibrec) from .api import get_reply_order_cache_data if target.in_reply_to_id_cmtRECORDCOMMENT > 0: parent = CmtRECORDCOMMENT.query.get( target.in_reply_to_id_cmtRECORDCOMMENT) if parent: trans = connection.begin() parent_reply_order = parent.reply_order_cached_data \ if parent.reply_order_cached_data else '' parent_reply_order += get_reply_order_cache_data(target.id) connection.execute( db.update(CmtRECORDCOMMENT.__table__). where(CmtRECORDCOMMENT.id == parent.id). values(reply_order_cached_data=parent_reply_order)) trans.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_collapsed(self, id_user): """Return true if the comment is collapsed by user."""
return CmtCOLLAPSED.query.filter(db.and_( CmtCOLLAPSED.id_bibrec == self.id_bibrec, CmtCOLLAPSED.id_cmtRECORDCOMMENT == self.id, CmtCOLLAPSED.id_user == id_user)).count() > 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def collapse(self, id_user): """Collapse comment beloging to user."""
c = CmtCOLLAPSED(id_bibrec=self.id_bibrec, id_cmtRECORDCOMMENT=self.id, id_user=id_user) db.session.add(c) db.session.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expand(self, id_user): """Expand comment beloging to user."""
CmtCOLLAPSED.query.filter(db.and_( CmtCOLLAPSED.id_bibrec == self.id_bibrec, CmtCOLLAPSED.id_cmtRECORDCOMMENT == self.id, CmtCOLLAPSED.id_user == id_user)).delete(synchronize_session=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count(cls, *criteria, **filters): """Count how many comments."""
return cls.query.filter(*criteria).filter_by(**filters).count()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_version(version=None): """Returns a tuple of the django version. If version argument is non-empty, then checks for correctness of the tuple provided. """
if version[4] > 0: # 0.2.1-alpha.1 return "%s.%s.%s-%s.%s" % (version[0], version[1], version[2], version[3], version[4]) elif version[3] != '': # 0.2.1-alpha return "%s.%s.%s-%s" % (version[0], version[1], version[2], version[3]) elif version[2] > 0: # 0.2.1 return "%s.%s.%s" % (version[0], version[1], version[2]) else: # 0.2 return "%s.%s" % (version[0], version[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _push_packet(self, packet): """ Appends a packet to the internal read queue, or notifies a waiting listener that a packet just came in. """
self._read_queue.append((decode(packet), packet)) if self._read_waiter is not None: w, self._read_waiter = self._read_waiter, None w.set_result(None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_data(self): """ Reads data from the connection and adds it to _push_packet, until the connection is closed or the task in cancelled. """
while True: try: data = yield from self._socket.recv() except asyncio.CancelledError: break except ConnectionClosed: break self._push_packet(data) self._loop.call_soon(self.close)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_message(self): """ Waits until a connection is available on the wire, or until the connection is in a state that it can't accept messages. It returns True if a message is available, False otherwise. """
if self._state != states['open']: return False if len(self._read_queue) > 0: return True assert self._read_waiter is None or self._read_waiter.cancelled(), \ "You may only use one wait_message() per connection." self._read_waiter = asyncio.Future(loop=self._loop) yield from self._read_waiter return self.wait_message()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_reservation_ports(session, reservation_id, model_name='Generic Traffic Generator Port'): """ Get all Generic Traffic Generator Port in reservation. :return: list of all Generic Traffic Generator Port resource objects in reservation """
reservation_ports = [] reservation = session.GetReservationDetails(reservation_id).ReservationDescription for resource in reservation.Resources: if resource.ResourceModelName == model_name: reservation_ports.append(resource) return reservation_ports
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_reservation_resources(session, reservation_id, *models): """ Get all resources of given models in reservation. :param session: CloudShell session :type session: cloudshell.api.cloudshell_api.CloudShellAPISession :param reservation_id: active reservation ID :param models: list of requested models :return: list of all resources of models in reservation """
models_resources = [] reservation = session.GetReservationDetails(reservation_id).ReservationDescription for resource in reservation.Resources: if resource.ResourceModelName in models: models_resources.append(resource) return models_resources
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_current_container_id(read_from='/proc/self/cgroup'): """ Get the ID of the container the application is currently running in, otherwise return `None` if not running in a container. This is a best-effort guess, based on cgroups. :param read_from: the cgroups file to read from (default: `/proc/self/cgroup`) """
if not os.path.exists(read_from): return with open(read_from, 'r') as cgroup: for line in cgroup: if re.match('.*/[0-9a-f]{64}$', line.strip()): return re.sub('.*/([0-9a-f]{64})$', '\\1', line.strip())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_configuration(key, path=None, default=None, single_config=False, fallback_to_env=True): """ Read configuration from a file, Docker config or secret or from the environment variables. :param key: the configuration key :param path: the path of the configuration file (regular file or Docker config or secret) :param default: the default value when not found elsewhere (default: `None`) :param single_config: treat the configuration file as containing the full configuration, otherwise the file is expected to be a '=' separated key-value list line by line (default: `False`) :param fallback_to_env: look for the configuration key in the environment variables if not found elsewhere (default: `True`) """
if path and os.path.exists(path): with open(path, 'r') as config_file: if single_config: return config_file.read() for line in config_file: if line.startswith('%s=' % key): return line.split('=', 1)[1].strip() if fallback_to_env and key in os.environ: return os.environ[key] return default
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def CleanString(s): """Cleans up string. Doesn't catch everything, appears to sometimes allow double underscores to occur as a result of replacements. """
punc = (' ', '-', '\'', '.', '&amp;', '&', '+', '@') pieces = [] for part in s.split(): part = part.strip() for p in punc: part = part.replace(p, '_') part = part.strip('_') part = part.lower() pieces.append(part) return '_'.join(pieces)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DedupVcardFilenames(vcard_dict): """Make sure every vCard in the dictionary has a unique filename."""
remove_keys = [] add_pairs = [] for k, v in vcard_dict.items(): if not len(v) > 1: continue for idx, vcard in enumerate(v): fname, ext = os.path.splitext(k) fname = '{}-{}'.format(fname, idx + 1) fname = fname + ext assert fname not in vcard_dict add_pairs.append((fname, vcard)) remove_keys.append(k) for k, v in add_pairs: vcard_dict[k].append(v) for k in remove_keys: vcard_dict.pop(k) return vcard_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def WriteVcard(filename, vcard, fopen=codecs.open): """Writes a vCard into the given filename."""
if os.access(filename, os.F_OK): logger.warning('File exists at "{}", skipping.'.format(filename)) return False try: with fopen(filename, 'w', encoding='utf-8') as f: logger.debug('Writing {}:\n{}'.format(filename, u(vcard.serialize()))) f.write(u(vcard.serialize())) except OSError: logger.error('Error writing to file "{}", skipping.'.format(filename)) return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def dial(self, target): ''' connects to a node :param url: string (optional) - resource in which to connect. if not provided, will use default for the stage :returns: provider, error ''' if not target: return None, "target network must be specified with -t or --target" url = get_url(self.config, target) try: if url.startswith('ws'): self.w3 = Web3(WebsocketProvider(url)) elif url.startswith('http'): self.w3 = Web3(HTTPProvider(url)) elif url.endswith('ipc'): if url == 'ipc': url = None self.w3 = Web3(Web3.IPCProvider(url)) else: return None, "Invalid Provider URL: {}".format(url) except Exception as e: return None, e return self.w3, None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def load(filepath): ''' loads a contract file :param filepath: (string) - contract filename :return: source, err ''' try: with open(filepath, "r") as fh: source = fh.read() except Exception as e: return None, e return source, None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def compile(source, ezo): ''' compiles the source code :param source: (string) - contract source code :param ezo: - ezo reference for Contract object creation :return: (list) compiled source ''' try: compiled = compile_source(source) compiled_list = [] for name in compiled: c = Contract(name, ezo) interface = compiled[name] c.abi = interface['abi'] c.bin = interface['bin'] compiled_list.append(c) except Exception as e: return None, e return compiled_list, None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_address(name, hash, db, target=None): ''' fetches the contract address of deployment :param hash: the contract file hash :return: (string) address of the contract error, if any ''' key = DB.pkey([EZO.DEPLOYED, name, target, hash]) d, err = db.get(key) if err: return None, err if not d: return None, None return d['address'].lower(), None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def put(contract_name, abi): ''' save the contract's ABI :param contract_name: string - name of the contract :param abi: the contract's abi JSON file :return: None, None if saved okay None, error is an error ''' if not Catalog.path: return None, "path to catalog must be set before saving to it" if not contract_name: return None, "contract name must be provided before saving" if not abi: return None, "contract ABI missing" abi_file = "{}/{}.abi".format(Catalog.path, contract_name) try: with open(abi_file, "w+") as file: file.write(abi) except Exception as e: return None, "Catalog.put error: {}".format(e) return None, None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_creators(self, attribute='creatorName'): """Get DataCite creators."""
if 'creators' in self.xml: if isinstance(self.xml['creators']['creator'], list): return [c[attribute] for c in self.xml['creators']['creator']] else: return self.xml['creators']['creator'][attribute] return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_dates(self): """Get DataCite dates."""
if 'dates' in self.xml: if isinstance(self.xml['dates']['date'], dict): return self.xml['dates']['date'].values()[0] return self.xml['dates']['date'] return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_description(self, description_type='Abstract'): """Get DataCite description."""
if 'descriptions' in self.xml: if isinstance(self.xml['descriptions']['description'], list): for description in self.xml['descriptions']['description']: if description_type in description: return description[description_type] elif isinstance(self.xml['descriptions']['description'], dict): description = self.xml['descriptions']['description'] if description_type in description: return description[description_type] elif len(description) == 1: # return the only description return description.values()[0] return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def itemgetter(iterable, indexes): ''' same functionality as operator.itemgetter except, this one supports both positive and negative indexing of generators as well ''' indexes = indexes if isinstance(indexes, tuple) else tuple(indexes) assert all(isinstance(i, int) for i in indexes), 'indexes needs to be a tuple of ints' positive_indexes=[i for i in indexes if i>=0] negative_indexes=[i for i in indexes if i<0] out = {} if len(negative_indexes): # if there are any negative indexes negative_index_buffer = deque(maxlen=min(indexes)*-1) for i,x in enumerate(iterable): if i in positive_indexes: out[i]=x negative_index_buffer.append(x) out.update({ni:negative_index_buffer[ni] for ni in negative_indexes}) else: # if just positive results out.update({i:x for i,x in enumerate(iterable) if i in positive_indexes}) return itemgetter(*indexes)(out)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render(self, context): """Handle the actual rendering. """
user = self._get_value(self.user_key, context) feature = self._get_value(self.feature, context) if feature is None: return '' allowed = show_feature(user, feature) return self.nodelist.render(context) if allowed else ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_value(self, key, context): """Works out whether key is a value or if it's a variable referencing a value in context and returns the correct value. """
string_quotes = ('"', "'") if key[0] in string_quotes and key[-1] in string_quotes: return key[1:-1] if key in string.digits: return int(key) return context.get(key, None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def client(self, client_name, **params): """Initialize OAuth client from registry."""
if client_name not in self.cfg.clients: raise OAuthException('Unconfigured client: %s' % client_name) if client_name not in ClientRegistry.clients: raise OAuthException('Unsupported services: %s' % client_name) params = dict(self.cfg.clients[client_name], **params) return ClientRegistry.clients[client_name](**params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def login(self, client_name, request, redirect_uri=None, **params): """Process login with OAuth. :param client_name: A name one of configured clients :param request: Web request :param redirect_uri: An URI for authorization redirect """
client = self.client(client_name, logger=self.app.logger) redirect_uri = redirect_uri or self.cfg.redirect_uri or '%s://%s%s' % ( request.scheme, request.host, request.path) session = await self.app.ps.session(request) if isinstance(client, OAuth1Client): oauth_verifier = request.query.get('oauth_verifier') if not oauth_verifier: # Get request credentials token, secret = await client.get_request_token( oauth_callback=redirect_uri) # Save the credentials in current user session session['oauth_token'] = token session['oauth_token_secret'] = secret url = client.get_authorize_url() raise muffin.HTTPFound(url) # Check request_token oauth_token = request.query.get('oauth_token') if session['oauth_token'] != oauth_token: raise muffin.HTTPForbidden(reason='Invalid token.') client.oauth_token = oauth_token client.oauth_token_secret = session.get('oauth_token_secret') # Get access tokens return client, await client.get_access_token(oauth_verifier) if isinstance(client, OAuth2Client): code = request.query.get('code') if not code: # Authorize an user state = sha1(str(random()).encode('ascii')).hexdigest() session['oauth_secret'] = state url = client.get_authorize_url( redirect_uri=redirect_uri, state=state, **params) raise muffin.HTTPFound(url) # Check state state = request.query.get('state') oauth_secret = session.pop('oauth_secret', '') if oauth_secret != state: raise muffin.HTTPForbidden(reason='Invalid token "%s".' % oauth_secret) # Get access token return client, await client.get_access_token(code, redirect_uri=redirect_uri) return client
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refresh(self, client_name, refresh_token, **params): """Get refresh token. :param client_name: A name one of configured clients :param redirect_uri: An URI for authorization redirect :returns: a coroutine """
client = self.client(client_name, logger=self.app.logger) return client.get_access_token(refresh_token, grant_type='refresh_token', **params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def chain(*args): """itertools.chain, just better"""
has_iter = partial(hasattr, name='__iter__') # check if a single iterable is being passed for # the case that it's a generator of generators if len(args) == 1 and hasattr(args[0], '__iter__'): args = args[0] for arg in args: # if the arg is iterable if hasattr(arg, '__iter__'): # iterate through it for i in arg: yield i # otherwise else: # yield the whole argument yield arg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_celcius_commands(): """Query cron for all celcius commands"""
p = subprocess.Popen(["crontab", "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() return [x for x in out.split('\n') if 'CJOBID' in x]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def VcardFieldsEqual(field1, field2): """Handle comparing vCard fields where inputs are lists of components. Handle parameters? Are any used aside from 'TYPE'? Note: force cast to string to compare sub-objects like Name and Address """
field1_vals = set([ str(f.value) for f in field1 ]) field2_vals = set([ str(f.value) for f in field2 ]) if field1_vals == field2_vals: return True else: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def VcardMergeListFields(field1, field2): """Handle merging list fields that may include some overlap."""
field_dict = {} for f in field1 + field2: field_dict[str(f)] = f return list(field_dict.values())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def SetVcardField(new_vcard, field_name, values): """Set vCard field values and parameters on a new vCard."""
for val in values: new_field = new_vcard.add(field_name) new_field.value = val.value if val.params: new_field.params = val.params return new_vcard
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def CopyVcardFields(new_vcard, auth_vcard, field_names): """Copy vCard field values from an authoritative vCard into a new one."""
for field in field_names: value_list = auth_vcard.contents.get(field) new_vcard = SetVcardField(new_vcard, field, value_list) return new_vcard
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def MergeVcards(vcard1, vcard2): """Create a new vCard and populate it."""
new_vcard = vobject.vCard() vcard1_fields = set(vcard1.contents.keys()) vcard2_fields = set(vcard2.contents.keys()) mutual_fields = vcard1_fields.intersection(vcard2_fields) logger.debug('Potentially conflicting fields: {}'.format(mutual_fields)) for field in mutual_fields: val1 = vcard1.contents.get(field) val2 = vcard2.contents.get(field) new_values = [] if not VcardFieldsEqual(val1, val2): # we have a conflict, if a list maybe append otherwise prompt user if field not in MERGEABLE_FIELDS: context_str = GetVcardContextString(vcard1, vcard2) new_values.extend(SelectFieldPrompt(field, context_str, val1, val2)) else: new_values.extend(VcardMergeListFields(val1, val2)) else: new_values.extend(val1) logger.debug('Merged values for field {}: {}'.format( field.upper(), u(str(new_values))) ) new_vcard = SetVcardField(new_vcard, field, new_values) new_vcard = CopyVcardFields(new_vcard, vcard1, vcard1_fields - vcard2_fields) new_vcard = CopyVcardFields(new_vcard, vcard2, vcard2_fields - vcard1_fields) return new_vcard
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def SelectFieldPrompt(field_name, context_str, *options): """Prompts user to pick from provided options. It is possible to provide a function as an option although it is not yet tested. This could allow a user to be prompted to provide their own value rather than the listed options. Args: field_name (string): Name of the field. context_str (string): Printed to give the user context. options: Variable arguments, should be vobject Components in a list. As retrieved from a vCard.contents dictionary. Returns: One of the options passed in. Ideally always a list. """
option_format_str = '[ {} ] "{}"' option_dict = {} print(context_str) print('Please select one of the following options for field "{}"'.format( field_name) ) for cnt, option in enumerate(options): option_dict['{}'.format(cnt + 1)] = option if not callable(option): print(option_format_str.format(cnt + 1, u(str(option)))) else: print(option_format_str.format(cnt + 1, option.__name__)) choice = None while choice not in option_dict: choice = input('option> ').strip() new_value = option_dict[choice] if callable(new_value): return new_value() else: return new_value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_fixture(model_class, **kwargs): """ Take the model_klass and generate a fixure for it Args: model_class (MongoEngine Document): model for which a fixture is needed kwargs (dict): any overrides instead of random values Returns: dict for now, other fixture types are not implemented yet """
all_fields = get_fields(model_class) fields_for_random_generation = map( lambda x: getattr(model_class, x), all_fields ) overrides = {} for kwarg, value in kwargs.items(): if kwarg in all_fields: kwarg_field = getattr(model_class, kwarg) fields_for_random_generation.remove(kwarg_field) overrides.update({kwarg_field: value}) random_values = get_random_values(fields_for_random_generation) values = dict(overrides, **random_values) assert len(all_fields) == len(values), ( "Mismatch in values, {} != {}".format( len(all_fields), len(values) ) ) data = {k.name: v for k, v in values.items()} return model_class(**data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_fields(model_class): """ Pass in a mongo model class and extract all the attributes which are mongoengine fields Returns: list of strings of field attributes """
return [ attr for attr, value in model_class.__dict__.items() if issubclass(type(value), (mongo.base.BaseField, mongo.EmbeddedDocumentField)) # noqa ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_page_meta(self, page): """ Cache the page meta from the frontmatter and assign new keys The cache data will be used to build links or other properties """
meta = self._pages_meta.get(page) if not meta: src_file = os.path.join(self.pages_dir, page) with open(src_file) as f: _, _ext = os.path.splitext(src_file) markup = _ext.replace(".", "") _meta, _ = frontmatter.parse(f.read()) meta = self.default_page_meta.copy() meta["meta"].update(self.config.get("site.meta", {})) meta.update(_meta) dest_file, url = self._get_dest_file_and_url(page, meta) meta["url"] = url meta["filepath"] = dest_file if meta.get("markup") is None: meta["markup"] = markup self._pages_meta[page] = meta return meta
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_page_content(self, page): """ Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page) with open(src_file) as f: _meta, content = frontmatter.parse(f.read()) return content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs): """ Build the A HREF LINK To a page."""
anchor = "" if "#" in page: page, anchor = page.split("#") anchor = "#" + anchor meta = self._get_page_meta(page) return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format( url=meta.get("url", "/") + anchor, text=text or meta.get("title") or title, title=title or "", _class=_class, id=id )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _url_to(self, page): """ Get the url of a page """
anchor = "" if "#" in page: page, anchor = page.split("#") anchor = "#" + anchor meta = self._get_page_meta(page) return meta.get("url")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_dest_file_and_url(self, filepath, page_meta={}): """ Return tuple of the file destination and url """
filename = filepath.split("/")[-1] filepath_base = filepath.replace(filename, "").rstrip("/") slug = page_meta.get("slug") fname = slugify(slug) if slug else filename \ .replace(".html", "") \ .replace(".md", "") \ .replace(".jade", "") if page_meta.get("pretty_url") is False: dest_file = os.path.join(filepath_base, "%s.html" % fname) else: dest_dir = filepath_base if filename not in ["index.html", "index.md", "index.jade"]: dest_dir = os.path.join(filepath_base, fname) dest_file = os.path.join(dest_dir, "index.html") url = "/" + dest_file.replace("index.html", "") return dest_file, url
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_static(self): """ Build static files """
if not os.path.isdir(self.build_static_dir): os.makedirs(self.build_static_dir) copy_tree(self.static_dir, self.build_static_dir) if self.webassets_cmd: self.webassets_cmd.build()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_pages(self): """Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir): base_dir = root.replace(self.pages_dir, "").lstrip("/") if not base_dir.startswith("_"): for f in files: src_file = os.path.join(base_dir, f) self._build_page(src_file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def publish(self, target="S3", sitename=None, purge_files=True): """ To publish programatically :param target: Where to pusblish at, S3 :param sitename: The site name :param purge_files: if True, it will delete old files :return: """
self.build() endpoint = self.config.get("hosting.%s" % target) if target.upper() == "S3": p = publisher.S3Website(sitename=sitename or self.config.get("sitename"), aws_access_key_id=endpoint.get("aws_access_key_id"), aws_secret_access_key=endpoint.get("aws_secret_access_key"), region=endpoint.get("aws_region")) if not p.website_exists: if p.create_website() is True: # Need to give it enough time to create it # Should be a one time thing time.sleep(10) p.create_www_website() p.create_manifest_from_s3_files() if purge_files: exclude_files = endpoint.get("purge_exclude_files", []) p.purge_files(exclude_files=exclude_files) p.upload(self.build_dir) return p.website_endpoint_url
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def sort_matches(matches): '''Sorts a ``list`` of matches best to worst''' multipliers = {'exact':10**5,'fname':10**4,'fuzzy':10**2,'fuzzy_fragment':1} matches = [(multipliers[x.type]*(x.amount if x.amount else 1),x) for x in matches] return [x[1] for x in sorted(matches,reverse=True)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all(self): """Get all ObjectRocket instances the current client has access to. :returns: A list of :py:class:`bases.BaseInstance` instances. :rtype: list """
response = requests.get(self._url, **self._default_request_kwargs) data = self._get_response_data(response) return self._concrete_instance_list(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(self, name, plan, zone, service_type='mongodb', instance_type='mongodb_sharded', version='2.4.6'): """Create an ObjectRocket instance. :param str name: The name to give to the new instance. :param int plan: The plan size of the new instance. :param str zone: The zone that the new instance is to exist in. :param str service_type: The type of service that the new instance is to provide. :param str instance_type: The instance type to create. :param str version: The version of the service the new instance is to provide. """
# Build up request data. url = self._url request_data = { 'name': name, 'service': service_type, 'plan': plan, 'type': instance_type, 'version': version, 'zone': zone } # Call to create an instance. response = requests.post( url, data=json.dumps(request_data), **self._default_request_kwargs ) # Log outcome of instance creation request. if response.status_code == 200: logger.info('Successfully created a new instance with: {}'.format(request_data)) else: logger.info('Failed to create instance with: {}'.format(request_data)) logger.info('Response: [{0}] {1}'.format(response.status_code, response.content)) data = self._get_response_data(response) return self._concrete_instance(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, instance_name): """Get an ObjectRocket instance by name. :param str instance_name: The name of the instance to retrieve. :returns: A subclass of :py:class:`bases.BaseInstance`, or None if instance does not exist. :rtype: :py:class:`bases.BaseInstance` """
url = self._url + instance_name + '/' response = requests.get(url, **self._default_request_kwargs) data = self._get_response_data(response) return self._concrete_instance(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _concrete_instance(self, instance_doc): """Concretize an instance document. :param dict instance_doc: A document describing an instance. Should come from the API. :returns: A subclass of :py:class:`bases.BaseInstance`, or None. :rtype: :py:class:`bases.BaseInstance` """
if not isinstance(instance_doc, dict): return None # Attempt to instantiate the appropriate class for the given instance document. try: service = instance_doc['service'] cls = self._service_class_map[service] return cls(instance_document=instance_doc, instances=self) # If construction fails, log the exception and return None. except Exception as ex: logger.exception(ex) logger.error( 'Instance construction failed. You probably need to upgrade to a more ' 'recent version of the client. Instance document which generated this ' 'warning: {}'.format(instance_doc) ) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _concrete_instance_list(self, instance_docs): """Concretize a list of instance documents. :param list instance_docs: A list of instance documents. Should come from the API. :returns: A list of :py:class:`bases.BaseInstance`s. :rtype: list """
if not instance_docs: return [] return list( filter(None, [self._concrete_instance(instance_doc=doc) for doc in instance_docs]) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, name): """Retrieves the cluster with the given name. :param str name: name of the cluster (identifier) :return: :py:class:`elasticluster.cluster.Cluster` """
path = self._get_cluster_storage_path(name) try: with open(path, 'r') as storage: cluster = self.load(storage) # Compatibility with previous version of Node for node in sum(cluster.nodes.values(), []): if not hasattr(node, 'ips'): log.debug("Monkey patching old version of `Node` class: %s", node.name) node.ips = [node.ip_public, node.ip_private] node.preferred_ip = None cluster.storage_file = path return cluster except IOError as ex: raise ClusterNotFound("Error accessing storage file %s: %s" % (path, ex))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_or_update(self, cluster): """Save or update the cluster to persistent state. :param cluster: cluster to save or update :type cluster: :py:class:`elasticluster.cluster.Cluster` """
if not os.path.exists(self.storage_path): os.makedirs(self.storage_path) path = self._get_cluster_storage_path(cluster.name) cluster.storage_file = path with open(path, 'wb') as storage: self.dump(cluster, storage)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _format(color, style=''): """Return a QTextCharFormat with the given attributes. """
_color = QColor() _color.setNamedColor(color) _format = QTextCharFormat() _format.setForeground(_color) if 'bold' in style: _format.setFontWeight(QFont.Bold) if 'italic' in style: _format.setFontItalic(True) return _format
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def highlightBlock(self, text): """Apply syntax highlighting to the given block of text. """
# Do other syntax formatting for expression, nth, format in self.rules: index = expression.indexIn(text, 0) while index >= 0: # We actually want the index of the nth match index = expression.pos(nth) length = len(expression.cap(nth)) self.setFormat(index, length, format) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) # Do multi-line strings in_multiline = self.match_multiline(text, *self.tri_single) if not in_multiline: in_multiline = self.match_multiline(text, *self.tri_double)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_multiline(self, text, delimiter, in_state, style): """Do highlighting of multi-line strings. ``delimiter`` should be a ``QRegExp`` for triple-single-quotes or triple-double-quotes, and ``in_state`` should be a unique integer to represent the corresponding state changes when inside those strings. Returns True if we're still inside a multi-line string when this function is finished. """
# If inside triple-single quotes, start at 0 if self.previousBlockState() == in_state: start = 0 add = 0 # Otherwise, look for the delimiter on this line else: start = delimiter.indexIn(text) # Move past this match add = delimiter.matchedLength() # As long as there's a delimiter match on this line... while start >= 0: # Look for the ending delimiter end = delimiter.indexIn(text, start + add) # Ending delimiter on this line? if end >= add: length = end - start + add + delimiter.matchedLength() self.setCurrentBlockState(0) # No; multi-line string else: self.setCurrentBlockState(in_state) length = len(text) - start + add # Apply formatting self.setFormat(start, length, style) # Look for the next match start = delimiter.indexIn(text, start + length) # Return True if still inside a multi-line string, False otherwise if self.currentBlockState() == in_state: return True else: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config(name='SEARCH_URL', default='simple://'): """Returns configured SEARCH dictionary from SEARCH_URL"""
config = {} s = env(name, default) if s: config = parse_search_url(s) return config
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wash_urlargd(form, content): """Wash the complete form based on the specification in content. Content is a dictionary containing the field names as a key, and a tuple (type, default) as value. 'type' can be list, unicode, legacy.wsgi.utils.StringField, int, tuple, or legacy.wsgi.utils.Field (for file uploads). The specification automatically includes the 'ln' field, which is common to all queries. Arguments that are not defined in 'content' are discarded. .. note:: In case `list` or `tuple` were asked for, we assume that `list` or `tuple` of strings is to be returned. Therefore beware when you want to use ``wash_urlargd()`` for multiple file upload forms. :returns: argd dictionary that can be used for passing function parameters by keywords. """
result = {} for k, (dst_type, default) in content.items(): try: value = form[k] except KeyError: result[k] = default continue src_type = type(value) # First, handle the case where we want all the results. In # this case, we need to ensure all the elements are strings, # and not Field instances. if src_type in (list, tuple): if dst_type is list: result[k] = [x for x in value] continue if dst_type is tuple: result[k] = tuple([x for x in value]) continue # in all the other cases, we are only interested in the # first value. value = value[0] # Allow passing argument modyfing function. if isinstance(dst_type, types.FunctionType): result[k] = dst_type(value) continue # Maybe we already have what is expected? Then don't change # anything. if isinstance(value, dst_type): result[k] = value continue # Since we got here, 'value' is sure to be a single symbol, # not a list kind of structure anymore. if dst_type in (int, float, long, bool): try: result[k] = dst_type(value) except: result[k] = default elif dst_type is tuple: result[k] = (value, ) elif dst_type is list: result[k] = [value] else: raise ValueError( 'cannot cast form value %s of type %r into type %r' % ( value, src_type, dst_type)) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wash_html_id(dirty): """Strip non-alphabetic or newline characters from a given string. It can be used as a HTML element ID (also with jQuery and in all browsers). :param dirty: the string to wash :returns: the HTML ID ready string """
import re if not dirty[0].isalpha(): # we make sure that the first character is a lowercase letter dirty = 'i' + dirty non_word = re.compile(r'[^\w]+') return non_word.sub('', dirty)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def quote(self, text=None): """Quote this post. Parameters text : str Text to quote. Defaults to the whole text of the post. Returns ------- str A NationStates bbCode quote of the post. """
text = text or re.sub(r'\[quote=.+?\[/quote\]', '', self.text, flags=re.DOTALL ).strip('\n') return f'[quote={self.author.id};{self.id}]{text}[/quote]'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def factbook(self, root): """Region's World Factbook Entry. Returns ------- an :class:`ApiQuery` of str """
# This lib might have been a mistake, but the line below # definitely isn't. return html.unescape(html.unescape(root.find('FACTBOOK').text))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def delegate(self, root): """Regional World Assembly Delegate. Returns ------- an :class:`ApiQuery` of :class:`Nation` an :class:`ApiQuery` of None If the region has no delegate. """
nation = root.find('DELEGATE').text if nation == '0': return None return aionationstates.Nation(nation)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def founder(self, root): """Regional Founder. Returned even if the nation has ceased to exist. Returns ------- an :class:`ApiQuery` of :class:`Nation` an :class:`ApiQuery` of None If the region is Game-Created and doesn't have a founder. """
nation = root.find('FOUNDER').text if nation == '0': return None return aionationstates.Nation(nation)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def officers(self, root): """Regional Officers. Does not include the Founder or the Delegate, unless they have additional titles as Officers. In the correct order. Returns ------- an :class:`ApiQuery` of a list of :class:`Officer` """
officers = sorted( root.find('OFFICERS'), # I struggle to say what else this tag would be useful for. key=lambda elem: int(elem.find('ORDER').text) ) return [Officer(elem) for elem in officers]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def messages(self): """Iterate through RMB posts from newest to oldest. Returns ------- an asynchronous generator that yields :class:`Post` """
# Messages may be posted on the RMB while the generator is running. oldest_id_seen = float('inf') for offset in count(step=100): posts_bunch = await self._get_messages(offset=offset) for post in reversed(posts_bunch): if post.id < oldest_id_seen: yield post oldest_id_seen = posts_bunch[0].id if len(posts_bunch) < 100: break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_empty(values, default=None): """ Eliminates None or empty items from lists, tuples or sets passed in. If values is None or empty after filtering, the default is returned. """
if values is None: return default elif hasattr(values, '__len__') and len(values) == 0: return default elif hasattr(values, '__iter__') and not isinstance(values, _filtered_types): filtered = type(values) if isinstance(values, _filter_types) else list values = filtered( v for v in values if not (v is None or (hasattr(v, '__len__') and len(v) == 0)) ) return default if len(values) == 0 else values return values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unit(self, unit): """Sets the unit of this Dimensions. :param unit: The unit of this Dimensions. :type: str """
allowed_values = ["cm", "inch", "foot"] # noqa: E501 if unit is not None and unit not in allowed_values: raise ValueError( "Invalid value for `unit` ({0}), must be one of {1}" # noqa: E501 .format(unit, allowed_values) ) self._unit = unit
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def map_names(lang="en"): """This resource returns an dictionary of the localized map names for the specified language. Only maps with events are listed - if you need a list of all maps, use ``maps.json`` instead. :param lang: The language to query the names for. :return: the response is a dictionary where the key is the map id and the value is the name of the map in the specified language. """
cache_name = "map_names.%s.json" % lang data = get_cached("map_names.json", cache_name, params=dict(lang=lang)) return dict([(item["id"], item["name"]) for item in data])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maps(map_id=None, lang="en"): """This resource returns details about maps in the game, including details about floor and translation data on how to translate between world coordinates and map coordinates. :param map_id: Only list this map. :param lang: Show localized texts in the specified language. The response is a dictionary where the key is the map id and the value is a dictionary containing the following properties: map_name (string) The map name. min_level (number) The minimal level of this map. max_level (number) The maximum level of this map. default_floor (number) The default floor of this map. floors (list) A list of available floors for this map. region_id (number) The id of the region this map belongs to. region_name (string) The name of the region this map belongs to. continent_id (number) The id of the continent this map belongs to. continent_name (string) The name of the continent this map belongs to. map_rect (rect) The dimensions of the map. continent_rect (rect) The dimensions of the map within the continent coordinate system. If a map_id is given, only the values for that map are returned. """
if map_id: cache_name = "maps.%s.%s.json" % (map_id, lang) params = {"map_id": map_id, "lang": lang} else: cache_name = "maps.%s.json" % lang params = {"lang": lang} data = get_cached("maps.json", cache_name, params=params).get("maps") return data.get(str(map_id)) if map_id else data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def map_floor(continent_id, floor, lang="en"): """This resource returns details about a map floor, used to populate a world map. All coordinates are map coordinates. The returned data only contains static content. Dynamic content, such as vendors, is not currently available. :param continent_id: The continent. :param floor: The map floor. :param lang: Show localized texts in the specified language. The response is an object with the following properties: texture_dims (dimension) The dimensions of the texture. clamped_view (rect) If present, it represents a rectangle of downloadable textures. Every tile coordinate outside this rectangle is not available on the tile server. regions (object) A mapping from region id to an object. Each region object contains the following properties: name (string) The region name. label_coord (coordinate) The coordinates of the region label. maps (object) A mapping from the map id to an object. Each map object contains the following properties: name (string) The map name. min_level (number) The minimum level of the map. max_level (number) The maximum level of the map. default_floor (number) The default floor of the map. map_rect (rect) The dimensions of the map. continent_rect (rect) The dimensions of the map within the continent coordinate system. points_of_interest (list) A list of points of interest (landmarks, waypoints and vistas) Each points of interest object contains the following properties: poi_id (number) The point of interest id. name (string) The name of the point of interest. type (string) The type. This can be either "landmark" for actual points of interest, "waypoint" for waypoints, or "vista" for vistas. floor (number) The floor of this object. coord (coordinate) The coordinates of this object. tasks (list) A list of renown hearts. Each task object contains the following properties: task_id (number) The renown heart id. objective (string) The objective or name of the heart. level (number) The level of the heart. coord (coordinate) The coordinates where it takes place. skill_challenges (list) A list of skill challenges. Each skill challenge object contains the following properties: coord (coordinate) The coordinates of this skill challenge. sectors (list) A list of areas within the map. Each sector object contains the following properties: sector_id (number) The area id. name (string) The name of the area. level (number) The level of the area. coord (coordinate) The coordinates of this area (this is usually the center position). Special types: Dimension properties are two-element lists of width and height. Coordinate properties are two-element lists of the x and y position. Rect properties are two-element lists of coordinates of the upper-left and lower-right coordinates. """
cache_name = "map_floor.%s-%s.%s.json" % (continent_id, floor, lang) params = {"continent_id": continent_id, "floor": floor, "lang": lang} return get_cached("map_floor.json", cache_name, params=params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_value_based_inclusive_interval(cls, field, max_value=None): """ This is applicable to fields with max_value and min_value as validators. Note: 1. This is different from fields with max_length as a validator 2. This means that the two methods based on value and length are almost the same method but for the max_* attribute that is being checked. Probably need to DRY this out at some point. """
if field.max_value is None: field.max_value = max_value or MAX_LENGTH if field.min_value is None: field.min_value = 0 Interval = namedtuple('interval', ['start', 'stop']) return Interval(start=field.min_value, stop=field.max_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _async_recv(self): """No raw bytes should escape from this, all byte encoding and decoding should be handling inside this function"""
logging.info("Receive loop started") recbuffer = b"" while not self._stop_event.is_set(): time.sleep(0.01) try: recbuffer = recbuffer + self._socket.recv(1024) data = recbuffer.split(b'\r\n') recbuffer = data.pop() if data: for line in data: self._process_data(line.decode(encoding='UTF-8', errors='ignore')) except BlockingIOError as e: pass logging.info("Receive loop stopped")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(arguments=None): """ The main function used when ``directory_script_runner.py`` is run as a single script from the cl, or when installed as a cl command """
# setup the command-line util settings su = tools( arguments=arguments, docString=__doc__, logLevel="WARNING", options_first=False, projectName="fundmentals" ) arguments, settings, log, dbConn = su.setup() # UNPACK REMAINING CL ARGUMENTS USING `EXEC` TO SETUP THE VARIABLE NAMES # AUTOMATICALLY for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) if successFlag and successFlag.lower() == "none": successFlag = None if failureFlag and failureFlag.lower() == "none": failureFlag = None directory_script_runner( log=log, pathToScriptDirectory=pathToDirectory, databaseName=databaseName, loginPath=loginPath, successRule=successFlag, failureRule=failureFlag ) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def weight_unit(self, weight_unit): """Sets the weight_unit of this MeasurementSettings. :param weight_unit: The weight_unit of this MeasurementSettings. :type: str """
allowed_values = ["pound", "kilogram"] # noqa: E501 if weight_unit is not None and weight_unit not in allowed_values: raise ValueError( "Invalid value for `weight_unit` ({0}), must be one of {1}" # noqa: E501 .format(weight_unit, allowed_values) ) self._weight_unit = weight_unit
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dimensions_unit(self, dimensions_unit): """Sets the dimensions_unit of this MeasurementSettings. :param dimensions_unit: The dimensions_unit of this MeasurementSettings. :type: str """
allowed_values = ["inch", "cm", "foot", "meter"] # noqa: E501 if dimensions_unit is not None and dimensions_unit not in allowed_values: raise ValueError( "Invalid value for `dimensions_unit` ({0}), must be one of {1}" # noqa: E501 .format(dimensions_unit, allowed_values) ) self._dimensions_unit = dimensions_unit
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def already_resolved(self, pattern: QueryTriple) -> bool: """ Determine whether pattern has already been loaded into the cache. The "wild card" - `(None, None, None)` - always counts as resolved. :param pattern: pattern to check :return: True it is a subset of elements already loaded """
if self.sparql_locked or pattern == (None, None, None): return True for resolved_node in self.resolved_nodes: if resolved_node != (None, None, None) and \ (pattern[0] == resolved_node[0] or resolved_node[0] is None) and \ (pattern[1] == resolved_node[1] or resolved_node[1] is None) and\ (pattern[2] == resolved_node[2] or resolved_node[2] is None): return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fix_base(fix_environ): """Activate the base compatibility."""
def _is_android(): import os vm_path = os.sep+"system"+os.sep+"bin"+os.sep+"dalvikvm" if os.path.exists(vm_path) or os.path.exists(os.sep+"system"+vm_path): return True try: import android del android # Unused import (imported only for Android detection) return True except ImportError: pass return False def _fix_android_environ(): import os if "LD_LIBRARY_PATH" not in os.environ: os.environ["LD_LIBRARY_PATH"] = "" lib_path = os.pathsep+"/system/lib"+os.pathsep+"/vendor/lib" if sys.python_bits == 64: lib_path = os.pathsep+"/system/lib64"+os.pathsep+"/vendor/lib64" + lib_path os.environ["LD_LIBRARY_PATH"] += lib_path if sys.platform.startswith("linux") and sys.platform != "linux-android": if _is_android(): sys.platform = "linux-android" elif "-" not in sys.platform: sys.platform = "linux" sys.platform_codename = sys.platform if sys.platform_codename == "win32": sys.platform_codename = "win" elif sys.platform_codename == "linux-android": sys.platform_codename = "android" if 'maxsize' in sys.__dict__: if sys.maxsize > 2**32: sys.python_bits = 64 else: sys.python_bits = 32 else: import struct sys.python_bits = 8 * struct.calcsize("P") if sys.python_bits == 32: sys.maxsize = 2147483647 else: sys.maxsize = int("9223372036854775807") if fix_environ and sys.platform == "linux-android": _fix_android_environ()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fix_subprocess(override_debug=False, override_exception=False): """Activate the subprocess compatibility."""
import subprocess # Exceptions if subprocess.__dict__.get("SubprocessError") is None: subprocess.SubprocessError = _Internal.SubprocessError if _InternalReferences.UsedCalledProcessError is None: if "CalledProcessError" in subprocess.__dict__: _subprocess_called_process_error(True, subprocess) else: _subprocess_called_process_error(False, subprocess) subprocess.CalledProcessError = _InternalReferences.UsedCalledProcessError def _check_output(*args, **kwargs): if "stdout" in kwargs: raise ValueError("stdout argument not allowed, " "it will be overridden.") process = subprocess.Popen(stdout=subprocess.PIPE, *args, **kwargs) stdout_data, __ = process.communicate() ret_code = process.poll() if ret_code is None: raise RuntimeWarning("The process is not yet terminated.") if ret_code: cmd = kwargs.get("args") if cmd is None: cmd = args[0] raise _InternalReferences.UsedCalledProcessError(returncode=ret_code, cmd=cmd, output=stdout_data) return stdout_data try: subprocess.check_output except AttributeError: subprocess.check_output = _check_output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fix_all(override_debug=False, override_all=False): """Activate the full compatibility."""
fix_base(True) fix_builtins(override_debug) fix_subprocess(override_debug, override_all) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smart_scrub(df,col_name,error_rate = 0): """ Scrubs from the front and back of an 'object' column in a DataFrame until the scrub would semantically alter the contents of the column. If only a subset of the elements in the column are scrubbed, then a boolean array indicating which elements have been scrubbed is appended to the dataframe. Returns a tuple of the strings removed from the front and back of the elements df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number, default 0 The maximum amount of values this function can ignore while scrubbing, expressed as a fraction of the total amount of rows in the dataframe. """
scrubf = smart_scrubf(df,col_name,error_rate) scrubb = smart_scrubb(df,col_name,error_rate) return (scrubf, scrubb)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smart_scrubf(df,col_name,error_rate = 0): """ Scrubs from the front of an 'object' column in a DataFrame until the scrub would semantically alter the contents of the column. If only a subset of the elements in the column are scrubbed, then a boolean array indicating which elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number, default 0 The maximum amount of values this function can ignore while scrubbing, expressed as a fraction of the total amount of rows in the dataframe. """
scrubbed = "" while True: valcounts = df[col_name].str[:len(scrubbed)+1].value_counts() if not len(valcounts): break if not valcounts[0] >= (1-error_rate) * _utils.rows(df): break scrubbed=valcounts.index[0] if scrubbed == '': return None which = df[col_name].str.startswith(scrubbed) _basics.col_scrubf(df,col_name,which,len(scrubbed),True) if not which.all(): new_col_name = _basics.colname_gen(df,"{}_sf-{}".format(col_name,scrubbed)) df[new_col_name] = which return scrubbed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smart_scrubb(df,col_name,error_rate = 0): """ Scrubs from the back of an 'object' column in a DataFrame until the scrub would semantically alter the contents of the column. If only a subset of the elements in the column are scrubbed, then a boolean array indicating which elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed. df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number, default 0 The maximum amount of values this function can ignore while scrubbing, expressed as a fraction of the total amount of rows in the dataframe. """
scrubbed = "" while True: valcounts = df[col_name].str[-len(scrubbed)-1:].value_counts() if not len(valcounts): break if not valcounts[0] >= (1-error_rate) * _utils.rows(df): break scrubbed=valcounts.index[0] if scrubbed == '': return None which = df[col_name].str.endswith(scrubbed) _basics.col_scrubb(df,col_name,which,len(scrubbed),True) if not which.all(): new_col_name = _basics.colname_gen(df,"{}_sb-{}".format(col_name,scrubbed)) df[new_col_name] = which return scrubbed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_all_for_order(cls, order_id, **kwargs): """Find shipping methods for order. Find all shipping methods suitable for an order. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True :param async bool :param str order_id: Order ID to get shipping methods for. (required) :return: page[ShippingMethod] If the method is called asynchronously, returns the request thread. """
kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._find_all_for_order_with_http_info(order_id, **kwargs) else: (data) = cls._find_all_for_order_with_http_info(order_id, **kwargs) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract(cls, obj): """ Extract span context from the given object :param Any obj: Object to use as context :return: a SpanContext instance extracted from the inner span object or None if no such span context could be found. """
span = cls.extract_span(obj) if span: return span.context
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fix_additional_fields(data): """description of fix_additional_fields"""
result = dict() for key, value in data.items(): if isinstance(value, dict): result.update(KserSpan.to_flat_dict(key, value)) else: result[key] = value return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_keys(cls, data): """Filter GELF record keys using exclude_patterns :param dict data: Log record has dict :return: the filtered log record :rtype: dict """
keys = list(data.keys()) for pattern in cls.EXCLUDE_PATTERNS: for key in keys: if re.match(pattern, key): keys.remove(key) return dict(filter(lambda x: x[0] in keys, data.items()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_config(self): """Write the configuration to a local file. :return: Boolean if successful """
json.dump( self.config, open(CONFIG_FILE, 'w'), indent=4, separators=(',', ': ') ) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_config(self, **kwargs): """Load the configuration for the user or seed it with defaults. :return: Boolean if successful """
virgin_config = False if not os.path.exists(CONFIG_PATH): virgin_config = True os.makedirs(CONFIG_PATH) if not os.path.exists(CONFIG_FILE): virgin_config = True if not virgin_config: self.config = json.load(open(CONFIG_FILE)) else: self.logger.info('[!] Processing whitelists, this may take a few minutes...') process_whitelists() if kwargs: self.config.update(kwargs) if virgin_config or kwargs: self.write_config() if 'api_key' not in self.config: sys.stderr.write('configuration missing API key\n') if 'email' not in self.config: sys.stderr.write('configuration missing email\n') if not ('api_key' in self.config and 'email' in self.config): sys.stderr.write('Errors have been reported. Run blockade-cfg ' 'to fix these warnings.\n') try: last_update = datetime.strptime(self.config['whitelist_date'], "%Y-%m-%d") current = datetime.now() delta = (current - last_update).days if delta > 14: self.logger.info('[!] Refreshing whitelists, this may take a few minutes...') process_whitelists() self.config['whitelist_date'] = datetime.now().strftime("%Y-%m-%d") self.write_config() except Exception as e: self.logger.error(str(e)) self.logger.info('[!] Processing whitelists, this may take a few minutes...') process_whitelists() self.config['whitelist_date'] = datetime.now().strftime("%Y-%m-%d") self.write_config() return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def chi2comb_cdf(q, chi2s, gcoef, lim=1000, atol=1e-4): r"""Function distribution of combination of chi-squared distributions. Parameters q : float Value point at which distribution function is to be evaluated. chi2s : ChiSquared Chi-squared distributions. gcoef : float Coefficient of the standard Normal distribution. lim : int Maximum number of integration terms. atol : float Absolute error tolerance. Returns ------- result : float Estimated c.d.f. evaluated at ``q``. error : int 0: completed successfully 1: required accuracy not achieved 2: round-off error possibly significant 3: invalid parameters 4: unable to locate integration parameters 5: out of memory info : Info Algorithm information. """
int_type = "i" if array(int_type, [0]).itemsize != ffi.sizeof("int"): int_type = "l" if array(int_type, [0]).itemsize != ffi.sizeof("int"): raise RuntimeError("Could not infer a proper integer representation.") if array("d", [0.0]).itemsize != ffi.sizeof("double"): raise RuntimeError("Could not infer a proper double representation.") q = float(q) c_chi2s = ffi.new("struct chi2comb_chisquareds *") c_info = ffi.new("struct chi2comb_info *") ncents = array("d", [float(i.ncent) for i in chi2s]) coefs = array("d", [float(i.coef) for i in chi2s]) dofs = array(int_type, [int(i.dof) for i in chi2s]) c_chi2s.ncents = ffi.cast("double *", ncents.buffer_info()[0]) c_chi2s.coefs = ffi.cast("double *", coefs.buffer_info()[0]) c_chi2s.dofs = ffi.cast("int *", dofs.buffer_info()[0]) c_chi2s.n = len(chi2s) result = ffi.new("double *") errno = c_chi2comb_cdf(q, c_chi2s, gcoef, lim, atol, c_info, result) info = Info() methods = ["emag", "niterms", "nints", "intv", "truc", "sd", "ncycles"] for i in methods: setattr(info, i, getattr(c_info, i)) return (result[0], errno, info)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_feature_model(): """Return the FeatureFlipper model defined in settings.py """
try: return apps.get_model(flipper_settings.FEATURE_FLIPPER_MODEL) except ValueError: raise ImproperlyConfigured( "FEATURE_FLIPPER_MODEL must be of the form 'app_label.model_name'") except LookupError: raise ImproperlyConfigured( "FEATURE_FLIPPER_MODEL refers to model '{}' that has not been" " installed".format(flipper_settings.FEATURE_FLIPPER_MODEL))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def show_feature(self, user, feature): """Return True or False for the given feature. """
user_filter = { self.model.USER_FEATURE_FIELD: user, } return self.get_feature(feature).filter( models.Q(**user_filter) | models.Q(everyone=True)).exists()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calc_z(bands, filling, interaction, hund_cu, name): """Calculates the quasiparticle weight of degenerate system of N-bands at a given filling within an interaction range and saves the file"""
while True: try: data = np.load(name+'.npz') break except IOError: dopout = [] for dop in filling: slsp = Spinon(slaves=2*bands, orbitals=bands, \ hopping=[0.5]*2*bands, populations=[dop]*2*bands) dopout.append(solve_loop(slsp, interaction, hund_cu)[0][0]) np.savez(name, zeta=dopout, u_int=interaction, doping=filling, hund=hund_cu) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def label_saves(name): """Labels plots and saves file"""
plt.legend(loc=0) plt.ylim([0, 1.025]) plt.xlabel('$U/D$', fontsize=20) plt.ylabel('$Z$', fontsize=20) plt.savefig(name, dpi=300, format='png', transparent=False, bbox_inches='tight', pad_inches=0.05)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_curves_z(data, name, title=None): """Generates a simple plot of the quasiparticle weight decay curves given data object with doping setup"""
plt.figure() for zet, c in zip(data['zeta'], data['doping']): plt.plot(data['u_int'], zet[:, 0], label='$n={}$'.format(str(c))) if title != None: plt.title(title) label_saves(name+'.png')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pick_flat_z(data): """Generate a 2D array of the quasiparticle weight by only selecting the first particle data"""
zmes = [] for i in data['zeta']: zmes.append(i[:, 0]) return np.asarray(zmes)