Dataset columns: "Unnamed: 0" (int64, index values 0 to ~389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).
374,500
def find_copy_constructor(type_):
    copy_ = type_.constructors(
        lambda x: is_copy_constructor(x),
        recursive=False,
        allow_empty=True)
    if copy_:
        return copy_[0]
    return None
Returns reference to copy constructor. Args: type_ (declarations.class_t): the class to be searched. Returns: declarations.constructor_t: the copy constructor
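A minimal usage sketch (assumes a pygccxml-style global namespace; `global_ns` and the class name are illustrative):
# Hypothetical usage: look up a class, then find its copy constructor.
cls = global_ns.class_('point_t')   # assumed pygccxml class_t lookup
ctor = find_copy_constructor(cls)
if ctor is not None:
    print(ctor)                     # declarations.constructor_t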
374,501
def get_stock_basicinfo(self, market, stock_type=SecurityType.STOCK, code_list=None):
    # dict keys and column names reconstructed from the docstring table
    param_table = {'market': market, 'stock_type': stock_type}
    for x in param_table:
        param = param_table[x]
        if param is None or is_str(param) is False:
            error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
            return RET_ERROR, error_str
    if code_list is not None:
        if is_str(code_list):
            code_list = code_list.split(',')
        elif isinstance(code_list, list):
            pass
        else:
            return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
    query_processor = self._get_sync_query_processor(
        StockBasicInfoQuery.pack_req, StockBasicInfoQuery.unpack_rsp)
    kargs = {
        "market": market,
        'stock_type': stock_type,
        'code_list': code_list,
        'conn_id': self.get_sync_conn_id()
    }
    ret_code, msg, basic_info_list = query_processor(**kargs)
    if ret_code != RET_OK:
        return ret_code, msg
    col_list = [
        'code', 'name', 'lot_size', 'stock_type', 'stock_child_type',
        'stock_owner', 'option_type', 'strike_time', 'strike_price',
        'suspension', 'listing_date', 'stock_id', 'delisting'
    ]
    basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)
    return RET_OK, basic_info_table
Get basic stock information for a specific security type in a given market.
:param market: market type, futuquant.common.constant.Market
:param stock_type: security type, futuquant.common.constant.SecurityType
:param code_list: if not None, an iterable of stock codes; only info for the specified stocks is returned
:return: (ret_code, content) — when ret_code equals RET_OK, content is a Pandas.DataFrame; otherwise it is an error string. Columns are as follows
================= =========== ==============================================================================
Parameter         Type        Description
================= =========== ==============================================================================
code              str         stock code
name              str         name
lot_size          int         shares per lot
stock_type        str         security type, see SecurityType
stock_child_type  str         warrant subtype, see WrtType
stock_owner       str         code of the underlying stock
option_type       str         option type, Qot_Common.OptionType
strike_time       str         exercise date
strike_price      float       strike price
suspension        bool        whether trading is suspended (True means suspended)
listing_date      str         listing date
stock_id          int         stock id
delisting         bool        whether delisted
================= =========== ==============================================================================
:example:
.. code-block:: python
    from futuquant import *
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    print(quote_ctx.get_stock_basicinfo(Market.HK, SecurityType.WARRANT))
    print(quote_ctx.get_stock_basicinfo(Market.US, SecurityType.DRVT, 'US.AAPL190621C140000'))
    quote_ctx.close()
374,502
def get_item_abspath(self, identifier):
    admin_metadata = self.get_admin_metadata()
    uuid = admin_metadata["uuid"]
    dataset_cache_abspath = os.path.join(self._irods_cache_abspath, uuid)
    mkdir_parents(dataset_cache_abspath)
    irods_item_path = os.path.join(self._data_abspath, identifier)
    relpath = self._get_metadata_with_cache(irods_item_path, "handle")
    _, ext = os.path.splitext(relpath)
    local_item_abspath = os.path.join(
        dataset_cache_abspath, identifier + ext)
    if not os.path.isfile(local_item_abspath):
        tmp_local_item_abspath = local_item_abspath + ".tmp"
        _get_file_forcefully(irods_item_path, tmp_local_item_abspath)
        os.rename(tmp_local_item_abspath, local_item_abspath)
    return local_item_abspath
Return absolute path at which item content can be accessed. :param identifier: item identifier :returns: absolute path from which the item content can be accessed
374,503
def remove_relation(post_id, tag_id):
    entry = TabPost2Tag.delete().where(
        (TabPost2Tag.post_id == post_id) &
        (TabPost2Tag.tag_id == tag_id)
    )
    entry.execute()
    MCategory.update_count(tag_id)
Delete the relation record linking a post to a tag.
374,504
def _perform_validation(self, path, value, results):
    name = path if path != None else "value"
    value = ObjectReader.get_value(value)
    super(ArraySchema, self)._perform_validation(path, value, results)
    if value == None:
        return
    if isinstance(value, list) or isinstance(value, set) or isinstance(value, tuple):
        index = 0
        for element in value:
            element_path = str(index) if path == None or len(path) == 0 else path + "." + str(index)
            self._perform_type_validation(element_path, self.value_type, element, results)
            index += 1
    else:
        results.append(
            ValidationResult(
                path,
                ValidationResultType.Error,
                "VALUE_ISNOT_ARRAY",
                name + " type must be List or Array",
                "List",
                type(value)
            )
        )
Validates a given value against the schema and configured validation rules. :param path: a dot notation path to the value. :param value: a value to be validated. :param results: a list with validation results to add new results.
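A hedged usage sketch (the constructor call and schema wiring are illustrative assumptions):
# Hypothetical: validate a list value against an ArraySchema instance.
results = []
schema = ArraySchema()                 # assumed constructor; value_type left unset
schema._perform_validation('items', [1, 2, 3], results)
print(results)                         # an empty list means no validation errors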
374,505
def correlation(s, o):
    if s.size == 0:
        corr = np.NaN
    else:
        corr = np.corrcoef(o, s)[0, 1]
    return corr
correlation coefficient input: s: simulated o: observed output: correlation: correlation coefficient
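For instance (illustrative values):
import numpy as np
s = np.array([1.0, 2.0, 3.0])   # simulated
o = np.array([1.1, 1.9, 3.2])   # observed
print(correlation(s, o))        # ~0.99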
374,506
def on_delete(resc, req, resp, rid):
    signals.pre_req.send(resc.model)
    signals.pre_req_delete.send(resc.model)
    model = find(resc.model, rid)
    goldman.sess.store.delete(model)
    resp.status = falcon.HTTP_204
    signals.post_req.send(resc.model)
    signals.post_req_delete.send(resc.model)
Delete the single item. Upon a successful deletion an empty-bodied 204 is returned.
374,507
def store_many_vectors(self, hash_name, bucket_keys, vs, data):
    if data is None:
        data = itertools.repeat(data)
    for v, k, d in zip(vs, bucket_keys, data):
        self.store_vector(hash_name, k, v, d)
Store a batch of vectors. Stores vector and JSON-serializable data in bucket with specified key.
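A hedged sketch of batch storage (the storage instance, hash name, keys, and payloads are illustrative; mirrors the zip in the loop above):
import numpy as np
vectors = [np.random.randn(16) for _ in range(3)]   # vs
keys = ['bucket_a', 'bucket_b', 'bucket_c']         # bucket_keys
payloads = ['doc1', 'doc2', 'doc3']                 # JSON-serializable data
storage.store_many_vectors('my_hash', keys, vectors, payloads)  # hypothetical storage instance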
374,508
def extract_paths(self, paths, ignore_nopath):
    try:
        super().extract_paths(
            paths=paths,
            ignore_nopath=ignore_nopath,
        )
    except ExtractPathError as err:
        LOGGER.debug(
            'Failed to extract paths from %s: %s',  # message reconstructed; original literal lost
            self.vm.name(), err.message
        )
        if self._has_guestfs:
            self.extract_paths_dead(paths, ignore_nopath)
        else:
            raise
Extract the given paths from the domain Attempt to extract all files defined in ``paths`` with the method defined in :func:`~lago.plugins.vm.VMProviderPlugin.extract_paths`, if it fails, and `guestfs` is available it will try extracting the files with guestfs. Args: paths(list of tuples): files to extract in `[(src1, dst1), (src2, dst2)...]` format. ignore_nopath(boolean): if True will ignore none existing paths. Returns: None Raises: :exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing path was found on the VM, and `ignore_nopath` is False. :exc:`~lago.plugins.vm.ExtractPathError`: on all other failures.
374,509
def get_default_query_from_module(module):
    if isinstance(module, types.ModuleType):
        return module.__dict__.get(_SQL_MODULE_LAST, None)
    return None
Given a %%sql module return the default (last) query for the module. Args: module: the %%sql module. Returns: The default query associated with this module.
374,510
def has_equal_ast(state, incorrect_msg=None, code=None, exact=True, append=None):
    if utils.v2_only():
        state.assert_is_not(["object_assignments"], "has_equal_ast", ["check_object"])
        state.assert_is_not(["function_calls"], "has_equal_ast", ["check_function"])
    if code and incorrect_msg is None:
        raise InstructorError(
            "If you manually specify the code to match inside has_equal_ast(), "
            "you have to explicitly set the `incorrect_msg` argument."
        )
    if append is None:
        append = incorrect_msg is None
    if incorrect_msg is None:
        incorrect_msg = "Expected `{{sol_str}}`, but got `{{stu_str}}`."

    def parse_tree(tree):
        crnt = (
            tree.body[0]
            if isinstance(tree, ast.Module) and len(tree.body) == 1
            else tree
        )
        return ast.dump(crnt.value if isinstance(crnt, ast.Expr) else crnt)

    stu_rep = parse_tree(state.student_ast)
    sol_rep = parse_tree(state.solution_ast if not code else ast.parse(code))
    fmt_kwargs = {
        "sol_str": state.solution_code if not code else code,
        "stu_str": state.student_code,
    }
    _msg = state.build_message(incorrect_msg, fmt_kwargs, append=append)
    if exact and not code:
        state.do_test(EqualTest(stu_rep, sol_rep, Feedback(_msg, state)))
    elif not sol_rep in stu_rep:
        state.report(Feedback(_msg, state))
    return state
Test whether abstract syntax trees match between the student and solution code. ``has_equal_ast()`` can be used in two ways: * As a robust version of ``has_code()``. By setting ``code``, you can look for the AST representation of ``code`` in the student's submission. But be aware that ``a`` and ``a = 1`` won't match, as reading and assigning are not the same in an AST. Use ``ast.dump(ast.parse(code))`` to see an AST representation of ``code``. * As an expression-based check when using more advanced SCT chains, e.g. to compare the equality of expressions to set function arguments. Args: incorrect_msg: message displayed when ASTs mismatch. When you specify ``code`` yourself, you have to specify this. code: optional code to use instead of the solution AST. exact: whether the representations must match exactly. If false, the solution AST only needs to be contained within the student AST (similar to using ``test_student_typed``). Defaults to ``True``, unless the ``code`` argument has been specified. :Example: Student and Solution Code:: dict(a = 'value').keys() SCT:: # all pass Ex().has_equal_ast() Ex().has_equal_ast(code = "dict(a = 'value').keys()") Ex().has_equal_ast(code = "dict(a = 'value')", exact = False) Student and Solution Code:: import numpy as np arr = np.array([1, 2, 3, 4, 5]) np.mean(arr) SCT:: # Check underlying value of argument a of np.mean: Ex().check_function('numpy.mean').check_args('a').has_equal_ast() # Only check AST equality of expression used to specify argument a: Ex().check_function('numpy.mean').check_args('a').has_equal_ast()
374,511
def runGetOutput(cmd, raiseOnFailure=False, encoding=sys.getdefaultencoding()):
    # result-dict keys reconstructed from the docstring's description of runGetResults
    results = Simple.runGetResults(cmd, stdout=True, stderr=subprocess.STDOUT, encoding=encoding)
    if raiseOnFailure is True and results['returnCode'] != 0:
        try:
            if issubclass(cmd.__class__, (list, tuple)):
                cmdStr = ' '.join(cmd)
            else:
                cmdStr = cmd
        except:
            cmdStr = repr(cmd)
        failMsg = "Command %s failed with returnCode=%d" % (cmdStr, results['returnCode'])
        raise SimpleCommandFailure(failMsg, results['returnCode'],
                                   results.get('stdout', None), results.get('stderr', None))
    return results['stdout']
runGetOutput - Simply runs a command and returns the output as a string. Use #runGetResults if you need something more complex. @param cmd <str/list> - String of command and arguments, or list of command and arguments If cmd is a string, the command will be executed as if ran exactly as written in a shell. This mode supports shell-isms like '&&' and '|' If cmd is a list, the first element will be the executable, and further elements are arguments that will be passed to that executable. @param raiseOnFailure <True/False> - Default False, if True a non-zero return from the command (failure) will raise a SimpleCommandFailure, which contains all gathered output and return code. @see #SimpleCommandFailure @param encoding <None/str> - Default sys.getdefaultencoding(), the program's output will automatically be decoded using the provided codec (e.g. "utf-8" or "ascii"). If None or False-ish, data will not be decoded (i.e. in python3 will be "bytes" type) If unsure, leave this as its default value, or provide "utf-8" @return <str> - String of data output by the executed program. This combines stdout and stderr into one string. If you need them separate, use #runGetResults @raises SimpleCommandFailure - * If command cannot be executed (like program not found, insufficient permissions, etc) * If #raiseOnFailure is set to True, and the program returns non-zero
374,512
def python_type(self):
    from ambry.valuetype import resolve_value_type
    if self.valuetype and resolve_value_type(self.valuetype):
        return resolve_value_type(self.valuetype)._pythontype
    elif self.datatype:
        try:
            return self.types[self.datatype][1]
        except KeyError:
            return resolve_value_type(self.datatype)._pythontype
    else:
        from ambry.exc import ConfigurationError
        raise ConfigurationError("Can't get python_type: neither datatype nor valuetype is defined")
Return the python type for the row, possibly getting it from a valuetype reference
374,513
def run(self, schedule_type, lookup_id, **kwargs):
    log = self.get_logger(**kwargs)
    log.info("Queuing <%s> <%s>" % (schedule_type, lookup_id))
    task_run = QueueTaskRun()
    task_run.task_id = self.request.id or uuid4()
    task_run.started_at = now()
    tr_qs = QueueTaskRun.objects
    schedules = Schedule.objects.filter(enabled=True)
    if schedule_type == "crontab":
        schedules = schedules.filter(celery_cron_definition=lookup_id)
        tr_qs = tr_qs.filter(celery_cron_definition=lookup_id)
        scheduler_type = CrontabSchedule
        task_run.celery_cron_definition_id = lookup_id
    elif schedule_type == "interval":
        schedules = schedules.filter(celery_interval_definition=lookup_id)
        tr_qs = tr_qs.filter(celery_interval_definition=lookup_id)
        scheduler_type = IntervalSchedule
        task_run.celery_interval_definition_id = lookup_id
    try:
        last_task_run = tr_qs.latest("started_at")
    except QueueTaskRun.DoesNotExist:
        pass
    else:
        sched = scheduler_type.objects.get(id=lookup_id)
        due, due_next = sched.schedule.is_due(last_task_run.started_at)
        if not due and due_next >= settings.DEFAULT_CLOCK_SKEW_SECONDS:
            return (
                "Aborted Queuing <%s> <%s> due to last task run (%s) "
                "at %s" % (
                    schedule_type,
                    lookup_id,
                    last_task_run.id,
                    last_task_run.started_at,
                )
            )
    task_run.save()
    queued = 0
    schedules = schedules.values("id", "auth_token", "endpoint", "payload")
    for schedule in schedules.iterator():
        schedule["schedule_id"] = str(schedule.pop("id"))
        DeliverTask.apply_async(kwargs=schedule)
        queued += 1
    task_run.completed_at = now()
    task_run.save()
    return "Queued <%s> Tasks" % (queued,)
Loads Schedule linked to provided lookup
374,514
def _make_meta(self, tracker_url, root_name, private, progress):
    if self._fifo:
        piece_size_exp = 20
    else:
        total_size = self._calc_size()
        if total_size:
            piece_size_exp = int(math.log(total_size) / math.log(2)) - 9
        else:
            piece_size_exp = 0
    piece_size_exp = min(max(15, piece_size_exp), 24)
    piece_size = 2 ** piece_size_exp
    info, totalhashed = self._make_info(
        piece_size, progress,
        self.walk() if self._fifo else sorted(self.walk()))
    info["x_cross_seed"] = hashlib.md5(tracker_url).hexdigest()
    if private:
        info["private"] = 1
    if root_name:
        info["name"] = root_name
    meta = {
        "info": info,
        "announce": tracker_url.strip(),
    }
    return check_meta(meta), totalhashed
Create torrent dict.
374,515
def start_runs(logdir, steps, run_name, thresholds,
               mask_every_other_prediction=False):
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.set_random_seed(42)
    distribution = tf.compat.v1.distributions.Normal(loc=0., scale=142.)
    examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
    probabilities_colors_are_red = (1 - red_predictor.cdf(
        tf.norm(tensor=examples - tf.constant([255., 0, 0]), axis=1))) * 2
    probabilities_colors_are_green = (1 - green_predictor.cdf(
        tf.norm(tensor=examples - tf.constant([0, 255., 0]), axis=1))) * 2
    probabilities_colors_are_blue = (1 - blue_predictor.cdf(
        tf.norm(tensor=examples - tf.constant([0, 0, 255.]), axis=1))) * 2
    predictions = (
        probabilities_colors_are_red,
        probabilities_colors_are_green,
        probabilities_colors_are_blue
    )
    for i, color in enumerate(('red', 'green', 'blue')):  # color names reconstructed
        description = ('True distribution standard deviation: %d'
                       % initial_standard_deviations[i])  # wording reconstructed
        weights = None
        if mask_every_other_prediction:
            consecutive_indices = tf.reshape(
                tf.range(tf.size(input=predictions[i])),
                tf.shape(input=predictions[i]))
            weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)
        summary.op(
            name=color,
            labels=labels[:, i],
            predictions=predictions[i],
            num_thresholds=thresholds,
            weights=weights,
            display_name='classifying %s' % color,  # wording reconstructed
            description=description)
    merged_summary_op = tf.compat.v1.summary.merge_all()
    events_directory = os.path.join(logdir, run_name)
    sess = tf.compat.v1.Session()
    writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph)
    for step in xrange(steps):
        feed_dict = {
            iteration: step,
        }
        merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
        writer.add_summary(merged_summary, step)
    writer.close()
Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1.
374,516
def assign_edge_colors_and_widths(self):
    # resolve edge widths
    if self.style.edge_widths is None:
        if not self.style.edge_style["stroke-width"]:
            self.style.edge_style.pop("stroke-width")
            self.style.edge_style.pop("stroke")
            self.edge_widths = [None] * self.nedges
        else:
            if isinstance(self.style.edge_style["stroke-width"], (list, tuple)):
                raise ToytreeError(
                    "Use edge_widths not edge_style for multiple edge widths")
            width = self.style.edge_style["stroke-width"]
            self.style.edge_style.pop("stroke-width")
            self.edge_widths = [width] * self.nedges
    else:
        self.style.edge_style.pop("stroke-width")
        if isinstance(self.style.edge_widths, (str, int)):
            self.edge_widths = [int(self.style.edge_widths)] * self.nedges
        elif isinstance(self.style.edge_widths, (list, tuple)):
            if len(self.style.edge_widths) != self.nedges:
                raise ToytreeError("edge_widths arg is the wrong length")
            for cidx in range(self.nedges):
                self.edge_widths[cidx] = self.style.edge_widths[cidx]
    # resolve edge colors
    if self.style.edge_colors is None:
        if self.style.edge_style["stroke"] is None:
            self.style.edge_style.pop("stroke")
            self.edge_colors = [None] * self.nedges
        else:
            if isinstance(self.style.edge_style["stroke"], (list, tuple)):
                raise ToytreeError(
                    "Use edge_colors not edge_style for multiple edge colors")
            color = self.style.edge_style["stroke"]
            if isinstance(color, (np.ndarray, np.void, list, tuple)):
                color = toyplot.color.to_css(color)
            self.style.edge_style.pop("stroke")
            self.edge_colors = [color] * self.nedges
    else:
        self.style.edge_style.pop("stroke")
        if isinstance(self.style.edge_colors, (str, int)):
            color = self.style.edge_colors
            if isinstance(color, (np.ndarray, np.void, list, tuple)):
                color = toyplot.color.to_css(color)
            self.edge_colors = [color] * self.nedges
        elif isinstance(self.style.edge_colors, (list, tuple)):
            if len(self.style.edge_colors) != self.nedges:
                raise ToytreeError("edge_colors arg is the wrong length")
            for cidx in range(self.nedges):
                self.edge_colors[cidx] = self.style.edge_colors[cidx]
    self.edge_colors = [i if i else "#262626" for i in self.edge_colors]  # default color reconstructed
    self.edge_widths = [i if i else 2 for i in self.edge_widths]
Resolve conflict of 'node_color' and 'node_style['fill'] args which are redundant. Default is node_style.fill unless user entered node_color. To enter multiple colors user must use node_color not style fill. Either way, we build a list of colors to pass to Drawing.node_colors which is then written to the marker as a fill CSS attribute.
374,517
def visit_tryexcept(self, node):
    trys = ["try:\n%s" % self._stmt_list(node.body)]
    for handler in node.handlers:
        trys.append(handler.accept(self))
    if node.orelse:
        trys.append("else:\n%s" % self._stmt_list(node.orelse))
    return "\n".join(trys)
return an astroid.TryExcept node as string
374,518
def peek_step(self, val: ArrayValue, sn: "DataNode") -> Tuple[ObjectValue, "DataNode"]:
    keys = self.parse_keys(sn)
    for en in val:
        flag = True
        try:
            for k in keys:
                if en[k] != keys[k]:
                    flag = False
                    break
        except KeyError:
            continue
        if flag:
            return (en, sn)
    return (None, sn)
Return the entry addressed by the receiver + its schema node. Args: val: Current value (array). sn: Current schema node.
374,519
def clientConnected(self, proto):
    proto.uniqueName = ':1.%d' % (self.next_id,)  # D-Bus unique-name format reconstructed
    self.next_id += 1
    self.clients[proto.uniqueName] = proto
Called when a client connects to the bus. This method assigns the new connection a unique bus name.
374,520
def set_thresholds(self, touch, release):
    # assertion messages reconstructed from the docstring's stated range
    assert touch >= 0 and touch <= 255, 'touch must be between 0-255 (inclusive)'
    assert release >= 0 and release <= 255, 'release must be between 0-255 (inclusive)'
    for i in range(12):
        self._i2c_retry(self._device.write8, MPR121_TOUCHTH_0 + 2*i, touch)
        self._i2c_retry(self._device.write8, MPR121_RELEASETH_0 + 2*i, release)
Set the touch and release threshold for all inputs to the provided values. Both touch and release should be a value between 0 to 255 (inclusive).
374,521
def page_uri_handler(context, content, pargs, kwargs):
    uri = pargs[0]
    return url_for('public.page_uri', uri=uri)  # endpoint name reconstructed; original literal lost
Shortcode for getting the link to internal pages using the flask `url_for` method. Activate with 'shortcodes' template filter. Within the content use the chill page_uri shortcode: "[chill page_uri idofapage]". The argument is the 'uri' for a page that chill uses. Does not verify the link to see if it's valid.
374,522
def _grab_history(self):
    # config section/option names and data keys reconstructed from zest.releaser conventions
    default_location = None
    config = self.setup_cfg.config
    if config and config.has_option('zest.releaser', 'history_file'):
        default_location = config.get('zest.releaser', 'history_file')
    history_file = self.vcs.history_file(location=default_location)
    if not history_file:
        logger.warn("No history file found")
        self.data['history_lines'] = None
        self.data['history_file'] = None
        return
    logger.debug("Checking %s", history_file)
    history_lines = open(history_file).read().split('\n')
    headings = utils.extract_headings_from_history(history_lines)
    if not len(headings):
        logger.error("No detectable version heading in the history "
                     "file %s", history_file)
        sys.exit()
    good_heading = self.data['history_header'] % self.data
    line = headings[0]['line']
    previous = history_lines[line]
    history_lines[line] = good_heading
    logger.debug("Set heading from %r to %r.", previous, good_heading)
    history_lines[line + 1] = utils.fix_rst_heading(
        heading=good_heading,
        below=history_lines[line + 1])
    logger.debug("Set line below heading to %r", history_lines[line + 1])
    self.data['history_lines'] = history_lines
    self.data['history_file'] = history_file
Calculate the needed history/changelog changes Every history heading looks like '1.0 b4 (1972-12-25)'. Extract them, check if the first one matches the version and whether it has the current date.
374,523
def health(self, session=None):
    # payload keys and status strings reconstructed from Airflow's /health endpoint
    BJ = jobs.BaseJob
    payload = {}
    scheduler_health_check_threshold = timedelta(
        seconds=conf.getint('scheduler', 'scheduler_health_check_threshold'))
    latest_scheduler_heartbeat = None
    payload['metadatabase'] = {'status': 'healthy'}
    try:
        latest_scheduler_heartbeat = session.query(func.max(BJ.latest_heartbeat)).\
            filter(BJ.state == 'running',
                   BJ.job_type == 'SchedulerJob').\
            scalar()
    except Exception:
        payload['metadatabase']['status'] = 'unhealthy'
    if not latest_scheduler_heartbeat:
        scheduler_status = 'unhealthy'
    else:
        if timezone.utcnow() - latest_scheduler_heartbeat <= scheduler_health_check_threshold:
            scheduler_status = 'healthy'
        else:
            scheduler_status = 'unhealthy'
    payload['scheduler'] = {'status': scheduler_status,
                            'latest_scheduler_heartbeat': str(latest_scheduler_heartbeat)}
    return wwwutils.json_response(payload)
An endpoint helping check the health status of the Airflow instance, including metadatabase and scheduler.
374,524
def apply_projection(projection, value):
    if isinstance(value, Sequence):
        return [
            apply_projection(projection, item) for item in value
        ]
    elif not isinstance(value, Mapping):
        return value
    try:
        current_projection = [p[0] for p in projection]
    except IndexError:
        return value
    for name in list(value.keys()):
        if name not in current_projection:
            value.pop(name)
        elif isinstance(value[name], dict):
            value[name] = apply_projection(
                [p[1:] for p in projection if p[0] == name],
                value[name]
            )
    return value
Apply projection.
374,525
def find_neighbor_pores(self, pores, mode='or', flatten=True, include_input=False):
    # default mode and adjacency-matrix format reconstructed ('or' per docstring, 'lil' per OpenPNM convention)
    pores = self._parse_indices(pores)
    if sp.size(pores) == 0:
        return sp.array([], ndmin=1, dtype=int)
    if 'lil' not in self._am.keys():
        self.get_adjacency_matrix(fmt='lil')
    neighbors = topotools.find_neighbor_sites(sites=pores, logic=mode,
                                              am=self._am['lil'],
                                              flatten=flatten,
                                              include_input=include_input)
    return neighbors
r""" Returns a list of pores that are direct neighbors to the given pore(s) Parameters ---------- pores : array_like Indices of the pores whose neighbors are sought flatten : boolean If ``True`` (default) the returned result is a compressed array of all neighbors. If ``False``, a list of lists with each sub-list containing the neighbors for each input site. Note that an *unflattened* list might be slow to generate since it is a Python ``list`` rather than a Numpy ``array``. include_input : bool If ``False`` (default) then the input pores are not included in the returned list(s). Note that since pores are not neighbors of themselves, the neighbors of pore N will not include N, even if this flag is ``True``. mode : string Specifies logic to filter the resulting list. Options are: **'or'** : (default) All neighbors of the input pores. This is also known as the 'union' in set theory or 'any' in boolean logic. Both keywords are accepted and treated as 'or'. **'xor'** : Only neighbors of one and only one input pore. This is useful for finding the pores that are not shared by any of the input pores. This is known as 'exclusive_or' in set theory, and is an accepted input. **'xnor'** : Neighbors that are shared by two or more input pores. This is equivalent to finding all neighbors with 'or', minus those found with 'xor', and is useful for finding neighbors that the inputs have in common. **'and'** : Only neighbors shared by all input pores. This is also known as 'intersection' in set theory and (somtimes) as 'all' in boolean logic. Both keywords are accepted and treated as 'and'. Returns ------- If ``flatten`` is ``True``, returns a 1D array of pore indices filtered according to the specified mode. If ``flatten`` is ``False``, returns a list of lists, where each list contains the neighbors of the corresponding input pores. Notes ----- The ``logic`` options are applied to neighboring pores only, thus it is not possible to obtain pores that are part of the global set but not neighbors. This is because (a) the list of global pores might be very large, and (b) it is not possible to return a list of neighbors for each input pores if global pores are considered. Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> Ps = pn.find_neighbor_pores(pores=[0, 2]) >>> print(Ps) [ 1 3 5 7 25 27] >>> Ps = pn.find_neighbor_pores(pores=[0, 1]) >>> print(Ps) [ 2 5 6 25 26] >>> Ps = pn.find_neighbor_pores(pores=[0, 1], mode='union', ... include_input=True) >>> print(Ps) [ 0 1 2 5 6 25 26] >>> Ps = pn.find_neighbor_pores(pores=[0, 2], flatten=False) >>> print(Ps) [array([ 1, 5, 25]), array([ 1, 3, 7, 27])] >>> Ps = pn.find_neighbor_pores(pores=[0, 2], mode='xnor') >>> print(Ps) [1] >>> Ps = pn.find_neighbor_pores(pores=[0, 2], mode='xor') >>> print(Ps) [ 3 5 7 25 27]
374,526
def get_overridden_calculated_entry(self):
    # map key and manager service name reconstructed from OSID naming conventions
    if not bool(self._my_map['overriddenCalculatedEntryId']):
        raise errors.IllegalState()
    mgr = self._get_provider_manager('GRADING')
    if not mgr.supports_grade_entry_lookup():
        raise errors.OperationFailed()
    lookup_session = mgr.get_grade_entry_lookup_session(proxy=getattr(self, "_proxy", None))
    lookup_session.use_federated_gradebook_view()
    osid_object = lookup_session.get_grade_entry(self.get_overridden_calculated_entry_id())
    return osid_object
Gets the calculated entry this entry overrides. return: (osid.grading.GradeEntry) - the calculated entry raise: IllegalState - ``overrides_calculated_entry()`` is ``false`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
374,527
def is_applicable(self, date_string, strip_timezone=False, settings=None):
    if strip_timezone:
        date_string, _ = pop_tz_offset_from_string(date_string, as_offset=False)
    date_string = self._translate_numerals(date_string)
    if settings.NORMALIZE:
        date_string = normalize_unicode(date_string)
    date_string = self._simplify(date_string, settings=settings)
    dictionary = self._get_dictionary(settings)
    date_tokens = dictionary.split(date_string)
    return dictionary.are_tokens_valid(date_tokens)
Check if the locale is applicable to translate date string. :param date_string: A string representing date and/or time in a recognizably valid format. :type date_string: str|unicode :param strip_timezone: If True, timezone is stripped from date string. :type strip_timezone: bool :return: boolean value representing if the locale is applicable for the date string or not.
374,528
def clipValue(self, value, minValue, maxValue):
    return min(max(value, minValue), maxValue)
Makes sure that value is within a specific range. If not, then the lower or upper bound is returned
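A standalone sketch of the same clamp logic (function name is illustrative):
def clip_value(value, min_value, max_value):
    return min(max(value, min_value), max_value)

print(clip_value(120, 0, 100))  # 100
print(clip_value(-5, 0, 100))   # 0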
374,529
def fit_classifier(self, name, analytes, method, samples=None,
                   subset=None, filt=True, sort_by=0, **kwargs):
    if samples is not None:
        subset = self.make_subset(samples)
    self.get_focus(subset=subset, filt=filt)
    c = classifier(analytes, sort_by)
    c.fit(data=self.focus, method=method, **kwargs)
    self.classifiers[name] = c
    return name
Create a clustering classifier based on all samples, or a subset. Parameters ---------- name : str The name of the classifier. analytes : str or iterable Which analytes the clustering algorithm should consider. method : str Which clustering algorithm to use. Can be: 'meanshift' The `sklearn.cluster.MeanShift` algorithm. Automatically determines number of clusters in data based on the `bandwidth` of expected variation. 'kmeans' The `sklearn.cluster.KMeans` algorithm. Determines the characteristics of a known number of clusters within the data. Must provide `n_clusters` to specify the expected number of clusters. samples : iterable list of samples to consider. Overrides 'subset'. subset : str The subset of samples used to fit the classifier. Ignored if 'samples' is specified. sort_by : int Which analyte the resulting clusters should be sorted by - defaults to 0, which is the first analyte. **kwargs : method-specific keyword parameters - see below. Meanshift Parameters bandwidth : str or float The bandwidth (float) or bandwidth method ('scott' or 'silverman') used to estimate the data bandwidth. bin_seeding : bool Modifies the behaviour of the meanshift algorithm. Refer to sklearn.cluster.meanshift documentation. K - Means Parameters n_clusters : int The number of clusters expected in the data. Returns ------- name : str
374,530
def nx_contracted_nodes(G, u, v, self_loops=True, inplace=False):
    import itertools as it
    if G.is_directed():
        in_edges = ((w, u, d) for w, x, d in G.in_edges(v, data=True)
                    if self_loops or w != u)
        out_edges = ((u, w, d) for x, w, d in G.out_edges(v, data=True)
                     if self_loops or w != u)
        new_edges = it.chain(in_edges, out_edges)
    else:
        new_edges = ((u, w, d) for x, w, d in G.edges(v, data=True)
                     if self_loops or w != u)
    if inplace:
        H = G
        new_edges = list(new_edges)
    else:
        H = G.copy()
    node_dict = nx_node_dict(H)
    v_data = node_dict[v]
    H.remove_node(v)
    H.add_edges_from(new_edges)
    if 'contraction' in node_dict[u]:  # key reconstructed from networkx's contracted_nodes convention
        node_dict[u]['contraction'][v] = v_data
    else:
        node_dict[u]['contraction'] = {v: v_data}
    return H
copy of networkx function with inplace modification TODO: commit to networkx
374,531
def aoi(self, **kwargs):
    g = self._parse_geoms(**kwargs)
    if g is None:
        return self
    else:
        return self[g]
Subsets the Image by the given bounds Args: bbox (list): optional. A bounding box array [minx, miny, maxx, maxy] wkt (str): optional. A WKT geometry string geojson (str): optional. A GeoJSON geometry dictionary Returns: image: an image instance of the same type
374,532
def poly(self, return_coeffs=False):
    p = self.bpoints()
    coeffs = (p[0] - 2*p[1] + p[2], 2*(p[1] - p[0]), p[0])
    if return_coeffs:
        return coeffs
    else:
        return np.poly1d(coeffs)
returns the quadratic as a Polynomial object.
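A quick check of the returned polynomial (assumes bpoints() yields the quadratic Bézier control points p0, p1, p2; values illustrative):
import numpy as np
p0, p1, p2 = 0.0, 1.0, 2.0                   # hypothetical control points
coeffs = (p0 - 2*p1 + p2, 2*(p1 - p0), p0)   # same arithmetic as poly()
q = np.poly1d(coeffs)
print(q(0.0), q(1.0))                        # endpoints: p0 and p2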
374,533
def mosaic_inline(self, imagelist, bg_ref=None, trim_px=None, merge=False,
                  allow_expand=True, expand_pad_deg=0.01, max_expand_pct=None,
                  update_minmax=True, suppress_callback=False):
    header = self.get_header()
    ((xrot_ref, yrot_ref),
     (cdelt1_ref, cdelt2_ref)) = wcs.get_xy_rotation_and_scale(header)
    scale_x, scale_y = math.fabs(cdelt1_ref), math.fabs(cdelt2_ref)
    mydata = self._get_data()
    count = 1
    res = []
    for image in imagelist:
        name = image.get('name', 'image%d' % (count))
        count += 1
        data_np = image._get_data()
        if 0 in data_np.shape:
            self.logger.info("Skipping image with zero length axis")
            continue
        ctr_x, ctr_y = trcalc.get_center(data_np)
        ra, dec = image.pixtoradec(ctr_x, ctr_y)
        ht, wd = data_np.shape[:2]
        if trim_px:
            xlo, xhi = trim_px, wd - trim_px
            ylo, yhi = trim_px, ht - trim_px
            data_np = data_np[ylo:yhi, xlo:xhi, ...]
            ht, wd = data_np.shape[:2]
        if bg_ref is not None:
            bg = iqcalc.get_median(data_np)
            bg_inc = bg_ref - bg
            data_np = data_np + bg_inc
        if update_minmax:
            maxval = np.nanmax(data_np)
            minval = np.nanmin(data_np)
            self.maxval = max(self.maxval, maxval)
            self.minval = min(self.minval, minval)
        header = image.get_header()
        ((xrot, yrot),
         (cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
        self.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f "
                          "cdelt2=%f" % (name, xrot, yrot, cdelt1, cdelt2))
        if (not np.isclose(math.fabs(cdelt1), scale_x) or
                not np.isclose(math.fabs(cdelt2), scale_y)):
            nscale_x = math.fabs(cdelt1) / scale_x
            nscale_y = math.fabs(cdelt2) / scale_y
            self.logger.debug("scaling piece by x(%f), y(%f)" % (
                nscale_x, nscale_y))
            data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
                data_np, 0, 0, wd - 1, ht - 1, nscale_x, nscale_y,
                logger=self.logger)
        rot_dx, rot_dy = xrot - xrot_ref, yrot - yrot_ref
        flip_x = False
        flip_y = False
        if (np.isclose(math.fabs(rot_dx), 180.0) or
                np.isclose(math.fabs(rot_dy), 180.0)):
            rotdata = trcalc.transform(data_np, flip_x=True, flip_y=True)
            rot_dx = 0.0
            rot_dy = 0.0
        else:
            rotdata = data_np
        if not np.isclose(rot_dy, 0.0):
            rot_deg = rot_dy
            self.logger.debug("rotating %s by %f deg" % (name, rot_deg))
            rotdata = trcalc.rotate(rotdata, rot_deg, logger=self.logger)
        if np.sign(cdelt1) != np.sign(cdelt1_ref):
            flip_x = True
        if np.sign(cdelt2) != np.sign(cdelt2_ref):
            flip_y = True
        if flip_x or flip_y:
            rotdata = trcalc.transform(rotdata, flip_x=flip_x, flip_y=flip_y)
        ht, wd = rotdata.shape[:2]
        ctr_x, ctr_y = trcalc.get_center(rotdata)
        x0, y0 = self.radectopix(ra, dec)
        # bounding-box computation reconstructed; only the allow_expand
        # error message survived extraction
        x0, y0 = int(round(x0)), int(round(y0))
        myht, mywd = mydata.shape[:2]
        xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x
        ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y
        if not allow_expand and (xlo < 0 or xhi > mywd or
                                 ylo < 0 or yhi > myht):
            raise Exception("New piece doesn't fit on image and "
                            "allow_expand=False")
        expand_x = max(int(expand_pad_deg / scale_x), 0)
        expand_y = max(int(expand_pad_deg / scale_y), 0)
        nx1_off, nx2_off = 0, 0
        if xlo < 0:
            nx1_off = abs(xlo) + expand_x
        if xhi > mywd:
            nx2_off = (xhi - mywd) + expand_x
        xlo, xhi = xlo + nx1_off, xhi + nx1_off
        ny1_off, ny2_off = 0, 0
        if ylo < 0:
            ny1_off = abs(ylo) + expand_y
        if yhi > myht:
            ny2_off = (yhi - myht) + expand_y
        ylo, yhi = ylo + ny1_off, yhi + ny1_off
        new_wd = mywd + nx1_off + nx2_off
        new_ht = myht + ny1_off + ny2_off
        old_area = mywd * myht
        new_area = new_wd * new_ht
        expand_pct = new_area / old_area
        if ((max_expand_pct is not None) and
                (expand_pct > max_expand_pct)):
            raise Exception("New area exceeds current one by %.2f %%;"
                            "increase max_expand_pct (%.2f) to allow" %
                            (expand_pct * 100, max_expand_pct))
        new_data = np.zeros((new_ht, new_wd))
        new_data[ny1_off:ny1_off + myht, nx1_off:nx1_off + mywd] = mydata
        self._data = new_data
        mydata = new_data
        if (nx1_off > 0) or (ny1_off > 0):
            crpix1, crpix2 = self.get_keywords_list('CRPIX1', 'CRPIX2')
            kwds = dict(CRPIX1=crpix1 + nx1_off, CRPIX2=crpix2 + ny1_off)
            self.update_keywords(kwds)
        try:
            if merge:
                mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]
            else:
                idx = (mydata[ylo:yhi, xlo:xhi, ...] == 0.0)
                mydata[ylo:yhi, xlo:xhi, ...][idx] = \
                    rotdata[0:ht, 0:wd, ...][idx]
        except Exception as e:
            self.logger.error("Error fitting tile: %s" % (str(e)))
            raise
        res.append((xlo, ylo, xhi, yhi))
    return res
Drops new images into the current image (if there is room), relocating them according to the WCS between the two images.
374,534
def exceptions(self):
    # delimiters, format strings, and tag names reconstructed; original literals lost
    def make_param(text):
        if '{' in text and '}' in text:
            word_split = list(split_delimited('{}', ' ', text))
            if word_split[1] != '':
                # insert an empty name slot after the {type} group
                text = ' '.join([word_split[0], ''] + word_split[1:])
        else:
            word_split = text.split()
            text = '{%s} %s' % (word_split[0], ' '.join(word_split[1:]))
        return ParamDoc(text)
    return [make_param(text) for text in
            self.get_as_list('throws') + self.get_as_list('exception')]
Returns a list of ParamDoc objects (with empty names) of the exception tags for the function. >>> comments = parse_comments_for_file('examples/module_closure.js') >>> fn1 = FunctionDoc(comments[1]) >>> fn1.exceptions[0].doc 'Another exception' >>> fn1.exceptions[1].doc 'A fake exception' >>> fn1.exceptions[1].type 'String'
374,535
def copy_unit_properties(self, sorting, unit_ids=None):
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    if isinstance(unit_ids, int):
        curr_property_names = sorting.get_unit_property_names(unit_id=unit_ids)
        for curr_property_name in curr_property_names:
            value = sorting.get_unit_property(unit_id=unit_ids,
                                              property_name=curr_property_name)
            self.set_unit_property(unit_id=unit_ids,
                                   property_name=curr_property_name, value=value)
    else:
        for unit_id in unit_ids:
            curr_property_names = sorting.get_unit_property_names(unit_id=unit_id)
            for curr_property_name in curr_property_names:
                value = sorting.get_unit_property(unit_id=unit_id,
                                                  property_name=curr_property_name)
                self.set_unit_property(unit_id=unit_id,
                                       property_name=curr_property_name, value=value)
Copy unit properties from another sorting extractor to the current sorting extractor. Parameters ---------- sorting: SortingExtractor The sorting extractor from which the properties will be copied unit_ids: (array_like, int) The list (or single value) of unit_ids for which the properties will be copied.
374,536
def iter_links(operations, page):
    for operation, ns, rule, func in operations:
        yield Link.for_(
            operation=operation,
            ns=ns,
            type=ns.subject_name,
            qs=page.to_items(),
        )
Generate links for an iterable of operations on a starting page.
374,537
def resume_transfer_operation(self, operation_name):
    self.get_conn().transferOperations().resume(
        name=operation_name).execute(num_retries=self.num_retries)
Resumes a transfer operation in Google Storage Transfer Service. :param operation_name: (Required) Name of the transfer operation. :type operation_name: str :rtype: None
374,538
def pid(self):
    try:
        return self._pid
    except AttributeError:
        self._pid = os.getpid()
        return self._pid
The pid of the process associated to the scheduler.
374,539
def get_unique_families(hkls):
    def is_perm(hkl1, hkl2):
        h1 = np.abs(hkl1)
        h2 = np.abs(hkl2)
        return all([i == j for i, j in zip(sorted(h1), sorted(h2))])

    unique = collections.defaultdict(list)
    for hkl1 in hkls:
        found = False
        for hkl2 in unique.keys():
            if is_perm(hkl1, hkl2):
                found = True
                unique[hkl2].append(hkl1)
                break
        if not found:
            unique[hkl1].append(hkl1)
    pretty_unique = {}
    for k, v in unique.items():
        pretty_unique[sorted(v)[-1]] = len(v)
    return pretty_unique
Returns unique families of Miller indices. Families must be permutations of each other. Args: hkls ([h, k, l]): List of Miller indices. Returns: {hkl: multiplicity}: A dict with unique hkl and multiplicity.
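For instance (illustrative indices; the representative key chosen per family may vary):
fams = get_unique_families([(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0)])
print(fams)  # e.g. {(1, 0, 0): 3, (1, 1, 0): 1} -- one representative per family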
374,540
def parse_config_h(fp, vars=None):
    if vars is None:
        vars = {}
    # regexes restored from the standard-library sysconfig implementation
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
    while True:
        line = fp.readline()
        if not line:
            break
        m = define_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            try:
                v = int(v)
            except ValueError:
                pass
            vars[n] = v
        else:
            m = undef_rx.match(line)
            if m:
                vars[m.group(1)] = 0
    return vars
Parse a config.h-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary.
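A quick usage sketch with an in-memory file:
import io
sample = io.StringIO("#define HAVE_FOO 1\n/* #undef HAVE_BAR */\n")
print(parse_config_h(sample))  # {'HAVE_FOO': 1, 'HAVE_BAR': 0}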
374,541
def replace_word_tokens(string, language):
    # word-group keys and punctuation literals reconstructed; originals lost
    words = mathwords.word_groups_for_language(language)
    operators = words['binary_operators'].copy()
    if 'unary_operators' in words:
        operators.update(words['unary_operators'])
    for operator in list(operators.keys()):
        if operator in string:
            string = string.replace(operator, operators[operator])
    numbers = words['numbers']
    for number in list(numbers.keys()):
        if number in string:
            string = string.replace(number, str(numbers[number]))
    scales = words['scales']
    end_index_characters = mathwords.BINARY_OPERATORS
    end_index_characters.add('(')
    word_matches = find_word_groups(string, list(scales.keys()))
    for match in word_matches:
        string = string.replace(match, '(' + match + ')')
    for scale in list(scales.keys()):
        for _ in range(0, string.count(scale)):
            start_index = string.find(scale) - 1
            end_index = len(string)
            while is_int(string[start_index - 1]) and start_index > 0:
                start_index -= 1
            end_index = string.find(' ', start_index) + 1
            end_index = string.find(' ', end_index) + 1
            add = ' + '
            if string[end_index] in end_index_characters:
                add = ' '
            string = string[:start_index] + '(' + string[start_index:]
            string = string.replace(
                scale, '* ' + str(scales[scale]) + ')' + add,
                1
            )
    string = string.replace(') (', ') + (')
    return string
Given a string and an ISO 639-2 language code, return the string with the words replaced with an operational equivalent.
374,542
def console(self, ttynum=-1, stdinfd=0, stdoutfd=1, stderrfd=2, escape=1):
    if not self.running:
        return False
    return _lxc.Container.console(self, ttynum, stdinfd, stdoutfd,
                                  stderrfd, escape)
Attach to console of running container.
374,543
def send(self, text):
    self.s.write(text)
    time.sleep(0.001 * len(text))
Send a string to the PiLite, can be simple text or a $$$ command
374,544
def add_json(self, json_obj, **kwargs):
    return self.add_bytes(encoding.Json().encode(json_obj), **kwargs)
Adds a json-serializable Python dict as a json file to IPFS. .. code-block:: python >>> c.add_json({'one': 1, 'two': 2, 'three': 3}) 'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob' Parameters ---------- json_obj : dict A json-serializable Python dictionary Returns ------- str : Hash of the added IPFS object
374,545
def load(obj, settings_module, identifier="py", silent=False, key=None):
    mod, loaded_from = get_module(obj, settings_module, silent)
    if mod and loaded_from:
        obj.logger.debug("py_loader: {}".format(mod))
    else:
        obj.logger.debug(
            "py_loader: %s (Ignoring, Not Found)", settings_module
        )
        return
    for setting in dir(mod):
        if setting.isupper():
            if key is None or key == setting:
                setting_value = getattr(mod, setting)
                obj.logger.debug(
                    "py_loader: loading %s: %s (%s)",
                    setting,
                    "*****" if "secret" in settings_module else setting_value,
                    identifier,
                )
                obj.set(setting, setting_value, loader_identifier=identifier)
    obj._loaded_files.append(mod.__file__)
Tries to import a python module
374,546
def swap(self, fn, *args, **kwargs):
    while True:
        oldval = self.deref()
        newval = fn(oldval, *args, **kwargs)
        if self._state.compare_and_set(oldval, newval):
            self.notify_watches(oldval, newval)
            return newval
Given a mutator `fn`, calls `fn` with the atom's current state, `args`, and `kwargs`. The return value of this invocation becomes the new value of the atom. Returns the new value. :param fn: A function which will be passed the current state. Should return a new state. This absolutely *MUST NOT* mutate the reference to the current state! If it does, this function may loop indefinitely. :param \*args: Arguments to be passed to `fn`. :param \*\*kwargs: Keyword arguments to be passed to `fn`.
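A minimal sketch of the compare-and-swap retry loop in use (assumes an Atom class exposing this swap method; constructor and names illustrative):
counter = Atom(0)                          # hypothetical constructor
counter.swap(lambda n: n + 1)              # 1
counter.swap(lambda n, step: n + step, 5)  # 6 -- extra args forwarded to fn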
374,547
def set(self, obj, id, payload, action='', async=False):
    # URL template and HTTP verb literal reconstructed
    self.url = '{}/{}/{}'.format(self.base_url, obj, id)
    self.method = 'PUT'
    if action:
        self.url += '/{}'.format(action)
    self.payload = json.dumps(payload)
    if async:
        session = FuturesSession()
        return session.put(url=self.url, auth=self.auth,
                           headers=self.headers, data=self.payload,
                           cert=self.ca_cert)
    else:
        self.resp = requests.put(url=self.url, auth=self.auth,
                                 headers=self.headers, data=self.payload,
                                 cert=self.ca_cert)
        if self.__process_resp__(obj):
            return self.res
        return False
Function set Set an object by id @param obj: object name ('hosts', 'puppetclasses'...) @param id: the id of the object (name or id) @param action: specific action of an object ('power'...) @param payload: the dict of the payload @param async: should this request be async, if true use return.result() to get the response @return RETURN: the server response
374,548
def profile_cancel(self, query_id, timeout=10):
    # request keys and URL reconstructed from pydrill conventions
    result = Result(*self.perform_request(**{
        'method': 'GET',
        'url': '/profiles/cancel/{0}'.format(query_id),
        'params': {
            'request_timeout': timeout
        }
    }))
    return result
Cancel the query that has the given queryid. :param query_id: The UUID of the query in standard UUID format that Drill assigns to each query. :param timeout: int :return: pydrill.client.Result
374,549
def OnExpandAll(self):
    root = self.tree.GetRootItem()
    fn = self.tree.Expand
    self.traverse(root, fn)
    self.tree.Expand(root)
expand all nodes
374,550
def onStart(self, event):
    # print separator and attrs keys reconstructed; Python 2 print statements kept
    c = event.container
    print '-' * 5, 'started:', c
    kv = lambda s: s.split('=', 1)
    env = {k: v for (k, v) in (kv(s) for s in c.attrs['Config']['Env'])}
    print env
Display the environment of a started container
374,551
def fit(self, counts_df, val_set=None):
    # stop-criterion names reconstructed from hpfrec conventions
    if self.stop_crit == 'val-llk':
        if val_set is None:
            raise ValueError("If 'stop_crit' is set to 'val-llk', must provide a validation set.")
    if self.verbose:
        self._print_st_msg()
    self._process_data(counts_df)
    if self.verbose:
        self._print_data_info()
    if (val_set is not None) and (self.stop_crit != 'diff-norm') \
            and (self.stop_crit != 'train-llk'):
        self._process_valset(val_set)
    else:
        self.val_set = None
    self._cast_before_fit()
    self._fit()
    if self.keep_data:
        if self.users_per_batch == 0:
            self._store_metadata()
        else:
            self._st_ix_user = self._st_ix_user[:-1]
    if self.produce_dicts and self.reindex:
        self.user_dict_ = {self.user_mapping_[i]: i
                           for i in range(self.user_mapping_.shape[0])}
        self.item_dict_ = {self.item_mapping_[i]: i
                           for i in range(self.item_mapping_.shape[0])}
    self.is_fitted = True
    del self.input_df
    del self.val_set
    return self
Fit Hierarchical Poisson Model to sparse count data Fits a hierarchical Poisson model to count data using mean-field approximation with either full-batch coordinate-ascent or mini-batch stochastic coordinate-ascent. Note ---- DataFrames and arrays passed to '.fit' might be modified inplace - if this is a problem you'll need to pass a copy to them, e.g. 'counts_df=counts_df.copy()'. Note ---- Forcibly terminating the procedure should still keep the last calculated shape and rate parameter values, but is not recommended. If you need to make predictions on a forced-terminated object, set the attribute 'is_fitted' to 'True'. Note ---- Fitting in mini-batches is more prone to numerical instability and compared to full-batch variational inference, it is more likely that all your parameters will turn to NaNs (which means the optimization procedure failed). Parameters ---------- counts_df : pandas data frame (nobs, 3) or coo_matrix Input data with one row per non-zero observation, consisting of triplets ('UserId', 'ItemId', 'Count'). Must contain columns 'UserId', 'ItemId', and 'Count'. Combinations of users and items not present are implicitly assumed to be zero by the model. Can also pass a sparse coo_matrix, in which case 'reindex' will be forced to 'False'. val_set : pandas data frame (nobs, 3) Validation set on which to monitor log-likelihood. Same format as counts_df. Returns ------- self : obj Copy of this object
374,552
def use_app(backend_name=None, call_reuse=True):
    global default_app
    if default_app is not None:
        # name normalization and error message reconstructed
        names = default_app.backend_name.lower().replace('(', ' ').strip(' )')
        names = [name for name in names.split(' ') if name]
        if backend_name and backend_name.lower() not in names:
            raise RuntimeError('Can only select a backend once, already using %s.'
                               % names)
        else:
            if call_reuse:
                default_app.reuse()
            return default_app
    default_app = Application(backend_name)
    return default_app
Get/create the default Application object It is safe to call this function multiple times, as long as backend_name is None or matches the already selected backend. Parameters ---------- backend_name : str | None The name of the backend application to use. If not specified, Vispy tries to select a backend automatically. See ``vispy.use()`` for details. call_reuse : bool Whether to call the backend's `reuse()` function (True by default). Not implemented by default, but some backends need it. For example, the notebook backends need to inject some JavaScript in a notebook as soon as `use_app()` is called.
374,553
def optimise_xy(xy, *args):
    z, elements, coordinates = args
    window_com = np.array([xy[0], xy[1], z])
    return -pore_diameter(elements, coordinates, com=window_com)[0]
Return negative pore diameter for x and y coordinates optimisation.
374,554
def read(self, vals):
    i = 0
    if len(vals[i]) == 0:
        self.leapyear_observed = None
    else:
        self.leapyear_observed = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.daylight_saving_start_day = None
    else:
        self.daylight_saving_start_day = vals[i]
    i += 1
    if len(vals[i]) == 0:
        self.daylight_saving_end_day = None
    else:
        self.daylight_saving_end_day = vals[i]
    i += 1
    count = int(vals[i])
    i += 1
    for _ in range(count):
        obj = Holiday()
        obj.read(vals[i:i + obj.field_count])
        self.add_holiday(obj)
        i += obj.field_count
Read values. Args: vals (list): list of strings representing values
374,555
def signin(request, auth_form=AuthenticationForm,
           template_name='userena/signin_form.html',
           redirect_field_name=REDIRECT_FIELD_NAME,
           redirect_signin_function=signin_redirect, extra_context=None):
    # form-field names, messages, and URL names reconstructed from userena conventions
    form = auth_form()
    if request.method == 'POST':
        form = auth_form(request.POST, request.FILES)
        if form.is_valid():
            identification, password, remember_me = (
                form.cleaned_data['identification'],
                form.cleaned_data['password'],
                form.cleaned_data['remember_me'])
            user = authenticate(identification=identification,
                                password=password)
            if user.is_active:
                login(request, user)
                if remember_me:
                    request.session.set_expiry(
                        userena_settings.USERENA_REMEMBER_ME_DAYS[1] * 86400)
                else:
                    request.session.set_expiry(0)
                if userena_settings.USERENA_USE_MESSAGES:
                    messages.success(request, _('You have been signed in.'),
                                     fail_silently=True)
                userena_signals.account_signin.send(sender=None, user=user)
                redirect_to = redirect_signin_function(
                    request.GET.get(redirect_field_name,
                                    request.POST.get(redirect_field_name)),
                    user)
                return HttpResponseRedirect(redirect_to)
            else:
                return redirect(reverse('userena_disabled',
                                        kwargs={'username': user.username}))
    if not extra_context:
        extra_context = dict()
    extra_context.update({
        'form': form,
        'next': request.GET.get(redirect_field_name,
                                request.POST.get(redirect_field_name)),
    })
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
Signin using email or username with password. Signs a user in by combining email/username with password. If the combination is correct and the user :func:`is_active` the :func:`redirect_signin_function` is called with the arguments ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is trying to log in. The returned value of the function will be the URL that is redirected to. A user can also select to be remembered for ``USERENA_REMEMBER_DAYS``. :param auth_form: Form to use for signing the user in. Defaults to the :class:`AuthenticationForm` supplied by userena. :param template_name: String defining the name of the template to use. Defaults to ``userena/signin_form.html``. :param redirect_field_name: Form field name which contains the value for a redirect to the succeeding page. Defaults to ``next`` and is set in ``REDIRECT_FIELD_NAME`` setting. :param redirect_signin_function: Function which handles the redirect. This functions gets the value of ``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It must return a string which specifies the URI to redirect to. :param extra_context: A dictionary containing extra variables that should be passed to the rendered template. The ``form`` key is always the ``auth_form``. **Context** ``form`` Form used for authentication supplied by ``auth_form``.
374,556
def refitPrefixes(self):
    for c in self.children:
        c.refitPrefixes()
    if self.prefix is not None:
        ns = self.resolvePrefix(self.prefix)
        if ns[1] is not None:
            self.expns = ns[1]
    self.prefix = None
    self.nsprefixes = {}
    return self
Refit namespace qualification by replacing prefixes with explicit namespaces. Also purges prefix mapping table. @return: self @rtype: L{Element}
374,557
def switch(request, url):
    # separator, permission string, and response keys reconstructed
    app_label, model_name, object_id, field = url.split('/')
    try:
        from django.apps import apps
        model = apps.get_model(app_label, model_name)
    except ImportError:
        from django.db.models import get_model
        model = get_model(app_label, model_name)
    object = get_object_or_404(model, pk=object_id)
    perm_str = '%s.change_%s' % (app_label, model.__name__)
    if not request.user.has_perm(perm_str.lower()):
        raise PermissionDenied
    setattr(object, field, getattr(object, field) == 0)
    object.save()
    if request.is_ajax():
        return JsonResponse({'pk': object.pk, 'field': field,
                             'value': getattr(object, field)})
    else:
        msg = _(u'%(field)s changed for %(object)s') % {'field': field,
                                                        'object': object}
        messages.success(request, msg)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
Set/clear boolean field value for model object
374,558
def _resolve(self, name):
    # error message, config keys, and creator-method template reconstructed
    config = self._get_config(name)
    if not config:
        raise RuntimeError('Cache store [%s] is not defined.' % name)
    if config['driver'] in self._custom_creators:
        repository = self._call_custom_creator(config)
    else:
        repository = getattr(self, '_create_%s_driver' % config['driver'])(config)
    if 'serializer' in config:
        serializer = self._resolve_serializer(config['serializer'])
    else:
        serializer = self._serializer
    repository.get_store().set_serializer(serializer)
    return repository
Resolve the given store :param name: The store to resolve :type name: str :rtype: Repository
374,559
def add(self, defn):
    if defn.name not in self:
        self[defn.name] = defn
    else:
        msg = "Duplicate packet name '%s'" % defn.name
        log.error(msg)
        raise util.YAMLError(msg)
Adds the given Packet Definition to this Telemetry Dictionary.
374,560
def downloadArchiveAction(self, request, queryset):
    output = io.BytesIO()
    z = zipfile.ZipFile(output, 'w')
    for sub in queryset:
        sub.add_to_zipfile(z)
    z.close()
    output.seek(0)
    response = HttpResponse(
        output, content_type="application/x-zip-compressed")
    # attachment filename reconstructed; original literal lost
    response['Content-Disposition'] = 'attachment; filename="submissions.zip"'
    return response
Download selected submissions as archive, for targeted correction.
374,561
def _dump_to_file(self, file):
    xmltodict.unparse(self.object(), file, pretty=True)
dump to the file
374,562
def evaluate_stacked_ensemble(path, ensemble_id):
    # job-status strings and dict keys reconstructed from Xcessiv conventions
    with functions.DBContextManager(path) as session:
        stacked_ensemble = session.query(models.StackedEnsemble).filter_by(
            id=ensemble_id).first()
        if not stacked_ensemble:
            raise exceptions.UserError(
                'Stacked ensemble {} not found'.format(ensemble_id))
        stacked_ensemble.job_id = get_current_job().id
        stacked_ensemble.job_status = 'started'
        session.add(stacked_ensemble)
        session.commit()
        try:
            meta_features_list = []
            for base_learner in stacked_ensemble.base_learners:
                mf = np.load(base_learner.meta_features_path(path))
                if len(mf.shape) == 1:
                    mf = mf.reshape(-1, 1)
                meta_features_list.append(mf)
            secondary_features = np.concatenate(meta_features_list, axis=1)
            extraction = session.query(models.Extraction).first()
            return_splits_iterable = functions.import_object_from_string_code(
                extraction.meta_feature_generation['source'],
                'return_splits_iterable')
            X, y = extraction.return_train_dataset()
            indices_list = [test_index for train_index, test_index
                            in return_splits_iterable(X, y)]
            indices = np.concatenate(indices_list)
            X, y = X[indices], y[indices]
            est = stacked_ensemble.return_secondary_learner()
            return_splits_iterable_stacked_ensemble = \
                functions.import_object_from_string_code(
                    extraction.stacked_ensemble_cv['source'],
                    'return_splits_iterable')
            preds = []
            trues_list = []
            for train_index, test_index in \
                    return_splits_iterable_stacked_ensemble(secondary_features, y):
                X_train, X_test = secondary_features[train_index], secondary_features[test_index]
                y_train, y_test = y[train_index], y[test_index]
                est = est.fit(X_train, y_train)
                preds.append(
                    getattr(est, stacked_ensemble.base_learner_origin.
                            meta_feature_generator)(X_test)
                )
                trues_list.append(y_test)
            preds = np.concatenate(preds, axis=0)
            y_true = np.concatenate(trues_list)
            for key in stacked_ensemble.base_learner_origin.metric_generators:
                metric_generator = functions.import_object_from_string_code(
                    stacked_ensemble.base_learner_origin.metric_generators[key],
                    'metric_generator')
                stacked_ensemble.individual_score[key] = metric_generator(y_true, preds)
            stacked_ensemble.job_status = 'finished'
            session.add(stacked_ensemble)
            session.commit()
        except:
            session.rollback()
            stacked_ensemble.job_status = 'errored'
            stacked_ensemble.description['error_type'] = repr(sys.exc_info()[0])
            stacked_ensemble.description['error_value'] = repr(sys.exc_info()[1])
            stacked_ensemble.description['error_traceback'] = \
                traceback.format_exception(*sys.exc_info())
            session.add(stacked_ensemble)
            session.commit()
            raise
Evaluates the ensemble and updates the database when finished. Args: path (str): Path to Xcessiv notebook ensemble_id (str): Ensemble ID
374,563
def list_objects(self, bucket_name=None, **kwargs):
    if not bucket_name:
        bucket_name = self.bucket_name
    return self.client.list_objects(Bucket=bucket_name, **kwargs)
This method is primarily for illustration and just calls the boto3 client implementation of list_objects but is a common task for first time Predix BlobStore users.
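A hedged usage sketch (the `blobstore` instance and bucket wiring are illustrative; kwargs pass through to boto3's list_objects):
resp = blobstore.list_objects(MaxKeys=5)
for obj in resp.get('Contents', []):
    print(obj['Key'], obj['Size'])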
374,564
def spreadsheet(service, id):
    request = service.spreadsheets().get(spreadsheetId=id)
    try:
        response = request.execute()
    except apiclient.errors.HttpError as e:
        if e.resp.status == 404:
            raise KeyError(id)
        else:
            raise
    return response
Fetch and return spreadsheet meta data with Google sheets API.
374,565
def status_bar(python_input):
    # style-class and display strings reconstructed from ptpython conventions
    TB = 'class:status-toolbar'

    @if_mousedown
    def toggle_paste_mode(mouse_event):
        python_input.paste_mode = not python_input.paste_mode

    @if_mousedown
    def enter_history(mouse_event):
        python_input.enter_history()

    def get_text_fragments():
        python_buffer = python_input.default_buffer
        result = []
        append = result.append
        append((TB, ' '))
        result.extend(get_inputmode_fragments(python_input))
        append((TB, ' '))
        append((TB, '%i/%i ' % (python_buffer.working_index + 1,
                                len(python_buffer._working_lines))))
        app = get_app()
        if not python_input.vi_mode and app.current_buffer == python_input.search_buffer:
            append((TB, '[Ctrl-G] Cancel search [Enter] Go to this position.'))
        elif bool(app.current_buffer.selection_state) and not python_input.vi_mode:
            append((TB, '[Ctrl-W] Cut [Meta-W] Copy [Ctrl-Y] Paste [Ctrl-G] Cancel'))
        else:
            result.extend([
                (TB + ' class:status-toolbar.key', '[F3]', enter_history),
                (TB, ' History ', enter_history),
                (TB + ' class:status-toolbar.key', '[F6]', toggle_paste_mode),
                (TB, ' ', toggle_paste_mode),
            ])
            if python_input.paste_mode:
                append((TB + ' class:status-toolbar.paste-mode-on',
                        'Paste mode (on)', toggle_paste_mode))
            else:
                append((TB, 'Paste mode', toggle_paste_mode))
        return result

    return ConditionalContainer(
        content=Window(content=FormattedTextControl(get_text_fragments), style=TB),
        filter=~is_done & renderer_height_is_known &
            Condition(lambda: python_input.show_status_bar and
                      not python_input.show_exit_confirmation))
Create the `Layout` for the status bar.
374,566
def handle_inittarget(
        state_change: ActionInitTarget,
        channel_state: NettingChannelState,
        pseudo_random_generator: random.Random,
        block_number: BlockNumber,
) -> TransitionResult[TargetTransferState]:
    transfer = state_change.transfer
    route = state_change.route

    assert channel_state.identifier == transfer.balance_proof.channel_identifier
    is_valid, channel_events, errormsg = channel.handle_receive_lockedtransfer(
        channel_state,
        transfer,
    )

    if not is_valid:
        # The guard was inverted by extraction damage: the claim-failed event
        # belongs to the *invalid* branch. The success branch, which
        # initializes the TargetTransferState, was lost in extraction.
        unlock_failed = EventUnlockClaimFailed(
            identifier=transfer.payment_identifier,
            secrethash=transfer.lock.secrethash,
            reason=errormsg,
        )
        channel_events.append(unlock_failed)

    iteration = TransitionResult(None, channel_events)
    return iteration
Handles an ActionInitTarget state change.
374,567
def create_secret(self, value, contributor, metadata=None, expires=None): if metadata is None: metadata = {} secret = self.create( value=value, contributor=contributor, metadata=metadata, expires=expires, ) return str(secret.handle)
Create a new secret, returning its handle. :param value: Secret value to store :param contributor: User owning the secret :param metadata: Optional metadata dictionary (must be JSON serializable) :param expires: Optional date/time of expiry (defaults to None, which means that the secret never expires) :return: Secret handle
374,568
def inception_v3(pretrained=False, ctx=cpu(),
                 root=os.path.join(base.data_dir(), 'models'), **kwargs):
    # 'models' and 'inceptionv3' literals restored from the Gluon model zoo
    # conventions; they were lost in extraction.
    net = Inception3(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        net.load_parameters(get_model_file('inceptionv3', root=root), ctx=ctx)
    return net
r"""Inception v3 model from `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default $MXNET_HOME/models Location for keeping the model parameters.
374,569
def call(method, *args, **kwargs):
    kwargs_copy = {}
    kwargs_copy.update(kwargs)
    for karg, warg in six.iteritems(kwargs_copy):
        if warg is None:
            kwargs.pop(karg)
    return salt.utils.napalm.call(NETWORK_DEVICE, method, *args, **kwargs)
Calls a specific method from the network driver instance.
Please check the readthedocs_ page for the updated list of getters.

.. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix

:param method: specifies the name of the method to be called
:param params: contains the mapping between the name and the values of the parameters needed to call the method
:return: A dictionary with the following keys:

    - result (True/False): if the operation succeeded
    - out (object): returns the object as-is from the call
    - comment (string): provides more details in case the call failed
    - traceback (string): complete traceback in case of exception. Please submit an issue including this traceback on the `correct driver repo`_ and make sure to read the FAQ_

.. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new
.. _FAQ: https://github.com/napalm-automation/napalm#faq

Example:

.. code-block:: python

    __proxy__['napalm.call']('cli', **{
        'commands': [
            'show version',
            'show chassis fan'
        ]
    })
374,570
def _parse_one_event(self):
    # The opening lines were lost in extraction; a plausible reconstruction
    # locates the first '{' in the buffer and discards any leading noise.
    open_brace_idx = self._buf.find('{')
    if open_brace_idx == -1:
        return None
    else:
        if open_brace_idx > 0:
            self._buf = self._buf[open_brace_idx:]
    try:
        event, idx = self._decoder.raw_decode(self._buf)
        self._buf = self._buf[idx:]
        return event
    except ValueError:
        return None
Parse the stream buffer and return either a single event or None
374,571
def extract_lookups(value): lookups = set() if isinstance(value, basestring): lookups = lookups.union(extract_lookups_from_string(value)) elif isinstance(value, list): for v in value: lookups = lookups.union(extract_lookups(v)) elif isinstance(value, dict): for v in value.values(): lookups = lookups.union(extract_lookups(v)) return lookups
Recursively extracts any stack lookups within the data structure.

Args:
    value (one of str, list, dict): a structure that contains lookups to
        output values

Returns:
    set: set of lookups, if any
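An illustrative call; the "${output stack::Value}" syntax shown follows stacker's output-lookup convention:

value = {
    'SecurityGroup': '${output vpc::DefaultSG}',
    'Subnets': ['${output vpc::Subnet1}', '${output vpc::Subnet2}'],
}
print(extract_lookups(value))  # set of the three extracted lookups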
374,572
def get_differing_atom_residue_ids(self, pdb_name, pdb_list): assert(pdb_name in self.pdb_names) assert(set(pdb_list).intersection(set(self.pdb_names)) == set(pdb_list)) differing_atom_residue_ids = set() for other_pdb in pdb_list: differing_atom_residue_ids = differing_atom_residue_ids.union(set(self.differing_atom_residue_ids[(pdb_name, other_pdb)])) return sorted(differing_atom_residue_ids)
Returns a list of residues in pdb_name which differ from the pdbs corresponding to the names in pdb_list.
374,573
def _assertField(self, name):
    if name not in self._names:
        # The message literal was lost in extraction; reconstructed from
        # the docstring.
        msg = '%s has no field named %s'
        values = self._defn.name, name
        raise AttributeError(msg % values)
Raise AttributeError when PacketHistory has no field with the given name.
374,574
def get_or_guess_labels(model, x, **kwargs):
    # 'y' / 'y_target' literals restored from the docstring.
    if 'y' in kwargs and 'y_target' in kwargs:
        raise ValueError("Can not set both 'y' and 'y_target'.")
    if 'y' in kwargs:
        labels = kwargs['y']
    elif 'y_target' in kwargs and kwargs['y_target'] is not None:
        labels = kwargs['y_target']
    else:
        _, labels = torch.max(model(x), 1)
    return labels
Get the label to use in generating an adversarial example for x. The kwargs are fed directly from the kwargs of the attack. If 'y' is in kwargs, then assume it's an untargeted attack and use that as the label. If 'y_target' is in kwargs and is not none, then assume it's a targeted attack and use that as the label. Otherwise, use the model's prediction as the label and perform an untargeted attack. :param model: PyTorch model. Do not add a softmax gate to the output. :param x: Tensor, shape (N, d_1, ...). :param y: (optional) Tensor, shape (N). :param y_target: (optional) Tensor, shape (N).
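A hedged usage sketch; `model` is any torch.nn.Module returning logits and `x` a batch tensor:

import torch

labels = get_or_guess_labels(model, x)  # untargeted: uses the model's own predictions
y_tgt = torch.zeros(x.shape[0], dtype=torch.long)
target_labels = get_or_guess_labels(model, x, y_target=y_tgt)  # targeted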
374,575
def get_sn(unit):
    sn = 0
    match_re = re.findall(str(sentence_delimiters), unit)
    if match_re:
        string = ''.join(match_re)
        sn = len(string)
    return int(sn)
Get the number of sentences in a line of text.

Keyword arguments:
unit -- a line of text

Return:
sn -- the sentence count
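A quick illustration; `sentence_delimiters` is defined elsewhere in the module, so the value here is only an assumption:

import re

sentence_delimiters = r'[。!?.!?]'  # assumed delimiter class
print(get_sn('今天天气很好。我们出去走走吧!'))  # -> 2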
374,576
def run(cmd_str, cwd='.', verbose=False):
    # NOTE: several string literals below were lost in extraction and are
    # restored from context ('.', '.exe', './', ' ', and the format string).
    bwd = os.getcwd()
    os.chdir(cwd)
    try:
        exe_name = cmd_str.split()[0]
        if "window" in platform.platform().lower():
            if not exe_name.lower().endswith("exe"):
                raw = cmd_str.split()
                raw[0] = exe_name + ".exe"
                cmd_str = ' '.join(raw)
        else:
            if exe_name.lower().endswith('.exe'):
                raw = cmd_str.split()
                exe_name = exe_name.replace('.exe', '')
                raw[0] = exe_name
                cmd_str = '{0} {1}'.format(*raw)  # format string reconstructed
            if os.path.exists(exe_name) and not exe_name.startswith('./'):
                cmd_str = "./" + cmd_str
    except Exception as e:
        os.chdir(bwd)
        raise Exception("run() error preprocessing command line :{0}".format(str(e)))
    if verbose:
        print("run():{0}".format(cmd_str))
    try:
        ret_val = os.system(cmd_str)
    except Exception as e:
        os.chdir(bwd)
        raise Exception("run() raised :{0}".format(str(e)))
    os.chdir(bwd)
    if "window" in platform.platform().lower():
        if ret_val != 0:
            raise Exception("run() returned non-zero")
an OS agnostic function to execute a command line Parameters ---------- cmd_str : str the str to execute with os.system() cwd : str the directory to execute the command in verbose : bool flag to echo to stdout complete cmd str Note ---- uses platform to detect OS and adds .exe suffix or ./ prefix as appropriate for Windows, if os.system returns non-zero, raises exception Example ------- ``>>>import pyemu`` ``>>>pyemu.helpers.run("pestpp pest.pst")``
374,577
def parent(self): if self._has_parent is None: _parent = self._ctx.backend.get_parent(self._ctx.dev) self._has_parent = _parent is not None if self._has_parent: self._parent = Device(_parent, self._ctx.backend) else: self._parent = None return self._parent
Return the parent device.
374,578
def find_optimal_allocation(self, tokens): token_ranges = self.find_tracked_words(tokens) token_ranges.sort() for offset in range(1, len(token_ranges)): to_be_removed = [] for candidate in token_ranges[offset:]: for i in range(offset): if token_ranges[i].overlaps_with(candidate): to_be_removed.append(candidate) break token_ranges = [token for token in token_ranges if token not in to_be_removed] token_ranges.sort(key=lambda token: token.get_start_index()) return token_ranges
Finds the longest, non-overlapping word-ranges of phrases in tokens stored in TokenTrie

:param tokens: tokens to search for tracked phrases
:type tokens: list of str
:return: Optimal allocation of tokens to phrases
:rtype: list of TokenTrie.Token
374,579
def attended_by(self, email): for attendee in self["attendees"] or []: if (attendee["email"] == email and attendee["responseStatus"] == "accepted"): return True return False
Check if user attended the event
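A minimal illustration; assume `event` is an instance of the wrapping class whose underlying data follows the Google Calendar payload:

# Underlying event data:
#   {'attendees': [{'email': 'a@example.com', 'responseStatus': 'accepted'},
#                  {'email': 'b@example.com', 'responseStatus': 'declined'}]}
print(event.attended_by('a@example.com'))  # True
print(event.attended_by('b@example.com'))  # False (declined)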
374,580
def read_string(self, registeraddress, numberOfRegisters=16, functioncode=3):
    # The description and payloadformat literals were lost in extraction;
    # restored from context.
    _checkFunctioncode(functioncode, [3, 4])
    _checkInt(numberOfRegisters, minvalue=1,
              description='number of registers for read string')
    return self._genericCommand(functioncode, registeraddress, \
        numberOfRegisters=numberOfRegisters, payloadformat='string')
Read a string from the slave. Each 16-bit register in the slave are interpreted as two characters (1 byte = 8 bits). For example 16 consecutive registers can hold 32 characters (32 bytes). Args: * registeraddress (int): The slave register start address (use decimal numbers, not hex). * numberOfRegisters (int): The number of registers allocated for the string. * functioncode (int): Modbus function code. Can be 3 or 4. Returns: The string (str). Raises: ValueError, TypeError, IOError
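Usage sketch with minimalmodbus; the port, slave address, and register address are placeholders:

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # port, slave address
text = instrument.read_string(registeraddress=16, numberOfRegisters=8)  # 16 chars
print(text)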
374,581
def get_dependencies(self): return super().get_dependencies() + [ Data.collection_set, Data.entity_set, Data.parents, ]
Return dependencies, which should trigger updates of this model.
374,582
def validate_auth_mechanism(option, value):
    # 'DEFAULT' restored from context (it is allowed alongside MECHANISMS).
    if value not in MECHANISMS and value != 'DEFAULT':
        raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS)))
    return value
Validate the authMechanism URI option.
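Illustration; MECHANISMS is pymongo's tuple of supported mechanisms:

validate_auth_mechanism('authMechanism', 'SCRAM-SHA-1')  # returns 'SCRAM-SHA-1'
validate_auth_mechanism('authMechanism', 'bogus')        # raises ValueError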
374,583
def print_genl_msg(_, ofd, hdr, ops, payloadlen):
    data = nlmsg_data(hdr)
    if payloadlen.value < GENL_HDRLEN:
        return data
    print_genl_hdr(ofd, data)
    payloadlen.value -= GENL_HDRLEN
    data = bytearray_ptr(data, GENL_HDRLEN)
    if ops:
        hdrsize = ops.co_hdrsize - GENL_HDRLEN
        if hdrsize > 0:
            if payloadlen.value < hdrsize:
                return data
            # The format-string literal was lost in extraction; reconstructed
            # from libnl's message-dump conventions.
            ofd('  [HEADER] %d octets', hdrsize)
            dump_hex(ofd, data, hdrsize, 0)
            payloadlen.value -= hdrsize
            data = bytearray_ptr(data, hdrsize)
    return data
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L831. Positional arguments: _ -- unused. ofd -- function to call with arguments similar to `logging.debug`. hdr -- Netlink message header (nlmsghdr class instance). ops -- cache operations (nl_cache_ops class instance). payloadlen -- length of payload in message (ctypes.c_int instance). Returns: data (bytearray_ptr).
374,584
def put(self, key):
    # The HTTP verb and the key-field literals were lost in extraction;
    # 'PUT', 'name' and 'url' are plausible reconstructions from the
    # docstring, not confirmed against the original source.
    self._consul_request('PUT', self._key_url(key['name']), json=key)
    return key['url']
Put and return the only unique identifier possible, its url
374,585
def _el_orb_tuple(string):
    # ',' and '.' separators restored from the docstring example "Sn.s.p,O".
    el_orbs = []
    for split in string.split(','):
        splits = split.split('.')
        el = splits[0]
        if len(splits) == 1:
            el_orbs.append(el)
        else:
            el_orbs.append((el, tuple(splits[1:])))
    return el_orbs
Parse the element and orbital argument strings. The presence of an element without any orbitals means that we want to plot all of its orbitals. Args: string (`str`): The selected elements and orbitals in in the form: `"Sn.s.p,O"`. Returns: A list of tuples specifying which elements/orbitals to plot. The output for the above example would be: `[('Sn', ('s', 'p')), 'O']`
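The docstring example, run directly:

print(_el_orb_tuple('Sn.s.p,O'))  # [('Sn', ('s', 'p')), 'O']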
374,586
def to_dict(self):
    # Dictionary keys were lost in extraction; 'componentId', 'instanceName',
    # 'name', 'value' and 'properties' follow the RTS profile conventions.
    d = {'componentId': self.component_id,
         'instanceName': self.instance_name}
    props = []
    for name in self.properties:
        p = {'name': name}
        if self.properties[name]:
            p['value'] = str(self.properties[name])
        props.append(p)
    if props:
        d[RTS_EXT_NS_YAML + 'properties'] = props
    return d
Save this target component into a dictionary.
374,587
def stencils(self):
    if not self._stencils:
        self._stencils = self.manifest['stencils']
    return self._stencils
List of stencils.
374,588
def location(self, value): warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2) self._location = value
(Deprecated) Set `Bucket.location` This can only be set at bucket **creation** time. See https://cloud.google.com/storage/docs/json_api/v1/buckets and https://cloud.google.com/storage/docs/bucket-locations .. warning:: Assignment to 'Bucket.location' is deprecated, as it is only valid before the bucket is created. Instead, pass the location to `Bucket.create`.
374,589
def _set_relative_pythonpath(self, value): self.pythonpath = [osp.abspath(osp.join(self.root_path, path)) for path in value]
Set the PYTHONPATH list from paths relative to the project root.
374,590
def sysinfo2float(version_info=sys.version_info):
    # NOTE: stripped string literals restored from context.
    vers_str = '.'.join([str(v) for v in version_info[0:3]])
    if version_info[3] != 'final':
        vers_str += '.' + ''.join([str(i) for i in version_info[3:]])
    if IS_PYPY:
        vers_str += 'pypy'
    else:
        try:
            import platform
            platform = platform.python_implementation()
            if platform in ('Jython', 'Pyston'):
                vers_str += platform
            pass
        except ImportError:
            # Python may be too old, or the implementation may simply not
            # provide platform.
            pass
        except AttributeError:
            pass
    return py_str2float(vers_str)
Convert a sys.versions_info-compatible list into a 'canonic' floating-point number which that can then be used to look up a magic number. Note that this can only be used for released version of C Python, not interim development versions, since we can't represent that as a floating-point number. For handling Pypy, pyston, jython, etc. and interim versions of C Python, use sysinfo2magic.
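Example; the exact float comes from py_str2float's lookup table:

import sys
print(sysinfo2float(sys.version_info))  # e.g. 2.7 on CPython 2.7.15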
374,591
def route_handler(context, content, pargs, kwargs):
    # NOTE: string literals lost in extraction; the 404 comment format,
    # 'method', 'id', and the popped keys are best guesses from context.
    (node, rule_kw) = node_from_uri(pargs[0])

    if node == None:
        return u"<!-- 404 '{0}' -->".format(pargs[0])

    rule_kw.update( node )
    values = rule_kw
    values.update( request.form.to_dict(flat=True) )
    values.update( request.args.to_dict(flat=True) )
    values['method'] = request.method
    noderequest = values.copy()
    noderequest.pop('node_id')
    noderequest.pop('name')
    noderequest.pop('value')
    rendered = render_node(node['id'], noderequest=noderequest, **values)
    if rendered:
        if not isinstance(rendered, (str, unicode, int, float)):
            return encoder.encode(rendered)
        return rendered
    return "<!-- 404 '{0}' -->".format(pargs[0])
Route shortcode works a lot like rendering a page based on the url or route. This allows inserting in rendered HTML within another page. Activate it with the 'shortcodes' template filter. Within the content use the chill route shortcode: "[chill route /path/to/something/]" where the '[chill' and ']' are the shortcode starting and ending tags. And 'route' is this route handler that takes one argument which is the url.
374,592
def get(key, default=-1): if isinstance(key, int): return ECDSA_LOW_Curve(key) if key not in ECDSA_LOW_Curve._member_map_: extend_enum(ECDSA_LOW_Curve, key, default) return ECDSA_LOW_Curve[key]
Backport support for original codes.
374,593
def get_certificate_json(self, certificate_uid):
    # 'http', '/', and the log-message literals were lost in extraction;
    # reconstructed from context.
    if certificate_uid.startswith(URN_UUID_PREFIX):
        uid = certificate_uid[len(URN_UUID_PREFIX):]
    elif certificate_uid.startswith('http'):
        last_slash = certificate_uid.rindex('/')
        uid = certificate_uid[last_slash + 1:]
    else:
        uid = certificate_uid
    logging.debug('retrieving certificate for uid=%s', uid)
    certificate_bytes = self._get_certificate_raw(uid)
    logging.debug('retrieved certificate for uid=%s', uid)
    certificate_json = helpers.certificate_bytes_to_json(certificate_bytes)
    return certificate_json
Returns certificate as json. Propagates KeyError if key isn't found :param certificate_uid: :return:
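A hedged call sketch; `store` is assumed to be an instance of the containing class:

cert = store.get_certificate_json('urn:uuid:1234-abcd')           # URN form
cert = store.get_certificate_json('http://host/certs/1234-abcd')  # URL form: uid is taken after the last '/'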
374,594
def process_am1(self, am1): self.minw = min(map(lambda l: self.wght[l], am1)) self.core_sels, b = am1, len(am1) - 1 self.cost += b * self.minw self.garbage = set() self.process_sels() self.topv += 1 selv = self.topv self.oracle.add_clause([-l for l in self.rels] + [-selv]) self.sels.append(selv) self.wght[selv] = self.minw self.smap[selv] = len(self.wght) - 1 self.bckp_set.add(selv) self.filter_assumps()
Due to the solving process involving multiple optimization levels to be treated individually, new soft clauses for the detected intrinsic AtMost1 constraints should be remembered. The method is a slightly modified version of the base method :func:`RC2.process_am1` taking care of this.
374,595
def raw_file(client, src, dest, opt):
    # NOTE: 'data', 'sts', and the debug-message literals were lost in
    # extraction and are reconstructions from context.
    path, key = path_pieces(src)
    resp = client.read(path)
    if not resp:
        client.revoke_self_token()
        raise aomi.exceptions.VaultData("Unable to retrieve %s" % path)
    else:
        if 'data' in resp and key in resp['data']:
            secret = resp['data'][key]
            if is_base64(secret):
                LOG.debug('decoding base64 secret')
                secret = portable_b64decode(secret)
            if is_aws(resp['data']) and 'sts' not in path:
                renew_secret(client, resp, opt)
            write_raw_file(secret, dest)
        else:
            client.revoke_self_token()
            e_msg = "Key %s not found in %s" % (key, path)
            raise aomi.exceptions.VaultData(e_msg)
Write the contents of a vault path/key to a file. Is smart enough to attempt and handle binary files that are base64 encoded.
374,596
def _report_external_dependencies(self, sect, _, _dummy): dep_info = _make_tree_defs(self._external_dependencies_info().items()) if not dep_info: raise EmptyReportError() tree_str = _repr_tree_defs(dep_info) sect.append(VerbatimText(tree_str))
return a verbatim layout for displaying dependencies
374,597
def get_namespaces(namespace="", apiserver_url=None):
    apiserver_url = _guess_apiserver(apiserver_url)
    if apiserver_url is None:
        return False
    ret = _get_namespaces(apiserver_url, namespace)
    return ret
.. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local
374,598
def shutdown(at_time=None):
    # Command literals were lost in extraction; reconstructed as the standard
    # BSD/macOS shutdown invocation ('shutdown -h +N' or 'shutdown -h now').
    cmd = ['shutdown', '-h',
           ('+{0}'.format(at_time) if at_time else 'now')]
    ret = __salt__['cmd.run'](cmd, python_shell=False)
    return ret
Shutdown a running system at_time The wait time in minutes before the system will be shutdown. CLI Example: .. code-block:: bash salt '*' system.shutdown 5
374,599
def hydrate_input_uploads(input_, input_schema, hydrate_values=True):
    # NOTE: string literals and the URL regex were lost in extraction;
    # 'name', 'type', 'basic:file:', 'list:basic:file:' and 'file_temp'
    # follow Resolwe's schema conventions, and the regex is a plausible
    # reconstruction.
    from resolwe.flow.managers import manager

    files = []
    for field_schema, fields in iterate_fields(input_, input_schema):
        name = field_schema['name']
        value = fields[name]
        if 'type' in field_schema:
            if field_schema['type'] == 'basic:file:':
                files.append(value)
            elif field_schema['type'] == 'list:basic:file:':
                files.extend(value)

    urlregex = re.compile(r'^(https?|ftp)://')
    for value in files:
        if 'file_temp' in value:
            if isinstance(value['file_temp'], str):
                # Hydrate relative paths only; URLs are left untouched.
                if not urlregex.search(value['file_temp']):
                    value['file_temp'] = manager.get_executor().resolve_upload_path(value['file_temp'])
            else:
                # Original error string lost in extraction.
                value['file_temp'] = 'Invalid value for file_temp'
Hydrate input basic:upload types with upload location. Find basic:upload fields in input. Add the upload location for relative paths.