Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
371,300
def PriceHourly(self): units = self.PriceUnits() return(units[]*self.cpu+units[]*self.memory+units[]*self.storage+units[])
Returns the total hourly price for the server. Sums unit prices with unit volumes. >>> clc.v2.Server("NY1BTDIPHYP0101").PriceHourly() 0.02857
371,301
def sequencetyper(self): for sample in self.metadata.samples: if sample.general.bestassemblyfile != : if type(sample[self.analysistype].allelenames) == list: if sample[self.analysistype].profile != : sample[self.analysistype].profilematches = dict() sample[self.analysistype].sequencetypematches = dict() profiledata = sample[self.analysistype].profiledata for gene in sorted(sample[self.analysistype].allelenames): try: allelenumber = sample[self.analysistype].allelematches[gene].split()[1] for sequencetype in profiledata: refallele = profiledata[sequencetype][gene] if allelenumber == refallele: try: sample[self.analysistype].profilematches[sequencetype] += 1 sample[self.analysistype].sequencetypematches[sequencetype].append( refallele) except KeyError: sample[self.analysistype].profilematches[sequencetype] = 1 sample[self.analysistype].sequencetypematches[sequencetype] = list() sample[self.analysistype].sequencetypematches[sequencetype].append( refallele) except KeyError: pass
Determines the sequence type of each strain based on comparisons to sequence type profiles
371,302
def fields(self): return {k:getattr(self, k, None) for k in self.schema.fields}
return all the fields and their raw values for this Orm instance. This property returns a dict with the field names and their current values if you want to control the values for outputting to an api, use .jsonable()
371,303
def meta_wrapped(f): @wraps(f) def wrapped(self, field, *args, **kwargs): html = "{label}{errors}{original}<small>{description}</small>".format( label=field.label(class_=), original=f(self, field, *args, **kwargs), errors=render_field_errors(field) or , description=render_field_description(field) ) return HTMLString(html) return wrapped
Add a field label, errors, and a description (if it exists) to a field.
371,304
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce, request, request_token=None, access_token=None): log.debug(, client_key) nonce_exists = self._noncegetter( client_key=client_key, timestamp=timestamp, nonce=nonce, request_token=request_token, access_token=access_token ) if nonce_exists: return False self._noncesetter( client_key=client_key, timestamp=timestamp, nonce=nonce, request_token=request_token, access_token=access_token ) return True
Validate the timestamp and nonce is used or not.
371,305
def proximal_l2(space, lam=1, g=None): r lam = float(lam) if g is not None and g not in space: raise TypeError(.format(g, space)) class ProximalL2(Operator): def __init__(self, sigma): super(ProximalL2, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): dtype = getattr(self.domain, , float) eps = np.finfo(dtype).resolution * 10 if g is None: x_norm = x.norm() * (1 + eps) if x_norm > 0: step = self.sigma * lam / x_norm else: step = np.infty if step < 1.0: out.lincomb(1.0 - step, x) else: out.set_zero() else: x_norm = (x - g).norm() * (1 + eps) if x_norm > 0: step = self.sigma * lam / x_norm else: step = np.infty if step < 1.0: out.lincomb(1.0 - step, x, step, g) else: out.assign(g) return ProximalL2
r"""Proximal operator factory of the l2-norm/distance. Function for the proximal operator of the functional ``F`` where ``F`` is the l2-norm (or distance to g, if given):: ``F(x) = lam ||x - g||_2`` Parameters ---------- space : `LinearSpace` Domain of F(x). Needs to be a Hilbert space. That is, have an inner product (`LinearSpace.inner`). lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional An element in ``space``. Default: ``space.zero``. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- Most problems are forumlated for the squared norm/distance, in that case use `proximal_l2_squared` instead. The :math:`L_2`-norm/distance :math:`F` is given by .. math:: F(x) = \lambda \|x - g\|_2 For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F` is given by .. math:: \mathrm{prox}_{\sigma F}(y) = \begin{cases} \frac{1 - c}{\|y-g\|} \cdot y + c \cdot g & \text{if } c < g, \\ g & \text{else}, \end{cases} where :math:`c = \sigma \frac{\lambda}{\|y - g\|_2}`. See Also -------- proximal_l2_squared : proximal for squared norm/distance proximal_convex_conj_l2 : proximal for convex conjugate
371,306
def nla_get_u64(nla): tmp = c_uint64(0) if nla and nla_len(nla) >= sizeof(tmp): tmp = c_uint64.from_buffer(nla_data(nla)[:SIZEOF_U64]) return int(tmp.value)
Return value of 64 bit integer attribute as an int(). https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L649 Positional arguments: nla -- 64 bit integer attribute (nlattr class instance). Returns: Payload as an int().
371,307
def parents(self): if self._parents is None: bottomups = tuple(Relationship.bottomup()) self._parents = TermList() self._parents.extend( [ other for rship,others in six.iteritems(self.relations) for other in others if rship in bottomups ] ) return self._parents
~TermList: The direct parents of the `Term`.
371,308
def get_dos_from_id(self, task_id): args = {: task_id} fields = [] structure = self.get_structure_from_id(task_id) dosid = None for r in self.query(fields, args): dosid = r[][-1][] if dosid is not None: self._fs = gridfs.GridFS(self.db, ) with self._fs.get(dosid) as dosfile: s = dosfile.read() try: d = json.loads(s) except: s = zlib.decompress(s) d = json.loads(s.decode("utf-8")) tdos = Dos.from_dict(d) pdoss = {} for i in range(len(d[])): ados = d[][i] all_ados = {} for j in range(len(ados)): orb = Orbital(j) odos = ados[str(orb)] all_ados[orb] = {Spin(int(k)): v for k, v in odos[].items()} pdoss[structure[i]] = all_ados return CompleteDos(structure, tdos, pdoss) return None
Overrides the get_dos_from_id for the MIT gridfs format.
371,309
def grab_gpus(num_gpus=1, gpu_select=None, gpu_fraction=0.95, max_procs=-1): os.environ[] = "" if num_gpus == 0: return 0 logger = logging.getLogger(__name__) try: py3nvml.nvmlInit() except: str_ = warnings.warn(str_, RuntimeWarning) logger.warn(str_) return 0 numDevices = py3nvml.nvmlDeviceGetCount() gpu_free = [False]*numDevices if gpu_select is None: gpu_check = [True] * numDevices else: gpu_check = [False] * numDevices try: gpu_check[gpu_select] = True except TypeError: try: for i in gpu_select: gpu_check[i] = True except: raise ValueError() for i in range(numDevices): if not gpu_check[i]: continue handle = py3nvml.nvmlDeviceGetHandleByIndex(i) info = py3nvml.nvmlDeviceGetMemoryInfo(handle) str_ = "GPU {}:\t".format(i) + \ "Used Mem: {:>6}MB\t".format(info.used/(1024*1024)) + \ "Total Mem: {:>6}MB".format(info.total/(1024*1024)) logger.debug(str_) if max_procs >= 0: procs_ok = get_free_gpus(max_procs=max_procs) else: procs_ok = [True,] * numDevices for i in range(numDevices): if gpu_check[i] and procs_ok[i]: handle = py3nvml.nvmlDeviceGetHandleByIndex(i) info = py3nvml.nvmlDeviceGetMemoryInfo(handle) if (info.free+10)/info.total >= gpu_fraction: gpu_free[i] = True else: logger.info(.format(i)) py3nvml.nvmlShutdown() if sum(gpu_free) == 0: warnings.warn("Could not find enough GPUs for your job", RuntimeWarning) logger.warn(str_) return 0 else: if sum(gpu_free) >= num_gpus: available_gpus = [i for i, x in enumerate(gpu_free) if x] use_gpus = .join(list(str(s) for s in available_gpus[:num_gpus])) logger.debug(.format(sum(gpu_free))) logger.info(.format(use_gpus)) os.environ[] = use_gpus return num_gpus else: s = "Only {} GPUs found but {}".format(sum(gpu_free), num_gpus) + \ "requested. Allocating these and continuing." 
warnings.warn(s, RuntimeWarning) logger.warn(s) available_gpus = [i for i, x in enumerate(gpu_free) if x] use_gpus = .join(list(str(s) for s in available_gpus)) logger.debug(.format(sum(gpu_free))) logger.info(.format(use_gpus)) os.environ[] = use_gpus return sum(gpu_free)
Checks for gpu availability and sets CUDA_VISIBLE_DEVICES as such. Note that this function does not do anything to 'reserve' gpus, it only limits what GPUS your program can see by altering the CUDA_VISIBLE_DEVICES variable. Other programs can still come along and snatch your gpu. This function is more about preventing **you** from stealing someone else's GPU. If more than 1 GPU is requested but the full amount are available, then it will set the CUDA_VISIBLE_DEVICES variable to see all the available GPUs. A warning is generated in this case. If one or more GPUs were requested and none were available, a Warning will be raised. Before raising it, the CUDA_VISIBLE_DEVICES will be set to a blank string. This means the calling function can ignore this warning and proceed if it chooses to only use the CPU, and it should still be protected against putting processes on a busy GPU. You can call this function with num_gpus=0 to blank out the CUDA_VISIBLE_DEVICES environment variable. Parameters ---------- num_gpus : int How many gpus your job needs (optional) gpu_select : iterable A single int or an iterable of ints indicating gpu numbers to search through. If left blank, will search through all gpus. gpu_fraction : float The fractional of a gpu memory that must be free for the script to see the gpu as free. Defaults to 1. Useful if someone has grabbed a tiny amount of memory on a gpu but isn't using it. max_procs : int Maximum number of processes allowed on a GPU (as well as memory restriction). Returns ------- success : int Number of gpus 'grabbed' Raises ------ RuntimeWarning If couldn't connect with NVIDIA drivers. If 1 or more gpus were requested and none were available. ValueError If the gpu_select option was not understood (can fix by leaving this field blank, providing an int or an iterable of ints).
371,310
def outputPoint(self): node = self.outputNode() if not node: return self._outputPoint hotspot = self.outputHotspot() olocation = self.outputLocation() ofixedx = self.outputFixedX() ofixedy = self.outputFixedY() loc_left = XNodeConnection.Location.Left loc_right = XNodeConnection.Location.Right loc_top = XNodeConnection.Location.Top loc_bot = XNodeConnection.Location.Bottom irect = self.inputRect() orect = self.outputRect() if olocation & loc_right and orect.right() < irect.left(): if hotspot: return node.mapToScene(QPointF(hotspot.rect().right(), hotspot.rect().center().y())) else: return node.positionAt(loc_right, ofixedx, ofixedy) elif olocation & loc_left and irect.right() < orect.left(): if hotspot: return node.mapToScene(QPointF(hotspot.rect().left(), hotspot.rect().center().y())) else: return node.positionAt(loc_left, ofixedx, ofixedy) elif olocation & loc_bot and orect.bottom() < irect.top(): if hotspot: return node.mapToScene(QPointF(hotspot.rect().center().x(), hotspot.rect().bottom())) else: return node.positionAt(loc_bot, ofixedx, ofixedy) elif olocation & loc_top and irect.bottom() < orect.top(): if hotspot: return node.mapToScene(QPointF(hotspot.rect().center().x(), hotspot.rect().top())) else: return node.positionAt(loc_top, ofixedx, ofixedy) else: if hotspot: return node.mapToScene(hotspot.rect().center()) else: return node.positionAt(olocation, ofixedx, ofixedy)
Returns a scene space point that the connection \ will draw to as its output source. If the connection \ has a node defined, then it will calculate the output \ point based on the position of the node, factoring in \ preference for output location and fixed positions. If \ there is no node connected, then the point defined using \ the setOutputPoint method will be used. :return <QPointF>
371,311
def _compute_raw_moments(self, n_counter, k_counter): r alpha_multipliers, beta_multipliers = self._get_parameter_symbols(n_counter, k_counter) out_mat = sp.Matrix([a * b for a,b in zip(alpha_multipliers, beta_multipliers)]) out_mat = out_mat.applyfunc(sp.expand) return out_mat
r""" Compute :math:`X_i` Gamma type 1: :math:`X_i = \frac {\beta_i}{\beta_0}Y_0 + Y_i` Gamma type 2: :math:`X_i = \sum_{k=0}^{i} \frac {\beta_i}{\beta_k}Y_k` :param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments :type n_counter: list[:class:`~means.core.descriptors.Moment`] :param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments :type k_counter: list[:class:`~means.core.descriptors.Moment`] :return: a vector of parametric expression for raw moments
371,312
def isMasterReqLatencyTooHigh(self): format(MONITORING_PREFIX, self, r[1], r[0])) else: logger.trace("{} found master's latency to be lower than the " "threshold for all requests.".format(self)) return r
Return whether the request latency of the master instance is greater than the acceptable threshold
371,313
def _ancestors_or_self( self, qname: Union[QualName, bool] = None) -> List[InstanceNode]: res = [] if qname and self.qual_name != qname else [self] return res + self.up()._ancestors(qname)
XPath - return the list of receiver's ancestors including itself.
371,314
def _check_portname(name): if not isinstance(name, string_types) or not in name: raise SaltInvocationError( {0}\.format(name) ) path = os.path.join(, name) if not os.path.isdir(path): raise SaltInvocationError({0}\.format(path)) return path
Check if portname is valid and whether or not the directory exists in the ports tree.
371,315
def register_calculator_view(request): if request.method == "POST": form = CalculatorRegistrationForm(request.POST) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user obj.save() messages.success(request, "Successfully added calculator.") return redirect("itemreg") else: messages.error(request, "Error adding calculator.") else: form = CalculatorRegistrationForm() return render(request, "itemreg/register_form.html", {"form": form, "action": "add", "type": "calculator", "form_route": "itemreg_calculator"})
Register a calculator.
371,316
def ip_hide_as_path_holder_as_path_access_list_instance(self, **kwargs): config = ET.Element("config") ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def") hide_as_path_holder = ET.SubElement(ip, "hide-as-path-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") as_path = ET.SubElement(hide_as_path_holder, "as-path") access_list = ET.SubElement(as_path, "access-list") name_key = ET.SubElement(access_list, "name") name_key.text = kwargs.pop() seq_keyword_key = ET.SubElement(access_list, "seq-keyword") seq_keyword_key.text = kwargs.pop() instance = ET.SubElement(access_list, "instance") instance.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
371,317
def run(self, *args): params = self.parser.parse_args(args) organization = params.organization domain = params.domain is_top_domain = params.top_domain overwrite = params.overwrite if params.add: code = self.add(organization, domain, is_top_domain, overwrite) elif params.delete: code = self.delete(organization, domain) else: term = organization code = self.registry(term) return code
List, add or delete organizations and domains from the registry. By default, it prints the list of organizations available on the registry.
371,318
def remove_old(self, max_log_time): files = glob.glob(.format(self.log_dir)) files = list(map(lambda x: os.path.basename(x), files)) for log_file in files: name = os.path.splitext(log_file)[0] timestamp = name.split(, maxsplit=1)[1] time = datetime.strptime(timestamp, ) now = datetime.now() delta = now - time seconds = delta.total_seconds() if seconds > int(max_log_time): log_filePath = os.path.join(self.log_dir, log_file) os.remove(log_filePath)
Remove all logs which are older than the specified time.
371,319
def ingest_data(self, data, cat_name, id_col, ra_col=, dec_col=, cat_loc=, append=False, count=-1): if not append and cat_name in self.catalogs: print(.format(cat_name)) else: if isinstance(data, str): cat_loc = cat_loc or data data = pd.read_csv(data, sep=, comment=, engine=)[:count] elif isinstance(data, pd.core.frame.DataFrame): cat_loc = cat_loc or type(data) elif isinstance(data, (at.QTable, at.Table)): cat_loc = cat_loc or type(data) data = pd.DataFrame(list(data), columns=data.colnames) else: print("Sorry, but I cannot read that data. Try an ascii file cat_loc, astropy table, or pandas data frame.") return if isinstance(data[ra_col][0], str): crds = coord.SkyCoord(ra=data[ra_col], dec=data[dec_col], unit=(q.hour, q.deg), frame=) data.insert(0,, crds.dec) data.insert(0,, crds.ra) elif isinstance(data[ra_col][0], float): data.rename(columns={ra_col:, dec_col:}, inplace=True) else: print("I cancatID{}_{}dec_corrdecra_corrrasource_idIngesting {} rows from {} catalog...cat_locid_colra_coldec_col{}append=False' to create it.".format(cat_name))
Ingest a data file and regroup sources Parameters ---------- data: str, pandas.DataFrame, astropy.table.Table The path to the exported VizieR data or the data table cat_name: str The name of the added catalog id_col: str The name of the column containing the unique ids ra_col: str The name of the RA column dec_col: str The name of the DEC column cat_loc: str The location of the original catalog data append: bool Append the catalog rather than replace count: int The number of table rows to add (This is mainly for testing purposes)
371,320
def save(self, filename="temp.pkl"): output = open(filename, ) cPickle.dump(self.tm, output, protocol=cPickle.HIGHEST_PROTOCOL)
Save TM in the filename specified above
371,321
def send_ether_over_wpa(self, pkt, **kwargs): payload = LLC() / SNAP() / pkt[Ether].payload dest = pkt.dst if dest == "ff:ff:ff:ff:ff:ff": self.send_wpa_to_group(payload, dest) else: assert dest == self.client self.send_wpa_to_client(payload)
Send an Ethernet packet using the WPA channel Extra arguments will be ignored, and are just left for compatibility
371,322
def colorbar(self, cmap, position="right", label="", clim=("", ""), border_width=0.0, border_color="black", **kwargs): self._configure_2d() cbar = scene.ColorBarWidget(orientation=position, label_str=label, cmap=cmap, clim=clim, border_width=border_width, border_color=border_color, **kwargs) CBAR_LONG_DIM = 50 if cbar.orientation == "bottom": self.grid.remove_widget(self.cbar_bottom) self.cbar_bottom = self.grid.add_widget(cbar, row=5, col=4) self.cbar_bottom.height_max = \ self.cbar_bottom.height_max = CBAR_LONG_DIM elif cbar.orientation == "top": self.grid.remove_widget(self.cbar_top) self.cbar_top = self.grid.add_widget(cbar, row=1, col=4) self.cbar_top.height_max = self.cbar_top.height_max = CBAR_LONG_DIM elif cbar.orientation == "left": self.grid.remove_widget(self.cbar_left) self.cbar_left = self.grid.add_widget(cbar, row=2, col=1) self.cbar_left.width_max = self.cbar_left.width_min = CBAR_LONG_DIM else: self.grid.remove_widget(self.cbar_right) self.cbar_right = self.grid.add_widget(cbar, row=2, col=5) self.cbar_right.width_max = \ self.cbar_right.width_min = CBAR_LONG_DIM return cbar
Show a ColorBar Parameters ---------- cmap : str | vispy.color.ColorMap Either the name of the ColorMap to be used from the standard set of names (refer to `vispy.color.get_colormap`), or a custom ColorMap object. The ColorMap is used to apply a gradient on the colorbar. position : {'left', 'right', 'top', 'bottom'} The position of the colorbar with respect to the plot. 'top' and 'bottom' are placed horizontally, while 'left' and 'right' are placed vertically label : str The label that is to be drawn with the colorbar that provides information about the colorbar. clim : tuple (min, max) the minimum and maximum values of the data that is given to the colorbar. This is used to draw the scale on the side of the colorbar. border_width : float (in px) The width of the border the colormap should have. This measurement is given in pixels border_color : str | vispy.color.Color The color of the border of the colormap. This can either be a str as the color's name or an actual instace of a vipy.color.Color Returns ------- colorbar : instance of ColorBarWidget See also -------- ColorBarWidget
371,323
def exit_with_exc_info(code=1, message=, print_tb=False, exception=None): exc_type, exc_value = (exception.__class__, exception) \ if exception is not None else sys.exc_info()[:2] if exc_type is not None: if print_tb: traceback.print_exc() elif isinstance(exc_value, KeyboardInterrupt): sys.stderr.write() else: for line in traceback.format_exception_only(exc_type, exc_value): sys.stderr.write(line) sys.stderr.write(message) if message != and not message.endswith(): sys.stderr.write() sys.exit(code)
Exits the program, printing information about the last exception (if any) and an optional error message. Uses *exception* instead if provided. :param code: Exit code. :type code: integer (valid exit code, 0-255) :param message: Message to be printed after the exception information. :type message: string :param print_tb: If set to True, prints the exception traceback; otherwise, suppresses it. :type print_tb: boolean :type exception: an exception to use in place of the last exception raised
371,324
def timestamp(num_params, p_levels, k_choices, N): string = "_v%s_l%s_gs%s_k%s_N%s_%s.txt" % (num_params, p_levels, k_choices, N, dt.strftime(dt.now(), "%d%m%y%H%M%S")) return string
Returns a uniform timestamp with parameter values for file identification
371,325
def command_line_arguments(command_line_parameters): parser = argparse.ArgumentParser(description=, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(, , choices = all_algorithms, default = (,), nargs = , help = ) parser.add_argument(, action = , help = ) parser.add_argument(, , choices = available_databases, default = , help = ) parser.add_argument(, , default = , help = ) parser.add_argument(, , help = ) parser.add_argument(, , action = , help = ) parser.add_argument(, , type=int, help = ) parser.add_argument(, , action = , help = ) parser.add_argument(, , action = , help = ) parser.add_argument(, , nargs=, choices = (, , , , , ), help = ) parser.add_argument(, nargs = argparse.REMAINDER, help = ) bob.core.log.add_command_line_option(parser) args = parser.parse_args(command_line_parameters) if args.all: args.algorithms = all_algorithms bob.core.log.set_verbosity_level(logger, args.verbose) return args
Defines the command line parameters that are accepted.
371,326
def send_login_signal(self, request, user, profile, client): signals.login.send(sender=profile.__class__, user=user, profile=profile, client=client, request=request)
Send a signal that a user logged in. This signal should be sent only if the user was *not* logged into Django.
371,327
def update_utxoset(self, transaction): spent_outputs = [ spent_output for spent_output in transaction.spent_outputs ] if spent_outputs: self.delete_unspent_outputs(*spent_outputs) self.store_unspent_outputs( *[utxo._asdict() for utxo in transaction.unspent_outputs] )
Update the UTXO set given ``transaction``. That is, remove the outputs that the given ``transaction`` spends, and add the outputs that the given ``transaction`` creates. Args: transaction (:obj:`~bigchaindb.models.Transaction`): A new transaction incoming into the system for which the UTXO set needs to be updated.
371,328
def list_from_env(key, default=""): try: val = os.environ.get(key, default) return val.split() except (KeyError, ValueError): return []
Splits a string in the format "a,b,c,d,e,f" into ['a', 'b', 'c', 'd', 'e', 'f', ]
371,329
def run(self, steps=None, resume=False, redo=None): recipe = { "name" : self.name, "steps" : [] } start_at = 0 if redo: recipe = utils.readJson(redo) self.log.info(.format(recipe[], redo)) self.log.info() self.jobs = [] for step in recipe[]: if step[] == : self.log.info({0}\{1}\.format(step[], step[])) cont = docker.Container(step[], step[], label=step[], logger=self.log, shared_memory=step[]) self.log.debug(.format(step[], step[])) cont.volumes = step[] cont.environs = step[] cont.shared_memory = step[] cont.input_content = step[] cont.msdir_content = step[] cont.logfile = step[] job = StimelaJob(step[], recipe=self, label=step[]) job.job = cont job.jtype = elif step[] == : name = step[] func = inspect.currentframe().f_back.f_locals[step[]] job = StimelaJob(name, recipe=self, label=step[]) job.python_job(func, step[]) job.jtype = self.jobs.append(job) elif resume: self.log.info("Resuming recipe from last run.") try: recipe = utils.readJson(self.resume_file) except IOError: raise StimelaRecipeExecutionError("Cannot resume pipeline, resume file not found".format(self.resume_file)) steps_ = recipe.pop() recipe[] = [] _steps = [] for step in steps_: if step[] == : recipe[].append(step) continue label = step[] number = step[] if label == self.jobs[number-1].label: self.log.info({0}\.format(number, label)) _steps.append(number) else: raise StimelaRecipeExecutionError(.format(label)) if len(_steps)==0: self.log.info() sys.exit(0) steps = _steps if getattr(steps, , False): _steps = [] if isinstance(steps[0], str): labels = [ job.label.split()[0] for job in self.jobs] for step in steps: try: _steps.append(labels.index(step)+1) except ValueError: raise StimelaCabParameterError(t existRunning job {}STEP {0} :: {1}functiondockersingularitya\n-----------------------------------\nStimela version : {}\nCab name : {}\n-------------------------------------\ncompletedRecipe execution failed while running job {}Completed jobs : {}Remaining jobs : {}failedLogging remaining task: 
{}remainingSaving pipeline information in {}dockersingularitySaving pipeline information in {}Recipe executed successfully') return 0
Run a Stimela recipe. steps : recipe steps to run resume : resume recipe from last run redo : Re-run an old recipe from a .last file
371,330
def get_force_single(self, component_info=None, data=None, component_position=None): components = [] append_components = components.append for _ in range(component_info.plate_count): component_position, plate = QRTPacket._get_exact( RTForcePlateSingle, data, component_position ) component_position, force = QRTPacket._get_exact( RTForce, data, component_position ) append_components((plate, force)) return components
Get a single force data channel.
371,331
def download(self, path, args=[], filepath=None, opts={}, compress=True, **kwargs): url = self.base + path wd = filepath or params = [] params.append((, )) params.append((, )) if compress: params.append((, )) for opt in opts.items(): params.append(opt) for arg in args: params.append((, arg)) method = res = self._do_request(method, url, params=params, stream=True, **kwargs) self._do_raise_for_status(res) mode = if compress else with tarfile.open(fileobj=res.raw, mode=mode) as tf: tf.extractall(path=wd)
Makes a request to the IPFS daemon to download a file. Downloads a file or files from IPFS into the current working directory, or the directory given by ``filepath``. Raises ------ ~ipfsapi.exceptions.ErrorResponse ~ipfsapi.exceptions.ConnectionError ~ipfsapi.exceptions.ProtocolError ~ipfsapi.exceptions.StatusError ~ipfsapi.exceptions.TimeoutError Parameters ---------- path : str The REST command path to send filepath : str The local path where IPFS will store downloaded files Defaults to the current working directory. args : list Positional parameters to be sent along with the HTTP request opts : dict Query string paramters to be sent along with the HTTP request compress : bool Whether the downloaded file should be GZip compressed by the daemon before being sent to the client kwargs : dict Additional arguments to pass to :mod:`requests`
371,332
def plugin_method(*plugin_names): def wrapper(callable_obj): for plugin_name in plugin_names: if not hasattr(callable_obj, plugin_name): setattr(callable_obj, plugin_name, True) return callable_obj return wrapper
Plugin Method decorator. Signs a web handler function with the plugins to be applied as attributes. Args: plugin_names (list): A list of plugin callable names Returns: A wrapped handler callable. Examples: >>> @plugin_method('json', 'bill') ... def method(): ... return "Hello!" ... >>> print method.json True >>> print method.bill True
371,333
def _get_attrs(self): attrs = [] attrs.append(("N Blocks", self.n_blocks, "{}")) bds = self.bounds attrs.append(("X Bounds", (bds[0], bds[1]), "{:.3f}, {:.3f}")) attrs.append(("Y Bounds", (bds[2], bds[3]), "{:.3f}, {:.3f}")) attrs.append(("Z Bounds", (bds[4], bds[5]), "{:.3f}, {:.3f}")) return attrs
An internal helper for the representation methods
371,334
def set_source_ip_for_interface(source_ip_address, desired_source_ip_address, device_num=0): log = logging.getLogger(mod_logger + ) if not isinstance(source_ip_address, basestring): msg = log.error(msg) raise TypeError(msg) if not isinstance(desired_source_ip_address, basestring): msg = log.error(msg) raise TypeError(msg) if not validate_ip_address(ip_address=source_ip_address): msg = log.error(msg) raise ValueError(msg) if not validate_ip_address(ip_address=desired_source_ip_address): msg = log.error(msg) raise ValueError(msg) log.debug() try: int(device_num) except ValueError: if isinstance(device_num, basestring): device_name = device_num log.info(.format( d=device_name)) else: raise TypeError() else: device_name = .format(n=str(device_num)) log.info(.format(d=device_name)) command = [, , , , , , device_name, , source_ip_address, , , , desired_source_ip_address] log.info(.format(c=command)) try: result = run_command(command, timeout_sec=20) except CommandError: _, ex, trace = sys.exc_info() msg = .format(c=.join(command), e=str(ex)) log.error(msg) raise OSError, msg, trace if int(result[]) != 0: msg = .format( c=result[], o=result[]) log.error(msg) raise OSError(msg) log.info(.format( d=device_name, i=desired_source_ip_address))
Configures the source IP address for a Linux interface :param source_ip_address: (str) Source IP address to change :param desired_source_ip_address: (str) IP address to configure as the source in outgoing packets :param device_num: (int) Integer interface device number to configure :return: None :raises: TypeError, ValueError, OSError
371,335
# temporal_participation_coeff: per-node, per-time participation coefficient
# over community partitions (static 1D or time-varying 2D `communities`),
# looping snapshots of a temporal network and accumulating squared
# within-community degree fractions (see adjacent docstring row for the
# formula and reference).
# NOTE(review): quoted string literals were stripped from this dataset row
# (empty []/() slots, e.g. `tnet[]`, `get_network_when(...)[].values`);
# kept byte-identical -- recover original source before editing logic.
def temporal_participation_coeff(tnet, communities=None, decay=None, removeneg=False): r if communities is None: if isinstance(tnet, dict): if in tnet.keys(): communities = tnet[] else: raise ValueError() else: raise ValueError() tnet = process_input(tnet, [, , ], ) if tnet.nettype[0] == : if tnet.hdf5 == False: if sum(tnet.network[] < 0) > 0 and not removeneg: print( ) else: tnet.network[][tnet.network[] < 0] = 0 part = np.zeros([tnet.netshape[0], tnet.netshape[1]]) if len(communities.shape) == 1: for t in np.arange(0, tnet.netshape[1]): C = communities snapshot = tnet.get_network_when(t=t) if tnet.nettype[1] == : i_at_t = snapshot[].values else: i_at_t = np.concatenate( [snapshot[].values, snapshot[].values]) i_at_t = np.unique(i_at_t).tolist() i_at_t = list(map(int, i_at_t)) for i in i_at_t: if tnet.nettype[1] == : df = tnet.get_network_when(i=i, t=t) j_at_t = df[].values if tnet.nettype == : k_i = df[].sum() elif tnet.nettype == : k_i = len(df) elif tnet.nettype[1] == : df = tnet.get_network_when(ij=i, t=t) j_at_t = np.concatenate([df[].values, df[].values]) if tnet.nettype == : k_i = df[].sum() elif tnet.nettype == : k_i = len(df) j_at_t = list(map(int, j_at_t)) for c in np.unique(C[j_at_t]): ci = np.where(C == c)[0].tolist() k_is = tnet.get_network_when(i=i, j=ci, t=t) if tnet.nettype[1] == : k_is2 = tnet.get_network_when(j=i, i=ci, t=t) k_is = pd.concat([k_is, k_is2]) if len(k_is) > 0: if tnet.nettype[0] == : k_is = len(k_is) else: k_is = k_is[].sum() part[i, t] += np.square(k_is/k_i) part[i_at_t, t] = 1 - part[i_at_t, t] if decay is not None and t > 0: part[i_at_t, t] += decay*part[i_at_t, t-1] else: for t in np.arange(0, tnet.netshape[1]): snapshot = tnet.get_network_when(t=t) if tnet.nettype[1] == : i_at_t = snapshot[].values else: i_at_t = np.concatenate( [snapshot[].values, snapshot[].values]) i_at_t = np.unique(i_at_t).tolist() i_at_t = list(map(int, i_at_t)) for i in i_at_t: for tc in np.arange(0, tnet.netshape[1]): C = communities[:, tc] if 
tnet.nettype[1] == : df = tnet.get_network_when(i=i, t=t) j_at_t = df[].values if tnet.nettype == : k_i = df[].sum() elif tnet.nettype == : k_i = len(df) elif tnet.nettype[1] == : df = tnet.get_network_when(ij=i, t=t) j_at_t = np.concatenate( [df[].values, df[].values]) if tnet.nettype == : k_i = df[].sum() elif tnet.nettype == : k_i = len(df) j_at_t = list(map(int, j_at_t)) for c in np.unique(C[j_at_t]): ci = np.where(C == c)[0].tolist() k_is = tnet.get_network_when(i=i, j=ci, t=t) if tnet.nettype[1] == : k_is2 = tnet.get_network_when(j=i, i=ci, t=t) k_is = pd.concat([k_is, k_is2]) if tnet.nettype[0] == : k_is = len(k_is) else: k_is = k_is[].sum() part[i, t] += np.square(k_is/k_i) part[i, t] = part[i, t] / tnet.netshape[1] part[i_at_t, t] = 1 - part[i_at_t, t] if decay is not None and t > 0: part[i_at_t, t] += decay*part[i_at_t, t-1] part[np.isnan(part) == 1] = 0 return part
r''' Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes. Parameters ---------- tnet : array, dict graphlet or contact sequence input. Only positive matrices considered. communities : array community vector. Either 1D (node) community index or 2D (node,time). removeneg : bool (default false) If true, all values < 0 are made to be 0. Returns ------- P : array participation coefficient Notes ----- Static participatoin coefficient is: .. math:: P_i = 1 - \sum_s^{N_M}({{k_{is}}\over{k_i}})^2 Where s is the index of each community (:math:`N_M`). :math:`k_i` is total degree of node. And :math:`k_{is}` is degree of connections within community.[part-1]_ This "temporal" version only loops through temporal snapshots and calculates :math:`P_i` for each t. If directed, function sums axis=1, so tnet may need to be transposed before hand depending on what type of directed part_coef you are interested in. References ---------- .. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. [`Link <http://doi.org/10.1038/nature03288>`_]
371,336
def compile_tag_re(self, tags):
    """Compile the raw tag regex after interpolating *tags*, using this
    instance's regex flags.  Used to match Mustache tags for specific
    opening/closing delimiters and tag types."""
    pattern = self.raw_tag_re % tags
    return re.compile(pattern, self.re_flags)
Return the regex used to look for Mustache tags compiled to work with specific opening tags, close tags, and tag types.
371,337
# handle_404: builds an error payload via format_error and returns a JSON
# response with HTTP 404 (per the adjacent docstring row).
# NOTE(review): the `title=` argument's string literal was stripped from this
# dataset row; kept byte-identical.
def handle_404(request, exception): error = format_error(title=, detail=str(exception)) return json(return_an_error(error), status=HTTPStatus.NOT_FOUND)
Handle 404 Not Found This handler should be used to handle error http 404 not found for all endpoints or if resource not available.
371,338
def median_high(data):
    """Return the high median of data.

    When the number of data points is odd, the middle value is returned.
    When it is even, the larger of the two middle values is returned.
    """
    ordered = sorted(data)
    if not ordered:
        raise StatisticsError("no median for empty data")
    # Integer division picks the exact middle for odd n and the upper of the
    # two middle elements for even n.
    return ordered[len(ordered) // 2]
Return the high median of data. When the number of data points is odd, the middle value is returned. When it is even, the larger of the two middle values is returned.
371,339
# is_text_file: returns True when the file at file_path can be read as text
# with at least one line, trying two codecs in turn (per the adjacent
# docstring row: ASCII then UTF-8 -- TODO confirm, the encoding literals
# were stripped).
# NOTE(review): `encoding=` / `errors=` string literals were stripped from
# this dataset row; kept byte-identical.
def is_text_file(file_path: str) -> bool: import codecs expanded_path = os.path.abspath(os.path.expanduser(file_path.strip())) valid_text_file = False try: with codecs.open(expanded_path, encoding=, errors=) as f: if sum(1 for line in f) > 0: valid_text_file = True except OSError: pass except UnicodeDecodeError: try: with codecs.open(expanded_path, encoding=, errors=) as f: if sum(1 for line in f) > 0: valid_text_file = True except OSError: pass except UnicodeDecodeError: pass return valid_text_file
Returns if a file contains only ASCII or UTF-8 encoded text. :param file_path: path to the file being checked :return: True if the file is a text file, False if it is binary.
371,340
def addFeatureSet(self, featureSet):
    """Add the specified featureSet to this dataset, indexing it by its
    id (map + ordered list) and by its local name."""
    featureSetId = featureSet.getId()
    localName = featureSet.getLocalId()
    self._featureSetIdMap[featureSetId] = featureSet
    self._featureSetIds.append(featureSetId)
    self._featureSetNameMap[localName] = featureSet
Adds the specified featureSet to this dataset.
371,341
# add_mountains: walks the grid and, where a tile matches a (stripped) value,
# uses 2D Perlin noise (pnoise2) to decide whether to overwrite the tile --
# producing natural-looking blocked areas per the adjacent docstring row.
# NOTE(review): the tile-value string literals were stripped from this
# dataset row (`pixel == :`, `set_tile(y, x, )`); kept byte-identical.
def add_mountains(self): from noise import pnoise2 import random random.seed() octaves = (random.random() * 0.5) + 0.5 freq = 17.0 * octaves for y in range(self.grd.grid_height - 1): for x in range(self.grd.grid_width - 1): pixel = self.grd.get_tile(y,x) if pixel == : n = int(pnoise2(x/freq, y / freq, 1)*11+5) if n < 1: self.grd.set_tile(y, x, )
Unlike the add_blocks function, which produced line-shaped walls to block path-finding agents, this function creates more natural-looking blocking areas such as mountains
371,342
def _make_eval_func(self, tensors, session, feed_dict, fetches, callback=None): if not isinstance(tensors, list): tensors = [tensors] num_tensors = len(tensors) def eval_func(x): shapes = dict(zip(self._vars, self._var_shapes)) augmented_feed_dict = { var: x[packing_slice].reshape(shapes[var]) for var, packing_slice in zip(self._vars, self._packing_slices) } augmented_feed_dict.update(feed_dict) augmented_fetches = tensors + fetches augmented_fetch_vals = session.run( augmented_fetches, feed_dict=augmented_feed_dict) if callable(callback): callback(*augmented_fetch_vals[num_tensors:]) return augmented_fetch_vals[:num_tensors] return eval_func
Construct a function that evaluates a `Tensor` or list of `Tensor`s.
371,343
def _load_options(self, container, **options):
    """Select backend specific loading options, defaulting every dict-type
    option to *container* when the caller did not supply it."""
    for key in self.dict_options():
        if key not in options:
            options[key] = container
    return anyconfig.utils.filter_options(self._load_opts, options)
Select backend specific loading options.
371,344
def numberOfConnectedDistalSynapses(self, cells=None):
    """Return the number of connected distal synapses on *cells*.

    When *cells* is None the count covers all cells.  Sums the internal
    distal permanence matrix plus every external distal permanence matrix,
    thresholded at the connected permanence.
    """
    if cells is None:
        cells = xrange(self.numberOfCells())
    threshold = self.connectedPermanenceDistal
    total = _countWhereGreaterEqualInRows(
        self.internalDistalPermanences, cells, threshold)
    for permanences in self.distalPermanences:
        total += _countWhereGreaterEqualInRows(permanences, cells, threshold)
    return total
Returns the number of connected distal synapses on these cells. Parameters: ---------------------------- @param cells (iterable) Indices of the cells. If None return count for all cells.
371,345
# parse_results_mol2: scans a DOCK6 mol2 output line-by-line, collecting a
# property dict per ligand record and appending completed records to a
# pandas DataFrame (per the adjacent docstring row).
# NOTE(review): the marker/separator string literals were stripped from this
# dataset row (`startswith()`, `.replace(, )`, `open(..., )`); also uses the
# removed `DataFrame.append` API -- kept byte-identical pending recovery.
def parse_results_mol2(mol2_outpath): docked_ligands = pd.DataFrame() lines = [line.strip() for line in open(mol2_outpath, )] props = {} for i, line in enumerate(lines): if line.startswith(): ligand = line.strip().strip().replace(, ).replace(, ).split()[1] line = lines[i + 1] props = {} props[] = ligand if line.startswith(): splitter = line.strip().strip().replace(, ).replace(, ).split() props[splitter[0]] = float(splitter[1]) if line.startswith(): if props: docked_ligands = docked_ligands.append(props, ignore_index=True) return docked_ligands
Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results. Args: mol2_outpath (str): Path to mol2 output file Returns: DataFrame: Pandas DataFrame of the results
371,346
# set_server_admin_password: validates both arguments then POSTs an XML body
# (serialized admin password) to the server-specific path to reset the
# administrator password (per the adjacent docstring row).
# NOTE(review): the parameter-name string literals passed to
# _validate_not_none and the URL suffix were stripped from this dataset row;
# kept byte-identical.
def set_server_admin_password(self, server_name, admin_password): _validate_not_none(, server_name) _validate_not_none(, admin_password) return self._perform_post( self._get_servers_path(server_name) + , _SqlManagementXmlSerializer.set_server_admin_password_to_xml( admin_password ) )
Reset the administrator password for a server. server_name: Name of the server to change the password. admin_password: The new administrator password for the server.
371,347
# decrypt: decrypts `data` with self._enc_in.open, deriving an 8-byte nounce
# from the incoming-message counter when none is given (per the adjacent
# docstring row).  Raises on decryption failure.
# NOTE(review): the `byteorder=` literal, the bytes prefix before `+ nounce`
# and the Exception message were stripped from this dataset row; kept
# byte-identical.
def decrypt(self, data, nounce=None): if nounce is None: nounce = self._in_counter.to_bytes(length=8, byteorder=) self._in_counter += 1 decrypted = self._enc_in.open( b + nounce, data, bytes()) if not decrypted: raise Exception() return bytes(decrypted)
Decrypt data with counter or specified nounce.
371,348
def set_connection_logging(self, loadbalancer, val):
    """Set the connection logging flag for the given load balancer and
    return the response body."""
    lb_id = utils.get_id(loadbalancer)
    uri = "/loadbalancers/%s/connectionlogging" % lb_id
    # The API expects the boolean as a lowercase string.
    enabled = str(val).lower()
    resp, body = self.api.method_put(
        uri, body={"connectionLogging": {"enabled": enabled}})
    return body
Sets the connection logging for the given load balancer.
371,349
def mlem(op, x, data, niter, callback=None, **kwargs):
    """Maximum Likelihood Expectation Maximization algorithm.

    Thin wrapper that runs :func:`osmlem` with a single ordered subset
    (the whole operator/data pair).  ``x`` is updated in place; nothing is
    returned.  See the adjacent docstring row for the full contract.
    """
    osmlem([op], x, [data], niter=niter, callback=callback, **kwargs)
Maximum Likelihood Expectation Maximation algorithm. Attempts to solve:: max_x L(x | data) where ``L(x | data)`` is the Poisson likelihood of ``x`` given ``data``. The likelihood depends on the forward operator ``op`` such that (approximately):: op(x) = data Parameters ---------- op : `Operator` Forward operator in the inverse problem. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. The initial value of ``x`` should be non-negative. data : ``op.range`` `element-like` Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. callback : callable, optional Function called with the current iterate after each iteration. Other Parameters ---------------- sensitivities : float or ``op.domain`` `element-like`, optional The algorithm contains a ``A^T 1`` term, if this parameter is given, it is replaced by it. Default: ``op.adjoint(op.range.one())`` Notes ----- Given a forward model :math:`A` and data :math:`g`, the algorithm attempts to find an :math:`x` that maximizes: .. math:: P(g | g \text{ is } Poisson(A(x)) \text{ distributed}). The algorithm is explicitly given by: .. math:: x_{n+1} = \frac{x_n}{A^* 1} A^* (g / A(x_n)) See Also -------- osmlem : Ordered subsets MLEM loglikelihood : Function for calculating the logarithm of the likelihood
371,350
def reset(self):
    """Clear the values of all attributes of the transaction store."""
    self.getsCounter = 0
    for mapping_attr in ('processedRequests', 'responses', 'transactions'):
        setattr(self, mapping_attr, {})
Clear the values of all attributes of the transaction store.
371,351
# chooseStep: picks the axis-label step size minimizing whitespace ("slop")
# over candidate steps and divisor counts; widens a degenerate
# minValue == maxValue range first.  The adjacent docstring row documents
# the search-bound reasoning in detail.
# NOTE(review): the message literals for checkFinite and GraphError were
# stripped from this dataset row; kept byte-identical.
def chooseStep(self, divisors=None, binary=False): self.binary = binary if divisors is None: divisors = [4, 5, 6] else: for divisor in divisors: self.checkFinite(divisor, ) if divisor < 1: raise GraphError( ) if self.minValue == self.maxValue: if self.minValue == 0.0: self.maxValue = 1.0 elif self.minValue < 0.0: self.minValue *= 1.1 self.maxValue *= 0.9 else: self.minValue *= 0.9 self.maxValue *= 1.1 variance = self.maxValue - self.minValue bestSlop = None bestStep = None for step in self.generateSteps(variance / float(max(divisors))): if ( bestSlop is not None and step * min(divisors) >= 2 * bestSlop + variance ): break for divisor in divisors: slop = self.computeSlop(step, divisor) if slop is not None and (bestSlop is None or slop < bestSlop): bestSlop = slop bestStep = step self.step = bestStep
Choose a nice, pretty size for the steps between axis labels. Our main constraint is that the number of divisions must be taken from the divisors list. We pick a number of divisions and a step size that minimizes the amount of whitespace ("slop") that would need to be included outside of the range [self.minValue, self.maxValue] if we were to push out the axis values to the next larger multiples of the step size. The minimum step that could possibly cover the variance satisfies minStep * max(divisors) >= variance or minStep = variance / max(divisors) It's not necessarily possible to cover the variance with a step that size, but we know that any smaller step definitely *cannot* cover it. So we can start there. For a sufficiently large step size, it is definitely possible to cover the variance, but at some point the slop will start growing. Let's define the slop to be slop = max(minValue - bottom, top - maxValue) Then for a given, step size, we know that slop >= (1/2) * (step * min(divisors) - variance) (the factor of 1/2 is for the best-case scenario that the slop is distributed equally on the two sides of the range). So suppose we already have a choice that yields bestSlop. Then there is no need to choose steps so large that the slop is guaranteed to be larger than bestSlop. Therefore, the maximum step size that we need to consider is maxStep = (2 * bestSlop + variance) / min(divisors)
371,352
def available_modes_with_ids(self):
    """Return the cached mapping of available mode name -> id, building it
    from FIXED_MODES plus whatever get_available_modes reports."""
    if not self._available_mode_ids:
        combined = FIXED_MODES.copy()
        # Cache the fixed modes first so a bad API response still yields them.
        self._available_mode_ids = combined
        modes = self.get_available_modes()
        try:
            if modes:
                combined.update(
                    (m.get("type", m.get("name")), m.get("id")) for m in modes
                )
                self._available_mode_ids = combined
        except TypeError:
            _LOGGER.debug("Did not receive a valid response. Passing..")
    return self._available_mode_ids
Return list of objects containing available mode name and id.
371,353
# guess_media_type: shells out via subprocess.check_output and returns the
# stripped stdout as the media type (per the adjacent docstring row).
# NOTE(review): the command/flag string literals were stripped from this
# dataset row (`[, , , filepath]` -- presumably a `file --mime-type`-style
# invocation, TODO confirm); kept byte-identical.
def guess_media_type(filepath): o = subprocess.check_output([, , , filepath]) o = o.strip() return o
Returns the media-type of the file at the given ``filepath``
371,354
def volume_adjusted_moving_average(close_data, volume, period):
    """Volume Adjusted Moving Average.

    Formula: VAMA = SUM(CLOSE * VolumeRatio) / period
    """
    catch_errors.check_for_input_len_diff(close_data, volume)
    catch_errors.check_for_period_error(close_data, period)
    # Volume increment is 67% of the mean volume; each bar's ratio scales
    # its close price before the rolling sum.
    volume_increment = np.mean(volume) * 0.67
    volume_ratio = [v / volume_increment for v in volume]
    weighted_close = np.array(close_data) * volume_ratio
    vama = [
        sum(weighted_close[end + 1 - period:end + 1]) / period
        for end in range(period - 1, len(close_data))
    ]
    return fill_for_noncomputable_vals(close_data, vama)
Volume Adjusted Moving Average. Formula: VAMA = SUM(CLOSE * VolumeRatio) / period
371,355
# exists: returns whether the named LXC container is listed by ls_, first
# from the cached listing and, failing that, from a fresh (cache=False)
# listing (per the adjacent docstring row).
# NOTE(review): the stray `*` after the def is residue of the stripped
# docstring/CLI-example literal in this dataset row; kept byte-identical.
def exists(name, path=None): * _exists = name in ls_(path=path) if not _exists: _exists = name in ls_(cache=False, path=path) return _exists
Returns whether the named container exists. path path to the container parent directory (default: /var/lib/lxc) .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' lxc.exists name
371,356
def get_pandasframe(self):
    """Load data as a pandas frame: from the selected dataset when one is
    set (loading its dimensions first), otherwise across all datasets."""
    if not self.dataset:
        return self._get_pandasframe_across_datasets()
    self._load_dimensions()
    return self._get_pandasframe_one_dataset()
The method loads data from dataset
371,357
def get_site(self, site_id):
    """Return site data for *site_id*.

    http://dev.wheniwork.com/#get-existing-site
    """
    resource = self._get_resource("/2/sites/%s" % site_id)
    return self.site_from_json(resource["site"])
Returns site data. http://dev.wheniwork.com/#get-existing-site
371,358
# add_trendline: records a trendline spec (two dates, source series, optional
# annotation text, plus any extra kwargs) by appending a dict to
# self.trendlines (per the adjacent docstring row).
# NOTE(review): the dict-key string literals and the `on=` default were
# stripped from this dataset row (`d={:,:date0,...}`); kept byte-identical.
def add_trendline(self,date0,date1,on=,text=None,**kwargs): d={:,:date0,:date1,:on,:text} d.update(**kwargs) self.trendlines.append(d)
Adds a trendline to the QuantFigure. Given 2 dates, the trendline is connected on the data points that correspond to those dates. Parameters: date0 : string Trendline starting date date1 : string Trendline end date on : string Indicates the data series on which the trendline should be based. 'close' 'high' 'low' 'open' text : string If passed, then an annotation will be added to the trendline (at mid point) kwargs: from_strfmt : string Defines the date formatting in which date0 and date1 are stated. default: '%d%b%y' to_strfmt : string Defines the date formatting to which it should be converted. This should match the same format as the timeseries index. default : '%Y-%m-%d'
371,359
def endings(self):
    """The list of word endings.

    Runs morphological analysis lazily if this text is not yet tagged, then
    returns the ENDING element of the analysis.  Ambiguous cases are
    separated with a pipe character by default (see adjacent docstring row).
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(ENDING)
The list of word endings. Ambiguous cases are separated with pipe character by default. Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
371,360
# dumpdb: CLI command -- `cldf dumpdb <DATASET> <SQLITE_DB_PATH>
# [<METADATA_PATH>]`.  Opens the dataset's SQLite DB and dumps it back to
# CLDF at the metadata path (defaulting to the dataset's own metadata file).
# NOTE(review): the ParserError message and the log format string were
# stripped from this dataset row; kept byte-identical.
def dumpdb(args): if len(args.args) < 2: raise ParserError() ds = _get_dataset(args) db = Database(ds, fname=args.args[1]) mdpath = Path(args.args[2]) if len(args.args) > 2 else ds.tablegroup._fname args.log.info(.format(db.to_cldf(mdpath.parent, mdname=mdpath.name)))
cldf dumpdb <DATASET> <SQLITE_DB_PATH> [<METADATA_PATH>]
371,361
# write_conll: serializes the dataset to fname in CONLL format -- header of
# field names, then one block per instance via the nested instance_to_conll
# helper; raises InvalidFieldsException when the label field is missing.
# NOTE(review): the field-name and join/format string literals were stripped
# from this dataset row (`if not in self.fields`, `.join([...])`,
# `open(fname, )`); kept byte-identical pending recovery.
def write_conll(self, fname): if not in self.fields: raise InvalidFieldsException("dataset is not in CONLL format: missing label field") def instance_to_conll(inst): tab = [v for k, v in inst.items() if k != ] return .format(inst[], .join([.join([ if e is None else str(e) for e in row]) for row in zip(*tab)])) with open(fname, ) as f: f.write(.format(.join([k for k in self.fields if k != ]))) for i, d in enumerate(self): f.write(.format(instance_to_conll(d))) if i != len(self) - 1: f.write()
Serializes the dataset in CONLL format to fname
371,362
# fail: sends a captured-exception payload back over self.db -- uses the
# formatted current exception when no message is given, otherwise escapes
# the provided message (per the adjacent docstring row).
# NOTE(review): the wire-format string literals and dict keys were stripped
# from this dataset row; kept byte-identical.
def fail(self, cmd, title=None, message=None): if message is None: message = self.handle_exc() else: message = escape(message) self.db.send( % dump({ : escape(title or % cmd), : message }) )
Send back captured exceptions
371,363
# _knit: runs knitr through Rscript in a subprocess to convert R markdown to
# markdown, passing opts_knit/opts_chunk verbatim (per the adjacent
# docstring row).  Captured stdout/stderr are currently discarded.
# NOTE(review): the R script template and the Rscript command literals were
# stripped from this dataset row (`script = ( )`, `rcmd = (, , ...)`);
# kept byte-identical.
def _knit(fin, fout, opts_knit=, opts_chunk=): script = ( ) rcmd = (, , script.format(input=fin, output=fout, opts_knit=opts_knit, opts_chunk=opts_chunk) ) p = subprocess.Popen(rcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate()
Use knitr to convert r markdown (or anything knitr supports) to markdown. fin / fout - strings, input / output filenames. opts_knit - string, options to pass to knit opts_shunk - string, chunk options options are passed verbatim to knitr:knit running in Rscript.
371,364
def map_pixel_inv(row, col, cellx, celly, xmin, ymax):
    """Convert raster row/col indices back to map coordinates.

    ``xmin`` is the leftmost X coordinate, ``ymax`` the topmost Y coordinate;
    ``cellx``/``celly`` are the cell sizes (``celly`` is typically negative).
    Accepts scalars or array-likes and returns ``(point_x, point_y)``.
    """
    cols = np.asarray(col)
    rows = np.asarray(row)
    return xmin + cols * cellx, ymax + rows * celly
Usage: map_pixel(xcoord, ycoord, x_cell_size, y_cell_size, xmin, ymax) where: xmin is leftmost X coordinate in system ymax is topmost Y coordinate in system Example: raster = HMISea.tif ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster) row, col = map_pixel(x,y,geot[1],geot[-1], geot[0],geot[3])
371,365
def close(self):
    """Close the stream.

    Flushes any buffered output, closes the underlying stream object, and
    then delegates to ``logging.StreamHandler.close`` for base-class
    cleanup (handler deregistration).
    """
    self.flush()
    self.stream.close()
    logging.StreamHandler.close(self)
Close the stream.
371,366
# _proxy: lazily builds and caches a CountryContext for this instance so all
# instance actions can be proxied to it (per the adjacent docstring row).
# NOTE(review): the solution-dict key passed to `iso_code=self._solution[]`
# was stripped from this dataset row (presumably 'iso_code' -- TODO confirm);
# kept byte-identical.
def _proxy(self): if self._context is None: self._context = CountryContext(self._version, iso_code=self._solution[], ) return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: CountryContext for this CountryInstance :rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
371,367
# datashape_type_to_numpy: maps a datashape type to a numpy dtype --
# unwraps Option, special-cases DateTime / String / integral types, and
# otherwise defers to type_.to_numpy_dtype() (per the adjacent docstring
# row, DateTime should map to datetime64[ns]).
# NOTE(review): the dtype string literals were stripped from this dataset
# row (`np.dtype()` for the DateTime and integral branches); kept
# byte-identical.
def datashape_type_to_numpy(type_): if isinstance(type_, Option): type_ = type_.ty if isinstance(type_, DateTime): return np.dtype() if isinstance(type_, String): return np.dtype(object) if type_ in integral: return np.dtype() else: return type_.to_numpy_dtype()
Given a datashape type, return the associated numpy type. Maps datashape's DateTime type to numpy's `datetime64[ns]` dtype, since the numpy datetime returned by datashape isn't supported by pipeline. Parameters ---------- type_: datashape.coretypes.Type The datashape type. Returns ------- type_ np.dtype The numpy dtype.
371,368
# process_response: cache-update middleware hook -- bails out unless the
# request was flagged for cache update, patches response headers, learns a
# cache key and stores the response (per the adjacent docstring row).
# NOTE(review): the attribute-name literal in `hasattr(request, )` was
# stripped from this dataset row, and `timeout` is referenced without a
# visible assignment (its computation was presumably on a stripped line);
# kept byte-identical pending recovery.
def process_response(self, request, response): if not hasattr(request, ) or not request._cache_update_cache: return response patch_response_headers(response, timeout) if timeout: cache_key = learn_cache_key(request, response, timeout, self.key_prefix) cache.set(cache_key, response, timeout) logging.debug("UpdateCacheMiddleware: setting %s -> %s params are: %s" % (cache_key, request.path, get_cache_key_parameters(request))) return response
Sets the cache, if needed.
371,369
# plot_final: plots the final de-trended light curve on `ax`, masking
# bad/NaN cadences; for one cadence type it overlays a GP prediction and
# computes self.cdppg, otherwise it clips the y-limits to the 99.5th
# percentile range (per the adjacent docstring row).
# NOTE(review): the cadence-name, marker/color and axis-style string
# literals were stripped from this dataset row (`self.cadence == :`,
# `ls=, marker=, color=`); kept byte-identical pending recovery.
def plot_final(self, ax): bnmask = np.array( list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int) def M(x): return np.delete(x, bnmask) if (self.cadence == ) or (len(self.time) < 4000): ax.plot(M(self.time), M(self.flux), ls=, marker=, color=, markersize=2, alpha=0.3) else: ax.plot(M(self.time), M(self.flux), ls=, marker=, color=, markersize=2, alpha=0.03, zorder=-1) ax.set_rasterization_zorder(0) ax.plot(self.time[0], np.nanmedian(M(self.flux)), marker=, alpha=0) ax.plot(self.time[-1], np.nanmedian(M(self.flux)), marker=, alpha=0) if self.cadence == : gp = GP(self.kernel, self.kernel_params, white=False) gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err)) med = np.nanmedian(self.apply_mask(self.flux)) y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time) y += med ax.plot(M(self.time), M(y), , lw=0.5, alpha=0.5) self.cdppg = self._mission.CDPP(self.apply_mask( self.flux - y + med), cadence=self.cadence) else: ax.margins(0.01, 0.1) flux = np.delete(self.flux, bnmask) N = int(0.995 * len(flux)) hi, lo = flux[np.argsort(flux)][[N, -N]] fsort = flux[np.argsort(flux)] pad = (hi - lo) * 0.1 ylim = (lo - pad, hi + pad) ax.set_ylim(ylim) ax.get_yaxis().set_major_formatter(Formatter.Flux)
Plots the final de-trended light curve.
371,370
async def handle_json_response(responses):
    """Return the decoded JSON payload of an aiohttp response.

    :param responses: the aiohttp response object
    :return: the decoded JSON data (empty dict on a non-200 status or a
        decode failure); errors are logged, not raised
    """
    json_data = {}
    if responses.status != 200:
        err_msg = HttpProcessingError(code=responses.status,
                                      message=await responses.json())
        logging.error("Wallabag: aiohttp error {err_msg}".format(
            err_msg=err_msg))
    else:
        try:
            # BUG FIX: ClientResponse.json() is a coroutine and must be
            # awaited here; the original awaited the resulting plain dict
            # on return instead, raising TypeError.
            json_data = await responses.json()
        except ClientResponseError as e:
            logging.error("Wallabag: aiohttp error {code} {message}"
                          .format(code=e.code, message=e.message))
    # Return the dict directly -- it is not awaitable.
    return json_data
Get the JSON data from the response. :param responses: the JSON response :return: the JSON data without the 'root' node
371,371
# Auto-generated NETCONF config builder: assembles the XML element tree for
# route-map -> content -> set -> origin -> origin-igp and dispatches it via
# the callback (default self._callback).
# NOTE(review): the kwargs key literals passed to `kwargs.pop()` were
# stripped from this dataset row (presumably 'name', 'action_rm',
# 'instance', 'callback' -- TODO confirm); kept byte-identical.
def hide_routemap_holder_route_map_content_set_origin_origin_igp(self, **kwargs): config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop() action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop() instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop() content = ET.SubElement(route_map, "content") set = ET.SubElement(content, "set") origin = ET.SubElement(set, "origin") origin_igp = ET.SubElement(origin, "origin-igp") callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
371,372
def set_kill_on_exit_mode(bKillOnExit = False):
    """Define whether debugged processes are killed or detached when the
    debugger thread dies (calling thread only).

    Returns True on success, False when the API is unavailable
    (AttributeError: missing on old Windows/ReactOS/Wine builds) or the
    call itself fails (WindowsError).  See the adjacent docstring row for
    platform details; requires an existing debug port.
    """
    try:
        win32.DebugSetProcessKillOnExit(bKillOnExit)
    except (AttributeError, WindowsError):
        return False
    return True
Defines the behavior of the debugged processes when the debugging thread dies. This method only affects the calling thread. Works on the following platforms: - Microsoft Windows XP and above. - Wine (Windows Emulator). Fails on the following platforms: - Microsoft Windows 2000 and below. - ReactOS. @type bKillOnExit: bool @param bKillOnExit: C{True} to automatically kill processes when the debugger thread dies. C{False} to automatically detach from processes when the debugger thread dies. @rtype: bool @return: C{True} on success, C{False} on error. @note: This call will fail if a debug port was not created. That is, if the debugger isn't attached to at least one process. For more info see: U{http://msdn.microsoft.com/en-us/library/ms679307.aspx}
371,373
def prepare(self, config_file=None, user=None, password=None, **kwargs):
    """Unified login entry point.

    When *config_file* is given, credentials are read from it; otherwise the
    account is prepared from *user*/*password* (plus broker-specific
    keyword arguments).  Finally performs the automatic login.
    """
    if config_file is None:
        self._prepare_account(user, password, **kwargs)
    else:
        self.read_config(config_file)
    self.autologin()
登录的统一接口 :param config_file 登录数据文件,若无则选择参数登录模式 :param user: 各家券商的账号或者雪球的用户名 :param password: 密码, 券商为加密后的密码,雪球为明文密码 :param account: [雪球登录需要]雪球手机号(邮箱手机二选一) :param portfolio_code: [雪球登录需要]组合代码 :param portfolio_market: [雪球登录需要]交易市场, 可选['cn', 'us', 'hk'] 默认 'cn'
371,374
def query_parent_objects(self, context, query=None):
    """Return objects of the same type from the parent object.

    With no *query*, simply returns the parent's objects.  Otherwise runs
    the catalog query against the first catalog registered for *context*,
    falling back to the parent's objects when the query or catalog lookup
    fails.
    """
    if query is None:
        return self.get_parent_objects(context)
    catalog = None
    try:
        catalog = api.get_catalogs_for(context)[0]
        return map(api.get_object, catalog(query))
    except (IndexError, UnicodeDecodeError, ParseError, APIError) as e:
        # Degrade gracefully: log the failure and return the unfiltered set.
        logger.warn("UniqueFieldValidator: Catalog query {} failed "
                    "for catalog {} ({}) -> returning object values of {}"
                    .format(query, repr(catalog), str(e),
                            repr(api.get_parent(context))))
        return self.get_parent_objects(context)
Return the objects of the same type from the parent object :param query: Catalog query to narrow down the objects :type query: dict :returns: Content objects of the same portal type in the parent
371,375
# clear_sonos_playlist: convenience wrapper over reorder_sonos_playlist that
# removes every track from a Sonos playlist (resolving a string item_id to
# the playlist object first); returns an empty change/length dict when the
# playlist is already empty (per the adjacent docstring row).
# NOTE(review): the attribute-name, join-separator, `new_pos=` and
# result-dict key literals were stripped from this dataset row; kept
# byte-identical pending recovery.
def clear_sonos_playlist(self, sonos_playlist, update_id=0): if not isinstance(sonos_playlist, DidlPlaylistContainer): sonos_playlist = self.get_sonos_playlist_by_attr(, sonos_playlist) count = self.music_library.browse(ml_item=sonos_playlist).total_matches tracks = .join([str(x) for x in range(count)]) if tracks: return self.reorder_sonos_playlist(sonos_playlist, tracks=tracks, new_pos=, update_id=update_id) else: return {: 0, : update_id, : count}
Clear all tracks from a Sonos playlist. This is a convenience method for :py:meth:`reorder_sonos_playlist`. Example:: device.clear_sonos_playlist(sonos_playlist) Args: sonos_playlist (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): Sonos playlist object or the item_id (str) of the Sonos playlist. update_id (int): Optional update counter for the object. If left at the default of 0, it will be looked up. Returns: dict: See :py:meth:`reorder_sonos_playlist` Raises: ValueError: If sonos_playlist specified by string and is not found. SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
371,376
def insertOntology(self, ontology):
    """Insert the specified ontology into this repository.

    Any failure of the underlying create is reported as a
    DuplicateNameException carrying the ontology's name.
    """
    name = ontology.getName()
    try:
        models.Ontology.create(
            id=name,
            name=name,
            dataurl=ontology.getDataUrl(),
            ontologyprefix=ontology.getOntologyPrefix())
    except Exception:
        raise exceptions.DuplicateNameException(name)
Inserts the specified ontology into this repository.
371,377
def rotate_in_plane(chi, phase):
    """Rotate the in-plane (x, y) spin components by *phase*.

    Used when transforming spins between the coprecessing and coorbital
    frames; the z component (and any further rows) is left untouched.
    """
    components = chi.T
    cos_p = np.cos(phase)
    sin_p = np.sin(phase)
    # Multiplying by 1.0 yields a float-promoted copy, so the writes below
    # never touch the caller's array.
    rotated = components * 1.0
    rotated[0] = components[0] * cos_p + components[1] * sin_p
    rotated[1] = components[1] * cos_p - components[0] * sin_p
    return rotated.T
For transforming spins between the coprecessing and coorbital frames
371,378
def padRectEqually(rect, padding, bounds, clipExcess = True):
    """Apply the same *padding* to all four sides of *rect*, keeping the
    padded rectangle within *bounds*.  All rectangles are (x, y, w, h)."""
    pad = padding
    return padRect(rect, pad, pad, pad, pad, bounds, clipExcess)
Applies equal padding to all sides of a rectangle, ensuring the padded rectangle falls within the specified bounds. The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
371,379
def _get_rh_methods(rh):
    """Yield (name, member) pairs for HTTP methods in ``rh`` that are
    decorated with schema.validate (detected via the ``input_schema``
    attribute the decorator attaches)."""
    for name, member in vars(rh).items():
        if name not in HTTP_METHODS:
            continue
        if is_method(member) and hasattr(member, "input_schema"):
            yield (name, member)
Yield all HTTP methods in ``rh`` that are decorated with schema.validate
371,380
# do__relative_load: resolves args.file_path relative to the directory of
# the currently running script and delegates to do_load (per the adjacent
# docstring row).
# NOTE(review): the fallback string literal in
# `self._current_script_dir or ` was stripped from this dataset row
# (presumably '' for the current directory -- TODO confirm);
# kept byte-identical.
def do__relative_load(self, args: argparse.Namespace) -> None: file_path = args.file_path relative_path = os.path.join(self._current_script_dir or , file_path) self.do_load(relative_path)
Run commands in script file that is encoded as either ASCII or UTF-8 text
371,381
def data_check(data, target):
    """Check the input data type and extract the raw series.

    :param data: pd.DataFrame or np.ndarray time-series container
    :param target: target column (int/str) or None for the first column
    :return: (transformed_data, data_name, is_pandas, data_index) where
        transformed_data is the raw value array, data_name names the
        series, is_pandas flags DataFrame input, and data_index holds the
        time indices
    :raises Exception: when the input is neither pandas nor numpy
    """
    if isinstance(data, pd.DataFrame):
        # (pd.DataFrame and pd.core.frame.DataFrame are the same class, so a
        # single isinstance check suffices.)
        data_index = data.index
        if target is None:
            # FIX: .ix was removed in pandas 1.0; the first column is
            # selected positionally, which is exactly what .iloc does.
            transformed_data = data.iloc[:, 0].values
            data_name = str(data.columns.values[0])
        else:
            transformed_data = data[target].values
            data_name = str(target)
        is_pandas = True
    elif isinstance(data, np.ndarray):
        data_name = "Series"
        is_pandas = False
        if any(isinstance(i, np.ndarray) for i in data):
            # 2-D style input: pick the requested (or first) sub-array.
            series = data[0] if target is None else data[target]
            transformed_data = series
            data_index = list(range(len(series)))
        else:
            transformed_data = data
            data_index = list(range(len(data)))
    else:
        raise Exception("The data input is not pandas or numpy compatible!")
    return transformed_data, data_name, is_pandas, data_index
Checks data type Parameters ---------- data : pd.DataFrame or np.array Field to specify the time series data that will be used. target : int or str Target column Returns ---------- transformed_data : np.array Raw data array for use in the model data_name : str Name of the data is_pandas : Boolean True if pandas data, else numpy data_index : np.array The time indices for the data
371,382
def emit(self, record):
    """Store a simplified copy of the log record (level plus formatted
    message), then delegate to the base handler's emit."""
    entry = Record(
        levelno=record.levelno,
        levelname=record.levelname,
        message=self.format(record),
    )
    self.records.append(entry)
    return super(SetupLogChecker, self).emit(record)
Store the message, not only the record.
371,383
def extract_common(self, keys):
    """Return a new segmentlistdict restricted to *keys*, with every entry
    set to the mutual intersection of those keys' segmentlists.  Offsets
    are preserved."""
    wanted = set(keys)
    common = self.intersection(wanted)
    result = self.__class__()
    for key in wanted:
        # Bypass any __setitem__ override so offsets are copied verbatim.
        dict.__setitem__(result, key, _shallowcopy(common))
        dict.__setitem__(result.offsets, key, self.offsets[key])
    return result
Return a new segmentlistdict containing only those segmentlists associated with the keys in keys, with each set to their mutual intersection. The offsets are preserved.
371,384
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
    """Get the number of task trackers in the Hadoop cluster.

    All arguments are passed through to :func:`get_task_trackers`.
    """
    trackers = get_task_trackers(properties, hadoop_conf_dir, offline)
    return len(trackers)
Get the number of task trackers in the Hadoop cluster. All arguments are passed to :func:`get_task_trackers`.
371,385
def make_parser(defaults=None):
    """Build the command-line argument parser for the anyconfig CLI.

    :param defaults: Default option values (falls back to module DEFAULTS)
    :return: a configured argparse.ArgumentParser

    NOTE(review): several string literals were stripped from this source
    (e.g. ``nargs=,``) — as written the function is not syntactically valid;
    the missing values must be restored from upstream before use.
    """
    if defaults is None:
        defaults = DEFAULTS
    # Supported config types and merge strategies come from the API module.
    ctypes = API.list_types()
    ctypes_s = ", ".join(ctypes)
    type_help = "Select type of %s config files from " + \
        ctypes_s + " [Automatically detected by file ext]"
    mts = API.MERGE_STRATEGIES
    mts_s = ", ".join(mts)
    mt_help = "Select strategy to merge multiple configs from " + \
        mts_s + " [%(merge)s]" % defaults
    parser = argparse.ArgumentParser(usage=USAGE)
    parser.set_defaults(**defaults)
    # NOTE(review): nargs value stripped — presumably '+' (one or more files).
    parser.add_argument("inputs", type=str, nargs=, help="Input files")
    parser.add_argument("--version", action="version",
                        version="%%(prog)s %s" % anyconfig.globals.VERSION)
    # Option group: listing supported types.
    lpog = parser.add_argument_group("List specific options")
    lpog.add_argument("-L", "--list", action="store_true",
                      help="List supported config types")
    # Option group: schema validation / generation.
    spog = parser.add_argument_group("Schema specific options")
    spog.add_argument("--validate", action="store_true",
                      help="Only validate input files and do not output. "
                      "You must specify schema file with -S/--schema "
                      "option.")
    spog.add_argument("--gen-schema", action="store_true",
                      help="Generate JSON schema for givne config file[s] "
                      "and output it instead of (merged) configuration.")
    # Option group: querying / getting / setting values in the config.
    gspog = parser.add_argument_group("Query/Get/set options")
    gspog.add_argument("-Q", "--query", help=_QUERY_HELP)
    gspog.add_argument("--get", help=_GET_HELP)
    gspog.add_argument("--set", help=_SET_HELP)
    parser.add_argument("-o", "--output", help="Output file path")
    parser.add_argument("-I", "--itype", choices=ctypes, metavar="ITYPE",
                        help=(type_help % "Input"))
    parser.add_argument("-O", "--otype", choices=ctypes, metavar="OTYPE",
                        help=(type_help % "Output"))
    parser.add_argument("-M", "--merge", choices=mts, metavar="MERGE",
                        help=mt_help)
    parser.add_argument("-A", "--args", help="Argument configs to override")
    parser.add_argument("--atype", choices=ctypes, metavar="ATYPE",
                        help=_ATYPE_HELP_FMT % ctypes_s)
    # Option group: common flags.
    cpog = parser.add_argument_group("Common options")
    cpog.add_argument("-x", "--ignore-missing", action="store_true",
                      help="Ignore missing input files")
    cpog.add_argument("-T", "--template", action="store_true",
                      help="Enable template config support")
    cpog.add_argument("-E", "--env", action="store_true",
                      help="Load configuration defaults from "
                      "environment values")
    cpog.add_argument("-S", "--schema", help="Specify Schema file[s] path")
    cpog.add_argument("-e", "--extra-opts",
                      help="Extra options given to the API call, "
                      "--extra-options indent:2 (specify the "
                      "indent for pretty-printing of JSON outputs) "
                      "for example")
    # -v may be repeated; count is mapped to a logging level by the caller.
    cpog.add_argument("-v", "--verbose", action="count", dest="loglevel",
                      help="Verbose mode; -v or -vv (more verbose)")
    return parser
:param defaults: Default option values
371,386
def load_notebook_node(notebook_path):
    """Return a notebook object with papermill metadata loaded from the
    specified path.

    Args:
        notebook_path (str): Path to the notebook file.

    Returns:
        nbformat.NotebookNode

    NOTE(review): the string literals in this function were stripped from
    the extracted source; they are reconstructed here from the upstream
    papermill implementation and should be confirmed against it.
    """
    nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)

    # Ensure notebook-level papermill metadata exists.
    if not hasattr(nb.metadata, 'papermill'):
        nb.metadata['papermill'] = {
            'parameters': dict(),
            'environment_variables': dict(),
            'version': __version__,
        }

    # Ensure every cell carries a tags list so downstream code can rely on it.
    for cell in nb.cells:
        if not hasattr(cell.metadata, 'tags'):
            cell.metadata['tags'] = []

    return nb
Returns a notebook object with papermill metadata loaded from the specified path. Args: notebook_path (str): Path to the notebook file. Returns: nbformat.NotebookNode
371,387
def _splitit(self, line, isheader):
    """Split each element of line to fit the column width.

    Each element is turned into a list, the result of wrapping the string
    to the desired width; cells are then padded with empty strings so that
    every cell in the row spans the same number of text lines.

    NOTE(review): Python 2 only code (``unicode``, bare ``reduce``, integer
    ``/``). The encoding argument of ``unicode(c, )`` and, presumably, a
    '\\n' argument to ``cell.split()`` were stripped from this source — as
    written it is not syntactically valid.
    """
    line_wrapped = []
    for cell, width in zip(line, self._width):
        array = []
        for c in cell.split():
            array.extend(textwrap.wrap(unicode(c, ), width))
        line_wrapped.append(array)
    # The tallest wrapped cell determines how many text lines the row spans.
    max_cell_lines = reduce(max, map(len, line_wrapped))
    for cell, valign in zip(line_wrapped, self._valign):
        if isheader:
            # Header cells are always top-aligned.
            valign = "t"
        if valign == "m":
            # Middle alignment: pad above and below (extra line goes below).
            missing = max_cell_lines - len(cell)
            cell[:0] = [""] * (missing / 2)
            cell.extend([""] * (missing / 2 + missing % 2))
        elif valign == "b":
            # Bottom alignment: pad above.
            cell[:0] = [""] * (max_cell_lines - len(cell))
        else:
            # Top alignment (default): pad below.
            cell.extend([""] * (max_cell_lines - len(cell)))
    return line_wrapped
Split each element of line to fit the column width Each element is turned into a list, result of the wrapping of the string to the desired width
371,388
def elbow_method(data, k_min, k_max, distance='euclidean'):
    """Calculate and plot the variance explained vs. number of clusters.

    Implementation reference:
    https://github.com/sarguido/k-means-clustering

    :param data: The dataset (n_samples x n_features)
    :param k_min: lower bound of the cluster range (inclusive)
    :param k_max: upper bound of the cluster range (exclusive)
    :param distance: the distance metric, 'euclidean' by default
    :return: None (shows the plot)

    NOTE(review): the default for ``distance`` and the plot strings were
    stripped from the extracted source; 'euclidean' is restored from the
    docstring, the marker/labels are reconstructed and should be confirmed.
    """
    k_range = range(k_min, k_max)
    # Fit one k-means model per candidate k.
    k_means_var = [Clustering.kmeans(k).fit(data) for k in k_range]
    centroids = [X.model.cluster_centers_ for X in k_means_var]
    # Distance from every point to its nearest centroid, per model.
    k_euclid = [cdist(data, cent, distance) for cent in centroids]
    dist = [np.min(ke, axis=1) for ke in k_euclid]
    # Within-cluster, total, and between-cluster sums of squares.
    wcss = [sum(d ** 2) for d in dist]
    tss = sum(pdist(data) ** 2) / data.shape[0]
    bss = tss - wcss
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(k_range, bss / tss * 100, 'b*-')
    ax.set_ylim((0, 100))
    plt.grid(True)
    plt.xlabel('Number of clusters')
    plt.ylabel('Percentage of variance explained')
    plt.title('Variance explained vs. number of clusters')
    plt.show()
Calculates and plots the plot of variance explained - number of clusters Implementation reference: https://github.com/sarguido/k-means-clustering.rst :param data: The dataset :param k_min: lowerbound of the cluster range :param k_max: upperbound of the cluster range :param distance: the distance metric, 'euclidean' by default :return:
371,389
def index_resolver(index, strict=False):
    """Return a function mapping a value to ``index[value]``.

    When ``strict`` is False (default), missing keys resolve to None via
    ``index.get``; when True, a missing key raises KeyError.
    """
    if not strict:
        return index.get

    def resolve(id_):
        return index[id_]

    return resolve
Returns a function that accepts a value and returns index[value].
371,390
def get_departures(self, station):
    """Fetch the current departure times from this station.

    http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}

    :param station: station (name or abbreviation) to look up
    :return: parsed departures

    NOTE(review): the URL fragment and HTTP verb were stripped from the
    extracted source (``url = + station`` is not valid); they are
    reconstructed here from the documented endpoint and should be confirmed
    against the upstream nsapi implementation.
    """
    url = 'avt?station=' + station
    raw_departures = self._request('GET', url)
    return self.parse_departures(raw_departures)
Fetch the current departure times from this station http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station} @param station: station to lookup
371,391
def create(obj: PersistedObject, obj_type: Type[T], extensions_supported: Iterable[str]):
    """Build a NoParserFoundForObjectExt with an explanatory message.

    Helper method provided because we actually can't put that in the
    constructor, it creates a bug in Nose tests
    https://github.com/nose-devs/nose/issues/725

    :param obj: the persisted object that could not be parsed
    :param obj_type: the desired output type
    :param extensions_supported: file extensions for which parsers able to
        produce obj_type do exist
    :return: a NoParserFoundForObjectExt carrying the message and the
        supported extensions
    """
    # Base message: no parser supports this object's extension for this type.
    msg = "{obj} cannot be parsed as a {typ} because no parser supporting that extension ({ext}) is able to " \
          "create this type of object." \
          "".format(obj=obj, typ=get_pretty_type_str(obj_type), ext=obj.get_pretty_file_ext())
    if extensions_supported is not None and len(extensions_supported) > 0:
        # Suggest remedies when other extensions would work.
        msg += " If you wish to parse this fileobject to that precise type, you may wish to either " \
               "(1) replace the file with any of the following extensions currently supported : {exts} " \
               "(see get_capabilities_for_type({typ}, strict_type_matching=False) for details)." \
               " Or (2) register a new parser." \
               "".format(exts=extensions_supported, typ=get_pretty_type_str(obj_type))
    else:
        # NOTE(review): the ValueError message was stripped from this source
        # (``raise ValueError( )``) — restore it from upstream before use.
        raise ValueError(
            )
    e = NoParserFoundForObjectExt(msg)
    # Attach the supported extensions so callers can inspect them.
    e.extensions_supported = extensions_supported
    return e
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param obj_type: :param extensions_supported: :return:
371,392
def get_subtask_fields(config_class):
    """Collect every configurable-subtask field declared on a Config class.

    Parameters
    ----------
    config_class : ``lsst.pipe.base.Config``-type
        The configuration class (not an instance) corresponding to a Task.

    Returns
    -------
    subtask_fields : `dict`
        Attribute-name -> field mapping restricted to
        ``ConfigurableField`` / ``RegistryField`` members, ordered
        alphabetically by attribute name.
    """
    from lsst.pex.config import ConfigurableField, RegistryField

    subtask_field_types = (ConfigurableField, RegistryField)

    def _is_subtask_field(member):
        """True if member is a ConfigurableField or RegistryField."""
        return isinstance(member, subtask_field_types)

    return _get_alphabetical_members(config_class, _is_subtask_field)
Get all configurable subtask fields from a Config class. Parameters ---------- config_class : ``lsst.pipe.base.Config``-type The configuration class (not an instance) corresponding to a Task. Returns ------- subtask_fields : `dict` Mapping where keys are the config attribute names and values are subclasses of ``lsst.pex.config.ConfigurableField`` or ``RegistryField``). The mapping is alphabetically ordered by attribute name.
371,393
def get_configuration_dict(self, secret_attrs=False):
    """Type-specific configuration for backward compatibility.

    Aggregates this collection's settings plus the configuration of each
    shard into a single dict.

    :param secret_attrs: forwarded to each shard's get_configuration_dict
    :return: dict describing this object and its shards

    NOTE(review): the dictionary keys were stripped from this source
    (``{: ...}`` is not valid syntax); they must be restored from upstream
    before use.
    """
    cd = {: self.repo_nexml2json,
          : len(self._shards),
          : self._filepath_args,
          : [],
          }
    # Append each shard's own configuration under the (stripped) list key.
    for i in self._shards:
        cd[].append(i.get_configuration_dict(secret_attrs=secret_attrs))
    return cd
Type-specific configuration for backward compatibility
371,394
def rows(self, offs):
    """Iterate over raw (indx, bytes) tuples starting at the given offset."""
    startkey = s_common.int64en(offs)
    for curkey, valu in self.slab.scanByRange(startkey, db=self.db):
        yield s_common.int64un(curkey), valu
Iterate over raw indx, bytes tuples from a given offset.
371,395
def rank_dated_files(pattern, dir, descending=True):
    """Search a directory for files matching a glob pattern and return them
    ordered by filename.

    Args:
        pattern: The glob pattern to search for.
        dir: Path to the directory to search in.
        descending: Default True; sort filenames in descending
            alphabetical order.

    Returns:
        list: Rank-ordered list of matching paths.
    """
    search_path = op.join(dir, pattern)
    matches = glob.glob(search_path)
    matches.sort(reverse=descending)
    return matches
Search a directory for files that match a pattern. Return an ordered list of these files by filename. Args: pattern: The glob pattern to search for. dir: Path to directory where the files will be searched for. descending: Default True, will sort alphabetically by descending order. Returns: list: Rank-ordered list by filename.
371,396
def amplify_ground_shaking(T, vs30, gmvs):
    """Amplify ground motion values for site conditions.

    :param T: period
    :param vs30: velocity
    :param gmvs: ground motion values for the current site in units of g
        (clipped in place at MAX_GMV)
    """
    # Cap extreme values in place before interpolating.
    gmvs[gmvs > MAX_GMV] = MAX_GMV
    ratio = 760 / vs30
    # Short periods use one exponent profile, long periods another.
    if T <= 0.3:
        exponents = [0.35, 0.35, 0.25, 0.10, -0.05, -0.05]
    else:
        exponents = [0.65, 0.65, 0.60, 0.53, 0.45, 0.45]
    interpolator = interpolate.interp1d(
        [0, 0.1, 0.2, 0.3, 0.4, 5],
        [ratio ** exp for exp in exponents],
    )
    return interpolator(gmvs) * gmvs
:param T: period :param vs30: velocity :param gmvs: ground motion values for the current site in units of g
371,397
def find_equips(
        self,
        name,
        iexact,
        environment,
        equip_type,
        group,
        ip,
        pagination):
    """Find equipments matching all the given search parameters.

    :param name: Filter by name column
    :param iexact: Whether the name filter is an exact match
    :param environment: Filter by related environment ID
    :param equip_type: Filter by related equipment_type ID
    :param group: Filter by related equipment group ID
    :param ip: Filter by octets of related IPs
    :param pagination: Pagination instance with all data needed to paginate

    :return: dict with the matched equipments and a total count

    :raise InvalidParameterError: Some parameter was invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not isinstance(pagination, Pagination):
        raise InvalidParameterError(
            u"Invalid parameter: pagination must be a class of type .")

    # Assemble the request payload in one literal.
    equip_map = {
        "start_record": pagination.start_record,
        "end_record": pagination.end_record,
        "asorting_cols": pagination.asorting_cols,
        "searchable_columns": pagination.searchable_columns,
        "custom_search": pagination.custom_search,
        "nome": name,
        "exato": iexact,
        "ambiente": environment,
        "tipo_equipamento": equip_type,
        "grupo": group,
        "ip": ip,
    }

    code, xml = self.submit(
        {"equipamento": equip_map}, "POST", "equipamento/find/")

    key = "equipamento"
    response = self.response(code, xml, [key, "ips", "grupos"])
    return get_list_map(response, key)
Find equipments by all search parameters

:param name: Filter by equipment name column
:param iexact: Filter by name will be exact?
:param environment: Filter by environment ID related
:param equip_type: Filter by equipment_type ID related
:param group: Filter by equipment group ID related
:param ip: Filter by each octet in ips related
:param pagination: Class with all data needed to paginate

:return: Following dictionary:

::

    {'equipamento': {'id': < id_vlan >,
    'nome': < nome_vlan >,
    'num_vlan': < num_vlan >,
    'id_ambiente': < id_ambiente >,
    'descricao': < descricao >,
    'acl_file_name': < acl_file_name >,
    'acl_valida': < acl_valida >,
    'ativada': < ativada >,
    'ambiente_name': < divisao_dc-ambiente_logico-grupo_l3 >
    'redeipv4': [ { all networkipv4 related } ],
    'redeipv6': [ { all networkipv6 related } ] },
    'total': {< total_registros >} }

:raise InvalidParameterError: Some parameter was invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
371,398
def main():
    """Parse command line options and launch the interpreter.

    NOTE(review): several string literals were stripped from this source
    (``dest=``, ``metavar=``, and the ``select_any`` class-key argument) —
    as written the function is not syntactically valid; the missing values
    must be restored from upstream before use.
    """
    parser = optparse.OptionParser(usage="%prog [options] <model_path> [another_model_path..]",
                                   version=xtuml.version.complete_string,
                                   formatter=optparse.TitledHelpFormatter())
    # -v may be repeated to raise the logging verbosity.
    parser.add_option("-v", "--verbosity", dest=, action="count",
                      default=1, help="increase debug logging level")
    parser.add_option("-f", "--function", dest=, action="store",
                      help="invoke function named NAME", metavar=)
    parser.add_option("-c", "--component", dest=, action="store",
                      help="look for the function in a component named NAME",
                      metavar=, default=None)

    (opts, args) = parser.parse_args()
    # At least one model path and a function name are required.
    if len(args) == 0 or not opts.function:
        parser.print_help()
        sys.exit(1)

    # Map the -v count to a logging level (anything above 3 -> DEBUG).
    levels = {
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
    }
    logging.basicConfig(level=levels.get(opts.verbosity, logging.DEBUG))

    # Imported lazily so that argument errors fail fast without the import.
    from bridgepoint import ooaofooa

    mm = ooaofooa.load_metamodel(args)
    # Locate the requested component (stripped literal: the class key).
    c_c = mm.select_any(, where(Name=opts.component))
    domain = ooaofooa.mk_component(mm, c_c, derived_attributes=False)

    func = domain.find_symbol(opts.function)
    return func()
Parse command line options and launch the interpreter
371,399
def focal(self):
    """Get the focal length in pixels for the camera.

    Computed lazily from the resolution and field of view and cached
    on the instance.

    Returns
    ------------
    focal : (2,) float
        Focal length in pixels
    """
    if self._focal is None:
        computed = []
        for pixels, field in zip(self._resolution, self.fov):
            half_angle = np.radians(field / 2.0)
            computed.append((pixels / 2.0) / np.tan(half_angle))
        self._focal = np.asanyarray(computed, dtype=np.float64)

    return self._focal