def output_before_run(self, run):
    """
    The method output_before_run() prints the name of a file to the terminal.
    @param run: a Run object
    """
    # output in terminal
    runSet = run.runSet
    try:
        OutputHandler.print_lock.acquire()

        try:
            runSet.started_runs += 1
        except AttributeError:
            runSet.started_runs = 1

        timeStr = time.strftime("%H:%M:%S", time.localtime()) + " "
        progressIndicator = " ({0}/{1})".format(runSet.started_runs, len(runSet.runs))
        terminalTitle = TERMINAL_TITLE.format(runSet.full_name + progressIndicator)

        if self.benchmark.num_of_threads == 1:
            util.printOut(terminalTitle + timeStr + self.format_sourcefile_name(run.identifier, runSet), '')
        else:
            util.printOut(terminalTitle + timeStr + "starting " + self.format_sourcefile_name(run.identifier, runSet))
    finally:
        OutputHandler.print_lock.release()
def cee_map_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    cee_map = ET.SubElement(config, "cee-map",
                            xmlns="urn:brocade.com:mgmt:brocade-cee-map")
    name = ET.SubElement(cee_map, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def list_zones(profile):
    '''
    List zones for the given profile

    :param profile: The profile key
    :type  profile: ``str``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_dns.list_zones profile1
    '''
    conn = _get_driver(profile=profile)
    return [_simple_zone(zone) for zone in conn.list_zones()]
def get_exception_based_on_api_message(message, image_name=""):
    """Return the exception matching the given API error message."""
    # The "?" is escaped so it is matched literally rather than acting as a
    # regex quantifier on the preceding character.
    msg_bigger_than_source = re.compile(
        'Image was not scaled, is the requested width bigger than the source\\?')
    msg_does_not_exist = re.compile('The source file .* does not exist')
    msg_does_not_exist_bis = re.compile('<div class="error"><p>Value not found')

    if re.search(msg_bigger_than_source, message):
        msg = "File %s requested at a width bigger than source" % image_name
        return RequestedWidthBiggerThanSourceException(msg)
    elif re.search(msg_does_not_exist, message):
        msg = "File %s does not exist" % image_name
        return FileDoesNotExistException(msg)
    elif re.search(msg_does_not_exist_bis, message):
        msg = "File %s does not exist" % image_name
        return FileDoesNotExistException(msg)
    else:
        return DownloadException(message)
def get_gopath(self, target):
    """Returns the $GOPATH for the given target."""
    return os.path.join(self.workdir, target.id)
def write(self, content=None):
    """
    Write report to file.

    Parameters
    ----------
    content: str
        'summary', 'extended', 'powerflow'
    """
    if self.system.files.no_output is True:
        return

    t, _ = elapsed()

    if not content:
        logger.warning('report content not specified.')
        return

    self.update(content)

    system = self.system
    file = system.files.output
    export = all_formats.get(system.config.export, 'txt')
    module = importlib.import_module('andes.formats.' + export)
    dump_data = getattr(module, 'dump_data')

    text = list()
    header = list()
    rowname = list()
    data = list()

    text.append(self.info)
    header.append(None)
    rowname.append(None)
    data.append(None)

    if content in ('summary', 'extended', 'powerflow'):
        text.append(['SUMMARY:\n'])
        header.append(None)
        rowname.append(self._basic_name)
        data.append([self.basic[item] for item in self._basic])

    if content in ('extended', 'powerflow'):
        text.append(['EXTENDED SUMMARY:\n'])
        header.append(['P (pu)', 'Q (pu)'])
        rowname.append(
            ['Generation', 'Load', 'Shunt Inj', 'Losses', 'Line Charging'])
        Pcol = [
            self.extended['Pg'],
            self.extended['Pl'],
            self.extended['Psh'],
            self.extended['Ploss'],
            self.extended['Pch'],
        ]
        Qcol = [
            self.extended['Qg'],
            self.extended['Ql'],
            self.extended['Qsh'],
            self.extended['Qloss'],
            self.extended['Qch'],
        ]
        data.append([Pcol, Qcol])

    if content == 'powerflow' and system.pflow.solved:
        idx, name, Vm, Va, Pg, Qg, Pl, Ql = system.get_busdata()
        Va_unit = 'deg' if system.pflow.config.usedegree else 'rad'
        text.append(['BUS DATA:\n'])
        # todo: consider system.pflow.config.units
        header.append([
            'Vm(pu)', 'Va({:s})'.format(Va_unit), 'Pg (pu)', 'Qg (pu)',
            'Pl (pu)', 'Ql (pu)'
        ])
        name = [str(i) + '-' + j[:8] for i, j in zip(idx, name)]
        rowname.append(name)
        data.append([Vm, Va, Pg, Qg, Pl, Ql])

        # Node data
        if hasattr(system, 'Node') and system.Node.n:
            idx, name, V = system.get_nodedata()
            text.append(['NODE DATA:\n'])
            header.append(['V(pu)'])
            rowname.append(name)
            data.append([V])

        # Line data
        name, fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss = \
            system.get_linedata()
        text.append(['LINE DATA:\n'])
        header.append([
            'From Bus', 'To Bus', 'P From (pu)', 'Q From (pu)', 'P To (pu)',
            'Q To(pu)', 'P Loss(pu)', 'Q Loss(pu)'
        ])
        rowname.append(name)
        data.append([fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss])

        # Additional Algebraic data
        text.append(['OTHER ALGEBRAIC VARIABLES:\n'])
        header.append([''])
        rowname.append(
            system.varname.unamey[2 * system.Bus.n:system.dae.m])
        data.append([round(i, 5) for i in system.dae.y[2 * system.Bus.n:]])

        # Additional State variable data
        if system.dae.n:
            text.append(['OTHER STATE VARIABLES:\n'])
            header.append([''])
            rowname.append(system.varname.unamex[:])
            data.append([round(i, 5) for i in system.dae.x[:]])

    dump_data(text, header, rowname, data, file)

    _, s = elapsed(t)
    logger.info('report written to <{:s}> in {:s}.'.format(
        system.files.output, s))
def deploy(self):
    """
    Open a ZIP archive, validate requirements, then deploy the webfont
    into project static files
    """
    self._info("* Opening archive: {}", self.archive_path)
    if not os.path.exists(self.archive_path):
        self._error("Given path does not exist: {}", self.archive_path)

    with zipfile.ZipFile(self.archive_path, 'r') as zip_archive:
        font_dir = self.requirements['font_dir'] + '/'
        allowed_extensions = ['.' + item for item in self.requirements['extensions']]
        members = [member for member in zip_archive.namelist()]

        if settings.ICOMOON_MANIFEST_FILENAME not in members:
            raise self._error("Icomoon archive must contain a JSON manifest '{}'",
                              settings.ICOMOON_MANIFEST_FILENAME)

        if font_dir not in members:
            raise self._error("Icomoon archive must contain the font directory '{}'",
                              font_dir)

        # Scan for supported font files
        font_files = []
        for item in members:
            # Don't catch the font_dir itself nor sub directories, just
            # files with allowed extensions
            if item.startswith(font_dir) and not item.endswith('/') \
                    and os.path.splitext(item)[-1] in allowed_extensions:
                font_files.append(item)
        if not font_files:
            self._error("Font dir does not contain any supported format: {}",
                        ', '.join(allowed_extensions))
        else:
            self._debug("* Found font files in archive: {}", ', '.join(font_files))

        # Extract files from archive
        tmp_container, css_content = self.extract(zip_archive, font_files)

        # Install files
        self.install(tmp_container, font_dir, css_content)
def descendants(self, cl=None, noduplicates=True):
    """
    returns all descendants in the taxonomy
    """
    if not cl:
        cl = self
    if cl.children():
        bag = []
        for x in cl.children():
            if x.uri != cl.uri:  # avoid circular relationships
                bag += [x] + self.descendants(x, noduplicates)
            else:
                bag += [x]
        # finally:
        if noduplicates:
            return remove_duplicates(bag)
        else:
            return bag
    else:
        return []
def add_reader(
        self,
        fd: IFileLike,
        callback: typing.Callable[[IFileLike], typing.Any],
) -> None:
    """Add a file descriptor to the processor and wait for READ.

    Args:
        fd (IFileLike): Any object that exposes a 'fileno' method that
            returns a valid file descriptor integer.
        callback (typing.Callable[[IFileLike], typing.Any]): A function
            that consumes the IFileLike object whenever the READ event
            is fired.
    """
    raise NotImplementedError()
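The interface above leaves the implementation open; a minimal concrete sketch using the standard selectors module (purely illustrative, not the project's own implementation):

import selectors


class SelectorProcessor:
    """Toy processor that dispatches READ-readiness callbacks via a selector."""

    def __init__(self) -> None:
        self._selector = selectors.DefaultSelector()

    def add_reader(self, fd, callback) -> None:
        # Register the file-like object for READ readiness and stash the callback.
        self._selector.register(fd, selectors.EVENT_READ, data=callback)

    def poll(self, timeout: float = 0.0) -> None:
        # Fire the stored callback for every ready reader.
        for key, _events in self._selector.select(timeout):
            key.data(key.fileobj)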
def send_template_message(self, user_id, template_id, data, url='', topcolor='#FF0000'):
    """
    Send a template message. For details, see
    http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html

    :param user_id: User ID, i.e. the source (OpenID) of the WechatMessage you received
    :param template_id: Template ID
    :param data: Template message data (as a dict), for example:
        {
            "first": {
                "value": "恭喜你购买成功!",
                "color": "#173177"
            },
            "keynote1": {
                "value": "巧克力",
                "color": "#173177"
            },
            "keynote2": {
                "value": "39.8元",
                "color": "#173177"
            },
            "keynote3": {
                "value": "2014年9月16日",
                "color": "#173177"
            },
            "remark": {
                "value": "欢迎再次购买!",
                "color": "#173177"
            }
        }
    :param url: Redirect URL (empty by default)
    :param topcolor: Top color as an RGB value (default '#FF0000')
    :return: The returned JSON data packet
    """
    unicode_data = {}
    if data:
        unicode_data = self._transcoding_dict(data)
    return self.request.post(
        url='https://api.weixin.qq.com/cgi-bin/message/template/send',
        data={
            'touser': user_id,
            "template_id": template_id,
            "url": url,
            "topcolor": topcolor,
            "data": unicode_data
        }
    )
def do_fileplaceholder(parser, token):
    """
    Method that parses the fileplaceholder template tag.
    """
    name, params = parse_placeholder(parser, token)
    return FilePlaceholderNode(name, **params)
def Deserialize(self, reader):
    """
    Deserialize full object.

    Args:
        reader (neo.IO.BinaryReader):
    """
    self.Script = reader.ReadVarBytes()
    self.ParameterList = reader.ReadVarBytes()
    self.ReturnType = reader.ReadByte()
def path_param(name, ns):
    """
    Build a path parameter definition.
    """
    if ns.identifier_type == "uuid":
        param_type = "string"
        param_format = "uuid"
    else:
        param_type = "string"
        param_format = None

    kwargs = {
        "name": name,
        "in": "path",
        "required": True,
        "type": param_type,
    }
    if param_format:
        kwargs["format"] = param_format

    return swagger.PathParameterSubSchema(**kwargs)
def _serialize(self, value, *args, **kwargs):
    """Serialize given datetime to timestamp."""
    if value is not None:
        value = super(MSTimestamp, self)._serialize(value, *args) * 1e3
    return value
def pretty_print_probabilities(self, decimal_digits=2):
    """
    Returns outcome probabilities (despite the name, nothing is printed),
    ignoring all outcomes with approximately zero probability (up to a
    certain number of decimal digits) and rounding the probabilities to
    decimal_digits.

    :param int decimal_digits: The number of digits to truncate to.
    :return: A dict with outcomes as keys and probabilities as values.
    :rtype: dict
    """
    outcome_dict = {}
    qubit_num = len(self)
    for index, amplitude in enumerate(self.amplitudes):
        outcome = get_bitstring_from_index(index, qubit_num)
        prob = round(abs(amplitude) ** 2, decimal_digits)
        if prob != 0.:
            outcome_dict[outcome] = prob
    return outcome_dict
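get_bitstring_from_index is not defined in this excerpt; a minimal sketch of what the call site implies (name and signature taken from the call above, body assumed):

def get_bitstring_from_index(index, qubit_num):
    """Convert a state-vector index to a zero-padded bitstring, e.g. (2, 3) -> '010'."""
    return format(index, '0{}b'.format(qubit_num))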
def _postprocess_request(self, r):
    """
    This converts the response to either the response or a parsed
    :class:`pytgbot.api_types.receivable.Receivable`.

    :param r: the request response
    :type  r: requests.Response

    :return: The json response from the server, or, if
             `self.return_python_objects` is `True`, a parsed return type.
    :rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable
    """
    from DictObject import DictObject
    import requests

    assert isinstance(r, requests.Response)
    try:
        logger.debug(r.json())
        res = DictObject.objectify(r.json())
    except Exception:
        logger.exception("Parsing answer failed.\nRequest: {r!s}\nContent: {r.content}".format(r=r))
        raise
    # end if
    res["response"] = r
    # TODO: does this fail on json lists? Does TG do that?
    # TG should always return a dict, with at least a status or something.
    if self.return_python_objects:
        if not res.ok:
            raise TgApiServerException(
                error_code=res.error_code if "error_code" in res else None,
                response=res.response if "response" in res else None,
                description=res.description if "description" in res else None,
                request=r.request
            )
        # end if not ok
        if "result" not in res:
            raise TgApiParseException('Key "result" is missing.')
        # end if no result
        return res.result
    # end if return_python_objects
    return res
# end def _postprocess_request


def _do_fileupload(self, file_param_name, value, _command=None, **kwargs):
    """
    :param file_param_name: For what field the file should be uploaded.
    :type  file_param_name: str

    :param value: File to send. You can either pass a file_id as String to
                  resend a file that is already on the Telegram servers, or
                  upload a new file, specifying the file path as
                  :class:`pytgbot.api_types.sendable.files.InputFile`.
    :type  value: pytgbot.api_types.sendable.files.InputFile | str

    :param _command: Overwrite the sent command.
                     Default is to convert `file_param_name` to camel case
                     (`"voice_note"` -> `"sendVoiceNote"`)

    :param kwargs: will get json encoded.

    :return: The json response from the server, or, if
             `self.return_python_objects` is `True`, a parsed return type.
    :rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable

    :raises TgApiTypeError, TgApiParseException, TgApiServerException:
        Everything from :meth:`Bot.do`, and :class:`TgApiTypeError`
    """
    from pytgbot.api_types.sendable.files import InputFile
    from luckydonaldUtils.encoding import unicode_type
    from luckydonaldUtils.encoding import to_native as n

    if isinstance(value, str):
        kwargs[file_param_name] = str(value)
    elif isinstance(value, unicode_type):
        kwargs[file_param_name] = n(value)
    elif isinstance(value, InputFile):
        kwargs["files"] = value.get_request_files(file_param_name)
    else:
        raise TgApiTypeError(
            "Parameter {key} is not type (str, {text_type}, {input_file_type}), "
            "but type {type}".format(
                key=file_param_name, type=type(value),
                input_file_type=InputFile, text_type=unicode_type))
    # end if
    if not _command:
        # command as camelCase
        # "voice_note" -> "sendVoiceNote"
        # https://stackoverflow.com/a/10984923/3423324
        command = re.sub(r'(?!^)_([a-zA-Z])', lambda m: m.group(1).upper(), "send_" + file_param_name)
    else:
        command = _command
    # end if
    return self.do(command, **kwargs)
def emit_event(self, event):
    """Emit the specified event (notify listeners)"""
    with self._lock:
        # Snapshot the listener list so callbacks can add/remove listeners.
        listeners = list(self._event_listeners)

    for cb in listeners:
        # noinspection PyBroadException
        try:
            cb(event)
        except:
            # Don't let exceptions from callbacks kill our thread of execution
            logger.exception("Event callback resulted in unhandled exception")
def focusout(self, event):
    """Change style on focus out events."""
    bc = self.style.lookup("TEntry", "bordercolor", ("!focus",))
    dc = self.style.lookup("TEntry", "darkcolor", ("!focus",))
    lc = self.style.lookup("TEntry", "lightcolor", ("!focus",))
    self.style.configure("%s.spinbox.TFrame" % self.frame, bordercolor=bc,
                         darkcolor=dc, lightcolor=lc)
def clipping_params(ts, capacity=100, rate_limit=float('inf'), method=None, max_attempts=100):
    """Start, end, and threshold that clip the value of a time series the most,
    given a limited "capacity" and "rate"

    Assumes that the signal can be linearly interpolated between points
    (trapezoidal integration)

    Arguments:
      ts (TimeSeries): Time series to attempt to clip to as low a max value as possible
      capacity (float): Total "funds" or "energy" available for clipping (integrated
        area under the time series)
      method (str): scipy optimization algorithm name, one of:
        'L-BFGS-B': Byrd, 1995, "A Limited Memory Algorithm for Bound Constrained Optimization"
        'TNC': Truncated Newton in C, or Newton Conjugate-Gradient, each variable may be
          constrained with upper and lower bounds
        'COBYLA': Constrained Optimization by Linear Approximation. Fortran implementation.
        'SLSQP': Kraft, 1988, Sequential Least Squares Programming or Quadratic Programming,
          infinite bounds converted to large floats

    TODO: Bisection search for the optimal threshold.

    Returns:
      dict: Optimization results, including the clipping 'threshold' and the
        'integral' of the area clipped above it.

    >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
    ...      '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
    >>> import pandas as pd
    >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
    >>> clipping_params(ts, capacity=60000)['threshold']  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    218.13...
    >>> clipping_params(ts, capacity=30000)['threshold']  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    224.15358...
    """
    VALID_METHODS = ['L-BFGS-B', 'TNC', 'SLSQP', 'COBYLA']
    # print('in clipping params for ts.index={0} and method={1}'.format(ts.index[0], method))
    ts.index = ts.index.astype(np.int64)
    costs = []

    def cost_fun(x, *args):
        thresh = x[0]
        ts, capacity, bounds = args
        integral = clipped_area(ts, thresh=thresh)
        terms = np.array([
            (10. * (integral - capacity) / capacity) ** 2,
            2. / 0.1 ** ((bounds[0] - thresh) * capacity / bounds[0]),
            2. / 0.1 ** ((thresh - bounds[1]) * capacity / bounds[1]),
            1.2 ** (integral / capacity)])
        return sum(terms)

    bounds = (ts.min(), ts.max())
    done, attempts = 0, 0
    thresh0 = bounds[0] + 0.5 * (bounds[1] - bounds[0])
    if not method or method not in VALID_METHODS:
        while attempts < max_attempts and not done:
            for optimizer_method in VALID_METHODS:
                optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds],
                                   args=(ts, capacity, bounds),
                                   method=optimizer_method)
                if optimum.success:
                    done = True
                    break
            if done:
                break
            attempts += 1
            thresh0 = bounds[0] + random.random() * (bounds[1] - bounds[0])
    else:
        optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds],
                           args=(ts, capacity, bounds), method=method)
    thresh = optimum.x[0]
    integral = clipped_area(ts, thresh=thresh)
    params = dict(optimum)
    params.update({'costs': costs, 'threshold': thresh,
                   'initial_guess': thresh0, 'attempts': attempts,
                   'integral': integral, 'method': method})
    return params
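clipped_area is likewise not shown in this excerpt; a rough sketch consistent with the call sites above (trapezoidal integration of the signal above a threshold; the real helper may additionally interpolate at threshold crossings):

import numpy as np


def clipped_area(ts, thresh=0.0):
    """Trapezoidal integral of the part of `ts` above `thresh` (assumed helper).

    Does not interpolate exact crossing points; sample values below the
    threshold simply contribute zero.
    """
    t = ts.index.values.astype(float)
    clipped = np.clip(ts.values.astype(float) - thresh, 0, None)
    return np.trapz(clipped, x=t)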
def check_for_session(self, status=None):
    """ check_for_session: see if session is in progress
        Args: status (str): step to check if last session reached (optional)
        Returns: boolean indicating if session exists
    """
    status = Status.LAST if status is None else status
    return os.path.isfile(self.get_restore_path(status)) and \
        os.path.getsize(self.get_restore_path(status)) > 0
def _get_conversion_type(self, convert_to=None):
    '''a helper function to return the conversion type based on user
       preference and input recipe.

       Parameters
       ==========
       convert_to: a string either docker or singularity (default None)
    '''
    acceptable = ['singularity', 'docker']

    # Default is to convert to the opposite kind
    conversion = "singularity"
    if self.name == "singularity":
        conversion = "docker"

    # Unless the user asks for a specific type
    if convert_to is not None and convert_to in acceptable:
        conversion = convert_to
    return conversion
def bamsort_and_index(job, job_vars):
    """
    Sorts bam file and produces index file

    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    # Unpack variables
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    sudo = input_args['sudo']
    # I/O
    rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam')
    output = os.path.join(work_dir, 'sorted.bam')
    # Command -- second argument is "Output Prefix"
    cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')]
    cmd2 = ['index', docker_path(output)]
    docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
                tool_parameters=cmd1, work_dir=work_dir, sudo=sudo)
    docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
                tool_parameters=cmd2, work_dir=work_dir, sudo=sudo)
    # Write to FileStore
    ids['sorted.bam'] = job.fileStore.writeGlobalFile(output)
    ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai'))
    # Run child job
    output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv()
    rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv()
    return rseq_id, output_ids
def _translate_range(self, len_, start, end):
    """
    Translate range to valid bounds.
    """
    if start < 0:
        start += len_
    start = max(0, min(start, len_))
    if end < 0:
        end += len_
    end = max(-1, min(end, len_ - 1))
    return start, end
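A quick illustration of the clamping behavior, rewritten as a free function for demonstration:

def translate_range(len_, start, end):
    # Same logic as the method above, without the instance context.
    if start < 0:
        start += len_
    start = max(0, min(start, len_))
    if end < 0:
        end += len_
    end = max(-1, min(end, len_ - 1))
    return start, end


print(translate_range(10, -3, -1))   # (7, 9): negative indices wrap around
print(translate_range(10, 5, 99))    # (5, 9): end is clamped to len_ - 1
print(translate_range(10, -99, 2))   # (0, 2): start is clamped to 0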
def get_input(self, more=False):
    """Prompt for code input."""
    received = None
    try:
        received = self.prompt.input(more)
    except KeyboardInterrupt:
        print()
        printerr("KeyboardInterrupt")
    except EOFError:
        print()
        self.exit_runner()
    else:
        if received.startswith(exit_chars):
            self.exit_runner()
            received = None
    return received
def p_to_find(self):
    '''
    To find, pager.
    '''
    kwd = {
        'pager': '',
    }
    self.render('user/user_find_list.html',
                kwd=kwd,
                view=MUser.get_by_keyword(""),
                cfg=config.CMS_CFG,
                userinfo=self.userinfo)
def handle_fk_field(self, obj, field):
    """
    Called to handle a ForeignKey (we need to treat them slightly
    differently from regular fields).
    """
    self._start_relational_field(field)
    related = getattr(obj, field.name)
    if related is not None:
        if self.use_natural_keys and hasattr(related, 'natural_key'):
            # If related object has a natural key, use it
            related = related.natural_key()
            # Iterable natural keys are rolled out as subelements
            for key_value in related:
                self.xml.startElement("natural", {})
                self.xml.characters(smart_unicode(key_value))
                self.xml.endElement("natural")
        else:
            if field.rel.field_name == related._meta.pk.name:
                # Related to remote object via primary key
                related = related._get_pk_val()
            else:
                # Related to remote object via other field
                related = getattr(related, field.rel.field_name)
            self.xml.characters(smart_unicode(related))
    else:
        self.xml.addQuickElement("None")
    self.xml.endElement("field")
def contains(self, *items):
    """Asserts that val contains the given item or items."""
    if len(items) == 0:
        raise ValueError('one or more args must be given')
    elif len(items) == 1:
        if items[0] not in self.val:
            if self._check_dict_like(self.val, return_as_bool=True):
                self._err('Expected <%s> to contain key <%s>, but did not.' % (self.val, items[0]))
            else:
                self._err('Expected <%s> to contain item <%s>, but did not.' % (self.val, items[0]))
    else:
        missing = []
        for i in items:
            if i not in self.val:
                missing.append(i)
        if missing:
            if self._check_dict_like(self.val, return_as_bool=True):
                # Pluralize "key" only when more than one key is missing.
                self._err('Expected <%s> to contain keys %s, but did not contain key%s %s.' % (
                    self.val, self._fmt_items(items),
                    '' if len(missing) == 1 else 's', self._fmt_items(missing)))
            else:
                self._err('Expected <%s> to contain items %s, but did not contain %s.' % (
                    self.val, self._fmt_items(items), self._fmt_items(missing)))
    return self
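Typical usage, assuming assertpy's usual assert_that entry point:

from assertpy import assert_that

assert_that([1, 2, 3]).contains(2)           # passes: single item
assert_that([1, 2, 3]).contains(1, 3)        # passes: multiple items
assert_that({'a': 1, 'b': 2}).contains('a')  # dict-like values check keys

# A failing call raises an assertion error with the message built above, e.g.:
# assert_that([1, 2, 3]).contains(9)
#   -> "Expected <[1, 2, 3]> to contain item <9>, but did not."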
def logger():
    """Access global logger"""
    global _LOGGER
    if _LOGGER is None:
        logging.basicConfig()
        _LOGGER = logging.getLogger()
        _LOGGER.setLevel('INFO')
    return _LOGGER
def execute(self, statement, *args, **kwargs):
    """
    This convenience method will execute the query passed in as is. For more
    complex functionality you may want to use the sqlalchemy engine directly,
    but this serves as an example implementation.

    :param statement: SQL statement to execute that will identify the
        resultset of interest.
    """
    with self.engine.connect() as conn:
        s = sqlalchemy.sql.text(statement)
        return conn.execute(s, **kwargs)
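One caveat with the method above: the connection is released when the with block exits, so some drivers require the result to be consumed before returning. A hedged variant sketch that fetches eagerly (SQLAlchemy 1.x-style parameter binding):

def execute_fetchall(self, statement, **kwargs):
    # Fetch all rows before the connection is returned to the pool.
    with self.engine.connect() as conn:
        return conn.execute(sqlalchemy.sql.text(statement), **kwargs).fetchall()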
def get_timestamp_expression(self, time_grain):
    """Getting the time component of the query"""
    label = utils.DTTM_ALIAS

    db = self.table.database
    pdf = self.python_date_format
    is_epoch = pdf in ('epoch_s', 'epoch_ms')
    if not self.expression and not time_grain and not is_epoch:
        sqla_col = column(self.column_name, type_=DateTime)
        return self.table.make_sqla_column_compatible(sqla_col, label)
    grain = None
    if time_grain:
        grain = db.grains_dict().get(time_grain)
        if not grain:
            raise NotImplementedError(
                f'No grain spec for {time_grain} for database {db.database_name}')
    col = db.db_engine_spec.get_timestamp_column(self.expression, self.column_name)
    expr = db.db_engine_spec.get_time_expr(col, pdf, time_grain, grain)
    sqla_col = literal_column(expr, type_=DateTime)
    return self.table.make_sqla_column_compatible(sqla_col, label)
def _h2ab_s(s):
    """Define the saturated line boundary between Region 4 and 2a-2b, h=f(s)

    Parameters
    ----------
    s : float
        Specific entropy, [kJ/kgK]

    Returns
    -------
    h : float
        Specific enthalpy, [kJ/kg]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:

        * 5.85 ≤ s ≤ s"(273.15K)

    References
    ----------
    IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for
    Region 3, Equations as a Function of h and s for the Region Boundaries,
    and an Equation Tsat(h,s) for Region 4 of the IAPWS Industrial
    Formulation 1997 for the Thermodynamic Properties of Water and Steam,
    http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 5

    Examples
    --------
    >>> _h2ab_s(7)
    2723.729985
    >>> _h2ab_s(9)
    2511.861477
    """
    # Check input parameters
    if s < 5.85 or s > 9.155759395:
        raise NotImplementedError("Incoming out of bound")

    sigma1 = s/5.21
    sigma2 = s/9.2
    I = [1, 1, 2, 2, 4, 4, 7, 8, 8, 10, 12, 12, 18, 20, 24, 28, 28, 28, 28,
         28, 32, 32, 32, 32, 32, 36, 36, 36, 36, 36]
    J = [8, 24, 4, 32, 1, 2, 7, 5, 12, 1, 0, 7, 10, 12, 32, 8, 12, 20, 22,
         24, 2, 7, 12, 14, 24, 10, 12, 20, 22, 28]
    n = [-0.524581170928788e3, -0.926947218142218e7, -0.237385107491666e3,
         0.210770155812776e11, -0.239494562010986e2, 0.221802480294197e3,
         -0.510472533393438e7, 0.124981396109147e7, 0.200008436996201e10,
         -0.815158509791035e3, -0.157612685637523e3, -0.114200422332791e11,
         0.662364680776872e16, -0.227622818296144e19, -0.171048081348406e32,
         0.660788766938091e16, 0.166320055886021e23, -0.218003784381501e30,
         -0.787276140295618e30, 0.151062329700346e32, 0.795732170300541e7,
         0.131957647355347e16, -0.325097068299140e24, -0.418600611419248e26,
         0.297478906557467e35, -0.953588761745473e20, 0.166957699620939e25,
         -0.175407764869978e33, 0.347581490626396e35, -0.710971318427851e39]

    suma = 0
    for i, j, ni in zip(I, J, n):
        suma += ni * (1/sigma1-0.513)**i * (sigma2-0.524)**j
    return 2800*exp(suma)
def validate_schema(cls, tx):
    """Validate the validator election vote transaction. Since `VOTE` extends
       `TRANSFER` transaction, all the validations for `CREATE` transaction
       should be inherited
    """
    _validate_schema(TX_SCHEMA_COMMON, tx)
    _validate_schema(TX_SCHEMA_TRANSFER, tx)
    _validate_schema(cls.TX_SCHEMA_CUSTOM, tx)
def split_data(self, train_images, train_labels):
    """
    :param train_images: numpy array (image_dim, image_dim, num_images)
    :param train_labels: numpy array (labels)
    :return: train_images, train_labels, valid_images, valid_labels
    """
    valid_images = train_images[:self.num_valid_images]
    valid_labels = train_labels[:self.num_valid_images]
    train_images = train_images[self.num_valid_images:]
    train_labels = train_labels[self.num_valid_images:]
    return train_images, train_labels, valid_images, valid_labels
def extract_xyz_matrix_from_loop_json(pdb_lines, parsed_loop_json_contents,
                                      atoms_of_interest=backbone_atoms,
                                      expected_num_residues=None,
                                      expected_num_residue_atoms=None,
                                      allow_overlaps=False,
                                      include_all_columns=False):
    '''A utility wrapper to extract_xyz_matrix_from_pdb_residue_range. This
       accepts PDB file lines and a loop.json file (a defined Rosetta format)
       and returns a pandas dataframe of the X, Y, Z coordinates for the
       requested atom types for all residues in all loops defined by the
       loop.json file. The dataframe is indexed by a string identifying the
       PDB residue and atom type.
       parsed_loop_json_contents should be a Python dict read in from a
       loop.json file e.g. json.loads(file_contents).'''

    # Create one dataframe per loop segment
    dataframes = []
    for loop_set in parsed_loop_json_contents['LoopSet']:
        start_pdb_residue_id = PDB.ChainResidueID2String(
            loop_set['start']['chainID'],
            str(loop_set['start']['resSeq']) + loop_set['start']['iCode'])
        stop_pdb_residue_id = PDB.ChainResidueID2String(
            loop_set['stop']['chainID'],
            str(loop_set['stop']['resSeq']) + loop_set['stop']['iCode'])
        dataframes.append(PDB.extract_xyz_matrix_from_pdb_residue_range(
            pdb_lines,
            start_pdb_residue_id=start_pdb_residue_id,
            stop_pdb_residue_id=stop_pdb_residue_id,
            atoms_of_interest=atoms_of_interest,
            expected_num_residues=None,  # checked in total below, not per segment
            expected_num_residue_atoms=expected_num_residue_atoms,
            include_all_columns=include_all_columns))

    # Concatenate the dataframes
    # note: the pandas documentation notes that verify_integrity is relatively expensive
    dataframe = pandas.concat(dataframes, verify_integrity=(allow_overlaps == False))

    if expected_num_residues != None and expected_num_residue_atoms != None:
        assert(dataframe.shape[0] == expected_num_residues * expected_num_residue_atoms)

    return dataframe
def parse_timedelta(text):
    """Robustly parses a short text description of a time period into a
    :class:`datetime.timedelta`. Supports weeks, days, hours, minutes, and
    seconds, with or without decimal points:

    Args:
        text (str): Text to parse.

    Returns:
        datetime.timedelta

    Raises:
        ValueError: on parse failure.

    >>> parse_td('1d 2h 3.5m 0s')
    datetime.timedelta(1, 7410)

    Also supports full words and whitespace.

    >>> parse_td('2 weeks 1 day')
    datetime.timedelta(15)

    Negative times are supported, too:

    >>> parse_td('-1.5 weeks 3m 20s')
    datetime.timedelta(-11, 43400)
    """
    td_kwargs = {}
    for match in _PARSE_TD_RE.finditer(text):
        value, unit = match.group('value'), match.group('unit')
        try:
            unit_key = _PARSE_TD_KW_MAP[unit]
        except KeyError:
            raise ValueError('invalid time unit %r, expected one of %r'
                             % (unit, _PARSE_TD_KW_MAP.keys()))
        try:
            value = float(value)
        except ValueError:
            raise ValueError('invalid time value for unit %r: %r'
                             % (unit, value))
        td_kwargs[unit_key] = value

    return timedelta(**td_kwargs)
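_PARSE_TD_RE and _PARSE_TD_KW_MAP are module-level constants not included in this excerpt; a plausible minimal sketch consistent with how they are used (the real definitions may differ):

import re

# One signed decimal number followed by a unit word, e.g. "-1.5 weeks" or "3m".
_PARSE_TD_RE = re.compile(r'(?P<value>-?\d*\.?\d+)\s*(?P<unit>[a-zA-Z]+)')

# Map unit spellings to timedelta keyword arguments.
_PARSE_TD_KW_MAP = {}
for _kw in ('weeks', 'days', 'hours', 'minutes', 'seconds'):
    for _spelling in (_kw, _kw[:-1], _kw[0]):  # "weeks", "week", "w"
        _PARSE_TD_KW_MAP[_spelling] = _kw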
def read_block(self, block):
    """Read an 8-byte data block at address (block * 8).
    """
    if block < 0 or block > 255:
        raise ValueError("invalid block number")
    log.debug("read block {0}".format(block))
    cmd = "\x02" + chr(block) + 8 * chr(0) + self.uid
    return self.transceive(cmd)[1:9]
def to_array(self):
    """
    Serializes this MessageEntity to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(MessageEntity, self).to_array()
    array['type'] = u(self.type)  # py2: type unicode, py3: type str
    array['offset'] = int(self.offset)  # type int
    array['length'] = int(self.length)  # type int
    if self.url is not None:
        array['url'] = u(self.url)  # py2: type unicode, py3: type str
    if self.user is not None:
        array['user'] = self.user.to_array()  # type User
    return array
def _init_sub_dsp(self, dsp, fringe, outputs, no_call, initial_dist, index,
                  full_name):
    """
    Initialize the dispatcher as sub-dispatcher and update the fringe.

    :param fringe:
        Heapq of closest available nodes.
    :type fringe: list[(float | int, bool, (str, Dispatcher))]

    :param outputs:
        Ending data nodes.
    :type outputs: list[str], iterable

    :param no_call:
        If True data node estimation function is not used.
    :type no_call: bool
    """
    # Initialize as sub-dispatcher.
    sol = self.__class__(
        dsp, {}, outputs, False, None, None, no_call, False,
        wait_in=self._wait_in.get(dsp, None), index=self.index + index,
        full_name=full_name
    )
    sol.sub_sol = self.sub_sol

    for f in sol.fringe:  # Update the fringe.
        item = (initial_dist + f[0], (2,) + f[1][1:], f[-1])
        heapq.heappush(fringe, item)

    return sol
def _handle_timeout(self) -> None:
    """Called by IOLoop when the requested timeout has passed."""
    self._timeout = None
    while True:
        try:
            ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0)
        except pycurl.error as e:
            ret = e.args[0]
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()

    # In theory, we shouldn't have to do this because curl will call
    # _set_timeout whenever the timeout changes. However, sometimes after
    # _handle_timeout we will need to reschedule immediately even though
    # nothing has changed from curl's perspective. This is because when
    # socket_action is called with SOCKET_TIMEOUT, libcurl decides
    # internally which timeouts need to be processed by using a monotonic
    # clock (where available) while tornado uses python's time.time() to
    # decide when timeouts have occurred. When those clocks disagree on
    # elapsed time (as they will whenever there is an NTP adjustment),
    # tornado might call _handle_timeout before libcurl is ready. After
    # each timeout, resync the scheduled timeout with libcurl's current
    # state.
    new_timeout = self._multi.timeout()
    if new_timeout >= 0:
        self._set_timeout(new_timeout)
def exists(self, client=None):
    """API call: test for the existence of the taskqueue via a GET request

    See https://cloud.google.com/appengine/docs/python/taskqueue/rest/taskqueues/get

    :type client: :class:`taskqueue.client.Client` or ``NoneType``
    :param client: the client to use. If not passed, falls back to the
                   ``client`` stored on the current taskqueue.
    """
    client = self._require_client(client)
    try:
        response = client.connection.api_request(method='GET', path=self.path)
    except NotFound:
        return False
    else:
        # The project name gets prefixed, so this retrieves the correct path
        # with the prefixed project name.
        # see https://code.google.com/p/googleappengine/issues/detail?id=10199
        if os.path.split(response.get("id"))[-1] == self.id:
            self.full_name = response.get("id")
            return True
        else:
            return False
def host(self):
    '''Return the host committee.
    '''
    _id = None
    for participant in self['participants']:
        if participant['type'] == 'host':
            if set(['participant_type', 'id']) < set(participant):
                # This event uses the id keyname "id".
                if participant['participant_type'] == 'committee':
                    _id = participant['id']
                if _id is None:
                    continue
                return self.committees_dict.get(_id)
            else:
                return participant['participant']
def output(self, value):
    """
    Sets the client's output (on, off, int)

    Sets the general purpose output on some display modules to this value.
    Use on to set all outputs to high state, and off to set all to low state.
    The meaning of the integer value depends on your specific device, usually
    it is a bit pattern describing the state of each output line.

    Return None or LCDd response on error
    """
    response = self.request(("output %s" % (value)).encode())

    if "success" in response:
        return None
    else:
        return response
def get_clone(rec):
    """
    >>> get_clone("Medicago truncatula chromosome 2 clone mth2-48e18")
    ('2', 'mth2-48e18')
    """
    s = rec.description
    chr = re.search(chr_pat, s)
    clone = re.search(clone_pat, s)
    chr = chr.group(1) if chr else ""
    clone = clone.group(1) if clone else ""
    return chr, clone
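chr_pat and clone_pat are module-level regexes not included in this excerpt; a plausible sketch given the doctest (the real patterns may differ, and note that rec must expose a .description attribute, e.g. a Biopython SeqRecord, so the doctest as written would not run on a bare string):

import re

# Hypothetical patterns consistent with the doctest above.
chr_pat = re.compile(r'chromosome (\w+)')
clone_pat = re.compile(r'clone ([\w\-]+)')

m = re.search(chr_pat, "Medicago truncatula chromosome 2 clone mth2-48e18")
print(m.group(1))  # '2'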
def clear_relation(cls):
    """
    Clear relation properties for reference Model, such as OneToOne,
    Reference, ManyToMany
    """
    for k, v in cls.properties.items():
        if isinstance(v, ReferenceProperty):
            if hasattr(v, 'collection_name') and hasattr(v.reference_class, v.collection_name):
                delattr(v.reference_class, v.collection_name)
            if isinstance(v, OneToOne):
                # remove from reference_class._onetoone
                del v.reference_class._onetoone[v.collection_name]
def __getNumberOfFollowers(self, web):
    """Scrape the number of followers from a GitHub profile.

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    counters = web.find_all('span', {'class': 'Counter'})
    try:
        if 'k' not in counters[2].text:
            self.followers = int(counters[2].text)
        else:
            follText = counters[2].text.replace(" ", "")
            follText = follText.replace("\n", "").replace("k", "")
            if follText and len(follText) > 1:
                self.followers = int(follText.split(".")[0]) * 1000 + \
                    int(follText.split(".")[1]) * 100
            elif follText:
                self.followers = int(follText.split(".")[0]) * 1000
    except IndexError as error:
        print("There was an error with the user " + self.name)
        print(error)
    except AttributeError as error:
        print("There was an error with the user " + self.name)
        print(error)
def to_dict(self):
    """Render a MessageElement as python dict

    :return: Python dict representation
    :rtype: dict
    """
    obj_dict = super(Cell, self).to_dict()
    child_dict = {
        'type': self.__class__.__name__,
        'header_flag': self.header_flag,
        'align': self.align,
        'wrap_slash': self.wrap_slash,
        'content': self.content.to_dict()
    }
    obj_dict.update(child_dict)
    return obj_dict
def ensure_parent_directory(path, ensure_parent=True):
    """
    Ensures the parent directory exists.

    :param string path: the path of the file
    :param bool ensure_parent: if ``True``, ensure the parent directory of
                               ``path`` exists; if ``False``, ensure ``path``
                               exists
    :raises: OSError: if the path cannot be created
    """
    parent_directory = os.path.abspath(path)
    if ensure_parent:
        parent_directory = os.path.dirname(parent_directory)
    if not os.path.exists(parent_directory):
        try:
            os.makedirs(parent_directory)
        except (IOError, OSError):
            raise OSError(u"Directory '%s' cannot be created" % parent_directory)
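A quick usage sketch (paths are illustrative):

import os
import tempfile

base = tempfile.mkdtemp()

# Create the directories leading up to a file we are about to write.
target = os.path.join(base, 'reports', '2020', 'summary.txt')
ensure_parent_directory(target)  # creates .../reports/2020
assert os.path.isdir(os.path.dirname(target))

# With ensure_parent=False the path itself is treated as the directory.
ensure_parent_directory(os.path.join(base, 'cache'), ensure_parent=False)
assert os.path.isdir(os.path.join(base, 'cache'))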
def delete(self, **kw):
    """
    Delete a policy route from the engine. You can delete using a single
    field or multiple fields for a more exact match. Use a keyword argument
    to delete a route by any valid attribute.

    :param kw: use valid Route keyword values to delete by exact match
    """
    delete_by = []
    for field, val in kw.items():
        if val is not None:
            delete_by.append(field)

    self.items[:] = [route for route in self.items
                     if not all(route.get(field) == kw.get(field)
                                for field in delete_by)]
def create_invoice_from_albaran(pk, list_lines):
    MODEL_SOURCE = SalesAlbaran
    MODEL_FINAL = SalesInvoice
    url_reverse = 'CDNX_invoicing_invoicesaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a facturas")
    msg_error_not_found = _('Sales albaran not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas')
    return SalesLines.create_document_from_another(
        pk, list_lines,
        MODEL_SOURCE, MODEL_FINAL, url_reverse,
        msg_error_relation, msg_error_not_found, msg_error_line_not_found,
        False)
    # NOTE: the block below is unreachable dead code retained from an
    # earlier implementation.
    """
    context = {}
    if list_lines:
        new_list_lines = SalesLines.objects.filter(
            pk__in=[int(x) for x in list_lines]
        ).exclude(
            invoice__isnull=False
        )
        if new_list_lines:
            new_pk = new_list_lines.first()
            if new_pk:
                context = SalesLines.create_invoice_from_order(
                    new_pk.order.pk,
                    [x['pk'] for x in new_list_lines.values('pk')])
                return context
            else:
                error = _('Pedido no encontrado')
        else:
            error = _('Lineas no relacionadas con pedido')
    else:
        error = _('Lineas no seleccionadas')

    context['error'] = error
    return context
    """
def traveling_salesman_qubo(G, lagrange=2, weight='weight'):
    """Return the QUBO with ground states corresponding to a minimum TSP route.

    If :math:`|G|` is the number of nodes in the graph, the resulting qubo
    will have:

    * :math:`|G|^2` variables/nodes
    * :math:`2 |G|^2 (|G| - 1)` interactions/edges

    Parameters
    ----------
    G : NetworkX graph
        A complete graph in which each edge has an attribute giving its weight.

    lagrange : number, optional (default 2)
        Lagrange parameter to weight the constraints (each node visited
        exactly once, one node per position) versus the route-length
        objective.

    weight : optional (default 'weight')
        The name of the edge attribute containing the weight.

    Returns
    -------
    QUBO : dict
        The QUBO with ground states corresponding to a minimum travelling
        salesperson route.
    """
    N = G.number_of_nodes()

    # some input checking
    if N in (1, 2) or len(G.edges) != N*(N-1)//2:
        msg = "graph must be a complete graph with at least 3 nodes or empty"
        raise ValueError(msg)

    # Creating the QUBO
    Q = defaultdict(float)

    # Constraint that each row has exactly one 1
    for node in G:
        for pos_1 in range(N):
            Q[((node, pos_1), (node, pos_1))] -= lagrange
            for pos_2 in range(pos_1+1, N):
                Q[((node, pos_1), (node, pos_2))] += 2.0*lagrange

    # Constraint that each col has exactly one 1
    for pos in range(N):
        for node_1 in G:
            Q[((node_1, pos), (node_1, pos))] -= lagrange
            for node_2 in set(G)-{node_1}:
                Q[((node_1, pos), (node_2, pos))] += 2.0*lagrange

    # Objective that minimizes distance
    for u, v in itertools.combinations(G.nodes, 2):
        for pos in range(N):
            nextpos = (pos + 1) % N

            # going from u -> v
            Q[((u, pos), (v, nextpos))] += G[u][v][weight]

            # going from v -> u
            Q[((v, pos), (u, nextpos))] += G[u][v][weight]

    return Q
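A small usage sketch with networkx (weights are arbitrary):

import networkx as nx

# Build a weighted complete graph on 4 nodes.
G = nx.complete_graph(4)
for u, v in G.edges:
    G[u][v]['weight'] = abs(u - v)  # arbitrary illustrative weights

Q = traveling_salesman_qubo(G, lagrange=10)

# One binary variable per (node, position) pair: 4 * 4 = 16 variables.
variables = {pair for key in Q for pair in key}
print(len(variables))  # 16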
def match(self, props=None, rng=None, offset=None):
    """
    Provide any of the args and match or don't.

    :param props: Should be a subset of my props.
    :param rng: Exactly match my range.
    :param offset: I start after this offset.
    :returns: True if all the provided predicates match or are None
    """
    if rng:
        s, e = rng
    else:
        e = s = None
    return ((e is None or self.end == e) and
            (s is None or self.start == s)) and \
        (props is None or props.issubset(self.props)) and \
        (offset is None or self.start >= offset)
def all_pages(method, request, accessor, cond=None):
    """Helper to process all pages using botocore service methods (exhausts NextToken).

    note: `cond` is optional... you can use it to make filtering more explicit
    if you like. Alternatively you can do the filtering in the `accessor`
    which is perfectly fine, too

    Note: lambda uses a slightly different mechanism so there is a specific
    version in ramuda_utils.

    :param method: service method
    :param request: request dictionary for service call
    :param accessor: function to extract data from each response
    :param cond: filter function to return True / False based on a response
    :return: list of collected resources
    """
    if cond is None:
        cond = lambda x: True
    result = []
    next_token = None
    while True:
        if next_token:
            request['nextToken'] = next_token
        response = method(**request)
        if cond(response):
            data = accessor(response)
            if data:
                if isinstance(data, list):
                    result.extend(data)
                else:
                    result.append(data)
        if 'nextToken' not in response:
            break
        next_token = response['nextToken']
    return result
def wait_until_title_contains(self, partial_title, timeout=None):
    """
    Waits for the title to contain <partial_title>

    @type partial_title: str
    @param partial_title: the partial title to locate
    @type timeout: int
    @param timeout: the maximum number of seconds the driver will wait before timing out

    @rtype: bool
    @return: True once the title contains <partial_title>
    """
    timeout = timeout if timeout is not None else self.timeout

    def wait():
        '''Wait function passed to executor'''
        return WebDriverWait(self.driver, timeout).until(EC.title_contains(partial_title))

    return self.execute_and_handle_webdriver_exceptions(
        wait, timeout, partial_title,
        'Timeout waiting for title to contain: ' + str(partial_title))
def TriToBin(self, x, y, z):
    '''
    Turn an x-y-z triangular coord to an a-b coord.
    if z is negative, calc with its abs then return (a, -b).

    :param x,y,z: the three numbers of the triangular coord
    :type x,y,z: float or double are both OK, just numbers
    :return: the corresponding a-b coord
    :rtype: a tuple consist of a and b
    '''
    if (z >= 0):
        if (x + y + z == 0):
            return (0, 0)
        else:
            Sum = x + y + z
            X = 100.0 * x / Sum
            Y = 100.0 * y / Sum
            Z = 100.0 * z / Sum
            if (X + Y != 0):
                a = Z / 2.0 + (100.0 - Z) * Y / (Y + X)
            else:
                a = Z / 2.0
            b = Z / 2.0 * (np.sqrt(3))
            return (a, b)
    else:
        z = abs(z)
        if (x + y + z == 0):
            return (0, 0)
        else:
            Sum = x + y + z
            X = 100.0 * x / Sum
            Y = 100.0 * y / Sum
            Z = 100.0 * z / Sum
            if (X + Y != 0):
                a = Z / 2.0 + (100.0 - Z) * Y / (Y + X)
            else:
                a = Z / 2.0
            b = Z / 2.0 * (np.sqrt(3))
            return (a, -b)
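A quick sanity check of the mapping, assuming the method lives on an instance named diagram (values in the comments are rounded):

# Vertices map to the corners of a triangle with side 100:
print(diagram.TriToBin(1, 0, 0))  # (0.0, 0.0)
print(diagram.TriToBin(0, 1, 0))  # (100.0, 0.0)
print(diagram.TriToBin(0, 0, 1))  # (50.0, 86.60...)  i.e. the apex

# The centroid (1, 1, 1) lands in the middle:
a, b = diagram.TriToBin(1, 1, 1)
print(round(a, 2), round(b, 2))   # 50.0 28.87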
def tags(self, value):  # pylint: disable-msg=E0102
    """Set the tags in the configuration (setter)"""
    if not isinstance(value, list):
        raise TypeError
    self._config['tags'] = value
def LaplaceCentreWeight(self):
    """Centre weighting matrix for TV Laplacian."""
    sz = [1,] * self.S.ndim
    for ax in self.axes:
        sz[ax] = self.S.shape[ax]
    lcw = 2*len(self.axes)*np.ones(sz, dtype=self.dtype)
    for ax in self.axes:
        lcw[(slice(None),)*ax + ([0, -1],)] -= 1.0
    return lcw
def rpc_get_namespace_cost(self, namespace_id, **con_info):
    """
    Return the cost of a given namespace, including fees.
    Returns {'amount': ..., 'units': ...}
    """
    if not check_namespace(namespace_id):
        return {'error': 'Invalid namespace', 'http_status': 400}

    db = get_db_state(self.working_dir)
    res = get_namespace_cost(db, namespace_id)
    db.close()

    units = res['units']
    amount = res['amount']
    ns = res['namespace']

    if amount is None:
        # invalid
        return {'error': 'Invalid namespace', 'http_status': 404}

    ret = {
        'units': units,
        'amount': amount,
    }

    if ns is not None:
        ret['warning'] = 'Namespace already exists'

    return self.success_response(ret)
def get_attached_cdroms(self, datacenter_id, server_id, depth=1):
    """
    Retrieves a list of CDROMs attached to the server.

    :param      datacenter_id: The unique ID of the data center.
    :type       datacenter_id: ``str``

    :param      server_id: The unique ID of the server.
    :type       server_id: ``str``

    :param      depth: The depth of the response data.
    :type       depth: ``int``
    """
    response = self._perform_request(
        '/datacenters/%s/servers/%s/cdroms?depth=%s' % (
            datacenter_id, server_id, str(depth)))
    return response
def r_squared(model, fit_result, data):
    """
    Calculates the coefficient of determination, R^2, for the fit.

    (Is not defined properly for vector valued functions.)

    :param model: Model instance
    :param fit_result: FitResults instance
    :param data: data with which the fit was performed.
    """
    # First filter out the dependent vars
    y_is = [data[var] for var in model if var in data]
    x_is = [value for var, value in data.items()
            if var.name in model.__signature__.parameters]
    y_bars = [np.mean(y_i) if y_i is not None else None for y_i in y_is]
    f_is = model(*x_is, **fit_result.params)
    SS_res = np.sum([np.sum((y_i - f_i)**2)
                     for y_i, f_i in zip(y_is, f_is) if y_i is not None])
    SS_tot = np.sum([np.sum((y_i - y_bar)**2)
                     for y_i, y_bar in zip(y_is, y_bars) if y_i is not None])
    return 1 - SS_res/SS_tot
def get_fn(name):
    """Get the full path to one of the reference files shipped for utils.

    In the source distribution, these files are in ``mbuild/utils/reference``,
    but on installation, they're moved to somewhere in the user's python
    site-packages directory.

    Parameters
    ----------
    name : str
        Name of the file to load (with respect to the reference/ folder).
    """
    fn = resource_filename('mbuild', os.path.join('utils', 'reference', name))
    if not os.path.exists(fn):
        raise IOError('Sorry! {} does not exist.'.format(fn))
    return fn
Get the full path to one of the reference files shipped for utils. In the source distribution, these files are in ``mbuild/utils/reference``, but on installation, they're moved to somewhere in the user's python site-packages directory. Parameters ---------- name : str Name of the file to load (with respect to the reference/ folder).
def back_tick(cmd, ret_err=False, as_str=True, raise_err=None): """ Run command `cmd`, return stdout, or stdout, stderr if `ret_err` Roughly equivalent to ``check_output`` in Python 2.7 Parameters ---------- cmd : sequence command to execute ret_err : bool, optional If True, return stderr in addition to stdout. If False, just return stdout as_str : bool, optional Whether to decode outputs to unicode string on exit. raise_err : None or bool, optional If True, raise RuntimeError for non-zero return code. If None, set to True when `ret_err` is False, False if `ret_err` is True Returns ------- out : str or tuple If `ret_err` is False, return stripped string containing stdout from `cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where ``stdout`` is the stripped stdout, and ``stderr`` is the stripped stderr. Raises ------ Raises RuntimeError if command returns non-zero exit code and `raise_err` is True """ if raise_err is None: raise_err = False if ret_err else True cmd_is_seq = isinstance(cmd, (list, tuple)) proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=not cmd_is_seq) out, err = proc.communicate() retcode = proc.returncode cmd_str = ' '.join(cmd) if cmd_is_seq else cmd if retcode is None: proc.terminate() raise RuntimeError(cmd_str + ' process did not terminate') if raise_err and retcode != 0: raise RuntimeError('{0} returned code {1} with error {2}'.format( cmd_str, retcode, err.decode('latin-1'))) out = out.strip() if as_str: out = out.decode('latin-1') if not ret_err: return out err = err.strip() if as_str: err = err.decode('latin-1') return out, err
Run command `cmd`, return stdout, or stdout, stderr if `ret_err` Roughly equivalent to ``check_output`` in Python 2.7 Parameters ---------- cmd : sequence command to execute ret_err : bool, optional If True, return stderr in addition to stdout. If False, just return stdout as_str : bool, optional Whether to decode outputs to unicode string on exit. raise_err : None or bool, optional If True, raise RuntimeError for non-zero return code. If None, set to True when `ret_err` is False, False if `ret_err` is True Returns ------- out : str or tuple If `ret_err` is False, return stripped string containing stdout from `cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where ``stdout`` is the stripped stdout, and ``stderr`` is the stripped stderr. Raises ------ Raises RuntimeError if command returns non-zero exit code and `raise_err` is True
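A hedged usage sketch, assuming a POSIX environment where the echo command is available:

out = back_tick(['echo', 'hello'])                     # 'hello'
out, err = back_tick(['echo', 'hello'], ret_err=True)  # ('hello', '')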
def quantileclip(arrays, masks=None, dtype=None, out=None,
                 zeros=None, scales=None, weights=None,
                 fclip=0.10):
    """Combine arrays using quantile clipping, with masks.

    Inputs and masks are a list of array objects. All input arrays
    have the same shape. If present, the masks have the same shape
    also.

    The function returns an array with one more dimension than the
    inputs and with size (3, shape). out[0] contains the mean,
    out[1] the variance and out[2] the number of points used.

    :param arrays: a list of arrays
    :param masks: a list of mask arrays, True values are masked
    :param dtype: data type of the output
    :param out: optional output, with one more axis than the input arrays
    :param fclip: fraction of points removed on both ends.
        Maximum is 0.4 (80% of points rejected)
    :return: mean, variance of the mean and number of points stored
    """
    return generic_combine(intl_combine.quantileclip_method(fclip),
                           arrays, masks=masks, dtype=dtype, out=out,
                           zeros=zeros, scales=scales, weights=weights)
Combine arrays using quantile clipping, with masks. Inputs and masks are a list of array objects. All input arrays have the same shape. If present, the masks have the same shape also. The function returns an array with one more dimension than the inputs and with size (3, shape). out[0] contains the mean, out[1] the variance and out[2] the number of points used. :param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param out: optional output, with one more axis than the input arrays :param fclip: fraction of points removed on both ends. Maximum is 0.4 (80% of points rejected) :return: mean, variance of the mean and number of points stored
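A minimal usage sketch, assuming this module and numpy are importable; the input images are made up:

import numpy as np

arrays = [np.ones((4, 4)), 2 * np.ones((4, 4)), 3 * np.ones((4, 4))]
result = quantileclip(arrays, fclip=0.1)
# result[0]: combined mean, result[1]: variance of the mean,
# result[2]: number of points used per pixel
assert result.shape == (3, 4, 4)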
def get_build_report(self, project, build_id, type=None): """GetBuildReport. [Preview API] Gets a build report. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str type: :rtype: :class:`<BuildReportMetadata> <azure.devops.v5_0.build.models.BuildReportMetadata>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if type is not None: query_parameters['type'] = self._serialize.query('type', type, 'str') response = self._send(http_method='GET', location_id='45bcaa88-67e1-4042-a035-56d3b4a7d44c', version='5.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('BuildReportMetadata', response)
GetBuildReport. [Preview API] Gets a build report. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str type: :rtype: :class:`<BuildReportMetadata> <azure.devops.v5_0.build.models.BuildReportMetadata>`
def plot_ants_plane(off_screen=False, notebook=None): """ Demonstrate how to create a plot class to plot multiple meshes while adding scalars and text. Plot two ants and airplane """ # load and shrink airplane airplane = vtki.PolyData(planefile) airplane.points /= 10 # pts = airplane.points # gets pointer to array # pts /= 10 # shrink # rotate and translate ant so it is on the plane ant = vtki.PolyData(antfile) ant.rotate_x(90) ant.translate([90, 60, 15]) # Make a copy and add another ant ant_copy = ant.copy() ant_copy.translate([30, 0, -10]) # Create plotting object plotter = vtki.Plotter(off_screen=off_screen, notebook=notebook) plotter.add_mesh(ant, 'r') plotter.add_mesh(ant_copy, 'b') # Add airplane mesh and make the color equal to the Y position plane_scalars = airplane.points[:, 1] plotter.add_mesh(airplane, scalars=plane_scalars, stitle='Plane Y\nLocation') plotter.add_text('Ants and Plane Example') plotter.plot()
Demonstrate how to create a plot class to plot multiple meshes while adding scalars and text. Plot two ants and airplane
def getSlicesForText(self, body, getFingerprint=None, startIndex=0, maxResults=10): """Get a list of slices of the text Args: body, str: The text to be evaluated (required) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) Returns: list of Text Raises: CorticalioException: if the request was not successful """ return self._text.getSlicesForText(self._retina, body, getFingerprint, startIndex, maxResults)
Get a list of slices of the text Args: body, str: The text to be evaluated (required) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) Returns: list of Text Raises: CorticalioException: if the request was not successful
def consume_gas(self, amount: int, reason: str) -> None: """ Consume ``amount`` of gas from the remaining gas. Raise `eth.exceptions.OutOfGas` if there is not enough gas remaining. """ return self._gas_meter.consume_gas(amount, reason)
Consume ``amount`` of gas from the remaining gas. Raise `eth.exceptions.OutOfGas` if there is not enough gas remaining.
def get(self, *, kind: Type=None, tag: Hashable=None, **kwargs) -> Iterator:
    """
    Get an iterator of GameObjects by kind or tag.

    kind: Any type. Pass to get a subset of contained GameObjects with the
          given type.
    tag: Any Hashable object. Pass to get a subset of contained GameObjects
         with the given tag.

    Pass both kind and tag to get objects that are both that type and that
    tag.

    Examples:
        scene.get(kind=MyGameObject)
        scene.get(tag="red")
        scene.get(kind=MyGameObject, tag="red")
    """
    return self.game_objects.get(kind=kind, tag=tag, **kwargs)
Get an iterator of GameObjects by kind or tag. kind: Any type. Pass to get a subset of contained GameObjects with the given type. tag: Any Hashable object. Pass to get a subset of contained GameObjects with the given tag. Pass both kind and tag to get objects that are both that type and that tag. Examples: scene.get(kind=MyGameObject) scene.get(tag="red") scene.get(kind=MyGameObject, tag="red")
def todict(self): """Returns a dictionary fully representing the state of this object """ return {'index': self.index, 'seed': hb_encode(self.seed), 'n': self.n, 'root': hb_encode(self.root), 'hmac': hb_encode(self.hmac), 'timestamp': self.timestamp}
Returns a dictionary fully representing the state of this object
def create_nation_fixtures(self): """ Create national US and State Map """ SHP_SLUG = "cb_{}_us_state_500k".format(self.YEAR) DOWNLOAD_PATH = os.path.join(self.DOWNLOAD_DIRECTORY, SHP_SLUG) shape = shapefile.Reader( os.path.join(DOWNLOAD_PATH, "{}.shp".format(SHP_SLUG)) ) fields = shape.fields[1:] field_names = [f[0] for f in fields] features = [] for shp in shape.shapeRecords(): state = dict(zip(field_names, shp.record)) geodata = { "type": "Feature", "geometry": shp.shape.__geo_interface__, "properties": { "state": state["STATEFP"], "name": state["NAME"], }, } features.append(geodata) Geometry.objects.update_or_create( division=self.NATION, subdivision_level=self.STATE_LEVEL, simplification=self.THRESHOLDS["nation"], source=os.path.join( self.SHP_SOURCE_BASE.format(self.YEAR), SHP_SLUG ) + ".zip", series=self.YEAR, defaults={ "topojson": self.toposimplify( geojson.FeatureCollection(features), self.THRESHOLDS["nation"], ) }, ) geo, created = Geometry.objects.update_or_create( division=self.NATION, subdivision_level=self.COUNTY_LEVEL, simplification=self.THRESHOLDS["nation"], source=os.path.join( self.SHP_SOURCE_BASE.format(self.YEAR), SHP_SLUG ) + ".zip", series=self.YEAR, defaults={"topojson": self.get_state_county_shps("00")}, ) tqdm.write("Nation\n") tqdm.write( self.TQDM_PREFIX + "> FIPS {} @ ~{}kb ".format( "00", round(len(json.dumps(geo.topojson)) / 1000) ) ) tqdm.write(self.style.SUCCESS("Done.\n"))
Create national US and State Map
def adjustTitleFont(self):
    """
    Adjusts the font used for the title based on the current width and
    display name.
    """
    left, top, right, bottom = self.contentsMargins()
    r = self.roundingRadius()

    # include text padding
    left += 5 + r / 2
    top += 5 + r / 2
    right += 5 + r / 2
    bottom += 5 + r / 2

    r = self.rect()

    rect_l = r.left() + left
    rect_r = r.right() - right
    rect_t = r.top() + top
    rect_b = r.bottom() - bottom

    # ensure we have a valid rect
    rect = QRect(rect_l, rect_t, rect_r - rect_l, rect_b - rect_t)
    if rect.width() < 10:
        return

    font = XFont(QApplication.font())
    font.adaptSize(self.displayName(), rect, wordWrap=self.wordWrap())
    self._titleFont = font
Adjusts the font used for the title based on the current width and display name.
def get_nlp_base(self):
    '''Getter for self.__nlp_base, which must be an NlpBase instance.'''
    if isinstance(self.__nlp_base, NlpBase) is False:
        raise TypeError("The type of self.__nlp_base must be NlpBase.")
    return self.__nlp_base
Getter for self.__nlp_base, which must be an NlpBase instance.
def http_basic(r, username, password):
    """Attaches HTTP Basic Authentication to the given Request object.
    Arguments should be considered non-positional.
    """
    username = str(username)
    password = str(password)

    # b64encode requires bytes and returns bytes on Python 3, so encode the
    # credentials first and decode the result before building the header
    auth_s = b64encode(('%s:%s' % (username, password)).encode('latin1'))
    r.headers['Authorization'] = 'Basic %s' % auth_s.decode('ascii')

    return r
Attaches HTTP Basic Authentication to the given Request object. Arguments should be considered non-positional.
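For reference, the header value is just the Base64 of 'username:password'; a standalone sketch with hypothetical credentials:

from base64 import b64encode

creds = b64encode(b'user:pass').decode('ascii')
print('Basic %s' % creds)  # Basic dXNlcjpwYXNz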
def _process_output(res, parse_json=True): """Process output.""" res_payload = res.payload.decode('utf-8') output = res_payload.strip() _LOGGER.debug('Status: %s, Received: %s', res.code, output) if not output: return None if not res.code.is_successful(): if 128 <= res.code < 160: raise ClientError(output) elif 160 <= res.code < 192: raise ServerError(output) if not parse_json: return output return json.loads(output)
Process output.
def message(request, socket, context, message): """ Event handler for a room receiving a message. First validates a joining user's name and sends them the list of users. """ room = get_object_or_404(ChatRoom, id=message["room"]) if message["action"] == "start": name = strip_tags(message["name"]) user, created = room.users.get_or_create(name=name) if not created: socket.send({"action": "in-use"}) else: context["user"] = user users = [u.name for u in room.users.exclude(id=user.id)] socket.send({"action": "started", "users": users}) user.session = socket.session.session_id user.save() joined = {"action": "join", "name": user.name, "id": user.id} socket.send_and_broadcast_channel(joined) else: try: user = context["user"] except KeyError: return if message["action"] == "message": message["message"] = strip_tags(message["message"]) message["name"] = user.name socket.send_and_broadcast_channel(message)
Event handler for a room receiving a message. First validates a joining user's name and sends them the list of users.
def reset(self): """ Reset the timeout. Starts a new timer. """ self.counter += 1 local_counter = self.counter def timer_timeout(): if self.counter == local_counter and self.running: self.callback() self.loop.call_later(self.timeout, timer_timeout)
Reset the timeout. Starts a new timer.
def array(self, envelope=()): """Returns an NDArray, optionally subset by spatial envelope. Keyword args: envelope -- coordinate extent tuple or Envelope """ args = () if envelope: args = self.get_offset(envelope) return self.ds.ReadAsArray(*args)
Returns an NDArray, optionally subset by spatial envelope. Keyword args: envelope -- coordinate extent tuple or Envelope
def refresh_db(cache_valid_time=0, failhard=False, **kwargs): ''' Updates the APT database to latest packages based upon repositories Returns a dict, with the keys being package databases and the values being the result of the update attempt. Values can be one of the following: - ``True``: Database updated successfully - ``False``: Problem updating database - ``None``: Database already up-to-date cache_valid_time .. versionadded:: 2016.11.0 Skip refreshing the package database if refresh has already occurred within <value> seconds failhard If False, return results of Err lines as ``False`` for the package database that encountered the error. If True, raise an error with a list of the package databases that encountered errors. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) failhard = salt.utils.data.is_true(failhard) ret = {} error_repos = list() if cache_valid_time: try: latest_update = os.stat(APT_LISTS_PATH).st_mtime now = time.time() log.debug("now: %s, last update time: %s, expire after: %s seconds", now, latest_update, cache_valid_time) if latest_update + cache_valid_time > now: return ret except TypeError as exp: log.warning("expected integer for cache_valid_time parameter, failed with: %s", exp) except IOError as exp: log.warning("could not stat cache directory due to: %s", exp) call = _call_apt(['apt-get', '-q', 'update'], scope=False) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += call['stderr'] raise CommandExecutionError(comment) else: out = call['stdout'] for line in out.splitlines(): cols = line.split() if not cols: continue ident = ' '.join(cols[1:]) if 'Get' in cols[0]: # Strip filesize from end of line ident = re.sub(r' \[.+B\]$', '', ident) ret[ident] = True elif 'Ign' in cols[0]: ret[ident] = False elif 'Hit' in cols[0]: ret[ident] = None elif 'Err' in cols[0]: ret[ident] = False error_repos.append(ident) if failhard and error_repos: raise CommandExecutionError('Error getting repos: {0}'.format(', '.join(error_repos))) return ret
Updates the APT database to latest packages based upon repositories Returns a dict, with the keys being package databases and the values being the result of the update attempt. Values can be one of the following: - ``True``: Database updated successfully - ``False``: Problem updating database - ``None``: Database already up-to-date cache_valid_time .. versionadded:: 2016.11.0 Skip refreshing the package database if refresh has already occurred within <value> seconds failhard If False, return results of Err lines as ``False`` for the package database that encountered the error. If True, raise an error with a list of the package databases that encountered errors. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db
def mod(self): """Modulus of vector.""" return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
Modulus of vector.
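A quick check of the Euclidean norm, assuming the class exposes a Vector(x, y, z) constructor (the constructor signature is an assumption here):

v = Vector(3.0, 4.0, 0.0)  # hypothetical constructor
print(v.mod())             # 5.0, since sqrt(9 + 16 + 0) == 5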
def set_energy_range(self, logemin, logemax): """Set the energy range of the analysis. Parameters ---------- logemin: float Lower end of energy range in log10(E/MeV). logemax : float Upper end of energy range in log10(E/MeV). """ if logemin is None: logemin = self.log_energies[0] if logemax is None: logemax = self.log_energies[-1] imin = int(utils.val_to_edge(self.log_energies, logemin)[0]) imax = int(utils.val_to_edge(self.log_energies, logemax)[0]) if imin - imax == 0: imin = int(len(self.log_energies) - 1) imax = int(len(self.log_energies) - 1) klims = self.like.logLike.klims() if imin != klims[0] or imax != klims[1]: self.like.selectEbounds(imin, imax) return np.array([self.log_energies[imin], self.log_energies[imax]])
Set the energy range of the analysis. Parameters ---------- logemin: float Lower end of energy range in log10(E/MeV). logemax : float Upper end of energy range in log10(E/MeV).
def extract_packing_plan(self, topology): """ Returns the representation of packing plan that will be returned from Tracker. """ packingPlan = { "id": "", "container_plans": [] } if not topology.packing_plan: return packingPlan container_plans = topology.packing_plan.container_plans containers = [] for container_plan in container_plans: instances = [] for instance_plan in container_plan.instance_plans: instance_resources = {"cpu": instance_plan.resource.cpu, "ram": instance_plan.resource.ram, "disk": instance_plan.resource.disk} instance = {"component_name" : instance_plan.component_name, "task_id" : instance_plan.task_id, "component_index": instance_plan.component_index, "instance_resources": instance_resources} instances.append(instance) required_resource = {"cpu": container_plan.requiredResource.cpu, "ram": container_plan.requiredResource.ram, "disk": container_plan.requiredResource.disk} scheduled_resource = {} if container_plan.scheduledResource: scheduled_resource = {"cpu": container_plan.scheduledResource.cpu, "ram": container_plan.scheduledResource.ram, "disk": container_plan.scheduledResource.disk} container = {"id": container_plan.id, "instances": instances, "required_resources": required_resource, "scheduled_resources": scheduled_resource} containers.append(container) packingPlan["id"] = topology.packing_plan.id packingPlan["container_plans"] = containers return json.dumps(packingPlan)
Returns the representation of packing plan that will be returned from Tracker.
def sliced_wasserstein(PD1, PD2, M=50): """ Implementation of Sliced Wasserstein distance as described in Sliced Wasserstein Kernel for Persistence Diagrams by Mathieu Carriere, Marco Cuturi, Steve Oudot (https://arxiv.org/abs/1706.03358) Parameters ----------- PD1: np.array size (m,2) Persistence diagram PD2: np.array size (n,2) Persistence diagram M: int, default is 50 Iterations to run approximation. Returns -------- sw: float Sliced Wasserstein distance between PD1 and PD2 """ diag_theta = np.array( [np.cos(0.25 * np.pi), np.sin(0.25 * np.pi)], dtype=np.float32 ) l_theta1 = [np.dot(diag_theta, x) for x in PD1] l_theta2 = [np.dot(diag_theta, x) for x in PD2] if (len(l_theta1) != PD1.shape[0]) or (len(l_theta2) != PD2.shape[0]): raise ValueError("The projected points and origin do not match") PD_delta1 = [[np.sqrt(x ** 2 / 2.0)] * 2 for x in l_theta1] PD_delta2 = [[np.sqrt(x ** 2 / 2.0)] * 2 for x in l_theta2] # i have the input now to compute the sw sw = 0 theta = 0.5 step = 1.0 / M for i in range(M): l_theta = np.array( [np.cos(theta * np.pi), np.sin(theta * np.pi)], dtype=np.float32 ) V1 = [np.dot(l_theta, x) for x in PD1] + [np.dot(l_theta, x) for x in PD_delta2] V2 = [np.dot(l_theta, x) for x in PD2] + [np.dot(l_theta, x) for x in PD_delta1] sw += step * cityblock(sorted(V1), sorted(V2)) theta += step return sw
Implementation of Sliced Wasserstein distance as described in Sliced Wasserstein Kernel for Persistence Diagrams by Mathieu Carriere, Marco Cuturi, Steve Oudot (https://arxiv.org/abs/1706.03358) Parameters ----------- PD1: np.array size (m,2) Persistence diagram PD2: np.array size (n,2) Persistence diagram M: int, default is 50 Iterations to run approximation. Returns -------- sw: float Sliced Wasserstein distance between PD1 and PD2
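A hedged usage sketch on two tiny diagrams (values chosen only for illustration); it assumes numpy and scipy.spatial.distance.cityblock are importable, as the function body already requires:

import numpy as np

PD1 = np.array([[0.0, 1.0], [0.5, 2.0]])
PD2 = np.array([[0.0, 1.2], [0.4, 1.8]])
print(sliced_wasserstein(PD1, PD2, M=50) >= 0.0)  # True
print(sliced_wasserstein(PD1, PD1, M=50))         # 0.0 for identical diagrams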
def infer_data_type(data_container): """ For a given container of data, infer the type of data as one of continuous, categorical, or ordinal. For now, it is a one-to-one mapping as such: - str: categorical - int: ordinal - float: continuous There may be better ways that are not currently implemented below. For example, with a list of numbers, we can check whether the number of unique entries is less than or equal to 12, but has over 10000+ entries. This would be a good candidate for floats being categorical. :param data_container: A generic container of data points. :type data_container: `iterable` """ # Defensive programming checks. # 0. Ensure that we are dealing with lists or tuples, and nothing else. assert isinstance(data_container, list) or isinstance( data_container, tuple ), "data_container should be a list or tuple." # 1. Don't want to deal with only single values. assert ( len(set(data_container)) > 1 ), "There should be more than one value in the data container." # 2. Don't want to deal with mixed data. assert is_data_homogenous( data_container ), "Data are not of a homogenous type!" # Once we check that the data type of the container is homogenous, we only # need to check the first element in the data container for its type. datum = data_container[0] # Return statements below # treat binomial data as categorical # TODO: make tests for this. if len(set(data_container)) == 2: return "categorical" elif isinstance(datum, str): return "categorical" elif isinstance(datum, int): return "ordinal" elif isinstance(datum, float): return "continuous" else: raise ValueError("Not possible to tell what the data type is.")
For a given container of data, infer the type of data as one of continuous, categorical, or ordinal. For now, it is a one-to-one mapping as such: - str: categorical - int: ordinal - float: continuous There may be better ways that are not currently implemented below. For example, with a list of numbers, we can check whether the number of unique entries is less than or equal to 12, but has over 10000+ entries. This would be a good candidate for floats being categorical. :param data_container: A generic container of data points. :type data_container: `iterable`
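Usage follows directly from the branches above; note the two-unique-values rule fires before the per-type checks:

print(infer_data_type(['a', 'b', 'c']))   # categorical
print(infer_data_type([1, 2, 3]))         # ordinal
print(infer_data_type([0.5, 1.5, 2.5]))   # continuous
print(infer_data_type([1.0, 2.0, 1.0]))   # categorical (only two unique values)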
def true_num_genes(model, custom_spont_id=None): """Return the number of genes in a model ignoring spontaneously labeled genes. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of genes excluding spontaneous genes """ true_num = 0 for gene in model.genes: if not is_spontaneous(gene, custom_id=custom_spont_id): true_num += 1 return true_num
Return the number of genes in a model ignoring spontaneously labeled genes. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of genes excluding spontaneous genes
def poll(args): """Poll data from the sensor.""" backend = _get_backend(args) poller = MiFloraPoller(args.mac, backend) print("Getting data from Mi Flora") print("FW: {}".format(poller.firmware_version())) print("Name: {}".format(poller.name())) print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE))) print("Moisture: {}".format(poller.parameter_value(MI_MOISTURE))) print("Light: {}".format(poller.parameter_value(MI_LIGHT))) print("Conductivity: {}".format(poller.parameter_value(MI_CONDUCTIVITY))) print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
Poll data from the sensor.
def get_node_id(nuc_or_sat, namespace=None):
    """return the node ID of the given nucleus or satellite"""
    node_type = get_node_type(nuc_or_sat)
    if node_type == 'leaf':
        leaf_id = nuc_or_sat[0].leaves()[0]
        if namespace is not None:
            return '{0}:{1}'.format(namespace, leaf_id)
        else:
            return str(leaf_id)

    # node_type == 'span'
    span_start = nuc_or_sat[0].leaves()[0]
    span_end = nuc_or_sat[0].leaves()[1]
    if namespace is not None:
        return '{0}:span:{1}-{2}'.format(namespace, span_start, span_end)
    else:
        return 'span:{0}-{1}'.format(span_start, span_end)
return the node ID of the given nucleus or satellite
def valid_path(path):
  '''
  Check if an entry in the class path exists as either a directory or a file
  '''
  # if the entry ends with '*', check that the prefix exists as a directory
  if path.endswith('*'):
    Log.debug('Checking classpath entry suffix as directory: %s', path[:-1])
    if os.path.isdir(path[:-1]):
      return True
    return False

  # check if the classpath entry is a directory
  Log.debug('Checking classpath entry as directory: %s', path)
  if os.path.isdir(path):
    return True

  # check if the classpath entry is a file
  Log.debug('Checking classpath entry as file: %s', path)
  if os.path.isfile(path):
    return True

  return False
Check if an entry in the class path exists as either a directory or a file
def build_remap_symbols(self, name_generator, children_only=None):
    """
    The children_only flag is inapplicable, but it is included because the
    Scope class defines this method with this signature. Here this simply
    maps the catch symbol to the next available replacement name.
    """
    replacement = name_generator(skip=self._reserved_symbols)
    self.remapped_symbols[self.catch_symbol] = next(replacement)
    # also continue down into the children.
    for child in self.children:
        child.build_remap_symbols(name_generator, False)
The children_only flag is inapplicable, but it is included because the Scope class defines this method with this signature. Here this simply maps the catch symbol to the next available replacement name.
def extract(self, m): """ extract info specified in option """ self._clear() self.m = m # self._preprocess() if self.option != []: self._url_filter() self._email_filter() if 'tex' in self.option: self._tex_filter() # if 'email' in self.option: # self._email_filter() if 'telephone' in self.option: self._telephone_filter() if 'QQ' in self.option: self._QQ_filter() if 'emoji' in self.option: self._emoji_filter() if 'wechat' in self.option: self._wechat_filter() self._filter() if 'blur' in self.option: self._blur = get_number(self.m, self._limit) return self._get_result()
extract info specified in option
def _R2deriv(self,R,z,phi=0.,t=0.): """ NAME: _R2deriv PURPOSE: evaluate the second radial derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the second radial derivative HISTORY: 2011-10-09 - Written - Bovy (IAS) """ return 1./(R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**1.5 \ -3.*R**2./(R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**2.5
NAME: _R2deriv PURPOSE: evaluate the second radial derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the second radial derivative HISTORY: 2011-10-09 - Written - Bovy (IAS)
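For reference, with S = a + sqrt(z^2 + b^2) and the unit-amplitude potential Phi = -(R^2 + S^2)^{-1/2} (the normalization is read off the code, not stated in the docstring), the return value is:

\frac{\partial^2 \Phi}{\partial R^2}
  = \frac{1}{\left(R^2 + S^2\right)^{3/2}}
  - \frac{3 R^2}{\left(R^2 + S^2\right)^{5/2}},
\qquad S = a + \sqrt{z^2 + b^2}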
def read(self, size=None): """Reads a byte string from the file-like object at the current offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed. """ if not self._is_open: raise IOError('Not opened.') return self._fsapfs_file_entry.read(size=size)
Reads a byte string from the file-like object at the current offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
def params(self): """ Read self params from configuration. """ parser = JinjaInterpolationNamespace() parser.read(self.configuration) return dict(parser['params'] or {})
Read self params from configuration.
def _detect_buffer_encoding(self, f): """Guess by checking BOM, and checking `_special_encode_check`, and using memory map.""" encoding = None with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as m: encoding = self._analyze_file(m) return encoding
Guess by checking BOM, and checking `_special_encode_check`, and using memory map.
def predict_is(self,h=5): """ Makes dynamic in-sample predictions with the estimated model Parameters ---------- h : int (default : 5) How many steps would you like to forecast? Returns ---------- - pd.DataFrame with predicted values """ predictions = [] for t in range(0,h): data1 = self.data_original.iloc[:-h+t,:] data2 = self.data_original.iloc[-h+t:,:] x = NDynReg(formula=self.formula, data=data1, family=self.family) x.fit(print_progress=False) if t == 0: predictions = x.predict(1,oos_data=data2) else: predictions = pd.concat([predictions,x.predict(1,oos_data=data2)]) predictions.rename(columns={0:self.data_name}, inplace=True) predictions.index = self.index[-h:] return predictions
Makes dynamic in-sample predictions with the estimated model Parameters ---------- h : int (default : 5) How many steps would you like to forecast? Returns ---------- - pd.DataFrame with predicted values
def on_batch_end(self, last_input, last_output, **kwargs): "Steps through the generators then each of the critics." self.G_A.zero_grad(); self.G_B.zero_grad() fake_A, fake_B = last_output[0].detach(), last_output[1].detach() real_A, real_B = last_input self._set_trainable(D_A=True) self.D_A.zero_grad() loss_D_A = 0.5 * (self.crit(self.D_A(real_A), True) + self.crit(self.D_A(fake_A), False)) loss_D_A.backward() self.opt_D_A.step() self._set_trainable(D_B=True) self.D_B.zero_grad() loss_D_B = 0.5 * (self.crit(self.D_B(real_B), True) + self.crit(self.D_B(fake_B), False)) loss_D_B.backward() self.opt_D_B.step() self._set_trainable() metrics = self.learn.loss_func.metrics + [loss_D_A, loss_D_B] for n,m in zip(self.names,metrics): self.smootheners[n].add_value(m)
Steps through the generators then each of the critics.
def index(self, index): """ :type index: int """ if self._index != index: self._dirty = True self._index = index
:type index: int
def removedirs_p(self): """ Like :meth:`removedirs`, but does not raise an exception if the directory is not empty or does not exist. """ with contextlib.suppress(FileExistsError, DirectoryNotEmpty): with DirectoryNotEmpty.translate(): self.removedirs() return self
Like :meth:`removedirs`, but does not raise an exception if the directory is not empty or does not exist.
def _create_hosting_devices_from_config(self): """To be called late during plugin initialization so that any hosting device specified in the config file is properly inserted in the DB. """ hd_dict = config.get_specific_config('cisco_hosting_device') attr_info = ciscohostingdevicemanager.RESOURCE_ATTRIBUTE_MAP[ ciscohostingdevicemanager.DEVICES] adm_context = bc.context.get_admin_context() for hd_uuid, kv_dict in hd_dict.items(): # ensure hd_uuid is properly formatted hd_uuid = config.uuidify(hd_uuid) try: old_hd = self.get_hosting_device(adm_context, hd_uuid) is_create = False except ciscohostingdevicemanager.HostingDeviceNotFound: old_hd = {} is_create = True kv_dict['id'] = hd_uuid kv_dict['tenant_id'] = self.l3_tenant_id() # make sure we keep using same config agent if it has been assigned kv_dict['cfg_agent_id'] = old_hd.get('cfg_agent_id') # make sure we keep using management port if it exists kv_dict['management_port_id'] = old_hd.get('management_port_id') config.verify_resource_dict(kv_dict, True, attr_info) hd = {ciscohostingdevicemanager.DEVICE: kv_dict} try: if is_create: self.create_hosting_device(adm_context, hd) else: self.update_hosting_device(adm_context, kv_dict['id'], hd) except n_exc.NeutronException: with excutils.save_and_reraise_exception(): LOG.error('Invalid hosting device specification in ' 'configuration file for device = %s', hd_uuid)
To be called late during plugin initialization so that any hosting device specified in the config file is properly inserted in the DB.
def switch_to_next_app(self): """ switches to the next app """ log.debug("switching to next app...") cmd, url = DEVICE_URLS["switch_to_next_app"] self.result = self._exec(cmd, url)
switches to the next app
def ls_mux(sel, lsls_di, ls_do): """ Multiplexes a list of input signal structures to an output structure. A structure is represented by a list of signals: [signal_1, signal_2, ..., signal_n] ls_do[0] = lsls_di[sel][0] ls_do[1] = lsls_di[sel][1] ... ls_do[n] = lsls_di[sel][n] sel - select index lsls_di - list of input signal structures: [[sig, sig, ..., sig], [sig, sig, ..., sig], ..., [sig, sig, ..., sig]] ls_do - output signal structure: [sig, sig, ..., sig] """ N = len(ls_do) lsls_in = [list(x) for x in zip(*lsls_di)] return [mux(sel, lsls_in[i], ls_do[i]) for i in range(N)]
Multiplexes a list of input signal structures to an output structure. A structure is represented by a list of signals: [signal_1, signal_2, ..., signal_n] ls_do[0] = lsls_di[sel][0] ls_do[1] = lsls_di[sel][1] ... ls_do[n] = lsls_di[sel][n] sel - select index lsls_di - list of input signal structures: [[sig, sig, ..., sig], [sig, sig, ..., sig], ..., [sig, sig, ..., sig]] ls_do - output signal structure: [sig, sig, ..., sig]
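Stripped of the signal machinery, the selection semantics are a per-field pick; a plain-Python analogue (no hardware types involved):

lsls_di = [['a0', 'b0'], ['a1', 'b1'], ['a2', 'b2']]  # 3 inputs, 2 fields each
sel = 1
ls_do = [row[sel] for row in zip(*lsls_di)]
print(ls_do)  # ['a1', 'b1']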
def render(self, data, accepted_media_type=None, renderer_context=None): """ Render `data` into JSON, returning a bytestring. """ if data is None: return bytes() renderer_context = renderer_context or {} indent = self.get_indent(accepted_media_type, renderer_context) if indent is None: separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS else: separators = INDENT_SEPARATORS ret = json.dumps( data, cls=self.encoder_class, indent=indent, ensure_ascii=self.ensure_ascii, separators=separators ) # On python 2.x json.dumps() returns bytestrings if ensure_ascii=True, # but if ensure_ascii=False, the return type is underspecified, # and may (or may not) be unicode. # On python 3.x json.dumps() returns unicode strings. if isinstance(ret, six.text_type): # We always fully escape \u2028 and \u2029 to ensure we output JSON # that is a strict javascript subset. If bytes were returned # by json.dumps() then we don't have these characters in any case. # See: http://timelessrepo.com/json-isnt-a-javascript-subset ret = ret.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029') return bytes(ret.encode('utf-8')) return ret
Render `data` into JSON, returning a bytestring.
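A hedged usage sketch, assuming this method belongs to the familiar JSONRenderer class (the class name is inferred from context, not shown above); with the compact separators the output is a UTF-8 bytestring:

renderer = JSONRenderer()           # class name assumed
print(renderer.render({'k': 'v'}))  # b'{"k":"v"}'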