code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_cdn_log_retention(self, container):
    """Return True if CDN log retention is enabled for *container*.

    Issues a HEAD request against the CDN endpoint for the container and
    inspects the ``X-Log-Retention`` response header.
    """
    resp, resp_body = self.api.cdn_request(
        "/%s" % utils.get_name(container), method="HEAD")
    # Default the header to "" so a missing header yields False instead of
    # raising AttributeError on None.
    return resp.headers.get("x-log-retention", "").lower() == "true"
Returns the status of the setting for CDN log retention for the specified container.
# HaarPSI weighting map (Reisenhofer et al. 2016, eqs. (11)/(13)): filter
# both images with the level-3 Haar pair -- high-pass along `axis`,
# low-pass along the other axis -- then take the pointwise maximum of the
# absolute responses. Uses the pyfftw backend when available. The stray
# `r` token is residue of the stripped r-docstring.
def haarpsi_weight_map(img1, img2, axis): r impl = 'pyfftw' if PYFFTW_AVAILABLE else 'numpy' dec_lo_lvl3 = np.repeat([np.sqrt(2), np.sqrt(2)], 4) dec_hi_lvl3 = np.repeat([-np.sqrt(2), np.sqrt(2)], 4) if axis == 0: fh_lvl3 = dec_hi_lvl3 fv_lvl3 = dec_lo_lvl3 elif axis == 1: fh_lvl3 = dec_lo_lvl3 fv_lvl3 = dec_hi_lvl3 else: raise ValueError('`axis` out of the valid range 0 -> 1') img1_lvl3 = filter_image_sep2d(img1, fh_lvl3, fv_lvl3, impl=impl) img2_lvl3 = filter_image_sep2d(img2, fh_lvl3, fv_lvl3, impl=impl) np.abs(img1_lvl3, out=img1_lvl3) np.abs(img2_lvl3, out=img2_lvl3) return np.maximum(img1_lvl3, img2_lvl3)
r"""Weighting map for directional features along an axis. Parameters ---------- img1, img2 : array-like The images to compare. They must have equal shape. axis : {0, 1} Direction in which to look for edge similarities. Returns ------- weight_map : `numpy.ndarray` The pointwise weight map. See Notes for details. Notes ----- The pointwise weight map of associated with input images :math:`f_1, f_2` and axis :math:`k` is defined as .. math:: \mathrm{W}_{f_1, f_2}^{(k)}(x) = \max \left\{ \left|g_3^{(k)} \ast f_1 \right|(x), \left|g_3^{(k)} \ast f_2 \right|(x) \right\}, see `[Rei+2016] <https://arxiv.org/abs/1607.06140>`_ equations (11) and (13). Here, :math:`g_3^{(k)}` is a Haar wavelet filter for scaling level 3 that performs high-pass filtering in axis :math:`k` and low-pass filtering in the other axes. Such a filter can be computed as :: f_lo_level1 = [np.sqrt(2), np.sqrt(2)] # low-pass Haar filter f_hi_level1 = [-np.sqrt(2), np.sqrt(2)] # high-pass Haar filter f_lo_level3 = np.repeat(f_lo_level1, 4) f_hi_level3 = np.repeat(f_hi_level1, 4) References ---------- [Rei+2016] Reisenhofer, R, Bosse, S, Kutyniok, G, and Wiegand, T. *A Haar Wavelet-Based Perceptual Similarity Index for Image Quality Assessment*. arXiv:1607.06140 [cs], Jul. 2016.
# Render this message's plain-text and HTML bodies via mold.cast with the
# given keyword substitutions. Returns (text_content, html_content); an
# element is None when the corresponding template (self.plain/self.html)
# is unset.
def body(self, **kwargs): text_content, html_content = None, None if self.plain: text_content = mold.cast(self.plain, **kwargs) if self.html: html_content = mold.cast(self.html, **kwargs) return text_content, html_content
Return the plain and html versions of our contents. Return: tuple Exceptions: None
# Block this GitLab user via POST /users/<id>/block, passing **kwargs
# (e.g. sudo) through to http_post. On success (server returns True) the
# local state is mirrored in _attrs['state']; the server response is
# returned either way.
def block(self, **kwargs): path = '/users/%s/block' % self.id server_data = self.manager.gitlab.http_post(path, **kwargs) if server_data is True: self._attrs['state'] = 'blocked' return server_data
Block the user. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabBlockError: If the user could not be blocked Returns: bool: Whether the user status has been changed
def getProcessor(self, request):
    """Return the request processor matching the AMF encoding of *request*.

    A missing or ``'null'`` target marks an AMF3 message; any other
    target is handled by the AMF0 processor. The processor module is
    imported lazily inside the branch, as in the rest of this file.
    """
    is_amf3 = request.target == 'null' or not request.target
    if is_amf3:
        from pyamf.remoting import amf3 as processor_module
    else:
        from pyamf.remoting import amf0 as processor_module
    return processor_module.RequestProcessor(self)
Returns request processor. @param request: The AMF message. @type request: L{Request<remoting.Request>}
# Normalize a package name to PEP 423 style (lowercase, '-' for '_'),
# leaving VCS/URI-scheme strings untouched.
# NOTE(review): `any(i not in name ...)` normalizes whenever at least one
# VCS/scheme marker is absent, so most names containing a marker are
# still normalized; `all(...)` may have been intended -- confirm against
# callers before changing.
def pep423_name(name): name = name.lower() if any(i not in name for i in (VCS_LIST + SCHEME_LIST)): return name.replace("_", "-") else: return name
Normalize package name to PEP 423 style standard.
# webargs error handler: translate a request-parsing failure into a JSON
# error response via Flask-RESTful's abort(), using the caller-supplied
# status code. `req`, `schema` and `error_headers` belong to the webargs
# handler signature but are unused here.
def handle_request_parsing_error(err, req, schema, error_status_code, error_headers): abort(error_status_code, errors=err.messages)
webargs error handler that uses Flask-RESTful's abort function to return a JSON error response to the client.
# Ensure the loaded configuration carries an application version:
# backfill the config.VERSION key with the package __version__ when it
# is absent.
def _prepare_version(self): if config.VERSION not in self._config: self._config[config.VERSION] = __version__
Setup the application version
def add_section(self, section_name):
    """Create a new, empty report section headed by *section_name*.

    Text and images can later be added to it via the ``section``
    argument of ``add_text`` / ``add_image``.

    Raises:
        ValueError: if a section with this name already exists.
    """
    # Validate before mutating: the original appended to section_headings
    # first, so a duplicate name raised but left a stray heading behind,
    # leaving the two structures inconsistent.
    if section_name in self.sections:
        raise ValueError("Section %s already exists." % section_name)
    self.section_headings.append(section_name)
    self.sections[section_name] = []
    return
Create a section of the report, to be headed by section_name Text and images can be added by using the `section` argument of the `add_text` and `add_image` methods. Sections can also be ordered by using the `set_section_order` method. By default, text and images that have no section will be placed after all the sections, in the order they were added. This behavior may be altered using the `sections_first` attribute of the `make_report` method.
# Locate the playitagainsam ("pias") command-line script. Resolution
# order: the currently running script if it is pias itself, then a
# `pias` executable found via find_executable (optionally with an
# overridden environment), then the package's own __main__.py. Raises
# RuntimeError if all three fail.
def get_pias_script(environ=None): if os.path.basename(sys.argv[0]) == "pias": return sys.argv[0] filepath = find_executable("pias", environ) if filepath is not None: return filepath filepath = os.path.join(os.path.dirname(__file__), "__main__.py") if os.path.exists(filepath): return filepath raise RuntimeError("Could not locate the pias script.")
Get the path to the playitagainsam command-line script.
def _add_punctuation_spacing(self, input):
    """Apply the punct_spacing substitution table to *input*.

    Each table entry is a (pattern, replacement) pair applied in order,
    e.g. inserting an extra space after a fullwidth full stop.
    """
    result = input
    for rule in punct_spacing:
        result = re.sub(rule[0], rule[1], result)
    return result
Adds additional spacing to punctuation characters. For example, this puts an extra space after a fullwidth full stop.
def _translate_page_into(page, language, default=None): if page.language == language: return page translations = dict((t.language, t) for t in page.available_translations()) translations[page.language] = page if language in translations: return translations[language] else: if hasattr(default, '__call__'): return default(page=page) return default
Return the translation for a given page
# Return whether an OC attribute still holds its default value: either it
# was never changed and has no default, or it compares equal to its
# default. Relies on the attribute type's == semantics against
# o.default().
def oc_attr_isdefault(o): if not o._changed() and not o.default(): return True if o == o.default(): return True return False
Return whether an OC attribute is still set to its default value.
# IRC command: enumerate live threads sorted by thread id. Worker threads
# (target named `_worker`) and multiprocessing pool threads get
# descriptive labels; everything else is listed by name. Relies on the
# private Thread._target attribute -- CPython-specific.
# Syntax: {command}
def cmd(send, *_): thread_names = [] for x in sorted(threading.enumerate(), key=lambda k: k.name): res = re.match(r'Thread-(\d+$)', x.name) if res: tid = int(res.group(1)) if x._target.__name__ == '_worker': thread_names.append((tid, "%s running server thread" % x.name)) elif x._target.__module__ == 'multiprocessing.pool': thread_names.append((tid, "%s running multiprocessing pool worker thread" % x.name)) else: res = re.match(r'Thread-(\d+)', x.name) tid = 0 if res: tid = int(res.group(1)) thread_names.append((tid, x.name)) for x in sorted(thread_names, key=lambda k: k[0]): send(x[1])
Enumerate threads. Syntax: {command}
# Resolve the Task class configured for this scenario: look up the root
# task type (first element of self.task) in the task-type registry. The
# registry is imported inside the function (presumably to avoid an
# import cycle with scenario_player.tasks -- confirm).
def task_class(self): from scenario_player.tasks.base import get_task_class_for_type root_task_type, _ = self.task task_class = get_task_class_for_type(root_task_type) return task_class
Return the Task class type configured for the scenario.
# Wrap an IPython Prompts class so iTerm2 shell-integration escape
# sequences (last command status + BEFORE_PROMPT / AFTER_PROMPT) surround
# the input prompt tokens. Falls back to the unmodified class when
# prompt_toolkit's ZeroWidthEscape token is unavailable.
def wrap_prompts_class(Klass): try: from prompt_toolkit.token import ZeroWidthEscape except ImportError: return Klass class ITerm2IPythonPrompt(Klass): def in_prompt_tokens(self, cli=None): return [ (ZeroWidthEscape, last_status(self.shell)+BEFORE_PROMPT), ]+\ super(ITerm2IPythonPrompt, self).in_prompt_tokens(cli)+\ [(ZeroWidthEscape, AFTER_PROMPT)] return ITerm2IPythonPrompt
Wrap an IPython's Prompt class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations.
def Contains(self, other):
    """Return True if *other* (a Range or a scalar) lies within this range.

    Raises:
        ValueError: if *other* is None.
    """
    if other is None:
        raise ValueError("other is None.")
    if not isinstance(other, Range):
        # Treat a scalar as the degenerate range [other, other].
        other = Range(other, other)
    return other.low >= self.low and other.high <= self.high
Checks if the passed parameter is in the range of this object.
def send_message_batch(self, queue, messages):
    """Deliver up to 10 messages to *queue* in one SendMessageBatch call.

    Each element of *messages* is (id, body, delay_seconds): a unique ID
    string, the message body, and the delivery delay in seconds.
    """
    params = {}
    for n, msg in enumerate(messages, start=1):
        prefix = 'SendMessageBatchRequestEntry.%i.' % n
        params[prefix + 'Id'] = msg[0]
        params[prefix + 'MessageBody'] = msg[1]
        params[prefix + 'DelaySeconds'] = msg[2]
    return self.get_object('SendMessageBatch', params, BatchResults,
                           queue.id, verb='POST')
Delivers up to 10 messages to a queue in a single request. :type queue: A :class:`boto.sqs.queue.Queue` object. :param queue: The Queue to which the messages will be written. :type messages: List of lists. :param messages: A list of lists or tuples. Each inner tuple represents a single message to be written and consists of and ID (string) that must be unique within the list of messages, the message body itself which can be a maximum of 64K in length, and an integer which represents the delay time (in seconds) for the message (0-900) before the message will be delivered to the queue.
# Qt keyReleaseEvent override implementing "most recently used" tab
# switching: while the switcher is visible, releasing the Ctrl/Alt key
# bound to the 'Go to next file' editor shortcut commits the highlighted
# item. The event is always accepted.
def keyReleaseEvent(self, event): if self.isVisible(): qsc = get_shortcut(context='Editor', name='Go to next file') for key in qsc.split('+'): key = key.lower() if ((key == 'ctrl' and event.key() == Qt.Key_Control) or (key == 'alt' and event.key() == Qt.Key_Alt)): self.item_selected() event.accept()
Reimplement Qt method. Handle "most recently used" tab behavior: when Ctrl is released and the tab switcher is visible, the tab is changed.
# ABCI Info handler. Always reports version "1.0", height 0 and an empty
# app hash, which makes Tendermint replay (resync) the chain into this
# app from the beginning.
def info(self, req) -> ResponseInfo: r = ResponseInfo() r.version = "1.0" r.last_block_height = 0 r.last_block_app_hash = b'' return r
Since this will always respond with height=0, Tendermint will resync this app from the beginning.
# Product of several FactorSet instances, computed by left-folding the
# pairwise FactorSet.product (out-of-place). Raises TypeError when any
# argument is not a FactorSet. The stray `r` token is residue of the
# stripped r-docstring.
def factorset_product(*factorsets_list): r if not all(isinstance(factorset, FactorSet) for factorset in factorsets_list): raise TypeError("Input parameters must be FactorSet instances") return reduce(lambda x, y: x.product(y, inplace=False), factorsets_list)
r""" Base method used for product of factor sets. Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets then their product is a another factors set :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`. Parameters ---------- factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn All the factor sets to be multiplied Returns ------- Product of factorset in factorsets_list Examples -------- >>> from pgmpy.factors import FactorSet >>> from pgmpy.factors.discrete import DiscreteFactor >>> from pgmpy.factors import factorset_product >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8)) >>> factor_set1 = FactorSet(phi1, phi2) >>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8)) >>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8)) >>> factor_set2 = FactorSet(phi3, phi4) >>> factor_set3 = factorset_product(factor_set1, factor_set2) >>> print(factor_set3) set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7fb3a1933e90>, <DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7fb3a1933f10>, <DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7fb3a1933f90>, <DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7fb3a1933e10>])
# Apply *transaction* on top of the current tip block and advance
# self.header to the resulting block. Steps: run the tx through the VM,
# fold the receipt into the header, persist state, stamp the new state
# root, and append tx+receipt to the block. Returns
# (new_block, receipt, computation). Statement order matters here
# (persist before reading state_root), so the body is left untouched.
def apply_transaction(self, transaction: BaseTransaction ) -> Tuple[BaseBlock, Receipt, BaseComputation]: vm = self.get_vm(self.header) base_block = vm.block receipt, computation = vm.apply_transaction(base_block.header, transaction) header_with_receipt = vm.add_receipt_to_header(base_block.header, receipt) vm.state.persist() new_header = header_with_receipt.copy(state_root=vm.state.state_root) transactions = base_block.transactions + (transaction, ) receipts = base_block.get_receipts(self.chaindb) + (receipt, ) new_block = vm.set_block_transactions(base_block, new_header, transactions, receipts) self.header = new_block.header return new_block, receipt, computation
Applies the transaction to the current tip block. WARNING: Receipt and Transaction trie generation is computationally heavy and incurs significant performance overhead.
def reversed(self):
    """Return a copy of this CubicBezier with its orientation reversed.

    Cached length info, when present, is carried over with the control
    points listed in reversed order.
    """
    new_cub = CubicBezier(self.end, self.control2, self.control1,
                          self.start)
    if self._length_info['length']:
        # Copy the dict before editing: the original assigned the same
        # dict object and then overwrote 'bpoints', silently corrupting
        # self._length_info as well.
        new_cub._length_info = dict(self._length_info)
        new_cub._length_info['bpoints'] = (
            self.end, self.control2, self.control1, self.start)
    return new_cub
returns a copy of the CubicBezier object with its orientation reversed.
def execute(self, conn, data_tier_name='', transaction=False, cache=None):
    """Return formatted data-tier rows, optionally filtered by name.

    A '%' in *data_tier_name* switches the filter operator from '=' to
    LIKE. When *cache* is supplied, results are served from its
    "DATA_TIERS" entry if present.
    """
    if cache:
        cached = cache.get("DATA_TIERS")
        # Identity test instead of the old `not ret == None`.
        if cached is not None:
            return cached
    sql = self.sql
    binds = {}
    if data_tier_name:
        # Only the operator is interpolated ('=' or 'like'); the name
        # itself goes through a bind variable, so no injection risk here.
        op = 'like' if '%' in data_tier_name else '='
        sql += "WHERE DT.DATA_TIER_NAME %s :datatier" % op
        binds = {"datatier": data_tier_name}
    result = self.dbi.processData(sql, binds, conn, transaction)
    return self.formatDict(result)
returns id for a given datatier name
def _check_type_and_load_cert(self, msg, key_type, cert_type):
    """Perform message type-checking & optional certificate loading.

    For certificate messages, loads the certificate and fast-forwards
    *msg* past the nonce so the subsequent fields are the key numbers
    either way.

    Raises:
        SSHException: if *msg* is None or its type string matches
            neither *key_type* nor *cert_type* (each may be a string or
            a list of strings).
    """
    key_types = key_type
    cert_types = cert_type
    if isinstance(key_type, string_types):
        key_types = [key_types]
    if isinstance(cert_types, string_types):
        cert_types = [cert_types]
    if msg is None:
        raise SSHException("Key object may not be empty")
    msg.rewind()
    type_ = msg.get_text()
    if type_ in key_types:
        pass
    elif type_ in cert_types:
        self.load_certificate(Message(msg.asbytes()))
        msg.get_string()  # skip the nonce
    else:
        # Fixed: the message template was missing its closing parenthesis.
        err = "Invalid key (class: {}, data type: {})"
        raise SSHException(err.format(self.__class__.__name__, type_))
Perform message type-checking & optional certificate loading. This includes fast-forwarding cert ``msg`` objects past the nonce, so that the subsequent fields are the key numbers; thus the caller may expect to treat the message as key material afterwards either way. The obtained key type is returned for classes which need to know what it was (e.g. ECDSA.)
def export_stl_ascii(mesh):
    """Convert a Trimesh object into an ASCII STL string.

    Parameters
    ----------
    mesh : trimesh.Trimesh

    Returns
    -------
    str
        The mesh rendered as an ASCII STL file.
    """
    face_count = len(mesh.faces)
    # Pack normals (row 0) and the three triangle vertices (rows 1-3)
    # into one (n, 4, 3) array so a single format() call renders all
    # facets at once.
    packed = np.zeros((face_count, 4, 3))
    packed[:, 0, :] = mesh.face_normals
    packed[:, 1:, :] = mesh.triangles
    facet_template = ('facet normal {} {} {}\nouter loop\n'
                      + 'vertex {} {} {}\n' * 3
                      + 'endloop\nendfacet\n')
    body = (facet_template * face_count).format(*packed.reshape(-1))
    return 'solid \n' + body + 'endsolid'
Convert a Trimesh object into an ASCII STL file. Parameters --------- mesh : trimesh.Trimesh Returns --------- export : str Mesh represented as an ASCII STL file
def build_graph(formula):
    """Build the 2-SAT implication graph for *formula*.

    Every literal occurring in any clause gets a node for each polarity;
    each binary clause (a, b) contributes the implications not-a -> b
    and not-b -> a via add_edge.
    """
    graph = {}
    # First pass: an (empty) adjacency list for both polarities of every
    # literal mentioned anywhere in the formula.
    for clause in formula:
        for lit, _ in clause:
            graph[(lit, False)] = []
            graph[(lit, True)] = []
    # Second pass: the two implication edges per clause.
    for (a_lit, a_neg), (b_lit, b_neg) in formula:
        add_edge(graph, (a_lit, a_neg), (b_lit, not b_neg))
        add_edge(graph, (b_lit, b_neg), (a_lit, not a_neg))
    return graph
Builds the implication graph from the formula
# Load hyperparameters from a JSON file. Modality-override keys
# (bottom/loss/name/top/weights_fn) are dropped before constructing the
# HParams object. When a base `hparams` is supplied, its overlapping keys
# are overwritten in place (with logging) and that object is returned;
# otherwise the freshly loaded HParams is returned.
def create_hparams_from_json(json_path, hparams=None): tf.logging.info("Loading hparams from existing json %s" % json_path) with tf.gfile.Open(json_path, "r") as f: hparams_values = json.load(f) hparams_values.pop("bottom", None) hparams_values.pop("loss", None) hparams_values.pop("name", None) hparams_values.pop("top", None) hparams_values.pop("weights_fn", None) new_hparams = hparam.HParams(**hparams_values) if hparams: for key in sorted(new_hparams.values().keys()): if hasattr(hparams, key): value = getattr(hparams, key) new_value = getattr(new_hparams, key) if value != new_value: tf.logging.info("Overwrite key %s: %s -> %s" % ( key, value, new_value)) setattr(hparams, key, new_value) else: hparams = new_hparams return hparams
Loading hparams from json; can also start from hparams if specified.
def point(self, pos):
    """Evaluate this cubic Bezier at parameter *pos* in [0, 1]."""
    t = pos
    u = 1 - t
    # Bernstein form: u^3 P0 + 3 u^2 t P1 + 3 u t^2 P2 + t^3 P3.
    return (u ** 3 * self.start
            + 3 * u ** 2 * t * self.control1
            + 3 * u * t ** 2 * self.control2
            + t ** 3 * self.end)
Calculate the x,y position at a certain position of the path
# Expand spherical-harmonic coefficients `flm` in this object's Slepian
# basis, returning the first `nmax` expansion coefficients (default and
# maximum: (lmax+1)**2). Raises ValueError when nmax exceeds the basis
# size. Input coefficients are converted to 4pi normalization with
# csphase=1 before the internal _expand call.
def expand(self, flm, nmax=None): if nmax is None: nmax = (self.lmax+1)**2 elif nmax is not None and nmax > (self.lmax+1)**2: raise ValueError( "nmax must be less than or equal to (lmax+1)**2 " + "where lmax is {:s}. Input value is {:s}" .format(repr(self.lmax), repr(nmax)) ) coeffsin = flm.to_array(normalization='4pi', csphase=1, lmax=self.lmax) return self._expand(coeffsin, nmax)
Return the Slepian expansion coefficients of the input function. Usage ----- s = x.expand(flm, [nmax]) Returns ------- s : SlepianCoeff class instance The Slepian expansion coefficients of the input function. Parameters ---------- flm : SHCoeffs class instance The input function to expand in Slepian functions. nmax : int, optional, default = (x.lmax+1)**2 The number of Slepian expansion coefficients to compute. Description ----------- The global function f is input using its spherical harmonic expansion coefficients flm. The expansion coefficients of the function f using Slepian functions g is given by f_alpha = sum_{lm}^{lmax} f_lm g(alpha)_lm
# Django migration 'backwards' step: delete the initial RecurrenceRule
# rows (matched by the descriptions listed in the module-level RULES)
# from icekit_events.
def backwards(apps, schema_editor): RecurrenceRule = apps.get_model('icekit_events', 'RecurrenceRule') descriptions = [d for d, rr in RULES] RecurrenceRule.objects.filter(description__in=descriptions).delete()
Delete initial recurrence rules.
# Fetch an existing update campaign by ID and wrap the raw API payload
# in a Campaign object.
def get_campaign(self, campaign_id): api = self._get_api(update_service.DefaultApi) return Campaign(api.update_campaign_retrieve(campaign_id))
Get existing update campaign. :param str campaign_id: Campaign ID to retrieve (Required) :return: Update campaign object matching provided ID :rtype: Campaign
def _prepare_lsm_gag(self): lsm_required_vars = (self.lsm_precip_data_var, self.lsm_precip_type) return self.lsm_input_valid and (None not in lsm_required_vars)
Determines whether to prepare gage data from LSM
def to_rgba(color, alpha):
    """Convert a hex/rgb/rgba color string to an 'rgba(...)' string.

    Parameters
    ----------
    color : str or tuple
        Color as hex ('#f03'), 'rgb(r,g,b)' or 'rgba(r,g,b,a)'. A
        (color, alpha) tuple overrides the *alpha* argument.
    alpha : float or None
        Opacity from 0 to 1.0; None keeps an existing rgba alpha.
    """
    if type(color) == tuple:
        color, alpha = color
    color = color.lower()
    if 'rgba' in color:
        # NOTE(review): eval() on the color string executes arbitrary
        # expressions -- never feed untrusted input to this function.
        cl = list(eval(color.replace('rgba', '')))
        # Compare against None so alpha=0 (fully transparent) is honored;
        # the old truthiness test silently ignored it.
        if alpha is not None:
            cl[3] = alpha
        return 'rgba' + str(tuple(cl))
    elif 'rgb' in color:
        r, g, b = eval(color.replace('rgb', ''))
        return 'rgba' + str((r, g, b, alpha))
    else:
        return to_rgba(hex_to_rgb(color), alpha)
Converts from hex|rgb to rgba Parameters: ----------- color : string Color representation on hex or rgb alpha : float Value from 0 to 1.0 that represents the alpha value. Example: to_rgba('#E1E5ED',0.6) to_rgba('#f03',0.7) to_rgba('rgb(23,23,23)',.5)
# CLI: list active EMR clusters (RUNNING/STARTING/BOOTSTRAPPING/WAITING)
# for the configured AWS profile, echoing "<id>\t<name>\t<state>" lines.
# NOTE(review): list_clusters is paginated; only the first page appears
# to be shown -- confirm whether that is acceptable.
def ls(ctx, name): session = create_session(ctx.obj['AWS_PROFILE_NAME']) client = session.client('emr') results = client.list_clusters( ClusterStates=['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'WAITING'] ) for cluster in results['Clusters']: click.echo("{0}\t{1}\t{2}".format(cluster['Id'], cluster['Name'], cluster['Status']['State']))
List EMR instances
# Lazily create the process-wide _RandomNameSequence under _once_lock
# (double-checked locking) and return it. Common setup sequence for all
# user-callable interfaces.
def _get_candidate_names(): global _name_sequence if _name_sequence is None: _once_lock.acquire() try: if _name_sequence is None: _name_sequence = _RandomNameSequence() finally: _once_lock.release() return _name_sequence
Common setup sequence for all user-callable interfaces.
def reply(self, message: typing.Union[int, types.Message]):
    """Mark this object as a reply to *message*.

    :param message: a message id, or a Message whose id is used
    :return: self, for chaining
    """
    if isinstance(message, types.Message):
        target_id = message.message_id
    else:
        target_id = message
    self.reply_to_message_id = target_id
    return self
Reply to message :param message: :obj:`int` or :obj:`types.Message` :return: self
# Decide where config/data lives: $OT_API_CONFIG_DIR when set, /data on a
# robot, else the first of (cwd, ~/.opentrons) that already contains the
# config file, defaulting to ~/.opentrons when neither does. The returned
# directory may not exist yet.
def infer_config_base_dir() -> Path: if 'OT_API_CONFIG_DIR' in os.environ: return Path(os.environ['OT_API_CONFIG_DIR']) elif IS_ROBOT: return Path('/data') else: search = (Path.cwd(), Path.home()/'.opentrons') for path in search: if (path/_CONFIG_FILENAME).exists(): return path else: return search[-1]
Return the directory to store data in. Defaults are ~/.opentrons if not on a pi; OT_API_CONFIG_DIR is respected here. When this module is imported, this function is called automatically and the result stored in :py:attr:`APP_DATA_DIR`. This directory may not exist when the module is imported. Even if it does exist, it may not contain data, or may require data to be moved to it. :return pathlib.Path: The path to the desired root settings dir.
def _decode_addr_key(self, obj_dict): key = b'Addr' if key in obj_dict: try: ip_addr = socket.inet_ntop(socket.AF_INET6, obj_dict[key]) if ip_addr.startswith('::ffff:'): ip_addr = ip_addr.lstrip('::ffff:') obj_dict[key] = ip_addr.encode('utf-8') except ValueError: ip_addr = socket.inet_ntop(socket.AF_INET, obj_dict[key]) obj_dict[key] = ip_addr.encode('utf-8') return obj_dict
Callback function to handle the decoding of the 'Addr' field. Serf msgpack 'Addr' as an IPv6 address, and the data needs to be unpack using socket.inet_ntop(). See: https://github.com/KushalP/serfclient-py/issues/20 :param obj_dict: A dictionary containing the msgpack map. :return: A dictionary with the correct 'Addr' format.
def _validate_signal(self, sig): if not isinstance(sig, int): raise TypeError('sig must be an int, not {!r}'.format(sig)) if signal is None: raise RuntimeError('Signals are not supported') if not (1 <= sig < signal.NSIG): raise ValueError('sig {} out of range(1, {})'.format(sig, signal.NSIG)) if sys.platform == 'win32': raise RuntimeError('Signals are not really supported on Windows')
Internal helper to validate a signal. Raise ValueError if the signal number is invalid or uncatchable. Raise RuntimeError if there is a problem setting up the handler.
def wb010(self, value=None):
    """Set IDD Field `wb010` (wet-bulb temperature, in C, at 1.0% annual
    cumulative frequency of occurrence).

    A None value is stored as-is (missing value); anything else must be
    convertible to float.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is None:
        self._wb010 = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `wb010`'.format(value))
    self._wb010 = converted
Corresponds to IDD Field `wb010` Wet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `wb010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
# Second derivatives of complex branch current w.r.t. voltage, using
# cvxopt sparse matrices (spdiag/spmatrix). `lam` holds the Lagrange
# multipliers. Returns the four Hessian blocks (Haa, Hav, Hva, Hvv);
# Hvv is identically zero and Hav aliases Hva.
def d2Ibr_dV2(Ybr, V, lam): nb = len(V) diaginvVm = spdiag(div(matrix(1.0, (nb, 1)), abs(V))) Haa = spdiag(mul(-(Ybr.T * lam), V)) Hva = -1j * Haa * diaginvVm Hav = Hva Hvv = spmatrix([], [], [], (nb, nb)) return Haa, Hav, Hva, Hvv
Computes 2nd derivatives of complex branch current w.r.t. voltage.
# Register the Ceph MGR subcommand parser: a required 'create' subparser
# taking one or more HOST[:NAME] targets (parsed by colon_separated), and
# wire the module's `mgr` function as the handler via set_defaults.
def make(parser): mgr_parser = parser.add_subparsers(dest='subcommand') mgr_parser.required = True mgr_create = mgr_parser.add_parser( 'create', help='Deploy Ceph MGR on remote host(s)' ) mgr_create.add_argument( 'mgr', metavar='HOST[:NAME]', nargs='+', type=colon_separated, help='host (and optionally the daemon name) to deploy on', ) parser.set_defaults( func=mgr, )
Ceph MGR daemon management
# Poll the shared-constant registry for `name`, pumping the control
# socket each iteration, until the value appears or `timeout` seconds
# elapse; returns the value, or None on timeout. The constants dict is
# rebuilt every pass by flattening the module-level `elements` registry.
def getConst(name, timeout=0.1): from . import _control import time timeStamp = time.time() while True: _control.execQueue.socket.pumpInfoSocket() constants = dict(reduce( lambda x, y: x + list(y.items()), elements.values(), [] )) timeoutHappened = time.time() - timeStamp > timeout if constants.get(name) is not None or timeoutHappened: return constants.get(name) time.sleep(0.01)
Get a shared constant. :param name: The name of the shared variable to retrieve. :param timeout: The maximum time to wait in seconds for the propagation of the constant. :returns: The shared object. Usage: value = getConst('name')
def _seed(self, seed=-1): if seed != -1: self._random = np.random.RandomState(seed) else: self._random = np.random.RandomState()
Initialize the random seed
# Return the components of the component definition at `uri`, ordered by
# each sequence annotation's first_location.
# NOTE(review): returns False -- not an empty list -- when the URI is
# unknown; callers must handle the mixed return type.
def get_components(self, uri): try: component_definition = self._components[uri] except KeyError: return False sorted_sequences = sorted(component_definition.sequence_annotations, key=attrgetter('first_location')) return [c.component for c in sorted_sequences]
Get components from a component definition in order
# Snapshot the mopidy core state (current track, playback state, time
# position), push it as a 'status_report' webhook, then schedule the
# next report via report_again.
def report_status(self): current_status = { 'current_track': self.core.playback.current_track.get(), 'state': self.core.playback.state.get(), 'time_position': self.core.playback.time_position.get(), } send_webhook(self.config, {'status_report': current_status}) self.report_again(current_status)
Get status of player from mopidy core and send webhook.
# Create a new data archive: build its metadata, then either create it
# unconditionally (raise_on_err=True, erroring on duplicates) or only if
# absent, and return the resulting DataArchive via get_archive.
def create_archive( self, archive_name, authority_name, archive_path, versioned, raise_on_err=True, metadata=None, user_config=None, tags=None, helper=False): archive_metadata = self._create_archive_metadata( archive_name=archive_name, authority_name=authority_name, archive_path=archive_path, versioned=versioned, raise_on_err=raise_on_err, metadata=metadata, user_config=user_config, tags=tags, helper=helper) if raise_on_err: self._create_archive( archive_name, archive_metadata) else: self._create_if_not_exists( archive_name, archive_metadata) return self.get_archive(archive_name)
Create a new data archive Returns ------- archive : object new :py:class:`~datafs.core.data_archive.DataArchive` object
# Find the most recent GOOD release of *build* and, within it, the run
# named by the request's required 'run_name' form field. Returns a
# (last_good_release, last_good_run) tuple; either element may be None.
def _find_last_good_run(build): run_name = request.form.get('run_name', type=str) utils.jsonify_assert(run_name, 'run_name required') last_good_release = ( models.Release.query .filter_by( build_id=build.id, status=models.Release.GOOD) .order_by(models.Release.created.desc()) .first()) last_good_run = None if last_good_release: logging.debug('Found last good release for: build_id=%r, ' 'release_name=%r, release_number=%d', build.id, last_good_release.name, last_good_release.number) last_good_run = ( models.Run.query .filter_by(release_id=last_good_release.id, name=run_name) .first()) if last_good_run: logging.debug('Found last good run for: build_id=%r, ' 'release_name=%r, release_number=%d, ' 'run_name=%r', build.id, last_good_release.name, last_good_release.number, last_good_run.name) return last_good_release, last_good_run
Finds the last good release and run for a build.
# vobject serializer: convert native date/datetime values in obj.value
# back to iCalendar strings, joined with ','. All-date values get
# VALUE=DATE; for datetimes, the first tz-aware value's zone is
# registered and recorded as TZID. Order-sensitive conversion -- body
# left untouched.
def transformFromNative(obj): if obj.value and type(obj.value[0]) == datetime.date: obj.isNative = False obj.value_param = 'DATE' obj.value = ','.join([dateToString(val) for val in obj.value]) return obj else: if obj.isNative: obj.isNative = False transformed = [] tzid = None for val in obj.value: if tzid is None and type(val) == datetime.datetime: tzid = TimezoneComponent.registerTzinfo(val.tzinfo) if tzid is not None: obj.tzid_param = tzid transformed.append(dateTimeToString(val)) obj.value = ','.join(transformed) return obj
Replace the date, datetime or period tuples in obj.value with appropriate strings.
def fax(self):
    """Access the Fax Twilio Domain (constructed lazily, then cached).

    :returns: Fax Twilio Domain
    :rtype: twilio.rest.fax.Fax
    """
    if self._fax is not None:
        return self._fax
    from twilio.rest.fax import Fax
    self._fax = Fax(self)
    return self._fax
Access the Fax Twilio Domain :returns: Fax Twilio Domain :rtype: twilio.rest.fax.Fax
def add_user_js(self, js_list):
    """Add supplementary user javascript files to the presentation.

    ``js_list`` may be a single path/URL or a list of them. Remote URLs
    are referenced as-is; local files are read into the presentation.
    Empty entries and duplicates are skipped.

    Raises:
        IOError: if a local path does not exist.
    """
    if isinstance(js_list, string_types):
        js_list = [js_list]
    for js_path in js_list:
        if not js_path or js_path in self.user_js:
            continue
        # Recognize https and protocol-relative URLs too; previously only
        # "http:" was accepted, so https URLs raised IOError as missing
        # local files.
        if js_path.startswith(('http:', 'https:', '//')):
            self.user_js.append({
                'path_url': js_path,
                'contents': '',
            })
        elif not os.path.exists(js_path):
            raise IOError('%s user js file not found' % (js_path,))
        else:
            with codecs.open(js_path, encoding=self.encoding) as js_file:
                self.user_js.append({
                    'path_url': utils.get_path_url(js_path, self.relative),
                    'contents': js_file.read(),
                })
Adds supplementary user javascript files to the presentation. The ``js_list`` arg can be either a ``list`` or a string.
# Call LINE's "unlink rich menu from user" API
# (DELETE /v2/bot/user/{user_id}/richmenu). `timeout` overrides the
# client's default request timeout for this call.
def unlink_rich_menu_from_user(self, user_id, timeout=None): self._delete( '/v2/bot/user/{user_id}/richmenu'.format(user_id=user_id), timeout=timeout )
Call unlink rich menu from user API. https://developers.line.me/en/docs/messaging-api/reference/#unlink-rich-menu-from-user :param str user_id: ID of the user :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float)
def _init_itemid2name(self): if not hasattr(self.args, 'id2sym'): return None fin_id2sym = self.args.id2sym if fin_id2sym is not None and os.path.exists(fin_id2sym): id2sym = {} cmpl = re.compile(r'^\s*(\S+)[\s,;]+(\S+)') with open(fin_id2sym) as ifstrm: for line in ifstrm: mtch = cmpl.search(line) if mtch: id2sym[mtch.group(1)] = mtch.group(2) return id2sym
Print gene symbols instead of gene IDs, if provided.
# floc_model: fraction of clay surface covered by coagulant nanoglobs.
# Combines the aluminum/clay floc volume fractions, loss to the tube
# walls (clay-to-total-area ratio), and a Poisson coverage model
# (1 - exp(-x)). Quantities carry pint units (u.kg/u.m**3). Formula-
# dense and unit-sensitive, so the body is left untouched.
def gamma_coag(ConcClay, ConcAluminum, coag, material, DiamTube, RatioHeightDiameter): return (1 - np.exp(( (-frac_vol_floc_initial(ConcAluminum, 0*u.kg/u.m**3, coag, material) * material.Diameter) / (frac_vol_floc_initial(0*u.kg/u.m**3, ConcClay, coag, material) * coag.Diameter)) * (1 / np.pi) * (ratio_area_clay_total(ConcClay, material, DiamTube, RatioHeightDiameter) / ratio_clay_sphere(RatioHeightDiameter)) ))
Return the coverage of clay with nanoglobs. This function accounts for loss to the tube flocculator walls and a poisson distribution on the clay given random hits by the nanoglobs. The poisson distribution results in the coverage only gradually approaching full coverage as coagulant dose increases. :param ConcClay: Concentration of clay in suspension :type ConcClay: float :param ConcAluminum: Concentration of aluminum in solution :type ConcAluminum: float :param coag: Type of coagulant in solution, e.g. floc_model.PACl :type coag: floc_model.Material :param material: Type of clay in suspension, e.g. floc_model.Clay :type material: floc_model.Material :param DiamTube: Diameter of flocculator tube (assumes tube flocculator for calculation of reactor surface area) :type DiamTube: float :param RatioHeightDiameter: Dimensionless ratio of clay height to clay diameter :type RatioHeightDiameter: float :return: Fraction of the clay surface area that is coated with coagulant precipitates :rtype: float
# Verify an NTLM message signature received from the server: extract the
# checksum and sequence number (field offsets differ under extended
# session security), recompute the expected signature with the incoming
# keys, and raise on any mismatch. Increments incoming_seq_num on
# success. Protocol-exact byte offsets -- body left untouched.
def _verify_signature(self, message, signature): if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY: actual_checksum = signature[4:12] actual_seq_num = struct.unpack("<I", signature[12:16])[0] else: actual_checksum = signature[8:12] actual_seq_num = struct.unpack("<I", signature[12:16])[0] expected_signature = calc_signature(message, self.negotiate_flags, self.incoming_signing_key, self.incoming_seq_num, self.incoming_handle) expected_checksum = expected_signature.checksum expected_seq_num = struct.unpack("<I", expected_signature.seq_num)[0] if actual_checksum != expected_checksum: raise Exception("The signature checksum does not match, message has been altered") if actual_seq_num != expected_seq_num: raise Exception("The signature sequence number does not match up, message not received in the correct sequence") self.incoming_seq_num += 1
Will verify that the signature received from the server matches up with the expected signature computed locally. Will throw an exception if they do not match @param message: The message data that is received from the server @param signature: The signature of the message received from the server
def _create_table_xml_file(self, data, fname=None):
    """Write *data* as pretty-printed XML to ``<fname>.xml``.

    Falls back to the table's own name when *fname* is falsy.
    """
    pretty = self._xml_pretty_print(data)
    target = (fname or self.name) + ".xml"
    with open(target, 'w') as out:
        out.write(pretty)
Creates a xml file of the table
def _is_related(parent_entry, child_entry): if parent_entry.header.mft_record == child_entry.header.base_record_ref and \ parent_entry.header.seq_number == child_entry.header.base_record_seq: return True else: return False
This function checks if a child entry is related to the parent entry. This is done by comparing the reference and sequence numbers.
def decodeMotorInput(self, motorInputPattern):
    """Decode a motor command from its encoded bit vector.

    @param motorInputPattern (1D numpy.array) Encoded motor command.
    @return Decoded motor command.
    """
    # Decode once and reuse the result: the original invoked the decoder
    # twice on the same pattern for no benefit.
    fields = self.motorEncoder.decode(motorInputPattern)[0]
    # Python 2 style .keys() indexing, consistent with this codebase.
    key = fields.keys()[0]
    motorCommand = fields[key][1][0]
    return motorCommand
Decode motor command from bit vector. @param motorInputPattern (1D numpy.array) Encoded motor command. @return (1D numpy.array) Decoded motor command.
def generate_seed(seed):
    """Seed the global ``random`` module and return the seed used.

    When *seed* is None, a fresh seed is drawn from system entropy
    before being applied.
    """
    chosen = seed
    if chosen is None:
        random.seed()
        chosen = random.randint(0, sys.maxsize)
    random.seed(a=chosen)
    return chosen
Generate seed for random number generator
# Entry point (Python 2 syntax -- bare print statements). With a
# filename: verify it exists (exit 1 otherwise) and tail -f it through
# process_log, printing each result; with no filename, process stdin.
def main(filename): if filename: if not os.path.exists(filename): logger.error("'%s' doesn't exists!" % filename) sys.stderr.write("'%s' doesn't exists!\n" % filename) sys.exit(1) logger.info("Processing '%s'" % filename) for ir in process_log(sh.tail("-f", filename, _iter=True)): print ir else: logger.info("Processing stdin.") for ir in process_log(_read_stdin()): print ir
Open `filename` and start processing it line by line. If `filename` is none, process lines from `stdin`.
def buttonDown(self, button=mouse.LEFT):
    """Hold down the specified mouse button.

    Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT.
    """
    # Use the lock as a context manager so it is released even when
    # mouse.press() raises; the old acquire()/release() pair leaked the
    # lock on error and could deadlock later callers.
    with self._lock:
        mouse.press(button)
Holds down the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT
def pandas_df_to_biopython_seqrecord(
    df,
    id_col='uid',
    sequence_col='sequence',
    extra_data=None,
    alphabet=None,
):
    """Convert a pandas DataFrame to a list of Biopython SeqRecords.

    Parameters
    ----------
    df : DataFrame
        Rows to convert; one SeqRecord per row.
    id_col : str
        Column used as the record id.
    sequence_col : str
        Column holding the sequence data.
    extra_data : list, optional
        Extra columns joined into the description line.
    alphabet : Biopython Alphabet object, optional

    Returns
    -------
    seq_records : list of SeqRecord
        Rows that raise TypeError during conversion are skipped.
    """
    records = []
    for _, row in df.iterrows():
        try:
            desc = "" if extra_data is None else " ".join(row[col] for col in extra_data)
            records.append(
                SeqRecord(
                    seq=Seq(row[sequence_col], alphabet=alphabet),
                    id=row[id_col],
                    description=desc,
                )
            )
        except TypeError:
            # Best-effort: silently skip rows that cannot be converted,
            # matching the original behaviour.
            pass
    return records
Convert a pandas dataframe to biopython seqrecords for easy writing. Parameters ---------- df : Dataframe Pandas dataframe to convert id_col : str column in dataframe to use as sequence label sequence_col : str column in dataframe to use as sequence data extra_data : list extra columns to use in sequence description line alphabet : biopython Alphabet object Returns ------- seq_records : List of biopython seqrecords.
def run(self):
    """Modified ``run`` that captures the return value and exceptions
    from ``target``.

    A non-None return value is recorded as an ``OrphanedReturn``
    exception, since a thread target's return value would otherwise be
    silently lost; any raised exception is stored in ``self._exception``.
    """
    try:
        if self._target:
            return_value = self._target(*self._args, **self._kwargs)
            if return_value is not None:
                self._exception = OrphanedReturn(self, return_value)
    except BaseException as err:
        self._exception = err
    finally:
        # Mirrors threading.Thread.run: drop references so the target
        # and its arguments can be garbage-collected promptly.
        del self._target, self._args, self._kwargs
Modified ``run`` that captures return value and exceptions from ``target``
def init_logger(cls, log_level):
    """Create and configure the AutoMLBoard logger.

    Attaches a stream handler with a level/time/location format and
    sets the requested level; returns the configured logger.
    """
    fmt = logging.Formatter(
        "[%(levelname)s %(asctime)s] %(filename)s: %(lineno)d %(message)s")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    board_logger = logging.getLogger("AutoMLBoard")
    board_logger.setLevel(log_level)
    board_logger.addHandler(stream_handler)
    return board_logger
Initialize logger settings.
def to_float(self, col: str, **kwargs):
    r"""Convert a column's values to float.

    :param col: name of the column
    :type col: str
    :param \*\*kwargs: keyword arguments for ``df.astype``
    :example: ``ds.to_float("mycol1")``
    """
    try:
        self.df[col] = self.df[col].astype(np.float64, **kwargs)
        self.ok("Converted column values to float")
    except Exception as exc:
        # Report the failure through the instance's error channel
        # rather than propagating.
        self.err(exc, "Error converting to float")
Convert column values to float :param col: name of the column :type col: str, at least one :param \*\*kwargs: keyword arguments for ``df.astype`` :type \*\*kwargs: optional :example: ``ds.to_float("mycol1")``
def connect(self):
    """Open an HTTP connection to the RPC server.

    Raises ``errors.InterfaceError`` wrapping the underlying cause if
    the connection cannot be established.
    (NOTE: Python 2 — uses the ``httplib`` module.)
    """
    logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
    try:
        self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
        self.connection.connect()
    except (httplib.HTTPException, socket.error) as e:
        # Wrap transport-level failures in the library's own error type.
        raise errors.InterfaceError('Unable to connect to the specified service', e)
Opens a HTTP connection to the RPC server.
async def load_tracks(self, query) -> LoadResult:
    """Execute a loadtracks request. Only works on Lavalink V3.

    Parameters
    ----------
    query : str

    Returns
    -------
    LoadResult
    """
    self.__check_node_ready()
    data = await self._get(self._uri + quote(str(query)))
    if isinstance(data, dict):
        return LoadResult(data)
    if isinstance(data, list):
        # Older list-shaped payloads are wrapped for v2 compatibility.
        return LoadResult({"loadType": LoadType.V2_COMPAT, "tracks": data})
Executes a loadtracks request. Only works on Lavalink V3. Parameters ---------- query : str Returns ------- LoadResult
def _clean():
    """Remove build artifacts from the project directory."""
    LOGGER.info('Cleaning project directory...')
    stale_dirs = ('.eggs', 'build', f'{config.PACKAGE_NAME()}.egg-info')
    for stale in stale_dirs:
        if not os.path.exists(stale):
            continue
        LOGGER.info('\tremoving: %s', stale)
        shutil.rmtree(stale)
Cleans up build dir
def load(cls, database, doc_id):
    """Load a specific document from the given database.

    :param database: the `Database` object to retrieve the document from
    :param doc_id: the document ID
    :return: the `Document` instance, or `None` if no document with the
             given ID was found
    """
    raw = database.get(doc_id)
    return None if raw is None else cls.wrap(raw)
Load a specific document from the given database. :param database: the `Database` object to retrieve the document from :param doc_id: the document ID :return: the `Document` instance, or `None` if no document with the given ID was found
def load_backends(self):
    """Load all the storage backends configured in settings.py.

    Each entry in ``settings.storage`` maps a backend name to its
    settings dict; the ``backend`` key holds the dotted path of the
    backend class to instantiate.
    (NOTE: Python 2 — uses ``iteritems``.)
    """
    for name, backend_settings in settings.storage.iteritems():
        backend_path = backend_settings['backend']
        # Split "pkg.module.ClassName" into module path and class name.
        backend_module, backend_cls = backend_path.rsplit('.', 1)
        backend_module = import_module(backend_module)
        backend_constructor = getattr(backend_module, backend_cls)
        # NOTE(review): the full settings dict (including 'backend') is
        # forwarded as keyword args — presumably the constructors accept it.
        self.backends[name] = backend_constructor(name, self.namespaces, **backend_settings)
Loads all the backends setup in settings.py.
def auto(cls, func):
    """Wrap ``func`` so a handle is obtained (via ``cls`` as a context
    manager) for the duration of each call.

    .. note::
        Most functions require the handle to remain alive for future
        calls on data they return; in such cases prefer the
        `requires_refcount` decorator and have the program keep an
        active handle.
    """
    @functools.wraps(func)
    def _with_handle(*args, **kwargs):
        with cls():
            return func(*args, **kwargs)
    return _with_handle
The ``auto`` decorator wraps ``func`` in a context manager so that a handle is obtained. .. note:: Please note, that most functions require the handle to continue being alive for future calls to data retrieved from the function. In such cases, it's advisable to use the `requires_refcount` decorator, and force the program using the library with obtaining a handle (and keeping it active.)
def types(self, *args):
    """Return a debug string naming the type of each arg.

    TYPES,ARG_1,...,ARG_N %{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)'
    """
    described = [f"{type(arg).__name__}({arg})" for arg in args]
    return ', '.join(described)
Used for debugging, returns type of each arg. TYPES,ARG_1,...,ARG_N %{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)'
def redirect_stream(system, target):
    """Redirect a Unix stream.

    If ``target`` is None, redirect ``system`` to /dev/null; otherwise
    redirect it to ``target``.

    :param system: either sys.stdin, sys.stdout, or sys.stderr
    :type system: file object
    :param target: file-like object, or None
    :return: None
    :raise: DaemonError if the redirection fails
    """
    fd = os.open(os.devnull, os.O_RDWR) if target is None else target.fileno()
    try:
        # Make system's descriptor point at the target's file description.
        os.dup2(fd, system.fileno())
    except OSError as err:
        raise DaemonError('Could not redirect {0} to {1}: {2}'
                          .format(system, target, err))
Redirect Unix streams. If None, redirect the stream to /dev/null, else redirect to target. :param system: either sys.stdin, sys.stdout, or sys.stderr :type system: file object :param target: file-like object, or None :type target: None, File Object :return: None :raise: DaemonError
def compilearg(self):
    """Compile this parameter into syntax usable on the shell, such as
    -paramflag=value.
    """
    raw = self.delimiter.join(self.value) if isinstance(self.value, list) else self.value
    # Quote values containing spaces so the shell treats them as one token.
    if " " in raw:
        raw = '"' + raw + '"'
    if self.paramflag and self.paramflag[-1] == '=' or self.nospace:
        sep = ''
    elif self.paramflag:
        sep = ' '
    else:
        return str(raw)
    return self.paramflag + sep + str(raw)
This method compiles the parameter into syntax that can be used on the shell, such as -paramflag=value
def qcktrc(tracelen=_default_len_out):
    """Return a string containing a traceback.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/qcktrc_c.html

    :param tracelen: Maximum length of output traceback string.
    :type tracelen: int
    :return: A traceback string.
    :rtype: str
    """
    buf = stypes.stringToCharP(tracelen)
    libspice.qcktrc_c(ctypes.c_int(tracelen), buf)
    return stypes.toPythonString(buf)
Return a string containing a traceback. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/qcktrc_c.html :param tracelen: Maximum length of output traceback string. :type tracelen: int :return: A traceback string. :rtype: str
def read_data(self, dstart=None, dend=None, swap_axes=True):
    """Read the data from `file` and return it as a Numpy array.

    Parameters
    ----------
    dstart : int, optional
        Offset in bytes of the data field; by default equal to
        ``header_size``.
    dend : int, optional
        End position in bytes until which data is read (exclusive).
    swap_axes : bool, optional
        If ``True``, use `data_axis_order` to swap the axes in the
        returned array.

    Returns
    -------
    data : `numpy.ndarray`
        The data read from `file`.
    """
    data = super(FileReaderMRC, self).read_data(dstart, dend)
    # MRC data is stored in Fortran (column-major) order.
    data = data.reshape(self.data_shape, order='F')
    if swap_axes:
        data = np.transpose(data, axes=self.data_axis_order)
    # Sanity check: the final array must match the declared data shape.
    assert data.shape == self.data_shape
    return data
Read the data from `file` and return it as Numpy array. Parameters ---------- dstart : int, optional Offset in bytes of the data field. By default, it is equal to ``header_size``. Backwards indexing with negative values is also supported. Use a value larger than the header size to extract a data subset. dend : int, optional End position in bytes until which data is read (exclusive). Backwards indexing with negative values is also supported. Use a value different from the file size to extract a data subset. swap_axes : bool, optional If ``True``, use `data_axis_order` to swap the axes in the returned array. In that case, the shape of the array may no longer agree with `data_storage_shape`. Returns ------- data : `numpy.ndarray` The data read from `file`.
def _handle_satosa_authentication_error(self, error):
    """Send a response to the requester about the error.

    :type error: satosa.exception.SATOSAAuthenticationError
    :rtype: satosa.response.Response
    :param error: The exception
    :return: response
    """
    context = Context()
    context.state = error.state
    frontend = self.module_router.frontend_routing(context)
    return frontend.handle_backend_error(error)
Sends a response to the requester about the error :type error: satosa.exception.SATOSAAuthenticationError :rtype: satosa.response.Response :param error: The exception :return: response
def create(self, dbsecgroup_id, source_cidr, port=3306):
    """Create a security group rule.

    :param str dbsecgroup_id: The ID of the security group in which this
        rule should be created.
    :param str source_cidr: The source IP address range from which access
        should be allowed.
    :param int port: The port number used by db clients to connect to the
        db server (specified at db instance creation time).
    :rtype: :class:`DBSecurityGroupRule`.
    """
    rule = {
        "security_group_id": dbsecgroup_id,
        "cidr": source_cidr,
        "from_port": port,
        "to_port": port,
    }
    return self._create("/security-group-rules",
                        {"security_group_rule": rule},
                        "security_group_rule")
Creates a security group rule. :param str dbsecgroup_id: The ID of the security group in which this rule should be created. :param str source_cidr: The source IP address range from which access should be allowed. :param int port: The port number used by db clients to connect to the db server. This would have been specified at db instance creation time. :rtype: :class:`DBSecurityGroupRule`.
def default(self, value):
    """Serialize values the base JSON serializer cannot handle.

    Converts a Mongo ``ObjectId`` to ``str``; all other values are
    deferred to the parent class.
    """
    if isinstance(value, ObjectId):
        return str(value)
    return super(ElasticJSONSerializer, self).default(value)
Convert mongo.ObjectId.
def _server_connect(self, s):
    """Set up a TCP connection to the server (Tornado generator coroutine).

    Creates a non-blocking socket wrapped in a Tornado ``IOStream``,
    connects to ``s.uri`` bounded by the configured timeout, and installs
    the close callback used for connection-error processing.
    """
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.setblocking(0)
    self._socket.settimeout(1.0)
    if self.options["tcp_nodelay"]:
        # Disable Nagle's algorithm for lower write latency.
        self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    self.io = tornado.iostream.IOStream(self._socket,
                                        max_buffer_size=self._max_read_buffer_size,
                                        max_write_buffer_size=self._max_write_buffer_size,
                                        read_chunk_size=self._read_chunk_size)
    future = self.io.connect((s.uri.hostname, s.uri.port))
    # Fail the connect if it does not complete within connect_timeout.
    yield tornado.gen.with_timeout(
        timedelta(seconds=self.options["connect_timeout"]), future)
    self.io.set_close_callback(self._process_op_err)
Sets up a TCP connection to the server.
def getUtilities(self, decision, orderVector):
    """Return the utilities of every candidate in the decision.

    :ivar list<int> decision: integer representations of the candidates
        in the current decision.
    :ivar list<int> orderVector: integer representations of candidates
        ordered from most preferred to least.
    :return: list of floats, one utility per candidate in ``decision``.
    """
    scoringVector = self.getScoringVector(orderVector)
    # Negate scores when the scoring vector expresses a loss
    # (truthiness test replaces the `== True` anti-idiom).
    sign = -1.0 if self.isLoss else 1.0
    return [sign * float(scoringVector[orderVector.index(alt)])
            for alt in decision]
Returns a list of floats that contains the utilities of every candidate in the decision. :ivar list<int> decision: Contains a list of integer representations of candidates in the current decision. :ivar list<int> orderVector: A list of integer representations for each candidate ordered from most preferred to least.
def computer(self, base_dn, samaccountname, attributes=()):
    """Produce a single, populated ADComputer object via the object
    factory.

    Does not populate attributes for the caller instance.

    :param str base_dn: The base DN to search within
    :param str samaccountname: The computer's sAMAccountName
    :param list attributes: Object attributes to populate, defaults to all
    :return: A populated ADComputer object, or None if not found
    :rtype: ADComputer
    """
    matches = self.computers(base_dn,
                             samaccountnames=[samaccountname],
                             attributes=attributes)
    if len(matches):
        return matches[0]
    logging.info("%s - unable to retrieve object from AD by sAMAccountName",
                 samaccountname)
Produces a single, populated ADComputer object through the object factory. Does not populate attributes for the caller instance. :param str base_dn: The base DN to search within :param str samaccountname: The computer's sAMAccountName :param list attributes: Object attributes to populate, defaults to all :return: A populated ADComputer object :rtype: ADComputer
def _lookup_in_all_namespaces(self, symbol):
    """Helper for lookup_symbol that looks for symbols in all namespaces.

    Walks down ``symbol.namespace_stack`` collecting each nested
    namespace dict, then tries the lookup from the innermost namespace
    outward, returning the first successful match or None.

    Args:
      symbol: Symbol
    """
    namespace = self.namespaces
    namespace_stack = []
    for current in symbol.namespace_stack:
        namespace = namespace.get(current)
        # Stop descending once the path dead-ends or hits a non-dict.
        if namespace is None or not isinstance(namespace, dict):
            break
        namespace_stack.append(namespace)
    # Innermost namespaces shadow outer ones, so search in reverse.
    for namespace in reversed(namespace_stack):
        try:
            return self._lookup_namespace(symbol, namespace)
        except Error:
            # Not found at this level; keep trying outer namespaces.
            pass
    return None
Helper for lookup_symbol that looks for symbols in all namespaces. Args: symbol: Symbol
def version(self):
    """Software version of the current repository.

    On the sandbox branch this is the package's own version; on any
    other branch it is "<slugified-branch>-<short-commit-id>".
    """
    branches = self.branches()
    if self.info['branch'] == branches.sandbox:
        try:
            return self.software_version()
        except Exception as exc:
            raise utils.CommandError(
                'Could not obtain repo version, do you have a makefile '
                'with version entry?\n%s' % exc
            )
    # Slugify the branch name: lowercase, non [a-z0-9_-] runs become '-'.
    slug = re.sub('[^a-z0-9_-]+', '-', self.info['branch'].lower())
    short_id = self.info['head']['id'][:8]
    return f"{slug}-{short_id}"
Software version of the current repository
def is_scaled_full_image(self):
    """True if this request is for a scaled full image.

    Used to determine whether this request should be included in the
    set of `sizes` specified in the Image Information.
    """
    # Full region, both dimensions given, no "!" sizing, no rotation,
    # default quality, jpg format.
    scaled_full = (self.region_full
                   and self.size_wh[0] is not None
                   and self.size_wh[1] is not None
                   and not self.size_bang
                   and self.rotation_deg == 0.0
                   and self.quality == self.default_quality
                   and self.format == 'jpg')
    return scaled_full
True if this request is for a scaled full image. To be used to determine whether this request should be used in the set of `sizes` specificed in the Image Information.
def validate(cls):
    """Ensure the conspect element is properly selected.

    Shows the error indicator when invalid; returns True when valid,
    False otherwise.
    """
    is_valid = bool(cls.get_dict())
    cls.show_error(not is_valid)
    return is_valid
Make sure, that conspect element is properly selected. If not, show error.
def compute_partition_size(result, processes):
    """Compute the partition size that evenly distributes work across
    processes.

    Defaults to 1 if the length of ``result`` cannot be determined.

    :param result: Result to compute on
    :param processes: Number of processes to use
    :return: Best partition size
    """
    try:
        length = len(result)
    except TypeError:
        # Unsized iterables get the minimum partition size.
        return 1
    return max(math.ceil(length / processes), 1)
Attempts to compute the partition size to evenly distribute work across processes. Defaults to 1 if the length of result cannot be determined. :param result: Result to compute on :param processes: Number of processes to use :return: Best partition size
async def listWorkerTypes(self, *args, **kwargs):
    """See the list of worker types which are known to be managed.

    This method is only for debugging the ec2-manager.

    This method gives output: ``v1/list-worker-types.json#``

    This method is ``experimental``.
    """
    # Thin pass-through to the generated API-call machinery.
    return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
See the list of worker types which are known to be managed This method is only for debugging the ec2-manager This method gives output: ``v1/list-worker-types.json#`` This method is ``experimental``
def uptodate():
    """Call the REST endpoint to see if the packages on the "server"
    are up to date.

    NOTE(review): as written this only bumps each stored package
    version by 1.0 in the loaded state and returns the mapping —
    presumably dummy behaviour for this proxy module; confirm upstream.
    """
    details = _load_state()
    packages = details['packages']
    for pkg_name in packages:
        bumped = float(packages[pkg_name]) + 1.0
        packages[pkg_name] = six.text_type(bumped)
    return packages
Call the REST endpoint to see if the packages on the "server" are up to date.
def set_legend(self, legend):
    """Set the legend; must be a list, tuple or None.

    Entries are URL-quoted before being stored.
    """
    assert legend is None or isinstance(legend, (list, tuple))
    self.legend = [quote(item) for item in legend] if legend else None
legend needs to be a list, tuple or None
def change_port_speed(self, instance_id, public, speed):
    """Change the port speed of a virtual server's NICs.

    Example::

        # change the public interface to 10Mbps on instance 12345
        result = mgr.change_port_speed(instance_id=12345, public=True,
                                       speed=10)
        # result will be True or an Exception

    :param int instance_id: The ID of the VS
    :param bool public: True to change the public interface, False for
        the private interface.
    :param int speed: The port speed to set.

    .. warning:: A port speed of 0 will disable the interface.
    """
    method = ('setPublicNetworkInterfaceSpeed' if public
              else 'setPrivateNetworkInterfaceSpeed')
    return self.client.call('Virtual_Guest', method, speed, id=instance_id)
Allows you to change the port speed of a virtual server's NICs. Example:: #change the Public interface to 10Mbps on instance 12345 result = mgr.change_port_speed(instance_id=12345, public=True, speed=10) # result will be True or an Exception :param int instance_id: The ID of the VS :param bool public: Flag to indicate which interface to change. True (default) means the public interface. False indicates the private interface. :param int speed: The port speed to set. .. warning:: A port speed of 0 will disable the interface.
def release(self):
    """Unlock the account.

    Sends a release request to the parent and validates the
    acknowledgement; re-raises any exception the parent sends back.
    """
    self.parent.send(('release-account', self.account_hash))
    reply = self.parent.recv()
    if isinstance(reply, Exception):
        raise reply
    if reply != 'ok':
        raise ValueError('unexpected response: ' + repr(reply))
Unlocks the account.
def pi_revision():
    """Detect the revision number of a Raspberry Pi.

    Useful for changing functionality like the default I2C bus based on
    revision.

    :return: 1 for original-revision boards (0000/0002/0003), 2 otherwise.
    :raise RuntimeError: if no Revision line is found in /proc/cpuinfo.
    """
    # Raw string: \s and \w in a plain string are invalid escape
    # sequences (SyntaxWarning on modern Python). Compile once, hoisted
    # out of the per-line loop.
    pattern = re.compile(r'Revision\s+:\s+.*(\w{4})$', flags=re.IGNORECASE)
    with open('/proc/cpuinfo', 'r') as infile:
        for line in infile:
            match = pattern.match(line)
            if match:
                # Revision 1 boards end in 0000/0002/0003; later codes
                # indicate revision 2.
                return 1 if match.group(1) in ['0000', '0002', '0003'] else 2
    raise RuntimeError('Could not determine Raspberry Pi revision.')
Detect the revision number of a Raspberry Pi, useful for changing functionality like default I2C bus based on revision.
def set_stats(stats, value):
    """Update the running stats with the value passed in.

    :param stats: :class: `dict` with keys total_count/value/average/max/min
    :param value: :class: `int`
    """
    stats["total_count"] += 1
    stats["value"] += value
    stats["average"] = stats["value"] / stats["total_count"]
    stats["max"] = max(stats["max"], value)
    # A min of 0 means "unset", so the first real value always replaces it.
    if stats["min"] == 0 or value < stats["min"]:
        stats["min"] = value
Updates the stats with the value passed in. :param stats: :class: `dict` :param value: :class: `int`
def access_storage_edit(name, cid, uid, perm, **kwargs):
    """Edit the ACL for the specified collection, creating it if
    necessary.
    """
    ctx = Context(**kwargs)
    action_kwargs = {
        'storage': ctx.repo.create_secure_service('storage'),
        'name': name,
        'cids': cid,
        'uids': uid,
        'perm': perm,
    }
    ctx.execute_action('access:storage:edit', **action_kwargs)
Edits ACL for the specified collection. Creates if necessary.
def copy(self, new_path, replace=False):
    """Copy the file to ``new_path`` server-side via boto instead of
    uploading another file to the new key.

    :return: True if the copy happened, False if the destination exists
        and ``replace`` is False.
    """
    if not replace and get_file(new_path).exists():
        return False
    self.key.copy(self.key.bucket, new_path)
    return True
Uses boto to copy the file to the new path instead of uploading another file to the new key
def visit_for(self, node):
    """Return an astroid.For node as string."""
    header = "for %s in %s:" % (node.target.accept(self),
                                node.iter.accept(self))
    result = header + "\n" + self._stmt_list(node.body)
    if node.orelse:
        result = "%s\nelse:\n%s" % (result, self._stmt_list(node.orelse))
    return result
return an astroid.For node as string
def transform_generator(fn):
    """Mark ``fn`` as a transform generator.

    A decorator that marks transform pipes that should be called to
    create the real transform. Sets the ``is_transform_generator`` flag
    on the function object (``func_dict`` on Python 2, ``__dict__`` on
    Python 3, selected via ``six``).
    """
    if six.PY2:
        fn.func_dict['is_transform_generator'] = True
    else:
        fn.__dict__['is_transform_generator'] = True
    return fn
A decorator that marks transform pipes that should be called to create the real transform
def get_plugin_apps(self):
    """Obtain a mapping between routes and handlers.

    Returns:
      A dict mapping route path constants to the bound handler methods
      that respond to requests on them.
    """
    return {
        _ACK_ROUTE: self._serve_ack,
        _COMM_ROUTE: self._serve_comm,
        _DEBUGGER_GRPC_HOST_PORT_ROUTE: self._serve_debugger_grpc_host_port,
        _DEBUGGER_GRAPH_ROUTE: self._serve_debugger_graph,
        _GATED_GRPC_ROUTE: self._serve_gated_grpc,
        _TENSOR_DATA_ROUTE: self._serve_tensor_data,
        _SOURCE_CODE_ROUTE: self._serve_source_code,
    }
Obtains a mapping between routes and handlers. This function also starts a debugger data server on separate thread if the plugin has not started one yet. Returns: A mapping between routes and handlers (functions that respond to requests).