Columns: positive (string, lengths 100 to 30.3k) and anchor (string, lengths 1 to 15k).
def _create(self): """Create the Whisper file on disk""" if not os.path.exists(settings.SALMON_WHISPER_DB_PATH): os.makedirs(settings.SALMON_WHISPER_DB_PATH) archives = [whisper.parseRetentionDef(retentionDef) for retentionDef in settings.ARCHIVES.split(",")] whisper.create(self.path, archives, xFilesFactor=settings.XFILEFACTOR, aggregationMethod=settings.AGGREGATION_METHOD)
Create the Whisper file on disk
def p_arr_access_expr(p): """ func_call : ARRAY_ID arg_list """ # This is an array access p[0] = make_call(p[1], p.lineno(1), p[2]) if p[0] is None: return entry = SYMBOL_TABLE.access_call(p[1], p.lineno(1)) entry.accessed = True
func_call : ARRAY_ID arg_list
def launch_browser(self, profile, timeout=30): """Launches the browser for the given profile name. It is assumed the profile already exists. """ self.profile = profile self._start_from_profile_path(self.profile.path) self._wait_until_connectable(timeout=timeout)
Launches the browser for the given profile name. It is assumed the profile already exists.
def clone(self, into=None): """Clone this chroot. :keyword into: (optional) An optional destination directory to clone the Chroot into. If not specified, a temporary directory will be created. .. versionchanged:: 0.8 The temporary directory created when ``into`` is not specified is now garbage collected on interpreter exit. """ into = into or safe_mkdtemp() new_chroot = Chroot(into) for label, fileset in self.filesets.items(): for fn in fileset: new_chroot.link(os.path.join(self.chroot, fn), fn, label=label) return new_chroot
Clone this chroot. :keyword into: (optional) An optional destination directory to clone the Chroot into. If not specified, a temporary directory will be created. .. versionchanged:: 0.8 The temporary directory created when ``into`` is not specified is now garbage collected on interpreter exit.
def list_staged_files(self) -> typing.List[str]: """ :return: staged files :rtype: list of str """ staged_files: typing.List[str] = [x.a_path for x in self.repo.index.diff('HEAD')] LOGGER.debug('staged files: %s', staged_files) return staged_files
:return: staged files :rtype: list of str
def load(self, fname): """ .. todo:: REPO.load docstring """ # Imports import h5py as h5 from ..error import RepoError # If repo not None, complain if not self._repo == None: raise RepoError(RepoError.STATUS, "Repository already open", "File: {0}".format(self.fname)) ## end if # If string passed, try opening h5.File; otherwise complain if isinstance(fname, str): self.fname = fname self._repo = h5.File(fname) else: raise TypeError("Invalid filename type: {0}".format(type(fname)))
.. todo:: REPO.load docstring
def _unpack_content(raw_data, content_type=None): """Extract the correct structure for deserialization. If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. if we can't, raise. Your Pipeline should have a RawDeserializer. If not a pipeline response and raw_data is bytes or string, use content-type to decode it. If no content-type, try JSON. If raw_data is something else, bypass all logic and return it directly. :param raw_data: Data to be processed. :param content_type: How to parse if raw_data is a string/bytes. :raises JSONDecodeError: If JSON is requested and parsing is impossible. :raises UnicodeDecodeError: If bytes is not UTF8 """ # This avoids a circular dependency. We might want to consider RawDesializer is more generic # than the pipeline concept, and put it in a toolbox, used both here and in pipeline. TBD. from .pipeline.universal import RawDeserializer # Assume this is enough to detect a Pipeline Response without importing it context = getattr(raw_data, "context", {}) if context: if RawDeserializer.CONTEXT_NAME in context: return context[RawDeserializer.CONTEXT_NAME] raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") #Assume this is enough to recognize universal_http.ClientResponse without importing it if hasattr(raw_data, "body"): return RawDeserializer.deserialize_from_http_generics( raw_data.text(), raw_data.headers ) # Assume this enough to recognize requests.Response without importing it. if hasattr(raw_data, '_content_consumed'): return RawDeserializer.deserialize_from_http_generics( raw_data.text, raw_data.headers ) if isinstance(raw_data, (basestring, bytes)) or hasattr(raw_data, 'read'): return RawDeserializer.deserialize_from_text(raw_data, content_type) return raw_data
Extract the correct structure for deserialization. If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. if we can't, raise. Your Pipeline should have a RawDeserializer. If not a pipeline response and raw_data is bytes or string, use content-type to decode it. If no content-type, try JSON. If raw_data is something else, bypass all logic and return it directly. :param raw_data: Data to be processed. :param content_type: How to parse if raw_data is a string/bytes. :raises JSONDecodeError: If JSON is requested and parsing is impossible. :raises UnicodeDecodeError: If bytes is not UTF8
def create(message: str, pubkey: Optional[str] = None, signing_keys: Optional[List[SigningKey]] = None, message_comment: Optional[str] = None, signatures_comment: Optional[str] = None) -> str: """ Encrypt a message in ascii armor format, optionally signing it :param message: Utf-8 message :param pubkey: Public key of recipient for encryption :param signing_keys: Optional list of SigningKey instances :param message_comment: Optional message comment field :param signatures_comment: Optional signatures comment field :return: """ # if no public key and no signing key... if not pubkey and not signing_keys: # We can not create an Ascii Armor Message raise MISSING_PUBLIC_KEY_AND_SIGNING_KEY_EXCEPTION # keep only one newline at the end of the message message = message.rstrip("\n\r") + "\n" # create block with headers ascii_armor_block = """{begin_message_header} """.format(begin_message_header=BEGIN_MESSAGE_HEADER) # if encrypted message... if pubkey: # add encrypted message fields ascii_armor_block += """{version_field} """.format(version_field=AsciiArmor._get_version_field()) # add message comment if specified if message_comment: ascii_armor_block += """{comment_field} """.format(comment_field=AsciiArmor._get_comment_field(message_comment)) # blank line separator ascii_armor_block += '\n' if pubkey: # add encrypted message pubkey_instance = PublicKey(pubkey) base64_encrypted_message = base64.b64encode(pubkey_instance.encrypt_seal(message)) # type: bytes ascii_armor_block += """{base64_encrypted_message} """.format(base64_encrypted_message=base64_encrypted_message.decode('utf-8')) else: # remove trailing spaces message = AsciiArmor._remove_trailing_spaces(message) # add dash escaped message to ascii armor content ascii_armor_block += AsciiArmor._dash_escape_text(message) # if no signature... if signing_keys is None: # add message tail ascii_armor_block += END_MESSAGE_HEADER else: # add signature blocks and close block on last signature count = 1 for signing_key in signing_keys: ascii_armor_block += AsciiArmor._get_signature_block(message, signing_key, count == len(signing_keys), signatures_comment) count += 1 return ascii_armor_block
Encrypt a message in ascii armor format, optionally signing it :param message: Utf-8 message :param pubkey: Public key of recipient for encryption :param signing_keys: Optional list of SigningKey instances :param message_comment: Optional message comment field :param signatures_comment: Optional signatures comment field :return:
def find_xor_mask(data, alphabet=None, max_depth=3, min_depth=0, iv=None): """ Produce a series of bytestrings that when XORed together end up being equal to ``data`` and only contain characters from the given ``alphabet``. The initial state (or previous state) can be given as ``iv``. Arguments: data (bytes): The data to recreate as a series of XOR operations. alphabet (bytes): The bytestring containing the allowed characters for the XOR values. If ``None``, all characters except NUL bytes, carriage returns and newlines will be allowed. max_depth (int): The maximum depth to look for a solution. min_depth (int): The minimum depth to look for a solution. iv (bytes): Initialization vector. If ``None``, it will be assumed the operation starts at an all zero string. Returns: A list of bytestrings that, when XOR'ed with ``iv`` (or just each other if ``iv`` is not provided) will be the same as ``data``. Examples: Produce a series of strings that when XORed together will result in the string 'pwnypack' using only ASCII characters in the range 65 to 96: >>> from pwny import * >>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97))) [b'````````', b'AAAAABAA', b'QVOXQCBJ'] >>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ') 'pwnypack' """ if alphabet is None: alphabet = set(i for i in range(256) if i not in (0, 10, 13)) else: alphabet = set(six.iterbytes(alphabet)) if iv is None: iv = b'\0' * len(data) if len(data) != len(iv): raise ValueError('length of iv differs from data') if not min_depth and data == iv: return [] data = xor(data, iv) # Pre-flight check to see if we have all the bits we need. mask = 0 for ch in alphabet: mask |= ch mask = ~mask # Map all bytes in data into a {byte: [pos...]} dictionary, check # if we have enough bits along the way. data_map_tmpl = {} for i, ch in enumerate(six.iterbytes(data)): if ch & mask: raise ValueError('Alphabet does not contain enough bits.') data_map_tmpl.setdefault(ch, []).append(i) # Let's try to find a solution. for depth in range(max(min_depth, 1), max_depth + 1): # Prepare for round. data_map = data_map_tmpl.copy() results = [[None] * len(data) for _ in range(depth)] for values in itertools.product(*([alphabet] * (depth - 1))): # Prepare cumulative mask for this combination of alphabet. mask = 0 for value in values: mask ^= value for ch in list(data_map): r = ch ^ mask if r in alphabet: # Found a solution for this character, mark the result. pos = data_map.pop(ch) for p in pos: results[0][p] = r for i, value in enumerate(values): results[i + 1][p] = value if not data_map: # Aaaand.. We're done! return [ b''.join(six.int2byte(b) for b in r) for r in results ] # No solution found at this depth. Increase depth, try again. raise ValueError('No solution found.')
Produce a series of bytestrings that when XORed together end up being equal to ``data`` and only contain characters from the given ``alphabet``. The initial state (or previous state) can be given as ``iv``. Arguments: data (bytes): The data to recreate as a series of XOR operations. alphabet (bytes): The bytestring containing the allowed characters for the XOR values. If ``None``, all characters except NUL bytes, carriage returns and newlines will be allowed. max_depth (int): The maximum depth to look for a solution. min_depth (int): The minimum depth to look for a solution. iv (bytes): Initialization vector. If ``None``, it will be assumed the operation starts at an all zero string. Returns: A list of bytestrings that, when XOR'ed with ``iv`` (or just each other if ``iv`` is not provided) will be the same as ``data``. Examples: Produce a series of strings that when XORed together will result in the string 'pwnypack' using only ASCII characters in the range 65 to 96: >>> from pwny import * >>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97))) [b'````````', b'AAAAABAA', b'QVOXQCBJ'] >>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ') 'pwnypack'
def remove_stage_from_deployed_values(key, filename): # type: (str, str) -> None """Delete a top level key from the deployed JSON file.""" final_values = {} # type: Dict[str, Any] try: with open(filename, 'r') as f: final_values = json.load(f) except IOError: # If there is no file to delete from, then this function is a noop. return try: del final_values[key] with open(filename, 'wb') as f: data = serialize_to_json(final_values) f.write(data.encode('utf-8')) except KeyError: # If the key didn't exist then there is nothing to remove. pass
Delete a top level key from the deployed JSON file.
def set_border(self, thickness, color="black"): """ Sets the border thickness and color. :param int thickness: The thickness of the border. :param str color: The color of the border. """ self._set_tk_config("highlightthickness", thickness) self._set_tk_config("highlightbackground", utils.convert_color(color))
Sets the border thickness and color. :param int thickness: The thickness of the border. :param str color: The color of the border.
def authenticate(self, msg: Dict, identifier: Optional[str] = None, signature: Optional[str] = None, threshold: Optional[int] = None, key: Optional[str] = None) -> str: """ Authenticate the client's message with the signature provided. :param identifier: some unique identifier; if None, then try to use msg['identifier'] as identifier :param signature: a utf-8 and base58 encoded signature :param msg: the message to authenticate :param threshold: The number of successful signature verification :param key: The key of request for storing in internal maps required. By default all signatures are required to be verified. :return: the identifier; an exception of type SigningException is raised if the signature is not valid """
Authenticate the client's message with the signature provided. :param identifier: some unique identifier; if None, then try to use msg['identifier'] as identifier :param signature: a utf-8 and base58 encoded signature :param msg: the message to authenticate :param threshold: The number of successful signature verification :param key: The key of request for storing in internal maps required. By default all signatures are required to be verified. :return: the identifier; an exception of type SigningException is raised if the signature is not valid
def show_input(self, template_helper, language, seed): """ Show multiple choice problems """ choices = [] limit = self._limit if limit == 0: limit = len(self._choices) # no limit rand = Random("{}#{}#{}".format(self.get_task().get_id(), self.get_id(), seed)) # Ensure that the choices are random # we *do* need to copy the choices here random_order_choices = list(self._choices) rand.shuffle(random_order_choices) if self._multiple: # take only the valid choices in the first pass for entry in random_order_choices: if entry['valid']: choices.append(entry) limit = limit - 1 # take everything else in a second pass for entry in random_order_choices: if limit == 0: break if not entry['valid']: choices.append(entry) limit = limit - 1 else: # need to have ONE valid entry for entry in random_order_choices: if not entry['valid'] and limit > 1: choices.append(entry) limit = limit - 1 for entry in random_order_choices: if entry['valid'] and limit > 0: choices.append(entry) limit = limit - 1 rand.shuffle(choices) header = ParsableText(self.gettext(language, self._header), "rst", translation=self._translations.get(language, gettext.NullTranslations())) return str(DisplayableMultipleChoiceProblem.get_renderer(template_helper).tasks.multiple_choice( self.get_id(), header, self._multiple, choices, lambda text: ParsableText(self.gettext(language, text) if text else "", "rst", translation=self._translations.get(language, gettext.NullTranslations()))))
Show multiple choice problems
def update(cls, **kwargs): ''' If a record matching the instance id already exists in the database, update it. If a record matching the instance id does not already exist, create a new record. ''' q = cls._get_instance(**{'id': kwargs['id']}) if q: for k, v in kwargs.items(): setattr(q, k, v) _action_and_commit(q, session.add) else: cls.get_or_create(**kwargs)
If a record matching the instance id already exists in the database, update it. If a record matching the instance id does not already exist, create a new record.
async def parallel_results(future_map: Sequence[Tuple]) -> Dict: """ Run parallel execution of futures and return mapping of their results to the provided keys. Just a neat shortcut around ``asyncio.gather()`` :param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) ) :return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'} """ ctx_methods = OrderedDict(future_map) fs = list(ctx_methods.values()) results = await asyncio.gather(*fs) results = { key: results[idx] for idx, key in enumerate(ctx_methods.keys()) } return results
Run parallel execution of futures and return mapping of their results to the provided keys. Just a neat shortcut around ``asyncio.gather()`` :param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) ) :return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
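A minimal usage sketch of `parallel_results`, assuming it is importable from the module above; `get_nav` and `get_content` are hypothetical stand-in coroutines mirroring the docstring example.

```python
import asyncio

async def get_nav():
    return {1: 2}

async def get_content():
    return 'xyz'

async def main():
    # Keys are paired with coroutine objects; results come back keyed the same way.
    results = await parallel_results((
        ('nav', get_nav()),
        ('content', get_content()),
    ))
    print(results)  # {'nav': {1: 2}, 'content': 'xyz'}

asyncio.run(main())
```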
def invoke_function(self, ctx, name, arguments): """ Invokes the given function :param ctx: the evaluation context :param name: the function name (case insensitive) :param arguments: the arguments to be passed to the function :return: the function return value """ from temba_expressions import EvaluationError, conversions # find function with given name func = self.get_function(name) if func is None: raise EvaluationError("Undefined function: %s" % name) args, varargs, defaults = self._get_arg_spec(func) call_args = [] passed_args = list(arguments) for arg in args: if arg == 'ctx': call_args.append(ctx) elif passed_args: call_args.append(passed_args.pop(0)) elif arg in defaults: call_args.append(defaults[arg]) else: raise EvaluationError("Too few arguments provided for function %s" % name) if varargs is not None: call_args.extend(passed_args) passed_args = [] # any unused arguments? if passed_args: raise EvaluationError("Too many arguments provided for function %s" % name) try: return func(*call_args) except Exception as e: pretty_args = [] for arg in arguments: if isinstance(arg, str): pretty = '"%s"' % arg else: try: pretty = conversions.to_string(arg, ctx) except EvaluationError: pretty = str(arg) pretty_args.append(pretty) raise EvaluationError("Error calling function %s with arguments %s" % (name, ', '.join(pretty_args)), e)
Invokes the given function :param ctx: the evaluation context :param name: the function name (case insensitive) :param arguments: the arguments to be passed to the function :return: the function return value
def decode_network(objects): """Return root object from ref-containing obj table entries""" def resolve_ref(obj, objects=objects): if isinstance(obj, Ref): # first entry is 1 return objects[obj.index - 1] else: return obj # Reading the ObjTable backwards somehow makes more sense. for i in xrange(len(objects)-1, -1, -1): obj = objects[i] if isinstance(obj, Container): obj.update((k, resolve_ref(v)) for (k, v) in obj.items()) elif isinstance(obj, Dictionary): obj.value = dict( (resolve_ref(field), resolve_ref(value)) for (field, value) in obj.value.items() ) elif isinstance(obj, dict): obj = dict( (resolve_ref(field), resolve_ref(value)) for (field, value) in obj.items() ) elif isinstance(obj, list): obj = [resolve_ref(field) for field in obj] elif isinstance(obj, Form): for field in obj.value: value = getattr(obj, field) value = resolve_ref(value) setattr(obj, field, value) elif isinstance(obj, ContainsRefs): obj.value = [resolve_ref(field) for field in obj.value] objects[i] = obj for obj in objects: if isinstance(obj, Form): obj.built() root = objects[0] return root
Return root object from ref-containing obj table entries
def git_support(fn, command): """Resolves git aliases and supports testing for both git and hub.""" # supports GitHub's `hub` command # which is recommended to be used with `alias git=hub` # but at this point, shell aliases have already been resolved if not is_app(command, 'git', 'hub'): return False # perform git aliases expansion if 'trace: alias expansion:' in command.output: search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)", command.output) alias = search.group(1) # by default git quotes everything, for example: # 'commit' '--amend' # which is surprising and does not allow to easily test for # eg. 'git commit' expansion = ' '.join(shell.quote(part) for part in shell.split_command(search.group(2))) new_script = command.script.replace(alias, expansion) command = command.update(script=new_script) return fn(command)
Resolves git aliases and supports testing for both git and hub.
def get_port_area(self, view): """Calculates the drawing area affected by the (hovered) port """ state_v = self.parent center = self.handle.pos margin = self.port_side_size / 4. if self.side in [SnappedSide.LEFT, SnappedSide.RIGHT]: height, width = self.port_size else: width, height = self.port_size upper_left = center[0] - width / 2 - margin, center[1] - height / 2 - margin lower_right = center[0] + width / 2 + margin, center[1] + height / 2 + margin port_upper_left = view.get_matrix_i2v(state_v).transform_point(*upper_left) port_lower_right = view.get_matrix_i2v(state_v).transform_point(*lower_right) size = port_lower_right[0] - port_upper_left[0], port_lower_right[1] - port_upper_left[1] return port_upper_left[0], port_upper_left[1], size[0], size[1]
Calculates the drawing area affected by the (hovered) port
def search_and_extract_nucleotides_matching_nucleotide_database(self, unpack, euk_check, search_method, maximum_range, threads, evalue, hmmsearch_output_table, hit_reads_fasta): '''As per nt_db_search() except slightly lower level. Search an input read set (unpack) and then extract the sequences that hit. Parameters ---------- hmmsearch_output_table: str path to hmmsearch output table hit_reads_fasta: str path to hit nucleotide sequences Returns ------- direction_information: dict {read_1: False ... read n: True} where True = Forward direction and False = Reverse direction result: DBSearchResult object containing file locations and hit information ''' if search_method == "hmmsearch": # First search the reads using the HMM search_result, table_list = self.nhmmer( hmmsearch_output_table, unpack, threads, evalue ) elif search_method == 'diamond': raise Exception("Diamond searches not supported for nucelotide databases yet") if maximum_range: hits = self._get_read_names( search_result, # define the span of hits maximum_range ) else: hits = self._get_sequence_directions(search_result) hit_readnames = hits.keys() if euk_check: euk_reads = self._check_euk_contamination(table_list) hit_readnames = set([read for read in hit_readnames if read not in euk_reads]) hits = {key:item for key, item in hits.iteritems() if key in hit_readnames} hit_read_count = [len(euk_reads), len(hit_readnames)] else: hit_read_count = [0, len(hit_readnames)] hit_reads_fasta, direction_information = self._extract_from_raw_reads( hit_reads_fasta, hit_readnames, unpack.read_file, unpack.format(), hits ) if not hit_readnames: result = DBSearchResult(None, search_result, hit_read_count, None) else: slash_endings=self._check_for_slash_endings(hit_readnames) result = DBSearchResult(hit_reads_fasta, search_result, hit_read_count, slash_endings) if maximum_range: n_hits = sum([len(x["strand"]) for x in hits.values()]) else: n_hits = len(hits) logging.info("%s read(s) detected" % n_hits) return result, direction_information
As per nt_db_search() except slightly lower level. Search an input read set (unpack) and then extract the sequences that hit. Parameters ---------- hmmsearch_output_table: str path to hmmsearch output table hit_reads_fasta: str path to hit nucleotide sequences Returns ------- direction_information: dict {read_1: False ... read n: True} where True = Forward direction and False = Reverse direction result: DBSearchResult object containing file locations and hit information
def apns_send_bulk_message( registration_ids, alert, application_id=None, certfile=None, **kwargs ): """ Sends an APNS notification to one or more registration_ids. The registration_ids argument needs to be a list. Note that, if set, alert should always be a string. If it is not set, it won't be included in the notification. You will need to pass None to this for silent notifications. """ results = _apns_send( registration_ids, alert, batch=True, application_id=application_id, certfile=certfile, **kwargs ) inactive_tokens = [token for token, result in results.items() if result == "Unregistered"] models.APNSDevice.objects.filter(registration_id__in=inactive_tokens).update(active=False) return results
Sends an APNS notification to one or more registration_ids. The registration_ids argument needs to be a list. Note that, if set, alert should always be a string. If it is not set, it won't be included in the notification. You will need to pass None to this for silent notifications.
def getRemainingCredits(self): """ Returns the remaining credits for the license key used after the request was made :return: String with remaining credits """ if 'status' in self._response.keys(): if (self._response['status'] is not None) and ('remaining_credits' in self._response['status'].keys()): if self._response['status']['remaining_credits'] is not None: return self._response['status']['remaining_credits'] else: return '' else: print("Not remaining credits field\n") else: return None
Returns the remaining credits for the license key used after the request was made :return: String with remaining credits
def vote_up_idea(self, *args, **kwargs): """ :allowed_param: 'ideaId', 'myVote' (optional) """ kwargs.update({'headers': {'content-type':'application/json'}}) return bind_api( api=self, path='/ideas/{ideaId}/vote/up', method='POST', payload_type='vote', allowed_param=['ideaId'], post_param=['myVote'] )(*args, **kwargs)
:allowed_param: 'ideaId', 'myVote' (optional)
def term_count_buckets(self): """ Returns: dict: A dictionary that maps occurrence counts to the terms that appear that many times in the text. """ buckets = {} for term, count in self.term_counts().items(): if count in buckets: buckets[count].append(term) else: buckets[count] = [term] return buckets
Returns: dict: A dictionary that maps occurrence counts to the terms that appear that many times in the text.
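A standalone sketch of the bucketing step, assuming `term_counts()` returned the small made-up mapping below.

```python
# Invert a term -> count mapping into count -> [terms].
term_counts = {'whale': 3, 'sea': 3, 'ship': 1}

buckets = {}
for term, count in term_counts.items():
    if count in buckets:
        buckets[count].append(term)
    else:
        buckets[count] = [term]

print(buckets)  # {3: ['whale', 'sea'], 1: ['ship']}
```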
def _contains_blinded_text(stats_xml): """ Heuristic to determine whether the treebank has blinded texts or not """ tree = ET.parse(stats_xml) root = tree.getroot() total_tokens = int(root.find('size/total/tokens').text) unique_lemmas = int(root.find('lemmas').get('unique')) # assume the corpus is largely blinded when unique lemmas make up less than 1% of the total tokens return (unique_lemmas / total_tokens) < 0.01
Heuristic to determine whether the treebank has blinded texts or not
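A worked instance of the 1% threshold with made-up counts, using the same true division the function relies on.

```python
total_tokens = 50000   # from size/total/tokens in stats.xml
unique_lemmas = 300    # from the 'unique' attribute of the lemmas element

# 300 / 50000 = 0.006 < 0.01, so this treebank would be treated as blinded.
print((unique_lemmas / total_tokens) < 0.01)  # True
```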
def close(self): """ Close all pooled connections and disable the pool. """ # Disable access to the pool old_pool, self.pool = self.pool, None try: while True: conn = old_pool.get(block=False) if conn: conn.close() except queue.Empty: pass
Close all pooled connections and disable the pool.
def alert_type(self, alert_type): """Sets the alert_type of this Alert. Alert type. # noqa: E501 :param alert_type: The alert_type of this Alert. # noqa: E501 :type: str """ allowed_values = ["CLASSIC", "THRESHOLD"] # noqa: E501 if alert_type not in allowed_values: raise ValueError( "Invalid value for `alert_type` ({0}), must be one of {1}" # noqa: E501 .format(alert_type, allowed_values) ) self._alert_type = alert_type
Sets the alert_type of this Alert. Alert type. # noqa: E501 :param alert_type: The alert_type of this Alert. # noqa: E501 :type: str
def _create_repo(line, filename): ''' Create repo ''' repo = {} if line.startswith('#'): repo['enabled'] = False line = line[1:] else: repo['enabled'] = True cols = salt.utils.args.shlex_split(line.strip()) repo['compressed'] = not cols[0] in 'src' repo['name'] = cols[1] repo['uri'] = cols[2] repo['file'] = os.path.join(OPKG_CONFDIR, filename) if len(cols) > 3: _set_repo_options(repo, cols[3:]) return repo
Create repo
def resize(self, logical_size): """Starts resizing this medium. This means that the nominal size of the medium is set to the new value. Both increasing and decreasing the size is possible, and there are no safety checks, since VirtualBox does not make any assumptions about the medium contents. Resizing usually needs additional disk space, and possibly also some temporary disk space. Note that resize does not create a full temporary copy of the medium, so the additional disk space requirement is usually much lower than using the clone operation. This medium will be placed to :py:attr:`MediumState.locked_write` state for the duration of this operation. Please note that the results can be either returned straight away, or later as the result of the background operation via the object returned via the @a progress parameter. in logical_size of type int New nominal capacity of the medium in bytes. return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`VBoxErrorNotSupported` Medium format does not support resizing. """ if not isinstance(logical_size, baseinteger): raise TypeError("logical_size can only be an instance of type baseinteger") progress = self._call("resize", in_p=[logical_size]) progress = IProgress(progress) return progress
Starts resizing this medium. This means that the nominal size of the medium is set to the new value. Both increasing and decreasing the size is possible, and there are no safety checks, since VirtualBox does not make any assumptions about the medium contents. Resizing usually needs additional disk space, and possibly also some temporary disk space. Note that resize does not create a full temporary copy of the medium, so the additional disk space requirement is usually much lower than using the clone operation. This medium will be placed to :py:attr:`MediumState.locked_write` state for the duration of this operation. Please note that the results can be either returned straight away, or later as the result of the background operation via the object returned via the @a progress parameter. in logical_size of type int New nominal capacity of the medium in bytes. return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`VBoxErrorNotSupported` Medium format does not support resizing.
def verb_chain_texts(self): """The list of texts of ``verb_chains`` layer elements.""" if not self.is_tagged(VERB_CHAINS): self.tag_verb_chains() return self.texts(VERB_CHAINS)
The list of texts of ``verb_chains`` layer elements.
def metrics_for_mode(self, mode): """Metrics available for a given mode.""" if mode not in self._values: logging.info("Mode %s not found", mode) return [] return sorted(list(self._values[mode].keys()))
Metrics available for a given mode.
def probabilities(value, rows=0, cols=0): """ :param value: input string, comma separated or space separated :param rows: the number of rows if the floats are in a matrix (0 otherwise) :param cols: the number of columns if the floats are in a matrix (0 otherwise) :returns: a list of probabilities >>> probabilities('') [] >>> probabilities('1') [1.0] >>> probabilities('0.1 0.2') [0.1, 0.2] >>> probabilities('0.1, 0.2') # commas are ignored [0.1, 0.2] """ probs = list(map(probability, value.replace(',', ' ').split())) if rows and cols: probs = numpy.array(probs).reshape((rows, cols)) return probs
:param value: input string, comma separated or space separated :param rows: the number of rows if the floats are in a matrix (0 otherwise) :param cols: the number of columns if the floats are in a matrix (0 otherwise) :returns: a list of probabilities >>> probabilities('') [] >>> probabilities('1') [1.0] >>> probabilities('0.1 0.2') [0.1, 0.2] >>> probabilities('0.1, 0.2') # commas are ignored [0.1, 0.2]
def event_params(segments, params, band=None, n_fft=None, slopes=None, prep=None, parent=None): """Compute event parameters. Parameters ---------- segments : instance of wonambi.trans.select.Segments list of segments, with time series and metadata params : dict of bool, or str 'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy', 'peakef'. If 'all', a dict will be created with these keys and all values as True, so that all parameters are returned. band : tuple of float band of interest for power and energy n_fft : int length of FFT. if shorter than input signal, signal is truncated; if longer, signal is zero-padded to length slopes : dict of bool 'avg_slope', 'max_slope', 'prep', 'invert' prep : dict of bool same keys as params. if True, segment['trans_data'] will be used as dat parent : QMainWindow for use with GUI only Returns ------- list of dict list of segments, with time series, metadata and parameters """ if parent is not None: progress = QProgressDialog('Computing parameters', 'Abort', 0, len(segments) - 1, parent) progress.setWindowModality(Qt.ApplicationModal) param_keys = ['dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakpf', 'energy', 'peakef'] if params == 'all': params = {k: 1 for k in param_keys} if prep is None: prep = {k: 0 for k in param_keys} if band is None: band = (None, None) params_out = [] evt_output = False for i, seg in enumerate(segments): out = dict(seg) dat = seg['data'] if params['dur']: out['dur'] = float(dat.number_of('time')) / dat.s_freq evt_output = True if params['minamp']: dat1 = dat if prep['minamp']: dat1 = seg['trans_data'] out['minamp'] = math(dat1, operator=_amin, axis='time') evt_output = True if params['maxamp']: dat1 = dat if prep['maxamp']: dat1 = seg['trans_data'] out['maxamp'] = math(dat1, operator=_amax, axis='time') evt_output = True if params['ptp']: dat1 = dat if prep['ptp']: dat1 = seg['trans_data'] out['ptp'] = math(dat1, operator=_ptp, axis='time') evt_output = True if params['rms']: dat1 = dat if prep['rms']: dat1 = seg['trans_data'] out['rms'] = math(dat1, operator=(square, _mean, sqrt), axis='time') evt_output = True for pw, pk in [('power', 'peakpf'), ('energy', 'peakef')]: if params[pw] or params[pk]: evt_output = True if prep[pw] or prep[pk]: prep_pw, prep_pk = band_power(seg['trans_data'], band, scaling=pw, n_fft=n_fft) if not (prep[pw] and prep[pk]): raw_pw, raw_pk = band_power(dat, band, scaling=pw, n_fft=n_fft) if prep[pw]: out[pw] = prep_pw else: out[pw] = raw_pw if prep[pk]: out[pk] = prep_pk else: out[pk] = raw_pk if slopes: evt_output = True out['slope'] = {} dat1 = dat if slopes['prep']: dat1 = seg['trans_data'] if slopes['invert']: dat1 = math(dat1, operator=negative, axis='time') if slopes['avg_slope'] and slopes['max_slope']: level = 'all' elif slopes['avg_slope']: level = 'average' else: level = 'maximum' for chan in dat1.axis['chan'][0]: d = dat1(chan=chan)[0] out['slope'][chan] = get_slopes(d, dat.s_freq, level=level) if evt_output: timeline = dat.axis['time'][0] out['start'] = timeline[0] out['end'] = timeline[-1] params_out.append(out) if parent: progress.setValue(i) if progress.wasCanceled(): msg = 'Analysis canceled by user.' parent.statusBar().showMessage(msg) return if parent: progress.close() return params_out
Compute event parameters. Parameters ---------- segments : instance of wonambi.trans.select.Segments list of segments, with time series and metadata params : dict of bool, or str 'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy', 'peakef'. If 'all', a dict will be created with these keys and all values as True, so that all parameters are returned. band : tuple of float band of interest for power and energy n_fft : int length of FFT. if shorter than input signal, signal is truncated; if longer, signal is zero-padded to length slopes : dict of bool 'avg_slope', 'max_slope', 'prep', 'invert' prep : dict of bool same keys as params. if True, segment['trans_data'] will be used as dat parent : QMainWindow for use with GUI only Returns ------- list of dict list of segments, with time series, metadata and parameters
def spy(object): """Spy an object. Spying means that all functions will behave as before, so they will be side effects, but the interactions can be verified afterwards. Returns Dummy-like, almost empty object as proxy to `object`. The *returned* object must be injected and used by the code under test; after that all interactions can be verified as usual. T.i. the original object **will not be patched**, and has no further knowledge as before. E.g.:: import time time = spy(time) # inject time do_work(..., time) verify(time).time() """ if inspect.isclass(object) or inspect.ismodule(object): class_ = None else: class_ = object.__class__ class Spy(_Dummy): if class_: __class__ = class_ def __getattr__(self, method_name): return RememberedProxyInvocation(theMock, method_name) def __repr__(self): name = 'Spied' if class_: name += class_.__name__ return "<%s id=%s>" % (name, id(self)) obj = Spy() theMock = Mock(obj, strict=True, spec=object) mock_registry.register(obj, theMock) return obj
Spy an object. Spying means that all functions will behave as before, so they will be side effects, but the interactions can be verified afterwards. Returns Dummy-like, almost empty object as proxy to `object`. The *returned* object must be injected and used by the code under test; after that all interactions can be verified as usual. T.i. the original object **will not be patched**, and has no further knowledge as before. E.g.:: import time time = spy(time) # inject time do_work(..., time) verify(time).time()
def generate_ulid_as_uuid(timestamp=None, monotonic=False): """ Generate an ULID, but expressed as an UUID. :param timestamp: An optional timestamp override. If `None`, the current time is used. :type timestamp: int|float|datetime.datetime|None :param monotonic: Attempt to ensure ULIDs are monotonically increasing. Monotonic behavior is not guaranteed when used from multiple threads. :type monotonic: bool :return: UUID containing ULID data. :rtype: uuid.UUID """ return uuid.UUID(bytes=generate_binary_ulid(timestamp, monotonic=monotonic))
Generate an ULID, but expressed as an UUID. :param timestamp: An optional timestamp override. If `None`, the current time is used. :type timestamp: int|float|datetime.datetime|None :param monotonic: Attempt to ensure ULIDs are monotonically increasing. Monotonic behavior is not guaranteed when used from multiple threads. :type monotonic: bool :return: UUID containing ULID data. :rtype: uuid.UUID
def flux_variability(model, reactions, fixed, tfba, solver): """Find the variability of each reaction while fixing certain fluxes. Yields the reaction id, and a tuple of minimum and maximum value for each of the given reactions. The fixed reactions are given in a dictionary as a reaction id to value mapping. This is an implementation of flux variability analysis (FVA) as described in [Mahadevan03]_. Args: model: MetabolicModel to solve. reactions: Reactions on which to report variability. fixed: dict of additional lower bounds on reaction fluxes. tfba: If True, enable thermodynamic constraints. solver: LP solver instance to use. Returns: Iterator over pairs of reaction ID and bounds. Bounds are returned as pairs of lower and upper values. """ fba = _get_fba_problem(model, tfba, solver) for reaction_id, value in iteritems(fixed): flux = fba.get_flux_var(reaction_id) fba.prob.add_linear_constraints(flux >= value) def min_max_solve(reaction_id): for direction in (-1, 1): yield fba.flux_bound(reaction_id, direction) # Solve for each reaction for reaction_id in reactions: yield reaction_id, tuple(min_max_solve(reaction_id))
Find the variability of each reaction while fixing certain fluxes. Yields the reaction id, and a tuple of minimum and maximum value for each of the given reactions. The fixed reactions are given in a dictionary as a reaction id to value mapping. This is an implementation of flux variability analysis (FVA) as described in [Mahadevan03]_. Args: model: MetabolicModel to solve. reactions: Reactions on which to report variability. fixed: dict of additional lower bounds on reaction fluxes. tfba: If True, enable thermodynamic constraints. solver: LP solver instance to use. Returns: Iterator over pairs of reaction ID and bounds. Bounds are returned as pairs of lower and upper values.
def class_name(self) -> str: """ Makes the first letter uppercase, keeps the rest of the camelCaseApiName. """ if not self.api_name: # empty string return self.api_name # end if return self.api_name[0].upper() + self.api_name[1:]
Makes the first letter uppercase, keeps the rest of the camelCaseApiName.
def _matrix_grad(q, h, h_dx, t, t_prime): ''' Returns the gradient with respect to a single variable''' N = len(q) W = np.zeros([N, N]) Wprime = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) Wprime[i, i] = \ 0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)]) tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)]) grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \ + (q - t).T.dot(Wprime).dot(q - t) return grad
Returns the gradient with respect to a single variable
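Read directly from the code, with W and W' the diagonal matrices built in the loop, the returned value is:

```latex
\mathrm{grad} \;=\; -2\,(q - t)^{\top} W \left(t' \odot h_{dx}\right) \;+\; (q - t)^{\top} W' (q - t),
\qquad
W_{ii} = \tfrac{1}{2}\bigl(h_{\min(i+1,\,N-1)} - h_{\max(i-1,\,0)}\bigr)
```

with W'_{ii} defined the same way from h_{dx}, and the element-wise product forming `tgrad`.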
def close(self): ''' Close the application and all installed plugins. ''' for plugin in self.plugins: if hasattr(plugin, 'close'): plugin.close() self.stopped = True
Close the application and all installed plugins.
def get_recordInfo(self, headers, zoneID, zone, records): """Get the information of the records.""" if 'None' in records: #If ['None'] in record argument, query all. recordQueryEnpoint = '/' + zoneID + '/dns_records&per_page=100' recordUrl = self.BASE_URL + recordQueryEnpoint recordRequest = requests.get(recordUrl, headers=headers) recordResponse = recordRequest.json()['result'] dev = [] num = 0 for value in recordResponse: recordName = recordResponse[num]['name'] dev.append(recordName) num = num + 1 records = dev updateRecords = [] for record in records: if zone in record: recordFullname = record else: recordFullname = record + '.' + zone recordQuery = '/' + zoneID + '/dns_records?name=' + recordFullname recordUrl = self.BASE_URL + recordQuery recordInfoRequest = requests.get(recordUrl, headers=headers) recordInfoResponse = recordInfoRequest.json()['result'][0] recordID = recordInfoResponse['id'] recordType = recordInfoResponse['type'] recordProxy = str(recordInfoResponse['proxied']) recordContent = recordInfoResponse['content'] if recordProxy == 'True': recordProxied = True else: recordProxied = False updateRecords.append([recordID, recordFullname, recordType, recordContent, recordProxied]) return updateRecords
Get the information of the records.
def boot(self, name, flavor_id=0, image_id=0, timeout=300, **kwargs): ''' Boot a cloud server. ''' nt_ks = self.compute_conn kwargs['name'] = name kwargs['flavor'] = flavor_id kwargs['image'] = image_id or None ephemeral = kwargs.pop('ephemeral', []) block_device = kwargs.pop('block_device', []) boot_volume = kwargs.pop('boot_volume', None) snapshot = kwargs.pop('snapshot', None) swap = kwargs.pop('swap', None) kwargs['block_device_mapping_v2'] = _parse_block_device_mapping_v2( block_device=block_device, boot_volume=boot_volume, snapshot=snapshot, ephemeral=ephemeral, swap=swap ) response = nt_ks.servers.create(**kwargs) self.uuid = response.id self.password = getattr(response, 'adminPass', None) start = time.time() trycount = 0 while True: trycount += 1 try: return self.server_show_libcloud(self.uuid) except Exception as exc: log.debug( 'Server information not yet available: %s', exc ) time.sleep(1) if time.time() - start > timeout: log.error('Timed out after %s seconds ' 'while waiting for data', timeout) return False log.debug( 'Retrying server_show() (try %s)', trycount )
Boot a cloud server.
def post(self, request, key): """Create new email address that will wait for validation""" email = request.POST.get('email') user_id = request.POST.get('user') if not email: return http.HttpResponseBadRequest() try: EmailAddressValidation.objects.create(address=email, user_id=user_id) except IntegrityError: # 409 Conflict # duplicated entries # email exist and it's waiting for validation return http.HttpResponse(status=409) return http.HttpResponse(status=201)
Create new email address that will wait for validation
def getFactory(self): """ Return a server factory which creates AMP protocol instances. """ factory = ServerFactory() def protocol(): proto = CredReceiver() proto.portal = Portal( self.loginSystem, [self.loginSystem, OneTimePadChecker(self._oneTimePads)]) return proto factory.protocol = protocol return factory
Return a server factory which creates AMP protocol instances.
def Cx(mt, x): """ Return the Cx """ return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5)
Return the Cx
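In commutation-function notation, the value computed above is, with v = 1/(1+i):

```latex
C_x \;=\; \left(\frac{1}{1+i}\right)^{x+1} d_x \,(1+i)^{1/2} \;=\; v^{\,x+1/2}\, d_x
```

so the trailing (1+i)^{1/2} factor effectively discounts the deaths d_x to the middle of year x rather than to its end.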
def image_import(infile, force): """Import image anchore data from a JSON file.""" ecode = 0 try: with open(infile, 'r') as FH: savelist = json.loads(FH.read()) except Exception as err: anchore_print_err("could not load input file: " + str(err)) ecode = 1 if ecode == 0: for record in savelist: try: imageId = record['image']['imageId'] if contexts['anchore_db'].is_image_present(imageId) and not force: anchore_print("image ("+str(imageId)+") already exists in DB, skipping import.") else: imagedata = record['image']['imagedata'] try: rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata) if not rc: contexts['anchore_db'].delete_image(imageId) raise Exception("save to anchore DB failed") except Exception as err: contexts['anchore_db'].delete_image(imageId) raise err except Exception as err: anchore_print_err("could not store image ("+str(imageId)+") from import file: "+ str(err)) ecode = 1 sys.exit(ecode)
Import image anchore data from a JSON file.
def get_stats(self, start=int(time()), stop=int(time())+10, step=10): """ Get stats of a monitored machine :param start: Time formatted as integer, from when to fetch stats (default now) :param stop: Time formatted as integer, until when to fetch stats (default +10 seconds) :param step: Step to fetch stats (default 10 seconds) :returns: A dict of stats """ payload = { 'v': 2, 'start': start, 'stop': stop, 'step': step } data = json.dumps(payload) req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/stats", data=data) stats = req.get().json() return stats
Get stats of a monitored machine :param start: Time formatted as integer, from when to fetch stats (default now) :param stop: Time formatted as integer, until when to fetch stats (default +10 seconds) :param step: Step to fetch stats (default 10 seconds) :returns: A dict of stats
def save(self): ''' Save an instance of a Union object ''' client = self._new_api_client() params = {'id': self.id} if hasattr(self, 'id') else {} action = 'patch' if hasattr(self, 'id') else 'post' saved_model = client.make_request(self, action, url_params=params, post_data=self._to_json) self.__init__(**saved_model._to_dict)
Save an instance of a Union object
def WriteUInt256(self, value): """ Write a UInt256 type to the stream. Args: value (UInt256): Raises: Exception: when `value` is not of neocore.UInt256 type. """ if type(value) is UInt256: value.Serialize(self) else: raise Exception("Cannot write value that is not UInt256")
Write a UInt256 type to the stream. Args: value (UInt256): Raises: Exception: when `value` is not of neocore.UInt256 type.
def stop_polling(self): """ Break long-polling process. :return: """ if hasattr(self, '_polling') and self._polling: log.info('Stop polling...') self._polling = False
Break long-polling process. :return:
def pull_astro_coords(voevent, index=0): """ Deprecated alias of :func:`.get_event_position` """ import warnings warnings.warn( """ The function `pull_astro_coords` has been renamed to `get_event_position`. This alias is preserved for backwards compatibility, and may be removed in a future release. """, FutureWarning) return get_event_position(voevent, index)
Deprecated alias of :func:`.get_event_position`
def execute_plan(self, plan, allow_rf_change=False): """Submit reassignment plan for execution.""" reassignment_path = '{admin}/{reassignment_node}'\ .format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE) plan_json = dump_json(plan) base_plan = self.get_cluster_plan() if not validate_plan(plan, base_plan, allow_rf_change=allow_rf_change): _log.error('Given plan is invalid. Aborting new reassignment plan ... {plan}'.format(plan=plan)) return False # Send proposed-plan to zookeeper try: _log.info('Sending plan to Zookeeper...') self.create(reassignment_path, plan_json, makepath=True) _log.info( 'Re-assign partitions node in Zookeeper updated successfully ' 'with {plan}'.format(plan=plan), ) return True except NodeExistsError: _log.warning('Previous plan in progress. Exiting..') _log.warning('Aborting new reassignment plan... {plan}'.format(plan=plan)) in_progress_plan = load_json(self.get(reassignment_path)[0]) in_progress_partitions = [ '{topic}-{p_id}'.format( topic=p_data['topic'], p_id=str(p_data['partition']), ) for p_data in in_progress_plan['partitions'] ] _log.warning( '{count} partition(s) reassignment currently in progress:-' .format(count=len(in_progress_partitions)), ) _log.warning( '{partitions}. In Progress reassignment plan...'.format( partitions=', '.join(in_progress_partitions), ), ) return False except Exception as e: _log.error( 'Could not re-assign partitions {plan}. Error: {e}' .format(plan=plan, e=e), ) return False
Submit reassignment plan for execution.
def timeseries(self): """ Load time series It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries()` looks for time series of the according sector in :class:`~.grid.network.TimeSeries` object. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'. """ if self._timeseries is None: if isinstance(self.grid, MVGrid): voltage_level = 'mv' elif isinstance(self.grid, LVGrid): voltage_level = 'lv' ts_total = None for sector in self.consumption.keys(): consumption = self.consumption[sector] # check if load time series for MV and LV are differentiated try: ts = self.grid.network.timeseries.load[ sector, voltage_level].to_frame('p') except KeyError: try: ts = self.grid.network.timeseries.load[ sector].to_frame('p') except KeyError: logger.exception( "No timeseries for load of type {} " "given.".format(sector)) raise ts = ts * consumption ts_q = self.timeseries_reactive if ts_q is not None: ts['q'] = ts_q.q else: ts['q'] = ts['p'] * self.q_sign * tan( acos(self.power_factor)) if ts_total is None: ts_total = ts else: ts_total.p += ts.p ts_total.q += ts.q return ts_total else: return self._timeseries
Load time series It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries()` looks for time series of the according sector in :class:`~.grid.network.TimeSeries` object. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'.
def validate_wrap(self, value): ''' Checks that value is a ``dict``, that every key is a valid MongoDB key, and that every value validates based on DictField.value_type ''' if not isinstance(value, dict): self._fail_validation_type(value, dict) for k, v in value.items(): self._validate_key_wrap(k) try: self.value_type.validate_wrap(v) except BadValueException as bve: self._fail_validation(value, 'Bad value for key %s' % k, cause=bve)
Checks that value is a ``dict``, that every key is a valid MongoDB key, and that every value validates based on DictField.value_type
def copy(self): ''' makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events ''' try: tmp = self.__class__() except Exception: tmp = self.__class__(self._pdict) tmp._serializers = self._serializers tmp.__deserializers = self.__deserializers return tmp
makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
def get_path(): ''' Returns a list of items in the SYSTEM path CLI Example: .. code-block:: bash salt '*' win_path.get_path ''' ret = salt.utils.stringutils.to_unicode( __utils__['reg.read_value']( 'HKEY_LOCAL_MACHINE', 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment', 'PATH')['vdata'] ).split(';') # Trim ending backslash return list(map(_normalize_dir, ret))
Returns a list of items in the SYSTEM path CLI Example: .. code-block:: bash salt '*' win_path.get_path
def get_library_state_copy_instance(self, lib_os_path): """ A method to get a state copy of the library specified via the lib_os_path. :param lib_os_path: the location of the library to get a copy for :return: """ # originally libraries were called like this; DO NOT DELETE; interesting for performance tests # state_machine = storage.load_state_machine_from_path(lib_os_path) # return state_machine.version, state_machine.root_state # TODO observe changes on file system and update data if lib_os_path in self._loaded_libraries: # this list can also be taken to open library state machines TODO -> implement it -> because faster state_machine = self._loaded_libraries[lib_os_path] # logger.info("Take copy of {0}".format(lib_os_path)) # as long as the a library state root state is never edited so the state first has to be copied here state_copy = copy.deepcopy(state_machine.root_state) return state_machine.version, state_copy else: state_machine = storage.load_state_machine_from_path(lib_os_path) self._loaded_libraries[lib_os_path] = state_machine if config.global_config.get_config_value("NO_PROGRAMMATIC_CHANGE_OF_LIBRARY_STATES_PERFORMED", False): return state_machine.version, state_machine.root_state else: state_copy = copy.deepcopy(state_machine.root_state) return state_machine.version, state_copy
A method to get a state copy of the library specified via the lib_os_path. :param lib_os_path: the location of the library to get a copy for :return:
def get_resource(url): """ Issue a GET request to R25 with the given url and return a response as an etree element. """ response = R25_DAO().getURL(url, {"Accept": "text/xml"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) tree = etree.fromstring(response.data.strip()) # XHTML response is an error response xhtml = tree.xpath("//xhtml:html", namespaces=nsmap) if len(xhtml): raise DataFailureException(url, 500, response.data) return tree
Issue a GET request to R25 with the given url and return a response as an etree element.
def flush(**kwargs): """Flush the specified names from the specified databases. This can be highly destructive as it destroys all data. """ expression = lambda target, table: target.execute(table.delete()) test = lambda target, table: not table.exists(target) op(expression, reversed(metadata.sorted_tables), test=test, primary='flush', secondary='flush', **kwargs)
Flush the specified names from the specified databases. This can be highly destructive as it destroys all data.
def prior(self): """ Model prior for particular model. Product of eclipse probability (``self.prob``), the fraction of scenario that is allowed by the various constraints (``self.selectfrac``), and all additional factors in ``self.priorfactors``. """ prior = self.prob * self.selectfrac for f in self.priorfactors: prior *= self.priorfactors[f] return prior
Model prior for particular model. Product of eclipse probability (``self.prob``), the fraction of scenario that is allowed by the various constraints (``self.selectfrac``), and all additional factors in ``self.priorfactors``.
def _query_select_options(self, query, select_columns=None): """ Add select load options to query. The goal is to only SQL select what is requested :param query: SQLAlchemy Query obj :param select_columns: (list) of columns :return: SQLAlchemy Query obj """ if select_columns: _load_options = list() for column in select_columns: if "." in column: model_relation = self.get_related_model(column.split(".")[0]) if not self.is_model_already_joinded(query, model_relation): query = query.join(model_relation) _load_options.append( Load(model_relation).load_only(column.split(".")[1]) ) else: if not self.is_relation(column) and not hasattr( getattr(self.obj, column), "__call__" ): _load_options.append(Load(self.obj).load_only(column)) else: _load_options.append(Load(self.obj)) query = query.options(*tuple(_load_options)) return query
Add select load options to query. The goal is to only SQL select what is requested :param query: SQLAlchemy Query obj :param select_columns: (list) of columns :return: SQLAlchemy Query obj
def rfc2822_datetime(s): """ Parses an RFC 2822 date string and returns a UTC datetime object, or None if parsing failed. :param s: RFC 2822-formatted date string :return: datetime or None """ date_tuple = parsedate(s) if date_tuple is None: return None return datetime.datetime(*date_tuple[:6]).replace(tzinfo=pytz.utc)
Parses an RFC 2822 date string and returns a UTC datetime object, or None if parsing failed. :param s: RFC 2822-formatted date string :return: datetime or None
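A hypothetical usage sketch, assuming `rfc2822_datetime` is importable and that `parsedate` is `email.utils.parsedate`; note that the parsed fields are taken as written and stamped UTC, so the original offset is not converted.

```python
dt = rfc2822_datetime('Mon, 20 Nov 1995 19:12:08 -0500')
print(dt)                              # 1995-11-20 19:12:08+00:00
print(rfc2822_datetime('not a date'))  # None
```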
def write_roi(self, outfile=None, save_model_map=False, **kwargs): """Write current state of the analysis to a file. This method writes an XML model definition, a ROI dictionary, and a FITS source catalog file. A previously saved analysis state can be reloaded from the ROI dictionary file with the `~fermipy.gtanalysis.GTAnalysis.load_roi` method. Parameters ---------- outfile : str String prefix of the output files. The extension of this string will be stripped when generating the XML, YAML and npy filenames. make_plots : bool Generate diagnostic plots. save_model_map : bool Save the current counts model to a FITS file. """ # extract the results in a convenient format make_plots = kwargs.get('make_plots', False) save_weight_map = kwargs.get('save_weight_map', False) if outfile is None: pathprefix = os.path.join(self.config['fileio']['workdir'], 'results') elif not os.path.isabs(outfile): pathprefix = os.path.join(self.config['fileio']['workdir'], outfile) else: pathprefix = outfile pathprefix = utils.strip_suffix(pathprefix, ['fits', 'yaml', 'npy']) # pathprefix, ext = os.path.splitext(pathprefix) prefix = os.path.basename(pathprefix) xmlfile = pathprefix + '.xml' fitsfile = pathprefix + '.fits' npyfile = pathprefix + '.npy' self.write_xml(xmlfile) self.write_fits(fitsfile) if not self.config['gtlike']['use_external_srcmap']: for c in self.components: c.like.logLike.saveSourceMaps(str(c.files['srcmap'])) if save_model_map: self.write_model_map(prefix) if save_weight_map: self.write_weight_map(prefix) o = {} o['roi'] = copy.deepcopy(self._roi_data) o['config'] = copy.deepcopy(self.config) o['version'] = fermipy.__version__ o['stversion'] = fermipy.get_st_version() o['sources'] = {} for s in self.roi.sources: o['sources'][s.name] = copy.deepcopy(s.data) for i, c in enumerate(self.components): o['roi']['components'][i][ 'src_expscale'] = copy.deepcopy(c.src_expscale) self.logger.info('Writing %s...', npyfile) np.save(npyfile, o) if make_plots: self.make_plots(prefix, None, **kwargs.get('plotting', {}))
Write current state of the analysis to a file. This method writes an XML model definition, a ROI dictionary, and a FITS source catalog file. A previously saved analysis state can be reloaded from the ROI dictionary file with the `~fermipy.gtanalysis.GTAnalysis.load_roi` method. Parameters ---------- outfile : str String prefix of the output files. The extension of this string will be stripped when generating the XML, YAML and npy filenames. make_plots : bool Generate diagnostic plots. save_model_map : bool Save the current counts model to a FITS file.
def _parse_single_response(cls, response_data): """de-serialize a JSON-RPC Response/error :Returns: | [result, id] for Responses :Raises: | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC """ if not isinstance(response_data, dict): raise errors.RPCInvalidRequest("No valid RPC-package.") if "id" not in response_data: raise errors.RPCInvalidRequest("""Invalid Response, "id" missing.""") request_id = response_data['id'] if "jsonrpc" not in response_data: raise errors.RPCInvalidRequest("""Invalid Response, "jsonrpc" missing.""", request_id) if not isinstance(response_data["jsonrpc"], (str, unicode)): raise errors.RPCInvalidRequest("""Invalid Response, "jsonrpc" must be a string.""") if response_data["jsonrpc"] != "2.0": raise errors.RPCInvalidRequest("""Invalid jsonrpc version.""", request_id) error = response_data.get('error', None) result = response_data.get('result', None) if error and result: raise errors.RPCInvalidRequest("""Invalid Response, only "result" OR "error" allowed.""", request_id) if error: if not isinstance(error, dict): raise errors.RPCInvalidRequest("Invalid Response, invalid error-object.", request_id) if not ("code" in error and "message" in error): raise errors.RPCInvalidRequest("Invalid Response, invalid error-object.", request_id) error_data = error.get("data", None) if error['code'] in errors.ERROR_CODE_CLASS_MAP: raise errors.ERROR_CODE_CLASS_MAP[error['code']](error_data, request_id) else: error_object = errors.RPCFault(error_data, request_id) error_object.error_code = error['code'] error_object.message = error['message'] raise error_object return result, request_id
de-serialize a JSON-RPC Response/error :Returns: | [result, id] for Responses :Raises: | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC
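For illustration, a well-formed JSON-RPC 2.0 response that this parser would accept; the enclosing object name is a stand-in, since only the method is shown here:

# Hypothetical call; `protocol` stands for whatever class defines _parse_single_response.
response_data = {"jsonrpc": "2.0", "id": 7, "result": [1, 2, 3]}
result, request_id = protocol._parse_single_response(response_data)
# result == [1, 2, 3], request_id == 7
# A response carrying an "error" object instead raises the matching RPCFault subclass.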
def parse_roles(self, fetched_role, params): """ Parse a single IAM role and fetch additional data """ role = {} role['instances_count'] = 'N/A' # When resuming upon throttling error, skip if already fetched if fetched_role['RoleName'] in self.roles: return api_client = params['api_client'] # Ensure consistent attribute names across resource types role['id'] = fetched_role.pop('RoleId') role['name'] = fetched_role.pop('RoleName') role['arn'] = fetched_role.pop('Arn') # Get other attributes get_keys(fetched_role, role, [ 'CreateDate', 'Path']) # Get role policies policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name']) if len(policies): role['inline_policies'] = policies role['inline_policies_count'] = len(policies) # Get instance profiles profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles']) manage_dictionary(role, 'instance_profiles', {}) for profile in profiles['InstanceProfiles']: manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {}) role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn'] role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName'] # Get trust relationship role['assume_role_policy'] = {} role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument') # Save role self.roles[role['id']] = role
Parse a single IAM role and fetch additional data
def set_interface(self, interface): """Add update toolbar callback to the interface""" self.interface = interface self.interface.callbacks.update_toolbar = self._update self._update()
Add update toolbar callback to the interface
def topk(self, column_name, k=10, reverse=False): """ Get top k rows according to the given column. Result is according to and sorted by `column_name` in the given order (default is descending). When `k` is small, `topk` is more efficient than `sort`. Parameters ---------- column_name : string The column to sort on k : int, optional The number of rows to return reverse : bool, optional If True, return the top k rows in ascending order, otherwise, in descending order. Returns ------- out : SFrame an SFrame containing the top k rows sorted by column_name. See Also -------- sort Examples -------- >>> sf = turicreate.SFrame({'id': range(1000)}) >>> sf['value'] = -sf['id'] >>> sf.topk('id', k=3) +--------+--------+ | id | value | +--------+--------+ | 999 | -999 | | 998 | -998 | | 997 | -997 | +--------+--------+ [3 rows x 2 columns] >>> sf.topk('value', k=3) +--------+--------+ | id | value | +--------+--------+ | 1 | -1 | | 2 | -2 | | 3 | -3 | +--------+--------+ [3 rows x 2 columns] """ if type(column_name) is not str: raise TypeError("column_name must be a string") sf = self[self[column_name].is_topk(k, reverse)] return sf.sort(column_name, ascending=reverse)
Get top k rows according to the given column. Result is according to and sorted by `column_name` in the given order (default is descending). When `k` is small, `topk` is more efficient than `sort`. Parameters ---------- column_name : string The column to sort on k : int, optional The number of rows to return reverse : bool, optional If True, return the top k rows in ascending order, otherwise, in descending order. Returns ------- out : SFrame an SFrame containing the top k rows sorted by column_name. See Also -------- sort Examples -------- >>> sf = turicreate.SFrame({'id': range(1000)}) >>> sf['value'] = -sf['id'] >>> sf.topk('id', k=3) +--------+--------+ | id | value | +--------+--------+ | 999 | -999 | | 998 | -998 | | 997 | -997 | +--------+--------+ [3 rows x 2 columns] >>> sf.topk('value', k=3) +--------+--------+ | id | value | +--------+--------+ | 1 | -1 | | 2 | -2 | | 3 | -3 | +--------+--------+ [3 rows x 2 columns]
def prepare_namespace(self, func): """ Prepares the function to be run after deserializing it. Re-associates any previously bound variables and modules from the closure Returns: callable: ready-to-call function """ if self.is_imethod: to_run = getattr(self.obj, self.imethod_name) else: to_run = func for varname, modulename in self.global_modules.items(): to_run.__globals__[varname] = __import__(modulename) if self.global_closure: to_run.__globals__.update(self.global_closure) if self.global_functions: to_run.__globals__.update(self.global_functions) return to_run
Prepares the function to be run after deserializing it. Re-associates any previously bound variables and modules from the closure Returns: callable: ready-to-call function
def get_user_attempts(request: AxesHttpRequest, credentials: dict = None) -> QuerySet: """ Get valid user attempts that match the given request and credentials. """ attempts = filter_user_attempts(request, credentials) if settings.AXES_COOLOFF_TIME is None: log.debug('AXES: Getting all access attempts from database because no AXES_COOLOFF_TIME is configured') return attempts threshold = get_cool_off_threshold(request.axes_attempt_time) log.debug('AXES: Getting access attempts that are newer than %s', threshold) return attempts.filter(attempt_time__gte=threshold)
Get valid user attempts that match the given request and credentials.
def get_pressure(self): """ Returns the pressure in Millibars """ self._init_pressure() # Ensure pressure sensor is initialised pressure = 0 data = self._pressure.pressureRead() if (data[0]): # Pressure valid pressure = data[1] return pressure
Returns the pressure in Millibars
def refresh_events(self): """ Retrieve the next N events from Google. """ now = datetime.datetime.now(tz=pytz.UTC) try: now, later = self.get_timerange_formatted(now) events_result = self.service.events().list( calendarId='primary', timeMin=now, timeMax=later, maxResults=10, singleEvents=True, orderBy='startTime', timeZone='utc' ).execute() self.events.clear() for event in events_result.get('items', []): self.events.append(GoogleCalendarEvent(event)) except HttpError as e: if e.resp.status in (500, 503): self.logger.warn("GoogleCalendar received %s while retrieving events" % e.resp.status) else: raise
Retrieve the next N events from Google.
def urlunsplit (urlparts): """Same as urlparse.urlunsplit but with extra UNC path handling for Windows OS.""" res = urlparse.urlunsplit(urlparts) if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]: # UNC paths must have 4 slashes: 'file:////server/path' # Depending on the path in urlparts[2], urlparse.urlunsplit() # left only two or three slashes. This is fixed below repl = 'file://' if urlparts[2].startswith('//') else 'file:/' res = res.replace('file:', repl) return res
Same as urlparse.urlunsplit but with extra UNC path handling for Windows OS.
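A small sketch of the non-Windows path, where the wrapper simply defers to the standard library; the UNC rewriting only applies to file URLs when os.name == 'nt':

parts = ('http', 'example.com', '/a/b', 'q=1', 'frag')
assert urlunsplit(parts) == 'http://example.com/a/b?q=1#frag'
# On Windows, a tuple like ('file', '', '//server/share/doc.txt', '', '') is meant to be
# rewritten so the result keeps the four slashes a UNC path needs: 'file:////server/share/doc.txt'.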
def register(self, name, func): """ Register a new callback.\ When the name/id is not found\ a new hook is created under its name,\ meaning the hook is usually created by\ the first registered callback :param str name: Hook name :param callable func: A func reference (callback) """ try: templatehook = self._registry[name] except KeyError: templatehook = self._register(name) templatehook.register(func)
Register a new callback.\ When the name/id is not found\ a new hook is created under its name,\ meaning the hook is usually created by\ the first registered callback :param str name: Hook name :param callable func: A func reference (callback)
def get(cls, id): """ Get the pool with id 'id'. """ # cached? if CACHE: if id in _cache['Pool']: log.debug('cache hit for pool %d' % id) return _cache['Pool'][id] log.debug('cache miss for pool %d' % id) try: pool = Pool.list({'id': id})[0] except (IndexError, KeyError): raise NipapNonExistentError('no pool with ID ' + str(id) + ' found') _cache['Pool'][id] = pool return pool
Get the pool with id 'id'.
def _flush_tile_queue_blits(self, surface): """ Blit the queued tiles and block until the tile queue is empty for pygame 1.9.4 + """ tw, th = self.data.tile_size ltw = self._tile_view.left * tw tth = self._tile_view.top * th self.data.prepare_tiles(self._tile_view) blit_list = [(image, (x * tw - ltw, y * th - tth)) for x, y, l, image in self._tile_queue] surface.blits(blit_list)
Blit the queued tiles and block until the tile queue is empty for pygame 1.9.4 +
def update_placeholders(self, format_string, placeholders): """ Update a format string renaming placeholders. """ # Tokenize the format string and process them output = [] for token in self.tokens(format_string): if token.group("key") in placeholders: output.append( "{%s%s}" % (placeholders[token.group("key")], token.group("format")) ) continue elif token.group("command"): # update any placeholders used in commands commands = parse_qsl(token.group("command"), keep_blank_values=True) # placeholders only used in `if` if "if" in [x[0] for x in commands]: items = [] for key, value in commands: if key == "if": # we have to rebuild from the parts we have condition = Condition(value) variable = condition.variable if variable in placeholders: variable = placeholders[variable] # negation via `!` not_ = "!" if not condition.default else "" condition_ = condition.condition or "" # if there is no condition then there is no # value if condition_: value_ = condition.value else: value_ = "" value = "{}{}{}{}".format( not_, variable, condition_, value_ ) if value: items.append("{}={}".format(key, value)) else: items.append(key) # we cannot use urlencode because it will escape things # like `!` output.append(r"\?{} ".format("&".join(items))) continue value = token.group(0) output.append(value) return u"".join(output)
Update a format string renaming placeholders.
def diff_with_models(self): """ Return a dict stating the differences between current state of models and the configuration itself. TODO: Detect fields that are in conf, but not in models """ missing_from_conf = defaultdict(set) for model in get_models(): db_tables_and_columns = get_db_tables_and_columns_of_model(model) for (table_name, columns) in db_tables_and_columns.items(): model_strategy = self.strategy.get(table_name) for column in columns: if not model_strategy or column not in model_strategy: missing_from_conf[table_name].add(column) return missing_from_conf
Return a dict stating the differences between current state of models and the configuration itself. TODO: Detect fields that are in conf, but not in models
def segs(self, word): """Returns a list of segments from a word Args: word (unicode): input word as Unicode IPA string Returns: list: list of strings corresponding to segments found in `word` """ return [m.group('all') for m in self.seg_regex.finditer(word)]
Returns a list of segments from a word Args: word (unicode): input word as Unicode IPA string Returns: list: list of strings corresponding to segments found in `word`
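Roughly how this is used in practice; the object name is an assumption, and the exact split depends entirely on `seg_regex`, which is not shown here:

# Hypothetical: `ft` is an instance of the class above, whose seg_regex exposes a
# named group 'all' for each IPA segment it recognises.
segments = ft.segs(u"tʃek")
# Something like ['tʃ', 'e', 'k'] if the regex treats 'tʃ' as a single affricate segment.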
def get(self, sid): """ Constructs a TaskChannelContext :param sid: The sid :returns: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext :rtype: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext """ return TaskChannelContext(self._version, workspace_sid=self._solution['workspace_sid'], sid=sid, )
Constructs a TaskChannelContext :param sid: The sid :returns: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext :rtype: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext
def filter_unmanaged_physnets(query): """Filter ports managed by other ML2 plugins """ config = cfg.CONF.ml2_arista managed_physnets = config['managed_physnets'] # Filter out ports bound to segments on physnets that we're not # managing segment_model = segment_models.NetworkSegment if managed_physnets: query = (query .join_if_necessary(segment_model) .filter(segment_model.physical_network.in_( managed_physnets))) return query
Filter ports managed by other ML2 plugins
def _query_iterator(self, result, chunksize, columns, coerce_float=True, parse_dates=None): """Return generator through chunked result set.""" while True: data = result.fetchmany(chunksize) if not data: break else: self.frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True) yield self.frame
Return generator through chunked result set.
def get_outcome(self, outcome): """ Returns the details of the outcome with the given id. :calls: `GET /api/v1/outcomes/:id \ <https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_ :param outcome: The outcome object or ID to return. :type outcome: :class:`canvasapi.outcome.Outcome` or int :returns: An Outcome object. :rtype: :class:`canvasapi.outcome.Outcome` """ from canvasapi.outcome import Outcome outcome_id = obj_or_id(outcome, "outcome", (Outcome,)) response = self.__requester.request( 'GET', 'outcomes/{}'.format(outcome_id) ) return Outcome(self.__requester, response.json())
Returns the details of the outcome with the given id. :calls: `GET /api/v1/outcomes/:id \ <https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_ :param outcome: The outcome object or ID to return. :type outcome: :class:`canvasapi.outcome.Outcome` or int :returns: An Outcome object. :rtype: :class:`canvasapi.outcome.Outcome`
def get(self, *args, **kwargs): """Handle reading of the model :param args: :param kwargs: """ # Create the model and fetch its data self.model = self.get_model(kwargs.get('id')) result = yield self.model.fetch() # If model is not found, return 404 if not result: LOGGER.debug('Not found') self.not_found() return # Stub to check for read permissions if not self.has_read_permission(): LOGGER.debug('Permission denied') self.permission_denied() return # Add the headers and return the content as JSON self.add_headers() self.finish(self.model_json())
Handle reading of the model :param args: :param kwargs:
def cli(env, volume_id, schedule_type): """Disables snapshots on the specified schedule for a given volume""" if (schedule_type not in ['INTERVAL', 'HOURLY', 'DAILY', 'WEEKLY']): raise exceptions.CLIAbort( '--schedule_type must be INTERVAL, HOURLY, DAILY, or WEEKLY') file_manager = SoftLayer.FileStorageManager(env.client) disabled = file_manager.disable_snapshots(volume_id, schedule_type) if disabled: click.echo('%s snapshots have been disabled for volume %s' % (schedule_type, volume_id))
Disables snapshots on the specified schedule for a given volume
def floor(self): """Round `x` and `y` down to integers.""" return Point(int(math.floor(self.x)), int(math.floor(self.y)))
Round `x` and `y` down to integers.
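A quick sketch (Point is assumed to be a simple x/y value object); note that math.floor rounds toward negative infinity, which matters for negative coordinates:

import math

p = Point(2.7, -1.2)
print(p.floor())  # Point(2, -2): -1.2 floors to -2, not -1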
def _parse_transactions(self, response): """ This method parses the CSV output in `get_transactions` to generate a usable list of transactions that use native python data types """ transactions = list() if response: f = StringIO(response) reader = csv.DictReader(f) for line in reader: txn = {} txn['date'] = datetime.strptime(line['Date'], '%d/%m/%Y %H:%M:%S') txn['description'] = line['Description'] txn['amount'] = float(line['Amount'].replace(',', '')) txn['reference'] = line['Transaction number'] txn['sender'] = line['???transfer.fromOwner???'] txn['recipient'] = line['???transfer.toOwner???'] txn['currency'] = 'TSH' txn['comment'] = line['Transaction type'] transactions.append(txn) return transactions
This method parses the CSV output in `get_transactions` to generate a usable list of transactions that use native python data types
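For illustration, a CSV payload using the column names this parser expects; the values and the client object are invented:

sample_csv = (
    'Date,Description,Amount,Transaction number,'
    '???transfer.fromOwner???,???transfer.toOwner???,Transaction type\n'
    '01/02/2019 10:30:00,Airtime purchase,"1,500.00",TX123,Alice,Vendor,Payment\n'
)
txns = client._parse_transactions(sample_csv)  # `client` is a hypothetical gateway instance
# txns[0]['amount'] == 1500.0, txns[0]['date'] == datetime(2019, 2, 1, 10, 30), currency is always 'TSH'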
def add_word(self, word, value): """ Adds word and associated value. If word already exists, its value is replaced. """ if not word: return node = self.root for c in word: try: node = node.children[c] except KeyError: n = TrieNode(c) node.children[c] = n node = n node.output = value
Adds word and associated value. If word already exists, its value is replaced.
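A minimal usage sketch, assuming a Trie wrapper that owns `self.root` (a TrieNode) and exposes the method above:

trie = Trie()               # hypothetical container class holding self.root
trie.add_word("cat", 1)
trie.add_word("car", 2)
trie.add_word("cat", 3)     # "cat" already exists, so its value is replaced with 3
trie.add_word("", 99)       # empty words are ignored entirely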
def ConsultarTiposContingencia(self, sep="||"):
    "Get the code and description for each contingency type that can be reported"
    ret = self.client.consultarTiposContingencia(
        authRequest={
            'token': self.Token, 'sign': self.Sign,
            'cuitRepresentada': self.Cuit, },
        )['consultarTiposContingenciaReturn']
    self.__analizar_errores(ret)
    array = ret.get('arrayTiposContingencia', [])
    lista = [it['codigoDescripcion'] for it in array]
    return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it)
            if sep else it
            for it in lista]
Get the code and description for each contingency type that can be reported
def QA_fetch_get_option_50etf_contract_time_to_market(): ''' #🛠todo 获取期权合约的上市日期 ? 暂时没有。 :return: list Series ''' result = QA_fetch_get_option_list('tdx') # pprint.pprint(result) # category market code name desc code ''' fix here : See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy result['meaningful_name'] = None C:\work_new\QUANTAXIS\QUANTAXIS\QAFetch\QATdx.py:1468: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead ''' # df = pd.DataFrame() rows = [] result['meaningful_name'] = None for idx in result.index: # pprint.pprint((idx)) strCategory = result.loc[idx, "category"] strMarket = result.loc[idx, "market"] strCode = result.loc[idx, "code"] # 10001215 strName = result.loc[idx, 'name'] # 510050C9M03200 strDesc = result.loc[idx, 'desc'] # 10001215 if strName.startswith("510050"): # print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, ) if strName.startswith("510050C"): putcall = '50ETF,认购期权' elif strName.startswith("510050P"): putcall = '50ETF,认沽期权' else: putcall = "Unkown code name : " + strName expireMonth = strName[7:8] if expireMonth == 'A': expireMonth = "10月" elif expireMonth == 'B': expireMonth = "11月" elif expireMonth == 'C': expireMonth = "12月" else: expireMonth = expireMonth + '月' # 第12位期初设为“M”,并根据合约调整次数按照“A”至“Z”依序变更,如变更为“A”表示期权合约发生首次调整,变更为“B”表示期权合约发生第二次调整,依此类推; # fix here : M ?? if strName[8:9] == "M": adjust = "未调整" elif strName[8:9] == 'A': adjust = " 第1次调整" elif strName[8:9] == 'B': adjust = " 第2调整" elif strName[8:9] == 'C': adjust = " 第3次调整" elif strName[8:9] == 'D': adjust = " 第4次调整" elif strName[8:9] == 'E': adjust = " 第5次调整" elif strName[8:9] == 'F': adjust = " 第6次调整" elif strName[8:9] == 'G': adjust = " 第7次调整" elif strName[8:9] == 'H': adjust = " 第8次调整" elif strName[8:9] == 'I': adjust = " 第9次调整" elif strName[8:9] == 'J': adjust = " 第10次调整" else: adjust = " 第10次以上的调整,调整代码 %s" + strName[8:9] executePrice = strName[9:] result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s,%s,行权价:%s' % ( putcall, expireMonth, adjust, executePrice) row = result.loc[idx] rows.append(row) return rows
#🛠todo Get the listing dates of the option contracts? Not available yet. :return: list Series
def newick_from_string(self): "Reads a newick string in the New Hampshire format." # split on parentheses to traverse hierarchical tree structure for chunk in self.data.split("(")[1:]: # add child to make this node a parent. self.current_parent = ( self.root if self.current_parent is None else self.current_parent.add_child() ) # get all parenth endings from this parenth start subchunks = [ch.strip() for ch in chunk.split(",")] if subchunks[-1] != '' and not subchunks[-1].endswith(';'): raise NewickError( 'Broken newick structure at: {}'.format(chunk)) # Every closing parenthesis will close a node and go up one level. for idx, leaf in enumerate(subchunks): if leaf.strip() == '' and idx == len(subchunks) - 1: continue closing_nodes = leaf.split(")") # parse features and apply to the node object self.apply_node_data(closing_nodes[0], "leaf") # next contain closing nodes and data about the internal nodes. if len(closing_nodes) > 1: for closing_internal in closing_nodes[1:]: closing_internal = closing_internal.rstrip(";") # read internal node data and go up one level self.apply_node_data(closing_internal, "internal") self.current_parent = self.current_parent.up return self.root
Reads a newick string in the New Hampshire format.
def get_user_info(self): """ Get information on the authenticated user. :rtype: .UserInfo """ response = self.get_proto(path='/user') message = yamcsManagement_pb2.UserInfo() message.ParseFromString(response.content) return UserInfo(message)
Get information on the authenticated user. :rtype: .UserInfo
def msg_curse(self, args=None, max_width=None):
    """Return the list to display in the UI."""
    # Init the return message
    ret = []

    # Only process if stats exist and plugin is not disabled
    if not self.stats or self.args.percpu or self.is_disable():
        return ret

    # Build the string message
    # If user stat is not here, display only idle / total CPU usage (for
    # example on Windows OS)
    idle_tag = 'user' not in self.stats

    # Header
    msg = '{}'.format('CPU')
    ret.append(self.curse_add_line(msg, "TITLE"))
    trend_user = self.get_trend('user')
    trend_system = self.get_trend('system')
    if trend_user is None or trend_system is None:
        trend_cpu = None
    else:
        trend_cpu = trend_user + trend_system
    msg = ' {:4}'.format(self.trend_msg(trend_cpu))
    ret.append(self.curse_add_line(msg))
    # Total CPU usage
    msg = '{:5.1f}%'.format(self.stats['total'])
    if idle_tag:
        ret.append(self.curse_add_line(
            msg, self.get_views(key='total', option='decoration')))
    else:
        ret.append(self.curse_add_line(msg))
    # Nice CPU
    if 'nice' in self.stats:
        msg = ' {:8}'.format('nice:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='nice', option='optional')))
        msg = '{:5.1f}%'.format(self.stats['nice'])
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='nice', option='optional')))
    # ctx_switches
    if 'ctx_switches' in self.stats:
        msg = ' {:8}'.format('ctx_sw:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='ctx_switches', option='optional')))
        msg = '{:>5}'.format(self.auto_unit(int(self.stats['ctx_switches'] // self.stats['time_since_update']),
                                            min_symbol='K'))
        ret.append(self.curse_add_line(
            msg, self.get_views(key='ctx_switches', option='decoration'),
            optional=self.get_views(key='ctx_switches', option='optional')))
    # New line
    ret.append(self.curse_new_line())
    # User CPU
    if 'user' in self.stats:
        msg = '{:8}'.format('user:')
        ret.append(self.curse_add_line(msg))
        msg = '{:5.1f}%'.format(self.stats['user'])
        ret.append(self.curse_add_line(
            msg, self.get_views(key='user', option='decoration')))
    elif 'idle' in self.stats:
        msg = '{:8}'.format('idle:')
        ret.append(self.curse_add_line(msg))
        msg = '{:5.1f}%'.format(self.stats['idle'])
        ret.append(self.curse_add_line(msg))
    # IRQ CPU
    if 'irq' in self.stats:
        msg = ' {:8}'.format('irq:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='irq', option='optional')))
        msg = '{:5.1f}%'.format(self.stats['irq'])
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='irq', option='optional')))
    # interrupts
    if 'interrupts' in self.stats:
        msg = ' {:8}'.format('inter:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='interrupts', option='optional')))
        msg = '{:>5}'.format(int(self.stats['interrupts'] // self.stats['time_since_update']))
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='interrupts', option='optional')))
    # New line
    ret.append(self.curse_new_line())
    # System CPU
    if 'system' in self.stats and not idle_tag:
        msg = '{:8}'.format('system:')
        ret.append(self.curse_add_line(msg))
        msg = '{:5.1f}%'.format(self.stats['system'])
        ret.append(self.curse_add_line(
            msg, self.get_views(key='system', option='decoration')))
    else:
        msg = '{:8}'.format('core:')
        ret.append(self.curse_add_line(msg))
        msg = '{:>6}'.format(self.stats['nb_log_core'])
        ret.append(self.curse_add_line(msg))
    # IOWait CPU
    if 'iowait' in self.stats:
        msg = ' {:8}'.format('iowait:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='iowait', option='optional')))
        msg = '{:5.1f}%'.format(self.stats['iowait'])
        ret.append(self.curse_add_line(
            msg,
self.get_views(key='iowait', option='decoration'), optional=self.get_views(key='iowait', option='optional'))) # soft_interrupts if 'soft_interrupts' in self.stats: msg = ' {:8}'.format('sw_int:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='soft_interrupts', option='optional'))) msg = '{:>5}'.format(int(self.stats['soft_interrupts'] // self.stats['time_since_update'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='soft_interrupts', option='optional'))) # New line ret.append(self.curse_new_line()) # Idle CPU if 'idle' in self.stats and not idle_tag: msg = '{:8}'.format('idle:') ret.append(self.curse_add_line(msg)) msg = '{:5.1f}%'.format(self.stats['idle']) ret.append(self.curse_add_line(msg)) # Steal CPU usage if 'steal' in self.stats: msg = ' {:8}'.format('steal:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='steal', option='optional'))) msg = '{:5.1f}%'.format(self.stats['steal']) ret.append(self.curse_add_line( msg, self.get_views(key='steal', option='decoration'), optional=self.get_views(key='steal', option='optional'))) # syscalls # syscalls: number of system calls since boot. Always set to 0 on Linux. (do not display) if 'syscalls' in self.stats and not LINUX: msg = ' {:8}'.format('syscal:') ret.append(self.curse_add_line(msg, optional=self.get_views(key='syscalls', option='optional'))) msg = '{:>5}'.format(int(self.stats['syscalls'] // self.stats['time_since_update'])) ret.append(self.curse_add_line(msg, optional=self.get_views(key='syscalls', option='optional'))) # Return the message with decoration return ret
Return the list to display in the UI.
def get_reader_classes(parent=Reader):
    """Get all the childless descendants of a parent class, recursively."""
    children = parent.__subclasses__()
    descendants = children[:]
    for child in children:
        grandchildren = get_reader_classes(child)
        if grandchildren:
            descendants.remove(child)
            descendants.extend(grandchildren)
    return descendants
Get all the childless descendants of a parent class, recursively.
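To see what "childless descendants" means, a toy hierarchy (Reader here is just a stand-in base class):

class Reader(object): pass
class A(Reader): pass      # has its own subclass, so it is dropped from the result
class B(A): pass           # leaf
class C(Reader): pass      # leaf

leaves = get_reader_classes(Reader)
# leaves contains B and C but not A, since A has a subclass of its own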
def str_max_bit_rate(self):
    """
    Returns a human readable maximum upstream and downstream rate of
    the given connection. The rate is given in bits/sec.
    """
    upstream, downstream = self.max_bit_rate
    return (
        fritztools.format_rate(upstream, unit='bits'),
        fritztools.format_rate(downstream, unit='bits')
    )
Returns a human readable maximum upstream and downstream rate of the given connection. The rate is given in bits/sec.
def solveConsIndShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac, BoroCnstArt,aXtraGrid,vFuncBool,CubicBool): ''' Solves a single period consumption-saving problem with CRRA utility and risky income (subject to permanent and transitory shocks). Can generate a value function if requested; consumption function can be linear or cubic splines. Parameters ---------- solution_next : ConsumerSolution The solution to next period's one period problem. IncomeDstn : [np.array] A list containing three arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, permanent shocks, transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. Rfree : float Risk free interest factor on end-of-period assets. PermGroFac : float Expected permanent income growth factor at the end of this period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. If it is less than the natural borrowing constraint, then it is irrelevant; BoroCnstArt=None indicates no artificial bor- rowing constraint. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. CubicBool: boolean Indicator for whether the solver should use cubic or linear interpolation. Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a minimum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc and marginal mar- ginal value function vPPfunc. ''' # Use the basic solver if user doesn't want cubic splines or the value function if (not CubicBool) and (not vFuncBool): solver = ConsIndShockSolverBasic(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA, Rfree,PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool, CubicBool) else: # Use the "advanced" solver if either is requested solver = ConsIndShockSolver(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree, PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool) solver.prepareToSolve() # Do some preparatory work solution_now = solver.solve() # Solve the period return solution_now
Solves a single period consumption-saving problem with CRRA utility and risky income (subject to permanent and transitory shocks). Can generate a value function if requested; consumption function can be linear or cubic splines. Parameters ---------- solution_next : ConsumerSolution The solution to next period's one period problem. IncomeDstn : [np.array] A list containing three arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, permanent shocks, transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. Rfree : float Risk free interest factor on end-of-period assets. PermGroFac : float Expected permanent income growth factor at the end of this period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. If it is less than the natural borrowing constraint, then it is irrelevant; BoroCnstArt=None indicates no artificial bor- rowing constraint. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. CubicBool: boolean Indicator for whether the solver should use cubic or linear interpolation. Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a minimum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc and marginal mar- ginal value function vPPfunc.
def _save_training_state(self, train_iter: data_io.BaseParallelSampleIter): """ Saves current training state. """ # Create temporary directory for storing the state of the optimization process training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_TEMP_DIRNAME) if not os.path.exists(training_state_dirname): os.mkdir(training_state_dirname) # (1) Parameters: link current file params_base_fname = C.PARAMS_NAME % self.state.checkpoint params_file = os.path.join(training_state_dirname, C.TRAINING_STATE_PARAMS_NAME) if os.path.exists(params_file): os.unlink(params_file) os.symlink(os.path.join("..", params_base_fname), params_file) # (2) Optimizer states opt_state_fname = os.path.join(training_state_dirname, C.OPT_STATES_LAST) self.model.save_optimizer_states(opt_state_fname) # (3) Data iterator train_iter.save_state(os.path.join(training_state_dirname, C.BUCKET_ITER_STATE_NAME)) # (4) Random generators # RNG states: python's random and np.random provide functions for # storing the state, mxnet does not, but inside our code mxnet's RNG is # not used AFAIK with open(os.path.join(training_state_dirname, C.RNG_STATE_NAME), "wb") as fp: pickle.dump(random.getstate(), fp) pickle.dump(np.random.get_state(), fp) # (5) Training state self.state.save(os.path.join(training_state_dirname, C.TRAINING_STATE_NAME)) # (6) Learning rate scheduler with open(os.path.join(training_state_dirname, C.SCHEDULER_STATE_NAME), "wb") as fp: pickle.dump(self.optimizer_config.lr_scheduler, fp) # First we rename the existing directory to minimize the risk of state # loss if the process is aborted during deletion (which will be slower # than directory renaming) delete_training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_TEMP_DELETENAME) if os.path.exists(self.training_state_dirname): os.rename(self.training_state_dirname, delete_training_state_dirname) os.rename(training_state_dirname, self.training_state_dirname) if os.path.exists(delete_training_state_dirname): shutil.rmtree(delete_training_state_dirname)
Saves current training state.
def should_reuse_driver(self, scope, test_passed, context=None): """Check if the driver should be reused :param scope: execution scope (function, module, class or session) :param test_passed: True if the test has passed :param context: behave context :returns: True if the driver should be reused """ reuse_driver = self.config.getboolean_optional('Driver', 'reuse_driver') reuse_driver_session = self.config.getboolean_optional('Driver', 'reuse_driver_session') restart_driver_after_failure = (self.config.getboolean_optional('Driver', 'restart_driver_after_failure') or self.config.getboolean_optional('Driver', 'restart_driver_fail')) if context and scope == 'function': reuse_driver = reuse_driver or (hasattr(context, 'reuse_driver_from_tags') and context.reuse_driver_from_tags) return (((reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session')) and (test_passed or not restart_driver_after_failure))
Check if the driver should be reused :param scope: execution scope (function, module, class or session) :param test_passed: True if the test has passed :param context: behave context :returns: True if the driver should be reused
def _fake_designspace(self, ufos): """Build a fake designspace with the given UFOs as sources, so that all builder functions can rely on the presence of a designspace. """ designspace = designspaceLib.DesignSpaceDocument() ufo_to_location = defaultdict(dict) # Make weight and width axis if relevant for info_key, axis_def in zip( ("openTypeOS2WeightClass", "openTypeOS2WidthClass"), (WEIGHT_AXIS_DEF, WIDTH_AXIS_DEF), ): axis = designspace.newAxisDescriptor() axis.tag = axis_def.tag axis.name = axis_def.name mapping = [] for ufo in ufos: user_loc = getattr(ufo.info, info_key) if user_loc is not None: design_loc = class_to_value(axis_def.tag, user_loc) mapping.append((user_loc, design_loc)) ufo_to_location[ufo][axis_def.name] = design_loc mapping = sorted(set(mapping)) if len(mapping) > 1: axis.map = mapping axis.minimum = min([user_loc for user_loc, _ in mapping]) axis.maximum = max([user_loc for user_loc, _ in mapping]) axis.default = min( axis.maximum, max(axis.minimum, axis_def.default_user_loc) ) designspace.addAxis(axis) for ufo in ufos: source = designspace.newSourceDescriptor() source.font = ufo source.familyName = ufo.info.familyName source.styleName = ufo.info.styleName # source.name = '%s %s' % (source.familyName, source.styleName) source.path = ufo.path source.location = ufo_to_location[ufo] designspace.addSource(source) # UFO-level skip list lib keys are usually ignored, except when we don't have a # Designspace file to start from. If they exist in the UFOs, promote them to a # Designspace-level lib key. However, to avoid accidents, expect the list to # exist in none or be the same in all UFOs. if any("public.skipExportGlyphs" in ufo.lib for ufo in ufos): skip_export_glyphs = { frozenset(ufo.lib.get("public.skipExportGlyphs", [])) for ufo in ufos } if len(skip_export_glyphs) == 1: designspace.lib["public.skipExportGlyphs"] = sorted( next(iter(skip_export_glyphs)) ) else: raise ValueError( "The `public.skipExportGlyphs` list of all UFOs must either not " "exist or be the same in every UFO." ) return designspace
Build a fake designspace with the given UFOs as sources, so that all builder functions can rely on the presence of a designspace.
def ack(self):
    """Acknowledge this message as being processed.
    This will remove the message from the queue.

    :raises MessageStateError: If the message has already been
        acknowledged/requeued/rejected.
    """
    if self.acknowledged:
        raise self.MessageStateError(
            "Message already acknowledged with state: %s" % self._state)
    self.backend.ack(self._frame)
    self._state = "ACK"
Acknowledge this message as being processed. This will remove the message from the queue. :raises MessageStateError: If the message has already been acknowledged/requeued/rejected.
def predict(self, X):
    """
    Predict the least costly class for a given observation

    Parameters
    ----------
    X : array (n_samples, n_features)
        Data for which to predict minimum cost labels.

    Returns
    -------
    y_hat : array (n_samples,)
        Label with expected minimum cost for each observation.
    """
    X = _check_2d_inp(X)
    return np.argmax(self.decision_function(X), axis=1)
Predict the least costly class for a given observation Parameters ---------- X : array (n_samples, n_features) Data for which to predict minimum cost labels. Returns ------- y_hat : array (n_samples,) Label with expected minimum cost for each observation.
def file_or_folder(path): """ Returns a File or Folder object that would represent the given path. """ target = unicode(path) return Folder(target) if os.path.isdir(target) else File(target)
Returns a File or Folder object that would represent the given path.
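A short sketch (File and Folder are assumed to be thin path wrappers defined elsewhere in the same module):

node = file_or_folder('/tmp')            # -> Folder('/tmp') when the path is an existing directory
leaf = file_or_folder('/tmp/notes.txt')  # -> File('/tmp/notes.txt') for anything else, including missing paths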