code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def detach_framebuffer(self, screen_id, id_p):
    """Remove the graphics updates target for a screen.

    :param screen_id: screen index (baseinteger).
    :param id_p: framebuffer id (basestring).
    :raises TypeError: if an argument has the wrong type.
    """
    if not isinstance(screen_id, baseinteger):
        raise TypeError("screen_id can only be an instance of type baseinteger")
    if not isinstance(id_p, basestring):
        raise TypeError("id_p can only be an instance of type basestring")
    self._call("detachFramebuffer", in_p=[screen_id, id_p])
Removes the graphics updates target for a screen. in screen_id of type int in id_p of type str
def _get_var_name(self, name, fresh=False): if name not in self._var_name_mappers: self._var_name_mappers[name] = VariableNamer(name) if fresh: var_name = self._var_name_mappers[name].get_next() else: var_name = self._var_name_mappers[name].get_current() return var_name
Get variable name.
def is_regex_type(type_):
    """Check whether the given type is a regex type.

    A regex type here is a callable whose ``__name__`` equals
    REGEX_TYPE_NAME and whose ``__supertype__`` is a compiled pattern
    (presumably a typing.NewType over a compiled regex — TODO confirm).

    :param type_: The type to check
    :return: True if the type is a regex type, otherwise False
    :rtype: bool
    """
    return (
        callable(type_)
        and getattr(type_, "__name__", None) == REGEX_TYPE_NAME
        and hasattr(type_, "__supertype__")
        and is_compiled_pattern(type_.__supertype__)
    )
Checks if the given type is a regex type. :param type_: The type to check :return: True if the type is a regex type, otherwise False :rtype: bool
def get_properties_of_managed_object(mo_ref, properties):
    """Return the requested properties of a managed object.

    mo_ref
        The managed object reference.
    properties
        List of property names to retrieve.

    Raises salt.exceptions.VMwareApiError when nothing could be retrieved.
    """
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Object has no 'name' property; placeholder used only for
        # logging and error messages.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
Returns specific properties of a managed object, retrieved in an optimal way. mo_ref The managed object reference. properties List of properties of the managed object to retrieve.
def post_intent(self, intent_json):
    """POST `intent_json` to the intent endpoint to create a new intent."""
    return self._post(self._intent_uri(), data=intent_json)
Sends post request to create a new intent
def clean(self):
    """Run the parser over the input and return the cleaned-up HTML.

    Closes any still-open parent tag, strips empty paragraphs that
    immediately follow closed lists, and removes pre-formatting.
    """
    self.feed()
    # Close a parent element left open at the end of the input.
    if self.current_parent_element['tag'] != '':
        self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])
    # Drop an empty <p></p> directly following a closed </ul> or </ol>.
    # NOTE(review): the class [u|o] also matches '|' — probably meant
    # (u|o); harmless for well-formed HTML, confirm before changing.
    self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\g<1>', self.cleaned_html)
    self._remove_pre_formatting()
    return self.cleaned_html
Goes through the txt input and cleans up any problematic HTML.
def set_add(parent, idx, value):
    """Append `value` to the child list at `idx` unless already present."""
    members = get_child(parent, idx)
    if value not in members:
        members.append(value)
Add an item to a list if it doesn't exist.
def set_offset_and_sequence_number(self, event_data):
    """Update the checkpoint offset and sequence number from an event.

    :param event_data: a received EventData carrying a valid offset and
        sequence number.
    :raises Exception: when `event_data` is falsy.
    """
    if event_data:
        self.offset = event_data.offset.value
        self.sequence_number = event_data.sequence_number
    else:
        raise Exception(event_data)
Updates offset based on event. :param event_data: A received EventData with valid offset and sequenceNumber. :type event_data: ~azure.eventhub.common.EventData
def __initialize_languages_model(self):
    """Initialize the languages Model.

    Seeds the model with the built-in languages, then scans the
    resources directories for additional grammar files and appends any
    language they describe, sorted by name.
    """
    languages = [PYTHON_LANGUAGE, LOGGING_LANGUAGE, TEXT_LANGUAGE]
    existingGrammarFiles = [os.path.normpath(language.file) for language in languages]
    for directory in RuntimeGlobals.resources_directories:
        for file in foundations.walkers.files_walker(directory,
                                                     ("\.{0}$".format(self.__extension),),
                                                     ("\._",)):
            # Skip grammars already provided by the built-in languages.
            if os.path.normpath(file) in existingGrammarFiles:
                continue
            languageDescription = get_language_description(file)
            if not languageDescription:
                continue
            LOGGER.debug("> Adding '{0}' language to model.".format(languageDescription))
            languages.append(languageDescription)
    self.__languages_model = LanguagesModel(self, sorted(languages, key=lambda x: (x.name)))
    self.__get_supported_file_types_string()
Initializes the languages Model.
def reduce_sum_square(attrs, inputs, proto_obj):
    """Reduce the array by the sum of squares along the given axes.

    Squares the input symbol, then sums along attrs['axes'] honouring
    attrs['keepdims'].
    """
    square_op = symbol.square(inputs[0])
    sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
                        keepdims=attrs.get('keepdims'))
    return sum_op, attrs, inputs
Reduce the array along a given axis by sum square value
def get_next_line(self):
    """Read the next variant line plus its info and expected-frequency values.

    When the current frequency file is exhausted, the next archive is
    loaded (via load_genotypes) before reading again.
    """
    line = self.freq_file.readline().strip().split()
    if len(line) < 1:
        # End of this file: open the next archive and retry once.
        self.load_genotypes()
        line = self.freq_file.readline().strip().split()
    # Parallel info file: column 4 is the info score, column 3 the
    # expected frequency (assumed from the variable names — TODO confirm).
    info_line = self.info_file.readline().strip().split()
    info = float(info_line[4])
    exp_freq = float(info_line[3])
    return line, info, exp_freq
If we reach the end of the file, we simply open the next, until we run out of archives to process
def _unstack_extension_series(series, level, fill_value):
    """Unstack an ExtensionArray-backed Series, preserving its ExtensionDtype.

    Parameters
    ----------
    series : Series
        A Series with an ExtensionArray for values.
    level : Any
        The level name or number.
    fill_value : Any
        User-level fill value for holes created by the reshape; passed
        to ``series.values.take``.

    Returns
    -------
    DataFrame
        Each column will have the same dtype as the input Series.
    """
    from pandas.core.reshape.concat import concat
    # Unstack a dummy integer array to obtain the target layout;
    # -1 marks positions that need filling.
    dummy_arr = np.arange(len(series))
    result = _Unstacker(dummy_arr, series.index, level=level,
                        fill_value=-1).get_result()
    out = []
    values = extract_array(series, extract_numpy=False)
    for col, indices in result.iteritems():
        out.append(Series(values.take(indices.values,
                                      allow_fill=True, fill_value=fill_value),
                          name=col, index=result.index))
    return concat(out, axis='columns', copy=False, keys=result.columns)
Unstack an ExtensionArray-backed Series. The ExtensionDtype is preserved. Parameters ---------- series : Series A Series with an ExtensionArray for values level : Any The level name or number. fill_value : Any The user-level (not physical storage) fill value to use for missing values introduced by the reshape. Passed to ``series.values.take``. Returns ------- DataFrame Each column of the DataFrame will have the same dtype as the input Series.
def delete(self, ids):
    """Delete VIP requests by their ids.

    :param ids: identifiers of the VIPs to delete.
    :return: whatever the base class' delete returns.
    """
    url = build_uri_with_ids('api/v3/vip-request/%s/', ids)
    return super(ApiVipRequest, self).delete(url)
Method to delete vip's by their id's :param ids: Identifiers of vip's :return: None
def compute_err_score(true_positives, n_ref, n_est):
    """Compute substitution, miss, false-alarm and total error scores.

    Parameters
    ----------
    true_positives : np.ndarray
        Number of true positives at each time point.
    n_ref : np.ndarray
        Number of reference frequencies at each time point.
    n_est : np.ndarray
        Number of estimate frequencies at each time point.

    Returns
    -------
    (e_sub, e_miss, e_fa, e_tot) : tuple of float
    """
    total_ref = float(n_ref.sum())
    if total_ref == 0:
        warnings.warn("Reference frequencies are all empty.")
        return 0., 0., 0., 0.
    # Substitutions: matched frames that were not true positives.
    e_sub = (np.minimum(n_ref, n_est) - true_positives).sum() / total_ref
    # Misses: reference frames with no matching estimate.
    e_miss = np.clip(n_ref - n_est, 0, None).sum() / total_ref
    # False alarms: estimate frames with no matching reference.
    e_fa = np.clip(n_est - n_ref, 0, None).sum() / total_ref
    e_tot = (np.maximum(n_ref, n_est) - true_positives).sum() / total_ref
    return e_sub, e_miss, e_fa, e_tot
Compute error score metrics. Parameters ---------- true_positives : np.ndarray Array containing the number of true positives at each time point. n_ref : np.ndarray Array containing the number of reference frequencies at each time point. n_est : np.ndarray Array containing the number of estimate frequencies at each time point. Returns ------- e_sub : float Substitution error e_miss : float Miss error e_fa : float False alarm error e_tot : float Total error
def exclude(self, target, operation, role, value):
    """Add or remove an exclusion of `role`/`value` on the named proxy.

    Arguments:
        target (str): destination proxy model ("result", "instance" or
            "plugin").
        operation (str): "add" or "remove" the exclusion.
        role (str): role to exclude.
        value (str): value of `role` to exclude.
    """
    proxies = self.data["proxies"]
    target = {"result": proxies["result"],
              "instance": proxies["instance"],
              "plugin": proxies["plugin"]}[target]
    if operation == "add":
        target.add_exclusion(role, value)
    elif operation == "remove":
        target.remove_exclusion(role, value)
    else:
        raise TypeError("operation must be either `add` or `remove`")
Exclude a `role` of `value` at `target` Arguments: target (str): Destination proxy model operation (str): "add" or "remove" exclusion role (str): Role to exclude value (str): Value of `role` to exclude
def WaitUntilComplete(self, poll_freq=2, timeout=None):
    """Poll until the request reaches a terminal status.

    Keeps polling while the status is 'notStarted' or 'executing'.
    Returns on 'succeeded'. Raises CLCException on 'failed', 'resumed'
    or 'unknown', and RequestTimeoutException when `timeout` (seconds)
    elapses while still executing. `poll_freq` is in seconds.
    """
    start_time = time.time()
    while not self.time_completed:
        status = self.Status()
        if status == 'executing':
            if not self.time_executed:
                self.time_executed = time.time()
            if clc.v2.time_utils.TimeoutExpired(start_time, timeout):
                raise clc.RequestTimeoutException(
                    'Timeout waiting for Request: {0}'.format(self.id), status)
        elif status == 'succeeded':
            self.time_completed = time.time()
        # BUG FIX: the original used ("failed", "resumed" or "unknown"),
        # which evaluates to ("failed", "resumed") — 'unknown' was never
        # matched. All three failure statuses are listed explicitly.
        elif status in ("failed", "resumed", "unknown"):
            self.time_completed = time.time()
            raise(clc.CLCException("%s %s execution %s" % (self.context_key, self.context_val, status)))
        time.sleep(poll_freq)
Poll until status is completed. If status is 'notStarted' or 'executing' continue polling. If status is 'succeeded' return Else raise exception poll_freq option is in seconds
def delete_existing_policy(self, scaling_policy, server_group):
    """Delete an existing scaling policy from a server group.

    Scaling policies are deleted instead of upserted for consistency.

    Args:
        scaling_policy (dict): scaling policy json from Spinnaker that
            should be deleted.
        server_group (str): the affected server group.
    """
    self.log.info("Deleting policy %s on %s", scaling_policy['policyName'], server_group)
    delete_dict = {
        "application": self.app,
        "description": "Delete scaling policy",
        "job": [{
            "policyName": scaling_policy['policyName'],
            "serverGroupName": server_group,
            "credentials": self.env,
            "region": self.region,
            "provider": "aws",
            "type": "deleteScalingPolicy",
            "user": "foremast-autoscaling-policy"
        }]
    }
    wait_for_task(json.dumps(delete_dict))
Given a scaling_policy and server_group, deletes the existing scaling_policy. Scaling policies need to be deleted instead of upserted for consistency. Args: scaling_policy (json): the scaling_policy json from Spinnaker that should be deleted server_group (str): the affected server_group
def to_array(self):
    """Serialize this ReplyKeyboardMarkup to a dictionary.

    Optional flags are only emitted when they were explicitly set.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(ReplyKeyboardMarkup, self).to_array()
    array['keyboard'] = self._as_array(self.keyboard)
    if self.resize_keyboard is not None:
        array['resize_keyboard'] = bool(self.resize_keyboard)
    if self.one_time_keyboard is not None:
        array['one_time_keyboard'] = bool(self.one_time_keyboard)
    if self.selective is not None:
        array['selective'] = bool(self.selective)
    return array
Serializes this ReplyKeyboardMarkup to a dictionary. :return: dictionary representation of this object. :rtype: dict
def on_server_shutdown(self):
    """Stop and remove the container, if one was started."""
    container = self._container
    if container:
        container.stop()
        container.remove(v=True, force=True)
Stop the container before shutting down.
def get_fixed_param_names(self) -> List[str]:
    """Get the names of the network's fixed parameters (args and auxs)."""
    candidates = set(self.args.keys()) | set(self.auxs.keys())
    return list(candidates & set(self.sym.list_arguments()))
Get the fixed params of the network. :return: List of strings, names of the layers
def GetPathSegmentAndSuffix(self, base_path, path):
    """Determine the path segment and suffix of `path` under `base_path`.

    Returns (None, None) when `path` does not start with `base_path`,
    and ('', '') when it matches the base path exactly.

    Args:
        base_path (str): base path.
        path (str): path.

    Returns:
        tuple[str, str]: path segment and suffix string.
    """
    if path is None or base_path is None:
        return None, None
    if not path.startswith(base_path):
        return None, None
    separator = self.PATH_SEPARATOR
    start = len(base_path)
    # Skip the separator that follows a non-root base path.
    if base_path and not base_path.endswith(separator):
        start += 1
    remainder = path[start:]
    if not remainder:
        return '', ''
    segment, _, suffix = remainder.partition(separator)
    return segment, suffix
Determines the path segment and suffix of the path. None is returned if the path does not start with the base path and an empty string if the path exactly matches the base path. Args: base_path (str): base path. path (str): path. Returns: tuple[str, str]: path segment and suffix string.
def _fluent_size(self, fluents, ordering) -> Sequence[Sequence[int]]: shapes = [] for name in ordering: fluent = fluents[name] shape = self._param_types_to_shape(fluent.param_types) shapes.append(shape) return tuple(shapes)
Returns the sizes of `fluents` following the given `ordering`. Returns: Sequence[Sequence[int]]: A tuple of tuple of integers representing the shape and size of each fluent.
def application_unauthenticated(request, token, state=None, label=None):
    """Handle an unauthenticated visitor following an application link.

    Looks the application up by its secret token, rejects expired ones,
    redirects an authenticated applicant to their canonical URL, and
    otherwise lets the application's state machine process the request.
    """
    application = base.get_application(secret_token=token)
    if application.expires < datetime.datetime.now():
        return render(
            template_name='kgapplications/common_expired.html',
            context={'application': application},
            request=request)
    roles = {'is_applicant', 'is_authorised'}
    if request.user.is_authenticated:
        # A logged-in applicant should use the authenticated URL instead.
        if request.user == application.applicant:
            url = base.get_url(
                request, application, roles, label)
            return HttpResponseRedirect(url)
    state_machine = base.get_state_machine(application)
    return state_machine.process(
        request, application, state, label, roles)
Somebody is trying to access an application.
def split_fasta(f, id2f):
    """Split a fasta file into separate files based on `id2f`.

    `id2f` maps scaffold id -> output file basename; sequences whose id
    is not listed are skipped. Output files are opened in append mode.
    """
    opened = {}
    for seq in parse_fasta(f):
        seq_id = seq[0].split('>')[1].split()[0]
        if seq_id not in id2f:
            continue
        fasta = id2f[seq_id]
        if fasta not in opened:
            opened[fasta] = '%s.fa' % fasta
        seq[1] += '\n'
        with open(opened[fasta], 'a+') as out_handle:
            out_handle.write('\n'.join(seq))
split fasta file into separate fasta files based on list of scaffolds that belong to each separate file
def create_payload(entities):
    """Create a TSV payload describing `entities`.

    The first row is a header with the entity type and attribute names;
    each following row is an entity_id and its attribute values,
    tab-separated. All entities must share one entity type.
    """
    etypes = {entity.etype for entity in entities}
    if len(etypes) != 1:
        raise ValueError("Can't create payload with " + str(len(etypes)) + " types")
    attr_names = set()
    for entity in entities:
        attr_names.update(set(entity.attrs.keys()))
    attr_names = list(attr_names)
    header = "entity:" + entities[0].etype + "_id"
    rows = ['\t'.join([header] + attr_names)]
    for entity in entities:
        cells = [entity.entity_id] + [entity.attrs.get(a, "") for a in attr_names]
        rows.append('\t'.join(cells))
    return '\n'.join(rows) + '\n'
Create a tsv payload describing entities. A TSV payload consists of 1 header row describing entity type and attribute names. Each subsequent line is an entity_id followed by attribute values separated by the tab "\\t" character. This payload can be uploaded to the workspace via firecloud.api.upload_entities()
def get_application_configurations(self, name=None):
    """Retrieve application configurations for this instance.

    Args:
        name (str, optional): only return configurations whose name
            matches `name` (may be a regular expression). When omitted,
            all application configurations are returned.

    Returns:
        list(ApplicationConfiguration): matching configurations, or
        None when the instance has no ``applicationConfigurations``
        attribute.
    """
    if hasattr(self, 'applicationConfigurations'):
        return self._get_elements(self.applicationConfigurations,
                                  'applicationConfigurations',
                                  ApplicationConfiguration, None, name)
Retrieves application configurations for this instance. Args: name (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a regular expression. If `name` is not supplied, then all application configurations are returned. Returns: list(ApplicationConfiguration): A list of application configurations matching the given `name`. .. versionadded 1.12
def generate_span_requests(self, span_datas):
    """Yield an ExportTraceServiceRequest wrapping the given span data.

    :param span_datas: list of SpanData to convert to protobuf spans
        and send to the opencensusd agent.
    """
    pb_spans = [
        utils.translate_to_trace_proto(span_data)
        for span_data in span_datas
    ]
    yield trace_service_pb2.ExportTraceServiceRequest(
        node=self.node, spans=pb_spans)
Span request generator. :type span_datas: list of :class:`~opencensus.trace.span_data.SpanData` :param span_datas: SpanData tuples to convert to protobuf spans and send to opensensusd agent :rtype: list of `~gen.opencensus.agent.trace.v1.trace_service_pb2.ExportTraceServiceRequest` :returns: List of span export requests.
def _force_close(self, file_length=None):
    """Finalize this upload immediately at `file_length`.

    Contents still in memory are NOT uploaded. This utility does not
    modify self.

    Args:
        file_length: length to close the file at; must match what has
            already been uploaded. When None, it is queried from GCS.
    """
    if file_length is None:
        file_length = self._get_offset_from_gcs() + 1
    self._send_data('', 0, file_length)
Close this buffer on file_length. Finalize this upload immediately on file_length. Contents that are still in memory will not be uploaded. This is a utility method that does not modify self. Args: file_length: file length. Must match what has been uploaded. If None, it will be queried from GCS.
def reverse_translate(
        protein_seq, template_dna=None, leading_seq=None, trailing_seq=None,
        forbidden_seqs=(), include_stop=True, manufacturer=None):
    """Generate a well-behaved DNA sequence from the given protein sequence.

    If `template_dna` is given, the returned sequence stays as similar
    to it as possible. `leading_seq`/`trailing_seq` may be restriction
    site names (looked up in `restriction_sites`) or literal sequences;
    they are concatenated around the codons. Sequences in
    `forbidden_seqs` will not appear in the result. For manufacturer
    'gen9', its reserved restriction sites are forbidden and its QC
    checks are applied to the final sequence.
    """
    if manufacturer == 'gen9':
        forbidden_seqs += gen9.reserved_restriction_sites
    # Resolve named restriction sites; fall back to the literal value.
    leading_seq = restriction_sites.get(leading_seq, leading_seq or '')
    trailing_seq = restriction_sites.get(trailing_seq, trailing_seq or '')
    codon_list = make_codon_list(protein_seq, template_dna, include_stop)
    sanitize_codon_list(codon_list, forbidden_seqs)
    dna_seq = leading_seq + ''.join(codon_list) + trailing_seq
    if manufacturer == 'gen9':
        gen9.apply_quality_control_checks(dna_seq)
    return dna_seq
Generate a well-behaved DNA sequence from the given protein sequence. If a template DNA sequence is specified, the returned DNA sequence will be as similar to it as possible. Any given restriction sites will not be present in the sequence. And finally, the given leading and trailing sequences will be appropriately concatenated.
def set_error_callback(self, callback):
    """Assign the method invoked when an action execution hits an
    unrecoverable error.

    :param callback: the method to invoke.
    """
    self.logger.debug('Setting error callback: %r', callback)
    self._on_error = callback
Assign a method to invoke when a request has encountered an unrecoverable error in an action execution. :param method callback: The method to invoke
def will_tag(self):
    """Check whether the feed should be tagged.

    Tagging requires both the 'Tag' config option set to 'yes' and the
    Stagger module being installed; a warning is printed when the
    module is missing.
    """
    if self.retrieve_config('Tag', 'no') != 'yes':
        return False
    if aux.staggerexists:
        return True
    print(("You want me to tag {0}, but you have not installed "
           "the Stagger module. I cannot honour your request.").
          format(self.name), file=sys.stderr, flush=True)
    return False
Check whether the feed should be tagged
def getAttributeValueData(self, index):
    """Return the raw value data of the attribute at the given index.

    :param index: index of the attribute.
    """
    offset = self._get_attribute_offset(index)
    return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA]
Return the data of the attribute at the given index :param index: index of the attribute
def _retrieve(self):
    """Query the Apache Tomcat server status page in XML format.

    @return: ElementTree object of the status page XML.
    """
    url = "%s://%s:%d/manager/status" % (self._proto, self._host, self._port)
    params = {}
    params['XML'] = 'true'
    response = util.get_url(url, self._user, self._password, params)
    tree = ElementTree.XML(response)
    return tree
Query Apache Tomcat Server Status Page in XML format and return the result as an ElementTree object. @return: ElementTree object of Status Page XML.
def getPortType(self):
    """Return the PortType object referenced by this port's binding."""
    wsdl = self.getService().getWSDL()
    binding = wsdl.bindings[self.binding]
    return wsdl.portTypes[binding.type]
Return the PortType object that is referenced by this port.
def drop_index(self, name):
    """Drop an index from this table.

    :param name: The index name
    :type name: str
    :raises IndexDoesNotExist: when no such index exists.
    """
    normalized = self._normalize_identifier(name)
    if self.has_index(normalized):
        del self._indexes[normalized]
    else:
        raise IndexDoesNotExist(normalized, self._name)
Drops an index from this table. :param name: The index name :type name: str
def print_debug(*args, **kwargs):
    """Print only when the `debug` flag is set true in config.yaml.

    Args:
        args: var args forwarded to print.
    """
    if WTF_CONFIG_READER.get("debug", False) != True:
        return
    print(*args, **kwargs)
Print if and only if the debug flag is set true in the config.yaml file. Args: args : var args of print arguments.
def connect(self, their_unl, events, force_master=1, hairpin=1,
            nonce="0" * 64):
    """Start a connection attempt in a background thread.

    Many of the connection techniques rely on sleeps to determine the
    connection outcome or to synchronise hole punching; running them in
    their own thread keeps main execution unblocked.
    """
    parms = (their_unl, events, force_master, hairpin, nonce)
    t = Thread(target=self.connect_handler, args=parms)
    t.start()
    self.unl_threads.append(t)
A new thread is spawned because many of the connection techniques rely on sleep to determine connection outcome or to synchronise hole punching techniques. If the sleep is in its own thread it won't block main execution.
def _reload(self, force=False):
    """Reload the configuration from file and environment variables.

    Useful when `os.environ` is changed directly or the underlying
    configuration file was modified externally.
    """
    self._config_map = dict()
    self._registered_env_keys = set()
    self.__reload_sources(force)
    self.__load_environment_keys()
    self.verify()
    # Cached derived values may now be stale.
    self._clear_memoization()
Reloads the configuration from the file and environment variables. Useful if using `os.environ` instead of this class' `set_env` method, or if the underlying configuration file is changed externally.
def ipv4_range_type(string):
    """Validate an IPv4 address or a dash-separated address range."""
    import re
    ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    is_single = re.match("^{}$".format(ip_format), string)
    is_range = re.match("^{ip_format}-{ip_format}$".format(ip_format=ip_format), string)
    if not (is_single or is_range):
        raise ValueError
    return string
Validates an IPv4 address or address range.
def Connect(self, Skype):
    """Connect this call channel manager instance to Skype.

    This is the first thing you should do after creating this object.

    :Parameters:
        Skype : `Skype`
            The Skype object.

    :see: `Disconnect`
    """
    self._Skype = Skype
    self._Skype.RegisterEventHandler('CallStatus', self._CallStatus)
    # Reset any channels left over from a previous connection.
    del self._Channels[:]
Connects this call channel manager instance to Skype. This is the first thing you should do after creating this object. :Parameters: Skype : `Skype` The Skype object. :see: `Disconnect`
def default(value):
    """Default encoder for JSON: handles Decimal, set and Binary values."""
    if isinstance(value, Decimal):
        as_float = float(value)
        # Integral decimals serialize as ints to avoid a spurious ".0".
        return int(as_float) if int(as_float) == as_float else as_float
    if isinstance(value, set):
        return list(value)
    if isinstance(value, Binary):
        return b64encode(value.value)
    raise TypeError("Cannot encode %s value %r" % (type(value), value))
Default encoder for JSON
def mouse_move_event(self, event):
    """Forward mouse cursor position events to the example."""
    self.example.mouse_position_event(event.x(), event.y())
Forward mouse cursor position events to the example
def uniform_pdf():
    """Uniform PDF for orientation averaging.

    Returns:
        pdf(x), the spherical Jacobian-normalized uniform PDF,
        normalized over the interval [0, 180] degrees.
    """
    norm_const = 1.0
    def pdf(x):
        return norm_const * np.sin(np.pi / 180.0 * x)
    # Rescale so the PDF integrates to one over [0, 180].
    norm_const /= quad(pdf, 0.0, 180.0)[0]
    return pdf
Uniform PDF for orientation averaging. Returns: pdf(x), a function that returns the value of the spherical Jacobian- normalized uniform PDF. It is normalized for the interval [0, 180].
def _get_result_files_base(self, temp_dir):
    """Return the directory where files created by the tool are stored.

    With namespaces enabled the tool writes into a private "temp"
    subdirectory of `temp_dir`; otherwise defer to the base class.
    """
    if not self._use_namespaces:
        return super(ContainerExecutor, self)._get_result_files_base(temp_dir)
    else:
        return os.path.join(temp_dir, "temp")
Given the temp directory that is created for each run, return the path to the directory where files created by the tool are stored.
def enable_global_typechecked_profiler(flag = True):
    """Enable or disable global typechecking mode via a profiler.

    See flag global_typechecked_profiler. Does not work if
    checking_enabled is false. The shared TypeAgent is started lazily
    and only stopped when no other global mode still needs it.
    """
    global global_typechecked_profiler, _global_type_agent, global_typelogged_profiler
    global_typechecked_profiler = flag
    if flag and checking_enabled:
        if _global_type_agent is None:
            _global_type_agent = TypeAgent()
            _global_type_agent.start()
        elif not _global_type_agent.active:
            _global_type_agent.start()
    elif not flag and not global_typelogged_profiler and \
            not _global_type_agent is None and _global_type_agent.active:
        _global_type_agent.stop()
Enables or disables global typechecking mode via a profiler. See flag global_typechecked_profiler. Does not work if checking_enabled is false.
def closure_for_targets(cls, target_roots, exclude_scopes=None,
                        include_scopes=None, bfs=None, postorder=None,
                        respect_intransitive=False):
    """Compute the closure of the given targets respecting input scopes.

    :API: public

    :param list target_roots: targets to start from; always included in
        the closure regardless of scope settings.
    :param Scope exclude_scopes: if present and non-empty, only
        dependencies with none of these scope names are traversed.
    :param Scope include_scopes: if present and non-empty, only
        dependencies with at least one of these scope names are
        traversed.
    :param bool bfs: traverse breadth-first instead of depth-first.
    :param bool postorder: postorder walk (depth-first mode only).
    :param bool respect_intransitive: if True, 'intransitive'-scoped
        dependencies are only included when they are direct
        dependencies of a root target.
    """
    target_roots = list(target_roots)
    if not target_roots:
        return OrderedSet()
    # All roots share one build graph; grab it from the first.
    build_graph = target_roots[0]._build_graph
    addresses = [target.address for target in target_roots]
    dep_predicate = cls._closure_dep_predicate(
        target_roots,
        include_scopes=include_scopes,
        exclude_scopes=exclude_scopes,
        respect_intransitive=respect_intransitive)
    closure = OrderedSet()
    if not bfs:
        build_graph.walk_transitive_dependency_graph(
            addresses=addresses,
            work=closure.add,
            postorder=postorder,
            dep_predicate=dep_predicate,
        )
    else:
        closure.update(build_graph.transitive_subgraph_of_addresses_bfs(
            addresses=addresses,
            dep_predicate=dep_predicate,
        ))
    # Roots are always part of the closure, even if filtered by scope.
    closure.update(target_roots)
    return closure
Computes the closure of the given targets respecting the given input scopes. :API: public :param list target_roots: The list of Targets to start from. These targets will always be included in the closure, regardless of scope settings. :param Scope exclude_scopes: If present and non-empty, only dependencies which have none of the scope names in this Scope will be traversed. :param Scope include_scopes: If present and non-empty, only dependencies which have at least one of the scope names in this Scope will be traversed. :param bool bfs: Whether to traverse in breadth-first or depth-first order. (Defaults to True). :param bool respect_intransitive: If True, any dependencies which have the 'intransitive' scope will not be included unless they are direct dependencies of one of the root targets. (Defaults to False).
def config_logging(args):
    """Override the root logger's level from quiet/verbose flags."""
    root = logging.getLogger()
    if args.quiet:
        root.setLevel(logging.CRITICAL)
    elif args.verbose:
        root.setLevel(logging.DEBUG)
Override root logger's level
def get_counted_number(context, config, variables, **kw):
    """Compute the number for the sequence type "Counter".

    The counted object is looked up in `variables` under the configured
    context key (falling back to `context`); the number is the count of
    objects already in the sequence.
    """
    obj = variables.get(config.get("context"), context)
    seq_items = get_objects_in_sequence(
        obj, config.get("counter_type"), config.get("counter_reference"))
    return len(seq_items)
Compute the number for the sequence type "Counter"
def opt_restore(prefix, opts):
    """Return a copy of `opts` with `prefix` prepended to every key."""
    prefixed = {}
    for name, value in opts.items():
        prefixed[prefix + name] = value
    return prefixed
Given a dict of opts, add the given prefix to each key
def extend_webfont_settings(webfont_settings):
    """Validate webfont settings and fill a missing ``csspart_path``.

    Args:
        webfont_settings (dict): an item value from
            ``settings.ICOMOON_WEBFONTS``.

    Returns:
        dict: the (possibly updated) webfont settings.

    Raises:
        IcomoonSettingsError: when 'fontdir_path' is missing or falsy.
    """
    if not webfont_settings.get('fontdir_path', False):
        raise IcomoonSettingsError(("Webfont settings miss the required key "
                                    "item 'fontdir_path'"))
    # Any falsy csspart_path is normalized to None.
    if not webfont_settings.get('csspart_path', False):
        webfont_settings['csspart_path'] = None
    return webfont_settings
Validate a webfont settings and optionally fill missing ``csspart_path`` option. Args: webfont_settings (dict): Webfont settings (an item value from ``settings.ICOMOON_WEBFONTS``). Returns: dict: Webfont settings
def read_binary_array(self, key, b64decode=True, decode=False):
    """Read a binary array from the DB for `key`.

    Args:
        key (string): The variable to read from the DB.
        b64decode (bool): base64-decode each element when True.
        decode (bool): additionally decode bytes to a String (utf-8
            with a latin-1 fallback) when True.

    Returns:
        (list): Results retrieved from DB, or None when `key` is None
        or nothing was stored.
    """
    data = None
    if key is not None:
        data = self.db.read(key.strip())
        if data is not None:
            data_decoded = []
            for d in json.loads(data, object_pairs_hook=OrderedDict):
                if b64decode:
                    dd = base64.b64decode(d)
                    if decode:
                        try:
                            dd = dd.decode('utf-8')
                        except UnicodeDecodeError:
                            # Not valid utf-8; latin-1 always succeeds.
                            dd = dd.decode('latin-1')
                    data_decoded.append(dd)
                else:
                    data_decoded.append(d)
            data = data_decoded
    else:
        self.tcex.log.warning(u'The key field was None.')
    return data
Read method of CRUD operation for binary array data. Args: key (string): The variable to read from the DB. b64decode (bool): If true the data will be base64 decoded. decode (bool): If true the data will be decoded to a String. Returns: (list): Results retrieved from DB.
def set_style(primary=None, secondary=None):
    """Set the primary and/or secondary component styles.

    Arguments left as None keep the current style.
    """
    global _primary_style, _secondary_style
    if primary:
        _primary_style = primary
    if secondary:
        _secondary_style = secondary
Sets primary and secondary component styles.
def output_sizes(self):
    """Return a tuple of all output sizes, resolving callables lazily."""
    sizes = []
    for size in self._output_sizes:
        sizes.append(size() if callable(size) else size)
    return tuple(sizes)
Returns a tuple of all output sizes of all the layers.
def get_crumb_list_by_selector(self, crumb_selector):
    """Return the text of every breadcrumb node matching `crumb_selector`."""
    nodes = self.parsedpage.get_nodes_by_selector(crumb_selector)
    return [self.parsedpage.get_text_from_node(node) for node in nodes]
Return a list of crumbs.
def run_keepedalive_process(main_write_pipe, process_read_pipe, obj):
    """Worker loop: execute jobs until the 'stop' sentinel arrives.

    Runs `obj` via oneshot_in_process, reports completion over
    `main_write_pipe`, then waits on `process_read_pipe` for the next
    job (or 'stop').
    """
    while obj != 'stop':
        oneshot_in_process(obj)
        main_write_pipe.send('job is done')
        readers = [process_read_pipe]
        while readers:
            for r in wait(readers):
                try:
                    obj = r.recv()
                except EOFError:
                    # Peer closed the pipe; treat as no more input.
                    pass
                finally:
                    # NOTE(review): the reader is removed even after a
                    # successful recv, so each outer iteration consumes
                    # at most one message — confirm this is intended.
                    readers.remove(r)
Worker process that keeps running as long as there are jobs to do
def com_google_fonts_check_metadata_valid_name_values(style, font_metadata,
                                                      font_familynames,
                                                      typographic_familynames):
    """METADATA.pb font.name field contains font name in right format?

    RIBBI styles are checked against the font family names; all other
    styles against the typographic family names.
    """
    from fontbakery.constants import RIBBI_STYLE_NAMES
    if style in RIBBI_STYLE_NAMES:
        familynames = font_familynames
    else:
        familynames = typographic_familynames
    failed = False
    for font_familyname in familynames:
        if font_familyname not in font_metadata.name:
            failed = True
            yield FAIL, ("METADATA.pb font.name field (\"{}\")"
                         " does not match correct font name format (\"{}\")."
                         "").format(font_metadata.name, font_familyname)
    if not failed:
        yield PASS, ("METADATA.pb font.name field contains"
                     " font name in right format.")
METADATA.pb font.name field contains font name in right format?
def decode_transaction_input(self, transaction_hash: bytes) -> Dict:
    """Return the decoded inputs of the method call in a transaction."""
    transaction = self.contract.web3.eth.getTransaction(
        transaction_hash,
    )
    decoded = self.contract.decode_function_input(transaction['input'])
    return decoded
Return inputs of a method call
def Grow(self, size):
    """Grow this disk to `size` GB and return the queued request.

    `size` must be at most 1024 and greater than the current size.

    >>> clc.v2.Server("WA1BTDIX01").Disks().disks[2].Grow(30).WaitUntilComplete()
    0
    """
    if size > 1024:
        raise(clc.CLCException("Cannot grow disk beyond 1024GB"))
    if size <= self.size:
        raise(clc.CLCException("New size must exceed current disk size"))
    # Rebuild the full disk set: all sibling disks unchanged plus this
    # one at its new size (the API patches the whole 'disks' member).
    disk_set = [{'diskId': o.id, 'sizeGB': o.size} for o in self.parent.disks if o != self]
    self.size = size
    disk_set.append({'diskId': self.id, 'sizeGB': self.size})
    self.parent.server.dirty = True
    return(clc.v2.Requests(
        clc.v2.API.Call('PATCH',
                        'servers/%s/%s' % (self.parent.server.alias, self.parent.server.id),
                        json.dumps([{"op": "set", "member": "disks", "value": disk_set}]),
                        session=self.session),
        alias=self.parent.server.alias,
        session=self.session))
Grow disk to the newly specified size. Size must not exceed 1024 and must be greater than the current size. >>> clc.v2.Server("WA1BTDIX01").Disks().disks[2].Grow(30).WaitUntilComplete() 0
def model(self):
    """Return the Android code name for the device.

    In bootloader mode the name is parsed from fastboot's 'product'
    variable; otherwise it is read from adb system properties.
    """
    if self.is_bootloader:
        out = self.fastboot.getvar('product').strip()
        # Assumes the first line looks like "product: <name>" —
        # TODO confirm against fastboot output format.
        lines = out.decode('utf-8').split('\n', 1)
        if lines:
            tokens = lines[0].split(' ')
            if len(tokens) > 1:
                return tokens[1].lower()
        return None
    model = self.adb.getprop('ro.build.product').lower()
    # 'sprout' devices report their name in ro.build.product.
    if model == 'sprout':
        return model
    return self.adb.getprop('ro.product.name').lower()
The Android code name for the device.
def save(self, path):
    """Save this Datamat to an HDF5 file at `path`.

    Array-like fields become datasets in the 'Datamat' group; fields
    holding sequences of dicts become nested groups (one per element,
    one dataset per dict entry). Parameters are stored as group
    attributes. The file is always closed, even on error.

    Parameters:
        path : string
            Absolute path of the file to save to.
    """
    f = h5py.File(path, 'w')
    try:
        fm_group = f.create_group('Datamat')
        for field in self.fieldnames():
            try:
                fm_group.create_dataset(field, data=self.__dict__[field])
            except TypeError:
                # Field is not array-like: store it as nested groups,
                # one sub-group per element, one dataset per dict key.
                # (Removed a leftover debug print of (field, d).)
                sub_group = fm_group.create_group(field)
                for i, d in enumerate(self.__dict__[field]):
                    index_group = sub_group.create_group(str(i))
                    for key, value in list(d.items()):
                        index_group.create_dataset(key, data=value)
        for param in self.parameters():
            fm_group.attrs[param] = self.__dict__[param]
    finally:
        f.close()
Saves Datamat to path. Parameters: path : string Absolute path of the file to save to.
def srp(x, promisc=None, iface=None, iface_hint=None, filter=None,
        nofilter=0, type=ETH_P_ALL, *args, **kargs):
    """Send and receive packets at layer 2.

    When no interface is given, one may be derived from `iface_hint`
    via the routing table. A fresh L2 socket is opened for the exchange
    and closed afterwards.
    """
    if iface is None and iface_hint is not None:
        iface = conf.route.route(iface_hint)[0]
    s = conf.L2socket(promisc=promisc, iface=iface,
                      filter=filter, nofilter=nofilter, type=type)
    result = sndrcv(s, x, *args, **kargs)
    s.close()
    return result
Send and receive packets at layer 2
def job_is_running(self, job_id):
    """Check if a job is currently running.

    :param job_id: Job identifier to check the status of.
    :type job_id: :py:class:`uuid.UUID`
    :return: False when the job does not exist or has no live worker.
    :rtype: bool
    """
    job_id = normalize_job_id(job_id)
    desc = self._jobs.get(job_id)
    if desc is None or not desc['job']:
        return False
    return desc['job'].is_alive()
Check if a job is currently running. False is returned if the job does not exist. :param job_id: Job identifier to check the status of. :type job_id: :py:class:`uuid.UUID` :rtype: bool
def get_datasets(self):
    """Get any datasets in the showcase.

    Returns:
        List[Dataset]: list of datasets (empty when none are linked).
    """
    assoc_result, datasets_dicts = self._read_from_hdx(
        'showcase', self.data['id'], fieldname='showcase_id',
        action=self.actions()['list_datasets'])
    datasets = list()
    if assoc_result:
        for dataset_dict in datasets_dicts:
            dataset = hdx.data.dataset.Dataset(dataset_dict,
                                               configuration=self.configuration)
            datasets.append(dataset)
    return datasets
Get any datasets in the showcase Returns: List[Dataset]: List of datasets
def prep_bootstrap(mpt):
    """Copy the bootstrap script into a private temp dir under `mpt`.

    Returns the full path of the copied script and its path relative to
    the mountpoint.

    CLI Example:

    .. code-block:: bash

        salt '*' seed.prep_bootstrap /tmp
    """
    bs_ = __salt__['config.gather_bootstrap_script']()
    # Random directory name so concurrent seeds don't collide.
    fpd_ = os.path.join(mpt, 'tmp', "{0}".format(uuid.uuid4()))
    if not os.path.exists(fpd_):
        os.makedirs(fpd_)
    os.chmod(fpd_, 0o700)
    fp_ = os.path.join(fpd_, os.path.basename(bs_))
    shutil.copy(bs_, fp_)
    # Path as seen from inside the mounted image.
    tmppath = fpd_.replace(mpt, '')
    return fp_, tmppath
Update and get the random script to a random place CLI Example: .. code-block:: bash salt '*' seed.prep_bootstrap /tmp
def append_faces(vertices_seq, faces_seq):
    """Combine matched vertex/face sequences into single arrays.

    Parameters
    -----------
    vertices_seq : (n, ) sequence of (m, d) float
        Multiple vertex arrays.
    faces_seq : (n, ) sequence of (p, j) int
        Zero-indexed faces for the matching vertex arrays.

    Returns
    ----------
    vertices : (i, d) float
        Points in space.
    faces : (j, 3) int
        Vertex indices referencing the combined vertex array.
    """
    counts = np.array([len(v) for v in vertices_seq])
    # Each face array must be shifted by the number of vertices that
    # precede its vertex array in the stacked result.
    offsets = np.append(0, np.cumsum(counts)[:-1])
    shifted = [f + off for off, f in zip(offsets, faces_seq) if len(f) != 0]
    vertices = vstack_empty(vertices_seq)
    faces = vstack_empty(shifted)
    return vertices, faces
Given a sequence of zero- indexed faces and vertices combine them into a single array of faces and a single array of vertices. Parameters ----------- vertices_seq : (n, ) sequence of (m, d) float Multiple arrays of verticesvertex arrays faces_seq : (n, ) sequence of (p, j) int Zero indexed faces for matching vertices Returns ---------- vertices : (i, d) float Points in space faces : (j, 3) int Reference vertex indices
def show_rbac_policy(self, rbac_policy_id, **_params):
    """Fetch information of a certain RBAC policy."""
    return self.get(self.rbac_policy_path % rbac_policy_id,
                    params=_params)
Fetch information of a certain RBAC policy.
def add_sections(app, doctree, fromdocname):
    """Attach section titles to each need as extra attributes.

    Adds 'sections' (all titles) and 'section_name' (first title or "")
    so they can be used in tables and filters.
    """
    needs = getattr(app.builder.env, 'needs_all_needs', {})
    for need_info in needs.values():
        sections = get_sections(need_info)
        need_info['sections'] = sections
        need_info['section_name'] = sections[0] if sections else ""
Add section titles to the needs as additional attributes that can be used in tables and filters
def _collection_default_options(self, name, **kargs):
    """Get a Collection instance with the default settings.

    The database's write concern is used only when it is acknowledged;
    otherwise a default WriteConcern is substituted.
    """
    wc = (self.write_concern
          if self.write_concern.acknowledged
          else WriteConcern())
    return self.get_collection(
        name, codec_options=DEFAULT_CODEC_OPTIONS,
        read_preference=ReadPreference.PRIMARY,
        write_concern=wc)
Get a Collection instance with the default settings.
def get_endpoints(self):
    """Return a Command that lists all available gateway endpoints.

    The raw response is a comma-separated list of '<path>;...' entries;
    the processor keeps only the path part, stripping its leading '</'
    and trailing '>'.
    """
    def process_result(result):
        return [line.split(';')[0][2:-1] for line in result.split(',')]
    return Command('get', ['.well-known', 'core'], parse_json=False,
                   process_result=process_result)
Return all available endpoints on the gateway. Returns a Command.
def _validate_field(param, fields): if '/' not in param.field and param.field not in fields: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is not possible. The ' 'resource requested does not have a "%s" field. Please ' 'modify your request & retry.' % (param, param.field), 'links': LINK, 'parameter': PARAM, })
Ensure the field exists on the model
def copy_pkg(self, filename, id_=-1):
    """Copy a pkg, dmg, or zip to all child repositories.

    Args:
        filename: String path to the local file to copy.
        id_: Integer ID to associate the package with (JDS/CDP only);
            -1 creates a new package object in the database.
    """
    for child in self._children:
        child.copy_pkg(filename, id_)
Copy a pkg, dmg, or zip to all repositories. Args: filename: String path to the local file to copy. id_: Integer ID you wish to associate package with for a JDS or CDP only. Default is -1, which is used for creating a new package object in the database.
def load_data_file(fname, directory=None, force_download=False):
    """Get a standard vispy demo data file, downloading it if needed.

    Parameters
    ----------
    fname : str
        Path on the remote demo-data repository, e.g.
        ``'molecular_viewer/micelle.npy'``.
    directory : str | None
        Local directory to save the file in. Defaults to
        ``config['data_path']``.
    force_download : bool | str
        If True, always re-download. If a 'YYYY-MM-DD' string,
        re-download only when the local copy predates that date.

    Returns
    -------
    fname : str
        Path to the file on the local system.
    """
    _url_root = 'http://github.com/vispy/demo-data/raw/master/'
    url = _url_root + fname
    if directory is None:
        directory = config['data_path']
        if directory is None:
            # No configured data path and none supplied: cannot proceed.
            raise ValueError('config["data_path"] is not defined, '
                             'so directory must be supplied')

    fname = op.join(directory, op.normcase(fname))
    if op.isfile(fname):
        if not force_download:
            # Local copy exists and no freshness constraint: done.
            return fname
        if isinstance(force_download, string_types):
            # Date-based freshness check (struct_time compares fine).
            ntime = time.strptime(force_download, '%Y-%m-%d')
            # NOTE(review): docstring says "modified date" but this uses
            # getctime — confirm getmtime isn't the intended call.
            ftime = time.gmtime(op.getctime(fname))
            if ftime >= ntime:
                return fname
            else:
                print('File older than %s, updating...' % force_download)
    if not op.isdir(op.dirname(fname)):
        os.makedirs(op.abspath(op.dirname(fname)))
    _fetch_file(url, fname)
    return fname
Get a standard vispy demo data file Parameters ---------- fname : str The filename on the remote ``demo-data`` repository to download, e.g. ``'molecular_viewer/micelle.npy'``. These correspond to paths on ``https://github.com/vispy/demo-data/``. directory : str | None Directory to use to save the file. By default, the vispy configuration directory is used. force_download : bool | str If True, the file will be downloaded even if a local copy exists (and this copy will be overwritten). Can also be a YYYY-MM-DD date to ensure a file is up-to-date (modified date of a file on disk, if present, is checked). Returns ------- fname : str The path to the file on the local system.
def brier_score(self):
    """Calculate the Brier score from its three-component decomposition:
    BS = reliability - resolution + uncertainty.
    """
    rel, res, unc = self.brier_score_components()
    return rel - res + unc
Calculate the Brier Score
def _label_from_list(self, labels:Iterator, label_cls:Callable=None, from_item_lists:bool=False, **kwargs)->'LabelList':
    "Label `self.items` with `labels`."
    # Guard: this path is only valid after the item list has been split.
    if not from_item_lists:
        raise Exception("Your data isn't split, if you don't want a validation set, please use `split_none`.")
    # Object dtype keeps heterogeneous labels (lists, strings, ...) intact.
    labels = array(labels, dtype=object)
    # Infer the label class from the data unless one was given explicitly.
    label_cls = self.get_label_cls(labels, label_cls=label_cls, **kwargs)
    y = label_cls(labels, path=self.path, **kwargs)
    # Pair the inputs (self) with the freshly built targets.
    res = self._label_list(x=self, y=y)
    return res
Label `self.items` with `labels`.
def manage_initial_service_status_brok(self, b):
    """Seed the known-services cache from an initial service status brok."""
    host_name = b.data['host_name']
    service_description = b.data['service_description']
    service_id = host_name + "/" + service_description
    logger.debug("got initial service status: %s", service_id)

    # Services of unknown hosts are not cached.
    if host_name not in self.hosts_cache:
        logger.error("initial service status, host is unknown: %s.", service_id)
        return

    entry = {}
    self.services_cache[service_id] = entry
    if 'customs' in b.data:
        entry['_GRAPHITE_POST'] = sanitize_name(
            b.data['customs'].get('_GRAPHITE_POST', None))

    logger.debug("initial service status received: %s", service_id)
Prepare the known services cache
def find_task(self, name):
    """Find a task by exact name, falling back to fuzzy matching.

    Returns
    -------
    Task
        The exact match, or the single task whose name is similar
        enough (ratio >= 0.75).

    Raises
    ------
    NoSuchTaskError
        When no exact match exists and the number of similar tasks is
        not exactly one; the near misses are attached to the error.
    """
    if name in self.tasks:
        return self.tasks[name]
    close_matches = [
        task for task_name, task in self.tasks.items()
        if SequenceMatcher(None, name, task_name).ratio() >= 0.75
    ]
    if len(close_matches) != 1:
        raise NoSuchTaskError(close_matches)
    return close_matches[0]
Find a task by name. If a task with the exact name cannot be found, then tasks with similar names are searched for. Returns ------- Task If the task is found. Raises ------ NoSuchTaskError If the task cannot be found.
def _construct_control_flow_slice(self, simruns):
    """
    Build a slice of the program using control flow only, ignoring data
    dependencies. This is a deliberate over-approximation that works
    acceptably for small programs.

    :param simruns: A list of SimRun targets; each must exist in the CFG.
    """
    if self._cfg is None:
        l.error('Please build CFG first.')

    cfg = self._cfg.graph
    for simrun in simruns:
        if simrun not in cfg:
            l.error('SimRun instance %s is not in the CFG.', simrun)

    # Worklist of blocks whose predecessors still need visiting.
    stack = [ ]
    for simrun in simruns:
        stack.append(simrun)

    self.runs_in_slice = networkx.DiGraph()
    self.cfg_nodes_in_slice = networkx.DiGraph()

    # Maps block address -> True once all its statements are included.
    self.chosen_statements = { }
    while stack:
        block = stack.pop()
        if block.addr not in self.chosen_statements:
            # Take the whole block, then walk every CFG predecessor so
            # the slice covers every path that can reach the targets.
            self.chosen_statements[block.addr] = True
            predecessors = cfg.predecessors(block)
            for pred in predecessors:
                stack.append(pred)
                self.cfg_nodes_in_slice.add_edge(pred, block)
                self.runs_in_slice.add_edge(pred.addr, block.addr)
Build a slice of the program without considering the effect of data dependencies. This is an incorrect hack, but it should work fine with small programs. :param simruns: A list of SimRun targets. You probably wanna get it from the CFG somehow. It must exist in the CFG.
def put(self, name, handler, builder=None, request=None, get=True):
    """Write a new value to a PV, optionally reading it first.

    :param name: PV name.
    :param callable handler: Completion notification; called with None
        (success), RemoteError, or Cancelled.
    :param callable builder: Called once the Put type is known; fills in
        the Value to be sent.
    :param request: A Value or string qualifying the request, or None.
    :param bool get: When True, do a Get first so the builder sees
        recent PV values.
    :returns: An object whose cancel() aborts the operation.
    """
    channel = self._channel(name)
    return _p4p.ClientOperation(channel,
                                handler=unwrapHandler(handler, self._nt),
                                builder=defaultBuilder(builder, self._nt),
                                pvRequest=wrapRequest(request),
                                get=get,
                                put=True)
Write a new value to a PV. :param name: A single name string or list of name strings :param callable handler: Completion notification. Called with None (success), RemoteError, or Cancelled :param callable builder: Called when the PV Put type is known. A builder is responsible for filling in the Value to be sent. builder(value) :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list. :returns: A object with a method cancel() which may be used to abort the operation.
def get_version(self):
    """Return the Version row matching this object's published version."""
    lookup = {
        'content_type': self.content_type,
        'object_id': self.object_id,
        'version_number': self.publish_version,
    }
    return Version.objects.get(**lookup)
Get the version object for the related object.
def _find_append_zero_crossings(x, y):
    r"""Estimate zero crossings of the (x, y) series, append them, and
    return the series sorted by x with duplicate x values removed.

    The first point is excluded when estimating crossings, matching the
    intersection helper's expectations.
    """
    crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units)
    x = concatenate((x, crossings[0]))
    y = concatenate((y, crossings[1]))

    # Sort the combined series by x.
    order = np.argsort(x)
    x, y = x[order], y[order]

    # Drop points whose x duplicates the previous one.
    distinct = np.ediff1d(x, to_end=[1]) > 0
    return x[distinct], y[distinct]
r""" Find and interpolate zero crossings. Estimate the zero crossings of an x,y series and add estimated crossings to series, returning a sorted array with no duplicate values. Parameters ---------- x : `pint.Quantity` x values of data y : `pint.Quantity` y values of data Returns ------- x : `pint.Quantity` x values of data y : `pint.Quantity` y values of data
def create(self, subscription_id, name, parameters, type='analysis', service='facebook'):
    """Create a PYLON task via POST to '<service>/task/'.

    :param subscription_id: ID of the recording to create the task for.
    :param name: Name of the new task.
    :param parameters: Task parameters dict.
    :param type: Analysis type; currently only 'analysis' is accepted.
    :param service: PYLON service (facebook).
    :return: REST API output with headers attached.
    """
    payload = {
        'subscription_id': subscription_id,
        'name': name,
        'parameters': parameters,
        'type': type,
    }
    return self.request.post(service + '/task/', payload)
Create a PYLON task :param subscription_id: The ID of the recording to create the task for :type subscription_id: str :param name: The name of the new task :type name: str :param parameters: The parameters for this task :type parameters: dict :param type: The type of analysis to create, currently only 'analysis' is accepted :type type: str :param service: The PYLON service (facebook) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def _generate_reads(seq, name):
    """Simulate reads from a precursor sequence.

    Only precursors strictly between 70 and 130 nt produce reads:
    mature-like 40-mers from each end plus two noise read sets.
    """
    reads = dict()
    if 70 < len(seq) < 130:
        reads.update(_mature(seq[:40], 0, name))
        reads.update(_mature(seq[-40:], len(seq) - 40, name))
        reads.update(_noise(seq, name))
        reads.update(_noise(seq, name, 25))
    return reads
Main function that creates reads from precursors
def _req_fix(self, line): deps = [] for dep in line[18:].strip().split(","): dep = dep.split("|") if self.repo == "slacky": if len(dep) > 1: for d in dep: deps.append(d.split()[0]) dep = "".join(dep) deps.append(dep.split()[0]) else: if len(dep) > 1: for d in dep: deps.append(d) deps.append(dep[0]) return deps
Fix slacky and salix requirements, because many dependencies are split with "," and others with "|"
def method_name(func):
    """Decorator that injects the wrapped function's own name, converted
    to PascalCase, as the `name` keyword argument."""
    @wraps(func)
    def _with_name(*args, **kwargs):
        pascal = to_pascal_case(func.__name__)
        return func(name=pascal, *args, **kwargs)
    return _with_name
Method wrapper that adds the name of the method being called to its arguments list in Pascal case
def scan_full(self, regex, return_string=True, advance_pointer=True):
    """Try to match `regex` at the current position.

    Returns the matched text, or its length when `return_string` is
    false, or None when nothing matches. `advance_pointer` controls
    whether self.pos moves past the match.
    """
    pattern = get_regex(regex)
    self.match = pattern.match(self.string, self.pos)
    if self.match is None:
        return None
    if advance_pointer:
        self.pos = self.match.end()
    matched = self.match.group(0)
    return matched if return_string else len(matched)
Match from the current position. If `return_string` is false and a match is found, returns the number of characters matched. >>> s = Scanner("test string") >>> s.scan_full(r' ') >>> s.scan_full(r'test ') 'test ' >>> s.pos 5 >>> s.scan_full(r'stri', advance_pointer=False) 'stri' >>> s.pos 5 >>> s.scan_full(r'stri', return_string=False, advance_pointer=False) 4 >>> s.pos 5
def get_catalogue_header_value(cls, catalog, key):
    """Get a `.po` header value.

    Scans the catalog's header entry (keyed by the empty string) for
    lines starting with '<key>:' and returns the stripped value of the
    last such line, or None when the header or key is absent.
    """
    header_value = None
    if '' in catalog:
        # Hoisted out of the loop: the prefix is loop-invariant.
        prefix = '%s:' % key
        for line in catalog[''].split('\n'):
            if line.startswith(prefix):
                # Split only on the first ':' so values may contain ':'.
                header_value = line.split(':', 1)[1].strip()
    return header_value
Get `.po` header value.
def next_frame_ae():
    """HParams for the conv-autoencoder variant of the basic
    deterministic next-frame model."""
    hparams = next_frame_basic_deterministic()
    hparams.bottom["inputs"] = modalities.video_bitwise_bottom
    hparams.top["inputs"] = modalities.video_top
    # Autoencoder-specific overrides of the base hparams.
    overrides = {
        "hidden_size": 256,
        "batch_size": 8,
        "num_hidden_layers": 4,
        "num_compress_steps": 4,
        "dropout": 0.4,
    }
    for attr, value in overrides.items():
        setattr(hparams, attr, value)
    return hparams
Conv autoencoder.
def parse_variables_mapping(variables_mapping, ignore=False):
    """Evaluate every lazy variable/function in `variables_mapping`.

    Variables may reference each other, so resolution iterates until
    every entry is parsed; a bound on total iterations detects circular
    or undefined references.

    Args:
        variables_mapping (dict): name -> value, where values may be
            LazyString instances referencing other variables/functions.
        ignore (bool): when True, unresolvable self-references are kept
            verbatim instead of raising VariableNotFound (used when
            initializing tests).

    Returns:
        dict: mapping with every value fully evaluated.

    Raises:
        exceptions.VariableNotFound: on circular or missing references
            (unless suppressed by `ignore` for self-references).
    """
    run_times = 0
    parsed_variables_mapping = {}
    while len(parsed_variables_mapping) != len(variables_mapping):
        for var_name in variables_mapping:
            run_times += 1
            # A full resolution needs at most len(mapping) passes; four
            # times that means the remainder can never resolve.
            if run_times > len(variables_mapping) * 4:
                not_found_variables = {
                    key: variables_mapping[key]
                    for key in variables_mapping
                    if key not in parsed_variables_mapping
                }
                raise exceptions.VariableNotFound(not_found_variables)

            if var_name in parsed_variables_mapping:
                continue

            value = variables_mapping[var_name]
            variables = extract_variables(value)

            # A variable referencing itself can never resolve.
            if var_name in variables:
                if ignore:
                    # Keep the unresolved value verbatim.
                    parsed_variables_mapping[var_name] = value
                    continue
                raise exceptions.VariableNotFound(var_name)

            if variables:
                # Defer until every referenced variable is resolved.
                if any([_var_name not in parsed_variables_mapping for _var_name in variables]):
                    continue

            parsed_value = parse_lazy_data(value, parsed_variables_mapping)
            parsed_variables_mapping[var_name] = parsed_value

    return parsed_variables_mapping
eval each prepared variable and function in variables_mapping. Args: variables_mapping (dict): { "varA": LazyString(123$varB), "varB": LazyString(456$varC), "varC": LazyString(${sum_two($a, $b)}), "a": 1, "b": 2, "c": {"key": LazyString($b)}, "d": [LazyString($a), 3] } ignore (bool): If set True, VariableNotFound will be ignored. This is used when initializing tests. Returns: dict: parsed variables_mapping should not contain any variable or function. { "varA": "1234563", "varB": "4563", "varC": "3", "a": 1, "b": 2, "c": {"key": 2}, "d": [1, 3] }
def _summarize_call(parts):
    """Summarize an SV call's type and size from its VCF record fields."""
    # SVTYPE comes from the INFO column (field 8); default to "" when absent.
    svtype = ""
    for info_entry in parts[7].split(";"):
        if info_entry.startswith("SVTYPE="):
            svtype = info_entry.split("=")[1]
            break
    start, end = _get_start_end(parts)
    return {"svtype": svtype, "size": int(end) - int(start)}
Provide summary metrics on size and svtype for a SV call.
def delete_group_policy(self, group_name, policy_name):
    """Delete the named policy document from the given IAM group.

    :param group_name: Name of the group the policy is associated with.
    :param policy_name: Name of the policy document to delete.
    """
    params = {
        'GroupName': group_name,
        'PolicyName': policy_name,
    }
    return self.get_response('DeleteGroupPolicy', params, verb='POST')
Deletes the specified policy document for the specified group. :type group_name: string :param group_name: The name of the group the policy is associated with. :type policy_name: string :param policy_name: The policy document to delete.
def delete_item2(self, tablename, key, expr_values=None, alias=None,
                 condition=None, returns=NONE, return_capacity=None,
                 return_item_collection_metrics=NONE, **kwargs):
    """Delete an item from a DynamoDB table.

    See the DynamoDB DeleteItem API reference for parameter semantics.

    Parameters
    ----------
    tablename : str
        Name of the table.
    key : dict
        Primary key (hash key and, if applicable, range key).
    expr_values : dict, optional
        ExpressionAttributeValues; see also **kwargs.
    alias : dict, optional
        ExpressionAttributeNames.
    condition : str, optional
        ConditionExpression.
    returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
        Which attribute values to return (default NONE).
    return_capacity : {NONE, INDEXES, TOTAL}, optional
        Consumed-capacity reporting level.
    return_item_collection_metrics : {NONE, SIZE}, optional
        SIZE returns statistics about modified item collections.
    **kwargs : dict, optional
        Used as ExpressionAttributeValues when expr_values is omitted
        (':' is prepended to keys automatically).
    """
    keywords = {
        'TableName': tablename,
        'Key': self.dynamizer.encode_keys(key),
        'ReturnValues': returns,
        'ReturnConsumedCapacity': self._default_capacity(return_capacity),
        'ReturnItemCollectionMetrics': return_item_collection_metrics,
    }
    # Merge explicit expr_values with kwargs-provided values.
    values = build_expression_values(self.dynamizer, expr_values, kwargs)
    if values:
        keywords['ExpressionAttributeValues'] = values
    if alias:
        keywords['ExpressionAttributeNames'] = alias
    if condition:
        keywords['ConditionExpression'] = condition
    result = self.call('delete_item', **keywords)
    if result:
        # Wrap the raw response so 'Attributes' are decoded.
        return Result(self.dynamizer, result, 'Attributes')
Delete an item from a table For many parameters you will want to reference the DynamoDB API: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html Parameters ---------- tablename : str Name of the table to update key : dict Primary key dict specifying the hash key and, if applicable, the range key of the item. expr_values : dict, optional See docs for ExpressionAttributeValues. See also: kwargs alias : dict, optional See docs for ExpressionAttributeNames condition : str, optional See docs for ConditionExpression returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional Return either the old or new values, either all attributes or just the ones that changed. (default NONE) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) return_item_collection_metrics : (NONE, SIZE), optional SIZE will return statistics about item collections that were modified. **kwargs : dict, optional If expr_values is not provided, the kwargs dict will be used as the ExpressionAttributeValues (a ':' will be automatically prepended to all keys).
def list(self, path=None, with_metadata=False, include_partitions=False):
    """List bundle files in the cache as {path: info}.

    Partition files (entries nested more than one level deep) are
    excluded unless `include_partitions` is set.
    """
    import json

    sub_path = self.prefix + '/' + path.strip('/') if path else self.prefix

    listing = {}
    for entry in self.bucket.list(sub_path):
        rel_path = entry.name.replace(self.prefix, '', 1).strip('/')

        # Skip internal and metadata entries.
        if rel_path.startswith('_') or rel_path.startswith('meta'):
            continue

        # More than one '/' means a partition file.
        if not include_partitions and rel_path.count('/') > 1:
            continue

        if with_metadata:
            info = self.metadata(rel_path)
            if info and 'identity' in info:
                info['identity'] = json.loads(info['identity'])
        else:
            info = {}

        info['caches'] = [self.repo_id]

        if rel_path:
            listing[rel_path] = info

    return listing
Get a list of all of bundle files in the cache. Does not return partition files
def parse_device(lines):
    """Parse one mdstat device block into (name, device_dict).

    The first line is the header (name, status, personality); the
    remaining lines describe status, bitmap, and resync progress, in
    formats that vary with the device's personality and health.
    """
    name, status_line, device = parse_device_header(lines.pop(0))

    # Some personalities carry the status on the header line itself.
    if not status_line:
        status_line = lines.pop(0)
    status = parse_device_status(status_line, device["personality"])

    bitmap = None
    resync = None
    for line in lines:
        if line.startswith(" bitmap:"):
            bitmap = parse_device_bitmap(line)
        elif line.startswith(" ["):
            # In-progress recovery/resync with a progress bar.
            resync = parse_device_resync_progress(line)
        elif line.startswith(" \tresync="):
            # Resync pending/delayed (standby) form.
            resync = parse_device_resync_standby(line)
        else:
            raise NotImplementedError("unknown device line: {0}".format(line))

    device.update({
        "status": status,
        "bitmap": bitmap,
        "resync": resync,
    })

    return (name, device)
Parse all the lines of a device block. A device block is composed of a header line with the name of the device and at least one extra line describing the device and its status. The extra lines have a varying format depending on the status and personality of the device (e.g. RAID1 vs RAID5, healthy vs recovery/resync).
def GetOutputPluginStates(output_plugins, source=None, token=None):
    """Initializes state for a list of output plugins.

    Args:
        output_plugins: Iterable of output plugin descriptors.
        source: Source URN passed to each plugin on creation.
        token: Access token used when creating plugins.

    Returns:
        A list of rdf_flow_runner.OutputPluginState, one per descriptor.

    Raises:
        ValueError: When any plugin fails to initialize; the original
            exception is folded into the message.
    """
    output_plugins_states = []
    for plugin_descriptor in output_plugins:
        plugin_class = plugin_descriptor.GetPluginClass()
        try:
            _, plugin_state = plugin_class.CreatePluginAndDefaultState(
                source_urn=source, args=plugin_descriptor.plugin_args, token=token)
        except Exception as e:
            # Surface any initialization failure as a single ValueError.
            raise ValueError("Plugin %s failed to initialize (%s)" %
                             (plugin_class, e))
        # Plugins expect these bookkeeping fields to exist in their state.
        plugin_state["logs"] = []
        plugin_state["errors"] = []
        output_plugins_states.append(
            rdf_flow_runner.OutputPluginState(
                plugin_state=plugin_state, plugin_descriptor=plugin_descriptor))
    return output_plugins_states
Initializes state for a list of output plugins.
def fire(self, event):
    """Fire the named DOM event (e.g. 'click') on this node.

    Returns self so calls can be chained.
    """
    self.browser.fire(self.element, event)
    return self
Fires a specified DOM event on the current node. :param event: the name of the event to fire (e.g., 'click'). Returns the :class:`zombie.dom.DOMNode` to allow function chaining.
async def _maybe_release_last_part(self) -> None: if self._last_part is not None: if not self._last_part.at_eof(): await self._last_part.release() self._unread.extend(self._last_part._unread) self._last_part = None
Ensures that the last read body part is read completely.
def _parse_fields(self, query): field_args = { k: v for k, v in query.items() if k.startswith('fields[') } fields = {} for k, v in field_args.items(): fields[k[7:-1]] = v.split(',') return fields
Parse the querystring args for fields. :param query: Dict of query args
def add_slices(self, dashboard_id):
    """Add and save slices to a dashboard.

    Reads slice ids from the POSTed 'data' form field, verifies the
    current user owns the dashboard, appends the slices, and commits.
    """
    data = json.loads(request.form.get('data'))
    session = db.session()
    Slice = models.Slice
    dash = (
        session.query(models.Dashboard).filter_by(id=dashboard_id).first())
    # Only owners may modify the dashboard.
    check_ownership(dash, raise_if_false=True)
    new_slices = session.query(Slice).filter(
        Slice.id.in_(data['slice_ids']))
    dash.slices += new_slices
    session.merge(dash)
    session.commit()
    session.close()
    return 'SLICES ADDED'
Add and save slices to a dashboard
def decode_body(cls, header, f):
    """Decode the body of a pubrel packet from file-like `f`.

    Asserts that header.packet_type is pubrel.

    Raises
    ------
    DecodeError
        When the remaining length does not match the bytes consumed.

    Returns
    -------
    int
        Number of bytes consumed from `f`.
    MqttPubrel
        The decoded packet.
    """
    assert header.packet_type == MqttControlPacketType.pubrel

    decoder = mqtt_io.FileDecoder(
        mqtt_io.LimitReader(f, header.remaining_len))
    (packet_id,) = decoder.unpack(mqtt_io.FIELD_U16)

    if decoder.num_bytes_consumed != header.remaining_len:
        raise DecodeError('Extra bytes at end of packet.')

    return decoder.num_bytes_consumed, MqttPubrel(packet_id)
Generates a `MqttPubrel` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `pubrel`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPubrel Object extracted from ``f``.
def slugify_argument(func):
    """Wrap a string-returning function so passing slugify=True slugifies
    its result.

    NOTE(review): the 'slugify' kwarg is still forwarded to `func`, so
    wrapped functions must tolerate it (e.g. via **kwargs) — confirm
    this is intended.
    """
    @six.wraps(func)
    def wrapped(*args, **kwargs):
        result = func(*args, **kwargs)
        if kwargs.get('slugify'):
            result = _slugify(result)
        return result
    return wrapped
Wraps a function that returns a string, adding the 'slugify' argument. >>> slugified_fn = slugify_argument(lambda *args, **kwargs: "YOU ARE A NICE LADY") >>> slugified_fn() 'YOU ARE A NICE LADY' >>> slugified_fn(slugify=True) 'you-are-a-nice-lady'