code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def setValues(self, values):
    """Set the values of a DataFrame from a dictionary.

    Args:
        values: Mapping of key -> value, where each key supplies the index
            columns and each value supplies the remaining data columns.
    """
    ncols = self.getNumCols()
    nindices = self.getNumIndices()
    for key, value in values.items():
        # Normalize scalars to lists so len() and concatenation work uniformly.
        key = Utils.convToList(key)
        assert len(key) == nindices
        value = Utils.convToList(value)
        assert len(value) == ncols-nindices
        self.addRow(key + value)
Set the values of a DataFrame from a dictionary. Args: values: Dictionary with the values to set.
def compare_version(self, version_string, op):
    """Compare the Abinit version to `version_string` with operator `op`.

    :param version_string: Version string to compare against.
    :param op: Operator name understood by monty (e.g. ">=", "<").
    :return: Boolean result of ``self.version op version_string``.
    """
    from pkg_resources import parse_version
    from monty.operator import operator_from_str
    op = operator_from_str(op)
    return op(parse_version(self.version), parse_version(version_string))
Compare Abinit version to `version_string` with operator `op`
def get_view(self, view_name):
    """Return the view registered under *view_name* at the current time."""
    view_map = self.measure_to_view_map
    return view_map.get_view(view_name=view_name, timestamp=self.time)
gets the view given the view name
def obj_update_or_create(model, defaults=None, update_fields=UNSET, **kwargs):
    """Mimic ``queryset.update_or_create`` but use ``obj_update`` for saves.

    :param model: Django model class to look up or create.
    :param defaults: Field values applied on create (or update).
    :param update_fields: Restricts which fields are saved on update.
    :return: ``(obj, created)`` tuple, like ``get_or_create``.
    """
    obj, created = model.objects.get_or_create(defaults=defaults, **kwargs)
    if created:
        logger.debug('CREATED %s %s', model._meta.object_name, obj.pk,
                     extra={'pk': obj.pk})
    else:
        # Existing object: apply defaults via obj_update instead of a plain save.
        obj_update(obj, defaults, update_fields=update_fields)
    return obj, created
Mimic queryset.update_or_create but using obj_update.
def get_rna(self) -> Rna:
    """Get the corresponding RNA node.

    :raises: InferCentralDogmaException if this node has variants and is
        therefore not the reference node.
    """
    if self.variants:
        raise InferCentralDogmaException('can not get rna for variant')
    return Rna(
        namespace=self.namespace,
        name=self.name,
        identifier=self.identifier
    )
Get the corresponding RNA or raise an exception if it's not the reference node. :raises: InferCentralDogmaException
def write(self, outfname=None):
    """Write or overwrite the surveys to the specified .DAT file.

    :param outfname: Target filename; defaults to ``self.filename``.
    """
    outfname = outfname or self.filename
    # .DAT files are windows-1252 encoded with CRLF line endings.
    with codecs.open(outfname, 'wb', 'windows-1252') as outf:
        for survey in self.surveys:
            outf.write('\r\n'.join(survey._serialize()))
            # Surveys are separated by a form-feed on its own line.
            outf.write('\r\n'+'\f'+'\r\n')
        # DOS end-of-file marker.
        outf.write('\x1A')
Write or overwrite a `Survey` to the specified .DAT file
def _gather_field_values(
        item, *, fields=None, field_map=FIELD_MAP, normalize_values=False,
        normalize_func=normalize_value):
    """Create a tuple of normalized metadata field values for *item*.

    Parameters:
        item: Item dict or filepath whose tags are read.
        fields (list): Fields used to compare item dicts; defaults to all tags.
        field_map: Mapping of field name aliases.
        normalize_values (bool): Normalize values to remove source differences.
        normalize_func: Applied to values when ``normalize_values`` is True.

    Returns:
        tuple: Values from the given metadata fields.
    """
    it = get_item_tags(item)
    if fields is None:
        fields = list(it.keys())
    # Fall back to plain str() when normalization is disabled.
    normalize = normalize_func if normalize_values else lambda x: str(x)
    field_values = []
    for field in fields:
        field_values.append(
            normalize(
                list_to_single_value(
                    get_field(it, field, field_map=field_map)
                )
            )
        )
    return tuple(field_values)
Create a tuple of normalized metadata field values. Parameters: item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath. fields (list): A list of fields used to compare item dicts. field_map (~collections.abc.Mapping): A mapping of field name aliases. Default: :data:`~google_music_utils.constants.FIELD_MAP` normalize_values (bool): Normalize metadata values to remove common differences between sources. Default: ``False`` normalize_func (function): Function to apply to metadata values if ``normalize_values`` is ``True``. Default: :func:`~google_music_utils.utils.normalize_value` Returns: tuple: Values from the given metadata fields.
def attachment_state(self):
    """Return the attachment status, or ``None`` when nothing is attached."""
    if not self.attach_data:
        return None
    return self.attach_data.status
Get the attachment state.
def load_files(self, path):
    """Recursively index every ``.yaml``/``.yml`` file under *path*.

    :param path: Directory to walk; subdirectories are traversed depth-first.
    """
    if self.verbose == 2:
        print("Indexing {}".format(path))
    for filename in os.listdir(path):
        # os.path.join is portable, unlike manual "/" concatenation.
        file_path = os.path.join(path, filename)
        if os.path.isdir(file_path):
            self.load_files(file_path)
        elif filename.endswith((".yaml", ".yml")):
            self.unfold_yaml(file_path)
Loads files in a given path and all its subdirectories
def read_version():
    """Read the version from the first CHANGELOG line starting with a digit.

    Returns None when no such line exists.
    """
    regex = re.compile('^(?P<number>\d.*?) .*$')
    with open('../CHANGELOG.rst') as f:
        for line in f:
            match = regex.match(line)
            if match:
                return match.group('number')
Read version from the first line starting with digit
def get_value(self, tag=None, field=None):
    """Generate an integer whose bits are set from this bit field's values.

    :param tag: Only include fields carrying this tag (mutually exclusive
        with ``field``).
    :param field: Only include the named field.
    :raises ValueError: If a selected field's value, length or position
        has not been defined.
    """
    assert not (tag is not None and field is not None), \
        "Cannot filter by tag and field simultaneously."
    selected_fields = self._select_by_field_or_tag(tag, field)
    # Every selected field must already have a value assigned.
    missing_fields_idents = set(selected_fields) - set(self.field_values)
    if missing_fields_idents:
        raise ValueError(
            "Cannot generate value with undefined fields {}.".format(
                ", ".join("'{}'".format(f) for f in missing_fields_idents)))
    value = 0
    for identifier, field in iteritems(selected_fields):
        if field.length is None or field.start_at is None:
            raise ValueError(
                "Field '{}' does not have a fixed size/position.".format(
                    identifier))
        # OR each field's value into place at its bit offset.
        value |= (self.field_values[identifier] << field.start_at)
    return value
Generate an integer whose bits are set according to the values of fields in this bit field. All other bits are set to zero. Parameters ---------- tag : str Optionally specifies that the value should only include fields with the specified tag. field : str Optionally specifies that the value should only include the specified field. Raises ------ ValueError If a field's value, length or position has not been defined. (e.g. :py:meth:`.assign_fields` has not been called). UnknownTagError If the tag specified using the `tag` argument does not exist. UnavailableFieldError If the field specified using the `field` argument does not exist or is not available.
def get_contents_to_filename(self, filename, headers=None, cb=None, num_cb=10,
                             torrent=False, version_id=None,
                             res_download_handler=None, response_headers=None,
                             callback=None):
    """Retrieve this key's object from S3 and store it in *filename*.

    See ``get_contents_to_file`` for parameter details.  The transfer is
    asynchronous: *callback* is invoked with the response when finished.
    """
    fp = open(filename, 'wb')

    def got_contents_to_filename(response):
        fp.close()
        # Preserve the object's Last-Modified time on the local file.
        if self.last_modified != None:
            try:
                modified_tuple = rfc822.parsedate_tz(self.last_modified)
                modified_stamp = int(rfc822.mktime_tz(modified_tuple))
                os.utime(fp.name, (modified_stamp, modified_stamp))
            except Exception:
                # Best-effort: a failed timestamp copy is not fatal.
                pass
        if callable(callback):
            callback(response)
    self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
                              version_id=version_id,
                              res_download_handler=res_download_handler,
                              response_headers=response_headers,
                              callback=got_contents_to_filename)
Retrieve an object from S3 using the name of the Key object as the key in S3. Store contents of the object to a file named by 'filename'. See get_contents_to_file method for details about the parameters. :type filename: string :param filename: The filename of where to put the file contents :type headers: dict :param headers: Any additional headers to send in the request :type cb: function :param cb: a callback function that will be called to report progress on the download. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted from S3 and the second representing the size of the to be transmitted object. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type torrent: bool :param torrent: If True, returns the contents of a torrent file as a string. :type res_download_handler: ResumableDownloadHandler :param res_download_handler: If provided, this handler will perform the download. :type response_headers: dict :param response_headers: A dictionary containing HTTP headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details.
def datetime_to_timestamp(dt):
    """Convert a timezone-aware `datetime` to a POSIX timestamp (seconds).

    Note: similar to `datetime.timestamp()` in Python 3.3+.
    """
    epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    return (dt - epoch).total_seconds()
Convert timezone-aware `datetime` to POSIX timestamp and return seconds since UNIX epoch. Note: similar to `datetime.timestamp()` in Python 3.3+.
def check_calling_sequence(name, function_name, function, possible_variables):
    """Check the calling sequence of `function` for the given variables.

    Enforces that the first parameter is 'self' and that the found variables
    appear in the order prescribed by `possible_variables`.

    :return: (found variables, other parameter names)

    NOTE(review): relies on Python 2 semantics -- `filter` must return a
    list for the len()/comparison below; under Python 3 these would need
    wrapping in list(...).
    """
    try:
        # Prefer the wrapped callable when the model provides one.
        calling_sequence = inspect.getargspec(function.input_object).args
    except AttributeError:
        calling_sequence = inspect.getargspec(function).args
    assert calling_sequence[0] == 'self', "Wrong syntax for 'evaluate' in %s. The first argument " \
                                          "should be called 'self'." % name
    variables = filter(lambda var: var in possible_variables, calling_sequence)
    assert len(variables) > 0, "The name of the variables for 'evaluate' in %s must be one or more " \
                               "among %s, instead of %s" % (name, ','.join(possible_variables), ",".join(variables))
    if variables != possible_variables[:len(variables)]:
        raise AssertionError("The variables %s are out of order in '%s' of %s. Should be %s." %
                             (",".join(variables), function_name, name, possible_variables[:len(variables)]))
    other_parameters = filter(lambda var: var not in variables and var != 'self', calling_sequence)
    return variables, other_parameters
Check the calling sequence for the function looking for the variables specified. One or more of the variables can be in the calling sequence. Note that the order of the variables will be enforced. It will also enforce that the first parameter in the calling sequence is called 'self'. :param function: the function to check :param possible_variables: a list of variables to check, The order is important, and will be enforced :return: a tuple containing the list of found variables, and the name of the other parameters in the calling sequence
def fill_zero(
        x=None,
        y=None,
        label=None,
        color=None,
        width=None,
        dash=None,
        opacity=None,
        mode='lines+markers',
        **kargs
):
    """Draw a line chart filled down to zero (``fill='tozeroy'``).

    Returns
    -------
    Chart
    """
    return line(
        x=x, y=y, label=label, color=color, width=width, dash=dash,
        opacity=opacity, mode=mode, fill='tozeroy', **kargs
    )
Fill to zero. Parameters ---------- x : array-like, optional y : TODO, optional label : TODO, optional Returns ------- Chart
def wrap(stream, unicode=False, window=1024, echo=False, close_stream=True):
    """Wrap a stream or socket with expect functionality.

    :param stream: Object with a ``read`` (file-like) or ``recv`` (socket)
        method.
    :param bool unicode: Match on text rather than bytes.
    :param int window: Historical characters to buffer.
    :param bool echo: Echo received characters to stdout.
    :param bool close_stream: Close the stream when the wrapper is closed.
    :raises TypeError: If *stream* has neither ``read`` nor ``recv``.
    """
    if hasattr(stream, 'read'):
        proxy = PollingStreamAdapter(stream)
    elif hasattr(stream, 'recv'):
        proxy = PollingSocketStreamAdapter(stream)
    else:
        raise TypeError('stream must have either read or recv method')
    # Pick the echo callback matching the data type, if echoing at all.
    if echo and unicode:
        callback = _echo_text
    elif echo and not unicode:
        callback = _echo_bytes
    else:
        callback = None
    if unicode:
        expecter = TextExpecter(proxy, input_callback=callback, window=window,
                                close_adapter=close_stream)
    else:
        expecter = BytesExpecter(proxy, input_callback=callback, window=window,
                                 close_adapter=close_stream)
    return expecter
Wrap a stream to implement expect functionality. This function provides a convenient way to wrap any Python stream (a file-like object) or socket with an appropriate :class:`Expecter` class for the stream type. The returned object adds an :func:`Expect.expect` method to the stream, while passing normal stream functions like *read*/*recv* and *write*/*send* through to the underlying stream. Here's an example of opening and wrapping a pair of network sockets:: import socket import streamexpect source, drain = socket.socketpair() expecter = streamexpect.wrap(drain) source.sendall(b'this is a test') match = expecter.expect_bytes(b'test', timeout=5) assert match is not None :param stream: The stream/socket to wrap. :param bool unicode: If ``True``, the wrapper will be configured for Unicode matching, otherwise matching will be done on binary. :param int window: Historical characters to buffer. :param bool echo: If ``True``, echoes received characters to stdout. :param bool close_stream: If ``True``, and the wrapper is used as a context manager, closes the stream at the end of the context manager.
def can_edit(self, user):
    """Return whether `user` may modify the project (and it is not locked)."""
    allowed = self.class_.can_edit(user)
    unlocked = self.status != u'locked'
    return allowed and unlocked
Return whether or not `user` can make changes to the project.
def compress_monkey_patch():
    """Monkey-patch django-compressor so widget SCSS sees theme variables.

    Replaces compressor/pyscss methods with local implementations that
    dynamically append the active theme's imports to every SCSS file.
    """
    from compressor.templatetags import compress as compress_tags
    from compressor import base as compress_base
    compress_base.Compressor.filter_input = filter_input
    compress_base.Compressor.output = output
    compress_base.Compressor.hunks = hunks
    compress_base.Compressor.precompile = precompile
    compress_tags.CompressorMixin.render_compressed = render_compressed
    from django_pyscss import compressor as pyscss_compressor
    pyscss_compressor.DjangoScssFilter.input = input
Patch all django-compressor entry points. We need access to variables from widget SCSS; for example we have:: /themes/bootswatch/cyborg/_variables, but only if cyborg is the active theme. For this reason we dynamically append the import to every SCSS file.
def xraw_command(self, netfn, command, bridge_request=(), data=(),
                 delay_xmit=None, retry=True, timeout=None):
    """Send a raw IPMI command to the BMC, raising IpmiException on error.

    Identical to ``raw_command`` except that IPMI errors raise and the
    response data is returned as a buffer (Python 2) for indexed access
    and struct.unpack_from.
    """
    rsp = self.ipmi_session.raw_command(netfn=netfn, command=command,
                                        bridge_request=bridge_request,
                                        data=data, delay_xmit=delay_xmit,
                                        retry=retry, timeout=timeout)
    if 'error' in rsp:
        raise exc.IpmiException(rsp['error'], rsp['code'])
    rsp['data'] = buffer(rsp['data'])
    return rsp
Send raw ipmi command to BMC, raising exception on error This is identical to raw_command, except it raises exceptions on IPMI errors and returns data as a buffer. This is the recommend function to use. The response['data'] being a buffer allows traditional indexed access as well as works nicely with struct.unpack_from when certain data is coming back. :param netfn: Net function number :param command: Command value :param bridge_request: The target slave address and channel number for the bridge request. :param data: Command data as a tuple or list :param retry: Whether to retry this particular payload or not, defaults to true. :param timeout: A custom time to wait for initial reply, useful for a slow command. This may interfere with retry logic. :returns: dict -- The response from IPMI device
def get_all_submissions(course_id, item_id, item_type, read_replica=True):
    """Yield the most recent submission for every student on the given item.

    Implemented as a generator because the result set can be very large.

    :param read_replica: Use the read replica database when available.
    """
    submission_qs = Submission.objects
    if read_replica:
        submission_qs = _use_read_replica(submission_qs)
    # Ordering puts each student's newest submission first within their group.
    query = submission_qs.select_related('student_item').filter(
        student_item__course_id=course_id,
        student_item__item_id=item_id,
        student_item__item_type=item_type,
    ).order_by('student_item__student_id', '-submitted_at', '-id').iterator()
    for unused_student_id, row_iter in itertools.groupby(
            query, operator.attrgetter('student_item.student_id')):
        # First row of each per-student group is the most recent submission.
        submission = next(row_iter)
        data = SubmissionSerializer(submission).data
        data['student_id'] = submission.student_item.student_id
        yield data
For the given item, get the most recent submission for every student who has submitted. This may return a very large result set! It is implemented as a generator for efficiency. Args: course_id, item_id, item_type (string): The values of the respective student_item fields to filter the submissions by. read_replica (bool): If true, attempt to use the read replica database. If no read replica is available, use the default database. Yields: Dicts representing the submissions with the following fields: student_item student_id attempt_number submitted_at created_at answer Raises: Cannot fail unless there's a database error, but may return an empty iterable.
def _to_utc(self, dt):
    """Localize a naive datetime to the configured timezone and convert to UTC."""
    tz = self._get_tz()
    loc_dt = tz.localize(dt)
    return loc_dt.astimezone(pytz.utc)
Takes a naive datetime, localizes it to the configured timezone, and returns it converted to UTC.
def check_key(self, key):
    """Check that the key length is one of the cipher's valid sizes.

    @param key: a byte string
    @raise TypeError: when the length is not in ``self.key_size``
    """
    if not self.key_size:
        return
    if len(key) in self.key_size:
        return
    raise TypeError('invalid key size %s, must be one of %s'
                    % (len(key), self.key_size))
Check that the key length is valid. @param key: a byte string
def _annotation_handler(ion_type, length, ctx):
    """Handle annotation wrappers; ``ion_type`` is ignored.

    Coroutine-based parser step for Ion binary annotation wrappers.
    """
    _, self = yield
    self_handler = _create_delegate_handler(self)
    if ctx.annotations is not None:
        # An annotation wrapper may not appear directly inside another one.
        raise IonException('Annotation cannot be nested in annotations')
    ctx = ctx.derive_container_context(length, add_depth=0)
    (ann_length, _), _ = yield ctx.immediate_transition(
        _var_uint_field_handler(self_handler, ctx)
    )
    if ann_length < 1:
        raise IonException('Invalid annotation length subfield; annotation wrapper must have at least one annotation.')
    yield ctx.read_data_transition(ann_length, self)
    ann_data = ctx.queue.read(ann_length)
    annotations = tuple(_parse_sid_iter(ann_data))
    # The wrapped value must still fit within the wrapper's declared length.
    if ctx.limit - ctx.queue.position < 1:
        raise IonException('Incorrect annotation wrapper length.')
    yield ctx.immediate_transition(
        _start_type_handler(ctx.field_name, ctx.whence, ctx, annotations=annotations)
    )
Handles annotations. ``ion_type`` is ignored.
def listen():
    """Block waiting for a Windows input message via user32 GetMessageA."""
    msg = MSG()
    ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)
Listen for keyboard input.
def get_current_user(self):
    """Fetch data from the current-user endpoint."""
    return self.get(self.current_user_url)
Get data from the current user endpoint
def _from_dict(cls, _dict):
    """Initialize a SyntaxResult object from a json dictionary."""
    args = {}
    if 'tokens' in _dict:
        args['tokens'] = [
            TokenResult._from_dict(x) for x in (_dict.get('tokens'))
        ]
    if 'sentences' in _dict:
        args['sentences'] = [
            SentenceResult._from_dict(x) for x in (_dict.get('sentences'))
        ]
    return cls(**args)
Initialize a SyntaxResult object from a json dictionary.
def get_list_from_file(file_name):
    """Read a UTF-8 text file and return its lines as a list."""
    with open(file_name, mode='r', encoding='utf-8') as handle:
        return handle.readlines()
read the lines from a file into a list
def template_sunmoon(self, **kwargs):
    """Return the file name for sun or moon template files."""
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    # Fill in derived values when the caller did not supply them.
    kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    kwargs_copy['component'] = kwargs.get(
        'component', self.component(**kwargs))
    self._replace_none(kwargs_copy)
    localpath = NameFactory.templatesunmoon_format.format(**kwargs_copy)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
return the file name for sun or moon template files
def visit_break(self, node, parent):
    """Visit a Break node, returning a fresh instance of it."""
    return nodes.Break(
        getattr(node, "lineno", None),
        getattr(node, "col_offset", None),
        parent
    )
visit a Break node by returning a fresh instance of it
def from_path(cls, *path, namespace=None):
    """Build up a Datastore key from alternating path segments.

    Parameters:
        path: Path segments, consumed two at a time (kind, id).
        namespace: Optional namespace applied to each key in the tree.

    Returns:
        The innermost key, linked back to its parents.
    """
    parent = None
    for i in range(0, len(path), 2):
        parent = cls(*path[i:i + 2], parent=parent, namespace=namespace)
    return parent
Build up a Datastore key from a path. Parameters: \*path(tuple[str or int]): The path segments. namespace(str): An optional namespace for the key. This is applied to each key in the tree. Returns: anom.Key: The Datastore represented by the given path.
def get_role(resource_root, service_name, name, cluster_name="default"):
    """Lookup a role by name.

    @param resource_root: The root Resource object.
    @param service_name: Service name
    @param name: Role name
    @param cluster_name: Cluster name
    @return: An ApiRole object
    """
    return _get_role(resource_root, _get_role_path(cluster_name, service_name, name))
Lookup a role by name @param resource_root: The root Resource object. @param service_name: Service name @param name: Role name @param cluster_name: Cluster name @return: An ApiRole object
def ppo_original_world_model_stochastic_discrete():
    """Atari PPO parameters with a stochastic discrete world model as policy."""
    hparams = ppo_original_params()
    hparams.policy_network = "next_frame_basic_stochastic_discrete"
    hparams_keys = hparams.values().keys()
    video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
    # Merge the video-model hparams, overriding keys that already exist.
    for (name, value) in six.iteritems(video_hparams.values()):
        if name in hparams_keys:
            hparams.set_hparam(name, value)
        else:
            hparams.add_hparam(name, value)
    hparams.optimization_batch_size = 1
    hparams.weight_decay = 0
    return hparams
Atari parameters with stochastic discrete world model as policy.
def add_update(self, selector, update, multi=False, upsert=False,
               collation=None, array_filters=None):
    """Create an update document and add it to the list of ops."""
    validate_ok_for_update(update)
    cmd = SON([('q', selector), ('u', update),
               ('multi', multi), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    if array_filters is not None:
        self.uses_array_filters = True
        cmd['arrayFilters'] = array_filters
    if multi:
        # A bulk_write containing an update_many is not retryable.
        self.is_retryable = False
    self.ops.append((_UPDATE, cmd))
Create an update document and add it to the list of ops.
def get_sig_info(hdr):
    """Hand back RPM signature information and an error code.

    Shamelessly stolen from Seth Vidal
    http://yum.baseurl.org/download/misc/checksig.py

    :return: (error, (sigtype, sigdate, sigid)) tuple; error 101 means the
        package carries no signature.
    """
    string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
    siginfo = hdr.sprintf(string)
    if siginfo == '(none)':
        # Unsigned package: report error 101 with placeholder values.
        return 101, ('MD5', 'None', 'None')
    sigtype, sigdate, sigid = siginfo.split(',')
    return 0, (sigtype, sigdate, sigid)
hand back signature information and an error code Shamelessly stolen from Seth Vidal http://yum.baseurl.org/download/misc/checksig.py
def name(self):
    """Get the enumeration name of this cursor kind.

    Lazily builds a reverse map (enum value -> attribute name) on first
    access and caches it.
    """
    if self._name_map is None:
        self._name_map = {}
        for key, value in self.__class__.__dict__.items():
            # Only class attributes that are themselves instances are members.
            if isinstance(value, self.__class__):
                self._name_map[value] = key
    return self._name_map[self]
Get the enumeration name of this cursor kind.
def days_and_sids_for_frames(frames):
    """Return the (day index, sid columns) shared by a list of dataframes.

    Parameters
    ----------
    frames : list[pd.DataFrame]
        Dataframes indexed by day, with a column per sid.

    Returns
    -------
    days : np.array[datetime64[ns]]
    sids : np.array[int64]

    Raises
    ------
    ValueError
        If the dataframes are not all indexed by the same days and sids.
    """
    if not frames:
        # Empty input: return empty arrays with the conventional dtypes.
        days = np.array([], dtype='datetime64[ns]')
        sids = np.array([], dtype='int64')
        return days, sids
    check_indexes_all_same(
        [frame.index for frame in frames],
        message='Frames have mismatched days.',  # fixed typo "mistmatched"
    )
    check_indexes_all_same(
        [frame.columns for frame in frames],
        message='Frames have mismatched sids.',
    )
    return frames[0].index.values, frames[0].columns.values
Returns the date index and sid columns shared by a list of dataframes, ensuring they all match. Parameters ---------- frames : list[pd.DataFrame] A list of dataframes indexed by day, with a column per sid. Returns ------- days : np.array[datetime64[ns]] The days in these dataframes. sids : np.array[int64] The sids in these dataframes. Raises ------ ValueError If the dataframes passed are not all indexed by the same days and sids.
def get_source(
    self,
    environment: Environment,
    template: str,
) -> Tuple[str, Optional[str], Callable]:
    """Return the template source, trying the app and blueprint loaders in order.

    :raises TemplateNotFound: If no loader can supply the template.
    """
    for loader in self._loaders():
        try:
            return loader.get_source(environment, template)
        except TemplateNotFound:
            continue
    raise TemplateNotFound(template)
Returns the template source from the environment. This considers the loaders on the :attr:`app` and blueprints.
def get_public_orders(self, group=False):
    """Return public orders that are currently open.

    :param group: If True, orders with the same price are grouped.
    :type group: bool
    :return: Public orders currently open.
    :rtype: dict
    """
    self._log('get public orders')
    params = {'book': self.name, 'group': int(group)}
    return self._rest_client.get(endpoint='/order_book', params=params)
Return public orders that are currently open. :param group: If set to True (default: False), orders with the same price are grouped. :type group: bool :return: Public orders currently open. :rtype: dict
def get_datastream(self, datastream):
    """Get a Datastream by id."""
    response = self.http.get('/Datastream/' + str(datastream))
    datastream = Schemas.Datastream(datastream=response)
    return datastream
To get Datastream by id
def text_extents(self, text):
    """Return the extents for a string of text.

    :param text: The text to measure, as a Unicode or UTF-8 string.
    :returns: ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
        tuple of floats describing the inked user-space rectangle.
    """
    extents = ffi.new('cairo_text_extents_t *')
    cairo.cairo_text_extents(self._pointer, _encode_string(text), extents)
    self._check_status()
    return (
        extents.x_bearing, extents.y_bearing,
        extents.width, extents.height,
        extents.x_advance, extents.y_advance)
Returns the extents for a string of text. The extents describe a user-space rectangle that encloses the "inked" portion of the text, (as it would be drawn by :meth:`show_text`). Additionally, the :obj:`x_advance` and :obj:`y_advance` values indicate the amount by which the current point would be advanced by :meth:`show_text`. Note that whitespace characters do not directly contribute to the size of the rectangle (:obj:`width` and :obj:`height`). They do contribute indirectly by changing the position of non-whitespace characters. In particular, trailing whitespace characters are likely to not affect the size of the rectangle, though they will affect the x_advance and y_advance values. Because text extents are in user-space coordinates, they are mostly, but not entirely, independent of the current transformation matrix. If you call :meth:`context.scale(2) <scale>`, text will be drawn twice as big, but the reported text extents will not be doubled. They will change slightly due to hinting (so you can't assume that metrics are independent of the transformation matrix), but otherwise will remain unchanged. :param text: The text to measure, as an Unicode or UTF-8 string. :returns: A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)`` tuple of floats. :obj:`x_bearing` The horizontal distance from the origin to the leftmost part of the glyphs as drawn. Positive if the glyphs lie entirely to the right of the origin. :obj:`y_bearing` The vertical distance from the origin to the topmost part of the glyphs as drawn. Positive only if the glyphs lie completely below the origin; will usually be negative. :obj:`width` Width of the glyphs as drawn. :obj:`height` Height of the glyphs as drawn. :obj:`x_advance` Distance to advance in the X direction after drawing these glyphs. :obj:`y_advance` Distance to advance in the Y direction after drawing these glyphs. Will typically be zero except for vertical text layout as found in East-Asian languages.
def get_objective(self, sampler):
    """Return an objective function (params -> energy) to be optimized."""
    def objective(params):
        circuit = self.get_circuit(params)
        circuit.make_cache()
        return self.get_energy(circuit, sampler)
    return objective
Get an objective function to be optimized.
def add_shortcut_to_tooltip(action, context, name):
    """Append the shortcut for (context, name) to the action's tooltip."""
    action.setToolTip(action.toolTip() + ' (%s)'
                      % get_shortcut(context=context, name=name))
Add the shortcut associated with a given action to its tooltip
def parse(self):
    """Parse the SGF data stored in 'self.data' and return a 'Collection'."""
    c = Collection()
    while self.index < self.datalen:
        g = self.parseOneGame()
        if g:
            c.append(g)
        else:
            # Stop at the first unparseable game.
            break
    return c
Parses the SGF data stored in 'self.data', and returns a 'Collection'.
def load_model(modelname, add_sentencizer=False):
    """Load a specific spaCy model, timing the load.

    :return: (nlp, loading_time, reported model name)
    """
    loading_start = time.time()
    nlp = spacy.load(modelname)
    if add_sentencizer:
        nlp.add_pipe(nlp.create_pipe('sentencizer'))
    loading_end = time.time()
    loading_time = loading_end - loading_start
    if add_sentencizer:
        return nlp, loading_time, modelname + '_sentencizer'
    return nlp, loading_time, modelname
Load a specific spaCy model
def _add_person_to_group(person, group):
    """Call datastores after adding a person to a group."""
    from karaage.datastores import add_accounts_to_group
    from karaage.datastores import add_accounts_to_project
    from karaage.datastores import add_accounts_to_institute
    a_list = person.account_set
    add_accounts_to_group(a_list, group)
    # Propagate membership to every project and institute tied to the group.
    for project in group.project_set.all():
        add_accounts_to_project(a_list, project)
    for institute in group.institute_set.all():
        add_accounts_to_institute(a_list, institute)
Call datastores after adding a person to a group.
def _populate_ranking_payoff_arrays(payoff_arrays, scores, costs): n = payoff_arrays[0].shape[0] for p, payoff_array in enumerate(payoff_arrays): payoff_array[0, :] = 0 for i in range(1, n): for j in range(n): payoff_array[i, j] = -costs[p, i-1] prize = 1. for i in range(n): for j in range(n): if scores[0, i] > scores[1, j]: payoff_arrays[0][i, j] += prize elif scores[0, i] < scores[1, j]: payoff_arrays[1][j, i] += prize else: payoff_arrays[0][i, j] += prize / 2 payoff_arrays[1][j, i] += prize / 2
Populate the ndarrays in `payoff_arrays` with the payoff values of the ranking game given `scores` and `costs`. Parameters ---------- payoff_arrays : tuple(ndarray(float, ndim=2)) Tuple of 2 ndarrays of shape (n, n). Modified in place. scores : ndarray(int, ndim=2) ndarray of shape (2, n) containing score values corresponding to the effort levels for the two players. costs : ndarray(float, ndim=2) ndarray of shape (2, n-1) containing cost values corresponding to the n-1 positive effort levels for the two players, with the assumption that the cost of the zero effort action is zero.
def _find_inline_images(contentsinfo):
    """Find inline images in the contentstream, yielding ImageInfo objects."""
    for n, inline in enumerate(contentsinfo.inline_images):
        yield ImageInfo(
            name='inline-%02d' % n,
            shorthand=inline.shorthand,
            inline=inline
        )
Find inline images in the contentstream
def width(self) -> int:
    """Determine how many entry columns are in the diagram."""
    xs = [x for x, _ in self.entries.keys()]
    xs.extend(v.x for v in self.vertical_lines)
    for h in self.horizontal_lines:
        xs.append(h.x1)
        xs.append(h.x2)
    # Same sentinel as the original running-max: -1.0 when nothing is present.
    return 1 + int(max(xs, default=-1.0))
Determines how many entry columns are in the diagram.
def _adjust_inferential_results_for_parameter_constraints(self, constraints):
    """Blank out inferential results for constrained parameters.

    Parameters held fixed during estimation had no inference performed, so
    their standard errors, t-values, etc. are set to NaN.

    Parameters
    ----------
    constraints : list of ints, or None.
        Positions (in the full parameter array) of parameters constrained
        to their initial values.

    Returns
    -------
    None.
    """
    if constraints is not None:
        inferential_attributes = ["standard_errors", "tvalues", "pvalues",
                                  "robust_std_errs", "robust_t_stats",
                                  "robust_p_vals"]
        assert all([hasattr(self, x) for x in inferential_attributes])
        assert hasattr(self, "params")
        all_names = self.params.index.tolist()
        for series in [getattr(self, x) for x in inferential_attributes]:
            for pos in constraints:
                series.loc[all_names[pos]] = np.nan
    return None
Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None.
def loadScopeGroupbyName(self, name, service_group_id, callback=None, errback=None):
    """Load an existing Scope Group into a high level Scope Group object.

    :param str name: Name of an existing Scope Group
    :param int service_group_id: id of the associated service group
    """
    import ns1.ipam
    scope_group = ns1.ipam.Scopegroup(self.config, name=name,
                                      service_group_id=service_group_id)
    return scope_group.load(callback=callback, errback=errback)
Load an existing Scope Group by name and service group id into a high level Scope Group object :param str name: Name of an existing Scope Group :param int service_group_id: id of the service group the Scope group is associated with
def _sign(self, data: bytes) -> bytes:
    """Use an eth_sign compatible hasher to sign matrix data."""
    assert self._raiden_service is not None
    return self._raiden_service.signer.sign(data=data)
Use eth_sign compatible hasher to sign matrix data
def compile(conf):
    """Compile a classic uWSGI configuration file from the given module."""
    with errorprint():
        config = ConfModule(conf)
        for conf in config.configurations:
            conf.format(do_print=True)
Compiles classic uWSGI configuration file using the default or given `uwsgiconf` configuration module.
def delete_jail(name):
    """Delete the poudriere jail with `name`.

    CLI Example:

    .. code-block:: bash

        salt '*' poudriere.delete_jail 90amd64
    """
    if is_jail(name):
        cmd = 'poudriere jail -d -j {0}'.format(name)
        __salt__['cmd.run'](cmd)
        # Verify the jail actually disappeared.
        if is_jail(name):
            return 'Looks like there was an issue deleteing jail \
            {0}'.format(name)
    else:
        return 'Looks like jail {0} has not been created'.format(name)
    # Also clean up the jail's make.conf if present.
    make_file = os.path.join(_config_dir(), '{0}-make.conf'.format(name))
    if os.path.isfile(make_file):
        try:
            os.remove(make_file)
        except (IOError, OSError):
            return ('Deleted jail "{0}" but was unable to remove jail make '
                    'file').format(name)
        # NOTE(review): file.remove after os.remove looks redundant -- confirm.
        __salt__['file.remove'](make_file)
    return 'Deleted jail {0}'.format(name)
Deletes poudriere jail with `name` CLI Example: .. code-block:: bash salt '*' poudriere.delete_jail 90amd64
def copy(self, newdata=None):
    """Return a copy of the cube, optionally with new data."""
    if newdata is None:
        newdata = self.data.copy()
    return self.__class__(
        self.molecule, self.origin.copy(), self.axes.copy(),
        self.nrep.copy(), newdata, self.subtitle, self.nuclear_charges
    )
Return a copy of the cube with optionally new data.
def bitcoin_address(self) -> str:
    """Generate a random bitcoin address.

    :return: Bitcoin address.

    :Example:
        3EktnHQD7RiAE6uzMj2ZifT9YgRrkSgzQX
    """
    # Addresses start with '1' (P2PKH) or '3' (P2SH).
    type_ = self.random.choice(['1', '3'])
    letters = string.ascii_letters + string.digits
    return type_ + ''.join(
        self.random.choice(letters) for _ in range(33))
Generate a random bitcoin address. :return: Bitcoin address. :Example: 3EktnHQD7RiAE6uzMj2ZifT9YgRrkSgzQX
def derive_title(self):
    """Derive a title from the model when none was set explicitly."""
    if not self.title:
        return _("Create %s") % force_text(self.model._meta.verbose_name).title()
    else:
        return self.title
Derives our title from our object
def get_attribute(module_name: str, attribute_name: str):
    """Get the specified module attribute (usually a class or function).

    :param module_name: module name
    :param attribute_name: attribute name
    :return: module attribute
    """
    assert isinstance(module_name, str)
    assert isinstance(attribute_name, str)
    module = importlib.import_module(module_name)
    return getattr(module, attribute_name)
Get the specified module attribute. It most cases, it will be a class or function. :param module_name: module name :param attribute_name: attribute name :return: module attribute
def log_middleware(store):
    """Redux-style middleware: log each action to the console, then dispatch."""
    def middleware(dispatch_next):
        def dispatch_and_log(action):
            print('Dispatch Action:', action)
            return dispatch_next(action)
        return dispatch_and_log
    return middleware
log all actions to console as they are dispatched
def adjustMinimumWidth( self ):
    """Recompute the minimum width from the pixmap size and the maximum count."""
    pw = self.pixmapSize().width()
    # One pixmap plus 3px of spacing per step up to the maximum.
    self.setMinimumWidth(pw * self.maximum() + 3 * self.maximum())
Modifies the minimum width to factor in the size of the pixmaps and the number for the maximum.
def visit_Import(self, node):
    """Check that every imported module exists in MODULES."""
    for alias in node.names:
        current_module = MODULES
        # Walk the dotted path, descending the MODULES tree level by level.
        for path in alias.name.split('.'):
            if path not in current_module:
                raise PythranSyntaxError(
                    "Module '{0}' unknown.".format(alias.name), node)
            else:
                current_module = current_module[path]
Check if imported module exists in MODULES.
def create_parser(self, prog_name, subcommand):
    """Create an OptionParser.

    prog_name - Name of a command
    subcommand - Name of a subcommand
    """
    parser = OptionParser(prog=prog_name,
                          usage=self.usage(subcommand),
                          option_list=self.option_list)
    return parser
Create an OptionParser prog_name - Name of a command subcommand - Name of a subcommand
def _recursive_getitem(d, key): if key in d: return d else: for v in d.values(): return _recursive_getitem(v, key) else: raise KeyError('Key not found: {}'.format(key))
Descend into a dict of dicts to return the one that contains a given key. Every value in the dict must be another dict.
def _get_raw_key(self, key_id):
    """Return a static, randomly-generated symmetric wrapping key for
    ``key_id``, generating and caching 32 random bytes on first use.

    :param str key_id: Key ID
    :returns: Wrapping key that contains the cached static key
    :rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey`
    """
    if key_id not in self._static_keys:
        self._static_keys[key_id] = os.urandom(32)
    return WrappingKey(
        wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING,
        wrapping_key=self._static_keys[key_id],
        wrapping_key_type=EncryptionKeyType.SYMMETRIC,
    )
Returns a static, randomly-generated symmetric key for the specified key ID. :param str key_id: Key ID :returns: Wrapping key that contains the specified static key :rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey`
def add_before(self, pipeline):
    """Prepend a Pipeline (or callable, wrapped in one) to this
    processing pipeline and return self for chaining."""
    if not isinstance(pipeline, Pipeline):
        pipeline = Pipeline(pipeline)
    self.pipes = list(pipeline.pipes) + list(self.pipes)
    return self
Add a Pipeline to be applied before this processing pipeline. Arguments: pipeline: The Pipeline or callable to apply before this Pipeline.
def getvalue(self, v):
    """Return the list of quantities (keys ``m0``, ``m1``, ...) that
    make up the measure's value.

    :param v: a measure
    """
    if not is_measure(v):
        raise TypeError('Incorrect input type for getvalue()')
    import re
    rx = re.compile("m\d+")
    # NOTE: written for Python 2 -- assumes ``v.keys()`` returns a list.
    keys = sorted(v.keys())
    return [dq.quantity(v.get(key)) for key in keys if re.match(rx, key)]
Return a list of quantities making up the measures' value. :param v: a measure
def serialize(self):
    """Return the VCF breakend string representation of this record."""
    if self.mate_chrom is None:
        remote_tag = "."
    else:
        # Contigs outside the main assembly are bracketed: "<name>".
        if self.within_main_assembly:
            mate_chrom = self.mate_chrom
        else:
            mate_chrom = "<{}>".format(self.mate_chrom)
        tpl = {FORWARD: "[{}:{}[", REVERSE: "]{}:{}]"}[self.mate_orientation]
        remote_tag = tpl.format(mate_chrom, self.mate_pos)
    if self.orientation == FORWARD:
        return remote_tag + self.sequence
    return self.sequence + remote_tag
Return string representation for VCF
def print_config():
    """Print config entry function.

    Parses ``sys.argv`` for a config URI (and an optional dotted key
    path), bootstraps the environment and pretty-prints the selected
    config tree.
    """
    # The original referenced ``description`` before assignment
    # (NameError); provide an actual description string.
    description = "Print the application configuration tree."
    parser = argparse.ArgumentParser(
        description=textwrap.dedent(description)
    )
    parser.add_argument(
        'config_uri',
        type=str,
        # Original help text was a copy-paste from the argparse docs
        # ("an integer for the accumulator").
        help='URI of the configuration to load'
    )
    parser.add_argument(
        '-k', '--key',
        dest='key',
        metavar='PREFIX',
        type=str,
        action='store',
        help=(
            "Tells script to print only specified"
            " config tree provided by dotted name"
        )
    )
    args = parser.parse_args(sys.argv[1:])
    env = bootstrap(args.config_uri)
    config, closer = env['registry']['config'], env['closer']
    try:
        print(printer(slice_config(config, args.key)))
    except KeyError:
        print(
            'Sorry, but the key path {0} does not exist in your config!'
            .format(args.key)
        )
    finally:
        # Always release resources acquired by bootstrap().
        closer()
Print config entry function.
def start(self):
    """Run ``_wrapped_target`` on a fresh daemon thread, first stopping
    any thread that is already running."""
    if self.running:
        self.stop()
    worker = threading.Thread(target=self._wrapped_target)
    worker.daemon = True
    self._thread = worker
    worker.start()
Start the run method as a new thread. It will first stop the thread if it is already running.
def convert2wkt(self, set3D=True):
    """Export the geometry of each feature as a WKT string.

    Parameters
    ----------
    set3D: bool
        keep the third (height) dimension?

    Returns
    -------
    list of str
        one WKT string per feature geometry
    """
    wkt_strings = []
    for feature in self.getfeatures():
        geom = feature.geometry()
        try:
            geom.Set3D(set3D)
        except AttributeError:
            # Fallback when Set3D is unavailable on this geometry class
            # (presumably older GDAL bindings -- confirm).
            geom.SetCoordinateDimension(3 if set3D else 2)
        wkt_strings.append(geom.ExportToWkt())
    return wkt_strings
export the geometry of each feature as a wkt string Parameters ---------- set3D: bool keep the third (height) dimension? Returns ------- list of str: the WKT representation of each feature geometry
def delete_database(self, database):
    """Drop a database on the InfluxDB server.

    :param database: the name of the database to delete
    :type database: string
    :rtype: boolean
    """
    self.request(
        url="db/{0}".format(database),
        method='DELETE',
        expected_response_code=204
    )
    return True
Drop a database on the InfluxDB server. :param database: the name of the database to delete :type database: string :rtype: boolean
def deploy():
    """Deploy to production.

    Pulls the latest code, ensures the Python requirements are
    installed, collects static files, applies database migrations and
    restarts the production gunicorn workers.  Requires root and two
    interactive confirmations before touching the database.
    """
    _require_root()
    if not confirm("This will apply any available migrations to the database. Has the database been backed up?"):
        abort("Aborted.")
    if not confirm("Are you sure you want to deploy?"):
        abort("Aborted.")
    with lcd(PRODUCTION_DOCUMENT_ROOT):
        with shell_env(PRODUCTION="TRUE"):
            local("git pull")
            with open("requirements.txt", "r") as req_file:
                requirements = req_file.read().strip().split()
            try:
                # Raises when any requirement is missing or at the
                # wrong version.
                pkg_resources.require(requirements)
            except pkg_resources.DistributionNotFound:
                local("pip install -r requirements.txt")
            except Exception:
                # NOTE(review): format_exc() result is discarded here --
                # presumably it was meant to be printed; confirm intent.
                traceback.format_exc()
                local("pip install -r requirements.txt")
            else:
                puts("Python requirements already satisfied.")
            with prefix("source /usr/local/virtualenvs/ion/bin/activate"):
                local("./manage.py collectstatic --noinput", shell="/bin/bash")
                local("./manage.py migrate", shell="/bin/bash")
                restart_production_gunicorn(skip=True)
    puts("Deploy complete.")
Deploy to production.
def postToNodeInBox(self, msg, frm):
    """Append the message to the node inbox.

    :param msg: a node message
    :param frm: the name of the node that sent this ``msg``
    """
    logger.trace("{} appending to nodeInbox {}".format(self, msg))
    entry = (msg, frm)
    self.nodeInBox.append(entry)
Append the message to the node inbox :param msg: a node message :param frm: the name of the node that sent this `msg`
def wiki(self):
    """Return wiki markup describing the github pull request, including
    a link back to the details on github."""
    pull = self.pull
    created = pull.created_at.strftime("%m/%d/%Y %H:%M")
    return "{} {} ({} [{} github])\n".format(
        pull.avatar_url, pull.body, created, pull.html_url)
Returns the wiki markup describing the details of the github pull request as well as a link to the details on github.
def get(self, name=None, default=None):
    """Get attribute value, or ``default`` when the path is missing.

    The whole data dictionary is returned when no attribute is given.
    A list of keys descends into nested dictionaries, so
    ``tree.get(['hardware', 'memory', 'size'])`` is equivalent to
    ``tree.data['hardware']['memory']['size']`` except that ``default``
    is returned when any key along the path does not exist.
    """
    if name is None:
        return self.data
    path = name if isinstance(name, list) else [name]
    node = self.data
    for key in path:
        try:
            node = node[key]
        except KeyError:
            return default
    return node
Get attribute value or return default Whole data dictionary is returned when no attribute provided. Supports direct values retrieval from deep dictionaries as well. Dictionary path should be provided as list. The following two examples are equal: tree.data['hardware']['memory']['size'] tree.get(['hardware', 'memory', 'size']) However the latter approach will also correctly handle providing default value when any of the dictionary keys does not exist.
def place_market_order(self, side: Side, amount: Number) -> Order:
    """Place a market order.

    :param side: buy/sell side of the order
    :param amount: order size
    :return: the created order
    """
    return self.place_order(side, OrderType.MARKET, amount)
Place a market order.
async def dump_message(obj, msg, field_archiver=None):
    """Dump ``msg`` into the object and return the message's plain
    object (popo) representation.

    :param obj: target container; an OrderedDict is created when None
    :param msg: message instance to dump
    :param field_archiver: optional per-field archiver
    :return: the populated container
    """
    target = collections.OrderedDict() if obj is None else get_elem(obj)
    for field in msg.__class__.f_specs():
        await dump_message_field(target, msg=msg, field=field,
                                 field_archiver=field_archiver)
    return target
Dumps message to the object. Returns message popo representation. :param obj: :param msg: :param field_archiver: :return:
def make_url(path, protocol=None, hosts=None):
    """Build an absolute Archive.org URI from ``path``.

    :param path: The Archive.org path.
    :type path: str
    :param protocol: (optional) HTTP protocol; "https://" by default.
    :type protocol: str
    :param hosts: (optional) hosts to choose from at random; the
        default host is "archive.org".
    :type hosts: iterable
    :rtype: str
    :returns: An absolute URI.
    """
    scheme = protocol or 'https://'
    host = random.choice(hosts) if hosts else 'archive.org'
    return scheme + host + path.strip()
Make an URL given a path, and optionally, a protocol and set of hosts to select from randomly. :param path: The Archive.org path. :type path: str :param protocol: (optional) The HTTP protocol to use. "https://" is used by default. :type protocol: str :param hosts: (optional) A set of hosts. A host will be chosen at random. The default host is "archive.org". :type hosts: iterable :rtype: str :returns: An Absolute URI.
def sg_summary_gradient(tensor, gradient, prefix=None, name=None):
    r"""Register `tensor` to summary report as `gradient`.

    Args:
      tensor: A `Tensor` to log as gradient
      gradient: A 0-D `Tensor`. A gradient to log
      prefix: A `string`. A prefix to display in the tensor board web UI.
      name: A `string`. A name to display in the tensor board web UI.

    Returns:
      None
    """
    # The original body began with a dangling bare name ``r`` (residue
    # of a stripped raw docstring), which raised NameError at runtime;
    # it has been folded back into the docstring above.
    prefix = '' if prefix is None else prefix + '/'
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    _scalar(name + '/grad', tf.reduce_mean(tf.abs(gradient)))
    _histogram(name + '/grad-h', tf.abs(gradient))
r"""Register `tensor` to summary report as `gradient` Args: tensor: A `Tensor` to log as gradient gradient: A 0-D `Tensor`. A gradient to log prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
def get_gradients(self) -> Dict[str, List[mx.nd.NDArray]]:
    """Return a mapping of parameter names to gradient arrays.

    Keys are prefixed with the device index ("dev_<i>_<name>").  Only
    trainable parameters (those in ``executor_group.param_names``) with
    a materialized gradient array are included.
    """
    # NOTE(review): the presence check reads executors[0]'s grad_arrays
    # as the reference for every device -- presumably all executors
    # share the same gradient layout; confirm against the group setup.
    return {"dev_%d_%s" % (i, name): exe.grad_arrays[j]
            for i, exe in enumerate(self.executors)
            for j, name in enumerate(self.executor_group.arg_names)
            if name in self.executor_group.param_names and
            self.executors[0].grad_arrays[j] is not None}
Returns a mapping of parameters names to gradient arrays. Parameter names are prefixed with the device.
def get_cmd(self):
    """Return the full test command to be executed at runtime.

    Builds the command line for the configured ``test_program`` and
    appends ``custom_args`` when present.

    :raises InvalidTestProgram: when ``test_program`` is unknown.
    """
    # Programs whose command does not depend on the file system.
    simple_commands = {
        'py': 'py.test %s' % self.file_path,
        'symfony': 'symfony test-all',
        'jelix': 'php tests.php',
        'phpunit': 'phpunit',
        'sphinx': 'make html',
        'tox': 'tox',
    }
    if self.test_program in ('nose', 'nosetests'):
        cmd = "nosetests %s" % self.file_path
    elif self.test_program == 'django':
        # Prefer the project's manage.py when it exists.
        if os.path.exists("%s/manage.py" % self.file_path):
            cmd = "python %s/manage.py test" % self.file_path
        else:
            cmd = "django-admin.py test"
    elif self.test_program in simple_commands:
        cmd = simple_commands[self.test_program]
    else:
        # The original message listed only three of the supported
        # programs; list them all.
        raise InvalidTestProgram(
            "The test program %s is unknown. Valid options are: `nose`, "
            "`django`, `py`, `symfony`, `jelix`, `phpunit`, `sphinx` "
            "and `tox`" % self.test_program)
    if self.custom_args:
        cmd = '%s %s' % (cmd, self.custom_args)
    return cmd
Returns the full command to be executed at runtime
def from_edgelist(self, edges, strict=True):
    """Load transform data from an edge list into the scene graph.

    Parameters
    -------------
    edges : (n,) tuples
      (node_a, node_b, {key: value}) or (node_a, node_b)
    strict : bool
      If True, raise ValueError on a malformed edge; otherwise skip it.
    """
    for edge in edges:
        if len(edge) == 3:
            parent, child, attrs = edge
            self.update(child, parent, **attrs)
        elif len(edge) == 2:
            parent, child = edge
            self.update(child, parent)
        elif strict:
            raise ValueError('edge incorrect shape: {}'.format(str(edge)))
Load transform data from an edge list into the current scene graph. Parameters ------------- edges : (n,) tuples (node_a, node_b, {key: value}) strict : bool If true, raise a ValueError when a malformed edge is passed in a tuple.
def confidence_interval(self, confidenceLevel):
    """Calculate the error bounds within which ``confidenceLevel`` of
    the errors lie closer to zero.

    :param float confidenceLevel: fraction in [0.0, 1.0] of errors that
        should be smaller than the returned overestimation and larger
        than the returned underestimation.
    :return: (underestimation, overestimation) tuple
    :rtype: tuple
    :warning: index is still not calculated correctly (upstream note)
    """
    if not (confidenceLevel >= 0 and confidenceLevel <= 1):
        raise ValueError("Parameter percentage has to be in [0,1]")
    underestimations = []
    overestimations = []
    for error in self._errorValues:
        if error is None:
            continue
        # Zero errors are deliberately counted on both sides.
        if error >= 0:
            overestimations.append(error)
        if error <= 0:
            underestimations.append(error)
    # Sort both lists towards zero so the index cut keeps the
    # confidenceLevel fraction closest to zero.
    overestimations.sort()
    underestimations.sort(reverse=True)
    overIdx = int(len(overestimations) * confidenceLevel) - 1
    underIdx = int(len(underestimations) * confidenceLevel) - 1
    overestimation = 0.0
    underestimation = 0.0
    if overIdx >= 0:
        overestimation = overestimations[overIdx]
    else:
        # NOTE(review): Python 2 debug print statement left in place.
        print len(overestimations), confidenceLevel
    if underIdx >= 0:
        underestimation = underestimations[underIdx]
    return underestimation, overestimation
Calculates for which value confidenceLevel% of the errors are closer to 0. :param float confidenceLevel: percentage of the errors that should be smaller than the returned value for overestimations and larger than the returned value for underestimations. confidenceLevel has to be in [0.0, 1.0] :return: return a tuple containing the underestimation and overestimation for the given confidenceLevel :rtype: tuple :warning: Index is still not calculated correctly
def docgraph2freqt(docgraph, root=None, include_pos=False, escape_func=FREQT_ESCAPE_FUNC):
    """Convert a document graph into a FREQT string.

    When ``root`` is None every sentence in the graph is converted and
    the results joined with newlines; otherwise only the subtree under
    ``root`` is converted.
    """
    if root is not None:
        return sentence2freqt(docgraph, root, include_pos=include_pos,
                              escape_func=escape_func)
    parts = (sentence2freqt(docgraph, sentence, include_pos=include_pos,
                            escape_func=escape_func)
             for sentence in docgraph.sentences)
    return u"\n".join(parts)
convert a docgraph into a FREQT string.
def lookup_data(self, lookup, data):
    """Descend through nested data following a dotted ``lookup`` path.

    Works with dict-alikes and plain objects (or any mix).  A dotted
    path such as ``'greeting.en'`` is split on ``.`` and traversed one
    level at a time; zero-argument callables found along the way are
    invoked (unless they expose ``db_manager``).  An empty lookup
    returns ``data`` unchanged.
    """
    if not lookup:
        return data
    part, _, remaining = lookup.partition('.')
    if not part:
        return data
    if callable(getattr(data, 'keys', None)) and hasattr(data, '__getitem__'):
        value = data[part]
    elif data is not None:
        value = getattr(data, part)
    else:
        value = data
    if callable(value) and not hasattr(value, 'db_manager'):
        value = value()
    if not remaining:
        return value
    return self.lookup_data(remaining, value)
Given a lookup string, attempts to descend through nested data looking for the value. Can work with either dictionary-alikes or objects (or any combination of those). Lookups should be a string. If it is a dotted path, it will be split on ``.`` & it will traverse through to find the final value. If not, it will simply attempt to find either a key or attribute of that name & return it. Example:: >>> data = { ... 'type': 'message', ... 'greeting': { ... 'en': 'hello', ... 'fr': 'bonjour', ... 'es': 'hola', ... }, ... 'person': Person( ... name='daniel' ... ) ... } >>> lookup_data('type', data) 'message' >>> lookup_data('greeting.en', data) 'hello' >>> lookup_data('person.name', data) 'daniel'
def list_(runas=None):
    """List the installable versions of ruby.

    runas
        The user under which to run rbenv.  If not specified, rbenv is
        run as the user under which Salt is running.

    CLI Example:

    .. code-block:: bash

        salt '*' rbenv.list
    """
    output = _rbenv_exec(['install', '--list'], runas=runas)
    if not output:
        return []
    # Skip the "Available versions:" header line emitted by rbenv.
    return [line.strip() for line in output.splitlines()
            if line != 'Available versions:']
List the installable versions of ruby runas The user under which to run rbenv. If not specified, then rbenv will be run as the user under which Salt is running. CLI Example: .. code-block:: bash salt '*' rbenv.list
def utc_dt(dt):
    """Return ``dt`` in UTC.

    A naive datetime is assumed to already be in UTC and is simply
    localized; an aware datetime is converted.
    """
    if dt.tzinfo:
        return dt.astimezone(pytz.utc)
    return pytz.utc.localize(dt)
Set UTC timezone on a datetime object. A naive datetime is assumed to be in UTC TZ.
def connect(self):
    """Connect the link sequences of the actual model.

    Wires up the four subgroups in turn; any failure is re-raised with
    an explanatory message via ``objecttools.augment_excmessage``.
    """
    try:
        for group in ('inlets', 'receivers', 'outlets', 'senders'):
            self._connect_subgroup(group)
    except BaseException:
        # ``group`` still names the subgroup being connected when the
        # exception was raised; ``group[:-1]`` strips the plural 's'.
        objecttools.augment_excmessage(
            'While trying to build the node connection of the `%s` '
            'sequences of the model handled by element `%s`'
            % (group[:-1], objecttools.devicename(self)))
Connect the link sequences of the actual model.
def wrap(self, string, width):
    """Wrap each line of ``string`` to at most ``width`` characters,
    breaking at occurrences of ``self.sep``.

    Returns the wrapped text, or ``False`` when the input is empty or
    the width is not positive.
    """
    if not string or width <= 0:
        logging.error("invalid string: %s or width: %s" % (string, width))
        return False
    tmp = ""
    for line in string.splitlines():
        # Short lines pass through unchanged.
        if len(line) <= width:
            tmp += line + "\n"
            continue
        cur = 0
        length = len(line)
        while cur + width < length:
            # Break at the last separator within the next ``width``
            # characters.  NOTE(review): if ``self.sep`` never occurs in
            # that window, rfind() returns -1 and ``cur`` stops
            # advancing -- looks like a potential infinite loop; confirm
            # inputs always contain the separator.
            cur = line[:cur+width].rfind(self.sep) + len(self.sep) - 1
            line = line[:cur] + "\n" + line[cur+1:]
        # Wrapped (originally long) lines get a blank line after them.
        tmp += line + "\n\n"
    return tmp
Wrap lines according to width Place '\n' whenever necessary
def from_database(cls, database):
    """Return the migrator implementation matching ``database``'s type,
    falling back to the generic base behavior."""
    dispatch = (
        (PostgresqlDatabase, PostgresqlMigrator),
        (SqliteDatabase, SqliteMigrator),
        (MySQLDatabase, MySQLMigrator),
    )
    for db_cls, migrator_cls in dispatch:
        if isinstance(database, db_cls):
            return migrator_cls(database)
    return super(SchemaMigrator, cls).from_database(database)
Initialize migrator by db.
def get_all_formulae(chebi_ids):
    """Return the flattened list of formulae for all ``chebi_ids``."""
    return [formula
            for chebi_id in chebi_ids
            for formula in get_formulae(chebi_id)]
Returns all formulae
def domain_score(self, domains):
    """Call the domain scores endpoint.

    Deprecated: the underlying OpenDNS Investigate API endpoint is
    deprecated; use ``categorization()`` instead.
    """
    warn(
        'OpenDNS Domain Scores endpoint is deprecated. Use '
        'InvestigateApi.categorization() instead',
        DeprecationWarning,
    )
    return self._multi_post('domains/score/', domains)
Calls domain scores endpoint. This method is deprecated since OpenDNS Investigate API endpoint is also deprecated.
def get_wide_unicode(self, i):
    """Consume and validate the 8 digits of a wide Unicode escape from
    iterator ``i``: three leading '0's, a '0' or '1', then four hex
    digits (i.e. a code point no larger than 0x10FFFF)."""
    chars = []
    for _ in range(3):
        c = next(i)
        if c != '0':
            raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
        chars.append(c)
    c = next(i)
    if c not in ('0', '1'):
        raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
    chars.append(c)
    for _ in range(4):
        c = next(i)
        if c.lower() not in _HEX:
            raise SyntaxError('Invalid wide Unicode character at %d!' % (i.index - 1))
        chars.append(c)
    return ''.join(chars)
Get wide Unicode.
def get_plugins_info(self):
    """Collect the current live info from all registered plugins.

    :return: one dict merging every plugin's ``get_info()`` output,
        keyed on the plugin name.
    """
    info = {}
    for plugin in self.plugins:
        info.update(plugin.get_info())
    return info
Collect the current live info from all the registered plugins. Return a dictionary, keyed on the plugin name.
def transaction(data_access):
    """Wrap statements in a transaction: commit when they succeed,
    roll back otherwise.  A RollbackTransaction rolls back silently;
    any other exception rolls back and is re-raised.

    :param data_access: a DataAccess instance
    """
    saved_autocommit = data_access.autocommit
    data_access.autocommit = False
    try:
        yield data_access
    except RollbackTransaction:
        # Explicit rollback requested by the caller; swallowed.
        data_access.rollback()
    except Exception as ex:
        data_access.rollback()
        raise ex
    else:
        data_access.commit()
    finally:
        data_access.autocommit = saved_autocommit
Wrap statements in a transaction. If the statements succeed, commit, otherwise rollback. :param data_access: a DataAccess instance
def list_all_directories(self):
    """Yield all directories on the device's file systems.

    The traversal root (``self.filesystem``) is yielded first; each
    directory's immediate children are then yielded before descending
    into their subtrees.
    """
    def list_dirs_recursively(directory):
        # Only the traversal root yields itself: recursive calls emit
        # just their descendants, because children are already emitted
        # by the parent's chain below.
        if directory == self.filesystem:
            yield directory
        d_gen = itertools.chain(
            directory.directories,
            *tuple(list_dirs_recursively(d) for d in directory.directories))
        for d in d_gen:
            yield d
    return list_dirs_recursively(self.filesystem)
Utility method that yields all directories on the device's file systems.
def _form_datetimes(days, msecs):
    """Calculate seconds since EPOCH from days and milliseconds for each
    IASI scan.

    Returns a float64 array with one row per scan line and four repeated
    timestamps per view group.
    """
    all_datetimes = []
    for i in range(days.size):
        day = int(days[i])
        msec = msecs[i]
        row = []
        for j in range(int(VALUES_PER_SCAN_LINE / 4)):
            # Each group of four views shares one adjusted timestamp.
            usec = 1000 * (j * VIEW_TIME_ADJUSTMENT + msec)
            seconds = dt.timedelta(days=day, microseconds=usec).total_seconds()
            row.extend([seconds] * 4)
        all_datetimes.append(row)
    return np.array(all_datetimes, dtype=np.float64)
Calculate seconds since EPOCH from days and milliseconds for each of IASI scan.
def get_valid_cwd():
    """Determine and check the current working directory for validity.

    Typically, an invalid directory arises when you check out a
    different git branch that doesn't have this directory.  When an
    invalid directory is found, a warning is printed, but the directory
    is still returned as-is, since this is what the shell considers to
    be the cwd.
    """
    try:
        cwd = _current_dir()
    except Exception:
        # Was a bare ``except:``; narrowed so KeyboardInterrupt and
        # SystemExit still propagate.
        warn("Your current directory is invalid. If you open a ticket at " +
             "https://github.com/milkbikis/powerline-shell/issues/new " +
             "we would love to help fix the issue.")
        sys.stdout.write("> ")
        sys.exit(1)

    # Walk upwards until an existing ancestor directory is found.
    parts = cwd.split(os.sep)
    up = cwd
    while parts and not os.path.exists(up):
        parts.pop()
        up = os.sep.join(parts)
    if cwd != up:
        warn("Your current directory is invalid. Lowest valid directory: "
             + up)
    return cwd
Determine and check the current working directory for validity. Typically, an directory arises when you checkout a different branch on git that doesn't have this directory. When an invalid directory is found, a warning is printed to the screen, but the directory is still returned as-is, since this is what the shell considers to be the cwd.
def get_history_by_tail_number(self, tail_number, page=1, limit=100):
    """Fetch the history of a particular aircraft by its tail number.

    Args:
        tail_number (str): The tail number, e.g. VT-ANL
        page (int): Optional page number; users on a paid flightradar24
            plan can pass higher page numbers to get more data
        limit (int): Optional limit on number of records returned

    Returns:
        A list of dicts, one per row of data from flightradar24.
    """
    url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
    return self._fr24.get_data(url, True)
Fetch the history of a particular aircraft by its tail number. This method can be used to get the history of a particular aircraft by its tail number. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_tail_number('VT-ANL') f.get_history_by_tail_number('VT-ANL',page=1,limit=10)
def findPrevStmt(self, block):
    """Return a ``Statement`` spanning the previous statement before
    ``block`` (from its first to its last line)."""
    # Last line of the previous statement: nearest non-comment block.
    stmtEnd = self._prevNonCommentBlock(block)
    stmtStart = self.findStmtStart(stmtEnd)
    return Statement(self._qpart, stmtStart, stmtEnd)
Returns a Statement spanning the previous statement before the given block, from its first to its last line.
def check_security_settings():
    """Warn if the session cookie is not marked secure in production."""
    production = not (current_app.debug or current_app.testing)
    cookie_secure = current_app.config.get('SESSION_COOKIE_SECURE')
    if production and not cookie_secure:
        current_app.logger.warning(
            "SESSION_COOKIE_SECURE setting must be set to True to prevent the "
            "session cookie from being leaked over an insecure channel."
        )
Warn if session cookie is not secure in production.