positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def _translate_cond(self, c): #pylint:disable=no-self-use
    """Check whether this condition can be supported by FastMemory.

    :param c: the condition -- a claripy AST, or None meaning "unconditional"
    :returns: the concrete truth value of the condition
    :raises SimFastMemoryError: if the condition is symbolic (multi-valued)
    """
    # FastMemory cannot branch: only single-valued ASTs (or None) are allowed.
    # NOTE(review): the message says "size not supported" although this guards
    # a condition -- confirm whether the message is intentional.
    if isinstance(c, claripy.ast.Base) and not c.singlevalued:
        raise SimFastMemoryError("size not supported")
    if c is None:
        return True
    else:
        # Single-valued, so the first (only) solution is the condition's value.
        return self.state.solver.eval_upto(c, 1)[0]
Checks whether this condition can be supported by FastMemory.
def p_iteration_statement_6(self, p):
    # NOTE: the docstring below is NOT documentation -- PLY/yacc consumes it
    # as the grammar production for this rule. Do not reword it.
    """ iteration_statement \
    : FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement """
    # Builds: for (var <identifier> <initializer> in <expr>) <statement>
    p[0] = ast.ForIn(item=ast.VarDecl(identifier=p[4], initializer=p[5]),
                     iterable=p[7], statement=p[9])
iteration_statement \ : FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
def list_event_sources(self):
    """Return the existing event sources.

    :rtype: ~collections.Iterable[str]
    """
    # The server never paginates this listing, but an iterator is returned
    # anyway so this behaves like the other listing API methods.
    url = '/archive/{}/events/sources'.format(self._instance)
    proto_response = self._client.get_proto(path=url)
    info = archive_pb2.EventSourceInfo()
    info.ParseFromString(proto_response.content)
    return iter(getattr(info, 'source'))
Returns the existing event sources. :rtype: ~collections.Iterable[str]
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion):
    """Operation: Add Permission to User Role.

    Mock implementation of the HMC 'Add Permission to User Role' operation.

    :param method: HTTP method string (used for error reporting).
    :param hmc: faked HMC object holding the mocked resources.
    :param uri: request URI (used for error reporting).
    :param uri_parms: URI parameters; uri_parms[0] is the User Role OID.
    :param body: request body with the permission to add.
    :param logon_required: part of the common handler signature; unused here.
    :param wait_for_completion: must be True (synchronous operation).
    :raises InvalidResourceError: if the User Role does not exist.
    :raises BadRequestError: reason 314, if the User Role is system-defined.
    """
    assert wait_for_completion is True  # synchronous operation
    user_role_oid = uri_parms[0]
    user_role_uri = '/api/user-roles/' + user_role_oid
    try:
        user_role = hmc.lookup_by_uri(user_role_uri)
    except KeyError:
        raise InvalidResourceError(method, uri)
    check_required_fields(method, uri, body,
                          ['permitted-object', 'permitted-object-type'])
    # Reject if User Role is system-defined:
    if user_role.properties['type'] == 'system-defined':
        raise BadRequestError(
            method, uri, reason=314,
            message="Cannot add permission to "
            "system-defined user role: {}".format(user_role_uri))
    # Apply defaults, so our internally stored copy has all fields:
    permission = copy.deepcopy(body)
    if 'include-members' not in permission:
        permission['include-members'] = False
    if 'view-only-mode' not in permission:
        permission['view-only-mode'] = True
    # Add the permission to its store (the faked User Role object):
    if user_role.properties.get('permissions', None) is None:
        user_role.properties['permissions'] = []
    user_role.properties['permissions'].append(permission)
Operation: Add Permission to User Role.
def documento(self, *args, **kwargs):
    """Render the XML document as a string, optionally prefixed with the
    XML declaration.

    :keyword forcar_unicode: if True, return the document as unicode text
        (default False); otherwise it is passed through ``unidecode``.
    :keyword incluir_xml_decl: if True (default), prepend the XML
        declaration matching the chosen encoding.
    """
    forcar_unicode = kwargs.pop('forcar_unicode', False)
    incluir_xml_decl = kwargs.pop('incluir_xml_decl', True)
    doc = ET.tostring(self._xml(*args, **kwargs),
                      encoding='utf-8').decode('utf-8')
    if forcar_unicode:
        if incluir_xml_decl:
            return u'{}\n{}'.format(constantes.XML_DECL_UNICODE, doc)
        return doc
    if incluir_xml_decl:
        return '{}\n{}'.format(constantes.XML_DECL, unidecode(doc))
    return unidecode(doc)
Resulta no documento XML como string, que pode ou não incluir a declaração XML no início do documento.
def refresh(self):
    """Return a deferred that fires with this (refreshed) instance."""
    deferred = self.request('get', self.instance_url())
    deferred.addCallback(self.refresh_from)
    deferred.addCallback(lambda _: self)
    return deferred
Return a deferred.
def estimate_hmm(observations, nstates, lag=1, initial_model=None, output=None, reversible=True, stationary=False, p=None, accuracy=1e-3, maxit=1000, maxit_P=100000, mincount_connectivity=1e-2): r""" Estimate maximum-likelihood HMM Generic maximum-likelihood estimation of HMMs Parameters ---------- observations : list of numpy arrays representing temporal data `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i` nstates : int The number of states in the model. lag : int the lag time at which observations should be read initial_model : HMM, optional, default=None If specified, the given initial model will be used to initialize the BHMM. Otherwise, a heuristic scheme is used to generate an initial guess. output : str, optional, default=None Output model type from [None, 'gaussian', 'discrete']. If None, will automatically select an output model type based on the format of observations. reversible : bool, optional, default=True If True, a prior that enforces reversible transition matrices (detailed balance) is used; otherwise, a standard non-reversible prior is used. stationary : bool, optional, default=False If True, the initial distribution of hidden states is self-consistently computed as the stationary distribution of the transition matrix. If False, it will be estimated from the starting states. Only set this to true if you're sure that the observation trajectories are initiated from a global equilibrium distribution. p : ndarray (nstates), optional, default=None Initial or fixed stationary distribution. If given and stationary=True, transition matrices will be estimated with the constraint that they have p as their stationary distribution. If given and stationary=False, p is the fixed initial distribution of hidden states. accuracy : float convergence threshold for EM iteration. When two the likelihood does not increase by more than accuracy, the iteration is stopped successfully. 
maxit : int stopping criterion for EM iteration. When so many iterations are performed without reaching the requested accuracy, the iteration is stopped without convergence (a warning is given) Return ------ hmm : :class:`HMM <bhmm.hmm.generic_hmm.HMM>` """ # select output model type if output is None: output = _guess_output_type(observations) if lag > 1: observations = lag_observations(observations, lag) # construct estimator from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator as _MaximumLikelihoodEstimator est = _MaximumLikelihoodEstimator(observations, nstates, initial_model=initial_model, output=output, reversible=reversible, stationary=stationary, p=p, accuracy=accuracy, maxit=maxit, maxit_P=maxit_P) # run est.fit() # set lag time est.hmm._lag = lag # return model # TODO: package into specific class (DiscreteHMM, GaussianHMM) return est.hmm
r""" Estimate maximum-likelihood HMM Generic maximum-likelihood estimation of HMMs Parameters ---------- observations : list of numpy arrays representing temporal data `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i` nstates : int The number of states in the model. lag : int the lag time at which observations should be read initial_model : HMM, optional, default=None If specified, the given initial model will be used to initialize the BHMM. Otherwise, a heuristic scheme is used to generate an initial guess. output : str, optional, default=None Output model type from [None, 'gaussian', 'discrete']. If None, will automatically select an output model type based on the format of observations. reversible : bool, optional, default=True If True, a prior that enforces reversible transition matrices (detailed balance) is used; otherwise, a standard non-reversible prior is used. stationary : bool, optional, default=False If True, the initial distribution of hidden states is self-consistently computed as the stationary distribution of the transition matrix. If False, it will be estimated from the starting states. Only set this to true if you're sure that the observation trajectories are initiated from a global equilibrium distribution. p : ndarray (nstates), optional, default=None Initial or fixed stationary distribution. If given and stationary=True, transition matrices will be estimated with the constraint that they have p as their stationary distribution. If given and stationary=False, p is the fixed initial distribution of hidden states. accuracy : float convergence threshold for EM iteration. When two the likelihood does not increase by more than accuracy, the iteration is stopped successfully. maxit : int stopping criterion for EM iteration. 
When so many iterations are performed without reaching the requested accuracy, the iteration is stopped without convergence (a warning is given) Return ------ hmm : :class:`HMM <bhmm.hmm.generic_hmm.HMM>`
def prepend_string_list(self, key, value, max_length_key):
    """Push *value* onto the front of a fixed-length string-list setting.

    The list is capped at the length stored under *max_length_key*, so the
    oldest entry falls off the end. If *value* is already present it is
    moved to the front instead of duplicated. Use this to implement things
    like a 'most recent files' entry.
    """
    limit = self.get(max_length_key)
    current = self.get_string_list(key)
    updated = [value]
    updated.extend(entry for entry in current if entry != value)
    updated = updated[:limit]
    self.beginWriteArray(key)
    for index, entry in enumerate(updated):
        self.setArrayIndex(index)
        self.setValue("entry", entry)
    self.endArray()
Prepend a fixed-length string list with a new string. The oldest string will be removed from the list. If the string is already in the list, it is shuffled to the top. Use this to implement things like a 'most recent files' entry.
def player_stats(game_id): """Return dictionary of individual stats of a game with matching id. The additional pitching/batting is mostly the same stats, except it contains some useful stats such as groundouts/flyouts per pitcher (go/ao). MLB decided to have two box score files, thus we return the data from both. """ # get data from data module box_score = mlbgame.data.get_box_score(game_id) box_score_tree = etree.parse(box_score).getroot() # get pitching and batting info pitching = box_score_tree.findall('pitching') batting = box_score_tree.findall('batting') # get parsed stats pitching_info = __player_stats_info(pitching, 'pitcher') batting_info = __player_stats_info(batting, 'batter') # rawboxscore not available after 2018 try: raw_box_score = mlbgame.data.get_raw_box_score(game_id) raw_box_score_tree = etree.parse(raw_box_score).getroot() additional_stats = __raw_player_stats_info(raw_box_score_tree) addl_home_pitching = additional_stats[0]['pitchers'] addl_home_batting = additional_stats[0]['batters'] addl_away_pitching = additional_stats[1]['pitchers'] addl_away_batting = additional_stats[1]['batters'] output = { 'home_pitching': pitching_info[0], 'away_pitching': pitching_info[1], 'home_batting': batting_info[0], 'away_batting': batting_info[1], 'home_additional_pitching': addl_home_pitching, 'away_additional_pitching': addl_away_pitching, 'home_additional_batting': addl_home_batting, 'away_additional_batting': addl_away_batting } except etree.XMLSyntaxError: output = { 'home_pitching': pitching_info[0], 'away_pitching': pitching_info[1], 'home_batting': batting_info[0], 'away_batting': batting_info[1], } return output
Return dictionary of individual stats of a game with matching id. The additional pitching/batting is mostly the same stats, except it contains some useful stats such as groundouts/flyouts per pitcher (go/ao). MLB decided to have two box score files, thus we return the data from both.
def delete(self):
    """Destructor: release the native glucose3 solver and close the proof
    file, if one is open."""
    solver = self.glucose
    if solver:
        pysolvers.glucose3_del(solver)
        self.glucose = None
    if self.prfile:
        self.prfile.close()
Destructor.
def load_virt_stream(virt_fd):
    """
    Load the given conf stream into a dict, trying JSON first and falling
    back to YAML.

    Args:
        virt_fd (file): file-like object with the virt config to load

    Returns:
        dict: Loaded virt config
    """
    try:
        virt_conf = json.load(virt_fd)
    except ValueError:
        virt_fd.seek(0)
        # safe_load: yaml.load() without a Loader raises TypeError on
        # PyYAML >= 6.0, and config files must not be able to instantiate
        # arbitrary Python objects.
        virt_conf = yaml.safe_load(virt_fd)
    return deepcopy(virt_conf)
Loads the given conf stream into a dict, trying different formats if needed Args: virt_fd (file): file-like object with the virt config to load Returns: dict: Loaded virt config
def s_l(l, alpha):
    """Return sigma as a function of degree l from Constable and Parker
    (1988).

    :param l: spherical harmonic degree
    :param alpha: the alpha parameter of the model
    :returns: sigma for degree l
    """
    a2 = alpha**2
    c_a = 0.547  # empirical constant from Constable and Parker (1988)
    # True division replaces past.utils.old_div: all operands here are
    # floats, so old_div(a, b) == a / b and the extra dependency goes away.
    return np.sqrt(((c_a**(2. * l)) * a2) / ((l + 1.) * (2. * l + 1.)))
get sigma as a function of degree l from Constable and Parker (1988)
def getPharLapPath():
    """Reads the registry to find the installed path of the Phar Lap ETS
    development kit.

    :returns: the normalized 'BaseDir' path of the Phar Lap ETS install
    :raises SCons.Errors.InternalError: if no Windows registry module exists
    :raises SCons.Errors.UserError: if no installed version of Phar Lap can
        be found in the registry
    """
    if not SCons.Util.can_read_reg:
        raise SCons.Errors.InternalError("No Windows registry module was found")
    try:
        k=SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                  'SOFTWARE\\Pharlap\\ETS')
        # NOTE: 'type' shadows the builtin; only 'val' is used below.
        val, type = SCons.Util.RegQueryValueEx(k, 'BaseDir')
        # The following is a hack...there is (not surprisingly)
        # an odd issue in the Phar Lap plug in that inserts
        # a bunch of junk data after the phar lap path in the
        # registry. We must trim it.
        idx=val.find('\0')
        if idx >= 0:
            val = val[:idx]
        return os.path.normpath(val)
    except SCons.Util.RegError:
        raise SCons.Errors.UserError("Cannot find Phar Lap ETS path in the registry. Is it installed properly?")
Reads the registry to find the installed path of the Phar Lap ETS development kit. Raises UserError if no installed version of Phar Lap can be found.
def return_dat(self, chan, begsam, endsam):
    """Return the data as 2D numpy.ndarray.

    Parameters
    ----------
    chan : int or list
        index (indices) of the channels to read
    begsam : int
        index of the first sample
    endsam : int
        index of the last sample

    Returns
    -------
    numpy.ndarray
        A 2d matrix, with dimension chan X samples. Samples requested
        outside the recording are padded with NaN.
    """
    # Clamp the requested window to the recording and remember how much
    # NaN padding is needed on each side.
    if begsam < 0:
        begpad = -1 * begsam
        begsam = 0
    else:
        begpad = 0
    if endsam > self.n_smp:
        endpad = endsam - self.n_smp
        endsam = self.n_smp
    else:
        endpad = 0
    # Byte offset / length: the file stores all channels of each sample
    # consecutively (hence the Fortran-order reshape below).
    first_sam = DATA_PRECISION * self.n_chan * begsam
    toread_sam = DATA_PRECISION * self.n_chan * (endsam - begsam)
    with open(join(self.filename, EEG_FILE), 'rb') as f:
        f.seek(first_sam)
        x = f.read(toread_sam)
    dat = _read_dat(x)
    dat = reshape(dat, (self.n_chan, -1), 'F')
    # NOTE(review): 'convertion' (sic) presumably applies calibration to
    # physical units -- confirm against the class definition.
    dat = self.convertion(dat[chan, :])
    # Pad the out-of-recording samples with NaN.
    dat = pad(dat, ((0, 0), (begpad, endpad)),
              mode='constant', constant_values=NaN)
    return dat
Return the data as 2D numpy.ndarray. Parameters ---------- chan : int or list index (indices) of the channels to read begsam : int index of the first sample endsam : int index of the last sample Returns ------- numpy.ndarray A 2d matrix, with dimension chan X samples
def _upsample(self, method, limit=None, fill_value=None):
    """
    Parameters
    ----------
    method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'}
        method for upsampling
    limit : int, default None
        Maximum size gap to fill when reindexing
    fill_value : scalar, default None
        Value to use for missing values

    Returns
    -------
    The upsampled result, wrapped via ``_wrap_result``.

    See Also
    --------
    .fillna
    """
    self._set_binner()
    if self.axis:
        raise AssertionError('axis must be 0')
    if self._from_selection:
        raise ValueError("Upsampling from level= or on= selection"
                         " is not supported, use .set_index(...)"
                         " to explicitly set index to"
                         " datetime-like")
    ax = self.ax
    obj = self._selected_obj
    binner = self.binner
    res_index = self._adjust_binner_for_upsample(binner)
    # if we have the same frequency as our axis, then we are equal sampling
    # and can just copy instead of reindexing.
    if limit is None and to_offset(ax.inferred_freq) == self.freq:
        result = obj.copy()
        result.index = res_index
    else:
        result = obj.reindex(res_index, method=method,
                             limit=limit, fill_value=fill_value)
    result = self._apply_loffset(result)
    return self._wrap_result(result)
Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna
def list_of_objects_from_api(url):
    '''Fetch every item from a paginated API listing and return them as one
    list.

    The API returns at most 20 items per request, so additional pages are
    fetched with limit/offset query parameters until ``total_count`` items
    have been collected.

    Assumptions: the API's page limit is not less than 20, and *url*
    carries no query string of its own (parameters are appended with '?').
    '''
    response = requests.get(url)
    content = json.loads(response.content)
    count = content["meta"]["total_count"]
    items = list(content["items"])
    if count <= 20:
        return items
    # Items 0..19 are already fetched. Remaining pages start at offsets
    # 20, 40, ...; (count - 1) // 20 requests exactly cover the rest.
    # (The original int(math.ceil(count // 20)) floored before "ceiling",
    # issuing one extra empty request whenever count was a multiple of 20.)
    num_requests = (count - 1) // 20
    for page in range(1, num_requests + 1):
        paginated_url = "{}?limit=20&offset={}".format(url, str(page * 20))
        paginated_response = requests.get(paginated_url)
        items = items + json.loads(paginated_response.content)["items"]
    return items
API only serves 20 items per request by default This fetches info on all of the items and returns them as a list Assumption: limit of API is not less than 20
def stop(self):
    """Stop all session activity.

    Blocks until the I/O and writer threads have terminated.
    """
    for worker, message in (
            (self._io_thread, "Waiting for I/O thread to stop..."),
            (self._writer_thread, "Waiting for Writer Thread to stop...")):
        if worker is not None:
            self.log.info(message)
            self.closed = True
            worker.join()
    self.log.info("All worker threads stopped.")
Stops all session activity. Blocks until io and writer thread dies
def get_asset_admin_session_for_repository(self, repository_id=None, proxy=None):
    """Gets an asset administration session for the given repository.

    arg:    repository_id (osid.id.Id): the ``Id`` of the repository
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetAdminSession) - an
            ``AssetAdminSession``
    raise:  NotFound - ``repository_id`` not found
    raise:  NullArgument - ``repository_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_asset_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_asset_admin()`` and ``supports_visible_federation()``
    are ``true``.*
    """
    # A lookup session for the same repository is created first and handed
    # to the AssetAdminSession wrapper together with the provider's admin
    # session and the config map.
    asset_lookup_session = self._provider_manager.get_asset_lookup_session_for_repository(
        repository_id, proxy)
    return AssetAdminSession(
        self._provider_manager.get_asset_admin_session_for_repository(repository_id, proxy),
        self._config_map,
        asset_lookup_session)
Gets an asset administration session for the given repository. arg: repository_id (osid.id.Id): the ``Id`` of the repository arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetAdminSession) - an ``AssetAdminSession`` raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_asset_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_admin()`` and ``supports_visible_federation()`` are ``true``.*
def standardUncertainties(self, sharpness=0.5):
    '''Return the standard uncertainties of the x and y components.

    sharpness -> image sharpness // std of Gaussian PSF [px]

    Combines, in quadrature:
    1. px-size-changes (due to deflection)
    2. reprojection error
    '''
    height, width = self.coeffs['shape']
    fx, fy = self.getDeflection(width, height)
    # RMSE of imgPoint-projectedPoints
    reproj = self.coeffs['reprojectionError']
    combined = (sharpness**2 + reproj**2)**0.5
    return fx * combined, fy * combined
sharpness -> image sharpness // std of Gaussian PSF [px] returns a list of standard uncertainties for the x and y component: (1x,2x), (1y, 2y), (intensity:None) 1. px-size-changes(due to deflection) 2. reprojection error
def translate(script):
    '''Translate a zipline script into a pylivetrader script.

    :param script: source text of the zipline script
    :returns: the transformed source code as a string
    '''
    tree = ast.parse(script)
    # Rewrites zipline imports in place on the parsed tree.
    ZiplineImportVisitor().visit(tree)
    return astor.to_source(tree)
translate zipline script into pylivetrader script.
def load_from_rdf_file(self, rdf_file):
    """Initialize given an RDF input file representing the hierarchy.

    Parameters
    ----------
    rdf_file : str
        Path to an RDF file (N-Triples serialization).
    """
    self.graph = rdflib.Graph()
    # format='nt' = N-Triples.
    self.graph.parse(os.path.abspath(rdf_file), format='nt')
    self.initialize()
Initialize given an RDF input file representing the hierarchy. Parameters ---------- rdf_file : str Path to an RDF file.
def is_valid_url(value):
    """Check if given value is a valid URL string.

    :param value: a value to test
    :returns: truthy if the value matches the URL pattern and has a valid
        hostname
    """
    match = URL_REGEX.match(value)
    if not match:
        return match
    return is_valid_host(urlparse(value).hostname)
Check if given value is a valid URL string. :param value: a value to test :returns: True if the value is valid
async def execute_insert(
    self, sql: str, parameters: Optional[Iterable[Any]] = None
) -> Optional[sqlite3.Row]:
    """Helper to insert and get the last_insert_rowid.

    :param sql: the INSERT statement to execute
    :param parameters: bind parameters for the statement; defaults to an
        empty sequence
    :returns: the resulting row, if any
    """
    if parameters is None:
        parameters = []
    return await self._execute(self._execute_insert, sql, parameters)
Helper to insert and get the last_insert_rowid.
def _get_group_no(self, tag_name): """ Takes tag name and returns the number of the group to which tag belongs """ if tag_name in self.full: return self.groups.index(self.full[tag_name]["parent"]) else: return len(self.groups)
Takes tag name and returns the number of the group to which tag belongs
def parse(self, argument):
    """See base class."""
    if isinstance(argument, list):
        return argument
    if not argument:
        return []
    return [piece.strip() for piece in argument.split(self._token)]
See base class.
def deactivate_object(brain_or_object):
    """Deactivate the given object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Nothing
    :rtype: None
    """
    obj = get_object(brain_or_object)
    # we do not want to deactivate the site root!
    if is_root(obj):
        fail(401, "Deactivating the Portal is not allowed")
    try:
        do_transition_for(brain_or_object, "deactivate")
    except Unauthorized:
        fail(401, "Not allowed to deactivate object '%s'" % obj.getId())
Deactivate the given object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Nothing :rtype: None
def energies(self, samples_like, dtype=float):
    """Determine the energies of the given samples.

    Args:
        samples_like (samples_like):
            A collection of raw samples. `samples_like` is an extension
            of NumPy's array_like structure. See :func:`.as_samples`.

        dtype (:class:`numpy.dtype`):
            The data type of the returned energies. The default is the
            builtin ``float`` -- ``np.float`` was merely an alias for it
            and was removed in NumPy 1.24 (deprecated since 1.20), so the
            default value is unchanged but import-safe.

    Returns:
        :obj:`numpy.ndarray`: The energies.
    """
    samples, labels = as_samples(samples_like)
    # Skip the variable_order reindexing when labels are already the
    # identity ordering 0..n-1.
    if all(v == idx for idx, v in enumerate(labels)):
        ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype)
    else:
        ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(
            variable_order=labels, dtype=dtype)
    # linear + quadratic + offset contributions, vectorized.
    energies = samples.dot(ldata) \
        + (samples[:, irow] * samples[:, icol]).dot(qdata) \
        + offset
    return np.asarray(energies, dtype=dtype)
Determine the energies of the given samples. Args: samples_like (samples_like): A collection of raw samples. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`): The data type of the returned energies. Returns: :obj:`numpy.ndarray`: The energies.
def sendCode(self, mobile, templateId, region, verifyId=None, verifyCode=None):
    """Send an SMS verification code.

    @param mobile: target phone number to receive the code. The same
        number may receive at most one code per minute and three codes
        per hour. (required)
    @param templateId: SMS template Id, obtained in the developer console
        under SMS service -> settings -> SMS templates. (required)
    @param region: country calling code of the phone number; currently
        only China (86) is supported.
    @param verifyId: image-CAPTCHA identifier; required when image
        verification is enabled, otherwise optional. Obtained from the
        return value of the get-image-CAPTCHA method.
    @param verifyCode: image-CAPTCHA answer; required when image
        verification is enabled, otherwise optional.
    @return code: status code, 200 means success.
    @return sessionId: unique identifier of the SMS verification code.
    @return errorMessage: error message.
    """
    # Response-field schema handed to the Response wrapper. Kept verbatim
    # (including the Chinese descriptions) because it is runtime data.
    desc = {
        "name": "SMSSendCodeReslut",
        "desc": " SMSSendCodeReslut 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "sessionId",
            "type": "String",
            "desc": "短信验证码唯一标识。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    r = self.call_api(
        method=('SMS', 'POST', 'application/x-www-form-urlencoded'),
        action='/sendCode.json',
        params={
            "mobile": mobile,
            "templateId": templateId,
            "region": region,
            "verifyId": verifyId,
            "verifyCode": verifyCode
        })
    return Response(r, desc)
发送短信验证码方法。 @param mobile:接收短信验证码的目标手机号,每分钟同一手机号只能发送一次短信验证码,同一手机号 1 小时内最多发送 3 次。(必传) @param templateId:短信模板 Id,在开发者后台->短信服务->服务设置->短信模版中获取。(必传) @param region:手机号码所属国家区号,目前只支持中国区号(86) @param verifyId:图片验证标识 Id ,开启图片验证功能后此参数必传,否则可以不传。在获取图片验证码方法返回值中获取。 @param verifyCode:图片验证码,开启图片验证功能后此参数必传,否则可以不传。 @return code:返回码,200 为正常。 @return sessionId:短信验证码唯一标识。 @return errorMessage:错误信息。
def get(self, name):
    """Return the resource URI for the specified resource name.

    If the name is not in the Name-URI cache, the cache is refreshed from
    the HMC with all resources of the manager holding this cache. If the
    name still cannot be resolved afterwards, ``NotFound`` is raised.
    """
    self.auto_invalidate()
    try:
        return self._uris[name]
    except KeyError:
        pass
    # Cache miss: pull a fresh listing from the HMC and retry once.
    self.refresh()
    try:
        return self._uris[name]
    except KeyError:
        raise NotFound({self._manager._name_prop: name}, self._manager)
Get the resource URI for a specified resource name. If an entry for the specified resource name does not exist in the Name-URI cache, the cache is refreshed from the HMC with all resources of the manager holding this cache. If an entry for the specified resource name still does not exist after that, ``NotFound`` is raised.
def get_render_configurations(self, request, **kwargs):
    """Render image interface.

    Builds the base image configuration (extent, size, format, background)
    from the processed form data, plus one RenderConfiguration per service
    variable with the requested time applied.

    :param request: the incoming HTTP request (form data arrives in kwargs)
    :returns: tuple of (base ImageConfiguration, list of
        time-adjusted RenderConfiguration objects)
    """
    data = self.process_form_data(self._get_form_defaults(), kwargs)
    variable_set = self.get_variable_set(self.service.variable_set.order_by('index'), data)
    base_config = ImageConfiguration(
        extent=data['bbox'],
        size=data['size'],
        image_format=data['image_format'],
        # Transparent background only when explicitly requested.
        background_color=TRANSPARENT_BACKGROUND_COLOR if data.get('transparent') else DEFAULT_BACKGROUND_COLOR
    )
    return base_config, self.apply_time_to_configurations([RenderConfiguration(v) for v in variable_set], data)
Render image interface
def is_equal(self, other_consonnant):
    """Compare two consonants by place, manner, voicing and gemination.

    >>> v_consonant = Consonant(Place.labio_dental, Manner.fricative, True, "v", False)
    >>> f_consonant = Consonant(Place.labio_dental, Manner.fricative, False, "f", False)
    >>> v_consonant.is_equal(f_consonant)
    False

    :param other_consonnant: the consonant to compare against
    :return: True if all four phonological features match
    """
    features = ("place", "manner", "voiced", "geminate")
    return all(getattr(self, feature) == getattr(other_consonnant, feature)
               for feature in features)
>>> v_consonant = Consonant(Place.labio_dental, Manner.fricative, True, "v", False) >>> f_consonant = Consonant(Place.labio_dental, Manner.fricative, False, "f", False) >>> v_consonant.is_equal(f_consonant) False :param other_consonnant: :return:
def potential_purviews(self, direction, mechanism, purviews=False):
    """Override Subsystem implementation using Network-level indices."""
    # Every subset of the network's node indices is a candidate purview;
    # filter down to the irreducible ones.
    candidates = utils.powerset(self.node_indices)
    return irreducible_purviews(self.cm, direction, mechanism, candidates)
Override Subsystem implementation using Network-level indices.
def json_paginate(self, base_url, page_number): """ Return a dict for a JSON paginate """ data = self.page(page_number) first_id = None last_id = None if data: first_id = data[0].id last_id = data[-1].id return { 'meta': { 'total_pages': self.max_pages, 'first_id': first_id, 'last_id': last_id, 'current_page': page_number }, 'data': self.page(page_number), 'links': self.links(base_url, page_number) }
Return a dict for a JSON paginate
def to(self, space):
    """Convert this color to a different color space.

    :param str space: Name of the target color space.
    :rtype: Color
    :returns: A new spectra.Color in the given color space (or this very
        instance when it is already in that space).
    """
    if space == self.space:
        return self
    converted = convert_color(self.color_object, COLOR_SPACES[space])
    return self.__class__(space, *converted.get_value_tuple())
Convert color to a different color space. :param str space: Name of the color space. :rtype: Color :returns: A new spectra.Color in the given color space.
def update(self, id, newObj):
    """Update a object

    Args:
        id (int): Target Object ID
        newObj (object): New object will be merged into original object

    Returns:
        Object: Updated object
        None: If specified object id is not found

    Raises:
        MultipleInvalid: If input object is invalid
    """
    # NOTE: 'id' shadows the builtin; kept for interface compatibility.
    # Validate first; raises MultipleInvalid on bad input.
    newObj = self.validation(newObj)
    for obj in self.model.db:
        if obj["id"] != id:
            continue
        # Never let the payload overwrite the target object's id.
        newObj.pop("id", None)
        obj.update(newObj)
        obj = self._cast_model(obj)
        # Persist immediately unless a batch operation is in progress.
        if not self._batch.enable.is_set():
            self.model.save_db()
        return obj
    return None
Update a object Args: id (int): Target Object ID newObj (object): New object will be merged into original object Returns: Object: Updated object None: If specified object id is not found MultipleInvalid: If input object is invaild
def _calculateCoverageMasks(proteindb, peptidedb):
    """Calculate the sequence coverage masks for all proteindb elements.
    Private method used by :class:`ProteinDatabase`.

    A coverage mask is a numpy boolean array with the length of the protein
    sequence. Each protein position that has been covered in at least one
    peptide is set to True. Coverage masks are calculated for unique and for
    shared peptides. Peptides are matched to proteins according to positions
    derived by the digestion of the FASTA file.

    Alternatively peptides could also be matched to proteins just by
    sequence as it is done in :func:`pyteomics.parser.coverage`, but this is
    not the case here.

    :param proteindb: a dictionary containing :class:`ProteinSequence`
        entries, for example ``ProteinDatabase.proteins``
    :param peptidedb: a dictionary containing :class:`PeptideSequence`
        entries, for example ``ProteinDatabase.peptides``

    Sets two attributes for each ``ProteinSequence`` entry:
    ``.coverageMaskUnique`` = coverage mask of unique peptides
    ``.coverageMaskShared`` = coverage mask of shared peptides
    """
    def _coverageMask(proteinEntry, peptides, proteinId):
        # Boolean mask over the protein sequence, True wherever any of the
        # given peptides maps (positions come from the FASTA digestion).
        mask = numpy.zeros(proteinEntry.length(), dtype='bool')
        for peptide in peptides:
            startPos, endPos = peptidedb[peptide].proteinPositions[proteinId]
            mask[startPos-1:endPos] = True
        return mask

    for proteinId, proteinEntry in viewitems(proteindb):
        # Direct attribute assignment replaces setattr() with constant
        # names; the shared helper replaces two duplicated loops.
        proteinEntry.coverageMaskUnique = _coverageMask(
            proteinEntry, proteinEntry.uniquePeptides, proteinId)
        proteinEntry.coverageMaskShared = _coverageMask(
            proteinEntry, proteinEntry.sharedPeptides, proteinId)
Calculate the sequence coverage masks for all proteindb elements. Private method used by :class:`ProteinDatabase`. A coverage mask is a numpy boolean array with the length of the protein sequence. Each protein position that has been covered in at least one peptide is set to True. Coverage masks are calculated for unique and for shared peptides. Peptides are matched to proteins according to positions derived by the digestion of the FASTA file. Alternatively peptides could also be matched to proteins just by sequence as it is done in :func:`pyteomics.parser.coverage`, but this is not the case here. :param proteindb: a dictionary containing :class:`ProteinSequence` entries, for example ``ProteinDatabase.proteins`` :param peptidedb: a dictionary containing :class:`PeptideSequence` entries, for example ``ProteinDatabase.peptides`` Sets two attributes for each ``ProteinSequence`` entry: ``.coverageMaskUnique`` = coverage mask of unique peptides ``.coverageMaskShared`` = coverage mask of shared peptides
def threshold_monitor_hidden_threshold_monitor_Memory_retry(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML payload for the
    threshold-monitor-hidden/threshold-monitor/Memory/retry leaf and
    dispatches it via the callback.

    :param retry: value for the Memory retry leaf (required; popped from
        kwargs)
    :param callback: optional dispatch callable; defaults to self._callback
    :returns: whatever the callback returns for the built config element
    """
    config = ET.Element("config")
    threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
    threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
    Memory = ET.SubElement(threshold_monitor, "Memory")
    retry = ET.SubElement(Memory, "retry")
    retry.text = kwargs.pop('retry')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def delete_node_1ton(node_list, begin, node, end):  # type: ([],LinkedNode, LinkedNode, LinkedNode)->[]
    """Delete a node that has one input and n outputs, rewiring its
    neighbours so the graph stays connected.

    :param node_list: list of all nodes; ``node`` is removed from it
    :param begin: the single predecessor of ``node``
    :param node: the node to delete (one input, n outputs)
    :param end: successor node(s) of ``node``; a single node, a list of
        nodes, or None to use ``node.successor``
    :return: the updated node list
    """
    if end is None:
        # BUG FIX: this branch used to run `assert end is not None`, which
        # always failed when end was None and made the fallback unreachable.
        # Default to the node's own successors instead.
        end = node.successor
    elif not isinstance(end, list):
        end = [end]
    if any(e_.in_or_out for e_ in end):
        # if the end is output node, the output name will be kept to avoid
        # the model output name updating.
        begin.out_redirect(node.single_input, node.single_output)
    else:
        for ne_ in end:
            target_var_name = node.single_input
            # since the output info never be updated, except the final.
            assert target_var_name in begin.output.values()
            ne_.in_redirect(node.single_output, target_var_name)
    # Unlink node from both directions of the graph.
    begin.successor = [v_ for v_ in begin.successor if v_ != node] + node.successor
    for ne_ in end:
        ne_.precedence = [begin if v_ == node else v_ for v_ in ne_.precedence]
    node_list.remove(node)
    return node_list
delete the node which has 1-input and n-output
def create_basic_url(self):
    """
    Create URL prefix for API calls based on type of repo.

    The repo may be a fork and may live inside a namespace, giving four
    possible URL shapes in total.

    :return: the API URL prefix, ending with a slash
    """
    segments = [self.instance, "api", "0"]
    if self.username is not None:
        # forked repos are addressed under /fork/<username>/
        segments += ["fork", self.username]
    if self.namespace is not None:
        segments.append(self.namespace)
    segments.append(self.repo)
    return "/".join(segments) + "/"
Create URL prefix for API calls based on type of repo. Repo may be forked and may be in namespace. That makes total 4 different types of URL. :return:
def make_c_header(name, front, body):
    """
    Build a C header from the front and body.

    Wraps *body* in an include guard derived from *name* and prefixes it
    with *front*; the result always ends with a newline.
    """
    guard = "_GU_ZHENGXIONG_{0}_H".format(name.upper())
    pieces = [
        front,
        "",
        "# ifndef " + guard,
        "# define " + guard,
        "",
        body,
        "",
        "# endif /* {0}.h */".format(name),
    ]
    return "\n".join(pieces) + "\n"
Build a C header from the front and body.
def setTabText(self, index, text):
    """
    Sets the text for the tab at the inputted index.

    :param      index | <int>
                text  | <str>
    """
    try:
        self.items()[index].setText(text)
    except IndexError:
        # out-of-range indexes are silently ignored
        pass
Sets the text for the tab at the inputted index. :param index | <int> :param text | <str>
def __get_pending_revisions(self):
    """
    Get all the pending revisions whose time of arrival has passed.

    Selects revisions with ``toa`` earlier than now that are neither
    processed nor currently in process, then marks the matches as
    in-process before returning them.

    :return: A list of revisions
    :rtype: list
    """
    # Current wall-clock time as a Unix timestamp.
    dttime = time.mktime(datetime.datetime.now().timetuple())
    changes = yield self.revisions.find({
        "toa" : {
            "$lt" : dttime,
        },
        "processed": False,
        "inProcess": None
    })
    if len(changes) > 0:
        # Claim the revisions so concurrent pollers do not pick them up.
        # NOTE(review): uses change.get("id") — confirm documents carry an
        # "id" field rather than only Mongo's "_id".
        yield self.set_all_revisions_to_in_process([change.get("id") for change in changes])
    raise Return(changes)
Get all the pending revisions whose time of arrival is earlier than the current time :return: A list of revisions :rtype: list
def status(self):
    """
    One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or
    C{None}.

    Prefers the end message over the start message; returns None when
    neither exists.
    """
    message = self.end_message or self.start_message
    if not message:
        return None
    return message.contents[ACTION_STATUS_FIELD]
One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or C{None}.
def with_vtk(plot=True):
    """Tests VTK interface and mesh repair of Stanford Bunny Mesh.

    :param plot: when True, display the mesh before and after repair
    :return: the repaired mesh (``meshfix.mesh``)
    """
    mesh = vtki.PolyData(bunny_scan)
    meshfix = pymeshfix.MeshFix(mesh)
    if plot:
        print('Plotting input mesh')
        meshfix.plot()
    # Repair in place, then return the fixed mesh.
    meshfix.repair()
    if plot:
        print('Plotting repaired mesh')
        meshfix.plot()
    return meshfix.mesh
Tests VTK interface and mesh repair of Stanford Bunny Mesh
def setrange(self, min, max):
    """Set the dataset min and max values.

    Args::

      min         dataset minimum value (attribute 'valid_range')
      max         dataset maximum value (attribute 'valid_range')

    Returns::

      None

    The data range is part of the so-called "standard" SDS
    attributes. Calling method 'setrange' is equivalent to setting
    the following attribute with a 2-element [min,max] array::

      valid_range


    C library equivalent: SDsetrange
    """
    # Obtain SDS data type.
    try:
        sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
    except HDF4Error:
        raise HDF4Error('setrange : cannot execute')
    n_values = 1
    # Map each supported SDS type to its 1-element buffer constructor;
    # this replaces the former 10-branch if/elif ladder.
    buffer_factories = {
        SDC.CHAR8: _C.array_byte,
        SDC.UCHAR8: _C.array_byte,
        SDC.UINT8: _C.array_byte,
        SDC.INT8: _C.array_int8,
        SDC.INT16: _C.array_int16,
        SDC.UINT16: _C.array_uint16,
        SDC.INT32: _C.array_int32,
        SDC.UINT32: _C.array_uint32,
        SDC.FLOAT32: _C.array_float32,
        SDC.FLOAT64: _C.array_float64,
    }
    try:
        make_buffer = buffer_factories[data_type]
    except KeyError:
        raise HDF4Error("SDsetrange: SDS has an illegal or "
                        "unsupported type %d" % data_type)
    buf1 = make_buffer(n_values)
    buf2 = make_buffer(n_values)
    # SDsetrange takes (id, max, min): buf1 carries the max, buf2 the min.
    buf1[0] = max
    buf2[0] = min
    status = _C.SDsetrange(self._id, buf1, buf2)
    _checkErr('setrange', status, 'cannot execute')
Set the dataset min and max values. Args:: min dataset minimum value (attribute 'valid_range') max dataset maximum value (attribute 'valid_range') Returns:: None The data range is part of the so-called "standard" SDS attributes. Calling method 'setrange' is equivalent to setting the following attribute with a 2-element [min,max] array:: valid_range C library equivalent: SDsetrange
def _get_link(self, cobj):
    """Get a valid link, False if not found.

    Looks up ``cobj`` (a dict with ``module_short`` and ``name`` keys) in
    the Sphinx search index and assembles a documentation URL including
    the in-page anchor.
    """
    fullname = cobj['module_short'] + '.' + cobj['name']
    try:
        value = self._searchindex['objects'][cobj['module_short']]
        match = value[cobj['name']]
    except KeyError:
        # object is not in the inventory
        link = False
    else:
        fname_idx = match[0]
        objname_idx = str(match[1])
        anchor = match[3]

        fname = self._searchindex['filenames'][fname_idx]
        # In 1.5+ Sphinx seems to have changed from .rst.html to only
        # .html extension in converted files. Find this from the options.
        ext = self._docopts.get('FILE_SUFFIX', '.rst.html')
        fname = os.path.splitext(fname)[0] + ext
        if self._is_windows:
            fname = fname.replace('/', '\\')
            link = os.path.join(self.doc_url, fname)
        else:
            link = posixpath.join(self.doc_url, fname)

        # '' means the anchor is the fully qualified name itself;
        # '-' means the anchor is "<objtype>-<fullname>".
        if anchor == '':
            anchor = fullname
        elif anchor == '-':
            anchor = (self._searchindex['objnames'][objname_idx][1] + '-' +
                      fullname)

        link = link + '#' + anchor

    return link
Get a valid link, False if not found
def site_occupation_statistics( self ):
    """
    Average site occupation for each site type.

    Each site's accumulated ``time_occupied`` is summed per label and
    divided by the total simulation time.

    Args:
        None

    Returns:
        (Dict(Str:Float)): Dictionary of occupation statistics, e.g.::

            { 'A' : 2.5, 'B' : 25.3 }

        ``None`` if no time has elapsed yet.
    """
    if self.time == 0.0:
        # no simulation time elapsed; averages are undefined
        return None
    totals = dict.fromkeys(self.site_labels, 0.0)
    for site in self.sites:
        totals[site.label] += site.time_occupied
    return {label: occupied / self.time for label, occupied in totals.items()}
Average site occupation for each site type Args: None Returns: (Dict(Str:Float)): Dictionary of occupation statistics, e.g.:: { 'A' : 2.5, 'B' : 25.3 }
def raw(self) -> str:
    """
    Return a raw format string of the Peer document.

    :return: header fields followed by one inline endpoint per line
    """
    header = (
        "Version: {version}\n"
        "Type: Peer\n"
        "Currency: {currency}\n"
        "PublicKey: {pubkey}\n"
        "Block: {block}\n"
        "Endpoints:\n"
    ).format(version=self.version, currency=self.currency,
             pubkey=self.pubkey, block=self.blockUID)
    endpoint_lines = "".join(ep.inline() + "\n" for ep in self.endpoints)
    return header + endpoint_lines
Return a raw format string of the Peer document :return:
def checktext(sometext, interchange = ALL):
    """
    Check whether some text is a palindrome. Checking is case-insensitive.

    :param str sometext: some string that will be checked for being a
        palindrome as text; the text can be multiline. For what counts
        as text see help(palindromus.istext).
    :keyword dict interchange: dictionary of interchangeable letters
    :except TypeError: if the checked text is not a string
    :return bool:
    """
    # raises TypeError for non-string input
    OnlyStringsCanBeChecked(sometext)

    if not istext(sometext):
        return False
    return checkstring(sometext, interchange=interchange)
Checks whether some text is a palindrome. Checking is case-insensitive :param str sometext: It is some string that will be checked for being a palindrome as text. What is the text see at help(palindromus.istext) The text can be multiline. :keyword dict interchange: It is dictionary of interchangeable letters :except TypeError: If the checked text is not a string :return bool:
def widont(value, count=1):
    """
    Add an HTML non-breaking space between the final two words of the
    string to avoid "widowed" words.

    Examples:

    >>> print(widont('Test me out'))
    Test me&nbsp;out

    >>> print("'",widont('It works with trailing spaces too  '), "'")
    ' It works with trailing spaces&nbsp;too   '

    >>> print(widont('NoEffect'))
    NoEffect
    """
    for _ in range(count):
        # force_text both here and on the match keeps unicode handling
        # consistent with the surrounding text utilities
        value = re_widont.sub(
            lambda match: force_text('&nbsp;%s' % match.group(1)),
            force_text(value))
    return value
Add an HTML non-breaking space between the final two words of the string to avoid "widowed" words. Examples: >>> print(widont('Test me out')) Test me&nbsp;out >>> print("'",widont('It works with trailing spaces too '), "'") ' It works with trailing spaces&nbsp;too ' >>> print(widont('NoEffect')) NoEffect
def canonical_request(self):
    """
    The AWS SigV4 canonical request given parameters from an HTTP request.
    This process is outlined here:

    http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html

    The canonical request is:
        request_method + '\n' +
        canonical_uri_path + '\n' +
        canonical_query_string + '\n' +
        signed_headers + '\n' +
        sha256(body).hexdigest()
    """
    headers = self.signed_headers
    # one "key:value\n" line per signed header
    canonical_headers = "".join(
        "%s:%s\n" % pair for pair in iteritems(headers))
    # semicolon-separated list of the signed header names
    signed_header_names = ";".join(iterkeys(self.signed_headers))
    return "\n".join([
        self.request_method,
        self.canonical_uri_path,
        self.canonical_query_string,
        canonical_headers,
        signed_header_names,
        sha256(self.body).hexdigest(),
    ])
The AWS SigV4 canonical request given parameters from an HTTP request. This process is outlined here: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html The canonical request is: request_method + '\n' + canonical_uri_path + '\n' + canonical_query_string + '\n' + signed_headers + '\n' + sha256(body).hexdigest()
def _resolve_group_location(self, group: str) -> str: """ Resolves the location of a setting file based on the given identifier. :param group: the identifier for the group's settings file (~its location) :return: the absolute path of the settings location """ if os.path.isabs(group): possible_paths = [group] else: possible_paths = [] for repository in self.setting_repositories: possible_paths.append(os.path.join(repository, group)) for default_setting_extension in self.default_setting_extensions: number_of_paths = len(possible_paths) for i in range(number_of_paths): path_with_extension = "%s.%s" % (possible_paths[i], default_setting_extension) possible_paths.append(path_with_extension) for path in possible_paths: if os.path.exists(path): return path raise ValueError("Could not resolve location of settings identified by: \"%s\"" % group)
Resolves the location of a setting file based on the given identifier. :param group: the identifier for the group's settings file (~its location) :return: the absolute path of the settings location
def io_size_kb(prev, curr, counters):
    """
    Calculate the average io size based on bandwidth and throughput.

    formula: average_io_size = bandwidth / throughput

    :param prev: prev resource, not used
    :param curr: current resource
    :param counters: two stats, bandwidth in MB and throughput count
    :return: value in KB, NaN if invalid
    """
    bandwidth_stat, throughput_stat = counters
    # MB per IO, then scaled to KB
    average_mb = div(getattr(curr, bandwidth_stat), getattr(curr, throughput_stat))
    return mul(average_mb, 1024)
calculate the io size based on bandwidth and throughput formula: average_io_size = bandwidth / throughput :param prev: prev resource, not used :param curr: current resource :param counters: two stats, bandwidth in MB and throughput count :return: value, NaN if invalid
def parse_from_environ(self, environ):
    """Parses the information from the environment as form data.

    :param environ: the WSGI environment to be used for parsing.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    mimetype, options = parse_options_header(environ.get("CONTENT_TYPE", ""))
    return self.parse(
        get_input_stream(environ),
        mimetype,
        get_content_length(environ),
        options,
    )
Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``.
def start_tree(self, tree, filename):
    """Some fixers need to maintain tree-wide state.
    This method is called once, at the start of tree fix-up.

    tree - the root node of the tree to be processed.
    filename - the name of the file the tree came from.
    """
    self.set_filename(filename)
    # cache the names already used in the module so new names can avoid them
    self.used_names = tree.used_names
    self.first_log = True
    # counter for generating unique temporary names, starting at 1
    self.numbers = itertools.count(1)
Some fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from.
def from_jd(jd):
    '''Calculate the Mayan long count (baktun, katun, tun, uinal, kin)
    from a Julian day number.'''
    remainder = jd - EPOCH
    counts = []
    # successive place values of the long count: 144000/7200/360/20 days
    for unit in (144000, 7200, 360, 20):
        counts.append(trunc(remainder / unit))
        remainder = remainder % unit
    baktun, katun, tun, uinal = counts
    kin = int(remainder)
    return (baktun, katun, tun, uinal, kin)
Calculate Mayan long count from Julian day
def strip_head(sequence, values):
    """Strips elements of `values` from the beginning of `sequence`."""
    unwanted = set(values)
    stripped = []
    iterator = iter(sequence)
    for element in iterator:
        if element not in unwanted:
            # first element to keep; everything after it is kept too
            stripped.append(element)
            break
    stripped.extend(iterator)
    return stripped
Strips elements of `values` from the beginning of `sequence`.
def batch_iterable(l, n):
    '''Chunks iterable into n sized batches.

    Solution from:
    http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery'''
    iterator = iter(l)
    while True:
        batch = list(islice(iterator, n))
        if not batch:
            # iterator exhausted
            return
        yield batch
Chunks iterable into n sized batches Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery
def search_info(self, search_index):
    """
    Retrieves information about a specified search index within the design
    document, returns dictionary

    GET databasename/_design/{ddoc}/_search_info/{search_index}

    :param search_index: name of the search index to query
    :raises: an HTTP error if the response status indicates failure
        (via ``raise_for_status``)
    """
    ddoc_search_info = self.r_session.get(
        '/'.join([self.document_url, '_search_info', search_index]))
    ddoc_search_info.raise_for_status()
    return response_to_json_dict(ddoc_search_info)
Retrieves information about a specified search index within the design document, returns dictionary GET databasename/_design/{ddoc}/_search_info/{search_index}
def auth(name, nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None):
    '''
    Ensure all nodes are authorized to the cluster

    name
        Irrelevant, not used (recommended: pcs_auth__auth)
    nodes
        a list of nodes which should be authorized to the cluster
    pcsuser
        user for communication with pcs (default: hacluster)
    pcspasswd
        password for pcsuser (default: hacluster)
    extra_args
        list of extra args for the \'pcs cluster auth\' command

    Example:

    .. code-block:: yaml

        pcs_auth__auth:
            pcs.auth:
                - nodes:
                    - node1.example.com
                    - node2.example.com
                - pcsuser: hacluster
                - pcspasswd: hoonetorg
                - extra_args: []
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    auth_required = False

    # Query current authorization state; output is "node: state" lines.
    authorized = __salt__['pcs.is_auth'](nodes=nodes)
    log.trace('Output of pcs.is_auth: %s', authorized)
    authorized_dict = {}
    for line in authorized['stdout'].splitlines():
        node = line.split(':')[0].strip()
        auth_state = line.split(':')[1].strip()
        if node in nodes:
            authorized_dict.update({node: auth_state})
    log.trace('authorized_dict: %s', authorized_dict)

    # Decide whether any node still needs authorization.
    for node in nodes:
        if node in authorized_dict and authorized_dict[node] == 'Already authorized':
            ret['comment'] += 'Node {0} is already authorized\n'.format(node)
        else:
            auth_required = True
            if __opts__['test']:
                ret['comment'] += 'Node is set to authorize: {0}\n'.format(node)

    if not auth_required:
        return ret

    # In test mode report the pending change without applying it.
    if __opts__['test']:
        ret['result'] = None
        return ret

    # Always pass --force so re-authorization does not prompt.
    if not isinstance(extra_args, (list, tuple)):
        extra_args = []
    if '--force' not in extra_args:
        extra_args += ['--force']

    authorize = __salt__['pcs.auth'](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args)
    log.trace('Output of pcs.auth: %s', authorize)

    authorize_dict = {}
    for line in authorize['stdout'].splitlines():
        node = line.split(':')[0].strip()
        auth_state = line.split(':')[1].strip()
        if node in nodes:
            authorize_dict.update({node: auth_state})
    log.trace('authorize_dict: %s', authorize_dict)

    # Record per-node results; any node not reported as 'Authorized'
    # marks the whole state as failed.
    for node in nodes:
        if node in authorize_dict and authorize_dict[node] == 'Authorized':
            ret['comment'] += 'Authorized {0}\n'.format(node)
            ret['changes'].update({node: {'old': '', 'new': 'Authorized'}})
        else:
            ret['result'] = False
            if node in authorized_dict:
                ret['comment'] += 'Authorization check for node {0} returned: {1}\n'.format(node, authorized_dict[node])
            if node in authorize_dict:
                ret['comment'] += 'Failed to authorize {0} with error {1}\n'.format(node, authorize_dict[node])

    return ret
Ensure all nodes are authorized to the cluster name Irrelevant, not used (recommended: pcs_auth__auth) nodes a list of nodes which should be authorized to the cluster pcsuser user for communication with pcs (default: hacluster) pcspasswd password for pcsuser (default: hacluster) extra_args list of extra args for the \'pcs cluster auth\' command Example: .. code-block:: yaml pcs_auth__auth: pcs.auth: - nodes: - node1.example.com - node2.example.com - pcsuser: hacluster - pcspasswd: hoonetorg - extra_args: []
def sample(self, cursor):
    """Extract records randomly from the database.
    Continue until the target proportion of the items have been extracted,
    or until `min_items` if this is larger.
    If `max_items` is non-negative, do not extract more than these.

    This function is a generator, yielding items incrementally.

    :param cursor: Cursor to sample
    :type cursor: pymongo.cursor.Cursor
    :return: yields each item
    :rtype: dict
    :raise: ValueError, if max_items is valid and less than `min_items`
            or if target collection is empty
    """
    # NOTE(review): pymongo's Cursor.count() is deprecated in newer
    # drivers — confirm the driver version in use still supports it.
    count = cursor.count()

    # special case: empty collection
    if count == 0:
        self._empty = True
        raise ValueError("Empty collection")

    # special case: entire collection
    if self.p >= 1 and self.max_items <= 0:
        for item in cursor:
            yield item
        return

    # calculate target number of items to select
    if self.max_items <= 0:
        n_target = max(self.min_items, self.p * count)
    else:
        if self.p <= 0:
            n_target = max(self.min_items, self.max_items)
        else:
            n_target = max(self.min_items, min(self.max_items, self.p * count))
    if n_target == 0:
        raise ValueError("No items requested")

    # select first `n_target` items that pop up with
    # probability self.p
    # This is actually biased to items at the beginning
    # of the file if n_target is smaller than (p * count),
    n = 0
    while n < n_target:
        try:
            item = next(cursor)
        except StopIteration:
            # need to keep looping through data until
            # we get all our items!
            cursor.rewind()
            item = next(cursor)
        if self._keep():
            yield item
            n += 1
Extract records randomly from the database. Continue until the target proportion of the items have been extracted, or until `min_items` if this is larger. If `max_items` is non-negative, do not extract more than these. This function is a generator, yielding items incrementally. :param cursor: Cursor to sample :type cursor: pymongo.cursor.Cursor :return: yields each item :rtype: dict :raise: ValueError, if max_items is valid and less than `min_items` or if target collection is empty
def calcgain(self, ant1, ant2, skyfreq, pol):
    """ Calculates the complex gain product (g1*g2) for a pair of antennas.

    Returns the inverse of the gain product, 1/(g1*g2), for the solution
    matching (skyfreq, pol); returns 0 when no solution exists or either
    antenna's solution is missing or flagged.
    """
    # indices of calibration solutions at this sky frequency/polarization
    select = self.select[n.where( (self.skyfreq[self.select] == skyfreq) & (self.polarization[self.select] == pol) )[0]]

    if len(select):  # for when telcal solutions don't exist
        ind1 = n.where(ant1 == self.antnum[select])
        ind2 = n.where(ant2 == self.antnum[select])
        # complex gains from amp/phase; multiplying by (not flagged)
        # zeroes the gain of any flagged solution
        g1 = self.amp[select][ind1]*n.exp(1j*n.radians(self.phase[select][ind1])) * (not self.flagged.astype(int)[select][ind1][0])
        g2 = self.amp[select][ind2]*n.exp(-1j*n.radians(self.phase[select][ind2])) * (not self.flagged.astype(int)[select][ind2][0])
    else:
        g1 = [0]; g2 = [0]

    try:
        # IndexError covers an antenna with no solution; AssertionError a
        # zero/flagged gain — both yield 0
        assert (g1[0] != 0j) and (g2[0] != 0j)
        invg1g2 = 1./(g1[0]*g2[0])
    except (AssertionError, IndexError):
        invg1g2 = 0
    return invg1g2
Calculates the complex gain product (g1*g2) for a pair of antennas.
def items(self, *keys):
    """ Return the fields of the record as a list of key and value tuples.

    :param keys: optional subset of keys to return; a key not present in
        the record yields a ``(key, None)`` pair
    :return: list of ``(key, value)`` tuples
    """
    if keys:
        d = []
        for key in keys:
            try:
                i = self.index(key)
            except KeyError:
                # unknown key: report it with a None value
                d.append((key, None))
            else:
                d.append((self.__keys[i], self[i]))
        return d
    return list((self.__keys[i], super(Record, self).__getitem__(i)) for i in range(len(self)))
Return the fields of the record as a list of key and value tuples :return:
def listar_por_tipo_ambiente(self, id_tipo_equipamento, id_ambiente):
    """List the equipment of a given type that is associated with an environment.

    :param id_tipo_equipamento: Identifier of the equipment type.
    :param id_ambiente: Identifier of the environment.

    :return: Dictionary with the following structure:

    ::

        {'equipamento': [{'id': < id_equipamento >,
        'nome': < nome_equipamento >,
        'id_tipo_equipamento': < id_tipo_equipamento >,
        'nome_tipo_equipamento': < nome_tipo_equipamento >,
        'id_modelo': < id_modelo >,
        'nome_modelo': < nome_modelo >,
        'id_marca': < id_marca >,
        'nome_marca': < nome_marca >
        }, ... other equipment ...]}

    :raise InvalidParameterError: The equipment type and/or environment
        identifiers are null or invalid.
    :raise DataBaseError: networkapi failed while accessing the database.
    :raise XMLError: networkapi failed while generating the XML response.
    """
    if not is_valid_int_param(id_tipo_equipamento):
        raise InvalidParameterError(
            u'O identificador do tipo do equipamento é inválido ou não foi informado.')

    if not is_valid_int_param(id_ambiente):
        raise InvalidParameterError(
            u'O identificador do ambiente é inválido ou não foi informado.')

    url = 'equipamento/tipoequipamento/' + \
        str(id_tipo_equipamento) + '/ambiente/' + str(id_ambiente) + '/'

    code, xml = self.submit(None, 'GET', url)

    key = 'equipamento'
    return get_list_map(self.response(code, xml, [key]), key)
Lista os equipamentos de um tipo e que estão associados a um ambiente. :param id_tipo_equipamento: Identificador do tipo do equipamento. :param id_ambiente: Identificador do ambiente. :return: Dicionário com a seguinte estrutura: :: {'equipamento': [{'id': < id_equipamento >, 'nome': < nome_equipamento >, 'id_tipo_equipamento': < id_tipo_equipamento >, 'nome_tipo_equipamento': < nome_tipo_equipamento >, 'id_modelo': < id_modelo >, 'nome_modelo': < nome_modelo >, 'id_marca': < id_marca >, 'nome_marca': < nome_marca > }, ... demais equipamentos ...]} :raise InvalidParameterError: O identificador do tipo de equipamento e/ou do ambiente são nulos ou inválidos. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta.
def get(self, request):
    '''The home page of this router.

    Renders an HTML index listing every child route (sorted by creation
    order), tagging each link with CSS classes for its name and the HTTP
    methods it declares.
    '''
    ul = Html('ul')
    for router in sorted(self.routes, key=lambda r: r.creation_count):
        a = router.link(escape(router.route.path))
        a.addClass(router.name)
        for method in METHODS:
            if router.getparam(method):
                # one CSS class per HTTP method the route supports
                a.addClass(method)
        li = Html('li', a, ' %s' % router.getparam('title', ''))
        ul.append(li)
    title = 'Pulsar'
    html = request.html_document
    html.head.title = title
    html.head.links.append('httpbin.css')
    html.head.links.append('favicon.ico', rel="icon", type='image/x-icon')
    html.head.scripts.append('httpbin.js')
    ul = ul.to_string(request)
    # fill the page template with title, i18n samples, versions and the list
    templ = asset('template.html')
    body = templ % (title, JAPANESE, CHINESE, version, pyversion, ul)
    html.body.append(body)
    return html.http_response(request)
The home page of this router
def wrapper__ignore(self, type_):
    """
    Selectively ignore certain types when wrapping attributes.

    Adds *type_* to the exclusion list if it is not already present.

    :param class type_: The class/type definition to ignore.
    :rtype list(type): The current list of ignored types
    """
    excluded = self.__exclusion_list
    if type_ not in excluded:
        excluded.append(type_)
    return excluded
Selectively ignore certain types when wrapping attributes. :param class type: The class/type definition to ignore. :rtype list(type): The current list of ignored types
def get_filepaths_with_extension(extname, root_dir='.'):
    """Get relative filepaths of files in a directory, and sub-directories,
    with the given extension.

    Parameters
    ----------
    extname : `str`
        Extension name (e.g. 'txt', 'rst'). Extension comparison is
        case-insensitive.
    root_dir : `str`, optional
        Root directory. Current working directory by default.

    Returns
    -------
    filepaths : `list` of `str`
        File paths, relative to ``root_dir``, with the given extension.
    """
    # normalise to a lower-cased ".ext" form for splitext comparison
    suffix = extname if extname.startswith('.') else '.' + extname
    suffix = suffix.lower()

    base_dir = os.path.abspath(root_dir)
    matches = []
    for current_dir, _subdirs, names in os.walk(base_dir):
        for name in names:
            if os.path.splitext(name)[-1].lower() == suffix:
                full_path = os.path.join(current_dir, name)
                matches.append(os.path.relpath(full_path, start=base_dir))
    return matches
Get relative filepaths of files in a directory, and sub-directories, with the given extension. Parameters ---------- extname : `str` Extension name (e.g. 'txt', 'rst'). Extension comparison is case-insensitive. root_dir : `str`, optional Root directory. Current working directory by default. Returns ------- filepaths : `list` of `str` File paths, relative to ``root_dir``, with the given extension.
def calculate_time_difference(startDate, endDate):
    """
    *Return the time difference between two dates as a string*

    **Key Arguments:**
        - ``startDate`` -- the first date in YYYY-MM-DDTHH:MM:SS format
        - ``endDate`` -- the final date YYYY-MM-DDTHH:MM:SS format

    **Return:**
        - ``relTime`` -- the difference between the two dates in
          Y,M,D,h,m,s (string)

    **Usage:**

        .. code-block:: python

            from fundamentals import times
            diff = times.calculate_time_difference(startDate="2015-10-13 10:02:12", endDate="2017-11-04 16:47:05")
            print(diff)

            # OUT: 2yrs 22dys 6h 44m 53s
    """
    from datetime import datetime
    from dateutil import relativedelta

    def _parse(stamp):
        # accept "YYYY-MM-DD HH:MM:SS" by converting the space to a "T"
        if "T" not in stamp:
            stamp = stamp.strip().replace(" ", "T")
        return datetime.strptime(stamp, '%Y-%m-%dT%H:%M:%S')

    delta = relativedelta.relativedelta(_parse(endDate), _parse(startDate))

    parts = []
    for amount, suffix in ((delta.years, "yrs"), (delta.months, "mths"),
                           (delta.days, "dys"), (delta.hours, "h"),
                           (delta.minutes, "m")):
        if amount > 0:
            parts.append("%s%s " % (amount, suffix))
    if delta.seconds > 0:
        parts.append("%ss" % delta.seconds)

    relTime = "".join(parts)
    # identical timestamps produce an empty string; report zero seconds
    return relTime if relTime else "0s"
*Return the time difference between two dates as a string* **Key Arguments:** - ``startDate`` -- the first date in YYYY-MM-DDTHH:MM:SS format - ``endDate`` -- the final date YYYY-MM-DDTHH:MM:SS format **Return:** - ``relTime`` -- the difference between the two dates in Y,M,D,h,m,s (string) **Usage:** .. code-block:: python from fundamentals import times diff = times.calculate_time_difference(startDate="2015-10-13 10:02:12", endDate="2017-11-04 16:47:05") print diff # OUT: 2yrs 22dys 6h 44m 53s
def urlsplit(name):
    """
    Parse :param:`name` into ``(netloc, port, ssl)``.

    Non-string inputs are coerced with ``str``; a missing scheme defaults
    to ``http://``. Default ports are 443 for https and 80 for http.
    """
    if not isinstance(name, string_types):
        name = str(name)
    if not name.startswith(('http://', 'https://')):
        name = 'http://' + name
    parsed = urlparse(name)
    is_https = parsed.scheme == 'https'
    if is_https and parsed.port is None:
        return parsed.netloc, 443, True
    host = parsed.netloc.rsplit(':')[0]
    return host, parsed.port or 80, is_https
Parse :param:`name` into (netloc, port, ssl)
def post_revert_tags(self, post_id, history_id):
    """Revert a post to a previous set of tags (Requires login) (UNTESTED).

    Parameters:
        post_id (int): The post id number to update.
        history_id (int): The id number of the tag history.
    """
    payload = {'id': post_id, 'history_id': history_id}
    return self._get('post/revert_tags', payload, 'PUT')
Function to revert a post to a previous set of tags (Requires login) (UNTESTED). Parameters: post_id (int): The post id number to update. history_id (int): The id number of the tag history.
def calculate_solidity(labels, indexes=None):
    """Calculate the area of each label divided by the area of its convex hull.

    labels  - a label matrix
    indexes - the indexes of the labels to measure
    """
    if indexes is not None:
        # coerce to a 32-bit integer array for the scipy.ndimage routines
        indexes = np.array(indexes, dtype=np.int32)
    label_areas = scind.sum(np.ones(labels.shape), labels, indexes)
    hull_areas = calculate_convex_hull_areas(labels, indexes)
    return label_areas / hull_areas
Calculate the area of each label divided by the area of its convex hull labels - a label matrix indexes - the indexes of the labels to measure
def generate_branches(scales=None, angles=None, shift_angle=0):
    """Generates branches with alternative system.

    Args:
        scales (tuple/array): Indicating how the branch/es length/es
            develop/s from age to age.
        angles (tuple/array): Holding the branch and shift angle in radians.
        shift_angle (float): Holding the rotation angle for all branches.

    Returns:
        branches (2d-array): An array consisting of arrays holding scale
            and angle for every branch.
    """
    total_spread = sum(angles)
    # each branch is offset from -total/2 by the sum of preceding angles
    return [
        [scale, -total_spread / 2 + sum(angles[:index]) + shift_angle]
        for index, scale in enumerate(scales)
    ]
Generates branches with alternative system. Args: scales (tuple/array): Indicating how the branch/es length/es develop/s from age to age. angles (tuple/array): Holding the branch and shift angle in radians. shift_angle (float): Holding the rotation angle for all branches. Returns: branches (2d-array): An array consisting of arrays holding scale and angle for every branch.
def _index_fs(self):
    """Returns a deque object full of local file system items.

    Collects items from the ``directory`` job argument (recursive local
    listing) and/or the ``object`` job argument (explicitly named files).

    :returns: ``deque``
    """
    indexed_objects = self._return_deque()

    directory = self.job_args.get('directory')
    if directory:
        # NOTE(review): the helper really is spelled "_drectory_local_files"
        # in this codebase.
        indexed_objects = self._return_deque(
            deque=indexed_objects,
            item=self._drectory_local_files(
                directory=directory
            )
        )

    object_names = self.job_args.get('object')
    if object_names:
        indexed_objects = self._return_deque(
            deque=indexed_objects,
            item=self._named_local_files(
                object_names=object_names
            )
        )

    return indexed_objects
Returns a deque object full of local file system items. :returns: ``deque``
def _departure(self) -> datetime: """Extract departure time.""" departure_time = datetime.strptime( self.journey.MainStop.BasicStop.Dep.Time.text, "%H:%M" ).time() if departure_time > (self.now - timedelta(hours=1)).time(): return datetime.combine(self.now.date(), departure_time) return datetime.combine(self.now.date() + timedelta(days=1), departure_time)
Extract departure time.
def directionaldiff(f, x0, vec, **options):
    """
    Return directional derivative of a function of n variables

    Parameters
    ----------
    f: callable
        analytical function to differentiate.
    x0: array
        vector location at which to differentiate f. If x0 is an nxm array,
        then f is assumed to be a function of n*m variables.
    vec: array
        vector defining the line along which to take the derivative. It should
        be the same size as x0, but need not be a vector of unit length.
    **options:
        optional arguments to pass on to Derivative.

    Returns
    -------
    dder: scalar
        estimate of the first derivative of f in the specified direction.

    Examples
    --------
    At the global minimizer (1,1) of the Rosenbrock function,
    compute the directional derivative in the direction [1 2]

    >>> import numpy as np
    >>> import numdifftools as nd
    >>> vec = np.r_[1, 2]
    >>> rosen = lambda x: (1-x[0])**2 + 105*(x[1]-x[0]**2)**2
    >>> dd, info = nd.directionaldiff(rosen, [1, 1], vec, full_output=True)
    >>> np.allclose(dd, 0)
    True
    >>> np.abs(info.error_estimate)<1e-14
    True

    See also
    --------
    Derivative, Gradient
    """
    x0 = np.asarray(x0)
    vec = np.asarray(vec)
    if x0.size != vec.size:
        raise ValueError('vec and x0 must have the same size')
    # Normalize the direction so the derivative is per unit length, then
    # differentiate the 1-D restriction g(t) = f(x0 + t*vec) at t = 0.
    vec = np.reshape(vec / np.linalg.norm(vec.ravel()), x0.shape)
    return Derivative(lambda t: f(x0 + t * vec), **options)(0)
Return directional derivative of a function of n variables Parameters ---------- fun: callable analytical function to differentiate. x0: array vector location at which to differentiate fun. If x0 is an nxm array, then fun is assumed to be a function of n*m variables. vec: array vector defining the line along which to take the derivative. It should be the same size as x0, but need not be a vector of unit length. **options: optional arguments to pass on to Derivative. Returns ------- dder: scalar estimate of the first derivative of fun in the specified direction. Examples -------- At the global minimizer (1,1) of the Rosenbrock function, compute the directional derivative in the direction [1 2] >>> import numpy as np >>> import numdifftools as nd >>> vec = np.r_[1, 2] >>> rosen = lambda x: (1-x[0])**2 + 105*(x[1]-x[0]**2)**2 >>> dd, info = nd.directionaldiff(rosen, [1, 1], vec, full_output=True) >>> np.allclose(dd, 0) True >>> np.abs(info.error_estimate)<1e-14 True See also -------- Derivative, Gradient
def get_aggregation_propensity(self, seq, outdir, cutoff_v=5, cutoff_n=5, run_amylmuts=False):
    """Query AMYLPRED2 for a sequence and return its aggregation propensity.

    The propensity is the consensus number of aggregation-prone segments
    found on the unfolded protein sequence.

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence
        outdir (str): Directory to where output files should be saved
        cutoff_v (int): Minimal number of methods that must agree on a
            residue being aggregation-prone
        cutoff_n (int): Minimal number of consecutive residues to count as
            a "stretch" of aggregation-prone region
        run_amylmuts (bool): If the optional AMYLMUTS method should also be
            run (slowest method, slightly different result each submission).
            Default False.

    Returns:
        int: Number of aggregation-prone segments on the sequence
    """
    sequence = ssbio.protein.sequence.utils.cast_to_str(seq)
    raw_results = self.run_amylpred2(seq=sequence, outdir=outdir,
                                     run_amylmuts=run_amylmuts)
    agg_index, _agg_conf = self.parse_for_consensus_aggregation(
        N=len(sequence), results=raw_results,
        cutoff_v=cutoff_v, cutoff_n=cutoff_n)
    return agg_index
Run the AMYLPRED2 web server for a protein sequence and get the consensus result for aggregation propensity. Args: seq (str, Seq, SeqRecord): Amino acid sequence outdir (str): Directory to where output files should be saved cutoff_v (int): The minimal number of methods that agree on a residue being a aggregation-prone residue cutoff_n (int): The minimal number of consecutive residues to be considered as a 'stretch' of aggregation-prone region run_amylmuts (bool): If AMYLMUTS method should be run, default False. AMYLMUTS is optional as it is the most time consuming and generates a slightly different result every submission. Returns: int: Aggregation propensity - the number of aggregation-prone segments on an unfolded protein sequence
def hsv_2_hex(self, h, s, v):
    """Convert an HSV color to its hex representation."""
    r, g, b = hsv_to_rgb(h, s, v)
    return self.rgb_2_hex(r, g, b)
convert a hsv color to hex
def get_min_distance(self, mesh):
    """
    For each point in ``mesh`` compute the minimum distance to each
    surface element and return the smallest value.

    See :meth:`superclass method <.base.BaseSurface.get_min_distance>`
    for spec of input and result values.
    """
    per_surface = [surface.get_min_distance(mesh)
                   for surface in self.surfaces]
    # Element-wise minimum across surfaces: one value per mesh point.
    return numpy.min(per_surface, axis=0)
For each point in ``mesh`` compute the minimum distance to each surface element and return the smallest value. See :meth:`superclass method <.base.BaseSurface.get_min_distance>` for spec of input and result values.
def set_relay(self, relay_pin=0, state=True):
    '''Set relay_pin to value of state'''
    if not self.mavlink10():
        print("Setting relays not supported.")
        return
    self.mav.command_long_send(
        self.target_system,            # target_system
        self.target_component,         # target_component
        mavlink.MAV_CMD_DO_SET_RELAY,  # command
        0,                             # confirmation
        relay_pin,                     # relay number
        int(state),                    # state (1 to indicate arm)
        0,                             # param3 (all other params meaningless)
        0,                             # param4
        0,                             # param5
        0,                             # param6
        0)                             # param7
Set relay_pin to value of state
def rmdir(self, directory, missing_okay=False): """Forcefully remove the specified directory and all its children.""" # Build a script to walk an entire directory structure and delete every # file and subfolder. This is tricky because MicroPython has no os.walk # or similar function to walk folders, so this code does it manually # with recursion and changing directories. For each directory it lists # the files and deletes everything it can, i.e. all the files. Then # it lists the files again and assumes they are directories (since they # couldn't be deleted in the first pass) and recursively clears those # subdirectories. Finally when finished clearing all the children the # parent directory is deleted. command = """ try: import os except ImportError: import uos as os def rmdir(directory): os.chdir(directory) for f in os.listdir(): try: os.remove(f) except OSError: pass for f in os.listdir(): rmdir(f) os.chdir('..') os.rmdir(directory) rmdir('{0}') """.format( directory ) self._pyboard.enter_raw_repl() try: out = self._pyboard.exec_(textwrap.dedent(command)) except PyboardError as ex: message = ex.args[2].decode("utf-8") # Check if this is an OSError #2, i.e. directory doesn't exist # and rethrow it as something more descriptive. if message.find("OSError: [Errno 2] ENOENT") != -1: if not missing_okay: raise RuntimeError("No such directory: {0}".format(directory)) else: raise ex self._pyboard.exit_raw_repl()
Forcefully remove the specified directory and all its children.
def land(self, velocity=VELOCITY):
    """
    Go straight down and turn off the motors.

    Do not call this function if you use the with keyword. Landing is
    done automatically when the context goes out of scope.

    :param velocity: The velocity (meters/second) when going down
    :return:
    """
    if not self._is_flying:
        return
    # Descend from the current height, shut the setpoint thread down, and
    # only then cut the motors.
    self.down(self._thread.get_height(), velocity)
    self._thread.stop()
    self._thread = None
    self._cf.commander.send_stop_setpoint()
    self._is_flying = False
Go straight down and turn off the motors. Do not call this function if you use the with keyword. Landing is done automatically when the context goes out of scope. :param velocity: The velocity (meters/second) when going down :return:
def build_interfaces_by_method(interfaces):
    """
    Create a new dictionary from INTERFACES hashed first by HTTP method and
    then by the dotted endpoint name, for use with the method-oriented
    interface of disqusapi instead of the endpoint-oriented one.

    For instance::

        'blacklists': {
            'add': {'formats': ['json', 'jsonp'],
                    'method': 'POST',
                    'required': ['forum']}
        }

    is translated to::

        'POST': {
            'blacklists.add': {'formats': ['json', 'jsonp'],
                               'method': 'POST',
                               'required': ['forum']}
        }
    """
    methods = {}

    def walk(node, path_parts):
        # A leaf endpoint is recognized by the presence of a 'method' key;
        # anything else is a namespace to descend into.
        try:
            http_method = node['method'].lower()
        except KeyError:
            for child_name, child in compat.iteritems(node):
                walk(child, path_parts + [child_name])
        else:
            dotted = '.'.join(path_parts)
            methods.setdefault(http_method, {})[dotted] = node

    for name, value in compat.iteritems(interfaces):
        walk(value, [name])
    return methods
Create new dictionary from INTERFACES hashed by method then the endpoints name. For use when using the disqusapi by the method interface instead of the endpoint interface. For instance: 'blacklists': { 'add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } } is translated to: 'POST': { 'blacklists.add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] }
def dfTObedtool(df):
    """
    Transforms a pandas dataframe into a bedtool

    :param df: Pandas dataframe

    :returns: a bedtool
    """
    # Serialize the de-duplicated frame to BED-style text: one
    # tab-separated row per line.
    deduped = df.astype(str).drop_duplicates()
    rows = ["\t".join(row) for row in deduped.values.tolist()]
    bed_text = "\n".join(rows)
    return BedTool(bed_text, from_string=True)
Transforms a pandas dataframe into a bedtool :param df: Pandas dataframe :returns: a bedtool
def to_sql(self, frame, name, if_exists='fail', index=True,
           index_label=None, schema=None, chunksize=None, dtype=None,
           method=None):
    """
    Write records stored in a DataFrame to a SQL database.

    Parameters
    ----------
    frame : DataFrame
    name : string
        Name of SQL table.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, do nothing.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
    index : boolean, default True
        Write DataFrame index as a column.
    index_label : string or sequence, default None
        Column label for index column(s). If None is given (default) and
        `index` is True, then the index names are used.
        A sequence should be given if the DataFrame uses MultiIndex.
    schema : string, default None
        Name of SQL schema in database to write to (if database flavor
        supports this). If specified, this overwrites the default
        schema of the SQLDatabase object.
    chunksize : int, default None
        If not None, then rows will be written in batches of this size at a
        time. If None, all rows will be written at once.
    dtype : single type or dict of column name to SQL type, default None
        Optional specifying the datatype for columns. The SQL type should
        be a SQLAlchemy type. If all columns are of the same type, one
        single value can be used.
    method : {None', 'multi', callable}, default None
        Controls the SQL insertion clause used:

        * None : Uses standard SQL ``INSERT`` clause (one per row).
        * 'multi': Pass multiple values in a single ``INSERT`` clause.
        * callable with signature ``(pd_table, conn, keys, data_iter)``.

        Details and a sample callable implementation can be found in the
        section :ref:`insert method <io.sql.method>`.

        .. versionadded:: 0.24.0
    """
    # A single SQL type is broadcast over every column of the frame.
    if dtype and not is_dict_like(dtype):
        dtype = {col_name: dtype for col_name in frame}

    if dtype is not None:
        from sqlalchemy.types import to_instance, TypeEngine
        # Validate up front that every requested column type really is a
        # SQLAlchemy type, so failures happen before the table is created.
        for col, my_type in dtype.items():
            if not isinstance(to_instance(my_type), TypeEngine):
                raise ValueError('The type of {column} is not a '
                                 'SQLAlchemy type '.format(column=col))

    table = SQLTable(name, self, frame=frame, index=index,
                     if_exists=if_exists, index_label=index_label,
                     schema=schema, dtype=dtype)
    table.create()
    table.insert(chunksize, method=method)
    if (not name.isdigit() and not name.islower()):
        # check for potentially case sensitivity issues (GH7815)
        # Only check when name is not a number and name is not lower case
        engine = self.connectable.engine
        with self.connectable.connect() as conn:
            table_names = engine.table_names(
                schema=schema or self.meta.schema, connection=conn,
            )
        if name not in table_names:
            msg = (
                "The provided table name '{0}' is not found exactly as "
                "such in the database after writing the table, possibly "
                "due to case sensitivity issues. Consider using lower "
                "case table names."
            ).format(name)
            warnings.warn(msg, UserWarning)
Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string Name of SQL table. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If specified, this overwrites the default schema of the SQLDatabase object. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type. If all columns are of the same type, one single value can be used. method : {None', 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0
def _get_utxos(self, address, services, **modes):
    """
    Using the service fallback engine, get utxos from remote service.

    :param address: address whose unspent outputs are fetched.
    :param services: service list handed to the fallback engine.
    :param modes: extra keyword options forwarded to
        ``get_unspent_outputs`` unchanged.
    :returns: whatever ``get_unspent_outputs`` returns for this
        crypto/address pair.
    """
    return get_unspent_outputs(
        self.crypto, address, services=services,
        **modes
    )
Using the service fallback engine, get utxos from remote service.
def _get_options(self, style):
    """Return the keyword names registered for a particular style.

    :param style: the style whose keyword names are wanted
    """
    names = []
    for option in self.opt:
        names.append(self.opt[option][style]['name'])
    return names
Get the list of keywords for a particular style :param style: the style that the keywords are wanted
def save(self, filename, format_='fasta'):
    """
    Write the reads to C{filename} in the requested format.

    @param filename: Either a C{str} file name to save into (the file will
        be overwritten) or an open file descriptor (e.g., sys.stdout).
    @param format_: A C{str} format to save as, either 'fasta', 'fastq' or
        'fasta-ss'.
    @raise ValueError: if C{format_} is 'fastq' and a read with no quality
        is present, or if an unknown format is requested.
    @return: An C{int} giving the number of reads in C{self}.
    """
    format_ = format_.lower()
    count = 0
    if isinstance(filename, str):
        try:
            with open(filename, 'w') as fp:
                for read in self:
                    fp.write(read.toString(format_))
                    count += 1
        except ValueError:
            # Remove the partially written file so no truncated output is
            # left behind, then re-raise the formatting error.
            unlink(filename)
            raise
    else:
        # We have a file-like object.
        for read in self:
            filename.write(read.toString(format_))
            count += 1
    return count
Write the reads to C{filename} in the requested format. @param filename: Either a C{str} file name to save into (the file will be overwritten) or an open file descriptor (e.g., sys.stdout). @param format_: A C{str} format to save as, either 'fasta', 'fastq' or 'fasta-ss'. @raise ValueError: if C{format_} is 'fastq' and a read with no quality is present, or if an unknown format is requested. @return: An C{int} giving the number of reads in C{self}.
def send_email_with_callback_token(user, email_token, **kwargs):
    """
    Send an email containing the callback token to ``user``'s email.

    Passes silently without sending in test environment.

    :param user: the user the token email is addressed to; the address is
        read from the field named by PASSWORDLESS_USER_EMAIL_FIELD_NAME.
    :param email_token: token object whose ``key`` is rendered into both
        the plaintext and the HTML bodies.
    :returns: True when the mail was handed to the email backend, False on
        any failure (missing no-reply address or a send error).
    """
    try:
        if api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS:
            # Make sure we have a sending address before sending.

            # Get email subject and message
            email_subject = kwargs.get('email_subject',
                                       api_settings.PASSWORDLESS_EMAIL_SUBJECT)
            email_plaintext = kwargs.get('email_plaintext',
                                         api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE)
            email_html = kwargs.get('email_html',
                                    api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME)

            # Inject context if user specifies.
            context = inject_template_context({'callback_token': email_token.key, })
            html_message = loader.render_to_string(email_html, context,)
            send_mail(
                email_subject,
                # The plaintext template is expected to contain exactly one
                # %s placeholder for the token key.
                email_plaintext % email_token.key,
                api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS,
                [getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)],
                fail_silently=False,
                html_message=html_message,)

        else:
            logger.debug("Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS.")
            return False
        return True

    except Exception as e:
        # Best-effort: log and report failure rather than propagate.
        logger.debug("Failed to send token email to user: %d."
                     "Possibly no email on user object. Email entered was %s" %
                     (user.id, getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)))
        logger.debug(e)
        return False
Sends a Email to user.email. Passes silently without sending in test environment
def get_line(cls, parent, current_line, line_count):
    """
    Ask the user for a line number via the goto-line dialog.

    :param parent: Parent widget
    :param current_line: Current line number
    :param line_count: Number of lines in the current text document.

    :returns: tuple(line, status) where status is False if the dialog
        has been rejected.
    """
    dialog = DlgGotoLine(parent, current_line + 1, line_count)
    accepted = dialog.exec_() == dialog.Accepted
    if not accepted:
        # Rejected: keep the caller on the current line.
        return current_line, False
    # The dialog is 1-based; the editor is 0-based.
    return dialog.spinBox.value() - 1, True
Gets user selected line. :param parent: Parent widget :param current_line: Current line number :param line_count: Number of lines in the current text document. :returns: tuple(line, status) status is False if the dialog has been rejected.
def homoscedasticity(*args, alpha=.05):
    """Test equality of variance.

    Parameters
    ----------
    sample1, sample2,... : array_like
        Array of sample data. May be different lengths.

    Returns
    -------
    equal_var : boolean
        True if data have equal variance.
    p : float
        P-value.

    See Also
    --------
    normality : Test the univariate normality of one or more array(s).
    sphericity : Mauchly's test for sphericity.

    Notes
    -----
    The samples are first checked for normality with the Shapiro-Wilk
    test (via :func:`normality`). When every sample is normal, the
    homogeneity of variances is assessed with the Bartlett test; otherwise
    the Levene (1960) test is used, which is less sensitive to departures
    from normality.

    References
    ----------
    .. [1] Bartlett, M. S. (1937). Properties of sufficiency and
           statistical tests. Proc. R. Soc. Lond. A, 160(901), 268-282.

    .. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the
           equality of variances. Journal of the American Statistical
           Association, 69(346), 364-367.

    .. [3] NIST/SEMATECH e-Handbook of Statistical Methods,
           http://www.itl.nist.gov/div898/handbook/

    Examples
    --------
    Test the homoscedasticity of two arrays.

    >>> import numpy as np
    >>> from pingouin import homoscedasticity
    >>> np.random.seed(123)
    >>> # Scale = standard deviation of the distribution.
    >>> x = np.random.normal(loc=0, scale=1., size=100)
    >>> y = np.random.normal(loc=0, scale=0.8,size=100)
    >>> equal_var, p = homoscedasticity(x, y, alpha=.05)
    >>> print(round(np.var(x), 3), round(np.var(y), 3), equal_var, p)
    1.273 0.602 False 0.0
    """
    from scipy.stats import levene, bartlett
    if len(args) < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    # Pick the variance test based on whether every sample looks normal.
    normal, _ = normality(*args)
    all_normal = np.count_nonzero(normal) == normal.size
    if all_normal:
        _, p = bartlett(*args)
    else:
        # Data are not normally distributed: fall back to Levene.
        _, p = levene(*args)
    equal_var = bool(p > alpha)
    return equal_var, np.round(p, 3)
Test equality of variance. Parameters ---------- sample1, sample2,... : array_like Array of sample data. May be different lengths. Returns ------- equal_var : boolean True if data have equal variance. p : float P-value. See Also -------- normality : Test the univariate normality of one or more array(s). sphericity : Mauchly's test for sphericity. Notes ----- This function first tests if the data are normally distributed using the Shapiro-Wilk test. If yes, then the homogeneity of variances is measured using the Bartlett test. If the data are not normally distributed, the Levene (1960) test, which is less sensitive to departure from normality, is used. The **Bartlett** :math:`T` statistic is defined as: .. math:: T = \\frac{(N-k) \\ln{s^{2}_{p}} - \\sum_{i=1}^{k}(N_{i} - 1) \\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\\sum_{i=1}^{k}{1/(N_{i} - 1))} - 1/(N-k))} where :math:`s_i^2` is the variance of the :math:`i^{th}` group, :math:`N` is the total sample size, :math:`N_i` is the sample size of the :math:`i^{th}` group, :math:`k` is the number of groups, and :math:`s_p^2` is the pooled variance. The pooled variance is a weighted average of the group variances and is defined as: .. math:: s^{2}_{p} = \\sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k) The p-value is then computed using a chi-square distribution: .. math:: T \\sim \\chi^2(k-1) The **Levene** :math:`W` statistic is defined as: .. math:: W = \\frac{(N-k)} {(k-1)} \\frac{\\sum_{i=1}^{k}N_{i}(\\overline{Z}_{i.}-\\overline{Z})^{2} } {\\sum_{i=1}^{k}\\sum_{j=1}^{N_i}(Z_{ij}-\\overline{Z}_{i.})^{2} } where :math:`Z_{ij} = |Y_{ij} - median({Y}_{i.})|`, :math:`\\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and :math:`\\overline{Z}` is the grand mean of :math:`Z_{ij}`. The p-value is then computed using a F-distribution: .. math:: W \\sim F(k-1, N-k) References ---------- .. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical tests. Proc. R. Soc. Lond. A, 160(901), 268-282. .. [2] Brown, M. 
B., & Forsythe, A. B. (1974). Robust tests for the equality of variances. Journal of the American Statistical Association, 69(346), 364-367. .. [3] NIST/SEMATECH e-Handbook of Statistical Methods, http://www.itl.nist.gov/div898/handbook/ Examples -------- Test the homoscedasticity of two arrays. >>> import numpy as np >>> from pingouin import homoscedasticity >>> np.random.seed(123) >>> # Scale = standard deviation of the distribution. >>> x = np.random.normal(loc=0, scale=1., size=100) >>> y = np.random.normal(loc=0, scale=0.8,size=100) >>> equal_var, p = homoscedasticity(x, y, alpha=.05) >>> print(round(np.var(x), 3), round(np.var(y), 3), equal_var, p) 1.273 0.602 False 0.0
def make_choices(*args):
    """Convert a 1-D sequence into a 2-D sequence of tuples for use in a
    Django field choices attribute

    >>> make_choices(range(3))
    ((0, u'0'), (1, u'1'), (2, u'2'))
    >>> make_choices(dict(enumerate('abcd')))
    ((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd'))
    >>> make_choices('hello')
    (('hello', u'hello'),)
    >>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world'))
    True
    """
    if not args:
        return tuple()
    first = args[0]
    # A single list/tuple argument is flattened into positional arguments.
    if isinstance(first, (list, tuple)):
        return make_choices(*tuple(first))
    if isinstance(first, collections.Mapping):
        return tuple((key, unicode(label)) for (key, label) in first.iteritems())
    if all(isinstance(arg, (int, float, Decimal, basestring)) for arg in args):
        return tuple((arg, unicode(arg)) for arg in args)
    # NOTE(review): unsupported argument types fall through and implicitly
    # return None — confirm whether callers rely on this.
Convert a 1-D sequence into a 2-D sequence of tuples for use in a Django field choices attribute >>> make_choices(range(3)) ((0, u'0'), (1, u'1'), (2, u'2')) >>> make_choices(dict(enumerate('abcd'))) ((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd')) >>> make_choices('hello') (('hello', u'hello'),) >>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world')) True
def internal_only(view_func):
    """
    A view decorator which blocks access for requests coming through the
    load balancer.
    """
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        header = request.META.get("HTTP_X_FORWARDED_FOR", "")
        hops = header.split(",")
        # The nginx in the docker container adds the loadbalancer IP to the
        # list inside X-Forwarded-For, so if the list contains more than a
        # single item, we know that it went through our loadbalancer
        if len(hops) > 1:
            raise PermissionDenied()
        return view_func(request, *args, **kwargs)

    return wrapper
A view decorator which blocks access for requests coming through the load balancer.
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2nd numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc.  None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to
        0 disables this check.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]

    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    if sp.issparse(array):
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        array = np.array(array, dtype=dtype, order=order, copy=copy)

        if ensure_2d:
            if array.ndim == 1:
                if ensure_min_samples >= 2:
                    # NOTE(review): `estimator_name` is not defined in this
                    # excerpt (the `estimator` parameter mentioned in the
                    # docstring is also missing from the signature) — confirm
                    # against the full module.
                    raise ValueError("%s expects at least 2 samples provided "
                                     "in a 2 dimensional array-like input"
                                     % estimator_name)
                warnings.warn(
                    "Passing 1d arrays as data is deprecated in 0.17 and will"
                    "raise ValueError in 0.19. Reshape your data either using "
                    "X.reshape(-1, 1) if your data has a single feature or "
                    "X.reshape(1, -1) if it contains a single sample.",
                    DeprecationWarning)
            array = np.atleast_2d(array)
            # To ensure that array flags are maintained
            array = np.array(array, dtype=dtype, order=order, copy=copy)

        # make sure we acually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. expected <= 2."
                             % (array.ndim))
        if force_all_finite:
            _assert_all_finite(array)

    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required."
                             % (n_samples, shape_repr, ensure_min_samples))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required."
                             % (n_features, shape_repr, ensure_min_features))

    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = ("Data with input dtype %s was converted to %s."
               % (dtype_orig, array.dtype))
        warnings.warn(msg, DataConversionWarning)
    return array
Input validation on an array, list, sparse matrix or similar. By default, the input is converted to an at least 2nd numpy array. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters ---------- array : object Input object to check / convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. ensure_min_samples : int (default=1) Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. 
warn_on_dtype : boolean (default=False) Raise DataConversionWarning if the dtype of the input data structure does not match the requested dtype, causing a memory copy. estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X.
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True):
    """Parse *text* and yield tuples of word tokens.

    Tokens are assumed to be divided by any form of whitespace character.
    N-grams whose members are too short, are stopwords, or (optionally)
    are numeric, are filtered out.
    """
    if ngrams is None:
        ngrams = 1
    # Drop possessive "'s" and punctuation before tokenising.
    cleaned = re.sub("'s", '', text)  # Simple heuristic
    cleaned = re.sub(_re_punctuation, '', cleaned)
    candidates = re.findall(_re_token, cleaned.lower())
    for gram in get_ngrams(candidates, ngrams):
        keep = True
        for idx in range(len(gram)):
            token = gram[idx].strip(punctuation)
            gram[idx] = token
            if len(token) < min_length or token in stopwords:
                keep = False
                break
            if ignore_numeric and isnumeric(token):
                keep = False
                break
        if keep:
            yield tuple(gram)
Parses the given text and yields tokens which represent words within the given text. Tokens are assumed to be divided by any form of whitespace character.
def util_granulate_time_series(time_series, scale):
    """Extract a coarse-grained time series.

    Args:
        time_series: Time series
        scale: Scale factor

    Returns:
        Vector of coarse-grained time series with given scale factor
    """
    # Number of complete, non-overlapping windows of length `scale`.
    n_windows = int(len(time_series) // scale)
    usable = time_series[:n_windows * scale]
    # Average each window to produce one coarse-grained sample.
    windows = np.reshape(usable, (n_windows, scale))
    return np.mean(windows, axis=1)
Extract coarse-grained time series Args: time_series: Time series scale: Scale factor Returns: Vector of coarse-grained time series with given scale factor
def _integrate(cls, fn, emin, emax, params, scale=1.0, extra_params=None, npt=20):
    """Fast numerical integration method using mid-point rule.

    Integrates ``fn`` over ``[emin, emax]`` by evaluating it at ``npt``
    points spaced uniformly in log-energy and summing the
    value-times-bin-width contributions along the last axis.
    """
    # Append a trailing axis so the npt sample points broadcast against
    # any leading batch dimensions of emin/emax/params.
    emin = np.expand_dims(emin, -1)
    emax = np.expand_dims(emax, -1)
    # Deep-copy so the caller's parameter arrays are never reshaped.
    params = [np.expand_dims(p, -1) for p in copy.deepcopy(params)]

    # npt+1 bin edges, uniform in log space between emin and emax.
    frac_edges = np.linspace(0.0, 1.0, npt + 1)
    log_edges = np.log(emin) + frac_edges * (np.log(emax) - np.log(emin))
    # Log-space mid-point and linear width of every bin.
    log_mid = 0.5 * (log_edges[..., 1:] + log_edges[..., :-1])
    widths = np.exp(log_edges[..., 1:]) - np.exp(log_edges[..., :-1])

    dnde = fn(np.exp(log_mid), params, scale, extra_params)
    return np.sum(dnde * widths, axis=-1)
Fast numerical integration method using mid-point rule.
def remove_user_from_user_groups(self, id, **kwargs):  # noqa: E501
    """Removes specific user groups from the user  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_user_from_user_groups(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param list[str] body: The list of user groups that should be removed from the user
    :return: UserModel
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the (data, status, headers) tuple so callers get
    # just the deserialized model (or the async thread).
    kwargs['_return_http_data_only'] = True
    delegate = self.remove_user_from_user_groups_with_http_info
    if kwargs.get('async_req'):
        return delegate(id, **kwargs)  # noqa: E501
    (data) = delegate(id, **kwargs)  # noqa: E501
    return data
Removes specific user groups from the user # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove_user_from_user_groups(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param list[str] body: The list of user groups that should be removed from the user :return: UserModel If the method is called asynchronously, returns the request thread.
def get_centroids(data, k, labels, centroids, data_norms):
    """Recompute each centroid as the mean of the points assigned to it.

    Parameters
    ------------
    data: array-like, shape (m_samples, n_features)
        Input data points, one per row.
    k: integer, number of K clusters
    labels: array-like, shape (m_samples,)
        Cluster index assigned to each data point.
    centroids: array-like, shape (K, n_features)
        Current centroids; updated in place.
    data_norms: array-like
        Precomputed data norms, used only when a cluster is empty and a
        replacement center must be chosen.

    returns
    -------------
    centroids: array-like, shape (K, n_features)
        The updated centroids (the same array that was passed in).
    """
    for j in range(k):
        # Indices of the points currently assigned to cluster j.
        members = np.where(labels == j)[0]
        if members.size == 0:
            # BUGFIX: np.where returns a *tuple* of index arrays, so the
            # original `len(cluster_points)` measured the number of
            # dimensions (always >= 1) and this empty-cluster branch was
            # unreachable. Testing the index array's size restores the
            # intended re-seeding of empty clusters.
            _, centroids[j, :] = new_orthogonal_center(data, data_norms, centroids)
        else:
            centroids[j, :] = np.mean(data[members, :], axis=0)
    return centroids
Recompute each centroid as the mean of the points assigned to its cluster Parameters ------------ data: array-like, shape= (m_samples,n_samples) K: integer, number of K clusters centroids: array-like, shape=(K, n_samples) labels: array-like, shape (1,n_samples) returns ------------- centroids: array-like, shape (K,n_samples)
def revoke_admin_privileges(name, **client_args):
    '''
    Revoke cluster administration privileges from a user.

    name
        Name of the user from whom admin privileges will be revoked.

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.revoke_admin_privileges <name>
    '''
    # Build a client from the supplied connection arguments, then drop
    # the user's admin flag on the server.
    connection = _client(**client_args)
    connection.revoke_admin_privileges(name)
    return True
Revoke cluster administration privileges from a user. name Name of the user from whom admin privileges will be revoked. CLI Example: .. code-block:: bash salt '*' influxdb.revoke_admin_privileges <name>
def get_real_end_line(token):
    """Finds the line on which the token really ends.

    With pyyaml, scalar tokens often end on a next line.
    """
    real_end = token.end_mark.line + 1
    # Only scalar tokens carry the trailing whitespace we need to rewind over.
    if not isinstance(token, yaml.ScalarToken):
        return real_end
    # Walk backwards from the reported end, subtracting one line for
    # every newline that is part of trailing whitespace.
    idx = token.end_mark.pointer - 1
    lower_bound = token.start_mark.pointer - 1
    while idx >= lower_bound and token.end_mark.buffer[idx] in string.whitespace:
        if token.end_mark.buffer[idx] == '\n':
            real_end -= 1
        idx -= 1
    return real_end
Finds the line on which the token really ends. With pyyaml, scalar tokens often end on a next line.