positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def find_all(query: Query = None) -> List['ApiKey']:
    """Return every stored API key, optionally narrowed by *query*.

    :param query: optional query passed straight through to ``db.get_keys``
    :return: list of ``ApiKey`` objects built from the raw DB records
    """
    raw_records = db.get_keys(query)
    return [ApiKey.from_db(record) for record in raw_records]
List all API keys.
def is_in_ipython():
    """Return True when the code is running under IPython/Jupyter.

    Detection is heuristic: the parent program name (taken from the ``$_``
    environment variable) is checked for 'jupyter-notebook' or 'ipython',
    and the ``JPY_PARENT_PID`` variable (set for Jupyter kernels) is probed.
    The original ``if cond: return True else: return False`` is collapsed
    into returning the boolean expression directly.
    """
    program_name = os.path.basename(os.getenv('_', ''))
    return ('jupyter-notebook' in program_name   # jupyter-notebook
            or 'ipython' in program_name         # ipython
            or 'JPY_PARENT_PID' in os.environ)   # ipython-notebook kernel
Is the code running in the ipython environment (jupyter including)
def header(self, axis, x, level=0):
    """Return the label value for column/row *x* of the given *axis*.

    For a multi-level (hierarchical) axis — detected by the presence of a
    ``levels`` attribute — the value at position *level* inside the label
    tuple is returned; otherwise the flat label itself is returned.
    """
    target_axis = self._axis(axis)
    label = target_axis.values[x]
    if hasattr(target_axis, 'levels'):
        return label[level]
    return label
Return the values of the labels for the header of columns or rows. The value corresponds to the header of column or row x in the given level.
def gps_raw_int_encode(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible):
    '''Build a GPS_RAW_INT message: the RAW global position as reported by
    the GPS sensor (NOT the system's fused global position estimate — see
    GLOBAL_POSITION for that). Frame is right-handed, Z-axis up (GPS frame).

    time_usec          : Timestamp, microseconds since UNIX epoch or boot (uint64_t)
    fix_type           : See the GPS_FIX_TYPE enum (uint8_t)
    lat                : Latitude (WGS84), degrees * 1E7 (int32_t)
    lon                : Longitude (WGS84), degrees * 1E7 (int32_t)
    alt                : Altitude (AMSL, NOT WGS84), meters * 1000, positive up (int32_t)
    eph                : GPS HDOP (unitless); UINT16_MAX if unknown (uint16_t)
    epv                : GPS VDOP (unitless); UINT16_MAX if unknown (uint16_t)
    vel                : Ground speed, m/s * 100; UINT16_MAX if unknown (uint16_t)
    cog                : Course over ground, degrees * 100, 0.0..359.99; UINT16_MAX if unknown (uint16_t)
    satellites_visible : Number of satellites visible; 255 if unknown (uint8_t)
    '''
    fields = (time_usec, fix_type, lat, lon, alt,
              eph, epv, vel, cog, satellites_visible)
    return MAVLink_gps_raw_int_message(*fields)
The global position, as returned by the Global Positioning System (GPS). This is NOT the global position estimate of the system, but rather a RAW sensor value. See message GLOBAL_POSITION for the global position estimate. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : See the GPS_FIX_TYPE enum. (uint8_t) lat : Latitude (WGS84), in degrees * 1E7 (int32_t) lon : Longitude (WGS84), in degrees * 1E7 (int32_t) alt : Altitude (AMSL, NOT WGS84), in meters * 1000 (positive for up). Note that virtually all GPS modules provide the AMSL altitude in addition to the WGS84 altitude. (int32_t) eph : GPS HDOP horizontal dilution of position (unitless). If unknown, set to: UINT16_MAX (uint16_t) epv : GPS VDOP vertical dilution of position (unitless). If unknown, set to: UINT16_MAX (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
def load(self, *objs) -> "ReadTransaction":
    """Add one or more objects to be loaded in this transaction.

    At most 10 items can be loaded in the same transaction; every object
    is re-loaded each time :meth:`commit` is called.

    :param objs: objects to add to the set loaded by this transaction
    :return: this transaction, for chaining
    :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded
    """
    pending = []
    for obj in objs:
        pending.append(TxItem.new("get", obj))
    self._extend(pending)
    return self
Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
def crop_frequencies(self, low=None, high=None, copy=False):
    """Crop this `Spectrogram` to the specified frequencies

    Parameters
    ----------
    low : `float`
        lower frequency bound for cropped `Spectrogram`
    high : `float`
        upper frequency bound for cropped `Spectrogram`
    copy : `bool`
        if `False` return a view of the original data, otherwise
        create a fresh memory copy

    Returns
    -------
    spec : `Spectrogram`
        A new `Spectrogram` with a subset of data from the frequency
        axis
    """
    # normalise both bounds to Quantities in this spectrogram's y-unit
    if low is not None:
        low = units.Quantity(low, self._default_yunit)
    if high is not None:
        high = units.Quantity(high, self._default_yunit)
    # check low frequency: a bound equal to f0 is a no-op; below f0 cannot
    # crop anything, so warn and ignore it
    # NOTE(review): `low` is compared as a Quantity but `high` below uses
    # `.value` — presumably equivalent because units match; confirm
    if low is not None and low == self.f0:
        low = None
    elif low is not None and low < self.f0:
        warnings.warn('Spectrogram.crop_frequencies given low frequency '
                      'cutoff below f0 of the input Spectrogram. Low '
                      'frequency crop will have no effect.')
    # check high frequency: a bound at/above the top of the band is a no-op
    if high is not None and high.value == self.band[1]:
        high = None
    elif high is not None and high.value > self.band[1]:
        warnings.warn('Spectrogram.crop_frequencies given high frequency '
                      'cutoff above cutoff of the input Spectrogram. High '
                      'frequency crop will have no effect.')
    # find low index (offset from f0 in units of the frequency step df)
    if low is None:
        idx0 = None
    else:
        idx0 = int(float(low.value - self.f0.value) // self.df.value)
    # find high index
    if high is None:
        idx1 = None
    else:
        idx1 = int(float(high.value - self.f0.value) // self.df.value)
    # crop along the frequency (second) axis, copying only on request
    if copy:
        return self[:, idx0:idx1].copy()
    return self[:, idx0:idx1]
Crop this `Spectrogram` to the specified frequencies Parameters ---------- low : `float` lower frequency bound for cropped `Spectrogram` high : `float` upper frequency bound for cropped `Spectrogram` copy : `bool` if `False` return a view of the original data, otherwise create a fresh memory copy Returns ------- spec : `Spectrogram` A new `Spectrogram` with a subset of data from the frequency axis
def LMA(XY,ParIni):
    """
    input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple
    containing an initial guess (a, b, r) which is acquired by using an
    algebraic circle fit (TaubinSVD)
    output: a, b, r. a and b are the center of the fitting circle, and r is
    the radius

    % Geometric circle fit (minimizing orthogonal distances)
    % based on the Levenberg-Marquardt scheme in the
    % "algebraic parameters" A,B,C,D  with constraint B*B+C*C-4*A*D=1
    %  N. Chernov and C. Lesort, "Least squares fitting of circles",
    %  J. Math. Imag. Vision, Vol. 23, 239-251 (2005)
    """
    # LM damping-factor controls and convergence tolerances
    factorUp=10
    factorDown=0.04
    lambda0=0.01
    epsilon=0.000001
    IterMAX = 50
    AdjustMax = 20
    # coordinate shift used to escape degenerate (near-line) configurations
    Xshift=0
    Yshift=0
    dX=1
    dY=0;

    n = len(XY);      # number of data points

    # convert the initial (a, b, r) guess into the algebraic parameters
    # (A, F, Theta) used by the Chernov-Lesort parametrisation
    anew = ParIni[0] + Xshift
    bnew = ParIni[1] + Yshift
    Anew = old_div(1.,(2.*ParIni[2]))
    aabb = anew*anew + bnew*bnew
    Fnew = (aabb - ParIni[2]*ParIni[2])*Anew
    Tnew = numpy.arccos(old_div(-anew,numpy.sqrt(aabb)))
    if bnew > 0:
        Tnew = 2*numpy.pi - Tnew
    # initial variance of the fit (objective value)
    VarNew = VarCircle(XY,ParIni)

    VarLambda = lambda0;
    finish = 0;

    for it in range(0,IterMAX):
        # accept the previous step as the current state
        Aold = Anew
        Fold = Fnew
        Told = Tnew
        VarOld = VarNew

        # recover geometric (a, b, R) from the algebraic parameters
        H = numpy.sqrt(1+4*Aold*Fold);
        aold = -H*numpy.cos(Told)/(Aold+Aold) - Xshift;
        bold = -H*numpy.sin(Told)/(Aold+Aold) - Yshift;
        Rold = old_div(1,abs(Aold+Aold));

        DD = 1 + 4*Aold*Fold;
        D = numpy.sqrt(DD);
        CT = numpy.cos(Told);
        ST = numpy.sin(Told);

        # accumulate the (symmetric) normal-equation matrix H and the
        # gradient vector F over all data points
        H11=0; H12=0; H13=0; H22=0; H23=0; H33=0; F1=0; F2=0; F3=0;

        for i in range(0,n):
            Xi = XY[i,0] + Xshift;
            Yi = XY[i,1] + Yshift;
            Zi = Xi*Xi + Yi*Yi;
            Ui = Xi*CT + Yi*ST;
            Vi =-Xi*ST + Yi*CT;
            ADF = Aold*Zi + D*Ui + Fold;
            SQ = numpy.sqrt(4*Aold*ADF + 1);
            DEN = SQ + 1;
            Gi = 2*ADF/DEN;                    # orthogonal distance residual
            FACT = 2/DEN*(1 - Aold*Gi/SQ);
            # partial derivatives of Gi w.r.t. A, F, Theta
            DGDAi = FACT*(Zi + 2*Fold*Ui/D) - Gi*Gi/SQ;
            DGDFi = FACT*(2*Aold*Ui/D + 1);
            DGDTi = FACT*D*Vi;
            H11 = H11 + DGDAi*DGDAi;
            H12 = H12 + DGDAi*DGDFi;
            H13 = H13 + DGDAi*DGDTi;
            H22 = H22 + DGDFi*DGDFi;
            H23 = H23 + DGDFi*DGDTi;
            H33 = H33 + DGDTi*DGDTi;
            F1 = F1 + Gi*DGDAi;
            F2 = F2 + Gi*DGDFi;
            F3 = F3 + Gi*DGDTi;

        # inner loop: adjust the damping factor until a step is accepted
        for adjust in range(1,AdjustMax):
            # Cholesly decomposition of (H + lambda*I), then two triangular
            # solves to get the LM step (dA, dF, dT)
            G11 = numpy.sqrt(H11 + VarLambda);
            G12 = old_div(H12,G11)
            G13 = old_div(H13,G11)
            G22 = numpy.sqrt(H22 + VarLambda - G12*G12);
            G23 = old_div((H23 - G12*G13),G22);
            G33 = numpy.sqrt(H33 + VarLambda - G13*G13 - G23*G23);

            D1 = old_div(F1,G11);
            D2 = old_div((F2 - G12*D1),G22);
            D3 = old_div((F3 - G13*D1 - G23*D2),G33);

            dT = old_div(D3,G33);
            dF = old_div((D2 - G23*dT),G22)
            dA = old_div((D1 - G12*dF - G13*dT),G11)

            # updating the parameters
            Anew = Aold - dA;
            Fnew = Fold - dF;
            Tnew = Told - dT;

            # degenerate step with large damping: shift the coordinate
            # frame by (dX, dY) and rebuild the parameters there
            if 1+4*Anew*Fnew < epsilon and VarLambda>1:
                Xshift = Xshift + dX;
                Yshift = Yshift + dY;

                H = numpy.sqrt(1+4*Aold*Fold);
                aTemp = -H*numpy.cos(Told)/(Aold+Aold) + dX;
                bTemp = -H*numpy.sin(Told)/(Aold+Aold) + dY;
                rTemp = old_div(1,abs(Aold+Aold));

                Anew = old_div(1,(rTemp + rTemp));
                aabb = aTemp*aTemp + bTemp*bTemp;
                Fnew = (aabb - rTemp*rTemp)*Anew;
                Tnew = numpy.arccos(old_div(-aTemp,numpy.sqrt(aabb)));
                if bTemp > 0:
                    Tnew = 2*numpy.pi - Tnew;
                VarNew = VarOld;
                break;

            # constraint 1+4AF > 0 violated: increase damping and retry
            if 1+4*Anew*Fnew < epsilon:
                VarLambda = VarLambda * factorUp;
                continue;

            # evaluate the objective (sum of squared residuals) at the
            # proposed parameters
            DD = 1 + 4*Anew*Fnew;
            D = numpy.sqrt(DD);
            CT = numpy.cos(Tnew);
            ST = numpy.sin(Tnew);

            GG = 0;

            for i in range(0, n):
                Xi = XY[i,0] + Xshift;
                Yi = XY[i,1] + Yshift;
                Zi = Xi*Xi + Yi*Yi;
                Ui = Xi*CT + Yi*ST;
                ADF = Anew*Zi + D*Ui + Fnew;
                SQ = numpy.sqrt(4*Anew*ADF + 1);
                DEN = SQ + 1;
                Gi = 2*ADF/DEN;
                GG = GG + Gi*Gi;

            # unbiased variance estimate (3 fitted parameters)
            VarNew = old_div(GG,(n-3));

            H = numpy.sqrt(1+4*Anew*Fnew);
            anew = -H*numpy.cos(Tnew)/(Anew+Anew) - Xshift;
            bnew = -H*numpy.sin(Tnew)/(Anew+Anew) - Yshift;
            Rnew = old_div(1,abs(Anew+Anew));

            if VarNew <= VarOld:
                # improvement: check relative movement of the circle for
                # convergence, otherwise relax the damping and iterate
                progress = old_div((abs(anew-aold) + abs(bnew-bold) + abs(Rnew-Rold)),(Rnew+Rold));
                if progress < epsilon:
                    Aold = Anew;
                    Fold = Fnew;
                    Told = Tnew;
                    VarOld = VarNew # %#ok<NASGU>
                    finish = 1;
                    break;
                VarLambda = VarLambda * factorDown
                break
            else:
                # % no improvement
                VarLambda = VarLambda * factorUp;
                continue;

        if finish == 1:
            break

    # convert the final accepted algebraic parameters back to (a, b, r)
    H = numpy.sqrt(1+4*Aold*Fold);
    result_a = -H*numpy.cos(Told)/(Aold+Aold) - Xshift;
    result_b = -H*numpy.sin(Told)/(Aold+Aold) - Yshift;
    result_r = old_div(1,abs(Aold+Aold));
    return result_a, result_b, result_r
input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple containing an initial guess (a, b, r) which is acquired by using an algebraic circle fit (TaubinSVD) output: a, b, r. a and b are the center of the fitting circle, and r is the radius % Geometric circle fit (minimizing orthogonal distances) % based on the Levenberg-Marquardt scheme in the % "algebraic parameters" A,B,C,D with constraint B*B+C*C-4*A*D=1 % N. Chernov and C. Lesort, "Least squares fitting of circles", % J. Math. Imag. Vision, Vol. 23, 239-251 (2005)
def _forecast_model(self, beta, Z, h):
    """Create forecasted states and variances.

    Parameters
    ----------
    beta : np.ndarray
        Contains untransformed starting values for latent variables

    Returns
    ----------
    a : np.ndarray
        Forecasted states
    P : np.ndarray
        Variance of forecasted states
    """
    state_matrices = self._ss_matrices(beta)
    T, _, R, Q, H = state_matrices
    return dl_univariate_kalman_fcst(self.data, Z, H, T, Q, R, 0.0, h)
Creates forecasted states and variances Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- a : np.ndarray Forecasted states P : np.ndarray Variance of forecasted states
def field_date_to_json(self, day):
    """Convert a date (or parseable date string) to a [year, month, day] triple, or None."""
    if isinstance(day, six.string_types):
        day = parse_date(day)
    if not day:
        return None
    return [day.year, day.month, day.day]
Convert a date to a date triple.
def items(self) -> Tuple[Tuple[str, "Package"], ...]:  # type: ignore
    """
    Return a tuple of (name, `Package`) pairs for every available
    build dependency.
    """
    pairs = []
    for dep_name in self.build_dependencies:
        pairs.append((dep_name, self.build_dependencies.get(dep_name)))
    return tuple(pairs)
Return an iterable containing package name and corresponding `Package` instance that are available.
def createService(self, createServiceParameter, description=None, tags="Feature Service", snippet=None):
    """
    The Create Service operation allows users to create a hosted
    feature service: an empty hosted feature service is created from
    feature service metadata JSON.

    Inputs:
       createServiceParameter - create service object
       description - optional item description
       tags - item tags (defaults to "Feature Service")
       snippet - optional short summary
    """
    endpoint = "%s/createService" % self.location
    params = {
        "f": "json",
        "outputType": "featureService",
        "createParameters": json.dumps(createServiceParameter.value),
        "tags": tags,
    }
    if snippet is not None:
        params['snippet'] = snippet
    if description is not None:
        params['description'] = description
    res = self._post(url=endpoint,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
    # a successful creation reports the new item id under either key
    if 'id' in res or 'serviceItemId' in res:
        item_id = res['id'] if 'id' in res else res['serviceItemId']
        item_url = "%s/items/%s" % (self.location, item_id)
        return UserItem(url=item_url,
                        securityHandler=self._securityHandler,
                        proxy_url=self._proxy_url,
                        proxy_port=self._proxy_port)
    return res
The Create Service operation allows users to create a hosted feature service. You can use the API to create an empty hosted feature service from feature service metadata JSON. Inputs: createServiceParameter - create service object
def project_with_metadata(self, term_doc_mat, x_dim=0, y_dim=1):
    '''Project a term-document matrix's category metadata onto two dimensions.

    :param term_doc_mat: a TermDocMatrix
    :param x_dim: index of the first projection dimension
    :param y_dim: index of the second projection dimension
    :return: CategoryProjection
    '''
    metadata_corpus = self._get_category_metadata_corpus_and_replace_terms(term_doc_mat)
    return self._project_category_corpus(metadata_corpus, x_dim, y_dim)
Returns a projection of the :param term_doc_mat: a TermDocMatrix :return: CategoryProjection
def get_user_id(self, attributes):
    """
    For use when CAS_CREATE_USER_WITH_ID is True. Raises
    ImproperlyConfigured when a user_id cannot be found in the CAS
    attributes. This matters because Users must not be created with
    automatically assigned ids if primary keys are being kept in sync.
    """
    if not attributes:
        raise ImproperlyConfigured("CAS_CREATE_USER_WITH_ID is True, but "
                                   "no attributes were provided")
    found_id = attributes.get('id')
    if found_id:
        return found_id
    raise ImproperlyConfigured("CAS_CREATE_USER_WITH_ID is True, but "
                               "`'id'` is not part of attributes.")
For use when CAS_CREATE_USER_WITH_ID is True. Will raise ImproperlyConfigured exceptions when a user_id cannot be accessed. This is important because we shouldn't create Users with automatically assigned ids if we are trying to keep User primary key's in sync.
def uniformVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
    """
    Generate an RDD of vectors whose entries are i.i.d. samples drawn
    from the uniform distribution U(0.0, 1.0).

    :param sc: SparkContext used to create the RDD.
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD.
    :param seed: Seed for the RNG that generates the seed for the generator in each partition.
    :return: RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`.

    >>> import numpy as np
    >>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect())
    >>> mat.shape
    (10, 10)
    >>> mat.max() <= 1.0 and mat.min() >= 0.0
    True
    >>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions()
    4
    """
    call_args = (sc._jsc, numRows, numCols, numPartitions, seed)
    return callMLlibFunc("uniformVectorRDD", *call_args)
Generates an RDD comprised of vectors containing i.i.d. samples drawn from the uniform distribution U(0.0, 1.0). :param sc: SparkContext used to create the RDD. :param numRows: Number of Vectors in the RDD. :param numCols: Number of elements in each Vector. :param numPartitions: Number of partitions in the RDD. :param seed: Seed for the RNG that generates the seed for the generator in each partition. :return: RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`. >>> import numpy as np >>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect()) >>> mat.shape (10, 10) >>> mat.max() <= 1.0 and mat.min() >= 0.0 True >>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions() 4
def changed_files(self) -> typing.List[str]:
    """
    :return: changed files
    :rtype: list of str
    """
    diffs = self.repo.index.diff(None)
    result: typing.List[str] = [entry.a_path for entry in diffs]
    LOGGER.debug('changed files: %s', result)
    return result
:return: changed files :rtype: list of str
def get_upload_path(instance, filename):
    """Store the attachment under a dated path, keeping the original filename on the instance."""
    if not instance.name:
        instance.name = filename  # set original filename
    today = timezone.now().date()
    extension = filename.split('.')[-1]
    hashed_name = '{name}.{ext}'.format(name=uuid4().hex, ext=extension)
    return os.path.join('post_office_attachments',
                        str(today.year),
                        str(today.month),
                        str(today.day),
                        hashed_name)
Overriding to store the original filename
def lastmod(self, author):
    """Return the modification date of the author's most recently modified entry."""
    recent = (EntryModel.objects.published()
              .order_by('-modification_date')
              .filter(author=author)
              .only('modification_date'))
    return recent[0].modification_date
Return the last modification of the entry.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'grammars') and self.grammars is not None: _dict['grammars'] = [x._to_dict() for x in self.grammars] return _dict
Return a json dictionary representing this model.
def GetTransPosition(df, field, dic, refCol="transcript_id"):
    """
    Map a genomic position to a transcript position.

    :param df: a Pandas dataframe row (or mapping) with the position and id
    :param field: the header of the column containing the genomic position
    :param dic: a dictionary mapping each transcript id to its comma-joined
        genomic bases, eg. {'ENST23923910': '234,235,236,1021,..'}
    :param refCol: header of the reference column with IDs, eg. 'transcript_id'
    :returns: 1-based position on the transcript, or ``np.nan`` when the
        position or transcript cannot be resolved
    """
    try:
        gen = str(int(df[field]))
        transid = df[refCol]
        bases = dic.get(transid).split(",")
        # +1: transcript positions are 1-based
        bases = bases.index(gen) + 1
    except (KeyError, ValueError, TypeError, AttributeError):
        # KeyError: field/refCol missing; ValueError: non-numeric position or
        # position not on the transcript; TypeError/AttributeError: NaN value
        # or transcript id absent from `dic` (get() returned None)
        bases = np.nan
    return bases
Maps a genome position to a transcript position :param df: a Pandas dataframe :param field: the head of the column containing the genomic position :param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'} :param refCol: header of the reference column with IDs, eg. 'transcript_id' :returns: position on transcript
def patched_context(*module_names, **kwargs):
    """apply emulation patches only for a specific context

    :param module_names: var-args for the modules to patch, as in :func:`patch`
    :param local: if True, unpatching is done on every switch-out, and
        re-patching on every switch-in, so that they are only applied for the
        one coroutine
    :returns: a contextmanager that patches on ``__enter__`` and unpatches on
        ``__exit__``
    """
    local = kwargs.pop('local', False)
    if kwargs:
        # dict views are not indexable on Python 3; take one key via iter()
        raise TypeError("patched_context() got an unexpected keyword " +
                        "argument %r" % next(iter(kwargs)))
    patch(*module_names)
    if local:
        @scheduler.local_incoming_hook
        @scheduler.local_outgoing_hook
        def hook(direction, target):
            {1: patch, 2: unpatch}[direction](*module_names)
    # ensure cleanup runs even when the body raises, otherwise the patches
    # (and local hooks) would leak past the context
    try:
        yield
    finally:
        unpatch(*module_names)
        if local:
            scheduler.remove_local_incoming_hook(hook)
            scheduler.remove_local_outgoing_hook(hook)
apply emulation patches only for a specific context :param module_names: var-args for the modules to patch, as in :func:`patch` :param local: if True, unpatching is done on every switch-out, and re-patching on every switch-in, so that they are only applied for the one coroutine :returns: a contextmanager that patches on ``__enter__`` and unpatches on ``__exit__``
def get_portchannel_info_by_intf_output_lacp_oper_key(self, **kwargs):
    """Auto Generated Code: build the get_portchannel_info_by_intf RPC with
    output/lacp/oper-key populated, then dispatch it through the callback."""
    config = ET.Element("config")
    rpc = ET.Element("get_portchannel_info_by_intf")
    # generated code rebinds config to the RPC element itself
    config = rpc
    out_el = ET.SubElement(rpc, "output")
    lacp_el = ET.SubElement(out_el, "lacp")
    key_el = ET.SubElement(lacp_el, "oper-key")
    key_el.text = kwargs.pop('oper_key')
    dispatch = kwargs.pop('callback', self._callback)
    return dispatch(config)
Auto Generated Code
def get_activity_search_session_for_objective_bank(self, objective_bank_id=None):
    """Gets the OsidSession associated with the activity search service
    for the given objective bank.

    arg:    objectiveBankId (osid.id.Id): the Id of the objective bank
    return: (osid.learning.ActivitySearchSession) - an ActivitySearchSession
    raise:  NotFound - objectiveBankId not found
    raise:  NullArgument - objectiveBankId is null
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_activity_search() or
            supports_visible_federation() is false
    compliance: optional - This method must be implemented if
                supports_activity_search() and supports_visible_federation()
                are true.
    """
    # guard clauses: argument present and the service actually supported
    if not objective_bank_id:
        raise NullArgument
    if not self.supports_activity_search():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        return sessions.ActivitySearchSession(objective_bank_id,
                                              runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
Gets the OsidSession associated with the activity search service for the given objective bank. arg: objectiveBankId (osid.id.Id): the Id of the objective bank return: (osid.learning.ActivitySearchSession) - an ActivitySearchSession raise: NotFound - objectiveBankId not found raise: NullArgument - objectiveBankId is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_activity_search() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_activity_search() and supports_visible_federation() are true.
def edit_account_info(self, short_name=None, author_name=None, author_url=None):
    """ Update information about a Telegraph account.
    Pass only the parameters that you want to edit

    :param short_name: Account name, helps users with several
                       accounts remember which they are currently using.
                       Displayed to the user above the "Edit/Publish"
                       button on Telegra.ph, other users don't see this name
    :param author_name: Default author name used when creating new articles
    :param author_url: Default profile link, opened when users click on the
                       author's name below the title. Can be any link,
                       not necessarily to a Telegram profile or channels
    """
    values = {
        'short_name': short_name,
        'author_name': author_name,
        'author_url': author_url,
    }
    return self._telegraph.method('editAccountInfo', values=values)
Update information about a Telegraph account. Pass only the parameters that you want to edit :param short_name: Account name, helps users with several accounts remember which they are currently using. Displayed to the user above the "Edit/Publish" button on Telegra.ph, other users don't see this name :param author_name: Default author name used when creating new articles :param author_url: Default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channels
def split_overlays(self):
    "Deprecated method to split overlays inside the HoloMap."
    should_warn = util.config.future_deprecations
    if should_warn:
        self.param.warning("split_overlays is deprecated and is now "
                           "a private method.")
    return self._split_overlays()
Deprecated method to split overlays inside the HoloMap.
def libvlc_video_get_spu(p_mi):
    '''Get current video subtitle.
    @param p_mi: the media player.
    @return: the video subtitle selected, or -1 if none.
    '''
    # reuse the cached ctypes binding when present, otherwise create it
    cached = _Cfunctions.get('libvlc_video_get_spu', None)
    f = cached or _Cfunction('libvlc_video_get_spu', ((1,),), None,
                             ctypes.c_int, MediaPlayer)
    return f(p_mi)
Get current video subtitle. @param p_mi: the media player. @return: the video subtitle selected, or -1 if none.
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION, algorithm=None):
    '''Test if n is a prime number

    n - the integer to test
    rnd - the random number generator to use for the probabilistic primality
    algorithms,
    k - the number of iterations to use for the probabilistic primality
    algorithms,
    algorithm - the primality algorithm to use, default is Miller-Rabin. The
    gmpy implementation is used if gmpy is installed.

    Return value: True if n seems prime, False otherwise.
    '''
    if algorithm is None:
        algorithm = PRIME_ALGO
    if algorithm == 'gmpy-miller-rabin':
        if not gmpy:
            raise NotImplementedError
        return gmpy.is_prime(n, k)
    elif algorithm == 'miller-rabin':
        # miller rabin probability of primality is 1/4**k
        return miller_rabin(n, k, rnd=rnd)
    elif algorithm == 'solovay-strassen':
        # for jacobi it's 1/2**k, so double the iterations to match
        return randomized_primality_testing(n, rnd=rnd, k=k*2)
    else:
        raise NotImplementedError
Test if n is a prime number n - the integer to test rnd - the random number generator to use for the probabilistic primality algorithms, k - the number of iterations to use for the probabilistic primality algorithms, algorithm - the primality algorithm to use, default is Miller-Rabin. The gmpy implementation is used if gmpy is installed. Return value: True if n seems prime, False otherwise.
def FindAll(params, ctxt, scope, stream, coord, interp):
    """
    Convert the argument data into a set of hex bytes and search the current
    file for all occurrences of those bytes. data may be any of the basic
    types or an array of one of the types. If data is an array of signed
    bytes, it is assumed to be a null-terminated string. To search for an
    array of hex bytes, create an unsigned char array and fill it with the
    target value.

    If the type being searched for is a string, the matchcase and wholeworld
    arguments can be used to control the search (see Using Find for more
    information). method controls which search method is used from the
    following options:

      FINDMETHOD_NORMAL=0    - a normal search
      FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?'
      FINDMETHOD_REGEX=2     - when searching for strings use Regular Expressions

    wildcardMatchLength indicates the maximum number of characters a '*' can
    match when searching using wildcards. If the target is a float or double,
    the tolerance argument indicates that values that are only off by the
    tolerance value still match. If dir is 1 the find direction is down and
    if dir is 0 the find direction is up. start and size can be used to limit
    the area of the file that is searched. start is the starting byte address
    in the file where the search will begin and size is the number of bytes
    after start that will be searched. If size is zero, the file will be
    searched from start to the end of the file.

    The return value is a TFindResults structure. This structure contains a
    count variable indicating the number of matches, and a start array
    holding an array of starting positions, plus a size array which holds an
    array of target lengths. For example, use the following code to find all
    occurrences of the ASCII string "Test" in a file.
    """
    matches = list(_find_helper(params, ctxt, scope, stream, coord, interp))
    res = interp.get_types().TFindResults()
    res.count = len(matches)
    # list comprehensions replace the original list(map(lambda ...)) calls
    res.start = [m.start() + FIND_MATCHES_START_OFFSET for m in matches]
    res.size = [m.end() - m.start() for m in matches]
    return res
This function converts the argument data into a set of hex bytes and then searches the current file for all occurrences of those bytes. data may be any of the basic types or an array of one of the types. If data is an array of signed bytes, it is assumed to be a null-terminated string. To search for an array of hex bytes, create an unsigned char array and fill it with the target value. If the type being search for is a string, the matchcase and wholeworld arguments can be used to control the search (see Using Find for more information). method controls which search method is used from the following options: FINDMETHOD_NORMAL=0 - a normal search FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?' FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions wildcardMatchLength indicates the maximum number of characters a '*' can match when searching using wildcards. If the target is a float or double, the tolerance argument indicates that values that are only off by the tolerance value still match. If dir is 1 the find direction is down and if dir is 0 the find direction is up. start and size can be used to limit the area of the file that is searched. start is the starting byte address in the file where the search will begin and size is the number of bytes after start that will be searched. If size is zero, the file will be searched from start to the end of the file. The return value is a TFindResults structure. This structure contains a count variable indicating the number of matches, and a start array holding an array of starting positions, plus a size array which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file:
def vignetting(xy, f=100, alpha=0, rot=0, tilt=0, cx=50, cy=50):
    '''
    Vignetting equation using the KANG-WEISS-MODEL
    see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf

    f - focal length
    alpha - coefficient in the geometric vignetting factor
    tilt - tilt angle of a planar scene
    rot - rotation angle of a planar scene
    cx - image center, x
    cy - image center, y
    '''
    x, y = xy
    # distance to image center
    radius = ((x - cx) ** 2 + (y - cy) ** 2) ** 0.5
    # off-axis illumination factor
    off_axis = 1.0 / (1 + (radius / f) ** 2) ** 2
    # geometric factor (identity when alpha == 0)
    geometric = (1 - alpha * radius) if alpha != 0 else 1
    # tilt factor (identity when the scene is not tilted)
    tilt_term = tiltFactor((x, y), f, tilt, rot, (cy, cx)) if tilt != 0 else 1
    return off_axis * geometric * tilt_term
Vignetting equation using the KANG-WEISS-MODEL see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf f - focal length alpha - coefficient in the geometric vignetting factor tilt - tilt angle of a planar scene rot - rotation angle of a planar scene cx - image center, x cy - image center, y
def do_debug(self, arg):
    """debug code
    Enter a recursive debugger that steps through the code
    argument (which is an arbitrary expression or statement to be
    executed in the current environment).
    """
    # suspend this debugger's own tracing while the nested one runs
    self.settrace(False)
    globals = self.curframe.f_globals  # NOTE: shadows builtins.globals() inside this method
    locals = self.get_locals(self.curframe)
    # nested debugger shares this one's key bindings and I/O streams
    p = Pdb(self.completekey, self.stdin, self.stdout, debug=True)
    p.prompt = "(%s) " % self.prompt.strip()
    self.message("ENTERING RECURSIVE DEBUGGER")
    # sys.call_tracing re-enables tracing for just the nested run
    sys.call_tracing(p.run, (arg, globals, locals))
    self.message("LEAVING RECURSIVE DEBUGGER")
    # restore tracing for the outer debugger and remember the nested
    # debugger's last command so repeat-on-Enter keeps working
    self.settrace(True)
    self.lastcmd = p.lastcmd
debug code Enter a recursive debugger that steps through the code argument (which is an arbitrary expression or statement to be executed in the current environment).
def flush_buffer(self):
    '''Send any buffered tail output as one message and reset the buffer.'''
    if not self.buffer:
        return
    payload = ''.join(self.buffer)
    self.buffer.clear()
    self.send_message(payload)
    self.last_flush_date = datetime.datetime.now()
Flush the buffer of the tail
def _get_url(self, filename): """ Returns url for cdn.urbanterror.info to pass to _not_wget(). http://cdn.urbanterror.info/urt/<major_ver_without_.>/<release_num>-<magic_number>/q3ut4/<filename> """ return self.cdn_url.format(self.mver, self.relnum, filename)
Returns url for cdn.urbanterror.info to pass to _not_wget(). http://cdn.urbanterror.info/urt/<major_ver_without_.>/<release_num>-<magic_number>/q3ut4/<filename>
def directions(ctx, features, profile, alternatives,
               geometries, overview, steps, continue_straight,
               waypoint_snapping, annotations, language, output):
    """The Mapbox Directions API will show you how to get where
    you're going.

      mapbox directions "[0, 0]" "[1, 1]"

    An access token is required. See "mapbox --help".
    """
    # Token is resolved from the click context object set by the root CLI.
    access_token = (ctx.obj and ctx.obj.get("access_token")) or None
    service = mapbox.Directions(access_token=access_token)

    # The Directions SDK expects False to be
    # a bool, not a str.
    if overview == "False":
        overview = False

    # When using waypoint snapping, the
    # Directions SDK expects features to be
    # a list, not a generator.
    if waypoint_snapping is not None:
        features = list(features)

    # CLI passes annotations as a comma-separated string; SDK wants a list.
    if annotations:
        annotations = annotations.split(",")

    # click resolves "-" to stdout.
    stdout = click.open_file(output, "w")

    try:
        res = service.directions(
            features,
            profile=profile,
            alternatives=alternatives,
            geometries=geometries,
            overview=overview,
            steps=steps,
            continue_straight=continue_straight,
            waypoint_snapping=waypoint_snapping,
            annotations=annotations,
            language=language)
    except mapbox.errors.ValidationError as exc:
        # Surface SDK validation problems as CLI usage errors.
        raise click.BadParameter(str(exc))

    if res.status_code == 200:
        if geometries == "geojson":
            click.echo(json.dumps(res.geojson()), file=stdout)
        else:
            click.echo(res.text, file=stdout)
    else:
        raise MapboxCLIException(res.text.strip())
The Mapbox Directions API will show you how to get where you're going. mapbox directions "[0, 0]" "[1, 1]" An access token is required. See "mapbox --help".
def stop_if(expr, msg='', no_output=False):
    '''Abort the execution of the current step or loop by raising
    ``StopInputGroup`` with warning message `msg` when `expr` is True.
    When ``no_output`` is True, the step's output is discarded
    (``keep_output`` is its negation). Returns 0 when `expr` is falsy,
    so the call can be used as an expression.
    '''
    if expr:
        raise StopInputGroup(msg=msg, keep_output=not no_output)
    return 0
Abort the execution of the current step or loop and yield a warning message `msg` if `expr` is True
def save(self, filename):
    """Write this trigger to gracedb compatible xml format

    Parameters
    ----------
    filename: str
        Name of file to write to disk.
    """
    # Compress the output when the target name ends in .gz.
    gz = filename.endswith('.gz')
    ligolw_utils.write_filename(self.outdoc, filename, gz=gz)
Write this trigger to gracedb compatible xml format Parameters ---------- filename: str Name of file to write to disk.
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
    """Generates an ivy xml with all jars marked as intransitive using the all conflict manager.

    :param jars: jar dependencies to fetch
    :param ivyxml: path of the ivy xml file to write
    :param confs: ivy configurations; empty entries and 'default' are dropped
    :param resolve_hash_name: used as the module name of the synthetic ivy module
    """
    org = IvyUtils.INTERNAL_ORG_NAME
    name = resolve_hash_name
    extra_configurations = [conf for conf in confs if conf and conf != 'default']

    # Use org name _and_ rev so that we can have dependencies with different versions. This will
    # allow for batching fetching if we want to do that.
    jars_by_key = OrderedDict()
    for jar in jars:
        jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)

    # One dependency entry per (org, name, rev) group.
    dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]

    template_data = TemplateData(org=org,
                                 module=name,
                                 extra_configurations=extra_configurations,
                                 dependencies=dependencies)

    template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
    cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
Generates an ivy xml with all jars marked as intransitive using the all conflict manager.
def get_default_reference(self, method):
    """
    Return the default reference registered for a method.

    :arg method: name of a method
    :type method: :class:`str`

    :return: reference, or ``None`` when no default is registered
    :rtype: :class:`Reference <pyxray.descriptor.Reference>` or :class:`str`
    :raises ValueError: if *method* is not an available method
    """
    if method in self._available_methods:
        return self._default_references.get(method)
    raise ValueError('Unknown method: {0}'.format(method))
Returns the default reference for a method. :arg method: name of a method :type method: :class:`str` :return: reference :rtype: :class:`Reference <pyxray.descriptor.Reference>` or :class:`str`
def parse(self):
    """Parse the CNML document at the configured ``url`` and store the
    resulting node list in ``self.parsed_data``."""
    url = self.config.get('url')
    # CNMLParser takes the URL/path; presumably fetches and parses the
    # CNML document -- confirm against the CNML library.
    self.cnml = CNMLParser(url)
    self.parsed_data = self.cnml.getNodes()
parse data
def vm_info(name, call=None):
    '''
    Retrieves information for a given virtual machine. A VM name must be supplied.

    .. versionadded:: 2016.3.0

    name
        The name of the VM for which to gather information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a vm_info my-vm
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The vm_info action must be called with -a or --action.'
        )

    server, user, password = _get_xml_rpc()
    # OpenNebula XML-RPC sessions authenticate with "user:password".
    auth = ':'.join([user, password])
    vm_id = int(get_vm_id(kwargs={'name': name}))
    response = server.one.vm.info(auth, vm_id)

    # Response is a sequence whose first element is the success flag; on
    # failure the second element is the error message.
    if response[0] is False:
        return response[1]
    else:
        info = {}
        tree = _get_xml(response[1])
        info[tree.find('NAME').text] = _xml_to_dict(tree)
        return info
Retrieves information for a given virtual machine. A VM name must be supplied. .. versionadded:: 2016.3.0 name The name of the VM for which to gather information. CLI Example: .. code-block:: bash salt-cloud -a vm_info my-vm
def sources(self):
    """*The results of the search returned as a python list of dictionaries*

    **Usage:**

    .. code-block:: python

        sources = tns.sources
    """
    # Return fresh dict copies so callers cannot mutate the cached rows.
    return [dict(row) for row in self.sourceResultsList]
*The results of the search returned as a python list of dictionaries* **Usage:** .. code-block:: python sources = tns.sources
def GetExportedResult(self, original_result, converter, metadata=None, token=None):
    """Converts original result via given converter.

    Raises:
      ExportError: if the converter yields zero or more than one result.
    """
    # Convert() yields results; a fresh ExportedMetadata is used when none
    # is supplied.
    exported_results = list(
        converter.Convert(
            metadata or ExportedMetadata(), original_result, token=token))

    if not exported_results:
        raise ExportError("Got 0 exported result when a single one "
                          "was expected.")

    if len(exported_results) > 1:
        raise ExportError("Got > 1 exported results when a single "
                          "one was expected, seems like a logical bug.")

    return exported_results[0]
Converts original result via given converter.
async def retry_create_artifact(*args, **kwargs):
    """Retry create_artifact() calls.

    Args:
        *args: the args to pass on to create_artifact
        **kwargs: the args to pass on to create_artifact
    """
    # Retry on scriptworker's own retry exception as well as any aiohttp
    # client/network failure; retry_async handles the backoff policy.
    await retry_async(
        create_artifact,
        retry_exceptions=(
            ScriptWorkerRetryException,
            aiohttp.ClientError
        ),
        args=args,
        kwargs=kwargs
    )
Retry create_artifact() calls. Args: *args: the args to pass on to create_artifact **kwargs: the args to pass on to create_artifact
def create_history_model(self, model, inherited):
    """
    Creates a historical model to associate with the model provided.

    The historical model copies the tracked model's fields, adds the
    extra history bookkeeping fields, and is placed in an appropriate
    module so Django's app registry can find it.
    """
    attrs = {
        "__module__": self.module,
        "_history_excluded_fields": self.excluded_fields,
    }

    app_module = "%s.models" % model._meta.app_label

    if inherited:
        # inherited use models module
        attrs["__module__"] = model.__module__
    elif model.__module__ != self.module:
        # registered under different app
        attrs["__module__"] = self.module
    elif app_module != self.module:
        # Abuse an internal API because the app registry is loading.
        app = apps.app_configs[model._meta.app_label]
        models_module = app.name
        attrs["__module__"] = models_module

    fields = self.copy_fields(model)
    attrs.update(fields)
    attrs.update(self.get_extra_fields(model, fields))
    # type in python2 wants str as a first argument
    attrs.update(Meta=type(str("Meta"), (), self.get_meta_options(model)))

    if self.table_name is not None:
        attrs["Meta"].db_table = self.table_name

    # Set as the default then check for overrides
    name = self.get_history_model_name(model)

    # Track which concrete model this history model shadows.
    registered_models[model._meta.db_table] = model
    return python_2_unicode_compatible(type(str(name), self.bases, attrs))
Creates a historical model to associate with the model provided.
def promote_loops(loops, index, shared):
    """Turn loops into "objects" that can be processed normally

    Each ``loop`` (an iterable of record addresses forming a reference
    cycle) is collapsed into a synthetic LOOP_TYPE record in ``index``;
    ``shared`` maps an address to the addresses that refer to it.
    """
    for loop in loops:
        loop = list(loop)
        members = [index[addr] for addr in loop]
        # Addresses outside the loop that reference any member of it.
        external_parents = list(set([
            addr for addr in
            sum([shared.get(addr, []) for addr in loop], [])
            if addr not in loop
        ]))
        if external_parents:
            if len(external_parents) == 1:
                # potentially a loop that's been looped...
                parent = index.get(external_parents[0])
                if parent['type'] == LOOP_TYPE:
                    continue
            # we haven't already been looped...
            loop_addr = new_address(index)
            shared[loop_addr] = external_parents
            loop_record = index[loop_addr] = {
                'address': loop_addr,
                'refs': loop,
                'parents': external_parents,
                'type': LOOP_TYPE,
                'size': 0,
            }
            for member in members:
                # member's references must *not* point to loop...
                member['refs'] = [
                    ref for ref in member['refs'] if ref not in loop
                ]
                # member's parents are *just* the loop
                member['parents'][:] = [loop_addr]
            # each referent to loop holds a single reference to the loop rather than many to children
            for parent in external_parents:
                parent = index[parent]
                for member in members:
                    rewrite_references(parent['refs'], member['address'], None)
                parent['refs'].append(loop_addr)
Turn loops into "objects" that can be processed normally
def validate(self, handler):
    """Validate the plugin, each plugin must have the following:
        1) The worker class must have an execute method: execute(self, input_data).
        2) The worker class must have a dependencies list (even if it's empty).
        3) The file must have a top level test() method.

    Args:
        handler: the loaded plugin.

    Returns:
        dict with 'class' (the worker class) and 'test' (the module-level
        test method) on success, None when validation fails.
    """
    # Check for the test method first
    test_method = self.plugin_test_validation(handler)
    if not test_method:
        return None

    # Here we iterate through the classes found in the module and pick
    # the first one that satisfies the validation
    for _name, plugin_class in inspect.getmembers(handler, inspect.isclass):
        if self.plugin_class_validation(plugin_class):
            return {'class': plugin_class, 'test': test_method}

    # If we're here the plugin didn't pass validation.
    # NOTE: converted from Python 2 print statements so the module also
    # parses under Python 3; output text is unchanged.
    print('Failure for plugin: %s' % (handler.__name__))
    print('Validation Error: Worker class is required to have a dependencies list and an execute method')
    return None
Validate the plugin, each plugin must have the following: 1) The worker class must have an execute method: execute(self, input_data). 2) The worker class must have a dependencies list (even if it's empty). 3) The file must have a top level test() method. Args: handler: the loaded plugin.
def get_rrsets_by_type_owner(self, zone_name, rtype, owner_name, q=None, **kwargs):
    """Returns the list of RRSets in the specified zone of the specified type.

    Arguments:
    zone_name -- The name of the zone.
    rtype -- The type of the RRSets. This can be numeric (1) or
             if a well-known name is defined for the type (A), you can use it instead.
    owner_name -- The owner name for the RRSet.
                  If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
                  If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)

    Keyword Arguments:
    q -- The search parameters, in a dict. Valid keys are:
         ttl - must match the TTL for the rrset
         value - substring match of the first BIND field value
    sort -- The sort column used to order the list. Valid values for the sort field are:
            TTL
            TYPE
    reverse -- Whether the list is ascending(False) or descending(True)
    offset -- The position in the list for the first returned element(0 based)
    limit -- The maximum number of rows to be returned.
    """
    uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
    # build_params merges the search dict q with the paging/sorting kwargs.
    params = build_params(q, kwargs)
    return self.rest_api_connection.get(uri, params)
Returns the list of RRSets in the specified zone of the specified type. Arguments: zone_name -- The name of the zone. rtype -- The type of the RRSets. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) Keyword Arguments: q -- The search parameters, in a dict. Valid keys are: ttl - must match the TTL for the rrset value - substring match of the first BIND field value sort -- The sort column used to order the list. Valid values for the sort field are: TTL TYPE reverse -- Whether the list is ascending(False) or descending(True) offset -- The position in the list for the first returned element(0 based) limit -- The maximum number of rows to be returned.
def create_pull(self, *args, **kwds):
    """
    :calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
    :param title: string
    :param body: string
    :param issue: :class:`github.Issue.Issue`
    :param base: string
    :param head: string
    :param maintainer_can_modify: bool
    :rtype: :class:`github.PullRequest.PullRequest`
    """
    # Arity-based dispatch between the two private overloads.
    # NOTE(review): presumably >= 4 arguments means the title/body/base/head
    # form and fewer means the issue-based form -- confirm against
    # __create_pull_1/__create_pull_2 signatures.
    if len(args) + len(kwds) >= 4:
        return self.__create_pull_1(*args, **kwds)
    else:
        return self.__create_pull_2(*args, **kwds)
:calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_ :param title: string :param body: string :param issue: :class:`github.Issue.Issue` :param base: string :param head: string :param maintainer_can_modify: bool :rtype: :class:`github.PullRequest.PullRequest`
def EL_Si_module():
    '''
    returns angular dependent EL emissivity of a PV module
    calculated of nanmedian(persp-corrected EL module/reference module)

    published in K. Bedrich: Quantitative Electroluminescence Measurement
    on PV devices, PhD Thesis, 2017
    '''
    # Columns: viewing angle [deg], relative emissivity.
    table = np.array([
        [2.5, 1.00281],
        [7.5, 1.00238],
        [12.5, 1.00174],
        [17.5, 1.00204],
        [22.5, 1.00054],
        [27.5, 0.998255],
        [32.5, 0.995351],
        [37.5, 0.991246],
        [42.5, 0.985304],
        [47.5, 0.975338],
        [52.5, 0.960455],
        [57.5, 0.937544],
        [62.5, 0.900607],
        [67.5, 0.844636],
        [72.5, 0.735028],
        [77.5, 0.57492],
        [82.5, 0.263214],
        [87.5, 0.123062],
    ])
    angles = table[:, 0]
    # Emissivity cannot exceed 1; cap measurement noise above unity.
    vals = np.minimum(table[:, 1], 1.0)
    return angles, vals
returns angular dependent EL emissivity of a PV module calculated of nanmedian(persp-corrected EL module/reference module) published in K. Bedrich: Quantitative Electroluminescence Measurement on PV devices PhD Thesis, 2017
def convert_date(obj):
    """Returns a DATE column as a date object:

      >>> convert_date('2007-02-26')
      datetime.date(2007, 2, 26)

    Illegal values are returned as None:

      >>> convert_date('2007-02-31') is None
      True
      >>> convert_date('0000-00-00') is None
      True
    """
    try:
        # 'YYYY-MM-DD' -> date(YYYY, MM, DD). datetime.date raises
        # ValueError for out-of-range components (month 0, Feb 31, ...),
        # which is translated into None.
        return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
    except ValueError:
        return None
Returns a DATE column as a date object: >>> date_or_None('2007-02-26') datetime.date(2007, 2, 26) Illegal values are returned as None: >>> date_or_None('2007-02-31') is None True >>> date_or_None('0000-00-00') is None True
def get_vlan_brief_output_vlan_vlan_name(self, **kwargs):
    """Auto Generated Code

    Builds the XML payload for a get-vlan-brief request keyed by
    'vlan_id' and carrying 'vlan_name', then hands it to the callback
    (default: self._callback).
    """
    config = ET.Element("config")
    get_vlan_brief = ET.Element("get_vlan_brief")
    # NOTE: generated code -- the initial "config" element above is
    # discarded by this rebinding.
    config = get_vlan_brief
    output = ET.SubElement(get_vlan_brief, "output")
    vlan = ET.SubElement(output, "vlan")
    vlan_id_key = ET.SubElement(vlan, "vlan-id")
    vlan_id_key.text = kwargs.pop('vlan_id')
    vlan_name = ET.SubElement(vlan, "vlan-name")
    vlan_name.text = kwargs.pop('vlan_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def get_assessment_part_form_for_update(self, assessment_part_id):
    """Gets the assessment part form for updating an existing assessment part.

    A new assessment part form should be requested for each update
    transaction.

    arg:    assessment_part_id (osid.id.Id): the ``Id`` of the
            ``AssessmentPart``
    return: (osid.assessment.authoring.AssessmentPartForm) - the
            assessment part form
    raise:  NotFound - ``assessment_part_id`` is not found
    raise:  NullArgument - ``assessment_part_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*

    """
    collection = JSONClientValidated('assessment_authoring',
                                     collection='AssessmentPart',
                                     runtime=self._runtime)
    if not isinstance(assessment_part_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    if (assessment_part_id.get_identifier_namespace() != 'assessment_authoring.AssessmentPart' or
            assessment_part_id.get_authority() != self._authority):
        raise errors.InvalidArgument()
    result = collection.find_one({'_id': ObjectId(assessment_part_id.get_identifier())})

    mdata = {}
    if result['assessmentPartId']:
        # This part has a parent part: when the parent aggregates more than
        # one child, the 'sequestered' flag must not be editable.
        parent_part_id = Id(result['assessmentPartId'])
        mgr = self._get_provider_manager('ASSESSMENT_AUTHORING', local=True)
        lookup_session = mgr.get_assessment_part_lookup_session_for_bank(self._catalog_id, proxy=self._proxy)
        if lookup_session.get_assessment_parts_for_assessment_part(parent_part_id).available() > 1:
            # BUGFIX: previously assigned into mdata['sequestered'] without
            # creating the nested dict first, raising KeyError.
            mdata['sequestered'] = {
                'is_read_only': True,
                'is_required': True,
            }
    obj_form = objects.AssessmentPartForm(osid_object_map=result,
                                          runtime=self._runtime,
                                          proxy=self._proxy,
                                          mdata=mdata)
    self._forms[obj_form.get_id().get_identifier()] = not UPDATED
    return obj_form
Gets the assessment part form for updating an existing assessment part. A new assessment part form should be requested for each update transaction. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` return: (osid.assessment.authoring.AssessmentPartForm) - the assessment part form raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
def send_chat_action(self, action, to):
    """
    Use this method when you need to tell the user that something is
    happening on the bot's side. The status is set for 5 seconds or
    less (when a message arrives from your bot, Telegram clients clear
    its typing status).
    """
    # Telegram Bot API endpoint: sendChatAction with chat_id + action.
    return self._get('sendChatAction', {'chat_id': to, 'action': action})
Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status).
def get_receiver(self, receiver=None):
    """
    Returns a single receiver or a dictionary of receivers for this plugin.
    """
    # Delegates to the application's signal registry, scoped to this
    # plugin. NOTE: the double-underscore attribute is name-mangled to the
    # defining class.
    return self.__app.signals.get_receiver(receiver, self._plugin)
Returns a single receiver or a dictionary of receivers for this plugin.
def dispatch(self, receiver):
    ''' Dispatch handling of this event to a receiver.

    This method will invoke ``receiver._column_data_changed`` if
    it exists.

    '''
    # Run the base-class generic dispatch first.
    super(ColumnDataChangedEvent, self).dispatch(receiver)
    # Then the event-specific hook, when the receiver defines one.
    if hasattr(receiver, '_column_data_changed'):
        receiver._column_data_changed(self)
Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.
def import_classes(names, src, dst):
    """Import classes in package from their implementation modules."""
    for name in names:
        # The implementation module is the lowercased class name, e.g.
        # pygsp.<src>.graph is expected to define Graph.
        module = importlib.import_module('pygsp.' + src + '.' + name.lower())
        # Re-export the class on the destination subpackage.
        setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
Import classes in package from their implementation modules.
def _build_fluent_table(self): '''Builds the fluent table for each RDDL pvariable.''' self.fluent_table = collections.OrderedDict() for name, size in zip(self.domain.non_fluent_ordering, self.non_fluent_size): non_fluent = self.domain.non_fluents[name] self.fluent_table[name] = (non_fluent, size) for name, size in zip(self.domain.state_fluent_ordering, self.state_size): fluent = self.domain.state_fluents[name] self.fluent_table[name] = (fluent, size) for name, size in zip(self.domain.action_fluent_ordering, self.action_size): fluent = self.domain.action_fluents[name] self.fluent_table[name] = (fluent, size) for name, size in zip(self.domain.interm_fluent_ordering, self.interm_size): fluent = self.domain.intermediate_fluents[name] self.fluent_table[name] = (fluent, size)
Builds the fluent table for each RDDL pvariable.
def _gates_from_cli(opts, gate_opt): """Parses the given `gate_opt` into something understandable by `strain.gate_data`. """ gates = {} if getattr(opts, gate_opt) is None: return gates for gate in getattr(opts, gate_opt): try: ifo, central_time, half_dur, taper_dur = gate.split(':') central_time = float(central_time) half_dur = float(half_dur) taper_dur = float(taper_dur) except ValueError: raise ValueError("--gate {} not formatted correctly; ".format( gate) + "see help") try: gates[ifo].append((central_time, half_dur, taper_dur)) except KeyError: gates[ifo] = [(central_time, half_dur, taper_dur)] return gates
Parses the given `gate_opt` into something understandable by `strain.gate_data`.
def add(self, item):
    """
    Add an item to the work queue.

    :param item: The work item to add.  An item may be of any
                 type; however, if it is not hashable, then the
                 work queue must either be initialized with
                 ``unique`` set to ``False``, or a ``key``
                 callable must have been provided.
    """
    if self._unique:
        # Deduplicate: derive the key (identity unless a key callable was
        # supplied) and ignore items already accepted.
        dedup_key = self._key(item) if self._key else item
        if dedup_key in self._seen:
            return
        self._seen.add(dedup_key)

    self._work.append(item)
    # Running total of items ever accepted into the queue.
    self._count += 1
Add an item to the work queue. :param item: The work item to add. An item may be of any type; however, if it is not hashable, then the work queue must either be initialized with ``unique`` set to ``False``, or a ``key`` callable must have been provided.
def get_field_min_max(self, name, **query_dict):
    """Returns the minimum and maximum values of the specified field.
    This requires two search calls to the service, each requesting a
    single value of a single field sorted ascending resp. descending.

    @param name(string) Name of the field
    @param q(string) Query identifying range of records for min and max values
    @param fq(string) Filter restricting range of query
    @return list of [min, max]
    """
    # Common parameters: one row, only the requested field.
    params = dict(query_dict, rows=1, fl=name)
    try:
        extremes = []
        for direction in ('asc', 'desc'):
            params['sort'] = '%s %s' % (name, direction)
            resp = self._post_query(**params)
            extremes.append(resp['response']['docs'][0][name])
        return tuple(extremes)
    except Exception:
        self._log.exception('Exception')
        raise
Returns the minimum and maximum values of the specified field. This requires two search calls to the service, each requesting a single value of a single field. @param name(string) Name of the field @param q(string) Query identifying range of records for min and max values @param fq(string) Filter restricting range of query @return list of [min, max]
def clean_cache(self):
    """
    Drop entries older than now from both the valid and the invalid
    cache, because such entries can never be used in the future ;)

    :return: None
    """
    cutoff = int(time.time())
    # Same expiry policy for both caches; collect first, then delete,
    # because mutating a dict while iterating it is illegal.
    for cache in (self.cache, self.invalid_cache):
        expired = [timestamp for timestamp in cache if timestamp < cutoff]
        for timestamp in expired:
            del cache[timestamp]
Clean cache with entries older than now because not used in future ;) :return: None
def set_context_json(self, jsonquery):
    '''
    Get a json parameter and rebuild the context back to a dictionary
    (probably kwargs).

    :param jsonquery: dict with the JSON request parameters
    :return: normalised context dict
    :raises IOError: when *jsonquery* is not a dictionary
    '''
    # Make sure we are getting dicts (isinstance instead of type() so dict
    # subclasses such as OrderedDict are accepted as well).
    if not isinstance(jsonquery, dict):
        raise IOError("set_json_context() method can be called only with dictionaries, you gave me a '{}'".format(type(jsonquery)))

    # Set we will answer json to this request
    self.json = True

    # Transfer plain pass-through keys when present.
    newget = {}
    for key in ['search', 'search_filter_button', 'page', 'pages_to_bring',
                'rowsperpage', 'filters', 'year', 'month', 'day', 'hour',
                'minute', 'second']:
        if key in jsonquery:
            newget[key] = jsonquery[key]

    # Transform ordering from {field: signed_priority} into an ordered
    # list of {field: 'asc'|'desc'}, sorted by priority magnitude.
    json_ordering = jsonquery.get('ordering', None)
    if json_ordering:
        ordering = [{key: json_ordering[key]} for key in json_ordering]
        ordering = sorted(ordering, key=lambda entry: abs(list(entry.values())[0]))

        newget['ordering'] = []
        for orderer in ordering:
            key = list(orderer.keys())[0]
            value = orderer[key]
            if value > 0:
                newget['ordering'].append({key: 'asc'})
            elif value < 0:
                newget['ordering'].append({key: 'desc'})
            # A zero priority means the field is not ordered at all.

    # Optional list/element identifiers (None when absent).
    newget['listid'] = jsonquery.get("listid", None)
    newget['elementid'] = jsonquery.get("elementid", None)

    # Return new get
    return newget
Get a json parameter and rebuild the context back to a dictionary (probably kwargs)
def ulid_to_binary(ulid):
    """
    Convert an ULID to its binary representation.

    :param ulid: An ULID (either as UUID, base32 ULID or binary)
    :return: Bytestring of length 16
    :rtype: bytes
    :raises InvalidULID: for anything that is not one of the three forms
    """
    # UUIDs already carry their 16 raw bytes.
    if isinstance(ulid, uuid.UUID):
        return ulid.bytes
    # 26-character strings are Crockford base32 encoded ULIDs.
    if isinstance(ulid, (text_type, bytes)) and len(ulid) == 26:
        return decode_ulid_base32(ulid)
    # 16-byte buffers are already the binary form.
    if isinstance(ulid, (bytes, bytearray)) and len(ulid) == 16:
        return ulid
    raise InvalidULID('can not convert ulid %r to binary' % ulid)
Convert an ULID to its binary representation. :param ulid: An ULID (either as UUID, base32 ULID or binary) :return: Bytestring of length 16 :rtype: bytes
def _wait_for_status(status_type, object_id, status=None, timeout=500, quiet=True):
    '''
    Wait for a certain status from Packet.

    status_type
        device or volume
    object_id
        The ID of the Packet device or volume to wait on. Required.
    status
        The status to wait for.
    timeout
        The amount of time to wait for a status to update.
    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    '''
    if status is None:
        status = "ok"

    interval = 5
    iterations = int(timeout / interval)

    vm_ = get_configured_provider()
    manager = packet.Manager(auth_token=vm_['token'])

    for i in range(0, iterations):
        # Resolves to e.g. manager.get_device / manager.get_volume.
        get_object = getattr(manager, "get_{status_type}".format(status_type=status_type))
        obj = get_object(object_id)
        if obj.state == status:
            return obj

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Packet %s is \'%s\', waiting for \'%s\'.',
            object_id, obj.state, status
        )

    # Timed out: return the last observed object regardless of its state.
    # NOTE(review): if timeout < interval the loop body never runs and
    # `obj` would be unbound -- confirm callers always pass timeout >= 5.
    return obj
Wait for a certain status from Packet. status_type device or volume object_id The ID of the Packet device or volume to wait on. Required. status The status to wait for. timeout The amount of time to wait for a status to update. quiet Log status updates to debug logs when False. Otherwise, logs to info.
def remove_address(self, fqdn, address): " Remove an address of a domain." # Get a list of addresses. for record in self.list_address(fqdn): if record.address == address: record.delete() break
Remove an address of a domain.
def to_camel_case(snake_case_string):
    """
    Convert a string from snake case to camel case. For example,
    "some_var" would become "someVar".

    :param snake_case_string: Snake-cased string to convert to camel case.
    :returns: Camel-cased version of snake_case_string.
    """
    # Drop leading underscores, keep the first word as-is, title-case the rest.
    head, *tail = snake_case_string.lstrip('_').split('_')
    return head + ''.join(word.title() for word in tail)
Convert a string from snake case to camel case. For example, "some_var" would become "someVar". :param snake_case_string: Snake-cased string to convert to camel case. :returns: Camel-cased version of snake_case_string.
def get_global_tor_instance(reactor,
                            control_port=None,
                            progress_updates=None,
                            _tor_launcher=None):
    """
    Normal users shouldn't need to call this; use
    TCPHiddenServiceEndpoint::system_tor instead.

    :return Tor: a 'global to this Python process' instance of
        Tor. There isn't one of these until the first time this method
        is called. All calls to this method return the same instance.
    """
    global _global_tor
    global _global_tor_lock
    # NOTE(review): this is a generator using yield/defer.returnValue, so
    # it is presumably decorated with @inlineCallbacks outside this view.
    yield _global_tor_lock.acquire()

    if _tor_launcher is None:
        # XXX :( mutual dependencies...really get_global_tor_instance
        # should be in controller.py if it's going to return a Tor
        # instance.
        from .controller import launch
        _tor_launcher = launch

    try:
        if _global_tor is None:
            _global_tor = yield _tor_launcher(reactor, progress_updates=progress_updates)

        else:
            config = yield _global_tor.get_config()
            already_port = config.ControlPort
            if control_port is not None and control_port != already_port:
                # BUGFIX: the message was never interpolated -- RuntimeError
                # received the format string and the values as separate
                # arguments instead of a formatted message.
                raise RuntimeError(
                    "ControlPort is already '{}', but you wanted '{}'".format(
                        already_port,
                        control_port,
                    )
                )

        defer.returnValue(_global_tor)
    finally:
        # Always release so a failure doesn't deadlock later callers.
        _global_tor_lock.release()
Normal users shouldn't need to call this; use TCPHiddenServiceEndpoint::system_tor instead. :return Tor: a 'global to this Python process' instance of Tor. There isn't one of these until the first time this method is called. All calls to this method return the same instance.
def _set_enhanced_voq_max_queue_depth(self, v, load=False):
    """
    Setter method for enhanced_voq_max_queue_depth, mapped from YANG variable /telemetry/profile/enhanced_voq_max_queue_depth (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enhanced_voq_max_queue_depth is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enhanced_voq_max_queue_depth() directly.
    """
    # Auto-generated pyangbind setter: unwrap any typed value, coerce it
    # into the YANG list type, and store it; a type mismatch is reported
    # as ValueError with the generated type description.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("name",enhanced_voq_max_queue_depth.enhanced_voq_max_queue_depth, yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}), is_container='list', yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """enhanced_voq_max_queue_depth must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",enhanced_voq_max_queue_depth.enhanced_voq_max_queue_depth, yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}), is_container='list', yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
        })

    self.__enhanced_voq_max_queue_depth = t
    if hasattr(self, '_set'):
        self._set()
Setter method for enhanced_voq_max_queue_depth, mapped from YANG variable /telemetry/profile/enhanced_voq_max_queue_depth (list) If this variable is read-only (config: false) in the source YANG file, then _set_enhanced_voq_max_queue_depth is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enhanced_voq_max_queue_depth() directly.
def _avg(value1, value2, weight): """Returns the weighted average of two values and handles the case where one value is None. If both values are None, None is returned. """ if value1 is None: return value2 if value2 is None: return value1 return value2 * weight + value1 * (1 - weight)
Returns the weighted average of two values and handles the case where one value is None. If both values are None, None is returned.
def right_click_event_statusicon(self, icon, button, time):
    """Show the popup menu for the tray status icon (standard GTK plumbing)."""

    def position_menu(menu, status_icon):
        # Let GTK place the menu next to the status icon.
        return Gtk.StatusIcon.position_menu(menu, status_icon)

    self.menu.popup(None, None, position_menu, icon, button, time)
It's just way how popup menu works in GTK. Don't ask me how it works.
def handle_typed_values(val, type_name, value_type):
    """Translate typed XML attribute values into python objects.

    Takes an element name, value, and type and returns a list with the
    string value(s) converted to the matching python type. TypedValues
    follow ucar.ma2.DataType from netcdfJava: "boolean", "byte", "char",
    "short", "int", "long", "float", "double", "Sequence", "String",
    "Structure", "enum1", "enum2", "enum4", "opaque", "object". All are
    written as strings in the xml, so applying int/float usually works.

    Example XML element:
      <attribute name="scale_factor" type="double" value="0.0010000000474974513"/>

    Parameters
    ----------
    val : string
        The string representation of the value attribute of the xml element
    type_name : string
        The string representation of the name attribute of the xml element
    value_type : string
        The string representation of the type attribute of the xml element

    Returns
    -------
    val : list
        A list containing the properly typed python values.
    """
    if value_type in ('byte', 'short', 'int', 'long'):
        try:
            val = [int(piece) for piece in re.split('[ ,]', val) if piece]
        except ValueError:
            log.warning('Cannot convert "%s" to int. Keeping type as str.',
                        val)
    elif value_type in ('float', 'double'):
        try:
            val = [float(piece) for piece in re.split('[ ,]', val) if piece]
        except ValueError:
            log.warning('Cannot convert "%s" to float. Keeping type as str.',
                        val)
    elif value_type == 'boolean':
        try:
            # Every token must be exactly 'true' or 'false'.
            val = val.split()
            if any(token not in ('true', 'false') for token in val):
                raise ValueError
            val = [token == 'true' for token in val]
        except ValueError:
            msg = 'Cannot convert values %s to boolean.'
            msg += ' Keeping type as str.'
            log.warning(msg, val)
    elif value_type == 'String':
        # nothing special for String type
        pass
    else:
        # Sequence, Structure, enum, opaque, object, char:
        # no sample of how these appear in dataset.xml, so keep as-is.
        log.warning('%s type %s not understood. Keeping as String.',
                    type_name, value_type)

    if not isinstance(val, list):
        val = [val]

    return val
Translate typed values into the appropriate python object. Takes an element name, value, and type and returns a list with the string value(s) properly converted to a python type. TypedValues are handled in ucar.ma2.DataType in netcdfJava in the DataType enum. Possibilities are: "boolean" "byte" "char" "short" "int" "long" "float" "double" "Sequence" "String" "Structure" "enum1" "enum2" "enum4" "opaque" "object" All of these are values written as strings in the xml, so simply applying int, float to the values will work in most cases (i.e. the TDS encodes them as string values properly). Examle XML element: <attribute name="scale_factor" type="double" value="0.0010000000474974513"/> Parameters ---------- val : string The string representation of the value attribute of the xml element type_name : string The string representation of the name attribute of the xml element value_type : string The string representation of the type attribute of the xml element Returns ------- val : list A list containing the properly typed python values.
def center(self, width, fillchar=None):
    """Return centered in a string of length width. Padding is done using the
    specified fill character or space.

    :param int width: Length of output string.
    :param str fillchar: Use this character instead of spaces.
    """
    plain = self.value_no_colors
    if fillchar is None:
        padded = plain.center(width)
    else:
        padded = plain.center(width, fillchar)
    # Re-insert the colored text in place of the plain text inside the padding.
    return self.__class__(padded.replace(plain, self.value_colors),
                          keep_tags=True)
Return centered in a string of length width. Padding is done using the specified fill character or space. :param int width: Length of output string. :param str fillchar: Use this character instead of spaces.
def get(self, action, version=None):
    """Return the method class handling the given action and version.

    Falls back to the version-less (``None``) entry when the requested
    version is not registered for the action.
    """
    handlers = self._by_action[action]
    try:
        return handlers[version]
    except KeyError:
        return handlers[None]
Get the method class handling the given action and version.
def reverse_byte_order(self, data):
    """Reverses the byte order of an int (16-bit) or long (32-bit) value."""
    # Courtesy Vishal Sapre
    # Byte count = ceil(hex digits / 2); the 'L' suffix only appears on
    # Python 2 longs.
    hex_digits = hex(data)[2:].replace('L', '')
    byte_count = len(hex_digits[::2])
    reversed_value = 0
    for _ in range(byte_count):
        reversed_value = (reversed_value << 8) | (data & 0xff)
        data >>= 8
    return reversed_value
Reverses the byte order of an int (16-bit) or long (32-bit) value.
def run_command(cmd_to_run):
    """Run *cmd_to_run*, capturing its output through temporary files.

    Piping stderr/stdout to temporary files avoids subprocess.PIPE's
    issues with large buffers. Blocks until the command completes.

    Returns a tuple ``(stderr, stdout)`` as strings.
    """
    with tempfile.TemporaryFile() as stdout_file, \
            tempfile.TemporaryFile() as stderr_file:
        proc = subprocess.Popen(cmd_to_run, stdout=stdout_file,
                                stderr=stderr_file)
        proc.wait()

        stderr_file.seek(0)
        stdout_file.seek(0)
        stderr = stderr_file.read()
        stdout = stdout_file.read()
        if six.PY3:
            # Temporary files are binary; decode to str on Python 3.
            stderr = stderr.decode()
            stdout = stdout.decode()
    return stderr, stdout
Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run` to temporary files. Using the temporary files gets around subprocess.PIPE's issues with handling large buffers. Note: this command will block the python process until `cmd_to_run` has completed. Returns a tuple, containing the stderr and stdout as strings.
def _complete_string(key, haystack): """ Returns valid string completions Takes the string 'key' and compares it to each of the strings in 'haystack'. The ones which beginns with 'key' are returned as result. """ if len(key) == 0: return haystack match = [] for straw in haystack: if string.find(straw, key) == 0: match.append(straw) return match
Returns valid string completions Takes the string 'key' and compares it to each of the strings in 'haystack'. The ones which begin with 'key' are returned as result.
def on_to_coordinates(self, speed, x_target_mm, y_target_mm, brake=True, block=True):
    """
    Drive to (`x_target_mm`, `y_target_mm`) coordinates at `speed`
    """
    assert self.odometry_thread_id, "odometry_start() must be called to track robot coordinates"

    # Halt any current motion before re-orienting.
    self.off(brake='hold')

    # Rotate in place until we face the target point.
    delta_x = x_target_mm - self.x_pos_mm
    delta_y = y_target_mm - self.y_pos_mm
    heading_degrees = math.degrees(math.atan2(delta_y, delta_x))
    self.turn_to_angle(speed, heading_degrees, brake=True, block=True)

    # Drive straight for the remaining euclidean distance (re-read the
    # odometry position after the turn).
    distance_mm = math.sqrt(
        pow(self.x_pos_mm - x_target_mm, 2) +
        pow(self.y_pos_mm - y_target_mm, 2))
    self.on_for_distance(speed, distance_mm, brake, block)
Drive to (`x_target_mm`, `y_target_mm`) coordinates at `speed`
def _read_hypocentre_from_ndk_string(self, linestring):
    """
    Reads the hypocentre data from the ndk string to return an
    instance of the GCMTHypocentre class

    :param str linestring: Fixed-width hypocentre line of an NDK file.
    :returns: Populated GCMTHypocentre instance.
    """
    hypo = GCMTHypocentre()
    hypo.source = linestring[0:4]
    hypo.date = _read_date_from_string(linestring[5:15])
    hypo.time = _read_time_from_string(linestring[16:26])
    hypo.latitude = float(linestring[27:33])
    hypo.longitude = float(linestring[34:41])
    hypo.depth = float(linestring[42:47])
    # split() with no argument collapses runs of whitespace; the previous
    # split(' ') produced empty tokens for leading/double spaces in the
    # fixed-width magnitude columns and crashed on float('').
    magnitudes = [float(x) for x in linestring[48:55].split()]
    if magnitudes[0] > 0.:
        hypo.m_b = magnitudes[0]
    if magnitudes[1] > 0.:
        hypo.m_s = magnitudes[1]
    hypo.location = linestring[56:]
    return hypo
Reads the hypocentre data from the ndk string to return an instance of the GCMTHypocentre class
def handle_editor_command(self, cli, document): """ Editor command is any query that is prefixed or suffixed by a '\e'. The reason for a while loop is because a user might edit a query multiple times. For eg: "select * from \e"<enter> to edit it in vim, then come back to the prompt with the edited query "select * from blah where q = 'abc'\e" to edit it again. :param cli: CommandLineInterface :param document: Document :return: Document """ # FIXME: using application.pre_run_callables like this here is not the best solution. # It's internal api of prompt_toolkit that may change. This was added to fix # https://github.com/dbcli/pgcli/issues/668. We may find a better way to do it in the future. saved_callables = cli.application.pre_run_callables while special.editor_command(document.text): filename = special.get_filename(document.text) query = (special.get_editor_query(document.text) or self.get_last_query()) sql, message = special.open_external_editor(filename, sql=query) if message: # Something went wrong. Raise an exception and bail. raise RuntimeError(message) cli.current_buffer.document = Document(sql, cursor_position=len(sql)) cli.application.pre_run_callables = [] document = cli.run() continue cli.application.pre_run_callables = saved_callables return document
Editor command is any query that is prefixed or suffixed by a '\e'. The reason for a while loop is because a user might edit a query multiple times. For eg: "select * from \e"<enter> to edit it in vim, then come back to the prompt with the edited query "select * from blah where q = 'abc'\e" to edit it again. :param cli: CommandLineInterface :param document: Document :return: Document
def estimate(self, data, full_output=False, **kwargs):
    """
    Estimate the model parameters, given the data.

    :param data: observed spectra to match against the model grid
    :param full_output: if True, also return (chi_sq, dof, model_fluxes)
        computed at the best grid point
    :param kwargs: may supply ``num_model_comparisons`` to override the
        configured number of grid points compared
    :returns: dict of estimated parameters (theta), or the tuple
        (theta, chi_sq, dof, model_fluxes) when ``full_output`` is True
    """
    # Number of model comparisons can be specified in the configuration.
    num_model_comparisons = self._configuration.get("estimate", {}).get(
        "num_model_comparisons", self.grid_points.size)
    # If it's a fraction, we need to convert that to an integer.
    if 1 > num_model_comparisons > 0:
        num_model_comparisons *= self.grid_points.size

    # If the num_model_comparison is provided as a keyword argument, use it.
    num_model_comparisons = kwargs.pop("num_model_comparisons",
        int(num_model_comparisons))

    logger.debug("Number of model comparisons to make for initial estimate:"
        " {0}".format(num_model_comparisons))

    # Match the data to the model channels.
    matched_channels, missing_channels, ignore_parameters \
        = self._match_channels_to_data(data)
    logger.debug("Matched channels: {0}, missing channels: {1}, ignore "
        "parameters: {2}".format(matched_channels, missing_channels,
            ignore_parameters))

    # Load the intensities
    t = time()
    # BUGFIX: floor-divide so the slice step is an int (true division
    # yields a float under Python 3, which slicing rejects), and clamp
    # to >= 1 so a large comparison count cannot give a zero step.
    s = max(1, self.grid_points.size // num_model_comparisons)  # step size
    grid_points = self.grid_points[::s]
    intensities = np.memmap(
        self._configuration["model_grid"]["intensities"], dtype="float32",
        mode="r", shape=(self.grid_points.size, self.wavelengths.size))[::s]
    logger.debug("Took {:.0f} seconds to load and slice intensities".format(
        time() - t))

    # Which matched, data channel has the highest S/N?
    # (This channel will be used to estimate astrophysical parameters)
    data, pixels_affected = self._apply_data_mask(data)
    median_snr = dict(zip(matched_channels,
        [np.nanmedian(spec.flux/(spec.variance**0.5)) for spec in data]))
    median_snr.pop(None, None)  # Remove unmatched data spectra

    ccf_channel = self._configuration.get("settings", {}).get("ccf_channel",
        max(median_snr, key=median_snr.get))
    if ccf_channel not in matched_channels:
        logger.warning("Ignoring CCF channel {0} because it was not a matched"
            " channel".format(ccf_channel))
        ccf_channel = max(median_snr, key=median_snr.get)

    logger.debug("Channel with peak SNR is {0}".format(ccf_channel))

    # Are there *any* continuum parameters in any matched channel?
    any_continuum_parameters = any(
        map(lambda p: p.startswith("continuum_"),
            set(self.parameters).difference(ignore_parameters)))

    # [TODO]: CCF MASK
    # [TODO]: Don't require CCF if we have only continuum parameters.

    z_limits = self._configuration["settings"].get("ccf_z_limits", None)

    theta = {}  # Dictionary for the estimated model parameters.
    best_grid_index = None
    c = speed_of_light.to("km/s").value
    for matched_channel, spectrum in zip(matched_channels, data):
        if matched_channel is None:
            continue

        # Do we need to do cross-correlation for this channel?
        # We do if there are redshift parameters for this channel,
        # or if there is a global redshift or global continuum parameters
        # and this channel is the highest S/N.
        if "z_{}".format(matched_channel) in self.parameters \
        or ((any_continuum_parameters or "z" in self.parameters) \
        and matched_channel == ccf_channel):

            # Get the continuum degree for this channel.
            continuum_degree = self._configuration["model"].get("continuum",
                { matched_channel: -1 })[matched_channel]

            logger.debug("Perfoming CCF on {0} channel with a continuum "
                "degree of {1}".format(matched_channel, continuum_degree))

            # Get model wavelength indices that match the data:
            # the points that are in the mask, and within the spectrum
            # limits.
            # TODO: Make this CCF not model mask.
            idx = np.where(self._model_mask() \
                * (self.wavelengths >= spectrum.disp[0]) \
                * (spectrum.disp[-1] >= self.wavelengths))[0]

            v, v_err, R = spectrum.cross_correlate(
                (self.wavelengths[idx], intensities[:, idx]),
                continuum_degree=continuum_degree, z_limits=z_limits)

            # Identify the best point by the CCF peak.
            best = np.nanargmax(R)

            # Now, why did we do CCF in this channel? Which model parameters
            # should be updated?
            if "z_{}".format(matched_channel) in self.parameters:
                theta["z_{}".format(matched_channel)] = v[best] / c
            elif "z" in self.parameters:
                # If there is a global redshift, update it.
                theta["z"] = v[best] / c

            # Continuum parameters will be updated later, so that each
            # channel is checked to see if it has the highest S/N,
            # otherwise we might be trying to calculate continuum
            # parameters when we haven't done CCF on the highest S/N
            # spectra yet.
            if matched_channel == ccf_channel:
                # Update astrophysical parameters.
                theta.update(dict(zip(grid_points.dtype.names,
                    grid_points[best])))
                best_grid_index = best

    # If there are continuum parameters, calculate them from the best point.
    if any_continuum_parameters:
        for matched_channel, spectrum in zip(matched_channels, data):
            if matched_channel is None:
                continue

            # The template spectra at the best point needs to be
            # redshifted to the data, and then continuum coefficients
            # calculated from that.

            # Get the continuum degree for this channel.
            continuum_degree = self._configuration["model"].get("continuum",
                { matched_channel: -1 })[matched_channel]

            # Get model wavelength indices that match the data.
            idx = np.clip(self.wavelengths.searchsorted(
                [spectrum.disp[0], spectrum.disp[-1]]) + [0, 1],
                0, self.wavelengths.size)

            # Redshift and bin the spectrum.
            z = theta.get("z_{}".format(matched_channel), theta.get("z", 0))

            best_intensities \
                = np.copy(intensities[best_grid_index, idx[0]:idx[1]]).flatten()

            # Apply model mask.
            model_mask = self._model_mask(self.wavelengths[idx[0]:idx[1]])
            best_intensities[~model_mask] = np.nan

            best_intensities = best_intensities * specutils.sample.resample(
                self.wavelengths[idx[0]:idx[1]] * (1 + z), spectrum.disp)

            # Calculate the continuum coefficients for this channel.
            continuum = spectrum.flux/best_intensities
            finite = np.isfinite(continuum)

            try:
                coefficients = np.polyfit(
                    spectrum.disp[finite], continuum[finite], continuum_degree,
                )  # w=spectrum.ivariance[finite]
            # np.linalg.LinAlgError is the supported name; the old
            # np.linalg.linalg alias was removed from recent numpy.
            except np.linalg.LinAlgError:
                logger.exception("Exception in initial polynomial fit")
                coefficients = np.polyfit(spectrum.disp[finite],
                    continuum[finite], continuum_degree)

            # They go into theta backwards, such that coefficients[-1] is
            # continuum_{name}_0
            theta.update(dict(zip(
                ["continuum_{0}_{1}".format(matched_channel, i) \
                    for i in range(continuum_degree + 1)],
                coefficients[::-1]
            )))

    # Remaining parameters could be: resolving power, outlier pixels,
    # underestimated variance.
    remaining_parameters = set(self.parameters)\
        .difference(ignore_parameters)\
        .difference(theta)

    if remaining_parameters:
        logger.debug("Remaining parameters to estimate: {0}. For these we "
            "will just assume reasonable initial values.".format(
                remaining_parameters))

        for parameter in remaining_parameters:
            if parameter == "resolution" \
            or parameter.startswith("resolution_"):

                if parameter.startswith("resolution_"):
                    spectra = [data[matched_channels.index(
                        parameter.split("_")[1])]]
                else:
                    spectra = [s for s in data if s is not None]

                R = [s.disp.mean()/np.diff(s.disp).mean() for s in spectra]

                # Assume oversampling rate of ~5.
                theta.update({ parameter: np.median(R)/5.})

            elif parameter == "ln_f" or parameter.startswith("ln_f_"):
                theta.update({ parameter: 0.5 })  # Not overestimated.

            elif parameter in ("Po", "Vo"):
                theta.update({
                    "Po": 0.01,  # 1% outlier pixels.
                    "Vo": np.mean([np.nanmedian(s.variance) for s in data]),
                })

    logger.info("Initial estimate: {}".format(theta))

    # Having full_output = True means return the best spectra estimate.
    if full_output:
        # Create model fluxes and calculate some metric.
        __intensities = np.copy(intensities[best_grid_index])

        # Apply model masks.
        __intensities[~self._model_mask()] = np.nan

        chi_sq, dof, model_fluxes = self._chi_sq(theta, data,
            __intensities=__intensities, __no_precomputed_binning=True)
        del intensities
        return (theta, chi_sq, dof, model_fluxes)

    # Delete the reference to intensities
    del intensities
    return theta
Estimate the model parameters, given the data.
def filter_by_label(self, pores=[], throats=[], labels=None, mode='or'):
    r"""
    Returns which of the supplied pores (or throats) has the specified
    label

    Parameters
    ----------
    pores, or throats : array_like
        List of pores or throats to be filtered

    labels : list of strings
        The labels to apply as a filter

    mode : string
        Controls how the filter is applied.  Options include:

        **'or', 'union', 'any'**: (default) Returns a list of the given
        locations where *any* of the given labels exist.

        **'and', 'intersection', 'all'**: Only locations where *all* the
        given labels are found.

        **'xor', 'exclusive_or'**: Only locations where exactly *one* of
        the given labels are found.

        **'nor', 'none', 'not'**: Only locations where *none* of the given
        labels are found.

        **'nand'** : Only locations with *some but not all* of the given
        labels are returned.

        **'xnor'** : Only locations with *more than one* of the given
        labels are returned.

    Returns
    -------
    A list of pores (or throats) that have been filtered according the
    given criteria.  The returned list is a subset of the received list of
    pores (or throats).

    See Also
    --------
    pores
    throats

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> pn.filter_by_label(pores=[0, 1, 5, 6], labels='left')
    array([0, 1])
    >>> Ps = pn.pores(['top', 'bottom', 'front'], mode='or')
    >>> pn.filter_by_label(pores=Ps, labels=['top', 'front'],
    ...                    mode='and')
    array([ 4,  9, 14, 19, 24])
    """
    # Convert inputs to locations and element
    if (sp.size(throats) > 0) and (sp.size(pores) > 0):
        # BUGFIX: message previously said "pores OR labels"; the mutually
        # exclusive inputs are pores and throats.
        raise Exception('Can only filter either pores OR throats')
    if sp.size(pores) > 0:
        element = 'pore'
        locations = self._parse_indices(pores)
    elif sp.size(throats) > 0:
        element = 'throat'
        locations = self._parse_indices(throats)
    else:
        # Nothing supplied to filter; return an empty index array.
        return sp.array([], dtype=int)
    labels = self._parse_labels(labels=labels, element=element)
    labels = [element+'.'+item.split('.')[-1] for item in labels]
    all_locs = self._get_indices(element=element, labels=labels, mode=mode)
    mask = self._tomask(indices=all_locs, element=element)
    ind = mask[locations]
    return locations[ind]
r""" Returns which of the supplied pores (or throats) has the specified label Parameters ---------- pores, or throats : array_like List of pores or throats to be filtered labels : list of strings The labels to apply as a filter mode : string Controls how the filter is applied. Options include: **'or', 'union', 'any'**: (default) Returns a list of the given locations where *any* of the given labels exist. **'and', 'intersection', 'all'**: Only locations where *all* the given labels are found. **'xor', 'exclusive_or'**: Only locations where exactly *one* of the given labels are found. **'nor', 'none', 'not'**: Only locations where *none* of the given labels are found. **'nand'** : Only locations with *some but not all* of the given labels are returned. **'xnor'** : Only locations with *more than one* of the given labels are returned. Returns ------- A list of pores (or throats) that have been filtered according the given criteria. The returned list is a subset of the received list of pores (or throats). See Also -------- pores throats Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> pn.filter_by_label(pores=[0, 1, 5, 6], labels='left') array([0, 1]) >>> Ps = pn.pores(['top', 'bottom', 'front'], mode='or') >>> pn.filter_by_label(pores=Ps, labels=['top', 'front'], ... mode='and') array([ 4, 9, 14, 19, 24])
def decode_header_part(header):
    """
    Given a raw header, return it decoded to text.

    Args:
        header (string): header to decode

    Returns:
        str (Python 3) or unicode (Python 2)
    """
    if not header:
        return six.text_type()

    output = six.text_type()
    try:
        for part, charset in decode_header(header):
            # Fall back to utf-8 when the part declares no charset.
            output += ported_string(part, charset if charset else 'utf-8',
                                    'ignore')
    # Header parsing failed, when header has charset Shift_JIS
    except (HeaderParseError, UnicodeError):
        log.error("Failed decoding header part: {}".format(header))
        output += header

    return output
Given a raw header returns a decoded header Args: header (string): header to decode Returns: str (Python 3) or unicode (Python 2)
def all_near_zero_mod(a: Union[float, complex, Iterable[float], np.ndarray],
                      period: float,
                      *,
                      atol: float = 1e-8) -> bool:
    """Checks if the tensor's elements are all near multiples of the period.

    Args:
        a: Tensor of elements that could all be near multiples of the period.
        period: The period, e.g. 2 pi when working in radians.
        atol: Absolute tolerance.

    Returns:
        True if every element is within `atol` of a multiple of `period`.
    """
    # Shift each value into [-period/2, period/2) so multiples of the
    # period land near zero, then compare against the tolerance.
    b = (np.asarray(a) + period / 2) % period - period / 2
    # bool(...) ensures a plain Python bool as annotated, not np.bool_.
    return bool(np.all(np.less_equal(np.abs(b), atol)))
Checks if the tensor's elements are all near multiples of the period. Args: a: Tensor of elements that could all be near multiples of the period. period: The period, e.g. 2 pi when working in radians. atol: Absolute tolerance.
def current_app(self):
    """Return the current app."""
    current_focus = self.adb_shell(CURRENT_APP_CMD)
    if current_focus is None:
        return None

    current_focus = current_focus.replace("\r", "")
    matches = WINDOW_REGEX.search(current_focus)

    # current app could not be found
    if not matches:
        logging.warning("Couldn't get current app, reply was %s",
                        current_focus)
        return None

    # current app was successfully found
    pkg, activity = matches.group("package", "activity")
    return {"package": pkg, "activity": activity}
Return the current app.
def checksum(digits):
    """Calculate checksum of Estonian personal identity code.

    Uses the "Modulo 11" method: the digits are multiplied by the level I
    scale (1 2 3 4 5 6 7 8 9 1) and summed; if the remainder mod 11 is
    below 10 that remainder is the checksum. Otherwise the level II scale
    (3 4 5 6 7 8 9 1 2 3) is tried the same way, with a remainder of 10
    mapping to checksum 0.

    See also https://et.wikipedia.org/wiki/Isikukood
    """
    remainder = sum(map(operator.mul, digits, Provider.scale1)) % 11
    if remainder < 10:
        return remainder
    # Remainder was 10: retry with the level II scale.
    remainder = sum(map(operator.mul, digits, Provider.scale2)) % 11
    return 0 if remainder == 10 else remainder
Calculate checksum of Estonian personal identity code. Checksum is calculated with "Modulo 11" method using level I or II scale: Level I scale: 1 2 3 4 5 6 7 8 9 1 Level II scale: 3 4 5 6 7 8 9 1 2 3 The digits of the personal code are multiplied by level I scale and summed; if remainder of modulo 11 of the sum is less than 10, checksum is the remainder. If remainder is 10, then level II scale is used; checksum is remainder if remainder < 10 or 0 if remainder is 10. See also https://et.wikipedia.org/wiki/Isikukood
def getResourceMapPid(self):
    """Returns:
    str : PID of the Resource Map itself.
    """
    # The aggregation is the (single expected) subject typed as
    # ore:ResourceMap.
    ore_subject = list(self.subjects(predicate=rdflib.RDF.type,
                                     object=ORE.ResourceMap))[0]
    identifiers = [
        str(obj)
        for obj in self.objects(predicate=DCTERMS.identifier,
                                subject=ore_subject)
    ]
    return identifiers[0]
Returns: str : PID of the Resource Map itself.
def create(self, pools):
    """
    Create pools on the database.

    :param pools: List containing the pools to be created
    :return: None
    """
    payload = {'server_pools': pools}
    return super(ApiPool, self).post('api/v3/pool/', payload)
Method to create pools :param pools: List containing pools desired to be created on database :return: None
def _get_uploaded_versions_warehouse(project_name, index_url,
                                     requests_verify=True):
    """
    Query the pypi index at index_url using the warehouse JSON API to
    find all of the "releases". Returns None when the project is not
    found (non-200 response).
    """
    url = '/'.join((index_url, project_name, 'json'))
    response = requests.get(url, verify=requests_verify)
    if response.status_code != 200:
        return None
    return response.json()['releases'].keys()
Query the pypi index at index_url using warehouse api to find all of the "releases"
def _recurse(coreml_tree, scikit_tree, tree_id, node_id, scaling = 1.0, mode = 'regressor',
             n_classes = 2, tree_index = 0):
    """Traverse through the tree and append to the tree spec.

    Walks the scikit-learn tree depth-first, mirroring each internal node
    as a branch node and each leaf as a leaf node in the Core ML tree spec.

    Parameters
    ----------
    coreml_tree : Core ML tree spec builder receiving the nodes.
    scikit_tree : the fitted scikit-learn tree_ structure being converted.
    tree_id : identifier of the tree within an ensemble.
    node_id : scikit-learn node currently being converted.
    scaling : multiplier applied to leaf values (ensemble shrinkage).
    mode : 'regressor' or classifier mode, forwarded to _get_value.
    n_classes : number of classes, forwarded to _get_value.
    tree_index : index of this tree, forwarded to _get_value.
    """
    if not(HAS_SKLEARN):
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')

    ## Recursion should not be called on the leaf node.
    if node_id == _tree.TREE_LEAF:
        raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)

    # Add a branch node to the tree
    if scikit_tree.children_left[node_id] != _tree.TREE_LEAF:
        branch_mode = 'BranchOnValueLessThanEqual'
        feature_index = scikit_tree.feature[node_id]
        feature_value = scikit_tree.threshold[node_id]
        left_child_id = scikit_tree.children_left[node_id]
        right_child_id = scikit_tree.children_right[node_id]

        # Add a branch node
        coreml_tree.add_branch_node(tree_id, node_id, feature_index,
                feature_value, branch_mode, left_child_id, right_child_id)

        # Now recurse into both children
        _recurse(coreml_tree, scikit_tree, tree_id, left_child_id, scaling, mode, n_classes, tree_index)
        _recurse(coreml_tree, scikit_tree, tree_id, right_child_id, scaling, mode, n_classes, tree_index)

    # Add a leaf node to the tree
    else:
        # Get the scikit-learn value
        if scikit_tree.n_outputs != 1:
            raise ValueError('Expected only 1 output in the scikit-learn tree.')
        value = _get_value(scikit_tree.value[node_id], mode, scaling, n_classes, tree_index)
        coreml_tree.add_leaf_node(tree_id, node_id, value)
Traverse through the tree and append to the tree spec.
def ParsePythonFlags(self, start_line=0):
    """Parse python/swig style flags.

    Scans self.output starting at start_line, grouping flags under the
    module heading that precedes them.  Populates self.module_list,
    self.modules (module name -> list of Flag) as a side effect.
    """
    modname = None  # name of current module
    modlist = []
    flag = None
    for line_num in range(start_line, len(self.output)):
        # collect flags
        line = self.output[line_num].rstrip()
        if not line:  # blank
            continue

        mobj = self.module_py_re.match(line)
        if mobj:  # start of a new module
            modname = mobj.group(1)
            logging.debug('Module: %s' % line)
            # Flush the flag accumulated under the previous module.
            if flag:
                modlist.append(flag)
            self.module_list.append(modname)
            self.modules.setdefault(modname, [])
            modlist = self.modules[modname]
            flag = None
            continue

        mobj = self.flag_py_re.match(line)
        if mobj:  # start of a new flag
            if flag:
                modlist.append(flag)
            logging.debug('Flag: %s' % line)
            flag = Flag(mobj.group(1), mobj.group(2))
            continue

        # continuation of a flag
        if not flag:
            # NOTE(review): execution falls through after this error; if a
            # default/tips line follows with no current flag, the
            # flag.default assignment below would raise AttributeError —
            # confirm whether a `continue` was intended here.
            logging.error('Flag info, but no current flag "%s"' % line)
        mobj = self.flag_default_py_re.match(line)
        if mobj:  # (default: '...')
            flag.default = mobj.group(1)
            logging.debug('Fdef: %s' % line)
            continue
        mobj = self.flag_tips_py_re.match(line)
        if mobj:  # (tips)
            flag.tips = mobj.group(1)
            logging.debug('Ftip: %s' % line)
            continue
        if flag and flag.help:
            flag.help += line  # multiflags tack on an extra line
        else:
            logging.info('Extra: %s' % line)
    # Flush the final flag once the input is exhausted.
    if flag:
        modlist.append(flag)
Parse python/swig style flags.
def get_authentication_header(self, user=None, api_key=None, password=None, certificate=None):
    """ Return authentication string to place in Authorization Header

        If API Token is set, it'll be used. Otherwise, the clear
        text password will be sent. Users of NURESTLoginController are
        responsible to clean the password property.

        Returns:
            Returns the XREST Authentication string with API Key or user
            password encoded.
    """

    if not user:
        user = self.user

    if not api_key:
        api_key = self.api_key

    # BUGFIX: this fallback was duplicated twice in the original.
    if not password:
        password = self.password

    if not certificate:
        certificate = self._certificate

    if certificate:
        # Certificate-based auth sends only the user name, no secret.
        return "XREST %s" % urlsafe_b64encode("{}:".format(user).encode('utf-8')).decode('utf-8')

    if api_key:
        return "XREST %s" % urlsafe_b64encode("{}:{}".format(user, api_key).encode('utf-8')).decode('utf-8')

    return "XREST %s" % urlsafe_b64encode("{}:{}".format(user, password).encode('utf-8')).decode('utf-8')
Return authentication string to place in Authorization Header If API Token is set, it'll be used. Otherwise, the clear text password will be sent. Users of NURESTLoginController are responsible to clean the password property. Returns: Returns the XREST Authentication string with API Key or user password encoded.
def evaluations(ty, pv, useScipy = True):
    """
    evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC)
    ty, pv: list, tuple or ndarray
    useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation

    Calculate accuracy, mean squared error and squared correlation coefficient
    using the true values (ty) and predicted values (pv).
    """
    # `is not None` replaces the `!= None` comparison, which invokes
    # rich comparison instead of the intended identity check.
    if scipy is not None and useScipy:
        return evaluations_scipy(scipy.asarray(ty), scipy.asarray(pv))
    if len(ty) != len(pv):
        raise ValueError("len(ty) must be equal to len(pv)")
    total_correct = total_error = 0
    sumv = sumy = sumvv = sumyy = sumvy = 0
    for v, y in zip(pv, ty):
        if y == v:
            total_correct += 1
        total_error += (v-y)*(v-y)
        sumv += v
        sumy += y
        sumvv += v*v
        sumyy += y*y
        sumvy += v*y
    l = len(ty)
    ACC = 100.0*total_correct/l
    MSE = total_error/l
    try:
        SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
    # A constant ty or pv makes the denominator zero; only that specific
    # failure should fall back to NaN (the previous bare except hid bugs).
    except ZeroDivisionError:
        SCC = float('nan')
    return (float(ACC), float(MSE), float(SCC))
evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC) ty, pv: list, tuple or ndarray useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation Calculate accuracy, mean squared error and squared correlation coefficient using the true values (ty) and predicted values (pv).
def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen):
    """
    Compose contract and create PDF.

    Args:
        firma (str): firma
        pravni_forma (str): pravni_forma
        sidlo (str): sidlo
        ic (str): ic
        dic (str): dic
        zastoupen (str): zastoupen

    Returns:
        obj: StringIO file instance containing PDF file.
    """
    contract_fn = _resource_context(
        "Licencni_smlouva_o_dodavani_elektronickych_publikaci"
        "_a_jejich_uziti.rst"
    )

    # load contract (close the handle promptly instead of leaking it)
    with open(contract_fn) as f:
        contract = f.read()

    # make sure that `firma` has its heading mark
    firma = firma.strip()
    firma = firma + "\n" + ((len(firma) + 1) * "-")

    # patch template
    contract = Template(contract).substitute(
        firma=firma,
        pravni_forma=pravni_forma.strip(),
        sidlo=sidlo.strip(),
        ic=ic.strip(),
        dic=dic.strip(),
        zastoupen=zastoupen.strip(),
        resources_path=RES_PATH
    )

    # the style file was previously opened without ever being closed
    with open(_resource_context("style.json")) as style_file:
        style = style_file.read()

    return gen_pdf(contract, style)
Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file.
def pkg_config(pkg_libraries):
    """Use pkg-config to query for the location of libraries, library
    directories, and header directories

    Arguments:
        pkg_libraries(list): A list of packages as strings

    Returns:
        libraries(list), library_dirs(list), include_dirs(list)

    Exits the process (status 1) if any requested package is not found.
    """
    libraries = []
    library_dirs = []
    include_dirs = []

    # Check that we have the packages
    for pkg in pkg_libraries:
        if os.system('pkg-config --exists %s 2>/dev/null' % pkg) != 0:
            print("Could not find library {0}".format(pkg))
            sys.exit(1)

    # Get the pkg-config flags
    if pkg_libraries:
        # PKG_CONFIG_ALLOW_SYSTEM_CFLAGS explicitly lists system paths.
        # On system-wide LAL installs, this is needed for swig to find lalswig.i
        output = getoutput(
            "PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s"
            % ' '.join(pkg_libraries))
        for token in output.split():
            if token.startswith("-l"):
                libraries.append(token[2:])
            elif token.startswith("-L"):
                library_dirs.append(token[2:])
            elif token.startswith("-I"):
                include_dirs.append(token[2:])

    return libraries, library_dirs, include_dirs
Use pkg-config to query for the location of libraries, library directories, and header directories

    Arguments:
        pkg_libraries(list): A list of packages as strings

    Returns:
        libraries(list), library_dirs(list), include_dirs(list)
def save_all_figures_as(self):
    """Ask the user for a destination directory and save all figures there."""
    self.redirect_stdio.emit(False)
    directory = getexistingdirectory(self, caption='Save all figures',
                                     basedir=getcwd_or_home())
    self.redirect_stdio.emit(True)
    if not directory:
        return None
    return self.save_all_figures_todir(directory)
Save all the figures to a file.
def b_pathOK(self, al_path):
    """
    Checks if the absolute path specified in the al_path is valid
    for current tree.

    Returns True if al_path is present in self.l_allPaths, False otherwise.
    """
    # Plain membership test replaces index() wrapped in a bare `except`,
    # which used to swallow every exception (even KeyboardInterrupt).
    return al_path in self.l_allPaths
Checks if the absolute path specified in the al_path is valid for current tree
def array_map(f, ar):
    "Apply an ordinary function to each value in an array, keeping its shape."
    flat = ravel(ar)
    result = zeros(len(flat), flat.typecode())
    for idx, value in enumerate(flat):
        result[idx] = f(value)
    result.shape = ar.shape
    return result
Apply an ordinary function to all values in an array.
def _inject_args(sig, types): """ A function to inject arguments manually into a method signature before it's been parsed. If using keyword arguments use 'kw=type' instead in the types array. sig the string signature types a list of types to be inserted Returns the altered signature. """ if '(' in sig: parts = sig.split('(') sig = '%s(%s%s%s' % ( parts[0], ', '.join(types), (', ' if parts[1].index(')') > 0 else ''), parts[1]) else: sig = '%s(%s)' % (sig, ', '.join(types)) return sig
A function to inject arguments manually into a method signature before it's been parsed. If using keyword arguments use 'kw=type' instead in the types array. sig the string signature types a list of types to be inserted Returns the altered signature.
def get_prefix_stripper(strip_prefix):
    """ Return function to strip `strip_prefix` prefix from string if present

    Parameters
    ----------
    strip_prefix : str
        Prefix to strip from the beginning of string if present

    Returns
    -------
    stripper : func
        function such that ``stripper(a_string)`` will strip `strip_prefix`
        from ``a_string`` if present, otherwise pass ``a_string`` unmodified
    """
    prefix_len = len(strip_prefix)

    def stripper(path):
        if path.startswith(strip_prefix):
            return path[prefix_len:]
        return path

    return stripper
Return function to strip `strip_prefix` prefix from string if present Parameters ---------- prefix : str Prefix to strip from the beginning of string if present Returns ------- stripper : func function such that ``stripper(a_string)`` will strip `prefix` from ``a_string`` if present, otherwise pass ``a_string`` unmodified
def push(self, field):
    '''
    Add a field to the container. If the field is a Container itself,
    it should be popped when done pushing into it.

    :param field: BaseField to push
    :raises KittyException: if a named field with the same name already exists
    :return: True
    '''
    kassert.is_of_types(field, BaseField)
    # presumably the currently-open nested container (if any), which will
    # receive the field instead of self -- confirm against _container()
    container = self._container()
    field.enclosing = self
    if isinstance(field, Container):
        # keep track of nested containers separately from plain fields
        self._containers.append(field)
    if container:
        # delegate to the open nested container
        container.push(field)
    else:
        # register directly on self; named fields must be unique here
        name = field.get_name()
        if name in self._fields_dict:
            raise KittyException('field with the name (%s) already exists in this container' % (name))
        if name:
            self._fields_dict[name] = field
        self._fields.append(field)
    return True
Add a field to the container. If the field is a Container itself,
    it should be popped when done pushing into it.

    :param field: BaseField to push
def _deleteSpinBoxes(self, row):
    """ Removes all spinboxes

        Disconnects each spin box from the value-changed slot and clears
        the index widget that held it in the given tree row.
    """
    tree = self.tree
    model = self.tree.model()

    # spin boxes start in the column right after the combo-box columns
    for col, spinBox in enumerate(self._spinBoxes, self.COL_FIRST_COMBO + self.maxCombos):
        # disconnect before removing the widget (presumably to avoid the
        # slot firing during teardown -- confirm with Qt signal semantics)
        spinBox.valueChanged[int].disconnect(self._spinboxValueChanged)
        tree.setIndexWidget(model.index(row, col), None)

    self._spinBoxes = []
    self._setColumnCountForContents()
Removes all spinboxes
def _build_predict(self, Xnew, full_cov=False):
    """
    Xnew is a data matrix, the points at which we want to predict.

    This method computes

        p(F* | Y)

    where F* are points on the GP at Xnew, Y are noisy observations at X.

    Returns the predictive mean (with the mean function added back) and
    the predictive variance (full covariance if full_cov, else diagonal).
    """
    # residuals: observations minus the prior mean at the training inputs
    y = self.Y - self.mean_function(self.X)
    # cross-covariance between training inputs X and test inputs Xnew
    Kmn = self.kern.K(self.X, Xnew)
    # training covariance with observation noise added on the diagonal
    Kmm_sigma = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance
    # full test covariance or just its diagonal, depending on full_cov
    Knn = self.kern.K(Xnew) if full_cov else self.kern.Kdiag(Xnew)
    f_mean, f_var = base_conditional(Kmn, Kmm_sigma, Knn, y, full_cov=full_cov, white=False)  # N x P, N x P or P x N x N
    return f_mean + self.mean_function(Xnew), f_var
Xnew is a data matrix, the points at which we want to predict. This method computes p(F* | Y) where F* are points on the GP at Xnew, Y are noisy observations at X.