code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def cache(opts, serial): ''' Returns the returner modules ''' return LazyLoader( _module_dirs(opts, 'cache', 'cache'), opts, tag='cache', pack={'__opts__': opts, '__context__': {'serial': serial}}, )
Returns the returner modules
Below is the the instruction that describes the task: ### Input: Returns the returner modules ### Response: def cache(opts, serial): ''' Returns the returner modules ''' return LazyLoader( _module_dirs(opts, 'cache', 'cache'), opts, tag='cache', pack={'__opts__': opts, '__context__': {'serial': serial}}, )
def create_button(self, style=Gtk.ReliefStyle.NORMAL): """ This is generalized method for creating Gtk.Button """ btn = Gtk.Button() btn.set_relief(style) return btn
This is generalized method for creating Gtk.Button
Below is the the instruction that describes the task: ### Input: This is generalized method for creating Gtk.Button ### Response: def create_button(self, style=Gtk.ReliefStyle.NORMAL): """ This is generalized method for creating Gtk.Button """ btn = Gtk.Button() btn.set_relief(style) return btn
def dist_location(dist): """ Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. """ egg_link = egg_link_path(dist) if os.path.exists(egg_link): return egg_link return dist.location
Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is.
Below is the the instruction that describes the task: ### Input: Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. ### Response: def dist_location(dist): """ Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. """ egg_link = egg_link_path(dist) if os.path.exists(egg_link): return egg_link return dist.location
def SetMaxCurrent(self, i): """Set the max output current. """ if i < 0 or i > 8: raise MonsoonError(("Target max current %sA, is out of acceptable " "range [0, 8].") % i) val = 1023 - int((i / 8) * 1023) self._SendStruct("BBB", 0x01, 0x0a, val & 0xff) self._SendStruct("BBB", 0x01, 0x0b, val >> 8)
Set the max output current.
Below is the the instruction that describes the task: ### Input: Set the max output current. ### Response: def SetMaxCurrent(self, i): """Set the max output current. """ if i < 0 or i > 8: raise MonsoonError(("Target max current %sA, is out of acceptable " "range [0, 8].") % i) val = 1023 - int((i / 8) * 1023) self._SendStruct("BBB", 0x01, 0x0a, val & 0xff) self._SendStruct("BBB", 0x01, 0x0b, val >> 8)
def search(self, CorpNum, DType, SDate, EDate, State, TradeType, TradeUsage, TaxationType, Page, PerPage, Order, UserID=None, QString=None, TradeOpt=None): """ λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμž, T-거래일자, I-λ°œν–‰μΌμž 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ λ°°μ—΄, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ TradeType : λ¬Έμ„œν˜•νƒœ λ°°μ—΄, N-μΌλ°˜ν˜„κΈˆμ˜μˆ˜μ¦, C-μ·¨μ†Œν˜„κΈˆμ˜μˆ˜μ¦ TradeUsage : κ±°λž˜κ΅¬λΆ„ λ°°μ—΄, P-μ†Œλ“κ³΅μ œμš©, C-μ§€μΆœμ¦λΉ™μš© TaxationType : κ³Όμ„Έν˜•νƒœ λ°°μ—΄, T-κ³Όμ„Έ, N-λΉ„κ³Όμ„Έ Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή κ²€μƒ‰κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” QString : ν˜„κΈˆμ˜μˆ˜μ¦ μ‹λ³„λ²ˆν˜Έ, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ TradeOpt : κ±°λž˜μœ ν˜•, N-일반, B-λ„μ„œκ³΅μ—°, T-λŒ€μ€‘κ΅ν†΅ """ if DType == None or DType == '': raise PopbillException(-99999999, "μΌμžμœ ν˜•μ΄ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") if SDate == None or SDate == '': raise PopbillException(-99999999, "μ‹œμž‘μΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") if EDate == None or EDate == '': raise PopbillException(-99999999, "μ’…λ£ŒμΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") uri = '/Cashbill/Search' uri += '?DType=' + DType uri += '&SDate=' + SDate uri += '&EDate=' + EDate uri += '&State=' + ','.join(State) uri += '&TradeUsage=' + ','.join(TradeUsage) uri += '&TradeType=' + ','.join(TradeType) uri += '&TaxationType=' + ','.join(TaxationType) uri += '&Page=' + str(Page) uri += '&PerPage=' + str(PerPage) uri += '&Order=' + Order if QString is not None: uri += '&QString=' + QString if TradeOpt is not None: uri += '&TradeOpt=' + ','.join(TradeOpt) return self._httpget(uri, CorpNum, UserID)
λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμž, T-거래일자, I-λ°œν–‰μΌμž 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ λ°°μ—΄, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ TradeType : λ¬Έμ„œν˜•νƒœ λ°°μ—΄, N-μΌλ°˜ν˜„κΈˆμ˜μˆ˜μ¦, C-μ·¨μ†Œν˜„κΈˆμ˜μˆ˜μ¦ TradeUsage : κ±°λž˜κ΅¬λΆ„ λ°°μ—΄, P-μ†Œλ“κ³΅μ œμš©, C-μ§€μΆœμ¦λΉ™μš© TaxationType : κ³Όμ„Έν˜•νƒœ λ°°μ—΄, T-κ³Όμ„Έ, N-λΉ„κ³Όμ„Έ Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή κ²€μƒ‰κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” QString : ν˜„κΈˆμ˜μˆ˜μ¦ μ‹λ³„λ²ˆν˜Έ, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ TradeOpt : κ±°λž˜μœ ν˜•, N-일반, B-λ„μ„œκ³΅μ—°, T-λŒ€μ€‘κ΅ν†΅
Below is the the instruction that describes the task: ### Input: λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμž, T-거래일자, I-λ°œν–‰μΌμž 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ λ°°μ—΄, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ TradeType : λ¬Έμ„œν˜•νƒœ λ°°μ—΄, N-μΌλ°˜ν˜„κΈˆμ˜μˆ˜μ¦, C-μ·¨μ†Œν˜„κΈˆμ˜μˆ˜μ¦ TradeUsage : κ±°λž˜κ΅¬λΆ„ λ°°μ—΄, P-μ†Œλ“κ³΅μ œμš©, C-μ§€μΆœμ¦λΉ™μš© TaxationType : κ³Όμ„Έν˜•νƒœ λ°°μ—΄, T-κ³Όμ„Έ, N-λΉ„κ³Όμ„Έ Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή κ²€μƒ‰κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” QString : ν˜„κΈˆμ˜μˆ˜μ¦ μ‹λ³„λ²ˆν˜Έ, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ TradeOpt : κ±°λž˜μœ ν˜•, N-일반, B-λ„μ„œκ³΅μ—°, T-λŒ€μ€‘κ΅ν†΅ ### Response: def search(self, CorpNum, DType, SDate, EDate, State, TradeType, TradeUsage, TaxationType, Page, PerPage, Order, UserID=None, QString=None, TradeOpt=None): """ λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμž, T-거래일자, I-λ°œν–‰μΌμž 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ λ°°μ—΄, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ TradeType : λ¬Έμ„œν˜•νƒœ λ°°μ—΄, N-μΌλ°˜ν˜„κΈˆμ˜μˆ˜μ¦, C-μ·¨μ†Œν˜„κΈˆμ˜μˆ˜μ¦ TradeUsage : κ±°λž˜κ΅¬λΆ„ λ°°μ—΄, P-μ†Œλ“κ³΅μ œμš©, C-μ§€μΆœμ¦λΉ™μš© TaxationType : κ³Όμ„Έν˜•νƒœ λ°°μ—΄, T-κ³Όμ„Έ, N-λΉ„κ³Όμ„Έ Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή κ²€μƒ‰κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” QString : ν˜„κΈˆμ˜μˆ˜μ¦ μ‹λ³„λ²ˆν˜Έ, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ TradeOpt : κ±°λž˜μœ ν˜•, N-일반, B-λ„μ„œκ³΅μ—°, T-λŒ€μ€‘κ΅ν†΅ """ if DType == None or DType == '': raise PopbillException(-99999999, "μΌμžμœ ν˜•μ΄ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") if SDate == None or SDate == '': raise PopbillException(-99999999, "μ‹œμž‘μΌμžκ°€ μž…λ ₯λ˜μ§€ 
μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") if EDate == None or EDate == '': raise PopbillException(-99999999, "μ’…λ£ŒμΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.") uri = '/Cashbill/Search' uri += '?DType=' + DType uri += '&SDate=' + SDate uri += '&EDate=' + EDate uri += '&State=' + ','.join(State) uri += '&TradeUsage=' + ','.join(TradeUsage) uri += '&TradeType=' + ','.join(TradeType) uri += '&TaxationType=' + ','.join(TaxationType) uri += '&Page=' + str(Page) uri += '&PerPage=' + str(PerPage) uri += '&Order=' + Order if QString is not None: uri += '&QString=' + QString if TradeOpt is not None: uri += '&TradeOpt=' + ','.join(TradeOpt) return self._httpget(uri, CorpNum, UserID)
def queryName(self, queryName): """Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query') """ if not queryName or type(queryName) != str or len(queryName.strip()) == 0: raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName) self._jwrite = self._jwrite.queryName(queryName) return self
Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query')
Below is the the instruction that describes the task: ### Input: Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query') ### Response: def queryName(self, queryName): """Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query') """ if not queryName or type(queryName) != str or len(queryName.strip()) == 0: raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName) self._jwrite = self._jwrite.queryName(queryName) return self
def set_title(self, title=None): """ Sets the title on the current axes. Parameters ---------- title: string, default: None Add title to figure or if None leave untitled. """ title = self.title or title if title is not None: self.ax.set_title(title)
Sets the title on the current axes. Parameters ---------- title: string, default: None Add title to figure or if None leave untitled.
Below is the the instruction that describes the task: ### Input: Sets the title on the current axes. Parameters ---------- title: string, default: None Add title to figure or if None leave untitled. ### Response: def set_title(self, title=None): """ Sets the title on the current axes. Parameters ---------- title: string, default: None Add title to figure or if None leave untitled. """ title = self.title or title if title is not None: self.ax.set_title(title)
def _getSyntaxByFirstLine(self, firstLine): """Get syntax by first line of the file """ for pattern, xmlFileName in self._firstLineToXmlFileName.items(): if fnmatch.fnmatch(firstLine, pattern): return self._getSyntaxByXmlFileName(xmlFileName) else: raise KeyError("No syntax for " + firstLine)
Get syntax by first line of the file
Below is the the instruction that describes the task: ### Input: Get syntax by first line of the file ### Response: def _getSyntaxByFirstLine(self, firstLine): """Get syntax by first line of the file """ for pattern, xmlFileName in self._firstLineToXmlFileName.items(): if fnmatch.fnmatch(firstLine, pattern): return self._getSyntaxByXmlFileName(xmlFileName) else: raise KeyError("No syntax for " + firstLine)
def get_network_remove_kwargs(self, action, network_name, kwargs=None): """ Generates keyword arguments for the Docker client to remove a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """ c_kwargs = dict(net_id=network_name) update_kwargs(c_kwargs, kwargs) return c_kwargs
Generates keyword arguments for the Docker client to remove a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict
Below is the the instruction that describes the task: ### Input: Generates keyword arguments for the Docker client to remove a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict ### Response: def get_network_remove_kwargs(self, action, network_name, kwargs=None): """ Generates keyword arguments for the Docker client to remove a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """ c_kwargs = dict(net_id=network_name) update_kwargs(c_kwargs, kwargs) return c_kwargs
def encode_timeseries_put(self, tsobj): """ Fills an TsPutReq message with the appropriate data and metadata from a TsObject. :param tsobj: a TsObject :type tsobj: TsObject :param req: the protobuf message to fill :type req: riak.pb.riak_ts_pb2.TsPutReq """ req = riak.pb.riak_ts_pb2.TsPutReq() req.table = str_to_bytes(tsobj.table.name) if tsobj.columns: raise NotImplementedError("columns are not implemented yet") if tsobj.rows and isinstance(tsobj.rows, list): for row in tsobj.rows: tsr = req.rows.add() # NB: type TsRow if not isinstance(row, list): raise ValueError("TsObject row must be a list of values") for cell in row: tsc = tsr.cells.add() # NB: type TsCell self.encode_to_ts_cell(cell, tsc) else: raise RiakError("TsObject requires a list of rows") mc = riak.pb.messages.MSG_CODE_TS_PUT_REQ rc = riak.pb.messages.MSG_CODE_TS_PUT_RESP return Msg(mc, req.SerializeToString(), rc)
Fills an TsPutReq message with the appropriate data and metadata from a TsObject. :param tsobj: a TsObject :type tsobj: TsObject :param req: the protobuf message to fill :type req: riak.pb.riak_ts_pb2.TsPutReq
Below is the the instruction that describes the task: ### Input: Fills an TsPutReq message with the appropriate data and metadata from a TsObject. :param tsobj: a TsObject :type tsobj: TsObject :param req: the protobuf message to fill :type req: riak.pb.riak_ts_pb2.TsPutReq ### Response: def encode_timeseries_put(self, tsobj): """ Fills an TsPutReq message with the appropriate data and metadata from a TsObject. :param tsobj: a TsObject :type tsobj: TsObject :param req: the protobuf message to fill :type req: riak.pb.riak_ts_pb2.TsPutReq """ req = riak.pb.riak_ts_pb2.TsPutReq() req.table = str_to_bytes(tsobj.table.name) if tsobj.columns: raise NotImplementedError("columns are not implemented yet") if tsobj.rows and isinstance(tsobj.rows, list): for row in tsobj.rows: tsr = req.rows.add() # NB: type TsRow if not isinstance(row, list): raise ValueError("TsObject row must be a list of values") for cell in row: tsc = tsr.cells.add() # NB: type TsCell self.encode_to_ts_cell(cell, tsc) else: raise RiakError("TsObject requires a list of rows") mc = riak.pb.messages.MSG_CODE_TS_PUT_REQ rc = riak.pb.messages.MSG_CODE_TS_PUT_RESP return Msg(mc, req.SerializeToString(), rc)
def score_samples(self, X): """Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation """ check_is_fitted(self, 'means_') X = check_array(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: return np.array([]), np.empty((0, self.n_components)) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = (log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_)) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) return logprob, responsibilities
Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation
Below is the the instruction that describes the task: ### Input: Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation ### Response: def score_samples(self, X): """Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation """ check_is_fitted(self, 'means_') X = check_array(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: return np.array([]), np.empty((0, self.n_components)) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = (log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_)) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) return logprob, responsibilities
def _tls_auth_encrypt(self, s): """ Return the TLSCiphertext.fragment for AEAD ciphers, i.e. the whole GenericAEADCipher. Also, the additional data is computed right here. """ write_seq_num = struct.pack("!Q", self.tls_session.wcs.seq_num) self.tls_session.wcs.seq_num += 1 add_data = (write_seq_num + pkcs_i2osp(self.type, 1) + pkcs_i2osp(self.version, 2) + pkcs_i2osp(len(s), 2)) return self.tls_session.wcs.cipher.auth_encrypt(s, add_data, write_seq_num)
Return the TLSCiphertext.fragment for AEAD ciphers, i.e. the whole GenericAEADCipher. Also, the additional data is computed right here.
Below is the the instruction that describes the task: ### Input: Return the TLSCiphertext.fragment for AEAD ciphers, i.e. the whole GenericAEADCipher. Also, the additional data is computed right here. ### Response: def _tls_auth_encrypt(self, s): """ Return the TLSCiphertext.fragment for AEAD ciphers, i.e. the whole GenericAEADCipher. Also, the additional data is computed right here. """ write_seq_num = struct.pack("!Q", self.tls_session.wcs.seq_num) self.tls_session.wcs.seq_num += 1 add_data = (write_seq_num + pkcs_i2osp(self.type, 1) + pkcs_i2osp(self.version, 2) + pkcs_i2osp(len(s), 2)) return self.tls_session.wcs.cipher.auth_encrypt(s, add_data, write_seq_num)
def encode_unicode(f): """Cerberus error messages expect regular binary strings. If unicode is used in a ValidationError message can't be printed. This decorator ensures that if legacy Python is used unicode strings are encoded before passing to a function. """ @wraps(f) def wrapped(obj, error): def _encode(value): """Helper encoding unicode strings into binary utf-8""" if isinstance(value, unicode): # noqa: F821 return value.encode('utf-8') return value error = copy(error) error.document_path = _encode(error.document_path) error.schema_path = _encode(error.schema_path) error.constraint = _encode(error.constraint) error.value = _encode(error.value) error.info = _encode(error.info) return f(obj, error) return wrapped if PYTHON_VERSION < 3 else f
Cerberus error messages expect regular binary strings. If unicode is used in a ValidationError message can't be printed. This decorator ensures that if legacy Python is used unicode strings are encoded before passing to a function.
Below is the the instruction that describes the task: ### Input: Cerberus error messages expect regular binary strings. If unicode is used in a ValidationError message can't be printed. This decorator ensures that if legacy Python is used unicode strings are encoded before passing to a function. ### Response: def encode_unicode(f): """Cerberus error messages expect regular binary strings. If unicode is used in a ValidationError message can't be printed. This decorator ensures that if legacy Python is used unicode strings are encoded before passing to a function. """ @wraps(f) def wrapped(obj, error): def _encode(value): """Helper encoding unicode strings into binary utf-8""" if isinstance(value, unicode): # noqa: F821 return value.encode('utf-8') return value error = copy(error) error.document_path = _encode(error.document_path) error.schema_path = _encode(error.schema_path) error.constraint = _encode(error.constraint) error.value = _encode(error.value) error.info = _encode(error.info) return f(obj, error) return wrapped if PYTHON_VERSION < 3 else f
async def _replace(self, key: Text, data: Dict[Text, Any]) -> None: """ Replace the register with a new value. """ with await self.pool as r: await r.set(self.register_key(key), ujson.dumps(data))
Replace the register with a new value.
Below is the the instruction that describes the task: ### Input: Replace the register with a new value. ### Response: async def _replace(self, key: Text, data: Dict[Text, Any]) -> None: """ Replace the register with a new value. """ with await self.pool as r: await r.set(self.register_key(key), ujson.dumps(data))
def main_generate(table_names, stream): """This will print out valid prom python code for given tables that already exist in a database. This is really handy when you want to bootstrap an existing database to work with prom and don't want to manually create Orm objects for the tables you want to use, let `generate` do it for you """ with stream.open() as fp: fp.write_line("from datetime import datetime, date") fp.write_line("from decimal import Decimal") fp.write_line("from prom import Orm, Field") fp.write_newlines() for table_name, inter, fields in get_table_info(*table_names): fp.write_line("class {}(Orm):".format(table_name.title().replace("_", ""))) fp.write_line(" table_name = '{}'".format(table_name)) if inter.connection_config.name: fp.write_line(" connection_name = '{}'".format(inter.connection_config.name)) fp.write_newlines() magic_field_names = set(["_id", "_created", "_updated"]) if "_id" in fields: fp.write_line(get_field_def("_id", fields.pop("_id"))) magic_field_names.discard("_id") for field_name, field_d in fields.items(): fp.write_line(get_field_def(field_name, field_d)) for magic_field_name in magic_field_names: if magic_field_name not in fields: fp.write_line(" {} = None".format(magic_field_name)) fp.write_newlines(2)
This will print out valid prom python code for given tables that already exist in a database. This is really handy when you want to bootstrap an existing database to work with prom and don't want to manually create Orm objects for the tables you want to use, let `generate` do it for you
Below is the the instruction that describes the task: ### Input: This will print out valid prom python code for given tables that already exist in a database. This is really handy when you want to bootstrap an existing database to work with prom and don't want to manually create Orm objects for the tables you want to use, let `generate` do it for you ### Response: def main_generate(table_names, stream): """This will print out valid prom python code for given tables that already exist in a database. This is really handy when you want to bootstrap an existing database to work with prom and don't want to manually create Orm objects for the tables you want to use, let `generate` do it for you """ with stream.open() as fp: fp.write_line("from datetime import datetime, date") fp.write_line("from decimal import Decimal") fp.write_line("from prom import Orm, Field") fp.write_newlines() for table_name, inter, fields in get_table_info(*table_names): fp.write_line("class {}(Orm):".format(table_name.title().replace("_", ""))) fp.write_line(" table_name = '{}'".format(table_name)) if inter.connection_config.name: fp.write_line(" connection_name = '{}'".format(inter.connection_config.name)) fp.write_newlines() magic_field_names = set(["_id", "_created", "_updated"]) if "_id" in fields: fp.write_line(get_field_def("_id", fields.pop("_id"))) magic_field_names.discard("_id") for field_name, field_d in fields.items(): fp.write_line(get_field_def(field_name, field_d)) for magic_field_name in magic_field_names: if magic_field_name not in fields: fp.write_line(" {} = None".format(magic_field_name)) fp.write_newlines(2)
def search(table: LdapObjectClass, query: Optional[Q] = None, database: Optional[Database] = None, base_dn: Optional[str] = None) -> Iterator[LdapObject]: """ Search for a object of given type in the database. """ fields = table.get_fields() db_fields = { name: field for name, field in fields.items() if field.db_field } database = get_database(database) connection = database.connection search_options = table.get_search_options(database) iterator = tldap.query.search( connection=connection, query=query, fields=db_fields, base_dn=base_dn or search_options.base_dn, object_classes=search_options.object_class, pk=search_options.pk_field, ) for dn, data in iterator: python_data = _db_to_python(data, table, dn) python_data = table.on_load(python_data, database) yield python_data
Search for a object of given type in the database.
Below is the the instruction that describes the task: ### Input: Search for a object of given type in the database. ### Response: def search(table: LdapObjectClass, query: Optional[Q] = None, database: Optional[Database] = None, base_dn: Optional[str] = None) -> Iterator[LdapObject]: """ Search for a object of given type in the database. """ fields = table.get_fields() db_fields = { name: field for name, field in fields.items() if field.db_field } database = get_database(database) connection = database.connection search_options = table.get_search_options(database) iterator = tldap.query.search( connection=connection, query=query, fields=db_fields, base_dn=base_dn or search_options.base_dn, object_classes=search_options.object_class, pk=search_options.pk_field, ) for dn, data in iterator: python_data = _db_to_python(data, table, dn) python_data = table.on_load(python_data, database) yield python_data
def add_meta_to_nii(nii_file, dicom_file, dcm_tags=''): """ Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tags values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_files: str Path to the NifTI file to modify. dicom_file: str Paths to the DICOM file from where to get the meta data. dcm_tags: list of str List of tags from the DICOM file to read and store in the nifti file. """ # Load a dicom image dcmimage = dicom.read_file(dicom_file) # Load the nifti1 image image = nibabel.load(nii_file) # Check the we have a nifti1 format image if not isinstance(image, nibabel.nifti1.Nifti1Image): raise Exception( "Only Nifti1 image are supported not '{0}'.".format( type(image))) # check if dcm_tags is one string, if yes put it in a list: if isinstance(dcm_tags, str): dcm_tags = [dcm_tags] # Fill the nifti1 header header = image.get_header() # slice_duration: Time for 1 slice repetition_time = float(dcmimage[("0x0018", "0x0080")].value) header.set_dim_info(slice=2) nb_slices = header.get_n_slices() # Force round to 0 digit after coma. If more, nibabel completes to # 6 digits with random numbers... slice_duration = round(repetition_time / nb_slices, 0) header.set_slice_duration(slice_duration) # add free dicom fields if dcm_tags: content = ["{0}={1}".format(name, dcmimage[tag].value) for name, tag in dcm_tags] free_field = numpy.array(";".join(content), dtype=header["descrip"].dtype) image.get_header()["descrip"] = free_field # Update the image header image.update_header() # Save the filled image nibabel.save(image, nii_file)
Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tags values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_files: str Path to the NifTI file to modify. dicom_file: str Paths to the DICOM file from where to get the meta data. dcm_tags: list of str List of tags from the DICOM file to read and store in the nifti file.
Below is the the instruction that describes the task: ### Input: Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tags values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_files: str Path to the NifTI file to modify. dicom_file: str Paths to the DICOM file from where to get the meta data. dcm_tags: list of str List of tags from the DICOM file to read and store in the nifti file. ### Response: def add_meta_to_nii(nii_file, dicom_file, dcm_tags=''): """ Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tags values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_files: str Path to the NifTI file to modify. dicom_file: str Paths to the DICOM file from where to get the meta data. dcm_tags: list of str List of tags from the DICOM file to read and store in the nifti file. 
""" # Load a dicom image dcmimage = dicom.read_file(dicom_file) # Load the nifti1 image image = nibabel.load(nii_file) # Check the we have a nifti1 format image if not isinstance(image, nibabel.nifti1.Nifti1Image): raise Exception( "Only Nifti1 image are supported not '{0}'.".format( type(image))) # check if dcm_tags is one string, if yes put it in a list: if isinstance(dcm_tags, str): dcm_tags = [dcm_tags] # Fill the nifti1 header header = image.get_header() # slice_duration: Time for 1 slice repetition_time = float(dcmimage[("0x0018", "0x0080")].value) header.set_dim_info(slice=2) nb_slices = header.get_n_slices() # Force round to 0 digit after coma. If more, nibabel completes to # 6 digits with random numbers... slice_duration = round(repetition_time / nb_slices, 0) header.set_slice_duration(slice_duration) # add free dicom fields if dcm_tags: content = ["{0}={1}".format(name, dcmimage[tag].value) for name, tag in dcm_tags] free_field = numpy.array(";".join(content), dtype=header["descrip"].dtype) image.get_header()["descrip"] = free_field # Update the image header image.update_header() # Save the filled image nibabel.save(image, nii_file)
def push_note(device=None, title=None, body=None): ''' Pushing a text note. :param device: Pushbullet target device :param title: Note title :param body: Note body :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body." ''' spb = _SaltPushbullet(device) res = spb.push_note(title, body) return res
Pushing a text note. :param device: Pushbullet target device :param title: Note title :param body: Note body :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body."
Below is the the instruction that describes the task: ### Input: Pushing a text note. :param device: Pushbullet target device :param title: Note title :param body: Note body :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body." ### Response: def push_note(device=None, title=None, body=None): ''' Pushing a text note. :param device: Pushbullet target device :param title: Note title :param body: Note body :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body." ''' spb = _SaltPushbullet(device) res = spb.push_note(title, body) return res
def save(self) -> None: """ Saves all changed values to the database. """ for name, field in self.fields.items(): value = self.cleaned_data[name] if isinstance(value, UploadedFile): # Delete old file fname = self._s.get(name, as_type=File) if fname: try: default_storage.delete(fname.name) except OSError: # pragma: no cover logger.error('Deleting file %s failed.' % fname.name) # Create new file newname = default_storage.save(self.get_new_filename(value.name), value) value._name = newname self._s.set(name, value) elif isinstance(value, File): # file is unchanged continue elif isinstance(field, forms.FileField): # file is deleted fname = self._s.get(name, as_type=File) if fname: try: default_storage.delete(fname.name) except OSError: # pragma: no cover logger.error('Deleting file %s failed.' % fname.name) del self._s[name] elif value is None: del self._s[name] elif self._s.get(name, as_type=type(value)) != value: self._s.set(name, value)
Saves all changed values to the database.
Below is the the instruction that describes the task: ### Input: Saves all changed values to the database. ### Response: def save(self) -> None: """ Saves all changed values to the database. """ for name, field in self.fields.items(): value = self.cleaned_data[name] if isinstance(value, UploadedFile): # Delete old file fname = self._s.get(name, as_type=File) if fname: try: default_storage.delete(fname.name) except OSError: # pragma: no cover logger.error('Deleting file %s failed.' % fname.name) # Create new file newname = default_storage.save(self.get_new_filename(value.name), value) value._name = newname self._s.set(name, value) elif isinstance(value, File): # file is unchanged continue elif isinstance(field, forms.FileField): # file is deleted fname = self._s.get(name, as_type=File) if fname: try: default_storage.delete(fname.name) except OSError: # pragma: no cover logger.error('Deleting file %s failed.' % fname.name) del self._s[name] elif value is None: del self._s[name] elif self._s.get(name, as_type=type(value)) != value: self._s.set(name, value)
def set_terminal_width(self, command="", delay_factor=1): """CLI terminals try to automatically adjust the line based on the width of the terminal. This causes the output to get distorted when accessed programmatically. Set terminal width to 511 which works on a broad set of devices. :param command: Command string to send to the device :type command: str :param delay_factor: See __init__: global_delay_factor :type delay_factor: int """ if not command: return "" delay_factor = self.select_delay_factor(delay_factor) command = self.normalize_cmd(command) self.write_channel(command) output = self.read_until_prompt() if self.ansi_escape_codes: output = self.strip_ansi_escape_codes(output) return output
CLI terminals try to automatically adjust the line based on the width of the terminal. This causes the output to get distorted when accessed programmatically. Set terminal width to 511 which works on a broad set of devices. :param command: Command string to send to the device :type command: str :param delay_factor: See __init__: global_delay_factor :type delay_factor: int
Below is the the instruction that describes the task: ### Input: CLI terminals try to automatically adjust the line based on the width of the terminal. This causes the output to get distorted when accessed programmatically. Set terminal width to 511 which works on a broad set of devices. :param command: Command string to send to the device :type command: str :param delay_factor: See __init__: global_delay_factor :type delay_factor: int ### Response: def set_terminal_width(self, command="", delay_factor=1): """CLI terminals try to automatically adjust the line based on the width of the terminal. This causes the output to get distorted when accessed programmatically. Set terminal width to 511 which works on a broad set of devices. :param command: Command string to send to the device :type command: str :param delay_factor: See __init__: global_delay_factor :type delay_factor: int """ if not command: return "" delay_factor = self.select_delay_factor(delay_factor) command = self.normalize_cmd(command) self.write_channel(command) output = self.read_until_prompt() if self.ansi_escape_codes: output = self.strip_ansi_escape_codes(output) return output
def condition_input(args, kwargs): ''' Return a single arg structure for the publisher to safely use ''' ret = [] for arg in args: if (six.PY3 and isinstance(arg, six.integer_types) and salt.utils.jid.is_jid(six.text_type(arg))) or \ (six.PY2 and isinstance(arg, long)): # pylint: disable=incompatible-py3-code,undefined-variable ret.append(six.text_type(arg)) else: ret.append(arg) if isinstance(kwargs, dict) and kwargs: kw_ = {'__kwarg__': True} for key, val in six.iteritems(kwargs): kw_[key] = val return ret + [kw_] return ret
Return a single arg structure for the publisher to safely use
Below is the the instruction that describes the task: ### Input: Return a single arg structure for the publisher to safely use ### Response: def condition_input(args, kwargs): ''' Return a single arg structure for the publisher to safely use ''' ret = [] for arg in args: if (six.PY3 and isinstance(arg, six.integer_types) and salt.utils.jid.is_jid(six.text_type(arg))) or \ (six.PY2 and isinstance(arg, long)): # pylint: disable=incompatible-py3-code,undefined-variable ret.append(six.text_type(arg)) else: ret.append(arg) if isinstance(kwargs, dict) and kwargs: kw_ = {'__kwarg__': True} for key, val in six.iteritems(kwargs): kw_[key] = val return ret + [kw_] return ret
def get_instance(self, payload): """ Build an instance of WorkflowCumulativeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance """ return WorkflowCumulativeStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], workflow_sid=self._solution['workflow_sid'], )
Build an instance of WorkflowCumulativeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance
Below is the the instruction that describes the task: ### Input: Build an instance of WorkflowCumulativeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance ### Response: def get_instance(self, payload): """ Build an instance of WorkflowCumulativeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance """ return WorkflowCumulativeStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], workflow_sid=self._solution['workflow_sid'], )
def confd_state_internal_callpoints_validationpoint_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") callpoints = ET.SubElement(internal, "callpoints") validationpoint = ET.SubElement(callpoints, "validationpoint") id = ET.SubElement(validationpoint, "id") id.text = kwargs.pop('id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def confd_state_internal_callpoints_validationpoint_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") callpoints = ET.SubElement(internal, "callpoints") validationpoint = ET.SubElement(callpoints, "validationpoint") id = ET.SubElement(validationpoint, "id") id.text = kwargs.pop('id') callback = kwargs.pop('callback', self._callback) return callback(config)
def _paint_margin(self, event): """ Paints the right margin after editor paint event. """ font = QtGui.QFont(self.editor.font_name, self.editor.font_size + self.editor.zoom_level) metrics = QtGui.QFontMetricsF(font) pos = self._margin_pos offset = self.editor.contentOffset().x() + \ self.editor.document().documentMargin() x80 = round(metrics.width(' ') * pos) + offset painter = QtGui.QPainter(self.editor.viewport()) painter.setPen(self._pen) painter.drawLine(x80, 0, x80, 2 ** 16)
Paints the right margin after editor paint event.
Below is the the instruction that describes the task: ### Input: Paints the right margin after editor paint event. ### Response: def _paint_margin(self, event): """ Paints the right margin after editor paint event. """ font = QtGui.QFont(self.editor.font_name, self.editor.font_size + self.editor.zoom_level) metrics = QtGui.QFontMetricsF(font) pos = self._margin_pos offset = self.editor.contentOffset().x() + \ self.editor.document().documentMargin() x80 = round(metrics.width(' ') * pos) + offset painter = QtGui.QPainter(self.editor.viewport()) painter.setPen(self._pen) painter.drawLine(x80, 0, x80, 2 ** 16)
def save(self): """ Saves or updates the current tailored audience permission. """ if self.id: method = 'put' resource = self.RESOURCE.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id, id=self.id) else: method = 'post' resource = self.RESOURCE_COLLECTION.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id) response = Request( self.account.client, method, resource, params=self.to_params()).perform() return self.from_response(response.body['data'])
Saves or updates the current tailored audience permission.
Below is the the instruction that describes the task: ### Input: Saves or updates the current tailored audience permission. ### Response: def save(self): """ Saves or updates the current tailored audience permission. """ if self.id: method = 'put' resource = self.RESOURCE.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id, id=self.id) else: method = 'post' resource = self.RESOURCE_COLLECTION.format( account_id=self.account.id, tailored_audience_id=self.tailored_audience_id) response = Request( self.account.client, method, resource, params=self.to_params()).perform() return self.from_response(response.body['data'])
def id(self): """ Computes the signature of the record, a SHA-512 of significant values :return: SHa-512 Hex string """ h = hashlib.new('sha512') for value in (self.machine.name, self.machine.os, self.user, self.application.name, self.application.path, self.event.report_type, self.event.type, self.event.time.isoformat()): h.update(str(value).encode('utf-8')) for parameter in sorted(self.parameters, key=lambda k: getattr(k, 'id')): h.update(parameter.value.encode('utf-8')) return h.hexdigest()
Computes the signature of the record, a SHA-512 of significant values :return: SHa-512 Hex string
Below is the the instruction that describes the task: ### Input: Computes the signature of the record, a SHA-512 of significant values :return: SHa-512 Hex string ### Response: def id(self): """ Computes the signature of the record, a SHA-512 of significant values :return: SHa-512 Hex string """ h = hashlib.new('sha512') for value in (self.machine.name, self.machine.os, self.user, self.application.name, self.application.path, self.event.report_type, self.event.type, self.event.time.isoformat()): h.update(str(value).encode('utf-8')) for parameter in sorted(self.parameters, key=lambda k: getattr(k, 'id')): h.update(parameter.value.encode('utf-8')) return h.hexdigest()
def patch(self, force=False): """Patch local_settings.py.example with local_settings.diff. The patch application generates the local_settings.py file (the local_settings.py.example remains unchanged). http://github.com/sitkatech/pypatch fails if the local_settings.py.example file is not 100% identical to the one used to generate the first diff so we use the patch command instead. """ with DirContext(self.local_settings_dir) as dircontext: if os.path.exists(self.local_settings_diff): if not os.path.exists(self.local_settings_file) or force: local_settings_reject = \ self.local_settings_reject_pattern % ( time.strftime(self.file_time_fmt, time.localtime()) ) patch_cmd = shlex.split( 'patch %s %s -o %s -r %s' % ( self.local_settings_example, self.local_settings_diff, self.local_settings_file, local_settings_reject ) ) try: subprocess.check_call(patch_cmd) except subprocess.CalledProcessError: if os.path.exists(local_settings_reject): sys.exit( 'Some conflict(s) occurred. Please check "%s" ' 'to find unapplied parts of the diff.\n' 'Once conflicts are solved, it is safer to ' 'regenerate a newer diff with the "--gendiff" ' 'option.' % os.path.join( dircontext.curdir, local_settings_reject) ) else: sys.exit('An unhandled error occurred.') print('Generation of "%s" successful.' % os.path.join( dircontext.curdir, self.local_settings_file) ) sys.exit(0) else: sys.exit( '"%s" already exists.' % os.path.join(dircontext.curdir, self.local_settings_file) ) else: sys.exit('No diff file found, please generate one with the ' '"--gendiff" option.')
Patch local_settings.py.example with local_settings.diff. The patch application generates the local_settings.py file (the local_settings.py.example remains unchanged). http://github.com/sitkatech/pypatch fails if the local_settings.py.example file is not 100% identical to the one used to generate the first diff so we use the patch command instead.
Below is the the instruction that describes the task: ### Input: Patch local_settings.py.example with local_settings.diff. The patch application generates the local_settings.py file (the local_settings.py.example remains unchanged). http://github.com/sitkatech/pypatch fails if the local_settings.py.example file is not 100% identical to the one used to generate the first diff so we use the patch command instead. ### Response: def patch(self, force=False): """Patch local_settings.py.example with local_settings.diff. The patch application generates the local_settings.py file (the local_settings.py.example remains unchanged). http://github.com/sitkatech/pypatch fails if the local_settings.py.example file is not 100% identical to the one used to generate the first diff so we use the patch command instead. """ with DirContext(self.local_settings_dir) as dircontext: if os.path.exists(self.local_settings_diff): if not os.path.exists(self.local_settings_file) or force: local_settings_reject = \ self.local_settings_reject_pattern % ( time.strftime(self.file_time_fmt, time.localtime()) ) patch_cmd = shlex.split( 'patch %s %s -o %s -r %s' % ( self.local_settings_example, self.local_settings_diff, self.local_settings_file, local_settings_reject ) ) try: subprocess.check_call(patch_cmd) except subprocess.CalledProcessError: if os.path.exists(local_settings_reject): sys.exit( 'Some conflict(s) occurred. Please check "%s" ' 'to find unapplied parts of the diff.\n' 'Once conflicts are solved, it is safer to ' 'regenerate a newer diff with the "--gendiff" ' 'option.' % os.path.join( dircontext.curdir, local_settings_reject) ) else: sys.exit('An unhandled error occurred.') print('Generation of "%s" successful.' % os.path.join( dircontext.curdir, self.local_settings_file) ) sys.exit(0) else: sys.exit( '"%s" already exists.' % os.path.join(dircontext.curdir, self.local_settings_file) ) else: sys.exit('No diff file found, please generate one with the ' '"--gendiff" option.')
def total_return(self): """http://en.wikipedia.org/wiki/Total_shareholder_return - mimics bloomberg total return""" pxend = self.close pxstart = pxend.shift(1).bfill() return (1. + (pxend - pxstart + self.dvds.fillna(0)) / pxstart).cumprod() - 1
http://en.wikipedia.org/wiki/Total_shareholder_return - mimics bloomberg total return
Below is the the instruction that describes the task: ### Input: http://en.wikipedia.org/wiki/Total_shareholder_return - mimics bloomberg total return ### Response: def total_return(self): """http://en.wikipedia.org/wiki/Total_shareholder_return - mimics bloomberg total return""" pxend = self.close pxstart = pxend.shift(1).bfill() return (1. + (pxend - pxstart + self.dvds.fillna(0)) / pxstart).cumprod() - 1
def get_all_names() -> Tuple[str]: """ Retrieve a tuple of all known color names, basic and 'known names'. """ names = list(basic_names) names.extend(name_data) return tuple(sorted(set(names)))
Retrieve a tuple of all known color names, basic and 'known names'.
Below is the the instruction that describes the task: ### Input: Retrieve a tuple of all known color names, basic and 'known names'. ### Response: def get_all_names() -> Tuple[str]: """ Retrieve a tuple of all known color names, basic and 'known names'. """ names = list(basic_names) names.extend(name_data) return tuple(sorted(set(names)))
def CreateSmartShoppingAdGroup(client, campaign_id): """Adds a new Smart Shopping ad group. Args: client: an AdWordsClient instance. campaign_id: the str ID of a Smart Shopping campaign. Returns: An ad group ID. """ ad_group_service = client.GetService('AdGroupService', version='v201809') # Create the ad group. ad_group = { 'campaignId': campaign_id, 'name': 'Smart Shopping ad group #%s' % uuid.uuid4(), # Set the ad group type to SHOPPING_GOAL_OPTIMIZED_ADS. 'adGroupType': 'SHOPPING_GOAL_OPTIMIZED_ADS' } adgroup_operations = { 'operator': 'ADD', 'operand': ad_group } # Make the mutate request to add the AdGroup to the Smart Shopping campaign. ad_group = ad_group_service.mutate(adgroup_operations)['value'][0] ad_group_id = ad_group['id'] print ('AdGroup with name "%s" and ID "%s" was added.' % (ad_group['name'], ad_group_id)) return ad_group_id
Adds a new Smart Shopping ad group. Args: client: an AdWordsClient instance. campaign_id: the str ID of a Smart Shopping campaign. Returns: An ad group ID.
Below is the the instruction that describes the task: ### Input: Adds a new Smart Shopping ad group. Args: client: an AdWordsClient instance. campaign_id: the str ID of a Smart Shopping campaign. Returns: An ad group ID. ### Response: def CreateSmartShoppingAdGroup(client, campaign_id): """Adds a new Smart Shopping ad group. Args: client: an AdWordsClient instance. campaign_id: the str ID of a Smart Shopping campaign. Returns: An ad group ID. """ ad_group_service = client.GetService('AdGroupService', version='v201809') # Create the ad group. ad_group = { 'campaignId': campaign_id, 'name': 'Smart Shopping ad group #%s' % uuid.uuid4(), # Set the ad group type to SHOPPING_GOAL_OPTIMIZED_ADS. 'adGroupType': 'SHOPPING_GOAL_OPTIMIZED_ADS' } adgroup_operations = { 'operator': 'ADD', 'operand': ad_group } # Make the mutate request to add the AdGroup to the Smart Shopping campaign. ad_group = ad_group_service.mutate(adgroup_operations)['value'][0] ad_group_id = ad_group['id'] print ('AdGroup with name "%s" and ID "%s" was added.' % (ad_group['name'], ad_group_id)) return ad_group_id
def do_heavy_work(self, block): """ Note: Expects Compressor Block like objects """ src_file_path = block.latest_file_info.path img_path = src_file_path + self.get_extension() self.log.debug("Converting file '%s' to image '%s'", src_file_path, img_path) from_file_to_image(src_file_path, img_path) block.image_converted_file_info = FileInfo(img_path) block.latest_file_info = block.image_converted_file_info return block
Note: Expects Compressor Block like objects
Below is the the instruction that describes the task: ### Input: Note: Expects Compressor Block like objects ### Response: def do_heavy_work(self, block): """ Note: Expects Compressor Block like objects """ src_file_path = block.latest_file_info.path img_path = src_file_path + self.get_extension() self.log.debug("Converting file '%s' to image '%s'", src_file_path, img_path) from_file_to_image(src_file_path, img_path) block.image_converted_file_info = FileInfo(img_path) block.latest_file_info = block.image_converted_file_info return block
def get_categories(self, app_name): """ Returns a list of the categories for an app name. """ cat_nums = self.apps.get(app_name, {}).get("cats", []) cat_names = [self.categories.get("%s" % cat_num, "") for cat_num in cat_nums] return cat_names
Returns a list of the categories for an app name.
Below is the the instruction that describes the task: ### Input: Returns a list of the categories for an app name. ### Response: def get_categories(self, app_name): """ Returns a list of the categories for an app name. """ cat_nums = self.apps.get(app_name, {}).get("cats", []) cat_names = [self.categories.get("%s" % cat_num, "") for cat_num in cat_nums] return cat_names
def _extend_with_api(test_dict, api_def_dict): """ extend test with api definition, test will merge and override api definition. Args: test_dict (dict): test block, this will override api_def_dict api_def_dict (dict): api definition Examples: >>> api_def_dict = { "name": "get token 1", "request": {...}, "validate": [{'eq': ['status_code', 200]}] } >>> test_dict = { "name": "get token 2", "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } >>> _extend_with_api(test_dict, api_def_dict) >>> print(test_dict) { "name": "get token 2", "request": {...}, "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } """ # override api name test_dict.setdefault("name", api_def_dict.pop("name", "api name undefined")) # override variables def_variables = api_def_dict.pop("variables", []) test_dict["variables"] = utils.extend_variables( def_variables, test_dict.get("variables", {}) ) # merge & override validators TODO: relocate def_raw_validators = api_def_dict.pop("validate", []) def_validators = [ validator.uniform_validator(_validator) for _validator in def_raw_validators ] ref_validators = test_dict.pop("validate", []) test_dict["validate"] = validator.extend_validators( def_validators, ref_validators ) # merge & override extractors def_extrators = api_def_dict.pop("extract", {}) test_dict["extract"] = utils.extend_variables( def_extrators, test_dict.get("extract", {}) ) # merge & override request test_dict["request"] = api_def_dict.pop("request", {}) # base_url & verify: priority api_def_dict > test_dict if api_def_dict.get("base_url"): test_dict["base_url"] = api_def_dict["base_url"] if "verify" in api_def_dict: test_dict["request"]["verify"] = api_def_dict["verify"] # merge & override setup_hooks def_setup_hooks = api_def_dict.pop("setup_hooks", []) ref_setup_hooks = test_dict.get("setup_hooks", []) extended_setup_hooks = list(set(def_setup_hooks + 
ref_setup_hooks)) if extended_setup_hooks: test_dict["setup_hooks"] = extended_setup_hooks # merge & override teardown_hooks def_teardown_hooks = api_def_dict.pop("teardown_hooks", []) ref_teardown_hooks = test_dict.get("teardown_hooks", []) extended_teardown_hooks = list(set(def_teardown_hooks + ref_teardown_hooks)) if extended_teardown_hooks: test_dict["teardown_hooks"] = extended_teardown_hooks # TODO: extend with other api definition items, e.g. times test_dict.update(api_def_dict)
extend test with api definition, test will merge and override api definition. Args: test_dict (dict): test block, this will override api_def_dict api_def_dict (dict): api definition Examples: >>> api_def_dict = { "name": "get token 1", "request": {...}, "validate": [{'eq': ['status_code', 200]}] } >>> test_dict = { "name": "get token 2", "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } >>> _extend_with_api(test_dict, api_def_dict) >>> print(test_dict) { "name": "get token 2", "request": {...}, "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] }
Below is the the instruction that describes the task: ### Input: extend test with api definition, test will merge and override api definition. Args: test_dict (dict): test block, this will override api_def_dict api_def_dict (dict): api definition Examples: >>> api_def_dict = { "name": "get token 1", "request": {...}, "validate": [{'eq': ['status_code', 200]}] } >>> test_dict = { "name": "get token 2", "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } >>> _extend_with_api(test_dict, api_def_dict) >>> print(test_dict) { "name": "get token 2", "request": {...}, "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } ### Response: def _extend_with_api(test_dict, api_def_dict): """ extend test with api definition, test will merge and override api definition. Args: test_dict (dict): test block, this will override api_def_dict api_def_dict (dict): api definition Examples: >>> api_def_dict = { "name": "get token 1", "request": {...}, "validate": [{'eq': ['status_code', 200]}] } >>> test_dict = { "name": "get token 2", "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } >>> _extend_with_api(test_dict, api_def_dict) >>> print(test_dict) { "name": "get token 2", "request": {...}, "extract": {"token": "content.token"}, "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } """ # override api name test_dict.setdefault("name", api_def_dict.pop("name", "api name undefined")) # override variables def_variables = api_def_dict.pop("variables", []) test_dict["variables"] = utils.extend_variables( def_variables, test_dict.get("variables", {}) ) # merge & override validators TODO: relocate def_raw_validators = api_def_dict.pop("validate", []) def_validators = [ validator.uniform_validator(_validator) for _validator in def_raw_validators ] ref_validators = 
test_dict.pop("validate", []) test_dict["validate"] = validator.extend_validators( def_validators, ref_validators ) # merge & override extractors def_extrators = api_def_dict.pop("extract", {}) test_dict["extract"] = utils.extend_variables( def_extrators, test_dict.get("extract", {}) ) # merge & override request test_dict["request"] = api_def_dict.pop("request", {}) # base_url & verify: priority api_def_dict > test_dict if api_def_dict.get("base_url"): test_dict["base_url"] = api_def_dict["base_url"] if "verify" in api_def_dict: test_dict["request"]["verify"] = api_def_dict["verify"] # merge & override setup_hooks def_setup_hooks = api_def_dict.pop("setup_hooks", []) ref_setup_hooks = test_dict.get("setup_hooks", []) extended_setup_hooks = list(set(def_setup_hooks + ref_setup_hooks)) if extended_setup_hooks: test_dict["setup_hooks"] = extended_setup_hooks # merge & override teardown_hooks def_teardown_hooks = api_def_dict.pop("teardown_hooks", []) ref_teardown_hooks = test_dict.get("teardown_hooks", []) extended_teardown_hooks = list(set(def_teardown_hooks + ref_teardown_hooks)) if extended_teardown_hooks: test_dict["teardown_hooks"] = extended_teardown_hooks # TODO: extend with other api definition items, e.g. times test_dict.update(api_def_dict)
def get_episode_name(self, series, episode_numbers, season_number): """Perform lookup for name of episode numbers for a given series. :param object series: instance of a series :param list episode_numbers: the episode sequence number :param int season_number: numeric season of series :returns: list of episode name :rtype: list(str) """ try: episodes = self.api.get_episodes(series.get('id'), airedSeason=season_number) except exceptions.TVDBRequestException as err: LOG.exception('episodes for series %s season no %s failed', series.get('id'), season_number) return None, _as_str(err) epnames = [] for epno in episode_numbers: epname = _get_epname(episodes, epno) if epname is None: epname = _get_epname(episodes, epno, absolute=True) if epname is None: return None, None epnames.append(epname) return epnames, None
Perform lookup for name of episode numbers for a given series. :param object series: instance of a series :param list episode_numbers: the episode sequence number :param int season_number: numeric season of series :returns: list of episode name :rtype: list(str)
Below is the the instruction that describes the task: ### Input: Perform lookup for name of episode numbers for a given series. :param object series: instance of a series :param list episode_numbers: the episode sequence number :param int season_number: numeric season of series :returns: list of episode name :rtype: list(str) ### Response: def get_episode_name(self, series, episode_numbers, season_number): """Perform lookup for name of episode numbers for a given series. :param object series: instance of a series :param list episode_numbers: the episode sequence number :param int season_number: numeric season of series :returns: list of episode name :rtype: list(str) """ try: episodes = self.api.get_episodes(series.get('id'), airedSeason=season_number) except exceptions.TVDBRequestException as err: LOG.exception('episodes for series %s season no %s failed', series.get('id'), season_number) return None, _as_str(err) epnames = [] for epno in episode_numbers: epname = _get_epname(episodes, epno) if epname is None: epname = _get_epname(episodes, epno, absolute=True) if epname is None: return None, None epnames.append(epname) return epnames, None
def close(self): """ if this was a zip'd distribution, any introspection may have resulted in opening or creating temporary files. Call close in order to clean up. """ if self.tmpdir: rmtree(self.tmpdir) self.tmpdir = None self._contents = None
if this was a zip'd distribution, any introspection may have resulted in opening or creating temporary files. Call close in order to clean up.
Below is the the instruction that describes the task: ### Input: if this was a zip'd distribution, any introspection may have resulted in opening or creating temporary files. Call close in order to clean up. ### Response: def close(self): """ if this was a zip'd distribution, any introspection may have resulted in opening or creating temporary files. Call close in order to clean up. """ if self.tmpdir: rmtree(self.tmpdir) self.tmpdir = None self._contents = None
def delete_gauge(self, slug):
    """Remove every gauge stored under ``slug``.

    Deletes the gauge's redis key and then drops the slug from the set
    of known gauge slugs.
    """
    # Drop the gauge value itself, then forget its slug.
    self.r.delete(self._gauge_key(slug))
    self.r.srem(self._gauge_slugs_key, slug)
Removes all gauges with the given ``slug``.
Below is the the instruction that describes the task: ### Input: Removes all gauges with the given ``slug``. ### Response: def delete_gauge(self, slug): """Removes all gauges with the given ``slug``.""" key = self._gauge_key(slug) self.r.delete(key) # Remove the Gauge self.r.srem(self._gauge_slugs_key, slug)
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
    """Render the table as a Google Visualization API JSON response.

    The returned string is a call to *response_handler* wrapping a JSON
    object that carries the table, and can be served as-is to a page
    hosting a visualization that issued a Query.

    Args:
      columns_order: Optional. Passed straight to self.ToJSon().
      order_by: Optional. Passed straight to self.ToJSon().
      req_id: Optional. The response id, as retrieved by the request.
      response_handler: Optional. The JS callback name, as retrieved by
        the request.

    Returns:
      A string such as
      ``google.visualization.Query.setResponse({'version':'0.6', ...});``
      which the client-side Query object turns back into a DataTable.
    """
    payload = {
        "version": "0.6",
        "reqId": str(req_id),
        "table": self._ToJSonObj(columns_order, order_by),
        "status": "ok"
    }
    encoded = DataTableJSONEncoder().encode(payload)
    # On Python 2 the encoder may return unicode; coerce to a byte string
    # so the "%s" interpolation below behaves consistently.
    if not isinstance(encoded, str):
        encoded = encoded.encode("utf-8")
    return "%s(%s);" % (response_handler, encoded)
Writes a table as a JSON response that can be returned as-is to a client. This method writes a JSON response to return to a client in response to a Google Visualization API query. This string can be processed by the calling page, and is used to deliver a data table to a visualization hosted on a different page. Args: columns_order: Optional. Passed straight to self.ToJSon(). order_by: Optional. Passed straight to self.ToJSon(). req_id: Optional. The response id, as retrieved by the request. response_handler: Optional. The response handler, as retrieved by the request. Returns: A JSON response string to be received by JS the visualization Query object. This response would be translated into a DataTable on the client side. Example result (newlines added for readability): google.visualization.Query.setResponse({ 'version':'0.6', 'reqId':'0', 'status':'OK', 'table': {cols: [...], rows: [...]}}); Note: The URL returning this string can be used as a data source by Google Visualization Gadgets or from JS code.
Below is the the instruction that describes the task: ### Input: Writes a table as a JSON response that can be returned as-is to a client. This method writes a JSON response to return to a client in response to a Google Visualization API query. This string can be processed by the calling page, and is used to deliver a data table to a visualization hosted on a different page. Args: columns_order: Optional. Passed straight to self.ToJSon(). order_by: Optional. Passed straight to self.ToJSon(). req_id: Optional. The response id, as retrieved by the request. response_handler: Optional. The response handler, as retrieved by the request. Returns: A JSON response string to be received by JS the visualization Query object. This response would be translated into a DataTable on the client side. Example result (newlines added for readability): google.visualization.Query.setResponse({ 'version':'0.6', 'reqId':'0', 'status':'OK', 'table': {cols: [...], rows: [...]}}); Note: The URL returning this string can be used as a data source by Google Visualization Gadgets or from JS code. ### Response: def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0, response_handler="google.visualization.Query.setResponse"): """Writes a table as a JSON response that can be returned as-is to a client. This method writes a JSON response to return to a client in response to a Google Visualization API query. This string can be processed by the calling page, and is used to deliver a data table to a visualization hosted on a different page. Args: columns_order: Optional. Passed straight to self.ToJSon(). order_by: Optional. Passed straight to self.ToJSon(). req_id: Optional. The response id, as retrieved by the request. response_handler: Optional. The response handler, as retrieved by the request. Returns: A JSON response string to be received by JS the visualization Query object. This response would be translated into a DataTable on the client side. 
Example result (newlines added for readability): google.visualization.Query.setResponse({ 'version':'0.6', 'reqId':'0', 'status':'OK', 'table': {cols: [...], rows: [...]}}); Note: The URL returning this string can be used as a data source by Google Visualization Gadgets or from JS code. """ response_obj = { "version": "0.6", "reqId": str(req_id), "table": self._ToJSonObj(columns_order, order_by), "status": "ok" } encoded_response_str = DataTableJSONEncoder().encode(response_obj) if not isinstance(encoded_response_str, str): encoded_response_str = encoded_response_str.encode("utf-8") return "%s(%s);" % (response_handler, encoded_response_str)
def chunks(l, n):
    """Yield successive n-sized chunks from *l*.

    The final chunk is shorter than *n* when ``len(l)`` is not an exact
    multiple of *n*.

    :param l: a sliceable sequence (list, str, tuple, ...)
    :param n: positive chunk size
    :returns: generator of consecutive slices of *l*
    """
    # The builtin range is already lazy on Python 3 and iterable on
    # Python 2, so the private ``_range`` compatibility alias is not needed.
    for i in range(0, len(l), n):
        yield l[i:i + n]
Yields successive n-sized chunks from l.
Below is the instruction that describes the task: ### Input: Yields successive n-sized chunks from l. ### Response: def chunks(l, n): """ Yields successive n-sized chunks from l. """ for i in _range(0, len(l), n): yield l[i:i + n]
def debug(self, nest_level=1):
    """Print this node and its children as an indented tree.

    :param nest_level: current indentation depth; children are printed
        two levels deeper
    """
    pad = ' ' * nest_level
    print('%s%s Object #%s' % (pad, type_name(self), id(self)))
    print('%s Children:' % (pad,))
    for kid in self._children:
        kid.debug(nest_level + 2)
Show the binary data and parsed data in a tree structure
Below is the the instruction that describes the task: ### Input: Show the binary data and parsed data in a tree structure ### Response: def debug(self, nest_level=1): """ Show the binary data and parsed data in a tree structure """ prefix = ' ' * nest_level print('%s%s Object #%s' % (prefix, type_name(self), id(self))) print('%s Children:' % (prefix,)) for child in self._children: child.debug(nest_level + 2)
def injector_gear_2_json(self):
    """Serialize this gear as a plain JSON-ready dict.

    :return: dict carrying the gear's id, name, admin queue, description
        and running flag (the flag is stringified as 'true'/'false')
    """
    LOGGER.debug("InjectorCachedGear.injector_gear_2_json")
    return {
        'gearId': self.id,
        'gearName': self.name,
        'gearAdminQueue': self.admin_queue,
        'gearDescription': self.description,
        'running': 'true' if self.running else 'false'
    }
transform this local object to JSON. :return: the JSON from this local object
Below is the the instruction that describes the task: ### Input: transform this local object to JSON. :return: the JSON from this local object ### Response: def injector_gear_2_json(self): """ transform this local object to JSON. :return: the JSON from this local object """ LOGGER.debug("InjectorCachedGear.injector_gear_2_json") json_obj = { 'gearId': self.id, 'gearName': self.name, 'gearAdminQueue': self.admin_queue, 'gearDescription': self.description, 'running': 'true' if self.running else 'false' } return json_obj
def h2o_mean_squared_error(y_actual, y_predicted, weights=None):
    """Mean squared error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights — currently unused by the
        implementation.
    :returns: mean squared error loss (best is 0.0).
    """
    ModelBase._check_targets(y_actual, y_predicted)
    delta = y_predicted - y_actual
    return _colmean(delta ** 2)
Mean squared error regression loss :param y_actual: H2OFrame of actual response. :param y_predicted: H2OFrame of predicted response. :param weights: (Optional) sample weights :returns: mean squared error loss (best is 0.0).
Below is the the instruction that describes the task: ### Input: Mean squared error regression loss :param y_actual: H2OFrame of actual response. :param y_predicted: H2OFrame of predicted response. :param weights: (Optional) sample weights :returns: mean squared error loss (best is 0.0). ### Response: def h2o_mean_squared_error(y_actual, y_predicted, weights=None): """ Mean squared error regression loss :param y_actual: H2OFrame of actual response. :param y_predicted: H2OFrame of predicted response. :param weights: (Optional) sample weights :returns: mean squared error loss (best is 0.0). """ ModelBase._check_targets(y_actual, y_predicted) return _colmean((y_predicted - y_actual) ** 2)
def truncated_normal_expval(mu, tau, a, b):
    """Expected value of the truncated normal distribution.

    .. math::
       E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T}

    where

    .. math::
       T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi
       \left(\frac{A-\mu}{\sigma}\right)\text \\
        \varphi_1 &=
       \varphi\left(\frac{A-\mu}{\sigma}\right) \\
       \varphi_2 &=
       \varphi\left(\frac{B-\mu}{\sigma}\right) \\

    and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`.

    :Parameters:
      - `mu` : Mean of the distribution.
      - `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
      - `a` : Left bound of the distribution.
      - `b` : Right bound of the distribution.
    """
    # pdf values at the two truncation bounds; assumes normal_like returns
    # the log-density, so exp() recovers the density — TODO confirm.
    phia = np.exp(normal_like(a, mu, tau))
    phib = np.exp(normal_like(b, mu, tau))
    # Convert precision tau to the standard deviation sigma = 1/sqrt(tau).
    sigma = 1. / np.sqrt(tau)
    Phia = utils.normcdf((a - mu) / sigma)
    # Pin the upper CDF to 1 explicitly for an unbounded right tail
    # (presumably normcdf does not accept inf — verify).
    # NOTE(review): a == -inf is not special-cased symmetrically; Phia
    # relies on normcdf handling a large negative argument.
    if b == np.inf:
        Phib = 1.0
    else:
        Phib = utils.normcdf((b - mu) / sigma)
    # The [0] index suggests the arithmetic yields a length-1 array,
    # presumably from normal_like's return shape — TODO confirm.
    return (mu + (phia - phib) / (Phib - Phia))[0]
Expected value of the truncated normal distribution. .. math:: E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T} where .. math:: T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi \left(\frac{A-\mu}{\sigma}\right)\text \\ \varphi_1 &= \varphi\left(\frac{A-\mu}{\sigma}\right) \\ \varphi_2 &= \varphi\left(\frac{B-\mu}{\sigma}\right) \\ and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`. :Parameters: - `mu` : Mean of the distribution. - `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0). - `a` : Left bound of the distribution. - `b` : Right bound of the distribution.
Below is the the instruction that describes the task: ### Input: Expected value of the truncated normal distribution. .. math:: E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T} where .. math:: T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi \left(\frac{A-\mu}{\sigma}\right)\text \\ \varphi_1 &= \varphi\left(\frac{A-\mu}{\sigma}\right) \\ \varphi_2 &= \varphi\left(\frac{B-\mu}{\sigma}\right) \\ and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`. :Parameters: - `mu` : Mean of the distribution. - `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0). - `a` : Left bound of the distribution. - `b` : Right bound of the distribution. ### Response: def truncated_normal_expval(mu, tau, a, b): """Expected value of the truncated normal distribution. .. math:: E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T} where .. math:: T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi \left(\frac{A-\mu}{\sigma}\right)\text \\ \varphi_1 &= \varphi\left(\frac{A-\mu}{\sigma}\right) \\ \varphi_2 &= \varphi\left(\frac{B-\mu}{\sigma}\right) \\ and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`. :Parameters: - `mu` : Mean of the distribution. - `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0). - `a` : Left bound of the distribution. - `b` : Right bound of the distribution. """ phia = np.exp(normal_like(a, mu, tau)) phib = np.exp(normal_like(b, mu, tau)) sigma = 1. / np.sqrt(tau) Phia = utils.normcdf((a - mu) / sigma) if b == np.inf: Phib = 1.0 else: Phib = utils.normcdf((b - mu) / sigma) return (mu + (phia - phib) / (Phib - Phia))[0]
def get_ports_alert(self, port, header="", log=False):
    """Return the alert level for a port scan result and run the
    threshold/action handling for it.

    Levels: 'CAREFUL' when no scan result is available, 'CRITICAL' when
    the port is unreachable (status 0), 'WARNING' when the round-trip
    time exceeds the configured warning threshold, 'OK' otherwise.
    """
    status = port['status']
    if status is None:
        ret = 'CAREFUL'
    elif status == 0:
        ret = 'CRITICAL'
    elif (isinstance(status, (float, int))
          and port['rtt_warning'] is not None
          and status > port['rtt_warning']):
        ret = 'WARNING'
    else:
        ret = 'OK'
    stat_name = self.get_stat_name(header=header)
    # Propagate the computed level to threshold bookkeeping and actions.
    self.manage_threshold(stat_name, ret)
    self.manage_action(stat_name, ret.lower(), header, port[self.get_key()])
    return ret
Return the alert status relative to the port scan return value.
Below is the the instruction that describes the task: ### Input: Return the alert status relative to the port scan return value. ### Response: def get_ports_alert(self, port, header="", log=False): """Return the alert status relative to the port scan return value.""" ret = 'OK' if port['status'] is None: ret = 'CAREFUL' elif port['status'] == 0: ret = 'CRITICAL' elif (isinstance(port['status'], (float, int)) and port['rtt_warning'] is not None and port['status'] > port['rtt_warning']): ret = 'WARNING' # Get stat name stat_name = self.get_stat_name(header=header) # Manage threshold self.manage_threshold(stat_name, ret) # Manage action self.manage_action(stat_name, ret.lower(), header, port[self.get_key()]) return ret
def _special_method_cache(method, cache_wrapper):
    """Build a proxy so cached versions of special methods still work.

    Python looks dunder methods up on the type rather than the instance,
    so a cached wrapper stored as an ordinary instance attribute would
    never be consulted.  Instead, the cached wrapper is stashed on the
    instance under a private name and a lightweight proxy that defers to
    it is returned.  Returns None for methods needing no special
    handling.

    https://github.com/jaraco/jaraco.functools/issues/5
    """
    name = method.__name__
    if name not in ('__getattr__', '__getitem__'):
        return None
    cached_name = '__cached' + name

    def proxy(self, *args, **kwargs):
        if cached_name in vars(self):
            cache = getattr(self, cached_name)
        else:
            # Bind the raw function to this instance, wrap it once, and
            # remember the wrapper for subsequent calls.
            cache = cache_wrapper(types.MethodType(method, self))
            setattr(self, cached_name, cache)
        return cache(*args, **kwargs)

    return proxy
Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5
Below is the the instruction that describes the task: ### Input: Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5 ### Response: def _special_method_cache(method, cache_wrapper): """ Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5 """ name = method.__name__ special_names = '__getattr__', '__getitem__' if name not in special_names: return wrapper_name = '__cached' + name def proxy(self, *args, **kwargs): if wrapper_name not in vars(self): bound = types.MethodType(method, self) cache = cache_wrapper(bound) setattr(self, wrapper_name, cache) else: cache = getattr(self, wrapper_name) return cache(*args, **kwargs) return proxy
def configure(self, options, config):
    """Configure the test timer plugin from the parsed nose options."""
    super(TimerPlugin, self).configure(options, config)
    self.config = config
    if not self.enabled:
        return
    self.timer_top_n = int(options.timer_top_n)
    self.timer_ok = self._parse_time(options.timer_ok)
    self.timer_warning = self._parse_time(options.timer_warning)
    self.timer_filter = self._parse_filter(options.timer_filter)
    self.timer_fail = options.timer_fail
    self.json_file = options.json_file
    # Windows + nosetests does not support colors (even with colorama),
    # so force color off there regardless of the user's flag.
    self.timer_no_color = True if IS_NT else options.timer_no_color
    # Remember whether the multiprocessing plugin is active.
    self.multiprocessing_enabled = bool(
        getattr(options, 'multiprocess_workers', False))
Configures the test timer plugin.
Below is the the instruction that describes the task: ### Input: Configures the test timer plugin. ### Response: def configure(self, options, config): """Configures the test timer plugin.""" super(TimerPlugin, self).configure(options, config) self.config = config if self.enabled: self.timer_top_n = int(options.timer_top_n) self.timer_ok = self._parse_time(options.timer_ok) self.timer_warning = self._parse_time(options.timer_warning) self.timer_filter = self._parse_filter(options.timer_filter) self.timer_fail = options.timer_fail self.timer_no_color = True self.json_file = options.json_file # Windows + nosetests does not support colors (even with colorama). if not IS_NT: self.timer_no_color = options.timer_no_color # determine if multiprocessing plugin enabled self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False))
def run_check(self, check, argument_names):
    """Invoke a check plugin, feeding it the named attributes of self.

    :param check: the callable to run
    :param argument_names: attribute names on ``self`` supplying the
        positional arguments, in order
    :returns: whatever *check* returns
    """
    return check(*(getattr(self, name) for name in argument_names))
Run a check plugin.
Below is the instruction that describes the task: ### Input: Run a check plugin. ### Response: def run_check(self, check, argument_names): """Run a check plugin.""" arguments = [] for name in argument_names: arguments.append(getattr(self, name)) return check(*arguments)
def verbose(self, msg, *args, **kw):
    """Log *msg* at the custom :data:`VERBOSE` level.

    The arguments are interpreted as for :func:`logging.debug()`.
    """
    if not self.isEnabledFor(VERBOSE):
        return
    self._log(VERBOSE, msg, args, **kw)
Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.
Below is the instruction that describes the task: ### Input: Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`. ### Response: def verbose(self, msg, *args, **kw): """Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(VERBOSE): self._log(VERBOSE, msg, args, **kw)
def im_watermark(im, inputtext, font=None, color=None, opacity=.6,
                 margin=(30, 30)):
    """Stamp *inputtext* into the lower-right corner of a PIL image.

    The text is rendered on a transparent layer, optionally faded to
    *opacity*, and composited over *im*.  Returns a new RGBA image.
    """
    if im.mode != "RGBA":
        im = im.convert("RGBA")
    layer = Image.new("RGBA", im.size, (0, 0, 0, 0))
    draw = ImageDraw.Draw(layer)
    text_w, text_h = draw.textsize(inputtext, font=font)
    # Anchor the text *margin* pixels in from the bottom-right corner.
    pos = (im.size[0] - text_w - margin[0], im.size[1] - text_h - margin[1])
    draw.text(pos, inputtext, font=font, fill=color)
    if opacity != 1:
        layer = reduce_opacity(layer, opacity)
    return Image.composite(layer, im, layer)
imprints a PIL image with the indicated text in lower-right corner
Below is the the instruction that describes the task: ### Input: imprints a PIL image with the indicated text in lower-right corner ### Response: def im_watermark(im, inputtext, font=None, color=None, opacity=.6, margin=(30, 30)): """imprints a PIL image with the indicated text in lower-right corner""" if im.mode != "RGBA": im = im.convert("RGBA") textlayer = Image.new("RGBA", im.size, (0, 0, 0, 0)) textdraw = ImageDraw.Draw(textlayer) textsize = textdraw.textsize(inputtext, font=font) textpos = [im.size[i] - textsize[i] - margin[i] for i in [0, 1]] textdraw.text(textpos, inputtext, font=font, fill=color) if opacity != 1: textlayer = reduce_opacity(textlayer, opacity) return Image.composite(textlayer, im, textlayer)
def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]:
    """
    Wraps jsonschema.validate, returning the same object passed in.

    Args:
        request: The deserialized-from-json request.
        schema: The jsonschema schema to validate against.

    Returns:
        The *request* argument, unchanged, so calls can be chained.

    Raises:
        jsonschema.ValidationError
    """
    # jsonschema_validate raises on failure and returns None on success,
    # so echo the input back to allow fluent use at call sites.
    jsonschema_validate(request, schema)
    return request
Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError
Below is the the instruction that describes the task: ### Input: Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError ### Response: def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]: """ Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError """ jsonschema_validate(request, schema) return request
def __execute_bisz(self, instr):
    """Execute a BISZ instruction: write 1 to the destination operand
    when the source operand equals zero, otherwise write 0.
    """
    src = self.read_operand(instr.operands[0])
    result = 1 if src == 0 else 0
    self.write_operand(instr.operands[2], result)
    return None
Execute BISZ instruction.
Below is the the instruction that describes the task: ### Input: Execute BISZ instruction. ### Response: def __execute_bisz(self, instr): """Execute BISZ instruction. """ op0_val = self.read_operand(instr.operands[0]) op2_val = 1 if op0_val == 0 else 0 self.write_operand(instr.operands[2], op2_val) return None
def _set_state(self, state):
    """Set `_state` and notify any threads waiting for the change.

    The caller must already hold the lock backing ``_state_cond`` —
    ``Condition.notify()`` raises RuntimeError otherwise.
    """
    logger.debug(" _set_state({0!r})".format(state))
    self._state = state
    self._state_cond.notify()
Set `_state` and notify any threads waiting for the change.
Below is the the instruction that describes the task: ### Input: Set `_state` and notify any threads waiting for the change. ### Response: def _set_state(self, state): """Set `_state` and notify any threads waiting for the change. """ logger.debug(" _set_state({0!r})".format(state)) self._state = state self._state_cond.notify()
def call(cmd, timeout=None, signum=signal.SIGKILL, keep_rc=False,
         encoding="utf-8", env=os.environ):
    """Execute a command (or list of commands) and return its output.

    If *timeout* is supplied and expires, the process receives *signum*
    (SIGKILL by default) and an exception is raised.  Otherwise the
    decoded stdout is returned.

    Parameters
    ----------
    cmd: str or [[str]]
        The command(s) to execute
    timeout: int
        Seconds before *signum* is sent to the process
    signum: int
        The signal number to issue to the process on timeout
    keep_rc: bool
        Whether to return the exit code along with the output
    encoding: str
        unicode decoding scheme to use. Default is "utf-8"
    env: dict
        The environment in which to execute commands. Default is os.environ

    Returns
    -------
    str, or (int, str) when *keep_rc* is true.

    Raises
    ------
    CalledProcessError
        Raised when cmd fails
    """
    commands = cmd if isinstance(cmd, list) else [cmd]
    pipeline = Pipeline(*commands, timeout=timeout, signum=signum, env=env)
    result = pipeline(keep_rc=keep_rc)
    if keep_rc:
        rc, raw = result
        return rc, raw.decode(encoding, "ignore")
    return result.decode(encoding, "ignore")
Execute a cmd or list of commands with an optional timeout in seconds. If `timeout` is supplied and expires, the process is killed with SIGKILL (kill -9) and an exception is raised. Otherwise, the command output is returned. Parameters ---------- cmd: str or [[str]] The command(s) to execute timeout: int Seconds before kill is issued to the process signum: int The signal number to issue to the process on timeout keep_rc: bool Whether to return the exit code along with the output encoding: str unicode decoding scheme to use. Default is "utf-8" env: dict The environment in which to execute commands. Default is os.environ Returns ------- str Content of stdout of cmd on success. Raises ------ CalledProcessError Raised when cmd fails
Below is the the instruction that describes the task: ### Input: Execute a cmd or list of commands with an optional timeout in seconds. If `timeout` is supplied and expires, the process is killed with SIGKILL (kill -9) and an exception is raised. Otherwise, the command output is returned. Parameters ---------- cmd: str or [[str]] The command(s) to execute timeout: int Seconds before kill is issued to the process signum: int The signal number to issue to the process on timeout keep_rc: bool Whether to return the exit code along with the output encoding: str unicode decoding scheme to use. Default is "utf-8" env: dict The environment in which to execute commands. Default is os.environ Returns ------- str Content of stdout of cmd on success. Raises ------ CalledProcessError Raised when cmd fails ### Response: def call(cmd, timeout=None, signum=signal.SIGKILL, keep_rc=False, encoding="utf-8", env=os.environ): """ Execute a cmd or list of commands with an optional timeout in seconds. If `timeout` is supplied and expires, the process is killed with SIGKILL (kill -9) and an exception is raised. Otherwise, the command output is returned. Parameters ---------- cmd: str or [[str]] The command(s) to execute timeout: int Seconds before kill is issued to the process signum: int The signal number to issue to the process on timeout keep_rc: bool Whether to return the exit code along with the output encoding: str unicode decoding scheme to use. Default is "utf-8" env: dict The environment in which to execute commands. Default is os.environ Returns ------- str Content of stdout of cmd on success. Raises ------ CalledProcessError Raised when cmd fails """ if not isinstance(cmd, list): cmd = [cmd] p = Pipeline(*cmd, timeout=timeout, signum=signum, env=env) res = p(keep_rc=keep_rc) if keep_rc: rc, output = res output = output.decode(encoding, 'ignore') return rc, output return res.decode(encoding, "ignore")
def download_url(url, back_off=True, **kwargs):
    """Fetch *url* and return its content as a file-like object.

    With back_off=True (the default) failed downloads are retried via
    the back-off helper; otherwise a single attempt is made.
    """
    downloader = _download_with_backoff if back_off else _download_without_backoff
    return downloader(url, as_file=True, **kwargs)
Get the content of a URL and return a file-like object. back_off=True provides retry
Below is the the instruction that describes the task: ### Input: Get the content of a URL and return a file-like object. back_off=True provides retry ### Response: def download_url(url, back_off=True, **kwargs): """ Get the content of a URL and return a file-like object. back_off=True provides retry """ if back_off: return _download_with_backoff(url, as_file=True, **kwargs) else: return _download_without_backoff(url, as_file=True, **kwargs)
def create(cls, rule_entries, union_rules=None):
    """Creates a RuleIndex with tasks indexed by their output type.

    ``rule_entries`` may contain Rule instances, UnionRule instances, or
    callables decorated with @rule (their ``.rule`` attribute is used);
    anything else raises TypeError.  ``union_rules`` optionally seeds the
    union-base -> members mapping.
    """
    serializable_rules = OrderedDict()
    serializable_roots = OrderedSet()
    union_rules = OrderedDict(union_rules or ())

    # Index a non-root rule under its product (output) type.
    def add_task(product_type, rule):
        # TODO(#7311): make a defaultdict-like wrapper for OrderedDict if more widely used.
        if product_type not in serializable_rules:
            serializable_rules[product_type] = OrderedSet()
        serializable_rules[product_type].add(rule)

    def add_root_rule(root_rule):
        serializable_roots.add(root_rule)

    # Register a rule, then recursively register its dependency rules.
    def add_rule(rule):
        if isinstance(rule, RootRule):
            add_root_rule(rule)
        else:
            add_task(rule.output_type, rule)
        for dep_rule in rule.dependency_rules:
            add_rule(dep_rule)

    # Record a union membership (base type -> member type).
    def add_type_transition_rule(union_rule):
        # NB: This does not require that union bases be supplied to `def rules():`, as the union type
        # is never instantiated!
        union_base = union_rule.union_base
        assert union_base._is_union
        union_member = union_rule.union_member
        if union_base not in union_rules:
            union_rules[union_base] = OrderedSet()
        union_rules[union_base].add(union_member)

    for entry in rule_entries:
        if isinstance(entry, Rule):
            add_rule(entry)
        elif isinstance(entry, UnionRule):
            add_type_transition_rule(entry)
        elif hasattr(entry, '__call__'):
            # Bare callables must carry the rule the @rule decorator attached.
            rule = getattr(entry, 'rule', None)
            if rule is None:
                raise TypeError("Expected callable {} to be decorated with @rule.".format(entry))
            add_rule(rule)
        else:
            raise TypeError("""\
Rule entry {} had an unexpected type: {}. Rules either extend Rule or UnionRule, or are static \
functions decorated with @rule.""".format(entry, type(entry)))

    return cls(serializable_rules, serializable_roots, union_rules)
Creates a RuleIndex with tasks indexed by their output type.
Below is the the instruction that describes the task: ### Input: Creates a RuleIndex with tasks indexed by their output type. ### Response: def create(cls, rule_entries, union_rules=None): """Creates a RuleIndex with tasks indexed by their output type.""" serializable_rules = OrderedDict() serializable_roots = OrderedSet() union_rules = OrderedDict(union_rules or ()) def add_task(product_type, rule): # TODO(#7311): make a defaultdict-like wrapper for OrderedDict if more widely used. if product_type not in serializable_rules: serializable_rules[product_type] = OrderedSet() serializable_rules[product_type].add(rule) def add_root_rule(root_rule): serializable_roots.add(root_rule) def add_rule(rule): if isinstance(rule, RootRule): add_root_rule(rule) else: add_task(rule.output_type, rule) for dep_rule in rule.dependency_rules: add_rule(dep_rule) def add_type_transition_rule(union_rule): # NB: This does not require that union bases be supplied to `def rules():`, as the union type # is never instantiated! union_base = union_rule.union_base assert union_base._is_union union_member = union_rule.union_member if union_base not in union_rules: union_rules[union_base] = OrderedSet() union_rules[union_base].add(union_member) for entry in rule_entries: if isinstance(entry, Rule): add_rule(entry) elif isinstance(entry, UnionRule): add_type_transition_rule(entry) elif hasattr(entry, '__call__'): rule = getattr(entry, 'rule', None) if rule is None: raise TypeError("Expected callable {} to be decorated with @rule.".format(entry)) add_rule(rule) else: raise TypeError("""\ Rule entry {} had an unexpected type: {}. Rules either extend Rule or UnionRule, or are static \ functions decorated with @rule.""".format(entry, type(entry))) return cls(serializable_rules, serializable_roots, union_rules)
def get_resource(self, request, filename):
    """Serve a static file from the package's ``shared`` folder.

    Only the basename of *filename* is honoured, which keeps lookups
    inside the shared directory.  Responds 404 when the resource is
    missing.
    """
    resource = join("shared", basename(filename))
    try:
        data = pkgutil.get_data(__package__, resource)
    except OSError:
        data = None
    if data is None:
        return Response("Not Found", status=404)
    mimetype = mimetypes.guess_type(resource)[0] or "application/octet-stream"
    return Response(data, mimetype=mimetype)
Return a static resource from the shared folder.
Below is the the instruction that describes the task: ### Input: Return a static resource from the shared folder. ### Response: def get_resource(self, request, filename): """Return a static resource from the shared folder.""" filename = join("shared", basename(filename)) try: data = pkgutil.get_data(__package__, filename) except OSError: data = None if data is not None: mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream" return Response(data, mimetype=mimetype) return Response("Not Found", status=404)
def covariance(self):
    """
    The covariance matrix of the 2D Gaussian function that has the same
    second-order moments as the source.
    """
    mu = self.moments_central
    if mu[0, 0] == 0:
        # Zero total weight: the normalized moments are undefined.
        return np.empty((2, 2)) * np.nan * u.pix**2
    m = mu / mu[0, 0]
    cov = np.array([[m[0, 2], m[1, 1]],
                    [m[1, 1], m[2, 0]]])
    return self._check_covariance(cov) * u.pix**2
The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source.
Below is the the instruction that describes the task: ### Input: The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source. ### Response: def covariance(self): """ The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source. """ mu = self.moments_central if mu[0, 0] != 0: m = mu / mu[0, 0] covariance = self._check_covariance( np.array([[m[0, 2], m[1, 1]], [m[1, 1], m[2, 0]]])) return covariance * u.pix**2 else: return np.empty((2, 2)) * np.nan * u.pix**2
def get_events(self, time_period, include_archived=False) -> Optional[int]: """Get the number of events that have occurred on this Monitor. Specifically only gets events that have occurred within the TimePeriod provided. """ date_filter = '1%20{}'.format(time_period.period) if time_period == TimePeriod.ALL: # The consoleEvents API uses DATE_SUB, so give it # something large date_filter = '100%20year' archived_filter = '/Archived=:0' if include_archived: archived_filter = '' event = self._client.get_state( 'api/events/consoleEvents/{}{}.json'.format( date_filter, archived_filter ) ) try: events_by_monitor = event['results'] if isinstance(events_by_monitor, list): return 0 return events_by_monitor.get(str(self._monitor_id), 0) except (TypeError, KeyError, AttributeError): return None
Get the number of events that have occurred on this Monitor. Specifically only gets events that have occurred within the TimePeriod provided.
Below is the the instruction that describes the task: ### Input: Get the number of events that have occurred on this Monitor. Specifically only gets events that have occurred within the TimePeriod provided. ### Response: def get_events(self, time_period, include_archived=False) -> Optional[int]: """Get the number of events that have occurred on this Monitor. Specifically only gets events that have occurred within the TimePeriod provided. """ date_filter = '1%20{}'.format(time_period.period) if time_period == TimePeriod.ALL: # The consoleEvents API uses DATE_SUB, so give it # something large date_filter = '100%20year' archived_filter = '/Archived=:0' if include_archived: archived_filter = '' event = self._client.get_state( 'api/events/consoleEvents/{}{}.json'.format( date_filter, archived_filter ) ) try: events_by_monitor = event['results'] if isinstance(events_by_monitor, list): return 0 return events_by_monitor.get(str(self._monitor_id), 0) except (TypeError, KeyError, AttributeError): return None
def getRealInterfaceNumber(self, interface): """ Returns the host-visible interface number, or None if there is no such interface. """ try: return self._ioctl(INTERFACE_REVMAP, interface) except IOError as exc: if exc.errno == errno.EDOM: return None raise
Returns the host-visible interface number, or None if there is no such interface.
Below is the the instruction that describes the task: ### Input: Returns the host-visible interface number, or None if there is no such interface. ### Response: def getRealInterfaceNumber(self, interface): """ Returns the host-visible interface number, or None if there is no such interface. """ try: return self._ioctl(INTERFACE_REVMAP, interface) except IOError as exc: if exc.errno == errno.EDOM: return None raise
def get_text(html_content, display_images=False, deduplicate_captions=False, display_links=False): ''' ::param: html_content ::returns: a text representation of the html content. ''' html_content = html_content.strip() if not html_content: return "" # strip XML declaration, if necessary if html_content.startswith('<?xml '): html_content = RE_STRIP_XML_DECLARATION.sub('', html_content, count=1) html_tree = fromstring(html_content) parser = Inscriptis(html_tree, display_images=display_images, deduplicate_captions=deduplicate_captions, display_links=display_links) return parser.get_text()
::param: html_content ::returns: a text representation of the html content.
Below is the the instruction that describes the task: ### Input: ::param: html_content ::returns: a text representation of the html content. ### Response: def get_text(html_content, display_images=False, deduplicate_captions=False, display_links=False): ''' ::param: html_content ::returns: a text representation of the html content. ''' html_content = html_content.strip() if not html_content: return "" # strip XML declaration, if necessary if html_content.startswith('<?xml '): html_content = RE_STRIP_XML_DECLARATION.sub('', html_content, count=1) html_tree = fromstring(html_content) parser = Inscriptis(html_tree, display_images=display_images, deduplicate_captions=deduplicate_captions, display_links=display_links) return parser.get_text()
def cancelEdit( self ): """ Rejects the current edit and shows the parts widget. """ if ( self._partsWidget.isVisible() ): return False self._completerTree.hide() self.completer().popup().hide() self.setText(self._originalText) return True
Rejects the current edit and shows the parts widget.
Below is the the instruction that describes the task: ### Input: Rejects the current edit and shows the parts widget. ### Response: def cancelEdit( self ): """ Rejects the current edit and shows the parts widget. """ if ( self._partsWidget.isVisible() ): return False self._completerTree.hide() self.completer().popup().hide() self.setText(self._originalText) return True
def batch_get_documents( self, database, documents, mask=None, transaction=None, new_transaction=None, read_time=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> # TODO: Initialize `documents`: >>> documents = [] >>> >>> for element in client.batch_get_documents(database, documents): ... # process element ... pass Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. documents (list[str]): The names of the documents to retrieve. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. The request will fail if any of the document is not a child resource of the given ``database``. Duplicate names will be elided. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If a document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads documents in a transaction. new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. Defaults to a read-only transaction. The new transaction ID will be returned as the first response in the stream. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. This may not be older than 60 seconds. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "batch_get_documents" not in self._inner_api_calls: self._inner_api_calls[ "batch_get_documents" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.batch_get_documents, default_retry=self._method_configs["BatchGetDocuments"].retry, default_timeout=self._method_configs["BatchGetDocuments"].timeout, client_info=self._client_info, ) # Sanity check: We have some fields which are mutually exclusive; # raise ValueError if more than one is sent. 
google.api_core.protobuf_helpers.check_oneof( transaction=transaction, new_transaction=new_transaction, read_time=read_time, ) request = firestore_pb2.BatchGetDocumentsRequest( database=database, documents=documents, mask=mask, transaction=transaction, new_transaction=new_transaction, read_time=read_time, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("database", database)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["batch_get_documents"]( request, retry=retry, timeout=timeout, metadata=metadata )
Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> # TODO: Initialize `documents`: >>> documents = [] >>> >>> for element in client.batch_get_documents(database, documents): ... # process element ... pass Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. documents (list[str]): The names of the documents to retrieve. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. The request will fail if any of the document is not a child resource of the given ``database``. Duplicate names will be elided. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If a document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads documents in a transaction. new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. Defaults to a read-only transaction. The new transaction ID will be returned as the first response in the stream. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. This may not be older than 60 seconds. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
Below is the the instruction that describes the task: ### Input: Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> # TODO: Initialize `documents`: >>> documents = [] >>> >>> for element in client.batch_get_documents(database, documents): ... # process element ... pass Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. documents (list[str]): The names of the documents to retrieve. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. The request will fail if any of the document is not a child resource of the given ``database``. Duplicate names will be elided. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If a document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads documents in a transaction. new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. Defaults to a read-only transaction. The new transaction ID will be returned as the first response in the stream. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. This may not be older than 60 seconds. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. ### Response: def batch_get_documents( self, database, documents, mask=None, transaction=None, new_transaction=None, read_time=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> # TODO: Initialize `documents`: >>> documents = [] >>> >>> for element in client.batch_get_documents(database, documents): ... # process element ... pass Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. documents (list[str]): The names of the documents to retrieve. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. The request will fail if any of the document is not a child resource of the given ``database``. 
Duplicate names will be elided. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If a document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads documents in a transaction. new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. Defaults to a read-only transaction. The new transaction ID will be returned as the first response in the stream. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. This may not be older than 60 seconds. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "batch_get_documents" not in self._inner_api_calls: self._inner_api_calls[ "batch_get_documents" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.batch_get_documents, default_retry=self._method_configs["BatchGetDocuments"].retry, default_timeout=self._method_configs["BatchGetDocuments"].timeout, client_info=self._client_info, ) # Sanity check: We have some fields which are mutually exclusive; # raise ValueError if more than one is sent. google.api_core.protobuf_helpers.check_oneof( transaction=transaction, new_transaction=new_transaction, read_time=read_time, ) request = firestore_pb2.BatchGetDocumentsRequest( database=database, documents=documents, mask=mask, transaction=transaction, new_transaction=new_transaction, read_time=read_time, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("database", database)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["batch_get_documents"]( request, retry=retry, timeout=timeout, metadata=metadata )
def status_search(self, status): """Searches for jobs matching the given ``status``.""" json = self._fetch_json() jobs = json['response'] for job in jobs: job_info = jobs[job] if job_info['status'].lower() == status.lower(): yield self._build_results(jobs, job)
Searches for jobs matching the given ``status``.
Below is the the instruction that describes the task: ### Input: Searches for jobs matching the given ``status``. ### Response: def status_search(self, status): """Searches for jobs matching the given ``status``.""" json = self._fetch_json() jobs = json['response'] for job in jobs: job_info = jobs[job] if job_info['status'].lower() == status.lower(): yield self._build_results(jobs, job)
def azlyrics(song): """ Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found. """ artist = song.artist.lower() if artist[0:2] == 'a ': artist = artist[2:] artist = normalize(artist, URLESCAPES, '') title = song.title.lower() title = normalize(title, URLESCAPES, '') url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title) soup = get_url(url) body = soup.find_all('div', class_='')[-1] return body.get_text().strip()
Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found.
Below is the the instruction that describes the task: ### Input: Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found. ### Response: def azlyrics(song): """ Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found. """ artist = song.artist.lower() if artist[0:2] == 'a ': artist = artist[2:] artist = normalize(artist, URLESCAPES, '') title = song.title.lower() title = normalize(title, URLESCAPES, '') url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title) soup = get_url(url) body = soup.find_all('div', class_='')[-1] return body.get_text().strip()
async def delete_pairwise(self, their_did: str) -> None: """ Remove a pairwise DID record by its remote DID. Silently return if no such record is present. Raise WalletState for closed wallet, or BadIdentifier for invalid pairwise DID. :param their_did: remote DID marking pairwise DID to remove """ LOGGER.debug('Wallet.delete_pairwise >>> their_did: %s', their_did) if not ok_did(their_did): LOGGER.debug('Wallet.delete_pairwise <!< Bad DID %s', their_did) raise BadIdentifier('Bad DID {}'.format(their_did)) await self.delete_non_secret(TYPE_PAIRWISE, their_did) LOGGER.debug('Wallet.delete_pairwise <<<')
Remove a pairwise DID record by its remote DID. Silently return if no such record is present. Raise WalletState for closed wallet, or BadIdentifier for invalid pairwise DID. :param their_did: remote DID marking pairwise DID to remove
Below is the the instruction that describes the task: ### Input: Remove a pairwise DID record by its remote DID. Silently return if no such record is present. Raise WalletState for closed wallet, or BadIdentifier for invalid pairwise DID. :param their_did: remote DID marking pairwise DID to remove ### Response: async def delete_pairwise(self, their_did: str) -> None: """ Remove a pairwise DID record by its remote DID. Silently return if no such record is present. Raise WalletState for closed wallet, or BadIdentifier for invalid pairwise DID. :param their_did: remote DID marking pairwise DID to remove """ LOGGER.debug('Wallet.delete_pairwise >>> their_did: %s', their_did) if not ok_did(their_did): LOGGER.debug('Wallet.delete_pairwise <!< Bad DID %s', their_did) raise BadIdentifier('Bad DID {}'.format(their_did)) await self.delete_non_secret(TYPE_PAIRWISE, their_did) LOGGER.debug('Wallet.delete_pairwise <<<')
def solve(self): """Solve rpn expression, return None if not valid.""" popflag = True self.tmpopslist = [] while True: while self.opslist and popflag: op = self.opslist.pop() if self.is_variable(op): op = self.variables.get(op) if self.is_operator(op): popflag = False break self.tmpopslist.append(op) # operations tmpr = self._get_temp_result(op) if tmpr == 'ERROR': return None if tmpr is not None: self.opslist.append('{r:.20f}'.format(r=tmpr)) if len(self.tmpopslist) > 0 or len(self.opslist) > 1: popflag = True else: break return float(self.opslist[0])
Solve rpn expression, return None if not valid.
Below is the the instruction that describes the task: ### Input: Solve rpn expression, return None if not valid. ### Response: def solve(self): """Solve rpn expression, return None if not valid.""" popflag = True self.tmpopslist = [] while True: while self.opslist and popflag: op = self.opslist.pop() if self.is_variable(op): op = self.variables.get(op) if self.is_operator(op): popflag = False break self.tmpopslist.append(op) # operations tmpr = self._get_temp_result(op) if tmpr == 'ERROR': return None if tmpr is not None: self.opslist.append('{r:.20f}'.format(r=tmpr)) if len(self.tmpopslist) > 0 or len(self.opslist) > 1: popflag = True else: break return float(self.opslist[0])
def _ExtractPathSpecsFromFile(self, file_entry): """Extracts path specification from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file. """ produced_main_path_spec = False for data_stream in file_entry.data_streams: # Make a copy so we don't make the changes on a path specification # directly. Otherwise already produced path specifications can be # altered in the process. path_spec = copy.deepcopy(file_entry.path_spec) if data_stream.name: setattr(path_spec, 'data_stream', data_stream.name) yield path_spec if not data_stream.name: produced_main_path_spec = True if not produced_main_path_spec: yield file_entry.path_spec
Extracts path specification from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file.
Below is the the instruction that describes the task: ### Input: Extracts path specification from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file. ### Response: def _ExtractPathSpecsFromFile(self, file_entry): """Extracts path specification from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file. """ produced_main_path_spec = False for data_stream in file_entry.data_streams: # Make a copy so we don't make the changes on a path specification # directly. Otherwise already produced path specifications can be # altered in the process. path_spec = copy.deepcopy(file_entry.path_spec) if data_stream.name: setattr(path_spec, 'data_stream', data_stream.name) yield path_spec if not data_stream.name: produced_main_path_spec = True if not produced_main_path_spec: yield file_entry.path_spec
def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None, newfname=None, ensure=False, prefix=None, suffix=None): """ augments end of path before the extension. augpath Args: path (str): augsuf (str): augment filename before extension Returns: str: newpath Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug' >>> newpath = augpath(path, augsuf) >>> result = str(newpath) >>> print(result) somefile_aug.txt Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug2' >>> newext = '.bak' >>> augdir = 'backup' >>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir) >>> result = str(newpath) >>> print(result) backup/somefile_aug2.bak """ if prefix is not None: augpref = prefix if suffix is not None: augsuf = suffix # Breakup path dpath, fname = split(path) fname_noext, ext = splitext(fname) if newfname is not None: fname_noext = newfname # Augment ext if newext is None: newext = ext # Augment fname new_fname = ''.join((augpref, fname_noext, augsuf, newext, augext)) # Augment dpath if augdir is not None: new_dpath = join(dpath, augdir) if ensure: # create new dir if needebe ensuredir(new_dpath) else: new_dpath = dpath # Recombine into new path newpath = join(new_dpath, new_fname) return newpath
augments end of path before the extension. augpath Args: path (str): augsuf (str): augment filename before extension Returns: str: newpath Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug' >>> newpath = augpath(path, augsuf) >>> result = str(newpath) >>> print(result) somefile_aug.txt Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug2' >>> newext = '.bak' >>> augdir = 'backup' >>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir) >>> result = str(newpath) >>> print(result) backup/somefile_aug2.bak
Below is the the instruction that describes the task: ### Input: augments end of path before the extension. augpath Args: path (str): augsuf (str): augment filename before extension Returns: str: newpath Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug' >>> newpath = augpath(path, augsuf) >>> result = str(newpath) >>> print(result) somefile_aug.txt Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug2' >>> newext = '.bak' >>> augdir = 'backup' >>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir) >>> result = str(newpath) >>> print(result) backup/somefile_aug2.bak ### Response: def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None, newfname=None, ensure=False, prefix=None, suffix=None): """ augments end of path before the extension. augpath Args: path (str): augsuf (str): augment filename before extension Returns: str: newpath Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug' >>> newpath = augpath(path, augsuf) >>> result = str(newpath) >>> print(result) somefile_aug.txt Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = 'somefile.txt' >>> augsuf = '_aug2' >>> newext = '.bak' >>> augdir = 'backup' >>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir) >>> result = str(newpath) >>> print(result) backup/somefile_aug2.bak """ if prefix is not None: augpref = prefix if suffix is not None: augsuf = suffix # Breakup path dpath, fname = split(path) fname_noext, ext = splitext(fname) if newfname is not None: fname_noext = newfname # Augment ext if newext is None: newext = ext # Augment fname new_fname = ''.join((augpref, fname_noext, augsuf, newext, augext)) # Augment dpath if augdir is not None: new_dpath = join(dpath, augdir) if ensure: # create new dir if needebe ensuredir(new_dpath) else: 
new_dpath = dpath # Recombine into new path newpath = join(new_dpath, new_fname) return newpath
def get(self, singleSnapshot=False): """ *geneate the pyephem positions* **Key Arguments:** - ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing) **Return:** - ``None`` """ self.log.info('starting the ``get`` method') global xephemOE global tileSide global magLimit # GRAB PARAMETERS FROM SETTINGS FILE tileSide = float(self.settings["pyephem"]["atlas exposure match side"]) magLimit = float(self.settings["pyephem"]["magnitude limit"]) snapshotsRequired = 1 while snapshotsRequired > 0: nextMjds, exposures, snapshotsRequired = self._get_exposures_requiring_pyephem_positions( concurrentSnapshots=int(self.settings["pyephem"]["batch size"])) print "There are currently %(snapshotsRequired)s more pyephem snapshots required " % locals() if snapshotsRequired == 0: return if len(xephemOE) == 0: xephemOE = self._get_xephem_orbital_elements() # DEFINE AN INPUT ARRAY magLimit = self.settings["pyephem"]["magnitude limit"] pyephemDB = fmultiprocess(log=self.log, function=_generate_pyephem_snapshot, timeout=300, inputArray=nextMjds, magLimit=magLimit) matchedObjects = [] for p, e, m in zip(pyephemDB, exposures, nextMjds): matchedObjects.append( self._match_pyephem_snapshot_to_atlas_exposures(p, e, m)) self._add_matched_objects_to_database(matchedObjects) self._update_database_flag(exposures) if singleSnapshot: snapshotsRequired = 0 self.log.info('completed the ``get`` method') return None
*geneate the pyephem positions* **Key Arguments:** - ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing) **Return:** - ``None``
Below is the the instruction that describes the task: ### Input: *geneate the pyephem positions* **Key Arguments:** - ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing) **Return:** - ``None`` ### Response: def get(self, singleSnapshot=False): """ *geneate the pyephem positions* **Key Arguments:** - ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing) **Return:** - ``None`` """ self.log.info('starting the ``get`` method') global xephemOE global tileSide global magLimit # GRAB PARAMETERS FROM SETTINGS FILE tileSide = float(self.settings["pyephem"]["atlas exposure match side"]) magLimit = float(self.settings["pyephem"]["magnitude limit"]) snapshotsRequired = 1 while snapshotsRequired > 0: nextMjds, exposures, snapshotsRequired = self._get_exposures_requiring_pyephem_positions( concurrentSnapshots=int(self.settings["pyephem"]["batch size"])) print "There are currently %(snapshotsRequired)s more pyephem snapshots required " % locals() if snapshotsRequired == 0: return if len(xephemOE) == 0: xephemOE = self._get_xephem_orbital_elements() # DEFINE AN INPUT ARRAY magLimit = self.settings["pyephem"]["magnitude limit"] pyephemDB = fmultiprocess(log=self.log, function=_generate_pyephem_snapshot, timeout=300, inputArray=nextMjds, magLimit=magLimit) matchedObjects = [] for p, e, m in zip(pyephemDB, exposures, nextMjds): matchedObjects.append( self._match_pyephem_snapshot_to_atlas_exposures(p, e, m)) self._add_matched_objects_to_database(matchedObjects) self._update_database_flag(exposures) if singleSnapshot: snapshotsRequired = 0 self.log.info('completed the ``get`` method') return None
def parse_args(self): """Parse command line arguments.""" args = self.init_args().parse_args() # Load the configuration file, if it exists self.config = Config(args.conf_file) # Debug mode if args.debug: from logging import DEBUG logger.setLevel(DEBUG) else: from warnings import simplefilter simplefilter("ignore") # Plugins disable/enable if args.disable_plugin is not None: for p in args.disable_plugin.split(','): disable(args, p) else: # Allow users to disable plugins from the glances.conf (issue #1378) for s in self.config.sections(): if self.config.has_section(s) \ and (self.config.get_bool_value(s, 'disable', False)): disable(args, s) logger.debug('{} disabled by the configuration file'.format(s)) # Exporters activation if args.export is not None: for p in args.export.split(','): setattr(args, 'export_' + p, True) # Client/server Port if args.port is None: if args.webserver: args.port = self.web_server_port else: args.port = self.server_port # Port in the -c URI #996 if args.client is not None: args.client, args.port = (x if x else y for (x, y) in zip(args.client.partition(':')[::2], (args.client, args.port))) # Autodiscover if args.disable_autodiscover: logger.info("Auto discover mode is disabled") # By default Windows is started in Web mode if WINDOWS: args.webserver = True # In web server mode, default refresh time: 5 sec if args.webserver: args.time = 5 args.process_short_name = True # Server or client login/password if args.username_prompt: # Every username needs a password args.password_prompt = True # Prompt username if args.server: args.username = self.__get_username( description='Define the Glances server username: ') elif args.webserver: args.username = self.__get_username( description='Define the Glances webserver username: ') elif args.client: args.username = self.__get_username( description='Enter the Glances server username: ') else: if args.username_used: # A username has been set using the -u option ? 
args.username = args.username_used else: # Default user name is 'glances' args.username = self.username if args.password_prompt or args.username_used: # Interactive or file password if args.server: args.password = self.__get_password( description='Define the Glances server password ({} username): '.format( args.username), confirm=True, username=args.username) elif args.webserver: args.password = self.__get_password( description='Define the Glances webserver password ({} username): '.format( args.username), confirm=True, username=args.username) elif args.client: args.password = self.__get_password( description='Enter the Glances server password ({} username): '.format( args.username), clear=True, username=args.username) else: # Default is no password args.password = self.password # By default help is hidden args.help_tag = False # Display Rx and Tx, not the sum for the network args.network_sum = False args.network_cumul = False # Manage light mode if args.enable_light: logger.info("Light mode is on") args.disable_left_sidebar = True disable(args, 'process') disable(args, 'alert') disable(args, 'amps') disable(args, 'docker') # Manage full quicklook option if args.full_quicklook: logger.info("Full quicklook mode") enable(args, 'quicklook') disable(args, 'cpu') disable(args, 'mem') disable(args, 'memswap') enable(args, 'load') # Manage disable_top option if args.disable_top: logger.info("Disable top menu") disable(args, 'quicklook') disable(args, 'cpu') disable(args, 'mem') disable(args, 'memswap') disable(args, 'load') # Init the generate_graph tag # Should be set to True to generate graphs args.generate_graph = False # Control parameter and exit if it is not OK self.args = args # Export is only available in standalone or client mode (issue #614) export_tag = self.args.export is not None and any(self.args.export) if WINDOWS and export_tag: # On Windows, export is possible but only in quiet mode # See issue #1038 logger.info("On Windows OS, export disable the Web 
interface") self.args.quiet = True self.args.webserver = False elif not (self.is_standalone() or self.is_client()) and export_tag: logger.critical("Export is only available in standalone or client mode") sys.exit(2) # Filter is only available in standalone mode if args.process_filter is not None and not self.is_standalone(): logger.critical( "Process filter is only available in standalone mode") sys.exit(2) # Disable HDDTemp if sensors are disabled if getattr(args, 'disable_sensors', False): disable(args, 'hddtemp') logger.debug("Sensors and HDDTemp are disabled") return args
Parse command line arguments.
Below is the the instruction that describes the task: ### Input: Parse command line arguments. ### Response: def parse_args(self): """Parse command line arguments.""" args = self.init_args().parse_args() # Load the configuration file, if it exists self.config = Config(args.conf_file) # Debug mode if args.debug: from logging import DEBUG logger.setLevel(DEBUG) else: from warnings import simplefilter simplefilter("ignore") # Plugins disable/enable if args.disable_plugin is not None: for p in args.disable_plugin.split(','): disable(args, p) else: # Allow users to disable plugins from the glances.conf (issue #1378) for s in self.config.sections(): if self.config.has_section(s) \ and (self.config.get_bool_value(s, 'disable', False)): disable(args, s) logger.debug('{} disabled by the configuration file'.format(s)) # Exporters activation if args.export is not None: for p in args.export.split(','): setattr(args, 'export_' + p, True) # Client/server Port if args.port is None: if args.webserver: args.port = self.web_server_port else: args.port = self.server_port # Port in the -c URI #996 if args.client is not None: args.client, args.port = (x if x else y for (x, y) in zip(args.client.partition(':')[::2], (args.client, args.port))) # Autodiscover if args.disable_autodiscover: logger.info("Auto discover mode is disabled") # By default Windows is started in Web mode if WINDOWS: args.webserver = True # In web server mode, default refresh time: 5 sec if args.webserver: args.time = 5 args.process_short_name = True # Server or client login/password if args.username_prompt: # Every username needs a password args.password_prompt = True # Prompt username if args.server: args.username = self.__get_username( description='Define the Glances server username: ') elif args.webserver: args.username = self.__get_username( description='Define the Glances webserver username: ') elif args.client: args.username = self.__get_username( description='Enter the Glances server username: ') else: if 
args.username_used: # A username has been set using the -u option ? args.username = args.username_used else: # Default user name is 'glances' args.username = self.username if args.password_prompt or args.username_used: # Interactive or file password if args.server: args.password = self.__get_password( description='Define the Glances server password ({} username): '.format( args.username), confirm=True, username=args.username) elif args.webserver: args.password = self.__get_password( description='Define the Glances webserver password ({} username): '.format( args.username), confirm=True, username=args.username) elif args.client: args.password = self.__get_password( description='Enter the Glances server password ({} username): '.format( args.username), clear=True, username=args.username) else: # Default is no password args.password = self.password # By default help is hidden args.help_tag = False # Display Rx and Tx, not the sum for the network args.network_sum = False args.network_cumul = False # Manage light mode if args.enable_light: logger.info("Light mode is on") args.disable_left_sidebar = True disable(args, 'process') disable(args, 'alert') disable(args, 'amps') disable(args, 'docker') # Manage full quicklook option if args.full_quicklook: logger.info("Full quicklook mode") enable(args, 'quicklook') disable(args, 'cpu') disable(args, 'mem') disable(args, 'memswap') enable(args, 'load') # Manage disable_top option if args.disable_top: logger.info("Disable top menu") disable(args, 'quicklook') disable(args, 'cpu') disable(args, 'mem') disable(args, 'memswap') disable(args, 'load') # Init the generate_graph tag # Should be set to True to generate graphs args.generate_graph = False # Control parameter and exit if it is not OK self.args = args # Export is only available in standalone or client mode (issue #614) export_tag = self.args.export is not None and any(self.args.export) if WINDOWS and export_tag: # On Windows, export is possible but only in quiet mode # See 
issue #1038 logger.info("On Windows OS, export disable the Web interface") self.args.quiet = True self.args.webserver = False elif not (self.is_standalone() or self.is_client()) and export_tag: logger.critical("Export is only available in standalone or client mode") sys.exit(2) # Filter is only available in standalone mode if args.process_filter is not None and not self.is_standalone(): logger.critical( "Process filter is only available in standalone mode") sys.exit(2) # Disable HDDTemp if sensors are disabled if getattr(args, 'disable_sensors', False): disable(args, 'hddtemp') logger.debug("Sensors and HDDTemp are disabled") return args
def parse_json_qry(qry_str): """ Parses a json query string into its parts args: qry_str: query string params: variables passed into the string """ def param_analyzer(param_list): rtn_list = [] for param in param_list: parts = param.strip().split("=") try: rtn_list.append(\ JsonQryProcessor[parts[0].strip().lower()](parts[1])) except IndexError: rtn_list.append(\ JsonQryProcessor[parts[0].strip().lower()]()) return rtn_list def part_analyzer(part, idx): nonlocal dallor, asterick, question_mark if part == "$": dallor = idx return part elif part == "*": asterick = idx return part elif part == "?": question_mark = idx return part elif part.startswith("="): return part return cssparse(part)[0] # pdb.set_trace() main_parts = qry_str.split("|") or_parts = main_parts.pop(0).strip() params = param_analyzer(main_parts) rtn_list = [] for or_part in [item.strip() for item in or_parts.split(",") if item.strip()]: dallor, asterick, question_mark = None, None, None dot_parts = or_part.split(".") rtn_list.append(([part_analyzer(part, i) \ for i, part in enumerate(dot_parts)], dallor, asterick, question_mark)) return {"qry_parts": rtn_list, "params": params}
Parses a json query string into its parts args: qry_str: query string params: variables passed into the string
Below is the the instruction that describes the task: ### Input: Parses a json query string into its parts args: qry_str: query string params: variables passed into the string ### Response: def parse_json_qry(qry_str): """ Parses a json query string into its parts args: qry_str: query string params: variables passed into the string """ def param_analyzer(param_list): rtn_list = [] for param in param_list: parts = param.strip().split("=") try: rtn_list.append(\ JsonQryProcessor[parts[0].strip().lower()](parts[1])) except IndexError: rtn_list.append(\ JsonQryProcessor[parts[0].strip().lower()]()) return rtn_list def part_analyzer(part, idx): nonlocal dallor, asterick, question_mark if part == "$": dallor = idx return part elif part == "*": asterick = idx return part elif part == "?": question_mark = idx return part elif part.startswith("="): return part return cssparse(part)[0] # pdb.set_trace() main_parts = qry_str.split("|") or_parts = main_parts.pop(0).strip() params = param_analyzer(main_parts) rtn_list = [] for or_part in [item.strip() for item in or_parts.split(",") if item.strip()]: dallor, asterick, question_mark = None, None, None dot_parts = or_part.split(".") rtn_list.append(([part_analyzer(part, i) \ for i, part in enumerate(dot_parts)], dallor, asterick, question_mark)) return {"qry_parts": rtn_list, "params": params}
def _from_string(cls, string): """Create an Actor from a string. :param string: is the string, which is expected to be in regular git format John Doe <jdoe@example.com> :return: Actor """ m = cls.name_email_regex.search(string) if m: name, email = m.groups() return Actor(name, email) else: m = cls.name_only_regex.search(string) if m: return Actor(m.group(1), None) else: # assume best and use the whole string as name return Actor(string, None)
Create an Actor from a string. :param string: is the string, which is expected to be in regular git format John Doe <jdoe@example.com> :return: Actor
Below is the the instruction that describes the task: ### Input: Create an Actor from a string. :param string: is the string, which is expected to be in regular git format John Doe <jdoe@example.com> :return: Actor ### Response: def _from_string(cls, string): """Create an Actor from a string. :param string: is the string, which is expected to be in regular git format John Doe <jdoe@example.com> :return: Actor """ m = cls.name_email_regex.search(string) if m: name, email = m.groups() return Actor(name, email) else: m = cls.name_only_regex.search(string) if m: return Actor(m.group(1), None) else: # assume best and use the whole string as name return Actor(string, None)
def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True
Parse the updates line
Below is the the instruction that describes the task: ### Input: Parse the updates line ### Response: def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True
def from_shortcode(cls, context: InstaloaderContext, shortcode: str): """Create a post object from a given shortcode""" # pylint:disable=protected-access post = cls(context, {'shortcode': shortcode}) post._node = post._full_metadata return post
Create a post object from a given shortcode
Below is the the instruction that describes the task: ### Input: Create a post object from a given shortcode ### Response: def from_shortcode(cls, context: InstaloaderContext, shortcode: str): """Create a post object from a given shortcode""" # pylint:disable=protected-access post = cls(context, {'shortcode': shortcode}) post._node = post._full_metadata return post
def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log): """Process tasks in queue.""" processed_count = 0 # Get all tasks serialized_tasks = self.connection.mget([ self._key('task', task_id) for task_id in task_ids ]) # Parse tasks tasks = [] for task_id, serialized_task in zip(task_ids, serialized_tasks): if serialized_task: task_data = json.loads(serialized_task) else: # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = {'id': task_id} task = Task(self.tiger, queue=queue, _data=task_data, _state=ACTIVE, _ts=now) if not serialized_task: # Remove task as per comment above log.error('not found', task_id=task_id) task._move() elif task.id != task_id: log.error('task ID mismatch', task_id=task_id) # Remove task task._move() else: tasks.append(task) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set(task.id for task in tasks) # Group by task func tasks_by_func = OrderedDict() for task in tasks: func = task.serialized_func if func in tasks_by_func: tasks_by_func[func].append(task) else: tasks_by_func[func] = [task] # Execute tasks for each task func for tasks in tasks_by_func.values(): success, processed_tasks = self._execute_task_group(queue, tasks, valid_task_ids, queue_lock) processed_count = processed_count + len(processed_tasks) log.debug('processed', attempted=len(tasks), processed=processed_count) for task in processed_tasks: self._finish_task_processing(queue, task, success) return processed_count
Process tasks in queue.
Below is the the instruction that describes the task: ### Input: Process tasks in queue. ### Response: def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log): """Process tasks in queue.""" processed_count = 0 # Get all tasks serialized_tasks = self.connection.mget([ self._key('task', task_id) for task_id in task_ids ]) # Parse tasks tasks = [] for task_id, serialized_task in zip(task_ids, serialized_tasks): if serialized_task: task_data = json.loads(serialized_task) else: # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = {'id': task_id} task = Task(self.tiger, queue=queue, _data=task_data, _state=ACTIVE, _ts=now) if not serialized_task: # Remove task as per comment above log.error('not found', task_id=task_id) task._move() elif task.id != task_id: log.error('task ID mismatch', task_id=task_id) # Remove task task._move() else: tasks.append(task) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set(task.id for task in tasks) # Group by task func tasks_by_func = OrderedDict() for task in tasks: func = task.serialized_func if func in tasks_by_func: tasks_by_func[func].append(task) else: tasks_by_func[func] = [task] # Execute tasks for each task func for tasks in tasks_by_func.values(): success, processed_tasks = self._execute_task_group(queue, tasks, valid_task_ids, queue_lock) processed_count = processed_count + len(processed_tasks) log.debug('processed', attempted=len(tasks), processed=processed_count) for task in processed_tasks: self._finish_task_processing(queue, task, success) return processed_count
def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False): """Write an integer to one 16-bit register in the slave, possibly scaling it. The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16"). Args: * registeraddress (int): The slave register address (use decimal numbers, not hex). * value (int or float): The value to store in the slave register (might be scaled before sending). * numberOfDecimals (int): The number of decimals for content conversion. * functioncode (int): Modbus function code. Can be 6 or 16. * signed (bool): Whether the data should be interpreted as unsigned or signed. To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally. This will multiply ``value`` by 10 before sending it to the slave register. Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register. For discussion on negative values, the range and on alternative names, see :meth:`.read_register`. Use the parameter ``signed=True`` if writing to a register that can hold negative values. Then negative input will be automatically converted into upper range data (two's complement). Returns: None Raises: ValueError, TypeError, IOError """ _checkFunctioncode(functioncode, [6, 16]) _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals') _checkBool(signed, description='signed') _checkNumerical(value, description='input value') self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)
Write an integer to one 16-bit register in the slave, possibly scaling it. The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16"). Args: * registeraddress (int): The slave register address (use decimal numbers, not hex). * value (int or float): The value to store in the slave register (might be scaled before sending). * numberOfDecimals (int): The number of decimals for content conversion. * functioncode (int): Modbus function code. Can be 6 or 16. * signed (bool): Whether the data should be interpreted as unsigned or signed. To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally. This will multiply ``value`` by 10 before sending it to the slave register. Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register. For discussion on negative values, the range and on alternative names, see :meth:`.read_register`. Use the parameter ``signed=True`` if writing to a register that can hold negative values. Then negative input will be automatically converted into upper range data (two's complement). Returns: None Raises: ValueError, TypeError, IOError
Below is the the instruction that describes the task: ### Input: Write an integer to one 16-bit register in the slave, possibly scaling it. The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16"). Args: * registeraddress (int): The slave register address (use decimal numbers, not hex). * value (int or float): The value to store in the slave register (might be scaled before sending). * numberOfDecimals (int): The number of decimals for content conversion. * functioncode (int): Modbus function code. Can be 6 or 16. * signed (bool): Whether the data should be interpreted as unsigned or signed. To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally. This will multiply ``value`` by 10 before sending it to the slave register. Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register. For discussion on negative values, the range and on alternative names, see :meth:`.read_register`. Use the parameter ``signed=True`` if writing to a register that can hold negative values. Then negative input will be automatically converted into upper range data (two's complement). Returns: None Raises: ValueError, TypeError, IOError ### Response: def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False): """Write an integer to one 16-bit register in the slave, possibly scaling it. The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16"). Args: * registeraddress (int): The slave register address (use decimal numbers, not hex). * value (int or float): The value to store in the slave register (might be scaled before sending). * numberOfDecimals (int): The number of decimals for content conversion. * functioncode (int): Modbus function code. Can be 6 or 16. * signed (bool): Whether the data should be interpreted as unsigned or signed. 
To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally. This will multiply ``value`` by 10 before sending it to the slave register. Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register. For discussion on negative values, the range and on alternative names, see :meth:`.read_register`. Use the parameter ``signed=True`` if writing to a register that can hold negative values. Then negative input will be automatically converted into upper range data (two's complement). Returns: None Raises: ValueError, TypeError, IOError """ _checkFunctioncode(functioncode, [6, 16]) _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals') _checkBool(signed, description='signed') _checkNumerical(value, description='input value') self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)
def add(self, command): # type: (BaseCommand) -> Application """ Adds a command object. """ self.add_command(command.config) command.set_application(self) return self
Adds a command object.
Below is the the instruction that describes the task: ### Input: Adds a command object. ### Response: def add(self, command): # type: (BaseCommand) -> Application """ Adds a command object. """ self.add_command(command.config) command.set_application(self) return self
def to_dataframe(self, start_row=0, max_rows=None): """ Exports the table to a Pandas dataframe. Args: start_row: the row of the table at which to start the export (default 0) max_rows: an upper limit on the number of rows to export (default None) Returns: A Pandas dataframe containing the table data. """ fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows, page_size=self._MAX_PAGE_SIZE) count = 0 page_token = None # Collect results of page fetcher in separate dataframe objects, then # concatenate them to reduce the amount of copying df_list = [] df = None while True: page_rows, page_token = fetcher(page_token, count) if len(page_rows): count += len(page_rows) df_list.append(pandas.DataFrame.from_records(page_rows)) if not page_token: break if df_list: df = pandas.concat(df_list, ignore_index=True, copy=False) # Need to reorder the dataframe to preserve column ordering ordered_fields = [field.name for field in self.schema] return df[ordered_fields] if df is not None else pandas.DataFrame()
Exports the table to a Pandas dataframe. Args: start_row: the row of the table at which to start the export (default 0) max_rows: an upper limit on the number of rows to export (default None) Returns: A Pandas dataframe containing the table data.
Below is the the instruction that describes the task: ### Input: Exports the table to a Pandas dataframe. Args: start_row: the row of the table at which to start the export (default 0) max_rows: an upper limit on the number of rows to export (default None) Returns: A Pandas dataframe containing the table data. ### Response: def to_dataframe(self, start_row=0, max_rows=None): """ Exports the table to a Pandas dataframe. Args: start_row: the row of the table at which to start the export (default 0) max_rows: an upper limit on the number of rows to export (default None) Returns: A Pandas dataframe containing the table data. """ fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows, page_size=self._MAX_PAGE_SIZE) count = 0 page_token = None # Collect results of page fetcher in separate dataframe objects, then # concatenate them to reduce the amount of copying df_list = [] df = None while True: page_rows, page_token = fetcher(page_token, count) if len(page_rows): count += len(page_rows) df_list.append(pandas.DataFrame.from_records(page_rows)) if not page_token: break if df_list: df = pandas.concat(df_list, ignore_index=True, copy=False) # Need to reorder the dataframe to preserve column ordering ordered_fields = [field.name for field in self.schema] return df[ordered_fields] if df is not None else pandas.DataFrame()
def register_bootstrap_options(cls, register): """Register bootstrap options. "Bootstrap options" are a small set of options whose values are useful when registering other options. Therefore we must bootstrap them early, before other options are registered, let alone parsed. Bootstrap option values can be interpolated into the config file, and can be referenced programatically in registration code, e.g., as register.bootstrap.pants_workdir. Note that regular code can also access these options as normal global-scope options. Their status as "bootstrap options" is only pertinent during option registration. """ buildroot = get_buildroot() default_distdir_name = 'dist' default_distdir = os.path.join(buildroot, default_distdir_name) default_rel_distdir = '/{}/'.format(default_distdir_name) register('-l', '--level', choices=['trace', 'debug', 'info', 'warn'], default='info', recursive=True, help='Set the logging level.') register('-q', '--quiet', type=bool, recursive=True, daemon=False, help='Squelches most console output. NOTE: Some tasks default to behaving quietly: ' 'inverting this option supports making them noisier than they would be otherwise.') register('--log-show-rust-3rdparty', type=bool, default=False, advanced=True, help='Whether to show/hide logging done by 3rdparty rust crates used by the pants ' 'engine.') # Not really needed in bootstrap options, but putting it here means it displays right # after -l and -q in help output, which is conveniently contextual. register('--colors', type=bool, default=sys.stdout.isatty(), recursive=True, daemon=False, help='Set whether log messages are displayed in color.') # TODO(#7203): make a regexp option type! register('--ignore-pants-warnings', type=list, member_type=str, default=[], help='Regexps matching warning strings to ignore, e.g. ' '["DEPRECATED: scope some_scope will be removed"]. The regexps will be matched ' 'from the start of the warning string, and will always be case-insensitive. 
' 'See the `warnings` module documentation for more background on these are used.') register('--pants-version', advanced=True, default=pants_version(), help='Use this pants version. Note Pants code only uses this to verify that you are ' 'using the requested version, as Pants cannot dynamically change the version it ' 'is using once the program is already running. This option is useful to set in ' 'your pants.ini, however, and then you can grep the value to select which ' 'version to use for setup scripts (e.g. `./pants`), runner scripts, IDE plugins, ' 'etc. For example, the setup script we distribute at https://www.pantsbuild.org/install.html#recommended-installation ' 'uses this value to determine which Python version to run with. You may find the ' 'version of the pants instance you are running using -v, -V, or --version.') register('--pants-runtime-python-version', advanced=True, removal_version='1.19.0.dev0', deprecation_start_version='1.17.0.dev0', removal_hint=dedent(""" This option was only used to help with Pants' migration to run on Python 3. \ Pants will now correctly default to whichever Python versions are supported for \ the current `pants_version` you are using. Please make sure you are using the \ most up-to-date version of the `./pants` script with: curl -L -O https://pantsbuild.github.io/setup/pants and then unset this option."""), help='Use this Python version to run Pants. The option expects the major and minor ' 'version, e.g. 2.7 or 3.6. Note Pants code only uses this to verify that you are ' 'using the requested interpreter, as Pants cannot dynamically change the ' 'interpreter it is using once the program is already running. This option is ' 'useful to set in your pants.ini, however, and then you can grep the value to ' 'select which interpreter to use for setup scripts (e.g. `./pants`), runner ' 'scripts, IDE plugins, etc. 
For example, the setup script we distribute at ' 'https://www.pantsbuild.org/install.html#recommended-installation uses this ' 'value to determine which Python version to run with. Also note this does not mean ' 'your own code must use this Python version. See ' 'https://www.pantsbuild.org/python_readme.html#configure-the-python-version ' 'for how to configure your code\'s compatibility.') register('--plugins', advanced=True, type=list, help='Load these plugins.') register('--plugin-cache-dir', advanced=True, default=os.path.join(get_pants_cachedir(), 'plugins'), help='Cache resolved plugin requirements here.') register('--backend-packages', advanced=True, type=list, default=['pants.backend.graph_info', 'pants.backend.python', 'pants.backend.jvm', 'pants.backend.native', # TODO: Move into the graph_info backend. 'pants.rules.core', 'pants.backend.codegen.antlr.java', 'pants.backend.codegen.antlr.python', 'pants.backend.codegen.jaxb', 'pants.backend.codegen.protobuf.java', 'pants.backend.codegen.ragel.java', 'pants.backend.codegen.thrift.java', 'pants.backend.codegen.thrift.python', 'pants.backend.codegen.grpcio.python', 'pants.backend.codegen.wire.java', 'pants.backend.project_info'], help='Load backends from these packages that are already on the path. 
' 'Add contrib and custom backends to this list.') register('--pants-bootstrapdir', advanced=True, metavar='<dir>', default=get_pants_cachedir(), help='Use this dir for global cache.') register('--pants-configdir', advanced=True, metavar='<dir>', default=get_pants_configdir(), help='Use this dir for global config files.') register('--pants-workdir', advanced=True, metavar='<dir>', default=os.path.join(buildroot, '.pants.d'), help='Write intermediate output files to this dir.') register('--pants-supportdir', advanced=True, metavar='<dir>', default=os.path.join(buildroot, 'build-support'), help='Use support files from this dir.') register('--pants-distdir', advanced=True, metavar='<dir>', default=default_distdir, help='Write end-product artifacts to this dir. If you modify this path, you ' 'should also update --build-ignore and --pants-ignore to include the ' 'custom dist dir path as well.') register('--pants-subprocessdir', advanced=True, default=os.path.join(buildroot, '.pids'), help='The directory to use for tracking subprocess metadata, if any. This should ' 'live outside of the dir used by `--pants-workdir` to allow for tracking ' 'subprocesses that outlive the workdir data (e.g. `./pants server`).') register('--pants-config-files', advanced=True, type=list, daemon=False, default=[get_default_pants_config_file()], help='Paths to Pants config files.') # TODO: Deprecate the --pantsrc/--pantsrc-files options? This would require being able # to set extra config file locations in an initial bootstrap config file. register('--pantsrc', advanced=True, type=bool, default=True, help='Use pantsrc files.') register('--pantsrc-files', advanced=True, type=list, metavar='<path>', daemon=False, default=['/etc/pantsrc', '~/.pants.rc'], help='Override config with values from these files. 
' 'Later files override earlier ones.') register('--pythonpath', advanced=True, type=list, help='Add these directories to PYTHONPATH to search for plugins.') register('--target-spec-file', type=list, dest='target_spec_files', daemon=False, help='Read additional specs from this file, one per line') register('--verify-config', type=bool, default=True, daemon=False, advanced=True, help='Verify that all config file values correspond to known options.') register('--build-ignore', advanced=True, type=list, default=['.*/', default_rel_distdir, 'bower_components/', 'node_modules/', '*.egg-info/'], help='Paths to ignore when identifying BUILD files. ' 'This does not affect any other filesystem operations. ' 'Patterns use the gitignore pattern syntax (https://git-scm.com/docs/gitignore).') register('--pants-ignore', advanced=True, type=list, default=['.*/', default_rel_distdir], help='Paths to ignore for all filesystem operations performed by pants ' '(e.g. BUILD file scanning, glob matching, etc). ' 'Patterns use the gitignore syntax (https://git-scm.com/docs/gitignore).') register('--glob-expansion-failure', advanced=True, default=GlobMatchErrorBehavior.warn, type=GlobMatchErrorBehavior, help="Raise an exception if any targets declaring source files " "fail to match any glob provided in the 'sources' argument.") # TODO(#7203): make a regexp option type! register('--exclude-target-regexp', advanced=True, type=list, default=[], daemon=False, metavar='<regexp>', help='Exclude target roots that match these regexes.') register('--subproject-roots', type=list, advanced=True, default=[], help='Paths that correspond with build roots for any subproject that this ' 'project depends on.') register('--owner-of', type=list, member_type=file_option, default=[], daemon=False, metavar='<path>', help='Select the targets that own these files. ' 'This is the third target calculation strategy along with the --changed-* ' 'options and specifying the targets directly. 
These three types of target ' 'selection are mutually exclusive.') # These logging options are registered in the bootstrap phase so that plugins can log during # registration and not so that their values can be interpolated in configs. register('-d', '--logdir', advanced=True, metavar='<dir>', help='Write logs to files under this directory.') # This facilitates bootstrap-time configuration of pantsd usage such that we can # determine whether or not to use the Pailgun client to invoke a given pants run # without resorting to heavier options parsing. register('--enable-pantsd', advanced=True, type=bool, default=False, help='Enables use of the pants daemon (and implicitly, the v2 engine). (Beta)') # Shutdown pantsd after the current run. # This needs to be accessed at the same time as enable_pantsd, # so we register it at bootstrap time. register('--shutdown-pantsd-after-run', advanced=True, type=bool, default=False, help='Create a new pantsd server, and use it, and shut it down immediately after. ' 'If pantsd is already running, it will shut it down and spawn a new instance (Beta)') # These facilitate configuring the native engine. register('--native-engine-visualize-to', advanced=True, default=None, type=dir_option, daemon=False, help='A directory to write execution and rule graphs to as `dot` files. The contents ' 'of the directory will be overwritten if any filenames collide.') register('--print-exception-stacktrace', advanced=True, type=bool, help='Print to console the full exception stack trace if encountered.') # BinaryUtil options. register('--binaries-baseurls', type=list, advanced=True, default=['https://binaries.pantsbuild.org'], help='List of URLs from which binary tools are downloaded. 
URLs are ' 'searched in order until the requested path is found.') register('--binaries-fetch-timeout-secs', type=int, default=30, advanced=True, daemon=False, help='Timeout in seconds for URL reads when fetching binary tools from the ' 'repos specified by --baseurls.') register('--binaries-path-by-id', type=dict, advanced=True, help=("Maps output of uname for a machine to a binary search path: " "(sysname, id) -> (os, arch), e.g. {('darwin', '15'): ('mac', '10.11'), " "('linux', 'arm32'): ('linux', 'arm32')}.")) register('--allow-external-binary-tool-downloads', type=bool, default=True, advanced=True, help="If False, require BinaryTool subclasses to download their contents from urls " "generated from --binaries-baseurls, even if the tool has an external url " "generator. This can be necessary if using Pants in an environment which cannot " "contact the wider Internet.") # Pants Daemon options. register('--pantsd-pailgun-host', advanced=True, default='127.0.0.1', help='The host to bind the pants nailgun server to.') register('--pantsd-pailgun-port', advanced=True, type=int, default=0, help='The port to bind the pants nailgun server to. Defaults to a random port.') # TODO(#7514): Make this default to 1.0 seconds if stdin is a tty! register('--pantsd-pailgun-quit-timeout', advanced=True, type=float, default=5.0, help='The length of time (in seconds) to wait for further output after sending a ' 'signal to the remote pantsd-runner process before killing it.') register('--pantsd-log-dir', advanced=True, default=None, help='The directory to log pantsd output to.') register('--pantsd-invalidation-globs', advanced=True, type=list, default=[], help='Filesystem events matching any of these globs will trigger a daemon restart.') # Watchman options. register('--watchman-version', advanced=True, default='4.9.0-pants1', help='Watchman version.') register('--watchman-supportdir', advanced=True, default='bin/watchman', help='Find watchman binaries under this dir. 
Used as part of the path to lookup ' 'the binary with --binaries-baseurls and --pants-bootstrapdir.') register('--watchman-startup-timeout', type=float, advanced=True, default=30.0, help='The watchman socket timeout (in seconds) for the initial `watch-project` command. ' 'This may need to be set higher for larger repos due to watchman startup cost.') register('--watchman-socket-timeout', type=float, advanced=True, default=0.1, help='The watchman client socket timeout in seconds. Setting this to too high a ' 'value can negatively impact the latency of runs forked by pantsd.') register('--watchman-socket-path', type=str, advanced=True, default=None, help='The path to the watchman UNIX socket. This can be overridden if the default ' 'absolute path length exceeds the maximum allowed by the OS.') # This option changes the parser behavior in a fundamental way (which currently invalidates # all caches), and needs to be parsed out early, so we make it a bootstrap option. register('--build-file-imports', choices=['allow', 'warn', 'error'], default='warn', advanced=True, help='Whether to allow import statements in BUILD files') register('--local-store-dir', advanced=True, help="Directory to use for engine's local file store.", # This default is also hard-coded into the engine's rust code in # fs::Store::default_path default=os.path.expanduser('~/.cache/pants/lmdb_store')) register('--remote-store-server', advanced=True, type=list, default=[], help='host:port of grpc server to use as remote execution file store.') register('--remote-store-thread-count', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_thread_count, help='Thread count to use for the pool that interacts with the remote file store.') register('--remote-execution-server', advanced=True, help='host:port of grpc server to use as remote execution scheduler.') register('--remote-store-chunk-bytes', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_chunk_bytes, help='Size 
in bytes of chunks transferred to/from the remote file store.') register('--remote-store-chunk-upload-timeout-seconds', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_chunk_upload_timeout_seconds, help='Timeout (in seconds) for uploads of individual chunks to the remote file store.') register('--remote-store-rpc-retries', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_rpc_retries, help='Number of times to retry any RPC to the remote store before giving up.') register('--remote-execution-process-cache-namespace', advanced=True, help="The cache namespace for remote process execution. " "Bump this to invalidate every artifact's remote execution. " "This is the remote execution equivalent of the legacy cache-key-gen-version " "flag.") register('--remote-instance-name', advanced=True, help='Name of the remote execution instance to use. Used for routing within ' '--remote-execution-server and --remote-store-server.') register('--remote-ca-certs-path', advanced=True, help='Path to a PEM file containing CA certificates used for verifying secure ' 'connections to --remote-execution-server and --remote-store-server. ' 'If not specified, TLS will not be used.') register('--remote-oauth-bearer-token-path', advanced=True, help='Path to a file containing an oauth token to use for grpc connections to ' '--remote-execution-server and --remote-store-server. If not specified, no ' 'authorization will be performed.') # This should eventually deprecate the RunTracker worker count, which is used for legacy cache # lookups via CacheSetup in TaskBase. register('--process-execution-parallelism', type=int, default=multiprocessing.cpu_count(), advanced=True, help='Number of concurrent processes that may be executed either locally and remotely.') register('--process-execution-cleanup-local-dirs', type=bool, default=True, advanced=True, help='Whether or not to cleanup directories used for local process execution ' '(primarily useful for e.g. 
debugging).')
Register bootstrap options. "Bootstrap options" are a small set of options whose values are useful when registering other options. Therefore we must bootstrap them early, before other options are registered, let alone parsed. Bootstrap option values can be interpolated into the config file, and can be referenced programatically in registration code, e.g., as register.bootstrap.pants_workdir. Note that regular code can also access these options as normal global-scope options. Their status as "bootstrap options" is only pertinent during option registration.
Below is the the instruction that describes the task: ### Input: Register bootstrap options. "Bootstrap options" are a small set of options whose values are useful when registering other options. Therefore we must bootstrap them early, before other options are registered, let alone parsed. Bootstrap option values can be interpolated into the config file, and can be referenced programatically in registration code, e.g., as register.bootstrap.pants_workdir. Note that regular code can also access these options as normal global-scope options. Their status as "bootstrap options" is only pertinent during option registration. ### Response: def register_bootstrap_options(cls, register): """Register bootstrap options. "Bootstrap options" are a small set of options whose values are useful when registering other options. Therefore we must bootstrap them early, before other options are registered, let alone parsed. Bootstrap option values can be interpolated into the config file, and can be referenced programatically in registration code, e.g., as register.bootstrap.pants_workdir. Note that regular code can also access these options as normal global-scope options. Their status as "bootstrap options" is only pertinent during option registration. """ buildroot = get_buildroot() default_distdir_name = 'dist' default_distdir = os.path.join(buildroot, default_distdir_name) default_rel_distdir = '/{}/'.format(default_distdir_name) register('-l', '--level', choices=['trace', 'debug', 'info', 'warn'], default='info', recursive=True, help='Set the logging level.') register('-q', '--quiet', type=bool, recursive=True, daemon=False, help='Squelches most console output. 
NOTE: Some tasks default to behaving quietly: ' 'inverting this option supports making them noisier than they would be otherwise.') register('--log-show-rust-3rdparty', type=bool, default=False, advanced=True, help='Whether to show/hide logging done by 3rdparty rust crates used by the pants ' 'engine.') # Not really needed in bootstrap options, but putting it here means it displays right # after -l and -q in help output, which is conveniently contextual. register('--colors', type=bool, default=sys.stdout.isatty(), recursive=True, daemon=False, help='Set whether log messages are displayed in color.') # TODO(#7203): make a regexp option type! register('--ignore-pants-warnings', type=list, member_type=str, default=[], help='Regexps matching warning strings to ignore, e.g. ' '["DEPRECATED: scope some_scope will be removed"]. The regexps will be matched ' 'from the start of the warning string, and will always be case-insensitive. ' 'See the `warnings` module documentation for more background on these are used.') register('--pants-version', advanced=True, default=pants_version(), help='Use this pants version. Note Pants code only uses this to verify that you are ' 'using the requested version, as Pants cannot dynamically change the version it ' 'is using once the program is already running. This option is useful to set in ' 'your pants.ini, however, and then you can grep the value to select which ' 'version to use for setup scripts (e.g. `./pants`), runner scripts, IDE plugins, ' 'etc. For example, the setup script we distribute at https://www.pantsbuild.org/install.html#recommended-installation ' 'uses this value to determine which Python version to run with. 
You may find the ' 'version of the pants instance you are running using -v, -V, or --version.') register('--pants-runtime-python-version', advanced=True, removal_version='1.19.0.dev0', deprecation_start_version='1.17.0.dev0', removal_hint=dedent(""" This option was only used to help with Pants' migration to run on Python 3. \ Pants will now correctly default to whichever Python versions are supported for \ the current `pants_version` you are using. Please make sure you are using the \ most up-to-date version of the `./pants` script with: curl -L -O https://pantsbuild.github.io/setup/pants and then unset this option."""), help='Use this Python version to run Pants. The option expects the major and minor ' 'version, e.g. 2.7 or 3.6. Note Pants code only uses this to verify that you are ' 'using the requested interpreter, as Pants cannot dynamically change the ' 'interpreter it is using once the program is already running. This option is ' 'useful to set in your pants.ini, however, and then you can grep the value to ' 'select which interpreter to use for setup scripts (e.g. `./pants`), runner ' 'scripts, IDE plugins, etc. For example, the setup script we distribute at ' 'https://www.pantsbuild.org/install.html#recommended-installation uses this ' 'value to determine which Python version to run with. Also note this does not mean ' 'your own code must use this Python version. See ' 'https://www.pantsbuild.org/python_readme.html#configure-the-python-version ' 'for how to configure your code\'s compatibility.') register('--plugins', advanced=True, type=list, help='Load these plugins.') register('--plugin-cache-dir', advanced=True, default=os.path.join(get_pants_cachedir(), 'plugins'), help='Cache resolved plugin requirements here.') register('--backend-packages', advanced=True, type=list, default=['pants.backend.graph_info', 'pants.backend.python', 'pants.backend.jvm', 'pants.backend.native', # TODO: Move into the graph_info backend. 
'pants.rules.core', 'pants.backend.codegen.antlr.java', 'pants.backend.codegen.antlr.python', 'pants.backend.codegen.jaxb', 'pants.backend.codegen.protobuf.java', 'pants.backend.codegen.ragel.java', 'pants.backend.codegen.thrift.java', 'pants.backend.codegen.thrift.python', 'pants.backend.codegen.grpcio.python', 'pants.backend.codegen.wire.java', 'pants.backend.project_info'], help='Load backends from these packages that are already on the path. ' 'Add contrib and custom backends to this list.') register('--pants-bootstrapdir', advanced=True, metavar='<dir>', default=get_pants_cachedir(), help='Use this dir for global cache.') register('--pants-configdir', advanced=True, metavar='<dir>', default=get_pants_configdir(), help='Use this dir for global config files.') register('--pants-workdir', advanced=True, metavar='<dir>', default=os.path.join(buildroot, '.pants.d'), help='Write intermediate output files to this dir.') register('--pants-supportdir', advanced=True, metavar='<dir>', default=os.path.join(buildroot, 'build-support'), help='Use support files from this dir.') register('--pants-distdir', advanced=True, metavar='<dir>', default=default_distdir, help='Write end-product artifacts to this dir. If you modify this path, you ' 'should also update --build-ignore and --pants-ignore to include the ' 'custom dist dir path as well.') register('--pants-subprocessdir', advanced=True, default=os.path.join(buildroot, '.pids'), help='The directory to use for tracking subprocess metadata, if any. This should ' 'live outside of the dir used by `--pants-workdir` to allow for tracking ' 'subprocesses that outlive the workdir data (e.g. `./pants server`).') register('--pants-config-files', advanced=True, type=list, daemon=False, default=[get_default_pants_config_file()], help='Paths to Pants config files.') # TODO: Deprecate the --pantsrc/--pantsrc-files options? This would require being able # to set extra config file locations in an initial bootstrap config file. 
register('--pantsrc', advanced=True, type=bool, default=True, help='Use pantsrc files.') register('--pantsrc-files', advanced=True, type=list, metavar='<path>', daemon=False, default=['/etc/pantsrc', '~/.pants.rc'], help='Override config with values from these files. ' 'Later files override earlier ones.') register('--pythonpath', advanced=True, type=list, help='Add these directories to PYTHONPATH to search for plugins.') register('--target-spec-file', type=list, dest='target_spec_files', daemon=False, help='Read additional specs from this file, one per line') register('--verify-config', type=bool, default=True, daemon=False, advanced=True, help='Verify that all config file values correspond to known options.') register('--build-ignore', advanced=True, type=list, default=['.*/', default_rel_distdir, 'bower_components/', 'node_modules/', '*.egg-info/'], help='Paths to ignore when identifying BUILD files. ' 'This does not affect any other filesystem operations. ' 'Patterns use the gitignore pattern syntax (https://git-scm.com/docs/gitignore).') register('--pants-ignore', advanced=True, type=list, default=['.*/', default_rel_distdir], help='Paths to ignore for all filesystem operations performed by pants ' '(e.g. BUILD file scanning, glob matching, etc). ' 'Patterns use the gitignore syntax (https://git-scm.com/docs/gitignore).') register('--glob-expansion-failure', advanced=True, default=GlobMatchErrorBehavior.warn, type=GlobMatchErrorBehavior, help="Raise an exception if any targets declaring source files " "fail to match any glob provided in the 'sources' argument.") # TODO(#7203): make a regexp option type! 
register('--exclude-target-regexp', advanced=True, type=list, default=[], daemon=False, metavar='<regexp>', help='Exclude target roots that match these regexes.') register('--subproject-roots', type=list, advanced=True, default=[], help='Paths that correspond with build roots for any subproject that this ' 'project depends on.') register('--owner-of', type=list, member_type=file_option, default=[], daemon=False, metavar='<path>', help='Select the targets that own these files. ' 'This is the third target calculation strategy along with the --changed-* ' 'options and specifying the targets directly. These three types of target ' 'selection are mutually exclusive.') # These logging options are registered in the bootstrap phase so that plugins can log during # registration and not so that their values can be interpolated in configs. register('-d', '--logdir', advanced=True, metavar='<dir>', help='Write logs to files under this directory.') # This facilitates bootstrap-time configuration of pantsd usage such that we can # determine whether or not to use the Pailgun client to invoke a given pants run # without resorting to heavier options parsing. register('--enable-pantsd', advanced=True, type=bool, default=False, help='Enables use of the pants daemon (and implicitly, the v2 engine). (Beta)') # Shutdown pantsd after the current run. # This needs to be accessed at the same time as enable_pantsd, # so we register it at bootstrap time. register('--shutdown-pantsd-after-run', advanced=True, type=bool, default=False, help='Create a new pantsd server, and use it, and shut it down immediately after. ' 'If pantsd is already running, it will shut it down and spawn a new instance (Beta)') # These facilitate configuring the native engine. register('--native-engine-visualize-to', advanced=True, default=None, type=dir_option, daemon=False, help='A directory to write execution and rule graphs to as `dot` files. 
The contents ' 'of the directory will be overwritten if any filenames collide.') register('--print-exception-stacktrace', advanced=True, type=bool, help='Print to console the full exception stack trace if encountered.') # BinaryUtil options. register('--binaries-baseurls', type=list, advanced=True, default=['https://binaries.pantsbuild.org'], help='List of URLs from which binary tools are downloaded. URLs are ' 'searched in order until the requested path is found.') register('--binaries-fetch-timeout-secs', type=int, default=30, advanced=True, daemon=False, help='Timeout in seconds for URL reads when fetching binary tools from the ' 'repos specified by --baseurls.') register('--binaries-path-by-id', type=dict, advanced=True, help=("Maps output of uname for a machine to a binary search path: " "(sysname, id) -> (os, arch), e.g. {('darwin', '15'): ('mac', '10.11'), " "('linux', 'arm32'): ('linux', 'arm32')}.")) register('--allow-external-binary-tool-downloads', type=bool, default=True, advanced=True, help="If False, require BinaryTool subclasses to download their contents from urls " "generated from --binaries-baseurls, even if the tool has an external url " "generator. This can be necessary if using Pants in an environment which cannot " "contact the wider Internet.") # Pants Daemon options. register('--pantsd-pailgun-host', advanced=True, default='127.0.0.1', help='The host to bind the pants nailgun server to.') register('--pantsd-pailgun-port', advanced=True, type=int, default=0, help='The port to bind the pants nailgun server to. Defaults to a random port.') # TODO(#7514): Make this default to 1.0 seconds if stdin is a tty! 
register('--pantsd-pailgun-quit-timeout', advanced=True, type=float, default=5.0, help='The length of time (in seconds) to wait for further output after sending a ' 'signal to the remote pantsd-runner process before killing it.') register('--pantsd-log-dir', advanced=True, default=None, help='The directory to log pantsd output to.') register('--pantsd-invalidation-globs', advanced=True, type=list, default=[], help='Filesystem events matching any of these globs will trigger a daemon restart.') # Watchman options. register('--watchman-version', advanced=True, default='4.9.0-pants1', help='Watchman version.') register('--watchman-supportdir', advanced=True, default='bin/watchman', help='Find watchman binaries under this dir. Used as part of the path to lookup ' 'the binary with --binaries-baseurls and --pants-bootstrapdir.') register('--watchman-startup-timeout', type=float, advanced=True, default=30.0, help='The watchman socket timeout (in seconds) for the initial `watch-project` command. ' 'This may need to be set higher for larger repos due to watchman startup cost.') register('--watchman-socket-timeout', type=float, advanced=True, default=0.1, help='The watchman client socket timeout in seconds. Setting this to too high a ' 'value can negatively impact the latency of runs forked by pantsd.') register('--watchman-socket-path', type=str, advanced=True, default=None, help='The path to the watchman UNIX socket. This can be overridden if the default ' 'absolute path length exceeds the maximum allowed by the OS.') # This option changes the parser behavior in a fundamental way (which currently invalidates # all caches), and needs to be parsed out early, so we make it a bootstrap option. 
register('--build-file-imports', choices=['allow', 'warn', 'error'], default='warn', advanced=True, help='Whether to allow import statements in BUILD files') register('--local-store-dir', advanced=True, help="Directory to use for engine's local file store.", # This default is also hard-coded into the engine's rust code in # fs::Store::default_path default=os.path.expanduser('~/.cache/pants/lmdb_store')) register('--remote-store-server', advanced=True, type=list, default=[], help='host:port of grpc server to use as remote execution file store.') register('--remote-store-thread-count', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_thread_count, help='Thread count to use for the pool that interacts with the remote file store.') register('--remote-execution-server', advanced=True, help='host:port of grpc server to use as remote execution scheduler.') register('--remote-store-chunk-bytes', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_chunk_bytes, help='Size in bytes of chunks transferred to/from the remote file store.') register('--remote-store-chunk-upload-timeout-seconds', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_chunk_upload_timeout_seconds, help='Timeout (in seconds) for uploads of individual chunks to the remote file store.') register('--remote-store-rpc-retries', type=int, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.remote_store_rpc_retries, help='Number of times to retry any RPC to the remote store before giving up.') register('--remote-execution-process-cache-namespace', advanced=True, help="The cache namespace for remote process execution. " "Bump this to invalidate every artifact's remote execution. " "This is the remote execution equivalent of the legacy cache-key-gen-version " "flag.") register('--remote-instance-name', advanced=True, help='Name of the remote execution instance to use. 
Used for routing within ' '--remote-execution-server and --remote-store-server.') register('--remote-ca-certs-path', advanced=True, help='Path to a PEM file containing CA certificates used for verifying secure ' 'connections to --remote-execution-server and --remote-store-server. ' 'If not specified, TLS will not be used.') register('--remote-oauth-bearer-token-path', advanced=True, help='Path to a file containing an oauth token to use for grpc connections to ' '--remote-execution-server and --remote-store-server. If not specified, no ' 'authorization will be performed.') # This should eventually deprecate the RunTracker worker count, which is used for legacy cache # lookups via CacheSetup in TaskBase. register('--process-execution-parallelism', type=int, default=multiprocessing.cpu_count(), advanced=True, help='Number of concurrent processes that may be executed either locally and remotely.') register('--process-execution-cleanup-local-dirs', type=bool, default=True, advanced=True, help='Whether or not to cleanup directories used for local process execution ' '(primarily useful for e.g. debugging).')
def extract_function_metadata(wrapped, instance, args, kwargs, return_value): """Stash the `args` and `kwargs` into the metadata of the subsegment.""" LOGGER.debug( 'Extracting function call metadata', args=args, kwargs=kwargs, ) return { 'metadata': { 'args': args, 'kwargs': kwargs, }, }
Stash the `args` and `kwargs` into the metadata of the subsegment.
Below is the the instruction that describes the task: ### Input: Stash the `args` and `kwargs` into the metadata of the subsegment. ### Response: def extract_function_metadata(wrapped, instance, args, kwargs, return_value): """Stash the `args` and `kwargs` into the metadata of the subsegment.""" LOGGER.debug( 'Extracting function call metadata', args=args, kwargs=kwargs, ) return { 'metadata': { 'args': args, 'kwargs': kwargs, }, }
def is_member(self, m): """Check if a user is a member of the chatroom""" if not m: return False elif isinstance(m, basestring): jid = m else: jid = m['JID'] is_member = len(filter(lambda m: m['JID'] == jid and m.get('STATUS') in ('ACTIVE', 'INVITED'), self.params['MEMBERS'])) > 0 return is_member
Check if a user is a member of the chatroom
Below is the the instruction that describes the task: ### Input: Check if a user is a member of the chatroom ### Response: def is_member(self, m): """Check if a user is a member of the chatroom""" if not m: return False elif isinstance(m, basestring): jid = m else: jid = m['JID'] is_member = len(filter(lambda m: m['JID'] == jid and m.get('STATUS') in ('ACTIVE', 'INVITED'), self.params['MEMBERS'])) > 0 return is_member
def of_type(self, *kinds): """Selects documents if a field is of the correct type. Document.field.of_type() Document.field.of_type('string') Element operator: {$type: self.__foreign__} Documentation: https://docs.mongodb.org/manual/reference/operator/query/type/#op._S_type """ if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz). return reduce(self._combining, (q.of_type(*kinds) for q in self._field)) foreign = set(kinds) if kinds else self._field.__foreign__ if not foreign: return Filter() if len(foreign) == 1: # Simplify if the value is singular. foreign, = foreign # Unpack. return Filter({self._name: {'$type': foreign}})
Selects documents if a field is of the correct type. Document.field.of_type() Document.field.of_type('string') Element operator: {$type: self.__foreign__} Documentation: https://docs.mongodb.org/manual/reference/operator/query/type/#op._S_type
Below is the the instruction that describes the task: ### Input: Selects documents if a field is of the correct type. Document.field.of_type() Document.field.of_type('string') Element operator: {$type: self.__foreign__} Documentation: https://docs.mongodb.org/manual/reference/operator/query/type/#op._S_type ### Response: def of_type(self, *kinds): """Selects documents if a field is of the correct type. Document.field.of_type() Document.field.of_type('string') Element operator: {$type: self.__foreign__} Documentation: https://docs.mongodb.org/manual/reference/operator/query/type/#op._S_type """ if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz). return reduce(self._combining, (q.of_type(*kinds) for q in self._field)) foreign = set(kinds) if kinds else self._field.__foreign__ if not foreign: return Filter() if len(foreign) == 1: # Simplify if the value is singular. foreign, = foreign # Unpack. return Filter({self._name: {'$type': foreign}})
def extract_subset(self, subset): """ Find all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs """ # note subsets have an unusual encoding query = """ prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#> SELECT ?c WHERE {{ GRAPH <{g}> {{ ?c oboInOwl:inSubset ?s FILTER regex(?s,'#{s}$','i') }} }} """.format(s=subset, g=self.graph_name) bindings = run_sparql(query) return [r['c']['value'] for r in bindings]
Find all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs
Below is the the instruction that describes the task: ### Input: Find all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs ### Response: def extract_subset(self, subset): """ Find all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs """ # note subsets have an unusual encoding query = """ prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#> SELECT ?c WHERE {{ GRAPH <{g}> {{ ?c oboInOwl:inSubset ?s FILTER regex(?s,'#{s}$','i') }} }} """.format(s=subset, g=self.graph_name) bindings = run_sparql(query) return [r['c']['value'] for r in bindings]
def formatted(self): # pylint: disable=line-too-long """ Return a human readable string with the statistics for this container. The operations are sorted by decreasing average time. The three columns for `ServerTime` are included only if the WBEM server has returned WBEM server response times. Example if statistics are enabled:: Statistics (times in seconds, lengths in Bytes): Count Excep ClientTime ServerTime RequestLen ReplyLen Operation Cnt Avg Min Max Avg Min Max Avg Min Max Avg Min Max 3 0 0.234 0.100 0.401 0.204 0.080 0.361 1233 1000 1500 26667 20000 35000 EnumerateInstances 1 0 0.100 0.100 0.100 0.080 0.080 0.080 1200 1200 1200 22000 22000 22000 EnumerateInstanceNames . . . Example if statistics are disabled:: Statistics (times in seconds, lengths in Bytes): Disabled """ # noqa: E501 # pylint: enable=line-too-long ret = "Statistics (times in seconds, lengths in Bytes):\n" if self.enabled: snapshot = sorted(self.snapshot(), key=lambda item: item[1].avg_time, reverse=True) # Test to see if any server time is non-zero include_svr = False for name, stats in snapshot: # pylint: disable=unused-variable # pylint: disable=protected-access if stats._server_time_stored: include_svr = True # pylint: disable=protected-access if include_svr: ret += OperationStatistic._formatted_header_w_svr else: ret += OperationStatistic._formatted_header for name, stats in snapshot: # pylint: disable=unused-variable ret += stats.formatted(include_svr) else: ret += "Disabled" return ret.strip()
Return a human readable string with the statistics for this container. The operations are sorted by decreasing average time. The three columns for `ServerTime` are included only if the WBEM server has returned WBEM server response times. Example if statistics are enabled:: Statistics (times in seconds, lengths in Bytes): Count Excep ClientTime ServerTime RequestLen ReplyLen Operation Cnt Avg Min Max Avg Min Max Avg Min Max Avg Min Max 3 0 0.234 0.100 0.401 0.204 0.080 0.361 1233 1000 1500 26667 20000 35000 EnumerateInstances 1 0 0.100 0.100 0.100 0.080 0.080 0.080 1200 1200 1200 22000 22000 22000 EnumerateInstanceNames . . . Example if statistics are disabled:: Statistics (times in seconds, lengths in Bytes): Disabled
Below is the the instruction that describes the task: ### Input: Return a human readable string with the statistics for this container. The operations are sorted by decreasing average time. The three columns for `ServerTime` are included only if the WBEM server has returned WBEM server response times. Example if statistics are enabled:: Statistics (times in seconds, lengths in Bytes): Count Excep ClientTime ServerTime RequestLen ReplyLen Operation Cnt Avg Min Max Avg Min Max Avg Min Max Avg Min Max 3 0 0.234 0.100 0.401 0.204 0.080 0.361 1233 1000 1500 26667 20000 35000 EnumerateInstances 1 0 0.100 0.100 0.100 0.080 0.080 0.080 1200 1200 1200 22000 22000 22000 EnumerateInstanceNames . . . Example if statistics are disabled:: Statistics (times in seconds, lengths in Bytes): Disabled ### Response: def formatted(self): # pylint: disable=line-too-long """ Return a human readable string with the statistics for this container. The operations are sorted by decreasing average time. The three columns for `ServerTime` are included only if the WBEM server has returned WBEM server response times. Example if statistics are enabled:: Statistics (times in seconds, lengths in Bytes): Count Excep ClientTime ServerTime RequestLen ReplyLen Operation Cnt Avg Min Max Avg Min Max Avg Min Max Avg Min Max 3 0 0.234 0.100 0.401 0.204 0.080 0.361 1233 1000 1500 26667 20000 35000 EnumerateInstances 1 0 0.100 0.100 0.100 0.080 0.080 0.080 1200 1200 1200 22000 22000 22000 EnumerateInstanceNames . . . 
Example if statistics are disabled:: Statistics (times in seconds, lengths in Bytes): Disabled """ # noqa: E501 # pylint: enable=line-too-long ret = "Statistics (times in seconds, lengths in Bytes):\n" if self.enabled: snapshot = sorted(self.snapshot(), key=lambda item: item[1].avg_time, reverse=True) # Test to see if any server time is non-zero include_svr = False for name, stats in snapshot: # pylint: disable=unused-variable # pylint: disable=protected-access if stats._server_time_stored: include_svr = True # pylint: disable=protected-access if include_svr: ret += OperationStatistic._formatted_header_w_svr else: ret += OperationStatistic._formatted_header for name, stats in snapshot: # pylint: disable=unused-variable ret += stats.formatted(include_svr) else: ret += "Disabled" return ret.strip()
def inject_coordinates(self, x_coords, y_coords, rescale_x=None, rescale_y=None, original_x=None, original_y=None): ''' Inject custom x and y coordinates for each term into chart. Parameters ---------- x_coords: array-like positions on x-axis \in [0,1] y_coords: array-like positions on y-axis \in [0,1] rescale_x: lambda list[0,1]: list[0,1], default identity Rescales x-axis after filtering rescale_y: lambda list[0,1]: list[0,1], default identity Rescales y-axis after filtering original_x : array-like, optional Original, unscaled x-values. Defaults to x_coords original_y : array-like, optional Original, unscaled y-values. Defaults to y_coords Returns ------- self: ScatterChart ''' self._verify_coordinates(x_coords, 'x') self._verify_coordinates(y_coords, 'y') self.x_coords = x_coords self.y_coords = y_coords self._rescale_x = rescale_x self._rescale_y = rescale_y self.original_x = x_coords if original_x is None else original_x self.original_y = y_coords if original_y is None else original_y
Inject custom x and y coordinates for each term into chart. Parameters ---------- x_coords: array-like positions on x-axis \in [0,1] y_coords: array-like positions on y-axis \in [0,1] rescale_x: lambda list[0,1]: list[0,1], default identity Rescales x-axis after filtering rescale_y: lambda list[0,1]: list[0,1], default identity Rescales y-axis after filtering original_x : array-like, optional Original, unscaled x-values. Defaults to x_coords original_y : array-like, optional Original, unscaled y-values. Defaults to y_coords Returns ------- self: ScatterChart
Below is the the instruction that describes the task: ### Input: Inject custom x and y coordinates for each term into chart. Parameters ---------- x_coords: array-like positions on x-axis \in [0,1] y_coords: array-like positions on y-axis \in [0,1] rescale_x: lambda list[0,1]: list[0,1], default identity Rescales x-axis after filtering rescale_y: lambda list[0,1]: list[0,1], default identity Rescales y-axis after filtering original_x : array-like, optional Original, unscaled x-values. Defaults to x_coords original_y : array-like, optional Original, unscaled y-values. Defaults to y_coords Returns ------- self: ScatterChart ### Response: def inject_coordinates(self, x_coords, y_coords, rescale_x=None, rescale_y=None, original_x=None, original_y=None): ''' Inject custom x and y coordinates for each term into chart. Parameters ---------- x_coords: array-like positions on x-axis \in [0,1] y_coords: array-like positions on y-axis \in [0,1] rescale_x: lambda list[0,1]: list[0,1], default identity Rescales x-axis after filtering rescale_y: lambda list[0,1]: list[0,1], default identity Rescales y-axis after filtering original_x : array-like, optional Original, unscaled x-values. Defaults to x_coords original_y : array-like, optional Original, unscaled y-values. Defaults to y_coords Returns ------- self: ScatterChart ''' self._verify_coordinates(x_coords, 'x') self._verify_coordinates(y_coords, 'y') self.x_coords = x_coords self.y_coords = y_coords self._rescale_x = rescale_x self._rescale_y = rescale_y self.original_x = x_coords if original_x is None else original_x self.original_y = y_coords if original_y is None else original_y
def res_from_en(pst,enfile): """load ensemble file for residual into a pandas.DataFrame Parameters ---------- enfile : str ensemble file name Returns ------- pandas.DataFrame : pandas.DataFrame """ converters = {"name": str_con, "group": str_con} try: #substitute ensemble for res, 'base' if there, otherwise mean obs=pst.observation_data if isinstance(enfile,str): df=pd.read_csv(enfile,converters=converters) df.columns=df.columns.str.lower() df = df.set_index('real_name').T.rename_axis('name').rename_axis(None, 1) else: df = enfile.T if 'base' in df.columns: df['modelled']=df['base'] df['std']=df.std(axis=1) else: df['modelled']=df.mean(axis=1) df['std']=df.std(axis=1) #probably a more pandastic way to do this res_df=df[['modelled','std']].copy() res_df['group']=obs.loc[:,'obgnme'].copy() res_df['measured']=obs['obsval'].copy() res_df['weight']=obs['weight'].copy() res_df['residual']=res_df['measured']-res_df['modelled'] except Exception as e: raise Exception("Pst.res_from_en:{0}".format(str(e))) return res_df
load ensemble file for residual into a pandas.DataFrame Parameters ---------- enfile : str ensemble file name Returns ------- pandas.DataFrame : pandas.DataFrame
Below is the the instruction that describes the task: ### Input: load ensemble file for residual into a pandas.DataFrame Parameters ---------- enfile : str ensemble file name Returns ------- pandas.DataFrame : pandas.DataFrame ### Response: def res_from_en(pst,enfile): """load ensemble file for residual into a pandas.DataFrame Parameters ---------- enfile : str ensemble file name Returns ------- pandas.DataFrame : pandas.DataFrame """ converters = {"name": str_con, "group": str_con} try: #substitute ensemble for res, 'base' if there, otherwise mean obs=pst.observation_data if isinstance(enfile,str): df=pd.read_csv(enfile,converters=converters) df.columns=df.columns.str.lower() df = df.set_index('real_name').T.rename_axis('name').rename_axis(None, 1) else: df = enfile.T if 'base' in df.columns: df['modelled']=df['base'] df['std']=df.std(axis=1) else: df['modelled']=df.mean(axis=1) df['std']=df.std(axis=1) #probably a more pandastic way to do this res_df=df[['modelled','std']].copy() res_df['group']=obs.loc[:,'obgnme'].copy() res_df['measured']=obs['obsval'].copy() res_df['weight']=obs['weight'].copy() res_df['residual']=res_df['measured']-res_df['modelled'] except Exception as e: raise Exception("Pst.res_from_en:{0}".format(str(e))) return res_df
def serialize_object(self, attr, **kwargs): """Serialize a generic object. This will be handled as a dictionary. If object passed in is not a basic type (str, int, float, dict, list) it will simply be cast to str. :param dict attr: Object to be serialized. :rtype: dict or str """ if attr is None: return None if isinstance(attr, ET.Element): return attr obj_type = type(attr) if obj_type in self.basic_types: return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) if obj_type is _long_type: return self.serialize_long(attr) # If it's a model or I know this dependency, serialize as a Model elif obj_type in self.dependencies.values() or isinstance(obj_type, Model): return self._serialize(attr) if obj_type == dict: serialized = {} for key, value in attr.items(): try: serialized[self.serialize_unicode(key)] = self.serialize_object( value, **kwargs) except ValueError: serialized[self.serialize_unicode(key)] = None return serialized if obj_type == list: serialized = [] for obj in attr: try: serialized.append(self.serialize_object( obj, **kwargs)) except ValueError: pass return serialized return str(attr)
Serialize a generic object. This will be handled as a dictionary. If object passed in is not a basic type (str, int, float, dict, list) it will simply be cast to str. :param dict attr: Object to be serialized. :rtype: dict or str
Below is the the instruction that describes the task: ### Input: Serialize a generic object. This will be handled as a dictionary. If object passed in is not a basic type (str, int, float, dict, list) it will simply be cast to str. :param dict attr: Object to be serialized. :rtype: dict or str ### Response: def serialize_object(self, attr, **kwargs): """Serialize a generic object. This will be handled as a dictionary. If object passed in is not a basic type (str, int, float, dict, list) it will simply be cast to str. :param dict attr: Object to be serialized. :rtype: dict or str """ if attr is None: return None if isinstance(attr, ET.Element): return attr obj_type = type(attr) if obj_type in self.basic_types: return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) if obj_type is _long_type: return self.serialize_long(attr) # If it's a model or I know this dependency, serialize as a Model elif obj_type in self.dependencies.values() or isinstance(obj_type, Model): return self._serialize(attr) if obj_type == dict: serialized = {} for key, value in attr.items(): try: serialized[self.serialize_unicode(key)] = self.serialize_object( value, **kwargs) except ValueError: serialized[self.serialize_unicode(key)] = None return serialized if obj_type == list: serialized = [] for obj in attr: try: serialized.append(self.serialize_object( obj, **kwargs)) except ValueError: pass return serialized return str(attr)
def make_tar(tfn, source_dirs, ignore_path=[], optimize_python=True): ''' Make a zip file `fn` from the contents of source_dis. ''' # selector function def select(fn): rfn = realpath(fn) for p in ignore_path: if p.endswith('/'): p = p[:-1] if rfn.startswith(p): return False if rfn in python_files: return False return not is_blacklist(fn) # get the files and relpath file of all the directory we asked for files = [] for sd in source_dirs: sd = realpath(sd) compile_dir(sd, optimize_python=optimize_python) files += [(x, relpath(realpath(x), sd)) for x in listfiles(sd) if select(x)] # create tar.gz of thoses files tf = tarfile.open(tfn, 'w:gz', format=tarfile.USTAR_FORMAT) dirs = [] for fn, afn in files: dn = dirname(afn) if dn not in dirs: # create every dirs first if not exist yet d = '' for component in split(dn): d = join(d, component) if d.startswith('/'): d = d[1:] if d == '' or d in dirs: continue dirs.append(d) tinfo = tarfile.TarInfo(d) tinfo.type = tarfile.DIRTYPE tf.addfile(tinfo) # put the file tf.add(fn, afn) tf.close()
Make a zip file `fn` from the contents of source_dis.
Below is the the instruction that describes the task: ### Input: Make a zip file `fn` from the contents of source_dis. ### Response: def make_tar(tfn, source_dirs, ignore_path=[], optimize_python=True): ''' Make a zip file `fn` from the contents of source_dis. ''' # selector function def select(fn): rfn = realpath(fn) for p in ignore_path: if p.endswith('/'): p = p[:-1] if rfn.startswith(p): return False if rfn in python_files: return False return not is_blacklist(fn) # get the files and relpath file of all the directory we asked for files = [] for sd in source_dirs: sd = realpath(sd) compile_dir(sd, optimize_python=optimize_python) files += [(x, relpath(realpath(x), sd)) for x in listfiles(sd) if select(x)] # create tar.gz of thoses files tf = tarfile.open(tfn, 'w:gz', format=tarfile.USTAR_FORMAT) dirs = [] for fn, afn in files: dn = dirname(afn) if dn not in dirs: # create every dirs first if not exist yet d = '' for component in split(dn): d = join(d, component) if d.startswith('/'): d = d[1:] if d == '' or d in dirs: continue dirs.append(d) tinfo = tarfile.TarInfo(d) tinfo.type = tarfile.DIRTYPE tf.addfile(tinfo) # put the file tf.add(fn, afn) tf.close()
def rebin(d, n_x, n_y=None): """ Rebin data by averaging bins together Args: d (np.array): data n_x (int): number of bins in x dir to rebin into one n_y (int): number of bins in y dir to rebin into one Returns: d: rebinned data with shape (n_x, n_y) """ if d.ndim == 2: if n_y is None: n_y = 1 if n_x is None: n_x = 1 d = d[:int(d.shape[0] // n_x) * n_x, :int(d.shape[1] // n_y) * n_y] d = d.reshape((d.shape[0] // n_x, n_x, d.shape[1] // n_y, n_y)) d = d.mean(axis=3) d = d.mean(axis=1) elif d.ndim == 1: d = d[:int(d.shape[0] // n_x) * n_x] d = d.reshape((d.shape[0] // n_x, n_x)) d = d.mean(axis=1) else: raise RuntimeError("Only NDIM <= 2 supported") return d
Rebin data by averaging bins together Args: d (np.array): data n_x (int): number of bins in x dir to rebin into one n_y (int): number of bins in y dir to rebin into one Returns: d: rebinned data with shape (n_x, n_y)
Below is the the instruction that describes the task: ### Input: Rebin data by averaging bins together Args: d (np.array): data n_x (int): number of bins in x dir to rebin into one n_y (int): number of bins in y dir to rebin into one Returns: d: rebinned data with shape (n_x, n_y) ### Response: def rebin(d, n_x, n_y=None): """ Rebin data by averaging bins together Args: d (np.array): data n_x (int): number of bins in x dir to rebin into one n_y (int): number of bins in y dir to rebin into one Returns: d: rebinned data with shape (n_x, n_y) """ if d.ndim == 2: if n_y is None: n_y = 1 if n_x is None: n_x = 1 d = d[:int(d.shape[0] // n_x) * n_x, :int(d.shape[1] // n_y) * n_y] d = d.reshape((d.shape[0] // n_x, n_x, d.shape[1] // n_y, n_y)) d = d.mean(axis=3) d = d.mean(axis=1) elif d.ndim == 1: d = d[:int(d.shape[0] // n_x) * n_x] d = d.reshape((d.shape[0] // n_x, n_x)) d = d.mean(axis=1) else: raise RuntimeError("Only NDIM <= 2 supported") return d
def forget_canvas(canvas): """ Forget about the given canvas. Used by the canvas when closed. """ cc = [c() for c in canvasses if c() is not None] while canvas in cc: cc.remove(canvas) canvasses[:] = [weakref.ref(c) for c in cc]
Forget about the given canvas. Used by the canvas when closed.
Below is the the instruction that describes the task: ### Input: Forget about the given canvas. Used by the canvas when closed. ### Response: def forget_canvas(canvas): """ Forget about the given canvas. Used by the canvas when closed. """ cc = [c() for c in canvasses if c() is not None] while canvas in cc: cc.remove(canvas) canvasses[:] = [weakref.ref(c) for c in cc]
def disable_active_checks(self, checks): """Disable active checks for this host/service Update check in progress with current object information :param checks: Checks object, to change all checks in progress :type checks: alignak.objects.check.Checks :return: None """ self.active_checks_enabled = False for chk_id in self.checks_in_progress: chk = checks[chk_id] chk.status = ACT_STATUS_WAIT_CONSUME chk.exit_status = self.state_id chk.output = self.output chk.check_time = time.time() chk.execution_time = 0 chk.perf_data = self.perf_data
Disable active checks for this host/service Update check in progress with current object information :param checks: Checks object, to change all checks in progress :type checks: alignak.objects.check.Checks :return: None
Below is the the instruction that describes the task: ### Input: Disable active checks for this host/service Update check in progress with current object information :param checks: Checks object, to change all checks in progress :type checks: alignak.objects.check.Checks :return: None ### Response: def disable_active_checks(self, checks): """Disable active checks for this host/service Update check in progress with current object information :param checks: Checks object, to change all checks in progress :type checks: alignak.objects.check.Checks :return: None """ self.active_checks_enabled = False for chk_id in self.checks_in_progress: chk = checks[chk_id] chk.status = ACT_STATUS_WAIT_CONSUME chk.exit_status = self.state_id chk.output = self.output chk.check_time = time.time() chk.execution_time = 0 chk.perf_data = self.perf_data
def hooks_setup(trun, parent, hnames=None): """ Setup test-hooks @returns dict of hook filepaths {"enter": [], "exit": []} """ hooks = { "enter": [], "exit": [] } if hnames is None: # Nothing to do, just return the struct return hooks for hname in hnames: # Fill out paths for med in HOOK_PATTERNS: for ptn in HOOK_PATTERNS[med]: fpath = os.sep.join([trun["conf"]["HOOKS"], ptn % hname]) if not os.path.exists(fpath): continue hook = hook_setup(parent, fpath) if not hook: continue hooks[med].append(hook) if not hooks["enter"] + hooks["exit"]: cij.err("rnr:hooks_setup:FAIL { hname: %r has no files }" % hname) return None return hooks
Setup test-hooks @returns dict of hook filepaths {"enter": [], "exit": []}
Below is the the instruction that describes the task: ### Input: Setup test-hooks @returns dict of hook filepaths {"enter": [], "exit": []} ### Response: def hooks_setup(trun, parent, hnames=None): """ Setup test-hooks @returns dict of hook filepaths {"enter": [], "exit": []} """ hooks = { "enter": [], "exit": [] } if hnames is None: # Nothing to do, just return the struct return hooks for hname in hnames: # Fill out paths for med in HOOK_PATTERNS: for ptn in HOOK_PATTERNS[med]: fpath = os.sep.join([trun["conf"]["HOOKS"], ptn % hname]) if not os.path.exists(fpath): continue hook = hook_setup(parent, fpath) if not hook: continue hooks[med].append(hook) if not hooks["enter"] + hooks["exit"]: cij.err("rnr:hooks_setup:FAIL { hname: %r has no files }" % hname) return None return hooks
def GroupSensorsFind(self, group_id, parameters, filters, namespace = None): """ Find sensors in a group based on a number of filters on metatags @param group_id (int) - Id of the group in which to find sensors @param namespace (string) - Namespace to use in filtering on metatags @param parameters (dictionary) - Dictionary containing additional parameters @param filters (dictionary) - Dictioanry containing the filters on metatags @return (bool) - Boolean indicating whether GroupSensorsFind was successful """ ns = "default" if namespace is None else namespace parameters['namespace'] = ns if self.__SenseApiCall__("/groups/{0}/sensors/find.json?{1}".format(group_id, urllib.urlencode(parameters, True)), "POST", parameters = filters): return True else: self.__error__ = "api call unsuccessful" return False
Find sensors in a group based on a number of filters on metatags @param group_id (int) - Id of the group in which to find sensors @param namespace (string) - Namespace to use in filtering on metatags @param parameters (dictionary) - Dictionary containing additional parameters @param filters (dictionary) - Dictioanry containing the filters on metatags @return (bool) - Boolean indicating whether GroupSensorsFind was successful
Below is the the instruction that describes the task: ### Input: Find sensors in a group based on a number of filters on metatags @param group_id (int) - Id of the group in which to find sensors @param namespace (string) - Namespace to use in filtering on metatags @param parameters (dictionary) - Dictionary containing additional parameters @param filters (dictionary) - Dictioanry containing the filters on metatags @return (bool) - Boolean indicating whether GroupSensorsFind was successful ### Response: def GroupSensorsFind(self, group_id, parameters, filters, namespace = None): """ Find sensors in a group based on a number of filters on metatags @param group_id (int) - Id of the group in which to find sensors @param namespace (string) - Namespace to use in filtering on metatags @param parameters (dictionary) - Dictionary containing additional parameters @param filters (dictionary) - Dictioanry containing the filters on metatags @return (bool) - Boolean indicating whether GroupSensorsFind was successful """ ns = "default" if namespace is None else namespace parameters['namespace'] = ns if self.__SenseApiCall__("/groups/{0}/sensors/find.json?{1}".format(group_id, urllib.urlencode(parameters, True)), "POST", parameters = filters): return True else: self.__error__ = "api call unsuccessful" return False
def _generate_grid(self): """Get the all possible values for each of the tunables.""" grid_axes = [] for _, param in self.tunables: grid_axes.append(param.get_grid_axis(self.grid_width)) return grid_axes
Get the all possible values for each of the tunables.
Below is the the instruction that describes the task: ### Input: Get the all possible values for each of the tunables. ### Response: def _generate_grid(self): """Get the all possible values for each of the tunables.""" grid_axes = [] for _, param in self.tunables: grid_axes.append(param.get_grid_axis(self.grid_width)) return grid_axes
def column_print(fmt, rows, print_func): """Prints a formatted list, adjusting the width so everything fits. fmt contains a single character for each column. < indicates that the column should be left justified, > indicates that the column should be right justified. The last column may be a space which implies left justification and no padding. """ # Figure out the max width of each column num_cols = len(fmt) width = [max(0 if isinstance(row, str) else len(row[i]) for row in rows) for i in range(num_cols)] for row in rows: if isinstance(row, str): # Print a separator line print_func(' '.join([row * width[i] for i in range(num_cols)])) else: print_func(' '.join([align_cell(fmt[i], row[i], width[i]) for i in range(num_cols)]))
Prints a formatted list, adjusting the width so everything fits. fmt contains a single character for each column. < indicates that the column should be left justified, > indicates that the column should be right justified. The last column may be a space which implies left justification and no padding.
Below is the the instruction that describes the task: ### Input: Prints a formatted list, adjusting the width so everything fits. fmt contains a single character for each column. < indicates that the column should be left justified, > indicates that the column should be right justified. The last column may be a space which implies left justification and no padding. ### Response: def column_print(fmt, rows, print_func): """Prints a formatted list, adjusting the width so everything fits. fmt contains a single character for each column. < indicates that the column should be left justified, > indicates that the column should be right justified. The last column may be a space which implies left justification and no padding. """ # Figure out the max width of each column num_cols = len(fmt) width = [max(0 if isinstance(row, str) else len(row[i]) for row in rows) for i in range(num_cols)] for row in rows: if isinstance(row, str): # Print a separator line print_func(' '.join([row * width[i] for i in range(num_cols)])) else: print_func(' '.join([align_cell(fmt[i], row[i], width[i]) for i in range(num_cols)]))
def xml_to_dict(raw_xml): """Convert a XML stream into a dictionary. This function transforms a xml stream into a dictionary. The attributes are stored as single elements while child nodes are stored into lists. The text node is stored using the special key '__text__'. This code is based on Winston Ewert's solution to this problem. See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict for more info. The code was licensed as cc by-sa 3.0. :param raw_xml: XML stream :returns: a dict with the XML data :raises ParseError: raised when an error occurs parsing the given XML stream """ def node_to_dict(node): d = {} d.update(node.items()) text = getattr(node, 'text', None) if text is not None: d['__text__'] = text childs = {} for child in node: childs.setdefault(child.tag, []).append(node_to_dict(child)) d.update(childs.items()) return d purged_xml = remove_invalid_xml_chars(raw_xml) try: tree = xml.etree.ElementTree.fromstring(purged_xml) except xml.etree.ElementTree.ParseError as e: cause = "XML stream %s" % (str(e)) raise ParseError(cause=cause) d = node_to_dict(tree) return d
Convert a XML stream into a dictionary. This function transforms a xml stream into a dictionary. The attributes are stored as single elements while child nodes are stored into lists. The text node is stored using the special key '__text__'. This code is based on Winston Ewert's solution to this problem. See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict for more info. The code was licensed as cc by-sa 3.0. :param raw_xml: XML stream :returns: a dict with the XML data :raises ParseError: raised when an error occurs parsing the given XML stream
Below is the the instruction that describes the task: ### Input: Convert a XML stream into a dictionary. This function transforms a xml stream into a dictionary. The attributes are stored as single elements while child nodes are stored into lists. The text node is stored using the special key '__text__'. This code is based on Winston Ewert's solution to this problem. See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict for more info. The code was licensed as cc by-sa 3.0. :param raw_xml: XML stream :returns: a dict with the XML data :raises ParseError: raised when an error occurs parsing the given XML stream ### Response: def xml_to_dict(raw_xml): """Convert a XML stream into a dictionary. This function transforms a xml stream into a dictionary. The attributes are stored as single elements while child nodes are stored into lists. The text node is stored using the special key '__text__'. This code is based on Winston Ewert's solution to this problem. See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict for more info. The code was licensed as cc by-sa 3.0. :param raw_xml: XML stream :returns: a dict with the XML data :raises ParseError: raised when an error occurs parsing the given XML stream """ def node_to_dict(node): d = {} d.update(node.items()) text = getattr(node, 'text', None) if text is not None: d['__text__'] = text childs = {} for child in node: childs.setdefault(child.tag, []).append(node_to_dict(child)) d.update(childs.items()) return d purged_xml = remove_invalid_xml_chars(raw_xml) try: tree = xml.etree.ElementTree.fromstring(purged_xml) except xml.etree.ElementTree.ParseError as e: cause = "XML stream %s" % (str(e)) raise ParseError(cause=cause) d = node_to_dict(tree) return d
def negotiate_header(url):
    """
    Return the "Authorization" HTTP header value to use for this URL.
    """
    host = urlparse(url).hostname
    _, ctx = kerberos.authGSSClientInit('HTTP@%s' % host)
    # The GSSAPI step contacts the KDC over the network (blocking),
    # so run it in a worker thread instead of the reactor thread.
    yield threads.deferToThread(kerberos.authGSSClientStep, ctx, '')
    token = kerberos.authGSSClientResponse(ctx)
    defer.returnValue('Negotiate ' + token)
Return the "Authorization" HTTP header value to use for this URL.
Below is the the instruction that describes the task: ### Input: Return the "Authorization" HTTP header value to use for this URL. ### Response: def negotiate_header(url): """ Return the "Authorization" HTTP header value to use for this URL. """ hostname = urlparse(url).hostname _, krb_context = kerberos.authGSSClientInit('HTTP@%s' % hostname) # authGSSClientStep goes over the network to the KDC (ie blocking). yield threads.deferToThread(kerberos.authGSSClientStep, krb_context, '') negotiate_details = kerberos.authGSSClientResponse(krb_context) defer.returnValue('Negotiate ' + negotiate_details)
def Rx_matrix(theta):
    """Rotation matrix around the X axis"""
    c = np.cos(theta)
    s = np.sin(theta)
    return np.array([
        [1, 0, 0],
        [0, c, -s],
        [0, s, c],
    ])
Rotation matrix around the X axis
Below is the the instruction that describes the task: ### Input: Rotation matrix around the X axis ### Response: def Rx_matrix(theta): """Rotation matrix around the X axis""" return np.array([ [1, 0, 0], [0, np.cos(theta), -np.sin(theta)], [0, np.sin(theta), np.cos(theta)] ])
def decrypt(self, encryption_key, iv, encrypted_data):
    """Decrypt encrypted subtitle data

    @param int subtitle_id
    @param str iv
    @param str encrypted_data
    @return str
    """
    logger.info('Decrypting subtitles with length (%d bytes), key=%r',
                len(encrypted_data), encryption_key)
    # AES-decrypt first, then inflate: the payload is zlib-compressed
    # before encryption.
    decrypted = aes_decrypt(encryption_key, iv, encrypted_data)
    return zlib.decompress(decrypted)
Decrypt encrypted subtitle data @param int subtitle_id @param str iv @param str encrypted_data @return str
Below is the the instruction that describes the task: ### Input: Decrypt encrypted subtitle data @param int subtitle_id @param str iv @param str encrypted_data @return str ### Response: def decrypt(self, encryption_key, iv, encrypted_data): """Decrypt encrypted subtitle data @param int subtitle_id @param str iv @param str encrypted_data @return str """ logger.info('Decrypting subtitles with length (%d bytes), key=%r', len(encrypted_data), encryption_key) return zlib.decompress(aes_decrypt(encryption_key, iv, encrypted_data))
def _pre_delete_hook(cls, key): """ Removes instance from index. """ if cls.searching_enabled: doc_id = cls.search_get_document_id(key) index = cls.search_get_index() index.delete(doc_id)
Removes instance from index.
Below is the the instruction that describes the task: ### Input: Removes instance from index. ### Response: def _pre_delete_hook(cls, key): """ Removes instance from index. """ if cls.searching_enabled: doc_id = cls.search_get_document_id(key) index = cls.search_get_index() index.delete(doc_id)
def create(self, collector, tryImport=True):
    """ Creates an inspector of the registered and passes the collector to the
        constructor.
        Tries to import the class if tryImport is True.
        Raises ImportError if the class could not be imported.
    """
    inspector_class = self.getClass(tryImport=tryImport)
    if self.successfullyImported:
        return inspector_class(collector)
    raise ImportError("Class not successfully imported: {}".format(self.exception))
Creates an inspector of the registered and passes the collector to the constructor. Tries to import the class if tryImport is True. Raises ImportError if the class could not be imported.
Below is the the instruction that describes the task: ### Input: Creates an inspector of the registered and passes the collector to the constructor. Tries to import the class if tryImport is True. Raises ImportError if the class could not be imported. ### Response: def create(self, collector, tryImport=True): """ Creates an inspector of the registered and passes the collector to the constructor. Tries to import the class if tryImport is True. Raises ImportError if the class could not be imported. """ cls = self.getClass(tryImport=tryImport) if not self.successfullyImported: raise ImportError("Class not successfully imported: {}".format(self.exception)) return cls(collector)