code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def bind(self, family, type, proto=0):
    """Create (or recreate) the underlying socket and bind it.

    The freshly built socket replaces any existing one, is marked
    address-reusable, and is switched to non-blocking mode before being
    bound to ``self.bind_addr``.
    """
    sock = sockets.Socket(family, type, proto)
    self.socket = sock
    # Allow quick rebinding to the same address after a restart.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setblocking(0)
    sock.bind(self.bind_addr)
Create (or recreate) the actual socket object.
def ValidateLanguageCode(lang, column_name=None, problems=None):
    """Validate an optional language code with the pybcp47 module.

    An empty language code is regarded as valid so that the required-field
    checks do not produce duplicate errors. When a ``problems`` accumulator
    is supplied, a code that is not well-formed is reported as an error and
    a well-formed but unknown code as a warning.

    Returns True when the code is empty or valid, False when it is not
    well-formed or not valid.
    """
    if util.IsEmpty(lang):
        return True

    parsed = parser.ParseLanguage(str(lang.lower()))

    if not parsed.wellformed:
        if problems:
            problems.InvalidValue(
                column_name, lang,
                'language code "%s" is not well-formed' % lang,
                type=problems_class.TYPE_ERROR)
        return False

    if not parsed.valid:
        if problems:
            problems.InvalidValue(
                column_name, lang,
                'language code "%s" is not valid, parses as: %s' % (lang, parsed),
                type=problems_class.TYPE_WARNING)
        return False

    return True
Validates a non-required language code value using the pybcp47 module: - if invalid adds InvalidValue error (if problems accumulator is provided) - distinguishes between 'not well-formed' and 'not valid' and adds error reasons accordingly - an empty language code is regarded as valid! Otherwise we might end up with many duplicate errors because of the required field checks. - returns true if the language is valid, false if not well-formed or invalid.
def visit_ListComp(self, node: ast.ListComp) -> Any:
    """Evaluate the list comprehension and record its value.

    The comprehension is executed as a compiled function; afterwards the
    iterable of every generator clause is visited so its value is
    recomputed as well.
    """
    value = self._execute_comprehension(node=node)
    for gen in node.generators:
        self.visit(gen.iter)
    self.recomputed_values[node] = value
    return value
Compile the list comprehension as a function and call it.
def remover(self, id_perms):
    """Remove an Administrative Permission given its identifier.

    :param id_perms: Identifier of the Administrative Permission; an
        integer greater than zero.

    :return: None

    :raise InvalidParameterError: The identifier is null or invalid.
    :raise PermissaoAdministrativaNaoExisteError: Administrative
        Permission not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_perms):
        raise InvalidParameterError(
            u'The identifier of Administrative Permission is invalid or was not informed.')

    code, xml = self.submit(
        None, 'DELETE', 'aperms/' + str(id_perms) + '/')
    return self.response(code, xml)
Remove Administrative Permission from by the identifier. :param id_perms: Identifier of the Administrative Permission. Integer value and greater than zero. :return: None :raise InvalidParameterError: The identifier of Administrative Permission is null and invalid. :raise PermissaoAdministrativaNaoExisteError: Administrative Permission not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def wait(self):
    """Block until the configured rate is met, then advance the reference time."""
    remaining = self._ref - _monotonic()
    if remaining > 0:
        # Sleep off whatever time is left before the next permitted event.
        self.sleep_func(max(0, remaining))
    self._update_ref()
Blocks until the rate is met
def update_file(url, filename):
    """Refresh one packaged data file from the given URL.

    Lines starting with ``#`` are treated as comments and skipped; the
    remaining content is decoded as UTF-8 and written over the packaged
    copy of *filename*.
    """
    resp = urlopen(url)
    if resp.code != 200:
        raise Exception('GET {} failed.'.format(url))
    with open(_get_package_path(filename), 'w') as fp:
        fp.writelines(
            line.decode('utf8') for line in resp
            if not line.startswith(b'#'))
    print('Updated {}'.format(filename))
Update the content of a single file.
def CreateMuskingumKfacFile(in_drainage_line, river_id, length_id, slope_id,
                            celerity, formula_type, in_connectivity_file,
                            out_kfac_file, length_units="km",
                            slope_percentage=False, file_geodatabase=None):
    r"""
    Creates the Kfac file for calibration.

    The improved methods using slope to generate values for Kfac were
    used here:

    Tavakoly, A. A., A. D. Snow, C. H. David, M. L. Follum, D. R. Maidment,
    and Z.-L. Yang, (2016) "Continental-Scale River Flow Modeling of the
    Mississippi River Basin Using High-Resolution NHDPlus Dataset",
    Journal of the American Water Resources Association (JAWRA) 1-22.
    DOI: 10.1111/1752-1688.12456

    Formula Type Options:

    1. :math:`Kfac_n = \frac{RiverLength_n}{Celerity_n}`
    2. :math:`Kfac_n = \eta*\frac{RiverLength_n}{\sqrt{RiverSlope_n}}`
    3. :math:`Kfac_n = \eta*\frac{RiverLength_n}{\sqrt{RiverSlope_n}}\left[0.05, 0.95\right]`

    Where:

    :math:`a = \frac{\sum_{n=1}^{r} \frac{RiverLength_n}{Celerity_n}}{r}`

    :math:`b = \frac{\sum_{n=1}^{r} \frac{RiverLength_n}{\sqrt{RiverSlope_n}}}{r}`

    :math:`\eta = \frac{a}{b}`

    r = Number of river segments.

    Parameters
    ----------
    in_drainage_line: str
        Path to the stream network (i.e. Drainage Line) shapefile.
    river_id: str
        The name of the field with the river ID
        (Ex. 'HydroID', 'COMID', or 'LINKNO').
    length_id: str
        The field name containing the length of the river segment
        (Ex. 'LENGTHKM' or 'Length').
    slope_id: str
        The field name containing the slope of the river segment
        (Ex. 'Avg_Slope' or 'Slope').
    celerity: float
        The flow wave celerity for the watershed in meters per second.
        1 km/hr or 1000.0/3600.0 m/s is a reasonable value if unknown.
    formula_type: int
        An integer representing the formula type to use when calculating
        kfac.
    in_connectivity_file: str
        The path to the RAPID connectivity file.
    out_kfac_file: str
        The path to the output kfac file.
    length_units: str, optional
        The units for the length_id field. Supported types are "m" for
        meters and "km" for kilometers.
    slope_percentage: bool, optional
        If True, it assumes the slope given is in percentage and will
        divide by 100. Default is False.
    file_geodatabase: str, optional
        Path to the file geodatabase. If you use this option,
        in_drainage_line is the name of the stream network feature class
        (WARNING: Not always stable with GDAL).

    Example::

        from RAPIDpy.gis.muskingum import CreateMuskingumKfacFile

        CreateMuskingumKfacFile(
            in_drainage_line='/path/to/drainageline.shp',
            river_id='LINKNO',
            length_id='Length',
            slope_id='Slope',
            celerity=1000.0/3600.0,
            formula_type=3,
            in_connectivity_file='/path/to/rapid_connect.csv',
            out_kfac_file='/path/to/kfac.csv',
            length_units="m",
        )

    """  # noqa
    # Read river ID, length and slope for every feature in the network.
    ogr_drainage_line_shapefile_lyr, ogr_drainage_line_shapefile = \
        open_shapefile(in_drainage_line, file_geodatabase)

    number_of_features = ogr_drainage_line_shapefile_lyr.GetFeatureCount()
    river_id_list = np.zeros(number_of_features, dtype=np.int32)
    length_list = \
        np.zeros(number_of_features, dtype=np.float32)
    slope_list = np.zeros(number_of_features, dtype=np.float32)
    for feature_idx, drainage_line_feature in \
            enumerate(ogr_drainage_line_shapefile_lyr):
        river_id_list[feature_idx] = drainage_line_feature.GetField(river_id)
        # Missing length/slope fields are left at 0 (handled below).
        length = drainage_line_feature.GetField(length_id)
        if length is not None:
            length_list[feature_idx] = length
        slope = drainage_line_feature.GetField(slope_id)
        if slope is not None:
            slope_list[feature_idx] = slope

    del ogr_drainage_line_shapefile

    if slope_percentage:
        slope_list /= 100.0

    # Normalize lengths to kilometers.
    if length_units == "m":
        length_list /= 1000.0
    elif length_units != "km":
        raise Exception("Invalid length units supplied. "
                        "Supported units are m and km.")

    connectivity_table = np.loadtxt(in_connectivity_file,
                                    delimiter=",", ndmin=2, dtype=int)

    length_slope_array = []
    kfac2_array = []
    if formula_type == 1:
        log("River Length/Celerity")
    elif formula_type == 2:
        log("Eta*River Length/Sqrt(River Slope)")
    elif formula_type == 3:
        log("Eta*River Length/Sqrt(River Slope) [0.05, 0.95]")
    else:
        raise Exception("Invalid formula type. Valid range: 1-3 ...")

    with open_csv(out_kfac_file, 'w') as kfacfile:
        kfac_writer = csv_writer(kfacfile)
        for row in connectivity_table:
            stream_id = int(float(row[0]))

            stream_id_index = river_id_list == stream_id
            # find the length (km -> m for the travel-time calculation)
            stream_length = length_list[stream_id_index] * 1000.0

            if formula_type >= 2:
                # find the slope
                stream_slope = slope_list[stream_id_index]

                if stream_slope <= 0:
                    # if no slope, take average of upstream
                    # and downstream to get it
                    next_down_id = int(float(row[1]))
                    next_down_slope = 0
                    try:
                        next_down_index = \
                            np.where(river_id_list == next_down_id)[0][0]
                        next_down_slope = slope_list[next_down_index]
                    except IndexError:
                        # next-downstream segment not in the network
                        pass

                    next_up_id = int(float(row[3]))
                    next_up_slope = 0
                    try:
                        next_up_index = \
                            np.where(river_id_list == next_up_id)[0][0]
                        next_up_slope = slope_list[next_up_index]
                    except IndexError:
                        # next-upstream segment not in the network
                        pass

                    stream_slope = (next_down_slope + next_up_slope) / 2.0
                    if stream_slope <= 0:
                        # if still no slope, set to 0.001
                        stream_slope = 0.001

                length_slope_array.append(stream_length / stream_slope**0.5)
                kfac2_array.append(stream_length / celerity)
            else:
                # Formula type 1: write travel time directly.
                kfac = stream_length / celerity
                kfac_writer.writerow(kfac)

        if formula_type >= 2:
            if formula_type == 3:
                # Clamp outliers to the 5th/95th percentile values.
                log("Filtering Data by 5th and 95th Percentiles ...")
                length_slope_array = np.array(length_slope_array)
                percentile_5 = np.percentile(length_slope_array, 5)
                percentile_95 = np.percentile(length_slope_array, 95)

                length_slope_array[length_slope_array < percentile_5] = \
                    percentile_5
                length_slope_array[length_slope_array > percentile_95] = \
                    percentile_95

            # eta scales slope-based estimates to match celerity-based ones.
            eta = np.mean(kfac2_array) / np.mean(length_slope_array)
            log("Kfac2_Avg {0}".format(np.mean(kfac2_array)))
            log("Length_Slope Avg {0}".format(np.mean(length_slope_array)))
            log("Eta {0}".format(eta))
            log("Writing Data ...")
            for len_slope in length_slope_array:
                kfac_writer.writerow(eta*len_slope)
r""" Creates the Kfac file for calibration. The improved methods using slope to generate values for Kfac were used here: Tavakoly, A. A., A. D. Snow, C. H. David, M. L. Follum, D. R. Maidment, and Z.-L. Yang, (2016) "Continental-Scale River Flow Modeling of the Mississippi River Basin Using High-Resolution NHDPlus Dataset", Journal of the American Water Resources Association (JAWRA) 1-22. DOI: 10.1111/1752-1688.12456 Formula Type Options: 1. :math:`Kfac_n = \frac{RiverLength_n}{Celerity_n}` 2. :math:`Kfac_n = \eta*\frac{RiverLength_n}{\sqrt{RiverSlope_n}}` 3. :math:`Kfac_n = \eta*\frac{RiverLength_n}{\sqrt{RiverSlope_n}}\left[0.05, 0.95\right]` Where: :math:`a = \frac{\sum_{n=1}^{r} \frac{RiverLength_n}{Celerity_n}}{r}` :math:`b = \frac{\sum_{n=1}^{r} \frac{RiverLength_n}{\sqrt{RiverSlope_n}}}{r}` :math:`\eta = \frac{a}{b}` r = Number of river segments. Parameters ---------- in_drainage_line: str Path to the stream network (i.e. Drainage Line) shapefile. river_id: str The name of the field with the river ID (Ex. 'HydroID', 'COMID', or 'LINKNO'). length_id: str The field name containging the length of the river segment (Ex. 'LENGTHKM' or 'Length'). slope_id: str The field name containging the slope of the river segment (Ex. 'Avg_Slope' or 'Slope'). celerity: float The flow wave celerity for the watershed in meters per second. 1 km/hr or 1000.0/3600.0 m/s is a reasonable value if unknown. formula_type: int An integer representing the formula type to use when calculating kfac. in_connectivity_file: str The path to the RAPID connectivity file. out_kfac_file: str The path to the output kfac file. length_units: str, optional The units for the length_id field. Supported types are "m" for meters and "km" for kilometers. slope_percentage: bool, optional If True, it assumes the slope given is in percentage and will divide by 100. Default is False. file_geodatabase: str, optional Path to the file geodatabase. 
If you use this option, in_drainage_line is the name of the stream network feature class (WARNING: Not always stable with GDAL). Example:: from RAPIDpy.gis.muskingum import CreateMuskingumKfacFile CreateMuskingumKfacFile( in_drainage_line='/path/to/drainageline.shp', river_id='LINKNO', length_id='Length', slope_id='Slope', celerity=1000.0/3600.0, formula_type=3, in_connectivity_file='/path/to/rapid_connect.csv', out_kfac_file='/path/to/kfac.csv', length_units="m", )
def plotConvergenceByObject(results, objectRange, featureRange, numTrials, linestyle='-'): """ Plots the convergence graph: iterations vs number of objects. Each curve shows the convergence for a given number of unique features. """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # Convergence[f,o] = how long it took it to converge with f unique features # and o objects. convergence = numpy.zeros((max(featureRange), max(objectRange) + 1)) for r in results: if r["numFeatures"] in featureRange: convergence[r["numFeatures"] - 1, r["numObjects"]] += r["convergencePoint"] convergence /= numTrials ######################################################################## # # Create the plot. x-axis= # Plot each curve legendList = [] colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y'] for i in range(len(featureRange)): f = featureRange[i] print "features={} objectRange={} convergence={}".format( f,objectRange, convergence[f-1,objectRange]) legendList.append('Unique features={}'.format(f)) plt.plot(objectRange, convergence[f-1, objectRange], color=colorList[i], linestyle=linestyle) # format plt.legend(legendList, loc="lower right", prop={'size':10}) plt.xlabel("Number of objects in training set") plt.xticks(range(0,max(objectRange)+1,10)) plt.yticks(range(0,int(convergence.max())+2)) plt.ylabel("Average number of touches") plt.title("Number of touches to recognize one object (single column)")
Plots the convergence graph: iterations vs number of objects. Each curve shows the convergence for a given number of unique features.
def evaluate(self, batchsize):
    """Compute mean loss and mean accuracy over the test set.

    The test data is consumed in mini-batches of ``batchsize`` samples;
    per-batch loss and accuracy are weighted by the batch size and then
    averaged over ``self.testsize``.
    """
    total_loss = 0
    total_accuracy = 0
    for start in range(0, self.testsize, batchsize):
        xb = Variable(self.x_test[start: start + batchsize])
        yb = Variable(self.y_test[start: start + batchsize])
        batch_loss = self.model(xb, yb)
        total_loss += batch_loss.data * batchsize
        total_accuracy += self.model.accuracy.data * batchsize
    return total_loss / self.testsize, total_accuracy / self.testsize
Evaluate how well the classifier is doing. Return mean loss and mean accuracy
def ttfautohint(in_file, out_file, args=None, **kwargs):
    """Thin wrapper around the ttfautohint command line tool.

    Options may be supplied either pre-formed as a single command-line
    string via ``args``, or spelled out as Python keyword arguments;
    mixing both is a TypeError.

    Raises TTFAError when the tool exits with a non-zero status.
    """
    cmd = ["ttfautohint"]
    files = [in_file, out_file]
    if args is not None:
        if kwargs:
            raise TypeError("Should not provide both cmd args and kwargs.")
        status = subprocess.call(cmd + args.split() + files)
        if status != 0:
            raise TTFAError(status)
        return

    # Flags that take no value on the command line.
    boolean_options = (
        "debug",
        "composites",
        "dehint",
        "help",
        "ignore_restrictions",
        "detailed_info",
        "no_info",
        "adjust_subglyphs",
        "symbol",
        "ttfa_table",
        "verbose",
        "version",
        "windows_compatibility",
    )
    # Options that take a value (--name=value).
    other_options = (
        "default_script",
        "fallback_script",
        "family_suffix",
        "hinting_limit",
        "fallback_stem_width",
        "hinting_range_min",
        "control_file",
        "hinting_range_max",
        "strong_stem_width",
        "increase_x_height",
        "x_height_snapping_exceptions",
    )

    for name in boolean_options:
        if kwargs.pop(name, False):
            cmd.append("--" + name.replace("_", "-"))
    for name in other_options:
        value = kwargs.pop(name, None)
        if value is not None:
            cmd.append("--{}={}".format(name.replace("_", "-"), value))
    if kwargs:
        raise TypeError("Unexpected argument(s): " + ", ".join(kwargs.keys()))

    status = subprocess.call(cmd + files)
    if status != 0:
        raise TTFAError(status)
Thin wrapper around the ttfautohint command line tool. Can take in command line arguments directly as a string, or spelled out as Python keyword arguments.
def get_query_string(environ):
    """Return the ``QUERY_STRING`` from the WSGI environment.

    This also takes care of the WSGI decoding dance on Python 3
    environments as a native string. The string returned is restricted
    to ASCII characters.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string
                    from.
    """
    raw = wsgi_get_bytes(environ.get("QUERY_STRING", ""))
    # QUERY_STRING should be ASCII-safe, but some browsers (notably IE)
    # send unicode anyway — percent-encode anything outside the safe set.
    return try_coerce_native(url_quote(raw, safe=":&%=+$!*'(),"))
Returns the `QUERY_STRING` from the WSGI environment. This also takes care about the WSGI decoding dance on Python 3 environments as a native string. The string returned will be restricted to ASCII characters. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the query string from.
def block(self, tofile="block.dat"):
    '''
    Fetch security sector/block information.

    :param tofile: file name forwarded to the TDX client
    :return: pd.DataFrame or None
    '''
    with self.client.connect(*self.bestip):
        raw = self.client.get_and_parse_block_info(tofile)
        return self.client.to_df(raw)
获取证券板块信息 :param tofile: :return: pd.dataFrame or None
def do_lisp(self, subcmd, opts, folder=""):
    """${cmd_name}: list messages in the specified folder in JSON format

    ${cmd_usage}
    """
    # The ${...} placeholders above are expanded at runtime by the cmdln
    # framework; they must be kept verbatim.
    MdClient(self.maildir, filesystem=self.filesystem).lisp(
        foldername=folder,
        stream=self.stdout,
        reverse=getattr(opts, "reverse", False),
        since=float(getattr(opts, "since", -1)),
    )
${cmd_name}: list messages in the specified folder in JSON format ${cmd_usage}
def createDataport(self, auth, desc, defer=False):
    """Create a dataport resource.

    "format" and "retention" are required in ``desc``::

        {
            "format": "float" | "integer" | "string",
            "meta": string = "",
            "name": string = "",
            "preprocess": list = [],
            "public": boolean = false,
            "retention": {
                "count": number | "infinity",
                "duration": number | "infinity"
            },
            "subscribe": <ResourceID> | null = null
        }
    """
    resource = ['dataport', desc]
    return self._call('create', auth, resource, defer)
Create a dataport resource. "format" and "retention" are required { "format": "float" | "integer" | "string", "meta": string = "", "name": string = "", "preprocess": list = [], "public": boolean = false, "retention": { "count": number | "infinity", "duration": number | "infinity" }, "subscribe": <ResourceID> | null = null }
def ReleaseObject(self, identifier):
    """Releases a cached object based on the identifier.

    This method decrements the cache value reference count.

    Args:
      identifier (str): VFS object identifier.

    Raises:
      KeyError: if the VFS object is not found in the cache.
      RuntimeError: if the cache value is missing.
    """
    try:
        cache_value = self._values[identifier]
    except KeyError:
        raise KeyError('Missing cached object for identifier: {0:s}'.format(
            identifier))

    # A present-but-falsy entry means the cache is in an inconsistent state.
    if not cache_value:
        raise RuntimeError('Missing cache value for identifier: {0:s}'.format(
            identifier))

    cache_value.DecrementReferenceCount()
Releases a cached object based on the identifier. This method decrements the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache. RuntimeError: if the cache value is missing.
def apply(self, im): """ Apply an n-dimensional displacement by shifting an image or volume. Parameters ---------- im : ndarray The image or volume to shift """ from scipy.ndimage.interpolation import shift return shift(im, map(lambda x: -x, self.delta), mode='nearest')
Apply an n-dimensional displacement by shifting an image or volume. Parameters ---------- im : ndarray The image or volume to shift
def masked(name, runtime=False, root=None):
    '''
    .. versionadded:: 2015.8.0

    .. versionchanged:: 2015.8.5
        The return data for this function has changed. If the service is
        masked, the return value will now be the output of the ``systemctl
        is-enabled`` command (so that a persistent mask can be distinguished
        from a runtime mask). If the service is not masked, then ``False``
        will be returned.

    .. versionchanged:: 2017.7.0
        This function now returns a boolean telling the user whether a mask
        specified by the new ``runtime`` argument is set. If ``runtime`` is
        ``False``, this function will return ``True`` if an indefinite mask
        is set for the named service (otherwise ``False`` will be returned).
        If ``runtime`` is ``True``, this function will return ``True`` if a
        runtime mask is set, otherwise ``False``.

    Check whether or not a service is masked

    runtime : False
        Set to ``True`` to check for a runtime mask

        .. versionadded:: 2017.7.0
            In previous versions, this function would simply return the
            output of ``systemctl is-enabled`` when the service was found to
            be masked. However, since it is possible to both have both
            indefinite and runtime masks on a service simultaneously, this
            function now only checks for runtime masks if this argument is
            set to ``True``. Otherwise, it will check for an indefinite
            mask.

    root
        Enable/disable/mask unit files in the specified root directory

    CLI Examples:

    .. code-block:: bash

        salt '*' service.masked foo
        salt '*' service.masked foo runtime=True
    '''
    _check_for_unit_changes(name)
    # Runtime masks live under /run, indefinite masks under /etc.
    root_dir = _root('/run' if runtime else '/etc', root)
    link_path = os.path.join(root_dir,
                             'systemd',
                             'system',
                             _canonical_unit_name(name))
    try:
        # A masked unit is a symlink to /dev/null.
        return os.readlink(link_path) == '/dev/null'
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # No symlink at all: not masked (or unit does not exist).
            log.trace(
                'Path %s does not exist. This is normal if service \'%s\' is '
                'not masked or does not exist.', link_path, name
            )
        elif exc.errno == errno.EINVAL:
            # Path exists but is a regular file, not a symlink.
            log.error(
                'Failed to check mask status for service %s. Path %s is a '
                'file, not a symlink. This could be caused by changes in '
                'systemd and is probably a bug in Salt. Please report this '
                'to the developers.', name, link_path
            )
        return False
.. versionadded:: 2015.8.0 .. versionchanged:: 2015.8.5 The return data for this function has changed. If the service is masked, the return value will now be the output of the ``systemctl is-enabled`` command (so that a persistent mask can be distinguished from a runtime mask). If the service is not masked, then ``False`` will be returned. .. versionchanged:: 2017.7.0 This function now returns a boolean telling the user whether a mask specified by the new ``runtime`` argument is set. If ``runtime`` is ``False``, this function will return ``True`` if an indefinite mask is set for the named service (otherwise ``False`` will be returned). If ``runtime`` is ``False``, this function will return ``True`` if a runtime mask is set, otherwise ``False``. Check whether or not a service is masked runtime : False Set to ``True`` to check for a runtime mask .. versionadded:: 2017.7.0 In previous versions, this function would simply return the output of ``systemctl is-enabled`` when the service was found to be masked. However, since it is possible to both have both indefinite and runtime masks on a service simultaneously, this function now only checks for runtime masks if this argument is set to ``True``. Otherwise, it will check for an indefinite mask. root Enable/disable/mask unit files in the specified root directory CLI Examples: .. code-block:: bash salt '*' service.masked foo salt '*' service.masked foo runtime=True
def list_users(self, limit=None, marker=None):
    """Return the names of all users for this instance.

    Pagination is delegated to the underlying user manager via ``limit``
    and ``marker``.
    """
    manager = self._user_manager
    return manager.list(limit=limit, marker=marker)
Returns a list of the names of all users for this instance.
def build_payment_parameters(amount: Money, client_ref: str) -> PaymentParameters:
    """Build the parameters needed to present a datatrans payment form.

    :param amount: The amount and currency we want the user to pay
    :param client_ref: A unique reference for this payment
    :return: The parameters needed to display the datatrans form
    """
    amount_value, currency = money_to_amount_and_currency(amount)
    signature = sign_web(web_merchant_id, amount_value, currency, client_ref)

    parameters = PaymentParameters(
        merchant_id=web_merchant_id,
        amount=amount_value,
        currency=currency,
        refno=client_ref,
        sign=signature,
        use_alias=False,
    )
    logger.info('build-payment-parameters', parameters=parameters)
    return parameters
Builds the parameters needed to present the user with a datatrans payment form. :param amount: The amount and currency we want the user to pay :param client_ref: A unique reference for this payment :return: The parameters needed to display the datatrans form
def gaussian(df, width=0.3, downshift=-1.8, prefix=None):
    """
    Impute missing values by drawing from a normal distribution

    :param df: DataFrame to impute; a copy is modified and returned.
    :param width: Scale factor for the imputed distribution relative to the
        standard deviation of measured values. Can be a single number or
        list of one per imputed column.
    :param downshift: Shift the imputed values down, in units of std. dev.
        Can be a single number or list of one per imputed column.
    :param prefix: The column prefix for imputed columns
    :return: tuple of (imputed DataFrame, boolean DataFrame flagging the
        cells that were imputed)
    """
    df = df.copy()

    imputed = df.isnull()  # Keep track of what's real

    # Absolute indices of the columns to impute.
    if prefix:
        mask = np.array([l.startswith(prefix) for l in df.columns.values])
        mycols = np.arange(0, df.shape[1])[mask]
    else:
        mycols = np.arange(0, df.shape[1])

    if type(width) is not list:
        width = [width] * len(mycols)
    elif len(mycols) != len(width):
        raise ValueError("Length of iterable 'width' does not match # of columns")

    if type(downshift) is not list:
        downshift = [downshift] * len(mycols)
    elif len(mycols) != len(downshift):
        raise ValueError("Length of iterable 'downshift' does not match # of columns")

    # width/downshift hold one entry per *selected* column, so they must be
    # indexed by position within mycols, not by the absolute column index.
    # (Indexing them with the absolute index raised IndexError whenever a
    # prefix selected columns beyond the first len(mycols) ones.)
    for pos, col_idx in enumerate(mycols):
        data = df.iloc[:, col_idx]
        mask = data.isnull().values
        mean = data.mean(axis=0)
        stddev = data.std(axis=0)

        m = mean + downshift[pos] * stddev
        s = stddev * width[pos]

        # Generate a list of random numbers for filling in
        values = np.random.normal(loc=m, scale=s, size=df.shape[0])

        # Now fill them in
        df.iloc[mask, col_idx] = values[mask]

    return df, imputed
Impute missing values by drawing from a normal distribution :param df: :param width: Scale factor for the imputed distribution relative to the standard deviation of measured values. Can be a single number or list of one per column. :param downshift: Shift the imputed values down, in units of std. dev. Can be a single number or list of one per column :param prefix: The column prefix for imputed columns :return:
def get_stock_quote(self, code_list):
    """Get real-time quote data for subscribed stocks.

    Every code in ``code_list`` must already have been subscribed
    successfully before calling; for asynchronous pushes see
    StockQuoteHandlerBase.

    :param code_list: list of stock codes (all must be subscribed)
    :return: (ret, data)

        ret == RET_OK: ``data`` is a pd.DataFrame with one row per code and
        the columns listed in ``col_list`` below — identity/time fields
        (code, data_date, data_time), price/volume fields (last/open/high/
        low/prev-close price, volume, turnover, turnover_rate, amplitude,
        price_spread), status fields (suspension, listing_date,
        dark_status) and option fields (strike_price, contract_size,
        open_interest, implied_volatility, premium and the greeks delta,
        gamma, vega, theta, rho).

        ret != RET_OK: ``data`` is an error string.
    """
    # Deduplicate and normalize the requested codes.
    code_list = unique_and_normalize_list(code_list)
    if not code_list:
        error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
        return RET_ERROR, error_str

    query_processor = self._get_sync_query_processor(
        StockQuoteQuery.pack_req,
        StockQuoteQuery.unpack_rsp,
    )
    kargs = {
        "stock_list": code_list,
        "conn_id": self.get_sync_conn_id()
    }

    ret_code, msg, quote_list = query_processor(**kargs)
    if ret_code == RET_ERROR:
        return ret_code, msg

    # Column order of the returned DataFrame.
    col_list = [
        'code', 'data_date', 'data_time', 'last_price', 'open_price',
        'high_price', 'low_price', 'prev_close_price', 'volume',
        'turnover', 'turnover_rate', 'amplitude', 'suspension',
        'listing_date', 'price_spread', 'dark_status', 'strike_price',
        'contract_size', 'open_interest', 'implied_volatility',
        'premium', 'delta', 'gamma', 'vega', 'theta', 'rho'
    ]

    quote_frame_table = pd.DataFrame(quote_list, columns=col_list)

    return RET_OK, quote_frame_table
获取订阅股票报价的实时数据,有订阅要求限制。 对于异步推送,参见StockQuoteHandlerBase :param code_list: 股票代码列表,必须确保code_list中的股票均订阅成功后才能够执行 :return: (ret, data) ret == RET_OK 返回pd dataframe数据,数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== code str 股票代码 data_date str 日期 data_time str 时间(美股默认是美东时间,港股A股默认是北京时间) last_price float 最新价格 open_price float 今日开盘价 high_price float 最高价格 low_price float 最低价格 prev_close_price float 昨收盘价格 volume int 成交数量 turnover float 成交金额 turnover_rate float 换手率 amplitude int 振幅 suspension bool 是否停牌(True表示停牌) listing_date str 上市日期 (yyyy-MM-dd) price_spread float 当前价差,亦即摆盘数据的买档或卖档的相邻档位的报价差 dark_status str 暗盘交易状态,见DarkStatus strike_price float 行权价 contract_size int 每份合约数 open_interest int 未平仓合约数 implied_volatility float 隐含波动率 premium float 溢价 delta float 希腊值 Delta gamma float 希腊值 Gamma vega float 希腊值 Vega theta float 希腊值 Theta rho float 希腊值 Rho ===================== =========== ==============================================================
def _partition_data(datavol, roivol, roivalue, maskvol=None, zeroe=True): """ Extracts the values in `datavol` that are in the ROI with value `roivalue` in `roivol`. The ROI can be masked by `maskvol`. Parameters ---------- datavol: numpy.ndarray 4D timeseries volume or a 3D volume to be partitioned roivol: numpy.ndarray 3D ROIs volume roivalue: int or float A value from roivol that represents the ROI to be used for extraction. maskvol: numpy.ndarray 3D mask volume zeroe: bool If true will remove the null timeseries voxels. Only applied to timeseries (4D) data. Returns ------- values: np.array An array of the values in the indicated ROI. A 2D matrix if `datavol` is 4D or a 1D vector if `datavol` is 3D. """ if maskvol is not None: # get all masked time series within this roi r indices = (roivol == roivalue) * (maskvol > 0) else: # get all time series within this roi r indices = roivol == roivalue if datavol.ndim == 4: ts = datavol[indices, :] else: ts = datavol[indices] # remove zeroed time series if zeroe: if datavol.ndim == 4: ts = ts[ts.sum(axis=1) != 0, :] return ts
Extracts the values in `datavol` that are in the ROI with value `roivalue` in `roivol`. The ROI can be masked by `maskvol`. Parameters ---------- datavol: numpy.ndarray 4D timeseries volume or a 3D volume to be partitioned roivol: numpy.ndarray 3D ROIs volume roivalue: int or float A value from roivol that represents the ROI to be used for extraction. maskvol: numpy.ndarray 3D mask volume zeroe: bool If true will remove the null timeseries voxels. Only applied to timeseries (4D) data. Returns ------- values: np.array An array of the values in the indicated ROI. A 2D matrix if `datavol` is 4D or a 1D vector if `datavol` is 3D.
def generate(self, model_len=None, model_width=None):
    """Generates a CNN.

    Args:
        model_len: An integer. Number of convolutional layers.
            Defaults to Constant.MODEL_LEN when None.
        model_width: An integer. Number of filters for the convolutional
            layers. Defaults to Constant.MODEL_WIDTH when None.

    Returns:
        An instance of the class Graph. Represents the neural architecture
        graph of the generated model.
    """
    if model_len is None:
        model_len = Constant.MODEL_LEN
    if model_width is None:
        model_width = Constant.MODEL_WIDTH
    # A pooling layer is inserted roughly every quarter of the network depth.
    # When model_len < 4 this is 0 and the condition below pools after
    # every conv block.
    pooling_len = int(model_len / 4)
    graph = Graph(self.input_shape, False)
    temp_input_channel = self.input_shape[-1]
    output_node_id = 0
    stride = 1
    for i in range(model_len):
        # Each block: ReLU -> BatchNorm -> 3x3 Conv.
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        output_node_id = graph.add_layer(
            self.batch_norm(graph.node_list[output_node_id].shape[-1]), output_node_id
        )
        output_node_id = graph.add_layer(
            self.conv(temp_input_channel, model_width, kernel_size=3, stride=stride),
            output_node_id,
        )
        temp_input_channel = model_width
        # Pool every `pooling_len` layers, but never right after the last conv.
        if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):
            output_node_id = graph.add_layer(self.pooling(), output_node_id)

    # Classifier head: global average pool -> dropout -> dense -> ReLU -> dense.
    output_node_id = graph.add_layer(self.global_avg_pooling(), output_node_id)
    output_node_id = graph.add_layer(
        self.dropout(Constant.CONV_DROPOUT_RATE), output_node_id
    )
    output_node_id = graph.add_layer(
        StubDense(graph.node_list[output_node_id].shape[0], model_width),
        output_node_id,
    )
    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)
    return graph
Generates a CNN. Args: model_len: An integer. Number of convolutional layers. model_width: An integer. Number of filters for the convolutional layers. Returns: An instance of the class Graph. Represents the neural architecture graph of the generated model.
def _load_params(params, logger=logging): """Given a str as a path to the .params file or a pair of params, returns two dictionaries representing arg_params and aux_params. """ if isinstance(params, str): cur_path = os.path.dirname(os.path.realpath(__file__)) param_file_path = os.path.join(cur_path, params) logger.info('Loading params from file %s' % param_file_path) save_dict = nd_load(param_file_path) arg_params = {} aux_params = {} for k, v in save_dict.items(): tp, name = k.split(':', 1) if tp == 'arg': arg_params[name] = v if tp == 'aux': aux_params[name] = v return arg_params, aux_params elif isinstance(params, (tuple, list)) and len(params) == 2: return params[0], params[1] else: raise ValueError('Unsupported params provided. Must be either a path to the param file or' ' a pair of dictionaries representing arg_params and aux_params')
Given a str as a path to the .params file or a pair of params, returns two dictionaries representing arg_params and aux_params.
def namedb_query_execute( cur, query, values, abort=True):
    """
    Execute a query against the name database by delegating to
    db_query_execute, which retries with timeouts on lock and, when
    `abort` is True, aborts on failure.

    :param cur: database cursor
    :param query: SQL query string
    :param values: parameter values for the query
    :param abort: forwarded to db_query_execute

    DO NOT CALL THIS DIRECTLY.
    """
    return db_query_execute(cur, query, values, abort=abort)
Execute a query. If it fails, abort. Retry with timeouts on lock DO NOT CALL THIS DIRECTLY.
def reference(self, refobj, taskfileinfo): """Reference the given taskfileinfo into the scene and return the created reference node The created reference node will be used on :meth:`RefobjInterface.set_reference` to set the reference on a reftrack node. Do not call :meth:`RefobjInterface.set_reference` yourself. This will also create a group node and group all dagnodes under a appropriate node. :param refobj: the reftrack node that will be linked to the reference :type refobj: str :param taskfileinfo: The taskfileinfo that holds the information for what to reference :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: the reference node that was created and should set on the appropriate reftrack node :rtype: str :raises: None """ # work in root namespace with common.preserve_namespace(":"): jbfile = JB_File(taskfileinfo) filepath = jbfile.get_fullpath() ns_suggestion = reftrack.get_namespace(taskfileinfo) newnodes = cmds.file(filepath, reference=True, namespace=ns_suggestion, returnNewNodes=True) # You could also use the filename returned by the file command to query the reference node. # Atm there is a but, that if you import the file before, the command fails. 
# So we get all new reference nodes and query the one that is not referenced for refnode in cmds.ls(newnodes, type='reference'): if not cmds.referenceQuery(refnode, isNodeReferenced=True): node = refnode break ns = cmds.referenceQuery(node, namespace=True) # query the actual new namespace content = cmds.namespaceInfo(ns, listOnlyDependencyNodes=True, dagPath=True) # get the content # connect reftrack with scenenode scenenode = self.get_scenenode(content) self.get_refobjinter().connect_reftrack_scenenode(refobj, scenenode) reccontent = cmds.namespaceInfo(ns, listOnlyDependencyNodes=True, dagPath=True, recurse=True) # get the content + content of children dagcontent = cmds.ls(reccontent, ap=True, assemblies=True) # get only the top level dagnodes so we can group them if not dagcontent: return node # no need for a top group if there are not dagnodes to group # group the dagnodes grpname = reftrack.get_groupname(taskfileinfo) reftrack.group_content(dagcontent, ns, grpname, "jb_asset") return node
Reference the given taskfileinfo into the scene and return the created reference node The created reference node will be used on :meth:`RefobjInterface.set_reference` to set the reference on a reftrack node. Do not call :meth:`RefobjInterface.set_reference` yourself. This will also create a group node and group all dagnodes under a appropriate node. :param refobj: the reftrack node that will be linked to the reference :type refobj: str :param taskfileinfo: The taskfileinfo that holds the information for what to reference :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: the reference node that was created and should set on the appropriate reftrack node :rtype: str :raises: None
def export_osm_file(self):
    """Build an OpenStreetMap element tree for this ``Osm`` container.

    Wraps the ``toosm()`` output of every contained object in a top-level
    ``<osm>`` element carrying the generator and version attributes.
    """
    attributes = {'generator': self.generator, 'version': self.version}
    root = create_elem('osm', attributes)
    for child in self:
        root.append(child.toosm())
    return etree.ElementTree(root)
Generate OpenStreetMap element tree from ``Osm``.
def read_committed_file(gitref, filename):
    """Retrieve the content of a file at an old commit and return it.

    Keyword Arguments:
    :gitref: (str) -- full reference of the git commit
    :filename: (str) -- name (full path) of the file

    Returns:
        content of the file at that commit, read from the blob's data
        stream (bytes in GitPython — confirm if callers expect str)
    """
    repo = Repo()
    commitobj = repo.commit(gitref)

    # Look the file up inside the commit's tree, prefixed by the repo-relative
    # delta directory.
    blob = commitobj.tree[_delta_dir() + filename]
    return blob.data_stream.read()
Retrieve the content of a file at an old commit and return it.

    Keyword Arguments:
    :gitref: (str) -- full reference of the git commit
    :filename: (str) -- name (full path) of the file

    Returns:
        str -- content of the file
def calc_fc_size(img_height, img_width):
    '''Calculate the shape of the data after encoding.

    Parameters
    ----------
    img_height : int
        Height of input image.
    img_width : int
        Width of input image.

    Returns
    -------
    encoded_shape : tuple(int)
        3-tuple (channels, height, width) after the conv stack.
    '''
    # Five conv layers (kernel 4, stride 2, pad 1) shrink the spatial dims.
    h, w = img_height, img_width
    for _ in range(5):
        h, w = _get_conv_outsize((h, w), 4, 2, 1)
    # The final conv layer outputs 512 feature maps.
    return 512, h, w
Calculates shape of data after encoding. Parameters ---------- img_height : int Height of input image. img_width : int Width of input image. Returns ------- encoded_shape : tuple(int) Gives back 3-tuple with new dims.
def set_path(self, data, path, value):
    """Set `path` inside the nested dict `data` to `value`.

    Intermediate child dicts are created as needed. `path` may be a
    dot-delimited string or a list of path elements. Returns the
    (mutated) `data` dict.
    """
    self.say('set_path:value:' + str(value) + ' at:' + str(path) + ' in:' + str(data))

    keys = path.split('.') if isinstance(path, str) else path
    if len(keys) > 1:
        # Recurse into (creating if necessary) the child dict for the head key.
        self.set_path(data.setdefault(keys[0], {}), keys[1:], value)
    else:
        data[keys[0]] = value
    return data
Sets the given key in the given dict object to the given value. If the given path is nested, child dicts are created as appropriate. Accepts either a dot-delimited path or an array of path elements as the `path` variable.
def get_instance(cls, state):
    """Return the shared UserStorageHandler, creating it on first call.

    :param state: forwarded to the UserStorageHandler constructor when the
        singleton is first created; ignored on subsequent calls
    :rtype: UserStorageHandler
    """
    if cls.instance is None:
        cls.instance = UserStorageHandler(state)
    return cls.instance
:rtype: UserStorageHandler
def set_scale_alpha_from_selection(self): ''' Set scale marker to alpha for selected layer. ''' # 1. Look up selected layer. selection = self.treeview_layers.get_selection() list_store, selected_iter = selection.get_selected() # 2. Set adjustment to current alpha value for selected layer (if any). if selected_iter is None: # No layer was selected, so disable scale widget. self.adjustment_alpha.set_value(100) self.scale_alpha.set_sensitive(False) return else: surface_name, alpha = list_store[selected_iter] self.adjustment_alpha.set_value(alpha * 100) self.scale_alpha.set_sensitive(True)
Set scale marker to alpha for selected layer.
def get(self, field, value=None):
    """Gets user input for given field and checks if it is valid. If input
    is invalid, it will ask the user to enter it again. Defaults values to
    empty or :value:.

    It does not check validity of parent index. It can only be tested
    further down the road, so for now accept anything.

    :field: Field name ('name' and 'priority' get validation loops).
    :value: Default value to use for field.
    :returns: User input; an int for 'priority' (or None when the default
        priority is chosen), otherwise the raw input.
    """
    self.value = value
    val = self.input(field)
    if field == 'name':
        # Re-prompt until a non-empty name is supplied.
        while True:
            if val != '':
                break
            print("Name cannot be empty.")
            val = self.input(field)
    elif field == 'priority':
        if val == '':
            # Use default priority
            return None
        while True:
            if val in Get.PRIORITIES.values():
                break
            # Try translating the input through Get.PRIORITIES (presumably a
            # name -> number mapping — confirm); keep the original in `c`
            # for the error message.
            c, val = val, Get.PRIORITIES.get(val)
            if val:
                break
            print("Unrecognized priority number or name [{}].".format(c))
            val = self.input(field)
        val = int(val)
    return val
Gets user input for given field and checks if it is valid. If input is invalid, it will ask the user to enter it again. Defaults values to empty or :value:. It does not check validity of parent index. It can only be tested further down the road, so for now accept anything. :field: Field name. :value: Default value to use for field. :returns: User input.
def _writeFASTA(self, i, image):
    """
    Write a FASTA (or FASTQ) file containing the set of reads that hit a
    sequence.

    @param i: The number of the image in self._images; used to name the
        output file.
    @param image: A member of self._images.
    @return: A C{str}, either 'fasta' or 'fastq' indicating the format of
        the reads in C{self._titlesAlignments}.
    """
    # Mirror the input reads' format in the output file.
    if isinstance(self._titlesAlignments.readsAlignments.reads, FastqReads):
        format_ = 'fastq'
    else:
        format_ = 'fasta'
    filename = '%s/%d.%s' % (self._outputDir, i, format_)
    titleAlignments = self._titlesAlignments[image['title']]
    with open(filename, 'w') as fp:
        for titleAlignment in titleAlignments:
            fp.write(titleAlignment.read.toString(format_))
    return format_
Write a FASTA file containing the set of reads that hit a sequence. @param i: The number of the image in self._images. @param image: A member of self._images. @return: A C{str}, either 'fasta' or 'fastq' indicating the format of the reads in C{self._titlesAlignments}.
def write_totals(self, file_path='', date=None, organization='N/A',
                 members=0, teams=0):
    """Append a row of current organization totals to the totals CSV.

    Writes the CSV header first when the file does not exist yet, removes a
    previously-written row for the same date via `self.delete_last_line`,
    then appends a row built from the accumulated `self.total_*` counters.

    :param file_path: path of the totals CSV file.
    :param date: ISO date string for the row; defaults to today.
        (BUG FIX: the old signature used ``date=str(datetime.date.today())``,
        which was evaluated once at import time, freezing the default date
        for the whole process lifetime.)
    :param organization: organization name for the row.
    :param members: member count for the row.
    :param teams: team count for the row.
    """
    if date is None:
        date = str(datetime.date.today())
    total_exists = os.path.isfile(file_path)
    with open(file_path, 'a') as out_total:
        if not total_exists:
            out_total.write('date,organization,repos,members,teams,' +
                            'unique_contributors,total_contributors,forks,' +
                            'stargazers,pull_requests,open_issues,has_readme,' +
                            'has_license,pull_requests_open,pull_requests_closed,' +
                            'commits,id,closed_issues,issues\n')
        # Drop any row previously written for this same date.
        self.delete_last_line(date=date, file_path=file_path)
        # (explicit close() removed: the with-statement closes the file)
    with open(file_path, 'r') as file_read:
        # Row count excluding the header; reused as the row id below.
        row_count = sum(1 for row in file_read) - 1
    with open(file_path, 'a') as out_total:
        out_total.write(date + ',' + organization + ',' +
                        str(self.total_repos) + ',' + str(members) + ',' +
                        str(teams) + ',' + str(len(self.unique_contributors)) +
                        ',' + str(self.total_contributors) + ',' +
                        str(self.total_forks) + ',' + str(self.total_stars) +
                        ',' + str(self.total_pull_reqs) + ',' +
                        str(self.total_open_issues) + ',' +
                        str(self.total_readmes) + ',' +
                        str(self.total_licenses) + ',' +
                        str(self.total_pull_reqs_open) + ',' +
                        str(self.total_pull_reqs_closed) + ',' +
                        str(self.total_commits) + ',' + str(row_count) +
                        ',' + str(self.total_closed_issues) + ',' +
                        str(self.total_issues) + '\n')
Updates the total.csv file with current data.
def tiles(self) -> np.array:
    """An array of this console's tile data.

    Acts as a combination of the `ch`, `fg`, and `bg` attributes.
    Colors include an alpha channel, but how alpha works is currently
    undefined.

    A transposed view is returned for Fortran ("F") ordered consoles so
    that indexing is (x, y) rather than (y, x).

    .. versionadded:: 10.0
    """
    if self._order == "F":
        return self._tiles.T
    return self._tiles
An array of this consoles tile data. This acts as a combination of the `ch`, `fg`, and `bg` attributes. Colors include an alpha channel but how alpha works is currently undefined. Example:: >>> con = tcod.console.Console(10, 2, order="F") >>> con.tiles[0, 0] = ( ... ord("X"), ... (*tcod.white, 255), ... (*tcod.black, 255), ... ) >>> con.tiles[0, 0] (88, [255, 255, 255, 255], [ 0, 0, 0, 255]) .. versionadded:: 10.0
def Convert(self, metadata, checkresult, token=None):
    """Converts a single CheckResult.

    Args:
      metadata: ExportedMetadata to be used for conversion.
      checkresult: CheckResult to be converted.
      token: Security token. Not used here; accepted for converter
          interface compatibility.

    Yields:
      Resulting ExportedCheckResult. Empty list is a valid result and means
      that conversion wasn't possible.
    """
    if checkresult.HasField("anomaly"):
        for anomaly in checkresult.anomaly:
            # Always-present scalar fields are copied in the constructor.
            exported_anomaly = ExportedAnomaly(
                type=anomaly.type,
                severity=anomaly.severity,
                confidence=anomaly.confidence)
            # Optional fields are only set when present on the source anomaly.
            if anomaly.symptom:
                exported_anomaly.symptom = anomaly.symptom
            if anomaly.explanation:
                exported_anomaly.explanation = anomaly.explanation
            if anomaly.generated_by:
                exported_anomaly.generated_by = anomaly.generated_by
            if anomaly.anomaly_reference_id:
                # Repeated fields are flattened into newline-separated strings.
                exported_anomaly.anomaly_reference_id = "\n".join(
                    anomaly.anomaly_reference_id)
            if anomaly.finding:
                exported_anomaly.finding = "\n".join(anomaly.finding)
            yield ExportedCheckResult(
                metadata=metadata,
                check_id=checkresult.check_id,
                anomaly=exported_anomaly)
    else:
        # No anomalies: still emit one result so the check itself is recorded.
        yield ExportedCheckResult(
            metadata=metadata,
            check_id=checkresult.check_id)
Converts a single CheckResult. Args: metadata: ExportedMetadata to be used for conversion. checkresult: CheckResult to be converted. token: Security token. Yields: Resulting ExportedCheckResult. Empty list is a valid result and means that conversion wasn't possible.
def select_waveform_generator(approximant): """Returns the single-IFO generator for the approximant. Parameters ---------- approximant : str Name of waveform approximant. Valid names can be found using ``pycbc.waveform`` methods. Returns ------- generator : (PyCBC generator instance) A waveform generator object. Examples -------- Get a list of available approximants: >>> from pycbc import waveform >>> waveform.fd_approximants() >>> waveform.td_approximants() >>> from pycbc.waveform import ringdown >>> ringdown.ringdown_fd_approximants.keys() Get generator object: >>> from pycbc.waveform.generator import select_waveform_generator >>> select_waveform_generator(waveform.fd_approximants()[0]) """ # check if frequency-domain CBC waveform if approximant in waveform.fd_approximants(): return FDomainCBCGenerator # check if time-domain CBC waveform elif approximant in waveform.td_approximants(): return TDomainCBCGenerator # check if frequency-domain ringdown waveform elif approximant in ringdown.ringdown_fd_approximants: if approximant == 'FdQNMfromFinalMassSpin': return FDomainMassSpinRingdownGenerator elif approximant == 'FdQNMfromFreqTau': return FDomainFreqTauRingdownGenerator elif approximant in ringdown.ringdown_td_approximants: if approximant == 'TdQNMfromFinalMassSpin': return TDomainMassSpinRingdownGenerator elif approximant == 'TdQNMfromFreqTau': return TDomainFreqTauRingdownGenerator # otherwise waveform approximant is not supported else: raise ValueError("%s is not a valid approximant." % approximant)
Returns the single-IFO generator for the approximant. Parameters ---------- approximant : str Name of waveform approximant. Valid names can be found using ``pycbc.waveform`` methods. Returns ------- generator : (PyCBC generator instance) A waveform generator object. Examples -------- Get a list of available approximants: >>> from pycbc import waveform >>> waveform.fd_approximants() >>> waveform.td_approximants() >>> from pycbc.waveform import ringdown >>> ringdown.ringdown_fd_approximants.keys() Get generator object: >>> from pycbc.waveform.generator import select_waveform_generator >>> select_waveform_generator(waveform.fd_approximants()[0])
def lat(self):
    """Latitude of grid centers (degrees North)

    :getter: Returns the points of axis ``'lat'`` from the first of the
        process's domains that provides one.
    :type: array
    :raises: :exc:`ValueError` if no ``'lat'`` axis can be found.
    """
    # BUG FIX: the old code had `return thislat` inside the loop, so only
    # the first domain was ever inspected — a first domain without a 'lat'
    # axis raised ValueError even when a later domain had one, and empty
    # domains silently returned None.
    for dom in self.domains.values():
        try:
            return dom.axes['lat'].points
        except (KeyError, AttributeError, TypeError):
            # This domain has no usable 'lat' axis; keep searching.
            continue
    raise ValueError("Can't resolve a lat axis.")
Latitude of grid centers (degrees North)

        :getter: Returns the points of axis ``'lat'`` if available in the
            process's domains.
        :type: array
        :raises: :exc:`ValueError`
            if no ``'lat'`` axis can be found.
def show(self, temp_file_name = 'ani.mp4', **kwargs): """ ## Arguments: - 'args' and 'kwargs' will be passed to 'self.save()' """ ## [NOTE] Make this method as a method of base class. ## [NOTE] This should be modified to prevent erasing other existing file with the same name. assert type(temp_file_name) is str self.save(temp_file_name, **kwargs) ## [NOTE] Consider removing the temp file. ## [NOTE] Implement automatic showing. return HTML(""" <video width="%d" height="%d" controls> <source src="%s" type="video/mp4"> </video> """ % (640, 300, temp_file_name))
## Arguments: - 'args' and 'kwargs' will be passed to 'self.save()'
def angle(self, deg=False):
    """Return the angle of a complex Timeseries

    Args:
      deg (bool, optional): Return angle in degrees if True, radians if
        False (default).

    Returns:
      angle (Timeseries): The counterclockwise angle from the positive real
        axis on the complex plane, with dtype as numpy.float64.
    """
    # dtype.str[1] is the numpy type-kind character; 'c' means complex.
    if self.dtype.str[1] != 'c':
        warnings.warn('angle() is intended for complex-valued timeseries',
                      RuntimeWarning, 1)
    # Apply np.angle elementwise over the (possibly distributed) array,
    # then rewrap the result as a Timeseries with the same time base/labels.
    da = distob.vectorize(np.angle)(self, deg)
    return _dts_from_da(da, self.tspan, self.labels)
Return the angle of a complex Timeseries Args: deg (bool, optional): Return angle in degrees if True, radians if False (default). Returns: angle (Timeseries): The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64.
def get_proficiency_form_for_create(self, objective_id, resource_id, proficiency_record_types):
    """Gets the proficiency form for creating new proficiencies.

    A new form should be requested for each create transaction.

    arg:    objective_id (osid.id.Id): the ``Id`` of the ``Objective``
    arg:    resource_id (osid.id.Id): the ``Id`` of the ``Resource``
    arg:    proficiency_record_types (osid.type.Type[]): array of
            proficiency record types
    return: (osid.learning.ProficiencyForm) - the proficiency form
    raise:  NotFound - ``objective_id`` or ``resource_id`` is not found
    raise:  NullArgument - ``objective_id, resource_id,`` or
            ``proficiency_record_types`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - unable to get form for requested record types
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.relationship.RelationshipAdminSession.get_relationship_form_for_create
    # These really need to be in module imports:
    from dlkit.abstract_osid.id.primitives import Id as ABCId
    from dlkit.abstract_osid.type.primitives import Type as ABCType
    if not isinstance(objective_id, ABCId):
        raise errors.InvalidArgument('argument is not a valid OSID Id')
    if not isinstance(resource_id, ABCId):
        raise errors.InvalidArgument('argument is not a valid OSID Id')
    for arg in proficiency_record_types:
        if not isinstance(arg, ABCType):
            raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
    if proficiency_record_types == []:
        # Without record types the form is built with default behavior only.
        # WHY are we passing objective_bank_id = self._catalog_id below, seems redundant:
        obj_form = objects.ProficiencyForm(
            objective_bank_id=self._catalog_id,
            objective_id=objective_id,
            resource_id=resource_id,
            catalog_id=self._catalog_id,
            runtime=self._runtime,
            proxy=self._proxy)
    else:
        obj_form = objects.ProficiencyForm(
            objective_bank_id=self._catalog_id,
            record_types=proficiency_record_types,
            objective_id=objective_id,
            resource_id=resource_id,
            catalog_id=self._catalog_id,
            runtime=self._runtime,
            proxy=self._proxy)
    obj_form._for_update = False
    # Register the form as not-yet-created so create/update bookkeeping works.
    self._forms[obj_form.get_id().get_identifier()] = not CREATED
    return obj_form
Gets the proficiency form for creating new proficiencies.

        A new form should be requested for each create transaction.

        arg:    objective_id (osid.id.Id): the ``Id`` of the ``Objective``
        arg:    resource_id (osid.id.Id): the ``Id`` of the ``Resource``
        arg:    proficiency_record_types (osid.type.Type[]): array of
                proficiency record types
        return: (osid.learning.ProficiencyForm) - the proficiency form
        raise:  NotFound - ``objective_id`` or ``resource_id`` is not
                found
        raise:  NullArgument - ``objective_id, resource_id,`` or
                ``proficiency_record_types`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        raise:  Unsupported - unable to get form for requested record
                types
        *compliance: mandatory -- This method must be implemented.*
def AddFXrefRead(self, method, classobj, field):
    """Record a read cross-reference for `field` in this class.

    Lazily creates the per-field analysis object on first access.

    :param method: the method performing the read
    :param classobj: the class analysis that owns the method
    :param field: the field being read
    :return:
    """
    analysis = self._fields.get(field)
    if analysis is None:
        analysis = FieldClassAnalysis(field)
        self._fields[field] = analysis
    analysis.AddXrefRead(classobj, method)
Add a Field Read to this class :param method: :param classobj: :param field: :return:
def get_book_progress(self, asin):
    """Returns the progress data available for a book.

    NOTE: A summary of the two progress formats can be found in the
    docstring for `ReadingProgress`.

    Args:
      asin: The asin of the book to be queried.

    Returns:
      A `ReadingProgress` instance corresponding to the book associated
      with `asin`.
    """
    # The API call expects the ASIN as a JSON-quoted string argument.
    kbp = self._get_api_call('get_book_progress', '"%s"' % asin)
    return KindleCloudReaderAPI._kbp_to_progress(kbp)
Returns the progress data available for a book. NOTE: A summary of the two progress formats can be found in the docstring for `ReadingProgress`. Args: asin: The asin of the book to be queried. Returns: A `ReadingProgress` instance corresponding to the book associated with `asin`.
def collapse(cls, holomap, ranges=None, mode='data'): """ Given a map of Overlays, apply all applicable compositors. """ # No potential compositors if cls.definitions == []: return holomap # Apply compositors clone = holomap.clone(shared_data=False) data = zip(ranges[1], holomap.data.values()) if ranges else holomap.data.items() for key, overlay in data: clone[key] = cls.collapse_element(overlay, ranges, mode) return clone
Given a map of Overlays, apply all applicable compositors.
def claim_invitations(user): """Claims any pending invitations for the given user's email address.""" # See if there are any build invitations present for the user with this # email address. If so, replace all those invitations with the real user. invitation_user_id = '%s:%s' % ( models.User.EMAIL_INVITATION, user.email_address) invitation_user = models.User.query.get(invitation_user_id) if invitation_user: invited_build_list = list(invitation_user.builds) if not invited_build_list: return db.session.add(user) logging.debug('Found %d build admin invitations for id=%r, user=%r', len(invited_build_list), invitation_user_id, user) for build in invited_build_list: build.owners.remove(invitation_user) if not build.is_owned_by(user.id): build.owners.append(user) logging.debug('Claiming invitation for build_id=%r', build.id) save_admin_log(build, invite_accepted=True) else: logging.debug('User already owner of build. ' 'id=%r, build_id=%r', user.id, build.id) db.session.add(build) db.session.delete(invitation_user) db.session.commit() # Re-add the user to the current session so we can query with it. db.session.add(current_user)
Claims any pending invitations for the given user's email address.
def get_ascii(self, show_internal=True, compact=False, attributes=None):
    """Return a string containing an ASCII drawing of the tree.

    Parameters:
    -----------
    show_internal: include internal edge names.
    compact: use exactly one line per tip.
    attributes: A list of node attributes to show in the ASCII
        representation.
    """
    art_lines, _midpoint = self._asciiArt(
        show_internal=show_internal,
        compact=compact,
        attributes=attributes)
    return '\n' + '\n'.join(art_lines)
Returns a string containing an ascii drawing of the tree. Parameters: ----------- show_internal: include internal edge names. compact: use exactly one line per tip. attributes: A list of node attributes to shown in the ASCII representation.
def log2_lut(v):
    """
    See `this algo
    <https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup>`__
    for computing the log2 of a 32 bit integer using a look up table

    Parameters
    ----------
    v : numpy.ndarray
        Array of 32 bit integers (v.shape is used, so scalar int input
        is not supported here).

    Returns
    -------
    numpy.ndarray of int32
        Element-wise integer (floor) log base 2 of `v`, per the
        lookup-table algorithm above.
    """
    res = np.zeros(v.shape, dtype=np.int32)

    # The value is examined in 8-bit chunks: LogTable256 gives the log2 of
    # the highest non-zero byte, and the byte's bit offset is added.
    tt = v >> 16
    tt_zero = (tt == 0)
    tt_not_zero = ~tt_zero

    # Upper 16 bits non-zero: use bits 24-31 if set (+24), else bits 16-23 (+16).
    t_h = tt >> 8
    t_zero_h = (t_h == 0) & tt_not_zero
    t_not_zero_h = ~t_zero_h & tt_not_zero
    res[t_zero_h] = LogTable256[tt[t_zero_h]] + 16
    res[t_not_zero_h] = LogTable256[t_h[t_not_zero_h]] + 24

    # Upper 16 bits zero: use bits 8-15 if set (+8), else bits 0-7.
    t_l = v >> 8
    t_zero_l = (t_l == 0) & tt_zero
    t_not_zero_l = ~t_zero_l & tt_zero
    res[t_zero_l] = LogTable256[v[t_zero_l]]
    res[t_not_zero_l] = LogTable256[t_l[t_not_zero_l]] + 8
    return res
See `this algo <https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup>`__ for computing the log2 of a 32 bit integer using a look up table Parameters ---------- v : int 32 bit integer Returns -------
def main():
    """Main function of this example.

    Downloads a URL, writing the content through a hashing proxy stream to
    the chosen output file, then prints the digest of the bytes written.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--digest', default="md5", help="Digest to use",
        choices=sorted(
            getattr(hashlib, 'algorithms', None)
            or hashlib.algorithms_available))
    parser.add_argument(
        '--url', default="http://example.org", help="URL to load")
    # NOTE(review): "conentent" typo in the help text below — runtime
    # string, fix separately from this documentation pass.
    parser.add_argument(
        '-o', '--output', type=argparse.FileType('wb'), metavar='FILE',
        default='-', help="Where to write the retrieved conentent")
    opts = parser.parse_args()
    request = requestlib.Request(opts.url)
    reader = requestlib.urlopen(request)
    # Text streams (e.g. stdout) expose their binary buffer; hash raw bytes.
    stream = written_hash_proxy(
        opts.output.buffer if hasattr(opts.output, 'buffer') else opts.output,
        name=opts.digest)
    for chunk in reader:
        stream.write(chunk)
    stream.flush()
    print("{} of {} is {}".format(
        proxy.state(stream).digest.name, opts.url,
        proxy.state(stream).digest.hexdigest()))
Main function of this example.
def open_ioc(fn):
    """
    Opens an IOC file, or XML string. Returns the root element, metadata
    element, top level indicator element, and parameters element. If the
    IOC or string fails to parse, an IOCParseError is raised.

    This is a helper function used by __init__.

    :param fn: This is a path to a file to open, or a string containing XML representing an IOC.
    :return: a tuple containing four elementTree Element objects
     The first element, the root, contains the entire IOC itself.
     The second element is the metadata node.
     The third element, the top level OR indicator, allows the user to add
      additional IndicatorItem or Indicator nodes to the IOC easily.
     The fourth element, the parameters node, allows the user to quickly parse
      the parameters.
    """
    parsed_xml = xmlutils.read_xml_no_ns(fn)
    if not parsed_xml:
        raise IOCParseError('Error occured parsing XML')
    root = parsed_xml.getroot()
    metadata_node = root.find('metadata')
    top_level_indicator = get_top_level_indicator_node(root)
    parameters_node = root.find('parameters')
    if parameters_node is None:
        # parameters node is not required by schema; but we add it if it is not present
        parameters_node = ioc_et.make_parameters_node()
        root.append(parameters_node)
    return root, metadata_node, top_level_indicator, parameters_node
Opens an IOC file, or XML string. Returns the root element, top level indicator element, and parameters element. If the IOC or string fails to parse, an IOCParseError is raised. This is a helper function used by __init__. :param fn: This is a path to a file to open, or a string containing XML representing an IOC. :return: a tuple containing three elementTree Element objects The first element, the root, contains the entire IOC itself. The second element, the top level OR indicator, allows the user to add additional IndicatorItem or Indicator nodes to the IOC easily. The third element, the parameters node, allows the user to quickly parse the parameters.
def poll(self):
    """Poll agents for data.

    Decodes timestamped chunks from each agent's reader, buffers those
    collected after the load start, and returns the number of buffered
    items. Triggers sending once the first data has been received.
    """
    start_time = time.time()
    for agent in self.agents:
        for collect in agent.reader:
            # don't crush if trash or traceback came from agent to stdout
            # NOTE(review): this returns from poll() entirely on the first
            # falsy chunk, skipping remaining agents — confirm intended.
            if not collect:
                return 0
            for chunk in collect:
                ts, prepared_results = chunk
                # Ignore samples collected before the load actually started.
                if self.load_start_time and int(
                        ts) >= self.load_start_time:
                    ready_to_send = {
                        "timestamp": int(ts),
                        "data": {
                            self.hash_hostname(agent.host): {
                                "comment": agent.config.comment,
                                "metrics": prepared_results
                            }
                        }
                    }
                    self.__collected_data.append(ready_to_send)
    logger.debug(
        'Polling/decoding agents data took: %.2fms',
        (time.time() - start_time) * 1000)
    collected_data_length = len(self.__collected_data)
    if not self.first_data_received and self.__collected_data:
        self.first_data_received = True
        logger.info("Monitoring received first data.")
    else:
        self.send_collected_data()
    return collected_data_length
Poll agents for data
def convert_to_python(self, xmlrpc=None):
    """Extracts a value for the field from an XML-RPC response.

    :param xmlrpc: mapping of field names to values (an XML-RPC response
        dict), or None/empty.
    :returns: the response value for ``self.name``, falling back to
        ``self.default``; ``None`` when there is no response and the
        default is ``None``.
    """
    if xmlrpc:
        return xmlrpc.get(self.name, self.default)
    # BUG FIX: the old code used `elif self.default:`, so falsy defaults
    # (0, '', False) were silently dropped and None returned instead.
    return self.default
Extracts a value for the field from an XML-RPC response.
def rsdl(self):
    """Compute fixed point residual in Fourier domain.

    Returns the l2 norm (via ``sl.rfl2norm2``, over the filter axes given
    by ``self.cri.axisN``) of the difference between the current
    transform-domain iterate ``Xf`` and the previous ``Yfprv``.
    """
    diff = self.Xf - self.Yfprv
    return sl.rfl2norm2(diff, self.X.shape, axis=self.cri.axisN)
Compute fixed point residual in Fourier domain.
def names2dnsrepr(x):
    """Encode a DNS name, or a list of DNS names, into DNS wire format.

    A str/bytes input that already ends with a NUL byte is assumed to be
    in wire format already and is returned unchanged (encoded to bytes
    when given as str). Result is bytes.

    !!! At the moment, compression is not implemented !!!
    """
    # Normalize single-name inputs to a list of bytes names.
    if type(x) is str:
        if x and x[-1] == '\x00':  # stupid heuristic
            return x.encode('ascii')
        x = [x.encode('ascii')]
    elif type(x) is bytes:
        if x and x[-1] == 0:
            return x
        x = [x]

    encoded = []
    for name in x:
        if type(name) is str:
            name = name.encode('ascii')
        # Root-label terminator; single-component names get an extra NUL.
        terminator = b"\x00" if name.count(b'.') else b"\x00\x00"
        wire = b"".join(chr(len(label)).encode('ascii') + label
                        for label in name.split(b"."))
        encoded.append(wire + terminator)
    return b"".join(encoded)
Take as input a list of DNS names or a single DNS name and encode it in DNS format (with possible compression) If a string that is already a DNS name in DNS format is passed, it is returned unmodified. Result is a string. !!! At the moment, compression is not implemented !!!
def config_md5(self, source_config):
    """Return the MD5 hex digest of `source_config` plus a trailing
    newline (Cisco IOS automatically appends one to stored configs)."""
    normalized = (source_config + "\n").encode("UTF-8")
    return hashlib.md5(normalized).hexdigest()
Compute MD5 hash of file.
def _get_kind(self, limit):
    """ Get a set of dominant file types. The files must contribute
        at least C{limit}% to the item's total size.

        Uses (and on a miss, populates) the ``custom_kind`` cache field,
        formatted like ``"80%_flac 20%_jpg"``.
    """
    histo = self.fetch("custom_kind")
    if histo:
        # Parse histogram from cached field
        histo = [i.split("%_") for i in str(histo).split()]
        histo = [(int(val, 10), ext) for val, ext in histo]
        ##self._engine.LOG.debug("~~~~~~~~~~ cached histo = %r" % histo)
    else:
        # Get filetypes
        histo = traits.get_filetypes(self.fetch("files"),
                                     path=operator.attrgetter("path"),
                                     size=operator.attrgetter("size"))
        # Set custom cache field with value formatted like "80%_flac 20%_jpg" (sorted by percentage)
        histo_str = ' '.join(("%d%%_%s" % i).replace(' ', '_') for i in histo)
        self._make_it_so("setting kind cache %r on" % (histo_str,),
                         ["custom.set"], "kind", histo_str)
        self._fields["custom_kind"] = histo_str
    # Return all non-empty extensions that make up at least <limit>% of total size
    return set(ext for val, ext in histo if ext and val >= limit)
Get a set of dominant file types. The files must contribute at least C{limit}% to the item's total size.
def tracker_index():
    """Get tracker overview.

    Builds one time series per tracked class across all snapshots, plus a
    "Profiling overhead" series and either process segment series (when
    data-segment info is available) or a catch-all "Other" series.

    :returns: dict with ``snapshots`` and ``timeseries``; only
        ``snapshots=[]`` when no annotated snapshots exist.
    """
    stats = server.stats
    if stats and stats.snapshots:
        stats.annotate()
        timeseries = []
        for cls in stats.tracked_classes:
            series = []
            for snapshot in stats.snapshots:
                series.append(snapshot.classes.get(cls, {}).get('sum', 0))
            timeseries.append((cls, series))
        series = [s.overhead for s in stats.snapshots]
        timeseries.append(("Profiling overhead", series))
        if stats.snapshots[0].system_total.data_segment:
            # Assume tracked data resides in the data segment
            series = [s.system_total.data_segment - s.tracked_total - s.overhead
                      for s in stats.snapshots]
            timeseries.append(("Data segment", series))
            series = [s.system_total.code_segment for s in stats.snapshots]
            timeseries.append(("Code segment", series))
            series = [s.system_total.stack_segment for s in stats.snapshots]
            timeseries.append(("Stack segment", series))
            series = [s.system_total.shared_segment for s in stats.snapshots]
            timeseries.append(("Shared memory", series))
        else:
            # No segment data: lump everything untracked into "Other".
            series = [s.total - s.tracked_total - s.overhead
                      for s in stats.snapshots]
            timeseries.append(("Other", series))
        return dict(snapshots=stats.snapshots, timeseries=timeseries)
    else:
        return dict(snapshots=[])
Get tracker overview.
def configure_gateway(
        cls, launch_jvm: bool = True,
        gateway: Union[GatewayParameters, Dict[str, Any]] = None,
        callback_server: Union[CallbackServerParameters, Dict[str, Any]] = False,
        javaopts: Iterable[str] = (), classpath: Iterable[str] = ''):
    """
    Configure a Py4J gateway.

    :param launch_jvm: ``True`` to spawn a Java Virtual Machine in a
        subprocess and connect to it, ``False`` to connect to an existing
        Py4J enabled JVM
    :param gateway: either a :class:`~py4j.java_gateway.GatewayParameters`
        object or a dictionary of keyword arguments for it
    :param callback_server: callback server parameters or a boolean
        indicating if a callback server is wanted
    :param javaopts: options passed to Java itself
    :param classpath: path or iterable of paths to pass to the JVM launcher
        as the class path
    """
    assert check_argument_types()
    classpath = classpath if isinstance(classpath, str) else os.pathsep.join(classpath)
    javaopts = list(javaopts)

    # Substitute package names with their absolute directory paths
    for match in package_re.finditer(classpath):
        pkgname = match.group(1)
        module = import_module(pkgname)
        module_dir = os.path.dirname(module.__file__)
        classpath = classpath.replace(match.group(0), module_dir)

    if gateway is None:
        gateway = {}
    if isinstance(gateway, dict):
        # Defaults applied only when the caller has not overridden them.
        gateway.setdefault('eager_load', True)
        gateway.setdefault('auto_convert', True)
        gateway = GatewayParameters(**gateway)

    if isinstance(callback_server, dict):
        callback_server = CallbackServerParameters(**callback_server)
    elif callback_server is True:
        callback_server = CallbackServerParameters()

    return launch_jvm, gateway, callback_server, classpath, javaopts
Configure a Py4J gateway. :param launch_jvm: ``True`` to spawn a Java Virtual Machine in a subprocess and connect to it, ``False`` to connect to an existing Py4J enabled JVM :param gateway: either a :class:`~py4j.java_gateway.GatewayParameters` object or a dictionary of keyword arguments for it :param callback_server: callback server parameters or a boolean indicating if a callback server is wanted :param javaopts: options passed to Java itself :param classpath: path or iterable of paths to pass to the JVM launcher as the class path
def avl_join_dir_recursive(t1, t2, node, direction):
    """
    Recursive version of join_left and join_right
    TODO: make this iterative using a stack

    Joins ``t1`` and ``t2`` with ``node`` as the connecting element,
    descending the spine of the taller tree in ``direction`` (0 = left,
    1 = right) and rebalancing with single rotations on the way out.
    """
    other_side = 1 - direction
    if _DEBUG_JOIN_DIR:
        print('--JOIN DIR (dir=%r) --' % (direction,))
        ascii_tree(t1, 't1')
        ascii_tree(t2, 't2')

    if direction == 0:
        large, small = t2, t1
    elif direction == 1:
        large, small = t1, t2
    else:
        assert False

    # Follow the spine of the larger tree
    spine = large[direction]
    rest = large[other_side]
    # k_, v_ = large.key, large.value

    hsmall = height(small)
    hspine = height(spine)
    hrest = height(rest)

    if _DEBUG_JOIN_DIR:
        ascii_tree(spine, 'spine')
        ascii_tree(rest, 'rest')
        ascii_tree(small, 'small')

    if hspine <= hsmall + 1:
        # Base case: the spine is short enough to attach `small` directly.
        t_ = avl_new_top(small, spine, node, direction)
        if _DEBUG_JOIN_DIR:
            print('JOIN DIR (BASE)')
            ascii_tree(t_, 't_')
        if height(t_) <= hrest + 1:
            if _DEBUG_JOIN_DIR:
                print('JOIN DIR (Case 1)')
            return avl_new_top(t_, rest, large, direction)
        else:
            # Double rotation, but with a new node
            if _DEBUG_JOIN_DIR:
                print('JOIN DIR (Case 2)')
            t_rotate = avl_rotate_single(t_, direction)
            if _DEBUG_JOIN_DIR:
                # Debug-only structural sanity checks.
                ascii_tree(t_rotate, 't_rotate')
                EulerTourTree(root=t_rotate)._assert_nodes('t_rotate')
            t_merge = avl_new_top(rest, t_rotate, large, other_side)
            if _DEBUG_JOIN_DIR:
                ascii_tree(t_merge, 't_merge')
                EulerTourTree(root=t_merge)._assert_nodes('t_merge')
            new_root = avl_rotate_single(t_merge, other_side)
            if _DEBUG_JOIN_DIR:
                ascii_tree(new_root, 'new_root')
                EulerTourTree(root=new_root)._assert_nodes('new_root')
            return new_root
    else:
        # Traverse down the spine in the appropriate direction
        if _DEBUG_JOIN_DIR:
            print('JOIN DIR (RECURSE)')
        if direction == 0:
            t_ = avl_join_dir_recursive(small, spine, node, direction)
        elif direction == 1:
            # NOTE(review): here t2 is `small` (direction == 1), so this is
            # symmetric with the branch above.
            t_ = avl_join_dir_recursive(spine, t2, node, direction)
        else:
            raise AssertionError('invalid direction')
        t__ = avl_new_top(t_, rest, large, direction)
        if height(t_) <= hrest + 1:
            if _DEBUG_JOIN_DIR:
                print('JOIN DIR (Case 3)')
            return t__
        else:
            if _DEBUG_JOIN_DIR:
                print('JOIN DIR (Case 4)')
            return avl_rotate_single(t__, other_side)
    assert False, 'should never get here'
Recursive version of join_left and join_right TODO: make this iterative using a stack
def popen(fn, *args, **kwargs) -> subprocess.Popen:
    """Spawn *fn* as a subprocess and return its Popen handle.

    Please ensure you're not killing the process before it had started
    properly.

    :param fn: command to run (encoded via ``popen_encode``)
    :return: the started :class:`subprocess.Popen`
    """
    cmd = popen_encode(fn, *args, **kwargs)
    logging.getLogger(__name__).debug('Start %s', cmd)
    return subprocess.Popen(cmd)
Please ensure you're not killing the process before it had started properly :param fn: :param args: :param kwargs: :return:
def get_tunnel_info_input_filter_type_filter_by_dip_dest_ip(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    # Generated quirk preserved: the <config> root is discarded and the
    # request element itself is what gets passed to the callback.
    config = ET.Element("get_tunnel_info")
    node = config
    for tag in ("input", "filter-type", "filter-by-dip", "dest-ip"):
        node = ET.SubElement(node, tag)
    node.text = kwargs.pop('dest_ip')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def delete(self):
    """Delete this resource on the server via the attached client."""
    url = self.__class__.base_url(
        self.sys['space'].id,
        self.sys['id'],
        environment_id=self._environment_id
    )
    return self._client._delete(url)
Deletes the resource.
def bsinPoints(pb, pe):
    """Return the 13 Bezier control points for one full sine period.

    ``pb`` and ``pe`` stand for (0, 0) and (2*pi, 0) respectively in the
    user's coordinate system and must share the same y coordinate. The
    returned points describe up to four Bezier curves covering the complete
    phase of the sine graph (0 to 360 degrees).
    """
    v = pe - pb
    assert v.y == 0, "begin and end points must have same y coordinate"
    unit = abs(v) * 0.5 / math.pi  # length of one radian in user space
    CP1 = 5.34295228e-01
    CP2 = 1.01474288e+00
    y_ampl = (0, unit)
    y_cp1 = (0, unit * CP1)
    y_cp2 = (0, unit * CP2)
    # On-curve points at the quarter-period marks.
    p0, p4 = pb, pe
    p1 = pb + v * 0.25 - y_ampl
    p2 = pb + v * 0.5
    p3 = pb + v * 0.75 + y_ampl
    # Off-curve control points at the twelfth-period marks.
    k1 = pb + v * (1./12.) - y_cp1
    k2 = pb + v * (2./12.) - y_cp2
    k3 = pb + v * (4./12.) - y_cp2
    k4 = pb + v * (5./12.) - y_cp1
    k5 = pb + v * (7./12.) + y_cp1
    k6 = pb + v * (8./12.) + y_cp2
    k7 = pb + v * (10./12.) + y_cp2
    k8 = pb + v * (11./12.) + y_cp1
    return p0, k1, k2, p1, k3, k4, p2, k5, k6, p3, k7, k8, p4
Return Bezier control points, when pb and pe stand for a full period from (0,0) to (2*pi, 0), respectively, in the user's coordinate system. The returned points can be used to draw up to four Bezier curves for the complete phase of the sine function graph (0 to 360 degrees).
def _import_submodules(
        __all__, __path__, __name__, include=None, exclude=None,
        include_private_modules=False, require__all__=True, recursive=True):
    """
    Import all available submodules, all objects defined in the `__all__`
    lists of those submodules, and extend `__all__` with the imported
    objects.

    Args:
        __all__ (list): The list of public objects in the "root" module
        __path__ (str): The path where the ``__init__.py`` file for the
            "root" module is located in the file system (every module has a
            global `__path__` variable which should be passed here)
        __name__ (str): The full name of the "root" module. Again, every
            module has a global `__name__` variable.
        include (list or None): If not None, list of full module names to be
            included. That is, every module not in the `include` list is
            ignored
        exclude (list or None): List of full module names to be excluded
            from the (recursive) input
        include_private_modules (bool): Whether to include modules whose
            name starts with an underscore
        recursive (bool): Whether to recursively act on submodules of the
            "root" module. This will make sub-submodules available both in
            the submodule, and in the "root" module

    Note:
        ``require__all__`` is currently unused in this implementation —
        TODO confirm whether it should gate the ``__all__`` fallback below.
    """
    mod = sys.modules[__name__]
    if exclude is None:
        exclude = []
    for (_, submodname, ispkg) in pkgutil.iter_modules(path=__path__):
        if submodname.startswith('_') and not include_private_modules:
            continue
        submod = importlib.import_module('.' + submodname, __name__)
        if submod.__name__ in exclude:
            continue
        if include is not None:
            if submod.__name__ not in include:
                continue
        # Guarantee every submodule has an __all__ so the loop below works.
        if not hasattr(submod, '__all__'):
            setattr(submod, '__all__', [])
        if recursive and ispkg:
            _import_submodules(
                submod.__all__, submod.__path__, submod.__name__)
        setattr(mod, submodname, submod)
        for obj_name in submod.__all__:
            obj = getattr(submod, obj_name)
            if hasattr(mod, obj_name):
                existing_obj = getattr(mod, obj_name)
                if existing_obj is obj:
                    continue
                else:
                    # Two different submodules export the same public name.
                    raise ImportError(
                        "{mod}.{attr} points to {submod1}.{attr}. "
                        "Cannot set to {submod2}.{attr}".format(
                            mod=mod.__name__, attr=obj_name,
                            submod1=existing_obj.__module__,
                            submod2=obj.__module__))
            setattr(mod, obj_name, obj)
            __all__.append(obj_name)
    __all__.sort()
Import all available submodules, all objects defined in the `__all__` lists of those submodules, and extend `__all__` with the imported objects. Args: __all__ (list): The list of public objects in the "root" module __path__ (str): The path where the ``__init__.py`` file for the "root" module is located in the file system (every module has a global `__path__` variable which should be passed here) __name__ (str): The full name of the "root" module. Again, every module has a global `__name__` variable. include (list or None): If not None, list of full module names to be included. That is, every module not in the `include` list is ignored exclude (list or None): List of full module names to be excluded from the (recursive) input include_private_modules (bool): Whether to include modules whose name starts with an underscore recursive (bool): Whether to recursively act on submodules of the "root" module. This will make sub-submodules available both in the submodule, and in the "root" module
def process_query(self):
    """Tokenize and stem the user query.

    Stores the word-tokenized query back on ``self.query`` and the stemmed,
    stop-word/punctuation-free tokens on ``self.processed_query``.
    """
    self.query = wt(self.query)
    self.processed_query = [
        self.stemmer.stem(token)
        for token in self.query
        if token not in self.stop_words and token not in self.punctuation
    ]
Q.process_query() -- processes the user query, by tokenizing and stemming words.
def destination(self, value):
    """
    Set the destination of the message.

    :type value: tuple
    :param value: (ip, port); ``None`` is accepted and stored as-is.
    :raise AttributeError: if value is neither None nor an (ip, port) pair.
    """
    # Fixed misleading parenthesization: the original applied "!= 2" to the
    # whole boolean expression, i.e. (not isinstance(...) or len(value)) != 2,
    # which only worked by numeric coincidence. Spell out the intent.
    if value is not None and (not isinstance(value, tuple) or len(value) != 2):
        raise AttributeError
    self._destination = value
Set the destination of the message. :type value: tuple :param value: (ip, port) :raise AttributeError: if value is not a ip and a port.
def get_order(self, order_id):
    """Lookup an order based on the order id returned from one of the
    order functions.

    Parameters
    ----------
    order_id : str
        The unique identifier for the order.

    Returns
    -------
    order : Order
        The order object, or ``None`` when the id is unknown.
    """
    order = self.blotter.orders.get(order_id)
    if order is not None:
        return order.to_api_obj()
Lookup an order based on the order id returned from one of the order functions. Parameters ---------- order_id : str The unique identifier for the order. Returns ------- order : Order The order object.
def put(self, url, data=None, verify=False, headers=None, proxies=None,
        timeout=60, **kwargs):
    """Sends a PUT request. Refactor from requests module

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send
        in the body of the :class:`Request`.
    :param verify: (optional) if ``True``, the SSL cert will be verified.
        A CA_BUNDLE path can also be provided.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param proxies: (optional) Dictionary mapping protocol to the URL of
        the proxy.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a
        :ref:`(connect timeout, read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    self.log.debug("Put a request to %s with data: %s", url, data)
    response = requests.put(url, data=data, verify=verify, headers=headers,
                            proxies=proxies, timeout=timeout, **kwargs)
    # Anything other than 200/201 is logged and raised as an HTTPError.
    if response.status_code not in (200, 201):
        self.log.error('Failed PUT request at <%s> with response: %s',
                       url, response.content)
        response.raise_for_status()
    return response
Sends a PUT request. Refactor from requests module :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response
def list_flavors(self, limit=None, marker=None):
    """Return a list of all available Flavors, optionally paginated."""
    manager = self._flavor_manager
    return manager.list(limit=limit, marker=marker)
Returns a list of all available Flavors.
def response(uri, method, res, token='', keyword='', content='',
             raw_flag=False):
    """Response of tonicdns_client request

    Arguments:

        uri:      TonicDNS API URI
        method:   TonicDNS API request method
        res:      Response of against request to TonicDNS API
        token:    TonicDNS API token
        keyword:  Processing keyword
        content:  JSON data
        raw_flag: True is return responsed raw data, False is pretty print
    """
    if method == 'GET' or (method == 'PUT' and not token):
        # response body
        data = res.read()
        data_utf8 = data.decode('utf-8')
        if token:
            datas = json.loads(data_utf8)
        else:
            # No token yet: this response IS the authentication token.
            token = json.loads(data_utf8)['hash']
            return token
        if keyword == 'serial':
            # filtering with keyword
            record = search_record(datas, 'SOA')[0]
            # if SOA record, remove priority unnecessary
            del record['priority']
            # override ttl
            record['ttl'] = int(record['ttl'])
            c = JSONConverter(content['domain'])
            new_record = c.get_soa(record, content)
            return record, new_record
        elif keyword:
            # '--search' option of 'get' subcommand
            records = search_record(datas, keyword)
            datas.update({"records": records})
        if uri.split('/')[3] == 'template':
            # 'tmpl_get' subcommand
            if len(uri.split('/')) == 5:
                # when specify template identfier
                #print_formatted(datas)
                utils.pretty_print(datas)
            else:
                # when get all templates
                for data in datas:
                    # NOTE(review): prints the full collection once per
                    # template — looks like it should be pretty_print(data);
                    # confirm upstream before changing.
                    #print_formatted(data)
                    utils.pretty_print(datas)
        else:
            # 'get' subcommand
            if raw_flag:
                return datas
            else:
                #print_formatted(datas)
                if len(uri.split('zone/')) > 1:
                    domain = uri.split('zone/')[1]
                else:
                    domain = ''
                utils.pretty_print(datas, keyword, domain)
    else:
        # response non JSON data
        data = res.read()
        print(data)
Response of tonicdns_client request Arguments: uri: TonicDNS API URI method: TonicDNS API request method res: Response of against request to TonicDNS API token: TonicDNS API token keyword: Processing keyword content: JSON data raw_flag: True is return responsed raw data, False is pretty print
def restart_listener(self, topics):
    '''Restart listener after configuration update.
    '''
    if self.listener is not None:
        if self.listener.running:
            self.stop()
    # Re-initialise this object with the new topic list.
    # NOTE(review): assumed to run also when no listener existed yet —
    # confirm the intended nesting against upstream.
    self.__init__(topics=topics)
Restart listener after configuration update.
def is_valid_preview(preview):
    '''Verifies that the preview is a valid filetype (PNG or PDF).'''
    if not preview:
        return False
    return mimetype(preview) in [ExportMimeType.PNG, ExportMimeType.PDF]
Verifies that the preview is a valid filetype
def delete_records(self, domain, name, record_type=None):
    """Deletes records by name.  You can also add a record type, which will only delete records with the
    specified type/name combo. If no record type is specified, ALL records that have a matching name will be
    deleted.

    This is haphazard functionality. I DO NOT recommend using this in Production code, as your entire DNS record
    set could be deleted, depending on the fickleness of GoDaddy. Unfortunately, they do not expose a proper
    "delete record" call, so there isn't much one can do here...

    :param domain: the domain to delete records from
    :param name: the name of records to remove
    :param record_type: the type of records to remove
    :return: True if no exceptions occurred
    """
    records = self.get_records(domain)
    if records is None:
        return False  # we don't want to replace the records with nothing at all
    # Partition: records to keep in `save`, count those being dropped.
    save = list()
    deleted = 0
    for record in records:
        if (record_type == str(record['type']) or record_type is None) and name == str(record['name']):
            deleted += 1
        else:
            save.append(record)
    # GoDaddy has no per-record delete: replace the whole set minus matches.
    self.replace_records(domain, records=save)
    self.logger.info("Deleted {} records @ {}".format(deleted, domain))
    # If we didn't get any exceptions, return True to let the user know
    return True
Deletes records by name. You can also add a record type, which will only delete records with the specified type/name combo. If no record type is specified, ALL records that have a matching name will be deleted. This is haphazard functionality. I DO NOT recommend using this in Production code, as your entire DNS record set could be deleted, depending on the fickleness of GoDaddy. Unfortunately, they do not expose a proper "delete record" call, so there isn't much one can do here... :param domain: the domain to delete records from :param name: the name of records to remove :param record_type: the type of records to remove :return: True if no exceptions occurred
def occurrences_after(self, after=None):
    """
    It is often useful to know what the next occurrence is given a list of
    events.  This function produces a generator that yields the
    most recent occurrence after the date ``after`` from any of the events
    in ``self.events``, in ascending order (k-way merge over per-event
    occurrence generators).
    """
    from schedule.models import Occurrence
    if after is None:
        after = timezone.now()
    occ_replacer = OccurrenceReplacer(
        Occurrence.objects.filter(event__in=self.events))
    generators = [event._occurrences_after_generator(after)
                  for event in self.events]
    occurrences = []

    # Seed a min-heap with the first occurrence from each event generator.
    for generator in generators:
        try:
            heapq.heappush(occurrences, (next(generator), generator))
        except StopIteration:
            pass

    while occurrences:
        generator = occurrences[0][1]
        try:
            # Pop the smallest occurrence and refill from its generator.
            next_occurrence = heapq.heapreplace(
                occurrences, (next(generator), generator))[0]
        except StopIteration:
            # Generator exhausted: just pop without refilling.
            next_occurrence = heapq.heappop(occurrences)[0]
        yield occ_replacer.get_occurrence(next_occurrence)
It is often useful to know what the next occurrence is given a list of events. This function produces a generator that yields the most recent occurrence after the date ``after`` from any of the events in ``self.events``
def timer_expired(self):
    """
    This method is invoked in context of the timer thread, so we cannot
    directly throw exceptions (we can, but they would be in the wrong
    thread), so instead we shut down the socket of the connection.
    When the timeout happens in early phases of the connection setup, there
    is no socket object on the HTTP connection yet, in that case we retry
    after the retry duration, indefinitely.
    So we do not guarantee in all cases that the overall operation times out
    after the specified timeout.
    """
    if self._http_conn.sock is not None:
        # Force the blocked request in the main thread to fail by closing
        # both directions of the connection's socket.
        self._shutdown = True
        self._http_conn.sock.shutdown(socket.SHUT_RDWR)
    else:
        # Retry after the retry duration
        self._timer.cancel()
        self._timer = threading.Timer(self._retrytime,
                                      HTTPTimeout.timer_expired, [self])
        self._timer.start()
This method is invoked in context of the timer thread, so we cannot directly throw exceptions (we can, but they would be in the wrong thread), so instead we shut down the socket of the connection. When the timeout happens in early phases of the connection setup, there is no socket object on the HTTP connection yet, in that case we retry after the retry duration, indefinitely. So we do not guarantee in all cases that the overall operation times out after the specified timeout.
def make_order_string(cls, order_specification):
    """Converts the given order specification to a CQL order expression."""
    registry = get_current_registry()
    visitor_cls = registry.getUtility(IOrderSpecificationVisitor,
                                      name=EXPRESSION_KINDS.CQL)
    visitor = visitor_cls()
    # The visitor accumulates the expression while walking the spec.
    order_specification.accept(visitor)
    return str(visitor.expression)
Converts the given order specification to a CQL order expression.
def write_contents(self, table, reader):
    """Write the contents of `table`

    :Parameters:
      - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
      - `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.

    Returns None
    """
    faker = self.FileObjFaker(table, reader.read(table), self.process_row,
                              self.verbose)
    quoted_columns = ['"%s"' % column['name'] for column in table.columns]
    self.copy_from(faker, '"%s"' % table.name, quoted_columns)
Write the contents of `table` :Parameters: - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write. - `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source. Returns None
def support_autoupload_param_hostip(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    support = ET.SubElement(config, "support",
                            xmlns="urn:brocade.com:mgmt:brocade-ras")
    node = support
    for tag in ("autoupload-param", "hostip"):
        node = ET.SubElement(node, tag)
    node.text = kwargs.pop('hostip')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def get_template_by_name(name, **kwargs):
    """
    Get a specific resource template, by name.
    """
    query = db.DBSession.query(Template).filter(
        Template.name == name
    ).options(
        joinedload_all('templatetypes.typeattrs.default_dataset.metadata')
    )
    try:
        return query.one()
    except NoResultFound:
        log.info("%s is not a valid identifier for a template", name)
        raise HydraError('Template "%s" not found' % name)
Get a specific resource template, by name.
def request(self, path, method='GET', headers=None, **kwargs):
    """Perform a HTTP request.

    Given a relative Bugzilla URL path, an optional request method, and
    arguments suitable for requests.Request(), perform a HTTP request.
    """
    # Work on a copy so the caller's dict is never mutated.
    merged = {} if headers is None else dict(headers)
    merged["User-Agent"] = "Bugsy"
    kwargs['headers'] = merged
    url = '%s/%s' % (self.bugzilla_url, path)
    return self._handle_errors(
        self.session.request(method, url, **kwargs))
Perform a HTTP request. Given a relative Bugzilla URL path, an optional request method, and arguments suitable for requests.Request(), perform a HTTP request.
def gpg_interactive_input(self, sub_keys_number):
    """Build the stdin script normally supplied to gpg --edit-key.

    Each sub key command is followed by a "key 0" deselect so no other
    --edit-key command is actually passing through; ends with "save".
    """
    deselect_sub_key = "key 0\n"
    parts = [self._main_key_command()]
    for sub_key_number in range(1, sub_keys_number + 1):
        parts.append(self._sub_key_command(sub_key_number) + deselect_sub_key)
    return "%ssave\n" % "".join(parts)
processes series of inputs normally supplied on --edit-key but passed through stdin this ensures that no other --edit-key command is actually passing through.
def importobj(modpath, attrname):
    """Import module *modpath*, then resolve the dotted *attrname* on it.

    With a falsy *attrname* the module itself is returned.
    """
    module = __import__(modpath, None, None, ['__doc__'])
    if not attrname:
        return module
    target = module
    for part in attrname.split("."):
        target = getattr(target, part)
    return target
imports a module, then resolves the attrname on it
def _get_connection(self):
    """
    Returns connection to sqlite db.

    Returns:
        connection to the sqlite db who stores mpr data.
    """
    if getattr(self, '_connection', None):
        logger.debug('Connection to sqlite db already exists. Using existing one.')
        return self._connection
    # Translate the sqlite:// style DSN into what apsw expects.
    if self._dsn == 'sqlite://':
        dsn = ':memory:'
    else:
        dsn = self._dsn.replace('sqlite:///', '')
    logger.debug(
        'Creating new apsw connection.\n dsn: {}, config_dsn: {}'
        .format(dsn, self._dsn))
    self._connection = apsw.Connection(dsn)
    return self._connection
Returns connection to sqlite db. Returns: connection to the sqlite db who stores mpr data.
def __wrap(self, method_name):
    """
    Build the wrapper for one method. The returned callable records the
    call (via the cache) so it can be repeated, and wraps the result in a
    new Facade carrying a copy of the exclusion list.

    :param str method_name: The name of the method precisely as it's called
        on the object to wrap.
    :rtype: callable
    """
    def proxy(*args, **kwargs):
        cached = self.__cache(method_name, *args, **kwargs)
        return Facade(cached, list(self.__exclusion_list))
    return proxy
This method actually does the wrapping. When it's given a method to copy it returns that method with facilities to log the call so it can be repeated. :param str method_name: The name of the method precisely as it's called on the object to wrap. :rtype lambda function:
def check_input(prolog_file):
    '''
    Check for illegal predicates (like reading/writing, opening sockets,
    etc). Raises Exception on the first match; returns None otherwise.
    '''
    if prolog_file is None:  # idiom fix: was `== None`
        return
    for pred in illegal_predicates:
        # Entries are either (regex_name, display_name) tuples or plain names.
        if isinstance(pred, tuple):  # idiom fix: was `type(pred) == tuple`
            print_name = pred[1]
            pred = pred[0]
        else:
            print_name = pred
        # Match the predicate name when followed by typical Prolog syntax.
        if re.search(r'[^\w]' + pred + r'\s*[\(\)\:\.\,\;]+', prolog_file):
            raise Exception('Illegal predicate "%s" used in your input, aborting. If your own predicate clashes with a predefined YAP predicate, you must rename it.' % print_name)
Check for illegal predicates (like reading/writing, opening sockets, etc).
def _learnPhase1(self, activeColumns, readOnly=False):
    """
    Compute the learning active state given the predicted state and
    the bottom-up input.

    :param activeColumns list of active bottom-ups
    :param readOnly      True if being called from backtracking logic.
                         This tells us not to increment any segment
                         duty cycles or queue up any updates.
    :returns: True if the current input was sufficiently predicted, OR
             if we started over on startCells. False indicates that the current
             input was NOT predicted, well enough to consider it as "inSequence"

    This looks at:
        - @ref lrnActiveState['t-1']
        - @ref lrnPredictedState['t-1']

    This modifies:
        - @ref lrnActiveState['t']
        - @ref lrnActiveState['t-1']
    """
    # Save previous active state and start out on a clean slate
    self.lrnActiveState['t'].fill(0)

    # For each column, turn on the predicted cell. There will always be at most
    # one predicted cell per column
    numUnpredictedColumns = 0
    for c in activeColumns:
        predictingCells = numpy.where(self.lrnPredictedState['t-1'][c] == 1)[0]
        numPredictedCells = len(predictingCells)
        assert numPredictedCells <= 1

        # If we have a predicted cell, turn it on. The segment's posActivation
        # count will have already been incremented by processSegmentUpdates
        if numPredictedCells == 1:
            i = predictingCells[0]
            self.lrnActiveState['t'][c, i] = 1
            continue

        numUnpredictedColumns += 1
        if readOnly:
            # Backtracking: count the miss, but don't learn anything.
            continue

        # If no predicted cell, pick the closest matching one to reinforce, or
        # if none exists, create a new segment on a cell in that column
        i, s, numActive = self._getBestMatchingCell(
            c, self.lrnActiveState['t-1'], self.minThreshold)
        if s is not None and s.isSequenceSegment():
            if self.verbosity >= 4:
                print "Learn branch 0, found segment match. Learning on col=", c
            self.lrnActiveState['t'][c, i] = 1
            segUpdate = self._getSegmentActiveSynapses(
                c, i, s, self.lrnActiveState['t-1'], newSynapses = True)
            s.totalActivations += 1
            # This will update the permanences, posActivationsCount, and the
            # lastActiveIteration (age).
            trimSegment = self._adaptSegment(segUpdate)
            if trimSegment:
                self._trimSegmentsInCell(c, i, [s], minPermanence = 0.00001,
                                         minNumSyns = 0)

        # If no close match exists, create a new one
        else:
            # Choose a cell in this column to add a new segment to
            i = self._getCellForNewSegment(c)
            if (self.verbosity >= 4):
                print "Learn branch 1, no match. Learning on col=", c,
                print ", newCellIdxInCol=", i
            self.lrnActiveState['t'][c, i] = 1
            segUpdate = self._getSegmentActiveSynapses(
                c, i, None, self.lrnActiveState['t-1'], newSynapses=True)
            segUpdate.sequenceSegment = True  # Make it a sequence segment
            self._adaptSegment(segUpdate)  # No need to check whether perm reached 0

    # Determine if we are out of sequence or not and reset our PAM counter
    # if we are in sequence
    numBottomUpColumns = len(activeColumns)
    if numUnpredictedColumns < numBottomUpColumns / 2:
        return True   # in sequence
    else:
        return False
Compute the learning active state given the predicted state and the bottom-up input. :param activeColumns list of active bottom-ups :param readOnly True if being called from backtracking logic. This tells us not to increment any segment duty cycles or queue up any updates. :returns: True if the current input was sufficiently predicted, OR if we started over on startCells. False indicates that the current input was NOT predicted, well enough to consider it as "inSequence" This looks at: - @ref lrnActiveState['t-1'] - @ref lrnPredictedState['t-1'] This modifies: - @ref lrnActiveState['t'] - @ref lrnActiveState['t-1']
def purge_old_user_tasks():
    """
    Delete any UserTaskStatus and UserTaskArtifact records older than
    ``settings.USER_TASKS_MAX_AGE``.

    Intended to be run as a scheduled task.
    """
    cutoff = now() - settings.USER_TASKS_MAX_AGE
    # UserTaskArtifacts will also be removed via deletion cascading
    UserTaskStatus.objects.filter(created__lt=cutoff).delete()
Delete any UserTaskStatus and UserTaskArtifact records older than ``settings.USER_TASKS_MAX_AGE``. Intended to be run as a scheduled task.
def copy_ifcfg_file(source_interface, dest_interface): """Copies an existing ifcfg network script to another :param source_interface: String (e.g. 1) :param dest_interface: String (e.g. 0:0) :return: None :raises TypeError, OSError """ log = logging.getLogger(mod_logger + '.copy_ifcfg_file') # Validate args if not isinstance(source_interface, basestring): msg = 'source_interface argument must be a string' log.error(msg) raise TypeError(msg) if not isinstance(dest_interface, basestring): msg = 'dest_interface argument must be a string' log.error(msg) raise TypeError(msg) network_script = '/etc/sysconfig/network-scripts/ifcfg-eth' source_file = network_script + source_interface dest_file = network_script + dest_interface command = ['cp', '-f', source_file, dest_file] try: result = run_command(command) code = result['code'] except CommandError: _, ex, trace = sys.exc_info() msg = 'Unable to copy the ifcfg file from interface {s} to interface {d}\n{e}'.format( s=source_interface, d=dest_interface, e=str(ex)) raise OSError, msg, trace log.info('Copy command exited with code: {c}'.format(c=code)) if code != 0: msg = 'There was a problem copying file {s} file to {d}'.format(s=source, d=dest_file) log.error(msg) raise OSError(msg) # Updating the destination network script DEVICE property try: sed(file_path=dest_file, pattern='^DEVICE=.*', replace_str='DEVICE="eth{i}"'.format(i=dest_interface)) except CommandError: _, ex, trace = sys.exc_info() msg = 'Unable to update DEVICE in file: {d}\n{e}'.format( d=dest_file, e=str(ex)) log.error(msg) raise CommandError, msg, trace log.info('Successfully created file: {d}'.format(d=dest_file)) log.info('Restarting networking in 10 seconds to ensure the changes take effect...') time.sleep(10) retry_time = 10 max_retries = 10 for i in range(1, max_retries+2): if i > max_retries: msg = 'Unable to successfully start the networking service after {m} attempts'.format(m=max_retries) log.error(msg) raise OSError(msg) log.info('Attempting to 
restart the networking service, attempt #{i} of {m}'.format(i=i, m=max_retries)) try: service_network_restart() except CommandError: _, ex, trace = sys.exc_info() log.warn('Attempted unsuccessfully to restart networking on attempt #{i} of {m}, trying again in {t} ' 'seconds\n{e}'.format(i=i, m=max_retries, t=retry_time, e=str(ex))) time.sleep(retry_time) else: log.info('Successfully restarted networking') break log.info('Successfully configured interface: {d}'.format(d=dest_interface))
Copies an existing ifcfg network script to another :param source_interface: String (e.g. 1) :param dest_interface: String (e.g. 0:0) :return: None :raises TypeError, OSError
def fig_intro(params, ana_params, T=[800, 1000], fraction=0.05, rasterized=False):
    '''set up plot for introduction

    Builds a 4-panel figure: (A) point-neuron network sketch, (B) spike
    raster, (C) multicompartment-neuron population, (D) summed LFP traces,
    with arrows drawn between panels.

    :param params: parameter object; provides networkSimParams and savefolder
    :param ana_params: analysis/figure-style parameter object
    :param T: two-element time interval [ms] shown in raster and LFP panels
    :param fraction: fraction of cells drawn in the spike raster
    :param rasterized: rasterize heavy artists to keep vector output small
    :return: the assembled matplotlib figure
    '''
    ana_params.set_PLOS_2column_fig_style(ratio=0.5)

    # load spike output as a database-like object
    networkSim = CachedNetwork(**params.networkSimParams)
    # NOTE(review): reads the module-level `analysis_params`, not the
    # `ana_params` argument — confirm this is intended.
    if analysis_params.bw:
        networkSim.colors = phlp.get_colors(len(networkSim.X))

    # set up figure and subplots
    fig = plt.figure()
    gs = gridspec.GridSpec(3, 4)
    fig.subplots_adjust(left=0.05, right=0.95, wspace=0.5, hspace=0.)

    # panel A: network diagram
    ax0_1 = fig.add_subplot(gs[:, 0], frameon=False)
    ax0_1.set_title('point-neuron network', va='bottom')
    network_sketch(ax0_1, yscaling=1.3)
    ax0_1.xaxis.set_ticks([])
    ax0_1.yaxis.set_ticks([])
    phlp.annotate_subplot(ax0_1, ncols=4, nrows=1, letter='A', linear_offset=0.065)

    # panel B: network spike raster
    ax1 = fig.add_subplot(gs[:, 1], frameon=True)
    phlp.remove_axis_junk(ax1)
    phlp.annotate_subplot(ax1, ncols=4, nrows=1, letter='B', linear_offset=0.065)
    x, y = networkSim.get_xy(T, fraction=fraction)
    # networkSim.plot_raster(ax1, T, x, y, markersize=0.1, alpha=1.,legend=False, pop_names=True)
    networkSim.plot_raster(ax1, T, x, y, markersize=0.2, marker='_', alpha=1.,
                           legend=False, pop_names=True, rasterized=rasterized)
    ax1.set_ylabel('')
    ax1.xaxis.set_major_locator(plt.MaxNLocator(4))
    ax1.set_title('spiking activity', va='bottom')
    a = ax1.axis()
    # mark the first thalamic (TC) spike time with a vertical line
    ax1.vlines(x['TC'][0], a[2], a[3], 'k', lw=0.25)

    # panel C: population of multicompartment neurons
    ax2 = fig.add_subplot(gs[:, 2], frameon=False)
    ax2.xaxis.set_ticks([])
    ax2.yaxis.set_ticks([])
    plot_population(ax2, params, isometricangle=np.pi/24, plot_somas=False,
                    plot_morphos=True, num_unitsE=1, num_unitsI=1,
                    clip_dendrites=True, main_pops=True, title='',
                    rasterized=rasterized)
    ax2.set_title('multicompartment\nneurons', va='bottom', fontweight='normal')
    phlp.annotate_subplot(ax2, ncols=4, nrows=1, letter='C', linear_offset=0.065)

    # panel D: LFP traces in all channels, y-range matched to panel C
    ax3 = fig.add_subplot(gs[:, 3], frameon=True)
    phlp.remove_axis_junk(ax3)
    plot_signal_sum(ax3, params, fname=os.path.join(params.savefolder, 'LFPsum.h5'),
                    unit='mV', vlimround=0.8, T=T,
                    ylim=[ax2.axis()[2], ax2.axis()[3]],
                    rasterized=False)
    ax3.set_title('LFP', va='bottom')
    ax3.xaxis.set_major_locator(plt.MaxNLocator(4))
    phlp.annotate_subplot(ax3, ncols=4, nrows=1, letter='D', linear_offset=0.065)
    a = ax3.axis()
    ax3.vlines(x['TC'][0], a[2], a[3], 'k', lw=0.25)

    # draw some arrows between panels, in figure-fraction coordinates
    ax = plt.gca()
    ax.annotate("", xy=(0.27, 0.5), xytext=(.24, 0.5), xycoords="figure fraction",
                arrowprops=dict(facecolor='black', arrowstyle='simple'),
                )
    ax.annotate("", xy=(0.52, 0.5), xytext=(.49, 0.5), xycoords="figure fraction",
                arrowprops=dict(facecolor='black', arrowstyle='simple'),
                )
    ax.annotate("", xy=(0.78, 0.5), xytext=(.75, 0.5), xycoords="figure fraction",
                arrowprops=dict(facecolor='black', arrowstyle='simple'),
                )

    return fig
set up plot for introduction
def log_queries(recipe):
    """
    Logs recipe instance SQL queries (actually, only time).

    :param recipe: recipe instance whose badge ``slug`` labels the log line
    """
    # Sum the time of all queries Django recorded on the connection.
    # A generator expression avoids building a throwaway list.
    total_time = sum(float(q['time']) for q in connection.queries)
    logger.debug(
        '⚐ Badge %s: SQL queries time %.2f second(s)',
        recipe.slug, total_time)
Logs recipe instance SQL queries (actually, only time).
def move_part_instance(part_instance, target_parent, part_model, name=None, include_children=True):
    """
    Move the `Part` instance to target parent and updates the properties based on the original part instance.

    .. versionadded:: 2.3

    :param part_instance: `Part` object to be moved
    :type part_instance: :class:`Part`
    :param part_model: `Part` object representing the model of part_instance
    :type part_model: :class: `Part`
    :param target_parent: `Part` object under which the desired `Part` is moved
    :type target_parent: :class:`Part`
    :param name: how the moved top-level `Part` should be called
    :type name: basestring
    :param include_children: True to move also the descendants of `Part`. If False, the children will be lost.
    :type include_children: bool
    :return: moved :class: `Part` instance
    """
    # If no specific name has been required, keep the original instance name.
    if not name:
        name = part_instance.name

    # Retrieve the (already-moved) model corresponding to the original model.
    moved_model = get_mapping_dictionary()[part_model.id]

    # Now act based on multiplicity
    if moved_model.multiplicity == Multiplicity.ONE:
        # 'Exactly 1': the instance was automatically created with the model,
        # so retrieve it, map original->moved, and update name and properties.
        moved_instance = moved_model.instances(parent_id=target_parent.id)[0]
        map_property_instances(part_instance, moved_instance)
        moved_instance = update_part_with_properties(part_instance, moved_instance, name=str(name))
    elif moved_model.multiplicity == Multiplicity.ONE_MANY:
        # '1 or more': exactly one instance was auto-created with the model.
        # The first sibling reuses it; subsequent siblings under the same
        # parent must be created explicitly (tracked via get_edited_one_many).
        if target_parent.id not in get_edited_one_many():
            moved_instance = moved_model.instances(parent_id=target_parent.id)[0]
            map_property_instances(part_instance, moved_instance)
            moved_instance = update_part_with_properties(part_instance, moved_instance, name=str(name))
            get_edited_one_many().append(target_parent.id)
        else:
            moved_instance = target_parent.add(name=part_instance.name, model=moved_model,
                                               suppress_kevents=True)
            map_property_instances(part_instance, moved_instance)
            moved_instance = update_part_with_properties(part_instance, moved_instance, name=str(name))
    else:
        # '0 or more' / '0 or 1': nothing was created automatically with the
        # model, so create the instance first and then update it.
        moved_instance = target_parent.add(name=name, model=moved_model, suppress_kevents=True)
        map_property_instances(part_instance, moved_instance)
        moved_instance = update_part_with_properties(part_instance, moved_instance, name=str(name))

    # Recursively move every descendant, keeping each sub-instance's own name.
    # NOTE(review): relies on part_instance._cached_children being populated
    # by an earlier retrieval — confirm against the caller.
    if include_children:
        for sub_instance in part_instance._cached_children:
            move_part_instance(part_instance=sub_instance, target_parent=moved_instance,
                               part_model=sub_instance.model(), name=sub_instance.name,
                               include_children=True)

    return moved_instance
Move the `Part` instance to target parent and updates the properties based on the original part instance. .. versionadded:: 2.3 :param part_instance: `Part` object to be moved :type part_instance: :class:`Part` :param part_model: `Part` object representing the model of part_instance :type part_model: :class: `Part` :param target_parent: `Part` object under which the desired `Part` is moved :type target_parent: :class:`Part` :param name: how the moved top-level `Part` should be called :type name: basestring :param include_children: True to move also the descendants of `Part`. If False, the children will be lost. :type include_children: bool :return: moved :class: `Part` instance
def replace_free_shipping_promotion_by_id(cls, free_shipping_promotion_id, free_shipping_promotion, **kwargs):
    """Replace FreeShippingPromotion

    Replace all attributes of FreeShippingPromotion
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_free_shipping_promotion_by_id(free_shipping_promotion_id, free_shipping_promotion, async=True)
    >>> result = thread.get()

    :param async bool
    :param str free_shipping_promotion_id: ID of freeShippingPromotion to replace (required)
    :param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to replace (required)
    :return: FreeShippingPromotion
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers always receive deserialized data, never the raw HTTP response.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('async'):
        # Synchronous path: unwrap the response data before returning.
        data = cls._replace_free_shipping_promotion_by_id_with_http_info(
            free_shipping_promotion_id, free_shipping_promotion, **kwargs)
        return data
    # Asynchronous path: hand the request thread back to the caller.
    return cls._replace_free_shipping_promotion_by_id_with_http_info(
        free_shipping_promotion_id, free_shipping_promotion, **kwargs)
Replace FreeShippingPromotion Replace all attributes of FreeShippingPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_free_shipping_promotion_by_id(free_shipping_promotion_id, free_shipping_promotion, async=True) >>> result = thread.get() :param async bool :param str free_shipping_promotion_id: ID of freeShippingPromotion to replace (required) :param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to replace (required) :return: FreeShippingPromotion If the method is called asynchronously, returns the request thread.
def FindSolFile(shot=0, t=0, Dt=None, Mesh='Rough1', Deg=2, Deriv='D2N2', Sep=True, Pos=True,
                OutPath='/afs/ipp-garching.mpg.de/home/d/didiv/Python/tofu/src/Outputs_AUG/'):
    """ Identify the good Sol2D saved file in a given folder (OutPath), based on key ToFu criteria

    When trying to load a Sol2D object (i.e.: solution of a tomographic inversion),
    it may be handy to provide the key parameters (shot, time, mesh name, degree of
    basis functions, regularisation functional) instead of copy-pasting the full file name.
    This function identifies, within the relevant repository (OutPath), the files matching
    the provided criteria.
    This function only works if the automatically generated default SaveName was
    preserved for the Sol2D objects.

    Parameters
    ----------
    shot :      int
        A shot number
    t :         None / int / float
        A time value that must be contained in the time interval of the Sol2D file,
        must be provided if Dt is None
    Dt :        None / iterable
        A time interval that the Sol2D file has to match, must be provided if t is None
    Mesh :      str
        The name of the mesh that was used to compute the inversion
    Deg :       int
        The degree of the b-splines (LBF2D object) that were used to discretize the solution
    Deriv :     str
        The flag indicating the regularization functional that was used for the inversion
    Sep :       bool
        The flag value that was used for indicating whether the boundary constraint
        at the separatrix should be considered
    Pos :       bool
        The flag value that was used for indicating whether the positivity constraint
        was considered
    OutPath :   str
        The absolute path of the repository where to look

    Returns
    -------
    out :       None / str
        The matching file name, if any
    """
    assert None in [t,Dt] and not (t is None and Dt is None), "Arg t or Dt must be None, but not both !"

    # Pre-filter on file-name fragments encoding shot, mesh, degree and flags.
    LF = [ff for ff in os.listdir(OutPath) if 'TFI_Sol2D_AUG_SXR' in ff]
    LF = [ff for ff in LF if all([ss in ff for ss in
          ['_'+str(shot)+'_', '_'+Mesh+'_D'+str(Deg),
           '_Deriv'+Deriv+'_Sep'+str(Sep)+'_Pos'+str(Pos)]])]
    if len(LF)==0:
        print("No matching Sol2D file in ", OutPath)
        # BUGFIX: previously fell through without returning, so the warning
        # was printed a second time by the check below before returning None.
        return None

    # Extract the 'Dt<start>-<end>s_' time-interval substring of each name.
    LDTstr = [ff[ff.index('_Dt')+3:ff.index('s_')] for ff in LF]
    LDTstr = [(ss[:7],ss[8:]) for ss in LDTstr]
    if t is None:
        # Exact interval match against the zero-padded Dt encoding.
        LF = [LF[ii] for ii in range(0,len(LF))
              if LDTstr[ii][0]+'-'+LDTstr[ii][1]=='{0:07.4f}-{1:07.4f}'.format(Dt[0],Dt[1])]
    elif Dt is None:
        # Keep files whose saved interval contains the requested time point.
        LF = [LF[ii] for ii in range(0,len(LF))
              if t>=float(LDTstr[ii][0]) and t<=float(LDTstr[ii][1])]

    if len(LF)==0:
        print("No matching Sol2D file in ", OutPath)
        out = None
    elif len(LF)>1:
        print("Several matching Sol2D files in ", OutPath)
        print(LF)
        out = None
    else:
        out = LF[0]
    return out
Identify the good Sol2D saved file in a given folder (OutPath), based on key ToFu criteria When trying to load a Sol2D object (i.e.: solution of a tomographic inversion), it may be handy to provide the key parameters (shot, time, mesh name, degree of basis functions, regularisation functional) instead of copy-pasting the full file name. This function identifies, within the relevant repository (OutPath), the files matching the provided criteria. This function only works if the automatically generated default SaveName was preserved for the Sol2D objects. Parameters ---------- shot : int A shot number t : None / int / float A time value that must be contained in the time interval of the Sol2D file, must be provided if Dt is None Dt : None / iterable A time interval that the Sol2D file has to match, must be provided if t is None Mesh : str The name of the mesh that was used to compute the inversion Deg : int The degree of the b-splines (LBF2D object) that were used to discretize the solution Deriv : str The flag indicating the regularization functional that was used for the inversion Sep : bool The flag value that was used for indicating whether the boundary constraint at the separatrix should be considered Pos : bool The flag value that was used for indicating whether the positivity constraint was considered Outpath : str The absolute path of the repository where to look Returns ------- out : None / str The matching file name, if any
def invert_inventory(inventory):
    """Return {item: binding} from {binding: item}

    Protect against items with additional metadata
    and items whose type is a number

    Returns:
        Dictionary of inverted inventory
    """
    flipped = dict()
    for binding, items in inventory.iteritems():
        for entry in items:
            # Entries carrying metadata arrive as single-key dicts;
            # only the key identifies the item.
            if isinstance(entry, dict):
                entry = entry.keys()[0]
            # Keys may be numbers — normalise everything to a string.
            entry = str(entry)
            if entry in flipped:
                echo("Warning: Duplicate item found, "
                     "for \"%s: %s\"" % (binding, entry))
                continue
            flipped[entry] = binding
    return flipped
Return {item: binding} from {binding: item} Protect against items with additional metadata and items whose type is a number Returns: Dictionary of inverted inventory
def get_std_end_date(self):
    """
    If the date is custom, return the end datetime with the format
    %Y-%m-%d %H:%M:%S. Else, returns "".
    """
    _, end = self._val
    # datetime.max is the sentinel meaning "no custom end date was set".
    if end == datetime.max:
        return ""
    return end.strftime("%Y-%m-%d %H:%M:%S")
If the date is custom, return the end datetime with the format %Y-%m-%d %H:%M:%S. Else, returns "".
def stringify(req, resp):
    """
    dumps all valid jsons

    This is the last 'after' hook to run.

    :param req: Falcon request object (unused)
    :param resp: Falcon response; a dict body is serialized to a JSON string,
        and a serialization failure sets HTTP 500 on the response.
    """
    if isinstance(resp.body, dict):
        try:
            resp.body = json.dumps(resp.body)
        except (TypeError, ValueError):
            # BUGFIX: the original wrote `except(nameError)` — an undefined
            # name, so any dumps failure raised NameError instead of setting
            # the status. json.dumps signals unserializable content with
            # TypeError (and ValueError for e.g. circular references).
            resp.status = falcon.HTTP_500
dumps all valid JSON response bodies This is the last after hook to run
def refresh(self):
    """Obtain a new access token."""
    # The installed-client grant authenticates the app installation itself
    # (identified by its device id) rather than a user account.
    self._request_token(
        grant_type="https://oauth.reddit.com/grants/installed_client",
        device_id=self._device_id,
    )
Obtain a new access token.
def log_event(cls, event, text = None):
    """
    Log lines of text associated with a debug event.

    @type  event: L{Event}
    @param event: Event object.

    @type  text: str
    @param text: (Optional) Text to log. If no text is provided the
        default is to show a description of the event itself.

    @rtype:  str
    @return: Log line.
    """
    if not text:
        # Build a default description from the event itself.
        if event.get_event_code() == win32.EXCEPTION_DEBUG_EVENT:
            what = event.get_exception_description()
            if event.is_first_chance():
                what = '%s (first chance)' % what
            else:
                what = '%s (second chance)' % what
            # Prefer the faulting address; not every exception type has one.
            try:
                address = event.get_fault_address()
            except NotImplementedError:
                address = event.get_exception_address()
        else:
            what = event.get_event_name()
            address = event.get_thread().get_pc()
        process = event.get_process()
        # Resolve a symbolic label for the address when one is known.
        label = process.get_label_at_address(address)
        address = HexDump.address(address, process.get_bits())
        if label:
            where = '%s (%s)' % (address, label)
        else:
            where = address
        text = '%s at %s' % (what, where)
    # Prefix every line with the process and thread that produced the event.
    text = 'pid %d tid %d: %s' % (event.get_pid(), event.get_tid(), text)
    #text = 'pid %d tid %d:\t%s' % (event.get_pid(), event.get_tid(), text)     # text CSV
    return cls.log_text(text)
Log lines of text associated with a debug event. @type event: L{Event} @param event: Event object. @type text: str @param text: (Optional) Text to log. If no text is provided the default is to show a description of the event itself. @rtype: str @return: Log line.
def memoize(max_cache_size=1000):
    """Python 2.4 compatible memoize decorator.

    It creates a cache that has a maximum size. If the cache exceeds the max,
    it is thrown out and a new one made. With such behavior, it is wise to set
    the cache just a little larger that the maximum expected need.

    Parameters:
        max_cache_size - the size to which a cache can grow
    """
    def wrapper(f):
        @wraps(f)
        def fn(*args, **kwargs):
            # Keyword arguments are folded into the key only when present,
            # so purely positional calls keep the cheaper tuple key.
            key = (args, tuple(kwargs.items())) if kwargs else args
            try:
                return fn.cache[key]
            except KeyError:
                # Cache miss: once the cap is reached, throw the whole
                # cache away rather than evicting entries one by one.
                if fn.count >= max_cache_size:
                    fn.cache = {}
                    fn.count = 0
                value = f(*args, **kwargs)
                fn.cache[key] = value
                fn.count += 1
                return value
            except TypeError:
                # Unhashable arguments cannot be cached; call through.
                return f(*args, **kwargs)
        fn.cache = {}
        fn.count = 0
        return fn
    return wrapper
Python 2.4 compatible memoize decorator. It creates a cache that has a maximum size. If the cache exceeds the max, it is thrown out and a new one made. With such behavior, it is wise to set the cache just a little larger that the maximum expected need. Parameters: max_cache_size - the size to which a cache can grow
def eth_getTransactionByBlockNumberAndIndex(self, block=BLOCK_TAG_LATEST, index=0):
    """https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblocknumberandindex

    Fetch the transaction at a given position within a block.

    :param block: Block tag or number (optional)
    :type block: int or BLOCK_TAGS

    :param index: Index position (optional)
    :type index: int

    :return: transaction
    :rtype: dict or None
    """
    # Normalise the tag/number into the form the JSON-RPC endpoint expects.
    block = validate_block(block)
    # The index must be hex-encoded per the JSON-RPC specification.
    result = yield from self.rpc_call('eth_getTransactionByBlockNumberAndIndex',
                                      [block, hex(index)])
    # TODO: Update result response
    return result
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblocknumberandindex :param block: Block tag or number (optional) :type block: int or BLOCK_TAGS :param index: Index position (optional) :type index: int :return: transaction :rtype: dict or None