Dataset columns (type and value-length statistics):

  nwo                string, length 5 to 106
  sha                string, length 40
  path               string, length 4 to 174
  language           string, 1 distinct value
  identifier         string, length 1 to 140
  parameters         string, length 0 to 87.7k
  argument_list      string, 1 distinct value
  return_statement   string, length 0 to 426k
  docstring          string, length 0 to 64.3k
  docstring_summary  string, length 0 to 26.3k
  docstring_tokens   list
  function           string, length 18 to 4.83M
  function_tokens    list
  url                string, length 83 to 304
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/backends/qt_editor/figureoptions.py
python
figure_edit
(axes, parent=None)
Edit matplotlib figure options
Edit matplotlib figure options
[ "Edit", "matplotlib", "figure", "options" ]
def figure_edit(axes, parent=None): """Edit matplotlib figure options""" sep = (None, None) # separator # Get / General # Cast to builtin floats as they have nicer reprs. xmin, xmax = map(float, axes.get_xlim()) ymin, ymax = map(float, axes.get_ylim()) general = [('Title', axes.get_title()), sep, (None, "<b>X-Axis</b>"), ('Left', xmin), ('Right', xmax), ('Label', axes.get_xlabel()), ('Scale', [axes.get_xscale(), 'linear', 'log', 'logit']), sep, (None, "<b>Y-Axis</b>"), ('Bottom', ymin), ('Top', ymax), ('Label', axes.get_ylabel()), ('Scale', [axes.get_yscale(), 'linear', 'log', 'logit']), sep, ('(Re-)Generate automatic legend', False), ] # Save the unit data xconverter = axes.xaxis.converter yconverter = axes.yaxis.converter xunits = axes.xaxis.get_units() yunits = axes.yaxis.get_units() # Sorting for default labels (_lineXXX, _imageXXX). def cmp_key(label): match = re.match(r"(_line|_image)(\d+)", label) if match: return match.group(1), int(match.group(2)) else: return label, 0 # Get / Curves linedict = {} for line in axes.get_lines(): label = line.get_label() if label == '_nolegend_': continue linedict[label] = line curves = [] def prepare_data(d, init): """Prepare entry for FormLayout. `d` is a mapping of shorthands to style names (a single style may have multiple shorthands, in particular the shorthands `None`, `"None"`, `"none"` and `""` are synonyms); `init` is one shorthand of the initial style. This function returns an list suitable for initializing a FormLayout combobox, namely `[initial_name, (shorthand, style_name), (shorthand, style_name), ...]`. """ if init not in d: d = {**d, init: str(init)} # Drop duplicate shorthands from dict (by overwriting them during # the dict comprehension). name2short = {name: short for short, name in d.items()} # Convert back to {shorthand: name}. short2name = {short: name for name, short in name2short.items()} # Find the kept shorthand for the style specified by init. canonical_init = name2short[d[init]] # Sort by representation and prepend the initial value. return ([canonical_init] + sorted(short2name.items(), key=lambda short_and_name: short_and_name[1])) curvelabels = sorted(linedict, key=cmp_key) for label in curvelabels: line = linedict[label] color = mcolors.to_hex( mcolors.to_rgba(line.get_color(), line.get_alpha()), keep_alpha=True) ec = mcolors.to_hex( mcolors.to_rgba(line.get_markeredgecolor(), line.get_alpha()), keep_alpha=True) fc = mcolors.to_hex( mcolors.to_rgba(line.get_markerfacecolor(), line.get_alpha()), keep_alpha=True) curvedata = [ ('Label', label), sep, (None, '<b>Line</b>'), ('Line style', prepare_data(LINESTYLES, line.get_linestyle())), ('Draw style', prepare_data(DRAWSTYLES, line.get_drawstyle())), ('Width', line.get_linewidth()), ('Color (RGBA)', color), sep, (None, '<b>Marker</b>'), ('Style', prepare_data(MARKERS, line.get_marker())), ('Size', line.get_markersize()), ('Face color (RGBA)', fc), ('Edge color (RGBA)', ec)] curves.append([curvedata, label, ""]) # Is there a curve displayed? 
has_curve = bool(curves) # Get / Images imagedict = {} for image in axes.get_images(): label = image.get_label() if label == '_nolegend_': continue imagedict[label] = image imagelabels = sorted(imagedict, key=cmp_key) images = [] cmaps = [(cmap, name) for name, cmap in sorted(cm.cmap_d.items())] for label in imagelabels: image = imagedict[label] cmap = image.get_cmap() if cmap not in cm.cmap_d.values(): cmaps = [(cmap, cmap.name)] + cmaps low, high = image.get_clim() imagedata = [ ('Label', label), ('Colormap', [cmap.name] + cmaps), ('Min. value', low), ('Max. value', high), ('Interpolation', [image.get_interpolation()] + [(name, name) for name in sorted(mimage.interpolations_names)])] images.append([imagedata, label, ""]) # Is there an image displayed? has_image = bool(images) datalist = [(general, "Axes", "")] if curves: datalist.append((curves, "Curves", "")) if images: datalist.append((images, "Images", "")) def apply_callback(data): """This function will be called to apply changes""" orig_xlim = axes.get_xlim() orig_ylim = axes.get_ylim() general = data.pop(0) curves = data.pop(0) if has_curve else [] images = data.pop(0) if has_image else [] if data: raise ValueError("Unexpected field") # Set / General (title, xmin, xmax, xlabel, xscale, ymin, ymax, ylabel, yscale, generate_legend) = general if axes.get_xscale() != xscale: axes.set_xscale(xscale) if axes.get_yscale() != yscale: axes.set_yscale(yscale) axes.set_title(title) axes.set_xlim(xmin, xmax) axes.set_xlabel(xlabel) axes.set_ylim(ymin, ymax) axes.set_ylabel(ylabel) # Restore the unit data axes.xaxis.converter = xconverter axes.yaxis.converter = yconverter axes.xaxis.set_units(xunits) axes.yaxis.set_units(yunits) axes.xaxis._update_axisinfo() axes.yaxis._update_axisinfo() # Set / Curves for index, curve in enumerate(curves): line = linedict[curvelabels[index]] (label, linestyle, drawstyle, linewidth, color, marker, markersize, markerfacecolor, markeredgecolor) = curve line.set_label(label) line.set_linestyle(linestyle) line.set_drawstyle(drawstyle) line.set_linewidth(linewidth) rgba = mcolors.to_rgba(color) line.set_alpha(None) line.set_color(rgba) if not isinstance(marker, str) or marker != 'none': line.set_marker(marker) line.set_markersize(markersize) line.set_markerfacecolor(markerfacecolor) line.set_markeredgecolor(markeredgecolor) # Set / Images for index, image_settings in enumerate(images): image = imagedict[imagelabels[index]] label, cmap, low, high, interpolation = image_settings image.set_label(label) image.set_cmap(cm.get_cmap(cmap)) image.set_clim(*sorted([low, high])) image.set_interpolation(interpolation) # re-generate legend, if checkbox is checked if generate_legend: draggable = None ncol = 1 if axes.legend_ is not None: old_legend = axes.get_legend() draggable = old_legend._draggable is not None ncol = old_legend._ncol new_legend = axes.legend(ncol=ncol) if new_legend: new_legend.set_draggable(draggable) # Redraw figure = axes.get_figure() figure.canvas.draw() if not (axes.get_xlim() == orig_xlim and axes.get_ylim() == orig_ylim): figure.canvas.toolbar.push_current() data = formlayout.fedit(datalist, title="Figure options", parent=parent, icon=get_icon('qt4_editor_options.svg'), apply=apply_callback) if data is not None: apply_callback(data)
[ "def", "figure_edit", "(", "axes", ",", "parent", "=", "None", ")", ":", "sep", "=", "(", "None", ",", "None", ")", "# separator", "# Get / General", "# Cast to builtin floats as they have nicer reprs.", "xmin", ",", "xmax", "=", "map", "(", "float", ",", "axe...
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/backends/qt_editor/figureoptions.py#L38-L257
nubank/fklearn
aa558fbce8aa10a20f1043c6b9954dec85800ddd
src/fklearn/training/unsupervised.py
python
isolation_forest_learner
(df: pd.DataFrame, features: List[str], params: Dict[str, Any] = None, prediction_column: str = "prediction", encode_extra_cols: bool = True)
return p, p(df), log
Fits an anomaly detection algorithm (Isolation Forest) to the dataset Parameters ---------- df : pandas.DataFrame A Pandas' DataFrame with features and target columns. The model will be trained to predict the target column from the features. features : list of str A list of column names that are used as features for the model. All these names should be in `df`. params : dict The IsolationForest parameters in the format {"par_name": param}. See: http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html prediction_column : str The name of the column with the predictions from the model. encode_extra_cols : bool (default: True) If True, treats all columns in `df` with name pattern `fklearn_feat__col==val` as feature columns.
Fits an anomaly detection algorithm (Isolation Forest) to the dataset
[ "Fits", "an", "anomaly", "detection", "algorithm", "(", "Isolation", "Forest", ")", "to", "the", "dataset" ]
def isolation_forest_learner(df: pd.DataFrame, features: List[str], params: Dict[str, Any] = None, prediction_column: str = "prediction", encode_extra_cols: bool = True) -> LearnerReturnType: """ Fits an anomaly detection algorithm (Isolation Forest) to the dataset Parameters ---------- df : pandas.DataFrame A Pandas' DataFrame with features and target columns. The model will be trained to predict the target column from the features. features : list of str A list os column names that are used as features for the model. All this names should be in `df`. params : dict The IsolationForest parameters in the format {"par_name": param}. See: http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html prediction_column : str The name of the column with the predictions from the model. encode_extra_cols : bool (default: True) If True, treats all columns in `df` with name pattern fklearn_feat__col==val` as feature columns. """ default_params = {"n_jobs": -1, "random_state": 1729, "contamination": 0.1, "behaviour": "new"} params = default_params if not params else merge(default_params, params) features = features if not encode_extra_cols else expand_features_encoded(df, features) model = IsolationForest() model.set_params(**params) model.fit(df[features].values) def p(new_df: pd.DataFrame) -> pd.DataFrame: output_col = {prediction_column: model.decision_function( new_df[features])} return new_df.assign(**output_col) p.__doc__ = learner_pred_fn_docstring("isolation_forest_learner") log = {'isolation_forest_learner': { 'features': features, 'parameters': params, 'prediction_column': prediction_column, 'package': "sklearn", 'package_version': sklearn.__version__, 'training_samples': len(df)}} return p, p(df), log
[ "def", "isolation_forest_learner", "(", "df", ":", "pd", ".", "DataFrame", ",", "features", ":", "List", "[", "str", "]", ",", "params", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", "prediction_column", ":", "str", "=", "\"prediction\"", ...
https://github.com/nubank/fklearn/blob/aa558fbce8aa10a20f1043c6b9954dec85800ddd/src/fklearn/training/unsupervised.py#L15-L70
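A minimal usage sketch for the isolation_forest_learner row above (not part of the dataset). The toy DataFrame and feature names are invented for illustration; the import path follows the file path in the row.

import pandas as pd
from fklearn.training.unsupervised import isolation_forest_learner

# Hypothetical data: two numeric feature columns, one obvious outlier in the last row.
df = pd.DataFrame({"x1": [0.1, 0.2, 0.15, 9.0], "x2": [1.0, 1.1, 0.9, -8.0]})

# The learner returns the prediction function, the scored training set and a log dict.
predict_fn, scored_df, log = isolation_forest_learner(df, features=["x1", "x2"])
print(scored_df["prediction"])  # decision_function scores; lower means more anomalous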
apache/bloodhound
c3e31294e68af99d4e040e64fbdf52394344df9e
trac/tracopt/versioncontrol/svn/svn_fs.py
python
SubversionRepository.get_path_url
(self, path, rev)
Retrieve the "native" URL from which this repository is reachable from Subversion clients.
Retrieve the "native" URL from which this repository is reachable from Subversion clients.
[ "Retrieve", "the", "native", "URL", "from", "which", "this", "repository", "is", "reachable", "from", "Subversion", "clients", "." ]
def get_path_url(self, path, rev): """Retrieve the "native" URL from which this repository is reachable from Subversion clients. """ url = self.params.get('url', '').rstrip('/') if url: if not path or path == '/': return url return url + '/' + path.lstrip('/')
[ "def", "get_path_url", "(", "self", ",", "path", ",", "rev", ")", ":", "url", "=", "self", ".", "params", ".", "get", "(", "'url'", ",", "''", ")", ".", "rstrip", "(", "'/'", ")", "if", "url", ":", "if", "not", "path", "or", "path", "==", "'/'"...
https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/svn/svn_fs.py#L453-L461
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/nova/nova/availability_zones.py
python
get_availability_zones
(context)
return (available_zones, not_available_zones)
Return available and unavailable zones.
Return available and unavailable zones.
[ "Return", "available", "and", "unavailable", "zones", "." ]
def get_availability_zones(context): """Return available and unavailable zones.""" enabled_services = db.service_get_all(context, False) disabled_services = db.service_get_all(context, True) enabled_services = set_availability_zones(context, enabled_services) disabled_services = set_availability_zones(context, disabled_services) available_zones = [] for zone in [service['availability_zone'] for service in enabled_services]: if zone not in available_zones: available_zones.append(zone) not_available_zones = [] zones = [service['availability_zone'] for service in disabled_services if service['availability_zone'] not in available_zones] for zone in zones: if zone not in not_available_zones: not_available_zones.append(zone) return (available_zones, not_available_zones)
[ "def", "get_availability_zones", "(", "context", ")", ":", "enabled_services", "=", "db", ".", "service_get_all", "(", "context", ",", "False", ")", "disabled_services", "=", "db", ".", "service_get_all", "(", "context", ",", "True", ")", "enabled_services", "="...
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/nova/nova/availability_zones.py#L69-L88
filipradenovic/cnnimageretrieval-pytorch
c5368dfbbfe0286f536e374a4a35ff89578ef2e5
cirtorch/utils/download.py
python
download_train
(data_dir)
DOWNLOAD_TRAIN Checks, and, if required, downloads the necessary datasets for the training. download_train(DATA_ROOT) checks if the data necessary for running the example script exist. If not it downloads it in the folder structure: DATA_ROOT/train/retrieval-SfM-120k/ : folder with rsfm120k images and db files DATA_ROOT/train/retrieval-SfM-30k/ : folder with rsfm30k images and db files
DOWNLOAD_TRAIN Checks, and, if required, downloads the necessary datasets for the training. download_train(DATA_ROOT) checks if the data necessary for running the example script exist. If not it downloads it in the folder structure: DATA_ROOT/train/retrieval-SfM-120k/ : folder with rsfm120k images and db files DATA_ROOT/train/retrieval-SfM-30k/ : folder with rsfm30k images and db files
[ "DOWNLOAD_TRAIN", "Checks", "and", "if", "required", "downloads", "the", "necessary", "datasets", "for", "the", "training", ".", "download_train", "(", "DATA_ROOT", ")", "checks", "if", "the", "data", "necessary", "for", "running", "the", "example", "script", "e...
def download_train(data_dir): """ DOWNLOAD_TRAIN Checks, and, if required, downloads the necessary datasets for the training. download_train(DATA_ROOT) checks if the data necessary for running the example script exist. If not it downloads it in the folder structure: DATA_ROOT/train/retrieval-SfM-120k/ : folder with rsfm120k images and db files DATA_ROOT/train/retrieval-SfM-30k/ : folder with rsfm30k images and db files """ # Create data folder if it does not exist if not os.path.isdir(data_dir): os.mkdir(data_dir) # Create datasets folder if it does not exist datasets_dir = os.path.join(data_dir, 'train') if not os.path.isdir(datasets_dir): os.mkdir(datasets_dir) # Download folder train/retrieval-SfM-120k/ src_dir = os.path.join('http://cmp.felk.cvut.cz/cnnimageretrieval/data', 'train', 'ims') dst_dir = os.path.join(datasets_dir, 'retrieval-SfM-120k', 'ims') dl_file = 'ims.tar.gz' if not os.path.isdir(dst_dir): src_file = os.path.join(src_dir, dl_file) dst_file = os.path.join(dst_dir, dl_file) print('>> Image directory does not exist. Creating: {}'.format(dst_dir)) os.makedirs(dst_dir) print('>> Downloading ims.tar.gz...') os.system('wget {} -O {}'.format(src_file, dst_file)) print('>> Extracting {}...'.format(dst_file)) os.system('tar -zxf {} -C {}'.format(dst_file, dst_dir)) print('>> Extracted, deleting {}...'.format(dst_file)) os.system('rm {}'.format(dst_file)) # Create symlink for train/retrieval-SfM-30k/ dst_dir_old = os.path.join(datasets_dir, 'retrieval-SfM-120k', 'ims') dst_dir = os.path.join(datasets_dir, 'retrieval-SfM-30k', 'ims') if not os.path.isdir(dst_dir): os.makedirs(os.path.join(datasets_dir, 'retrieval-SfM-30k')) os.system('ln -s {} {}'.format(dst_dir_old, dst_dir)) print('>> Created symbolic link from retrieval-SfM-120k/ims to retrieval-SfM-30k/ims') # Download db files src_dir = os.path.join('http://cmp.felk.cvut.cz/cnnimageretrieval/data', 'train', 'dbs') datasets = ['retrieval-SfM-120k', 'retrieval-SfM-30k'] for dataset in datasets: dst_dir = os.path.join(datasets_dir, dataset) if dataset == 'retrieval-SfM-120k': dl_files = ['{}.pkl'.format(dataset), '{}-whiten.pkl'.format(dataset)] elif dataset == 'retrieval-SfM-30k': dl_files = ['{}-whiten.pkl'.format(dataset)] if not os.path.isdir(dst_dir): print('>> Dataset directory does not exist. Creating: {}'.format(dst_dir)) os.mkdir(dst_dir) for i in range(len(dl_files)): src_file = os.path.join(src_dir, dl_files[i]) dst_file = os.path.join(dst_dir, dl_files[i]) if not os.path.isfile(dst_file): print('>> DB file {} does not exist. Downloading...'.format(dl_files[i])) os.system('wget {} -O {}'.format(src_file, dst_file))
[ "def", "download_train", "(", "data_dir", ")", ":", "# Create data folder if it does not exist", "if", "not", "os", ".", "path", ".", "isdir", "(", "data_dir", ")", ":", "os", ".", "mkdir", "(", "data_dir", ")", "# Create datasets folder if it does not exist", "data...
https://github.com/filipradenovic/cnnimageretrieval-pytorch/blob/c5368dfbbfe0286f536e374a4a35ff89578ef2e5/cirtorch/utils/download.py#L90-L152
nltk/nltk_contrib
c9da2c29777ca9df650740145f1f4a375ccac961
nltk_contrib/hadoop/hadooplib/mapper.py
python
MapperBase.__init__
(self)
set the default input formatter and output collector
set the default input formatter and output collector
[ "set", "the", "default", "input", "formatter", "and", "output", "collector" ]
def __init__(self): """ set the default input formatter and output collector """ self.inputformat = TextLineInput self.outputcollector = LineOutput
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "inputformat", "=", "TextLineInput", "self", ".", "outputcollector", "=", "LineOutput" ]
https://github.com/nltk/nltk_contrib/blob/c9da2c29777ca9df650740145f1f4a375ccac961/nltk_contrib/hadoop/hadooplib/mapper.py#L13-L18
Blizzard/s2protocol
4bfe857bb832eee12cc6307dd699e3b74bd7e1b2
s2protocol/versions/protocol28272.py
python
decode_replay_details
(contents)
return decoder.instance(game_details_typeid)
Decodes and returns the game details from the contents byte string.
Decodes and returns the game details from the contents byte string.
[ "Decodes", "and", "returns", "the", "game", "details", "from", "the", "contents", "byte", "string", "." ]
def decode_replay_details(contents): """Decodes and returns the game details from the contents byte string.""" decoder = VersionedDecoder(contents, typeinfos) return decoder.instance(game_details_typeid)
[ "def", "decode_replay_details", "(", "contents", ")", ":", "decoder", "=", "VersionedDecoder", "(", "contents", ",", "typeinfos", ")", "return", "decoder", ".", "instance", "(", "game_details_typeid", ")" ]
https://github.com/Blizzard/s2protocol/blob/4bfe857bb832eee12cc6307dd699e3b74bd7e1b2/s2protocol/versions/protocol28272.py#L428-L431
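A hedged usage sketch for the decode_replay_details row above (not part of the dataset). It assumes the replay archive is read with the separate mpyq package, as in the s2protocol examples; the replay file name is a placeholder.

import mpyq
from s2protocol.versions import protocol28272

archive = mpyq.MPQArchive("replay.SC2Replay")    # placeholder file name
contents = archive.read_file("replay.details")   # raw bytes of the details blob
details = protocol28272.decode_replay_details(contents)
print(details["m_title"])  # assumed field name; the map title stored in the details struct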
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
src/oci/data_catalog/data_catalog_client.py
python
DataCatalogClient.get_term
(self, catalog_id, glossary_key, term_key, **kwargs)
Gets a specific glossary term by key. :param str catalog_id: (required) Unique catalog identifier. :param str glossary_key: (required) Unique glossary key. :param str term_key: (required) Unique glossary term key. :param list[str] fields: (optional) Specifies the fields to return in a term response. Allowed values are: "key", "displayName", "description", "glossaryKey", "parentTermKey", "isAllowedToHaveChildTerms", "path", "lifecycleState", "timeCreated", "timeUpdated", "createdById", "updatedById", "owner", "workflowStatus", "uri", "relatedTerms", "associatedObjectCount", "associatedObjects" :param str opc_request_id: (optional) The client request ID for tracing. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation uses :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` as default if no retry strategy is provided. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.Term` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/datacatalog/get_term.py.html>`__ to see an example of how to use get_term API.
Gets a specific glossary term by key.
[ "Gets", "a", "specific", "glossary", "term", "by", "key", "." ]
def get_term(self, catalog_id, glossary_key, term_key, **kwargs): """ Gets a specific glossary term by key. :param str catalog_id: (required) Unique catalog identifier. :param str glossary_key: (required) Unique glossary key. :param str term_key: (required) Unique glossary term key. :param list[str] fields: (optional) Specifies the fields to return in a term response. Allowed values are: "key", "displayName", "description", "glossaryKey", "parentTermKey", "isAllowedToHaveChildTerms", "path", "lifecycleState", "timeCreated", "timeUpdated", "createdById", "updatedById", "owner", "workflowStatus", "uri", "relatedTerms", "associatedObjectCount", "associatedObjects" :param str opc_request_id: (optional) The client request ID for tracing. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation uses :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` as default if no retry strategy is provided. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.Term` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/datacatalog/get_term.py.html>`__ to see an example of how to use get_term API. """ resource_path = "/catalogs/{catalogId}/glossaries/{glossaryKey}/terms/{termKey}" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "fields", "opc_request_id" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "get_term got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "catalogId": catalog_id, "glossaryKey": glossary_key, "termKey": term_key } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) if 'fields' in kwargs: fields_allowed_values = ["key", "displayName", "description", "glossaryKey", "parentTermKey", "isAllowedToHaveChildTerms", "path", "lifecycleState", "timeCreated", "timeUpdated", "createdById", "updatedById", "owner", "workflowStatus", "uri", "relatedTerms", "associatedObjectCount", "associatedObjects"] for fields_item in kwargs['fields']: if fields_item not in fields_allowed_values: raise ValueError( "Invalid value for `fields`, must be one of {0}".format(fields_allowed_values) ) query_params = { "fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi') } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json", "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.base_client.get_preferred_retry_strategy( operation_retry_strategy=kwargs.get('retry_strategy'), 
client_retry_strategy=self.retry_strategy ) if retry_strategy is None: retry_strategy = retry.DEFAULT_RETRY_STRATEGY if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_client_retries_header(header_params) retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, query_params=query_params, header_params=header_params, response_type="Term") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, query_params=query_params, header_params=header_params, response_type="Term")
[ "def", "get_term", "(", "self", ",", "catalog_id", ",", "glossary_key", ",", "term_key", ",", "*", "*", "kwargs", ")", ":", "resource_path", "=", "\"/catalogs/{catalogId}/glossaries/{glossaryKey}/terms/{termKey}\"", "method", "=", "\"GET\"", "# Don't accept unknown kwargs...
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/data_catalog/data_catalog_client.py#L7181-L7289
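A hedged usage sketch for the DataCatalogClient.get_term row above (not part of the dataset). Configuration is loaded the usual OCI SDK way; every identifier below is a placeholder.

import oci

config = oci.config.from_file()  # reads ~/.oci/config; a valid profile is assumed
client = oci.data_catalog.DataCatalogClient(config)

response = client.get_term(
    catalog_id="ocid1.datacatalog.oc1..exampleuniqueid",  # placeholder OCID
    glossary_key="example-glossary-key",                   # placeholder key
    term_key="example-term-key",                           # placeholder key
    fields=["key", "displayName", "description"])
term = response.data  # oci.data_catalog.models.Term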
MillionIntegrals/vel
f3ce7da64362ad207f40f2c0d58d9300a25df3e8
vel/rl/buffers/circular_replay_buffer.py
python
CircularReplayBuffer.store_transition
(self, frame, action, reward, done, extra_info=None)
Store given transition in the backend
Store given transition in the backend
[ "Store", "given", "transition", "in", "the", "backend" ]
def store_transition(self, frame, action, reward, done, extra_info=None): """ Store given transition in the backend """ self.backend.store_transition(frame=frame, action=action, reward=reward, done=done, extra_info=extra_info)
[ "def", "store_transition", "(", "self", ",", "frame", ",", "action", ",", "reward", ",", "done", ",", "extra_info", "=", "None", ")", ":", "self", ".", "backend", ".", "store_transition", "(", "frame", "=", "frame", ",", "action", "=", "action", ",", "...
https://github.com/MillionIntegrals/vel/blob/f3ce7da64362ad207f40f2c0d58d9300a25df3e8/vel/rl/buffers/circular_replay_buffer.py#L103-L105
deepmind/dm_control
806a10e896e7c887635328bfa8352604ad0fedae
dm_control/suite/wrappers/pixels.py
python
Wrapper.reset
(self)
return self._add_pixel_observation(time_step)
[]
def reset(self): time_step = self._env.reset() return self._add_pixel_observation(time_step)
[ "def", "reset", "(", "self", ")", ":", "time_step", "=", "self", ".", "_env", ".", "reset", "(", ")", "return", "self", ".", "_add_pixel_observation", "(", "time_step", ")" ]
https://github.com/deepmind/dm_control/blob/806a10e896e7c887635328bfa8352604ad0fedae/dm_control/suite/wrappers/pixels.py#L87-L89
cisco/mindmeld
809c36112e9ea8019fe29d54d136ca14eb4fd8db
mindmeld/components/nlp.py
python
Processor._build_recursive
(self, incremental=False, label_set=None)
Builds all the natural language processing models for this processor and its children. Args: incremental (bool, optional): When ``True``, only build models whose training data or configuration has changed since the last build. Defaults to ``False``. label_set (string, optional): The label set from which to train all classifiers.
Builds all the natural language processing models for this processor and its children.
[ "Builds", "all", "the", "natural", "language", "processing", "models", "for", "this", "processor", "and", "its", "children", "." ]
def _build_recursive(self, incremental=False, label_set=None): """Builds all the natural language processing models for this processor and its children. Args: incremental (bool, optional): When ``True``, only build models whose training data or configuration has changed since the last build. Defaults to ``False``. label_set (string, optional): The label set from which to train all classifiers. """ self._build(incremental=incremental, label_set=label_set, load_cached=False) # We dump and unload the model to reduce memory consumption while training if self.ready: self._dump() self.unload() for child in self._children.values(): # We pass the incremental_timestamp to children processors child.incremental_timestamp = self.incremental_timestamp child._build_recursive(incremental=incremental, label_set=label_set)
[ "def", "_build_recursive", "(", "self", ",", "incremental", "=", "False", ",", "label_set", "=", "None", ")", ":", "self", ".", "_build", "(", "incremental", "=", "incremental", ",", "label_set", "=", "label_set", ",", "load_cached", "=", "False", ")", "# ...
https://github.com/cisco/mindmeld/blob/809c36112e9ea8019fe29d54d136ca14eb4fd8db/mindmeld/components/nlp.py#L148-L165
OpenXenManager/openxenmanager
1cb5c1cb13358ba584856e99a94f9669d17670ff
src/pygtk_chart/chart_object.py
python
ChartObject._do_draw
(self, context, rect)
A derived class should override this method. The drawing stuff should happen here. @type context: cairo.Context @param context: The context to draw on. @type rect: gtk.gdk.Rectangle @param rect: A rectangle representing the charts area.
A derived class should override this method. The drawing stuff should happen here.
[ "A", "derived", "class", "should", "override", "this", "method", ".", "The", "drawing", "stuff", "should", "happen", "here", "." ]
def _do_draw(self, context, rect): """ A derived class should override this method. The drawing stuff should happen here. @type context: cairo.Context @param context: The context to draw on. @type rect: gtk.gdk.Rectangle @param rect: A rectangle representing the charts area. """ pass
[ "def", "_do_draw", "(", "self", ",", "context", ",", "rect", ")", ":", "pass" ]
https://github.com/OpenXenManager/openxenmanager/blob/1cb5c1cb13358ba584856e99a94f9669d17670ff/src/pygtk_chart/chart_object.py#L85-L95
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/whoosh/searching.py
python
Results.upgrade
(self, results, reverse=False)
Re-sorts the results so any hits that are also in 'results' appear before hits not in 'results', otherwise keeping their current relative positions. This does not add the documents in the other results object to this one. :param results: another results object. :param reverse: if True, lower the position of hits in the other results object instead of raising them.
Re-sorts the results so any hits that are also in 'results' appear before hits not in 'results', otherwise keeping their current relative positions. This does not add the documents in the other results object to this one.
[ "Re", "-", "sorts", "the", "results", "so", "any", "hits", "that", "are", "also", "in", "results", "appear", "before", "hits", "not", "in", "results", "otherwise", "keeping", "their", "current", "relative", "positions", ".", "This", "does", "not", "add", "...
def upgrade(self, results, reverse=False): """Re-sorts the results so any hits that are also in 'results' appear before hits not in 'results', otherwise keeping their current relative positions. This does not add the documents in the other results object to this one. :param results: another results object. :param reverse: if True, lower the position of hits in the other results object instead of raising them. """ if not len(results): return otherdocs = results.docs() arein = [item for item in self.top_n if item[1] in otherdocs] notin = [item for item in self.top_n if item[1] not in otherdocs] if reverse: items = notin + arein else: items = arein + notin self.top_n = items
[ "def", "upgrade", "(", "self", ",", "results", ",", "reverse", "=", "False", ")", ":", "if", "not", "len", "(", "results", ")", ":", "return", "otherdocs", "=", "results", ".", "docs", "(", ")", "arein", "=", "[", "item", "for", "item", "in", "self...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/searching.py#L1301-L1324
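A brief usage sketch for the Results.upgrade row above (not part of the dataset). It assumes an existing Whoosh index with a "content" field; the directory name and query terms are placeholders.

from whoosh.index import open_dir
from whoosh.qparser import QueryParser

ix = open_dir("indexdir")  # placeholder index directory
with ix.searcher() as searcher:
    parser = QueryParser("content", ix.schema)
    main = searcher.search(parser.parse("python"))
    boost = searcher.search(parser.parse("tutorial"))
    # Hits also present in `boost` are moved ahead of the rest, keeping relative order.
    main.upgrade(boost)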
matplotlib/matplotlib
8d7a2b9d2a38f01ee0d6802dd4f9e98aec812322
lib/matplotlib/textpath.py
python
TextToPath.get_text_path
(self, prop, s, ismath=False)
return verts, codes
Convert text *s* to path (a tuple of vertices and codes for matplotlib.path.Path). Parameters ---------- prop : `~matplotlib.font_manager.FontProperties` The font properties for the text. s : str The text to be converted. ismath : {False, True, "TeX"} If True, use mathtext parser. If "TeX", use tex for rendering. Returns ------- verts : list A list of numpy arrays containing the x and y coordinates of the vertices. codes : list A list of path codes. Examples -------- Create a list of vertices and codes from a text, and create a `.Path` from those:: from matplotlib.path import Path from matplotlib.textpath import TextToPath from matplotlib.font_manager import FontProperties fp = FontProperties(family="Humor Sans", style="italic") verts, codes = TextToPath().get_text_path(fp, "ABC") path = Path(verts, codes, closed=False) Also see `TextPath` for a more direct way to create a path from a text.
Convert text *s* to path (a tuple of vertices and codes for matplotlib.path.Path).
[ "Convert", "text", "*", "s", "*", "to", "path", "(", "a", "tuple", "of", "vertices", "and", "codes", "for", "matplotlib", ".", "path", ".", "Path", ")", "." ]
def get_text_path(self, prop, s, ismath=False): """ Convert text *s* to path (a tuple of vertices and codes for matplotlib.path.Path). Parameters ---------- prop : `~matplotlib.font_manager.FontProperties` The font properties for the text. s : str The text to be converted. ismath : {False, True, "TeX"} If True, use mathtext parser. If "TeX", use tex for rendering. Returns ------- verts : list A list of numpy arrays containing the x and y coordinates of the vertices. codes : list A list of path codes. Examples -------- Create a list of vertices and codes from a text, and create a `.Path` from those:: from matplotlib.path import Path from matplotlib.textpath import TextToPath from matplotlib.font_manager import FontProperties fp = FontProperties(family="Humor Sans", style="italic") verts, codes = TextToPath().get_text_path(fp, "ABC") path = Path(verts, codes, closed=False) Also see `TextPath` for a more direct way to create a path from a text. """ if ismath == "TeX": glyph_info, glyph_map, rects = self.get_glyphs_tex(prop, s) elif not ismath: font = self._get_font(prop) glyph_info, glyph_map, rects = self.get_glyphs_with_font(font, s) else: glyph_info, glyph_map, rects = self.get_glyphs_mathtext(prop, s) verts, codes = [], [] for glyph_id, xposition, yposition, scale in glyph_info: verts1, codes1 = glyph_map[glyph_id] if len(verts1): verts1 = np.array(verts1) * scale + [xposition, yposition] verts.extend(verts1) codes.extend(codes1) for verts1, codes1 in rects: verts.extend(verts1) codes.extend(codes1) return verts, codes
[ "def", "get_text_path", "(", "self", ",", "prop", ",", "s", ",", "ismath", "=", "False", ")", ":", "if", "ismath", "==", "\"TeX\"", ":", "glyph_info", ",", "glyph_map", ",", "rects", "=", "self", ".", "get_glyphs_tex", "(", "prop", ",", "s", ")", "el...
https://github.com/matplotlib/matplotlib/blob/8d7a2b9d2a38f01ee0d6802dd4f9e98aec812322/lib/matplotlib/textpath.py#L73-L134
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python3-alpha/python-libs/pyxmpp2/ext/disco.py
python
DiscoInfo.get_identities
(self)
return ret
List the identity objects contained in `self`. :return: the list of identities. :returntype: `list` of `DiscoIdentity`
List the identity objects contained in `self`.
[ "List", "the", "identity", "objects", "contained", "in", "self", "." ]
def get_identities(self): """List the identity objects contained in `self`. :return: the list of identities. :returntype: `list` of `DiscoIdentity`""" ret=[] l=self.xpath_ctxt.xpathEval("d:identity") if l is not None: for i in l: ret.append(DiscoIdentity(self,i)) return ret
[ "def", "get_identities", "(", "self", ")", ":", "ret", "=", "[", "]", "l", "=", "self", ".", "xpath_ctxt", ".", "xpathEval", "(", "\"d:identity\"", ")", "if", "l", "is", "not", "None", ":", "for", "i", "in", "l", ":", "ret", ".", "append", "(", "...
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python3-alpha/python-libs/pyxmpp2/ext/disco.py#L723-L733
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/rings/number_field/totallyreal_phc.py
python
__lagrange_bounds_phc
(n, m, a, tmpfile=None)
r""" This function determines the bounds on the roots in the enumeration of totally real fields via Lagrange multipliers. It is used internally by the main function enumerate_totallyreal_fields_prim(), which should be consulted for further information. INPUT: - k -- integer, the index of the next coefficient - a -- list of integers, the coefficients OUTPUT: the lower and upper bounds as real numbers. .. NOTE:: See Cohen [Coh2000]_ for the general idea and unpublished work of the author for more detail. AUTHORS: - John Voight (2007-09-19) EXAMPLES:: sage: from sage.rings.number_field.totallyreal_phc import __lagrange_bounds_phc sage: __lagrange_bounds_phc(3,5,[8,1,2,0,1]) # optional - phc [] sage: x, y = __lagrange_bounds_phc(3,2,[8,1,2,0,1]) # optional - phc sage: x # optional - phc -1.3333333333333299 sage: y < 0.00000001 # optional - phc True sage: __lagrange_bounds_phc(3,1,[8,1,2,0,1]) # optional - phc []
r""" This function determines the bounds on the roots in the enumeration of totally real fields via Lagrange multipliers.
[ "r", "This", "function", "determines", "the", "bounds", "on", "the", "roots", "in", "the", "enumeration", "of", "totally", "real", "fields", "via", "Lagrange", "multipliers", "." ]
def __lagrange_bounds_phc(n, m, a, tmpfile=None): r""" This function determines the bounds on the roots in the enumeration of totally real fields via Lagrange multipliers. It is used internally by the main function enumerate_totallyreal_fields_prim(), which should be consulted for further information. INPUT: - k -- integer, the index of the next coefficient - a -- list of integers, the coefficients OUTPUT: the lower and upper bounds as real numbers. .. NOTE:: See Cohen [Coh2000]_ for the general idea and unpublished work of the author for more detail. AUTHORS: - John Voight (2007-09-19) EXAMPLES:: sage: from sage.rings.number_field.totallyreal_phc import __lagrange_bounds_phc sage: __lagrange_bounds_phc(3,5,[8,1,2,0,1]) # optional - phc [] sage: x, y = __lagrange_bounds_phc(3,2,[8,1,2,0,1]) # optional - phc sage: x # optional - phc -1.3333333333333299 sage: y < 0.00000001 # optional - phc True sage: __lagrange_bounds_phc(3,1,[8,1,2,0,1]) # optional - phc [] """ # Compute power sums. S = coefficients_to_power_sums(n,m,a) # Look for phc. fi, fo = os.popen2('which phc') find_phc = fo.readlines() fi.close() fo.close() if find_phc == []: raise RuntimeError("PHCpack not installed.") # Initialization. if tmpfile is None: tmpfile = sage.misc.misc.tmp_filename() f = open(tmpfile + '.phc', 'w') f.close() output_data = [] # By the method of Lagrange multipliers, if we maximize x_n subject to # S_j(x) = S[j] (j = 1, ..., m), # then there are at most m-1 distinct values amongst the x_i. # Therefore we must solve the implied equations for each partition of n-1 # into m-1 parts. for P in sage.combinat.partition.Partitions(n-1,length=m-1): f = open(tmpfile, 'w') # First line: number of variables/equations f.write('%d'%m + '\n') # In the next m-1 lines, write the equation S_j(x) = S[j] for j in range(1,m+1): for i in range(m-1): f.write('%d'%P[i] + '*x%d'%i + '**%d'%j + ' + ') f.write('xn**%d'%j + ' - (%d'%S[j] + ');\n') f.close() os.remove(tmpfile + '.phc') os.popen('phc -b ' + tmpfile + ' ' + tmpfile + '.phc') f = open(tmpfile + '.phc', 'r') f_str = f.read() pos = f_str.find('= real ') crits = [] while pos != -1: posl = f_str.rfind('xn', 0, pos) f_str_split = f_str[posl:pos].split() crits += [float(f_str_split[2])] pos = f_str.find('= real ', pos+1) if len(crits) > 0: output_data += [[P, min(crits), max(crits)]] if len(output_data) > 0: return [min([v[1] for v in output_data]), max([v[2] for v in output_data])] else: return []
[ "def", "__lagrange_bounds_phc", "(", "n", ",", "m", ",", "a", ",", "tmpfile", "=", "None", ")", ":", "# Compute power sums.", "S", "=", "coefficients_to_power_sums", "(", "n", ",", "m", ",", "a", ")", "# Look for phc.", "fi", ",", "fo", "=", "os", ".", ...
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/rings/number_field/totallyreal_phc.py#L61-L155
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/hqmedia/views.py
python
ProcessBulkUploadView.validate_file
(self, replace_diff_ext=False)
[]
def validate_file(self, replace_diff_ext=False): if not self.mime_type in self.valid_mime_types(): raise BadMediaFileException(_("Uploaded file is not a ZIP file.")) if not self.uploaded_zip: raise BadMediaFileException(_("There is no ZIP file.")) if self.uploaded_zip.testzip(): raise BadMediaFileException(_("Unable to extract the ZIP file."))
[ "def", "validate_file", "(", "self", ",", "replace_diff_ext", "=", "False", ")", ":", "if", "not", "self", ".", "mime_type", "in", "self", ".", "valid_mime_types", "(", ")", ":", "raise", "BadMediaFileException", "(", "_", "(", "\"Uploaded file is not a ZIP file...
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/hqmedia/views.py#L568-L574
eternnoir/pyTelegramBotAPI
fdbc0e6a619f671c2ac97afa2f694c17c6dce7d9
telebot/__init__.py
python
TeleBot.send_audio
( self, chat_id: Union[int, str], audio: Union[Any, str], caption: Optional[str]=None, duration: Optional[int]=None, performer: Optional[str]=None, title: Optional[str]=None, reply_to_message_id: Optional[int]=None, reply_markup: Optional[REPLY_MARKUP_TYPES]=None, parse_mode: Optional[str]=None, disable_notification: Optional[bool]=None, timeout: Optional[int]=None, thumb: Optional[Union[Any, str]]=None, caption_entities: Optional[List[types.MessageEntity]]=None, allow_sending_without_reply: Optional[bool]=None, protect_content: Optional[bool]=None)
return types.Message.de_json( apihelper.send_audio( self.token, chat_id, audio, caption, duration, performer, title, reply_to_message_id, reply_markup, parse_mode, disable_notification, timeout, thumb, caption_entities, allow_sending_without_reply, protect_content))
Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. :param chat_id:Unique identifier for the message recipient :param audio:Audio file to send. :param caption: :param duration:Duration of the audio in seconds :param performer:Performer :param title:Track name :param reply_to_message_id:If the message is a reply, ID of the original message :param reply_markup: :param parse_mode :param disable_notification: :param timeout: :param thumb: :param caption_entities: :param allow_sending_without_reply: :param protect_content: :return: Message
Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. :param chat_id:Unique identifier for the message recipient :param audio:Audio file to send. :param caption: :param duration:Duration of the audio in seconds :param performer:Performer :param title:Track name :param reply_to_message_id:If the message is a reply, ID of the original message :param reply_markup: :param parse_mode :param disable_notification: :param timeout: :param thumb: :param caption_entities: :param allow_sending_without_reply: :param protect_content: :return: Message
[ "Use", "this", "method", "to", "send", "audio", "files", "if", "you", "want", "Telegram", "clients", "to", "display", "them", "in", "the", "music", "player", ".", "Your", "audio", "must", "be", "in", "the", ".", "mp3", "format", ".", ":", "param", "cha...
def send_audio( self, chat_id: Union[int, str], audio: Union[Any, str], caption: Optional[str]=None, duration: Optional[int]=None, performer: Optional[str]=None, title: Optional[str]=None, reply_to_message_id: Optional[int]=None, reply_markup: Optional[REPLY_MARKUP_TYPES]=None, parse_mode: Optional[str]=None, disable_notification: Optional[bool]=None, timeout: Optional[int]=None, thumb: Optional[Union[Any, str]]=None, caption_entities: Optional[List[types.MessageEntity]]=None, allow_sending_without_reply: Optional[bool]=None, protect_content: Optional[bool]=None) -> types.Message: """ Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. :param chat_id:Unique identifier for the message recipient :param audio:Audio file to send. :param caption: :param duration:Duration of the audio in seconds :param performer:Performer :param title:Track name :param reply_to_message_id:If the message is a reply, ID of the original message :param reply_markup: :param parse_mode :param disable_notification: :param timeout: :param thumb: :param caption_entities: :param allow_sending_without_reply: :param protect_content: :return: Message """ parse_mode = self.parse_mode if (parse_mode is None) else parse_mode return types.Message.de_json( apihelper.send_audio( self.token, chat_id, audio, caption, duration, performer, title, reply_to_message_id, reply_markup, parse_mode, disable_notification, timeout, thumb, caption_entities, allow_sending_without_reply, protect_content))
[ "def", "send_audio", "(", "self", ",", "chat_id", ":", "Union", "[", "int", ",", "str", "]", ",", "audio", ":", "Union", "[", "Any", ",", "str", "]", ",", "caption", ":", "Optional", "[", "str", "]", "=", "None", ",", "duration", ":", "Optional", ...
https://github.com/eternnoir/pyTelegramBotAPI/blob/fdbc0e6a619f671c2ac97afa2f694c17c6dce7d9/telebot/__init__.py#L1100-L1139
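A minimal usage sketch for the TeleBot.send_audio row above (not part of the dataset). The bot token, chat id and file name are placeholders.

import telebot

bot = telebot.TeleBot("YOUR_BOT_TOKEN")     # placeholder token
with open("track.mp3", "rb") as audio:      # placeholder .mp3 file
    bot.send_audio(chat_id=123456789, audio=audio,
                   title="Track name", performer="Artist", duration=215)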
kedro-org/kedro
e78990c6b606a27830f0d502afa0f639c0830950
kedro/extras/datasets/pandas/parquet_dataset.py
python
ParquetDataSet.__init__
( self, filepath: str, load_args: Dict[str, Any] = None, save_args: Dict[str, Any] = None, version: Version = None, credentials: Dict[str, Any] = None, fs_args: Dict[str, Any] = None, )
Creates a new instance of ``ParquetDataSet`` pointing to a concrete Parquet file on a specific filesystem. Args: filepath: Filepath in POSIX format to a Parquet file prefixed with a protocol like `s3://`. If prefix is not provided, `file` protocol (local filesystem) will be used. The prefix should be any protocol supported by ``fsspec``. It can also be a path to a directory. If the directory is provided then it can be used for reading partitioned parquet files. Note: `http(s)` doesn't support versioning. load_args: Additional options for loading Parquet file(s). Here you can find all available arguments when reading single file: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_parquet.html Here you can find all available arguments when reading partitioned datasets: https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html#pyarrow.parquet.ParquetDataset.read All defaults are preserved. save_args: Additional saving options for `pyarrow.parquet.write_table` and `pyarrow.Table.from_pandas`. Here you can find all available arguments for `write_table()`: https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html?highlight=write_table#pyarrow.parquet.write_table The arguments for `from_pandas()` should be passed through a nested key: `from_pandas`. E.g.: `save_args = {"from_pandas": {"preserve_index": False}}` Here you can find all available arguments for `from_pandas()`: https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table.from_pandas version: If specified, should be an instance of ``kedro.io.core.Version``. If its ``load`` attribute is None, the latest version will be loaded. If its ``save`` attribute is None, save version will be autogenerated. credentials: Credentials required to get access to the underlying filesystem. E.g. for ``GCSFileSystem`` it should look like `{"token": None}`. fs_args: Extra arguments to pass into underlying filesystem class constructor (e.g. `{"project": "my-project"}` for ``GCSFileSystem``), as well as to pass to the filesystem's `open` method through nested keys `open_args_load` and `open_args_save`. Here you can find all available arguments for `open`: https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.open All defaults are preserved.
Creates a new instance of ``ParquetDataSet`` pointing to a concrete Parquet file on a specific filesystem.
[ "Creates", "a", "new", "instance", "of", "ParquetDataSet", "pointing", "to", "a", "concrete", "Parquet", "file", "on", "a", "specific", "filesystem", "." ]
def __init__( self, filepath: str, load_args: Dict[str, Any] = None, save_args: Dict[str, Any] = None, version: Version = None, credentials: Dict[str, Any] = None, fs_args: Dict[str, Any] = None, ) -> None: """Creates a new instance of ``ParquetDataSet`` pointing to a concrete Parquet file on a specific filesystem. Args: filepath: Filepath in POSIX format to a Parquet file prefixed with a protocol like `s3://`. If prefix is not provided, `file` protocol (local filesystem) will be used. The prefix should be any protocol supported by ``fsspec``. It can also be a path to a directory. If the directory is provided then it can be used for reading partitioned parquet files. Note: `http(s)` doesn't support versioning. load_args: Additional options for loading Parquet file(s). Here you can find all available arguments when reading single file: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_parquet.html Here you can find all available arguments when reading partitioned datasets: https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html#pyarrow.parquet.ParquetDataset.read All defaults are preserved. save_args: Additional saving options for `pyarrow.parquet.write_table` and `pyarrow.Table.from_pandas`. Here you can find all available arguments for `write_table()`: https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html?highlight=write_table#pyarrow.parquet.write_table The arguments for `from_pandas()` should be passed through a nested key: `from_pandas`. E.g.: `save_args = {"from_pandas": {"preserve_index": False}}` Here you can find all available arguments for `from_pandas()`: https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table.from_pandas version: If specified, should be an instance of ``kedro.io.core.Version``. If its ``load`` attribute is None, the latest version will be loaded. If its ``save`` attribute is None, save version will be autogenerated. credentials: Credentials required to get access to the underlying filesystem. E.g. for ``GCSFileSystem`` it should look like `{"token": None}`. fs_args: Extra arguments to pass into underlying filesystem class constructor (e.g. `{"project": "my-project"}` for ``GCSFileSystem``), as well as to pass to the filesystem's `open` method through nested keys `open_args_load` and `open_args_save`. Here you can find all available arguments for `open`: https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.open All defaults are preserved. """ _fs_args = deepcopy(fs_args) or {} self._fs_open_args_load = _fs_args.pop("open_args_load", {}) _credentials = deepcopy(credentials) or {} protocol, path = get_protocol_and_path(filepath, version) if protocol == "file": _fs_args.setdefault("auto_mkdir", True) self._protocol = protocol self._fs = fsspec.filesystem(self._protocol, **_credentials, **_fs_args) super().__init__( filepath=PurePosixPath(path), version=version, exists_function=self._fs.exists, glob_function=self._fs.glob, ) self._from_pandas_args = {} # type: Dict[str, Any] # Handle default load and save arguments self._load_args = deepcopy(self.DEFAULT_LOAD_ARGS) if load_args is not None: self._load_args.update(load_args) self._save_args = deepcopy(self.DEFAULT_SAVE_ARGS) if save_args is not None: self._from_pandas_args.update(save_args.pop("from_pandas", {})) self._save_args.update(save_args)
[ "def", "__init__", "(", "self", ",", "filepath", ":", "str", ",", "load_args", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", "save_args", ":", "Dict", "[", "str", ",", "Any", "]", "=", "None", ",", "version", ":", "Version", "=", "N...
https://github.com/kedro-org/kedro/blob/e78990c6b606a27830f0d502afa0f639c0830950/kedro/extras/datasets/pandas/parquet_dataset.py#L76-L150
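A hedged usage sketch for the ParquetDataSet row above (not part of the dataset). The file path and save options are illustrative; per the docstring, pyarrow's from_pandas arguments go under the nested "from_pandas" key.

import pandas as pd
from kedro.extras.datasets.pandas import ParquetDataSet

df = pd.DataFrame({"a": [1, 2, 3]})

data_set = ParquetDataSet(
    filepath="data/01_raw/example.parquet",   # placeholder local path; s3:// and other fsspec protocols also work
    save_args={"compression": "GZIP",
               "from_pandas": {"preserve_index": False}})
data_set.save(df)
reloaded = data_set.load()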
Azure/azure-functions-python-worker
edc21f8c6214d2072944ed45d6eb5016e2c747fd
azure_functions_worker/utils/dependency.py
python
DependencyManager._remove_module_cache
(path: str)
Remove module cache if the module is imported from specific path. This will not impact builtin modules Parameters ---------- path: str The module cache to be removed if it is imported from this path.
Remove module cache if the module is imported from specific path. This will not impact builtin modules
[ "Remove", "module", "cache", "if", "the", "module", "is", "imported", "from", "specific", "path", ".", "This", "will", "not", "impact", "builtin", "modules" ]
def _remove_module_cache(path: str): """Remove module cache if the module is imported from specific path. This will not impact builtin modules Parameters ---------- path: str The module cache to be removed if it is imported from this path. """ if not path: return not_builtin = set(sys.modules.keys()) - set(sys.builtin_module_names) # Don't reload azure_functions_worker to_be_cleared_from_cache = set([ module_name for module_name in not_builtin if not module_name.startswith('azure_functions_worker') ]) for module_name in to_be_cleared_from_cache: module = sys.modules.get(module_name) if not isinstance(module, ModuleType): continue # Module path can be actual file path or a pure namespace path. # Both of these has the module path placed in __path__ property # The property .__path__ can be None or does not exist in module try: module_paths = set(getattr(module, '__path__', None) or []) if hasattr(module, '__file__') and module.__file__: module_paths.add(module.__file__) if any([p for p in module_paths if p.startswith(path)]): sys.modules.pop(module_name) except Exception as e: logger.warning( f'Attempt to remove module cache for {module_name} but' f' failed with {e}. Using the original module cache.' )
[ "def", "_remove_module_cache", "(", "path", ":", "str", ")", ":", "if", "not", "path", ":", "return", "not_builtin", "=", "set", "(", "sys", ".", "modules", ".", "keys", "(", ")", ")", "-", "set", "(", "sys", ".", "builtin_module_names", ")", "# Don't ...
https://github.com/Azure/azure-functions-python-worker/blob/edc21f8c6214d2072944ed45d6eb5016e2c747fd/azure_functions_worker/utils/dependency.py#L356-L395
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/UserString.py
python
UserString.__str__
(self)
return str(self.data)
[]
def __str__(self): return str(self.data)
[ "def", "__str__", "(", "self", ")", ":", "return", "str", "(", "self", ".", "data", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/UserString.py#L21-L21
nilearn/nilearn
9edba4471747efacf21260bf470a346307f52706
nilearn/_utils/class_inspect.py
python
get_params
(cls, instance, ignore=None)
return params
Retrieve the initialization parameters corresponding to a class This helper function retrieves the parameters of function __init__ for class 'cls' and returns the value for these parameters in object 'instance'. When using a composition pattern (e.g. with a NiftiMasker class), it is useful to forward parameters from one instance to another. Parameters ---------- cls : class The class that gives us the list of parameters we are interested in. instance : object, instance of BaseEstimator The object that gives us the values of the parameters. ignore : None or list of strings Names of the parameters that are not returned. Returns ------- params : dict The dict of parameters.
Retrieve the initialization parameters corresponding to a class
[ "Retrieve", "the", "initialization", "parameters", "corresponding", "to", "a", "class" ]
def get_params(cls, instance, ignore=None): """Retrieve the initialization parameters corresponding to a class This helper function retrieves the parameters of function __init__ for class 'cls' and returns the value for these parameters in object 'instance'. When using a composition pattern (e.g. with a NiftiMasker class), it is useful to forward parameters from one instance to another. Parameters ---------- cls : class The class that gives us the list of parameters we are interested in. instance : object, instance of BaseEstimator The object that gives us the values of the parameters. ignore : None or list of strings Names of the parameters that are not returned. Returns ------- params : dict The dict of parameters. """ _ignore = set(('memory', 'memory_level', 'verbose', 'copy', 'n_jobs')) if ignore is not None: _ignore.update(ignore) param_names = cls._get_param_names() params = dict() for param_name in param_names: if param_name in _ignore: continue if hasattr(instance, param_name): params[param_name] = getattr(instance, param_name) return params
[ "def", "get_params", "(", "cls", ",", "instance", ",", "ignore", "=", "None", ")", ":", "_ignore", "=", "set", "(", "(", "'memory'", ",", "'memory_level'", ",", "'verbose'", ",", "'copy'", ",", "'n_jobs'", ")", ")", "if", "ignore", "is", "not", "None",...
https://github.com/nilearn/nilearn/blob/9edba4471747efacf21260bf470a346307f52706/nilearn/_utils/class_inspect.py#L10-L48
reuterbal/photobooth
40f1cdadcc8cff0c142fe1d4f14813f07c5a656f
photobooth/worker/PictureMailer.py
python
send_mail
(send_from, send_to, subject, message, picture, filename, server, port, is_auth, username, password, is_tls)
Compose and send email with provided info and attachments. Based on https://stackoverflow.com/a/16509278 Args: send_from (str): from name send_to (str): to name subject (str): message title message (str): message body picture (jpg byte_data): ByteIO data of the JPG picture filename (str): Filename of picture server (str): mail server host name port (int): port number is_auth (bool): server requires authentication username (str): server auth username password (str): server auth password is_tls (bool): use TLS mode
Compose and send email with provided info and attachments.
[ "Compose", "and", "send", "email", "with", "provided", "info", "and", "attachments", "." ]
def send_mail(send_from, send_to, subject, message, picture, filename, server, port, is_auth, username, password, is_tls): """Compose and send email with provided info and attachments. Based on https://stackoverflow.com/a/16509278 Args: send_from (str): from name send_to (str): to name subject (str): message title message (str): message body picture (jpg byte_data): ByteIO data of the JPG picture filename (str): Filename of picture server (str): mail server host name port (int): port number is_auth (bool): server requires authentication username (str): server auth username password (str): server auth password is_tls (bool): use TLS mode """ msg = MIMEMultipart() msg['From'] = send_from msg['To'] = send_to msg['Date'] = formatdate(localtime=True) msg['Subject'] = subject msg.attach(MIMEText(message)) part = MIMEBase('application', "octet-stream") part.set_payload(picture.getbuffer()) encoders.encode_base64(part) part.add_header('Content-Disposition', 'attachment; filename="{}"'.format(filename)) msg.attach(part) smtp = smtplib.SMTP(server, port) if is_tls: smtp.starttls() if is_auth: smtp.login(username, password) smtp.sendmail(send_from, send_to, msg.as_string()) smtp.quit()
[ "def", "send_mail", "(", "send_from", ",", "send_to", ",", "subject", ",", "message", ",", "picture", ",", "filename", ",", "server", ",", "port", ",", "is_auth", ",", "username", ",", "password", ",", "is_tls", ")", ":", "msg", "=", "MIMEMultipart", "("...
https://github.com/reuterbal/photobooth/blob/40f1cdadcc8cff0c142fe1d4f14813f07c5a656f/photobooth/worker/PictureMailer.py#L34-L75
GoSecure/pyrdp
abd8b8762b6d7fd0e49d4a927b529f892b412743
pyrdp/parser/rdp/connection.py
python
ServerConnectionParser.__init__
(self)
[]
def __init__(self): super().__init__() self.parsers = { ConnectionDataType.SERVER_CORE: self.parseServerCoreData, ConnectionDataType.SERVER_NETWORK: self.parseServerNetworkData, ConnectionDataType.SERVER_SECURITY: self.parseServerSecurityData, } self.writers = { ConnectionDataType.SERVER_CORE: self.writeServerCoreData, ConnectionDataType.SERVER_NETWORK: self.writeServerNetworkData, ConnectionDataType.SERVER_SECURITY: self.writeServerSecurityData, }
[ "def", "__init__", "(", "self", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "parsers", "=", "{", "ConnectionDataType", ".", "SERVER_CORE", ":", "self", ".", "parseServerCoreData", ",", "ConnectionDataType", ".", "SERVER_NETWORK", ":"...
https://github.com/GoSecure/pyrdp/blob/abd8b8762b6d7fd0e49d4a927b529f892b412743/pyrdp/parser/rdp/connection.py#L258-L270
roclark/sportsipy
c19f545d3376d62ded6304b137dc69238ac620a9
sportsipy/fb/roster.py
python
Roster.__repr__
(self)
return self.__str__()
Return the string representation of the class.
Return the string representation of the class.
[ "Return", "the", "string", "representation", "of", "the", "class", "." ]
def __repr__(self): """ Return the string representation of the class. """ return self.__str__()
[ "def", "__repr__", "(", "self", ")", ":", "return", "self", ".", "__str__", "(", ")" ]
https://github.com/roclark/sportsipy/blob/c19f545d3376d62ded6304b137dc69238ac620a9/sportsipy/fb/roster.py#L1541-L1545
twisted/twisted
dee676b040dd38b847ea6fb112a712cb5e119490
src/twisted/spread/pb.py
python
Broker.sendDecRef
(self, objectID)
(internal) Send a DECREF directive. @param objectID: The object ID.
(internal) Send a DECREF directive.
[ "(", "internal", ")", "Send", "a", "DECREF", "directive", "." ]
def sendDecRef(self, objectID): """ (internal) Send a DECREF directive. @param objectID: The object ID. """ self.sendCall(b"decref", objectID)
[ "def", "sendDecRef", "(", "self", ",", "objectID", ")", ":", "self", ".", "sendCall", "(", "b\"decref\"", ",", "objectID", ")" ]
https://github.com/twisted/twisted/blob/dee676b040dd38b847ea6fb112a712cb5e119490/src/twisted/spread/pb.py#L1165-L1171
pythonarcade/arcade
1ee3eb1900683213e8e8df93943327c2ea784564
arcade/examples/sprite_properties.py
python
main
()
Main function
Main function
[ "Main", "function" ]
def main(): """ Main function """ window = MyGame() window.setup() arcade.run()
[ "def", "main", "(", ")", ":", "window", "=", "MyGame", "(", ")", "window", ".", "setup", "(", ")", "arcade", ".", "run", "(", ")" ]
https://github.com/pythonarcade/arcade/blob/1ee3eb1900683213e8e8df93943327c2ea784564/arcade/examples/sprite_properties.py#L129-L133
aws/aws-parallelcluster
f1fe5679a01c524e7ea904c329bd6d17318c6cd9
api/client/src/pcluster_client/model/build_image_request_content.py
python
BuildImageRequestContent.openapi_types
()
return { 'image_configuration': (str,), # noqa: E501 'image_id': (str,), # noqa: E501 }
This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
This must be a method because a model may have properties that are of type self, this must run after the class is loaded
[ "This", "must", "be", "a", "method", "because", "a", "model", "may", "have", "properties", "that", "are", "of", "type", "self", "this", "must", "run", "after", "the", "class", "is", "loaded" ]
def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { 'image_configuration': (str,), # noqa: E501 'image_id': (str,), # noqa: E501 }
[ "def", "openapi_types", "(", ")", ":", "return", "{", "'image_configuration'", ":", "(", "str", ",", ")", ",", "# noqa: E501", "'image_id'", ":", "(", "str", ",", ")", ",", "# noqa: E501", "}" ]
https://github.com/aws/aws-parallelcluster/blob/f1fe5679a01c524e7ea904c329bd6d17318c6cd9/api/client/src/pcluster_client/model/build_image_request_content.py#L70-L82
plasticityai/supersqlite
d74da749c6fa5df021df3968b854b9a59f829e17
supersqlite/third_party/_apsw/tools/shell.py
python
Shell._set_db
(self, newv)
Sets the open database (or None) and filename
Sets the open database (or None) and filename
[ "Sets", "the", "open", "database", "(", "or", "None", ")", "and", "filename" ]
def _set_db(self, newv): "Sets the open database (or None) and filename" (db, dbfilename)=newv if self._db: self._db.close(True) self._db=None self._db=db self.dbfilename=dbfilename
[ "def", "_set_db", "(", "self", ",", "newv", ")", ":", "(", "db", ",", "dbfilename", ")", "=", "newv", "if", "self", ".", "_db", ":", "self", ".", "_db", ".", "close", "(", "True", ")", "self", ".", "_db", "=", "None", "self", ".", "_db", "=", ...
https://github.com/plasticityai/supersqlite/blob/d74da749c6fa5df021df3968b854b9a59f829e17/supersqlite/third_party/_apsw/tools/shell.py#L160-L167
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/asyncio/transports.py
python
WriteTransport.abort
(self)
Close the transport immediately. Buffered data will be lost. No more data will be received. The protocol's connection_lost() method will (eventually) be called with None as its argument.
Close the transport immediately.
[ "Close", "the", "transport", "immediately", "." ]
def abort(self): """Close the transport immediately. Buffered data will be lost. No more data will be received. The protocol's connection_lost() method will (eventually) be called with None as its argument. """ raise NotImplementedError
[ "def", "abort", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/asyncio/transports.py#L132-L139
intuition-io/intuition
cd517e6b3b315a743eb4d0d0dc294e264ab913ce
intuition/api/algorithm.py
python
TradingFactory.warm
(self, data)
Called at the first handle_data frame
Called at the first handle_data frame
[ "Called", "at", "the", "first", "handle_data", "frame" ]
def warm(self, data): ''' Called at the first handle_data frame ''' pass
[ "def", "warm", "(", "self", ",", "data", ")", ":", "pass" ]
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/api/algorithm.py#L64-L66
WerWolv/EdiZon_CheatsConfigsAndScripts
d16d36c7509c01dca770f402babd83ff2e9ae6e7
Scripts/lib/python3.5/mailbox.py
python
Mailbox.iteritems
(self)
Return an iterator over (key, message) tuples.
Return an iterator over (key, message) tuples.
[ "Return", "an", "iterator", "over", "(", "key", "message", ")", "tuples", "." ]
def iteritems(self): """Return an iterator over (key, message) tuples.""" for key in self.iterkeys(): try: value = self[key] except KeyError: continue yield (key, value)
[ "def", "iteritems", "(", "self", ")", ":", "for", "key", "in", "self", ".", "iterkeys", "(", ")", ":", "try", ":", "value", "=", "self", "[", "key", "]", "except", "KeyError", ":", "continue", "yield", "(", "key", ",", "value", ")" ]
https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/mailbox.py#L120-L127
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python3-alpha/python-libs/pyxmpp2/exceptions.py
python
ProtocolError.log_ignored
(self)
Log message via the "pyxmpp.ProtocolError.ignored" logger.
Log message via the "pyxmpp.ProtocolError.ignored" logger.
[ "Log", "message", "via", "the", "pyxmpp", ".", "ProtocolError", ".", "ignored", "logger", "." ]
def log_ignored(self): """Log message via the "pyxmpp.ProtocolError.ignored" logger.""" self.logger_ignored.debug("Protocol error detected: {0}" .format(self.message))
[ "def", "log_ignored", "(", "self", ")", ":", "self", ".", "logger_ignored", ".", "debug", "(", "\"Protocol error detected: {0}\"", ".", "format", "(", "self", ".", "message", ")", ")" ]
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python3-alpha/python-libs/pyxmpp2/exceptions.py#L173-L176
datacenter/nexus9000
73b2540db588e60bba79a95b5b0e7d6ea00024c6
nexusdash/utils/fetchcliout.py
python
FetchCliOut.__init__
(self, target_url, username, password, **kwargs)
@param target_url: URL format. For telnet/ssh use, telnet://hostip:port or ssh://hostip:port For NXAPI, use http://hostip/ins @param username: Username of device @param password: Password of device
[]
def __init__(self, target_url, username, password, **kwargs): ''' @param target_url: URL format. For telnet/ssh use, telnet://hostip:port or ssh://hostip:port For NXAPI, use http://hostip/ins @param username: Username of device @param password: Password of device ''' self.username = username self.password = password self.target_url = target_url parsed_url = urlparse(target_url) self.hostname = parsed_url.hostname self.port = parsed_url.port self.error_online = '' self.health_statuses = list() # Format: [(description, healthy)] self._is_online = False
[ "def", "__init__", "(", "self", ",", "target_url", ",", "username", ",", "password", ",", "*", "*", "kwargs", ")", ":", "self", ".", "username", "=", "username", "self", ".", "password", "=", "password", "self", ".", "target_url", "=", "target_url", "par...
https://github.com/datacenter/nexus9000/blob/73b2540db588e60bba79a95b5b0e7d6ea00024c6/nexusdash/utils/fetchcliout.py#L19-L37
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/setuptools/__init__.py
python
PackageFinder._find_packages_iter
(cls, where, exclude, include)
All the packages found in 'where' that pass the 'include' filter, but not the 'exclude' filter.
All the packages found in 'where' that pass the 'include' filter, but not the 'exclude' filter.
[ "All", "the", "packages", "found", "in", "where", "that", "pass", "the", "include", "filter", "but", "not", "the", "exclude", "filter", "." ]
def _find_packages_iter(cls, where, exclude, include): """ All the packages found in 'where' that pass the 'include' filter, but not the 'exclude' filter. """ for root, dirs, files in os.walk(where, followlinks=True): # Copy dirs to iterate over it, then empty dirs. all_dirs = dirs[:] dirs[:] = [] for dir in all_dirs: full_path = os.path.join(root, dir) rel_path = os.path.relpath(full_path, where) package = rel_path.replace(os.path.sep, '.') # Skip directory trees that are not valid packages if ('.' in dir or not cls._looks_like_package(full_path)): continue # Should this package be included? if include(package) and not exclude(package): yield package # Keep searching subdirectories, as there may be more packages # down there, even if the parent was excluded. dirs.append(dir)
[ "def", "_find_packages_iter", "(", "cls", ",", "where", ",", "exclude", ",", "include", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "where", ",", "followlinks", "=", "True", ")", ":", "# Copy dirs to iterate over it, t...
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/setuptools/__init__.py#L63-L88
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/munkres.py
python
Munkres.__convert_path
(self, path, count)
[]
def __convert_path(self, path, count): for i in range(count+1): if self.marked[path[i][0]][path[i][1]] == 1: self.marked[path[i][0]][path[i][1]] = 0 else: self.marked[path[i][0]][path[i][1]] = 1
[ "def", "__convert_path", "(", "self", ",", "path", ",", "count", ")", ":", "for", "i", "in", "range", "(", "count", "+", "1", ")", ":", "if", "self", ".", "marked", "[", "path", "[", "i", "]", "[", "0", "]", "]", "[", "path", "[", "i", "]", ...
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/munkres.py#L652-L657
ros/ros_comm
52b0556dadf3ec0c0bc72df4fc202153a53b539e
clients/rospy/src/rospy/impl/masterslave.py
python
ROSHandler.getSubscriptions
(self, caller_id)
return 1, "subscriptions", get_topic_manager().get_subscriptions()
Retrieve a list of topics that this node subscribes to. @param caller_id: ROS caller id @type caller_id: str @return: list of topics this node subscribes to. @rtype: [int, str, [ [topic1, topicType1]...[topicN, topicTypeN]]]
Retrieve a list of topics that this node subscribes to.
[ "Retrieve", "a", "list", "of", "topics", "that", "this", "node", "subscribes", "to", "." ]
def getSubscriptions(self, caller_id): """ Retrieve a list of topics that this node subscribes to. @param caller_id: ROS caller id @type caller_id: str @return: list of topics this node subscribes to. @rtype: [int, str, [ [topic1, topicType1]...[topicN, topicTypeN]]] """ return 1, "subscriptions", get_topic_manager().get_subscriptions()
[ "def", "getSubscriptions", "(", "self", ",", "caller_id", ")", ":", "return", "1", ",", "\"subscriptions\"", ",", "get_topic_manager", "(", ")", ".", "get_subscriptions", "(", ")" ]
https://github.com/ros/ros_comm/blob/52b0556dadf3ec0c0bc72df4fc202153a53b539e/clients/rospy/src/rospy/impl/masterslave.py#L384-L392
tornadoweb/tornado
208672f3bf6cbb7e37f54c356e02a71ca29f1e02
tornado/web.py
python
RequestHandler.get_cookie
(self, name: str, default: Optional[str] = None)
return default
Returns the value of the request cookie with the given name. If the named cookie is not present, returns ``default``. This method only returns cookies that were present in the request. It does not see the outgoing cookies set by `set_cookie` in this handler.
Returns the value of the request cookie with the given name.
[ "Returns", "the", "value", "of", "the", "request", "cookie", "with", "the", "given", "name", "." ]
def get_cookie(self, name: str, default: Optional[str] = None) -> Optional[str]: """Returns the value of the request cookie with the given name. If the named cookie is not present, returns ``default``. This method only returns cookies that were present in the request. It does not see the outgoing cookies set by `set_cookie` in this handler. """ if self.request.cookies is not None and name in self.request.cookies: return self.request.cookies[name].value return default
[ "def", "get_cookie", "(", "self", ",", "name", ":", "str", ",", "default", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Optional", "[", "str", "]", ":", "if", "self", ".", "request", ".", "cookies", "is", "not", "None", "and", "name",...
https://github.com/tornadoweb/tornado/blob/208672f3bf6cbb7e37f54c356e02a71ca29f1e02/tornado/web.py#L579-L590
ray-project/ray
703c1610348615dcb8c2d141a0c46675084660f5
python/ray/autoscaler/_private/command_runner.py
python
_with_environment_variables
(cmd: str, environment_variables: Dict[str, object])
return all_vars + cmd
Prepend environment variables to a shell command. Args: cmd (str): The base command. environment_variables (Dict[str, object]): The set of environment variables. If an environment variable value is a dict, it will automatically be converted to a one line yaml string.
Prepend environment variables to a shell command.
[ "Prepend", "environment", "variables", "to", "a", "shell", "command", "." ]
def _with_environment_variables(cmd: str, environment_variables: Dict[str, object]): """Prepend environment variables to a shell command. Args: cmd (str): The base command. environment_variables (Dict[str, object]): The set of environment variables. If an environment variable value is a dict, it will automatically be converted to a one line yaml string. """ as_strings = [] for key, val in environment_variables.items(): val = json.dumps(val, separators=(",", ":")) s = "export {}={};".format(key, quote(val)) as_strings.append(s) all_vars = "".join(as_strings) return all_vars + cmd
[ "def", "_with_environment_variables", "(", "cmd", ":", "str", ",", "environment_variables", ":", "Dict", "[", "str", ",", "object", "]", ")", ":", "as_strings", "=", "[", "]", "for", "key", ",", "val", "in", "environment_variables", ".", "items", "(", ")",...
https://github.com/ray-project/ray/blob/703c1610348615dcb8c2d141a0c46675084660f5/python/ray/autoscaler/_private/command_runner.py#L82-L99
ivre/ivre
5728855b51c0ae2e59450a1c3a782febcad2128b
ivre/active/data.py
python
cleanup_synack_honeypot_host
(host: NmapHost, update_openports: bool = True)
This function will clean the `host` record if it has too many (at least `VIEW_SYNACK_HONEYPOT_COUNT`) open ports that may be "syn-ack" honeypots (which means, ports for which is_real_service_port() returns False).
This function will clean the `host` record if it has too many (at least `VIEW_SYNACK_HONEYPOT_COUNT`) open ports that may be "syn-ack" honeypots (which means, ports for which is_real_service_port() returns False).
[ "This", "function", "will", "clean", "the", "host", "record", "if", "it", "has", "too", "many", "(", "at", "least", "VIEW_SYNACK_HONEYPOT_COUNT", ")", "open", "ports", "that", "may", "be", "syn", "-", "ack", "honeypots", "(", "which", "means", "ports", "fo...
def cleanup_synack_honeypot_host(host: NmapHost, update_openports: bool = True) -> None: """This function will clean the `host` record if it has too many (at least `VIEW_SYNACK_HONEYPOT_COUNT`) open ports that may be "syn-ack" honeypots (which means, ports for which is_real_service_port() returns False). """ if VIEW_SYNACK_HONEYPOT_COUNT is None: return n_ports = len(host.get("ports", [])) if n_ports < VIEW_SYNACK_HONEYPOT_COUNT: return # check if we have too many open ports that could be "syn-ack # honeypots"... newports = [port for port in host["ports"] if is_real_service_port(port)] if n_ports - len(newports) > VIEW_SYNACK_HONEYPOT_COUNT: # ... if so, keep only the ports that cannot be "syn-ack # honeypots" host["ports"] = newports host["synack_honeypot"] = True if update_openports: set_openports_attribute(host)
[ "def", "cleanup_synack_honeypot_host", "(", "host", ":", "NmapHost", ",", "update_openports", ":", "bool", "=", "True", ")", "->", "None", ":", "if", "VIEW_SYNACK_HONEYPOT_COUNT", "is", "None", ":", "return", "n_ports", "=", "len", "(", "host", ".", "get", "...
https://github.com/ivre/ivre/blob/5728855b51c0ae2e59450a1c3a782febcad2128b/ivre/active/data.py#L243-L264
ynhacler/RedKindle
7c970920dc840f869e38cbda480d630cc2e7b200
rq2/job.py
python
get_current_job
()
return Job.fetch(job_id)
Returns the Job instance that is currently being executed. If this function is invoked from outside a job context, None is returned.
Returns the Job instance that is currently being executed. If this function is invoked from outside a job context, None is returned.
[ "Returns", "the", "Job", "instance", "that", "is", "currently", "being", "executed", ".", "If", "this", "function", "is", "invoked", "from", "outside", "a", "job", "context", "None", "is", "returned", "." ]
def get_current_job(): """Returns the Job instance that is currently being executed. If this function is invoked from outside a job context, None is returned. """ job_id = _job_stack.top if job_id is None: return None return Job.fetch(job_id)
[ "def", "get_current_job", "(", ")", ":", "job_id", "=", "_job_stack", ".", "top", "if", "job_id", "is", "None", ":", "return", "None", "return", "Job", ".", "fetch", "(", "job_id", ")" ]
https://github.com/ynhacler/RedKindle/blob/7c970920dc840f869e38cbda480d630cc2e7b200/rq2/job.py#L59-L66
BlackLight/platypush
a6b552504e2ac327c94f3a28b607061b6b60cf36
platypush/plugins/music/mpd/__init__.py
python
MusicMpdPlugin.pause
(self)
Pause playback
Pause playback
[ "Pause", "playback" ]
def pause(self): """ Pause playback """ status = self.status().output['state'] if status == 'play': return self._exec('pause') else: return self._exec('play')
[ "def", "pause", "(", "self", ")", ":", "status", "=", "self", ".", "status", "(", ")", ".", "output", "[", "'state'", "]", "if", "status", "==", "'play'", ":", "return", "self", ".", "_exec", "(", "'pause'", ")", "else", ":", "return", "self", ".",...
https://github.com/BlackLight/platypush/blob/a6b552504e2ac327c94f3a28b607061b6b60cf36/platypush/plugins/music/mpd/__init__.py#L119-L126
Pylons/pyramid
0b24ac16cc04746b25cf460f1497c157f6d3d6f4
src/pyramid/interfaces.py
python
IResponse.app_iter_range
(start, stop)
Return a new app_iter built from the response app_iter that serves up only the given start:stop range.
Return a new app_iter built from the response app_iter that serves up only the given start:stop range.
[ "Return", "a", "new", "app_iter", "built", "from", "the", "response", "app_iter", "that", "serves", "up", "only", "the", "given", "start", ":", "stop", "range", "." ]
def app_iter_range(start, stop): """Return a new app_iter built from the response app_iter that serves up only the given start:stop range."""
[ "def", "app_iter_range", "(", "start", ",", "stop", ")", ":" ]
https://github.com/Pylons/pyramid/blob/0b24ac16cc04746b25cf460f1497c157f6d3d6f4/src/pyramid/interfaces.py#L112-L114
pandas-dev/pandas
5ba7d714014ae8feaccc0dd4a98890828cf2832d
pandas/core/arrays/datetimes.py
python
maybe_convert_dtype
(data, copy: bool)
return data, copy
Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed
Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate.
[ "Convert", "data", "based", "on", "dtype", "conventions", "issuing", "deprecation", "warnings", "or", "errors", "where", "appropriate", "." ]
def maybe_convert_dtype(data, copy: bool): """ Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed """ if not hasattr(data, "dtype"): # e.g. collections.deque return data, copy if is_float_dtype(data.dtype): # Note: we must cast to datetime64[ns] here in order to treat these # as wall-times instead of UTC timestamps. data = data.astype(DT64NS_DTYPE) copy = False # TODO: deprecate this behavior to instead treat symmetrically # with integer dtypes. See discussion in GH#23675 elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype): # GH#29794 enforcing deprecation introduced in GH#23539 raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]") elif is_period_dtype(data.dtype): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError( "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead" ) elif is_categorical_dtype(data.dtype): # GH#18664 preserve tz in going DTI->Categorical->DTI # TODO: cases where we need to do another pass through this func, # e.g. the categories are timedelta64s data = data.categories.take(data.codes, fill_value=NaT)._values copy = False elif is_extension_array_dtype(data.dtype) and not is_datetime64tz_dtype(data.dtype): # Includes categorical # TODO: We have no tests for these data = np.array(data, dtype=np.object_) copy = False return data, copy
[ "def", "maybe_convert_dtype", "(", "data", ",", "copy", ":", "bool", ")", ":", "if", "not", "hasattr", "(", "data", ",", "\"dtype\"", ")", ":", "# e.g. collections.deque", "return", "data", ",", "copy", "if", "is_float_dtype", "(", "data", ".", "dtype", ")...
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/core/arrays/datetimes.py#L2243-L2297
1012598167/flask_mongodb_game
60c7e0351586656ec38f851592886338e50b4110
python_flask/venv/Lib/site-packages/click/core.py
python
Context.lookup_default
(self, name)
Looks up the default for a parameter name. This by default looks into the :attr:`default_map` if available.
Looks up the default for a parameter name. This by default looks into the :attr:`default_map` if available.
[ "Looks", "up", "the", "default", "for", "a", "parameter", "name", ".", "This", "by", "default", "looks", "into", "the", ":", "attr", ":", "default_map", "if", "available", "." ]
def lookup_default(self, name): """Looks up the default for a parameter name. This by default looks into the :attr:`default_map` if available. """ if self.default_map is not None: rv = self.default_map.get(name) if callable(rv): rv = rv() return rv
[ "def", "lookup_default", "(", "self", ",", "name", ")", ":", "if", "self", ".", "default_map", "is", "not", "None", ":", "rv", "=", "self", ".", "default_map", ".", "get", "(", "name", ")", "if", "callable", "(", "rv", ")", ":", "rv", "=", "rv", ...
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/click/core.py#L480-L488
pandas-dev/pandas
5ba7d714014ae8feaccc0dd4a98890828cf2832d
pandas/io/pytables.py
python
HDFStore.info
(self)
return output
Print detailed information on the store. Returns ------- str
Print detailed information on the store.
[ "Print", "detailed", "information", "on", "the", "store", "." ]
def info(self) -> str: """ Print detailed information on the store. Returns ------- str """ path = pprint_thing(self._path) output = f"{type(self)}\nFile path: {path}\n" if self.is_open: lkeys = sorted(self.keys()) if len(lkeys): keys = [] values = [] for k in lkeys: try: s = self.get_storer(k) if s is not None: keys.append(pprint_thing(s.pathname or k)) values.append(pprint_thing(s or "invalid_HDFStore node")) except AssertionError: # surface any assertion errors for e.g. debugging raise except Exception as detail: keys.append(k) dstr = pprint_thing(detail) values.append(f"[invalid_HDFStore node: {dstr}]") output += adjoin(12, keys, values) else: output += "Empty" else: output += "File is CLOSED" return output
[ "def", "info", "(", "self", ")", "->", "str", ":", "path", "=", "pprint_thing", "(", "self", ".", "_path", ")", "output", "=", "f\"{type(self)}\\nFile path: {path}\\n\"", "if", "self", ".", "is_open", ":", "lkeys", "=", "sorted", "(", "self", ".", "keys", ...
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/io/pytables.py#L1581-L1618
man-group/mdf
4b2c78084467791ad883c0b4c53832ad70fc96ef
mdf/context.py
python
NodeOrBuilderTimer.__exit__
(self, exc_type, exc_value, traceback)
[]
def __exit__(self, exc_type, exc_value, traceback): self.ctx._stop_timer()
[ "def", "__exit__", "(", "self", ",", "exc_type", ",", "exc_value", ",", "traceback", ")", ":", "self", ".", "ctx", ".", "_stop_timer", "(", ")" ]
https://github.com/man-group/mdf/blob/4b2c78084467791ad883c0b4c53832ad70fc96ef/mdf/context.py#L163-L164
rowliny/DiffHelper
ab3a96f58f9579d0023aed9ebd785f4edf26f8af
Tool/SitePackages/click/core.py
python
augment_usage_errors
( ctx: "Context", param: t.Optional["Parameter"] = None )
Context manager that attaches extra information to exceptions.
Context manager that attaches extra information to exceptions.
[ "Context", "manager", "that", "attaches", "extra", "information", "to", "exceptions", "." ]
def augment_usage_errors( ctx: "Context", param: t.Optional["Parameter"] = None ) -> t.Iterator[None]: """Context manager that attaches extra information to exceptions.""" try: yield except BadParameter as e: if e.ctx is None: e.ctx = ctx if param is not None and e.param is None: e.param = param raise except UsageError as e: if e.ctx is None: e.ctx = ctx raise
[ "def", "augment_usage_errors", "(", "ctx", ":", "\"Context\"", ",", "param", ":", "t", ".", "Optional", "[", "\"Parameter\"", "]", "=", "None", ")", "->", "t", ".", "Iterator", "[", "None", "]", ":", "try", ":", "yield", "except", "BadParameter", "as", ...
https://github.com/rowliny/DiffHelper/blob/ab3a96f58f9579d0023aed9ebd785f4edf26f8af/Tool/SitePackages/click/core.py#L97-L112
awslabs/deeplearning-benchmark
3e9a906422b402869537f91056ae771b66487a8e
tensorflow_benchmark/tf_cnn_benchmarks/variable_mgr.py
python
VariableMgrDistributedReplicated.get_gradients_to_apply
(self, device_num, gradient_state)
return avg_grads
[]
def get_gradients_to_apply(self, device_num, gradient_state): device_grads = gradient_state # From 2nd result of preprocess_device_grads. avg_grads = aggregate_gradients_using_copy_with_device_selection( self.benchmark_cnn, device_grads, use_mean=True) # Make shadow variable on a parameter server for each original trainable # variable. for i, (g, v) in enumerate(avg_grads): my_name = PS_SHADOW_VAR_PREFIX + '/' + v.name if my_name.endswith(':0'): my_name = my_name[:-2] new_v = tf.get_variable(my_name, dtype=v.dtype.base_dtype, initializer=v.initial_value, trainable=True) avg_grads[i] = (g, new_v) return avg_grads
[ "def", "get_gradients_to_apply", "(", "self", ",", "device_num", ",", "gradient_state", ")", ":", "device_grads", "=", "gradient_state", "# From 2nd result of preprocess_device_grads.", "avg_grads", "=", "aggregate_gradients_using_copy_with_device_selection", "(", "self", ".", ...
https://github.com/awslabs/deeplearning-benchmark/blob/3e9a906422b402869537f91056ae771b66487a8e/tensorflow_benchmark/tf_cnn_benchmarks/variable_mgr.py#L881-L896
schutzwerk/CANalyzat0r
6bc251e69f73d9f8554bcc6134354e18ab8ca426
src/UDSTab.py
python
UDSTab.fuzzingModeChanged
(self)
This gets called if the ComboBox gets changed to update the active UDS fuzzing mode. The other GUI elements will be set and enabled depending on the selected mode.
This gets called if the ComboBox gets changed to update the active UDS fuzzing mode. The other GUI elements will be set and enabled depending on the selected mode.
[ "This", "gets", "called", "if", "the", "ComboBox", "gets", "changed", "to", "update", "the", "active", "UDS", "fuzzing", "mode", ".", "The", "other", "GUI", "elements", "will", "be", "set", "and", "enabled", "depending", "on", "the", "selected", "mode", "....
def fuzzingModeChanged(self): """ This gets called if the ComboBox gets changed to update the active UDS fuzzing mode. The other GUI elements will be set and enabled depending on the selected mode. """ selectedData = self.comboBoxUDSMode.itemData( self.comboBoxUDSMode.currentIndex()) QtCore.QCoreApplication.processEvents()
[ "def", "fuzzingModeChanged", "(", "self", ")", ":", "selectedData", "=", "self", ".", "comboBoxUDSMode", ".", "itemData", "(", "self", ".", "comboBoxUDSMode", ".", "currentIndex", "(", ")", ")", "QtCore", ".", "QCoreApplication", ".", "processEvents", "(", ")"...
https://github.com/schutzwerk/CANalyzat0r/blob/6bc251e69f73d9f8554bcc6134354e18ab8ca426/src/UDSTab.py#L300-L309
1040003585/WebScrapingWithPython
a770fa5b03894076c8c9539b1ffff34424ffc016
portia_examle/lib/python2.7/site-packages/setuptools/command/bdist_egg.py
python
bdist_egg.get_ext_outputs
(self)
return all_outputs, ext_outputs
Get a list of relative paths to C extensions in the output distro
Get a list of relative paths to C extensions in the output distro
[ "Get", "a", "list", "of", "relative", "paths", "to", "C", "extensions", "in", "the", "output", "distro" ]
def get_ext_outputs(self): """Get a list of relative paths to C extensions in the output distro""" all_outputs = [] ext_outputs = [] paths = {self.bdist_dir: ''} for base, dirs, files in os.walk(self.bdist_dir): for filename in files: if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: all_outputs.append(paths[base] + filename) for filename in dirs: paths[os.path.join(base, filename)] = (paths[base] + filename + '/') if self.distribution.has_ext_modules(): build_cmd = self.get_finalized_command('build_ext') for ext in build_cmd.extensions: if isinstance(ext, Library): continue fullname = build_cmd.get_ext_fullname(ext.name) filename = build_cmd.get_ext_filename(fullname) if not os.path.basename(filename).startswith('dl-'): if os.path.exists(os.path.join(self.bdist_dir, filename)): ext_outputs.append(filename) return all_outputs, ext_outputs
[ "def", "get_ext_outputs", "(", "self", ")", ":", "all_outputs", "=", "[", "]", "ext_outputs", "=", "[", "]", "paths", "=", "{", "self", ".", "bdist_dir", ":", "''", "}", "for", "base", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "self"...
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/portia_examle/lib/python2.7/site-packages/setuptools/command/bdist_egg.py#L298-L324
microsoft/ptvsd
99c8513921021d2cc7cd82e132b65c644c256768
src/ptvsd/_vendored/pydevd/pydev_ipython/matplotlibtools.py
python
flag_calls
(func)
return wrapper
Wrap a function to detect and flag when it gets called. This is a decorator which takes a function and wraps it in a function with a 'called' attribute. wrapper.called is initialized to False. The wrapper.called attribute is set to False right before each call to the wrapped function, so if the call fails it remains False. After the call completes, wrapper.called is set to True and the output is returned. Testing for truth in wrapper.called allows you to determine if a call to func() was attempted and succeeded.
Wrap a function to detect and flag when it gets called.
[ "Wrap", "a", "function", "to", "detect", "and", "flag", "when", "it", "gets", "called", "." ]
def flag_calls(func): """Wrap a function to detect and flag when it gets called. This is a decorator which takes a function and wraps it in a function with a 'called' attribute. wrapper.called is initialized to False. The wrapper.called attribute is set to False right before each call to the wrapped function, so if the call fails it remains False. After the call completes, wrapper.called is set to True and the output is returned. Testing for truth in wrapper.called allows you to determine if a call to func() was attempted and succeeded.""" # don't wrap twice if hasattr(func, 'called'): return func def wrapper(*args, **kw): wrapper.called = False out = func(*args, **kw) wrapper.called = True return out wrapper.called = False wrapper.__doc__ = func.__doc__ return wrapper
[ "def", "flag_calls", "(", "func", ")", ":", "# don't wrap twice", "if", "hasattr", "(", "func", ",", "'called'", ")", ":", "return", "func", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "wrapper", ".", "called", "=", "False", "o...
https://github.com/microsoft/ptvsd/blob/99c8513921021d2cc7cd82e132b65c644c256768/src/ptvsd/_vendored/pydevd/pydev_ipython/matplotlibtools.py#L110-L135
sony/nnabla-examples
068be490aacf73740502a1c3b10f8b2d15a52d32
responsible_ai/adversarial_debiasing/utils.py
python
get_fairness
(y, privileged_group, preds)
return demographic_parity_difference, equal_opportunity_difference, average_abs_odds_difference
Compute multiple fairness metrics for the classifier: 1. Demographic parity 2. Equal opportunity 3. Equalized odds Args: y : True data (or target, ground truth) privileged_group : list of privileged group values preds : data predicted (calculated, output) by your model Returns: demographic_parity_difference (float): Demographic parity equal_opportunity_difference (float): Equal opportunity average_abs_odds_difference(float): Equalized odds
Compute multiple fairness metrics for the classifier: 1. Demographic parity 2. Equal opportunity 3. Equalized odds
[ "Compute", "multiple", "fairness", "metrics", "for", "the", "classifier", ":", "1", ".", "Demographic", "parity", "2", ".", "Equal", "opportunity", "3", ".", "Equalized", "odds" ]
def get_fairness(y, privileged_group, preds): """ Compute multiple fairness metrics for the classifier: 1. Demographic parity 2. Equal opportunity 3. Equalized odds Args: y : True data (or target, ground truth) privileged_group : list of privileged group values preds : data predicted (calculated, output) by your model Returns: demographic_parity_difference (float): Demographic parity equal_opportunity_difference (float): Equal opportunity average_abs_odds_difference(float): Equalized odds """ y_unprivileged, preds_unprivileged = y[privileged_group == False], preds[privileged_group == False] y_privileged, preds_privileged, = y[privileged_group], preds[privileged_group] cm_unprivileged = confusion_matrix(y_unprivileged, preds_unprivileged) cm_privileged = confusion_matrix(y_privileged, preds_privileged) unprivileged_PR = ( cm_unprivileged[1, 1] + cm_unprivileged[0, 1]) / cm_unprivileged.sum() privileged_PR = (cm_privileged[1, 1] + cm_privileged[0, 1]) / cm_privileged.sum() # compute demographic parity demographic_parity_difference = unprivileged_PR - privileged_PR unprivileged_TPR = cm_unprivileged[1, 1] / cm_unprivileged[1].sum() privileged_TPR = cm_privileged[1, 1] / cm_privileged[1].sum() # compute equal opportunity equal_opportunity_difference = unprivileged_TPR - privileged_TPR unprivileged_FPR = cm_unprivileged[0, 1] / cm_unprivileged[0].sum() privileged_FPR = cm_privileged[0, 1] / cm_privileged[0].sum() # compute Equalized odds average_abs_odds_difference = 0.5 * ( abs(unprivileged_FPR - privileged_FPR) + abs(unprivileged_TPR - privileged_TPR)) return demographic_parity_difference, equal_opportunity_difference, average_abs_odds_difference
[ "def", "get_fairness", "(", "y", ",", "privileged_group", ",", "preds", ")", ":", "y_unprivileged", ",", "preds_unprivileged", "=", "y", "[", "privileged_group", "==", "False", "]", ",", "preds", "[", "privileged_group", "==", "False", "]", "y_privileged", ","...
https://github.com/sony/nnabla-examples/blob/068be490aacf73740502a1c3b10f8b2d15a52d32/responsible_ai/adversarial_debiasing/utils.py#L91-L134
python-excel/xlrd
0c4e80b3d48dfe2250ac4e514c8231a742fee221
xlrd/biffh.py
python
unpack_unicode
(data, pos, lenlen=2)
return strg
Return unicode_strg
Return unicode_strg
[ "Return", "unicode_strg" ]
def unpack_unicode(data, pos, lenlen=2): "Return unicode_strg" nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0] if not nchars: # Ambiguous whether 0-length string should have an "options" byte. # Avoid crash if missing. return UNICODE_LITERAL("") pos += lenlen options = BYTES_ORD(data[pos]) pos += 1 # phonetic = options & 0x04 # richtext = options & 0x08 if options & 0x08: # rt = unpack('<H', data[pos:pos+2])[0] # unused pos += 2 if options & 0x04: # sz = unpack('<i', data[pos:pos+4])[0] # unused pos += 4 if options & 0x01: # Uncompressed UTF-16-LE rawstrg = data[pos:pos+2*nchars] # if DEBUG: print "nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg) strg = unicode(rawstrg, 'utf_16_le') # pos += 2*nchars else: # Note: this is COMPRESSED (not ASCII!) encoding!!! # Merely returning the raw bytes would work OK 99.99% of the time # if the local codepage was cp1252 -- however this would rapidly go pear-shaped # for other codepages so we grit our Anglocentric teeth and return Unicode :-) strg = unicode(data[pos:pos+nchars], "latin_1") # pos += nchars # if richtext: # pos += 4 * rt # if phonetic: # pos += sz # return (strg, pos) return strg
[ "def", "unpack_unicode", "(", "data", ",", "pos", ",", "lenlen", "=", "2", ")", ":", "nchars", "=", "unpack", "(", "'<'", "+", "'BH'", "[", "lenlen", "-", "1", "]", ",", "data", "[", "pos", ":", "pos", "+", "lenlen", "]", ")", "[", "0", "]", ...
https://github.com/python-excel/xlrd/blob/0c4e80b3d48dfe2250ac4e514c8231a742fee221/xlrd/biffh.py#L262-L299
nasa-gibs/onearth
accb40171af208934971ae027854cc2535e64968
src/mrfgen/mrfgen.py
python
mrf_block_align
(extents, xmin, ymin, xmax, ymax, target_x, target_y, mrf_blocksize)
return (str(ulx), str(uly), str(lrx), str(lry))
Aligns granule image to fit in a MRF block Arguments: extents -- spatial extents as ulx, uly, lrx, lry xmin -- Minimum x value ymin -- Minimum y value xmax -- Maximum x value ymax -- Maximum y value target_x -- The target resolution for x target_y -- The target resolution for y mrf_blocksize -- The block size of MRF tiles
Aligns granule image to fit in a MRF block Arguments: extents -- spatial extents as ulx, uly, lrx, lry xmin -- Minimum x value ymin -- Minimum y value xmax -- Maximum x value ymax -- Maximum y value target_x -- The target resolution for x target_y -- The target resolution for y mrf_blocksize -- The block size of MRF tiles
[ "Aligns", "granule", "image", "to", "fit", "in", "a", "MRF", "block", "Arguments", ":", "extents", "--", "spatial", "extents", "as", "ulx", "uly", "lrx", "lry", "xmin", "--", "Minimum", "x", "value", "ymin", "--", "Minimum", "y", "value", "xmax", "--", ...
def mrf_block_align(extents, xmin, ymin, xmax, ymax, target_x, target_y, mrf_blocksize): """ Aligns granule image to fit in a MRF block Arguments: extents -- spatial extents as ulx, uly, lrx, lry xmin -- Minimum x value ymin -- Minimum y value xmax -- Maximum x value ymax -- Maximum y value target_x -- The target resolution for x target_y -- The target resolution for y mrf_blocksize -- The block size of MRF tiles """ extents = [Decimal(x) for x in extents] ulx, uly, lrx, lry = extents x_len = abs(Decimal(xmax)-Decimal(xmin)) y_len = abs(Decimal(ymax)-Decimal(ymin)) x_res = Decimal(target_x)/x_len y_res = Decimal(target_y)/y_len x_size = abs(lrx-ulx) * x_res y_size = abs(lry-uly) * y_res log_info_mssg ("x-res: " + str(x_res) + ", y-res: " + str(y_res) + ", x-size: " + str(x_size) + ", y-size: " + str(y_size)) # figure out appropriate block size that covers extent of granule block_x = Decimal(mrf_blocksize) block_y = Decimal(mrf_blocksize) while (block_x*2) < x_size: block_x = block_x * 2 while (block_y*2) < y_size: block_y = block_y * 2 block = Decimal(str(max([block_x,block_y]))) log_info_mssg("Insert block size %s - (x: %s y: %s)" % (str(block), str(block_x), str(block_y))) # calculate new extents that align with MRF blocks ulx = Decimal(Decimal(str(math.floor((ulx*x_res) / block))) * block) / x_res uly = Decimal(Decimal(str(math.ceil((uly*y_res) / block))) * block) / y_res lrx = Decimal(Decimal(str(math.ceil((lrx*x_res) / block))) * block) / x_res lry = Decimal(Decimal(str(math.floor((lry*y_res) / block))) * block) / y_res # snap to min/max extents if on the edge if ulx < Decimal(xmin): ulx = xmin if uly > Decimal(ymax): uly = ymax if lrx > Decimal(xmax): lrx = Decimal(xmax) if lry < Decimal(ymin): lry = Decimal(ymin) return (str(ulx), str(uly), str(lrx), str(lry))
[ "def", "mrf_block_align", "(", "extents", ",", "xmin", ",", "ymin", ",", "xmax", ",", "ymax", ",", "target_x", ",", "target_y", ",", "mrf_blocksize", ")", ":", "extents", "=", "[", "Decimal", "(", "x", ")", "for", "x", "in", "extents", "]", "ulx", ",...
https://github.com/nasa-gibs/onearth/blob/accb40171af208934971ae027854cc2535e64968/src/mrfgen/mrfgen.py#L318-L368
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v1_ingress_service_backend.py
python
V1IngressServiceBackend.name
(self, name)
Sets the name of this V1IngressServiceBackend. Name is the referenced service. The service must exist in the same namespace as the Ingress object. # noqa: E501 :param name: The name of this V1IngressServiceBackend. # noqa: E501 :type: str
Sets the name of this V1IngressServiceBackend.
[ "Sets", "the", "name", "of", "this", "V1IngressServiceBackend", "." ]
def name(self, name): """Sets the name of this V1IngressServiceBackend. Name is the referenced service. The service must exist in the same namespace as the Ingress object. # noqa: E501 :param name: The name of this V1IngressServiceBackend. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501 raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name
[ "def", "name", "(", "self", ",", "name", ")", ":", "if", "self", ".", "local_vars_configuration", ".", "client_side_validation", "and", "name", "is", "None", ":", "# noqa: E501", "raise", "ValueError", "(", "\"Invalid value for `name`, must not be `None`\"", ")", "#...
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_ingress_service_backend.py#L71-L82
hardbyte/python-can
e7a2b040ee1f0cdd7fd77fbfef0454353166b333
can/interfaces/vector/canlib.py
python
VectorBus.__init__
( self, channel: Union[int, Sequence[int], str], can_filters: Optional[CanFilters] = None, poll_interval: float = 0.01, receive_own_messages: bool = False, bitrate: int = None, rx_queue_size: int = 2 ** 14, app_name: str = "CANalyzer", serial: int = None, fd: bool = False, data_bitrate: int = None, sjw_abr: int = 2, tseg1_abr: int = 6, tseg2_abr: int = 3, sjw_dbr: int = 2, tseg1_dbr: int = 6, tseg2_dbr: int = 3, **kwargs, )
:param channel: The channel indexes to create this bus with. Can also be a single integer or a comma separated string. :param can_filters: See :class:`can.BusABC`. :param receive_own_messages: See :class:`can.BusABC`. :param poll_interval: Poll interval in seconds. :param bitrate: Bitrate in bits/s. :param rx_queue_size: Number of messages in receive queue (power of 2). CAN: range `16…32768` CAN-FD: range `8192…524288` :param app_name: Name of application in *Vector Hardware Config*. If set to `None`, the channel should be a global channel index. :param serial: Serial number of the hardware to be used. If set, the channel parameter refers to the channels ONLY on the specified hardware. If set, the `app_name` does not have to be previously defined in *Vector Hardware Config*. :param fd: If CAN-FD frames should be supported. :param data_bitrate: Which bitrate to use for data phase in CAN FD. Defaults to arbitration bitrate. :param sjw_abr: Bus timing value sample jump width (arbitration). :param tseg1_abr: Bus timing value tseg1 (arbitration) :param tseg2_abr: Bus timing value tseg2 (arbitration) :param sjw_dbr: Bus timing value sample jump width (data) :param tseg1_dbr: Bus timing value tseg1 (data) :param tseg2_dbr: Bus timing value tseg2 (data) :raise can.CanInterfaceNotImplementedError: If the current operating system is not supported or the driver could not be loaded. :raise can.CanInitializationError: If the bus could not be set up. This may or may not be a :class:`can.interfaces.vector.VectorInitializationError`.
:param channel: The channel indexes to create this bus with. Can also be a single integer or a comma separated string. :param can_filters: See :class:`can.BusABC`. :param receive_own_messages: See :class:`can.BusABC`. :param poll_interval: Poll interval in seconds. :param bitrate: Bitrate in bits/s. :param rx_queue_size: Number of messages in receive queue (power of 2). CAN: range `16…32768` CAN-FD: range `8192…524288` :param app_name: Name of application in *Vector Hardware Config*. If set to `None`, the channel should be a global channel index. :param serial: Serial number of the hardware to be used. If set, the channel parameter refers to the channels ONLY on the specified hardware. If set, the `app_name` does not have to be previously defined in *Vector Hardware Config*. :param fd: If CAN-FD frames should be supported. :param data_bitrate: Which bitrate to use for data phase in CAN FD. Defaults to arbitration bitrate. :param sjw_abr: Bus timing value sample jump width (arbitration). :param tseg1_abr: Bus timing value tseg1 (arbitration) :param tseg2_abr: Bus timing value tseg2 (arbitration) :param sjw_dbr: Bus timing value sample jump width (data) :param tseg1_dbr: Bus timing value tseg1 (data) :param tseg2_dbr: Bus timing value tseg2 (data)
[ ":", "param", "channel", ":", "The", "channel", "indexes", "to", "create", "this", "bus", "with", ".", "Can", "also", "be", "a", "single", "integer", "or", "a", "comma", "separated", "string", ".", ":", "param", "can_filters", ":", "See", ":", "class", ...
def __init__( self, channel: Union[int, Sequence[int], str], can_filters: Optional[CanFilters] = None, poll_interval: float = 0.01, receive_own_messages: bool = False, bitrate: int = None, rx_queue_size: int = 2 ** 14, app_name: str = "CANalyzer", serial: int = None, fd: bool = False, data_bitrate: int = None, sjw_abr: int = 2, tseg1_abr: int = 6, tseg2_abr: int = 3, sjw_dbr: int = 2, tseg1_dbr: int = 6, tseg2_dbr: int = 3, **kwargs, ) -> None: """ :param channel: The channel indexes to create this bus with. Can also be a single integer or a comma separated string. :param can_filters: See :class:`can.BusABC`. :param receive_own_messages: See :class:`can.BusABC`. :param poll_interval: Poll interval in seconds. :param bitrate: Bitrate in bits/s. :param rx_queue_size: Number of messages in receive queue (power of 2). CAN: range `16…32768` CAN-FD: range `8192…524288` :param app_name: Name of application in *Vector Hardware Config*. If set to `None`, the channel should be a global channel index. :param serial: Serial number of the hardware to be used. If set, the channel parameter refers to the channels ONLY on the specified hardware. If set, the `app_name` does not have to be previously defined in *Vector Hardware Config*. :param fd: If CAN-FD frames should be supported. :param data_bitrate: Which bitrate to use for data phase in CAN FD. Defaults to arbitration bitrate. :param sjw_abr: Bus timing value sample jump width (arbitration). :param tseg1_abr: Bus timing value tseg1 (arbitration) :param tseg2_abr: Bus timing value tseg2 (arbitration) :param sjw_dbr: Bus timing value sample jump width (data) :param tseg1_dbr: Bus timing value tseg1 (data) :param tseg2_dbr: Bus timing value tseg2 (data) :raise can.CanInterfaceNotImplementedError: If the current operating system is not supported or the driver could not be loaded. :raise can.CanInitializationError: If the bus could not be set up. This may or may not be a :class:`can.interfaces.vector.VectorInitializationError`. """ if os.name != "nt" and not kwargs.get("_testing", False): raise CanInterfaceNotImplementedError( f"The Vector interface is only supported on Windows, " f'but you are running "{os.name}"' ) if xldriver is None: raise CanInterfaceNotImplementedError("The Vector API has not been loaded") self.poll_interval = poll_interval if isinstance(channel, str): # must be checked before generic Sequence # Assume comma separated string of channels self.channels = [int(ch.strip()) for ch in channel.split(",")] elif isinstance(channel, int): self.channels = [channel] elif isinstance(channel, Sequence): self.channels = channel else: raise TypeError( f"Invalid type for channels parameter: {type(channel).__name__}" ) self._app_name = app_name.encode() if app_name is not None else b"" self.channel_info = "Application %s: %s" % ( app_name, ", ".join(f"CAN {ch + 1}" for ch in self.channels), ) if serial is not None: app_name = None channel_index = [] channel_configs = get_channel_configs() for channel_config in channel_configs: if channel_config.serialNumber == serial: if channel_config.hwChannel in self.channels: channel_index.append(channel_config.channelIndex) if channel_index: if len(channel_index) != len(self.channels): LOG.info( "At least one defined channel wasn't found on the specified hardware." ) self.channels = channel_index else: # Is there any better way to raise the error? raise CanInitializationError( "None of the configured channels could be found on the specified hardware." 
) xldriver.xlOpenDriver() self.port_handle = xlclass.XLportHandle(xldefine.XL_INVALID_PORTHANDLE) self.mask = 0 self.fd = fd # Get channels masks self.channel_masks = {} self.index_to_channel = {} for channel in self.channels: if app_name: # Get global channel index from application channel hw_type, hw_index, hw_channel = self.get_application_config( app_name, channel ) LOG.debug("Channel index %d found", channel) idx = xldriver.xlGetChannelIndex(hw_type, hw_index, hw_channel) if idx < 0: # Undocumented behavior! See issue #353. # If hardware is unavailable, this function returns -1. # Raise an exception as if the driver # would have signalled XL_ERR_HW_NOT_PRESENT. raise VectorInitializationError( xldefine.XL_Status.XL_ERR_HW_NOT_PRESENT, xldefine.XL_Status.XL_ERR_HW_NOT_PRESENT.name, "xlGetChannelIndex", ) else: # Channel already given as global channel idx = channel mask = 1 << idx self.channel_masks[channel] = mask self.index_to_channel[idx] = channel self.mask |= mask permission_mask = xlclass.XLaccess() # Set mask to request channel init permission if needed if bitrate or fd: permission_mask.value = self.mask if fd: xldriver.xlOpenPort( self.port_handle, self._app_name, self.mask, permission_mask, rx_queue_size, xldefine.XL_InterfaceVersion.XL_INTERFACE_VERSION_V4, xldefine.XL_BusTypes.XL_BUS_TYPE_CAN, ) else: xldriver.xlOpenPort( self.port_handle, self._app_name, self.mask, permission_mask, rx_queue_size, xldefine.XL_InterfaceVersion.XL_INTERFACE_VERSION, xldefine.XL_BusTypes.XL_BUS_TYPE_CAN, ) LOG.debug( "Open Port: PortHandle: %d, PermissionMask: 0x%X", self.port_handle.value, permission_mask.value, ) if permission_mask.value == self.mask: if fd: self.canFdConf = xlclass.XLcanFdConf() if bitrate: self.canFdConf.arbitrationBitRate = int(bitrate) else: self.canFdConf.arbitrationBitRate = 500000 self.canFdConf.sjwAbr = int(sjw_abr) self.canFdConf.tseg1Abr = int(tseg1_abr) self.canFdConf.tseg2Abr = int(tseg2_abr) if data_bitrate: self.canFdConf.dataBitRate = int(data_bitrate) else: self.canFdConf.dataBitRate = self.canFdConf.arbitrationBitRate self.canFdConf.sjwDbr = int(sjw_dbr) self.canFdConf.tseg1Dbr = int(tseg1_dbr) self.canFdConf.tseg2Dbr = int(tseg2_dbr) xldriver.xlCanFdSetConfiguration( self.port_handle, self.mask, self.canFdConf ) LOG.info( "SetFdConfig.: ABaudr.=%u, DBaudr.=%u", self.canFdConf.arbitrationBitRate, self.canFdConf.dataBitRate, ) LOG.info( "SetFdConfig.: sjwAbr=%u, tseg1Abr=%u, tseg2Abr=%u", self.canFdConf.sjwAbr, self.canFdConf.tseg1Abr, self.canFdConf.tseg2Abr, ) LOG.info( "SetFdConfig.: sjwDbr=%u, tseg1Dbr=%u, tseg2Dbr=%u", self.canFdConf.sjwDbr, self.canFdConf.tseg1Dbr, self.canFdConf.tseg2Dbr, ) else: if bitrate: xldriver.xlCanSetChannelBitrate( self.port_handle, permission_mask, bitrate ) LOG.info("SetChannelBitrate: baudr.=%u", bitrate) else: LOG.info("No init access!") # Enable/disable TX receipts tx_receipts = 1 if receive_own_messages else 0 xldriver.xlCanSetChannelMode(self.port_handle, self.mask, tx_receipts, 0) if HAS_EVENTS: self.event_handle = xlclass.XLhandle() xldriver.xlSetNotification(self.port_handle, self.event_handle, 1) else: LOG.info("Install pywin32 to avoid polling") try: xldriver.xlActivateChannel( self.port_handle, self.mask, xldefine.XL_BusTypes.XL_BUS_TYPE_CAN, 0 ) except VectorOperationError as error: self.shutdown() raise VectorInitializationError.from_generic(error) from None # Calculate time offset for absolute timestamps offset = xlclass.XLuint64() try: if time.get_clock_info("time").resolution > 1e-5: ts, perfcounter = 
time_perfcounter_correlation() try: xldriver.xlGetSyncTime(self.port_handle, offset) except VectorInitializationError: xldriver.xlGetChannelTime(self.port_handle, self.mask, offset) current_perfcounter = time.perf_counter() now = ts + (current_perfcounter - perfcounter) self._time_offset = now - offset.value * 1e-9 else: try: xldriver.xlGetSyncTime(self.port_handle, offset) except VectorInitializationError: xldriver.xlGetChannelTime(self.port_handle, self.mask, offset) self._time_offset = time.time() - offset.value * 1e-9 except VectorInitializationError: self._time_offset = 0.0 self._is_filtered = False super().__init__(channel=channel, can_filters=can_filters, **kwargs)
[ "def", "__init__", "(", "self", ",", "channel", ":", "Union", "[", "int", ",", "Sequence", "[", "int", "]", ",", "str", "]", ",", "can_filters", ":", "Optional", "[", "CanFilters", "]", "=", "None", ",", "poll_interval", ":", "float", "=", "0.01", ",...
https://github.com/hardbyte/python-can/blob/e7a2b040ee1f0cdd7fd77fbfef0454353166b333/can/interfaces/vector/canlib.py#L71-L340
tendenci/tendenci
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
tendenci/apps/memberships/forms.py
python
MembershipTypeForm.__init__
(self, *args, **kwargs)
[]
def __init__(self, *args, **kwargs): super(MembershipTypeForm, self).__init__(*args, **kwargs) self.type_exp_method_fields = type_exp_method_fields initial_list = [] if self.instance.pk: for field in self.type_exp_method_fields: field_value = getattr(self.instance, field) if field == 'fixed_option2_can_rollover' and (not field_value): field_value = '' else: if not field_value: field_value = '' initial_list.append(str(field_value)) self.fields['type_exp_method'].initial = ','.join(initial_list) else: self.fields['type_exp_method'].initial = "rolling,1,years,0,1,0,1,1,0,1,1,,1,1,,1" # a field position dictionary - so we can retrieve data later fields_pos_d = {} for i, field in enumerate(self.type_exp_method_fields): fields_pos_d[field] = (i, type_exp_method_widgets[i]) self.fields['type_exp_method'].widget = TypeExpMethodWidget(attrs={'id': 'type_exp_method'}, fields_pos_d=fields_pos_d)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "MembershipTypeForm", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "type_exp_method_fields", "=", "t...
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/memberships/forms.py#L229-L255
openstack/swift
b8d7c3dcb817504dcc0959ba52cc4ed2cf66c100
swift/obj/diskfile.py
python
_decode_metadata
(metadata)
return dict(((to_str(k), to_str(v)) for k, v in metadata.items()))
Given a metadata dict from disk, convert keys and values to native strings. :param metadata: a dict
Given a metadata dict from disk, convert keys and values to native strings.
[ "Given", "a", "metadata", "dict", "from", "disk", "convert", "keys", "and", "values", "to", "native", "strings", "." ]
def _decode_metadata(metadata): """ Given a metadata dict from disk, convert keys and values to native strings. :param metadata: a dict """ if six.PY2: def to_str(item): if isinstance(item, six.text_type): return item.encode('utf8') return item else: def to_str(item): if isinstance(item, six.binary_type): return item.decode('utf8', 'surrogateescape') return item return dict(((to_str(k), to_str(v)) for k, v in metadata.items()))
[ "def", "_decode_metadata", "(", "metadata", ")", ":", "if", "six", ".", "PY2", ":", "def", "to_str", "(", "item", ")", ":", "if", "isinstance", "(", "item", ",", "six", ".", "text_type", ")", ":", "return", "item", ".", "encode", "(", "'utf8'", ")", ...
https://github.com/openstack/swift/blob/b8d7c3dcb817504dcc0959ba52cc4ed2cf66c100/swift/obj/diskfile.py#L170-L187
ClusterHQ/flocker
eaa586248986d7cd681c99c948546c2b507e44de
flocker/node/_loop.py
python
_UnconvergedDelay.sleep
(self)
return s
Get the duration that should be slept for this iteration. :return _Sleep: an instance of `_Sleep` with a duration following an exponential backoff curve.
Get the duration that should be slept for this iteration.
[ "Get", "the", "duration", "that", "should", "be", "slept", "for", "this", "iteration", "." ]
def sleep(self): """ Get the duration that should be slept for this iteration. :return _Sleep: an instance of `_Sleep` with a duration following an exponential backoff curve. """ Message.log( message_type=u'flocker:node:_loop:delay', log_level=u'INFO', message=u'Intentionally delaying the next iteration of the ' u'convergence loop to avoid RequestLimitExceeded.', current_wait=self._delay ) s = _Sleep(delay_seconds=self._delay) self._delay *= _UNCONVERGED_BACKOFF_FACTOR if self._delay > self.max_sleep: self._delay = self.max_sleep return s
[ "def", "sleep", "(", "self", ")", ":", "Message", ".", "log", "(", "message_type", "=", "u'flocker:node:_loop:delay'", ",", "log_level", "=", "u'INFO'", ",", "message", "=", "u'Intentionally delaying the next iteration of the '", "u'convergence loop to avoid RequestLimitExc...
https://github.com/ClusterHQ/flocker/blob/eaa586248986d7cd681c99c948546c2b507e44de/flocker/node/_loop.py#L313-L331
OCA/reporting-engine
93a005f241a99ef9921d12e9a34a6cf2ea5a3be4
report_py3o_fusion_server/models/py3o_report.py
python
Py3oReport._create_single_report
(self, model_instance, data)
return result_path
This function to generate our py3o report
This function to generate our py3o report
[ "This", "function", "to", "generate", "our", "py3o", "report" ]
def _create_single_report(self, model_instance, data): """This function to generate our py3o report""" self.ensure_one() report_xml = self.ir_actions_report_id filetype = report_xml.py3o_filetype if not report_xml.py3o_server_id: return super()._create_single_report(model_instance, data) elif report_xml.py3o_is_local_fusion: result_path = super( Py3oReport, self.with_context(report_py3o_skip_conversion=True) )._create_single_report(model_instance, data) with closing(open(result_path, "rb")) as out_stream: tmpl_data = out_stream.read() datadict = {} else: result_fd, result_path = tempfile.mkstemp( suffix="." + filetype, prefix="p3o.report.tmp." ) tmpl_data = self.get_template(model_instance) in_stream = BytesIO(tmpl_data) with closing(os.fdopen(result_fd, "wb+")) as out_stream: template = Template(in_stream, out_stream, escape_false=True) localcontext = self._get_parser_context(model_instance, data) expressions = template.get_all_user_python_expression() py_expression = template.convert_py3o_to_python_ast(expressions) convertor = Py3oConvertor() data_struct = convertor(py_expression) datadict = data_struct.render(localcontext) # Call py3o.server to render the template in the desired format files = {"tmpl_file": tmpl_data} fields = { "targetformat": filetype, "datadict": json.dumps(datadict), "image_mapping": "{}", "escape_false": "on", } if report_xml.py3o_is_local_fusion: fields["skipfusion"] = "1" url = report_xml.py3o_server_id.url logger.info( "Connecting to %s to convert report %s to %s", url, report_xml.report_name, filetype, ) if filetype == "pdf": options = ( report_xml.pdf_options_id or report_xml.py3o_server_id.pdf_options_id ) if options: pdf_options_dict = options.odoo2libreoffice_options() fields["pdf_options"] = json.dumps(pdf_options_dict) logger.debug("PDF Export options: %s", pdf_options_dict) start_chrono = datetime.now() r = requests.post(url, data=fields, files=files) if r.status_code != 200: # server says we have an issue... let's tell that to enduser logger.error("Py3o fusion server error: %s", r.text) raise UserError(_("Fusion server error %s") % r.text) chunk_size = 1024 with open(result_path, "w+b") as fd: for chunk in r.iter_content(chunk_size): fd.write(chunk) end_chrono = datetime.now() convert_seconds = (end_chrono - start_chrono).total_seconds() logger.info( "Report %s converted to %s in %s seconds", report_xml.report_name, filetype, convert_seconds, ) if len(model_instance) == 1: self._postprocess_report(model_instance, result_path) return result_path
[ "def", "_create_single_report", "(", "self", ",", "model_instance", ",", "data", ")", ":", "self", ".", "ensure_one", "(", ")", "report_xml", "=", "self", ".", "ir_actions_report_id", "filetype", "=", "report_xml", ".", "py3o_filetype", "if", "not", "report_xml"...
https://github.com/OCA/reporting-engine/blob/93a005f241a99ef9921d12e9a34a6cf2ea5a3be4/report_py3o_fusion_server/models/py3o_report.py#L30-L106
aleju/imgaug
0101108d4fed06bc5056c4a03e2bcb0216dac326
imgaug/augmenters/overlay.py
python
FrequencyNoiseAlpha
(*args, **kwargs)
return blend.FrequencyNoiseAlpha(*args, **kwargs)
See :class:`~imgaug.augmenters.blend.BlendAlphaFrequencyNoise`.
See :class:`~imgaug.augmenters.blend.BlendAlphaFrequencyNoise`.
[ "See", ":", "class", ":", "~imgaug", ".", "augmenters", ".", "blend", ".", "BlendAlphaFrequencyNoise", "." ]
def FrequencyNoiseAlpha(*args, **kwargs): """See :class:`~imgaug.augmenters.blend.BlendAlphaFrequencyNoise`.""" # pylint: disable=invalid-name return blend.FrequencyNoiseAlpha(*args, **kwargs)
[ "def", "FrequencyNoiseAlpha", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=invalid-name", "return", "blend", ".", "FrequencyNoiseAlpha", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/aleju/imgaug/blob/0101108d4fed06bc5056c4a03e2bcb0216dac326/imgaug/augmenters/overlay.py#L53-L56
MechanicalSoup/MechanicalSoup
72783b827b176bec8a3f9672a5222ce835b72a82
mechanicalsoup/form.py
python
Form.set_checkbox
(self, data, uncheck_other_boxes=True)
Set the *checked*-attribute of input elements of type "checkbox" specified by ``data`` (i.e. check boxes). :param data: Dict of ``{name: value, ...}``. In the family of checkboxes whose *name*-attribute is ``name``, check the box whose *value*-attribute is ``value``. All boxes in the family can be checked (unchecked) if ``value`` is True (False). To check multiple specific boxes, let ``value`` be a tuple or list. :param uncheck_other_boxes: If True (default), before checking any boxes specified by ``data``, uncheck the entire checkbox family. Consider setting to False if some boxes are checked by default when the HTML is served.
Set the *checked*-attribute of input elements of type "checkbox" specified by ``data`` (i.e. check boxes).
[ "Set", "the", "*", "checked", "*", "-", "attribute", "of", "input", "elements", "of", "type", "checkbox", "specified", "by", "data", "(", "i", ".", "e", ".", "check", "boxes", ")", "." ]
def set_checkbox(self, data, uncheck_other_boxes=True): """Set the *checked*-attribute of input elements of type "checkbox" specified by ``data`` (i.e. check boxes). :param data: Dict of ``{name: value, ...}``. In the family of checkboxes whose *name*-attribute is ``name``, check the box whose *value*-attribute is ``value``. All boxes in the family can be checked (unchecked) if ``value`` is True (False). To check multiple specific boxes, let ``value`` be a tuple or list. :param uncheck_other_boxes: If True (default), before checking any boxes specified by ``data``, uncheck the entire checkbox family. Consider setting to False if some boxes are checked by default when the HTML is served. """ for (name, value) in data.items(): # Case-insensitive search for type=checkbox selector = 'input[type="checkbox" i][name="{}"]'.format(name) checkboxes = self.form.select(selector) if not checkboxes: raise InvalidFormMethod("No input checkbox named " + name) # uncheck if requested if uncheck_other_boxes: self.uncheck_all(name) # Wrap individual values (e.g. int, str) in a 1-element tuple. if not isinstance(value, list) and not isinstance(value, tuple): value = (value,) # Check or uncheck one or more boxes for choice in value: choice_str = str(choice) # Allow for example literal numbers for checkbox in checkboxes: if checkbox.attrs.get("value", "on") == choice_str: checkbox["checked"] = "" break # Allow specifying True or False to check/uncheck elif choice is True: checkbox["checked"] = "" break elif choice is False: if "checked" in checkbox.attrs: del checkbox.attrs["checked"] break else: raise LinkNotFoundError( "No input checkbox named %s with choice %s" % (name, choice) )
[ "def", "set_checkbox", "(", "self", ",", "data", ",", "uncheck_other_boxes", "=", "True", ")", ":", "for", "(", "name", ",", "value", ")", "in", "data", ".", "items", "(", ")", ":", "# Case-insensitive search for type=checkbox", "selector", "=", "'input[type=\...
https://github.com/MechanicalSoup/MechanicalSoup/blob/72783b827b176bec8a3f9672a5222ce835b72a82/mechanicalsoup/form.py#L100-L148
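A minimal usage sketch for the `Form.set_checkbox` record above. The page URL and the form/checkbox names (`http://example.com/form`, `toppings`, `bacon`, `cheese`) are hypothetical placeholders; only the documented MechanicalSoup calls (`StatefulBrowser.select_form` returning a `Form`, `submit_selected`) are assumed.

```python
import mechanicalsoup

# Hypothetical page and field names -- adjust to a real form.
browser = mechanicalsoup.StatefulBrowser()
browser.open("http://example.com/form")

form = browser.select_form("form")  # returns a mechanicalsoup.Form

# Check the boxes whose value-attributes are "bacon" and "cheese" in the
# checkbox family named "toppings"; other boxes in that family are
# unchecked first because uncheck_other_boxes defaults to True.
form.set_checkbox({"toppings": ("bacon", "cheese")})

response = browser.submit_selected()
```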
MultiChain/multichain-explorer
9e850fa79d0759b7348647ccf73a31d387c945a5
Mce/SqlAbstraction.py
python
SqlAbstraction.cursor
(sql)
return sql._cursor
[]
def cursor(sql): if sql._cursor is None: sql._cursor = sql.conn().cursor() return sql._cursor
[ "def", "cursor", "(", "sql", ")", ":", "if", "sql", ".", "_cursor", "is", "None", ":", "sql", ".", "_cursor", "=", "sql", ".", "conn", "(", ")", ".", "cursor", "(", ")", "return", "sql", ".", "_cursor" ]
https://github.com/MultiChain/multichain-explorer/blob/9e850fa79d0759b7348647ccf73a31d387c945a5/Mce/SqlAbstraction.py#L256-L259
golismero/golismero
7d605b937e241f51c1ca4f47b20f755eeefb9d76
thirdparty_libs/nltk/featstruct.py
python
subsumes
(fstruct1, fstruct2)
return fstruct2 == unify(fstruct1, fstruct2)
Return True if ``fstruct1`` subsumes ``fstruct2``. I.e., return true if unifying ``fstruct1`` with ``fstruct2`` would result in a feature structure equal to ``fstruct2.`` :rtype: bool
Return True if ``fstruct1`` subsumes ``fstruct2``. I.e., return true if unifying ``fstruct1`` with ``fstruct2`` would result in a feature structure equal to ``fstruct2.``
[ "Return", "True", "if", "fstruct1", "subsumes", "fstruct2", ".", "I", ".", "e", ".", "return", "true", "if", "unifying", "fstruct1", "with", "fstruct2", "would", "result", "in", "a", "feature", "structure", "equal", "to", "fstruct2", "." ]
def subsumes(fstruct1, fstruct2): """ Return True if ``fstruct1`` subsumes ``fstruct2``. I.e., return true if unifying ``fstruct1`` with ``fstruct2`` would result in a feature structure equal to ``fstruct2.`` :rtype: bool """ return fstruct2 == unify(fstruct1, fstruct2)
[ "def", "subsumes", "(", "fstruct1", ",", "fstruct2", ")", ":", "return", "fstruct2", "==", "unify", "(", "fstruct1", ",", "fstruct2", ")" ]
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/nltk/featstruct.py#L1621-L1629
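A small illustration of the subsumption relation described in the record above, assuming the `nltk` package is installed: `subsumes(f1, f2)` holds exactly when unifying `f1` with `f2` leaves `f2` unchanged.

```python
from nltk.featstruct import FeatStruct, subsumes

general = FeatStruct("[NUM='sg']")
specific = FeatStruct("[NUM='sg', PER=3]")

# The more general structure subsumes the more specific one,
# because unify(general, specific) == specific ...
assert subsumes(general, specific)
# ... but not the other way around.
assert not subsumes(specific, general)
```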
openwpm/OpenWPM
771b6db4169374a7f7b6eb5ce6e59ea763f26df4
openwpm/command_sequence.py
python
CommandSequence.screenshot_full_page
(self, suffix="", timeout=30)
Save a screenshot of the entire page. NOTE: geckodriver v0.15 only supports viewport screenshots. To screenshot the entire page we scroll the page using javascript and take a viewport screenshot at each location. This method will save the parts and a stitched version in the `screenshot_path`. We only scroll vertically, so pages that are wider than the viewport will be clipped. See: https://github.com/mozilla/geckodriver/issues/570 The screenshot produced will only include the area originally loaded at the start of the command. Sites which dynamically expand as the page is scrolled (i.e. infinite scroll) will only go as far as the original height. NOTE: In geckodriver v0.15 doing any scrolling (or having devtools open) seems to break element-only screenshots. So using this command will cause any future element-only screenshots to be mis-aligned
Save a screenshot of the entire page.
[ "Save", "a", "screenshot", "of", "the", "entire", "page", "." ]
def screenshot_full_page(self, suffix="", timeout=30): """Save a screenshot of the entire page. NOTE: geckodriver v0.15 only supports viewport screenshots. To screenshot the entire page we scroll the page using javascript and take a viewport screenshot at each location. This method will save the parts and a stitched version in the `screenshot_path`. We only scroll vertically, so pages that are wider than the viewport will be clipped. See: https://github.com/mozilla/geckodriver/issues/570 The screenshot produced will only include the area originally loaded at the start of the command. Sites which dynamically expand as the page is scrolled (i.e. infinite scroll) will only go as far as the original height. NOTE: In geckodriver v0.15 doing any scrolling (or having devtools open) seems to break element-only screenshots. So using this command will cause any future element-only screenshots to be mis-aligned """ self.total_timeout += timeout if not self.contains_get_or_browse: raise CommandExecutionError( "No get or browse request preceding the screenshot full page command", self, ) command = ScreenshotFullPageCommand(suffix) self._commands_with_timeout.append((command, timeout))
[ "def", "screenshot_full_page", "(", "self", ",", "suffix", "=", "\"\"", ",", "timeout", "=", "30", ")", ":", "self", ".", "total_timeout", "+=", "timeout", "if", "not", "self", ".", "contains_get_or_browse", ":", "raise", "CommandExecutionError", "(", "\"No ge...
https://github.com/openwpm/OpenWPM/blob/771b6db4169374a7f7b6eb5ce6e59ea763f26df4/openwpm/command_sequence.py#L114-L140
sergioburdisso/pyss3
70c37853f3f56a60c3df9b94b678ca3f0db843de
pyss3/__init__.py
python
SS3.__sg_xai__
(self, ngram, icat, cache=True)
A variation of the significance (sn) function. This version of the sg function adds extra checks to improve visual explanations.
A variation of the significance (sn) function.
[ "A", "variation", "of", "the", "significance", "(", "sn", ")", "function", "." ]
def __sg_xai__(self, ngram, icat, cache=True): """ A variation of the significance (sn) function. This version of the sg function adds extra checks to improve visual explanations. """ try: if cache: return self.__trie_node__(ngram, icat)[SG] else: ncats = len(self.__categories__) l = self.__l__ lvs = [self.__lv__(ngram, ic) for ic in xrange(ncats)] lv = lvs[icat] M, sd = mad(lvs, ncats) if l * sd <= MIN_MAD_SD: sd = MIN_MAD_SD / l if l else 0 # stopwords filter stopword = (M > .2) or ( sum(map(lambda v: v > 0.09, lvs)) == ncats ) if (stopword and sd <= .1) or (M >= .3): return 0. if not sd and lv: return 1. return sigmoid(lv - M, l * sd) except TypeError: return 0.
[ "def", "__sg_xai__", "(", "self", ",", "ngram", ",", "icat", ",", "cache", "=", "True", ")", ":", "try", ":", "if", "cache", ":", "return", "self", ".", "__trie_node__", "(", "ngram", ",", "icat", ")", "[", "SG", "]", "else", ":", "ncats", "=", "...
https://github.com/sergioburdisso/pyss3/blob/70c37853f3f56a60c3df9b94b678ca3f0db843de/pyss3/__init__.py#L247-L281
onaio/onadata
89ad16744e8f247fb748219476f6ac295869a95f
onadata/apps/sms_support/providers/textit.py
python
process_message_for_textit
(username, sms_identity, sms_text, sms_time, id_string, payload={})
return get_response(response)
Process a text instance and return in SMSSync expected format
Process a text instance and return in SMSSync expected format
[ "Process", "a", "text", "instance", "and", "return", "in", "SMSSync", "expected", "format" ]
def process_message_for_textit(username, sms_identity, sms_text, sms_time, id_string, payload={}): """ Process a text instance and return in SMSSync expected format """ if not sms_identity or not sms_text: return get_response({'code': SMS_API_ERROR, 'text': _(u"`identity` and `message` are " u"both required and must not be " u"empty.")}) incomings = [(sms_identity, sms_text)] response = process_incoming_smses(username, incomings, id_string)[-1] response.update({'payload': payload}) return get_response(response)
[ "def", "process_message_for_textit", "(", "username", ",", "sms_identity", ",", "sms_text", ",", "sms_time", ",", "id_string", ",", "payload", "=", "{", "}", ")", ":", "if", "not", "sms_identity", "or", "not", "sms_text", ":", "return", "get_response", "(", ...
https://github.com/onaio/onadata/blob/89ad16744e8f247fb748219476f6ac295869a95f/onadata/apps/sms_support/providers/textit.py#L120-L134
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/django/contrib/gis/geos/polygon.py
python
Polygon.from_bbox
(cls, bbox)
return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))
Constructs a Polygon from a bounding box (4-tuple).
Constructs a Polygon from a bounding box (4-tuple).
[ "Constructs", "a", "Polygon", "from", "a", "bounding", "box", "(", "4", "-", "tuple", ")", "." ]
def from_bbox(cls, bbox): "Constructs a Polygon from a bounding box (4-tuple)." x0, y0, x1, y1 = bbox for z in bbox: if not isinstance(z, six.integer_types + (float,)): return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (x0, y0, x0, y1, x1, y1, x1, y0, x0, y0)) return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))
[ "def", "from_bbox", "(", "cls", ",", "bbox", ")", ":", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "bbox", "for", "z", "in", "bbox", ":", "if", "not", "isinstance", "(", "z", ",", "six", ".", "integer_types", "+", "(", "float", ",", ")", ")", ...
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/django/contrib/gis/geos/polygon.py#L57-L64
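A quick sketch of `Polygon.from_bbox`, which builds a closed ring from an `(xmin, ymin, xmax, ymax)` tuple; it assumes a Django installation with the GEOS library available.

```python
from django.contrib.gis.geos import Polygon

# Bounding box as (xmin, ymin, xmax, ymax)
poly = Polygon.from_bbox((0.0, 0.0, 10.0, 5.0))

print(poly.wkt)     # e.g. POLYGON ((0 0, 0 5, 10 5, 10 0, 0 0))
print(poly.extent)  # (0.0, 0.0, 10.0, 5.0) -- round-trips back to the bbox
```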
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/multiprocessing/process.py
python
BaseProcess.sentinel
(self)
Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination.
Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination.
[ "Return", "a", "file", "descriptor", "(", "Unix", ")", "or", "handle", "(", "Windows", ")", "suitable", "for", "waiting", "for", "process", "termination", "." ]
def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' self._check_closed() try: return self._sentinel except AttributeError: raise ValueError("process not started") from None
[ "def", "sentinel", "(", "self", ")", ":", "self", ".", "_check_closed", "(", ")", "try", ":", "return", "self", ".", "_sentinel", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"process not started\"", ")", "from", "None" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/multiprocessing/process.py#L268-L277
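The `sentinel` property above exposes a waitable handle for process termination; a minimal sketch of using it with `multiprocessing.connection.wait` (the worker function and its sleep time are illustrative).

```python
import time
from multiprocessing import Process
from multiprocessing.connection import wait

def worker():
    time.sleep(1)

if __name__ == "__main__":
    p = Process(target=worker)
    p.start()

    # p.sentinel becomes "ready" when the process terminates, so it can be
    # multiplexed with other waitables instead of blocking in p.join().
    ready = wait([p.sentinel])
    assert p.sentinel in ready
    p.join()
```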
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
ext/urllib3/response.py
python
HTTPResponse.read_chunked
(self, amt=None, decode_content=None)
Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``. :param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header.
Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``.
[ "Similar", "to", ":", "meth", ":", "HTTPResponse", ".", "read", "but", "with", "an", "additional", "parameter", ":", "decode_content", "." ]
def read_chunked(self, amt=None, decode_content=None): """ Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``. :param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ self._init_decoder() # FIXME: Rewrite this method and make it a class with a better structured logic. if not self.chunked: raise ResponseNotChunked( "Response is not chunked. " "Header 'transfer-encoding: chunked' is missing.") if not self.supports_chunked_reads(): raise BodyNotHttplibCompatible( "Body should be httplib.HTTPResponse like. " "It should have have an fp attribute which returns raw chunks.") with self._error_catcher(): # Don't bother reading the body of a HEAD request. if self._original_response and is_response_to_head(self._original_response): self._original_response.close() return # If a response is already read and closed # then return immediately. if self._fp.fp is None: return while True: self._update_chunk_length() if self.chunk_left == 0: break chunk = self._handle_chunk(amt) decoded = self._decode(chunk, decode_content=decode_content, flush_decoder=False) if decoded: yield decoded if decode_content: # On CPython and PyPy, we should never need to flush the # decoder. However, on Jython we *might* need to, so # lets defensively do it anyway. decoded = self._flush_decoder() if decoded: # Platform-specific: Jython. yield decoded # Chunk content ends with \r\n: discard it. while True: line = self._fp.fp.readline() if not line: # Some sites may not end with '\r\n'. break if line == b'\r\n': break # We read everything; close the "file". if self._original_response: self._original_response.close()
[ "def", "read_chunked", "(", "self", ",", "amt", "=", "None", ",", "decode_content", "=", "None", ")", ":", "self", ".", "_init_decoder", "(", ")", "# FIXME: Rewrite this method and make it a class with a better structured logic.", "if", "not", "self", ".", "chunked", ...
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/urllib3/response.py#L629-L694
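A hedged usage sketch for `read_chunked`: it only applies to responses delivered with `Transfer-Encoding: chunked`, and the body must be left unread by passing `preload_content=False`. The URL below is a placeholder, and in practice `HTTPResponse.stream()` is the more common entry point that delegates to this method.

```python
import urllib3

http = urllib3.PoolManager()
# preload_content=False keeps the body unread so it can be streamed.
resp = http.request(
    "GET", "http://example.com/streaming-endpoint", preload_content=False
)

# Raises ResponseNotChunked unless the server actually sent a chunked body.
for chunk in resp.read_chunked(amt=1024, decode_content=True):
    print(len(chunk))

resp.release_conn()
```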
ni/nidaqmx-python
62fc6b48cbbb330fe1bcc9aedadc86610a1269b6
nidaqmx/_task_modules/channels/ci_channel.py
python
CIChannel.ci_lin_encoder_dist_per_pulse
(self)
return val.value
float: Specifies the distance to measure for each pulse the encoder generates on signal A or signal B. This value is in the units you specify with **ci_lin_encoder_units**.
float: Specifies the distance to measure for each pulse the encoder generates on signal A or signal B. This value is in the units you specify with **ci_lin_encoder_units**.
[ "float", ":", "Specifies", "the", "distance", "to", "measure", "for", "each", "pulse", "the", "encoder", "generates", "on", "signal", "A", "or", "signal", "B", ".", "This", "value", "is", "in", "the", "units", "you", "specify", "with", "**", "ci_lin_encode...
def ci_lin_encoder_dist_per_pulse(self): """ float: Specifies the distance to measure for each pulse the encoder generates on signal A or signal B. This value is in the units you specify with **ci_lin_encoder_units**. """ val = ctypes.c_double() cfunc = lib_importer.windll.DAQmxGetCILinEncoderDistPerPulse if cfunc.argtypes is None: with cfunc.arglock: if cfunc.argtypes is None: cfunc.argtypes = [ lib_importer.task_handle, ctypes_byte_str, ctypes.POINTER(ctypes.c_double)] error_code = cfunc( self._handle, self._name, ctypes.byref(val)) check_for_error(error_code) return val.value
[ "def", "ci_lin_encoder_dist_per_pulse", "(", "self", ")", ":", "val", "=", "ctypes", ".", "c_double", "(", ")", "cfunc", "=", "lib_importer", ".", "windll", ".", "DAQmxGetCILinEncoderDistPerPulse", "if", "cfunc", ".", "argtypes", "is", "None", ":", "with", "cf...
https://github.com/ni/nidaqmx-python/blob/62fc6b48cbbb330fe1bcc9aedadc86610a1269b6/nidaqmx/_task_modules/channels/ci_channel.py#L5686-L5706
NifTK/NiftyNet
935bf4334cd00fa9f9d50f6a95ddcbfdde4031e0
niftynet/utilities/download.py
python
ConfigStoreCache.get_local_cache_folder
(self)
return self._cache_folder
Returns the folder in which the cached files are stored
Returns the folder in which the cached files are stored
[ "Returns", "the", "folder", "in", "which", "the", "cached", "files", "are", "stored" ]
def get_local_cache_folder(self): """ Returns the folder in which the cached files are stored """ return self._cache_folder
[ "def", "get_local_cache_folder", "(", "self", ")", ":", "return", "self", ".", "_cache_folder" ]
https://github.com/NifTK/NiftyNet/blob/935bf4334cd00fa9f9d50f6a95ddcbfdde4031e0/niftynet/utilities/download.py#L344-L349
LabPy/lantz
3e878e3f765a4295b0089d04e241d4beb7b8a65b
lantz/drivers/andor/ccd.py
python
CCD.baseline_offset
(self)
return self.baseline_offset_value
This function allows the user to move the baseline level by the amount selected. For example “+100” will add approximately 100 counts to the default baseline value. The value entered should be a multiple of 100 between -1000 and +1000 inclusively.
This function allows the user to move the baseline level by the amount selected. For example “+100” will add approximately 100 counts to the default baseline value. The value entered should be a multiple of 100 between -1000 and +1000 inclusively.
[ "This", "function", "allows", "the", "user", "to", "move", "the", "baseline", "level", "by", "the", "amount", "selected", ".", "For", "example", "“", "+", "100”", "will", "add", "approximately", "100", "counts", "to", "the", "default", "baseline", "value", ...
def baseline_offset(self): """This function allows the user to move the baseline level by the amount selected. For example “+100” will add approximately 100 counts to the default baseline value. The value entered should be a multiple of 100 between -1000 and +1000 inclusively. """ return self.baseline_offset_value
[ "def", "baseline_offset", "(", "self", ")", ":", "return", "self", ".", "baseline_offset_value" ]
https://github.com/LabPy/lantz/blob/3e878e3f765a4295b0089d04e241d4beb7b8a65b/lantz/drivers/andor/ccd.py#L1649-L1655
python/cpython
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
Lib/_collections_abc.py
python
clear
(self)
D.clear() -> None. Remove all items from D.
D.clear() -> None. Remove all items from D.
[ "D", ".", "clear", "()", "-", ">", "None", ".", "Remove", "all", "items", "from", "D", "." ]
def clear(self): 'D.clear() -> None. Remove all items from D.' try: while True: self.popitem() except KeyError: pass
[ "def", "clear", "(", "self", ")", ":", "try", ":", "while", "True", ":", "self", ".", "popitem", "(", ")", "except", "KeyError", ":", "pass" ]
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/_collections_abc.py#L975-L981
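This `clear` is a mixin from `collections.abc.MutableMapping`: any mapping that implements the five abstract methods inherits it, and it simply calls `popitem` until a `KeyError` is raised. A minimal sketch:

```python
from collections.abc import MutableMapping

class DictBacked(MutableMapping):
    """Tiny mapping that stores items in a plain dict."""
    def __init__(self):
        self._data = {}
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)

m = DictBacked()
m["a"], m["b"] = 1, 2
m.clear()          # inherited mixin: pops items until KeyError
assert len(m) == 0
```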
caiiiac/Machine-Learning-with-Python
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
MachineLearning/venv/lib/python3.5/site-packages/wheel/signatures/keys.py
python
WheelKeys.add_signer
(self, scope, vk)
Remember verifying key vk as being valid for signing in scope.
Remember verifying key vk as being valid for signing in scope.
[ "Remember", "verifying", "key", "vk", "as", "being", "valid", "for", "signing", "in", "scope", "." ]
def add_signer(self, scope, vk): """Remember verifying key vk as being valid for signing in scope.""" self.data['signers'].append({'scope':scope, 'vk':vk})
[ "def", "add_signer", "(", "self", ",", "scope", ",", "vk", ")", ":", "self", ".", "data", "[", "'signers'", "]", ".", "append", "(", "{", "'scope'", ":", "scope", ",", "'vk'", ":", "vk", "}", ")" ]
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/wheel/signatures/keys.py#L96-L98
onnx/onnx-coreml
141fc33d7217674ea8bda36494fa8089a543a3f3
onnx_coreml/converter.py
python
_check_unsupported_ops
(nodes, disable_coreml_rank5_mapping=False)
[]
def _check_unsupported_ops(nodes, disable_coreml_rank5_mapping=False): # type: (...) -> None unsupported_op_types = [] # type: List[Text] for node in nodes: if disable_coreml_rank5_mapping: if node.op_type not in _ONNX_NODE_REGISTRY_ND and \ node.op_type not in unsupported_op_types: unsupported_op_types.append(node.op_type) continue if node.op_type not in _ONNX_NODE_REGISTRY and \ node.op_type not in unsupported_op_types: unsupported_op_types.append(node.op_type) coreml_3_rerun_message = '' if not disable_coreml_rank5_mapping: coreml_3_rerun_message = '\nPlease try converting again by providing the additonal argument, ' \ 'minimum_ios_deployment_target=13' \ ' and making sure you have the latest coremltools package' if len(unsupported_op_types) > 0: raise NotImplementedError("Unsupported ONNX ops of type: %s %s" % ( ','.join(unsupported_op_types), coreml_3_rerun_message))
[ "def", "_check_unsupported_ops", "(", "nodes", ",", "disable_coreml_rank5_mapping", "=", "False", ")", ":", "# type: (...) -> None", "unsupported_op_types", "=", "[", "]", "# type: List[Text]", "for", "node", "in", "nodes", ":", "if", "disable_coreml_rank5_mapping", ":"...
https://github.com/onnx/onnx-coreml/blob/141fc33d7217674ea8bda36494fa8089a543a3f3/onnx_coreml/converter.py#L206-L227
tensorflow/tensorboard
61d11d99ef034c30ba20b6a7840c8eededb9031c
tensorboard/plugins/debugger_v2/debugger_v2_plugin.py
python
_missing_run_error_response
(request)
return _error_response(request, "run parameter is not provided")
[]
def _missing_run_error_response(request): return _error_response(request, "run parameter is not provided")
[ "def", "_missing_run_error_response", "(", "request", ")", ":", "return", "_error_response", "(", "request", ",", "\"run parameter is not provided\"", ")" ]
https://github.com/tensorflow/tensorboard/blob/61d11d99ef034c30ba20b6a7840c8eededb9031c/tensorboard/plugins/debugger_v2/debugger_v2_plugin.py#L38-L39
medipixel/rl_algorithms
c5f7d1d60dcefb3050d75c5c657207183bd8db65
rl_algorithms/common/networks/heads.py
python
init_layer_uniform
(layer: nn.Linear, init_w: float = 3e-3)
return layer
Init uniform parameters on the single layer
Init uniform parameters on the single layer
[ "Init", "uniform", "parameters", "on", "the", "single", "layer" ]
def init_layer_uniform(layer: nn.Linear, init_w: float = 3e-3) -> nn.Linear: """Init uniform parameters on the single layer""" layer.weight.data.uniform_(-init_w, init_w) layer.bias.data.uniform_(-init_w, init_w) return layer
[ "def", "init_layer_uniform", "(", "layer", ":", "nn", ".", "Linear", ",", "init_w", ":", "float", "=", "3e-3", ")", "->", "nn", ".", "Linear", ":", "layer", ".", "weight", ".", "data", ".", "uniform_", "(", "-", "init_w", ",", "init_w", ")", "layer",...
https://github.com/medipixel/rl_algorithms/blob/c5f7d1d60dcefb3050d75c5c657207183bd8db65/rl_algorithms/common/networks/heads.py#L23-L28
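A short sketch of the same uniform-initialization idiom on a PyTorch linear layer; `torch` is assumed to be installed, and the helper below mirrors the record rather than importing it from `rl_algorithms`.

```python
import torch.nn as nn

def init_layer_uniform(layer: nn.Linear, init_w: float = 3e-3) -> nn.Linear:
    """Init uniform parameters on the single layer (as in the record above)."""
    layer.weight.data.uniform_(-init_w, init_w)
    layer.bias.data.uniform_(-init_w, init_w)
    return layer

# Typical use: keep the final layer of a policy/value head near zero at start.
head = init_layer_uniform(nn.Linear(256, 1))
assert head.weight.abs().max().item() <= 3e-3
```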
autotest/autotest
4614ae5f550cc888267b9a419e4b90deb54f8fae
client/shared/hosts/base_classes.py
python
Host.cleanup_kernels
(self, boot_dir='/boot')
Remove any kernel image and associated files (vmlinux, system.map, modules) for any image found in the boot directory that is not referenced by entries in the bootloader configuration. :param boot_dir: boot directory path string, default '/boot'
Remove any kernel image and associated files (vmlinux, system.map, modules) for any image found in the boot directory that is not referenced by entries in the bootloader configuration.
[ "Remove", "any", "kernel", "image", "and", "associated", "files", "(", "vmlinux", "system", ".", "map", "modules", ")", "for", "any", "image", "found", "in", "the", "boot", "directory", "that", "is", "not", "referenced", "by", "entries", "in", "the", "boot...
def cleanup_kernels(self, boot_dir='/boot'): """ Remove any kernel image and associated files (vmlinux, system.map, modules) for any image found in the boot directory that is not referenced by entries in the bootloader configuration. :param boot_dir: boot directory path string, default '/boot' """ # find all the vmlinuz images referenced by the bootloader boot_info = self.bootloader.get_entries() used_kernver = [] for boot in boot_info.itervalues(): k = os.path.basename(boot['kernel'])[len('vmlinuz-'):] if k not in used_kernver: used_kernver.append(k) # find all the unused vmlinuz images in /boot vmlinuz_prefix = os.path.join(boot_dir, 'vmlinuz-') all_vmlinuz = self.list_files_glob(vmlinuz_prefix + '*') used_vmlinuz = self.symlink_closure(vmlinuz_prefix + kernver for kernver in used_kernver) unused_vmlinuz = set(all_vmlinuz) - set(used_vmlinuz) # find all the unused vmlinux images in /boot vmlinux_prefix = os.path.join(boot_dir, 'vmlinux-') all_vmlinux = self.list_files_glob(vmlinux_prefix + '*') used_vmlinux = self.symlink_closure(vmlinux_prefix + kernver for kernver in used_kernver) unused_vmlinux = set(all_vmlinux) - set(used_vmlinux) # find all the unused System.map files in /boot systemmap_prefix = os.path.join(boot_dir, 'System.map-') all_system_map = self.list_files_glob(systemmap_prefix + '*') used_system_map = self.symlink_closure( systemmap_prefix + kernver for kernver in used_kernver) unused_system_map = set(all_system_map) - set(used_system_map) # find all the module directories associated with unused kernels modules_prefix = '/lib/modules/' all_moddirs = [mod_dir for mod_dir in self.list_files_glob(modules_prefix + '*') if re.match(modules_prefix + r'\d+\.\d+\.\d+.*', mod_dir)] used_moddirs = self.symlink_closure(modules_prefix + kernver for kernver in used_kernver) unused_moddirs = set(all_moddirs) - set(used_moddirs) # remove all the vmlinuz files we don't use # TODO: if needed this should become package manager agnostic for vmlinuz in unused_vmlinuz: # try and get an rpm package name rpm = self.run('rpm -qf', args=(vmlinuz,), ignore_status=True, timeout=120) if rpm.exit_status == 0: packages = set(line.strip() for line in rpm.stdout.splitlines()) # if we found some package names, try to remove them for package in packages: self.run('rpm -e', args=(package,), ignore_status=True, timeout=120) # remove the image files anyway, even if rpm didn't self.run('rm -f', args=(vmlinuz,), ignore_status=True, timeout=120) # remove all the vmlinux and System.map files left over for f in (unused_vmlinux | unused_system_map): self.run('rm -f', args=(f,), ignore_status=True, timeout=120) # remove all unused module directories # the regex match should keep us safe from removing the wrong files for moddir in unused_moddirs: self.run('rm -fr', args=(moddir,), ignore_status=True)
[ "def", "cleanup_kernels", "(", "self", ",", "boot_dir", "=", "'/boot'", ")", ":", "# find all the vmlinuz images referenced by the bootloader", "boot_info", "=", "self", ".", "bootloader", ".", "get_entries", "(", ")", "used_kernver", "=", "[", "]", "for", "boot", ...
https://github.com/autotest/autotest/blob/4614ae5f550cc888267b9a419e4b90deb54f8fae/client/shared/hosts/base_classes.py#L595-L665
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
bin/x86/Debug/scripting_engine/Lib/Bastion.py
python
BastionClass.__init__
(self, get, name)
Constructor. Arguments: get - a function that gets the attribute value (by name) name - a human-readable name for the original object (suggestion: use repr(object))
Constructor.
[ "Constructor", "." ]
def __init__(self, get, name): """Constructor. Arguments: get - a function that gets the attribute value (by name) name - a human-readable name for the original object (suggestion: use repr(object)) """ self._get_ = get self._name_ = name
[ "def", "__init__", "(", "self", ",", "get", ",", "name", ")", ":", "self", ".", "_get_", "=", "get", "self", ".", "_name_", "=", "name" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/bin/x86/Debug/scripting_engine/Lib/Bastion.py#L47-L58
hupili/snsapi
129529b89f38cbee253a23e5ed31dae2a0ea4254
snsapi/third/feedparser.py
python
_BaseHTMLProcessor.output
(self)
return ''.join([str(p) for p in self.pieces])
Return processed HTML as a single string
Return processed HTML as a single string
[ "Return", "processed", "HTML", "as", "a", "single", "string" ]
def output(self): '''Return processed HTML as a single string''' return ''.join([str(p) for p in self.pieces])
[ "def", "output", "(", "self", ")", ":", "return", "''", ".", "join", "(", "[", "str", "(", "p", ")", "for", "p", "in", "self", ".", "pieces", "]", ")" ]
https://github.com/hupili/snsapi/blob/129529b89f38cbee253a23e5ed31dae2a0ea4254/snsapi/third/feedparser.py#L2048-L2050
microsoft/msticpy
2a401444ee529114004f496f4c0376ff25b5268a
msticpy/nbtools/nbwidgets.py
python
OptionButtons._create_button_callbacks
(self, btns)
Set up buttons.
Set up buttons.
[ "Set", "up", "buttons", "." ]
def _create_button_callbacks(self, btns): """Set up buttons.""" def getvalue(change): """Button on_click handler.""" self.value = change.description for btn in btns: btn.on_click(getvalue, remove=True) for btn in btns: btn.on_click(getvalue)
[ "def", "_create_button_callbacks", "(", "self", ",", "btns", ")", ":", "def", "getvalue", "(", "change", ")", ":", "\"\"\"Button on_click handler.\"\"\"", "self", ".", "value", "=", "change", ".", "description", "for", "btn", "in", "btns", ":", "btn", ".", "...
https://github.com/microsoft/msticpy/blob/2a401444ee529114004f496f4c0376ff25b5268a/msticpy/nbtools/nbwidgets.py#L1664-L1674
iopsgroup/imoocc
de810eb6d4c1697b7139305925a5b0ba21225f3f
extra_apps/xadmin/views/list.py
python
ListAdminView.get_list_display
(self)
return list(self.base_list_display)
Return a sequence containing the fields to be displayed on the list.
Return a sequence containing the fields to be displayed on the list.
[ "Return", "a", "sequence", "containing", "the", "fields", "to", "be", "displayed", "on", "the", "list", "." ]
def get_list_display(self): """ Return a sequence containing the fields to be displayed on the list. """ self.base_list_display = (COL_LIST_VAR in self.request.GET and self.request.GET[COL_LIST_VAR] != "" and \ self.request.GET[COL_LIST_VAR].split('.')) or self.list_display return list(self.base_list_display)
[ "def", "get_list_display", "(", "self", ")", ":", "self", ".", "base_list_display", "=", "(", "COL_LIST_VAR", "in", "self", ".", "request", ".", "GET", "and", "self", ".", "request", ".", "GET", "[", "COL_LIST_VAR", "]", "!=", "\"\"", "and", "self", ".",...
https://github.com/iopsgroup/imoocc/blob/de810eb6d4c1697b7139305925a5b0ba21225f3f/extra_apps/xadmin/views/list.py#L148-L154
stepjam/PyRep
d778d5d4ffa3be366d4e699f6e2941553fd47ecc
pyrep/objects/object.py
python
Object.get_quaternion
(self, relative_to=None)
return np.array(quaternion, dtype=np.float64)
Retrieves the quaternion (x,y,z,w) of an object. :param relative_to: Indicates relative to which reference frame we want the orientation. Specify None to retrieve the absolute orientation, or an Object relative to whose reference frame we want the orientation. :return: A list containing the quaternion (x,y,z,w).
Retrieves the quaternion (x,y,z,w) of an object.
[ "Retrieves", "the", "quaternion", "(", "x", "y", "z", "w", ")", "of", "an", "object", "." ]
def get_quaternion(self, relative_to=None) -> np.ndarray: """Retrieves the quaternion (x,y,z,w) of an object. :param relative_to: Indicates relative to which reference frame we want the orientation. Specify None to retrieve the absolute orientation, or an Object relative to whose reference frame we want the orientation. :return: A list containing the quaternion (x,y,z,w). """ relto = -1 if relative_to is None else relative_to.get_handle() quaternion = sim.simGetObjectQuaternion(self._handle, relto) return np.array(quaternion, dtype=np.float64)
[ "def", "get_quaternion", "(", "self", ",", "relative_to", "=", "None", ")", "->", "np", ".", "ndarray", ":", "relto", "=", "-", "1", "if", "relative_to", "is", "None", "else", "relative_to", ".", "get_handle", "(", ")", "quaternion", "=", "sim", ".", "...
https://github.com/stepjam/PyRep/blob/d778d5d4ffa3be366d4e699f6e2941553fd47ecc/pyrep/objects/object.py#L187-L198
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/tencentcloud/dcdb/v20180411/dcdb_client.py
python
DcdbClient.DescribeDBLogFiles
(self, request)
This API (DescribeDBLogFiles) is used to get the list of various database logs, including cold backups, binlog, errlog and slowlog. :param request: The request structure containing the parameters for calling DescribeDBLogFiles. :type request: :class:`tencentcloud.dcdb.v20180411.models.DescribeDBLogFilesRequest` :rtype: :class:`tencentcloud.dcdb.v20180411.models.DescribeDBLogFilesResponse`
This API (DescribeDBLogFiles) is used to get the list of various database logs, including cold backups, binlog, errlog and slowlog.
[ "本接口", "(", "DescribeDBLogFiles", ")", "用于获取数据库的各种日志列表,包括冷备、binlog、errlog和slowlog。" ]
def DescribeDBLogFiles(self, request): """本接口(DescribeDBLogFiles)用于获取数据库的各种日志列表,包括冷备、binlog、errlog和slowlog。 :param request: 调用DescribeDBLogFiles所需参数的结构体。 :type request: :class:`tencentcloud.dcdb.v20180411.models.DescribeDBLogFilesRequest` :rtype: :class:`tencentcloud.dcdb.v20180411.models.DescribeDBLogFilesResponse` """ try: params = request._serialize() body = self.call("DescribeDBLogFiles", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeDBLogFilesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise e else: raise TencentCloudSDKException(e.message, e.message)
[ "def", "DescribeDBLogFiles", "(", "self", ",", "request", ")", ":", "try", ":", "params", "=", "request", ".", "_serialize", "(", ")", "body", "=", "self", ".", "call", "(", "\"DescribeDBLogFiles\"", ",", "params", ")", "response", "=", "json", ".", "loa...
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/tencentcloud/dcdb/v20180411/dcdb_client.py#L56-L81
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
openedx/core/djangoapps/common_views/xblock.py
python
xblock_resource
(request, block_type, uri)
return HttpResponse(content, content_type=mimetype)
Return a package resource for the specified XBlock.
Return a package resource for the specified XBlock.
[ "Return", "a", "package", "resource", "for", "the", "specified", "XBlock", "." ]
def xblock_resource(request, block_type, uri): # pylint: disable=unused-argument """ Return a package resource for the specified XBlock. """ try: # Figure out what the XBlock class is from the block type, and # then open whatever resource has been requested. xblock_class = XBlock.load_class(block_type, select=settings.XBLOCK_SELECT_FUNCTION) content = xblock_class.open_local_resource(uri) except OSError: log.info('Failed to load xblock resource', exc_info=True) raise Http404 # lint-amnesty, pylint: disable=raise-missing-from except Exception: log.error('Failed to load xblock resource', exc_info=True) raise Http404 # lint-amnesty, pylint: disable=raise-missing-from mimetype, _ = mimetypes.guess_type(uri) return HttpResponse(content, content_type=mimetype)
[ "def", "xblock_resource", "(", "request", ",", "block_type", ",", "uri", ")", ":", "# pylint: disable=unused-argument", "try", ":", "# Figure out what the XBlock class is from the block type, and", "# then open whatever resource has been requested.", "xblock_class", "=", "XBlock", ...
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/core/djangoapps/common_views/xblock.py#L16-L33
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/django/template/defaulttags.py
python
comment
(parser, token)
return CommentNode()
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
[ "Ignores", "everything", "between", "{", "%", "comment", "%", "}", "and", "{", "%", "endcomment", "%", "}", "." ]
def comment(parser, token): """ Ignores everything between ``{% comment %}`` and ``{% endcomment %}``. """ parser.skip_past('endcomment') return CommentNode()
[ "def", "comment", "(", "parser", ",", "token", ")", ":", "parser", ".", "skip_past", "(", "'endcomment'", ")", "return", "CommentNode", "(", ")" ]
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/django/template/defaulttags.py#L527-L532
pythad/nider
be71e09968559c5504c59158aa339136fb891fb4
nider/utils.py
python
is_path_creatable
(pathname)
return os.access(dirname, os.W_OK)
Function to check if the current user has sufficient permissions to create the passed pathname Args: pathname (str): path to check. Returns: bool: ``True`` if the current user has sufficient permissions to create the passed ``pathname``. ``False`` otherwise.
Function to check if the current user has sufficient permissions to create the passed pathname
[ "Function", "to", "check", "if", "the", "current", "user", "has", "sufficient", "permissions", "to", "create", "the", "passed", "pathname" ]
def is_path_creatable(pathname): '''Function to check if the current user has sufficient permissions to create the passed pathname Args: pathname (str): path to check. Returns: bool: ``True`` if the current user has sufficient permissions to create the passed ``pathname``. ``False`` otherwise. ''' # Parent directory of the passed path. If empty, we substitute the current # working directory (CWD) instead. dirname = os.path.dirname(pathname) or os.getcwd() return os.access(dirname, os.W_OK)
[ "def", "is_path_creatable", "(", "pathname", ")", ":", "# Parent directory of the passed path. If empty, we substitute the current", "# working directory (CWD) instead.", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "pathname", ")", "or", "os", ".", "getcwd", "...
https://github.com/pythad/nider/blob/be71e09968559c5504c59158aa339136fb891fb4/nider/utils.py#L40-L53
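A small self-contained version of the permission check described above, using only the standard library; the paths in the usage lines are placeholders.

```python
import os

def is_path_creatable(pathname: str) -> bool:
    """True if the current user may create `pathname` (write access to its parent)."""
    # Parent directory of the passed path; fall back to the CWD for bare names.
    dirname = os.path.dirname(pathname) or os.getcwd()
    return os.access(dirname, os.W_OK)

print(is_path_creatable("image.png"))            # checks write access to the CWD
print(is_path_creatable("/root/forbidden.png"))  # usually False for normal users
```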
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v1_taint.py
python
V1Taint.__ne__
(self, other)
return self.to_dict() != other.to_dict()
Returns true if both objects are not equal
Returns true if both objects are not equal
[ "Returns", "true", "if", "both", "objects", "are", "not", "equal" ]
def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1Taint): return True return self.to_dict() != other.to_dict()
[ "def", "__ne__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "V1Taint", ")", ":", "return", "True", "return", "self", ".", "to_dict", "(", ")", "!=", "other", ".", "to_dict", "(", ")" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_taint.py#L203-L208
FORTH-ICS-INSPIRE/artemis
f0774af8abc25ef5c6b307960c048ff7528d8a9c
backend-services/fileobserver/core/observer.py
python
Handler.__init__
(self, d, fn)
[]
def __init__(self, d, fn): super().__init__() self.response = None self.path = "{}/{}".format(d, fn) try: with open(self.path, "r") as f: self.content = f.readlines() except Exception: log.exception("exception")
[ "def", "__init__", "(", "self", ",", "d", ",", "fn", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "response", "=", "None", "self", ".", "path", "=", "\"{}/{}\"", ".", "format", "(", "d", ",", "fn", ")", "try", ":", "with...
https://github.com/FORTH-ICS-INSPIRE/artemis/blob/f0774af8abc25ef5c6b307960c048ff7528d8a9c/backend-services/fileobserver/core/observer.py#L210-L218
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit - MAC OSX/scripts/sshbackdoors/backdoors/shell/pupy/pupy/packages/windows/amd64/psutil/_pslinux.py
python
per_cpu_times
()
Return a list of namedtuple representing the CPU times for every CPU available on the system.
Return a list of namedtuple representing the CPU times for every CPU available on the system.
[ "Return", "a", "list", "of", "namedtuple", "representing", "the", "CPU", "times", "for", "every", "CPU", "available", "on", "the", "system", "." ]
def per_cpu_times(): """Return a list of namedtuple representing the CPU times for every CPU available on the system. """ cpus = [] with open('/proc/stat', 'rb') as f: # get rid of the first line which refers to system wide CPU stats f.readline() for line in f: if line.startswith(b'cpu'): values = line.split() fields = values[1:len(scputimes._fields) + 1] fields = [float(x) / CLOCK_TICKS for x in fields] entry = scputimes(*fields) cpus.append(entry) return cpus
[ "def", "per_cpu_times", "(", ")", ":", "cpus", "=", "[", "]", "with", "open", "(", "'/proc/stat'", ",", "'rb'", ")", "as", "f", ":", "# get rid of the first line which refers to system wide CPU stats", "f", ".", "readline", "(", ")", "for", "line", "in", "f", ...
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit - MAC OSX/scripts/sshbackdoors/backdoors/shell/pupy/pupy/packages/windows/amd64/psutil/_pslinux.py#L242-L257
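`per_cpu_times` is the Linux backend behind psutil's public per-CPU API; a usage sketch through that public surface, assuming `psutil` is installed:

```python
import psutil

# Public wrapper around the platform-specific per_cpu_times() shown above.
for idx, times in enumerate(psutil.cpu_times(percpu=True)):
    print(f"cpu{idx}: user={times.user:.1f}s "
          f"system={times.system:.1f}s idle={times.idle:.1f}s")
```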
DeNA/Chainer_Mask_R-CNN
315a5b09897801c1f6f21270aa898dc2c4d96c65
utils/roi_align_2d.py
python
roi_align_2d
(x, rois, outh, outw, spatial_scale)
return ROIAlign2D(outh, outw, spatial_scale)(x, rois)
Spatial Region of Interest (ROI) align function. This function acts similarly to :class:`~functions.MaxPooling2D`, but it computes the maximum of input spatial patch for each channel with the region of interest. Args: x (~chainer.Variable): Input variable. The shape is expected to be 4 dimentional: (n: batch, c: channel, h, height, w: width). rois (~chainer.Variable): Input roi variable. The shape is expected to be (n: data size, 5), and each datum is set as below: (batch_index, x_min, y_min, x_max, y_max). outh (int): Height of output image after pooled. outw (int): Width of output image after pooled. spatial_scale (float): Scale of the roi is resized. Returns: ~chainer.Variable: Output variable. See the original paper proposing ROIPooling: `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_.
Spatial Region of Interest (ROI) align function.
[ "Spatial", "Region", "of", "Interest", "(", "ROI", ")", "align", "function", "." ]
def roi_align_2d(x, rois, outh, outw, spatial_scale): """Spatial Region of Interest (ROI) align function. This function acts similarly to :class:`~functions.MaxPooling2D`, but it computes the maximum of input spatial patch for each channel with the region of interest. Args: x (~chainer.Variable): Input variable. The shape is expected to be 4 dimentional: (n: batch, c: channel, h, height, w: width). rois (~chainer.Variable): Input roi variable. The shape is expected to be (n: data size, 5), and each datum is set as below: (batch_index, x_min, y_min, x_max, y_max). outh (int): Height of output image after pooled. outw (int): Width of output image after pooled. spatial_scale (float): Scale of the roi is resized. Returns: ~chainer.Variable: Output variable. See the original paper proposing ROIPooling: `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_. """ return ROIAlign2D(outh, outw, spatial_scale)(x, rois)
[ "def", "roi_align_2d", "(", "x", ",", "rois", ",", "outh", ",", "outw", ",", "spatial_scale", ")", ":", "return", "ROIAlign2D", "(", "outh", ",", "outw", ",", "spatial_scale", ")", "(", "x", ",", "rois", ")" ]
https://github.com/DeNA/Chainer_Mask_R-CNN/blob/315a5b09897801c1f6f21270aa898dc2c4d96c65/utils/roi_align_2d.py#L219-L243
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/hashlib.py
python
__hash_new
(name, string='')
new(name, string='') - Return a new hashing object using the named algorithm; optionally initialized with a string.
new(name, string='') - Return a new hashing object using the named algorithm; optionally initialized with a string.
[ "new", "(", "name", "string", "=", ")", "-", "Return", "a", "new", "hashing", "object", "using", "the", "named", "algorithm", ";", "optionally", "initialized", "with", "a", "string", "." ]
def __hash_new(name, string=''): """new(name, string='') - Return a new hashing object using the named algorithm; optionally initialized with a string. """ try: return _hashlib.new(name, string) except ValueError: # If the _hashlib module (OpenSSL) doesn't support the named # hash, try using our builtin implementations. # This allows for SHA224/256 and SHA384/512 support even though # the OpenSSL library prior to 0.9.8 doesn't provide them. return __get_builtin_constructor(name)(string)
[ "def", "__hash_new", "(", "name", ",", "string", "=", "''", ")", ":", "try", ":", "return", "_hashlib", ".", "new", "(", "name", ",", "string", ")", "except", "ValueError", ":", "# If the _hashlib module (OpenSSL) doesn't support the named", "# hash, try using our b...
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/hashlib.py#L113-L124
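`__hash_new` is what `hashlib.new` resolves to; a quick sketch of the public call (the record above is the Python 2 version, so the bytes literal below reflects Python 3 usage):

```python
import hashlib

# Falls back to the built-in implementations if OpenSSL lacks the algorithm.
h = hashlib.new("sha256", b"hello world")
print(h.hexdigest())

# Equivalent to the named constructor:
assert h.hexdigest() == hashlib.sha256(b"hello world").hexdigest()
```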
mchristopher/PokemonGo-DesktopMap
ec37575f2776ee7d64456e2a1f6b6b78830b4fe0
app/pywin/Lib/distutils/msvccompiler.py
python
get_build_version
()
return None
Return the version of MSVC that was used to build Python. For Python 2.3 and up, the version number is included in sys.version. For earlier versions, assume the compiler is MSVC 6.
Return the version of MSVC that was used to build Python.
[ "Return", "the", "version", "of", "MSVC", "that", "was", "used", "to", "build", "Python", "." ]
def get_build_version(): """Return the version of MSVC that was used to build Python. For Python 2.3 and up, the version number is included in sys.version. For earlier versions, assume the compiler is MSVC 6. """ prefix = "MSC v." i = string.find(sys.version, prefix) if i == -1: return 6 i = i + len(prefix) s, rest = sys.version[i:].split(" ", 1) majorVersion = int(s[:-2]) - 6 minorVersion = int(s[2:3]) / 10.0 # I don't think paths are affected by minor version in version 6 if majorVersion == 6: minorVersion = 0 if majorVersion >= 6: return majorVersion + minorVersion # else we don't know what version of the compiler this is return None
[ "def", "get_build_version", "(", ")", ":", "prefix", "=", "\"MSC v.\"", "i", "=", "string", ".", "find", "(", "sys", ".", "version", ",", "prefix", ")", "if", "i", "==", "-", "1", ":", "return", "6", "i", "=", "i", "+", "len", "(", "prefix", ")",...
https://github.com/mchristopher/PokemonGo-DesktopMap/blob/ec37575f2776ee7d64456e2a1f6b6b78830b4fe0/app/pywin/Lib/distutils/msvccompiler.py#L153-L174
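The function keys off the "MSC v." tag that Windows builds of CPython embed in `sys.version`. A self-contained sketch of the same arithmetic on a fabricated version banner (the `1500` value is an example and corresponds to MSVC 9.0, shipped with Visual Studio 2008):

```
# Stand-alone illustration of the "MSC v." parsing done by get_build_version;
# the banner string below is made up for the example.
sample = "2.7.18 (default, Apr 20 2020, 19:27:10) [MSC v.1500 64 bit (AMD64)]"

prefix = "MSC v."
i = sample.find(prefix) + len(prefix)
s = sample[i:].split(" ", 1)[0]   # "1500"
major = int(s[:-2]) - 6           # 15 - 6 -> 9
minor = int(s[2:3]) / 10.0        # 0 / 10.0 -> 0.0
print(major + minor)              # 9.0, i.e. the compiler shipped with VS 2008
```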
ansible/ansible-runner-service
a9c7bd6174217bdc9c6d1e9e23fa74fe66a11d50
runner_service/controllers/vars.py
python
GroupVars.post
(self, group_name)
return r.__dict__, self.state_to_http[r.status]
POST [?type=file|inventory] Store group variables. By default, variables are stored in the group_vars subdirectory (type=file) but you may also store them in the inventory itself by specifying ?type=inventory on the request Example. ``` $ curl -i -k --key client.key --cert client.crt -H "Content-Type: application/json" --data '{"osd_auto_discovery": false, "osd_objectstore": "bluestore", "osd_scenario": "non-collocated"}' https://localhost:5001/api/v1/groupvars/osds -X POST HTTP/1.0 200 OK Content-Type: application/json Content-Length: 125 Server: Werkzeug/0.12.2 Python/3.6.6 Date: Tue, 11 Dec 2018 19:47:31 GMT { "status": "OK", "msg": "Variables written successfully to ./samples/project/group_vars/osds.yml", "data": {} } ```
POST [?type=file|inventory] Store group variables. By default, variables are stored in the group_vars subdirectory (type=file) but you may also store them in the inventory itself by specifying ?type=inventory on the request
[ "POST", "[", "?type", "=", "file|inventory", "]", "Store", "group", "variables", ".", "By", "default", "variables", "are", "stored", "in", "the", "group_vars", "subdirectory", "(", "type", "=", "file", ")", "but", "you", "may", "also", "store", "them", "in...
def post(self, group_name): """ POST [?type=file|inventory] Store group variables. By default, variables are stored in the group_vars subdirectory (type=file) but you may also store them in the inventory itself by specifying ?type=inventory on the request Example. ``` $ curl -i -k --key client.key --cert client.crt -H "Content-Type: application/json" --data '{"osd_auto_discovery": false, "osd_objectstore": "bluestore", "osd_scenario": "non-collocated"}' https://localhost:5001/api/v1/groupvars/osds -X POST HTTP/1.0 200 OK Content-Type: application/json Content-Length: 125 Server: Werkzeug/0.12.2 Python/3.6.6 Date: Tue, 11 Dec 2018 19:47:31 GMT { "status": "OK", "msg": "Variables written successfully to ./samples/project/group_vars/osds.yml", "data": {} } ``` """ # noqa r = APIResponse() if request.content_type != 'application/json': logger.warning("Invalid request. GROUPVARS POST requests must be " "in JSON format (application/json)") r.status, r.msg = "UNSUPPORTED", \ "Invalid content-type({}). Use application/" \ "json".format(request.content_type) return r.__dict__, self.state_to_http[r.status] # default for host vars storage store_type = 'file' vars = request.get_json() args = request.args.to_dict() if args: r_store_type = args.get('type', None) if not r_store_type: logger.debug("POST request invalid. Only type= is supported") r.status, r.msg = "INVALID", \ "Only type=inventory is supported" return r.__dict__, self.state_to_http[r.status] if r_store_type in ['inventory', 'file']: store_type = r_store_type else: logger.debug("GROUPVARS POST request has invalid type parm") r.status, r.msg = "INVALID", \ "type= value must be either inventory or file" # noqa return r.__dict__, self.state_to_http[r.status] # check that the json object can be converted to YAML try: yaml.safe_dump(vars) except yaml.YAMLError as e: logger.error("Unable to convert vars to YAML format : {}".format(e)) # noqa r.status, r.msg = "INVALID", \ "JSON received could not be converted to YAML" return r.__dict__, self.state_to_http[r.status] # payload is OK, so let's commit the change r = add_groupvars(group_name, vars, store_type) return r.__dict__, self.state_to_http[r.status]
[ "def", "post", "(", "self", ",", "group_name", ")", ":", "# noqa", "r", "=", "APIResponse", "(", ")", "if", "request", ".", "content_type", "!=", "'application/json'", ":", "logger", ".", "warning", "(", "\"Invalid request. GROUPVARS POST requests must be \"", "\"...
https://github.com/ansible/ansible-runner-service/blob/a9c7bd6174217bdc9c6d1e9e23fa74fe66a11d50/runner_service/controllers/vars.py#L228-L295
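The curl call in the docstring maps directly onto any HTTP client. Below is a hedged `requests` sketch against the same endpoint; the host, port, certificate paths, and variable payload are copied from the example above and are illustrative only.

```
import requests

# Mirror of the docstring's curl example: POST group variables as JSON.
payload = {
    "osd_auto_discovery": False,
    "osd_objectstore": "bluestore",
    "osd_scenario": "non-collocated",
}

resp = requests.post(
    "https://localhost:5001/api/v1/groupvars/osds",  # append ?type=inventory to store in the inventory instead
    json=payload,                      # sets Content-Type: application/json
    cert=("client.crt", "client.key"),
    verify=False,                      # the curl example uses -k (no CA verification)
)
print(resp.status_code, resp.json())
```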
fogleman/pg
124ea3803c788b2c98c4f3a428e5d26842a67b58
pg/glfw.py
python
set_cursor_pos
(window, xpos, ypos)
Sets the position of the cursor, relative to the client area of the window. Wrapper for: void glfwSetCursorPos(GLFWwindow* window, double xpos, double ypos);
Sets the position of the cursor, relative to the client area of the window.
[ "Sets", "the", "position", "of", "the", "cursor", "relative", "to", "the", "client", "area", "of", "the", "window", "." ]
def set_cursor_pos(window, xpos, ypos): ''' Sets the position of the cursor, relative to the client area of the window. Wrapper for: void glfwSetCursorPos(GLFWwindow* window, double xpos, double ypos); ''' _glfw.glfwSetCursorPos(window, xpos, ypos)
[ "def", "set_cursor_pos", "(", "window", ",", "xpos", ",", "ypos", ")", ":", "_glfw", ".", "glfwSetCursorPos", "(", "window", ",", "xpos", ",", "ypos", ")" ]
https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/glfw.py#L1306-L1313
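A minimal usage sketch for `set_cursor_pos`, assuming the companion wrappers in the same module (`init`, `create_window`, `make_context_current`, `get_cursor_pos`, `terminate`) follow the usual GLFW naming; window size, title, and coordinates are arbitrary:

```
from pg import glfw  # the wrapper module this function is defined in (assumed importable)

if not glfw.init():
    raise RuntimeError('glfwInit failed')
try:
    window = glfw.create_window(640, 480, 'cursor demo', None, None)
    glfw.make_context_current(window)
    glfw.set_cursor_pos(window, 320.0, 240.0)  # client-area coordinates
    print(glfw.get_cursor_pos(window))         # roughly (320.0, 240.0) while focused
finally:
    glfw.terminate()
```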
justquick/django-varnish
05ae29ff870bc3cd08a91570b7e19f47c9d33929
varnishapp/management/commands/varnishmgt.py
python
Command.handle
(self, *args, **options)
[]
def handle(self, *args, **options): if args: pprint(manager.run(*args)) else: print manager.help()
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "if", "args", ":", "pprint", "(", "manager", ".", "run", "(", "*", "args", ")", ")", "else", ":", "print", "manager", ".", "help", "(", ")" ]
https://github.com/justquick/django-varnish/blob/05ae29ff870bc3cd08a91570b7e19f47c9d33929/varnishapp/management/commands/varnishmgt.py#L6-L10
songwsx/person_search_demo
b4a23f222ed9c451f13673f861e562b1d9169791
utils/datasets.py
python
LoadImagesAndLabels.__init__
(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=True, image_weights=False, cache_images=False)
:param path: path of the ID file for the training set, 'data/train.txt' :param img_size: network input resolution, 416 :param batch_size: 2 :param augment: whether to apply data augmentation :param hyp: hyperparameters for data augmentation :param rect: whether to use rectangular training :param image_weights: False :param cache_images: True
[]
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=True, image_weights=False, cache_images=False): """ :param path: path of the ID file for the training set, 'data/train.txt' :param img_size: network input resolution, 416 :param batch_size: 2 :param augment: whether to apply data augmentation :param hyp: hyperparameters for data augmentation :param rect: whether to use rectangular training :param image_weights: False :param cache_images: True """ path = str(Path(path)) # os-agnostic # read the contents of the train/val txt file with open(path, 'r') as f: self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic if os.path.splitext(x)[-1].lower() in img_formats] n = len(self.img_files) # 4807, number of images bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index [0 0 1 1 2 2...] nb = bi[-1] + 1 # number of batches 2404 assert n > 0, 'No images found in %s' % path self.n = n self.batch = bi # batch index of image self.img_size = img_size self.augment = augment self.hyp = hyp self.image_weights = image_weights self.rect = False if image_weights else rect # match images to annotations: derive the label file path from each image path in train.txt # replace the images directory in the path with labels # replace the .png or .jpg image extension with the .txt label extension self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in self.img_files] # image paths listed in train.txt # Rectangular Training https://github.com/ultralytics/yolov3/issues/232 if self.rect: # Read image shapes sp = 'data' + os.sep + path.replace('.txt', '.shapes').split(os.sep)[-1] # shapefile path try: with open(sp, 'r') as f: # read existing shapefile s = [x.split() for x in f.read().splitlines()] assert len(s) == n, 'Shapefile out of sync' except: s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')] np.savetxt(sp, s, fmt='%g') # overwrites existing (if any) # Sort by aspect ratio s = np.array(s, dtype=np.float64) ar = s[:, 1] / s[:, 0] # aspect ratio i = ar.argsort() self.img_files = [self.img_files[i] for i in i] self.label_files = [self.label_files[i] for i in i] self.shapes = s[i] ar = ar[i] # Set training image shapes shapes = [[1, 1]] * nb for i in range(nb): ari = ar[bi == i] mini, maxi = ari.min(), ari.max() if maxi < 1: shapes[i] = [maxi, 1] elif mini > 1: shapes[i] = [1, 1 / mini] self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(np.int) * 32 # Preload labels (required for weighted CE training) self.imgs = [None] * n self.labels = [None] * n if augment or image_weights: # cache labels for faster training self.labels = [np.zeros((0, 5))] * n extract_bounding_boxes = False pbar = tqdm(self.label_files, desc='Reading labels') # Reading labels: 0%| | 0/4807 [00:00<?, ?it/s] nm, nf, ne = 0, 0, 0 # number missing, number found, number empty for i, file in enumerate(pbar): try: with open(file, 'r') as f: # 'data\\labels\\train\\Inria_319.txt' l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # 2 means two target objects: (2, 5) except: nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing continue if l.shape[0]: assert l.shape[1] == 5, '> 5 label columns: %s' % file assert (l >= 0).all(), 'negative labels: %s' % file assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file self.labels[i] = l nf += 1 # file found # Extract object detection boxes for a second stage classifier if extract_bounding_boxes: p = Path(self.img_files[i]) img = cv2.imread(str(p)) h, w, _ = img.shape for j, x in enumerate(l): f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name) if not os.path.exists(Path(f).parent): os.makedirs(Path(f).parent) # make new output folder box = xywh2xyxy(x[1:].reshape(-1, 4)).ravel() b = np.clip(box, 0, 1) # clip boxes outside of image ret_val = cv2.imwrite(f, img[int(b[1] * h):int(b[3] * h), int(b[0] * w):int(b[2] * w)]) assert ret_val, 'Failure extracting classifier boxes' else: ne += 1 # file empty pbar.desc = 'Reading labels (%g found, %g missing, %g empty for %g images)' % (nf, nm, ne, n) assert nf > 0, 'No labels found. Recommend correcting image and label paths.' # Cache images into memory for faster training (~5GB) # imread is slow, so preload up to 10000 images here (about 5GB) to speed up training if cache_images and augment: # if training for i in tqdm(range(min(len(self.img_files), 10000)), desc='Reading images'): # max 10k images img_path = self.img_files[i] img = cv2.imread(img_path) # BGR assert img is not None, 'Image Not Found ' + img_path r = self.img_size / max(img.shape) # size ratio: scale factor that resizes the long side to 416 if self.augment and r < 1: # if training (NOT testing), downsize to inference shape h, w, _ = img.shape img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR) # or INTER_AREA self.imgs[i] = img # store the proportionally resized image # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3 # check whether each image was downloaded correctly and print any corrupted ones detect_corrupted_images = False if detect_corrupted_images: from skimage import io # conda install -c conda-forge scikit-image for file in tqdm(self.img_files, desc='Detecting corrupted images'): try: _ = io.imread(file) except: print('Corrupted image detected: %s' % file)
[ "def", "__init__", "(", "self", ",", "path", ",", "img_size", "=", "416", ",", "batch_size", "=", "16", ",", "augment", "=", "False", ",", "hyp", "=", "None", ",", "rect", "=", "True", ",", "image_weights", "=", "False", ",", "cache_images", "=", "Fa...
https://github.com/songwsx/person_search_demo/blob/b4a23f222ed9c451f13673f861e562b1d9169791/utils/datasets.py#L188-L323
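The rectangular-training branch above sorts images by aspect ratio and derives one padded shape per batch, rounded to a multiple of 32. A self-contained sketch of that shape computation on made-up aspect ratios (img_size, batch size, and the ratios themselves are illustrative):

```
import numpy as np

# Illustrative stand-in for the per-batch shape computation used for
# rectangular training; the sorted h/w aspect ratios are invented.
img_size, batch_size = 416, 4
ar = np.sort(np.array([0.6, 0.8, 0.9, 1.0, 1.1, 1.4, 1.7, 2.0]))
n = len(ar)
bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index per image
nb = bi[-1] + 1                                       # number of batches

shapes = [[1, 1]] * nb
for i in range(nb):
    ari = ar[bi == i]
    mini, maxi = ari.min(), ari.max()
    if maxi < 1:
        shapes[i] = [maxi, 1]       # wide batch: shrink the height scale
    elif mini > 1:
        shapes[i] = [1, 1 / mini]   # tall batch: shrink the width scale
batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(int) * 32
print(batch_shapes)  # one (h, w) pair per batch, each a multiple of 32
```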
punchagan/cinspect
23834b9d02511a88cba8ca0aa1397eef927822c3
cinspect/index/serialize.py
python
write_index
(db, data)
Write the index data to the db file.
Write the index data to the db file.
[ "Write", "the", "index", "data", "to", "the", "db", "file", "." ]
def write_index(db, data): """ Write the index data to the db file. """ with open(db, 'w') as f: json.dump(data, f, indent=2)
[ "def", "write_index", "(", "db", ",", "data", ")", ":", "with", "open", "(", "db", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "data", ",", "f", ",", "indent", "=", "2", ")" ]
https://github.com/punchagan/cinspect/blob/23834b9d02511a88cba8ca0aa1397eef927822c3/cinspect/index/serialize.py#L55-L59
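`write_index` simply serialises the index dict as pretty-printed JSON. A small round-trip sketch; the temporary path and the sample index contents are made up:

```
import json
import os
import tempfile

# Hypothetical round trip for the index file: dump it the same way
# write_index does, then read it back and compare.
data = {"modules": {"zlib": {"objects": ["compressobj"]}}}   # invented index

db = os.path.join(tempfile.mkdtemp(), "index.json")
with open(db, "w") as f:
    json.dump(data, f, indent=2)      # equivalent to write_index(db, data)

with open(db) as f:
    assert json.load(f) == data       # the data survives the round trip
```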