repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
wandb/client
wandb/vendor/prompt_toolkit/layout/controls.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/controls.py#L532-L588
def _create_get_processed_line_func(self, cli, document):
    """
    Create a function that takes a line number of the current document and
    returns a _ProcessedLine(processed_tokens, source_to_display,
    display_to_source) tuple.
    """
    def transform(lineno, tokens):
        " Transform the tokens for a given line number. "
        source_to_display_functions = []
        display_to_source_functions = []

        # Get cursor position at this line.
        if document.cursor_position_row == lineno:
            cursor_column = document.cursor_position_col
        else:
            cursor_column = None

        def source_to_display(i):
            """ Translate x position from the buffer to the x position in the
            processed token list. """
            for f in source_to_display_functions:
                i = f(i)
            return i

        # Apply each processor.  Each transformation sees the tokens produced
        # by the previous one, so the mapping functions must be applied in
        # order (source->display) resp. reverse order (display->source).
        for p in self.input_processors:
            transformation = p.apply_transformation(
                cli, document, lineno, source_to_display, tokens)
            tokens = transformation.tokens

            # FIX: was `if cursor_column:`, which skipped the mapping when
            # the cursor sits at column 0 (a falsy but valid position).
            # NOTE(review): the transformed cursor_column is not returned
            # from transform(); kept for parity with upstream — confirm.
            if cursor_column is not None:
                cursor_column = transformation.source_to_display(cursor_column)

            display_to_source_functions.append(transformation.display_to_source)
            source_to_display_functions.append(transformation.source_to_display)

        def display_to_source(i):
            # Undo the processors' position shifts, newest first.
            for f in reversed(display_to_source_functions):
                i = f(i)
            return i

        return _ProcessedLine(tokens, source_to_display, display_to_source)

    def create_func():
        get_line = self._get_tokens_for_line_func(cli, document)
        cache = {}  # lineno -> _ProcessedLine; processing a line is expensive

        def get_processed_line(i):
            try:
                return cache[i]
            except KeyError:
                processed_line = transform(i, get_line(i))
                cache[i] = processed_line
                return processed_line
        return get_processed_line

    return create_func()
[ "def", "_create_get_processed_line_func", "(", "self", ",", "cli", ",", "document", ")", ":", "def", "transform", "(", "lineno", ",", "tokens", ")", ":", "\" Transform the tokens for a given line number. \"", "source_to_display_functions", "=", "[", "]", "display_to_sou...
Create a function that takes a line number of the current document and returns a _ProcessedLine(processed_tokens, source_to_display, display_to_source) tuple.
[ "Create", "a", "function", "that", "takes", "a", "line", "number", "of", "the", "current", "document", "and", "returns", "a", "_ProcessedLine", "(", "processed_tokens", "source_to_display", "display_to_source", ")", "tuple", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py#L25-L35
def threshold_monitor_hidden_threshold_monitor_sfp_pause(self, **kwargs):
    """Auto Generated Code

    Build the NETCONF ``config`` payload for
    threshold-monitor-hidden/threshold-monitor/sfp/pause and pass it to the
    callback (``kwargs['callback']`` or ``self._callback``).
    """
    callback = kwargs.pop('callback', self._callback)
    config = ET.Element("config")
    node = ET.SubElement(
        config, "threshold-monitor-hidden",
        xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
    # Descend through the fixed tag chain; only the leaf matters, but every
    # intermediate element must exist in the payload.
    for tag in ("threshold-monitor", "sfp", "pause"):
        node = ET.SubElement(node, tag)
    return callback(config)
[ "def", "threshold_monitor_hidden_threshold_monitor_sfp_pause", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "threshold_monitor_hidden", "=", "ET", ".", "SubElement", "(", "config", ",", "\"threshold-m...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
senaite/senaite.core
bika/lims/upgrade/v01_02_009.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_02_009.py#L115-L132
def reindex_client_local_owner_permissions(portal):
    """https://github.com/senaite/senaite.core/issues/957

    Reindex bika_setup objects located in clients to give proper permissions
    to client contacts.
    """
    start = time.time()
    catalog = portal.bika_setup_catalog
    client_uids = [client.UID() for client in portal.clients.objectValues()]
    brains = catalog(getClientUID=client_uids)
    total = len(brains)
    for num, brain in enumerate(brains):
        obj = brain.getObject()
        logger.info("Reindexing permission for {}/{} ({})"
                    .format(num, total, obj.absolute_url()))
        # Security reindex pushes the updated local-owner roles into the
        # catalog for this object.
        obj.reindexObjectSecurity()
    logger.info("Fixing local owner role on client objects took {:.2f}s"
                .format(time.time() - start))
    transaction.commit()
[ "def", "reindex_client_local_owner_permissions", "(", "portal", ")", ":", "start", "=", "time", ".", "time", "(", ")", "bsc", "=", "portal", ".", "bika_setup_catalog", "uids", "=", "[", "c", ".", "UID", "(", ")", "for", "c", "in", "portal", ".", "clients...
https://github.com/senaite/senaite.core/issues/957 Reindex bika_setup objects located in clients to give proper permissions to client contacts.
[ "https", ":", "//", "github", ".", "com", "/", "senaite", "/", "senaite", ".", "core", "/", "issues", "/", "957", "Reindex", "bika_setup", "objects", "located", "in", "clients", "to", "give", "proper", "permissions", "to", "client", "contacts", "." ]
python
train
weso/CWR-DataApi
data_cwr/accessor.py
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/data_cwr/accessor.py#L39-L53
def read_csv_file(self, file_name):
    """
    Parses a CSV file into a list.

    Only the basename of ``file_name`` is used; the file is looked up inside
    this accessor's data directory.  All cells of all rows are flattened into
    a single list, in reading order.

    :param file_name: name of the CSV file
    :return: a list with the file's contents
    """
    csv_path = os.path.join(self.__path(), os.path.basename(file_name))
    cells = []
    with open(csv_path, 'rt') as csv_file:
        for row in csv.reader(csv_file, delimiter=',', quotechar='|'):
            cells.extend(row)
    return cells
[ "def", "read_csv_file", "(", "self", ",", "file_name", ")", ":", "result", "=", "[", "]", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", "(", ")", ",", "os", ".", "path", ".", "basename", "(", "file_name", ")", "...
Parses a CSV file into a list. :param file_name: name of the CSV file :return: a list with the file's contents
[ "Parses", "a", "CSV", "file", "into", "a", "list", "." ]
python
train
mixmastamyk/console
console/windows.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/windows.py#L206-L215
def get_title():
    ''' Returns console title string.

        https://docs.microsoft.com/en-us/windows/console/getconsoletitle
    '''
    capacity = 256  # characters, not bytes; API truncates beyond this
    title_buffer = create_unicode_buffer(capacity)
    kernel32.GetConsoleTitleW(title_buffer, capacity)
    title = title_buffer.value
    log.debug('%s', title)
    return title
[ "def", "get_title", "(", ")", ":", "MAX_LEN", "=", "256", "buffer_", "=", "create_unicode_buffer", "(", "MAX_LEN", ")", "kernel32", ".", "GetConsoleTitleW", "(", "buffer_", ",", "MAX_LEN", ")", "log", ".", "debug", "(", "'%s'", ",", "buffer_", ".", "value"...
Returns console title string. https://docs.microsoft.com/en-us/windows/console/getconsoletitle
[ "Returns", "console", "title", "string", "." ]
python
train
arviz-devs/arviz
arviz/plots/plot_utils.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/plot_utils.py#L87-L126
def get_bins(values):
    """
    Automatically compute the number of bins for discrete variables.

    Parameters
    ----------
    values : numpy array
        values

    Returns
    -------
    array with the bins

    Notes
    -----
    Computes the width of the bins by taking the maximum of the Sturges and
    the Freedman-Diaconis estimators. According to numpy `np.histogram` this
    provides good all around performance.

    The Sturges is a very simplistic estimator based on the assumption of
    normality of the data. This estimator has poor performance for non-normal
    data, which becomes especially obvious for large data sets. The estimate
    depends only on size of the data.

    The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the
    binwidth. It is considered a robust version of the Scott rule as the IQR
    is less affected by outliers than the standard deviation. However, the
    IQR depends on fewer points than the standard deviation, so it is less
    accurate, especially for long tailed distributions.
    """
    x_min = values.min().astype(int)
    x_max = values.max().astype(int)

    # Sturges histogram bin estimator
    bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)

    # The Freedman-Diaconis histogram bin estimator.
    iqr = np.subtract(*np.percentile(values, [75, 25]))  # pylint: disable=assignment-from-no-return
    bins_fd = 2 * iqr * values.size ** (-1 / 3)

    # FIX: builtin round() on a numpy float scalar (a `float` subclass)
    # returns a plain Python int on Python 3, which has no .astype method,
    # so `round(...).astype(int)` raises AttributeError.  int() handles
    # both plain and numpy scalars.
    width = int(round(np.max([1, bins_sturges, bins_fd])))

    return np.arange(x_min, x_max + width + 1, width)
[ "def", "get_bins", "(", "values", ")", ":", "x_min", "=", "values", ".", "min", "(", ")", ".", "astype", "(", "int", ")", "x_max", "=", "values", ".", "max", "(", ")", ".", "astype", "(", "int", ")", "# Sturges histogram bin estimator", "bins_sturges", ...
Automatically compute the number of bins for discrete variables. Parameters ---------- values = numpy array values Returns ------- array with the bins Notes ----- Computes the width of the bins by taking the maximun of the Sturges and the Freedman-Diaconis estimators. Acording to numpy `np.histogram` this provides good all around performance. The Sturges is a very simplistic estimator based on the assumption of normality of the data. This estimator has poor performance for non-normal data, which becomes especially obvious for large data sets. The estimate depends only on size of the data. The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth. It is considered a robusts version of the Scott rule as the IQR is less affected by outliers than the standard deviation. However, the IQR depends on fewer points than the standard deviation, so it is less accurate, especially for long tailed distributions.
[ "Automatically", "compute", "the", "number", "of", "bins", "for", "discrete", "variables", "." ]
python
train
ethereum/web3.py
web3/contract.py
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/contract.py#L1427-L1454
def build_transaction_for_function(
        address,
        web3,
        function_name=None,
        transaction=None,
        contract_abi=None,
        fn_abi=None,
        *args,
        **kwargs):
    """Builds a dictionary with the fields required to make the given transaction

    Don't call this directly, instead use :meth:`Contract.buildTransaction`
    on your contract instance.
    """
    # Resolve the function call into a raw transaction dict, then let web3
    # fill in any missing defaults (gas, gasPrice, ...).
    prepared = prepare_transaction(
        address,
        web3,
        fn_identifier=function_name,
        contract_abi=contract_abi,
        fn_abi=fn_abi,
        transaction=transaction,
        fn_args=args,
        fn_kwargs=kwargs,
    )
    return fill_transaction_defaults(web3, prepared)
[ "def", "build_transaction_for_function", "(", "address", ",", "web3", ",", "function_name", "=", "None", ",", "transaction", "=", "None", ",", "contract_abi", "=", "None", ",", "fn_abi", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "...
Builds a dictionary with the fields required to make the given transaction Don't call this directly, instead use :meth:`Contract.buildTransaction` on your contract instance.
[ "Builds", "a", "dictionary", "with", "the", "fields", "required", "to", "make", "the", "given", "transaction" ]
python
train
mar10/wsgidav
wsgidav/samples/virtual_dav_provider.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/samples/virtual_dav_provider.py#L323-L332
def handle_copy(self, dest_path, depth_infinity):
    """Change semantic of COPY to add resource tags.

    :param dest_path: copy destination; must be '/by_tag/<tag>/<resname>'.
    :param depth_infinity: part of the provider COPY interface; unused here.
    :return: True to signal that the copy was handled by this provider.
    :raise DAVError: HTTP_FORBIDDEN when dest_path is not a by_tag path.
    """
    # destPath must be '/by_tag/<tag>/<resname>'
    if "/by_tag/" not in dest_path:
        raise DAVError(HTTP_FORBIDDEN)
    catType, tag, _rest = util.save_split(dest_path.strip("/"), "/", 2)
    # FIX: was a bare `assert`, which is stripped under `python -O`; reject
    # non-by_tag categories explicitly with the same 403 the method already
    # uses for malformed destinations.
    if catType != "by_tag":
        raise DAVError(HTTP_FORBIDDEN)
    if tag not in self.data["tags"]:
        self.data["tags"].append(tag)
    return True
[ "def", "handle_copy", "(", "self", ",", "dest_path", ",", "depth_infinity", ")", ":", "# destPath must be '/by_tag/<tag>/<resname>'", "if", "\"/by_tag/\"", "not", "in", "dest_path", ":", "raise", "DAVError", "(", "HTTP_FORBIDDEN", ")", "catType", ",", "tag", ",", ...
Change semantic of COPY to add resource tags.
[ "Change", "semantic", "of", "COPY", "to", "add", "resource", "tags", "." ]
python
valid
inasafe/inasafe
safe/common/dynamic_translations.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/common/dynamic_translations.py#L54-L239
def dynamic_translations():
    """These listed here so they get translated apriori to loading data.

    NOTE(review): every string must stay an inline ``tr('...')`` literal so
    gettext-style extraction tools can find it — do not fold these calls into
    a loop or data table.
    """
    # Bangunan DKI
    tr('DKI buildings')
    # Banjir seperti 2007
    tr('Jakarta 2007 flood')
    tr('A flood in Jakarta like in 2007')
    tr('Jakarta flood like 2007 with pump failure at Pluit, Ancol and Sunter')
    # Banjir 2007 tanpa pompa di Pluit, Ancol dan Sunter
    tr('Jakarta flood like 2007 with pump failure at Pluit and Ancol')
    tr('A flood in Jakarta like in 2007 but with structural improvements')
    # Dam Pluit Runtuh
    tr('Sea wall collapse at Pluit')
    # Daerah Rawan Banjir
    tr('Jakarta flood prone areas')
    tr('A flood in Jakarta in RW areas identified as flood prone')
    # Daerah Rawan Banjir
    # Penduduk Jakarta
    tr('Population Jakarta')
    tr('People')
    tr('people')
    tr('People in Jakarta')
    tr('Indonesian people')
    tr('Indonesian People')
    tr('People in Indonesia')
    tr('Flood Depth (design) Jakarta')
    tr('Flood Depth (current) Jakarta')
    tr('An earthquake in Yogyakarta like in 2006')
    tr('Yogyakarta 2006 earthquake')
    tr('Indonesian Earthquake Hazard Map')
    tr('A tsunami in Maumere (Mw 8.1)')
    tr('Maumere tsunami inundation')
    tr('A tsunami in Padang (Mw 8.8)')
    tr('An earthquake at the Sumatran fault (Mw 7.8)')
    # Skenario Gempabumi Sesar Sumatra Mw 7.8
    tr('An earthquake at the Mentawai fault (Mw 9.0)')
    # Skenario Gempabumi Sesar Mentawai Mw 9.0
    tr('An earthquake in Padang like in 2009')
    tr('An earthquake in Yogyakarta like in 2006')
    tr('An earthquake at the Lembang fault')
    # Bangunan OSM
    tr('OSM building footprints')
    tr('Structures')
    tr('Structures in Jakarta')
    tr('Building')
    tr('Buildings')
    tr('Buildings in Jakarta')
    tr('Essential buildings')
    tr('Essential Buildings')
    tr('OSM buildings')
    tr('AIBEP schools')
    # Perkiraan penduduk
    tr('Population Count (5kmx5km)')
    tr('Office buildings Jakarta')
    # Puskesmas dan rumah sakit
    tr('Hospitals and clinics Jakarta')
    tr('Schools Jakarta')
    tr('Schools')
    tr('Industrial buildings Jakarta')
    tr('Industrial areas Jakarta')
    tr('Commercial areas Jakarta')
    # NOTE(review): this literal straddled a mangled line break in the dump;
    # reconstructed with a single space — confirm against upstream.
    tr('Hospitals Jakarta')
    tr('An eruption')
    tr('A volcano eruption')
    tr('A volcano alert')
    # Data attribute value start here
    tr('office')
    tr('clinic')
    tr('terrace')
    tr('police')
    tr('residential')
    tr('kindergarten')
    tr('bank')
    tr('place of worship')
    tr('school')
    tr('university')
    tr('apartments')
    tr('college')
    tr('commercial')
    tr('hospital')
    tr('industrial')
    tr('civic')
    tr('church')
    tr('hotel')
    tr('public building')
    tr('other')
    tr('fire station')
    # impact function parameters
    # FIXME (Sunni) It's better to be updated dynamically
    tr('Thresholds')
    tr('Postprocessors')
    tr('Medium thresholds')
    tr('High thresholds')
    tr('Low thresholds')
    tr('Medium threshold')
    tr('High threshold')
    tr('Low threshold')
    tr('Distances')
    tr('Volcano name')
    tr('BuildingType')
    tr('Youth ratio')
    tr('Adult ratio')
    tr('Elderly ratio')
    tr('Aggregation Categorical')
    tr('Displacement rate')
    tr('Mmi range')
    tr('Tolerance')
    tr('Calculate displaced people')
    tr('Evacuation percentage')
    tr('Threshold [m]')
    tr('Thresholds [m]')
    tr('Distances [km]')
    # Post-processing
    tr('Gender')
    tr('Age')
    tr('Aggregation')
    tr('Building type')
    tr('Aggregation Categorical')
    tr('gender')
    tr('age')
    tr('aggregation')
    tr('building type')
    tr('aggregation categorical')
    # Aggregation
    tr('Area')
    # Boolean
    tr('True')
    tr('False')
    # Keywords
    tr('Value map')
    tr('value map')
    tr('Inasafe fields')
    tr('Keyword version')
    tr('Inasafe default values')
    # OSM Downloader
    tr('Afghanistan')
    tr('Ethiopia')
    tr('France')
    tr('Indonesia')
    tr('Madagascar')
    tr('Malawi')
    tr('Mozambique')
    tr('Philippines')
    tr('South Africa')
    tr('Thailand')
    tr('Zimbabwe')
    # Words that we have in minimum needs BNPB EN
    tr('Rice')
    tr('Drinking Water')
    tr('Clean Water')
    tr('Family Kits')
    tr('Toilets')
    tr('weekly')
    tr('once')
    # Words that we have in SPHERE EN
    tr('daily')
    tr('monthly')
    tr('Daily food requirements')
    tr('Monthly soap allocation')
    tr('Monthly detergent requirements')
    tr('Washing')
    tr('Trash bags')
    tr('Water collecting container')
    tr('Water storage container')
    tr('Saucepan')
    tr('Basin')
    tr('Knife')
    tr('Rice ladle')
    tr('Plate')
    tr('Spoon')
    tr('Mug')
    tr('Stove')
    tr('Clothing')
    tr('Mattress')
    tr('Insecticide bed nets')
    tr('Covered floor area (3.5mx3.5m)')
    tr('Infants blanket (100x70cm)')
[ "def", "dynamic_translations", "(", ")", ":", "# Bangunan DKI", "tr", "(", "'DKI buildings'", ")", "# Banjir seperti 2007", "tr", "(", "'Jakarta 2007 flood'", ")", "tr", "(", "'A flood in Jakarta like in 2007'", ")", "tr", "(", "'Jakarta flood like 2007 with pump failure at...
These listed here so they get translated apriori to loading data.
[ "These", "listed", "here", "so", "they", "get", "translated", "apriori", "to", "loading", "data", "." ]
python
train
fedora-infra/fmn.rules
fmn/rules/generic.py
https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/generic.py#L47-L52
def _get_users_of_group(config, group):
    """ Utility to query fas for users of a group. """
    # An empty/None group name has no members by definition.
    if group:
        fas = fmn.rules.utils.get_fas(config)
        return fmn.rules.utils.get_user_of_group(config, fas, group)
    return set()
[ "def", "_get_users_of_group", "(", "config", ",", "group", ")", ":", "if", "not", "group", ":", "return", "set", "(", ")", "fas", "=", "fmn", ".", "rules", ".", "utils", ".", "get_fas", "(", "config", ")", "return", "fmn", ".", "rules", ".", "utils",...
Utility to query fas for users of a group.
[ "Utility", "to", "query", "fas", "for", "users", "of", "a", "group", "." ]
python
train
draios/python-sdc-client
sdcclient/_common.py
https://github.com/draios/python-sdc-client/blob/47f83415842048778939b90944f64386a3bcb205/sdcclient/_common.py#L583-L618
def create_user_invite(self, user_email, first_name=None, last_name=None, system_role=None):
    '''**Description**
        Invites a new user to use Sysdig Monitor. This should result in an
        email notification to the specified address.

    **Arguments**
        - **user_email**: the email address of the user that will be invited
          to use Sysdig Monitor
        - **first_name**: the first name of the user being invited
        - **last_name**: the last name of the user being invited
        - **system_role**: system-wide privilege level for this user
          regardless of team. specify 'ROLE_CUSTOMER' to create an Admin.
          if not specified, default is a non-Admin ('ROLE_USER').

    **Success Return Value**
        The newly created user.

    **Examples**
        - `examples/user_team_mgmt.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt.py>`_
        - `examples/user_team_mgmt_extended.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt_extended.py>`_
    '''
    # Look up the list of users to see if this exists, do not create if one exists
    res = requests.get(self.url + '/api/users', headers=self.hdrs,
                       verify=self.ssl_verify)
    if not self._checkResponse(res):
        return [False, self.lasterr]
    for user in res.json()['users']:
        if user['username'] == user_email:
            return [False, 'user ' + user_email + ' already exists']

    # Create the user, sending only the fields that were actually supplied.
    user_json = {}
    for key, value in (('username', user_email),
                       ('firstName', first_name),
                       ('lastName', last_name),
                       ('systemRole', system_role)):
        if value is not None:
            user_json[key] = value
    res = requests.post(self.url + '/api/users', headers=self.hdrs,
                        data=json.dumps(user_json), verify=self.ssl_verify)
    return self._request_result(res)
[ "def", "create_user_invite", "(", "self", ",", "user_email", ",", "first_name", "=", "None", ",", "last_name", "=", "None", ",", "system_role", "=", "None", ")", ":", "# Look up the list of users to see if this exists, do not create if one exists", "res", "=", "requests...
**Description** Invites a new user to use Sysdig Monitor. This should result in an email notification to the specified address. **Arguments** - **user_email**: the email address of the user that will be invited to use Sysdig Monitor - **first_name**: the first name of the user being invited - **last_name**: the last name of the user being invited - **system_role**: system-wide privilege level for this user regardless of team. specify 'ROLE_CUSTOMER' to create an Admin. if not specified, default is a non-Admin ('ROLE_USER'). **Success Return Value** The newly created user. **Examples** - `examples/user_team_mgmt.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt.py>`_ - `examples/user_team_mgmt_extended.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt_extended.py>`_
[ "**", "Description", "**", "Invites", "a", "new", "user", "to", "use", "Sysdig", "Monitor", ".", "This", "should", "result", "in", "an", "email", "notification", "to", "the", "specified", "address", "." ]
python
test
Duke-GCB/DukeDSClient
ddsc/ddsclient.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/ddsclient.py#L153-L175
def run(self, args):
    """ Upload contents of folders to a project with project_name on remote store.

    If follow_symlinks we will traverse symlinked directories. If content is
    already on remote site it will not be sent.

    :param args: Namespace arguments parsed from the command line.
    """
    # NOTE(review): the nesting of the trailing prints was reconstructed
    # from a formatting-mangled dump — confirm against upstream.
    project_name_or_id = self.create_project_name_or_id_from_args(args)
    project_upload = ProjectUpload(
        self.config,
        project_name_or_id,
        args.folders,  # local files/folders to upload into the project
        follow_symlinks=args.follow_symlinks)  # traverse symlinked dirs?
    if args.dry_run:
        # Report what would be uploaded without sending anything.
        print(project_upload.dry_run_report())
    else:
        print(project_upload.get_differences_summary())
        if project_upload.needs_to_upload():
            project_upload.run()
            print('\n')
            print(project_upload.get_upload_report())
            print('\n')
        print(project_upload.get_url_msg())
[ "def", "run", "(", "self", ",", "args", ")", ":", "project_name_or_id", "=", "self", ".", "create_project_name_or_id_from_args", "(", "args", ")", "folders", "=", "args", ".", "folders", "# list of local files/folders to upload into the project", "follow_symlinks", "=",...
Upload contents of folders to a project with project_name on remote store. If follow_symlinks we will traverse symlinked directories. If content is already on remote site it will not be sent. :param args: Namespace arguments parsed from the command line.
[ "Upload", "contents", "of", "folders", "to", "a", "project", "with", "project_name", "on", "remote", "store", ".", "If", "follow_symlinks", "we", "will", "traverse", "symlinked", "directories", ".", "If", "content", "is", "already", "on", "remote", "site", "it...
python
train
PyCQA/pylint-django
pylint_django/augmentations/__init__.py
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L697-L709
def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in templatetags module."""
    if node.name not in ('register',):
        return False
    # Walk up to the enclosing module and check it lives under templatetags.
    scope = node.parent
    while not isinstance(scope, Module):
        scope = scope.parent
    return "templatetags." in scope.name
[ "def", "is_templatetags_module_valid_constant", "(", "node", ")", ":", "if", "node", ".", "name", "not", "in", "(", "'register'", ",", ")", ":", "return", "False", "parent", "=", "node", ".", "parent", "while", "not", "isinstance", "(", "parent", ",", "Mod...
Suppress warnings for valid constants in templatetags module.
[ "Suppress", "warnings", "for", "valid", "constants", "in", "templatetags", "module", "." ]
python
train
pandas-dev/pandas
pandas/core/ops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L38-L58
def get_op_result_name(left, right):
    """
    Find the appropriate name to pin to an operation result.
    This result should always be either an Index or a Series.

    Parameters
    ----------
    left : {Series, Index}
    right : object

    Returns
    -------
    name : object
        Usually a string
    """
    # `left` is always a pd.Series when called from within ops
    if isinstance(right, (ABCSeries, pd.Index)):
        return _maybe_match_name(left, right)
    return left.name
[ "def", "get_op_result_name", "(", "left", ",", "right", ")", ":", "# `left` is always a pd.Series when called from within ops", "if", "isinstance", "(", "right", ",", "(", "ABCSeries", ",", "pd", ".", "Index", ")", ")", ":", "name", "=", "_maybe_match_name", "(", ...
Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string
[ "Find", "the", "appropriate", "name", "to", "pin", "to", "an", "operation", "result", ".", "This", "result", "should", "always", "be", "either", "an", "Index", "or", "a", "Series", "." ]
python
train
saltstack/salt
salt/modules/ssh.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ssh.py#L94-L120
def _expand_authorized_keys_path(path, user, home): ''' Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5) ''' converted_path = '' had_escape = False for char in path: if had_escape: had_escape = False if char == '%': converted_path += '%' elif char == 'u': converted_path += user elif char == 'h': converted_path += home else: error = 'AuthorizedKeysFile path: unknown token character "%{0}"'.format(char) raise CommandExecutionError(error) continue elif char == '%': had_escape = True else: converted_path += char if had_escape: error = "AuthorizedKeysFile path: Last character can't be escape character" raise CommandExecutionError(error) return converted_path
[ "def", "_expand_authorized_keys_path", "(", "path", ",", "user", ",", "home", ")", ":", "converted_path", "=", "''", "had_escape", "=", "False", "for", "char", "in", "path", ":", "if", "had_escape", ":", "had_escape", "=", "False", "if", "char", "==", "'%'...
Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5)
[ "Expand", "the", "AuthorizedKeysFile", "expression", ".", "Defined", "in", "man", "sshd_config", "(", "5", ")" ]
python
train
HPAC/matchpy
matchpy/utils.py
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L367-L428
def base_solution_linear(a: int, b: int, c: int) -> Iterator[Tuple[int, int]]:
    r"""Yield solutions for a basic linear Diophantine equation of the form :math:`ax + by = c`.

    The equation is first normalized by dividing :math:`a, b, c` by their gcd.
    The extended Euclidean algorithm (:func:`extended_euclid`) then gives a base
    solution :math:`(x_0, y_0)`; the general solution is
    :math:`(x_0 + b t, y_0 - a t)`.  Because the base solution is one of the
    minimal pairs of Bézout's coefficients, all non-negative solutions lie on
    one side of :math:`t = 0` and are consecutive in :math:`t`, so they can be
    enumerated by repeatedly adding/subtracting :math:`a` resp. :math:`b`.

    Args:
        a: The first coefficient of the equation.
        b: The second coefficient of the equation.
        c: The constant of the equation.

    Yields:
        Each non-negative integer solution of the equation as a tuple ``(x, y)``.

    Raises:
        ValueError:
            If any of the coefficients is not a positive integer.
    """
    if a <= 0 or b <= 0:
        raise ValueError('Coefficients a and b must be positive integers.')
    if c < 0:
        raise ValueError('Constant c must not be negative.')

    # Normalize by the gcd of all three coefficients.
    common = math.gcd(a, math.gcd(b, c))
    a, b, c = a // common, b // common, c // common

    if c == 0:
        yield (0, 0)
        return

    x0, y0, gcd_ab = extended_euclid(a, b)
    # If c is not divisible by gcd(a, b), then there is no solution.
    if c % gcd_ab != 0:
        return

    x, y = c * x0, c * y0
    if x <= 0:
        # Walk t upwards until y goes negative.
        while y >= 0:
            if x >= 0:
                yield (x, y)
            x += b
            y -= a
    else:
        # Walk t downwards until x goes negative.
        while x >= 0:
            if y >= 0:
                yield (x, y)
            x -= b
            y += a
[ "def", "base_solution_linear", "(", "a", ":", "int", ",", "b", ":", "int", ",", "c", ":", "int", ")", "->", "Iterator", "[", "Tuple", "[", "int", ",", "int", "]", "]", ":", "if", "a", "<=", "0", "or", "b", "<=", "0", ":", "raise", "ValueError",...
r"""Yield solutions for a basic linear Diophantine equation of the form :math:`ax + by = c`. First, the equation is normalized by dividing :math:`a, b, c` by their gcd. Then, the extended Euclidean algorithm (:func:`extended_euclid`) is used to find a base solution :math:`(x_0, y_0)`. All non-negative solutions are generated by using that the general solution is :math:`(x_0 + b t, y_0 - a t)`. Because the base solution is one of the minimal pairs of Bézout's coefficients, for all non-negative solutions either :math:`t \geq 0` or :math:`t \leq 0` must hold. Also, all the non-negative solutions are consecutive with respect to :math:`t`. Hence, by adding or subtracting :math:`a` resp. :math:`b` from the base solution, all non-negative solutions can be efficiently generated. Args: a: The first coefficient of the equation. b: The second coefficient of the equation. c: The constant of the equation. Yields: Each non-negative integer solution of the equation as a tuple ``(x, y)``. Raises: ValueError: If any of the coefficients is not a positive integer.
[ "r", "Yield", "solutions", "for", "a", "basic", "linear", "Diophantine", "equation", "of", "the", "form", ":", "math", ":", "ax", "+", "by", "=", "c", "." ]
python
train
cggh/scikit-allel
allel/stats/distance.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L335-L396
def plot_pairwise_distance(dist, labels=None, colorbar=True, ax=None, imshow_kwargs=None): """Plot a pairwise distance matrix. Parameters ---------- dist : array_like The distance matrix in condensed form. labels : sequence of strings, optional Sample labels for the axes. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn """ import matplotlib.pyplot as plt # check inputs dist_square = ensure_square(dist) # set up axes if ax is None: # make a square figure x = plt.rcParams['figure.figsize'][0] fig, ax = plt.subplots(figsize=(x, x)) fig.tight_layout() # setup imshow arguments if imshow_kwargs is None: imshow_kwargs = dict() imshow_kwargs.setdefault('interpolation', 'none') imshow_kwargs.setdefault('cmap', 'jet') imshow_kwargs.setdefault('vmin', np.min(dist)) imshow_kwargs.setdefault('vmax', np.max(dist)) # plot as image im = ax.imshow(dist_square, **imshow_kwargs) # tidy up if labels: ax.set_xticks(range(len(labels))) ax.set_yticks(range(len(labels))) ax.set_xticklabels(labels, rotation=90) ax.set_yticklabels(labels, rotation=0) else: ax.set_xticks([]) ax.set_yticks([]) if colorbar: plt.gcf().colorbar(im, shrink=.5) return ax
[ "def", "plot_pairwise_distance", "(", "dist", ",", "labels", "=", "None", ",", "colorbar", "=", "True", ",", "ax", "=", "None", ",", "imshow_kwargs", "=", "None", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "# check inputs", "dist_square", ...
Plot a pairwise distance matrix. Parameters ---------- dist : array_like The distance matrix in condensed form. labels : sequence of strings, optional Sample labels for the axes. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn
[ "Plot", "a", "pairwise", "distance", "matrix", "." ]
python
train
projectatomic/osbs-client
osbs/build/plugins_configuration.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/plugins_configuration.py#L92-L100
def has_plugin_conf(self, phase, name): """ Check whether a plugin is configured. """ try: self.get_plugin_conf(phase, name) return True except (KeyError, IndexError): return False
[ "def", "has_plugin_conf", "(", "self", ",", "phase", ",", "name", ")", ":", "try", ":", "self", ".", "get_plugin_conf", "(", "phase", ",", "name", ")", "return", "True", "except", "(", "KeyError", ",", "IndexError", ")", ":", "return", "False" ]
Check whether a plugin is configured.
[ "Check", "whether", "a", "plugin", "is", "configured", "." ]
python
train
kivy/python-for-android
pythonforandroid/pythonpackage.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/pythonpackage.py#L146-L231
def _get_system_python_executable(): """ Returns the path the system-wide python binary. (In case we're running in a virtualenv or venv) """ # This function is required by get_package_as_folder() to work # inside a virtualenv, since venv creation will fail with # the virtualenv's local python binary. # (venv/virtualenv incompatibility) # Abort if not in virtualenv or venv: if not hasattr(sys, "real_prefix") and ( not hasattr(sys, "base_prefix") or os.path.normpath(sys.base_prefix) == os.path.normpath(sys.prefix)): return sys.executable # Extract prefix we need to look in: if hasattr(sys, "real_prefix"): search_prefix = sys.real_prefix # virtualenv else: search_prefix = sys.base_prefix # venv def python_binary_from_folder(path): def binary_is_usable(python_bin): try: filenotfounderror = FileNotFoundError except NameError: # Python 2 filenotfounderror = OSError try: subprocess.check_output([ os.path.join(path, python_bin), "--version" ], stderr=subprocess.STDOUT) return True except (subprocess.CalledProcessError, filenotfounderror): return False python_name = "python" + sys.version while (not binary_is_usable(python_name) and python_name.find(".") > 0): # Try less specific binary name: python_name = python_name.rpartition(".")[0] if binary_is_usable(python_name): return os.path.join(path, python_name) return None # Return from sys.real_prefix if present: result = python_binary_from_folder(search_prefix) if result is not None: return result # Check out all paths in $PATH: bad_candidates = [] good_candidates = [] ever_had_nonvenv_path = False for p in os.environ.get("PATH", "").split(":"): # Skip if not possibly the real system python: if not os.path.normpath(p).startswith( os.path.normpath(search_prefix) ): continue # First folders might be virtualenv/venv we want to avoid: if not ever_had_nonvenv_path: sep = os.path.sep if ("system32" not in p.lower() and "usr" not in p) or \ {"home", ".tox"}.intersection(set(p.split(sep))) or \ "users" in p.lower(): # Doesn't look 
like bog-standard system path. if (p.endswith(os.path.sep + "bin") or p.endswith(os.path.sep + "bin" + os.path.sep)): # Also ends in "bin" -> likely virtualenv/venv. # Add as unfavorable / end of candidates: bad_candidates.append(p) continue ever_had_nonvenv_path = True good_candidates.append(p) # See if we can now actually find the system python: for p in good_candidates + bad_candidates: result = python_binary_from_folder(p) if result is not None: return result raise RuntimeError("failed to locate system python in: " + sys.real_prefix)
[ "def", "_get_system_python_executable", "(", ")", ":", "# This function is required by get_package_as_folder() to work", "# inside a virtualenv, since venv creation will fail with", "# the virtualenv's local python binary.", "# (venv/virtualenv incompatibility)", "# Abort if not in virtualenv or v...
Returns the path the system-wide python binary. (In case we're running in a virtualenv or venv)
[ "Returns", "the", "path", "the", "system", "-", "wide", "python", "binary", ".", "(", "In", "case", "we", "re", "running", "in", "a", "virtualenv", "or", "venv", ")" ]
python
train
django-auth-ldap/django-auth-ldap
django_auth_ldap/backend.py
https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L901-L917
def get_group_names(self): """ Returns the set of Django group names that this user belongs to by virtue of LDAP group memberships. """ if self._group_names is None: self._load_cached_attr("_group_names") if self._group_names is None: group_infos = self._get_group_infos() self._group_names = { self._group_type.group_name_from_info(group_info) for group_info in group_infos } self._cache_attr("_group_names") return self._group_names
[ "def", "get_group_names", "(", "self", ")", ":", "if", "self", ".", "_group_names", "is", "None", ":", "self", ".", "_load_cached_attr", "(", "\"_group_names\"", ")", "if", "self", ".", "_group_names", "is", "None", ":", "group_infos", "=", "self", ".", "_...
Returns the set of Django group names that this user belongs to by virtue of LDAP group memberships.
[ "Returns", "the", "set", "of", "Django", "group", "names", "that", "this", "user", "belongs", "to", "by", "virtue", "of", "LDAP", "group", "memberships", "." ]
python
train
push-things/django-th
django_th/views.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/django_th/views.py#L108-L164
def get_context_data(self, **kwargs): """ get the data of the view data are : 1) number of triggers enabled 2) number of triggers disabled 3) number of activated services 4) list of activated services by the connected user """ triggers_enabled = triggers_disabled = services_activated = () context = super(TriggerListView, self).get_context_data(**kwargs) if self.kwargs.get('trigger_filtered_by'): page_link = reverse('trigger_filter_by', kwargs={'trigger_filtered_by': self.kwargs.get('trigger_filtered_by')}) elif self.kwargs.get('trigger_ordered_by'): page_link = reverse('trigger_order_by', kwargs={'trigger_ordered_by': self.kwargs.get('trigger_ordered_by')}) else: page_link = reverse('home') if self.request.user.is_authenticated: # get the enabled triggers triggers_enabled = TriggerService.objects.filter( user=self.request.user, status=1).count() # get the disabled triggers triggers_disabled = TriggerService.objects.filter( user=self.request.user, status=0).count() # get the activated services user_service = UserService.objects.filter(user=self.request.user) """ List of triggers activated by the user """ context['trigger_filter_by'] = user_service """ number of service activated for the current user """ services_activated = user_service.count() """ which triggers are enabled/disabled """ context['nb_triggers'] = {'enabled': triggers_enabled, 'disabled': triggers_disabled} """ Number of services activated """ context['nb_services'] = services_activated context['page_link'] = page_link context['fire'] = settings.DJANGO_TH.get('fire', False) return context
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "triggers_enabled", "=", "triggers_disabled", "=", "services_activated", "=", "(", ")", "context", "=", "super", "(", "TriggerListView", ",", "self", ")", ".", "get_context_data", "(", ...
get the data of the view data are : 1) number of triggers enabled 2) number of triggers disabled 3) number of activated services 4) list of activated services by the connected user
[ "get", "the", "data", "of", "the", "view" ]
python
train
Capitains/MyCapytain
MyCapytain/retrievers/dts/__init__.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/retrievers/dts/__init__.py#L158-L178
def get_document( self, collection_id, ref=None, mimetype="application/tei+xml, application/xml"): """ Make a navigation request on the DTS API :param collection_id: Id of the collection :param ref: If ref is a tuple, it is treated as a range. String or int are treated as single ref :param mimetype: Media type to request :return: Response :rtype: requests.Response """ parameters = { "id": collection_id } _parse_ref_parameters(parameters, ref) return self.call( "documents", parameters, mimetype=mimetype )
[ "def", "get_document", "(", "self", ",", "collection_id", ",", "ref", "=", "None", ",", "mimetype", "=", "\"application/tei+xml, application/xml\"", ")", ":", "parameters", "=", "{", "\"id\"", ":", "collection_id", "}", "_parse_ref_parameters", "(", "parameters", ...
Make a navigation request on the DTS API :param collection_id: Id of the collection :param ref: If ref is a tuple, it is treated as a range. String or int are treated as single ref :param mimetype: Media type to request :return: Response :rtype: requests.Response
[ "Make", "a", "navigation", "request", "on", "the", "DTS", "API" ]
python
train
SoCo/SoCo
soco/alarms.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/alarms.py#L236-L250
def remove(self): """Remove the alarm from the Sonos system. There is no need to call `save`. The Python instance is not deleted, and can be saved back to Sonos again if desired. """ self.zone.alarmClock.DestroyAlarm([ ('ID', self._alarm_id) ]) alarm_id = self._alarm_id try: del Alarm._all_alarms[alarm_id] except KeyError: pass self._alarm_id = None
[ "def", "remove", "(", "self", ")", ":", "self", ".", "zone", ".", "alarmClock", ".", "DestroyAlarm", "(", "[", "(", "'ID'", ",", "self", ".", "_alarm_id", ")", "]", ")", "alarm_id", "=", "self", ".", "_alarm_id", "try", ":", "del", "Alarm", ".", "_...
Remove the alarm from the Sonos system. There is no need to call `save`. The Python instance is not deleted, and can be saved back to Sonos again if desired.
[ "Remove", "the", "alarm", "from", "the", "Sonos", "system", "." ]
python
train
limodou/uliweb
uliweb/contrib/upload/__init__.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/upload/__init__.py#L82-L111
def get_filename(self, filename, filesystem=False, convert=False, subpath=''): """ Get the filename according to self.to_path, and if filesystem is False then return unicode filename, otherwise return filesystem encoded filename @param filename: relative filename, it'll be combine with self.to_path @param filesystem: if True, then encoding the filename to filesystem @param convert: if True, then convert filename with FilenameConverter class @param subpath: sub folder in to_path """ from uliweb.utils.common import safe_unicode #make sure the filename is unicode s = settings.GLOBAL if convert: _p, _f = os.path.split(filename) _filename = os.path.join(_p, self.filename_convert(_f)) else: _filename = filename nfile = safe_unicode(_filename, s.HTMLPAGE_ENCODING) if subpath: paths = [application_path(self.to_path), subpath, nfile] else: paths = [application_path(self.to_path), nfile] f = os.path.normpath(os.path.join(*paths)).replace('\\', '/') if filesystem: return files.encode_filename(f, to_encoding=s.FILESYSTEM_ENCODING) return f
[ "def", "get_filename", "(", "self", ",", "filename", ",", "filesystem", "=", "False", ",", "convert", "=", "False", ",", "subpath", "=", "''", ")", ":", "from", "uliweb", ".", "utils", ".", "common", "import", "safe_unicode", "#make sure the filename is unicod...
Get the filename according to self.to_path, and if filesystem is False then return unicode filename, otherwise return filesystem encoded filename @param filename: relative filename, it'll be combine with self.to_path @param filesystem: if True, then encoding the filename to filesystem @param convert: if True, then convert filename with FilenameConverter class @param subpath: sub folder in to_path
[ "Get", "the", "filename", "according", "to", "self", ".", "to_path", "and", "if", "filesystem", "is", "False", "then", "return", "unicode", "filename", "otherwise", "return", "filesystem", "encoded", "filename" ]
python
train
ewilazarus/yld
yld/__main__.py
https://github.com/ewilazarus/yld/blob/157e474d1055f14ffdfd7e99da6c77d5f17d4307/yld/__main__.py#L81-L99
def confirm(tag): """ Prompts user before proceeding """ click.echo() if click.confirm('Do you want to create the tag {tag}?'.format( tag=click.style(str(tag), fg='yellow')), default=True, abort=True): git.create_tag(tag) if click.confirm( 'Do you want to push the tag {tag} into the upstream?'.format( tag=click.style(str(tag), fg='yellow')), default=True): git.push_tag(tag) click.echo('Done!') else: git.delete_tag(tag) click.echo('Aborted!')
[ "def", "confirm", "(", "tag", ")", ":", "click", ".", "echo", "(", ")", "if", "click", ".", "confirm", "(", "'Do you want to create the tag {tag}?'", ".", "format", "(", "tag", "=", "click", ".", "style", "(", "str", "(", "tag", ")", ",", "fg", "=", ...
Prompts user before proceeding
[ "Prompts", "user", "before", "proceeding" ]
python
valid
EconForge/dolo
dolo/numeric/optimize/newton.py
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/optimize/newton.py#L81-L151
def newton(f, x, verbose=False, tol=1e-6, maxit=5, jactype='serial'): """Solve nonlinear system using safeguarded Newton iterations Parameters ---------- Return ------ """ if verbose: print = lambda txt: old_print(txt) else: print = lambda txt: None it = 0 error = 10 converged = False maxbacksteps = 30 x0 = x if jactype == 'sparse': from scipy.sparse.linalg import spsolve as solve elif jactype == 'full': from numpy.linalg import solve else: solve = serial_solve while it<maxit and not converged: [v,dv] = f(x) # TODO: rewrite starting here # print("Time to evaluate {}".format(ss-tt)0) error_0 = abs(v).max() if error_0 < tol: if verbose: print("> System was solved after iteration {}. Residual={}".format(it,error_0)) converged = True else: it += 1 dx = solve(dv, v) # norm_dx = abs(dx).max() for bck in range(maxbacksteps): xx = x - dx*(2**(-bck)) vm = f(xx)[0] err = abs(vm).max() if err < error_0: break x = xx if verbose: print("\t> {} | {} | {}".format(it, err, bck)) if not converged: import warnings warnings.warn("Did not converge") return [x, it]
[ "def", "newton", "(", "f", ",", "x", ",", "verbose", "=", "False", ",", "tol", "=", "1e-6", ",", "maxit", "=", "5", ",", "jactype", "=", "'serial'", ")", ":", "if", "verbose", ":", "print", "=", "lambda", "txt", ":", "old_print", "(", "txt", ")",...
Solve nonlinear system using safeguarded Newton iterations Parameters ---------- Return ------
[ "Solve", "nonlinear", "system", "using", "safeguarded", "Newton", "iterations" ]
python
train
jazzband/django-axes
axes/middleware.py
https://github.com/jazzband/django-axes/blob/3e215a174030e43e7ab8c2a79c395eb0eeddc667/axes/middleware.py#L51-L59
def process_exception(self, request: AxesHttpRequest, exception): # pylint: disable=inconsistent-return-statements """ Exception handler that processes exceptions raised by the Axes signal handler when request fails with login. Only ``axes.exceptions.AxesSignalPermissionDenied`` exception is handled by this middleware. """ if isinstance(exception, AxesSignalPermissionDenied): return get_lockout_response(request)
[ "def", "process_exception", "(", "self", ",", "request", ":", "AxesHttpRequest", ",", "exception", ")", ":", "# pylint: disable=inconsistent-return-statements", "if", "isinstance", "(", "exception", ",", "AxesSignalPermissionDenied", ")", ":", "return", "get_lockout_respo...
Exception handler that processes exceptions raised by the Axes signal handler when request fails with login. Only ``axes.exceptions.AxesSignalPermissionDenied`` exception is handled by this middleware.
[ "Exception", "handler", "that", "processes", "exceptions", "raised", "by", "the", "Axes", "signal", "handler", "when", "request", "fails", "with", "login", "." ]
python
train
grundprinzip/pyxplorer
pyxplorer/types.py
https://github.com/grundprinzip/pyxplorer/blob/34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2/pyxplorer/types.py#L91-L105
def distribution(self, limit=1024): """ Build the distribution of distinct values """ res = self._qexec("%s, count(*) as __cnt" % self.name(), group="%s" % self.name(), order="__cnt DESC LIMIT %d" % limit) dist = [] cnt = self._table.size() for i, r in enumerate(res): dist.append(list(r) + [i, r[1] / float(cnt)]) self._distribution = pd.DataFrame(dist, columns=["value", "cnt", "r", "fraction"]) self._distribution.index = self._distribution.r return self._distribution
[ "def", "distribution", "(", "self", ",", "limit", "=", "1024", ")", ":", "res", "=", "self", ".", "_qexec", "(", "\"%s, count(*) as __cnt\"", "%", "self", ".", "name", "(", ")", ",", "group", "=", "\"%s\"", "%", "self", ".", "name", "(", ")", ",", ...
Build the distribution of distinct values
[ "Build", "the", "distribution", "of", "distinct", "values" ]
python
train
nwilming/ocupy
ocupy/parallel.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L70-L88
def xmlrpc_get_task(self): """ Return a new task description: ID and necessary parameters, all are given in a dictionary """ try: if len(self.reschedule) == 0: (task_id, cur_task) = next(self.task_iterator) else: (task_id, cur_task) = self.reschedule.pop() self.scheduled_tasks.update({task_id: cur_task}) return (task_id, cur_task.to_dict()) except StopIteration: print('StopIteration: No more tasks') return False except Exception as err: print('Some other error') print(err) return False
[ "def", "xmlrpc_get_task", "(", "self", ")", ":", "try", ":", "if", "len", "(", "self", ".", "reschedule", ")", "==", "0", ":", "(", "task_id", ",", "cur_task", ")", "=", "next", "(", "self", ".", "task_iterator", ")", "else", ":", "(", "task_id", "...
Return a new task description: ID and necessary parameters, all are given in a dictionary
[ "Return", "a", "new", "task", "description", ":", "ID", "and", "necessary", "parameters", "all", "are", "given", "in", "a", "dictionary" ]
python
train
softlayer/softlayer-python
SoftLayer/CLI/virt/power.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/virt/power.py#L16-L25
def rescue(env, identifier): """Reboot into a rescue image.""" vsi = SoftLayer.VSManager(env.client) vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS') if not (env.skip_confirmations or formatting.confirm("This action will reboot this VSI. Continue?")): raise exceptions.CLIAbort('Aborted') vsi.rescue(vs_id)
[ "def", "rescue", "(", "env", ",", "identifier", ")", ":", "vsi", "=", "SoftLayer", ".", "VSManager", "(", "env", ".", "client", ")", "vs_id", "=", "helpers", ".", "resolve_id", "(", "vsi", ".", "resolve_ids", ",", "identifier", ",", "'VS'", ")", "if", ...
Reboot into a rescue image.
[ "Reboot", "into", "a", "rescue", "image", "." ]
python
train
nschloe/meshplex
meshplex/base.py
https://github.com/nschloe/meshplex/blob/376cfe8ce7b9917e5398c5d60c87455ff5590913/meshplex/base.py#L259-L273
def _mark_vertices(self, subdomain): """Mark faces/edges which are fully in subdomain. """ if subdomain is None: is_inside = numpy.ones(len(self.node_coords), dtype=bool) else: is_inside = subdomain.is_inside(self.node_coords.T).T if subdomain.is_boundary_only: # Filter boundary self.mark_boundary() is_inside = is_inside & self.is_boundary_node self.subdomains[subdomain] = {"vertices": is_inside} return
[ "def", "_mark_vertices", "(", "self", ",", "subdomain", ")", ":", "if", "subdomain", "is", "None", ":", "is_inside", "=", "numpy", ".", "ones", "(", "len", "(", "self", ".", "node_coords", ")", ",", "dtype", "=", "bool", ")", "else", ":", "is_inside", ...
Mark faces/edges which are fully in subdomain.
[ "Mark", "faces", "/", "edges", "which", "are", "fully", "in", "subdomain", "." ]
python
train
sbusard/wagoner
wagoner/utils.py
https://github.com/sbusard/wagoner/blob/7f83d66bbd0e009e4d4232ffdf319bd5a2a5683b/wagoner/utils.py#L76-L85
def extract_words(lines): """ Extract from the given iterable of lines the list of words. :param lines: an iterable of lines; :return: a generator of words of lines. """ for line in lines: for word in re.findall(r"\w+", line): yield word
[ "def", "extract_words", "(", "lines", ")", ":", "for", "line", "in", "lines", ":", "for", "word", "in", "re", ".", "findall", "(", "r\"\\w+\"", ",", "line", ")", ":", "yield", "word" ]
Extract from the given iterable of lines the list of words. :param lines: an iterable of lines; :return: a generator of words of lines.
[ "Extract", "from", "the", "given", "iterable", "of", "lines", "the", "list", "of", "words", "." ]
python
train
wmayner/pyphi
pyphi/actual.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/actual.py#L197-L215
def repertoire(self, direction, mechanism, purview): """Return the cause or effect repertoire function based on a direction. Args: direction (str): The temporal direction, specifiying the cause or effect repertoire. """ system = self.system[direction] node_labels = system.node_labels if not set(purview).issubset(self.purview_indices(direction)): raise ValueError('{} is not a {} purview in {}'.format( fmt.fmt_mechanism(purview, node_labels), direction, self)) if not set(mechanism).issubset(self.mechanism_indices(direction)): raise ValueError('{} is no a {} mechanism in {}'.format( fmt.fmt_mechanism(mechanism, node_labels), direction, self)) return system.repertoire(direction, mechanism, purview)
[ "def", "repertoire", "(", "self", ",", "direction", ",", "mechanism", ",", "purview", ")", ":", "system", "=", "self", ".", "system", "[", "direction", "]", "node_labels", "=", "system", ".", "node_labels", "if", "not", "set", "(", "purview", ")", ".", ...
Return the cause or effect repertoire function based on a direction. Args: direction (str): The temporal direction, specifiying the cause or effect repertoire.
[ "Return", "the", "cause", "or", "effect", "repertoire", "function", "based", "on", "a", "direction", "." ]
python
train
hydpy-dev/hydpy
hydpy/core/timetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/timetools.py#L363-L389
def to_cfunits(self, unit='hours', utcoffset=None): """Return a `units` string agreeing with the NetCDF-CF conventions. By default, |Date.to_cfunits| takes `hours` as time unit, and the the actual value of |Options.utcoffset| as time zone information: >>> from hydpy import Date >>> date = Date('1992-10-08 15:15:42') >>> date.to_cfunits() 'hours since 1992-10-08 15:15:42 +01:00' Other time units are allowed (no checks are performed, so select something useful): >>> date.to_cfunits(unit='minutes') 'minutes since 1992-10-08 15:15:42 +01:00' For changing the time zone, pass the corresponding offset in minutes: >>> date.to_cfunits(unit='sec', utcoffset=-60) 'sec since 1992-10-08 13:15:42 -01:00' """ if utcoffset is None: utcoffset = hydpy.pub.options.utcoffset string = self.to_string('iso2', utcoffset) string = ' '.join((string[:-6], string[-6:])) return f'{unit} since {string}'
[ "def", "to_cfunits", "(", "self", ",", "unit", "=", "'hours'", ",", "utcoffset", "=", "None", ")", ":", "if", "utcoffset", "is", "None", ":", "utcoffset", "=", "hydpy", ".", "pub", ".", "options", ".", "utcoffset", "string", "=", "self", ".", "to_strin...
Return a `units` string agreeing with the NetCDF-CF conventions. By default, |Date.to_cfunits| takes `hours` as time unit, and the the actual value of |Options.utcoffset| as time zone information: >>> from hydpy import Date >>> date = Date('1992-10-08 15:15:42') >>> date.to_cfunits() 'hours since 1992-10-08 15:15:42 +01:00' Other time units are allowed (no checks are performed, so select something useful): >>> date.to_cfunits(unit='minutes') 'minutes since 1992-10-08 15:15:42 +01:00' For changing the time zone, pass the corresponding offset in minutes: >>> date.to_cfunits(unit='sec', utcoffset=-60) 'sec since 1992-10-08 13:15:42 -01:00'
[ "Return", "a", "units", "string", "agreeing", "with", "the", "NetCDF", "-", "CF", "conventions", "." ]
python
train
Kortemme-Lab/klab
klab/bio/pdbml.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdbml.py#L104-L123
def parse_deprecation(self): '''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.''' deprecation_tag = self.main_tag.getElementsByTagName("PDBx:pdbx_database_PDB_obs_sprCategory") assert(len(deprecation_tag) <= 1) if deprecation_tag: deprecation_tag = deprecation_tag[0] deprecation_subtag = deprecation_tag.getElementsByTagName("PDBx:pdbx_database_PDB_obs_spr") assert(len(deprecation_subtag) == 1) deprecation_subtag = deprecation_subtag[0] assert(deprecation_subtag.hasAttribute('replace_pdb_id')) assert(deprecation_subtag.hasAttribute('pdb_id')) old_pdb_id = deprecation_subtag.getAttribute('replace_pdb_id').upper() new_pdb_id = deprecation_subtag.getAttribute('pdb_id').upper() if self.pdb_id == old_pdb_id: self.deprecated = True self.replacement_pdb_id = new_pdb_id else: assert(self.pdb_id == new_pdb_id)
[ "def", "parse_deprecation", "(", "self", ")", ":", "deprecation_tag", "=", "self", ".", "main_tag", ".", "getElementsByTagName", "(", "\"PDBx:pdbx_database_PDB_obs_sprCategory\"", ")", "assert", "(", "len", "(", "deprecation_tag", ")", "<=", "1", ")", "if", "depre...
Checks to see if the PDB file has been deprecated and, if so, what the new ID is.
[ "Checks", "to", "see", "if", "the", "PDB", "file", "has", "been", "deprecated", "and", "if", "so", "what", "the", "new", "ID", "is", "." ]
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/pip/commands/show.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/pip/commands/show.py#L81-L100
def print_results(distributions, list_all_files): """ Print the informations from installed distributions found. """ results_printed = False for dist in distributions: results_printed = True logger.info("---") logger.info("Name: %s" % dist['name']) logger.info("Version: %s" % dist['version']) logger.info("Location: %s" % dist['location']) logger.info("Requires: %s" % ', '.join(dist['requires'])) if list_all_files: logger.info("Files:") if dist['files'] is not None: for line in dist['files']: logger.info(" %s" % line.strip()) else: logger.info("Cannot locate installed-files.txt") return results_printed
[ "def", "print_results", "(", "distributions", ",", "list_all_files", ")", ":", "results_printed", "=", "False", "for", "dist", "in", "distributions", ":", "results_printed", "=", "True", "logger", ".", "info", "(", "\"---\"", ")", "logger", ".", "info", "(", ...
Print the informations from installed distributions found.
[ "Print", "the", "informations", "from", "installed", "distributions", "found", "." ]
python
test
PetrochukM/PyTorch-NLP
torchnlp/encoders/text/static_tokenizer_encoder.py
https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/torchnlp/encoders/text/static_tokenizer_encoder.py#L121-L132
def decode(self, encoded): """ Decodes a tensor into a sequence. Args: encoded (torch.Tensor): Encoded sequence. Returns: str: Sequence decoded from ``encoded``. """ encoded = super().decode(encoded) tokens = [self.itos[index] for index in encoded] return self.detokenize(tokens)
[ "def", "decode", "(", "self", ",", "encoded", ")", ":", "encoded", "=", "super", "(", ")", ".", "decode", "(", "encoded", ")", "tokens", "=", "[", "self", ".", "itos", "[", "index", "]", "for", "index", "in", "encoded", "]", "return", "self", ".", ...
Decodes a tensor into a sequence. Args: encoded (torch.Tensor): Encoded sequence. Returns: str: Sequence decoded from ``encoded``.
[ "Decodes", "a", "tensor", "into", "a", "sequence", "." ]
python
train
UDST/urbansim
urbansim/accounts.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L57-L75
def add_transaction(self, amount, subaccount=None, metadata=None): """ Add a new transaction to the account. Parameters ---------- amount : float Negative for withdrawls, positive for deposits. subaccount : object, optional Any indicator of a subaccount to which this transaction applies. metadata : dict, optional Any extra metadata to record with the transaction. (E.g. Info about where the money is coming from or going.) May not contain keys 'amount' or 'subaccount'. """ metadata = metadata or {} self.transactions.append(Transaction(amount, subaccount, metadata)) self.balance += amount
[ "def", "add_transaction", "(", "self", ",", "amount", ",", "subaccount", "=", "None", ",", "metadata", "=", "None", ")", ":", "metadata", "=", "metadata", "or", "{", "}", "self", ".", "transactions", ".", "append", "(", "Transaction", "(", "amount", ",",...
Add a new transaction to the account. Parameters ---------- amount : float Negative for withdrawls, positive for deposits. subaccount : object, optional Any indicator of a subaccount to which this transaction applies. metadata : dict, optional Any extra metadata to record with the transaction. (E.g. Info about where the money is coming from or going.) May not contain keys 'amount' or 'subaccount'.
[ "Add", "a", "new", "transaction", "to", "the", "account", "." ]
python
train
royi1000/py-libhdate
hdate/date.py
https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/date.py#L130-L134
def _jdn(self): """Return the Julian date number for the given date.""" if self._last_updated == "gdate": return conv.gdate_to_jdn(self.gdate) return conv.hdate_to_jdn(self.hdate)
[ "def", "_jdn", "(", "self", ")", ":", "if", "self", ".", "_last_updated", "==", "\"gdate\"", ":", "return", "conv", ".", "gdate_to_jdn", "(", "self", ".", "gdate", ")", "return", "conv", ".", "hdate_to_jdn", "(", "self", ".", "hdate", ")" ]
Return the Julian date number for the given date.
[ "Return", "the", "Julian", "date", "number", "for", "the", "given", "date", "." ]
python
train
pmacosta/pexdoc
pexdoc/pinspect.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/pinspect.py#L913-L919
def _in_class(self, node): """Find if callable is function or method.""" # Move left one indentation level and check if that callable is a class indent = self._get_indent(node) for indent_dict in reversed(self._indent_stack): # pragma: no branch if (indent_dict["level"] < indent) or (indent_dict["type"] == "module"): return indent_dict["type"] == "class"
[ "def", "_in_class", "(", "self", ",", "node", ")", ":", "# Move left one indentation level and check if that callable is a class", "indent", "=", "self", ".", "_get_indent", "(", "node", ")", "for", "indent_dict", "in", "reversed", "(", "self", ".", "_indent_stack", ...
Find if callable is function or method.
[ "Find", "if", "callable", "is", "function", "or", "method", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/__init__.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/__init__.py#L338-L348
def create_vehicle_icon(self, name, colour, follow=False, vehicle_type=None): '''add a vehicle to the map''' from MAVProxy.modules.mavproxy_map import mp_slipmap if vehicle_type is None: vehicle_type = self.vehicle_type_name if name in self.have_vehicle and self.have_vehicle[name] == vehicle_type: return self.have_vehicle[name] = vehicle_type icon = self.mpstate.map.icon(colour + vehicle_type + '.png') self.mpstate.map.add_object(mp_slipmap.SlipIcon(name, (0,0), icon, layer=3, rotation=0, follow=follow, trail=mp_slipmap.SlipTrail()))
[ "def", "create_vehicle_icon", "(", "self", ",", "name", ",", "colour", ",", "follow", "=", "False", ",", "vehicle_type", "=", "None", ")", ":", "from", "MAVProxy", ".", "modules", ".", "mavproxy_map", "import", "mp_slipmap", "if", "vehicle_type", "is", "None...
add a vehicle to the map
[ "add", "a", "vehicle", "to", "the", "map" ]
python
train
aarongarrett/inspyred
inspyred/ec/terminators.py
https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/terminators.py#L100-L122
def average_fitness_termination(population, num_generations, num_evaluations, args): """Return True if the population's average fitness is near its best fitness. This function calculates the average fitness of the population, as well as the best fitness. If the difference between those values is less than a specified tolerance, the function returns True. .. Arguments: population -- the population of Individuals num_generations -- the number of elapsed generations num_evaluations -- the number of candidate solution evaluations args -- a dictionary of keyword arguments Optional keyword arguments in args: - *tolerance* -- the minimum allowable difference between average and best fitness (default 0.001) """ tolerance = args.setdefault('tolerance', 0.001) avg_fit = sum([x.fitness for x in population]) / float(len(population)) best_fit = max([x.fitness for x in population]) return (best_fit - avg_fit) < tolerance
[ "def", "average_fitness_termination", "(", "population", ",", "num_generations", ",", "num_evaluations", ",", "args", ")", ":", "tolerance", "=", "args", ".", "setdefault", "(", "'tolerance'", ",", "0.001", ")", "avg_fit", "=", "sum", "(", "[", "x", ".", "fi...
Return True if the population's average fitness is near its best fitness. This function calculates the average fitness of the population, as well as the best fitness. If the difference between those values is less than a specified tolerance, the function returns True. .. Arguments: population -- the population of Individuals num_generations -- the number of elapsed generations num_evaluations -- the number of candidate solution evaluations args -- a dictionary of keyword arguments Optional keyword arguments in args: - *tolerance* -- the minimum allowable difference between average and best fitness (default 0.001)
[ "Return", "True", "if", "the", "population", "s", "average", "fitness", "is", "near", "its", "best", "fitness", ".", "This", "function", "calculates", "the", "average", "fitness", "of", "the", "population", "as", "well", "as", "the", "best", "fitness", ".", ...
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/system_monitor_mail/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/system_monitor_mail/__init__.py#L100-L121
def _set_fru(self, v, load=False): """ Setter method for fru, mapped from YANG variable /system_monitor_mail/fru (container) If this variable is read-only (config: false) in the source YANG file, then _set_fru is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fru() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=fru.fru, is_container='container', presence=False, yang_name="fru", rest_name="fru", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure FRU mail settings'}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """fru must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=fru.fru, is_container='container', presence=False, yang_name="fru", rest_name="fru", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure FRU mail settings'}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""", }) self.__fru = t if hasattr(self, '_set'): self._set()
[ "def", "_set_fru", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "...
Setter method for fru, mapped from YANG variable /system_monitor_mail/fru (container) If this variable is read-only (config: false) in the source YANG file, then _set_fru is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fru() directly.
[ "Setter", "method", "for", "fru", "mapped", "from", "YANG", "variable", "/", "system_monitor_mail", "/", "fru", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "...
python
train
pandas-dev/pandas
pandas/core/groupby/groupby.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L2092-L2115
def shift(self, periods=1, freq=None, axis=0, fill_value=None): """ Shift each group by periods observations. Parameters ---------- periods : integer, default 1 number of periods to shift freq : frequency string axis : axis to shift, default 0 fill_value : optional .. versionadded:: 0.24.0 """ if freq is not None or axis != 0 or not isna(fill_value): return self.apply(lambda x: x.shift(periods, freq, axis, fill_value)) return self._get_cythonized_result('group_shift_indexer', self.grouper, cython_dtype=np.int64, needs_ngroups=True, result_is_index=True, periods=periods)
[ "def", "shift", "(", "self", ",", "periods", "=", "1", ",", "freq", "=", "None", ",", "axis", "=", "0", ",", "fill_value", "=", "None", ")", ":", "if", "freq", "is", "not", "None", "or", "axis", "!=", "0", "or", "not", "isna", "(", "fill_value", ...
Shift each group by periods observations. Parameters ---------- periods : integer, default 1 number of periods to shift freq : frequency string axis : axis to shift, default 0 fill_value : optional .. versionadded:: 0.24.0
[ "Shift", "each", "group", "by", "periods", "observations", "." ]
python
train
Fizzadar/pyinfra
pyinfra_cli/inventory/__init__.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra_cli/inventory/__init__.py#L111-L217
def make_inventory( inventory_filename, deploy_dir=None, ssh_port=None, ssh_user=None, ssh_key=None, ssh_key_password=None, ssh_password=None, ): ''' Builds a ``pyinfra.api.Inventory`` from the filesystem. If the file does not exist and doesn't contain a / attempts to use that as the only hostname. ''' if ssh_port is not None: ssh_port = int(ssh_port) file_groupname = None # If we're not a valid file we assume a list of comma separated hostnames if not path.exists(inventory_filename): groups = { 'all': inventory_filename.split(','), } else: groups = _get_groups_from_filename(inventory_filename) # Used to set all the hosts to an additional group - that of the filename # ie inventories/dev.py means all the hosts are in the dev group, if not present file_groupname = path.basename(inventory_filename).rsplit('.')[0] all_data = {} if 'all' in groups: all_hosts = groups.pop('all') if isinstance(all_hosts, tuple): all_hosts, all_data = all_hosts # Build all out of the existing hosts if not defined else: all_hosts = [] for hosts in groups.values(): # Groups can be a list of hosts or tuple of (hosts, data) hosts = hosts[0] if isinstance(hosts, tuple) else hosts for host in hosts: # Hosts can be a hostname or tuple of (hostname, data) hostname = host[0] if isinstance(host, tuple) else host if hostname not in all_hosts: all_hosts.append(hostname) groups['all'] = (all_hosts, all_data) # Apply the filename group if not already defined if file_groupname and file_groupname not in groups: groups[file_groupname] = all_hosts # In pyinfra an inventory is a combination of (hostnames + data). However, in CLI # mode we want to be define this in separate files (inventory / group data). The # issue is we want inventory access within the group data files - but at this point # we're not ready to make an Inventory. So here we just create a fake one, and # attach it to pseudo_inventory while we import the data files. 
logger.debug('Creating fake inventory...') fake_groups = { # In API mode groups *must* be tuples of (hostnames, data) name: group if isinstance(group, tuple) else (group, {}) for name, group in six.iteritems(groups) } fake_inventory = Inventory((all_hosts, all_data), **fake_groups) pseudo_inventory.set(fake_inventory) # Get all group data (group_data/*.py) group_data = _get_group_data(deploy_dir) # Reset the pseudo inventory pseudo_inventory.reset() # For each group load up any data for name, hosts in six.iteritems(groups): data = {} if isinstance(hosts, tuple): hosts, data = hosts if name in group_data: data.update(group_data.pop(name)) # Attach to group object groups[name] = (hosts, data) # Loop back through any leftover group data and create an empty (for now) # group - this is because inventory @connectors can attach arbitrary groups # to hosts, so we need to support that. for name, data in six.iteritems(group_data): groups[name] = ([], data) return Inventory( groups.pop('all'), ssh_user=ssh_user, ssh_key=ssh_key, ssh_key_password=ssh_key_password, ssh_port=ssh_port, ssh_password=ssh_password, **groups ), file_groupname and file_groupname.lower()
[ "def", "make_inventory", "(", "inventory_filename", ",", "deploy_dir", "=", "None", ",", "ssh_port", "=", "None", ",", "ssh_user", "=", "None", ",", "ssh_key", "=", "None", ",", "ssh_key_password", "=", "None", ",", "ssh_password", "=", "None", ",", ")", "...
Builds a ``pyinfra.api.Inventory`` from the filesystem. If the file does not exist and doesn't contain a / attempts to use that as the only hostname.
[ "Builds", "a", "pyinfra", ".", "api", ".", "Inventory", "from", "the", "filesystem", ".", "If", "the", "file", "does", "not", "exist", "and", "doesn", "t", "contain", "a", "/", "attempts", "to", "use", "that", "as", "the", "only", "hostname", "." ]
python
train
malthe/pop
src/pop/client.py
https://github.com/malthe/pop/blob/3b58b91b41d8b9bee546eb40dc280a57500b8bed/src/pop/client.py#L59-L68
def set_or_create(self, path, *args, **kwargs): """Sets the data of a node at the given path, or creates it.""" d = self.set(path, *args, **kwargs) @d.addErrback def _error(result): return self.create(path, *args, **kwargs) return d
[ "def", "set_or_create", "(", "self", ",", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "d", "=", "self", ".", "set", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", "@", "d", ".", "addErrback", "def", "_error", "(",...
Sets the data of a node at the given path, or creates it.
[ "Sets", "the", "data", "of", "a", "node", "at", "the", "given", "path", "or", "creates", "it", "." ]
python
train
chaimleib/intervaltree
intervaltree/interval.py
https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/interval.py#L100-L110
def contains_interval(self, other): """ Whether other is contained in this Interval. :param other: Interval :return: True or False :rtype: bool """ return ( self.begin <= other.begin and self.end >= other.end )
[ "def", "contains_interval", "(", "self", ",", "other", ")", ":", "return", "(", "self", ".", "begin", "<=", "other", ".", "begin", "and", "self", ".", "end", ">=", "other", ".", "end", ")" ]
Whether other is contained in this Interval. :param other: Interval :return: True or False :rtype: bool
[ "Whether", "other", "is", "contained", "in", "this", "Interval", ".", ":", "param", "other", ":", "Interval", ":", "return", ":", "True", "or", "False", ":", "rtype", ":", "bool" ]
python
train
wtsi-hgi/python-common
hgicommon/data_source/static_from_file.py
https://github.com/wtsi-hgi/python-common/blob/0376a6b574ff46e82e509e90b6cb3693a3dbb577/hgicommon/data_source/static_from_file.py#L217-L231
def _on_file_moved(self, event: FileSystemMovedEvent): """ Called when a file in the monitored directory has been moved. Breaks move down into a delete and a create (which it is sometimes detected as!). :param event: the file system event """ if not event.is_directory and self.is_data_file(event.src_path): delete_event = FileSystemEvent(event.src_path) delete_event.event_type = EVENT_TYPE_DELETED self._on_file_deleted(delete_event) create_event = FileSystemEvent(event.dest_path) create_event.event_type = EVENT_TYPE_CREATED self._on_file_created(create_event)
[ "def", "_on_file_moved", "(", "self", ",", "event", ":", "FileSystemMovedEvent", ")", ":", "if", "not", "event", ".", "is_directory", "and", "self", ".", "is_data_file", "(", "event", ".", "src_path", ")", ":", "delete_event", "=", "FileSystemEvent", "(", "e...
Called when a file in the monitored directory has been moved. Breaks move down into a delete and a create (which it is sometimes detected as!). :param event: the file system event
[ "Called", "when", "a", "file", "in", "the", "monitored", "directory", "has", "been", "moved", "." ]
python
valid
python-rope/rope
rope/base/utils/__init__.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/utils/__init__.py#L88-L98
def resolve(str_or_obj): """Returns object from string""" from rope.base.utils.pycompat import string_types if not isinstance(str_or_obj, string_types): return str_or_obj if '.' not in str_or_obj: str_or_obj += '.' mod_name, obj_name = str_or_obj.rsplit('.', 1) __import__(mod_name) mod = sys.modules[mod_name] return getattr(mod, obj_name) if obj_name else mod
[ "def", "resolve", "(", "str_or_obj", ")", ":", "from", "rope", ".", "base", ".", "utils", ".", "pycompat", "import", "string_types", "if", "not", "isinstance", "(", "str_or_obj", ",", "string_types", ")", ":", "return", "str_or_obj", "if", "'.'", "not", "i...
Returns object from string
[ "Returns", "object", "from", "string" ]
python
train
DataBiosphere/toil
src/toil/utils/toilDebugFile.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilDebugFile.py#L71-L100
def printContentsOfJobStore(jobStorePath, nameOfJob=None): """ Fetch a list of all files contained in the jobStore directory input if nameOfJob is not declared, otherwise it only prints out the names of files for that specific job for which it can find a match. Also creates a logFile containing this same record of job files in the working directory. :param jobStorePath: Directory path to recursively look for files. :param nameOfJob: Default is None, which prints out all files in the jobStore. If specified, it will print all jobStore files that have been written to the jobStore by that job. """ if nameOfJob: glob = "*" + nameOfJob + "*" logFile = nameOfJob + "_fileset.txt" else: glob = "*" logFile = "jobstore_files.txt" nameOfJob = "" list_of_files = recursiveGlob(directoryname=jobStorePath, glob_pattern=glob) if os.path.exists(logFile): os.remove(logFile) for gfile in sorted(list_of_files): if not gfile.endswith('.new'): logger.debug(nameOfJob + "File: %s", os.path.basename(gfile)) with open(logFile, "a+") as f: f.write(os.path.basename(gfile)) f.write("\n")
[ "def", "printContentsOfJobStore", "(", "jobStorePath", ",", "nameOfJob", "=", "None", ")", ":", "if", "nameOfJob", ":", "glob", "=", "\"*\"", "+", "nameOfJob", "+", "\"*\"", "logFile", "=", "nameOfJob", "+", "\"_fileset.txt\"", "else", ":", "glob", "=", "\"*...
Fetch a list of all files contained in the jobStore directory input if nameOfJob is not declared, otherwise it only prints out the names of files for that specific job for which it can find a match. Also creates a logFile containing this same record of job files in the working directory. :param jobStorePath: Directory path to recursively look for files. :param nameOfJob: Default is None, which prints out all files in the jobStore. If specified, it will print all jobStore files that have been written to the jobStore by that job.
[ "Fetch", "a", "list", "of", "all", "files", "contained", "in", "the", "jobStore", "directory", "input", "if", "nameOfJob", "is", "not", "declared", "otherwise", "it", "only", "prints", "out", "the", "names", "of", "files", "for", "that", "specific", "job", ...
python
train
jic-dtool/dtool-create
dtool_create/dataset.py
https://github.com/jic-dtool/dtool-create/blob/12172363d14eaedba2db4c452ef995b14f1b630d/dtool_create/dataset.py#L74-L96
def _prompt_for_values(d): """Update the descriptive metadata interactively. Uses values entered by the user. Note that the function keeps recursing whenever a value is another ``CommentedMap`` or a ``list``. The function works as passing dictionaries and lists into a function edits the values in place. """ for key, value in d.items(): if isinstance(value, CommentedMap): _prompt_for_values(value) elif isinstance(value, list): for item in value: _prompt_for_values(item) else: typ = type(value) if isinstance(value, ScalarFloat): # Deal with ruamel.yaml floats. typ = float new_value = click.prompt(key, type=typ, default=value) d[key] = new_value return d
[ "def", "_prompt_for_values", "(", "d", ")", ":", "for", "key", ",", "value", "in", "d", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "CommentedMap", ")", ":", "_prompt_for_values", "(", "value", ")", "elif", "isinstance", "(", "v...
Update the descriptive metadata interactively. Uses values entered by the user. Note that the function keeps recursing whenever a value is another ``CommentedMap`` or a ``list``. The function works as passing dictionaries and lists into a function edits the values in place.
[ "Update", "the", "descriptive", "metadata", "interactively", "." ]
python
valid
darkfeline/animanager
animanager/db/query/select.py
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/select.py#L76-L147
def select( db, where_query: str, where_params: SQLParams, fields: FieldsParam = ALL, episode_fields: FieldsParam = (), ) -> Iterator[Anime]: """Perform an arbitrary SQL SELECT WHERE on the anime table. By nature of "arbitrary query", this is vulnerable to injection, use only trusted values for `where_query`. This will "lazily" fetch the requested fields as needed. For example, episodes (which require a separate query per anime) will only be fetched if `episode_fields` is provided. Anime status will be cached only if status fields are requested. :param str where_query: SELECT WHERE query :param where_params: parameters for WHERE query :param fields: anime fields to get. If :const:`ALL`, get all fields. Default is :const:`ALL`. :param episode_fields: episode fields to get. If :const:`ALL`, get all fields. If empty, don't get episodes. `fields` must contain 'aid' to get episodes. :param bool force_status: whether to force status calculation. :returns: iterator of Anime """ logger.debug( 'select(%r, %r, %r, %r, %r)', db, where_query, where_params, fields, episode_fields) fields = _clean_fields(ANIME_FIELDS, fields) if not fields: raise ValueError('Fields cannot be empty') if set(fields) & STATUS_FIELDS.keys(): cur = db.cursor().execute( ANIME_QUERY.format('aid', where_query), where_params) for row in cur: cache_status(db, row[0]) if 'aid' in fields: episode_fields = _clean_fields(EPISODE_FIELDS, episode_fields) else: episode_fields = () with db: anime_query = ANIME_QUERY.format( ','.join(ANIME_FIELDS[field] for field in fields), where_query, ) anime_rows = db.cursor().execute(anime_query, where_params) for row in anime_rows: anime = Anime(**{ field: value for field, value in zip(fields, row)}) if episode_fields: episode_query = 'SELECT {} FROM episode WHERE aid=?' 
episode_query = episode_query.format( ','.join(EPISODE_FIELDS[field] for field in episode_fields)) episode_rows = db.cursor().execute(episode_query, (anime.aid,)) episodes = [ Episode(**{ field: value for field, value in zip(episode_fields, row)}) for row in episode_rows] anime.episodes = episodes yield anime
[ "def", "select", "(", "db", ",", "where_query", ":", "str", ",", "where_params", ":", "SQLParams", ",", "fields", ":", "FieldsParam", "=", "ALL", ",", "episode_fields", ":", "FieldsParam", "=", "(", ")", ",", ")", "->", "Iterator", "[", "Anime", "]", "...
Perform an arbitrary SQL SELECT WHERE on the anime table. By nature of "arbitrary query", this is vulnerable to injection, use only trusted values for `where_query`. This will "lazily" fetch the requested fields as needed. For example, episodes (which require a separate query per anime) will only be fetched if `episode_fields` is provided. Anime status will be cached only if status fields are requested. :param str where_query: SELECT WHERE query :param where_params: parameters for WHERE query :param fields: anime fields to get. If :const:`ALL`, get all fields. Default is :const:`ALL`. :param episode_fields: episode fields to get. If :const:`ALL`, get all fields. If empty, don't get episodes. `fields` must contain 'aid' to get episodes. :param bool force_status: whether to force status calculation. :returns: iterator of Anime
[ "Perform", "an", "arbitrary", "SQL", "SELECT", "WHERE", "on", "the", "anime", "table", "." ]
python
train
redhat-cip/dci-control-server
dci/api/v1/components.py
https://github.com/redhat-cip/dci-control-server/blob/b416cf935ec93e4fdd5741f61a21cabecf8454d2/dci/api/v1/components.py#L464-L472
def retrieve_tags_from_component(user, c_id): """Retrieve all tags attached to a component.""" JCT = models.JOIN_COMPONENTS_TAGS query = (sql.select([models.TAGS]) .select_from(JCT.join(models.TAGS)) .where(JCT.c.component_id == c_id)) rows = flask.g.db_conn.execute(query) return flask.jsonify({'tags': rows, '_meta': {'count': rows.rowcount}})
[ "def", "retrieve_tags_from_component", "(", "user", ",", "c_id", ")", ":", "JCT", "=", "models", ".", "JOIN_COMPONENTS_TAGS", "query", "=", "(", "sql", ".", "select", "(", "[", "models", ".", "TAGS", "]", ")", ".", "select_from", "(", "JCT", ".", "join",...
Retrieve all tags attached to a component.
[ "Retrieve", "all", "tags", "attached", "to", "a", "component", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L26-L34
def to_rec_single(samples, default_keys=None): """Convert output into a list of single CWL records. """ out = [] for data in samples: recs = samples_to_records([normalize_missing(utils.to_single_data(data))], default_keys) assert len(recs) == 1 out.append(recs[0]) return out
[ "def", "to_rec_single", "(", "samples", ",", "default_keys", "=", "None", ")", ":", "out", "=", "[", "]", "for", "data", "in", "samples", ":", "recs", "=", "samples_to_records", "(", "[", "normalize_missing", "(", "utils", ".", "to_single_data", "(", "data...
Convert output into a list of single CWL records.
[ "Convert", "output", "into", "a", "list", "of", "single", "CWL", "records", "." ]
python
train
MillionIntegrals/vel
vel/rl/modules/action_head.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/modules/action_head.py#L108-L114
def sample(self, logits, argmax_sampling=False): """ Sample from a probability space of all actions """ if argmax_sampling: return torch.argmax(logits, dim=-1) else: u = torch.rand_like(logits) return torch.argmax(logits - torch.log(-torch.log(u)), dim=-1)
[ "def", "sample", "(", "self", ",", "logits", ",", "argmax_sampling", "=", "False", ")", ":", "if", "argmax_sampling", ":", "return", "torch", ".", "argmax", "(", "logits", ",", "dim", "=", "-", "1", ")", "else", ":", "u", "=", "torch", ".", "rand_lik...
Sample from a probability space of all actions
[ "Sample", "from", "a", "probability", "space", "of", "all", "actions" ]
python
train
wavycloud/pyboto3
pyboto3/route53.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/route53.py#L2976-L3170
def update_health_check(HealthCheckId=None, HealthCheckVersion=None, IPAddress=None, Port=None, ResourcePath=None, FullyQualifiedDomainName=None, SearchString=None, FailureThreshold=None, Inverted=None, HealthThreshold=None, ChildHealthChecks=None, EnableSNI=None, Regions=None, AlarmIdentifier=None, InsufficientDataHealthStatus=None): """ Updates an existing health check. Note that some values can't be updated. For more information about updating health checks, see Creating, Updating, and Deleting Health Checks in the Amazon Route 53 Developer Guide . See also: AWS API Documentation :example: response = client.update_health_check( HealthCheckId='string', HealthCheckVersion=123, IPAddress='string', Port=123, ResourcePath='string', FullyQualifiedDomainName='string', SearchString='string', FailureThreshold=123, Inverted=True|False, HealthThreshold=123, ChildHealthChecks=[ 'string', ], EnableSNI=True|False, Regions=[ 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1', ], AlarmIdentifier={ 'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1', 'Name': 'string' }, InsufficientDataHealthStatus='Healthy'|'Unhealthy'|'LastKnownStatus' ) :type HealthCheckId: string :param HealthCheckId: [REQUIRED] The ID for the health check for which you want detailed information. When you created the health check, CreateHealthCheck returned the ID in the response, in the HealthCheckId element. :type HealthCheckVersion: integer :param HealthCheckVersion: A sequential counter that Amazon Route 53 sets to 1 when you create a health check and increments by 1 each time you update settings for the health check. 
We recommend that you use GetHealthCheck or ListHealthChecks to get the current value of HealthCheckVersion for the health check that you want to update, and that you include that value in your UpdateHealthCheck request. This prevents Amazon Route 53 from overwriting an intervening update: If the value in the UpdateHealthCheck request matches the value of HealthCheckVersion in the health check, Amazon Route 53 updates the health check with the new settings. If the value of HealthCheckVersion in the health check is greater, the health check was changed after you got the version number. Amazon Route 53 does not update the health check, and it returns a HealthCheckVersionMismatch error. :type IPAddress: string :param IPAddress: The IPv4 or IPv6 IP address for the endpoint that you want Amazon Route 53 to perform health checks on. If you don't specify a value for IPAddress , Amazon Route 53 sends a DNS request to resolve the domain name that you specify in FullyQualifiedDomainName at the interval that you specify in RequestInterval . Using an IP address that is returned by DNS, Amazon Route 53 then checks the health of the endpoint. Use one of the following formats for the value of IPAddress : IPv4 address : four values between 0 and 255, separated by periods (.), for example, 192.0.2.44 . IPv6 address : eight groups of four hexadecimal values, separated by colons (:), for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 . You can also shorten IPv6 addresses as described in RFC 5952, for example, 2001:db8:85a3::abcd:1:2345 . If the endpoint is an EC2 instance, we recommend that you create an Elastic IP address, associate it with your EC2 instance, and specify the Elastic IP address for IPAddress . This ensures that the IP address of your instance never changes. 
For more information, see the applicable documentation: Linux: Elastic IP Addresses (EIP) in the Amazon EC2 User Guide for Linux Instances Windows: Elastic IP Addresses (EIP) in the Amazon EC2 User Guide for Windows Instances Note If a health check already has a value for IPAddress , you can change the value. However, you can't update an existing health check to add or remove the value of IPAddress . For more information, see UpdateHealthCheckRequest$FullyQualifiedDomainName . Constraints: Amazon Route 53 can't check the health of endpoints for which the IP address is in local, private, non-routable, or multicast ranges. For more information about IP addresses for which you can't create health checks, see the following documents: RFC 5735, Special Use IPv4 Addresses RFC 6598, IANA-Reserved IPv4 Prefix for Shared Address Space RFC 5156, Special-Use IPv6 Addresses :type Port: integer :param Port: The port on the endpoint on which you want Amazon Route 53 to perform health checks. :type ResourcePath: string :param ResourcePath: The path that you want Amazon Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, for example the file /docs/route53-health-check.html. Specify this value only if you want to change it. :type FullyQualifiedDomainName: string :param FullyQualifiedDomainName: Amazon Route 53 behavior depends on whether you specify a value for IPAddress . Note If a health check already has a value for IPAddress , you can change the value. However, you can't update an existing health check to add or remove the value of IPAddress . If you specify a value for IPAddress : Amazon Route 53 sends health check requests to the specified IPv4 or IPv6 address and passes the value of FullyQualifiedDomainName in the Host header for all health checks except TCP health checks. 
This is typically the fully qualified DNS name of the endpoint on which you want Amazon Route 53 to perform health checks. When Amazon Route 53 checks the health of an endpoint, here is how it constructs the Host header: If you specify a value of 80 for Port and HTTP or HTTP_STR_MATCH for Type , Amazon Route 53 passes the value of FullyQualifiedDomainName to the endpoint in the Host header. If you specify a value of 443 for Port and HTTPS or HTTPS_STR_MATCH for Type , Amazon Route 53 passes the value of FullyQualifiedDomainName to the endpoint in the Host header. If you specify another value for Port and any value except TCP for Type , Amazon Route 53 passes * FullyQualifiedDomainName :Port * to the endpoint in the Host header. If you don't specify a value for FullyQualifiedDomainName , Amazon Route 53 substitutes the value of IPAddress in the Host header in each of the above cases. If you don't specify a value for IPAddress : If you don't specify a value for IPAddress , Amazon Route 53 sends a DNS request to the domain that you specify in FullyQualifiedDomainName at the interval you specify in RequestInterval . Using an IPv4 address that is returned by DNS, Amazon Route 53 then checks the health of the endpoint. Note If you don't specify a value for IPAddress , Amazon Route 53 uses only IPv4 to send health checks to the endpoint. If there's no resource record set with a type of A for the name that you specify for FullyQualifiedDomainName , the health check fails with a 'DNS resolution failed' error. If you want to check the health of weighted, latency, or failover resource record sets and you choose to specify the endpoint only by FullyQualifiedDomainName , we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com. 
For the value of FullyQualifiedDomainName , specify the domain name of the server (such as us-east-2-www.example.com ), not the name of the resource record sets (www.example.com). Warning In this configuration, if the value of FullyQualifiedDomainName matches the name of the resource record sets and you then associate the health check with those resource record sets, health check results will be unpredictable. In addition, if the value of Type is HTTP , HTTPS , HTTP_STR_MATCH , or HTTPS_STR_MATCH , Amazon Route 53 passes the value of FullyQualifiedDomainName in the Host header, as it does when you specify a value for IPAddress . If the value of Type is TCP , Amazon Route 53 doesn't pass a Host header. :type SearchString: string :param SearchString: If the value of Type is HTTP_STR_MATCH or HTTP_STR_MATCH , the string that you want Amazon Route 53 to search for in the response body from the specified resource. If the string appears in the response body, Amazon Route 53 considers the resource healthy. (You can't change the value of Type when you update a health check.) :type FailureThreshold: integer :param FailureThreshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Amazon Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide . If you don't specify a value for FailureThreshold , the default value is three health checks. :type Inverted: boolean :param Inverted: Specify whether you want Amazon Route 53 to invert the status of a health check, for example, to consider a health check unhealthy when it otherwise would be considered healthy. :type HealthThreshold: integer :param HealthThreshold: The number of child health checks that are associated with a CALCULATED health that Amazon Route 53 must consider healthy for the CALCULATED health check to be considered healthy. 
To specify the child health checks that you want to associate with a CALCULATED health check, use the ChildHealthChecks and ChildHealthCheck elements. Note the following: If you specify a number greater than the number of child health checks, Amazon Route 53 always considers this health check to be unhealthy. If you specify 0 , Amazon Route 53 always considers this health check to be healthy. :type ChildHealthChecks: list :param ChildHealthChecks: A complex type that contains one ChildHealthCheck element for each health check that you want to associate with a CALCULATED health check. (string) -- :type EnableSNI: boolean :param EnableSNI: Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName to the endpoint in the client_hello message during TLS negotiation. This allows the endpoint to respond to HTTPS health check requests with the applicable SSL/TLS certificate. Some endpoints require that HTTPS requests include the host name in the client_hello message. If you don't enable SNI, the status of the health check will be SSL alert handshake_failure . A health check can also have that status for other reasons. If SNI is enabled and you're still getting the error, check the SSL/TLS configuration on your endpoint and confirm that your certificate is valid. The SSL/TLS certificate on your endpoint includes a domain name in the Common Name field and possibly several more in the Subject Alternative Names field. One of the domain names in the certificate should match the value that you specify for FullyQualifiedDomainName . If the endpoint responds to the client_hello message with a certificate that does not include the domain name that you specified in FullyQualifiedDomainName , a health checker will retry the handshake. In the second attempt, the health checker will omit FullyQualifiedDomainName from the client_hello message. 
:type Regions: list :param Regions: A complex type that contains one Region element for each region that you want Amazon Route 53 health checkers to check the specified endpoint from. (string) -- :type AlarmIdentifier: dict :param AlarmIdentifier: A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy. Region (string) -- [REQUIRED]A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy. For the current list of CloudWatch regions, see Amazon CloudWatch in the AWS Regions and Endpoints chapter of the Amazon Web Services General Reference . Name (string) -- [REQUIRED]The name of the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy. :type InsufficientDataHealthStatus: string :param InsufficientDataHealthStatus: When CloudWatch has insufficient data about the metric to determine the alarm state, the status that you want Amazon Route 53 to assign to the health check: Healthy : Amazon Route 53 considers the health check to be healthy. Unhealthy : Amazon Route 53 considers the health check to be unhealthy. LastKnownStatus : Amazon Route 53 uses the status of the health check from the last time CloudWatch had sufficient data to determine the alarm state. For new health checks that have no last known status, the default status for the health check is healthy. 
:rtype: dict :return: { 'HealthCheck': { 'Id': 'string', 'CallerReference': 'string', 'HealthCheckConfig': { 'IPAddress': 'string', 'Port': 123, 'Type': 'HTTP'|'HTTPS'|'HTTP_STR_MATCH'|'HTTPS_STR_MATCH'|'TCP'|'CALCULATED'|'CLOUDWATCH_METRIC', 'ResourcePath': 'string', 'FullyQualifiedDomainName': 'string', 'SearchString': 'string', 'RequestInterval': 123, 'FailureThreshold': 123, 'MeasureLatency': True|False, 'Inverted': True|False, 'HealthThreshold': 123, 'ChildHealthChecks': [ 'string', ], 'EnableSNI': True|False, 'Regions': [ 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1', ], 'AlarmIdentifier': { 'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1', 'Name': 'string' }, 'InsufficientDataHealthStatus': 'Healthy'|'Unhealthy'|'LastKnownStatus' }, 'HealthCheckVersion': 123, 'CloudWatchAlarmConfiguration': { 'EvaluationPeriods': 123, 'Threshold': 123.0, 'ComparisonOperator': 'GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold', 'Period': 123, 'MetricName': 'string', 'Namespace': 'string', 'Statistic': 'Average'|'Sum'|'SampleCount'|'Maximum'|'Minimum', 'Dimensions': [ { 'Name': 'string', 'Value': 'string' }, ] } } } :returns: IPv4 address : four values between 0 and 255, separated by periods (.), for example, 192.0.2.44 . IPv6 address : eight groups of four hexadecimal values, separated by colons (:), for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 . You can also shorten IPv6 addresses as described in RFC 5952, for example, 2001:db8:85a3::abcd:1:2345 . """ pass
[ "def", "update_health_check", "(", "HealthCheckId", "=", "None", ",", "HealthCheckVersion", "=", "None", ",", "IPAddress", "=", "None", ",", "Port", "=", "None", ",", "ResourcePath", "=", "None", ",", "FullyQualifiedDomainName", "=", "None", ",", "SearchString",...
Updates an existing health check. Note that some values can't be updated. For more information about updating health checks, see Creating, Updating, and Deleting Health Checks in the Amazon Route 53 Developer Guide . See also: AWS API Documentation :example: response = client.update_health_check( HealthCheckId='string', HealthCheckVersion=123, IPAddress='string', Port=123, ResourcePath='string', FullyQualifiedDomainName='string', SearchString='string', FailureThreshold=123, Inverted=True|False, HealthThreshold=123, ChildHealthChecks=[ 'string', ], EnableSNI=True|False, Regions=[ 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1', ], AlarmIdentifier={ 'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1', 'Name': 'string' }, InsufficientDataHealthStatus='Healthy'|'Unhealthy'|'LastKnownStatus' ) :type HealthCheckId: string :param HealthCheckId: [REQUIRED] The ID for the health check for which you want detailed information. When you created the health check, CreateHealthCheck returned the ID in the response, in the HealthCheckId element. :type HealthCheckVersion: integer :param HealthCheckVersion: A sequential counter that Amazon Route 53 sets to 1 when you create a health check and increments by 1 each time you update settings for the health check. We recommend that you use GetHealthCheck or ListHealthChecks to get the current value of HealthCheckVersion for the health check that you want to update, and that you include that value in your UpdateHealthCheck request. This prevents Amazon Route 53 from overwriting an intervening update: If the value in the UpdateHealthCheck request matches the value of HealthCheckVersion in the health check, Amazon Route 53 updates the health check with the new settings. 
If the value of HealthCheckVersion in the health check is greater, the health check was changed after you got the version number. Amazon Route 53 does not update the health check, and it returns a HealthCheckVersionMismatch error. :type IPAddress: string :param IPAddress: The IPv4 or IPv6 IP address for the endpoint that you want Amazon Route 53 to perform health checks on. If you don't specify a value for IPAddress , Amazon Route 53 sends a DNS request to resolve the domain name that you specify in FullyQualifiedDomainName at the interval that you specify in RequestInterval . Using an IP address that is returned by DNS, Amazon Route 53 then checks the health of the endpoint. Use one of the following formats for the value of IPAddress : IPv4 address : four values between 0 and 255, separated by periods (.), for example, 192.0.2.44 . IPv6 address : eight groups of four hexadecimal values, separated by colons (:), for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 . You can also shorten IPv6 addresses as described in RFC 5952, for example, 2001:db8:85a3::abcd:1:2345 . If the endpoint is an EC2 instance, we recommend that you create an Elastic IP address, associate it with your EC2 instance, and specify the Elastic IP address for IPAddress . This ensures that the IP address of your instance never changes. For more information, see the applicable documentation: Linux: Elastic IP Addresses (EIP) in the Amazon EC2 User Guide for Linux Instances Windows: Elastic IP Addresses (EIP) in the Amazon EC2 User Guide for Windows Instances Note If a health check already has a value for IPAddress , you can change the value. However, you can't update an existing health check to add or remove the value of IPAddress . For more information, see UpdateHealthCheckRequest$FullyQualifiedDomainName . Constraints: Amazon Route 53 can't check the health of endpoints for which the IP address is in local, private, non-routable, or multicast ranges. 
For more information about IP addresses for which you can't create health checks, see the following documents: RFC 5735, Special Use IPv4 Addresses RFC 6598, IANA-Reserved IPv4 Prefix for Shared Address Space RFC 5156, Special-Use IPv6 Addresses :type Port: integer :param Port: The port on the endpoint on which you want Amazon Route 53 to perform health checks. :type ResourcePath: string :param ResourcePath: The path that you want Amazon Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, for example the file /docs/route53-health-check.html. Specify this value only if you want to change it. :type FullyQualifiedDomainName: string :param FullyQualifiedDomainName: Amazon Route 53 behavior depends on whether you specify a value for IPAddress . Note If a health check already has a value for IPAddress , you can change the value. However, you can't update an existing health check to add or remove the value of IPAddress . If you specify a value for IPAddress : Amazon Route 53 sends health check requests to the specified IPv4 or IPv6 address and passes the value of FullyQualifiedDomainName in the Host header for all health checks except TCP health checks. This is typically the fully qualified DNS name of the endpoint on which you want Amazon Route 53 to perform health checks. When Amazon Route 53 checks the health of an endpoint, here is how it constructs the Host header: If you specify a value of 80 for Port and HTTP or HTTP_STR_MATCH for Type , Amazon Route 53 passes the value of FullyQualifiedDomainName to the endpoint in the Host header. If you specify a value of 443 for Port and HTTPS or HTTPS_STR_MATCH for Type , Amazon Route 53 passes the value of FullyQualifiedDomainName to the endpoint in the Host header. 
If you specify another value for Port and any value except TCP for Type , Amazon Route 53 passes * FullyQualifiedDomainName :Port * to the endpoint in the Host header. If you don't specify a value for FullyQualifiedDomainName , Amazon Route 53 substitutes the value of IPAddress in the Host header in each of the above cases. If you don't specify a value for IPAddress : If you don't specify a value for IPAddress , Amazon Route 53 sends a DNS request to the domain that you specify in FullyQualifiedDomainName at the interval you specify in RequestInterval . Using an IPv4 address that is returned by DNS, Amazon Route 53 then checks the health of the endpoint. Note If you don't specify a value for IPAddress , Amazon Route 53 uses only IPv4 to send health checks to the endpoint. If there's no resource record set with a type of A for the name that you specify for FullyQualifiedDomainName , the health check fails with a 'DNS resolution failed' error. If you want to check the health of weighted, latency, or failover resource record sets and you choose to specify the endpoint only by FullyQualifiedDomainName , we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com. For the value of FullyQualifiedDomainName , specify the domain name of the server (such as us-east-2-www.example.com ), not the name of the resource record sets (www.example.com). Warning In this configuration, if the value of FullyQualifiedDomainName matches the name of the resource record sets and you then associate the health check with those resource record sets, health check results will be unpredictable. In addition, if the value of Type is HTTP , HTTPS , HTTP_STR_MATCH , or HTTPS_STR_MATCH , Amazon Route 53 passes the value of FullyQualifiedDomainName in the Host header, as it does when you specify a value for IPAddress . 
If the value of Type is TCP , Amazon Route 53 doesn't pass a Host header. :type SearchString: string :param SearchString: If the value of Type is HTTP_STR_MATCH or HTTP_STR_MATCH , the string that you want Amazon Route 53 to search for in the response body from the specified resource. If the string appears in the response body, Amazon Route 53 considers the resource healthy. (You can't change the value of Type when you update a health check.) :type FailureThreshold: integer :param FailureThreshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Amazon Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide . If you don't specify a value for FailureThreshold , the default value is three health checks. :type Inverted: boolean :param Inverted: Specify whether you want Amazon Route 53 to invert the status of a health check, for example, to consider a health check unhealthy when it otherwise would be considered healthy. :type HealthThreshold: integer :param HealthThreshold: The number of child health checks that are associated with a CALCULATED health that Amazon Route 53 must consider healthy for the CALCULATED health check to be considered healthy. To specify the child health checks that you want to associate with a CALCULATED health check, use the ChildHealthChecks and ChildHealthCheck elements. Note the following: If you specify a number greater than the number of child health checks, Amazon Route 53 always considers this health check to be unhealthy. If you specify 0 , Amazon Route 53 always considers this health check to be healthy. :type ChildHealthChecks: list :param ChildHealthChecks: A complex type that contains one ChildHealthCheck element for each health check that you want to associate with a CALCULATED health check. 
(string) -- :type EnableSNI: boolean :param EnableSNI: Specify whether you want Amazon Route 53 to send the value of FullyQualifiedDomainName to the endpoint in the client_hello message during TLS negotiation. This allows the endpoint to respond to HTTPS health check requests with the applicable SSL/TLS certificate. Some endpoints require that HTTPS requests include the host name in the client_hello message. If you don't enable SNI, the status of the health check will be SSL alert handshake_failure . A health check can also have that status for other reasons. If SNI is enabled and you're still getting the error, check the SSL/TLS configuration on your endpoint and confirm that your certificate is valid. The SSL/TLS certificate on your endpoint includes a domain name in the Common Name field and possibly several more in the Subject Alternative Names field. One of the domain names in the certificate should match the value that you specify for FullyQualifiedDomainName . If the endpoint responds to the client_hello message with a certificate that does not include the domain name that you specified in FullyQualifiedDomainName , a health checker will retry the handshake. In the second attempt, the health checker will omit FullyQualifiedDomainName from the client_hello message. :type Regions: list :param Regions: A complex type that contains one Region element for each region that you want Amazon Route 53 health checkers to check the specified endpoint from. (string) -- :type AlarmIdentifier: dict :param AlarmIdentifier: A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy. Region (string) -- [REQUIRED]A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy. 
For the current list of CloudWatch regions, see Amazon CloudWatch in the AWS Regions and Endpoints chapter of the Amazon Web Services General Reference . Name (string) -- [REQUIRED]The name of the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy. :type InsufficientDataHealthStatus: string :param InsufficientDataHealthStatus: When CloudWatch has insufficient data about the metric to determine the alarm state, the status that you want Amazon Route 53 to assign to the health check: Healthy : Amazon Route 53 considers the health check to be healthy. Unhealthy : Amazon Route 53 considers the health check to be unhealthy. LastKnownStatus : Amazon Route 53 uses the status of the health check from the last time CloudWatch had sufficient data to determine the alarm state. For new health checks that have no last known status, the default status for the health check is healthy. :rtype: dict :return: { 'HealthCheck': { 'Id': 'string', 'CallerReference': 'string', 'HealthCheckConfig': { 'IPAddress': 'string', 'Port': 123, 'Type': 'HTTP'|'HTTPS'|'HTTP_STR_MATCH'|'HTTPS_STR_MATCH'|'TCP'|'CALCULATED'|'CLOUDWATCH_METRIC', 'ResourcePath': 'string', 'FullyQualifiedDomainName': 'string', 'SearchString': 'string', 'RequestInterval': 123, 'FailureThreshold': 123, 'MeasureLatency': True|False, 'Inverted': True|False, 'HealthThreshold': 123, 'ChildHealthChecks': [ 'string', ], 'EnableSNI': True|False, 'Regions': [ 'us-east-1'|'us-west-1'|'us-west-2'|'eu-west-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'sa-east-1', ], 'AlarmIdentifier': { 'Region': 'us-east-1'|'us-east-2'|'us-west-1'|'us-west-2'|'ca-central-1'|'eu-central-1'|'eu-west-1'|'eu-west-2'|'ap-south-1'|'ap-southeast-1'|'ap-southeast-2'|'ap-northeast-1'|'ap-northeast-2'|'sa-east-1', 'Name': 'string' }, 'InsufficientDataHealthStatus': 'Healthy'|'Unhealthy'|'LastKnownStatus' }, 'HealthCheckVersion': 123, 'CloudWatchAlarmConfiguration': { 
'EvaluationPeriods': 123, 'Threshold': 123.0, 'ComparisonOperator': 'GreaterThanOrEqualToThreshold'|'GreaterThanThreshold'|'LessThanThreshold'|'LessThanOrEqualToThreshold', 'Period': 123, 'MetricName': 'string', 'Namespace': 'string', 'Statistic': 'Average'|'Sum'|'SampleCount'|'Maximum'|'Minimum', 'Dimensions': [ { 'Name': 'string', 'Value': 'string' }, ] } } } :returns: IPv4 address : four values between 0 and 255, separated by periods (.), for example, 192.0.2.44 . IPv6 address : eight groups of four hexadecimal values, separated by colons (:), for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 . You can also shorten IPv6 addresses as described in RFC 5952, for example, 2001:db8:85a3::abcd:1:2345 .
[ "Updates", "an", "existing", "health", "check", ".", "Note", "that", "some", "values", "can", "t", "be", "updated", ".", "For", "more", "information", "about", "updating", "health", "checks", "see", "Creating", "Updating", "and", "Deleting", "Health", "Checks"...
python
train
klen/muffin-metrics
muffin_metrics.py
https://github.com/klen/muffin-metrics/blob/b62fc25172e3e1e9fc6dc6c8da3170935ee69f01/muffin_metrics.py#L239-L241
def incr(self, stat, count=1, rate=1): """Increment a stat by `count`.""" return self.send(stat, "%s|c" % count, rate)
[ "def", "incr", "(", "self", ",", "stat", ",", "count", "=", "1", ",", "rate", "=", "1", ")", ":", "return", "self", ".", "send", "(", "stat", ",", "\"%s|c\"", "%", "count", ",", "rate", ")" ]
Increment a stat by `count`.
[ "Increment", "a", "stat", "by", "count", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/wfpc2Data.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/wfpc2Data.py#L111-L165
def setInstrumentParameters(self, instrpars):
    """
    This method overrides the superclass to set default values into
    the parameter dictionary, in case empty entries are provided.

    Parameters
    ----------
    instrpars : dict
        Instrument parameter dictionary; entries 'proc_unit', 'gain',
        'exptime', 'expkeyword', 'rdnoise' are read, and 'gnkeyword',
        'rnkeyword', 'expkeyword' may be (re)written with WFPC2 defaults.
    """
    pri_header = self._image[0].header
    self.proc_unit = instrpars['proc_unit']

    instrpars['gnkeyword'] = 'ATODGAIN'  # hard-code for WFPC2 data
    instrpars['rnkeyword'] = None

    if self._isNotValid(instrpars['exptime'], instrpars['expkeyword']):
        instrpars['expkeyword'] = 'EXPTIME'

    for chip in self.returnAllChips(extname=self.scienceExt):
        chip._headergain = self.getInstrParameter(
            instrpars['gain'], pri_header, instrpars['gnkeyword']
        )
        chip._exptime = self.getInstrParameter(
            instrpars['exptime'], pri_header, instrpars['expkeyword']
        )

        # We need to treat Read Noise as a special case since it is
        # not populated in the WFPC2 primary header
        if instrpars['rnkeyword'] is None:
            chip._rdnoise = None
        else:
            chip._rdnoise = self.getInstrParameter(
                instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
            )

        if chip._headergain is None or chip._exptime is None:
            print('ERROR: invalid instrument task parameter')
            raise ValueError

    # We need to determine if the user has used the default readnoise/gain value
    # since if not, they will need to supply a gain/readnoise value as well.
    # NOTE(review): gnkeyword and rnkeyword are overwritten unconditionally
    # above, so as written both flags are always True and only the
    # _setchippars() branch can be reached — confirm whether user-supplied
    # keywords were meant to survive the assignments at the top.
    usingDefaultGain = instrpars['gnkeyword'] == 'ATODGAIN'
    usingDefaultReadnoise = instrpars['rnkeyword'] in [None, 'None']

    # If the user has specified either the readnoise or the gain, we need to
    # make sure that they have actually specified both values. In the default
    # case, the readnoise of the system depends on what the gain is.
    if usingDefaultReadnoise and usingDefaultGain:
        self._setchippars()
    elif usingDefaultReadnoise and not usingDefaultGain:
        raise ValueError("ERROR: You need to supply readnoise information\n when not using the default gain for WFPC2.")
    elif not usingDefaultReadnoise and usingDefaultGain:
        raise ValueError("ERROR: You need to supply gain information when\n not using the default readnoise for WFPC2.")
    else:
        # In this case, the user has specified both a gain and readnoise
        # values. Just use them as is.
        for chip in self.returnAllChips(extname=self.scienceExt):
            chip._gain = chip._headergain
        print("Using user defined values for gain and readnoise")

    # Convert the science data to electrons
    self.doUnitConversions()
[ "def", "setInstrumentParameters", "(", "self", ",", "instrpars", ")", ":", "pri_header", "=", "self", ".", "_image", "[", "0", "]", ".", "header", "self", ".", "proc_unit", "=", "instrpars", "[", "'proc_unit'", "]", "instrpars", "[", "'gnkeyword'", "]", "=...
This method overrides the superclass to set default values into the parameter dictionary, in case empty entries are provided.
[ "This", "method", "overrides", "the", "superclass", "to", "set", "default", "values", "into", "the", "parameter", "dictionary", "in", "case", "empty", "entries", "are", "provided", "." ]
python
train
runfalk/spans
spans/types.py
https://github.com/runfalk/spans/blob/59ed73407a569c3be86cfdb4b8f438cb8c794540/spans/types.py#L445-L503
def union(self, other):
    """
    Merges this range with a given range.

        >>> intrange(1, 5).union(intrange(5, 10))
        intrange([1,10))
        >>> intrange(1, 10).union(intrange(5, 15))
        intrange([1,15))

    Two ranges can not be merged if the resulting range would be split in
    two. This happens when the two sets are neither adjacent nor overlaps.

        >>> intrange(1, 5).union(intrange(10, 15))
        Traceback (most recent call last):
            File "<stdin>", line 1, in <module>
        ValueError: Ranges must be either adjacent or overlapping

    This does not modify the range in place.

    This is the same as the ``+`` operator for two ranges in PostgreSQL.

    :param other: Range to merge with.
    :return: A new range that is the union of this and `other`.
    :raises ValueError: If `other` can not be merged with this range.
    """
    if not self.is_valid_range(other):
        msg = "Unsupported type to test for union '{.__class__.__name__}'"
        raise TypeError(msg.format(other))

    # Optimize empty ranges: the union with an empty range is the other range.
    if not self:
        return other
    elif not other:
        return self

    # Order ranges to simplify checks: after this, a.lower <= b.lower.
    if self < other:
        a, b = self, other
    else:
        a, b = other, self

    # Reject a gap: either a ends strictly before b starts, or they meet at a
    # single point that neither bound includes — unless the ranges are
    # discretely adjacent (e.g. [1,5) and [5,10) for integers).
    if (a.upper < b.lower or a.upper == b.lower and not a.upper_inc and not b.lower_inc) and not a.adjacent(b):
        raise ValueError("Ranges must be either adjacent or overlapping")

    # a.lower is guaranteed to be the lower bound, but either a.upper or
    # b.upper can be the upper bound; on a tie, the union is inclusive if
    # either side is.
    if a.upper == b.upper:
        upper = a.upper
        upper_inc = a.upper_inc or b.upper_inc
    elif a.upper < b.upper:
        upper = b.upper
        upper_inc = b.upper_inc
    else:
        upper = a.upper
        upper_inc = a.upper_inc

    return self.__class__(a.lower, upper, a.lower_inc, upper_inc)
[ "def", "union", "(", "self", ",", "other", ")", ":", "if", "not", "self", ".", "is_valid_range", "(", "other", ")", ":", "msg", "=", "\"Unsupported type to test for union '{.__class__.__name__}'\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "other", ...
Merges this range with a given range. >>> intrange(1, 5).union(intrange(5, 10)) intrange([1,10)) >>> intrange(1, 10).union(intrange(5, 15)) intrange([1,15)) Two ranges can not be merged if the resulting range would be split in two. This happens when the two sets are neither adjacent nor overlaps. >>> intrange(1, 5).union(intrange(10, 15)) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: Ranges must be either adjacent or overlapping This does not modify the range in place. This is the same as the ``+`` operator for two ranges in PostgreSQL. :param other: Range to merge with. :return: A new range that is the union of this and `other`. :raises ValueError: If `other` can not be merged with this range.
[ "Merges", "this", "range", "with", "a", "given", "range", "." ]
python
train
PyO3/setuptools-rust
setuptools_rust/extension.py
https://github.com/PyO3/setuptools-rust/blob/cd3ecec5749927a5c69b8ea516fc918ae95d18ce/setuptools_rust/extension.py#L106-L122
def get_lib_name(self):
    """Parse Cargo.toml to get the name of the shared library."""
    # Imported lazily so the setup_requires are already installed by now.
    import toml

    config = toml.load(self.path)

    # [lib].name takes precedence; fall back to [package].name.
    lib_name = config.get("lib", {}).get("name")
    if lib_name is None:
        lib_name = config.get("package", {}).get("name")
    if lib_name is None:
        raise Exception(
            "Can not parse library name from Cargo.toml. "
            "Cargo.toml missing value for 'name' key "
            "in both the [package] section and the [lib] section"
        )

    # Cargo normalizes these characters to underscores in the artifact name.
    return re.sub(r"[./\\-]", "_", lib_name)
[ "def", "get_lib_name", "(", "self", ")", ":", "# We import in here to make sure the the setup_requires are already installed", "import", "toml", "cfg", "=", "toml", ".", "load", "(", "self", ".", "path", ")", "name", "=", "cfg", ".", "get", "(", "\"lib\"", ",", ...
Parse Cargo.toml to get the name of the shared library.
[ "Parse", "Cargo", ".", "toml", "to", "get", "the", "name", "of", "the", "shared", "library", "." ]
python
train
blackecho/Deep-Learning-TensorFlow
yadlt/models/boltzmann/rbm.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/models/boltzmann/rbm.py#L172-L191
def _create_variables(self, n_features):
    """Create the TensorFlow variables for the model.

    :param n_features: number of features
    :return: self
    """
    weights_label = 'weights'
    # Small random init (stddev 0.1) breaks symmetry between hidden units.
    self.W = tf.Variable(
        tf.truncated_normal(shape=[n_features, self.num_hidden], stddev=0.1),
        name=weights_label)
    tf.summary.histogram(weights_label, self.W)

    hbias_label = 'hidden-bias'
    self.bh_ = tf.Variable(
        tf.constant(0.1, shape=[self.num_hidden]), name=hbias_label)
    tf.summary.histogram(hbias_label, self.bh_)

    vbias_label = 'visible-bias'
    self.bv_ = tf.Variable(
        tf.constant(0.1, shape=[n_features]), name=vbias_label)
    tf.summary.histogram(vbias_label, self.bv_)
[ "def", "_create_variables", "(", "self", ",", "n_features", ")", ":", "w_name", "=", "'weights'", "self", ".", "W", "=", "tf", ".", "Variable", "(", "tf", ".", "truncated_normal", "(", "shape", "=", "[", "n_features", ",", "self", ".", "num_hidden", "]",...
Create the TensorFlow variables for the model. :param n_features: number of features :return: self
[ "Create", "the", "TensorFlow", "variables", "for", "the", "model", "." ]
python
train
ynop/audiomate
audiomate/utils/audio.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/utils/audio.py#L7-L24
def process_buffer(buffer, n_channels):
    """Merge the read blocks into one sample array, mixing down to mono.

    Args:
        buffer (list): A list of blocks of (interleaved) samples.
        n_channels (int): The number of channels of the input data.

    Returns:
        np.array: The merged mono samples.
    """
    merged = np.concatenate(buffer)

    if n_channels > 1:
        # De-interleave into (channels, frames), then average down to mono.
        merged = librosa.to_mono(merged.reshape((-1, n_channels)).T)

    return merged
[ "def", "process_buffer", "(", "buffer", ",", "n_channels", ")", ":", "samples", "=", "np", ".", "concatenate", "(", "buffer", ")", "if", "n_channels", ">", "1", ":", "samples", "=", "samples", ".", "reshape", "(", "(", "-", "1", ",", "n_channels", ")",...
Merge the read blocks and resample if necessary. Args: buffer (list): A list of blocks of samples. n_channels (int): The number of channels of the input data. Returns: np.array: The samples
[ "Merge", "the", "read", "blocks", "and", "resample", "if", "necessary", "." ]
python
train
decentfox/aioh2
aioh2/protocol.py
https://github.com/decentfox/aioh2/blob/2f9b76161e99e32317083cd2ebd17ce2ed3e41ab/aioh2/protocol.py#L371-L383
def set_handler(self, handler):
    """Attach a coroutine that is scheduled when the connection is made.

    A task is created for the coroutine; when the connection is closed,
    the task is cancelled.

    :param handler: coroutine to schedule (falsy values are ignored)
    :return: None
    :raises Exception: if a handler was already set
    """
    if self._handler:
        raise Exception('Handler was already set')
    if not handler:
        return
    self._handler = async_task(handler, loop=self._loop)
[ "def", "set_handler", "(", "self", ",", "handler", ")", ":", "if", "self", ".", "_handler", ":", "raise", "Exception", "(", "'Handler was already set'", ")", "if", "handler", ":", "self", ".", "_handler", "=", "async_task", "(", "handler", ",", "loop", "="...
Connect with a coroutine, which is scheduled when connection is made. This function will create a task, and when connection is closed, the task will be canceled. :param handler: :return: None
[ "Connect", "with", "a", "coroutine", "which", "is", "scheduled", "when", "connection", "is", "made", "." ]
python
train
saltstack/salt
salt/modules/udev.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/udev.py#L87-L109
def info(dev):
    '''
    Extract all info delivered by udevadm

    CLI Example:

    .. code-block:: bash

        salt '*' udev.info /dev/sda
        salt '*' udev.info /sys/class/net/eth0
    '''
    # NOTE(review): this is a substring test, so any argument containing
    # 'sys' (not only /sys/... paths) is queried by --path — confirm this
    # loose match is intentional.
    if 'sys' in dev:
        qtype = 'path'
    else:
        qtype = 'name'

    cmd = 'udevadm info --export --query=all --{0}={1}'.format(qtype, dev)
    udev_result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')

    # Surface udevadm failures as a salt execution error.
    if udev_result['retcode'] != 0:
        raise CommandExecutionError(udev_result['stderr'])

    # _parse_udevadm_info returns a list of device dicts; a single-device
    # query yields exactly one entry.
    return _parse_udevadm_info(udev_result['stdout'])[0]
[ "def", "info", "(", "dev", ")", ":", "if", "'sys'", "in", "dev", ":", "qtype", "=", "'path'", "else", ":", "qtype", "=", "'name'", "cmd", "=", "'udevadm info --export --query=all --{0}={1}'", ".", "format", "(", "qtype", ",", "dev", ")", "udev_result", "="...
Extract all info delivered by udevadm CLI Example: .. code-block:: bash salt '*' udev.info /dev/sda salt '*' udev.info /sys/class/net/eth0
[ "Extract", "all", "info", "delivered", "by", "udevadm" ]
python
train
ClericPy/torequests
torequests/dummy.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/dummy.py#L264-L273
def submitter(self, f):
    """Decorator to submit a coro-function as NewTask to self.loop with
    sem control. Use default_callback frequency of loop."""
    guarded = self._wrap_coro_function_with_sem(f)

    @wraps(guarded)
    def inner(*args, **kwargs):
        return self.submit(guarded(*args, **kwargs))

    return inner
[ "def", "submitter", "(", "self", ",", "f", ")", ":", "f", "=", "self", ".", "_wrap_coro_function_with_sem", "(", "f", ")", "@", "wraps", "(", "f", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", ...
Decorator to submit a coro-function as NewTask to self.loop with sem control. Use default_callback frequency of loop.
[ "Decorator", "to", "submit", "a", "coro", "-", "function", "as", "NewTask", "to", "self", ".", "loop", "with", "sem", "control", ".", "Use", "default_callback", "frequency", "of", "loop", "." ]
python
train
census-instrumentation/opencensus-python
contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/trace_exporter/utils.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/trace_exporter/utils.py#L128-L149
def set_proto_annotation(pb_annotation, span_data_annotation):
    """Copy an opencensus annotation onto the protobuf Span annotation.

    :type pb_annotation: :class:
        `~opencensus.proto.trace.Span.TimeEvent.Annotation`
    :param pb_annotation: protobuf annotation

    :type span_data_annotation: :class:
        `~opencensus.trace.time_event.Annotation`
    :param span_data_annotation: opencensus annotation
    """
    pb_annotation.description.value = span_data_annotation.description

    attrs = span_data_annotation.attributes
    if attrs is None or attrs.attributes is None:
        return
    for attr_key, attr_value in attrs.attributes.items():
        add_proto_attribute_value(pb_annotation.attributes, attr_key, attr_value)
[ "def", "set_proto_annotation", "(", "pb_annotation", ",", "span_data_annotation", ")", ":", "pb_annotation", ".", "description", ".", "value", "=", "span_data_annotation", ".", "description", "if", "span_data_annotation", ".", "attributes", "is", "not", "None", "and",...
Sets properties on the protobuf Span annotation. :type pb_annotation: :class: `~opencensus.proto.trace.Span.TimeEvent.Annotation` :param pb_annotation: protobuf annotation :type span_data_annotation: :class: `~opencensus.trace.time_event.Annotation` :param span_data_annotation: opencensus annotation
[ "Sets", "properties", "on", "the", "protobuf", "Span", "annotation", "." ]
python
train
agile4you/bottle-neck
bottle_neck/response.py
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L262-L275
def not_implemented(cls, errors=None):
    """Shortcut API for HTTP 501 `Not Implemented` response.

    Args:
        errors (list): Response key/value data.

    Returns:
        WSResponse Instance.
    """
    if cls.expose_status:  # pragma: no cover
        response = cls.response
        response.content_type = 'application/json'
        response._status_line = '501 Not Implemented'
    return cls(501, None, errors).to_json
[ "def", "not_implemented", "(", "cls", ",", "errors", "=", "None", ")", ":", "if", "cls", ".", "expose_status", ":", "# pragma: no cover", "cls", ".", "response", ".", "content_type", "=", "'application/json'", "cls", ".", "response", ".", "_status_line", "=", ...
Shortcut API for HTTP 501 `Not Implemented` response. Args: errors (list): Response key/value data. Returns: WSResponse Instance.
[ "Shortcut", "API", "for", "HTTP", "501", "Not", "Implemented", "response", "." ]
python
train
Erotemic/utool
utool/util_dev.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1288-L1387
def get_stats(list_, axis=None, use_nan=False, use_sum=False, use_median=False,
              size=False):
    """
    Args:
        list_ (listlike): values to get statistics of
        axis (int): if `list_` is ndarray then this specifies the axis

    Returns:
        OrderedDict: stats: dictionary of common numpy statistics
            (min, max, mean, std, nMin, nMax, shape)

    SeeAlso:
        get_stats_str

    CommandLine:
        python -m utool.util_dev --test-get_stats
        python -m utool.util_dev --test-get_stats:1

    Examples0:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dev import *  # NOQA
        >>> import numpy as np
        >>> import utool
        >>> axis = 0
        >>> np.random.seed(0)
        >>> list_ = np.random.rand(10, 2).astype(np.float32)
        >>> stats = get_stats(list_, axis, use_nan=False)
        >>> result = str(utool.repr4(stats, nl=1, precision=4, with_dtype=True))
        >>> print(result)
        {
            'mean': np.array([0.5206, 0.6425], dtype=np.float32),
            'std': np.array([0.2854, 0.2517], dtype=np.float32),
            'max': np.array([0.9637, 0.9256], dtype=np.float32),
            'min': np.array([0.0202, 0.0871], dtype=np.float32),
            'nMin': np.array([1, 1], dtype=np.int32),
            'nMax': np.array([1, 1], dtype=np.int32),
            'shape': (10, 2),
        }

    Examples1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dev import *  # NOQA
        >>> import numpy as np
        >>> import utool
        >>> axis = 0
        >>> rng = np.random.RandomState(0)
        >>> list_ = rng.randint(0, 42, size=100).astype(np.float32)
        >>> list_[4] = np.nan
        >>> stats = get_stats(list_, axis, use_nan=True)
        >>> result = str(utool.repr2(stats, precision=1, strkeys=True))
        >>> print(result)
        {mean: 20.0, std: 13.2, max: 41.0, min: 0.0, nMin: 7, nMax: 3, shape: (100,), num_nan: 1}
    """
    # mean/std are cast to float32 before being stored; min/max keep the
    # input dtype.
    datacast = np.float32
    # Assure input is in numpy format
    if isinstance(list_, np.ndarray):
        nparr = list_
    elif isinstance(list_, list):
        nparr = np.array(list_)
    else:
        nparr = np.array(list(list_))
    # Check to make sure stats are feasible
    if len(nparr) == 0:
        stats = OrderedDict([('empty_list', True)])
        if size:
            stats['size'] = 0
    else:
        # nan-aware reductions ignore NaN entries; plain reductions would
        # propagate them.
        if use_nan:
            min_val = np.nanmin(nparr, axis=axis)
            max_val = np.nanmax(nparr, axis=axis)
            mean_ = np.nanmean(nparr, axis=axis)
            std_ = np.nanstd(nparr, axis=axis)
        else:
            min_val = nparr.min(axis=axis)
            max_val = nparr.max(axis=axis)
            mean_ = nparr.mean(axis=axis)
            std_ = nparr.std(axis=axis)
        # number of entries with min/max val
        nMin = np.sum(nparr == min_val, axis=axis)
        nMax = np.sum(nparr == max_val, axis=axis)
        stats = OrderedDict([
            ('mean', datacast(mean_)),
            ('std', datacast(std_)),
            ('max', (max_val)),
            ('min', (min_val)),
            ('nMin', np.int32(nMin)),
            ('nMax', np.int32(nMax)),
        ])
        # `size` replaces `shape` in the output rather than adding to it.
        if size:
            stats['size'] = nparr.size
        else:
            stats['shape'] = nparr.shape
        # NOTE(review): median is always computed over the flattened array
        # (no axis argument) and always nan-aware, regardless of `axis` and
        # `use_nan` — confirm this asymmetry is intentional.
        if use_median:
            stats['med'] = np.nanmedian(nparr)
        if use_nan:
            stats['num_nan'] = np.isnan(nparr).sum()
        if use_sum:
            sumfunc = np.nansum if use_nan else np.sum
            stats['sum'] = sumfunc(nparr, axis=axis)
    return stats
[ "def", "get_stats", "(", "list_", ",", "axis", "=", "None", ",", "use_nan", "=", "False", ",", "use_sum", "=", "False", ",", "use_median", "=", "False", ",", "size", "=", "False", ")", ":", "datacast", "=", "np", ".", "float32", "# Assure input is in num...
Args: list_ (listlike): values to get statistics of axis (int): if `list_` is ndarray then this specifies the axis Returns: OrderedDict: stats: dictionary of common numpy statistics (min, max, mean, std, nMin, nMax, shape) SeeAlso: get_stats_str CommandLine: python -m utool.util_dev --test-get_stats python -m utool.util_dev --test-get_stats:1 Examples0: >>> # ENABLE_DOCTEST >>> from utool.util_dev import * # NOQA >>> import numpy as np >>> import utool >>> axis = 0 >>> np.random.seed(0) >>> list_ = np.random.rand(10, 2).astype(np.float32) >>> stats = get_stats(list_, axis, use_nan=False) >>> result = str(utool.repr4(stats, nl=1, precision=4, with_dtype=True)) >>> print(result) { 'mean': np.array([0.5206, 0.6425], dtype=np.float32), 'std': np.array([0.2854, 0.2517], dtype=np.float32), 'max': np.array([0.9637, 0.9256], dtype=np.float32), 'min': np.array([0.0202, 0.0871], dtype=np.float32), 'nMin': np.array([1, 1], dtype=np.int32), 'nMax': np.array([1, 1], dtype=np.int32), 'shape': (10, 2), } Examples1: >>> # ENABLE_DOCTEST >>> from utool.util_dev import * # NOQA >>> import numpy as np >>> import utool >>> axis = 0 >>> rng = np.random.RandomState(0) >>> list_ = rng.randint(0, 42, size=100).astype(np.float32) >>> list_[4] = np.nan >>> stats = get_stats(list_, axis, use_nan=True) >>> result = str(utool.repr2(stats, precision=1, strkeys=True)) >>> print(result) {mean: 20.0, std: 13.2, max: 41.0, min: 0.0, nMin: 7, nMax: 3, shape: (100,), num_nan: 1}
[ "Args", ":", "list_", "(", "listlike", ")", ":", "values", "to", "get", "statistics", "of", "axis", "(", "int", ")", ":", "if", "list_", "is", "ndarray", "then", "this", "specifies", "the", "axis" ]
python
train
mar10/wsgidav
wsgidav/request_server.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/request_server.py#L152-L184
def _send_response(
    self, environ, start_response, root_res, success_code, error_list
):
    """Send WSGI response (single or multistatus).

    - If error_list is None or [], then <success_code> is sent as response.
    - If error_list contains a single error with a URL that matches
      root_res, then this error is returned.
    - If error_list contains more than one error, then '207 Multi-Status'
      is returned.
    """
    assert success_code in (HTTP_CREATED, HTTP_NO_CONTENT, HTTP_OK)
    if not error_list:
        # Status OK
        return util.send_status_response(environ, start_response, success_code)
    if len(error_list) == 1 and error_list[0][0] == root_res.get_href():
        # Only one error that occurred on the root resource
        return util.send_status_response(environ, start_response, error_list[0][1])

    # Multiple errors, or error on one single child: build a DAV
    # multistatus XML body with one <response> element per failed href.
    multistatusEL = xml_tools.make_multistatus_el()
    for refurl, e in error_list:
        # assert refurl.startswith("http:")
        assert refurl.startswith("/")
        assert isinstance(e, DAVError)
        responseEL = etree.SubElement(multistatusEL, "{DAV:}response")
        etree.SubElement(responseEL, "{DAV:}href").text = refurl
        etree.SubElement(responseEL, "{DAV:}status").text = "HTTP/1.1 {}".format(
            get_http_status_string(e)
        )
    return util.send_multi_status_response(environ, start_response, multistatusEL)
[ "def", "_send_response", "(", "self", ",", "environ", ",", "start_response", ",", "root_res", ",", "success_code", ",", "error_list", ")", ":", "assert", "success_code", "in", "(", "HTTP_CREATED", ",", "HTTP_NO_CONTENT", ",", "HTTP_OK", ")", "if", "not", "erro...
Send WSGI response (single or multistatus). - If error_list is None or [], then <success_code> is send as response. - If error_list contains a single error with a URL that matches root_res, then this error is returned. - If error_list contains more than one error, then '207 Multi-Status' is returned.
[ "Send", "WSGI", "response", "(", "single", "or", "multistatus", ")", "." ]
python
valid
flyingrub/scdl
scdl/scdl.py
https://github.com/flyingrub/scdl/blob/e833a22dd6676311b72fadd8a1c80f4a06acfad9/scdl/scdl.py#L562-L588
def already_downloaded(track, title, filename):
    """
    Returns True if the file has already been downloaded
    """
    global arguments

    exists = os.path.isfile(filename)
    convertible = arguments['--flac'] and can_convert(filename)
    # In flac mode, a finished download is the .flac file, not the original.
    if convertible and os.path.isfile(filename[:-4] + ".flac"):
        exists = True
    if arguments['--download-archive'] and in_download_archive(track):
        exists = True
    # If the convertible original is still on disk, the conversion never
    # finished — treat it as not downloaded.
    if convertible and os.path.isfile(filename):
        exists = False

    if not exists:
        return False
    if arguments['-c'] or arguments['--remove']:
        logger.info('Track "{0}" already downloaded.'.format(title))
        return True
    logger.error('Track "{0}" already exists!'.format(title))
    logger.error('Exiting... (run again with -c to continue)')
    # NOTE(review): exits with status 0 despite the error message.
    sys.exit(0)
[ "def", "already_downloaded", "(", "track", ",", "title", ",", "filename", ")", ":", "global", "arguments", "already_downloaded", "=", "False", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "already_downloaded", "=", "True", "if", "argume...
Returns True if the file has already been downloaded
[ "Returns", "True", "if", "the", "file", "has", "already", "been", "downloaded" ]
python
train
odlgroup/odl
odl/set/space.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/space.py#L926-L935
def multiply(self, other, out=None):
    """Return ``out = self * other``.

    If ``out`` is provided, the result is written to it.

    See Also
    --------
    LinearSpace.multiply
    """
    # Pointwise multiplication is defined by the containing space.
    space = self.space
    return space.multiply(self, other, out=out)
[ "def", "multiply", "(", "self", ",", "other", ",", "out", "=", "None", ")", ":", "return", "self", ".", "space", ".", "multiply", "(", "self", ",", "other", ",", "out", "=", "out", ")" ]
Return ``out = self * other``. If ``out`` is provided, the result is written to it. See Also -------- LinearSpace.multiply
[ "Return", "out", "=", "self", "*", "other", "." ]
python
train
Azure/azure-sdk-for-python
azure-mgmt-policyinsights/azure/mgmt/policyinsights/operations/policy_states_operations.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-policyinsights/azure/mgmt/policyinsights/operations/policy_states_operations.py#L148-L227
def summarize_for_management_group(
        self, management_group_name, query_options=None, custom_headers=None, raw=False, **operation_config):
    """Summarizes policy states for the resources under the management
    group.

    :param management_group_name: Management group name.
    :type management_group_name: str
    :param query_options: Additional parameters for the operation
    :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SummarizeResults or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.policyinsights.models.SummarizeResults or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`QueryFailureException<azure.mgmt.policyinsights.models.QueryFailureException>`
    """
    # Unpack optional OData-style query options (all default to None).
    top = None
    if query_options is not None:
        top = query_options.top
    from_parameter = None
    if query_options is not None:
        from_parameter = query_options.from_property
    to = None
    if query_options is not None:
        to = query_options.to
    filter = None
    if query_options is not None:
        filter = query_options.filter

    # Construct URL
    url = self.summarize_for_management_group.metadata['url']
    path_format_arguments = {
        'policyStatesSummaryResource': self._serialize.url("self.policy_states_summary_resource", self.policy_states_summary_resource, 'str'),
        'managementGroupsNamespace': self._serialize.url("self.management_groups_namespace", self.management_groups_namespace, 'str'),
        'managementGroupName': self._serialize.url("management_group_name", management_group_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    if top is not None:
        query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=0)
    if from_parameter is not None:
        query_parameters['$from'] = self._serialize.query("from_parameter", from_parameter, 'iso-8601')
    if to is not None:
        query_parameters['$to'] = self._serialize.query("to", to, 'iso-8601')
    if filter is not None:
        query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    # Any status other than 200 is surfaced as a query failure.
    if response.status_code not in [200]:
        raise models.QueryFailureException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SummarizeResults', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
[ "def", "summarize_for_management_group", "(", "self", ",", "management_group_name", ",", "query_options", "=", "None", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "*", "*", "operation_config", ")", ":", "top", "=", "None", "if", "query_o...
Summarizes policy states for the resources under the management group. :param management_group_name: Management group name. :type management_group_name: str :param query_options: Additional parameters for the operation :type query_options: ~azure.mgmt.policyinsights.models.QueryOptions :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: SummarizeResults or ClientRawResponse if raw=true :rtype: ~azure.mgmt.policyinsights.models.SummarizeResults or ~msrest.pipeline.ClientRawResponse :raises: :class:`QueryFailureException<azure.mgmt.policyinsights.models.QueryFailureException>`
[ "Summarizes", "policy", "states", "for", "the", "resources", "under", "the", "management", "group", "." ]
python
test
mcocdawc/chemcoord
src/chemcoord/cartesian_coordinates/_cartesian_class_core.py
https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/cartesian_coordinates/_cartesian_class_core.py#L1221-L1271
def align(self, other, indices=None, ignore_hydrogens=False):
    """Align two Cartesians.

    Minimize the RMSD (root mean squared deviation) between
    ``self`` and ``other``.
    Returns a tuple of copies of ``self`` and ``other`` where both are
    centered around their centroid and ``other`` is rotated unto ``self``.
    The rotation minimises the distances between the atom pairs of same
    label.
    Uses the Kabsch algorithm implemented within
    :func:`~.xyz_functions.get_kabsch_rotation`

    .. note:: If ``indices is None``, then ``len(self) == len(other)``
        must be true and the elements in each index have to be the same.

    Args:
        other (Cartesian):
        indices (sequence): It is possible to specify a subset of indices
            that is used for the determination of the best rotation matrix::

                [[i1, i2,...], [j1, j2,...]]

            If ``indices`` is given in this form, the rotation matrix
            minimises the distance between ``i1`` and ``j1``, ``i2`` and
            ``j2`` and so on.
        ignore_hydrogens (bool):

    Returns:
        tuple:
    """
    # Center both structures on their centroid before fitting the rotation.
    m1 = (self - self.get_centroid()).sort_index()
    m2 = (other - other.get_centroid()).sort_index()

    if indices is not None and ignore_hydrogens:
        message = 'Indices != None and ignore_hydrogens == True is invalid'
        raise IllegalArgumentCombination(message)
    elif ignore_hydrogens:
        # NOTE(review): this branch filters out H atoms but never assigns
        # pos1/pos2, so get_kabsch_rotation below would hit a NameError —
        # confirm whether position extraction was meant to follow the
        # filtering here as well.
        m1 = m1[m1['atom'] != 'H']
        m2 = m2[m2['atom'] != 'H']
    elif indices is not None:
        pos1 = m1.loc[indices[0], ['x', 'y', 'z']].values
        pos2 = m2.loc[indices[1], ['x', 'y', 'z']].values
    else:
        # Pair atoms by label: m2 positions are taken in m1's index order.
        pos1 = m1.loc[:, ['x', 'y', 'z']].values
        pos2 = m2.loc[m1.index, ['x', 'y', 'z']].values

    # Rotate m2 onto m1 with the optimal (Kabsch) rotation matrix.
    m2 = dot(xyz_functions.get_kabsch_rotation(pos1, pos2), m2)
    return m1, m2
[ "def", "align", "(", "self", ",", "other", ",", "indices", "=", "None", ",", "ignore_hydrogens", "=", "False", ")", ":", "m1", "=", "(", "self", "-", "self", ".", "get_centroid", "(", ")", ")", ".", "sort_index", "(", ")", "m2", "=", "(", "other", ...
Align two Cartesians. Minimize the RMSD (root mean squared deviation) between ``self`` and ``other``. Returns a tuple of copies of ``self`` and ``other`` where both are centered around their centroid and ``other`` is rotated unto ``self``. The rotation minimises the distances between the atom pairs of same label. Uses the Kabsch algorithm implemented within :func:`~.xyz_functions.get_kabsch_rotation` .. note:: If ``indices is None``, then ``len(self) == len(other)`` must be true and the elements in each index have to be the same. Args: other (Cartesian): indices (sequence): It is possible to specify a subset of indices that is used for the determination of the best rotation matrix:: [[i1, i2,...], [j1, j2,...]] If ``indices`` is given in this form, the rotation matrix minimises the distance between ``i1`` and ``j1``, ``i2`` and ``j2`` and so on. ignore_hydrogens (bool): Returns: tuple:
[ "Align", "two", "Cartesians", "." ]
python
train
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L892-L950
def to_allele_counts(self, max_allele=None, dtype='u1'): """Transform genotype calls into allele counts per call. Parameters ---------- max_allele : int, optional Highest allele index. Provide this value to speed up computation. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, uint8, shape (n_variants, n_samples, len(alleles)) Array of allele counts per call. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_allele_counts() <GenotypeAlleleCountsArray shape=(3, 2, 3) dtype=uint8> 2:0:0 1:1:0 1:0:1 0:2:0 0:0:2 0:0:0 >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_allele_counts() <GenotypeAlleleCountsVector shape=(3, 3) dtype=uint8> 2:0:0 1:0:1 0:0:2 """ # determine alleles to count if max_allele is None: max_allele = self.max() alleles = list(range(max_allele + 1)) # set up output array outshape = self.shape[:-1] + (len(alleles),) out = np.zeros(outshape, dtype=dtype) for allele in alleles: # count alleles along ploidy dimension allele_match = self.values == allele if self.mask is not None: allele_match &= ~self.mask[..., np.newaxis] np.sum(allele_match, axis=-1, out=out[..., allele]) if self.ndim == 2: out = GenotypeAlleleCountsVector(out) elif self.ndim == 3: out = GenotypeAlleleCountsArray(out) return out
[ "def", "to_allele_counts", "(", "self", ",", "max_allele", "=", "None", ",", "dtype", "=", "'u1'", ")", ":", "# determine alleles to count", "if", "max_allele", "is", "None", ":", "max_allele", "=", "self", ".", "max", "(", ")", "alleles", "=", "list", "("...
Transform genotype calls into allele counts per call. Parameters ---------- max_allele : int, optional Highest allele index. Provide this value to speed up computation. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, uint8, shape (n_variants, n_samples, len(alleles)) Array of allele counts per call. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_allele_counts() <GenotypeAlleleCountsArray shape=(3, 2, 3) dtype=uint8> 2:0:0 1:1:0 1:0:1 0:2:0 0:0:2 0:0:0 >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_allele_counts() <GenotypeAlleleCountsVector shape=(3, 3) dtype=uint8> 2:0:0 1:0:1 0:0:2
[ "Transform", "genotype", "calls", "into", "allele", "counts", "per", "call", "." ]
python
train
merll/docker-map
dockermap/map/client.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/client.py#L446-L460
def pull_images(self, container, instances=None, map_name=None, **kwargs): """ Pulls images for container configurations along their dependency path. :param container: Container configuration name. :type container: unicode | str :param map_name: Container map name. :type map_name: unicode | str :param instances: Not applicable for images. :type instances: unicode | str :param kwargs: Keyword arguments to the script runner function. :return: Return values of actions. :rtype: list[dockermap.map.runner.ActionOutput] """ return self.run_actions('pull_images', container, map_name=map_name, **kwargs)
[ "def", "pull_images", "(", "self", ",", "container", ",", "instances", "=", "None", ",", "map_name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "run_actions", "(", "'pull_images'", ",", "container", ",", "map_name", "=", "map_...
Pulls images for container configurations along their dependency path. :param container: Container configuration name. :type container: unicode | str :param map_name: Container map name. :type map_name: unicode | str :param instances: Not applicable for images. :type instances: unicode | str :param kwargs: Keyword arguments to the script runner function. :return: Return values of actions. :rtype: list[dockermap.map.runner.ActionOutput]
[ "Pulls", "images", "for", "container", "configurations", "along", "their", "dependency", "path", "." ]
python
train
apache/incubator-heron
heron/instance/src/python/utils/metrics/metrics_helper.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/metrics/metrics_helper.py#L238-L241
def failed_tuple(self, stream_id, fail_latency_ns): """Apply updates to the fail metrics""" self.update_count(self.FAIL_COUNT, key=stream_id) self.update_reduced_metric(self.FAIL_LATENCY, fail_latency_ns, key=stream_id)
[ "def", "failed_tuple", "(", "self", ",", "stream_id", ",", "fail_latency_ns", ")", ":", "self", ".", "update_count", "(", "self", ".", "FAIL_COUNT", ",", "key", "=", "stream_id", ")", "self", ".", "update_reduced_metric", "(", "self", ".", "FAIL_LATENCY", ",...
Apply updates to the fail metrics
[ "Apply", "updates", "to", "the", "fail", "metrics" ]
python
valid
praekeltfoundation/molo.commenting
molo/commenting/views.py
https://github.com/praekeltfoundation/molo.commenting/blob/94549bd75e4a5c5b3db43149e32d636330b3969c/molo/commenting/views.py#L21-L37
def report(request, comment_id): """ Flags a comment on GET. Redirects to whatever is provided in request.REQUEST['next']. """ comment = get_object_or_404( django_comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID) if comment.parent is not None: messages.info(request, _('Reporting comment replies is not allowed.')) else: perform_flag(request, comment) messages.info(request, _('The comment has been reported.')) next = request.GET.get('next') or comment.get_absolute_url() return HttpResponseRedirect(next)
[ "def", "report", "(", "request", ",", "comment_id", ")", ":", "comment", "=", "get_object_or_404", "(", "django_comments", ".", "get_model", "(", ")", ",", "pk", "=", "comment_id", ",", "site__pk", "=", "settings", ".", "SITE_ID", ")", "if", "comment", "."...
Flags a comment on GET. Redirects to whatever is provided in request.REQUEST['next'].
[ "Flags", "a", "comment", "on", "GET", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/structural/validate.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L375-L392
def evaluate(data): """Provide evaluations for multiple callers split by structural variant type. """ work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", dd.get_sample_name(data), "validate")) truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data) if truth_sets and data.get("sv"): if isinstance(truth_sets, dict): val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data) summary_plots = _plot_evaluation(df_csv) data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv} else: assert isinstance(truth_sets, six.string_types) and utils.file_exists(truth_sets), truth_sets val_summary = _evaluate_vcf(data["sv"], truth_sets, work_dir, data) title = "%s structural variants" % dd.get_sample_name(data) summary_plots = validateplot.classifyplot_from_valfile(val_summary, outtype="png", title=title) data["sv-validate"] = {"csv": val_summary, "plot": summary_plots[0] if len(summary_plots) > 0 else None} return data
[ "def", "evaluate", "(", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "data", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"structural\"", ",", "dd", ".", "get_sample_name", "(", "data"...
Provide evaluations for multiple callers split by structural variant type.
[ "Provide", "evaluations", "for", "multiple", "callers", "split", "by", "structural", "variant", "type", "." ]
python
train
timedata-org/loady
loady/whitelist.py
https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/whitelist.py#L64-L69
def check_entry(*entry): """Throws an exception if the entry isn't on the whitelist.""" whitelist = read_whitelist() if not check_allow_prompt(entry, whitelist): whitelist.append(entry) write_whitelist(whitelist)
[ "def", "check_entry", "(", "*", "entry", ")", ":", "whitelist", "=", "read_whitelist", "(", ")", "if", "not", "check_allow_prompt", "(", "entry", ",", "whitelist", ")", ":", "whitelist", ".", "append", "(", "entry", ")", "write_whitelist", "(", "whitelist", ...
Throws an exception if the entry isn't on the whitelist.
[ "Throws", "an", "exception", "if", "the", "entry", "isn", "t", "on", "the", "whitelist", "." ]
python
train
benley/butcher
butcher/targets/base.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L139-L151
def is_cached(self): """Returns true if this rule is already cached.""" # TODO: cache by target+hash, not per file. try: for item in self.rule.output_files: log.info(item) self.cachemgr.in_cache(item, self._metahash()) except cache.CacheMiss: log.info('[%s]: Not cached.', self.address) return False else: log.info('[%s]: found in cache.', self.address) return True
[ "def", "is_cached", "(", "self", ")", ":", "# TODO: cache by target+hash, not per file.", "try", ":", "for", "item", "in", "self", ".", "rule", ".", "output_files", ":", "log", ".", "info", "(", "item", ")", "self", ".", "cachemgr", ".", "in_cache", "(", "...
Returns true if this rule is already cached.
[ "Returns", "true", "if", "this", "rule", "is", "already", "cached", "." ]
python
train
datamachine/twx.botapi
twx/botapi/botapi.py
https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4306-L4308
def send_document(self, *args, **kwargs): """See :func:`send_document`""" return send_document(*args, **self._merge_overrides(**kwargs)).run()
[ "def", "send_document", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "send_document", "(", "*", "args", ",", "*", "*", "self", ".", "_merge_overrides", "(", "*", "*", "kwargs", ")", ")", ".", "run", "(", ")" ]
See :func:`send_document`
[ "See", ":", "func", ":", "send_document" ]
python
train
jbaiter/gphoto2-cffi
gphoto2cffi/gphoto2.py
https://github.com/jbaiter/gphoto2-cffi/blob/2876d15a58174bd24613cd4106a3ef0cefd48050/gphoto2cffi/gphoto2.py#L571-L574
def supported_operations(self): """ All operations supported by the camera. """ return tuple(op for op in backend.CAM_OPS if self._abilities.operations & op)
[ "def", "supported_operations", "(", "self", ")", ":", "return", "tuple", "(", "op", "for", "op", "in", "backend", ".", "CAM_OPS", "if", "self", ".", "_abilities", ".", "operations", "&", "op", ")" ]
All operations supported by the camera.
[ "All", "operations", "supported", "by", "the", "camera", "." ]
python
train
spyder-ide/spyder
spyder/utils/misc.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/misc.py#L134-L151
def remove_backslashes(path): """Remove backslashes in *path* For Windows platforms only. Returns the path unchanged on other platforms. This is especially useful when formatting path strings on Windows platforms for which folder paths may contain backslashes and provoke unicode decoding errors in Python 3 (or in Python 2 when future 'unicode_literals' symbol has been imported).""" if os.name == 'nt': # Removing trailing single backslash if path.endswith('\\') and not path.endswith('\\\\'): path = path[:-1] # Replacing backslashes by slashes path = path.replace('\\', '/') path = path.replace('/\'', '\\\'') return path
[ "def", "remove_backslashes", "(", "path", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "# Removing trailing single backslash\r", "if", "path", ".", "endswith", "(", "'\\\\'", ")", "and", "not", "path", ".", "endswith", "(", "'\\\\\\\\'", ")", ":", ...
Remove backslashes in *path* For Windows platforms only. Returns the path unchanged on other platforms. This is especially useful when formatting path strings on Windows platforms for which folder paths may contain backslashes and provoke unicode decoding errors in Python 3 (or in Python 2 when future 'unicode_literals' symbol has been imported).
[ "Remove", "backslashes", "in", "*", "path", "*", "For", "Windows", "platforms", "only", ".", "Returns", "the", "path", "unchanged", "on", "other", "platforms", ".", "This", "is", "especially", "useful", "when", "formatting", "path", "strings", "on", "Windows",...
python
train
OpenKMIP/PyKMIP
kmip/core/objects.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/objects.py#L1124-L1194
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Read the data encoding the DeviceCredential struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. """ super(DeviceCredential, self).read( input_stream, kmip_version=kmip_version ) local_stream = BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.DEVICE_SERIAL_NUMBER, local_stream): self._device_serial_number = primitives.TextString( tag=enums.Tags.DEVICE_SERIAL_NUMBER ) self._device_serial_number.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.PASSWORD, local_stream): self._password = primitives.TextString( tag=enums.Tags.PASSWORD ) self._password.read(local_stream, kmip_version=kmip_version) if self.is_tag_next(enums.Tags.DEVICE_IDENTIFIER, local_stream): self._device_identifier = primitives.TextString( tag=enums.Tags.DEVICE_IDENTIFIER ) self._device_identifier.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.NETWORK_IDENTIFIER, local_stream): self._network_identifier = primitives.TextString( tag=enums.Tags.NETWORK_IDENTIFIER ) self._network_identifier.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.MACHINE_IDENTIFIER, local_stream): self._machine_identifier = primitives.TextString( tag=enums.Tags.MACHINE_IDENTIFIER ) self._machine_identifier.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.MEDIA_IDENTIFIER, local_stream): self._media_identifier = primitives.TextString( tag=enums.Tags.MEDIA_IDENTIFIER ) self._media_identifier.read( local_stream, kmip_version=kmip_version ) self.is_oversized(local_stream)
[ "def", "read", "(", "self", ",", "input_stream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "super", "(", "DeviceCredential", ",", "self", ")", ".", "read", "(", "input_stream", ",", "kmip_version", "=", "kmip_version",...
Read the data encoding the DeviceCredential struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
[ "Read", "the", "data", "encoding", "the", "DeviceCredential", "struct", "and", "decode", "it", "into", "its", "constituent", "parts", "." ]
python
test
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/network_utils.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/network_utils.py#L71-L90
def _CreateInterfaceMapNetifaces(self): """Generate a dictionary mapping MAC address to Ethernet interfaces. Returns: dict, string MAC addresses mapped to the string network interface name. """ interfaces = {} for interface in netifaces.interfaces(): af_link = netifaces.ifaddresses(interface).get(netifaces.AF_LINK, []) mac_address = next(iter(af_link), {}).get('addr', '') # In some systems this field can come with an empty string or with the # name of the interface when there is no MAC address associated with it. # Check the regex to be sure. if MAC_REGEX.match(mac_address): interfaces[mac_address] = interface else: message = 'Unable to determine MAC address for %s.' self.logger.warning(message, interface) return interfaces
[ "def", "_CreateInterfaceMapNetifaces", "(", "self", ")", ":", "interfaces", "=", "{", "}", "for", "interface", "in", "netifaces", ".", "interfaces", "(", ")", ":", "af_link", "=", "netifaces", ".", "ifaddresses", "(", "interface", ")", ".", "get", "(", "ne...
Generate a dictionary mapping MAC address to Ethernet interfaces. Returns: dict, string MAC addresses mapped to the string network interface name.
[ "Generate", "a", "dictionary", "mapping", "MAC", "address", "to", "Ethernet", "interfaces", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/gridfs/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/gridfs/__init__.py#L203-L224
def delete(self, file_id): """Delete a file from GridFS by ``"_id"``. Deletes all data belonging to the file with ``"_id"``: `file_id`. .. warning:: Any processes/threads reading from the file while this method is executing will likely see an invalid/corrupt file. Care should be taken to avoid concurrent reads to a file while it is being deleted. .. note:: Deletes of non-existent files are considered successful since the end result is the same: no file with that _id remains. :Parameters: - `file_id`: ``"_id"`` of the file to delete .. versionchanged:: 3.1 ``delete`` no longer ensures indexes. """ self.__files.delete_one({"_id": file_id}) self.__chunks.delete_many({"files_id": file_id})
[ "def", "delete", "(", "self", ",", "file_id", ")", ":", "self", ".", "__files", ".", "delete_one", "(", "{", "\"_id\"", ":", "file_id", "}", ")", "self", ".", "__chunks", ".", "delete_many", "(", "{", "\"files_id\"", ":", "file_id", "}", ")" ]
Delete a file from GridFS by ``"_id"``. Deletes all data belonging to the file with ``"_id"``: `file_id`. .. warning:: Any processes/threads reading from the file while this method is executing will likely see an invalid/corrupt file. Care should be taken to avoid concurrent reads to a file while it is being deleted. .. note:: Deletes of non-existent files are considered successful since the end result is the same: no file with that _id remains. :Parameters: - `file_id`: ``"_id"`` of the file to delete .. versionchanged:: 3.1 ``delete`` no longer ensures indexes.
[ "Delete", "a", "file", "from", "GridFS", "by", "_id", "." ]
python
train
numenta/htmresearch
htmresearch/frameworks/rl/dqn.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/rl/dqn.py#L113-L124
def soft_target_update(self): """ Soft update model parameters: .. math:: \\theta_target = \\tau \\times \\theta_local + (1 - \\tau) \\times \\theta_target , with \\tau \\ll 1 See https://arxiv.org/pdf/1509.02971.pdf """ for target_param, local_param in zip(self.target.parameters(), self.local.parameters()): target_param.data.copy_(self.tau * local_param.data + (1.0 - self.tau) * target_param.data)
[ "def", "soft_target_update", "(", "self", ")", ":", "for", "target_param", ",", "local_param", "in", "zip", "(", "self", ".", "target", ".", "parameters", "(", ")", ",", "self", ".", "local", ".", "parameters", "(", ")", ")", ":", "target_param", ".", ...
Soft update model parameters: .. math:: \\theta_target = \\tau \\times \\theta_local + (1 - \\tau) \\times \\theta_target , with \\tau \\ll 1 See https://arxiv.org/pdf/1509.02971.pdf
[ "Soft", "update", "model", "parameters", ":" ]
python
train
snare/scruffy
scruffy/file.py
https://github.com/snare/scruffy/blob/0fedc08cfdb6db927ff93c09f25f24ce5a04c541/scruffy/file.py#L288-L296
def apply_config(self, applicator): """ Replace any config tokens with values from the config. """ if type(self._path) == str: self._path = applicator.apply(self._path) for key in self._children: self._children[key].apply_config(applicator)
[ "def", "apply_config", "(", "self", ",", "applicator", ")", ":", "if", "type", "(", "self", ".", "_path", ")", "==", "str", ":", "self", ".", "_path", "=", "applicator", ".", "apply", "(", "self", ".", "_path", ")", "for", "key", "in", "self", ".",...
Replace any config tokens with values from the config.
[ "Replace", "any", "config", "tokens", "with", "values", "from", "the", "config", "." ]
python
test
openstack/python-monascaclient
monascaclient/v2_0/alarm_definitions.py
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/alarm_definitions.py#L49-L58
def update(self, **kwargs): """Update a specific alarm definition.""" url_str = self.base_url + '/%s' % kwargs['alarm_id'] del kwargs['alarm_id'] resp = self.client.create(url=url_str, method='PUT', json=kwargs) return resp
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "url_str", "=", "self", ".", "base_url", "+", "'/%s'", "%", "kwargs", "[", "'alarm_id'", "]", "del", "kwargs", "[", "'alarm_id'", "]", "resp", "=", "self", ".", "client", ".", "create",...
Update a specific alarm definition.
[ "Update", "a", "specific", "alarm", "definition", "." ]
python
train
neurosnap/mudicom
mudicom/lookup.py
https://github.com/neurosnap/mudicom/blob/04011967007409f0c5253b4f308f53a7b0fc99c6/mudicom/lookup.py#L8-L54
def VR(VR=None, description=None): """ Value Representation (VR) <-> Description lookup. :param VR: Takes the VR and returns its description :param description: Take the description of a VR and returns the VR """ value_repr = { "AE": "Application Entity", "AS": "Age String", "AT": "Attribute Tag", "CS": "Code String", "DA": "Date", "DS": "Decimal String", "DT": "Date/Time", "FL": "Floating Point Single (4 bytes)", "FD": "Floating Point Double (8 bytes)", "IS": "Integer String", "LO": "Long String", "LT": "Long Text", "OB": "Other Byte", "OF": "Other Float", "OW": "Other Word", "PN": "Person Name", "SH": "Short String", "SL": "Signed Long", "SQ": "Sequence of Items", "SS": "Signed Short", "ST": "Short Text", "TM": "Time", "UI": "Unique Identifier", "UL": "Unsigned Long", "UN": "Unknown", "US": "Unsigned Short", "UT": "Unlimited Text" } assert VR or description, "Either VR or description required to map VR" if VR is not None: VR = VR.upper() if VR in value_repr: return value_repr[VR] for key, value in value_repr.iteritems(): if description == value: return key return None
[ "def", "VR", "(", "VR", "=", "None", ",", "description", "=", "None", ")", ":", "value_repr", "=", "{", "\"AE\"", ":", "\"Application Entity\"", ",", "\"AS\"", ":", "\"Age String\"", ",", "\"AT\"", ":", "\"Attribute Tag\"", ",", "\"CS\"", ":", "\"Code String...
Value Representation (VR) <-> Description lookup. :param VR: Takes the VR and returns its description :param description: Take the description of a VR and returns the VR
[ "Value", "Representation", "(", "VR", ")", "<", "-", ">", "Description", "lookup", "." ]
python
train
sunjinopensource/asynmsg
asynmsg.py
https://github.com/sunjinopensource/asynmsg/blob/9c1d14f859cc6702446c3bb30b9916280429bd1d/asynmsg.py#L114-L141
def run_once(runner_list=None, extra_tick=Sleep(0.001), use_poll=False, auto_stop=True): """ :param auto_stop when tick error occur, stop all runners, except: if error was from a runner tick and the runner has set 'only_stop_self_when_tick_error' to True, then only this runner stop """ if runner_list is None: runner_list = _runner_list code = _run_once(runner_list, extra_tick, use_poll) if code is True: # no error return True elif code is False: # extra tick error if auto_stop: for runner in list(runner_list): runner.stop() return False else: # runner tick error if hasattr(code, 'only_stop_self_when_tick_error') and code.only_stop_self_when_tick_error: if auto_stop: code.stop() return True else: if auto_stop: for runner in list(runner_list): runner.stop() return False return True
[ "def", "run_once", "(", "runner_list", "=", "None", ",", "extra_tick", "=", "Sleep", "(", "0.001", ")", ",", "use_poll", "=", "False", ",", "auto_stop", "=", "True", ")", ":", "if", "runner_list", "is", "None", ":", "runner_list", "=", "_runner_list", "c...
:param auto_stop when tick error occur, stop all runners, except: if error was from a runner tick and the runner has set 'only_stop_self_when_tick_error' to True, then only this runner stop
[ ":", "param", "auto_stop", "when", "tick", "error", "occur", "stop", "all", "runners", "except", ":", "if", "error", "was", "from", "a", "runner", "tick", "and", "the", "runner", "has", "set", "only_stop_self_when_tick_error", "to", "True", "then", "only", "...
python
train
saltstack/salt
salt/modules/boto_cognitoidentity.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_cognitoidentity.py#L179-L218
def create_identity_pool(IdentityPoolName, AllowUnauthenticatedIdentities=False, SupportedLoginProviders=None, DeveloperProviderName=None, OpenIdConnectProviderARNs=None, region=None, key=None, keyid=None, profile=None): ''' Creates a new identity pool. All parameters except for IdentityPoolName is optional. SupportedLoginProviders should be a dictionary mapping provider names to provider app IDs. OpenIdConnectProviderARNs should be a list of OpenID Connect provider ARNs. Returns the created identity pool if successful CLI Example: .. code-block:: bash salt myminion boto_cognitoidentity.create_identity_pool my_id_pool_name \ DeveloperProviderName=custom_developer_provider ''' SupportedLoginProviders = dict() if SupportedLoginProviders is None else SupportedLoginProviders OpenIdConnectProviderARNs = list() if OpenIdConnectProviderARNs is None else OpenIdConnectProviderARNs conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: request_params = dict(IdentityPoolName=IdentityPoolName, AllowUnauthenticatedIdentities=AllowUnauthenticatedIdentities, SupportedLoginProviders=SupportedLoginProviders, OpenIdConnectProviderARNs=OpenIdConnectProviderARNs) if DeveloperProviderName: request_params['DeveloperProviderName'] = DeveloperProviderName response = conn.create_identity_pool(**request_params) response.pop('ResponseMetadata', None) return {'created': True, 'identity_pool': response} except ClientError as e: return {'created': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "create_identity_pool", "(", "IdentityPoolName", ",", "AllowUnauthenticatedIdentities", "=", "False", ",", "SupportedLoginProviders", "=", "None", ",", "DeveloperProviderName", "=", "None", ",", "OpenIdConnectProviderARNs", "=", "None", ",", "region", "=", "None"...
Creates a new identity pool. All parameters except for IdentityPoolName is optional. SupportedLoginProviders should be a dictionary mapping provider names to provider app IDs. OpenIdConnectProviderARNs should be a list of OpenID Connect provider ARNs. Returns the created identity pool if successful CLI Example: .. code-block:: bash salt myminion boto_cognitoidentity.create_identity_pool my_id_pool_name \ DeveloperProviderName=custom_developer_provider
[ "Creates", "a", "new", "identity", "pool", ".", "All", "parameters", "except", "for", "IdentityPoolName", "is", "optional", ".", "SupportedLoginProviders", "should", "be", "a", "dictionary", "mapping", "provider", "names", "to", "provider", "app", "IDs", ".", "O...
python
train
influxdata/influxdb-python
influxdb/line_protocol.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/line_protocol.py#L119-L172
def make_lines(data, precision=None): """Extract points from given dict. Extracts the points from the given dict and returns a Unicode string matching the line protocol introduced in InfluxDB 0.9.0. """ lines = [] static_tags = data.get('tags') for point in data['points']: elements = [] # add measurement name measurement = _escape_tag(_get_unicode( point.get('measurement', data.get('measurement')))) key_values = [measurement] # add tags if static_tags: tags = dict(static_tags) # make a copy, since we'll modify tags.update(point.get('tags') or {}) else: tags = point.get('tags') or {} # tags should be sorted client-side to take load off server for tag_key, tag_value in sorted(iteritems(tags)): key = _escape_tag(tag_key) value = _escape_tag_value(tag_value) if key != '' and value != '': key_values.append(key + "=" + value) elements.append(','.join(key_values)) # add fields field_values = [] for field_key, field_value in sorted(iteritems(point['fields'])): key = _escape_tag(field_key) value = _escape_value(field_value) if key != '' and value != '': field_values.append(key + "=" + value) elements.append(','.join(field_values)) # add timestamp if 'time' in point: timestamp = _get_unicode(str(int( _convert_timestamp(point['time'], precision)))) elements.append(timestamp) line = ' '.join(elements) lines.append(line) return '\n'.join(lines) + '\n'
[ "def", "make_lines", "(", "data", ",", "precision", "=", "None", ")", ":", "lines", "=", "[", "]", "static_tags", "=", "data", ".", "get", "(", "'tags'", ")", "for", "point", "in", "data", "[", "'points'", "]", ":", "elements", "=", "[", "]", "# ad...
Extract points from given dict. Extracts the points from the given dict and returns a Unicode string matching the line protocol introduced in InfluxDB 0.9.0.
[ "Extract", "points", "from", "given", "dict", "." ]
python
train
portfoliome/postpy
postpy/ddl.py
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/ddl.py#L39-L46
def compile_column(name: str, data_type: str, nullable: bool) -> str: """Create column definition statement.""" null_str = 'NULL' if nullable else 'NOT NULL' return '{name} {data_type} {null},'.format(name=name, data_type=data_type, null=null_str)
[ "def", "compile_column", "(", "name", ":", "str", ",", "data_type", ":", "str", ",", "nullable", ":", "bool", ")", "->", "str", ":", "null_str", "=", "'NULL'", "if", "nullable", "else", "'NOT NULL'", "return", "'{name} {data_type} {null},'", ".", "format", "...
Create column definition statement.
[ "Create", "column", "definition", "statement", "." ]
python
train
manahl/arctic
arctic/store/version_store.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/version_store.py#L420-L440
def get_arctic_version(self, symbol, as_of=None): """ Return the numerical representation of the arctic version used to write the last (or as_of) version for the given symbol. Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or int or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time Returns ------- arctic_version : int The numerical representation of Arctic version, used to create the specified symbol version """ return self._read_metadata(symbol, as_of=as_of).get('arctic_version', 0)
[ "def", "get_arctic_version", "(", "self", ",", "symbol", ",", "as_of", "=", "None", ")", ":", "return", "self", ".", "_read_metadata", "(", "symbol", ",", "as_of", "=", "as_of", ")", ".", "get", "(", "'arctic_version'", ",", "0", ")" ]
Return the numerical representation of the arctic version used to write the last (or as_of) version for the given symbol. Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or int or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time Returns ------- arctic_version : int The numerical representation of Arctic version, used to create the specified symbol version
[ "Return", "the", "numerical", "representation", "of", "the", "arctic", "version", "used", "to", "write", "the", "last", "(", "or", "as_of", ")", "version", "for", "the", "given", "symbol", "." ]
python
train
lambdamusic/Ontospy
ontospy/core/ontospy.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L594-L631
def __buildDomainRanges(self, aProp): """ extract domain/range details and add to Python objects """ domains = chain(aProp.rdflib_graph.objects( None, rdflib.term.URIRef(u'http://schema.org/domainIncludes')), aProp.rdflib_graph.objects( None, rdflib.RDFS.domain)) ranges = chain(aProp.rdflib_graph.objects( None, rdflib.term.URIRef(u'http://schema.org/rangeIncludes')), aProp.rdflib_graph.objects( None, rdflib.RDFS.range)) for x in domains: if isBlankNode(x): aProp.domains += [RDF_Entity(x, None, self.namespaces, is_Bnode=True)] else: aClass = self.get_class(uri=str(x)) if aClass: aProp.domains += [aClass] aClass.domain_of += [aProp] else: # edge case: it's not an OntoClass instance aProp.domains += [OntoClass(x, None, self.namespaces, ext_model=True)] for x in ranges: if isBlankNode(x): aProp.domains += [RDF_Entity(x, None, self.namespaces, is_Bnode=True)] else: aClass = self.get_class(uri=str(x)) if aClass: aProp.ranges += [aClass] aClass.range_of += [aProp] else: # eg a DataType property has xsd:STRING # here we're storing an ontospy entities but not adding it to # the main index aProp.ranges += [OntoClass(x, None, self.namespaces, ext_model=True)]
[ "def", "__buildDomainRanges", "(", "self", ",", "aProp", ")", ":", "domains", "=", "chain", "(", "aProp", ".", "rdflib_graph", ".", "objects", "(", "None", ",", "rdflib", ".", "term", ".", "URIRef", "(", "u'http://schema.org/domainIncludes'", ")", ")", ",", ...
extract domain/range details and add to Python objects
[ "extract", "domain", "/", "range", "details", "and", "add", "to", "Python", "objects" ]
python
train
Varkal/chuda
chuda/shell.py
https://github.com/Varkal/chuda/blob/0d93b716dede35231c21be97bcc19a656655983f/chuda/shell.py#L224-L243
def print_live_output(self): ''' Block and print the output of the command Raises: TypeError: If command is blocking ''' if self.block: raise TypeError(NON_BLOCKING_ERROR_MESSAGE) else: while self.thread.is_alive() or self.old_output_size < len(self.output) or self.old_error_size < len(self.error): if self._stdout is not None and len(self.output) > self.old_output_size: while self.old_output_size < len(self.output): self.logger.info(self.output[self.old_output_size]) self.old_output_size += 1 if self._stderr is not None and len(self.error) > self.old_error_size: while self.old_error_size < len(self.error): self.logger.error(self.error[self.old_error_size]) self.old_error_size += 1
[ "def", "print_live_output", "(", "self", ")", ":", "if", "self", ".", "block", ":", "raise", "TypeError", "(", "NON_BLOCKING_ERROR_MESSAGE", ")", "else", ":", "while", "self", ".", "thread", ".", "is_alive", "(", ")", "or", "self", ".", "old_output_size", ...
Block and print the output of the command Raises: TypeError: If command is blocking
[ "Block", "and", "print", "the", "output", "of", "the", "command" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/elasticity/elastic.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L890-L913
def find_eq_stress(strains, stresses, tol=1e-10): """ Finds stress corresponding to zero strain state in stress-strain list Args: strains (Nx3x3 array-like): array corresponding to strains stresses (Nx3x3 array-like): array corresponding to stresses tol (float): tolerance to find zero strain state """ stress_array = np.array(stresses) strain_array = np.array(strains) eq_stress = stress_array[np.all(abs(strain_array)<tol, axis=(1,2))] if eq_stress.size != 0: all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all() if len(eq_stress) > 1 and not all_same: raise ValueError("Multiple stresses found for equilibrium strain" " state, please specify equilibrium stress or " " remove extraneous stresses.") eq_stress = eq_stress[0] else: warnings.warn("No eq state found, returning zero voigt stress") eq_stress = Stress(np.zeros((3, 3))) return eq_stress
[ "def", "find_eq_stress", "(", "strains", ",", "stresses", ",", "tol", "=", "1e-10", ")", ":", "stress_array", "=", "np", ".", "array", "(", "stresses", ")", "strain_array", "=", "np", ".", "array", "(", "strains", ")", "eq_stress", "=", "stress_array", "...
Finds stress corresponding to zero strain state in stress-strain list Args: strains (Nx3x3 array-like): array corresponding to strains stresses (Nx3x3 array-like): array corresponding to stresses tol (float): tolerance to find zero strain state
[ "Finds", "stress", "corresponding", "to", "zero", "strain", "state", "in", "stress", "-", "strain", "list" ]
python
train
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L316-L330
def channelModeModify(VgcsTargetModeIdentication_presence=0, MultiRateConfiguration_presence=0): """CHANNEL MODE MODIFY Section 9.1.5""" a = TpPd(pd=0x6) b = MessageType(mesType=0x8) # 0001000 c = ChannelDescription2() d = ChannelMode() packet = a / b / c / d if VgcsTargetModeIdentication is 1: e = VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0) packet = packet / e if MultiRateConfiguration is 1: f = MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0) packet = packet / f return packet
[ "def", "channelModeModify", "(", "VgcsTargetModeIdentication_presence", "=", "0", ",", "MultiRateConfiguration_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "0x6", ")", "b", "=", "MessageType", "(", "mesType", "=", "0x8", ")", "# 0001000", ...
CHANNEL MODE MODIFY Section 9.1.5
[ "CHANNEL", "MODE", "MODIFY", "Section", "9", ".", "1", ".", "5" ]
python
train
NLeSC/noodles
noodles/run/threading/sqlite3.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/run/threading/sqlite3.py#L20-L47
def pass_job(db: JobDB, result_queue: Queue, always_cache=False): """Create a pull stream that receives jobs and passes them on to the database. If the job already has a result, that result is pushed onto the `result_queue`. """ @pull def pass_job_stream(job_source): """Pull stream instance created by `pass_job`.""" result_sink = result_queue.sink() for message in job_source(): if message is EndOfQueue: return key, job = message if always_cache or ('store' in job.hints): status, retrieved_result = db.add_job_to_db(key, job) if status == 'retrieved': result_sink.send(retrieved_result) continue elif status == 'attached': continue yield message return pass_job_stream
[ "def", "pass_job", "(", "db", ":", "JobDB", ",", "result_queue", ":", "Queue", ",", "always_cache", "=", "False", ")", ":", "@", "pull", "def", "pass_job_stream", "(", "job_source", ")", ":", "\"\"\"Pull stream instance created by `pass_job`.\"\"\"", "result_sink", ...
Create a pull stream that receives jobs and passes them on to the database. If the job already has a result, that result is pushed onto the `result_queue`.
[ "Create", "a", "pull", "stream", "that", "receives", "jobs", "and", "passes", "them", "on", "to", "the", "database", ".", "If", "the", "job", "already", "has", "a", "result", "that", "result", "is", "pushed", "onto", "the", "result_queue", "." ]
python
train