Dataset schema (reconstructed from the flattened dataset-viewer column statistics):

    column            type       observed size
    ----------------  ---------  -------------------
    repo              string     7 to 54 chars
    path              string     4 to 192 chars
    url               string     87 to 284 chars
    code              string     78 to 104k chars
    code_tokens       list
    docstring         string     1 to 46.9k chars
    docstring_tokens  list
    language          string     1 distinct value
    partition         string     3 distinct values
openai/universe
universe/vncdriver/vendor/pydes.py
https://github.com/openai/universe/blob/cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c/universe/vncdriver/vendor/pydes.py#L739-L743
def setMode(self, mode):
    """Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
    _baseDes.setMode(self, mode)
    for key in (self.__key1, self.__key2, self.__key3):
        key.setMode(mode)
[ "def", "setMode", "(", "self", ",", "mode", ")", ":", "_baseDes", ".", "setMode", "(", "self", ",", "mode", ")", "for", "key", "in", "(", "self", ".", "__key1", ",", "self", ".", "__key2", ",", "self", ".", "__key3", ")", ":", "key", ".", "setMod...
Sets the type of crypting mode, pyDes.ECB or pyDes.CBC
[ "Sets", "the", "type", "of", "crypting", "mode", "pyDes", ".", "ECB", "or", "pyDes", ".", "CBC" ]
python
train
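A minimal usage sketch for the triple-DES record above, assuming the pyDes package is installed; the 16-byte key and plaintext are illustrative only.

    import pyDes

    k = pyDes.triple_des(b"Sixteen byte key", pyDes.ECB, padmode=pyDes.PAD_PKCS5)
    ciphertext = k.encrypt(b"please encrypt me")
    assert k.decrypt(ciphertext) == b"please encrypt me"
    k.setMode(pyDes.CBC)  # per the setter above, this forwards the mode to all three internal DES keys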
awslabs/sockeye
sockeye/encoder.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/encoder.py#L285-L298
def encode(self,
           data: mx.sym.Symbol,
           data_length: Optional[mx.sym.Symbol],
           seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    """
    Encodes data given sequence lengths of individual examples and maximum sequence length.

    :param data: Input data.
    :param data_length: Vector with sequence lengths.
    :param seq_len: Maximum sequence length.
    :return: Encoded versions of input data (data, data_length, seq_len).
    """
    with mx.AttrScope(__layout__=self.target_layout):
        return mx.sym.swapaxes(data=data, dim1=0, dim2=1), data_length, seq_len
[ "def", "encode", "(", "self", ",", "data", ":", "mx", ".", "sym", ".", "Symbol", ",", "data_length", ":", "Optional", "[", "mx", ".", "sym", ".", "Symbol", "]", ",", "seq_len", ":", "int", ")", "->", "Tuple", "[", "mx", ".", "sym", ".", "Symbol",...
Encodes data given sequence lengths of individual examples and maximum sequence length. :param data: Input data. :param data_length: Vector with sequence lengths. :param seq_len: Maximum sequence length. :return: Encoded versions of input data (data, data_length, seq_len).
[ "Encodes", "data", "given", "sequence", "lengths", "of", "individual", "examples", "and", "maximum", "sequence", "length", "." ]
python
train
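The encoder above only swaps the batch and time axes to produce time-major layout; a NumPy analogy of the same swapaxes call (the shapes are illustrative):

    import numpy as np

    batch_major = np.zeros((32, 50))             # (batch_size, seq_len)
    time_major = np.swapaxes(batch_major, 0, 1)  # what mx.sym.swapaxes(dim1=0, dim2=1) does symbolically
    assert time_major.shape == (50, 32)          # (seq_len, batch_size)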
cgtobi/PyRMVtransport
RMVtransport/rmvjourney.py
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L93-L101
def _pass_list(self) -> List[Dict[str, Any]]:
    """Extract next stops along the journey."""
    stops: List[Dict[str, Any]] = []
    for stop in self.journey.PassList.BasicStop:
        index = stop.get("index")
        station = stop.Location.Station.HafasName.Text.text
        station_id = stop.Location.Station.ExternalId.text
        stops.append({"index": index, "stationId": station_id, "station": station})
    return stops
[ "def", "_pass_list", "(", "self", ")", "->", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "stops", ":", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", "=", "[", "]", "for", "stop", "in", "self", ".", "journey", ".", "Pa...
Extract next stops along the journey.
[ "Extract", "next", "stops", "along", "the", "journey", "." ]
python
train
josiahcarlson/rom
rom/util.py
https://github.com/josiahcarlson/rom/blob/8b5607a856341df85df33422accc30ba9294dbdb/rom/util.py#L278-L298
def SIMPLE(val):
    '''
    This is a basic case-sensitive "sorted order" index keygen function for
    strings. This will return a value that is suitable to be used for ordering
    by a 7-byte prefix of a string (that is 7 characters from a byte-string,
    and 1.75-7 characters from a unicode string, depending on character ->
    encoding length).

    .. warning:: Case sensitivity is based on the (encoded) byte prefixes of
        the strings/text being indexed, so ordering *may be different* than a
        native comparison ordering (especially if an order is different based
        on characters past the 7th encoded byte).
    '''
    if not val:
        return None
    if not isinstance(val, six.string_types):
        if six.PY3 and isinstance(val, bytes):
            val = val.decode('latin-1')
        else:
            val = str(val)
    return {'': _prefix_score(val)}
[ "def", "SIMPLE", "(", "val", ")", ":", "if", "not", "val", ":", "return", "None", "if", "not", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ":", "if", "six", ".", "PY3", "and", "isinstance", "(", "val", ",", "bytes", ")", ":", ...
This is a basic case-sensitive "sorted order" index keygen function for strings. This will return a value that is suitable to be used for ordering by a 7-byte prefix of a string (that is 7 characters from a byte-string, and 1.75-7 characters from a unicode string, depending on character -> encoding length). .. warning:: Case sensitivity is based on the (encoded) byte prefixes of the strings/text being indexed, so ordering *may be different* than a native comparison ordering (especially if an order is different based on characters past the 7th encoded byte).
[ "This", "is", "a", "basic", "case", "-", "sensitive", "sorted", "order", "index", "keygen", "function", "for", "strings", ".", "This", "will", "return", "a", "value", "that", "is", "suitable", "to", "be", "used", "for", "ordering", "by", "a", "7", "-", ...
python
test
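To illustrate the docstring's warning: `prefix_key` below is a hypothetical stand-in for rom's internal `_prefix_score` (not its real implementation), ordering strings by their first 7 encoded bytes; strings that differ only past byte 7 collide even though native comparison distinguishes them.

    def prefix_key(s, nbytes=7):
        # hypothetical illustration, not rom's actual _prefix_score
        b = s.encode('utf-8')[:nbytes]
        return int.from_bytes(b.ljust(nbytes, b'\x00'), 'big')

    assert prefix_key("abcdefgA") == prefix_key("abcdefgB")  # tie: first 7 bytes match
    assert "abcdefgA" < "abcdefgB"                           # native ordering still differs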
PmagPy/PmagPy
dialogs/demag_interpretation_editor.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/demag_interpretation_editor.py#L426-L531
def update_logger_entry(self, i):
    """
    helper function that given a index in this objects fit_list parameter
    inserts a entry at that index

    @param: i -> index in fit_list to find the (specimen_name,fit object) tup
        that determines all the data for this logger entry.
    """
    if i < len(self.fit_list):
        tup = self.fit_list[i]
    elif i < self.logger.GetItemCount():
        self.logger.DeleteItem(i)
        return
    else:
        return

    coordinate_system = self.parent.COORDINATE_SYSTEM
    fit = tup[0]
    pars = fit.get(coordinate_system)
    fmin, fmax, n, ftype, dec, inc, mad, dang, a95, sk, sr2 = "", "", "", "", "", "", "", "", "", "", ""
    specimen = tup[1]

    if coordinate_system == 'geographic':
        block_key = 'zijdblock_geo'
    elif coordinate_system == 'tilt-corrected':
        block_key = 'zijdblock_tilt'
    else:
        block_key = 'zijdblock'

    name = fit.name
    if pars == {} and self.parent.Data[specimen][block_key] != []:
        fit.put(specimen, coordinate_system,
                self.parent.get_PCA_parameters(specimen, fit, fit.tmin, fit.tmax,
                                               coordinate_system, fit.PCA_type))
        pars = fit.get(coordinate_system)

    if self.parent.Data[specimen][block_key] == []:
        spars = fit.get('specimen')
        fmin = fit.tmin
        fmax = fit.tmax
        if 'specimen_n' in list(spars.keys()):
            n = str(spars['specimen_n'])
        else:
            n = 'No Data'
        if 'calculation_type' in list(spars.keys()):
            ftype = spars['calculation_type']
        else:
            ftype = 'No Data'
        dec = 'No Data'
        inc = 'No Data'
        mad = 'No Data'
        dang = 'No Data'
        a95 = 'No Data'
        sk = 'No Data'
        sr2 = 'No Data'
    else:
        if 'measurement_step_min' in list(pars.keys()):
            fmin = str(fit.tmin)
        else:
            fmin = "N/A"
        if 'measurement_step_max' in list(pars.keys()):
            fmax = str(fit.tmax)
        else:
            fmax = "N/A"
        if 'specimen_n' in list(pars.keys()):
            n = str(pars['specimen_n'])
        else:
            n = "N/A"
        if 'calculation_type' in list(pars.keys()):
            ftype = pars['calculation_type']
        else:
            ftype = "N/A"
        if 'specimen_dec' in list(pars.keys()):
            dec = "%.1f" % pars['specimen_dec']
        else:
            dec = "N/A"
        if 'specimen_inc' in list(pars.keys()):
            inc = "%.1f" % pars['specimen_inc']
        else:
            inc = "N/A"
        if 'specimen_mad' in list(pars.keys()):
            mad = "%.1f" % pars['specimen_mad']
        else:
            mad = "N/A"
        if 'specimen_dang' in list(pars.keys()):
            dang = "%.1f" % pars['specimen_dang']
        else:
            dang = "N/A"
        if 'specimen_alpha95' in list(pars.keys()):
            a95 = "%.1f" % pars['specimen_alpha95']
        else:
            a95 = "N/A"
        if 'specimen_k' in list(pars.keys()):
            sk = "%.1f" % pars['specimen_k']
        else:
            sk = "N/A"
        if 'specimen_r' in list(pars.keys()):
            sr2 = "%.1f" % pars['specimen_r']
        else:
            sr2 = "N/A"

    if self.search_query != "":
        entry = (specimen + name + fmin + fmax + n + ftype + dec + inc + mad +
                 dang + a95 + sk + sr2).replace(" ", "").lower()
        if self.search_query not in entry:
            self.fit_list.pop(i)
            if i < self.logger.GetItemCount():
                self.logger.DeleteItem(i)
            return "s"
    for e in (specimen, name, fmin, fmax, n, ftype, dec, inc, mad, dang, a95, sk, sr2):
        if e not in self.search_choices:
            self.search_choices.append(e)

    if i < self.logger.GetItemCount():
        self.logger.DeleteItem(i)
    self.logger.InsertItem(i, str(specimen))
    self.logger.SetItem(i, 1, name)
    self.logger.SetItem(i, 2, fmin)
    self.logger.SetItem(i, 3, fmax)
    self.logger.SetItem(i, 4, n)
    self.logger.SetItem(i, 5, ftype)
    self.logger.SetItem(i, 6, dec)
    self.logger.SetItem(i, 7, inc)
    self.logger.SetItem(i, 8, mad)
    self.logger.SetItem(i, 9, dang)
    self.logger.SetItem(i, 10, a95)
    self.logger.SetItem(i, 11, sk)
    self.logger.SetItem(i, 12, sr2)
    self.logger.SetItemBackgroundColour(i, "WHITE")
    a, b = False, False
    if fit in self.parent.bad_fits:
        self.logger.SetItemBackgroundColour(i, "red")
        b = True
    if self.parent.current_fit == fit:
        self.logger.SetItemBackgroundColour(i, "LIGHT BLUE")
        self.logger_focus(i)
        self.current_fit_index = i
        a = True
    if a and b:
        self.logger.SetItemBackgroundColour(i, "red")
[ "def", "update_logger_entry", "(", "self", ",", "i", ")", ":", "if", "i", "<", "len", "(", "self", ".", "fit_list", ")", ":", "tup", "=", "self", ".", "fit_list", "[", "i", "]", "elif", "i", "<", "self", ".", "logger", ".", "GetItemCount", "(", "...
helper function that given a index in this objects fit_list parameter inserts a entry at that index @param: i -> index in fit_list to find the (specimen_name,fit object) tup that determines all the data for this logger entry.
[ "helper", "function", "that", "given", "a", "index", "in", "this", "objects", "fit_list", "parameter", "inserts", "a", "entry", "at", "that", "index" ]
python
train
Falkonry/falkonry-python-client
falkonryclient/service/http.py
https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/http.py#L38-L60
def get(self, url):
    """
    To make a GET request to Falkonry API server

    :param url: string
    """
    response = requests.get(
        self.host + url,
        headers={
            'Authorization': 'Bearer ' + self.token,
            'x-falkonry-source': self.sourceHeader
        },
        verify=False
    )
    if response.status_code == 200:
        try:
            return json.loads(response._content.decode('utf-8'))
        except Exception as error:
            return response.content
    elif response.status_code == 401:
        raise Exception(json.dumps({'message': 'Unauthorized Access'}))
    else:
        raise Exception(response.content)
[ "def", "get", "(", "self", ",", "url", ")", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "host", "+", "url", ",", "headers", "=", "{", "'Authorization'", ":", "'Bearer '", "+", "self", ".", "token", ",", "'x-falkonry-source'", ":", ...
To make a GET request to Falkonry API server :param url: string
[ "To", "make", "a", "GET", "request", "to", "Falkonry", "API", "server", ":", "param", "url", ":", "string" ]
python
train
bcbio/bcbio-nextgen
bcbio/srna/group.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L24-L56
def run_prepare(*data):
    """
    Run seqcluster prepare to merge all samples in one file
    """
    out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare")
    out_dir = os.path.abspath(safe_makedir(out_dir))
    prepare_dir = os.path.join(out_dir, "prepare")
    tools = dd.get_expression_caller(data[0][0])
    if len(tools) == 0:
        logger.info("You didn't specify any other expression caller tool."
                    "You can add to the YAML file:"
                    "expression_caller:[trna, seqcluster, mirdeep2]")
    fn = []
    for sample in data:
        name = sample[0]["rgnames"]['sample']
        fn.append("%s\t%s" % (sample[0]['collapse'], name))
    args = namedtuple('args', 'debug print_debug minc minl maxl out')
    args = args(False, False, 2, 17, 40, out_dir)
    ma_out = op.join(out_dir, "seqs.ma")
    seq_out = op.join(out_dir, "seqs.fastq")
    min_shared = max(int(len(fn) / 10.0), 1)
    if not file_exists(ma_out):
        seq_l, sample_l = prepare._read_fastq_files(fn, args)
        with file_transaction(ma_out) as ma_tx:
            with open(ma_tx, 'w') as ma_handle:
                with open(seq_out, 'w') as seq_handle:
                    logger.info("Prepare seqs.fastq with -minl 17 -maxl 40 -minc 2 --min_shared 0.1")
                    prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared)
    for sample in data:
        sample[0]["seqcluster_prepare_ma"] = ma_out
        sample[0]["seqcluster_prepare_fastq"] = seq_out
    return data
[ "def", "run_prepare", "(", "*", "data", ")", ":", "out_dir", "=", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", "[", "0", "]", "[", "0", "]", ")", ",", "\"seqcluster\"", ",", "\"prepare\"", ")", "out_dir", "=", "os", ...
Run seqcluster prepare to merge all samples in one file
[ "Run", "seqcluster", "prepare", "to", "merge", "all", "samples", "in", "one", "file" ]
python
train
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L2662-L2692
def _parse_tokens(tokens):
    """Parse the tokens.

    This converts the tokens into a form where we can manipulate them
    more easily.

    """
    index = 0
    parsed_tokens = []
    num_tokens = len(tokens)
    while index < num_tokens:
        tok = Token(*tokens[index])
        assert tok.token_type != token.INDENT
        if tok.token_type == tokenize.NEWLINE:
            # There's only one newline and it's at the end.
            break
        if tok.token_string in '([{':
            (container, index) = _parse_container(tokens, index)
            if not container:
                return None
            parsed_tokens.append(container)
        else:
            parsed_tokens.append(Atom(tok))
        index += 1
    return parsed_tokens
[ "def", "_parse_tokens", "(", "tokens", ")", ":", "index", "=", "0", "parsed_tokens", "=", "[", "]", "num_tokens", "=", "len", "(", "tokens", ")", "while", "index", "<", "num_tokens", ":", "tok", "=", "Token", "(", "*", "tokens", "[", "index", "]", ")...
Parse the tokens. This converts the tokens into a form where we can manipulate them more easily.
[ "Parse", "the", "tokens", "." ]
python
train
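`_parse_tokens` consumes tuples from the standard-library tokenizer; a quick look at the raw stream it starts from:

    import io
    import tokenize

    tokens = list(tokenize.generate_tokens(io.StringIO("x = (1, 2)\n").readline))
    for tok in tokens:
        print(tokenize.tok_name[tok.type], repr(tok.string))
    # NAME 'x', OP '=', OP '(', NUMBER '1', OP ',', NUMBER '2', OP ')', NEWLINE '\n', ...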
cebel/pyuniprot
src/pyuniprot/cli.py
https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/cli.py#L57-L72
def update(taxids, conn, force_download, silent):
    """Update local UniProt database"""
    if not silent:
        click.secho("WARNING: Update is very time consuming and can take several "
                    "hours depending which organisms you are importing!", fg="yellow")
        if not taxids:
            click.echo("Please note that you can restrict import to organisms by "
                       "NCBI taxonomy IDs")
            click.echo("Example (human, mouse, rat):\n")
            click.secho("\tpyuniprot update --taxids 9606,10090,10116\n\n", fg="green")
    if taxids:
        taxids = [int(taxid.strip()) for taxid in taxids.strip().split(',')
                  if re.search('^ *\d+ *$', taxid)]
    database.update(taxids=taxids, connection=conn, force_download=force_download, silent=silent)
[ "def", "update", "(", "taxids", ",", "conn", ",", "force_download", ",", "silent", ")", ":", "if", "not", "silent", ":", "click", ".", "secho", "(", "\"WARNING: Update is very time consuming and can take several \"", "\"hours depending which organisms you are importing!\"",...
Update local UniProt database
[ "Update", "local", "UniProt", "database" ]
python
train
edx/edx-enterprise
enterprise/migrations/0067_add_role_based_access_control_switch.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/migrations/0067_add_role_based_access_control_switch.py#L15-L18
def delete_switch(apps, schema_editor):
    """Delete the `role_based_access_control` switch."""
    Switch = apps.get_model('waffle', 'Switch')
    Switch.objects.filter(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH).delete()
[ "def", "delete_switch", "(", "apps", ",", "schema_editor", ")", ":", "Switch", "=", "apps", ".", "get_model", "(", "'waffle'", ",", "'Switch'", ")", "Switch", ".", "objects", ".", "filter", "(", "name", "=", "ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH", ")", ...
Delete the `role_based_access_control` switch.
[ "Delete", "the", "role_based_access_control", "switch", "." ]
python
valid
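`delete_switch` is the reverse half of a Django data migration. A sketch of how such a function is typically wired up; the `create_switch` counterpart and the dependency name are assumptions, not part of the record:

    from django.db import migrations


    def create_switch(apps, schema_editor):
        # assumed forward counterpart: create the switch, disabled by default
        Switch = apps.get_model('waffle', 'Switch')
        Switch.objects.get_or_create(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH,
                                     defaults={'active': False})


    class Migration(migrations.Migration):
        dependencies = [('enterprise', '0066_previous_migration')]  # placeholder name
        operations = [migrations.RunPython(create_switch, delete_switch)]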
miquelo/resort
packages/resort/component/glassfish.py
https://github.com/miquelo/resort/blob/097a25d3257c91a75c194fd44c2797ab356f85dd/packages/resort/component/glassfish.py#L276-L293
def custom_resource(self, name, restype, factclass, props):
    """
    Domain custom resource.

    :param str name: Resource name.
    :param str restype: Resource type.
    :param str factclass: Resource factory class.
    :param dict props: Resource properties.
    :rtype: CustomResource
    """
    return CustomResource(self.__endpoint, name, restype, factclass, props)
[ "def", "custom_resource", "(", "self", ",", "name", ",", "restype", ",", "factclass", ",", "props", ")", ":", "return", "CustomResource", "(", "self", ".", "__endpoint", ",", "name", ",", "restype", ",", "factclass", ",", "props", ")" ]
Domain custom resource. :param str name: Resource name. :param str restype: Resource type. :param str factclass: Resource factory class. :param dict props: Resource properties. :rtype: CustomResource
[ "Domain", "custom", "resource", ".", ":", "param", "str", "name", ":", "Resource", "name", ".", ":", "param", "str", "restype", ":", "Resource", "type", ".", ":", "param", "str", "factclass", ":", "Resource", "factory", "class", ".", ":", "param", "dict"...
python
train
stephrdev/django-formwizard
formwizard/views.py
https://github.com/stephrdev/django-formwizard/blob/7b35165f0340aae4e8302d5b05b0cb443f6c9904/formwizard/views.py#L363-L388
def get_form(self, step=None, data=None, files=None):
    """
    Constructs the form for a given `step`. If no `step` is defined, the
    current step will be determined automatically.

    The form will be initialized using the `data` argument to prefill the
    new form. If needed, instance or queryset (for `ModelForm` or
    `ModelFormSet`) will be added too.
    """
    if step is None:
        step = self.steps.current
    # prepare the kwargs for the form instance.
    kwargs = self.get_form_kwargs(step)
    kwargs.update({
        'data': data,
        'files': files,
        'prefix': self.get_form_prefix(step, self.form_list[step]),
        'initial': self.get_form_initial(step),
    })
    if issubclass(self.form_list[step], forms.ModelForm):
        # If the form is based on ModelForm, add instance if available.
        kwargs.update({'instance': self.get_form_instance(step)})
    elif issubclass(self.form_list[step], forms.models.BaseModelFormSet):
        # If the form is based on ModelFormSet, add queryset if available.
        kwargs.update({'queryset': self.get_form_instance(step)})
    return self.form_list[step](**kwargs)
[ "def", "get_form", "(", "self", ",", "step", "=", "None", ",", "data", "=", "None", ",", "files", "=", "None", ")", ":", "if", "step", "is", "None", ":", "step", "=", "self", ".", "steps", ".", "current", "# prepare the kwargs for the form instance.", "k...
Constructs the form for a given `step`. If no `step` is defined, the current step will be determined automatically. The form will be initialized using the `data` argument to prefill the new form. If needed, instance or queryset (for `ModelForm` or `ModelFormSet`) will be added too.
[ "Constructs", "the", "form", "for", "a", "given", "step", ".", "If", "no", "step", "is", "defined", "the", "current", "step", "will", "be", "determined", "automatically", "." ]
python
train
MacHu-GWU/rolex-project
rolex/util.py
https://github.com/MacHu-GWU/rolex-project/blob/a1111b410ed04b4b6eddd81df110fa2dacfa6537/rolex/util.py#L74-L91
def to_utc(a_datetime, keep_utc_tzinfo=False):
    """
    Convert a time awared datetime to utc datetime.

    :param a_datetime: a timezone awared datetime. (If not, then just returns)
    :param keep_utc_tzinfo: whether to retain the utc time zone information.

    **中文文档** (translated): Convert a timezone-aware datetime to UTC. For a
    UTC time, it makes no difference whether tzinfo is present.
    """
    if a_datetime.tzinfo:
        utc_datetime = a_datetime.astimezone(utc)  # convert to utc time
        if keep_utc_tzinfo is False:
            utc_datetime = utc_datetime.replace(tzinfo=None)
        return utc_datetime
    else:
        return a_datetime
[ "def", "to_utc", "(", "a_datetime", ",", "keep_utc_tzinfo", "=", "False", ")", ":", "if", "a_datetime", ".", "tzinfo", ":", "utc_datetime", "=", "a_datetime", ".", "astimezone", "(", "utc", ")", "# convert to utc time", "if", "keep_utc_tzinfo", "is", "False", ...
Convert a time awared datetime to utc datetime. :param a_datetime: a timezone awared datetime. (If not, then just returns) :param keep_utc_tzinfo: whether to retain the utc time zone information. **中文文档** (translated): Convert a timezone-aware datetime to UTC. For a UTC time, it makes no difference whether tzinfo is present.
[ "Convert", "a", "time", "awared", "datetime", "to", "utc", "datetime", "." ]
python
train
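Expected behavior of `to_utc`, assuming `from rolex.util import to_utc`; stdlib tzinfo objects are used for the demo even though rolex binds `utc` from pytz:

    from datetime import datetime, timedelta, timezone

    est = timezone(timedelta(hours=-5))
    aware = datetime(2020, 1, 1, 7, 0, tzinfo=est)
    print(to_utc(aware))        # 2020-01-01 12:00:00        (naive UTC)
    print(to_utc(aware, True))  # 2020-01-01 12:00:00+00:00  (tzinfo kept)

    naive = datetime(2020, 1, 1, 7, 0)
    assert to_utc(naive) is naive  # no tzinfo: returned unchanged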
plivo/plivo-python
plivo/utils/__init__.py
https://github.com/plivo/plivo-python/blob/f09a1fc63e378bf17269011a071e093aa83930d0/plivo/utils/__init__.py#L16-L38
def validate_signature(uri, nonce, signature, auth_token=''):
    """
    Validates requests made by Plivo to your servers.

    :param uri: Your server URL
    :param nonce: X-Plivo-Signature-V2-Nonce
    :param signature: X-Plivo-Signature-V2 header
    :param auth_token: Plivo Auth token
    :return: True if the request matches signature, False otherwise
    """
    auth_token = bytes(auth_token.encode('utf-8'))
    nonce = bytes(nonce.encode('utf-8'))
    signature = bytes(signature.encode('utf-8'))
    parsed_uri = urlparse(uri.encode('utf-8'))
    base_url = urlunparse((parsed_uri.scheme.decode('utf-8'),
                           parsed_uri.netloc.decode('utf-8'),
                           parsed_uri.path.decode('utf-8'),
                           '', '', '')).encode('utf-8')
    return encodestring(hnew(auth_token, base_url + nonce, sha256)
                        .digest()).strip() == signature
[ "def", "validate_signature", "(", "uri", ",", "nonce", ",", "signature", ",", "auth_token", "=", "''", ")", ":", "auth_token", "=", "bytes", "(", "auth_token", ".", "encode", "(", "'utf-8'", ")", ")", "nonce", "=", "bytes", "(", "nonce", ".", "encode", ...
Validates requests made by Plivo to your servers. :param uri: Your server URL :param nonce: X-Plivo-Signature-V2-Nonce :param signature: X-Plivo-Signature-V2 header :param auth_token: Plivo Auth token :return: True if the request matches signature, False otherwise
[ "Validates", "requests", "made", "by", "Plivo", "to", "your", "servers", "." ]
python
train
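The signing side, reconstructed from the validator above: base64(HMAC-SHA256(auth_token, base_url + nonce)), where base_url strips the query string and fragment. `make_signature` is a hypothetical helper for illustration, not part of the plivo SDK:

    import hmac
    from base64 import b64encode
    from hashlib import sha256
    from urllib.parse import urlparse, urlunparse


    def make_signature(uri, nonce, auth_token):
        parsed = urlparse(uri)
        base_url = urlunparse((parsed.scheme, parsed.netloc, parsed.path, '', '', ''))
        mac = hmac.new(auth_token.encode('utf-8'), (base_url + nonce).encode('utf-8'), sha256)
        return b64encode(mac.digest()).decode('utf-8')

    sig = make_signature("https://example.com/answer/?CallUUID=x", "somenonce", "token")
    # validate_signature("https://example.com/answer/?CallUUID=x", "somenonce", sig, "token") -> True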
Bogdanp/anom-py
anom/query.py
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L429-L442
def get(self, **options):
    """Run this query and get the first result.

    Parameters:
      \**options(QueryOptions, optional)

    Returns:
      Model: An entity or None if there were no results.
    """
    sub_query = self.with_limit(1)
    options = QueryOptions(sub_query).replace(batch_size=1)
    for result in sub_query.run(**options):
        return result
    return None
[ "def", "get", "(", "self", ",", "*", "*", "options", ")", ":", "sub_query", "=", "self", ".", "with_limit", "(", "1", ")", "options", "=", "QueryOptions", "(", "sub_query", ")", ".", "replace", "(", "batch_size", "=", "1", ")", "for", "result", "in",...
Run this query and get the first result. Parameters: \**options(QueryOptions, optional) Returns: Model: An entity or None if there were no results.
[ "Run", "this", "query", "and", "get", "the", "first", "result", "." ]
python
train
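A typical call, assuming an anom `Model` subclass along these lines (the model and field are illustrative); the query fetches at most one entity:

    from anom import Model, props

    class User(Model):
        email = props.String(indexed=True)

    first_match = User.query().where(User.email == "a@example.com").get()
    if first_match is None:
        print("no results")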
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L2148-L2186
def transitions(self, transitions):
    """ Setter for _transitions field

    See property

    :param: transitions: Dictionary transitions[transition_id] of :class:`rafcon.core.transition.Transition`
    :raises exceptions.TypeError: if the transitions parameter has the wrong type
    :raises exceptions.AttributeError: if the keys of the transitions dictionary and the transition_ids of the
                                       transitions in the dictionary do not match
    """
    if not isinstance(transitions, dict):
        raise TypeError("transitions must be of type dict")
    if [t_id for t_id, transition in transitions.items() if not isinstance(transition, Transition)]:
        raise TypeError("element of transitions must be of type Transition")
    if [t_id for t_id, transition in transitions.items() if not t_id == transition.transition_id]:
        raise AttributeError("The key of the transition dictionary and the id of the transition do not match")

    old_transitions = self._transitions
    self._transitions = transitions
    transition_ids_to_delete = []
    for transition_id, transition in transitions.items():
        try:
            transition.parent = self
        except (ValueError, RecoveryModeException) as e:
            if type(e) is RecoveryModeException:
                logger.error("Recovery error: {0}\n{1}".format(str(e), str(traceback.format_exc())))
                if e.do_delete_item:
                    transition_ids_to_delete.append(transition.transition_id)
            else:
                self._transitions = old_transitions
                raise

    self._transitions = dict((transition_id, t) for (transition_id, t) in self._transitions.items()
                             if transition_id not in transition_ids_to_delete)

    # check that all old_transitions are no more referencing self as there parent
    for old_transition in old_transitions.values():
        if old_transition not in self._transitions.values() and old_transition.parent is self:
            old_transition.parent = None
[ "def", "transitions", "(", "self", ",", "transitions", ")", ":", "if", "not", "isinstance", "(", "transitions", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"transitions must be of type dict\"", ")", "if", "[", "t_id", "for", "t_id", ",", "transition", ...
Setter for _transitions field See property :param: transitions: Dictionary transitions[transition_id] of :class:`rafcon.core.transition.Transition` :raises exceptions.TypeError: if the transitions parameter has the wrong type :raises exceptions.AttributeError: if the keys of the transitions dictionary and the transition_ids of the transitions in the dictionary do not match
[ "Setter", "for", "_transitions", "field" ]
python
train
python-diamond/Diamond
src/collectors/drbd/drbd.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/drbd/drbd.py#L24-L32
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(DRBDCollector, self).get_default_config()
    config.update({
        'path': 'drbd'
    })
    return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "DRBDCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'drbd'", "}", ")", "return", "config" ]
Returns the default collector settings
[ "Returns", "the", "default", "collector", "settings" ]
python
train
inveniosoftware/invenio-files-rest
invenio_files_rest/views.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/views.py#L718-L743
def multipart_complete(self, multipart):
    """Complete a multipart upload.

    :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
        instance.
    :returns: A Flask response.
    """
    multipart.complete()
    db.session.commit()

    version_id = str(uuid.uuid4())

    return self.make_response(
        data=multipart,
        context={
            'class': MultipartObject,
            'bucket': multipart.bucket,
            'object_version_id': version_id,
        },
        # This will wait for the result, and send whitespace on the
        # connection until the task has finished (or max timeout reached).
        task_result=merge_multipartobject.delay(
            str(multipart.upload_id),
            version_id=version_id,
        ),
    )
[ "def", "multipart_complete", "(", "self", ",", "multipart", ")", ":", "multipart", ".", "complete", "(", ")", "db", ".", "session", ".", "commit", "(", ")", "version_id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "return", "self", ".", "m...
Complete a multipart upload. :param multipart: A :class:`invenio_files_rest.models.MultipartObject` instance. :returns: A Flask response.
[ "Complete", "a", "multipart", "upload", "." ]
python
train
saltstack/salt
salt/returners/couchbase_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/couchbase_return.py#L332-L338
def _format_jid_instance(jid, job):
    '''
    Return a properly formatted jid dict
    '''
    ret = _format_job_instance(job)
    ret.update({'StartTime': salt.utils.jid.jid_to_time(jid)})
    return ret
[ "def", "_format_jid_instance", "(", "jid", ",", "job", ")", ":", "ret", "=", "_format_job_instance", "(", "job", ")", "ret", ".", "update", "(", "{", "'StartTime'", ":", "salt", ".", "utils", ".", "jid", ".", "jid_to_time", "(", "jid", ")", "}", ")", ...
Return a properly formatted jid dict
[ "Return", "a", "properly", "formatted", "jid", "dict" ]
python
train
aws/sagemaker-python-sdk
src/sagemaker/workflow/airflow.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/workflow/airflow.py#L333-L381
def prepare_framework_container_def(model, instance_type, s3_operations):
    """Prepare the framework model container information. Specify related S3
    operations for Airflow to perform. (Upload `source_dir`)

    Args:
        model (sagemaker.model.FrameworkModel): The framework model
        instance_type (str): The EC2 instance type to deploy this Model to.
            For example, 'ml.p2.xlarge'.
        s3_operations (dict): The dict to specify S3 operations (upload `source_dir`).

    Returns:
        dict: The container information of this framework model.
    """
    deploy_image = model.image
    if not deploy_image:
        region_name = model.sagemaker_session.boto_session.region_name
        deploy_image = fw_utils.create_image_uri(
            region_name, model.__framework_name__, instance_type, model.framework_version, model.py_version)

    base_name = utils.base_name_from_image(deploy_image)
    model.name = model.name or utils.name_from_base(base_name)

    bucket = model.bucket or model.sagemaker_session._default_bucket
    script = os.path.basename(model.entry_point)
    key = '{}/source/sourcedir.tar.gz'.format(model.name)

    if model.source_dir and model.source_dir.lower().startswith('s3://'):
        code_dir = model.source_dir
        model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)
    else:
        code_dir = 's3://{}/{}'.format(bucket, key)
        model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)
        s3_operations['S3Upload'] = [{
            'Path': model.source_dir or script,
            'Bucket': bucket,
            'Key': key,
            'Tar': True
        }]

    deploy_env = dict(model.env)
    deploy_env.update(model._framework_env_vars())

    try:
        if model.model_server_workers:
            deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(model.model_server_workers)
    except AttributeError:
        # This applies to a FrameworkModel which is not SageMaker Deep Learning Framework Model
        pass

    return sagemaker.container_def(deploy_image, model.model_data, deploy_env)
[ "def", "prepare_framework_container_def", "(", "model", ",", "instance_type", ",", "s3_operations", ")", ":", "deploy_image", "=", "model", ".", "image", "if", "not", "deploy_image", ":", "region_name", "=", "model", ".", "sagemaker_session", ".", "boto_session", ...
Prepare the framework model container information. Specify related S3 operations for Airflow to perform. (Upload `source_dir`) Args: model (sagemaker.model.FrameworkModel): The framework model instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'. s3_operations (dict): The dict to specify S3 operations (upload `source_dir`). Returns: dict: The container information of this framework model.
[ "Prepare", "the", "framework", "model", "container", "information", ".", "Specify", "related", "S3", "operations", "for", "Airflow", "to", "perform", ".", "(", "Upload", "source_dir", ")" ]
python
train
PyCQA/astroid
astroid/rebuilder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L529-L537
def visit_exec(self, node, parent):
    """visit an Exec node by returning a fresh instance of it"""
    newnode = nodes.Exec(node.lineno, node.col_offset, parent)
    newnode.postinit(
        self.visit(node.body, newnode),
        _visit_or_none(node, "globals", self, newnode),
        _visit_or_none(node, "locals", self, newnode),
    )
    return newnode
[ "def", "visit_exec", "(", "self", ",", "node", ",", "parent", ")", ":", "newnode", "=", "nodes", ".", "Exec", "(", "node", ".", "lineno", ",", "node", ".", "col_offset", ",", "parent", ")", "newnode", ".", "postinit", "(", "self", ".", "visit", "(", ...
visit an Exec node by returning a fresh instance of it
[ "visit", "an", "Exec", "node", "by", "returning", "a", "fresh", "instance", "of", "it" ]
python
train
saltstack/salt
salt/cloud/clouds/digitalocean.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/digitalocean.py#L273-L546
def create(vm_):
    '''
    Create a single VM from a data dict
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'digitalocean',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    kwargs = {
        'name': vm_['name'],
        'size': get_size(vm_),
        'image': get_image(vm_),
        'region': get_location(vm_),
        'ssh_keys': [],
        'tags': []
    }

    # backwards compat
    ssh_key_name = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False
    )

    if ssh_key_name:
        kwargs['ssh_keys'].append(get_keyid(ssh_key_name))

    ssh_key_names = config.get_cloud_config_value(
        'ssh_key_names', vm_, __opts__, search_global=False, default=False
    )

    if ssh_key_names:
        for key in ssh_key_names.split(','):
            kwargs['ssh_keys'].append(get_keyid(key))

    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )

    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename \'{0}\' does not exist'.format(
                key_filename
            )
        )

    if not __opts__.get('ssh_agent', False) and key_filename is None:
        raise SaltCloudConfigError(
            'The DigitalOcean driver requires an ssh_key_file and an ssh_key_name '
            'because it does not supply a root password upon building the server.'
        )

    ssh_interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, search_global=False, default='public'
    )

    if ssh_interface in ['private', 'public']:
        log.info("ssh_interface: Setting interface for ssh to %s", ssh_interface)
        kwargs['ssh_interface'] = ssh_interface
    else:
        raise SaltCloudConfigError(
            "The DigitalOcean driver requires ssh_interface to be defined as 'public' or 'private'."
        )

    private_networking = config.get_cloud_config_value(
        'private_networking', vm_, __opts__, search_global=False, default=None,
    )

    if private_networking is not None:
        if not isinstance(private_networking, bool):
            raise SaltCloudConfigError("'private_networking' should be a boolean value.")
        kwargs['private_networking'] = private_networking

    if not private_networking and ssh_interface == 'private':
        raise SaltCloudConfigError(
            "The DigitalOcean driver requires ssh_interface if defined as 'private' "
            "then private_networking should be set as 'True'."
        )

    backups_enabled = config.get_cloud_config_value(
        'backups_enabled', vm_, __opts__, search_global=False, default=None,
    )

    if backups_enabled is not None:
        if not isinstance(backups_enabled, bool):
            raise SaltCloudConfigError("'backups_enabled' should be a boolean value.")
        kwargs['backups'] = backups_enabled

    ipv6 = config.get_cloud_config_value(
        'ipv6', vm_, __opts__, search_global=False, default=None,
    )

    if ipv6 is not None:
        if not isinstance(ipv6, bool):
            raise SaltCloudConfigError("'ipv6' should be a boolean value.")
        kwargs['ipv6'] = ipv6

    monitoring = config.get_cloud_config_value(
        'monitoring', vm_, __opts__, search_global=False, default=None,
    )

    if monitoring is not None:
        if not isinstance(monitoring, bool):
            raise SaltCloudConfigError("'monitoring' should be a boolean value.")
        kwargs['monitoring'] = monitoring

    kwargs['tags'] = config.get_cloud_config_value(
        'tags', vm_, __opts__, search_global=False, default=False
    )

    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False, default=None
    )
    if userdata_file is not None:
        try:
            with salt.utils.files.fopen(userdata_file, 'r') as fp_:
                kwargs['user_data'] = salt.utils.cloud.userdata_template(
                    __opts__, vm_, salt.utils.stringutils.to_unicode(fp_.read())
                )
        except Exception as exc:
            log.exception(
                'Failed to read userdata from %s: %s', userdata_file, exc)

    create_dns_record = config.get_cloud_config_value(
        'create_dns_record', vm_, __opts__, search_global=False, default=None,
    )

    if create_dns_record:
        log.info('create_dns_record: will attempt to write DNS records')
        default_dns_domain = None
        dns_domain_name = vm_['name'].split('.')
        if len(dns_domain_name) > 2:
            log.debug('create_dns_record: inferring default dns_hostname, dns_domain from minion name as FQDN')
            default_dns_hostname = '.'.join(dns_domain_name[:-2])
            default_dns_domain = '.'.join(dns_domain_name[-2:])
        else:
            log.debug("create_dns_record: can't infer dns_domain from %s", vm_['name'])
            default_dns_hostname = dns_domain_name[0]

        dns_hostname = config.get_cloud_config_value(
            'dns_hostname', vm_, __opts__, search_global=False, default=default_dns_hostname,
        )
        dns_domain = config.get_cloud_config_value(
            'dns_domain', vm_, __opts__, search_global=False, default=default_dns_domain,
        )
        if dns_hostname and dns_domain:
            log.info('create_dns_record: using dns_hostname="%s", dns_domain="%s"', dns_hostname, dns_domain)
            __add_dns_addr__ = lambda t, d: post_dns_record(dns_domain=dns_domain,
                                                            name=dns_hostname,
                                                            record_type=t,
                                                            record_data=d)
            log.debug('create_dns_record: %s', __add_dns_addr__)
        else:
            log.error('create_dns_record: could not determine dns_hostname and/or dns_domain')
            raise SaltCloudConfigError(
                '\'create_dns_record\' must be a dict specifying "domain" '
                'and "hostname" or the minion name must be an FQDN.'
            )

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    try:
        ret = create_node(kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on DIGITALOCEAN\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: %s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    def __query_node_data(vm_name):
        data = show_instance(vm_name, 'action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data['networks'].get('v4'):
            for network in data['networks']['v4']:
                if network['type'] == 'public':
                    return data
        return False

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'],),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    if not vm_.get('ssh_host'):
        vm_['ssh_host'] = None

    # add DNS records, set ssh_host, default to first found IP, preferring IPv4 for ssh bootstrap script target
    addr_families, dns_arec_types = (('v4', 'v6'), ('A', 'AAAA'))
    arec_map = dict(list(zip(addr_families, dns_arec_types)))
    for facing, addr_family, ip_address in [(net['type'], family, net['ip_address'])
                                            for family in addr_families
                                            for net in data['networks'][family]]:
        log.info('found %s IP%s interface for "%s"', facing, addr_family, ip_address)
        dns_rec_type = arec_map[addr_family]
        if facing == 'public':
            if create_dns_record:
                __add_dns_addr__(dns_rec_type, ip_address)
        if facing == ssh_interface:
            if not vm_['ssh_host']:
                vm_['ssh_host'] = ip_address

    if vm_['ssh_host'] is None:
        raise SaltCloudSystemExit(
            'No suitable IP addresses found for ssh minion bootstrapping: {0}'.format(repr(data['networks']))
        )

    log.debug(
        'Found public IP address to use for ssh minion bootstrapping: %s',
        vm_['ssh_host']
    )

    vm_['key_filename'] = key_filename
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data)
    )

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
[ "def", "create", "(", "vm_", ")", ":", "try", ":", "# Check for required profile parameters before sending any API calls.", "if", "vm_", "[", "'profile'", "]", "and", "config", ".", "is_profile_configured", "(", "__opts__", ",", "__active_provider_name__", "or", "'digit...
Create a single VM from a data dict
[ "Create", "a", "single", "VM", "from", "a", "data", "dict" ]
python
train
thomasantony/simplepipe
simplepipe.py
https://github.com/thomasantony/simplepipe/blob/c79d5f6ab27067e16d3d5d23364be5dd12448c04/simplepipe.py#L179-L195
def add_hook(self, name, function):
    """
    Adds a function to be called for hook of a given name.

    The function gets entire workspace as input and
    does not return anything.

    Example:
        def hook_fcn(workspace):
            pass
    """
    if not callable(function):
        # was `return ValueError(...)`, which handed the exception back to the
        # caller instead of raising it
        raise ValueError('Hook function should be callable')
    if name not in self.hooks:
        self.hooks[name] = []

    self.hooks[name].append(function)
    return self
[ "def", "add_hook", "(", "self", ",", "name", ",", "function", ")", ":", "if", "not", "callable", "(", "function", ")", ":", "return", "ValueError", "(", "'Hook function should be callable'", ")", "if", "name", "not", "in", "self", ".", "hooks", ":", "self"...
Adds a function to be called for hook of a given name. The function gets entire workspace as input and does not return anything. Example: def hook_fcn(workspace): pass
[ "Adds", "a", "function", "to", "be", "called", "for", "hook", "of", "a", "given", "name", "." ]
python
train
benmoran56/esper
esper.py
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L224-L233
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
    """Get an iterator for Entity, Component pairs.

    :param component_type: The Component type to retrieve.
    :return: An iterator for (Entity, Component) tuples.
    """
    entity_db = self._entities

    for entity in self._components.get(component_type, []):
        yield entity, entity_db[entity][component_type]
[ "def", "_get_component", "(", "self", ",", "component_type", ":", "Type", "[", "C", "]", ")", "->", "Iterable", "[", "Tuple", "[", "int", ",", "C", "]", "]", ":", "entity_db", "=", "self", ".", "_entities", "for", "entity", "in", "self", ".", "_compo...
Get an iterator for Entity, Component pairs. :param component_type: The Component type to retrieve. :return: An iterator for (Entity, Component) tuples.
[ "Get", "an", "iterator", "for", "Entity", "Component", "pairs", "." ]
python
train
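A usage sketch for the classic `esper.World` API of this era (newer esper releases moved to module-level functions, so this may not run against current versions):

    import esper
    from dataclasses import dataclass

    @dataclass
    class Position:
        x: float = 0.0
        y: float = 0.0

    world = esper.World()
    world.create_entity(Position(1.0, 2.0))
    for entity, position in world.get_component(Position):  # public wrapper over _get_component
        print(entity, position)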
quantopian/zipline
zipline/utils/events.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L373-L387
def calculate_dates(self, dt):
    """
    Given a date, find that day's open and period end (open + offset).
    """
    period_start, period_close = self.cal.open_and_close_for_session(
        self.cal.minute_to_session_label(dt),
    )

    # Align the market open and close times here with the execution times
    # used by the simulation clock. This ensures that scheduled functions
    # trigger at the correct times.
    self._period_start = self.cal.execution_time_from_open(period_start)
    self._period_close = self.cal.execution_time_from_close(period_close)

    self._period_end = self._period_start + self.offset - self._one_minute
[ "def", "calculate_dates", "(", "self", ",", "dt", ")", ":", "period_start", ",", "period_close", "=", "self", ".", "cal", ".", "open_and_close_for_session", "(", "self", ".", "cal", ".", "minute_to_session_label", "(", "dt", ")", ",", ")", "# Align the market ...
Given a date, find that day's open and period end (open + offset).
[ "Given", "a", "date", "find", "that", "day", "s", "open", "and", "period", "end", "(", "open", "+", "offset", ")", "." ]
python
train
mushkevych/synergy_odm
odm/fields.py
https://github.com/mushkevych/synergy_odm/blob/3a5ac37333fc6391478564ef653a4be38e332f68/odm/fields.py#L141-L146
def validate(self, value):
    """Make sure that value is of the right type"""
    if not isinstance(value, self.nested_klass):
        self.raise_error('NestedClass is of the wrong type: {0} vs expected {1}'
                         .format(value.__class__.__name__, self.nested_klass.__name__))
    super(NestedDocumentField, self).validate(value)
[ "def", "validate", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "self", ".", "nested_klass", ")", ":", "self", ".", "raise_error", "(", "'NestedClass is of the wrong type: {0} vs expected {1}'", ".", "format", "(", "value", ...
Make sure that value is of the right type
[ "Make", "sure", "that", "value", "is", "of", "the", "right", "type" ]
python
train
tylerbutler/propane
propane/django/fields.py
https://github.com/tylerbutler/propane/blob/6c404285ab8d78865b7175a5c8adf8fae12d6be5/propane/django/fields.py#L129-L148
def get_db_prep_value(self, value):
    """
    Pickle and b64encode the object, optionally compressing it.

    The pickling protocol is specified explicitly (by default 2),
    rather than as -1 or HIGHEST_PROTOCOL, because we don't want the
    protocol to change over time. If it did, ``exact`` and ``in``
    lookups would likely fail, since pickle would now be generating
    a different string.
    """
    if value is not None and not isinstance(value, PickledObject):
        # We call force_unicode here explicitly, so that the encoded string
        # isn't rejected by the postgresql_psycopg2 backend. Alternatively,
        # we could have just registered PickledObject with the psycopg
        # marshaller (telling it to store it like it would a string), but
        # since both of these methods result in the same value being stored,
        # doing things this way is much easier.
        value = force_unicode(dbsafe_encode(value, self.compress))
    return value
[ "def", "get_db_prep_value", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", "and", "not", "isinstance", "(", "value", ",", "PickledObject", ")", ":", "# We call force_unicode here explicitly, so that the encoded string", "# isn't rejected by the...
Pickle and b64encode the object, optionally compressing it. The pickling protocol is specified explicitly (by default 2), rather than as -1 or HIGHEST_PROTOCOL, because we don't want the protocol to change over time. If it did, ``exact`` and ``in`` lookups would likely fail, since pickle would now be generating a different string.
[ "Pickle", "and", "b64encode", "the", "object", "optionally", "compressing", "it", "." ]
python
train
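`dbsafe_encode` itself is not shown in the record; a plausible reconstruction of the pickle-then-b64 helper it relies on (the name, protocol default, and compression flag are assumptions drawn from the docstring, not the real propane source):

    import pickle
    import zlib
    from base64 import b64encode


    def dbsafe_encode(value, compress=False, protocol=2):
        # hypothetical reconstruction of the helper used by get_db_prep_value
        data = pickle.dumps(value, protocol)
        if compress:
            data = zlib.compress(data)
        return b64encode(data).decode('ascii')

    print(dbsafe_encode({'a': 1}))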
materialsproject/pymatgen
pymatgen/io/adf.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/adf.py#L277-L303
def remove_option(self, option):
    """
    Remove an option.

    Parameters
    ----------
    option : str or int
        The name (str) or index (int) of the option to remove.

    Raises
    ------
    TypeError
        If the option has a wrong type.
    """
    if len(self.options) > 0:
        if self._sized_op:
            if not isinstance(option, str):
                raise TypeError("``option`` should be a name string!")
            for i in range(len(self.options)):
                if self.options[i][0] == option:
                    self.options.pop(i)
                    break
        else:
            if not isinstance(option, int):
                raise TypeError("``option`` should be an integer index!")
            self.options.pop(option)
[ "def", "remove_option", "(", "self", ",", "option", ")", ":", "if", "len", "(", "self", ".", "options", ")", ">", "0", ":", "if", "self", ".", "_sized_op", ":", "if", "not", "isinstance", "(", "option", ",", "str", ")", ":", "raise", "TypeError", "...
Remove an option. Parameters ---------- option : str or int The name (str) or index (int) of the option to remove. Raises ------ TypeError If the option has a wrong type.
[ "Remove", "an", "option", "." ]
python
train
mozilla/configman
configman/def_sources/for_argparse.py
https://github.com/mozilla/configman/blob/83159fed61cc4cbbe5a4a6a00d3acad8a0c39c96/configman/def_sources/for_argparse.py#L171-L192
def add_parser(self, *args, **kwargs):
    """each time a subparser action is used to create a new parser object
    we must save the original args & kwargs.  In a later phase of configman,
    we'll need to reproduce the subparsers exactly without resorting to
    copying.  We save the args & kwargs in the 'foreign_data' section of the
    configman option that corresponds with the subparser action."""
    command_name = args[0]
    new_kwargs = kwargs.copy()
    new_kwargs['configman_subparsers_option'] = self._configman_option
    new_kwargs['subparser_name'] = command_name
    subparsers = self._configman_option.foreign_data.argparse.subparsers
    a_subparser = super(ConfigmanSubParsersAction, self).add_parser(
        *args,
        **new_kwargs
    )
    subparsers[command_name] = DotDict({
        "args": args,
        "kwargs": new_kwargs,
        "subparser": a_subparser
    })
    return a_subparser
[ "def", "add_parser", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "command_name", "=", "args", "[", "0", "]", "new_kwargs", "=", "kwargs", ".", "copy", "(", ")", "new_kwargs", "[", "'configman_subparsers_option'", "]", "=", "self", ...
each time a subparser action is used to create a new parser object we must save the original args & kwargs. In a later phase of configman, we'll need to reproduce the subparsers exactly without resorting to copying. We save the args & kwargs in the 'foreign_data' section of the configman option that corresponds with the subparser action.
[ "each", "time", "a", "subparser", "action", "is", "used", "to", "create", "a", "new", "parser", "object", "we", "must", "save", "the", "original", "args", "&", "kwargs", ".", "In", "a", "later", "phase", "of", "configman", "we", "ll", "need", "to", "re...
python
train
OSSOS/MOP
src/jjk/preproc/findTriplets.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/findTriplets.py#L134-L151
def getExpnums(pointing, night=None):
    """Get all exposures of specified pointing ID.

    Default is to return a list of all exposure numbers"""
    if night:
        # fold the AND into the optional clause; the original appended " AND "
        # unconditionally, which produced invalid SQL when night was None
        night = " AND floor(e.mjdate-0.0833)=%d " % (night)
    else:
        night = ''
    sql = "SELECT e.expnum "
    sql = sql + "FROM bucket.exposure e "
    sql = sql + "JOIN bucket.association a on e.expnum=a.expnum "
    sql = sql + "WHERE a.pointing=" + str(pointing) + night
    sql = sql + " ORDER BY mjdate, uttime DESC "
    cfeps.execute(sql)
    return (cfeps.fetchall())
[ "def", "getExpnums", "(", "pointing", ",", "night", "=", "None", ")", ":", "if", "night", ":", "night", "=", "\" floor(e.mjdate-0.0833)=%d \"", "%", "(", "night", ")", "else", ":", "night", "=", "''", "sql", "=", "\"SELECT e.expnum \"", "sql", "=", "sql", ...
Get all exposures of specified pointing ID. Default is to return a list of all exposure numbers
[ "Get", "all", "exposures", "of", "specified", "pointing", "ID", "." ]
python
train
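String-built SQL like the above is also injection-prone. A parameterized sketch of the same base query, assuming `cfeps` is a standard DB-API cursor:

    sql = ("SELECT e.expnum FROM bucket.exposure e "
           "JOIN bucket.association a ON e.expnum=a.expnum "
           "WHERE a.pointing=%s ORDER BY mjdate, uttime DESC")
    cfeps.execute(sql, (pointing,))
    rows = cfeps.fetchall()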
StackStorm/pybind
pybind/slxos/v17s_1_02/brocade_lag_rpc/get_portchannel_info_by_intf/output/lacp/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/brocade_lag_rpc/get_portchannel_info_by_intf/output/lacp/__init__.py#L671-L694
def _set_oper_state(self, v, load=False):
    """
    Setter method for oper_state, mapped from YANG variable /brocade_lag_rpc/get_portchannel_info_by_intf/output/lacp/oper_state (lacp-state)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_oper_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_oper_state() directly.

    YANG Description: The Operational state
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'collecting': {'value': 4}, u'aggregation': {'value': 2}, u'distributing': {'value': 5}, u'synchronization': {'value': 3}, u'defaulted': {'value': 6}, u'timeout': {'value': 1}, u'activity': {'value': 0}, u'expired': {'value': 7}},), is_leaf=True, yang_name="oper-state", rest_name="oper-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-lag', defining_module='brocade-lag', yang_type='lacp-state', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """oper_state must be of a type compatible with lacp-state""",
            'defined-type': "brocade-lag:lacp-state",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'collecting': {'value': 4}, u'aggregation': {'value': 2}, u'distributing': {'value': 5}, u'synchronization': {'value': 3}, u'defaulted': {'value': 6}, u'timeout': {'value': 1}, u'activity': {'value': 0}, u'expired': {'value': 7}},), is_leaf=True, yang_name="oper-state", rest_name="oper-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-lag', defining_module='brocade-lag', yang_type='lacp-state', is_config=True)""",
        })

    self.__oper_state = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_oper_state", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "bas...
Setter method for oper_state, mapped from YANG variable /brocade_lag_rpc/get_portchannel_info_by_intf/output/lacp/oper_state (lacp-state) If this variable is read-only (config: false) in the source YANG file, then _set_oper_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_oper_state() directly. YANG Description: The Operational state
[ "Setter", "method", "for", "oper_state", "mapped", "from", "YANG", "variable", "/", "brocade_lag_rpc", "/", "get_portchannel_info_by_intf", "/", "output", "/", "lacp", "/", "oper_state", "(", "lacp", "-", "state", ")", "If", "this", "variable", "is", "read", "...
python
train
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L1180-L1198
def normalize(self):
    """
    Shift actor's center of mass at origin and scale its average size to unit.
    """
    cm = self.centerOfMass()
    coords = self.coordinates()
    if not len(coords):
        return
    pts = coords - cm
    xyz2 = np.sum(pts * pts, axis=0)
    scale = 1 / np.sqrt(np.sum(xyz2) / len(pts))
    t = vtk.vtkTransform()
    t.Scale(scale, scale, scale)
    t.Translate(-cm)
    tf = vtk.vtkTransformPolyDataFilter()
    tf.SetInputData(self.poly)
    tf.SetTransform(t)
    tf.Update()
    return self.updateMesh(tf.GetOutput())
[ "def", "normalize", "(", "self", ")", ":", "cm", "=", "self", ".", "centerOfMass", "(", ")", "coords", "=", "self", ".", "coordinates", "(", ")", "if", "not", "len", "(", "coords", ")", ":", "return", "pts", "=", "coords", "-", "cm", "xyz2", "=", ...
Shift actor's center of mass at origin and scale its average size to unit.
[ "Shift", "actor", "s", "center", "of", "mass", "at", "origin", "and", "scale", "its", "average", "size", "to", "unit", "." ]
python
train
tjcsl/ion
intranet/apps/users/models.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/users/models.py#L735-L745
def absence_count(self):
    """Return the user's absence count.

    If the user has no absences
    or is not a signup user, returns 0.

    """
    # FIXME: remove recursive dep
    from ..eighth.models import EighthSignup

    return EighthSignup.objects.filter(user=self, was_absent=True, scheduled_activity__attendance_taken=True).count()
[ "def", "absence_count", "(", "self", ")", ":", "# FIXME: remove recursive dep", "from", ".", ".", "eighth", ".", "models", "import", "EighthSignup", "return", "EighthSignup", ".", "objects", ".", "filter", "(", "user", "=", "self", ",", "was_absent", "=", "Tru...
Return the user's absence count. If the user has no absences or is not a signup user, returns 0.
[ "Return", "the", "user", "s", "absence", "count", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/defects/core.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/defects/core.py#L159-L170
def multiplicity(self):
    """
    Returns the multiplicity of a defect site within the structure
    (needed for concentration analysis)
    """
    sga = SpacegroupAnalyzer(self.bulk_structure)
    periodic_struc = sga.get_symmetrized_structure()
    poss_deflist = sorted(
        periodic_struc.get_sites_in_sphere(self.site.coords, 2, include_index=True),
        key=lambda x: x[1])
    defindex = poss_deflist[0][2]

    equivalent_sites = periodic_struc.find_equivalent_sites(self.bulk_structure[defindex])
    return len(equivalent_sites)
[ "def", "multiplicity", "(", "self", ")", ":", "sga", "=", "SpacegroupAnalyzer", "(", "self", ".", "bulk_structure", ")", "periodic_struc", "=", "sga", ".", "get_symmetrized_structure", "(", ")", "poss_deflist", "=", "sorted", "(", "periodic_struc", ".", "get_sit...
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
[ "Returns", "the", "multiplicity", "of", "a", "defect", "site", "within", "the", "structure", "(", "needed", "for", "concentration", "analysis", ")" ]
python
train
python-bonobo/bonobo
bonobo/execution/strategies/__init__.py
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/execution/strategies/__init__.py#L28-L53
def create_strategy(name=None): """ Create a strategy, or just return it if it's already one. :param name: :return: Strategy """ import logging from bonobo.execution.strategies.base import Strategy if isinstance(name, Strategy): return name if name is None: name = DEFAULT_STRATEGY logging.debug("Creating execution strategy {!r}...".format(name)) try: factory = STRATEGIES[name] except KeyError as exc: raise RuntimeError( "Invalid strategy {}. Available choices: {}.".format(repr(name), ", ".join(sorted(STRATEGIES.keys()))) ) from exc return factory()
[ "def", "create_strategy", "(", "name", "=", "None", ")", ":", "import", "logging", "from", "bonobo", ".", "execution", ".", "strategies", ".", "base", "import", "Strategy", "if", "isinstance", "(", "name", ",", "Strategy", ")", ":", "return", "name", "if",...
Create a strategy, or just return it if it's already one. :param name: :return: Strategy
[ "Create", "a", "strategy", "or", "just", "returns", "it", "if", "it", "s", "already", "one", "." ]
python
train
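`create_strategy` above is a registry-backed factory: a dict maps names to classes, a default applies when no name is given, and unknown names fail with the list of valid choices. A self-contained sketch of that pattern; the strategy classes and registry here are invented, not bonobo's real ones.

class Strategy: ...
class NaiveStrategy(Strategy): ...
class ThreadPoolStrategy(Strategy): ...

STRATEGIES = {"naive": NaiveStrategy, "threadpool": ThreadPoolStrategy}
DEFAULT_STRATEGY = "threadpool"

def create_strategy(name=None):
    if isinstance(name, Strategy):
        return name  # already an instance, pass it through
    name = name or DEFAULT_STRATEGY
    try:
        factory = STRATEGIES[name]
    except KeyError as exc:
        raise RuntimeError(
            "Invalid strategy {!r}. Available choices: {}.".format(
                name, ", ".join(sorted(STRATEGIES)))) from exc
    return factory()

print(type(create_strategy()).__name__)         # ThreadPoolStrategy
print(type(create_strategy("naive")).__name__)  # NaiveStrategy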
iterative/dvc
dvc/analytics.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/analytics.py#L247-L261
def send_cmd(cmd, args, ret): """Collect and send analytics for CLI command. Args: cmd: the CLI command being run. args (list): parsed args for the CLI command. ret (int): return value of the CLI command. """ from dvc.daemon import daemon if not Analytics._is_enabled(cmd): return analytics = Analytics() analytics.collect_cmd(args, ret) daemon(["analytics", analytics.dump()])
[ "def", "send_cmd", "(", "cmd", ",", "args", ",", "ret", ")", ":", "from", "dvc", ".", "daemon", "import", "daemon", "if", "not", "Analytics", ".", "_is_enabled", "(", "cmd", ")", ":", "return", "analytics", "=", "Analytics", "(", ")", "analytics", ".",...
Collect and send analytics for CLI command. Args: cmd: the CLI command being run. args (list): parsed args for the CLI command. ret (int): return value of the CLI command.
[ "Collect", "and", "send", "analytics", "for", "CLI", "command", "." ]
python
train
openego/eTraGo
etrago/tools/plot.py
https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/plot.py#L719-L789
def load_hours(network, min_load=0.9, max_load=1, boundaries=[0, 8760]): """Plot number of hours with line loading in selected range. Parameters ---------- network: PyPSA network container Holds topology of grid including results from powerflow analysis min_load: float Choose lower bound of relative load max_load: float Choose upper bound of relative load boundaries: array Set boundaries of heatmap axis """ cmap_line = plt.cm.jet cmap_link = plt.cm.jet array_line = [['Line'] * len(network.lines), network.lines.index] load_lines = pd.Series(((abs(network.lines_t.p0[( abs(network.lines_t.p0.mul(network.snapshot_weightings, axis=0)) / network.lines.s_nom_opt >= min_load) & ( abs(network.lines_t.p0.mul(network.snapshot_weightings, axis=0)) / network.lines.s_nom_opt <= max_load)]) / abs(network.lines_t.p0[( abs(network.lines_t.p0) / network.lines.s_nom_opt >= min_load) & (abs(network.lines_t.p0) / network.lines.s_nom_opt <= max_load)])) .sum()).data, index=array_line) array_link = [['Link'] * len(network.links), network.links.index] load_links = pd.Series(((abs(network.links_t.p0[( abs(network.links_t.p0.mul(network.snapshot_weightings, axis=0)) / network.links.p_nom_opt >= min_load) & ( abs(network.links_t.p0.mul(network.snapshot_weightings, axis=0)) / network.links.p_nom_opt <= max_load)]) / abs(network.links_t.p0[( abs(network.links_t.p0) / network.links.p_nom_opt >= min_load) & (abs(network.links_t.p0) / network.links.p_nom_opt <= max_load)])) .sum()).data, index=array_link) load_hours = load_lines.append(load_links) ll = network.plot( line_colors=load_hours, line_cmap={ 'Line': cmap_line, 'Link': cmap_link}, bus_sizes=0, title="Number of hours with more than 90% load", line_widths=2) v1 = np.linspace(boundaries[0], boundaries[1], 101) v = np.linspace(boundaries[0], boundaries[1], 101) cb_Link = plt.colorbar(ll[2], boundaries=v1, ticks=v[0:101:10]) cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10]) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb.set_label('Number of hours')
[ "def", "load_hours", "(", "network", ",", "min_load", "=", "0.9", ",", "max_load", "=", "1", ",", "boundaries", "=", "[", "0", ",", "8760", "]", ")", ":", "cmap_line", "=", "plt", ".", "cm", ".", "jet", "cmap_link", "=", "plt", ".", "cm", ".", "j...
Plot number of hours with line loading in selected range. Parameters ---------- network: PyPSA network container Holds topology of grid including results from powerflow analysis min_load: float Choose lower bound of relative load max_load: float Choose upper bound of relative load boundaries: array Set boundaries of heatmap axis
[ "Plot", "number", "of", "hours", "with", "line", "loading", "in", "selected", "range", ".", "Parameters", "----------", "network", ":", "PyPSA", "network", "container", "Holds", "topology", "of", "grid", "including", "results", "from", "powerflow", "analysis", "...
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/magic_arguments.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/magic_arguments.py#L95-L121
def construct_parser(magic_func): """ Construct an argument parser using the function decorations. """ kwds = getattr(magic_func, 'argcmd_kwds', {}) if 'description' not in kwds: kwds['description'] = getattr(magic_func, '__doc__', None) arg_name = real_name(magic_func) parser = MagicArgumentParser(arg_name, **kwds) # Reverse the list of decorators in order to apply them in the # order in which they appear in the source. group = None for deco in magic_func.decorators[::-1]: result = deco.add_to_parser(parser, group) if result is not None: group = result # Replace the starting 'usage: ' with IPython's %. help_text = parser.format_help() if help_text.startswith('usage: '): help_text = help_text.replace('usage: ', '%', 1) else: help_text = '%' + help_text # Replace the magic function's docstring with the full help text. magic_func.__doc__ = help_text return parser
[ "def", "construct_parser", "(", "magic_func", ")", ":", "kwds", "=", "getattr", "(", "magic_func", ",", "'argcmd_kwds'", ",", "{", "}", ")", "if", "'description'", "not", "in", "kwds", ":", "kwds", "[", "'description'", "]", "=", "getattr", "(", "magic_fun...
Construct an argument parser using the function decorations.
[ "Construct", "an", "argument", "parser", "using", "the", "function", "decorations", "." ]
python
test
tijme/not-your-average-web-crawler
nyawc/scrapers/HTMLSoupFormScraper.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/scrapers/HTMLSoupFormScraper.py#L102-L121
def __get_form_data(self, soup): """Build a form data dict from the given form. Args: soup (obj): The BeautifulSoup form. Returns: obj: The form data (key/value). """ elements = self.__get_valid_form_data_elements(soup) form_data = self.__get_default_form_data_input(elements) callback = self.options.callbacks.form_before_autofill action = callback(self.queue_item, elements, form_data) if action == CrawlerActions.DO_AUTOFILL_FORM: self.__autofill_form_data(form_data, elements) return form_data
[ "def", "__get_form_data", "(", "self", ",", "soup", ")", ":", "elements", "=", "self", ".", "__get_valid_form_data_elements", "(", "soup", ")", "form_data", "=", "self", ".", "__get_default_form_data_input", "(", "elements", ")", "callback", "=", "self", ".", ...
Build a form data dict from the given form. Args: soup (obj): The BeautifulSoup form. Returns: obj: The form data (key/value).
[ "Build", "a", "form", "data", "dict", "from", "the", "given", "form", "." ]
python
train
puiterwijk/flask-oidc
flask_oidc/__init__.py
https://github.com/puiterwijk/flask-oidc/blob/7f16e27b926fc12953d6b2ae78a9b9cc9b8d1769/flask_oidc/__init__.py#L669-L683
def custom_callback(self, view_func): """ Wrapper function to use a custom callback. The custom OIDC callback will get the custom state field passed in with redirect_to_auth_server. """ @wraps(view_func) def decorated(*args, **kwargs): plainreturn, data = self._process_callback('custom') if plainreturn: return data else: return view_func(data, *args, **kwargs) self._custom_callback = decorated return decorated
[ "def", "custom_callback", "(", "self", ",", "view_func", ")", ":", "@", "wraps", "(", "view_func", ")", "def", "decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "plainreturn", ",", "data", "=", "self", ".", "_process_callback", "(", "'c...
Wrapper function to use a custom callback. The custom OIDC callback will get the custom state field passed in with redirect_to_auth_server.
[ "Wrapper", "function", "to", "use", "a", "custom", "callback", ".", "The", "custom", "OIDC", "callback", "will", "get", "the", "custom", "state", "field", "passed", "in", "with", "redirect_to_auth_server", "." ]
python
train
pantsbuild/pants
src/python/pants/base/project_tree.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/project_tree.py#L86-L91
def scandir(self, relpath): """Return paths relative to the root, which are in the given directory and not ignored.""" if self.isignored(relpath, directory=True): self._raise_access_ignored(relpath) return self._filter_ignored(self._scandir_raw(relpath), selector=lambda e: e.path)
[ "def", "scandir", "(", "self", ",", "relpath", ")", ":", "if", "self", ".", "isignored", "(", "relpath", ",", "directory", "=", "True", ")", ":", "self", ".", "_raise_access_ignored", "(", "relpath", ")", "return", "self", ".", "_filter_ignored", "(", "s...
Return paths relative to the root, which are in the given directory and not ignored.
[ "Return", "paths", "relative", "to", "the", "root", "which", "are", "in", "the", "given", "directory", "and", "not", "ignored", "." ]
python
train
denisenkom/pytds
src/pytds/tds_types.py
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L1628-L1635
def to_pydatetime(self): """ Converts datetimeoffset object into Python's datetime.datetime object @return: time zone aware datetime.datetime """ dt = datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime()) from .tz import FixedOffsetTimezone return dt.replace(tzinfo=_utc).astimezone(FixedOffsetTimezone(self._offset))
[ "def", "to_pydatetime", "(", "self", ")", ":", "dt", "=", "datetime", ".", "datetime", ".", "combine", "(", "self", ".", "_date", ".", "to_pydate", "(", ")", ",", "self", ".", "_time", ".", "to_pytime", "(", ")", ")", "from", ".", "tz", "import", "...
Converts datetimeoffset object into Python's datetime.datetime object @return: time zone aware datetime.datetime
[ "Converts", "datetimeoffset", "object", "into", "Python", "s", "datetime", ".", "datetime", "object" ]
python
train
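A stdlib-only sketch of the same conversion idea as `to_pydatetime` above, using `datetime.timezone` in place of pytds' internal `FixedOffsetTimezone`; the function name, date, time, and minute offset are made up for illustration.

import datetime

def to_aware(date, time, offset_minutes):
    # Combine date and time, mark the result as UTC, then shift it
    # into the stored fixed offset.
    dt = datetime.datetime.combine(date, time)
    tz = datetime.timezone(datetime.timedelta(minutes=offset_minutes))
    return dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz)

print(to_aware(datetime.date(2024, 1, 2), datetime.time(12, 0), offset_minutes=120))
# 2024-01-02 14:00:00+02:00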
nicolargo/glances
glances/plugins/glances_ports.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_ports.py#L130-L152
def get_web_alert(self, web, header="", log=False): """Return the alert status relative to the web/url scan return value.""" ret = 'OK' if web['status'] is None: ret = 'CAREFUL' elif web['status'] not in [200, 301, 302]: ret = 'CRITICAL' elif web['rtt_warning'] is not None and web['elapsed'] > web['rtt_warning']: ret = 'WARNING' # Get stat name stat_name = self.get_stat_name(header=header) # Manage threshold self.manage_threshold(stat_name, ret) # Manage action self.manage_action(stat_name, ret.lower(), header, web[self.get_key()]) return ret
[ "def", "get_web_alert", "(", "self", ",", "web", ",", "header", "=", "\"\"", ",", "log", "=", "False", ")", ":", "ret", "=", "'OK'", "if", "web", "[", "'status'", "]", "is", "None", ":", "ret", "=", "'CAREFUL'", "elif", "web", "[", "'status'", "]",...
Return the alert status relative to the web/url scan return value.
[ "Return", "the", "alert", "status", "relative", "to", "the", "web", "/", "url", "scan", "return", "value", "." ]
python
train
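Stripped of the Glances threshold/action bookkeeping, the classification in `get_web_alert` reduces to a few ordered checks; a standalone sketch of just that logic, with `classify` as a hypothetical name.

def classify(status, elapsed, rtt_warning=None):
    if status is None:
        return "CAREFUL"    # no answer at all
    if status not in (200, 301, 302):
        return "CRITICAL"   # unexpected HTTP status
    if rtt_warning is not None and elapsed > rtt_warning:
        return "WARNING"    # reachable but slower than the RTT threshold
    return "OK"

print(classify(200, 0.1))                    # OK
print(classify(500, 0.1))                    # CRITICAL
print(classify(200, 3.0, rtt_warning=1.0))   # WARNING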
DataBiosphere/dsub
dsub/lib/job_model.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/lib/job_model.py#L105-L113
def validate_param_name(name, param_type): """Validate that the name follows posix conventions for env variables.""" # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_235 # # 3.235 Name # In the shell command language, a word consisting solely of underscores, # digits, and alphabetics from the portable character set. if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name): raise ValueError('Invalid %s: %s' % (param_type, name))
[ "def", "validate_param_name", "(", "name", ",", "param_type", ")", ":", "# http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_235", "#", "# 3.235 Name", "# In the shell command language, a word consisting solely of underscores,", "# digits, and alphabetics from th...
Validate that the name follows posix conventions for env variables.
[ "Validate", "that", "the", "name", "follows", "posix", "conventions", "for", "env", "variables", "." ]
python
valid
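The validation above is a single regex check; a quick standalone demonstration of which names pass, re-declaring the same helper so the snippet runs on its own.

import re

def validate_param_name(name, param_type):
    # POSIX-style name: letters, digits, underscores; no leading digit.
    if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
        raise ValueError('Invalid %s: %s' % (param_type, name))

validate_param_name('MY_VAR_1', 'env')        # passes silently
try:
    validate_param_name('1bad-name', 'env')   # digit first, contains '-'
except ValueError as e:
    print(e)                                  # Invalid env: 1bad-name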
honeynet/beeswarm
beeswarm/drones/client/baits/ssh.py
https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/drones/client/baits/ssh.py#L51-L90
def start(self): """ Launches a new SSH client session on the server taken from the `self.options` dict. """ username = self.options['username'] password = self.options['password'] server_host = self.options['server'] server_port = self.options['port'] honeypot_id = self.options['honeypot_id'] session = self.create_session(server_host, server_port, honeypot_id) self.sessions[session.id] = session logger.debug( 'Sending ssh bait session to {0}:{1}. (bait id: {2})'.format(server_host, server_port, session.id)) try: self.connect_login() session.did_connect = True # TODO: Handle failed login session.add_auth_attempt('plaintext', True, username=username, password=password) session.did_login = True except (SSHException, AuthenticationFailed) as err: logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err)))) else: command_count = 0 command_limit = random.randint(6, 11) while command_count < command_limit: command_count += 1 self.sense() comm, param = self.decide() self.act(comm, param) gevent.sleep(random.uniform(0.4, 5.6)) self.logout() session.did_complete = True finally: session.alldone = True session.end_session() self.comm_chan.close()
[ "def", "start", "(", "self", ")", ":", "username", "=", "self", ".", "options", "[", "'username'", "]", "password", "=", "self", ".", "options", "[", "'password'", "]", "server_host", "=", "self", ".", "options", "[", "'server'", "]", "server_port", "=",...
Launches a new SSH client session on the server taken from the `self.options` dict.
[ "Launches", "a", "new", "SSH", "client", "session", "on", "the", "server", "taken", "from", "the", "self", ".", "options", "dict", "." ]
python
train
pelotoncycle/cycle_detector
cycle_detector.py
https://github.com/pelotoncycle/cycle_detector/blob/a7c1a2e321e232de10f5862f6042471a3c60beb9/cycle_detector.py#L218-L252
def gosper(seqs, f=None, start=None, key=lambda x: x): """Gosper's cycle detector See help(cycle_detector) for more context. Args: seqs: A sequence to detect cycles in. f, start: Function and starting state for finite state machine Yields: Values yielded by the input sequence if it terminates, undefined if a cycle is found. Raises: CycleDetected if a cycle is found. Unlike Floyd's and Brent's, Gosper's algorithm can only detect the period of a cycle. It cannot compute the position of its first element. """ tab = [] for c, value in enumerate(seqs[0], start=1): yield value try: e = tab.index(key(value)) raise CycleDetected( period=c - ((((c >> e) - 1) | 1) << e)) except ValueError: try: tab[(c ^ (c - 1)).bit_length() - 1] = key(value) except IndexError: tab.append(value)
[ "def", "gosper", "(", "seqs", ",", "f", "=", "None", ",", "start", "=", "None", ",", "key", "=", "lambda", "x", ":", "x", ")", ":", "tab", "=", "[", "]", "for", "c", ",", "value", "in", "enumerate", "(", "seqs", "[", "0", "]", ",", "start", ...
Gosper's cycle detector See help(cycle_detector) for more context. Args: seqs: A sequence to detect cycles in. f, start: Function and starting state for finite state machine Yields: Values yielded by the input sequence if it terminates, undefined if a cycle is found. Raises: CycleDetected if a cycle is found. Unlike Floyd's and Brent's, Gosper's algorithm can only detect the period of a cycle. It cannot compute the position of its first element.
[ "Gosper", "s", "cycle", "detector" ]
python
test
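For contrast with Gosper's table-based detector above: the classic Floyd tortoise-and-hare variant on a function-iteration sequence recovers both the period and the start of the cycle. This is the textbook algorithm sketched independently, not part of the pelotoncycle package.

def floyd(f, x0):
    # Phase 1: find a repetition x_i == x_2i somewhere inside the cycle.
    tortoise, hare = f(x0), f(f(x0))
    while tortoise != hare:
        tortoise, hare = f(tortoise), f(f(hare))
    # Phase 2: find mu, the index of the first element of the cycle.
    mu, tortoise = 0, x0
    while tortoise != hare:
        tortoise, hare = f(tortoise), f(hare)
        mu += 1
    # Phase 3: find lam, the cycle's period.
    lam, hare = 1, f(tortoise)
    while tortoise != hare:
        hare = f(hare)
        lam += 1
    return lam, mu

print(floyd(lambda x: (x * x + 1) % 255, 3))  # (period, start index)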
has2k1/plydata
plydata/expressions.py
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/expressions.py#L58-L101
def evaluate(self, data, env): """ Evaluate statement Parameters ---------- data : pandas.DataFrame Data in whose namespace the statement will be evaluated. Typically, this is a group dataframe. Returns ------- out : object Result of the evaluation. """ def n(): """ Return number of rows in groups This function is part of the public API """ return len(data) if isinstance(self.stmt, str): # Add function n() that computes the # size of the group data to the inner namespace. if self._has_n_func: namespace = dict(data, n=n) else: namespace = data # Avoid obvious keywords e.g if a column # is named class if self.stmt not in KEYWORDS: value = env.eval( self.stmt, source_name='Expression.evaluate', inner_namespace=namespace) else: value = namespace[self.stmt] elif callable(self.stmt): value = self.stmt(data) else: value = self.stmt return value
[ "def", "evaluate", "(", "self", ",", "data", ",", "env", ")", ":", "def", "n", "(", ")", ":", "\"\"\"\n Return number of rows in groups\n\n This function is part of the public API\n \"\"\"", "return", "len", "(", "data", ")", "if", "isinst...
Evaluate statement Parameters ---------- data : pandas.DataFrame Data in whose namespace the statement will be evaluated. Typically, this is a group dataframe. Returns ------- out : object Result of the evaluation.
[ "Evaluate", "statement" ]
python
train
ml4ai/delphi
delphi/apps/rest_api/api.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/apps/rest_api/api.py#L63-L68
def deleteICM(uuid: str): """ Deletes an ICM""" _metadata = ICMMetadata.query.filter_by(id=uuid).first() db.session.delete(_metadata) db.session.commit() return ("", 204)
[ "def", "deleteICM", "(", "uuid", ":", "str", ")", ":", "_metadata", "=", "ICMMetadata", ".", "query", ".", "filter_by", "(", "id", "=", "uuid", ")", ".", "first", "(", ")", "db", ".", "session", ".", "delete", "(", "_metadata", ")", "db", ".", "ses...
Deletes an ICM
[ "Deletes", "an", "ICM" ]
python
train
saltstack/salt
salt/utils/job.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/job.py#L119-L136
def store_minions(opts, jid, minions, mminion=None, syndic_id=None): ''' Store additional minions matched on lower-level masters using the configured master_job_cache ''' if mminion is None: mminion = salt.minion.MasterMinion(opts, states=False, rend=False) job_cache = opts['master_job_cache'] minions_fstr = '{0}.save_minions'.format(job_cache) try: mminion.returners[minions_fstr](jid, minions, syndic_id=syndic_id) except KeyError: raise KeyError( 'Returner \'{0}\' does not support function save_minions'.format( job_cache ) )
[ "def", "store_minions", "(", "opts", ",", "jid", ",", "minions", ",", "mminion", "=", "None", ",", "syndic_id", "=", "None", ")", ":", "if", "mminion", "is", "None", ":", "mminion", "=", "salt", ".", "minion", ".", "MasterMinion", "(", "opts", ",", "...
Store additional minions matched on lower-level masters using the configured master_job_cache
[ "Store", "additional", "minions", "matched", "on", "lower", "-", "level", "masters", "using", "the", "configured", "master_job_cache" ]
python
train
LLNL/scraper
scraper/github/__init__.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/__init__.py#L76-L101
def _check_api_limits(gh_session, api_required=250, sleep_time=15): """ Simplified check for API limits If necessary, spin in place waiting for API to reset before returning. See: https://developer.github.com/v3/#rate-limiting """ api_rates = gh_session.rate_limit() api_remaining = api_rates['rate']['remaining'] api_reset = api_rates['rate']['reset'] logger.debug('Rate Limit - %d requests remaining', api_remaining) if api_remaining > api_required: return now_time = time.time() time_to_reset = int(api_reset - now_time) logger.warning('Rate Limit Depleted - Sleeping for %d seconds', time_to_reset) while now_time < api_reset: time.sleep(sleep_time) now_time = time.time() return
[ "def", "_check_api_limits", "(", "gh_session", ",", "api_required", "=", "250", ",", "sleep_time", "=", "15", ")", ":", "api_rates", "=", "gh_session", ".", "rate_limit", "(", ")", "api_remaining", "=", "api_rates", "[", "'rate'", "]", "[", "'remaining'", "]...
Simplified check for API limits If necessary, spin in place waiting for API to reset before returning. See: https://developer.github.com/v3/#rate-limiting
[ "Simplified", "check", "for", "API", "limits" ]
python
test
nickoala/telepot
telepot/routing.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/routing.py#L184-L210
def make_routing_table(obj, keys, prefix='on_'): """ :return: a dictionary roughly equivalent to ``{'key1': obj.on_key1, 'key2': obj.on_key2, ...}``, but ``obj`` does not have to define all methods. It may define the needed ones only. :param obj: the object :param keys: a list of keys :param prefix: a string to be prepended to keys to make method names """ def maptuple(k): if isinstance(k, tuple): if len(k) == 2: return k elif len(k) == 1: return k[0], lambda *aa, **kw: getattr(obj, prefix+k[0])(*aa, **kw) else: raise ValueError() else: return k, lambda *aa, **kw: getattr(obj, prefix+k)(*aa, **kw) # Use `lambda` to delay evaluation of `getattr`. # I don't want to require definition of all methods. # Let users define only the ones they need. return dict([maptuple(k) for k in keys])
[ "def", "make_routing_table", "(", "obj", ",", "keys", ",", "prefix", "=", "'on_'", ")", ":", "def", "maptuple", "(", "k", ")", ":", "if", "isinstance", "(", "k", ",", "tuple", ")", ":", "if", "len", "(", "k", ")", "==", "2", ":", "return", "k", ...
:return: a dictionary roughly equivalent to ``{'key1': obj.on_key1, 'key2': obj.on_key2, ...}``, but ``obj`` does not have to define all methods. It may define the needed ones only. :param obj: the object :param keys: a list of keys :param prefix: a string to be prepended to keys to make method names
[ ":", "return", ":", "a", "dictionary", "roughly", "equivalent", "to", "{", "key1", ":", "obj", ".", "on_key1", "key2", ":", "obj", ".", "on_key2", "...", "}", "but", "obj", "does", "not", "have", "to", "define", "all", "methods", ".", "It", "may", "d...
python
train
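A usage sketch of the routing-table idea from `make_routing_table`: `getattr` is wrapped in a lambda so a missing method only fails when its entry is actually called. This simplified version skips the tuple handling, and the `Handler` class is invented for illustration.

class Handler:
    def on_text(self, msg):
        return 'text: %s' % msg
    # no on_photo defined - that's fine until it is actually called

def make_routing_table(obj, keys, prefix='on_'):
    # Bind k per iteration via a keyword-only default; getattr stays lazy.
    return {k: (lambda *a, _k=k, **kw: getattr(obj, prefix + _k)(*a, **kw))
            for k in keys}

table = make_routing_table(Handler(), ['text', 'photo'])
print(table['text']('hello'))   # text: hello
# table['photo']('x') would raise AttributeError only at call time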
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L4001-L4045
def rank(self, dim, pct=False, keep_attrs=None): """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If pct is True, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : str Dimension over which to compute rank. pct : bool, optional If True, compute percentage ranks, otherwise compute integer ranks. keep_attrs : bool, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- ranked : Dataset Variables that do not depend on `dim` are dropped. """ if dim not in self.dims: raise ValueError( 'Dataset does not contain the dimension: %s' % dim) variables = OrderedDict() for name, var in self.variables.items(): if name in self.data_vars: if dim in var.dims: variables[name] = var.rank(dim, pct=pct) else: variables[name] = var coord_names = set(self.coords) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) attrs = self.attrs if keep_attrs else None return self._replace_vars_and_dims(variables, coord_names, attrs=attrs)
[ "def", "rank", "(", "self", ",", "dim", ",", "pct", "=", "False", ",", "keep_attrs", "=", "None", ")", ":", "if", "dim", "not", "in", "self", ".", "dims", ":", "raise", "ValueError", "(", "'Dataset does not contain the dimension: %s'", "%", "dim", ")", "...
Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If pct is True, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : str Dimension over which to compute rank. pct : bool, optional If True, compute percentage ranks, otherwise compute integer ranks. keep_attrs : bool, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- ranked : Dataset Variables that do not depend on `dim` are dropped.
[ "Ranks", "the", "data", "." ]
python
train
poldracklab/niworkflows
niworkflows/utils/misc.py
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/utils/misc.py#L31-L44
def add_suffix(in_files, suffix): """ Wrap nipype's fname_presuffix to conveniently just add a suffix >>> add_suffix([ ... '/path/to/sub-045_ses-test_T1w.nii.gz', ... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test') 'sub-045_ses-test_T1w_test.nii.gz' """ import os.path as op from nipype.utils.filemanip import fname_presuffix, filename_to_list return op.basename(fname_presuffix(filename_to_list(in_files)[0], suffix=suffix))
[ "def", "add_suffix", "(", "in_files", ",", "suffix", ")", ":", "import", "os", ".", "path", "as", "op", "from", "nipype", ".", "utils", ".", "filemanip", "import", "fname_presuffix", ",", "filename_to_list", "return", "op", ".", "basename", "(", "fname_presu...
Wrap nipype's fname_presuffix to conveniently just add a suffix >>> add_suffix([ ... '/path/to/sub-045_ses-test_T1w.nii.gz', ... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test') 'sub-045_ses-test_T1w_test.nii.gz'
[ "Wrap", "nipype", "s", "fname_presuffix", "to", "conveniently", "just", "add", "a", "prefix" ]
python
train
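The heavy lifting in `add_suffix` is nipype's `fname_presuffix`; a dependency-free sketch of the same suffix insertion for `.nii.gz`-style double extensions (nipype handles more corner cases than this, and the extension list here is an assumption).

import os.path as op

def add_suffix(path, suffix):
    base = op.basename(path)
    # Peel off a double extension like .nii.gz before inserting the suffix.
    for ext in ('.nii.gz', '.nii', '.txt'):
        if base.endswith(ext):
            return base[:-len(ext)] + suffix + ext
    root, ext = op.splitext(base)
    return root + suffix + ext

print(add_suffix('/path/to/sub-045_ses-test_T1w.nii.gz', '_test'))
# sub-045_ses-test_T1w_test.nii.gz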
singularityhub/singularity-cli
spython/main/parse/recipe.py
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/recipe.py#L41-L53
def load(self, recipe): '''load a recipe file into the client, first performing checks, and then parsing the file. Parameters ========== recipe: the original recipe file, parsed by the subclass either DockerRecipe or SingularityRecipe ''' self.recipe = recipe # the recipe file self._run_checks() # does the recipe file exist? self.parse()
[ "def", "load", "(", "self", ",", "recipe", ")", ":", "self", ".", "recipe", "=", "recipe", "# the recipe file", "self", ".", "_run_checks", "(", ")", "# does the recipe file exist?", "self", ".", "parse", "(", ")" ]
load a recipe file into the client, first performing checks, and then parsing the file. Parameters ========== recipe: the original recipe file, parsed by the subclass either DockerRecipe or SingularityRecipe
[ "load", "a", "recipe", "file", "into", "the", "client", "first", "performing", "checks", "and", "then", "parsing", "the", "file", "." ]
python
train
google/grr
grr/client_builder/grr_response_client_builder/build.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client_builder/grr_response_client_builder/build.py#L1055-L1088
def CreateNewZipWithSignedLibs(z_in, z_out, ignore_files=None, signer=None, skip_signing_files=None): """Copies files from one zip to another, signing all qualifying files.""" ignore_files = ignore_files or [] skip_signing_files = skip_signing_files or [] extensions_to_sign = [".sys", ".exe", ".dll", ".pyd"] to_sign = [] for template_file in z_in.namelist(): if template_file not in ignore_files: extension = os.path.splitext(template_file)[1].lower() if (signer and template_file not in skip_signing_files and extension in extensions_to_sign): to_sign.append(template_file) else: CopyFileInZip(z_in, template_file, z_out) temp_files = {} for filename in to_sign: fd, path = tempfile.mkstemp() with os.fdopen(fd, "wb") as temp_fd: temp_fd.write(z_in.read(filename)) temp_files[filename] = path try: signer.SignFiles(itervalues(temp_files)) except AttributeError: for f in itervalues(temp_files): signer.SignFile(f) for filename, tempfile_path in iteritems(temp_files): z_out.writestr(filename, open(tempfile_path, "rb").read())
[ "def", "CreateNewZipWithSignedLibs", "(", "z_in", ",", "z_out", ",", "ignore_files", "=", "None", ",", "signer", "=", "None", ",", "skip_signing_files", "=", "None", ")", ":", "ignore_files", "=", "ignore_files", "or", "[", "]", "skip_signing_files", "=", "ski...
Copies files from one zip to another, signing all qualifying files.
[ "Copies", "files", "from", "one", "zip", "to", "another", "signing", "all", "qualifying", "files", "." ]
python
train
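Stripped of the signing machinery, the core of `CreateNewZipWithSignedLibs` is copying entries between two open zip archives; a minimal stdlib sketch of that part, with made-up file names in the commented usage.

import zipfile

def copy_zip_entries(src_path, dst_path, ignore=()):
    # Read every entry from the source archive and write it unchanged
    # into the destination, skipping anything in `ignore`.
    with zipfile.ZipFile(src_path) as z_in, \
         zipfile.ZipFile(dst_path, 'w', zipfile.ZIP_DEFLATED) as z_out:
        for name in z_in.namelist():
            if name not in ignore:
                z_out.writestr(name, z_in.read(name))

# copy_zip_entries('template.zip', 'out.zip', ignore=('skip_me.txt',))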
globocom/GloboNetworkAPI-client-python
networkapiclient/TipoAcesso.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/TipoAcesso.py#L103-L124
def remover(self, id_tipo_acesso): """Removes access type by its identifier. :param id_tipo_acesso: Access type identifier. :return: None :raise TipoAcessoError: Access type is associated with equipment and cannot be removed. :raise InvalidParameterError: Access type identifier is invalid or was not informed. :raise TipoAcessoNaoExisteError: Access type doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ if not is_valid_int_param(id_tipo_acesso): raise InvalidParameterError( u'Access type id is invalid or was not informed.') url = 'tipoacesso/' + str(id_tipo_acesso) + '/' code, xml = self.submit(None, 'DELETE', url) return self.response(code, xml)
[ "def", "remover", "(", "self", ",", "id_tipo_acesso", ")", ":", "if", "not", "is_valid_int_param", "(", "id_tipo_acesso", ")", ":", "raise", "InvalidParameterError", "(", "u'Access type id is invalid or was not informed.'", ")", "url", "=", "'tipoacesso/'", "+", "str"...
Removes access type by its identifier. :param id_tipo_acesso: Access type identifier. :return: None :raise TipoAcessoError: Access type is associated with equipment and cannot be removed. :raise InvalidParameterError: Access type identifier is invalid or was not informed. :raise TipoAcessoNaoExisteError: Access type doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "Removes", "access", "type", "by", "its", "identifier", "." ]
python
train
idlesign/django-sitegate
sitegate/templatetags/sitegate.py
https://github.com/idlesign/django-sitegate/blob/0e58de91605071833d75a7c21f2d0de2f2e3c896/sitegate/templatetags/sitegate.py#L47-L61
def tag_builder(parser, token, cls, flow_type): """Helper function handling flow form tags.""" tokens = token.split_contents() tokens_num = len(tokens) if tokens_num == 1 or (tokens_num == 3 and tokens[1] == 'for'): flow_name = None if tokens_num == 3: flow_name = tokens[2] return cls(flow_name) else: raise template.TemplateSyntaxError( '"sitegate_%(type)s_form" tag requires zero or two arguments. ' 'E.g. {%% sitegate_%(type)s_form %%} or ' '{%% sitegate_%(type)s_form for ClassicSignup %%}.' % {'type': flow_type})
[ "def", "tag_builder", "(", "parser", ",", "token", ",", "cls", ",", "flow_type", ")", ":", "tokens", "=", "token", ".", "split_contents", "(", ")", "tokens_num", "=", "len", "(", "tokens", ")", "if", "tokens_num", "==", "1", "or", "(", "tokens_num", "=...
Helper function handling flow form tags.
[ "Helper", "function", "handling", "flow", "form", "tags", "." ]
python
train
xav-b/pyconsul
pyconsul/http.py
https://github.com/xav-b/pyconsul/blob/06ce3b921d01010c19643424486bea4b22196076/pyconsul/http.py#L35-L45
def set(self, key, value, **kwargs): ''' Store a new value at the given key kwargs can hold `cas` and `flags` params ''' return requests.put( '{}/{}/kv/{}'.format( self.master, pyconsul.__consul_api_version__, key), data=value, params=kwargs )
[ "def", "set", "(", "self", ",", "key", ",", "value", ",", "*", "*", "kwargs", ")", ":", "return", "requests", ".", "put", "(", "'{}/{}/kv/{}'", ".", "format", "(", "self", ".", "master", ",", "pyconsul", ".", "__consul_api_version__", ",", "key", ")", ...
Store a new value at the given key kwargs can hold `cas` and `flags` params
[ "Store", "a", "new", "value", "at", "the", "given", "key", "kwargs", "can", "hold", "cas", "and", "flags", "params" ]
python
train
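The `set` method above is a single `requests.put` against Consul's KV endpoint; a sketch of the equivalent raw call, where the host, version segment, and key are placeholders and `cas`/`flags` are Consul's optimistic-locking query parameters.

import requests

def kv_set(master, version, key, value, **params):
    # PUT http://<master>/<version>/kv/<key>?cas=...&flags=...
    url = '{}/{}/kv/{}'.format(master, version, key)
    return requests.put(url, data=value, params=params)

# kv_set('http://localhost:8500', 'v1', 'feature/flag', 'on', cas=0)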
log2timeline/plaso
plaso/output/manager.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/manager.py#L18-L41
def DeregisterOutput(cls, output_class): """Deregisters an output class. The output classes are identified based on their NAME attribute. Args: output_class (type): output module class. Raises: KeyError: if output class is not set for the corresponding name. """ output_class_name = output_class.NAME.lower() if output_class_name in cls._disabled_output_classes: class_dict = cls._disabled_output_classes else: class_dict = cls._output_classes if output_class_name not in class_dict: raise KeyError( 'Output class not set for name: {0:s}.'.format( output_class.NAME)) del class_dict[output_class_name]
[ "def", "DeregisterOutput", "(", "cls", ",", "output_class", ")", ":", "output_class_name", "=", "output_class", ".", "NAME", ".", "lower", "(", ")", "if", "output_class_name", "in", "cls", ".", "_disabled_output_classes", ":", "class_dict", "=", "cls", ".", "_...
Deregisters an output class. The output classes are identified based on their NAME attribute. Args: output_class (type): output module class. Raises: KeyError: if output class is not set for the corresponding name.
[ "Deregisters", "an", "output", "class", "." ]
python
train
josiahcarlson/parse-crontab
crontab/_crontab.py
https://github.com/josiahcarlson/parse-crontab/blob/b2bd254cf14e8c83e502615851b0d4b62f73ab15/crontab/_crontab.py#L361-L377
def _make_matchers(self, crontab): ''' This constructs the full matcher struct. ''' crontab = _aliases.get(crontab, crontab) ct = crontab.split() if len(ct) == 5: ct.insert(0, '0') ct.append('*') elif len(ct) == 6: ct.insert(0, '0') _assert(len(ct) == 7, "improper number of cron entries specified; got %i need 5 to 7"%(len(ct,))) matchers = [_Matcher(which, entry) for which, entry in enumerate(ct)] return Matcher(*matchers)
[ "def", "_make_matchers", "(", "self", ",", "crontab", ")", ":", "crontab", "=", "_aliases", ".", "get", "(", "crontab", ",", "crontab", ")", "ct", "=", "crontab", ".", "split", "(", ")", "if", "len", "(", "ct", ")", "==", "5", ":", "ct", ".", "in...
This constructs the full matcher struct.
[ "This", "constructs", "the", "full", "matcher", "struct", "." ]
python
train
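The interesting step in `_make_matchers` is normalizing 5-, 6-, and 7-field crontab strings to a canonical 7 fields (seconds prepended, year appended); a standalone sketch of just that step.

def normalize_cron(crontab):
    ct = crontab.split()
    if len(ct) == 5:        # classic minute-hour-day-month-weekday form
        ct.insert(0, '0')   # add seconds
        ct.append('*')      # add year
    elif len(ct) == 6:      # seconds missing, year present
        ct.insert(0, '0')
    if len(ct) != 7:
        raise ValueError('need 5 to 7 fields, got %d' % len(ct))
    return ct

print(normalize_cron('*/5 * * * *'))
# ['0', '*/5', '*', '*', '*', '*', '*']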
Shizmob/pydle
pydle/features/rfc1459/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/rfc1459/client.py#L776-L787
async def on_raw_301(self, message): """ User is away. """ target, nickname, message = message.params info = { 'away': True, 'away_message': message } if nickname in self.users: self._sync_user(nickname, info) if nickname in self._pending['whois']: self._whois_info[nickname].update(info)
[ "async", "def", "on_raw_301", "(", "self", ",", "message", ")", ":", "target", ",", "nickname", ",", "message", "=", "message", ".", "params", "info", "=", "{", "'away'", ":", "True", ",", "'away_message'", ":", "message", "}", "if", "nickname", "in", ...
User is away.
[ "User", "is", "away", "." ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/shortcuts.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/shortcuts.py#L87-L98
def create_eventloop(inputhook=None, recognize_win32_paste=True): """ Create and return an :class:`~prompt_toolkit.eventloop.base.EventLoop` instance for a :class:`~prompt_toolkit.interface.CommandLineInterface`. """ if is_windows(): from prompt_toolkit.eventloop.win32 import Win32EventLoop as Loop return Loop(inputhook=inputhook, recognize_paste=recognize_win32_paste) else: from prompt_toolkit.eventloop.posix import PosixEventLoop as Loop return Loop(inputhook=inputhook)
[ "def", "create_eventloop", "(", "inputhook", "=", "None", ",", "recognize_win32_paste", "=", "True", ")", ":", "if", "is_windows", "(", ")", ":", "from", "prompt_toolkit", ".", "eventloop", ".", "win32", "import", "Win32EventLoop", "as", "Loop", "return", "Loo...
Create and return an :class:`~prompt_toolkit.eventloop.base.EventLoop` instance for a :class:`~prompt_toolkit.interface.CommandLineInterface`.
[ "Create", "and", "return", "an", ":", "class", ":", "~prompt_toolkit", ".", "eventloop", ".", "base", ".", "EventLoop", "instance", "for", "a", ":", "class", ":", "~prompt_toolkit", ".", "interface", ".", "CommandLineInterface", "." ]
python
train
singularitti/scientific-string
scientific_string/strings.py
https://github.com/singularitti/scientific-string/blob/615dca747e8fb1e89ed1d9f18aef4066295a17a9/scientific_string/strings.py#L39-L51
def strings_to_integers(strings: Iterable[str]) -> Iterable[int]: """ Convert a list of strings to a list of integers. :param strings: a list of string :return: a list of converted integers .. doctest:: >>> strings_to_integers(['1', '1.0', '-0.2']) [1, 1, 0] """ return strings_to_(strings, lambda x: int(float(x)))
[ "def", "strings_to_integers", "(", "strings", ":", "Iterable", "[", "str", "]", ")", "->", "Iterable", "[", "int", "]", ":", "return", "strings_to_", "(", "strings", ",", "lambda", "x", ":", "int", "(", "float", "(", "x", ")", ")", ")" ]
Convert a list of strings to a list of integers. :param strings: a list of string :return: a list of converted integers .. doctest:: >>> strings_to_integers(['1', '1.0', '-0.2']) [1, 1, 0]
[ "Convert", "a", "list", "of", "strings", "to", "a", "list", "of", "integers", "." ]
python
train
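The `int(float(x))` composition is what lets `'1.0'` and `'-0.2'` through; a one-line sketch of the same conversion without the package's `strings_to_` helper.

def strings_to_integers(strings):
    # float() first so '1.0' parses; int() then truncates toward zero.
    return [int(float(s)) for s in strings]

print(strings_to_integers(['1', '1.0', '-0.2']))  # [1, 1, 0]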
sckott/pygbif
pygbif/occurrences/download.py
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L227-L243
def _extract_values(values_list): """extract values from either file or list :param values_list: list or file name (str) with list of values """ values = [] # check if file or list of values to iterate if isinstance(values_list, str): with open(values_list) as ff: reading = csv.reader(ff) for j in reading: values.append(j[0]) elif isinstance(values_list, list): values = values_list else: raise Exception("input datatype not supported.") return values
[ "def", "_extract_values", "(", "values_list", ")", ":", "values", "=", "[", "]", "# check if file or list of values to iterate", "if", "isinstance", "(", "values_list", ",", "str", ")", ":", "with", "open", "(", "values_list", ")", "as", "ff", ":", "reading", ...
extract values from either file or list :param values_list: list or file name (str) with list of values
[ "extract", "values", "from", "either", "file", "or", "list" ]
python
train
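A compact stdlib sketch of the same file-or-list dispatch as `_extract_values`, taking the first CSV column when given a filename; the guard for empty rows is an addition, not in the original.

import csv

def extract_values(values_list):
    if isinstance(values_list, list):
        return list(values_list)
    if isinstance(values_list, str):
        with open(values_list) as ff:
            return [row[0] for row in csv.reader(ff) if row]
    raise TypeError('input datatype not supported.')

print(extract_values(['a', 'b', 'c']))  # ['a', 'b', 'c']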
carljm/django-adminfiles
adminfiles/flickr.py
https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/flickr.py#L828-L840
def _parse_photo(photo): """Create a Photo object from photo data.""" owner = User(photo.owner) title = photo.title ispublic = photo.ispublic isfriend = photo.isfriend isfamily = photo.isfamily secret = photo.secret server = photo.server p = Photo(photo.id, owner=owner, title=title, ispublic=ispublic,\ isfriend=isfriend, isfamily=isfamily, secret=secret, \ server=server) return p
[ "def", "_parse_photo", "(", "photo", ")", ":", "owner", "=", "User", "(", "photo", ".", "owner", ")", "title", "=", "photo", ".", "title", "ispublic", "=", "photo", ".", "ispublic", "isfriend", "=", "photo", ".", "isfriend", "isfamily", "=", "photo", "...
Create a Photo object from photo data.
[ "Create", "a", "Photo", "object", "from", "photo", "data", "." ]
python
train
secdev/scapy
scapy/layers/l2.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/l2.py#L728-L732
def etherleak(target, **kargs): """Exploit Etherleak flaw""" return srp(Ether() / ARP(pdst=target), prn=lambda s_r: conf.padding_layer in s_r[1] and hexstr(s_r[1][conf.padding_layer].load), # noqa: E501 filter="arp", **kargs)
[ "def", "etherleak", "(", "target", ",", "*", "*", "kargs", ")", ":", "return", "srp", "(", "Ether", "(", ")", "/", "ARP", "(", "pdst", "=", "target", ")", ",", "prn", "=", "lambda", "s_r", ":", "conf", ".", "padding_layer", "in", "s_r", "[", "1",...
Exploit Etherleak flaw
[ "Exploit", "Etherleak", "flaw" ]
python
train
wbond/asn1crypto
asn1crypto/crl.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/crl.py#L483-L505
def delta_crl_distribution_points(self): """ Returns delta CRL URLs - only applies to complete CRLs :return: A list of zero or more DistributionPoint objects """ if self._delta_crl_distribution_points is None: self._delta_crl_distribution_points = [] if self.freshest_crl_value is not None: for distribution_point in self.freshest_crl_value: distribution_point_name = distribution_point['distribution_point'] # RFC 5280 indicates conforming CA should not use the relative form if distribution_point_name.name == 'name_relative_to_crl_issuer': continue # This library is currently only concerned with HTTP-based CRLs for general_name in distribution_point_name.chosen: if general_name.name == 'uniform_resource_identifier': self._delta_crl_distribution_points.append(distribution_point) return self._delta_crl_distribution_points
[ "def", "delta_crl_distribution_points", "(", "self", ")", ":", "if", "self", ".", "_delta_crl_distribution_points", "is", "None", ":", "self", ".", "_delta_crl_distribution_points", "=", "[", "]", "if", "self", ".", "freshest_crl_value", "is", "not", "None", ":", ...
Returns delta CRL URLs - only applies to complete CRLs :return: A list of zero or more DistributionPoint objects
[ "Returns", "delta", "CRL", "URLs", "-", "only", "applies", "to", "complete", "CRLs" ]
python
train
osrg/ryu
ryu/lib/igmplib.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/igmplib.py#L720-L728
def _do_flood(self, in_port, msg): """the process when the snooper receives a message from the outside for processing. """ datapath = msg.datapath ofproto = datapath.ofproto parser = datapath.ofproto_parser actions = [parser.OFPActionOutput(ofproto.OFPP_FLOOD)] self._do_packet_out(datapath, msg.data, in_port, actions)
[ "def", "_do_flood", "(", "self", ",", "in_port", ",", "msg", ")", ":", "datapath", "=", "msg", ".", "datapath", "ofproto", "=", "datapath", ".", "ofproto", "parser", "=", "datapath", ".", "ofproto_parser", "actions", "=", "[", "parser", ".", "OFPActionOutp...
the process when the snooper receives a message from the outside for processing.
[ "the", "process", "when", "the", "snooper", "received", "a", "message", "of", "the", "outside", "for", "processing", "." ]
python
train
KelSolaar/Umbra
umbra/components/factory/script_editor/search_in_files.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/search_in_files.py#L1089-L1116
def __add_location(self, type, *args): """ Defines the slot triggered by **Where_lineEdit** Widget when a context menu entry is clicked. :param type: Location type. :type type: unicode :param \*args: Arguments. :type \*args: \* """ if type == "directory": location = umbra.ui.common.store_last_browsed_path((QFileDialog.getExistingDirectory(self, "Add Directory:", RuntimeGlobals.last_browsed_path))) elif type == "file": location = umbra.ui.common.store_last_browsed_path((QFileDialog.getOpenFileName(self, "Add File:", RuntimeGlobals.last_browsed_path, "All Files (*)"))) elif type == "editors": location = self.__targets_format.format(self.__default_target) elif type == "include_filter": location = self.__filters_in_format.format(self.__default_filter_in) elif type == "exclude_filter": location = self.__filters_out_format.format(self.__default_filter_out) location and self.Where_lineEdit.setText(", ".join(filter(bool, (foundations.strings.to_string( self.Where_lineEdit.text()), location))))
[ "def", "__add_location", "(", "self", ",", "type", ",", "*", "args", ")", ":", "if", "type", "==", "\"directory\"", ":", "location", "=", "umbra", ".", "ui", ".", "common", ".", "store_last_browsed_path", "(", "(", "QFileDialog", ".", "getExistingDirectory",...
Defines the slot triggered by **Where_lineEdit** Widget when a context menu entry is clicked. :param type: Location type. :type type: unicode :param \*args: Arguments. :type \*args: \*
[ "Defines", "the", "slot", "triggered", "by", "**", "Where_lineEdit", "**", "Widget", "when", "a", "context", "menu", "entry", "is", "clicked", "." ]
python
train
sdispater/eloquent
eloquent/orm/builder.py
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/builder.py#L736-L756
def _parse_nested(self, name, results): """ Parse the nested relationship in a relation. :param name: The name of the relationship :type name: str :type results: dict :rtype: dict """ progress = [] for segment in name.split('.'): progress.append(segment) last = '.'.join(progress) if last not in results: results[last] = self.__class__(self.get_query().new_query()) return results
[ "def", "_parse_nested", "(", "self", ",", "name", ",", "results", ")", ":", "progress", "=", "[", "]", "for", "segment", "in", "name", ".", "split", "(", "'.'", ")", ":", "progress", ".", "append", "(", "segment", ")", "last", "=", "'.'", ".", "joi...
Parse the nested relationship in a relation. :param name: The name of the relationship :type name: str :type results: dict :rtype: dict
[ "Parse", "the", "nested", "relationship", "in", "a", "relation", "." ]
python
train
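`_parse_nested` expands a dotted relation name into all of its prefixes; a dictionary-only sketch showing which keys get created, with the query-builder construction stubbed out as a comment.

def parse_nested(name, results=None):
    results = {} if results is None else results
    progress = []
    for segment in name.split('.'):
        progress.append(segment)
        last = '.'.join(progress)
        results.setdefault(last, None)  # a real builder instance would go here
    return results

print(sorted(parse_nested('author.country.city')))
# ['author', 'author.country', 'author.country.city']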
elemoine/papyrus
papyrus/protocol.py
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L356-L367
def delete(self, request, id): """ Remove the targeted feature from the database """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) session = self.Session() obj = session.query(self.mapped_class).get(id) if obj is None: return HTTPNotFound() if self.before_delete is not None: self.before_delete(request, obj) session.delete(obj) return Response(status_int=204)
[ "def", "delete", "(", "self", ",", "request", ",", "id", ")", ":", "if", "self", ".", "readonly", ":", "return", "HTTPMethodNotAllowed", "(", "headers", "=", "{", "'Allow'", ":", "'GET, HEAD'", "}", ")", "session", "=", "self", ".", "Session", "(", ")"...
Remove the targeted feature from the database
[ "Remove", "the", "targeted", "feature", "from", "the", "database" ]
python
train
riggsd/davies
examples/wx_compass.py
https://github.com/riggsd/davies/blob/8566c626202a875947ad01c087300108c68d80b5/examples/wx_compass.py#L220-L237
def OnSelChanged(self, event): """Method called when selected item is changed""" # Get the selected item object item = event.GetItem() obj = self.leftPanel.model.ItemToObject(item) if isinstance(obj, compass.Survey): l = [ 'Survey Name: %s' % obj.name, 'Survey Date: %s' % obj.date, 'Comment: %s' % obj.comment, 'Team: %s' % ', '.join(obj.team), 'Surveyed Footage: %0.1f' % obj.length, '', ] l.extend([' '.join(['%s: %s' % (k,v) for (k,v) in shot.items()]) for shot in obj.shots]) self.display.SetLabel(str('\n'.join(l))) else: self.display.SetLabel('')
[ "def", "OnSelChanged", "(", "self", ",", "event", ")", ":", "# Get the selected item object", "item", "=", "event", ".", "GetItem", "(", ")", "obj", "=", "self", ".", "leftPanel", ".", "model", ".", "ItemToObject", "(", "item", ")", "if", "isinstance", "("...
Method called when selected item is changed
[ "Method", "called", "when", "selected", "item", "is", "changed" ]
python
train
odlgroup/odl
odl/contrib/torch/operator.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/torch/operator.py#L302-L394
def forward(self, x): """Compute forward-pass of this module on ``x``. Parameters ---------- x : `torch.autograd.variable.Variable` Input of this layer. The contained tensor must have shape ``extra_shape + operator.domain.shape``, and ``len(extra_shape)`` must be at least 1 (batch axis). Returns ------- out : `torch.autograd.variable.Variable` The computed output. Its tensor will have shape ``extra_shape + operator.range.shape``, where ``extra_shape`` are the extra axes of ``x``. Examples -------- Evaluating on a 2D tensor, where the operator expects a 1D input, i.e., with extra batch axis only: >>> matrix = np.array([[1, 0, 0], ... [0, 1, 1]], dtype='float32') >>> odl_op = odl.MatrixOperator(matrix) >>> odl_op.domain.shape (3,) >>> odl_op.range.shape (2,) >>> op_mod = OperatorAsModule(odl_op) >>> t = torch.ones(3) >>> x = autograd.Variable(t[None, :]) # "fake" batch axis >>> op_mod(x) Variable containing: 1 2 [torch.FloatTensor of size 1x2] >>> t = torch.ones(3) >>> x_tensor = torch.stack([0 * t, 1 * t]) >>> x = autograd.Variable(x_tensor) # batch of 2 inputs >>> op_mod(x) Variable containing: 0 0 1 2 [torch.FloatTensor of size 2x2] An arbitrary number of axes is supported: >>> x = autograd.Variable(t[None, None, :]) # "fake" batch and channel >>> op_mod(x) Variable containing: (0 ,.,.) = 1 2 [torch.FloatTensor of size 1x1x2] >>> x_tensor = torch.stack([torch.stack([0 * t, 1 * t]), ... torch.stack([2 * t, 3 * t]), ... torch.stack([4 * t, 5 * t])]) >>> x = autograd.Variable(x_tensor) # batch of 3x2 inputs >>> op_mod(x) Variable containing: (0 ,.,.) = 0 0 1 2 <BLANKLINE> (1 ,.,.) = 2 4 3 6 <BLANKLINE> (2 ,.,.) = 4 8 5 10 [torch.FloatTensor of size 3x2x2] """ in_shape = x.data.shape op_in_shape = self.op_func.operator.domain.shape op_out_shape = self.op_func.operator.range.shape extra_shape = in_shape[:-len(op_in_shape)] if in_shape[-len(op_in_shape):] != op_in_shape or not extra_shape: shp_str = str(op_in_shape).strip('()') raise ValueError('expected input of shape (N, *, {}), got input ' 'with shape {}'.format(shp_str, in_shape)) # Flatten extra axes, then do one entry at a time newshape = (int(np.prod(extra_shape)),) + op_in_shape x_flat_xtra = x.reshape(*newshape) results = [] for i in range(x_flat_xtra.data.shape[0]): results.append(self.op_func(x_flat_xtra[i])) # Reshape the resulting stack to the expected output shape stack_flat_xtra = torch.stack(results) return stack_flat_xtra.view(extra_shape + op_out_shape)
[ "def", "forward", "(", "self", ",", "x", ")", ":", "in_shape", "=", "x", ".", "data", ".", "shape", "op_in_shape", "=", "self", ".", "op_func", ".", "operator", ".", "domain", ".", "shape", "op_out_shape", "=", "self", ".", "op_func", ".", "operator", ...
Compute forward-pass of this module on ``x``. Parameters ---------- x : `torch.autograd.variable.Variable` Input of this layer. The contained tensor must have shape ``extra_shape + operator.domain.shape``, and ``len(extra_shape)`` must be at least 1 (batch axis). Returns ------- out : `torch.autograd.variable.Variable` The computed output. Its tensor will have shape ``extra_shape + operator.range.shape``, where ``extra_shape`` are the extra axes of ``x``. Examples -------- Evaluating on a 2D tensor, where the operator expects a 1D input, i.e., with extra batch axis only: >>> matrix = np.array([[1, 0, 0], ... [0, 1, 1]], dtype='float32') >>> odl_op = odl.MatrixOperator(matrix) >>> odl_op.domain.shape (3,) >>> odl_op.range.shape (2,) >>> op_mod = OperatorAsModule(odl_op) >>> t = torch.ones(3) >>> x = autograd.Variable(t[None, :]) # "fake" batch axis >>> op_mod(x) Variable containing: 1 2 [torch.FloatTensor of size 1x2] >>> t = torch.ones(3) >>> x_tensor = torch.stack([0 * t, 1 * t]) >>> x = autograd.Variable(x_tensor) # batch of 2 inputs >>> op_mod(x) Variable containing: 0 0 1 2 [torch.FloatTensor of size 2x2] An arbitrary number of axes is supported: >>> x = autograd.Variable(t[None, None, :]) # "fake" batch and channel >>> op_mod(x) Variable containing: (0 ,.,.) = 1 2 [torch.FloatTensor of size 1x1x2] >>> x_tensor = torch.stack([torch.stack([0 * t, 1 * t]), ... torch.stack([2 * t, 3 * t]), ... torch.stack([4 * t, 5 * t])]) >>> x = autograd.Variable(x_tensor) # batch of 3x2 inputs >>> op_mod(x) Variable containing: (0 ,.,.) = 0 0 1 2 <BLANKLINE> (1 ,.,.) = 2 4 3 6 <BLANKLINE> (2 ,.,.) = 4 8 5 10 [torch.FloatTensor of size 3x2x2]
[ "Compute", "forward", "-", "pass", "of", "this", "module", "on", "x", "." ]
python
train
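The shape gymnastics in `forward` (flatten the extra batch axes, apply the operator entry by entry, restore the batch shape) can be shown with NumPy alone; `apply_batched`, `op`, and the shapes here are stand-ins, not ODL's or torch's types.

import numpy as np

def apply_batched(op, x, op_in_shape, op_out_shape):
    extra_shape = x.shape[:-len(op_in_shape)]
    flat = x.reshape((-1,) + op_in_shape)        # merge all batch axes
    results = np.stack([op(entry) for entry in flat])
    return results.reshape(extra_shape + op_out_shape)

matrix = np.array([[1., 0., 0.], [0., 1., 1.]])
op = lambda v: matrix @ v                        # domain (3,), range (2,)
x = np.ones((3, 2, 3))                           # batch of 3x2 inputs
print(apply_batched(op, x, (3,), (2,)).shape)    # (3, 2, 2)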
gabstopper/smc-python
smc/examples/ip_lists.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/examples/ip_lists.py#L85-L97
def download_as_zip(name, filename): """ Download IPList with zip compression. Recommended for IPLists of larger sizes. This is the default format for downloading IPLists. :param str name: name of IPList :param str filename: name of filename for IPList """ location = list(IPList.objects.filter(name)) if location: iplist = location[0] return iplist.download(filename=filename)
[ "def", "download_as_zip", "(", "name", ",", "filename", ")", ":", "location", "=", "list", "(", "IPList", ".", "objects", ".", "filter", "(", "name", ")", ")", "if", "location", ":", "iplist", "=", "location", "[", "0", "]", "return", "iplist", ".", ...
Download IPList with zip compression. Recommended for IPLists of larger sizes. This is the default format for downloading IPLists. :param str name: name of IPList :param str filename: name of filename for IPList
[ "Download", "IPList", "with", "zip", "compression", ".", "Recommended", "for", "IPLists", "of", "larger", "sizes", ".", "This", "is", "the", "default", "format", "for", "downloading", "IPLists", "." ]
python
train
gem/oq-engine
openquake/server/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/views.py#L658-L703
def extract(request, calc_id, what): """ Wrapper over the `oq extract` command. If `setting.LOCKDOWN` is true only calculations owned by the current user can be retrieved. """ job = logs.dbcmd('get_job', int(calc_id)) if job is None: return HttpResponseNotFound() if not utils.user_has_permission(request, job.user_name): return HttpResponseForbidden() try: # read the data and save them on a temporary .npz file with datastore.read(job.ds_calc_dir + '.hdf5') as ds: fd, fname = tempfile.mkstemp( prefix=what.replace('/', '-'), suffix='.npz') os.close(fd) n = len(request.path_info) query_string = unquote_plus(request.get_full_path()[n:]) aw = _extract(ds, what + query_string) a = {} for key, val in vars(aw).items(): key = str(key) # can be a numpy.bytes_ if isinstance(val, str): # without this oq extract would fail a[key] = numpy.array(val.encode('utf-8')) elif isinstance(val, dict): # this is a hack: we are losing the values a[key] = list(val) else: a[key] = val numpy.savez_compressed(fname, **a) except Exception as exc: tb = ''.join(traceback.format_tb(exc.__traceback__)) return HttpResponse( content='%s: %s\n%s' % (exc.__class__.__name__, exc, tb), content_type='text/plain', status=500) # stream the data back stream = FileWrapper(open(fname, 'rb')) stream.close = lambda: (FileWrapper.close(stream), os.remove(fname)) response = FileResponse(stream, content_type='application/octet-stream') response['Content-Disposition'] = ( 'attachment; filename=%s' % os.path.basename(fname)) response['Content-Length'] = str(os.path.getsize(fname)) return response
[ "def", "extract", "(", "request", ",", "calc_id", ",", "what", ")", ":", "job", "=", "logs", ".", "dbcmd", "(", "'get_job'", ",", "int", "(", "calc_id", ")", ")", "if", "job", "is", "None", ":", "return", "HttpResponseNotFound", "(", ")", "if", "not"...
Wrapper over the `oq extract` command. If `setting.LOCKDOWN` is true only calculations owned by the current user can be retrieved.
[ "Wrapper", "over", "the", "oq", "extract", "command", ".", "If", "setting", ".", "LOCKDOWN", "is", "true", "only", "calculations", "owned", "by", "the", "current", "user", "can", "be", "retrieved", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/util/__init__.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/__init__.py#L263-L365
def _assert_sframe_equal(sf1,
                         sf2,
                         check_column_names=True,
                         check_column_order=True,
                         check_row_order=True,
                         float_column_delta=None):
    """
    Assert the two SFrames are equal.

    The default behavior of this function uses the strictest possible
    definition of equality, where all columns must be in the same order, with
    the same names and have the same data in the same order. Each of these
    stipulations can be relaxed individually and in concert with another, with
    the exception of `check_column_order` and `check_column_names`, we must
    use one of these to determine which columns to compare with one another.

    Parameters
    ----------
    sf1 : SFrame

    sf2 : SFrame

    check_column_names : bool
        If true, assert if the data values in two columns are the same, but
        they have different names. If False, column order is used to determine
        which columns to compare.

    check_column_order : bool
        If true, assert if the data values in two columns are the same, but
        are not in the same column position (one is the i-th column and the
        other is the j-th column, i != j). If False, column names are used to
        determine which columns to compare.

    check_row_order : bool
        If true, assert if all rows in the first SFrame exist in the second
        SFrame, but they are not in the same order.

    float_column_delta : float
        The acceptable delta that two float values can be and still be
        considered "equal". When this is None, only exact equality is
        accepted. This is the default behavior since columns of all Nones are
        often of float type. Applies to all float columns.
    """
    from .. import SFrame as _SFrame
    if (type(sf1) is not _SFrame) or (type(sf2) is not _SFrame):
        raise TypeError("Cannot function on types other than SFrames.")

    if not check_column_order and not check_column_names:
        raise ValueError("Cannot ignore both column order and column names.")

    sf1.__materialize__()
    sf2.__materialize__()

    if sf1.num_columns() != sf2.num_columns():
        raise AssertionError("Number of columns mismatched: " +
            str(sf1.num_columns()) + " != " + str(sf2.num_columns()))

    s1_names = sf1.column_names()
    s2_names = sf2.column_names()

    sorted_s1_names = sorted(s1_names)
    sorted_s2_names = sorted(s2_names)

    if check_column_names:
        if (check_column_order and (s1_names != s2_names)) or \
                (sorted_s1_names != sorted_s2_names):
            raise AssertionError("SFrame does not have same column names: " +
                str(sf1.column_names()) + " != " + str(sf2.column_names()))

    if sf1.num_rows() != sf2.num_rows():
        raise AssertionError("Number of rows mismatched: " +
            str(sf1.num_rows()) + " != " + str(sf2.num_rows()))

    if not check_row_order and (sf1.num_rows() > 1):
        sf1 = sf1.sort(s1_names)
        sf2 = sf2.sort(s2_names)

    names_to_check = None
    if check_column_names:
        names_to_check = list(zip(sorted_s1_names, sorted_s2_names))
    else:
        names_to_check = list(zip(s1_names, s2_names))

    for i in names_to_check:
        col1 = sf1[i[0]]
        col2 = sf2[i[1]]
        if col1.dtype != col2.dtype:
            raise AssertionError("Columns " + str(i) + " types mismatched.")

        compare_ary = None
        if col1.dtype == float and float_column_delta is not None:
            dt = float_column_delta
            compare_ary = ((col1 > col2 - dt) & (col1 < col2 + dt))
        else:
            compare_ary = (sf1[i[0]] == sf2[i[1]])
        if not compare_ary.all():
            count = 0
            for j in compare_ary:
                if not j:
                    first_row = count
                    break
                count += 1
            raise AssertionError("Columns " + str(i) +
                " are not equal! First differing element is at row " +
                str(first_row) + ": " +
                str((col1[first_row], col2[first_row])))
[ "def", "_assert_sframe_equal", "(", "sf1", ",", "sf2", ",", "check_column_names", "=", "True", ",", "check_column_order", "=", "True", ",", "check_row_order", "=", "True", ",", "float_column_delta", "=", "None", ")", ":", "from", ".", ".", "import", "SFrame", ...
Assert the two SFrames are equal.

The default behavior of this function uses the strictest possible
definition of equality, where all columns must be in the same order, with
the same names and have the same data in the same order. Each of these
stipulations can be relaxed individually and in concert with another, with
the exception of `check_column_order` and `check_column_names`, we must use
one of these to determine which columns to compare with one another.

Parameters
----------
sf1 : SFrame

sf2 : SFrame

check_column_names : bool
    If true, assert if the data values in two columns are the same, but
    they have different names. If False, column order is used to determine
    which columns to compare.

check_column_order : bool
    If true, assert if the data values in two columns are the same, but are
    not in the same column position (one is the i-th column and the other
    is the j-th column, i != j). If False, column names are used to
    determine which columns to compare.

check_row_order : bool
    If true, assert if all rows in the first SFrame exist in the second
    SFrame, but they are not in the same order.

float_column_delta : float
    The acceptable delta that two float values can be and still be
    considered "equal". When this is None, only exact equality is accepted.
    This is the default behavior since columns of all Nones are often of
    float type. Applies to all float columns.
[ "Assert", "the", "two", "SFrames", "are", "equal", "." ]
python
train
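A usage sketch for the comparison helper above, not taken from the source: it assumes turicreate is installed and that the helper is importable from ``turicreate.util`` (as the path suggests); the frames and the tolerance value are made up for illustration.

import turicreate as tc
from turicreate.util import _assert_sframe_equal  # assumed import path

a = tc.SFrame({'x': [1.0, 2.0]})
b = tc.SFrame({'x': [1.0000001, 2.0]})

# Passes: the float columns differ by less than the allowed delta.
_assert_sframe_equal(a, b, float_column_delta=1e-3)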
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/task/task_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/task/task_client.py#L126-L162
def get_attachment_content(self, scope_identifier, hub_name, plan_id, timeline_id, record_id, type, name, **kwargs):
    """GetAttachmentContent.
    [Preview API]
    :param str scope_identifier: The project GUID to scope the request
    :param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server
    :param str plan_id:
    :param str timeline_id:
    :param str record_id:
    :param str type:
    :param str name:
    :rtype: object
    """
    route_values = {}
    if scope_identifier is not None:
        route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str')
    if hub_name is not None:
        route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str')
    if plan_id is not None:
        route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
    if timeline_id is not None:
        route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str')
    if record_id is not None:
        route_values['recordId'] = self._serialize.url('record_id', record_id, 'str')
    if type is not None:
        route_values['type'] = self._serialize.url('type', type, 'str')
    if name is not None:
        route_values['name'] = self._serialize.url('name', name, 'str')
    response = self._send(http_method='GET',
                          location_id='7898f959-9cdf-4096-b29e-7f293031629e',
                          version='5.0-preview.1',
                          route_values=route_values,
                          accept_media_type='application/octet-stream')
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    return self._client.stream_download(response, callback=callback)
[ "def", "get_attachment_content", "(", "self", ",", "scope_identifier", ",", "hub_name", ",", "plan_id", ",", "timeline_id", ",", "record_id", ",", "type", ",", "name", ",", "*", "*", "kwargs", ")", ":", "route_values", "=", "{", "}", "if", "scope_identifier"...
GetAttachmentContent.
[Preview API]
:param str scope_identifier: The project GUID to scope the request
:param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server
:param str plan_id:
:param str timeline_id:
:param str record_id:
:param str type:
:param str name:
:rtype: object
[ "GetAttachmentContent", ".", "[", "Preview", "API", "]", ":", "param", "str", "scope_identifier", ":", "The", "project", "GUID", "to", "scope", "the", "request", ":", "param", "str", "hub_name", ":", "The", "name", "of", "the", "server", "hub", ":", "build...
python
train
suds-community/suds
suds/client.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/client.py#L755-L831
def process_reply(self, reply, status, description):
    """
    Process a web service operation SOAP reply.

    Depending on how the ``retxml`` option is set, may return the SOAP reply
    XML or process it and return the Python object representing the returned
    value.

    @param reply: The SOAP reply envelope.
    @type reply: I{bytes}
    @param status: The HTTP status code (None indicates httplib.OK).
    @type status: int|I{None}
    @param description: Additional status description.
    @type description: str
    @return: The invoked web service operation return value.
    @rtype: I{builtin}|I{subclass of} L{Object}|I{bytes}|I{None}
    """
    if status is None:
        status = httplib.OK
    debug_message = "Reply HTTP status - %d" % (status,)
    if status in (httplib.ACCEPTED, httplib.NO_CONTENT):
        log.debug(debug_message)
        return
    #TODO: Consider whether and how to allow plugins to handle error,
    # httplib.ACCEPTED & httplib.NO_CONTENT replies as well as successful
    # ones.
    if status == httplib.OK:
        log.debug("%s\n%s", debug_message, reply)
    else:
        log.debug("%s - %s\n%s", debug_message, description, reply)

    plugins = PluginContainer(self.options.plugins)
    ctx = plugins.message.received(reply=reply)
    reply = ctx.reply

    # SOAP standard states that SOAP errors must be accompanied by HTTP
    # status code 500 - internal server error:
    #
    # From SOAP 1.1 specification:
    #   In case of a SOAP error while processing the request, the SOAP HTTP
    #   server MUST issue an HTTP 500 "Internal Server Error" response and
    #   include a SOAP message in the response containing a SOAP Fault
    #   element (see section 4.4) indicating the SOAP processing error.
    #
    # From WS-I Basic profile:
    #   An INSTANCE MUST use a "500 Internal Server Error" HTTP status code
    #   if the response message is a SOAP Fault.
    replyroot = None
    if status in (httplib.OK, httplib.INTERNAL_SERVER_ERROR):
        replyroot = _parse(reply)
        plugins.message.parsed(reply=replyroot)
        fault = self.__get_fault(replyroot)
        if fault:
            if status != httplib.INTERNAL_SERVER_ERROR:
                log.warn("Web service reported a SOAP processing fault "
                    "using an unexpected HTTP status code %d. Reporting "
                    "as an internal server error.", status)
            if self.options.faults:
                raise WebFault(fault, replyroot)
            return httplib.INTERNAL_SERVER_ERROR, fault
    if status != httplib.OK:
        if self.options.faults:
            #TODO: Use a more specific exception class here.
            raise Exception((status, description))
        return status, description

    if self.options.retxml:
        return reply

    result = replyroot and self.method.binding.output.get_reply(
        self.method, replyroot)
    ctx = plugins.message.unmarshalled(reply=result)
    result = ctx.reply
    if self.options.faults:
        return result
    return httplib.OK, result
[ "def", "process_reply", "(", "self", ",", "reply", ",", "status", ",", "description", ")", ":", "if", "status", "is", "None", ":", "status", "=", "httplib", ".", "OK", "debug_message", "=", "\"Reply HTTP status - %d\"", "%", "(", "status", ",", ")", "if", ...
Process a web service operation SOAP reply.

Depending on how the ``retxml`` option is set, may return the SOAP reply
XML or process it and return the Python object representing the returned
value.

@param reply: The SOAP reply envelope.
@type reply: I{bytes}
@param status: The HTTP status code (None indicates httplib.OK).
@type status: int|I{None}
@param description: Additional status description.
@type description: str
@return: The invoked web service operation return value.
@rtype: I{builtin}|I{subclass of} L{Object}|I{bytes}|I{None}
[ "Process", "a", "web", "service", "operation", "SOAP", "reply", "." ]
python
train
michal-stuglik/django-blastplus
blastplus/features/record.py
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/features/record.py#L35-L37
def chop_sequence(sequence, limit_length):
    """Input sequence is divided into smaller non-overlapping sequences of the set length. """
    return [sequence[i:i + limit_length] for i in range(0, len(sequence), limit_length)]
[ "def", "chop_sequence", "(", "sequence", ",", "limit_length", ")", ":", "return", "[", "sequence", "[", "i", ":", "i", "+", "limit_length", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "sequence", ")", ",", "limit_length", ")", "]" ]
Input sequence is divided into smaller non-overlapping sequences of the set length.
[ "Input", "sequence", "is", "divided", "into", "smaller", "non", "-", "overlapping", "sequences", "of", "the", "set", "length", "." ]
python
train
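A quick self-contained check of the chunking behavior described above; the function body is restated so the snippet runs on its own, and the sample sequence is made up.

def chop_sequence(sequence, limit_length):
    # Non-overlapping slices of at most limit_length characters each.
    return [sequence[i:i + limit_length] for i in range(0, len(sequence), limit_length)]

print(chop_sequence("ACGTACGTAC", 4))  # ['ACGT', 'ACGT', 'AC'] -- last chunk may be shorter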
tanghaibao/jcvi
jcvi/formats/gff.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L1174-L1308
def chain(args):
    """
    %prog chain gffile > chained.gff

    Fill in parent features by chaining child features and return extent of
    the child coordinates.
    """
    valid_merge_op = ('sum', 'min', 'max', 'mean', 'collapse')

    p = OptionParser(chain.__doc__)
    p.add_option("--key", dest="attrib_key", default=None,
                 help="Attribute to use as `key` for chaining operation")
    p.add_option("--chain_ftype", default="cDNA_match",
                 help="GFF feature type to use for chaining operation")
    p.add_option("--parent_ftype", default=None,
                 help="GFF feature type to use for the chained coordinates")
    p.add_option("--break", dest="break_chain", action="store_true",
                 help="Break long chains which are non-contiguous")
    p.add_option("--transfer_attrib", dest="attrib_list",
                 help="Attributes to transfer to parent feature; accepts comma" +
                      " separated list of attribute names [default: %default]")
    p.add_option("--transfer_score", dest="score_merge_op", choices=valid_merge_op,
                 help="Transfer value stored in score field to parent feature." +
                      " Score is reported based on chosen operation")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    attrib_key = opts.attrib_key
    attrib_list = opts.attrib_list
    score_merge_op = opts.score_merge_op
    break_chain = opts.break_chain

    chain_ftype = opts.chain_ftype
    parent_ftype = opts.parent_ftype if opts.parent_ftype else chain_ftype

    gffdict = {}
    fw = must_open(opts.outfile, "w")
    gff = Gff(gffile)
    if break_chain:
        ctr, prev_gid = dict(), None
    for g in gff:
        if g.type != chain_ftype:
            print(g, file=fw)
            continue

        id = g.accn
        gid = id
        if attrib_key:
            assert attrib_key in g.attributes.keys(), \
                "Attribute `{0}` not present in GFF3".format(attrib_key)
            gid = g.get_attr(attrib_key)
        curr_gid = gid
        if break_chain:
            if prev_gid != curr_gid:
                if curr_gid not in ctr:
                    ctr[curr_gid] = 0
                else:
                    ctr[curr_gid] += 1
            gid = "{0}:{1}".format(gid, ctr[curr_gid])
        gkey = (g.seqid, gid)
        if gkey not in gffdict:
            gffdict[gkey] = {
                'seqid': g.seqid,
                'source': g.source,
                'strand': g.strand,
                'type': parent_ftype,
                'coords': [],
                'children': [],
                'score': [],
                'attrs': DefaultOrderedDict(set)
            }
            gffdict[gkey]['attrs']['ID'].add(gid)

        if attrib_list:
            for a in attrib_list.split(","):
                if a in g.attributes:
                    [gffdict[gkey]['attrs'][a].add(x) for x in g.attributes[a]]
                    del g.attributes[a]

        if break_chain:
            _attrib = "Alias" if attrib_list and ("Name" not in attrib_list) else "Name"
            gffdict[gkey]['attrs'][_attrib].add(curr_gid)

        gffdict[gkey]['coords'].append((g.start, g.end))
        if score_merge_op:
            if is_number(g.score):
                gffdict[gkey]['score'].append(float(g.score))
                g.score = "."

        g.attributes["Parent"] = [gid]
        g.attributes["ID"] = ["{0}-{1}".format(gid, len(gffdict[gkey]['children']) + 1)]
        g.type = valid_gff_parent_child[g.type]
        g.update_attributes()
        gffdict[gkey]['children'].append(g)
        if break_chain:
            prev_gid = curr_gid

    for gkey, v in sorted(gffdict.items()):
        gseqid, key = gkey
        seqid = v['seqid']
        source = v['source']
        type = v['type']
        strand = v['strand']
        start, stop = range_minmax(gffdict[gkey]['coords'])

        score = "."
        if score_merge_op:
            v['score'].sort()
            if score_merge_op == "sum":
                score = sum(v['score'])
            elif score_merge_op == "min":
                score = min(v['score'])
            elif score_merge_op == "max":
                score = max(v['score'])
            elif score_merge_op == "mean":
                score = sum(v['score'], 0.0) / len(v['score'])
            elif score_merge_op == "collapse":
                score = ",".join((str(x) for x in v['score']))

        g = GffLine("\t".join(str(x) for x in [seqid, source, type, start, stop,
                                               score, strand, ".", None]))
        g.attributes = v['attrs']
        g.update_attributes()
        print(g, file=fw)

        for child in gffdict[gkey]['children']:
            print(child, file=fw)

    fw.close()
[ "def", "chain", "(", "args", ")", ":", "valid_merge_op", "=", "(", "'sum'", ",", "'min'", ",", "'max'", ",", "'mean'", ",", "'collapse'", ")", "p", "=", "OptionParser", "(", "chain", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--key\"", ",", ...
%prog chain gffile > chained.gff

Fill in parent features by chaining child features and return extent of
the child coordinates.
[ "%prog", "chain", "gffile", ">", "chained", ".", "gff" ]
python
train
FujiMakoto/IPS-Vagrant
ips_vagrant/common/__init__.py
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/common/__init__.py#L12-L19
def config():
    """
    Load system configuration
    @rtype: ConfigParser
    """
    cfg = ConfigParser()
    cfg.read(os.path.join(os.path.dirname(os.path.realpath(ips_vagrant.__file__)), 'config/ipsv.conf'))
    return cfg
[ "def", "config", "(", ")", ":", "cfg", "=", "ConfigParser", "(", ")", "cfg", ".", "read", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "ips_vagrant", ".", "__file__", "...
Load system configuration

@rtype: ConfigParser
[ "Load", "system", "configuration" ]
python
train
KeplerGO/K2fov
K2fov/fov.py
https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/fov.py#L291-L300
def colRowIsOnSciencePixelList(self, col, row, padding=DEFAULT_PADDING):
    """similar to colRowIsOnSciencePixel() but takes lists as input"""
    out = np.ones(len(col), dtype=bool)
    col_arr = np.array(col)
    row_arr = np.array(row)

    mask = np.bitwise_or(col_arr < 12. - padding, col_arr > 1111 + padding)
    out[mask] = False

    mask = np.bitwise_or(row_arr < 20. - padding, row_arr > 1043 + padding)
    out[mask] = False
    return out
[ "def", "colRowIsOnSciencePixelList", "(", "self", ",", "col", ",", "row", ",", "padding", "=", "DEFAULT_PADDING", ")", ":", "out", "=", "np", ".", "ones", "(", "len", "(", "col", ")", ",", "dtype", "=", "bool", ")", "col_arr", "=", "np", ".", "array"...
similar to colRowIsOnSciencePixel() but takes lists as input
[ "similar", "to", "colRowIsOnSciencePixel", "()", "but", "takes", "lists", "as", "input" ]
python
train
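A standalone sketch of the same bounds check, assuming the K2 science-pixel limits used above (columns 12-1111, rows 20-1043) and a zero default padding; the real module defines its own DEFAULT_PADDING value.

import numpy as np

DEFAULT_PADDING = 0  # assumed here; K2fov defines its own default

def col_row_on_science_pixels(col, row, padding=DEFAULT_PADDING):
    col_arr = np.asarray(col, dtype=float)
    row_arr = np.asarray(row, dtype=float)
    out = np.ones(len(col_arr), dtype=bool)
    # Mark positions outside the padded column/row ranges as off-silicon.
    out[(col_arr < 12. - padding) | (col_arr > 1111 + padding)] = False
    out[(row_arr < 20. - padding) | (row_arr > 1043 + padding)] = False
    return out

print(col_row_on_science_pixels([5, 600], [30, 30]))  # [False  True]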
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L218-L226
def pad(self, message, k=None):
    '''Automatically left-pad a message with null bytes if it is too small, or leave it unchanged if not necessary. This makes it possible to keep track of the padding and strip the null bytes after decoding reliably with binary data. Equivalent to shortening (shortened reed-solomon code).'''
    if not k:
        k = self.k
    pad = None
    if len(message) < k:
        #pad = "\x00" * (k-len(message))
        pad = bytearray(k - len(message))
        message = pad + message
    return [message, pad]
[ "def", "pad", "(", "self", ",", "message", ",", "k", "=", "None", ")", ":", "if", "not", "k", ":", "k", "=", "self", ".", "k", "pad", "=", "None", "if", "len", "(", "message", ")", "<", "k", ":", "#pad = \"\\x00\" * (k-len(message))", "pad", "=", ...
Automatically left-pad a message with null bytes if it is too small, or leave it unchanged if not necessary. This makes it possible to keep track of the padding and strip the null bytes after decoding reliably with binary data. Equivalent to shortening (shortened reed-solomon code).
[ "Automatically", "left", "pad", "with", "null", "bytes", "a", "message", "if", "too", "small", "or", "leave", "unchanged", "if", "not", "necessary", ".", "This", "allows", "to", "keep", "track", "of", "padding", "and", "strip", "the", "null", "bytes", "aft...
python
train
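A standalone sketch of the shortening trick described above, with self.k replaced by an explicit argument so the snippet runs without the class; it returns a tuple rather than the list the original uses.

def pad(message, k):
    # Left-pad with null bytes up to length k; return the padded message
    # together with the pad so it can be stripped again after decoding.
    padding = None
    if len(message) < k:
        padding = bytearray(k - len(message))
        message = padding + message
    return message, padding

msg, padding = pad(bytearray(b"abc"), 5)
print(bytes(msg))      # b'\x00\x00abc'
print(bytes(padding))  # b'\x00\x00'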
praekeltfoundation/marathon-acme
marathon_acme/vault_store.py
https://github.com/praekeltfoundation/marathon-acme/blob/b1b71e3dde0ba30e575089280658bd32890e3325/marathon_acme/vault_store.py#L20-L42
def sort_pem_objects(pem_objects):
    """
    Given a list of pem objects, sort the objects into the private key, leaf
    certificate, and list of CA certificates in the trust chain. This function
    assumes that the list of pem objects will contain exactly one private key
    and exactly one leaf certificate and that only key and certificate type
    objects are provided.
    """
    keys, certs, ca_certs = [], [], []
    for pem_object in pem_objects:
        if isinstance(pem_object, pem.Key):
            keys.append(pem_object)
        else:
            # This assumes all pem objects provided are either of type pem.Key
            # or pem.Certificate. Technically, there are CSR and CRL types, but
            # we should never be passed those.
            if _is_ca(pem_object):
                ca_certs.append(pem_object)
            else:
                certs.append(pem_object)

    [key], [cert] = keys, certs

    return key, cert, ca_certs
[ "def", "sort_pem_objects", "(", "pem_objects", ")", ":", "keys", ",", "certs", ",", "ca_certs", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "pem_object", "in", "pem_objects", ":", "if", "isinstance", "(", "pem_object", ",", "pem", ".", "Key", ...
Given a list of pem objects, sort the objects into the private key, leaf certificate, and list of CA certificates in the trust chain. This function assumes that the list of pem objects will contain exactly one private key and exactly one leaf certificate and that only key and certificate type objects are provided.
[ "Given", "a", "list", "of", "pem", "objects", "sort", "the", "objects", "into", "the", "private", "key", "leaf", "certificate", "and", "list", "of", "CA", "certificates", "in", "the", "trust", "chain", ".", "This", "function", "assumes", "that", "the", "li...
python
valid
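A standalone sketch of the same sorting pass; it uses the real pem package's Key type but takes the CA check as a parameter, since _is_ca is private to marathon_acme.

import pem

def sort_pem_objects(pem_objects, is_ca):
    # Split a parsed PEM bundle into (private key, leaf cert, CA chain);
    # assumes exactly one key and one leaf certificate are present.
    keys, certs, ca_certs = [], [], []
    for obj in pem_objects:
        if isinstance(obj, pem.Key):
            keys.append(obj)
        elif is_ca(obj):
            ca_certs.append(obj)
        else:
            certs.append(obj)
    [key], [cert] = keys, certs  # unpacking enforces the one-of-each assumption
    return key, cert, ca_certs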
evhub/coconut
coconut/compiler/util.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/util.py#L468-L477
def collapse_indents(indentation):
    """Removes all openindent-closeindent pairs."""
    change_in_level = ind_change(indentation)
    if change_in_level == 0:
        indents = ""
    elif change_in_level < 0:
        indents = closeindent * (-change_in_level)
    else:
        indents = openindent * change_in_level
    return indentation.replace(openindent, "").replace(closeindent, "") + indents
[ "def", "collapse_indents", "(", "indentation", ")", ":", "change_in_level", "=", "ind_change", "(", "indentation", ")", "if", "change_in_level", "==", "0", ":", "indents", "=", "\"\"", "elif", "change_in_level", "<", "0", ":", "indents", "=", "closeindent", "*...
Removes all openindent-closeindent pairs.
[ "Removes", "all", "openindent", "-", "closeindent", "pairs", "." ]
python
train
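A self-contained sketch of the cancellation step: matched pairs are stripped and only the net indentation change survives at the end. The sentinel characters are stand-ins, since the real openindent/closeindent values live elsewhere in the Coconut compiler.

OPENINDENT, CLOSEINDENT = "\x04", "\x05"  # hypothetical sentinel characters

def ind_change(s):
    # Net indentation level encoded in the string.
    return s.count(OPENINDENT) - s.count(CLOSEINDENT)

def collapse_indents(indentation):
    # Drop matched open/close pairs, keeping only the net change at the end.
    change = ind_change(indentation)
    indents = OPENINDENT * change if change > 0 else CLOSEINDENT * (-change)
    return indentation.replace(OPENINDENT, "").replace(CLOSEINDENT, "") + indents

print(len(collapse_indents(OPENINDENT + CLOSEINDENT + OPENINDENT)))  # 1: one net open survives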
snobear/ezmomi
ezmomi/ezmomi.py
https://github.com/snobear/ezmomi/blob/c98e26dc2d32cd5c92134fdcbcb8353540ac0208/ezmomi/ezmomi.py#L176-L450
def clone(self):
    """
    Command Section: clone
    Clone a VM from a template
    """
    self.config['hostname'] = self.config['hostname'].lower()
    self.config['mem'] = int(self.config['mem'] * 1024)  # convert GB to MB

    print("Cloning %s to new host %s with %sMB RAM..." % (
        self.config['template'],
        self.config['hostname'],
        self.config['mem']
    ))

    # initialize a list to hold our network settings
    ip_settings = list()

    # Get network settings for each IP
    for key, ip_string in enumerate(self.config['ips']):

        # convert ip from string to the 'IPAddress' type
        ip = IPAddress(ip_string)

        # determine network this IP is in
        for network in self.config['networks']:
            if ip in IPNetwork(network):
                self.config['networks'][network]['ip'] = ip
                ipnet = IPNetwork(network)
                self.config['networks'][network]['subnet_mask'] = str(
                    ipnet.netmask
                )
                ip_settings.append(self.config['networks'][network])

        # throw an error if we couldn't find a network for this ip
        if not any(d['ip'] == ip for d in ip_settings):
            print("I don't know what network %s is in. You can supply "
                  "settings for this network in config.yml." % ip_string)
            sys.exit(1)

    # network to place new VM in
    self.get_obj([vim.Network], ip_settings[0]['network'])
    datacenter = self.get_obj([vim.Datacenter],
                              ip_settings[0]['datacenter']
                              )

    # get the folder where VMs are kept for this datacenter
    if self.config['destination_folder']:
        destfolder = self.content.searchIndex.FindByInventoryPath(
            self.config['destination_folder']
        )
    else:
        destfolder = datacenter.vmFolder

    cluster = self.get_obj([vim.ClusterComputeResource],
                           ip_settings[0]['cluster']
                           )

    resource_pool_str = self.config['resource_pool']

    # resource_pool setting in config file takes priority over the
    # default 'Resources' pool
    if resource_pool_str == 'Resources' \
            and ('resource_pool' in ip_settings[key]):
        resource_pool_str = ip_settings[key]['resource_pool']

    resource_pool = self.get_resource_pool(cluster, resource_pool_str)

    host_system = self.config['host']
    if host_system != "":
        host_system = self.get_obj([vim.HostSystem],
                                   self.config['host']
                                   )

    if self.debug:
        self.print_debug(
            "Destination cluster",
            cluster
        )
        self.print_debug(
            "Resource pool",
            resource_pool
        )

    if resource_pool is None:
        # use default resource pool of target cluster
        resource_pool = cluster.resourcePool

    datastore = None
    if self.config['datastore']:
        datastore = self.get_obj(
            [vim.Datastore], self.config['datastore'])
    elif 'datastore' in ip_settings[0]:
        datastore = self.get_obj(
            [vim.Datastore], ip_settings[0]['datastore'])
    if datastore is None:
        print("Error: Unable to find Datastore '%s'"
              % ip_settings[0]['datastore'])
        sys.exit(1)

    if self.config['template_folder']:
        template_vm = self.get_vm_failfast(
            self.config['template'],
            False,
            'Template VM',
            path=self.config['template_folder']
        )
    else:
        template_vm = self.get_vm_failfast(
            self.config['template'],
            False,
            'Template VM'
        )

    # Relocation spec
    relospec = vim.vm.RelocateSpec()
    relospec.datastore = datastore

    if host_system:
        relospec.host = host_system

    if resource_pool:
        relospec.pool = resource_pool

    # Networking self.config for VM and guest OS
    devices = []
    adaptermaps = []

    # add existing NIC devices from template to our list of NICs
    # to be created
    try:
        for device in template_vm.config.hardware.device:
            if hasattr(device, 'addressType'):
                # this is a VirtualEthernetCard, so we'll delete it
                nic = vim.vm.device.VirtualDeviceSpec()
                nic.operation = \
                    vim.vm.device.VirtualDeviceSpec.Operation.remove
                nic.device = device
                devices.append(nic)
    except:
        # not the most graceful handling, but unable to reproduce
        # user's issues in #57 at this time.
        pass

    # create a Network device for each static IP
    for key, ip in enumerate(ip_settings):
        # VM device
        nic = vim.vm.device.VirtualDeviceSpec()
        # or edit if a device exists
        nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        nic.device = vim.vm.device.VirtualVmxnet3()
        nic.device.wakeOnLanEnabled = True
        nic.device.addressType = 'assigned'
        # 4000 seems to be the value to use for a vmxnet3 device
        nic.device.key = 4000
        nic.device.deviceInfo = vim.Description()
        nic.device.deviceInfo.label = 'Network Adapter %s' % (key + 1)
        if 'dvportgroup' in ip_settings[key]:
            dvpg = ip_settings[key]['dvportgroup']
            nic.device.deviceInfo.summary = dvpg
            pg_obj = self.get_obj([vim.dvs.DistributedVirtualPortgroup], dvpg)  # noqa
            dvs_port_connection = vim.dvs.PortConnection()
            dvs_port_connection.portgroupKey = pg_obj.key
            dvs_port_connection.switchUuid = (
                pg_obj.config.distributedVirtualSwitch.uuid
            )

            # did it to get pep8
            e_nic = vim.vm.device.VirtualEthernetCard
            nic.device.backing = (
                e_nic.DistributedVirtualPortBackingInfo()
            )
            nic.device.backing.port = dvs_port_connection
        else:
            nic.device.deviceInfo.summary = ip_settings[key]['network']
            nic.device.backing = (
                vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
            )
            nic.device.backing.network = (
                self.get_obj([vim.Network], ip_settings[key]['network'])
            )
            nic.device.backing.deviceName = ip_settings[key]['network']
            nic.device.backing.useAutoDetect = False
        nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nic.device.connectable.startConnected = True
        nic.device.connectable.allowGuestControl = True
        devices.append(nic)

        if 'customspecname' in ip_settings[key]:
            custom_spec_name = ip_settings[key]['customspecname']
            customspec = (
                self.get_customization_settings(custom_spec_name)
            )
            guest_map = customspec.nicSettingMap[0]
        else:
            customspec = vim.vm.customization.Specification()
            # guest NIC settings, i.e. 'adapter map'
            guest_map = vim.vm.customization.AdapterMapping()
            guest_map.adapter = vim.vm.customization.IPSettings()
            guest_map.adapter.ip = vim.vm.customization.FixedIp()
            guest_map.adapter.ip.ipAddress = str(ip_settings[key]['ip'])

        if 'subnet_mask' in ip_settings[key]:
            guest_map.adapter.subnetMask = (
                str(ip_settings[key]['subnet_mask'])
            )

        if 'gateway' in ip_settings[key]:
            guest_map.adapter.gateway = ip_settings[key]['gateway']

        if self.config['domain']:
            guest_map.adapter.dnsDomain = self.config['domain']

        adaptermaps.append(guest_map)

    # DNS settings
    if 'dns_servers' in self.config:
        globalip = vim.vm.customization.GlobalIPSettings()
        globalip.dnsServerList = self.config['dns_servers']
        globalip.dnsSuffixList = self.config['domain']
        customspec.globalIPSettings = globalip

    # Hostname settings
    ident = vim.vm.customization.LinuxPrep()
    ident.domain = self.config['domain']
    ident.hostName = vim.vm.customization.FixedName()
    ident.hostName.name = self.config['hostname']

    customspec.nicSettingMap = adaptermaps
    customspec.identity = ident

    # VM config spec
    vmconf = vim.vm.ConfigSpec()
    vmconf.numCPUs = self.config['cpus']
    vmconf.memoryMB = self.config['mem']
    vmconf.cpuHotAddEnabled = True
    vmconf.memoryHotAddEnabled = True
    vmconf.deviceChange = devices

    # Clone spec
    clonespec = vim.vm.CloneSpec()
    clonespec.location = relospec
    clonespec.config = vmconf
    clonespec.customization = customspec
    clonespec.powerOn = True
    clonespec.template = False

    self.addDisks(template_vm, clonespec)

    if self.debug:
        self.print_debug("CloneSpec", clonespec)

    # fire the clone task
    tasks = [template_vm.Clone(folder=destfolder,
                               name=self.config['hostname'],
                               spec=clonespec
                               )]
    result = self.WaitForTasks(tasks)

    if self.config['post_clone_cmd']:
        try:
            # helper env variables
            os.environ['EZMOMI_CLONE_HOSTNAME'] = self.config['hostname']
            print("Running --post-clone-cmd %s"
                  % self.config['post_clone_cmd'])
            os.system(self.config['post_clone_cmd'])
        except Exception as e:
            print("Error running post-clone command. Exception: %s" % e)
            pass

    # send notification email
    if self.config['mail']:
        self.send_email()
[ "def", "clone", "(", "self", ")", ":", "self", ".", "config", "[", "'hostname'", "]", "=", "self", ".", "config", "[", "'hostname'", "]", ".", "lower", "(", ")", "self", ".", "config", "[", "'mem'", "]", "=", "int", "(", "self", ".", "config", "[...
Command Section: clone
Clone a VM from a template
[ "Command", "Section", ":", "clone", "Clone", "a", "VM", "from", "a", "template" ]
python
train
programa-stic/barf-project
barf/barf.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/barf.py#L174-L200
def _setup_analysis_modules(self):
    """Set up analysis modules.
    """
    # Basic block.
    self.bb_builder = CFGRecoverer(RecursiveDescent(self.disassembler, self.text_section, self.ir_translator, self.arch_info))

    # Code analyzer.
    self.code_analyzer = None
    if self.smt_translator:
        self.code_analyzer = CodeAnalyzer(self.smt_solver, self.smt_translator, self.arch_info)

    # Gadgets classifier.
    self.gadget_classifier = GadgetClassifier(self.ir_emulator, self.arch_info)

    # Gadgets finder.
    self.gadget_finder = GadgetFinder(self.disassembler, self.text_section, self.ir_translator, self.binary.architecture, self.binary.architecture_mode)

    # Gadget verifier.
    self.gadget_verifier = None
    if self.code_analyzer:
        self.gadget_verifier = GadgetVerifier(self.code_analyzer, self.arch_info)

    self.emulator = Emulator(self.arch_info, self.ir_emulator, self.ir_translator, self.disassembler)
[ "def", "_setup_analysis_modules", "(", "self", ")", ":", "# Basic block.", "self", ".", "bb_builder", "=", "CFGRecoverer", "(", "RecursiveDescent", "(", "self", ".", "disassembler", ",", "self", ".", "text_section", ",", "self", ".", "ir_translator", ",", "self"...
Set up analysis modules.
[ "Set", "up", "analysis", "modules", "." ]
python
train
jayclassless/tidypy
src/tidypy/collector.py
https://github.com/jayclassless/tidypy/blob/3c3497ca377fbbe937103b77b02b326c860c748f/src/tidypy/collector.py#L46-L58
def add_issues(self, issues):
    """
    Adds an issue to the collection.

    :param issues: the issue(s) to add
    :type issues: tidypy.Issue or list(tidypy.Issue)
    """
    if not isinstance(issues, (list, tuple)):
        issues = [issues]
    with self._lock:
        self._all_issues.extend(issues)
        self._cleaned_issues = None
[ "def", "add_issues", "(", "self", ",", "issues", ")", ":", "if", "not", "isinstance", "(", "issues", ",", "(", "list", ",", "tuple", ")", ")", ":", "issues", "=", "[", "issues", "]", "with", "self", ".", "_lock", ":", "self", ".", "_all_issues", "....
Adds an issue to the collection.

:param issues: the issue(s) to add
:type issues: tidypy.Issue or list(tidypy.Issue)
[ "Adds", "an", "issue", "to", "the", "collection", "." ]
python
valid
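A minimal standalone version of the same pattern, normalize the input to a list, extend under a lock, and invalidate the cached view; the class shape is simplified from tidypy's collector.

import threading

class IssueCollector:
    # Simplified sketch of the collector locking pattern above.
    def __init__(self):
        self._lock = threading.Lock()
        self._all_issues = []
        self._cleaned_issues = None  # cached, de-duplicated view

    def add_issues(self, issues):
        if not isinstance(issues, (list, tuple)):
            issues = [issues]
        with self._lock:
            self._all_issues.extend(issues)
            self._cleaned_issues = None  # force recompute on next read

c = IssueCollector()
c.add_issues("unused-import")   # a single issue is wrapped in a list
c.add_issues(["E501", "W605"])
print(c._all_issues)            # ['unused-import', 'E501', 'W605']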
ajdavis/mongo-mockup-db
mockupdb/__init__.py
https://github.com/ajdavis/mongo-mockup-db/blob/ff8a3f793def59e9037397ef60607fbda6949dac/mockupdb/__init__.py#L473-L481
def hangup(self):
    """Close the connection.

    Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
    """
    if self._server:
        self._server._log('\t%d\thangup' % self.client_port)
    self._client.shutdown(socket.SHUT_RDWR)
    return True
[ "def", "hangup", "(", "self", ")", ":", "if", "self", ".", "_server", ":", "self", ".", "_server", ".", "_log", "(", "'\\t%d\\thangup'", "%", "self", ".", "client_port", ")", "self", ".", "_client", ".", "shutdown", "(", "socket", ".", "SHUT_RDWR", ")"...
Close the connection. Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
[ "Close", "the", "connection", "." ]
python
train
tkf/python-epc
epc/utils.py
https://github.com/tkf/python-epc/blob/f3673ae5c35f20a0f71546ab34c28e3dde3595c1/epc/utils.py#L121-L131
def callwith(context_manager):
    """
    A decorator to wrap execution of function with a context manager.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            with context_manager:
                return func(*args, **kwds)
        return wrapper
    return decorator
[ "def", "callwith", "(", "context_manager", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "with", "context_manager", ":", "re...
A decorator to wrap execution of function with a context manager.
[ "A", "decorator", "to", "wrap", "execution", "of", "function", "with", "a", "context", "manager", "." ]
python
train
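A runnable example of the decorator above: every call to the wrapped function executes while holding a lock. Note that reusing one context manager across calls only works for re-enterable managers such as Lock, which is an assumption of this usage.

import functools
import threading

def callwith(context_manager):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            with context_manager:
                return func(*args, **kwds)
        return wrapper
    return decorator

lock = threading.Lock()

@callwith(lock)
def bump(counter, key):
    # Runs with the lock held, so concurrent bumps cannot interleave.
    counter[key] = counter.get(key, 0) + 1

counts = {}
bump(counts, "x")
bump(counts, "x")
print(counts)  # {'x': 2}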
lepture/flask-oauthlib
flask_oauthlib/provider/oauth2.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/provider/oauth2.py#L663-L691
def client_authentication_required(self, request, *args, **kwargs):
    """Determine if client authentication is required for current request.

    According to the rfc6749, client authentication is required in the
    following cases:

    Resource Owner Password Credentials Grant: see `Section 4.3.2`_.
    Authorization Code Grant: see `Section 4.1.3`_.
    Refresh Token Grant: see `Section 6`_.

    .. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
    .. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
    .. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
    """
    def is_confidential(client):
        if hasattr(client, 'is_confidential'):
            return client.is_confidential
        client_type = getattr(client, 'client_type', None)
        if client_type:
            return client_type == 'confidential'
        return True

    grant_types = ('password', 'authorization_code', 'refresh_token')
    client_id, _ = self._get_client_creds_from_request(request)
    if client_id and request.grant_type in grant_types:
        client = self._clientgetter(client_id)
        if client:
            return is_confidential(client)
    return False
[ "def", "client_authentication_required", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "is_confidential", "(", "client", ")", ":", "if", "hasattr", "(", "client", ",", "'is_confidential'", ")", ":", "return", "cli...
Determine if client authentication is required for current request.

According to the rfc6749, client authentication is required in the
following cases:

Resource Owner Password Credentials Grant: see `Section 4.3.2`_.
Authorization Code Grant: see `Section 4.1.3`_.
Refresh Token Grant: see `Section 6`_.

.. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
[ "Determine", "if", "client", "authentication", "is", "required", "for", "current", "request", "." ]
python
test
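The confidentiality check above is easy to exercise on its own; here it is as a standalone function, with made-up client stand-ins.

def is_confidential(client):
    # Prefer an explicit is_confidential attribute, fall back to
    # client_type, and default to confidential when neither is set.
    if hasattr(client, 'is_confidential'):
        return client.is_confidential
    client_type = getattr(client, 'client_type', None)
    if client_type:
        return client_type == 'confidential'
    return True

class PublicClient:
    client_type = 'public'

class LegacyClient:
    pass

print(is_confidential(PublicClient()))  # False
print(is_confidential(LegacyClient()))  # True (safe default)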
aboSamoor/polyglot
polyglot/mapping/embeddings.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/mapping/embeddings.py#L295-L310
def load(fname):
    """Load an embedding dump generated by `save`"""

    content = _open(fname).read()
    if PY2:
        state = pickle.loads(content)
    else:
        state = pickle.loads(content, encoding='latin1')
    voc, vec = state
    if len(voc) == 2:
        words, counts = voc
        word_count = dict(zip(words, counts))
        vocab = CountedVocabulary(word_count=word_count)
    else:
        vocab = OrderedVocabulary(voc)
    return Embedding(vocabulary=vocab, vectors=vec)
[ "def", "load", "(", "fname", ")", ":", "content", "=", "_open", "(", "fname", ")", ".", "read", "(", ")", "if", "PY2", ":", "state", "=", "pickle", ".", "loads", "(", "content", ")", "else", ":", "state", "=", "pickle", ".", "loads", "(", "conten...
Load an embedding dump generated by `save`
[ "Load", "an", "embedding", "dump", "generated", "by", "save" ]
python
train
quantumlib/Cirq
dev_tools/shell_tools.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/dev_tools/shell_tools.py#L199-L251
def run_shell(cmd: str,
              out: Optional[Union[TeeCapture, IO[str]]] = sys.stdout,
              err: Optional[Union[TeeCapture, IO[str]]] = sys.stderr,
              raise_on_fail: bool = True,
              log_run_to_stderr: bool = True,
              **kwargs
              ) -> CommandOutput:
    """Invokes a shell command and waits for it to finish.

    Args:
        cmd: The command line string to execute, e.g. "echo dog | cat > file".
        out: Where to write the process' stdout. Defaults to sys.stdout. Can
            be anything accepted by print's 'file' parameter, or None if the
            output should be dropped, or a TeeCapture instance. If a
            TeeCapture instance is given, the first element of the returned
            tuple will be the captured output.
        err: Where to write the process' stderr. Defaults to sys.stderr. Can
            be anything accepted by print's 'file' parameter, or None if the
            output should be dropped, or a TeeCapture instance. If a
            TeeCapture instance is given, the second element of the returned
            tuple will be the captured error output.
        raise_on_fail: If the process returns a non-zero error code and this
            flag is set, a CalledProcessError will be raised. Otherwise the
            return code is the third element of the returned tuple.
        log_run_to_stderr: Determines whether the fact that this shell command
            was executed is logged to sys.stderr or not.
        **kwargs: Extra arguments for asyncio.create_subprocess_shell, such as
            a cwd (current working directory) argument.

    Returns:
        A (captured output, captured error output, return code) triplet. The
        captured outputs will be None if the out or err parameters were not
        set to an instance of TeeCapture.

    Raises:
        subprocess.CalledProcessError: The process returned a non-zero error
            code and raise_on_fail was set.
    """
    if log_run_to_stderr:
        print('shell:', cmd, file=sys.stderr)
    result = asyncio.get_event_loop().run_until_complete(
        _async_wait_for_process(
            asyncio.create_subprocess_shell(
                cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                **kwargs),
            out,
            err))
    if raise_on_fail and result[2]:
        raise subprocess.CalledProcessError(result[2], cmd)
    return result
[ "def", "run_shell", "(", "cmd", ":", "str", ",", "out", ":", "Optional", "[", "Union", "[", "TeeCapture", ",", "IO", "[", "str", "]", "]", "]", "=", "sys", ".", "stdout", ",", "err", ":", "Optional", "[", "Union", "[", "TeeCapture", ",", "IO", "[...
Invokes a shell command and waits for it to finish.

Args:
    cmd: The command line string to execute, e.g. "echo dog | cat > file".
    out: Where to write the process' stdout. Defaults to sys.stdout. Can be
        anything accepted by print's 'file' parameter, or None if the output
        should be dropped, or a TeeCapture instance. If a TeeCapture instance
        is given, the first element of the returned tuple will be the
        captured output.
    err: Where to write the process' stderr. Defaults to sys.stderr. Can be
        anything accepted by print's 'file' parameter, or None if the output
        should be dropped, or a TeeCapture instance. If a TeeCapture instance
        is given, the second element of the returned tuple will be the
        captured error output.
    raise_on_fail: If the process returns a non-zero error code and this flag
        is set, a CalledProcessError will be raised. Otherwise the return
        code is the third element of the returned tuple.
    log_run_to_stderr: Determines whether the fact that this shell command
        was executed is logged to sys.stderr or not.
    **kwargs: Extra arguments for asyncio.create_subprocess_shell, such as a
        cwd (current working directory) argument.

Returns:
    A (captured output, captured error output, return code) triplet. The
    captured outputs will be None if the out or err parameters were not set
    to an instance of TeeCapture.

Raises:
    subprocess.CalledProcessError: The process returned a non-zero error
        code and raise_on_fail was set.
[ "Invokes", "a", "shell", "command", "and", "waits", "for", "it", "to", "finish", "." ]
python
train
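An illustrative call, assuming Cirq's dev_tools package is importable; the command and the capture setup are hypothetical, including the no-argument TeeCapture construction.

# Hypothetical usage of run_shell; requires cirq's dev_tools on sys.path.
from dev_tools.shell_tools import TeeCapture, run_shell

out, err, returncode = run_shell(
    'echo hello',
    out=TeeCapture(),     # capture stdout while still echoing it
    raise_on_fail=False)  # inspect returncode instead of raising
print(returncode, out)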
FreshXOpenSource/wallaby-frontend-qt
wallaby/frontends/qt/reactor/qt4reactor.py
https://github.com/FreshXOpenSource/wallaby-frontend-qt/blob/eee70d0ec4ce34827f62a1654e28dbff8a8afb1a/wallaby/frontends/qt/reactor/qt4reactor.py#L164-L168
def addWriter(self, writer):
    """
    Add a FileDescriptor for notification of data available to write.
    """
    self._add(writer, self._writes, QtCore.QSocketNotifier.Write)
[ "def", "addWriter", "(", "self", ",", "writer", ")", ":", "self", ".", "_add", "(", "writer", ",", "self", ".", "_writes", ",", "QtCore", ".", "QSocketNotifier", ".", "Write", ")" ]
Add a FileDescriptor for notification of data available to write.
[ "Add", "a", "FileDescriptor", "for", "notification", "of", "data", "available", "to", "write", "." ]
python
train
not-na/peng3d
peng3d/actor/__init__.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/actor/__init__.py#L113-L127
def setAnimation(self, animation, transition=None, force=False):
    """
    Sets the animation the model of this actor should show.

    ``animation`` is the name of the animation to switch to.

    ``transition`` can be used to override the transition between the animations.

    ``force`` can be used to force reset the animation even if it is already running.

    If there is no model set for this actor, a :py:exc:`RuntimeError` will be raised.
    """
    if self.model is None:
        raise RuntimeError("Can only set animation if a model is set")
    self.model.setAnimation(self, animation, transition, force)
[ "def", "setAnimation", "(", "self", ",", "animation", ",", "transition", "=", "None", ",", "force", "=", "False", ")", ":", "if", "self", ".", "model", "is", "None", ":", "raise", "RuntimeError", "(", "\"Can only set animation if a model is set\"", ")", "self"...
Sets the animation the model of this actor should show. ``animation`` is the name of the animation to switch to. ``transition`` can be used to override the transition between the animations. ``force`` can be used to force reset the animation even if it is already running. If there is no model set for this actor, a :py:exc:`RuntimeError` will be raised.
[ "Sets", "the", "animation", "the", "model", "of", "this", "actor", "should", "show", ".", "animation", "is", "the", "name", "of", "the", "animation", "to", "switch", "to", ".", "transition", "can", "be", "used", "to", "override", "the", "transition", "betw...
python
test
persephone-tools/persephone
persephone/preprocess/elan.py
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L43-L59
def initialize_media_descriptor(self) -> None:
    """
    Returns the media descriptor for the first media descriptor where
    the file can be found.
    """
    for md in self.media_descriptors:
        media_path = self.get_media_path(md)
        if media_path.is_file():
            self.media_descriptor = md
            return

    raise FileNotFoundError(
        """Cannot find media file corresponding to {}.
        Tried looking for the following files: {}.
        """.format(self.eaf_path,
                   [self.get_media_path(md) for md in self.media_descriptors]))
[ "def", "initialize_media_descriptor", "(", "self", ")", "->", "None", ":", "for", "md", "in", "self", ".", "media_descriptors", ":", "media_path", "=", "self", ".", "get_media_path", "(", "md", ")", "if", "media_path", ".", "is_file", "(", ")", ":", "self"...
Returns the media descriptor for the first media descriptor where the file can be found.
[ "Returns", "the", "media", "descriptor", "for", "the", "first", "media", "descriptor", "where", "the", "file", "can", "be", "found", "." ]
python
train
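The lookup logic above, scan the candidates, keep the first that exists on disk, and fail loudly with everything that was tried, can be sketched without the ELAN specifics:

from pathlib import Path

def first_existing_media(candidates):
    # Return the first candidate path that exists as a file;
    # otherwise report every path that was tried.
    tried = [Path(c) for c in candidates]
    for path in tried:
        if path.is_file():
            return path
    raise FileNotFoundError(
        "Cannot find media file. Tried: %s" % [str(p) for p in tried])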
cuihantao/andes
andes/models/line.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/line.py#L521-L537
def get_flow_by_idx(self, idx, bus):
    """Return seriesflow based on the external idx on the `bus` side"""
    P, Q = [], []
    if type(idx) is not list:
        idx = [idx]
    if type(bus) is not list:
        bus = [bus]

    for line_idx, bus_idx in zip(idx, bus):
        line_int = self.uid[line_idx]
        if bus_idx == self.bus1[line_int]:
            P.append(self.P1[line_int])
            Q.append(self.Q1[line_int])
        elif bus_idx == self.bus2[line_int]:
            P.append(self.P2[line_int])
            Q.append(self.Q2[line_int])

    return matrix(P), matrix(Q)
[ "def", "get_flow_by_idx", "(", "self", ",", "idx", ",", "bus", ")", ":", "P", ",", "Q", "=", "[", "]", ",", "[", "]", "if", "type", "(", "idx", ")", "is", "not", "list", ":", "idx", "=", "[", "idx", "]", "if", "type", "(", "bus", ")", "is",...
Return seriesflow based on the external idx on the `bus` side
[ "Return", "seriesflow", "based", "on", "the", "external", "idx", "on", "the", "bus", "side" ]
python
train
GetmeUK/MongoFrames
mongoframes/factory/__init__.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/factory/__init__.py#L39-L50
def assemble(self, blueprint, quota):
    """Assemble a quota of documents"""

    # Reset the blueprint
    blueprint.reset()

    # Assemble the documents
    documents = []
    for i in range(0, int(quota)):
        documents.append(blueprint.assemble())

    return documents
[ "def", "assemble", "(", "self", ",", "blueprint", ",", "quota", ")", ":", "# Reset the blueprint", "blueprint", ".", "reset", "(", ")", "# Assemble the documents", "documents", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "int", "(", "quota", ...
Assemble a quota of documents
[ "Assemble", "a", "quota", "of", "documents" ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/actionreport.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/actionreport.py#L96-L110
def create_action_model(actioncollection):
    """Create and return a new model for the given actioncollection

    :param actioncollection: the action collection that should get a model
    :type actioncollection: :class:`jukeboxcore.action.ActionCollection`
    :returns: the created model
    :rtype: :class:`TreeModel`
    :raises: None
    """
    rootdata = ListItemData(["Name", "Description", "Status", "Message", "Traceback"])
    root = TreeItem(rootdata)
    for au in actioncollection.actions:
        adata = ActionItemData(au)
        TreeItem(adata, parent=root)
    return TreeModel(root)
[ "def", "create_action_model", "(", "actioncollection", ")", ":", "rootdata", "=", "ListItemData", "(", "[", "\"Name\"", ",", "\"Description\"", ",", "\"Status\"", ",", "\"Message\"", ",", "\"Traceback\"", "]", ")", "root", "=", "TreeItem", "(", "rootdata", ")", ...
Create and return a new model for the given actioncollection

:param actioncollection: the action collection that should get a model
:type actioncollection: :class:`jukeboxcore.action.ActionCollection`
:returns: the created model
:rtype: :class:`TreeModel`
:raises: None
[ "Create", "and", "return", "a", "new", "model", "for", "the", "given", "actioncollection" ]
python
train
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L603-L613
def AddConstant(self, other):
    """Computes the Pmf of the sum of a constant and values from self.

    other: a number

    returns: new Pmf
    """
    pmf = Pmf()
    for v1, p1 in self.Items():
        pmf.Set(v1 + other, p1)
    return pmf
[ "def", "AddConstant", "(", "self", ",", "other", ")", ":", "pmf", "=", "Pmf", "(", ")", "for", "v1", ",", "p1", "in", "self", ".", "Items", "(", ")", ":", "pmf", ".", "Set", "(", "v1", "+", "other", ",", "p1", ")", "return", "pmf" ]
Computes the Pmf of the sum of a constant and values from self.

other: a number

returns: new Pmf
[ "Computes", "the", "Pmf", "of", "the", "sum", "of", "a", "constant", "and", "values", "from", "self", "." ]
python
train
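The shift operation above is just a re-keying of the distribution; a dict-based sketch (standing in for the Pmf class) makes that concrete:

def add_constant(pmf, other):
    # Shift every value by `other`; probabilities are unchanged.
    return {v + other: p for v, p in pmf.items()}

die = {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
print(add_constant(die, 10))  # keys 11..16, same probabilities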