repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
cirruscluster/cirruscluster
cirruscluster/core.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/core.py#L618-L634
def __WaitForInstance(instance, desired_state):
    """Block until ``instance`` reaches ``desired_state``.

    Polls the instance every 5 seconds, printing a '.' per poll so the
    caller can see progress. Transient EC2 API errors are logged and the
    poll is retried.

    :param instance: boto EC2 instance object (must support ``update()``,
        ``state`` and ``id``).
    :param desired_state: state string to wait for (e.g. 'running').
    """
    # Parenthesized print works under both Python 2 and Python 3.
    print('Waiting for instance %s to change to %s' % (instance.id,
                                                       desired_state))
    while True:
        try:
            instance.update()
            state = instance.state
            sys.stdout.write('.')
            sys.stdout.flush()
            if state == desired_state:
                break
        except boto_exception.EC2ResponseError as e:
            # Transient API error; log it and keep polling.
            logging.info(e)
        time.sleep(5)
    return
[ "def", "__WaitForInstance", "(", "instance", ",", "desired_state", ")", ":", "print", "'Waiting for instance %s to change to %s'", "%", "(", "instance", ".", "id", ",", "desired_state", ")", "while", "True", ":", "try", ":", "instance", ".", "update", "(", ")", ...
Blocks until instance is in desired_state.
[ "Blocks", "until", "instance", "is", "in", "desired_state", "." ]
python
train
jleclanche/fireplace
fireplace/player.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/player.py#L168-L175
def get_spell_damage(self, amount: int) -> int:
    """
    Returns the amount of damage \a amount will do, taking
    SPELLPOWER and SPELLPOWER_DOUBLE into account.
    """
    boosted = amount + self.spellpower
    # Each stack of SPELLPOWER_DOUBLE doubles the boosted total.
    return boosted << self.controller.spellpower_double
[ "def", "get_spell_damage", "(", "self", ",", "amount", ":", "int", ")", "->", "int", ":", "amount", "+=", "self", ".", "spellpower", "amount", "<<=", "self", ".", "controller", ".", "spellpower_double", "return", "amount" ]
Returns the amount of damage \a amount will do, taking SPELLPOWER and SPELLPOWER_DOUBLE into account.
[ "Returns", "the", "amount", "of", "damage", "\\", "a", "amount", "will", "do", "taking", "SPELLPOWER", "and", "SPELLPOWER_DOUBLE", "into", "account", "." ]
python
train
frejanordsiek/hdf5storage
hdf5storage/__init__.py
https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/__init__.py#L1855-L1964
def savemat(file_name, mdict, appendmat=True, format='7.3',
            oned_as='row', store_python_metadata=True,
            action_for_matlab_incompatible='error',
            marshaller_collection=None, truncate_existing=False,
            truncate_invalid_matlab=False, **keywords):
    """ Save a dictionary of python types to a MATLAB MAT file.

    Saves the data provided in the dictionary `mdict` to a MATLAB MAT
    file. `format` determines which kind/version of file to use. The
    '7.3' version, which is HDF5 based, is handled by this package and
    all types that this package can write are supported. Versions 4 and
    5 are not HDF5 based, so everything is dispatched to the SciPy
    package's ``scipy.io.savemat`` function, which this function is
    modelled after (arguments not specific to this package have the
    same names, etc.).

    Parameters
    ----------
    file_name : str or file-like object
        Name of the MAT file to store in. The '.mat' extension is
        added on automatically if not present if `appendmat` is set to
        ``True``. An open file-like object can be passed if the
        writing is being dispatched to SciPy (`format` < 7.3).
    mdict : dict
        The dictionary of variables and their contents to store in the
        file.
    appendmat : bool, optional
        Whether to append the '.mat' extension to `file_name` if it
        doesn't already end in it or not.
    format : {'4', '5', '7.3'}, optional
        The MATLAB mat file format to use. The '7.3' format is handled
        by this package while the '4' and '5' formats are dispatched
        to SciPy.
    oned_as : {'row', 'column'}, optional
        Whether 1D arrays should be turned into row or column vectors.
    store_python_metadata : bool, optional
        Whether or not to store Python type information. Doing so
        allows most types to be read back perfectly. Only applicable
        if not dispatching to SciPy (`format` >= 7.3).
    action_for_matlab_incompatible : str, optional
        The action to perform writing data that is not MATLAB
        compatible. The actions are to write the data anyways
        ('ignore'), don't write the incompatible data ('discard'), or
        throw a ``TypeNotMatlabCompatibleError`` exception.
    marshaller_collection : MarshallerCollection, optional
        Collection of marshallers to disk to use. Only applicable if
        not dispatching to SciPy (`format` >= 7.3).
    truncate_existing : bool, optional
        Whether to truncate the file if it already exists before
        writing to it.
    truncate_invalid_matlab : bool, optional
        Whether to truncate a file if the file doesn't have the proper
        header (userblock in HDF5 terms) setup for MATLAB metadata to
        be placed.
    **keywords :
        Additional keywords arguments to be passed onto
        ``scipy.io.savemat`` if dispatching to SciPy
        (`format` < 7.3).

    Raises
    ------
    ImportError
        If `format` < 7.3 and the ``scipy`` module can't be found.
    NotImplementedError
        If writing a variable in `mdict` is not supported.
    exceptions.TypeNotMatlabCompatibleError
        If writing a type not compatible with MATLAB and
        `action_for_matlab_incompatible` is set to ``'error'``.

    Notes
    -----
    Writing the same data and then reading it back from disk using the
    HDF5 based version 7.3 format (the functions in this package) or
    the older format (SciPy functions) can lead to very different
    results. Each package supports a different set of data types and
    converts them to and from the same MATLAB types differently.

    See Also
    --------
    loadmat : Equivalent function to do reading.
    scipy.io.savemat : SciPy function this one models after and
        dispatches to.
    Options
    writes : Function used to do the actual writing.

    """
    # Formats below 7.3 are not HDF5 based: hand everything straight to
    # SciPy together with any extra keyword options.
    if float(format) < 7.3:
        import scipy.io
        scipy.io.savemat(file_name, mdict, appendmat=appendmat,
                         format=format, oned_as=oned_as, **keywords)
        return

    # Honor appendmat by tacking on the '.mat' extension when missing.
    if appendmat and not file_name.endswith('.mat'):
        file_name = file_name + '.mat'

    # MATLAB compatibility must be forced for a valid v7.3 MAT file.
    options = Options(
        store_python_metadata=store_python_metadata,
        matlab_compatible=True,
        oned_as=oned_as,
        action_for_matlab_incompatible=action_for_matlab_incompatible,
        marshaller_collection=marshaller_collection)

    # Write the variables in the dictionary to file.
    writes(mdict=mdict, filename=file_name,
           truncate_existing=truncate_existing,
           truncate_invalid_matlab=truncate_invalid_matlab,
           options=options)
[ "def", "savemat", "(", "file_name", ",", "mdict", ",", "appendmat", "=", "True", ",", "format", "=", "'7.3'", ",", "oned_as", "=", "'row'", ",", "store_python_metadata", "=", "True", ",", "action_for_matlab_incompatible", "=", "'error'", ",", "marshaller_collect...
Save a dictionary of python types to a MATLAB MAT file. Saves the data provided in the dictionary `mdict` to a MATLAB MAT file. `format` determines which kind/vesion of file to use. The '7.3' version, which is HDF5 based, is handled by this package and all types that this package can write are supported. Versions 4 and 5 are not HDF5 based, so everything is dispatched to the SciPy package's ``scipy.io.savemat`` function, which this function is modelled after (arguments not specific to this package have the same names, etc.). Parameters ---------- file_name : str or file-like object Name of the MAT file to store in. The '.mat' extension is added on automatically if not present if `appendmat` is set to ``True``. An open file-like object can be passed if the writing is being dispatched to SciPy (`format` < 7.3). mdict : dict The dictionary of variables and their contents to store in the file. appendmat : bool, optional Whether to append the '.mat' extension to `file_name` if it doesn't already end in it or not. format : {'4', '5', '7.3'}, optional The MATLAB mat file format to use. The '7.3' format is handled by this package while the '4' and '5' formats are dispatched to SciPy. oned_as : {'row', 'column'}, optional Whether 1D arrays should be turned into row or column vectors. store_python_metadata : bool, optional Whether or not to store Python type information. Doing so allows most types to be read back perfectly. Only applicable if not dispatching to SciPy (`format` >= 7.3). action_for_matlab_incompatible: str, optional The action to perform writing data that is not MATLAB compatible. The actions are to write the data anyways ('ignore'), don't write the incompatible data ('discard'), or throw a ``TypeNotMatlabCompatibleError`` exception. marshaller_collection : MarshallerCollection, optional Collection of marshallers to disk to use. Only applicable if not dispatching to SciPy (`format` >= 7.3). 
truncate_existing : bool, optional Whether to truncate the file if it already exists before writing to it. truncate_invalid_matlab : bool, optional Whether to truncate a file if the file doesn't have the proper header (userblock in HDF5 terms) setup for MATLAB metadata to be placed. **keywords : Additional keywords arguments to be passed onto ``scipy.io.savemat`` if dispatching to SciPy (`format` < 7.3). Raises ------ ImportError If `format` < 7.3 and the ``scipy`` module can't be found. NotImplementedError If writing a variable in `mdict` is not supported. exceptions.TypeNotMatlabCompatibleError If writing a type not compatible with MATLAB and `action_for_matlab_incompatible` is set to ``'error'``. Notes ----- Writing the same data and then reading it back from disk using the HDF5 based version 7.3 format (the functions in this package) or the older format (SciPy functions) can lead to very different results. Each package supports a different set of data types and converts them to and from the same MATLAB types differently. See Also -------- loadmat : Equivelent function to do reading. scipy.io.savemat : SciPy function this one models after and dispatches to. Options writes : Function used to do the actual writing.
[ "Save", "a", "dictionary", "of", "python", "types", "to", "a", "MATLAB", "MAT", "file", "." ]
python
train
sepandhaghighi/pycm
pycm/pycm_obj.py
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_obj.py#L129-L147
def stat(self, overall_param=None, class_param=None, class_name=None):
    """
    Print statistical measures table.

    :param overall_param : overall parameters list for print,
        Example : ["Kappa","Scott PI]
    :type overall_param : list
    :param class_param : class parameters list for print,
        Example : ["TPR","TNR","AUC"]
    :type class_param : list
    :param class_name : class name (sub set of classes),
        Example :[1,2,3]
    :type class_name : list
    :return: None
    """
    # Restrict output to the requested subset of classes (or all).
    selected_classes = class_filter(self.classes, class_name)
    table = stat_print(
        selected_classes,
        self.class_stat,
        self.overall_stat,
        self.digit,
        overall_param,
        class_param)
    print(table)
[ "def", "stat", "(", "self", ",", "overall_param", "=", "None", ",", "class_param", "=", "None", ",", "class_name", "=", "None", ")", ":", "classes", "=", "class_filter", "(", "self", ".", "classes", ",", "class_name", ")", "print", "(", "stat_print", "("...
Print statistical measures table. :param overall_param : overall parameters list for print, Example : ["Kappa","Scott PI] :type overall_param : list :param class_param : class parameters list for print, Example : ["TPR","TNR","AUC"] :type class_param : list :param class_name : class name (sub set of classes), Example :[1,2,3] :type class_name : list :return: None
[ "Print", "statistical", "measures", "table", "." ]
python
train
lbusoni/plico
plico/utils/zernike_generator.py
https://github.com/lbusoni/plico/blob/08a29da8f06e920470516838878a51ac83bab847/plico/utils/zernike_generator.py#L126-L142
def getZernike(self, index):
    """Retrieve a map representing the index-th Zernike polynomial.

    Results are cached per index, so each polynomial map is computed
    only once per instance.

    Args:
        index (int): The index of Zernike map to be generated,
            following Noll 1976 ordering.

    Returns:
        np.array: A map representing the index-th Zernike polynomial
    """
    # Membership test on the dict itself — avoids building a key list
    # on every call (the old `index not in list(d.keys())` idiom).
    if index not in self._dictCache:
        self._dictCache[index] = self._polar(index, self._rhoMap,
                                             self._thetaMap)
    return self._dictCache[index]
[ "def", "getZernike", "(", "self", ",", "index", ")", ":", "if", "index", "not", "in", "list", "(", "self", ".", "_dictCache", ".", "keys", "(", ")", ")", ":", "self", ".", "_dictCache", "[", "index", "]", "=", "self", ".", "_polar", "(", "index", ...
getZernike Retrieve a map representing the index-th Zernike polynomial Args: index (int): The index of Zernike map to be generated, following Noll 1976 ordering. Returns: np.array: A map representing the index-th Zernike polynomial
[ "getZernike" ]
python
train
saltstack/salt
salt/modules/restartcheck.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/restartcheck.py#L360-L384
def _file_changed_nilrt(full_filepath):
    '''
    Detect whether a file changed in an NILinuxRT system using md5sum and
    timestamp state files kept under a state directory.

    Returns:
        - True when the md5sum/timestamp state files are missing, or when
          ``full_filepath`` got modified/touched since they were written
        - False otherwise
    '''
    state_dir = "/var/lib/salt/restartcheck_state"
    base_name = os.path.basename(full_filepath)
    ts_file = os.path.join(state_dir, '{0}.timestamp'.format(base_name))
    md5_file = os.path.join(state_dir, '{0}.md5sum'.format(base_name))

    # No recorded state: conservatively report the file as changed.
    if not (os.path.exists(ts_file) and os.path.exists(md5_file)):
        return True

    recorded_ts = __salt__['file.read'](ts_file).rstrip()
    # int() floors the mtime so whole seconds are compared.
    current_ts = str(int(os.path.getmtime(full_filepath)))
    if recorded_ts != current_ts:
        return True

    # `md5sum -cs` exits non-zero when the checksum no longer matches.
    return bool(__salt__['cmd.retcode']('md5sum -cs {0}'.format(md5_file),
                                        output_loglevel="quiet"))
[ "def", "_file_changed_nilrt", "(", "full_filepath", ")", ":", "rs_state_dir", "=", "\"/var/lib/salt/restartcheck_state\"", "base_filename", "=", "os", ".", "path", ".", "basename", "(", "full_filepath", ")", "timestamp_file", "=", "os", ".", "path", ".", "join", "...
Detect whether a file changed in an NILinuxRT system using md5sum and timestamp files from a state directory. Returns: - False if md5sum/timestamp state files don't exist - True/False depending if ``base_filename`` got modified/touched
[ "Detect", "whether", "a", "file", "changed", "in", "an", "NILinuxRT", "system", "using", "md5sum", "and", "timestamp", "files", "from", "a", "state", "directory", "." ]
python
train
fastmonkeys/stellar
stellar/command.py
https://github.com/fastmonkeys/stellar/blob/79f0353563c35fa6ae46a2f00886ab1dd31c4492/stellar/command.py#L171-L182
def replace(name):
    """Replaces a snapshot"""
    app = get_app()
    existing = app.get_snapshot(name)
    # Bail out early when there is nothing to replace.
    if not existing:
        click.echo("Couldn't find snapshot %s" % name)
        sys.exit(1)
    app.remove_snapshot(existing)
    app.create_snapshot(name)
    click.echo("Replaced snapshot %s" % name)
[ "def", "replace", "(", "name", ")", ":", "app", "=", "get_app", "(", ")", "snapshot", "=", "app", ".", "get_snapshot", "(", "name", ")", "if", "not", "snapshot", ":", "click", ".", "echo", "(", "\"Couldn't find snapshot %s\"", "%", "name", ")", "sys", ...
Replaces a snapshot
[ "Replaces", "a", "snapshot" ]
python
test
idmillington/layout
layout/managers/grid.py
https://github.com/idmillington/layout/blob/c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5/layout/managers/grid.py#L98-L118
def _compile_dimension_size(self, base_index, array, property, sized_elements): """Build one set of col widths or row heights.""" sort_index = base_index + 2 sized_elements.sort(key=lambda x: x[sort_index]) for element_data in sized_elements: start, end = element_data[base_index], element_data[sort_index] end += start element, size = element_data[4:6] # Find the total current size of the set set_size = sum(array[start:end]) + (end-start-1)*self.margin # Work out the extra space we need extra_space_needed = getattr(size, property) - set_size if extra_space_needed < 0: continue # Distribute it among the entries extra_space_each = extra_space_needed / (end-start) for index in range(start, end): array[index] += extra_space_each
[ "def", "_compile_dimension_size", "(", "self", ",", "base_index", ",", "array", ",", "property", ",", "sized_elements", ")", ":", "sort_index", "=", "base_index", "+", "2", "sized_elements", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "sort...
Build one set of col widths or row heights.
[ "Build", "one", "set", "of", "col", "widths", "or", "row", "heights", "." ]
python
train
Visgean/urljects
urljects/patterns.py
https://github.com/Visgean/urljects/blob/29a3ca03f639ea7a9ee2f795ed17941c86b278ba/urljects/patterns.py#L22-L40
def render(value):
    """
    This function finishes the url pattern creation by adding starting
    character ^ and possibly by adding end character at the end

    :param value: naive URL value
    :return: raw string
    """
    # Empty urls — use case: wild card imports.
    if not value:
        return r'^$'
    pattern = value if value[0] == beginning else beginning + value
    if pattern[-1] != end:
        pattern += end
    return pattern
[ "def", "render", "(", "value", ")", ":", "# Empty urls", "if", "not", "value", ":", "# use case: wild card imports", "return", "r'^$'", "if", "value", "[", "0", "]", "!=", "beginning", ":", "value", "=", "beginning", "+", "value", "if", "value", "[", "-", ...
This function finishes the url pattern creation by adding starting character ^ end possibly by adding end character at the end :param value: naive URL value :return: raw string
[ "This", "function", "finishes", "the", "url", "pattern", "creation", "by", "adding", "starting", "character", "^", "end", "possibly", "by", "adding", "end", "character", "at", "the", "end" ]
python
train
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1114-L1118
def Rank(a):
    """
    Rank op: the number of dimensions of *a* as a one-element int32
    array, wrapped in a 1-tuple (op outputs are tuples).
    """
    ndims = len(a.shape)
    return (np.array([ndims], dtype=np.int32),)
[ "def", "Rank", "(", "a", ")", ":", "return", "np", ".", "array", "(", "[", "len", "(", "a", ".", "shape", ")", "]", ",", "dtype", "=", "np", ".", "int32", ")", "," ]
Rank op.
[ "Rank", "op", "." ]
python
train
KeithSSmith/switcheo-python
switcheo/neo/signatures.py
https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/signatures.py#L69-L99
def sign_create_deposit(deposit_params, key_pair):
    """
    Function to create a deposit request by generating a transaction
    request from the Switcheo API.
    Execution of this function is as follows::

        sign_create_deposit(deposit_details=create_deposit, key_pair=key_pair)

    The expected return result for this function is as follows::

        {
            'blockchain': 'neo',
            'asset_id': 'SWTH',
            'amount': '100',
            'timestamp': 1542091927575,
            'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
            'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
            'signature': '24ef6c63964988a2efe5fe67f04f46fdc2f1504fb5....'
        }

    :param deposit_params: The parameters generated by the create deposit
        function that now requires signature.
    :type deposit_params: dict
    :param key_pair: The KeyPair for the wallet being used to sign deposit
        message.
    :type key_pair: KeyPair
    :return: Dictionary response of signed deposit request that is ready to
        be executed on the NEO blockchain.
    """
    # Encode before copying so the message reflects the caller's params.
    encoded = encode_message(deposit_params)
    signed = deposit_params.copy()
    signed['address'] = neo_get_scripthash_from_private_key(
        private_key=key_pair.PrivateKey).ToString()
    signed['signature'] = sign_message(
        encoded_message=encoded,
        private_key_hex=private_key_to_hex(key_pair=key_pair))
    return signed
[ "def", "sign_create_deposit", "(", "deposit_params", ",", "key_pair", ")", ":", "encoded_message", "=", "encode_message", "(", "deposit_params", ")", "create_params", "=", "deposit_params", ".", "copy", "(", ")", "create_params", "[", "'address'", "]", "=", "neo_g...
Function to create a deposit request by generating a transaction request from the Switcheo API. Execution of this function is as follows:: sign_create_deposit(deposit_details=create_deposit, key_pair=key_pair) The expected return result for this function is as follows:: { 'blockchain': 'neo', 'asset_id': 'SWTH', 'amount': '100', 'timestamp': 1542091927575, 'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82', 'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59', 'signature': '24ef6c63964988a2efe5fe67f04f46fdc2f1504fb5....' } :param deposit_params: The parameters generated by the create deposit function that now requires signature. :type deposit_params: dict :param key_pair: The KeyPair for the wallet being used to sign deposit message. :type key_pair: KeyPair :return: Dictionary response of signed deposit request that is ready to be executed on the NEO blockchain.
[ "Function", "to", "create", "a", "deposit", "request", "by", "generating", "a", "transaction", "request", "from", "the", "Switcheo", "API", ".", "Execution", "of", "this", "function", "is", "as", "follows", "::" ]
python
train
ultrabug/py3status
py3status/storage.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/storage.py#L66-L77
def get_legacy_storage_path(self):
    """
    Detect and return existing legacy storage path.
    Returns None when no legacy data file is present.
    """
    # Legacy data lived next to the i3status config file.
    i3_config = self.py3_wrapper.config.get("i3status_config_path", "/tmp")
    candidate = os.path.join(os.path.dirname(i3_config), "py3status.data")
    return candidate if os.path.exists(candidate) else None
[ "def", "get_legacy_storage_path", "(", "self", ")", ":", "config_dir", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "py3_wrapper", ".", "config", ".", "get", "(", "\"i3status_config_path\"", ",", "\"/tmp\"", ")", ")", "storage_path", "=", "os", ...
Detect and return existing legacy storage path.
[ "Detect", "and", "return", "existing", "legacy", "storage", "path", "." ]
python
train
rhjdjong/SlipLib
sliplib/slip.py
https://github.com/rhjdjong/SlipLib/blob/8300dba3e512bca282380f234be34d75f4a73ce1/sliplib/slip.py#L43-L60
def decode(packet):
    """decode(packet) -> message from SLIP-encoded packet

    Retrieves the message from the SLIP-encoded packet.

    :param bytes packet: The SLIP-encoded message.
           Note that this must be exactly one complete packet.
           The :func:`decode` function does not provide any buffering
           for incomplete packages, nor does it provide support
           for decoding data with multiple packets.
    :return: The decoded message
    :rtype: bytes
    :raises ProtocolError: if the packet contains an invalid byte sequence.
    """
    packet = bytes(packet).strip(END)
    if not is_valid(packet):
        raise ProtocolError(packet)
    # END framing bytes were already stripped above (the second strip in
    # the original was redundant); only the escape sequences remain to be
    # translated back.
    return packet.replace(ESC + ESC_END, END).replace(ESC + ESC_ESC, ESC)
[ "def", "decode", "(", "packet", ")", ":", "packet", "=", "bytes", "(", "packet", ")", ".", "strip", "(", "END", ")", "if", "not", "is_valid", "(", "packet", ")", ":", "raise", "ProtocolError", "(", "packet", ")", "return", "packet", ".", "strip", "("...
decode(packet) -> message from SLIP-encoded packet Retrieves the message from the SLIP-encoded packet. :param bytes packet: The SLIP-encoded message. Note that this must be exactly one complete packet. The :func:`decode` function does not provide any buffering for incomplete packages, nor does it provide support for decoding data with multiple packets. :return: The decoded message :rtype: bytes :raises ProtocolError: if the packet contains an invalid byte sequence.
[ "decode", "(", "packet", ")", "-", ">", "message", "from", "SLIP", "-", "encoded", "packet", "Retrieves", "the", "message", "from", "the", "SLIP", "-", "encoded", "packet", ".", ":", "param", "bytes", "packet", ":", "The", "SLIP", "-", "encoded", "messag...
python
train
ska-sa/katcp-python
katcp/client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L241-L245
def _next_id(self):
    """Return the next available message id."""
    # Ids may only ever be generated from the ioloop thread.
    assert get_thread_ident() == self.ioloop_thread_id
    new_id = self._last_msg_id + 1
    self._last_msg_id = new_id
    return str(new_id)
[ "def", "_next_id", "(", "self", ")", ":", "assert", "get_thread_ident", "(", ")", "==", "self", ".", "ioloop_thread_id", "self", ".", "_last_msg_id", "+=", "1", "return", "str", "(", "self", ".", "_last_msg_id", ")" ]
Return the next available message id.
[ "Return", "the", "next", "available", "message", "id", "." ]
python
train
mosdef-hub/foyer
foyer/smarts_graph.py
https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L57-L75
def _add_edges(self, ast_node, trunk=None):
    """Add all bonds in the SMARTS string as edges in the graph.

    :param ast_node: AST node whose children are walked; 'atom' children
        become graph nodes/edges, 'branch' children are recursed into.
    :param trunk: presumably the atom a branch hangs off, so the first
        atom inside a branch bonds back to it — TODO confirm against the
        SMARTS parser.
    """
    atom_indices = self._atom_indices
    for atom in ast_node.tail:
        if atom.head == 'atom':
            atom_idx = atom_indices[id(atom)]
            # First atom inside a branch: bond it back to the trunk atom.
            if atom.is_first_kid and atom.parent().head == 'branch':
                trunk_idx = atom_indices[id(trunk)]
                self.add_edge(atom_idx, trunk_idx)
            if not atom.is_last_kid:
                if atom.next_kid.head == 'atom':
                    # Adjacent atoms are bonded directly.
                    next_idx = atom_indices[id(atom.next_kid)]
                    self.add_edge(atom_idx, next_idx)
                elif atom.next_kid.head == 'branch':
                    # This atom becomes the attachment point for the
                    # branch that follows it.
                    trunk = atom
            else:
                # We traveled through the whole branch.
                return
        elif atom.head == 'branch':
            # Recurse into the branch, carrying the current trunk.
            self._add_edges(atom, trunk)
[ "def", "_add_edges", "(", "self", ",", "ast_node", ",", "trunk", "=", "None", ")", ":", "atom_indices", "=", "self", ".", "_atom_indices", "for", "atom", "in", "ast_node", ".", "tail", ":", "if", "atom", ".", "head", "==", "'atom'", ":", "atom_idx", "=...
Add all bonds in the SMARTS string as edges in the graph.
[ "Add", "all", "bonds", "in", "the", "SMARTS", "string", "as", "edges", "in", "the", "graph", "." ]
python
train
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L1102-L1107
def simxSetObjectFloatParameter(clientID, objectHandle, parameterID,
                                parameterValue, operationMode):
    '''
    Please have a look at the function description/documentation in the
    V-REP user manual
    '''
    # Thin pass-through to the remote-API C binding; all validation
    # happens on the native side.
    return c_SetObjectFloatParameter(
        clientID, objectHandle, parameterID, parameterValue, operationMode)
[ "def", "simxSetObjectFloatParameter", "(", "clientID", ",", "objectHandle", ",", "parameterID", ",", "parameterValue", ",", "operationMode", ")", ":", "return", "c_SetObjectFloatParameter", "(", "clientID", ",", "objectHandle", ",", "parameterID", ",", "parameterValue",...
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
authomatic/authomatic
authomatic/providers/__init__.py
https://github.com/authomatic/authomatic/blob/90a9ce60cc405ae8a2bf5c3713acd5d78579a04e/authomatic/providers/__init__.py#L315-L333
def csrf_generator(secret):
    """
    Generates CSRF token.

    Inspired by this article:
    http://blog.ptsecurity.com/2012/10/random-number-security-in-python.html

    :returns:
        :class:`str` Random unguessable string.
    """
    # Create hash from random string plus salt. uuid4() is backed by
    # os.urandom, so the hash input is already unpredictable.
    hashed = hashlib.md5(uuid.uuid4().bytes + six.b(secret)).hexdigest()

    # Each time return random portion of the hash. SystemRandom draws
    # from os.urandom rather than the predictable Mersenne Twister,
    # which matters for a security token (works on Python 2 and 3).
    span = 5
    shift = random.SystemRandom().randint(0, span)
    return hashed[shift:shift - span - 1]
[ "def", "csrf_generator", "(", "secret", ")", ":", "# Create hash from random string plus salt.", "hashed", "=", "hashlib", ".", "md5", "(", "uuid", ".", "uuid4", "(", ")", ".", "bytes", "+", "six", ".", "b", "(", "secret", ")", ")", ".", "hexdigest", "(", ...
Generates CSRF token. Inspired by this article: http://blog.ptsecurity.com/2012/10/random-number-security-in-python.html :returns: :class:`str` Random unguessable string.
[ "Generates", "CSRF", "token", "." ]
python
test
Timusan/wtforms-dynamic-fields
wtforms_dynamic_fields/wtforms_dynamic_fields.py
https://github.com/Timusan/wtforms-dynamic-fields/blob/d984a646075219a6f8a0e931c96035ca3e44be56/wtforms_dynamic_fields/wtforms_dynamic_fields.py#L90-L214
def process(self, form, post): """ Process the given WTForm Form object. Itterate over the POST values and check each field against the configuration that was made. For each field that is valid, check all the validator parameters for possible %field% replacement, then bind these parameters to their validator. Finally, add the field together with their validators to the form. :param form: A valid WTForm Form object :param post: A MultiDict with the POST variables """ if not isinstance(form, FormMeta): raise TypeError('Given form is not a valid WTForm.') re_field_name = re.compile(r'\%([a-zA-Z0-9_]*)\%') class F(form): pass for field, data in post.iteritems(): if field in F(): # Skip it if the POST field is one of the standard form fields. continue else: if field in self._dyn_fields: # If we can find the field name directly, it means the field # is not a set so just set the canonical name and go on. field_cname = field # Since we are not in a set, (re)set the current set. current_set_number = None elif (field.split('_')[-1].isdigit() and field[:-(len(field.split('_')[-1]))-1] in self._dyn_fields.keys()): # If the field can be split on underscore characters, # the last part contains only digits and the # everything *but* the last part is found in the # field configuration, we are good to go. # (Cowardly refusing to use regex here). field_cname = field[:-(len(field.split('_')[-1]))-1] # Since we apparently are in a set, remember the # the set number we are at. current_set_number = str(field.split('_')[-1]) else: # The field did not match to a canonical name # from the fields dictionary or the name # was malformed, throw it out. continue # Since the field seems to be a valid one, let us # prepare the validator arguments and, if we are in a set # replace the %field_name% convention where we find it. 
validators = [] if 'validators' in self._dyn_fields[field_cname]: for validator in self._dyn_fields[field_cname]['validators']: args = [] kwargs = {} if 'args' in self._dyn_fields[field_cname]\ [validator.__name__]: if not current_set_number: args = self._dyn_fields[field_cname]\ [validator.__name__]['args'] else: # If we are currently in a set, append the set number # to all the words that are decorated with %'s within # the arguments. for arg in self._dyn_fields[field_cname]\ [validator.__name__]['args']: try: arg = re_field_name.sub(r'\1'+'_'+current_set_number, arg) except: # The argument does not seem to be regex-able # Probably not a string, thus we can skip it. pass args.append(arg) if 'kwargs' in self._dyn_fields[field_cname]\ [validator.__name__]: if not current_set_number: kwargs = self._dyn_fields[field_cname]\ [validator.__name__]['kwargs'] else: # If we are currently in a set, append the set number # to all the words that are decorated with %'s within # the arguments. for key, arg in self.iteritems(self._dyn_fields[field_cname]\ [validator.__name__]['kwargs']): try: arg = re_field_name.sub(r'\1'+'_'+current_set_number, arg) except: # The argument does not seem to be regex-able # Probably not a string, thus we can skip it. pass kwargs[key] = arg # Finally, bind arguments to the validator # and add it to the list validators.append(validator(*args, **kwargs)) # The field is setup, it is time to add it to the form. field_type = self._dyn_fields[field_cname]['type'] field_label = self._dyn_fields[field_cname]['label'] field_args = self._dyn_fields[field_cname]['args'] field_kwargs = self._dyn_fields[field_cname]['kwargs'] setattr(F, field, field_type(field_label, validators=validators, *field_args, **field_kwargs)) # Create an instance of the form with the newly # created fields and give it back to the caller. if self.flask_wtf: # Flask WTF overrides the form initialization # and already injects the POST variables. form = F() else: form = F(post) return form
[ "def", "process", "(", "self", ",", "form", ",", "post", ")", ":", "if", "not", "isinstance", "(", "form", ",", "FormMeta", ")", ":", "raise", "TypeError", "(", "'Given form is not a valid WTForm.'", ")", "re_field_name", "=", "re", ".", "compile", "(", "r...
Process the given WTForm Form object. Itterate over the POST values and check each field against the configuration that was made. For each field that is valid, check all the validator parameters for possible %field% replacement, then bind these parameters to their validator. Finally, add the field together with their validators to the form. :param form: A valid WTForm Form object :param post: A MultiDict with the POST variables
[ "Process", "the", "given", "WTForm", "Form", "object", "." ]
python
train
projectatomic/atomic-reactor
atomic_reactor/plugins/exit_koji_promote.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/exit_koji_promote.py#L142-L180
def get_rpms(self): """ Build a list of installed RPMs in the format required for the metadata. """ tags = [ 'NAME', 'VERSION', 'RELEASE', 'ARCH', 'EPOCH', 'SIGMD5', 'SIGPGP:pgpsig', 'SIGGPG:pgpsig', ] cmd = "/bin/rpm " + rpm_qf_args(tags) try: # py3 (status, output) = subprocess.getstatusoutput(cmd) except AttributeError: # py2 with open('/dev/null', 'r+') as devnull: p = subprocess.Popen(cmd, shell=True, stdin=devnull, stdout=subprocess.PIPE, stderr=devnull) (stdout, stderr) = p.communicate() status = p.wait() output = stdout.decode() if status != 0: self.log.debug("%s: stderr output: %s", cmd, stderr) raise RuntimeError("%s: exit code %s" % (cmd, status)) return parse_rpm_output(output.splitlines(), tags)
[ "def", "get_rpms", "(", "self", ")", ":", "tags", "=", "[", "'NAME'", ",", "'VERSION'", ",", "'RELEASE'", ",", "'ARCH'", ",", "'EPOCH'", ",", "'SIGMD5'", ",", "'SIGPGP:pgpsig'", ",", "'SIGGPG:pgpsig'", ",", "]", "cmd", "=", "\"/bin/rpm \"", "+", "rpm_qf_ar...
Build a list of installed RPMs in the format required for the metadata.
[ "Build", "a", "list", "of", "installed", "RPMs", "in", "the", "format", "required", "for", "the", "metadata", "." ]
python
train
python-xlib/python-xlib
Xlib/display.py
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/display.py#L835-L842
def set_screen_saver(self, timeout, interval, prefer_blank, allow_exposures, onerror = None): """See XSetScreenSaver(3X11).""" request.SetScreenSaver(display = self.display, onerror = onerror, timeout = timeout, interval = interval, prefer_blank = prefer_blank, allow_exposures = allow_exposures)
[ "def", "set_screen_saver", "(", "self", ",", "timeout", ",", "interval", ",", "prefer_blank", ",", "allow_exposures", ",", "onerror", "=", "None", ")", ":", "request", ".", "SetScreenSaver", "(", "display", "=", "self", ".", "display", ",", "onerror", "=", ...
See XSetScreenSaver(3X11).
[ "See", "XSetScreenSaver", "(", "3X11", ")", "." ]
python
train
python-cmd2/cmd2
cmd2/utils.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/utils.py#L211-L223
def alphabetical_sort(list_to_sort: Iterable[str]) -> List[str]: """Sorts a list of strings alphabetically. For example: ['a1', 'A11', 'A2', 'a22', 'a3'] To sort a list in place, don't call this method, which makes a copy. Instead, do this: my_list.sort(key=norm_fold) :param list_to_sort: the list being sorted :return: the sorted list """ return sorted(list_to_sort, key=norm_fold)
[ "def", "alphabetical_sort", "(", "list_to_sort", ":", "Iterable", "[", "str", "]", ")", "->", "List", "[", "str", "]", ":", "return", "sorted", "(", "list_to_sort", ",", "key", "=", "norm_fold", ")" ]
Sorts a list of strings alphabetically. For example: ['a1', 'A11', 'A2', 'a22', 'a3'] To sort a list in place, don't call this method, which makes a copy. Instead, do this: my_list.sort(key=norm_fold) :param list_to_sort: the list being sorted :return: the sorted list
[ "Sorts", "a", "list", "of", "strings", "alphabetically", "." ]
python
train
mojaie/chorus
chorus/molutil.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L117-L120
def rotatable_count(mol): """Rotatable bond count """ mol.require("Rotatable") return sum(1 for _, _, b in mol.bonds_iter() if b.rotatable)
[ "def", "rotatable_count", "(", "mol", ")", ":", "mol", ".", "require", "(", "\"Rotatable\"", ")", "return", "sum", "(", "1", "for", "_", ",", "_", ",", "b", "in", "mol", ".", "bonds_iter", "(", ")", "if", "b", ".", "rotatable", ")" ]
Rotatable bond count
[ "Rotatable", "bond", "count" ]
python
train
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py#L94-L116
def _DownloadUrl(self, url, dest_dir): """Download a script from a given URL. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script. """ dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) dest_file.close() dest = dest_file.name self.logger.info('Downloading url from %s to %s.', url, dest) try: urlretrieve.urlretrieve(url, dest) return dest except (httpclient.HTTPException, socket.error, urlerror.URLError) as e: self.logger.warning('Could not download %s. %s.', url, str(e)) except Exception as e: self.logger.warning('Exception downloading %s. %s.', url, str(e)) return None
[ "def", "_DownloadUrl", "(", "self", ",", "url", ",", "dest_dir", ")", ":", "dest_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "dir", "=", "dest_dir", ",", "delete", "=", "False", ")", "dest_file", ".", "close", "(", ")", "dest", "=", "dest_fil...
Download a script from a given URL. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script.
[ "Download", "a", "script", "from", "a", "given", "URL", "." ]
python
train
onecodex/onecodex
onecodex/viz/_bargraph.py
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/viz/_bargraph.py#L6-L202
def plot_bargraph( self, rank="auto", normalize="auto", top_n="auto", threshold="auto", title=None, xlabel=None, ylabel=None, tooltip=None, return_chart=False, haxis=None, legend="auto", label=None, ): """Plot a bargraph of relative abundance of taxa for multiple samples. Parameters ---------- rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. normalize : 'auto' or `bool`, optional Convert read counts to relative abundances such that each sample sums to 1.0. Setting 'auto' will choose automatically based on the data. return_chart : `bool`, optional When True, return an `altair.Chart` object instead of displaying the resulting plot in the current notebook. top_n : `int`, optional Display the top N most abundant taxa in the entire cohort of samples. threshold : `float` Display only taxa that are more abundant that this threshold in one or more samples. title : `string`, optional Text label at the top of the plot. xlabel : `string`, optional Text label along the horizontal axis. ylabel : `string`, optional Text label along the vertical axis. tooltip : `string` or `list`, optional A string or list containing strings representing metadata fields. When a point in the plot is hovered over, the value of the metadata associated with that sample will be displayed in a modal. haxis : `string`, optional The metadata field (or tuple containing multiple categorical fields) used to group samples together. legend: `string`, optional Title for color scale. Defaults to the field used to generate the plot, e.g. readcount_w_children or abundance. label : `string` or `callable`, optional A metadata field (or function) used to label each analysis. If passing a function, a dict containing the metadata for each analysis is passed as the first and only positional argument. The callable function must return a string. 
Examples -------- Plot a bargraph of the top 10 most abundant genera >>> plot_bargraph(rank='genus', top_n=10) """ if rank is None: raise OneCodexException("Please specify a rank or 'auto' to choose automatically") if not (threshold or top_n): raise OneCodexException("Please specify at least one of: threshold, top_n") if top_n == "auto" and threshold == "auto": top_n = 10 threshold = None elif top_n == "auto" and threshold != "auto": top_n = None elif top_n != "auto" and threshold == "auto": threshold = None if legend == "auto": legend = self._field df = self.to_df( rank=rank, normalize=normalize, top_n=top_n, threshold=threshold, table_format="long" ) if tooltip: if not isinstance(tooltip, list): tooltip = [tooltip] else: tooltip = [] if haxis: tooltip.append(haxis) tooltip.insert(0, "Label") # takes metadata columns and returns a dataframe with just those columns # renames columns in the case where columns are taxids magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label) # add sort order to long-format df if haxis: sort_order = magic_metadata.sort_values(magic_fields[haxis]).index.tolist() for sort_num, sort_class_id in enumerate(sort_order): magic_metadata.loc[sort_class_id, "sort_order"] = sort_num df["sort_order"] = magic_metadata["sort_order"][df["classification_id"]].tolist() sort_order = alt.EncodingSortField(field="sort_order", op="mean") else: sort_order = None # transfer metadata from wide-format df (magic_metadata) to long-format df for f in tooltip: df[magic_fields[f]] = magic_metadata[magic_fields[f]][df["classification_id"]].tolist() # add taxa names df["tax_name"] = [ "{} ({})".format(self.taxonomy["name"][t], t) if t in self.taxonomy["name"] else t for t in df["tax_id"] ] # # TODO: how to sort bars in bargraph # - abundance (mean across all samples) # - parent taxon (this will require that we make a few assumptions # about taxonomic ranks but as all taxonomic data will be coming from # OCX this should be okay) # ylabel = 
self._field if ylabel is None else ylabel xlabel = "" if xlabel is None else xlabel # should ultimately be Label, tax_name, readcount_w_children, then custom fields tooltip_for_altair = [magic_fields[f] for f in tooltip] tooltip_for_altair.insert(1, "tax_name") tooltip_for_altair.insert(2, "{}:Q".format(self._field)) # generate dataframes to plot, one per facet dfs_to_plot = [] if haxis: # if using facets, first facet is just the vertical axis blank_df = df.iloc[:1].copy() blank_df[self._field] = 0 dfs_to_plot.append(blank_df) for md_val in magic_metadata[magic_fields[haxis]].unique(): plot_df = df.where(df[magic_fields[haxis]] == md_val).dropna() # preserve booleans if magic_metadata[magic_fields[haxis]].dtype == "bool": plot_df[magic_fields[haxis]] = plot_df[magic_fields[haxis]].astype(bool) dfs_to_plot.append(plot_df) else: dfs_to_plot.append(df) charts = [] for plot_num, plot_df in enumerate(dfs_to_plot): chart = ( alt.Chart(plot_df) .mark_bar() .encode( x=alt.X("Label", axis=alt.Axis(title=xlabel), sort=sort_order), y=alt.Y( self._field, axis=alt.Axis(title=ylabel), scale=alt.Scale(domain=[0, 1], zero=True, nice=False), ), color=alt.Color("tax_name", legend=alt.Legend(title=legend)), tooltip=tooltip_for_altair, href="url:N", ) ) if haxis: if plot_num == 0: # first plot (blank_df) has vert axis but no horiz axis chart.encoding.x.axis = None elif plot_num > 0: # strip vertical axis from subsequent facets chart.encoding.y.axis = None # facet's title set to value of metadata in this group chart.title = str(plot_df[magic_fields[haxis]].tolist()[0]) charts.append(chart) # add all the facets together final_chart = charts[0] if len(charts) > 1: for chart in charts[1:]: final_chart |= chart # add title to chart # (cannot specify None or False for no title) final_chart = final_chart.properties(title=title) if title else final_chart return final_chart if return_chart else final_chart.display()
[ "def", "plot_bargraph", "(", "self", ",", "rank", "=", "\"auto\"", ",", "normalize", "=", "\"auto\"", ",", "top_n", "=", "\"auto\"", ",", "threshold", "=", "\"auto\"", ",", "title", "=", "None", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", "...
Plot a bargraph of relative abundance of taxa for multiple samples. Parameters ---------- rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. normalize : 'auto' or `bool`, optional Convert read counts to relative abundances such that each sample sums to 1.0. Setting 'auto' will choose automatically based on the data. return_chart : `bool`, optional When True, return an `altair.Chart` object instead of displaying the resulting plot in the current notebook. top_n : `int`, optional Display the top N most abundant taxa in the entire cohort of samples. threshold : `float` Display only taxa that are more abundant that this threshold in one or more samples. title : `string`, optional Text label at the top of the plot. xlabel : `string`, optional Text label along the horizontal axis. ylabel : `string`, optional Text label along the vertical axis. tooltip : `string` or `list`, optional A string or list containing strings representing metadata fields. When a point in the plot is hovered over, the value of the metadata associated with that sample will be displayed in a modal. haxis : `string`, optional The metadata field (or tuple containing multiple categorical fields) used to group samples together. legend: `string`, optional Title for color scale. Defaults to the field used to generate the plot, e.g. readcount_w_children or abundance. label : `string` or `callable`, optional A metadata field (or function) used to label each analysis. If passing a function, a dict containing the metadata for each analysis is passed as the first and only positional argument. The callable function must return a string. Examples -------- Plot a bargraph of the top 10 most abundant genera >>> plot_bargraph(rank='genus', top_n=10)
[ "Plot", "a", "bargraph", "of", "relative", "abundance", "of", "taxa", "for", "multiple", "samples", "." ]
python
train
IdentityPython/pysaml2
src/saml2/mdstore.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mdstore.py#L747-L758
def load(self, *args, **kwargs): """ Imports metadata by the use of HTTP GET. If the fingerprint is known the file will be checked for compliance before it is imported. """ response = self.http.send(self.url) if response.status_code == 200: _txt = response.content return self.parse_and_check_signature(_txt) else: logger.info("Response status: %s", response.status_code) raise SourceNotFound(self.url)
[ "def", "load", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "http", ".", "send", "(", "self", ".", "url", ")", "if", "response", ".", "status_code", "==", "200", ":", "_txt", "=", "response", "."...
Imports metadata by the use of HTTP GET. If the fingerprint is known the file will be checked for compliance before it is imported.
[ "Imports", "metadata", "by", "the", "use", "of", "HTTP", "GET", ".", "If", "the", "fingerprint", "is", "known", "the", "file", "will", "be", "checked", "for", "compliance", "before", "it", "is", "imported", "." ]
python
train
jazzband/django-pipeline
pipeline/compressors/__init__.py
https://github.com/jazzband/django-pipeline/blob/3cd2f93bb47bf8d34447e13ff691f7027e7b07a2/pipeline/compressors/__init__.py#L58-L71
def compress_js(self, paths, templates=None, **kwargs): """Concatenate and compress JS files""" js = self.concatenate(paths) if templates: js = js + self.compile_templates(templates) if not settings.DISABLE_WRAPPER: js = settings.JS_WRAPPER % js compressor = self.js_compressor if compressor: js = getattr(compressor(verbose=self.verbose), 'compress_js')(js) return js
[ "def", "compress_js", "(", "self", ",", "paths", ",", "templates", "=", "None", ",", "*", "*", "kwargs", ")", ":", "js", "=", "self", ".", "concatenate", "(", "paths", ")", "if", "templates", ":", "js", "=", "js", "+", "self", ".", "compile_templates...
Concatenate and compress JS files
[ "Concatenate", "and", "compress", "JS", "files" ]
python
train
nakagami/pyfirebirdsql
firebirdsql/utils.py
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/utils.py#L43-L50
def hex_to_bytes(s): """ convert hex string to bytes """ if len(s) % 2: s = b'0' + s ia = [int(s[i:i+2], 16) for i in range(0, len(s), 2)] # int array return bs(ia) if PYTHON_MAJOR_VER == 3 else b''.join([chr(c) for c in ia])
[ "def", "hex_to_bytes", "(", "s", ")", ":", "if", "len", "(", "s", ")", "%", "2", ":", "s", "=", "b'0'", "+", "s", "ia", "=", "[", "int", "(", "s", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ...
convert hex string to bytes
[ "convert", "hex", "string", "to", "bytes" ]
python
train
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L9221-L9240
def is_descendant_of_bank(self, id_, bank_id): """Tests if an ``Id`` is a descendant of a bank. arg: id (osid.id.Id): an ``Id`` arg: bank_id (osid.id.Id): the ``Id`` of a bank return: (boolean) - ``true`` if the ``id`` is a descendant of the ``bank_id,`` ``false`` otherwise raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` or ``id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_descendant_of_bin if self._catalog_session is not None: return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=bank_id) return self._hierarchy_session.is_descendant(id_=id_, descendant_id=bank_id)
[ "def", "is_descendant_of_bank", "(", "self", ",", "id_", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_descendant_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_ca...
Tests if an ``Id`` is a descendant of a bank. arg: id (osid.id.Id): an ``Id`` arg: bank_id (osid.id.Id): the ``Id`` of a bank return: (boolean) - ``true`` if the ``id`` is a descendant of the ``bank_id,`` ``false`` otherwise raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` or ``id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``.
[ "Tests", "if", "an", "Id", "is", "a", "descendant", "of", "a", "bank", "." ]
python
train
Titan-C/slaveparticles
slaveparticles/utils/plotter.py
https://github.com/Titan-C/slaveparticles/blob/e4c2f5afb1a7b195517ef2f1b5cc758965036aab/slaveparticles/utils/plotter.py#L120-L139
def plot_mean_field_conv(N=1, n=0.5, Uspan=np.arange(0, 3.6, 0.5)): """Generates the plot on the convergenge of the mean field in single site spin hamiltonian under with N degenerate half-filled orbitals """ sl = Spinon(slaves=2*N, orbitals=N, avg_particles=2*n, hopping=[0.5]*2*N, orbital_e=[0]*2*N) hlog = solve_loop(sl, Uspan, [0.])[1] f, (ax1, ax2) = plt.subplots(2, sharex=True) for field in hlog: field = np.asarray(field) ax1.semilogy(abs(field[1:]-field[:-1])) ax2.plot(field)#, label = 'h, U = {}'.format(Uint)) plt.title('Convergence of selfconsintent mean field') ax1.set_ylabel('$\\Delta h$') ax2.set_ylabel('mean field $h$') plt.xlabel('iterations') return hlog
[ "def", "plot_mean_field_conv", "(", "N", "=", "1", ",", "n", "=", "0.5", ",", "Uspan", "=", "np", ".", "arange", "(", "0", ",", "3.6", ",", "0.5", ")", ")", ":", "sl", "=", "Spinon", "(", "slaves", "=", "2", "*", "N", ",", "orbitals", "=", "N...
Generates the plot on the convergenge of the mean field in single site spin hamiltonian under with N degenerate half-filled orbitals
[ "Generates", "the", "plot", "on", "the", "convergenge", "of", "the", "mean", "field", "in", "single", "site", "spin", "hamiltonian", "under", "with", "N", "degenerate", "half", "-", "filled", "orbitals" ]
python
train
ModisWorks/modis
modis/discord_modis/main.py
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/main.py#L110-L180
def _get_event_handlers(): """ Gets dictionary of event handlers and the modules that define them Returns: event_handlers (dict): Contains "all", "on_ready", "on_message", "on_reaction_add", "on_error" """ import os import importlib event_handlers = { "on_ready": [], "on_resume": [], "on_error": [], "on_message": [], "on_socket_raw_receive": [], "on_socket_raw_send": [], "on_message_delete": [], "on_message_edit": [], "on_reaction_add": [], "on_reaction_remove": [], "on_reaction_clear": [], "on_channel_delete": [], "on_channel_create": [], "on_channel_update": [], "on_member_join": [], "on_member_remove": [], "on_member_update": [], "on_server_join": [], "on_server_remove": [], "on_server_update": [], "on_server_role_create": [], "on_server_role_delete": [], "on_server_role_update": [], "on_server_emojis_update": [], "on_server_available": [], "on_server_unavailable": [], "on_voice_state_update": [], "on_member_ban": [], "on_member_unban": [], "on_typing": [], "on_group_join": [], "on_group_remove": [] } # Iterate through module folders database_dir = "{}/modules".format( os.path.dirname(os.path.realpath(__file__))) for module_name in os.listdir(database_dir): module_dir = "{}/{}".format(database_dir, module_name) # Iterate through files in module if os.path.isdir(module_dir) and not module_name.startswith("_"): # Add all defined event handlers in module files module_event_handlers = os.listdir(module_dir) for event_handler in event_handlers.keys(): if "{}.py".format(event_handler) in module_event_handlers: import_name = ".discord_modis.modules.{}.{}".format( module_name, event_handler) logger.debug("Found event handler {}".format(import_name[23:])) try: event_handlers[event_handler].append( importlib.import_module(import_name, "modis")) except Exception as e: # Log errors in modules logger.exception(e) return event_handlers
[ "def", "_get_event_handlers", "(", ")", ":", "import", "os", "import", "importlib", "event_handlers", "=", "{", "\"on_ready\"", ":", "[", "]", ",", "\"on_resume\"", ":", "[", "]", ",", "\"on_error\"", ":", "[", "]", ",", "\"on_message\"", ":", "[", "]", ...
Gets dictionary of event handlers and the modules that define them Returns: event_handlers (dict): Contains "all", "on_ready", "on_message", "on_reaction_add", "on_error"
[ "Gets", "dictionary", "of", "event", "handlers", "and", "the", "modules", "that", "define", "them" ]
python
train
smarie/python-parsyfiles
parsyfiles/plugins_base/support_for_configparser.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_base/support_for_configparser.py#L30-L39
def get_default_config_parsers() -> List[AnyParser]: """ Utility method to return the default parsers able to parse a dictionary from a file. :return: """ return [SingleFileParserFunction(parser_function=read_config, streaming_mode=True, supported_exts={'.cfg', '.ini'}, supported_types={ConfigParser}), ]
[ "def", "get_default_config_parsers", "(", ")", "->", "List", "[", "AnyParser", "]", ":", "return", "[", "SingleFileParserFunction", "(", "parser_function", "=", "read_config", ",", "streaming_mode", "=", "True", ",", "supported_exts", "=", "{", "'.cfg'", ",", "'...
Utility method to return the default parsers able to parse a dictionary from a file. :return:
[ "Utility", "method", "to", "return", "the", "default", "parsers", "able", "to", "parse", "a", "dictionary", "from", "a", "file", ".", ":", "return", ":" ]
python
train
smarie/python-parsyfiles
parsyfiles/plugins_optional/support_for_jprops.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_optional/support_for_jprops.py#L11-L34
def try_parse_num_and_booleans(num_str): """ Tries to parse the provided string as a number or boolean :param num_str: :return: """ if isinstance(num_str, str): # bool if num_str.lower() == 'true': return True elif num_str.lower() == 'false': return False # int if num_str.isdigit(): return int(num_str) # float try: return float(num_str) except ValueError: # give up return num_str else: # dont try return num_str
[ "def", "try_parse_num_and_booleans", "(", "num_str", ")", ":", "if", "isinstance", "(", "num_str", ",", "str", ")", ":", "# bool", "if", "num_str", ".", "lower", "(", ")", "==", "'true'", ":", "return", "True", "elif", "num_str", ".", "lower", "(", ")", ...
Tries to parse the provided string as a number or boolean :param num_str: :return:
[ "Tries", "to", "parse", "the", "provided", "string", "as", "a", "number", "or", "boolean", ":", "param", "num_str", ":", ":", "return", ":" ]
python
train
Qiskit/qiskit-terra
qiskit/quantum_info/operators/predicates.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/predicates.py#L86-L95
def is_symmetric_matrix(op, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT): """Test if an array is a symmetrix matrix""" if atol is None: atol = ATOL_DEFAULT if rtol is None: rtol = RTOL_DEFAULT mat = np.array(op) if mat.ndim != 2: return False return np.allclose(mat, mat.T, rtol=rtol, atol=atol)
[ "def", "is_symmetric_matrix", "(", "op", ",", "rtol", "=", "RTOL_DEFAULT", ",", "atol", "=", "ATOL_DEFAULT", ")", ":", "if", "atol", "is", "None", ":", "atol", "=", "ATOL_DEFAULT", "if", "rtol", "is", "None", ":", "rtol", "=", "RTOL_DEFAULT", "mat", "=",...
Test if an array is a symmetrix matrix
[ "Test", "if", "an", "array", "is", "a", "symmetrix", "matrix" ]
python
test
JoeVirtual/KonFoo
konfoo/core.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L5276-L5299
def initialize_fields(self, content):
    """ Initializes the `Pointer` field itself and the :class:`Field` items
    in the :attr:`data` object referenced by the `Pointer` field with the
    *values* in the *content* dictionary.

    The ``['value']`` key in the *content* dictionary refers to the `Pointer`
    field itself and the ``['data']`` key refers to the :attr:`data` object
    referenced by the `Pointer` field.

    :param dict content: a dictionary contains the :class:`~Field.value` for
        the `Pointer` field and the :class:`~Field.value` for each
        :class:`Field` in the :attr:`data` object referenced by the `Pointer`
        field.
    """
    for name, value in content.items():
        # BUG FIX: compare with '==' -- the original used 'is', which only
        # happens to work for interned string literals and silently fails
        # for keys built at runtime (e.g. ''.join(['va', 'lue'])).
        if name == 'value':
            self.value = value
        elif name == 'data':
            # Container or Pointer
            if is_mixin(self._data):
                self._data.initialize_fields(value)
            # Field
            elif is_field(self._data):
                self._data.value = value
[ "def", "initialize_fields", "(", "self", ",", "content", ")", ":", "for", "name", ",", "value", "in", "content", ".", "items", "(", ")", ":", "if", "name", "is", "'value'", ":", "self", ".", "value", "=", "value", "elif", "name", "is", "'data'", ":",...
Initializes the `Pointer` field itself and the :class:`Field` items in the :attr:`data` object referenced by the `Pointer` field with the *values* in the *content* dictionary. The ``['value']`` key in the *content* dictionary refers to the `Pointer` field itself and the ``['data']`` key refers to the :attr:`data` object referenced by the `Pointer` field. :param dict content: a dictionary contains the :class:`~Field.value` for the `Pointer` field and the :class:`~Field.value` for each :class:`Field` in the :attr:`data` object referenced by the `Pointer` field.
[ "Initializes", "the", "Pointer", "field", "itself", "and", "the", ":", "class", ":", "Field", "items", "in", "the", ":", "attr", ":", "data", "object", "referenced", "by", "the", "Pointer", "field", "with", "the", "*", "values", "*", "in", "the", "*", ...
python
train
solvebio/solvebio-python
solvebio/resource/apiresource.py
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/apiresource.py#L54-L65
def instance_url(self):
    """Get instance URL by ID.

    Builds ``<class URL>/<id>`` from the resource's ID attribute; raises
    when the instance has no usable ID.
    """
    identifier = self.get(self.ID_ATTR)
    base_url = self.class_url()
    if identifier:
        return '/'.join([base_url, six.text_type(identifier)])
    raise Exception(
        'Could not determine which URL to request: %s instance '
        'has invalid ID: %r' % (type(self).__name__, identifier),
        self.ID_ATTR)
[ "def", "instance_url", "(", "self", ")", ":", "id_", "=", "self", ".", "get", "(", "self", ".", "ID_ATTR", ")", "base", "=", "self", ".", "class_url", "(", ")", "if", "id_", ":", "return", "'/'", ".", "join", "(", "[", "base", ",", "six", ".", ...
Get instance URL by ID
[ "Get", "instance", "URL", "by", "ID" ]
python
test
materialsproject/pymatgen
pymatgen/analysis/transition_state.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/transition_state.py#L108-L151
def from_outcars(cls, outcars, structures, **kwargs):
    """
    Initializes an NEBAnalysis from Outcar and Structure objects. Use
    the static constructors, e.g., :class:`from_dir` instead if you
    prefer to have these automatically generated from a directory of NEB
    calculations.

    Args:
        outcars ([Outcar]): List of Outcar objects. Note that these have
            to be ordered from start to end along reaction coordinates.
        structures ([Structure]): List of Structures along reaction
            coordinate. Must be same length as outcar.
        interpolation_order (int): Order of polynomial to use to
            interpolate between images. Same format as order parameter in
            scipy.interplotate.PiecewisePolynomial.
    """
    if len(outcars) != len(structures):
        raise ValueError("# of Outcars must be same as # of Structures")

    # Reaction coordinate = cumulative RMS displacement between successive
    # relaxed structures (per-site distances, root of the summed squares).
    # Computed from the relaxed geometries, not the initial interpolation.
    coords = [0]
    prev_struct = structures[0]
    for cur_struct in structures[1:]:
        site_dists = np.array(
            [s2.distance(s1) for s1, s2 in zip(prev_struct, cur_struct)])
        coords.append(np.sqrt(np.sum(site_dists ** 2)))
        prev_struct = cur_struct
    coords = np.cumsum(coords)

    energies = []
    forces = []
    last = len(outcars) - 1
    for idx, outcar in enumerate(outcars):
        outcar.read_neb()
        energies.append(outcar.data["energy"])
        # Endpoint images carry no tangent force.
        forces.append(0 if idx in (0, last) else outcar.data["tangent_force"])

    return cls(r=np.array(coords), energies=energies,
               forces=np.array(forces), structures=structures, **kwargs)
[ "def", "from_outcars", "(", "cls", ",", "outcars", ",", "structures", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "outcars", ")", "!=", "len", "(", "structures", ")", ":", "raise", "ValueError", "(", "\"# of Outcars must be same as # of Structures\"",...
Initializes an NEBAnalysis from Outcar and Structure objects. Use the static constructors, e.g., :class:`from_dir` instead if you prefer to have these automatically generated from a directory of NEB calculations. Args: outcars ([Outcar]): List of Outcar objects. Note that these have to be ordered from start to end along reaction coordinates. structures ([Structure]): List of Structures along reaction coordinate. Must be same length as outcar. interpolation_order (int): Order of polynomial to use to interpolate between images. Same format as order parameter in scipy.interplotate.PiecewisePolynomial.
[ "Initializes", "an", "NEBAnalysis", "from", "Outcar", "and", "Structure", "objects", ".", "Use", "the", "static", "constructors", "e", ".", "g", ".", ":", "class", ":", "from_dir", "instead", "if", "you", "prefer", "to", "have", "these", "automatically", "ge...
python
train
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L113-L144
def shutdown_host(port, username=None, password=None, authdb=None):
    """ Send the shutdown command to a mongod or mongos on given port.

    This function can be called as a separate thread.

    :param port: local port the mongod/mongos listens on.
    :param username: optional admin-database user for authentication.
    :param password: password for ``username``.
    :param authdb: database the credentials belong to; must be "admin".
    :raises RuntimeError: if credentials are given for a non-admin db.
    """
    host = 'localhost:%i' % port
    try:
        mc = MongoConnection(host)
    except ConnectionFailure:
        # Nothing listening (or already down): nothing to do.
        return
    try:
        if username and password and authdb:
            if authdb != "admin":
                raise RuntimeError("given username/password is not for "
                                   "admin database")
            try:
                mc.admin.authenticate(name=username, password=password)
            except OperationFailure:
                # perhaps auth is not required
                pass
        try:
            mc.admin.command('shutdown', force=True)
        except AutoReconnect:
            # Expected: the server drops the connection while shutting down.
            pass
        except OperationFailure:
            print("Error: cannot authenticate to shut down %s." % host)
        except ConnectionFailure:
            pass
    finally:
        # BUG FIX: the original leaked the connection when shutdown failed
        # with OperationFailure (the early return skipped mc.close()) and
        # when RuntimeError propagated; always close here.
        mc.close()
[ "def", "shutdown_host", "(", "port", ",", "username", "=", "None", ",", "password", "=", "None", ",", "authdb", "=", "None", ")", ":", "host", "=", "'localhost:%i'", "%", "port", "try", ":", "mc", "=", "MongoConnection", "(", "host", ")", "try", ":", ...
Send the shutdown command to a mongod or mongos on given port. This function can be called as a separate thread.
[ "Send", "the", "shutdown", "command", "to", "a", "mongod", "or", "mongos", "on", "given", "port", "." ]
python
train
buildbot/buildbot
master/buildbot/schedulers/forcesched.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/schedulers/forcesched.py#L180-L182
def updateFromKwargs(self, properties, kwargs, collector, **unused):
    """Primary entry point to turn 'kwargs' into 'properties'"""
    # Extract this parameter's value from the raw kwargs, then record it
    # under the parameter's own name.
    value = self.getFromKwargs(kwargs)
    properties[self.name] = value
[ "def", "updateFromKwargs", "(", "self", ",", "properties", ",", "kwargs", ",", "collector", ",", "*", "*", "unused", ")", ":", "properties", "[", "self", ".", "name", "]", "=", "self", ".", "getFromKwargs", "(", "kwargs", ")" ]
Primary entry point to turn 'kwargs' into 'properties
[ "Primary", "entry", "point", "to", "turn", "kwargs", "into", "properties" ]
python
train
mulkieran/justbases
src/justbases/_rationals.py
https://github.com/mulkieran/justbases/blob/dd52ff4b3d11609f54b2673599ee4eeb20f9734f/src/justbases/_rationals.py#L208-L261
def _validate( # pylint: disable=too-many-arguments cls, sign, integer_part, non_repeating_part, repeating_part, base ): """ Check if radix is valid. :param int sign: -1, 0, or 1 as appropriate :param integer_part: the part on the left side of the radix :type integer_part: list of int :param non_repeating_part: non repeating part on left side :type non_repeating_part: list of int :param repeating_part: repeating part :type repeating_part: list of int :param int base: base of the radix, must be at least 2 :returns: BasesValueError if invalid values :rtype: BasesValueError or NoneType Complexity: O(len(integer_part + non_repeating_part + repeating_part)) """ if any(x < 0 or x >= base for x in integer_part): return BasesValueError( integer_part, "integer_part", "values must be between 0 and %s" % base ) if any(x < 0 or x >= base for x in non_repeating_part): return BasesValueError( non_repeating_part, "non_repeating_part", "values must be between 0 and %s" % base ) if any(x < 0 or x >= base for x in repeating_part): return BasesValueError( repeating_part, "repeating_part", "values must be between 0 and %s" % base ) if base < 2: return BasesValueError(base, "base", "must be at least 2") if sign not in (-1, 0, 1) or sign is True or sign is False: return BasesValueError( sign, "sign", "must be an int between -1 and 1" ) return None
[ "def", "_validate", "(", "# pylint: disable=too-many-arguments", "cls", ",", "sign", ",", "integer_part", ",", "non_repeating_part", ",", "repeating_part", ",", "base", ")", ":", "if", "any", "(", "x", "<", "0", "or", "x", ">=", "base", "for", "x", "in", "...
Check if radix is valid. :param int sign: -1, 0, or 1 as appropriate :param integer_part: the part on the left side of the radix :type integer_part: list of int :param non_repeating_part: non repeating part on left side :type non_repeating_part: list of int :param repeating_part: repeating part :type repeating_part: list of int :param int base: base of the radix, must be at least 2 :returns: BasesValueError if invalid values :rtype: BasesValueError or NoneType Complexity: O(len(integer_part + non_repeating_part + repeating_part))
[ "Check", "if", "radix", "is", "valid", "." ]
python
train
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L126-L143
def point_in_multipolygon(point, multipoly):
    """Check whether a point lies inside a (multi)polygon.

    Donut (holed) polygons are not supported.

    Keyword arguments:
    point -- point geojson object
    multipoly -- multipolygon geojson object

    Returns True if the point is inside, else False.
    """
    if multipoly['type'] == "MultiPolygon":
        polygons = [multipoly['coordinates']]
    else:
        polygons = multipoly['coordinates']
    # Short-circuits on the first containing polygon, like the original
    # early-return loop.
    return any(_point_in_polygon(point, coords) for coords in polygons)
[ "def", "point_in_multipolygon", "(", "point", ",", "multipoly", ")", ":", "coords_array", "=", "[", "multipoly", "[", "'coordinates'", "]", "]", "if", "multipoly", "[", "'type'", "]", "==", "\"MultiPolygon\"", "else", "multipoly", "[", "'coordinates'", "]", "f...
valid whether the point is located in a mulitpolygon (donut polygon is not supported) Keyword arguments: point -- point geojson object multipoly -- multipolygon geojson object if(point inside multipoly) return true else false
[ "valid", "whether", "the", "point", "is", "located", "in", "a", "mulitpolygon", "(", "donut", "polygon", "is", "not", "supported", ")" ]
python
train
ynop/audiomate
audiomate/corpus/validation/label_list.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/validation/label_list.py#L188-L216
def validate_utterance(self, utterance):
    """
    Validate the given utterance and return a list of uncovered
    segments (start, end).
    """
    gaps = []

    if self.label_list_idx not in utterance.label_lists.keys():
        # No label-list of interest at all: the whole utterance is
        # uncovered.
        gaps.append((utterance.start, utterance.end))
        return gaps

    start = 0
    end = utterance.duration
    label_list = utterance.label_lists[self.label_list_idx]
    ranges = list(label_list.ranges(yield_ranges_without_labels=True))

    # Gap before the first range?
    if ranges[0][0] - start > self.threshold:
        gaps.append((start, ranges[0][0]))

    # Ranges without any labels count as uncovered when longer than the
    # threshold.
    for rng in ranges:
        if len(rng[2]) == 0 and rng[1] - rng[0] > self.threshold:
            gaps.append((rng[0], rng[1]))

    # Gap after the last range?
    if ranges[-1][1] > 0 and end - ranges[-1][1] > self.threshold:
        gaps.append((ranges[-1][1], end))

    return gaps
[ "def", "validate_utterance", "(", "self", ",", "utterance", ")", ":", "uncovered_segments", "=", "[", "]", "if", "self", ".", "label_list_idx", "in", "utterance", ".", "label_lists", ".", "keys", "(", ")", ":", "start", "=", "0", "end", "=", "utterance", ...
Validate the given utterance and return a list of uncovered segments (start, end).
[ "Validate", "the", "given", "utterance", "and", "return", "a", "list", "of", "uncovered", "segments", "(", "start", "end", ")", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L3434-L3441
def JP(cpu, target):
    """
    Jumps short if parity.

    :param cpu: current CPU.
    :param target: destination operand.
    """
    # Select between the branch destination and the fall-through PC based
    # on the parity flag.
    destination = target.read()
    cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.PF, destination,
                             cpu.PC)
[ "def", "JP", "(", "cpu", ",", "target", ")", ":", "cpu", ".", "PC", "=", "Operators", ".", "ITEBV", "(", "cpu", ".", "address_bit_size", ",", "cpu", ".", "PF", ",", "target", ".", "read", "(", ")", ",", "cpu", ".", "PC", ")" ]
Jumps short if parity. :param cpu: current CPU. :param target: destination operand.
[ "Jumps", "short", "if", "parity", "." ]
python
valid
ereOn/azmq
azmq/context.py
https://github.com/ereOn/azmq/blob/9f40d6d721eea7f7659ec6cc668811976db59854/azmq/context.py#L58-L86
def set_zap_authenticator(self, zap_authenticator):
    """
    Setup a ZAP authenticator.

    :param zap_authenticator: A ZAP authenticator instance to use. The
        context takes ownership of the specified instance and will close
        it automatically when it stops. Pass `None` to disown any
        previously owned instance; closing it then becomes the caller's
        responsibility.
    :returns: The previous ZAP authenticator instance (or `None`).
    """
    previous = self._zap_authenticator

    # Disown the old authenticator before installing the new one.
    if previous:
        self.unregister_child(previous)

    self._zap_authenticator = zap_authenticator

    # Any existing ZAP client talks to the old authenticator: close it.
    if self.zap_client:
        self.zap_client.close()

    if zap_authenticator:
        self.register_child(zap_authenticator)
        self.zap_client = ZAPClient(context=self)
        self.register_child(self.zap_client)
    else:
        self.zap_client = None

    return previous
[ "def", "set_zap_authenticator", "(", "self", ",", "zap_authenticator", ")", ":", "result", "=", "self", ".", "_zap_authenticator", "if", "result", ":", "self", ".", "unregister_child", "(", "result", ")", "self", ".", "_zap_authenticator", "=", "zap_authenticator"...
Setup a ZAP authenticator. :param zap_authenticator: A ZAP authenticator instance to use. The context takes ownership of the specified instance. It will close it automatically when it stops. If `None` is specified, any previously owner instance is disowned and returned. It becomes the caller's responsibility to close it. :returns: The previous ZAP authenticator instance.
[ "Setup", "a", "ZAP", "authenticator", "." ]
python
train
dopefishh/pympi
pympi/Praat.py
https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Praat.py#L167-L178
def remove_tier(self, name_num):
    """Remove a tier by name or by (1-based) number.

    When a number is given, the tier at that position is deleted; when a
    name is given, every tier carrying that name is removed.

    :param name_num: Name or number of the tier to remove.
    :type name_num: int or str
    :raises IndexError: If there is no tier with that number.
    """
    if isinstance(name_num, int):
        # Tier numbers are 1-based; list indices are 0-based.
        self.tiers.pop(name_num - 1)
    else:
        self.tiers = [tier for tier in self.tiers if tier.name != name_num]
[ "def", "remove_tier", "(", "self", ",", "name_num", ")", ":", "if", "isinstance", "(", "name_num", ",", "int", ")", ":", "del", "(", "self", ".", "tiers", "[", "name_num", "-", "1", "]", ")", "else", ":", "self", ".", "tiers", "=", "[", "i", "for...
Remove a tier, when multiple tiers exist with that name only the first is removed. :param name_num: Name or number of the tier to remove. :type name_num: int or str :raises IndexError: If there is no tier with that number.
[ "Remove", "a", "tier", "when", "multiple", "tiers", "exist", "with", "that", "name", "only", "the", "first", "is", "removed", "." ]
python
test
SoCo/SoCo
soco/ms_data_structures.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/ms_data_structures.py#L61-L148
def from_xml(cls, xml, service, parent_id):
    """Return a Music Service item generated from xml.

    :param xml: Object XML. All items containing text are added to the
        content of the item. The class variable ``valid_fields`` of each of
        the classes list the valid fields (after translating the camel case
        to underscore notation). Required fields are listed in the class
        variable by that name (where 'id' has been renamed to 'item_id').
    :type xml: :py:class:`xml.etree.ElementTree.Element`
    :param service: The music service (plugin) instance that retrieved the
        element. This service must contain ``id_to_extended_id`` and
        ``form_uri`` methods and ``description`` and ``service_id``
        attributes.
    :type service: Instance of sub-class of
        :class:`soco.plugins.SoCoPlugin`
    :param parent_id: The parent ID of the item, will either be the
        extended ID of another MusicServiceItem or of a search
    :type parent_id: str
    :raises ValueError: If the XML contains a tag not listed in
        ``valid_fields``, or if a key listed in ``required_fields`` is
        missing from the extracted content.
    """
    # Add a few extra pieces of information
    content = {'description': service.description,
               'service_id': service.service_id,
               'parent_id': parent_id}
    # Extract values from the XML
    all_text_elements = tags_with_text(xml)
    for item in all_text_elements:
        tag = item.tag[len(NAMESPACES['ms']) + 2:]  # Strip namespace
        tag = camel_to_underscore(tag)  # Convert to nice names
        if tag not in cls.valid_fields:
            message = 'The info tag \'{}\' is not allowed for this item'.\
                format(tag)
            raise ValueError(message)
        content[tag] = item.text

    # Convert values for known types
    for key, value in content.items():
        if key == 'duration':
            content[key] = int(value)
        if key in ['can_play', 'can_skip', 'can_add_to_favorites',
                   'can_enumerate']:
            content[key] = True if value == 'true' else False
    # Rename a single item
    content['item_id'] = content.pop('id')
    # And get the extended id
    content['extended_id'] = service.id_to_extended_id(content['item_id'],
                                                       cls)
    # Add URI if there is one for the relevant class
    uri = service.form_uri(content, cls)
    if uri:
        content['uri'] = uri

    # Check for all required values
    for key in cls.required_fields:
        if key not in content:
            message = 'An XML field that correspond to the key \'{}\' '\
                'is required. See the docstring for help.'.format(key)
            # BUG FIX: the original built this message but never raised it,
            # silently accepting items with missing required fields.
            raise ValueError(message)
    return cls.from_dict(content)
[ "def", "from_xml", "(", "cls", ",", "xml", ",", "service", ",", "parent_id", ")", ":", "# Add a few extra pieces of information", "content", "=", "{", "'description'", ":", "service", ".", "description", ",", "'service_id'", ":", "service", ".", "service_id", ",...
Return a Music Service item generated from xml. :param xml: Object XML. All items containing text are added to the content of the item. The class variable ``valid_fields`` of each of the classes list the valid fields (after translating the camel case to underscore notation). Required fields are listed in the class variable by that name (where 'id' has been renamed to 'item_id'). :type xml: :py:class:`xml.etree.ElementTree.Element` :param service: The music service (plugin) instance that retrieved the element. This service must contain ``id_to_extended_id`` and ``form_uri`` methods and ``description`` and ``service_id`` attributes. :type service: Instance of sub-class of :class:`soco.plugins.SoCoPlugin` :param parent_id: The parent ID of the item, will either be the extended ID of another MusicServiceItem or of a search :type parent_id: str For a track the XML can e.g. be on the following form: .. code :: xml <mediaMetadata xmlns="http://www.sonos.com/Services/1.1"> <id>trackid_141359</id> <itemType>track</itemType> <mimeType>audio/aac</mimeType> <title>Teacher</title> <trackMetadata> <artistId>artistid_10597</artistId> <artist>Jethro Tull</artist> <composerId>artistid_10597</composerId> <composer>Jethro Tull</composer> <albumId>albumid_141358</albumId> <album>MU - The Best Of Jethro Tull</album> <albumArtistId>artistid_10597</albumArtistId> <albumArtist>Jethro Tull</albumArtist> <duration>229</duration> <albumArtURI>http://varnish01.music.aspiro.com/sca/ imscale?h=90&amp;w=90&amp;img=/content/music10/prod/wmg/ 1383757201/094639008452_20131105025504431/resources/094639008452. jpg</albumArtURI> <canPlay>true</canPlay> <canSkip>true</canSkip> <canAddToFavorites>true</canAddToFavorites> </trackMetadata> </mediaMetadata>
[ "Return", "a", "Music", "Service", "item", "generated", "from", "xml", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/adafactor_experiments.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/adafactor_experiments.py#L167-L173
def afx_adafactor():
    """Adafactor with recommended learning rate schedule."""
    hp = afx_adam()
    # Swap Adam for Adafactor and use its recommended schedule:
    # rsqrt decay after a 10k-step warmup.
    hp.optimizer = "Adafactor"
    hp.learning_rate_schedule = "rsqrt_decay"
    hp.learning_rate_warmup_steps = 10000
    return hp
[ "def", "afx_adafactor", "(", ")", ":", "hparams", "=", "afx_adam", "(", ")", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "learning_rate_schedule", "=", "\"rsqrt_decay\"", "hparams", ".", "learning_rate_warmup_steps", "=", "10000", "return", ...
Adafactor with recommended learning rate schedule.
[ "Adafactor", "with", "recommended", "learning", "rate", "schedule", "." ]
python
train
jobovy/galpy
galpy/df/streamdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamdf.py#L926-L1063
def _determine_stream_track(self,nTrackChunks):
    """Determine the track of the stream in real space.

    Solves for the stream track at self._nTrackChunks angular offsets
    along the stream and stores the result on the instance
    (self._thetasTrack, self._ObsTrack, self._ObsTrackAA,
    self._allAcfsTrack, self._alljacsTrack, self._allinvjacsTrack,
    self._detdOdJps and their mean/log-mean).

    :param nTrackChunks: number of chunks to sample the track at; None
        picks floor(self._deltaAngleTrack/0.15)+1, with a minimum of 4.
    """
    #Determine how much orbital time is necessary for the progenitor's orbit to cover the stream
    if nTrackChunks is None:
        #default is floor(self._deltaAngleTrack/0.15)+1
        self._nTrackChunks= int(numpy.floor(self._deltaAngleTrack/0.15))+1
    else:
        self._nTrackChunks= nTrackChunks
    if self._nTrackChunks < 4: self._nTrackChunks= 4
    if not hasattr(self,'nInterpolatedTrackChunks'):
        self.nInterpolatedTrackChunks= 1001
    # Orbital time corresponding to the angular extent of the stream.
    dt= self._deltaAngleTrack\
        /self._progenitor_Omega_along_dOmega
    self._trackts= numpy.linspace(0.,2*dt,2*self._nTrackChunks-1) #to be sure that we cover it
    if self._useTM:
        # Delegate entirely to the torus-machinery implementation.
        return self._determine_stream_track_TM()
    #Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle
    prog_stream_offset=\
        _determine_stream_track_single(self._aA,
                                       self._progenitor,
                                       0., #time = 0
                                       self._progenitor_angle,
                                       self._sigMeanSign,
                                       self._dsigomeanProgDirection,
                                       lambda x: self.meanOmega(x,use_physical=False),
                                       0.) #angle = 0
    auxiliaryTrack= Orbit(prog_stream_offset[3])
    if dt < 0.:
        # Trailing arm: integrate backwards by flipping velocities.
        self._trackts= numpy.linspace(0.,-2.*dt,2*self._nTrackChunks-1)
        #Flip velocities before integrating
        auxiliaryTrack= auxiliaryTrack.flip()
    auxiliaryTrack.integrate(self._trackts,self._pot)
    if dt < 0.:
        #Flip velocities again
        # NOTE(review): columns 1, 2, 4 of the raw orbit array are
        # presumably the velocity components -- confirm against the Orbit
        # internals before relying on this.
        auxiliaryTrack._orb.orbit[:,1]= -auxiliaryTrack._orb.orbit[:,1]
        auxiliaryTrack._orb.orbit[:,2]= -auxiliaryTrack._orb.orbit[:,2]
        auxiliaryTrack._orb.orbit[:,4]= -auxiliaryTrack._orb.orbit[:,4]
    #Calculate the actions, frequencies, and angle for this auxiliary orbit
    acfs= self._aA.actionsFreqs(auxiliaryTrack(0.),
                                use_physical=False)
    auxiliary_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
    auxiliary_Omega_along_dOmega= \
        numpy.dot(auxiliary_Omega,self._dsigomeanProgDirection)
    #Now calculate the actions, frequencies, and angles + Jacobian for each chunk
    allAcfsTrack= numpy.empty((self._nTrackChunks,9))
    alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
    allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
    thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
                                self._nTrackChunks)
    ObsTrack= numpy.empty((self._nTrackChunks,6))
    ObsTrackAA= numpy.empty((self._nTrackChunks,6))
    detdOdJps= numpy.empty((self._nTrackChunks))
    if self._multi is None:
        # Serial evaluation of each track chunk.
        for ii in range(self._nTrackChunks):
            multiOut= _determine_stream_track_single(self._aA,
                                                     auxiliaryTrack,
                                                     self._trackts[ii]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track
                                                     self._progenitor_angle,
                                                     self._sigMeanSign,
                                                     self._dsigomeanProgDirection,
                                                     lambda x: self.meanOmega(x,use_physical=False),
                                                     thetasTrack[ii])
            allAcfsTrack[ii,:]= multiOut[0]
            alljacsTrack[ii,:,:]= multiOut[1]
            allinvjacsTrack[ii,:,:]= multiOut[2]
            ObsTrack[ii,:]= multiOut[3]
            ObsTrackAA[ii,:]= multiOut[4]
            detdOdJps[ii]= multiOut[5]
    else:
        # Parallel evaluation across up to self._multi cores.
        # NOTE(review): the inner 'lambda x' shadows the outer map
        # variable 'x'; behavior is correct but the shadowing is easy to
        # misread.
        multiOut= multi.parallel_map(\
            (lambda x: _determine_stream_track_single(self._aA,auxiliaryTrack,
                                                      self._trackts[x]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega),
                                                      self._progenitor_angle,
                                                      self._sigMeanSign,
                                                      self._dsigomeanProgDirection,
                                                      lambda x: self.meanOmega(x,use_physical=False),
                                                      thetasTrack[x])),
            range(self._nTrackChunks),
            numcores=numpy.amin([self._nTrackChunks,
                                 multiprocessing.cpu_count(),
                                 self._multi]))
        for ii in range(self._nTrackChunks):
            allAcfsTrack[ii,:]= multiOut[ii][0]
            alljacsTrack[ii,:,:]= multiOut[ii][1]
            allinvjacsTrack[ii,:,:]= multiOut[ii][2]
            ObsTrack[ii,:]= multiOut[ii][3]
            ObsTrackAA[ii,:]= multiOut[ii][4]
            detdOdJps[ii]= multiOut[ii][5]
    #Repeat the track calculation using the previous track, to get closer to it
    for nn in range(self.nTrackIterations):
        if self._multi is None:
            for ii in range(self._nTrackChunks):
                multiOut= _determine_stream_track_single(self._aA,
                                                         Orbit(ObsTrack[ii,:]),
                                                         0.,
                                                         self._progenitor_angle,
                                                         self._sigMeanSign,
                                                         self._dsigomeanProgDirection,
                                                         lambda x:self.meanOmega(x,use_physical=False),
                                                         thetasTrack[ii])
                allAcfsTrack[ii,:]= multiOut[0]
                alljacsTrack[ii,:,:]= multiOut[1]
                allinvjacsTrack[ii,:,:]= multiOut[2]
                ObsTrack[ii,:]= multiOut[3]
                ObsTrackAA[ii,:]= multiOut[4]
                detdOdJps[ii]= multiOut[5]
        else:
            multiOut= multi.parallel_map(\
                (lambda x: _determine_stream_track_single(self._aA,Orbit(ObsTrack[x,:]),0.,
                                                          self._progenitor_angle,
                                                          self._sigMeanSign,
                                                          self._dsigomeanProgDirection,
                                                          lambda x: self.meanOmega(x,use_physical=False),
                                                          thetasTrack[x])),
                range(self._nTrackChunks),
                numcores=numpy.amin([self._nTrackChunks,
                                     multiprocessing.cpu_count(),
                                     self._multi]))
            for ii in range(self._nTrackChunks):
                allAcfsTrack[ii,:]= multiOut[ii][0]
                alljacsTrack[ii,:,:]= multiOut[ii][1]
                allinvjacsTrack[ii,:,:]= multiOut[ii][2]
                ObsTrack[ii,:]= multiOut[ii][3]
                ObsTrackAA[ii,:]= multiOut[ii][4]
                detdOdJps[ii]= multiOut[ii][5]
    #Store the track
    self._thetasTrack= thetasTrack
    self._ObsTrack= ObsTrack
    self._ObsTrackAA= ObsTrackAA
    self._allAcfsTrack= allAcfsTrack
    self._alljacsTrack= alljacsTrack
    self._allinvjacsTrack= allinvjacsTrack
    self._detdOdJps= detdOdJps
    self._meandetdOdJp= numpy.mean(self._detdOdJps)
    self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
    self._calc_ObsTrackXY()
    return None
[ "def", "_determine_stream_track", "(", "self", ",", "nTrackChunks", ")", ":", "#Determine how much orbital time is necessary for the progenitor's orbit to cover the stream", "if", "nTrackChunks", "is", "None", ":", "#default is floor(self._deltaAngleTrack/0.15)+1", "self", ".", "_n...
Determine the track of the stream in real space
[ "Determine", "the", "track", "of", "the", "stream", "in", "real", "space" ]
python
train
brocade/pynos
pynos/versions/base/yang/tailf_netconf_transactions.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/tailf_netconf_transactions.py#L57-L67
def start_transaction_input_with_inactive(self, **kwargs):
    """Auto Generated Code.

    Builds the tail-f NETCONF ``start-transaction`` RPC payload carrying
    the ``with-inactive`` capability element and hands it to the callback.

    :param callback: optional override for ``self._callback``; receives
        the built ``start_transaction`` element tree.
    :return: whatever the callback returns.
    """
    # NOTE: the original also built an unused ET.Element("config") that was
    # immediately overwritten; that dead assignment has been removed.
    start_transaction = ET.Element("start_transaction")
    config = start_transaction
    input_el = ET.SubElement(start_transaction, "input")
    ET.SubElement(input_el, "with-inactive",
                  xmlns="http://tail-f.com/ns/netconf/inactive/1.0")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "start_transaction_input_with_inactive", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "start_transaction", "=", "ET", ".", "Element", "(", "\"start_transaction\"", ")", "config", "=", "star...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
pmacosta/peng
peng/wave_functions.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/wave_functions.py#L1473-L1507
def naverage(wave, indep_min=None, indep_max=None):
    r"""
    Return the numerical average of a waveform's dependent variable vector.

    The waveform is first restricted to the [indep_min, indep_max] window;
    the trapezoidal integral of the dependent vector is then divided by
    the span of the independent vector.

    :param wave: Waveform
    :type wave: :py:class:`peng.eng.Waveform`

    :param indep_min: Independent vector start point of computation
    :type indep_min: integer or float

    :param indep_max: Independent vector stop point of computation
    :type indep_max: integer or float

    :raises:
     * RuntimeError (Argument \`indep_max\` is not valid)

     * RuntimeError (Argument \`indep_min\` is not valid)

     * RuntimeError (Argument \`wave\` is not valid)

     * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments)
    """
    # Work on a copy so the caller's waveform is left untouched.
    bounded = copy.copy(wave)
    _bound_waveform(bounded, indep_min, indep_max)
    span = bounded._indep_vector[-1] - bounded._indep_vector[0]
    area = np.trapz(bounded._dep_vector, x=bounded._indep_vector)
    return area / span
[ "def", "naverage", "(", "wave", ",", "indep_min", "=", "None", ",", "indep_max", "=", "None", ")", ":", "ret", "=", "copy", ".", "copy", "(", "wave", ")", "_bound_waveform", "(", "ret", ",", "indep_min", ",", "indep_max", ")", "delta_x", "=", "ret", ...
r""" Return the numerical average of a waveform's dependent variable vector. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.naverage :raises: * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) .. [[[end]]]
[ "r", "Return", "the", "numerical", "average", "of", "a", "waveform", "s", "dependent", "variable", "vector", "." ]
python
test
pantsbuild/pants
src/python/pants/goal/run_tracker.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/goal/run_tracker.py#L522-L557
def get_critical_path_timings(self): """ Get the cumulative timings of each goal and all of the goals it (transitively) depended on. """ setup_workunit = WorkUnitLabel.SETUP.lower() transitive_dependencies = dict() for goal_info in self._sorted_goal_infos: deps = transitive_dependencies.setdefault(goal_info.goal.name, set()) for dep in goal_info.goal_dependencies: deps.add(dep.name) deps.update(transitive_dependencies.get(dep.name)) # Add setup workunit as a dep manually, as its unaccounted for, otherwise. deps.add(setup_workunit) raw_timings = dict() for entry in self.cumulative_timings.get_all(): raw_timings[entry["label"]] = entry["timing"] critical_path_timings = AggregatedTimings() def add_to_timings(goal, dep): tracking_label = get_label(goal) timing_label = get_label(dep) critical_path_timings.add_timing(tracking_label, raw_timings.get(timing_label, 0.0)) def get_label(dep): return "{}:{}".format(RunTracker.DEFAULT_ROOT_NAME, dep) # Add setup workunit to critical_path_timings manually, as its unaccounted for, otherwise. add_to_timings(setup_workunit, setup_workunit) for goal, deps in transitive_dependencies.items(): add_to_timings(goal, goal) for dep in deps: add_to_timings(goal, dep) return critical_path_timings
[ "def", "get_critical_path_timings", "(", "self", ")", ":", "setup_workunit", "=", "WorkUnitLabel", ".", "SETUP", ".", "lower", "(", ")", "transitive_dependencies", "=", "dict", "(", ")", "for", "goal_info", "in", "self", ".", "_sorted_goal_infos", ":", "deps", ...
Get the cumulative timings of each goal and all of the goals it (transitively) depended on.
[ "Get", "the", "cumulative", "timings", "of", "each", "goal", "and", "all", "of", "the", "goals", "it", "(", "transitively", ")", "depended", "on", "." ]
python
train
ciena/afkak
afkak/client.py
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L267-L275
def reset_all_metadata(self): """Clear all cached metadata Metadata will be re-fetched as required to satisfy requests. """ self.topics_to_brokers.clear() self.topic_partitions.clear() self.topic_errors.clear() self.consumer_group_to_brokers.clear()
[ "def", "reset_all_metadata", "(", "self", ")", ":", "self", ".", "topics_to_brokers", ".", "clear", "(", ")", "self", ".", "topic_partitions", ".", "clear", "(", ")", "self", ".", "topic_errors", ".", "clear", "(", ")", "self", ".", "consumer_group_to_broker...
Clear all cached metadata Metadata will be re-fetched as required to satisfy requests.
[ "Clear", "all", "cached", "metadata" ]
python
train
CloverHealth/temple
temple/clean.py
https://github.com/CloverHealth/temple/blob/d7b75da2459f72ba74d6f3b6e1ab95c3d1b92ccd/temple/clean.py#L11-L14
def _get_current_branch(): """Determine the current git branch""" result = temple.utils.shell('git rev-parse --abbrev-ref HEAD', stdout=subprocess.PIPE) return result.stdout.decode('utf8').strip()
[ "def", "_get_current_branch", "(", ")", ":", "result", "=", "temple", ".", "utils", ".", "shell", "(", "'git rev-parse --abbrev-ref HEAD'", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "return", "result", ".", "stdout", ".", "decode", "(", "'utf8'", "...
Determine the current git branch
[ "Determine", "the", "current", "git", "branch" ]
python
valid
shreyaspotnis/rampage
rampage/daq/gpib.py
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/gpib.py#L479-L489
def disable_all(self, disable): """Disables all modulation and outputs of the Standford MW func. generator""" commands = ['ENBH 0', #disable high freq. rear output 'ENBL 0', #disable low freq. front bnc 'MODL 0' #disable modulation ] command_string = '\n'.join(commands) print_string = '\n\t' + command_string.replace('\n', '\n\t') logging.info(print_string) if disable: self.instr.write(command_string)
[ "def", "disable_all", "(", "self", ",", "disable", ")", ":", "commands", "=", "[", "'ENBH 0'", ",", "#disable high freq. rear output", "'ENBL 0'", ",", "#disable low freq. front bnc", "'MODL 0'", "#disable modulation", "]", "command_string", "=", "'\\n'", ".", "join",...
Disables all modulation and outputs of the Standford MW func. generator
[ "Disables", "all", "modulation", "and", "outputs", "of", "the", "Standford", "MW", "func", ".", "generator" ]
python
train
corpusops/pdbclone
lib/pdb_clone/pdb.py
https://github.com/corpusops/pdbclone/blob/f781537c243a4874b246d43dbdef8c4279f0094d/lib/pdb_clone/pdb.py#L1494-L1506
def do_longlist(self, arg): """longlist | ll List the whole source code for the current function or frame. """ filename = self.curframe.f_code.co_filename breaklist = self.get_file_breaks(filename) try: lines, lineno = getsourcelines(self.curframe, self.get_locals(self.curframe)) except IOError as err: self.error(err) return self._print_lines(lines, lineno, breaklist, self.curframe)
[ "def", "do_longlist", "(", "self", ",", "arg", ")", ":", "filename", "=", "self", ".", "curframe", ".", "f_code", ".", "co_filename", "breaklist", "=", "self", ".", "get_file_breaks", "(", "filename", ")", "try", ":", "lines", ",", "lineno", "=", "getsou...
longlist | ll List the whole source code for the current function or frame.
[ "longlist", "|", "ll", "List", "the", "whole", "source", "code", "for", "the", "current", "function", "or", "frame", "." ]
python
train
singularitti/text-stream
text_stream/__init__.py
https://github.com/singularitti/text-stream/blob/4df53b98e9f61d983dbd46edd96db93122577eb5/text_stream/__init__.py#L127-L139
def generator_between(self, begin: int, end: int) -> Iterator[str]: """ Create a generate that iterates the whole content of the file or string, starting from *begin* index, end by *end* index. **Not byte!** :param begin: An integer labels the starting index. :param end: An integer labels the ending index. :return: An iterator of string. """ s: str = self.content[begin:end + 1] for line in s: yield line
[ "def", "generator_between", "(", "self", ",", "begin", ":", "int", ",", "end", ":", "int", ")", "->", "Iterator", "[", "str", "]", ":", "s", ":", "str", "=", "self", ".", "content", "[", "begin", ":", "end", "+", "1", "]", "for", "line", "in", ...
Create a generate that iterates the whole content of the file or string, starting from *begin* index, end by *end* index. **Not byte!** :param begin: An integer labels the starting index. :param end: An integer labels the ending index. :return: An iterator of string.
[ "Create", "a", "generate", "that", "iterates", "the", "whole", "content", "of", "the", "file", "or", "string", "starting", "from", "*", "begin", "*", "index", "end", "by", "*", "end", "*", "index", ".", "**", "Not", "byte!", "**" ]
python
train
yodle/docker-registry-client
docker_registry_client/_BaseClient.py
https://github.com/yodle/docker-registry-client/blob/8abf6b0200a68bed986f698dcbf02d444257b75c/docker_registry_client/_BaseClient.py#L73-L77
def search(self, q=''): """GET /v1/search""" if q: q = '?q=' + q return self._http_call('/v1/search' + q, get)
[ "def", "search", "(", "self", ",", "q", "=", "''", ")", ":", "if", "q", ":", "q", "=", "'?q='", "+", "q", "return", "self", ".", "_http_call", "(", "'/v1/search'", "+", "q", ",", "get", ")" ]
GET /v1/search
[ "GET", "/", "v1", "/", "search" ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/works.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/works.py#L429-L432
def register_bec_task(self, *args, **kwargs): """Register a BEC task.""" kwargs["task_class"] = BecTask return self.register_task(*args, **kwargs)
[ "def", "register_bec_task", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"task_class\"", "]", "=", "BecTask", "return", "self", ".", "register_task", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Register a BEC task.
[ "Register", "a", "BEC", "task", "." ]
python
train
pgmpy/pgmpy
pgmpy/readwrite/XMLBIF.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/XMLBIF.py#L345-L370
def get_properties(self): """ Add property to variables in XMLBIF Return ------ dict: dict of type {variable: property tag} Examples -------- >>> writer = XMLBIFWriter(model) >>> writer.get_property() {'light-on': <Element PROPERTY at 0x7f7a2ffac1c8>, 'family-out': <Element PROPERTY at 0x7f7a2ffac148>, 'hear-bark': <Element PROPERTY at 0x7f7a2ffac188>, 'bowel-problem': <Element PROPERTY at 0x7f7a2ffac0c8>, 'dog-out': <Element PROPERTY at 0x7f7a2ffac108>} """ variables = self.model.nodes() property_tag = {} for var in sorted(variables): properties = self.model.node[var] property_tag[var] = etree.SubElement(self.variables[var], "PROPERTY") for prop, val in properties.items(): property_tag[var].text = str(prop) + " = " + str(val) return property_tag
[ "def", "get_properties", "(", "self", ")", ":", "variables", "=", "self", ".", "model", ".", "nodes", "(", ")", "property_tag", "=", "{", "}", "for", "var", "in", "sorted", "(", "variables", ")", ":", "properties", "=", "self", ".", "model", ".", "no...
Add property to variables in XMLBIF Return ------ dict: dict of type {variable: property tag} Examples -------- >>> writer = XMLBIFWriter(model) >>> writer.get_property() {'light-on': <Element PROPERTY at 0x7f7a2ffac1c8>, 'family-out': <Element PROPERTY at 0x7f7a2ffac148>, 'hear-bark': <Element PROPERTY at 0x7f7a2ffac188>, 'bowel-problem': <Element PROPERTY at 0x7f7a2ffac0c8>, 'dog-out': <Element PROPERTY at 0x7f7a2ffac108>}
[ "Add", "property", "to", "variables", "in", "XMLBIF" ]
python
train
Kitware/tangelo
tangelo/tangelo/__init__.py
https://github.com/Kitware/tangelo/blob/470034ee9b3d7a01becc1ce5fddc7adc1d5263ef/tangelo/tangelo/__init__.py#L289-L316
def return_type(rettype): """ Decorate a function to automatically convert its return type to a string using a custom function. Web-based service functions must return text to the client. Tangelo contains default logic to convert many kinds of values into string, but this decorator allows the service writer to specify custom behavior falling outside of the default. If the conversion fails, an appropriate server error will be raised. """ def wrap(f): @functools.wraps(f) def converter(*pargs, **kwargs): # Run the function to capture the output. result = f(*pargs, **kwargs) # Convert the result using the return type function. try: result = rettype(result) except ValueError as e: http_status(500, "Return Value Conversion Failed") content_type("application/json") return {"error": str(e)} return result return converter return wrap
[ "def", "return_type", "(", "rettype", ")", ":", "def", "wrap", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "converter", "(", "*", "pargs", ",", "*", "*", "kwargs", ")", ":", "# Run the function to capture the output.", "resu...
Decorate a function to automatically convert its return type to a string using a custom function. Web-based service functions must return text to the client. Tangelo contains default logic to convert many kinds of values into string, but this decorator allows the service writer to specify custom behavior falling outside of the default. If the conversion fails, an appropriate server error will be raised.
[ "Decorate", "a", "function", "to", "automatically", "convert", "its", "return", "type", "to", "a", "string", "using", "a", "custom", "function", "." ]
python
train
limix/limix-core
limix_core/mean/mean_efficient.py
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/mean_efficient.py#L69-L77
def dof(self, index=None): """The number of degrees of freedom""" if index is None: dof = 0 for i in range(self.len): dof += self.A[i].shape[0] * self.F[i].shape[1] return dof else: return self.A[index].shape[0] * self.F[index].shape[1]
[ "def", "dof", "(", "self", ",", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "dof", "=", "0", "for", "i", "in", "range", "(", "self", ".", "len", ")", ":", "dof", "+=", "self", ".", "A", "[", "i", "]", ".", "shape", "["...
The number of degrees of freedom
[ "The", "number", "of", "degrees", "of", "freedom" ]
python
train
radzak/rtv-downloader
rtv/extractors/common.py
https://github.com/radzak/rtv-downloader/blob/b9114b7f4c35fabe6ec9ad1764a65858667a866e/rtv/extractors/common.py#L40-L45
def get_info(self) -> dict: """Get information about the videos from YoutubeDL package.""" with suppress_stdout(): with youtube_dl.YoutubeDL() as ydl: info_dict = ydl.extract_info(self.url, download=False) return info_dict
[ "def", "get_info", "(", "self", ")", "->", "dict", ":", "with", "suppress_stdout", "(", ")", ":", "with", "youtube_dl", ".", "YoutubeDL", "(", ")", "as", "ydl", ":", "info_dict", "=", "ydl", ".", "extract_info", "(", "self", ".", "url", ",", "download"...
Get information about the videos from YoutubeDL package.
[ "Get", "information", "about", "the", "videos", "from", "YoutubeDL", "package", "." ]
python
train
jazzband/sorl-thumbnail
sorl/thumbnail/engines/convert_engine.py
https://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/engines/convert_engine.py#L174-L180
def _cropbox(self, image, x, y, x2, y2): """ Crops the image to a set of x,y coordinates (x,y) is top left, (x2,y2) is bottom left """ image['options']['crop'] = '%sx%s+%s+%s' % (x2 - x, y2 - y, x, y) image['size'] = (x2 - x, y2 - y) # update image size return image
[ "def", "_cropbox", "(", "self", ",", "image", ",", "x", ",", "y", ",", "x2", ",", "y2", ")", ":", "image", "[", "'options'", "]", "[", "'crop'", "]", "=", "'%sx%s+%s+%s'", "%", "(", "x2", "-", "x", ",", "y2", "-", "y", ",", "x", ",", "y", "...
Crops the image to a set of x,y coordinates (x,y) is top left, (x2,y2) is bottom left
[ "Crops", "the", "image", "to", "a", "set", "of", "x", "y", "coordinates", "(", "x", "y", ")", "is", "top", "left", "(", "x2", "y2", ")", "is", "bottom", "left" ]
python
train
renzon/gaeforms
gaeforms/base.py
https://github.com/renzon/gaeforms/blob/7d3f4d964f087c992fe92bc8d41222010b7f6430/gaeforms/base.py#L86-L95
def normalize_field(self, value): """ Method that must transform the value from string Ex: if the expected type is int, it should return int(self._attr) """ if self.default is not None: if value is None or value == '': value = self.default return value
[ "def", "normalize_field", "(", "self", ",", "value", ")", ":", "if", "self", ".", "default", "is", "not", "None", ":", "if", "value", "is", "None", "or", "value", "==", "''", ":", "value", "=", "self", ".", "default", "return", "value" ]
Method that must transform the value from string Ex: if the expected type is int, it should return int(self._attr)
[ "Method", "that", "must", "transform", "the", "value", "from", "string", "Ex", ":", "if", "the", "expected", "type", "is", "int", "it", "should", "return", "int", "(", "self", ".", "_attr", ")" ]
python
train
getpelican/pelican-plugins
org_python_reader/org_python_reader.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/org_python_reader/org_python_reader.py#L60-L75
def _parse_metadatas(self, text_lines): """ From a given Org text, return the metadatas Keyword Arguments: text_lines -- A list, each item is a line of the texte Return: A dict containing metadatas """ if not text_lines: return {} expr_metadata = re.compile(r'^#\+([a-zA-Z]+):(.*)') return { expr_metadata.match(line).group(1).lower() : expr_metadata.match(line).group(2).strip() for line in text_lines }
[ "def", "_parse_metadatas", "(", "self", ",", "text_lines", ")", ":", "if", "not", "text_lines", ":", "return", "{", "}", "expr_metadata", "=", "re", ".", "compile", "(", "r'^#\\+([a-zA-Z]+):(.*)'", ")", "return", "{", "expr_metadata", ".", "match", "(", "lin...
From a given Org text, return the metadatas Keyword Arguments: text_lines -- A list, each item is a line of the texte Return: A dict containing metadatas
[ "From", "a", "given", "Org", "text", "return", "the", "metadatas", "Keyword", "Arguments", ":", "text_lines", "--", "A", "list", "each", "item", "is", "a", "line", "of", "the", "texte", "Return", ":", "A", "dict", "containing", "metadatas" ]
python
train
HDI-Project/ballet
ballet/util/fs.py
https://github.com/HDI-Project/ballet/blob/6f4d4b87b8234cb6bb38b9e9484a58ef8fe8fdb2/ballet/util/fs.py#L65-L79
def isemptyfile(filepath): """Determine if the file both exists and isempty Args: filepath (str, path): file path Returns: bool """ exists = os.path.exists(safepath(filepath)) if exists: filesize = os.path.getsize(safepath(filepath)) return filesize == 0 else: return False
[ "def", "isemptyfile", "(", "filepath", ")", ":", "exists", "=", "os", ".", "path", ".", "exists", "(", "safepath", "(", "filepath", ")", ")", "if", "exists", ":", "filesize", "=", "os", ".", "path", ".", "getsize", "(", "safepath", "(", "filepath", "...
Determine if the file both exists and isempty Args: filepath (str, path): file path Returns: bool
[ "Determine", "if", "the", "file", "both", "exists", "and", "isempty" ]
python
train
quandyfactory/dicttoxml
dicttoxml.py
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L324-L339
def convert_kv(key, val, attr_type, attr={}, cdata=False): """Converts a number or string into an XML element""" LOG.info('Inside convert_kv(): key="%s", val="%s", type(val) is: "%s"' % ( unicode_me(key), unicode_me(val), type(val).__name__) ) key, attr = make_valid_xml_name(key, attr) if attr_type: attr['type'] = get_xml_type(val) attrstring = make_attrstring(attr) return '<%s%s>%s</%s>' % ( key, attrstring, wrap_cdata(val) if cdata == True else escape_xml(val), key )
[ "def", "convert_kv", "(", "key", ",", "val", ",", "attr_type", ",", "attr", "=", "{", "}", ",", "cdata", "=", "False", ")", ":", "LOG", ".", "info", "(", "'Inside convert_kv(): key=\"%s\", val=\"%s\", type(val) is: \"%s\"'", "%", "(", "unicode_me", "(", "key",...
Converts a number or string into an XML element
[ "Converts", "a", "number", "or", "string", "into", "an", "XML", "element" ]
python
train
tradenity/python-sdk
tradenity/resources/return_operation.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/return_operation.py#L671-L692
def replace_return_operation_by_id(cls, return_operation_id, return_operation, **kwargs): """Replace ReturnOperation Replace all attributes of ReturnOperation This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_return_operation_by_id(return_operation_id, return_operation, async=True) >>> result = thread.get() :param async bool :param str return_operation_id: ID of returnOperation to replace (required) :param ReturnOperation return_operation: Attributes of returnOperation to replace (required) :return: ReturnOperation If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_return_operation_by_id_with_http_info(return_operation_id, return_operation, **kwargs) else: (data) = cls._replace_return_operation_by_id_with_http_info(return_operation_id, return_operation, **kwargs) return data
[ "def", "replace_return_operation_by_id", "(", "cls", ",", "return_operation_id", ",", "return_operation", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "ret...
Replace ReturnOperation Replace all attributes of ReturnOperation This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_return_operation_by_id(return_operation_id, return_operation, async=True) >>> result = thread.get() :param async bool :param str return_operation_id: ID of returnOperation to replace (required) :param ReturnOperation return_operation: Attributes of returnOperation to replace (required) :return: ReturnOperation If the method is called asynchronously, returns the request thread.
[ "Replace", "ReturnOperation" ]
python
train
hugapi/hug
hug/use.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/use.py#L206-L214
def setsockopt(self, *sockopts): """Add socket options to set""" if type(sockopts[0]) in (list, tuple): for sock_opt in sockopts[0]: level, option, value = sock_opt self.connection.sockopts.add((level, option, value)) else: level, option, value = sockopts self.connection.sockopts.add((level, option, value))
[ "def", "setsockopt", "(", "self", ",", "*", "sockopts", ")", ":", "if", "type", "(", "sockopts", "[", "0", "]", ")", "in", "(", "list", ",", "tuple", ")", ":", "for", "sock_opt", "in", "sockopts", "[", "0", "]", ":", "level", ",", "option", ",", ...
Add socket options to set
[ "Add", "socket", "options", "to", "set" ]
python
train
thusoy/headsup
headsup.py
https://github.com/thusoy/headsup/blob/165a63cc6c987f664f2efd901d483ca07b7bc898/headsup.py#L68-L80
def get_device_address(device): """ find the local ip address on the given device """ if device is None: return None command = ['ip', 'route', 'list', 'dev', device] ip_routes = subprocess.check_output(command).strip() for line in ip_routes.split('\n'): seen = '' for a in line.split(): if seen == 'src': return a seen = a return None
[ "def", "get_device_address", "(", "device", ")", ":", "if", "device", "is", "None", ":", "return", "None", "command", "=", "[", "'ip'", ",", "'route'", ",", "'list'", ",", "'dev'", ",", "device", "]", "ip_routes", "=", "subprocess", ".", "check_output", ...
find the local ip address on the given device
[ "find", "the", "local", "ip", "address", "on", "the", "given", "device" ]
python
train
python-openxml/python-docx
docx/oxml/xmlchemy.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/xmlchemy.py#L84-L91
def _parse_line(cls, line): """ Return front, attrs, close, text 4-tuple result of parsing XML element string *line*. """ match = cls._xml_elm_line_patt.match(line) front, attrs, close, text = [match.group(n) for n in range(1, 5)] return front, attrs, close, text
[ "def", "_parse_line", "(", "cls", ",", "line", ")", ":", "match", "=", "cls", ".", "_xml_elm_line_patt", ".", "match", "(", "line", ")", "front", ",", "attrs", ",", "close", ",", "text", "=", "[", "match", ".", "group", "(", "n", ")", "for", "n", ...
Return front, attrs, close, text 4-tuple result of parsing XML element string *line*.
[ "Return", "front", "attrs", "close", "text", "4", "-", "tuple", "result", "of", "parsing", "XML", "element", "string", "*", "line", "*", "." ]
python
train
Exanis/django-rest-generators
django_rest_generators/steps/http.py
https://github.com/Exanis/django-rest-generators/blob/fb14ccbba8cb029dc056d852bc13d9216dc924e4/django_rest_generators/steps/http.py#L33-L45
def when_i_send_the_request(context, method): """ :type method: str :type context: behave.runner.Context """ data = context.apiRequestData context.apiRequest = context.apiClient.generic( method, data['url'], data=json.dumps(data['params']), content_type=data['content-type'], format=data['format'], )
[ "def", "when_i_send_the_request", "(", "context", ",", "method", ")", ":", "data", "=", "context", ".", "apiRequestData", "context", ".", "apiRequest", "=", "context", ".", "apiClient", ".", "generic", "(", "method", ",", "data", "[", "'url'", "]", ",", "d...
:type method: str :type context: behave.runner.Context
[ ":", "type", "method", ":", "str", ":", "type", "context", ":", "behave", ".", "runner", ".", "Context" ]
python
train
rainwoodman/kdcount
kdcount/__init__.py
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L112-L129
def make_forest(self, chunksize): """ Divide a tree branch to a forest, each subtree of size at most chunksize """ heap = [] heappush(heap, (-self.size, self)) while True: w, x = heappop(heap) if w == 0: heappush(heap, (0, x)) break if x.less is None \ or (x.size < chunksize): heappush(heap, (0, x)) continue heappush(heap, (x.less.size, x.less)) heappush(heap, (x.greater.size, x.greater)) for w, x in heap: yield x
[ "def", "make_forest", "(", "self", ",", "chunksize", ")", ":", "heap", "=", "[", "]", "heappush", "(", "heap", ",", "(", "-", "self", ".", "size", ",", "self", ")", ")", "while", "True", ":", "w", ",", "x", "=", "heappop", "(", "heap", ")", "if...
Divide a tree branch to a forest, each subtree of size at most chunksize
[ "Divide", "a", "tree", "branch", "to", "a", "forest", "each", "subtree", "of", "size", "at", "most", "chunksize" ]
python
train
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L1896-L1908
def vlm_change_media(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop): '''Edit the parameters of a media. This will delete all existing inputs and add the specified one. @param psz_name: the name of the new broadcast. @param psz_input: the input MRL. @param psz_output: the output MRL (the parameter to the "sout" variable). @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new broadcast. @param b_loop: Should this broadcast be played in loop ? @return: 0 on success, -1 on error. ''' return libvlc_vlm_change_media(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
[ "def", "vlm_change_media", "(", "self", ",", "psz_name", ",", "psz_input", ",", "psz_output", ",", "i_options", ",", "ppsz_options", ",", "b_enabled", ",", "b_loop", ")", ":", "return", "libvlc_vlm_change_media", "(", "self", ",", "str_to_bytes", "(", "psz_name"...
Edit the parameters of a media. This will delete all existing inputs and add the specified one. @param psz_name: the name of the new broadcast. @param psz_input: the input MRL. @param psz_output: the output MRL (the parameter to the "sout" variable). @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new broadcast. @param b_loop: Should this broadcast be played in loop ? @return: 0 on success, -1 on error.
[ "Edit", "the", "parameters", "of", "a", "media", ".", "This", "will", "delete", "all", "existing", "inputs", "and", "add", "the", "specified", "one", "." ]
python
train
d0ugal/home
home/__main__.py
https://github.com/d0ugal/home/blob/e984716ae6c74dc8e40346584668ac5cfeaaf520/home/__main__.py#L55-L60
def create_user(username): "Create a new user." password = prompt_pass("Enter password") user = User(username=username, password=password) db.session.add(user) db.session.commit()
[ "def", "create_user", "(", "username", ")", ":", "password", "=", "prompt_pass", "(", "\"Enter password\"", ")", "user", "=", "User", "(", "username", "=", "username", ",", "password", "=", "password", ")", "db", ".", "session", ".", "add", "(", "user", ...
Create a new user.
[ "Create", "a", "new", "user", "." ]
python
test
mitsei/dlkit
dlkit/json_/grading/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L1148-L1208
def update_grade(self, grade_form): """Updates an existing grade. arg: grade_form (osid.grading.GradeForm): the form containing the elements to be updated raise: IllegalState - ``grade_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``grade_id`` or ``grade_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``grade_form`` did not originate from ``get_grade_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.repository.AssetAdminSession.update_asset_content_template from dlkit.abstract_osid.grading.objects import GradeForm as ABCGradeForm collection = JSONClientValidated('grading', collection='GradeSystem', runtime=self._runtime) if not isinstance(grade_form, ABCGradeForm): raise errors.InvalidArgument('argument type is not an GradeForm') if not grade_form.is_for_update(): raise errors.InvalidArgument('the GradeForm is for update only, not create') try: if self._forms[grade_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('grade_form already used in an update transaction') except KeyError: raise errors.Unsupported('grade_form did not originate from this session') if not grade_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') grade_system_id = Id(grade_form._my_map['gradeSystemId']).get_identifier() grade_system = collection.find_one( {'$and': [{'_id': ObjectId(grade_system_id)}, {'assigned' + self._catalog_name + 'Ids': {'$in': [str(self._catalog_id)]}}]}) index = 0 found = False for i in grade_system['grades']: if i['_id'] == ObjectId(grade_form._my_map['_id']): grade_system['grades'].pop(index) grade_system['grades'].insert(index, grade_form._my_map) found = True break index += 1 if not found: raise errors.NotFound() try: collection.save(grade_system) except: # what 
exceptions does mongodb save raise? raise errors.OperationFailed() self._forms[grade_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned: from .objects import Grade return Grade( osid_object_map=grade_form._my_map, runtime=self._runtime, proxy=self._proxy)
[ "def", "update_grade", "(", "self", ",", "grade_form", ")", ":", "# Implemented from template for", "# osid.repository.AssetAdminSession.update_asset_content_template", "from", "dlkit", ".", "abstract_osid", ".", "grading", ".", "objects", "import", "GradeForm", "as", "ABCG...
Updates an existing grade. arg: grade_form (osid.grading.GradeForm): the form containing the elements to be updated raise: IllegalState - ``grade_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``grade_id`` or ``grade_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``grade_form`` did not originate from ``get_grade_form_for_update()`` *compliance: mandatory -- This method must be implemented.*
[ "Updates", "an", "existing", "grade", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L862-L869
def removeDataset(self, dataset): """ Removes the specified dataset from this repository. This performs a cascading removal of all items within this dataset. """ for datasetRecord in models.Dataset.select().where( models.Dataset.id == dataset.getId()): datasetRecord.delete_instance(recursive=True)
[ "def", "removeDataset", "(", "self", ",", "dataset", ")", ":", "for", "datasetRecord", "in", "models", ".", "Dataset", ".", "select", "(", ")", ".", "where", "(", "models", ".", "Dataset", ".", "id", "==", "dataset", ".", "getId", "(", ")", ")", ":",...
Removes the specified dataset from this repository. This performs a cascading removal of all items within this dataset.
[ "Removes", "the", "specified", "dataset", "from", "this", "repository", ".", "This", "performs", "a", "cascading", "removal", "of", "all", "items", "within", "this", "dataset", "." ]
python
train
acatton/python-spm
docs/files/shell.py
https://github.com/acatton/python-spm/blob/0d4e7177aef17c90f7676ae2e099bb4137a0226f/docs/files/shell.py#L35-L41
def run(line): """ Run a shell line: run('ls /tmp') will execv('/usr/bin/ls', ['ls', '/tmp']) """ arguments = shlex.split(line) path = lookup(arguments[0]) # Lookup the first arguments in PATH execute(path, arguments)
[ "def", "run", "(", "line", ")", ":", "arguments", "=", "shlex", ".", "split", "(", "line", ")", "path", "=", "lookup", "(", "arguments", "[", "0", "]", ")", "# Lookup the first arguments in PATH", "execute", "(", "path", ",", "arguments", ")" ]
Run a shell line: run('ls /tmp') will execv('/usr/bin/ls', ['ls', '/tmp'])
[ "Run", "a", "shell", "line", ":", "run", "(", "ls", "/", "tmp", ")", "will", "execv", "(", "/", "usr", "/", "bin", "/", "ls", "[", "ls", "/", "tmp", "]", ")" ]
python
train
google/openhtf
openhtf/plugs/usb/filesync_service.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/filesync_service.py#L391-L413
def read_until_done(self, command, timeout=None): """Yield messages read until we receive a 'DONE' command. Read messages of the given command until we receive a 'DONE' command. If a command different than the requested one is received, an AdbProtocolError is raised. Args: command: The command to expect, like 'DENT' or 'DATA'. timeout: The timeouts.PolledTimeout to use for this operation. Yields: Messages read, of type self.RECV_MSG_TYPE, see read_message(). Raises: AdbProtocolError: If an unexpected command is read. AdbRemoteError: If a 'FAIL' message is read. """ message = self.read_message(timeout) while message.command != 'DONE': message.assert_command_is(command) yield message message = self.read_message(timeout)
[ "def", "read_until_done", "(", "self", ",", "command", ",", "timeout", "=", "None", ")", ":", "message", "=", "self", ".", "read_message", "(", "timeout", ")", "while", "message", ".", "command", "!=", "'DONE'", ":", "message", ".", "assert_command_is", "(...
Yield messages read until we receive a 'DONE' command. Read messages of the given command until we receive a 'DONE' command. If a command different than the requested one is received, an AdbProtocolError is raised. Args: command: The command to expect, like 'DENT' or 'DATA'. timeout: The timeouts.PolledTimeout to use for this operation. Yields: Messages read, of type self.RECV_MSG_TYPE, see read_message(). Raises: AdbProtocolError: If an unexpected command is read. AdbRemoteError: If a 'FAIL' message is read.
[ "Yield", "messages", "read", "until", "we", "receive", "a", "DONE", "command", "." ]
python
train
pyQode/pyqode.core
pyqode/core/share.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/share.py#L43-L55
def to_dict(self): """ Serializes a definition to a dictionary, ready for json. Children are serialised recursively. """ ddict = {'name': self.name, 'icon': self.icon, 'line': self.line, 'column': self.column, 'children': [], 'description': self.description, 'user_data': self.user_data, 'path': self.file_path} for child in self.children: ddict['children'].append(child.to_dict()) return ddict
[ "def", "to_dict", "(", "self", ")", ":", "ddict", "=", "{", "'name'", ":", "self", ".", "name", ",", "'icon'", ":", "self", ".", "icon", ",", "'line'", ":", "self", ".", "line", ",", "'column'", ":", "self", ".", "column", ",", "'children'", ":", ...
Serializes a definition to a dictionary, ready for json. Children are serialised recursively.
[ "Serializes", "a", "definition", "to", "a", "dictionary", "ready", "for", "json", "." ]
python
train
ayust/kitnirc
kitnirc/modular.py
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/modular.py#L276-L303
def reload_module(self, module_name): """Reloads the specified module without changing its ordering. 1. Calls stop(reloading=True) on the module 2. Reloads the Module object into .loaded_modules 3. Calls start(reloading=True) on the new object If called with a module name that is not currently loaded, it will load it. Returns True if the module was successfully reloaded, otherwise False. """ module = self.loaded_modules.get(module_name) if module: module.stop(reloading=True) else: _log.info("Reload loading new module module '%s'", module_name) success = self.load_module(module_name) if success: _log.info("Successfully (re)loaded module '%s'.", module_name) elif module: _log.error("Unable to reload module '%s', reusing existing.", module_name) else: _log.error("Failed to load module '%s'.", module_name) return False self.loaded_modules[module_name].start(reloading=True) return success
[ "def", "reload_module", "(", "self", ",", "module_name", ")", ":", "module", "=", "self", ".", "loaded_modules", ".", "get", "(", "module_name", ")", "if", "module", ":", "module", ".", "stop", "(", "reloading", "=", "True", ")", "else", ":", "_log", "...
Reloads the specified module without changing its ordering. 1. Calls stop(reloading=True) on the module 2. Reloads the Module object into .loaded_modules 3. Calls start(reloading=True) on the new object If called with a module name that is not currently loaded, it will load it. Returns True if the module was successfully reloaded, otherwise False.
[ "Reloads", "the", "specified", "module", "without", "changing", "its", "ordering", "." ]
python
train
amzn/ion-python
amazon/ion/reader_text.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_text.py#L1567-L1592
def _short_string_handler_factory(): """Generates the short string (double quoted) handler.""" def before(c, ctx, is_field_name, is_clob): assert not (is_clob and is_field_name) is_string = not is_clob and not is_field_name if is_string: ctx.set_ion_type(IonType.STRING) val = ctx.value if is_field_name: assert not val ctx.set_pending_symbol() val = ctx.pending_symbol return val, is_string def on_close(ctx): ctx.set_self_delimiting(True) return ctx.event_transition(IonEvent, IonEventType.SCALAR, ctx.ion_type, ctx.value.as_text()) def after(c, ctx, is_field_name): ctx.set_quoted_text(False).set_self_delimiting(True) return ctx.immediate_transition( ctx.whence if is_field_name else _clob_end_handler(c, ctx), ) return _quoted_text_handler_factory(_DOUBLE_QUOTE, lambda c: c == _DOUBLE_QUOTE, before, after, append_first=False, on_close=on_close)
[ "def", "_short_string_handler_factory", "(", ")", ":", "def", "before", "(", "c", ",", "ctx", ",", "is_field_name", ",", "is_clob", ")", ":", "assert", "not", "(", "is_clob", "and", "is_field_name", ")", "is_string", "=", "not", "is_clob", "and", "not", "i...
Generates the short string (double quoted) handler.
[ "Generates", "the", "short", "string", "(", "double", "quoted", ")", "handler", "." ]
python
train
lazka/hypothesis-fspaths
hypothesis_fspaths.py
https://github.com/lazka/hypothesis-fspaths/blob/19edb40a91ae4055bccf125a1e0b1796fa2e6a5c/hypothesis_fspaths.py#L161-L215
def fspaths(draw, allow_pathlike=None): """A strategy which generates filesystem path values. The generated values include everything which the builtin :func:`python:open` function accepts i.e. which won't lead to :exc:`ValueError` or :exc:`TypeError` being raised. Note that the range of the returned values depends on the operating system, the Python version, and the filesystem encoding as returned by :func:`sys.getfilesystemencoding`. :param allow_pathlike: If :obj:`python:None` makes the strategy include objects implementing the :class:`python:os.PathLike` interface when Python >= 3.6 is used. If :obj:`python:False` no pathlike objects will be generated. If :obj:`python:True` pathlike will be generated (Python >= 3.6 required) :type allow_pathlike: :obj:`python:bool` or :obj:`python:None` .. versionadded:: 3.15 """ has_pathlike = hasattr(os, 'PathLike') if allow_pathlike is None: allow_pathlike = has_pathlike if allow_pathlike and not has_pathlike: raise InvalidArgument( 'allow_pathlike: os.PathLike not supported, use None instead ' 'to enable it only when available') result_type = draw(sampled_from([bytes, text_type])) def tp(s=''): return _str_to_path(s, result_type) special_component = sampled_from([tp(os.curdir), tp(os.pardir)]) normal_component = _filename(result_type) path_component = one_of(normal_component, special_component) extension = normal_component.map(lambda f: tp(os.extsep) + f) root = _path_root(result_type) def optional(st): return one_of(st, just(result_type())) sep = sampled_from([os.sep, os.altsep or os.sep]).map(tp) path_part = builds(lambda s, l: s.join(l), sep, lists(path_component)) main_strategy = builds(lambda *x: tp().join(x), optional(root), path_part, optional(extension)) if allow_pathlike and hasattr(os, 'fspath'): pathlike_strategy = main_strategy.map(lambda p: _PathLike(p)) main_strategy = one_of(main_strategy, pathlike_strategy) return draw(main_strategy)
[ "def", "fspaths", "(", "draw", ",", "allow_pathlike", "=", "None", ")", ":", "has_pathlike", "=", "hasattr", "(", "os", ",", "'PathLike'", ")", "if", "allow_pathlike", "is", "None", ":", "allow_pathlike", "=", "has_pathlike", "if", "allow_pathlike", "and", "...
A strategy which generates filesystem path values. The generated values include everything which the builtin :func:`python:open` function accepts i.e. which won't lead to :exc:`ValueError` or :exc:`TypeError` being raised. Note that the range of the returned values depends on the operating system, the Python version, and the filesystem encoding as returned by :func:`sys.getfilesystemencoding`. :param allow_pathlike: If :obj:`python:None` makes the strategy include objects implementing the :class:`python:os.PathLike` interface when Python >= 3.6 is used. If :obj:`python:False` no pathlike objects will be generated. If :obj:`python:True` pathlike will be generated (Python >= 3.6 required) :type allow_pathlike: :obj:`python:bool` or :obj:`python:None` .. versionadded:: 3.15
[ "A", "strategy", "which", "generates", "filesystem", "path", "values", "." ]
python
valid
eventbrite/pysoa
pysoa/utils.py
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/utils.py#L12-L32
def dict_to_hashable(d): """ Takes a dict and returns an immutable, hashable version of that dict that can be used as a key in dicts or as a set value. Any two dicts passed in with the same content are guaranteed to return the same value. Any two dicts passed in with different content are guaranteed to return different values. Performs comparatively to `repr`. >> %timeit repr(d1) The slowest run took 5.76 times longer than the fastest. This could mean that an intermediate result is being cached 100000 loops, best of 3: 3.48 µs per loop >> %timeit dict_to_hashable(d1) The slowest run took 4.16 times longer than the fastest. This could mean that an intermediate result is being cached 100000 loops, best of 3: 4.07 µs per loop :param d: The dict :return: The hashable representation of the dict """ return frozenset( (k, tuple(v) if isinstance(v, list) else (dict_to_hashable(v) if isinstance(v, dict) else v)) for k, v in six.iteritems(d) )
[ "def", "dict_to_hashable", "(", "d", ")", ":", "return", "frozenset", "(", "(", "k", ",", "tuple", "(", "v", ")", "if", "isinstance", "(", "v", ",", "list", ")", "else", "(", "dict_to_hashable", "(", "v", ")", "if", "isinstance", "(", "v", ",", "di...
Takes a dict and returns an immutable, hashable version of that dict that can be used as a key in dicts or as a set value. Any two dicts passed in with the same content are guaranteed to return the same value. Any two dicts passed in with different content are guaranteed to return different values. Performs comparatively to `repr`. >> %timeit repr(d1) The slowest run took 5.76 times longer than the fastest. This could mean that an intermediate result is being cached 100000 loops, best of 3: 3.48 µs per loop >> %timeit dict_to_hashable(d1) The slowest run took 4.16 times longer than the fastest. This could mean that an intermediate result is being cached 100000 loops, best of 3: 4.07 µs per loop :param d: The dict :return: The hashable representation of the dict
[ "Takes", "a", "dict", "and", "returns", "an", "immutable", "hashable", "version", "of", "that", "dict", "that", "can", "be", "used", "as", "a", "key", "in", "dicts", "or", "as", "a", "set", "value", ".", "Any", "two", "dicts", "passed", "in", "with", ...
python
train
sammchardy/python-binance
binance/depthcache.py
https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/depthcache.py#L153-L181
def _init_cache(self): """Initialise the depth cache calling REST endpoint :return: """ self._last_update_id = None self._depth_message_buffer = [] res = self._client.get_order_book(symbol=self._symbol, limit=self._limit) # process bid and asks from the order book for bid in res['bids']: self._depth_cache.add_bid(bid) for ask in res['asks']: self._depth_cache.add_ask(ask) # set first update id self._last_update_id = res['lastUpdateId'] # set a time to refresh the depth cache if self._refresh_interval: self._refresh_time = int(time.time()) + self._refresh_interval # Apply any updates from the websocket for msg in self._depth_message_buffer: self._process_depth_message(msg, buffer=True) # clear the depth buffer del self._depth_message_buffer
[ "def", "_init_cache", "(", "self", ")", ":", "self", ".", "_last_update_id", "=", "None", "self", ".", "_depth_message_buffer", "=", "[", "]", "res", "=", "self", ".", "_client", ".", "get_order_book", "(", "symbol", "=", "self", ".", "_symbol", ",", "li...
Initialise the depth cache calling REST endpoint :return:
[ "Initialise", "the", "depth", "cache", "calling", "REST", "endpoint" ]
python
train
johnwmillr/LyricsGenius
lyricsgenius/api.py
https://github.com/johnwmillr/LyricsGenius/blob/e36482f7c42235037f3b9b7013edcd54141124e3/lyricsgenius/api.py#L87-L91
def search_genius(self, search_term): """Search documents hosted on Genius.""" endpoint = "search/" params = {'q': search_term} return self._make_request(endpoint, params_=params)
[ "def", "search_genius", "(", "self", ",", "search_term", ")", ":", "endpoint", "=", "\"search/\"", "params", "=", "{", "'q'", ":", "search_term", "}", "return", "self", ".", "_make_request", "(", "endpoint", ",", "params_", "=", "params", ")" ]
Search documents hosted on Genius.
[ "Search", "documents", "hosted", "on", "Genius", "." ]
python
train
heikomuller/sco-client
scocli/__init__.py
https://github.com/heikomuller/sco-client/blob/c4afab71297f73003379bba4c1679be9dcf7cef8/scocli/__init__.py#L126-L138
def cache_clear(self): """Clear local cache by deleting all cached resources and their downloaded files. """ # Delete content of local cache directory for f in os.listdir(self.directory): f = os.path.join(self.directory, f) if os.path.isfile(f): os.remove(f) elif os.path.isdir(f): shutil.rmtree(f) # Empty cache index self.cache = {}
[ "def", "cache_clear", "(", "self", ")", ":", "# Delete content of local cache directory", "for", "f", "in", "os", ".", "listdir", "(", "self", ".", "directory", ")", ":", "f", "=", "os", ".", "path", ".", "join", "(", "self", ".", "directory", ",", "f", ...
Clear local cache by deleting all cached resources and their downloaded files.
[ "Clear", "local", "cache", "by", "deleting", "all", "cached", "resources", "and", "their", "downloaded", "files", "." ]
python
train
adamcharnock/swiftwind
swiftwind/billing_cycle/models.py
https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L148-L150
def get_next(self): """Get the billing cycle after this one. May return None""" return BillingCycle.objects.filter(date_range__gt=self.date_range).order_by('date_range').first()
[ "def", "get_next", "(", "self", ")", ":", "return", "BillingCycle", ".", "objects", ".", "filter", "(", "date_range__gt", "=", "self", ".", "date_range", ")", ".", "order_by", "(", "'date_range'", ")", ".", "first", "(", ")" ]
Get the billing cycle after this one. May return None
[ "Get", "the", "billing", "cycle", "after", "this", "one", ".", "May", "return", "None" ]
python
train
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L121-L125
def add(self, datapoint): """ Adds the datapoint to the tensor if room is available. """ if not self.is_full: self.set_datapoint(self.cur_index, datapoint) self.cur_index += 1
[ "def", "add", "(", "self", ",", "datapoint", ")", ":", "if", "not", "self", ".", "is_full", ":", "self", ".", "set_datapoint", "(", "self", ".", "cur_index", ",", "datapoint", ")", "self", ".", "cur_index", "+=", "1" ]
Adds the datapoint to the tensor if room is available.
[ "Adds", "the", "datapoint", "to", "the", "tensor", "if", "room", "is", "available", "." ]
python
train
saltstack/salt
salt/fileserver/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/__init__.py#L353-L401
def backends(self, back=None): ''' Return the backend list ''' if not back: back = self.opts['fileserver_backend'] else: if not isinstance(back, list): try: back = back.split(',') except AttributeError: back = six.text_type(back).split(',') if isinstance(back, Sequence): # The test suite uses an ImmutableList type (based on # collections.Sequence) for lists, which breaks this function in # the test suite. This normalizes the value from the opts into a # list if it is based on collections.Sequence. back = list(back) ret = [] if not isinstance(back, list): return ret # Avoid error logging when performing lookups in the LazyDict by # instead doing the membership check on the result of a call to its # .keys() attribute rather than on the LazyDict itself. server_funcs = self.servers.keys() try: subtract_only = all((x.startswith('-') for x in back)) except AttributeError: pass else: if subtract_only: # Only subtracting backends from enabled ones ret = self.opts['fileserver_backend'] for sub in back: if '{0}.envs'.format(sub[1:]) in server_funcs: ret.remove(sub[1:]) elif '{0}.envs'.format(sub[1:-2]) in server_funcs: ret.remove(sub[1:-2]) return ret for sub in back: if '{0}.envs'.format(sub) in server_funcs: ret.append(sub) elif '{0}.envs'.format(sub[:-2]) in server_funcs: ret.append(sub[:-2]) return ret
[ "def", "backends", "(", "self", ",", "back", "=", "None", ")", ":", "if", "not", "back", ":", "back", "=", "self", ".", "opts", "[", "'fileserver_backend'", "]", "else", ":", "if", "not", "isinstance", "(", "back", ",", "list", ")", ":", "try", ":"...
Return the backend list
[ "Return", "the", "backend", "list" ]
python
train
hobson/aima
aima/mdp.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/mdp.py#L118-L131
def policy_iteration(mdp): "Solve an MDP by policy iteration [Fig. 17.7]" U = dict([(s, 0) for s in mdp.states]) pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states]) while True: U = policy_evaluation(pi, U, mdp) unchanged = True for s in mdp.states: a = argmax(mdp.actions(s), lambda a: expected_utility(a,s,U,mdp)) if a != pi[s]: pi[s] = a unchanged = False if unchanged: return pi
[ "def", "policy_iteration", "(", "mdp", ")", ":", "U", "=", "dict", "(", "[", "(", "s", ",", "0", ")", "for", "s", "in", "mdp", ".", "states", "]", ")", "pi", "=", "dict", "(", "[", "(", "s", ",", "random", ".", "choice", "(", "mdp", ".", "a...
Solve an MDP by policy iteration [Fig. 17.7]
[ "Solve", "an", "MDP", "by", "policy", "iteration", "[", "Fig", ".", "17", ".", "7", "]" ]
python
valid
fkarb/xltable
xltable/expression.py
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/expression.py#L358-L370
def _to_addr(worksheet, row, col, row_fixed=False, col_fixed=False): """converts a (0,0) based coordinate to an excel address""" addr = "" A = ord('A') col += 1 while col > 0: addr = chr(A + ((col - 1) % 26)) + addr col = (col - 1) // 26 prefix = ("'%s'!" % worksheet) if worksheet else "" col_modifier = "$" if col_fixed else "" row_modifier = "$" if row_fixed else "" return prefix + "%s%s%s%d" % (col_modifier, addr, row_modifier, row+1)
[ "def", "_to_addr", "(", "worksheet", ",", "row", ",", "col", ",", "row_fixed", "=", "False", ",", "col_fixed", "=", "False", ")", ":", "addr", "=", "\"\"", "A", "=", "ord", "(", "'A'", ")", "col", "+=", "1", "while", "col", ">", "0", ":", "addr",...
converts a (0,0) based coordinate to an excel address
[ "converts", "a", "(", "0", "0", ")", "based", "coordinate", "to", "an", "excel", "address" ]
python
train
PyPSA/PyPSA
pypsa/io.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L537-L605
def _import_from_importer(network, importer, basename, skip_time=False): """ Import network data from importer. Parameters ---------- skip_time : bool Skip importing time """ attrs = importer.get_attributes() current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")] pypsa_version = None if attrs is not None: network.name = attrs.pop('name') try: pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")] except KeyError: pypsa_version = None for attr, val in iteritems(attrs): setattr(network, attr, val) ##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types if pypsa_version is None or pypsa_version < current_pypsa_version: logger.warning(dedent(""" Importing PyPSA from older version of PyPSA than current version {}. Please read the release notes at https://pypsa.org/doc/release_notes.html carefully to prepare your network for import. """).format(network.pypsa_version)) importer.pypsa_version = pypsa_version importer.current_pypsa_version = current_pypsa_version # if there is snapshots.csv, read in snapshot data df = importer.get_snapshots() if df is not None: network.set_snapshots(df.index) if "weightings" in df.columns: network.snapshot_weightings = df["weightings"].reindex(network.snapshots) imported_components = [] # now read in other components; make sure buses and carriers come first for component in ["Bus", "Carrier"] + sorted(network.all_components - {"Bus", "Carrier", "SubNetwork"}): list_name = network.components[component]["list_name"] df = importer.get_static(list_name) if df is None: if component == "Bus": logger.error("Error, no buses found") return else: continue import_components_from_dataframe(network, df, component) if not skip_time: for attr, df in importer.get_series(list_name): import_series_from_dataframe(network, df, component, attr) logger.debug(getattr(network,list_name)) imported_components.append(list_name) logger.info("Imported network{} has {}".format(" " + basename, ", 
".join(imported_components)))
[ "def", "_import_from_importer", "(", "network", ",", "importer", ",", "basename", ",", "skip_time", "=", "False", ")", ":", "attrs", "=", "importer", ".", "get_attributes", "(", ")", "current_pypsa_version", "=", "[", "int", "(", "s", ")", "for", "s", "in"...
Import network data from importer. Parameters ---------- skip_time : bool Skip importing time
[ "Import", "network", "data", "from", "importer", "." ]
python
train
Harvard-University-iCommons/dj-log-config-helper
dj_log_config_helper.py
https://github.com/Harvard-University-iCommons/dj-log-config-helper/blob/5e568a65c455ca984d8f6d652986df190ed2a6dd/dj_log_config_helper.py#L110-L135
def configure_installed_apps_logger(level, verbose=False, additional_packages=None, filename=None): """Builds and enables a logger with a logger list of the top-level list of installed app modules (based on package name) plus any additional application packages passed in - for example, a user may want to log a dependent package of one the installed apps. The logger will write either to the console or to a file based on the presence of the filename parameter. Check that the LOGGING_CONFIG setting is None before we configure the logger in order to prevent maintaining Django's list of log handlers.""" if settings.LOGGING_CONFIG: raise ImproperlyConfigured(LOGGING_CONFIG_ERROR_MSG) app_set = _normalize_apps(settings.INSTALLED_APPS) # Add any additional app modules to the set of apps we want to configure if additional_packages: # Make sure we're dealing with a list of additional apps before we # convert to a set if not isinstance(additional_packages, list): additional_packages = list(additional_packages) # Update the app set with these additional app modules app_set.update(set(additional_packages)) config = _build_logging_config( level, app_set, verbose, filename) logging.config.dictConfig(config)
[ "def", "configure_installed_apps_logger", "(", "level", ",", "verbose", "=", "False", ",", "additional_packages", "=", "None", ",", "filename", "=", "None", ")", ":", "if", "settings", ".", "LOGGING_CONFIG", ":", "raise", "ImproperlyConfigured", "(", "LOGGING_CONF...
Builds and enables a logger with a logger list of the top-level list of installed app modules (based on package name) plus any additional application packages passed in - for example, a user may want to log a dependent package of one the installed apps. The logger will write either to the console or to a file based on the presence of the filename parameter. Check that the LOGGING_CONFIG setting is None before we configure the logger in order to prevent maintaining Django's list of log handlers.
[ "Builds", "and", "enables", "a", "logger", "with", "a", "logger", "list", "of", "the", "top", "-", "level", "list", "of", "installed", "app", "modules", "(", "based", "on", "package", "name", ")", "plus", "any", "additional", "application", "packages", "pa...
python
train
angr/angr
angr/sim_state.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_state.py#L428-L507
def add_constraints(self, *args, **kwargs): """ Add some constraints to the state. You may pass in any number of symbolic booleans as variadic positional arguments. """ if len(args) > 0 and isinstance(args[0], (list, tuple)): raise Exception("Tuple or list passed to add_constraints!") if o.TRACK_CONSTRAINTS in self.options and len(args) > 0: if o.SIMPLIFY_CONSTRAINTS in self.options: constraints = [ self.simplify(a) for a in args ] else: constraints = args self._inspect('constraints', BP_BEFORE, added_constraints=constraints) constraints = self._inspect_getattr("added_constraints", constraints) added = self.solver.add(*constraints) self._inspect('constraints', BP_AFTER) # add actions for the added constraints if o.TRACK_CONSTRAINT_ACTIONS in self.options: for c in added: sac = SimActionConstraint(self, c) self.history.add_action(sac) else: # preserve the old action logic for when we don't track constraints (why?) if ( 'action' in kwargs and kwargs['action'] and o.TRACK_CONSTRAINT_ACTIONS in self.options and len(args) > 0 ): for arg in args: if self.solver.symbolic(arg): sac = SimActionConstraint(self, arg) self.history.add_action(sac) if o.ABSTRACT_SOLVER in self.options and len(args) > 0: for arg in args: if self.solver.is_false(arg): self._satisfiable = False return if self.solver.is_true(arg): continue # `is_true` and `is_false` does not use VSABackend currently (see commits 97a75366 and 2dfba73e in # claripy). There is a chance that VSA backend can in fact handle it. # Therefore we try to resolve it with VSABackend again if claripy.backends.vsa.is_false(arg): self._satisfiable = False return if claripy.backends.vsa.is_true(arg): continue # It's neither True or False. Let's try to apply the condition # We take the argument, extract a list of constrained SIs out of it (if we could, of course), and # then replace each original SI the intersection of original SI and the constrained one. 
_, converted = self.solver.constraint_to_si(arg) for original_expr, constrained_si in converted: if not original_expr.variables: l.error('Incorrect original_expression to replace in add_constraints(). ' 'This is due to defects in VSA logics inside claripy. Please report ' 'to Fish and he will fix it if he\'s free.') continue new_expr = constrained_si self.registers.replace_all(original_expr, new_expr) for _, region in self.memory.regions.items(): region.memory.replace_all(original_expr, new_expr) l.debug("SimState.add_constraints: Applied to final state.") elif o.SYMBOLIC not in self.options and len(args) > 0: for arg in args: if self.solver.is_false(arg): self._satisfiable = False return
[ "def", "add_constraints", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", ">", "0", "and", "isinstance", "(", "args", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "Excep...
Add some constraints to the state. You may pass in any number of symbolic booleans as variadic positional arguments.
[ "Add", "some", "constraints", "to", "the", "state", "." ]
python
train
biocore/burrito-fillings
bfillings/muscle_v38.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L457-L499
def aln_tree_seqs(seqs, input_handler=None, tree_type='neighborjoining', params={}, add_seq_names=True, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None, max_hours=5.0, constructor=PhyloNode, clean_up=True ): """Muscle align sequences and report tree from iteration2. Unlike cluster_seqs, returns tree2 which is the tree made during the second muscle iteration (it should be more accurate that the cluster from the first iteration which is made fast based on k-mer words) seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. tree_type: can be either neighborjoining (default) or upgmb for UPGMA clean_up: When true, will clean up output files """ params["-maxhours"] = max_hours if tree_type: params["-cluster2"] = tree_type params["-tree2"] = get_tmp_filename(WorkingDir) params["-out"] = get_tmp_filename(WorkingDir) muscle_res = muscle_seqs(seqs, input_handler=input_handler, params=params, add_seq_names=add_seq_names, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) tree = DndParser(muscle_res["Tree2Out"], constructor=constructor) aln = [line for line in muscle_res["MuscleOut"]] if clean_up: muscle_res.cleanUp() return tree, aln
[ "def", "aln_tree_seqs", "(", "seqs", ",", "input_handler", "=", "None", ",", "tree_type", "=", "'neighborjoining'", ",", "params", "=", "{", "}", ",", "add_seq_names", "=", "True", ",", "WorkingDir", "=", "tempfile", ".", "gettempdir", "(", ")", ",", "Supp...
Muscle align sequences and report tree from iteration2. Unlike cluster_seqs, returns tree2 which is the tree made during the second muscle iteration (it should be more accurate that the cluster from the first iteration which is made fast based on k-mer words) seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. tree_type: can be either neighborjoining (default) or upgmb for UPGMA clean_up: When true, will clean up output files
[ "Muscle", "align", "sequences", "and", "report", "tree", "from", "iteration2", "." ]
python
train
roclark/sportsreference
sportsreference/nba/boxscore.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/boxscore.py#L139-L153
def two_point_attempts(self):
    """
    Returns an ``int`` of the total number of two point field goals the
    player attempted during the season.
    """
    total_attempts = self.field_goal_attempts
    from_three = self.three_point_attempts
    # A two point attempt is any field goal attempt that wasn't a three.
    if total_attempts and from_three:
        return int(total_attempts - from_three)
    # The player never shot from three, so every field goal attempt they
    # took was a two point attempt.
    if total_attempts:
        return int(total_attempts)
    # No field goal attempts at all means no two point attempts either.
    return None
[ "def", "two_point_attempts", "(", "self", ")", ":", "if", "self", ".", "field_goal_attempts", "and", "self", ".", "three_point_attempts", ":", "return", "int", "(", "self", ".", "field_goal_attempts", "-", "self", ".", "three_point_attempts", ")", "# Occurs when t...
Returns an ``int`` of the total number of two point field goals the player attempted during the season.
[ "Returns", "an", "int", "of", "the", "total", "number", "of", "two", "point", "field", "goals", "the", "player", "attempted", "during", "the", "season", "." ]
python
train
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/filter_common.py
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/filter_common.py#L187-L202
def filter_single_need(need, filter_string=""):
    """
    Checks if a single need/need_part passes a filter_string

    :param need: need or need_part
    :param filter_string: string, which is used as input for eval()
    :return: True, if need as passed the filter_string, else False
    """
    # Expose every need field plus re.search to the filter expression.
    eval_locals = need.copy()
    eval_locals["search"] = re.search
    try:
        passed = bool(eval(filter_string, None, eval_locals))
    except Exception as e:
        # Any eval failure (syntax error, unknown name, ...) is reported as
        # an invalid filter instead of crashing the build.
        raise NeedInvalidFilter("Filter {0} not valid: Error: {1}".format(filter_string, e))
    return passed
[ "def", "filter_single_need", "(", "need", ",", "filter_string", "=", "\"\"", ")", ":", "filter_context", "=", "need", ".", "copy", "(", ")", "filter_context", "[", "\"search\"", "]", "=", "re", ".", "search", "result", "=", "False", "try", ":", "result", ...
Checks if a single need/need_part passes a filter_string :param need: need or need_part :param filter_string: string, which is used as input for eval() :return: True, if need as passed the filter_string, else False
[ "Checks", "if", "a", "single", "need", "/", "need_part", "passes", "a", "filter_string" ]
python
train
biocore/burrito-fillings
bfillings/fastq_join.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/fastq_join.py#L69-L75
def _get_stitch_report_path(self):
    """Checks if stitch report label / path is set. Returns absolute path."""
    report_option = self.Parameters['-r']
    if report_option.isOn():
        # Turn the report label into an absolute path for the working dir.
        return self._absolute(str(report_option.Value))
    if report_option.isOff():
        return None
[ "def", "_get_stitch_report_path", "(", "self", ")", ":", "if", "self", ".", "Parameters", "[", "'-r'", "]", ".", "isOn", "(", ")", ":", "stitch_path", "=", "self", ".", "_absolute", "(", "str", "(", "self", ".", "Parameters", "[", "'-r'", "]", ".", "...
Checks if stitch report label / path is set. Returns absolute path.
[ "Checks", "if", "stitch", "report", "label", "/", "path", "is", "set", ".", "Returns", "absolute", "path", "." ]
python
train
aws/sagemaker-python-sdk
src/sagemaker/local/utils.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/local/utils.py#L43-L68
def move_to_destination(source, destination, job_name, sagemaker_session):
    """move source to destination. Can handle uploading to S3

    Args:
        source (str): root directory to move
        destination (str): file:// or s3:// URI that source will be moved to.
        job_name (str): SageMaker job name.
        sagemaker_session (sagemaker.Session): a sagemaker_session to interact with S3 if needed

    Returns:
        (str): destination URI
    """
    parsed = urlparse(destination)
    scheme = parsed.scheme

    if scheme == 'file':
        # Local destination: copy the tree into place, URI is returned as-is.
        recursive_copy(source, parsed.path)
        final_uri = destination
    elif scheme == 's3':
        bucket = parsed.netloc
        path = "%s%s" % (parsed.path.lstrip('/'), job_name)
        final_uri = 's3://%s/%s' % (bucket, path)
        sagemaker_session.upload_data(source, bucket, path)
    else:
        raise ValueError('Invalid destination URI, must be s3:// or file://, got: %s' % destination)

    # The data now lives at the destination, so drop the local copy.
    shutil.rmtree(source)
    return final_uri
[ "def", "move_to_destination", "(", "source", ",", "destination", ",", "job_name", ",", "sagemaker_session", ")", ":", "parsed_uri", "=", "urlparse", "(", "destination", ")", "if", "parsed_uri", ".", "scheme", "==", "'file'", ":", "recursive_copy", "(", "source",...
move source to destination. Can handle uploading to S3 Args: source (str): root directory to move destination (str): file:// or s3:// URI that source will be moved to. job_name (str): SageMaker job name. sagemaker_session (sagemaker.Session): a sagemaker_session to interact with S3 if needed Returns: (str): destination URI
[ "move", "source", "to", "destination", ".", "Can", "handle", "uploading", "to", "S3" ]
python
train
google/grr
grr/core/grr_response_core/lib/parsers/linux_file_parser.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/linux_file_parser.py#L382-L398
def ReconcileShadow(self, store_type):
    """Verify that entries that claim to use shadow files have a shadow entry.

    If the entries of the non-shadowed file indicate that a shadow file is
    used, check that there is actually an entry for that file in shadow.

    Args:
      store_type: The type of password store that should be used (e.g.
        /etc/shadow or /etc/gshadow)
    """
    for username, user_entry in iteritems(self.entry):
        if user_entry.pw_entry.store != store_type:
            continue
        shadow_entry = self.shadow.get(username)
        if shadow_entry is None:
            # Claims to be shadowed, but no matching shadow entry exists.
            user_entry.pw_entry.store = "UNKNOWN"
        else:
            user_entry.pw_entry = shadow_entry
[ "def", "ReconcileShadow", "(", "self", ",", "store_type", ")", ":", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "entry", ")", ":", "if", "v", ".", "pw_entry", ".", "store", "==", "store_type", ":", "shadow_entry", "=", "self", ".", "sh...
Verify that entries that claim to use shadow files have a shadow entry. If the entries of the non-shadowed file indicate that a shadow file is used, check that there is actually an entry for that file in shadow. Args: store_type: The type of password store that should be used (e.g. /etc/shadow or /etc/gshadow)
[ "Verify", "that", "entries", "that", "claim", "to", "use", "shadow", "files", "have", "a", "shadow", "entry", "." ]
python
train