repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
mapnik/Cascadenik
cascadenik/compile.py
https://github.com/mapnik/Cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/compile.py#L186-L206
def midpoint(self): """ Return a point guranteed to fall within this range, hopefully near the middle. """ minpoint = self.leftedge if self.leftop is gt: minpoint += 1 maxpoint = self.rightedge if self.rightop is lt: maxpoint -= 1 if minpoint is None: return maxpoint elif maxpoint is None: return minpoint else: return (minpoint + maxpoint) / 2
[ "def", "midpoint", "(", "self", ")", ":", "minpoint", "=", "self", ".", "leftedge", "if", "self", ".", "leftop", "is", "gt", ":", "minpoint", "+=", "1", "maxpoint", "=", "self", ".", "rightedge", "if", "self", ".", "rightop", "is", "lt", ":", "maxpoi...
Return a point guranteed to fall within this range, hopefully near the middle.
[ "Return", "a", "point", "guranteed", "to", "fall", "within", "this", "range", "hopefully", "near", "the", "middle", "." ]
python
train
pyviz/holoviews
holoviews/core/element.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/element.py#L29-L61
def hist(self, dimension=None, num_bins=20, bin_range=None, adjoin=True, **kwargs): """Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of element and histogram or just the histogram """ from ..operation import histogram if not isinstance(dimension, list): dimension = [dimension] hists = [] for d in dimension[::-1]: hist = histogram(self, num_bins=num_bins, bin_range=bin_range, dimension=d, **kwargs) hists.append(hist) if adjoin: layout = self for didx in range(len(dimension)): layout = layout << hists[didx] elif len(dimension) > 1: layout = Layout(hists) else: layout = hists[0] return layout
[ "def", "hist", "(", "self", ",", "dimension", "=", "None", ",", "num_bins", "=", "20", ",", "bin_range", "=", "None", ",", "adjoin", "=", "True", ",", "*", "*", "kwargs", ")", ":", "from", ".", ".", "operation", "import", "histogram", "if", "not", ...
Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of element and histogram or just the histogram
[ "Computes", "and", "adjoins", "histogram", "along", "specified", "dimension", "(", "s", ")", "." ]
python
train
zsimic/runez
src/runez/logsetup.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L418-L429
def _add_handler(cls, handler, fmt, level): """ Args: handler (logging.Handler): Handler to decorate fmt (str | unicode): Format to use """ handler.setFormatter(_get_formatter(fmt)) if level: handler.setLevel(level) logging.root.addHandler(handler) cls.used_formats = ("%s %s" % (cls.used_formats or "", fmt)).strip() cls.handlers.append(handler)
[ "def", "_add_handler", "(", "cls", ",", "handler", ",", "fmt", ",", "level", ")", ":", "handler", ".", "setFormatter", "(", "_get_formatter", "(", "fmt", ")", ")", "if", "level", ":", "handler", ".", "setLevel", "(", "level", ")", "logging", ".", "root...
Args: handler (logging.Handler): Handler to decorate fmt (str | unicode): Format to use
[ "Args", ":", "handler", "(", "logging", ".", "Handler", ")", ":", "Handler", "to", "decorate", "fmt", "(", "str", "|", "unicode", ")", ":", "Format", "to", "use" ]
python
train
ghukill/pyfc4
pyfc4/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L229-L266
def start_txn(self, txn_name=None): ''' Request new transaction from repository, init new Transaction, store in self.txns Args: txn_name (str): human name for transaction Return: (Transaction): returns intance of newly created transaction ''' # if no name provided, create one if not txn_name: txn_name = uuid.uuid4().hex # request new transaction txn_response = self.api.http_request('POST','%s/fcr:tx' % self.root, data=None, headers=None) # if 201, transaction was created if txn_response.status_code == 201: txn_uri = txn_response.headers['Location'] logger.debug("spawning transaction: %s" % txn_uri) # init new Transaction, and pass Expires header txn = Transaction( self, # pass the repository txn_name, txn_uri, expires = txn_response.headers['Expires']) # append to self self.txns[txn_name] = txn # return return txn
[ "def", "start_txn", "(", "self", ",", "txn_name", "=", "None", ")", ":", "# if no name provided, create one", "if", "not", "txn_name", ":", "txn_name", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "# request new transaction", "txn_response", "=", "self", "...
Request new transaction from repository, init new Transaction, store in self.txns Args: txn_name (str): human name for transaction Return: (Transaction): returns intance of newly created transaction
[ "Request", "new", "transaction", "from", "repository", "init", "new", "Transaction", "store", "in", "self", ".", "txns" ]
python
train
gem/oq-engine
openquake/calculators/ucerf_event_based.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/ucerf_event_based.py#L41-L76
def generate_event_set(ucerf, background_sids, src_filter, ses_idx, seed): """ Generates the event set corresponding to a particular branch """ serial = seed + ses_idx * TWO16 # get rates from file with h5py.File(ucerf.source_file, 'r') as hdf5: occurrences = ucerf.tom.sample_number_of_occurrences(ucerf.rate, seed) indices, = numpy.where(occurrences) logging.debug( 'Considering "%s", %d ruptures', ucerf.source_id, len(indices)) # get ruptures from the indices ruptures = [] rupture_occ = [] for iloc, n_occ in zip(indices, occurrences[indices]): ucerf_rup = ucerf.get_ucerf_rupture(iloc, src_filter) if ucerf_rup: ucerf_rup.serial = serial serial += 1 ruptures.append(ucerf_rup) rupture_occ.append(n_occ) # sample background sources background_ruptures, background_n_occ = sample_background_model( hdf5, ucerf.idx_set["grid_key"], ucerf.tom, seed, background_sids, ucerf.min_mag, ucerf.npd, ucerf.hdd, ucerf.usd, ucerf.lsd, ucerf.msr, ucerf.aspect, ucerf.tectonic_region_type) for i, brup in enumerate(background_ruptures): brup.serial = serial serial += 1 ruptures.append(brup) rupture_occ.extend(background_n_occ) assert len(ruptures) < TWO16, len(ruptures) # < 2^16 ruptures per SES return ruptures, rupture_occ
[ "def", "generate_event_set", "(", "ucerf", ",", "background_sids", ",", "src_filter", ",", "ses_idx", ",", "seed", ")", ":", "serial", "=", "seed", "+", "ses_idx", "*", "TWO16", "# get rates from file", "with", "h5py", ".", "File", "(", "ucerf", ".", "source...
Generates the event set corresponding to a particular branch
[ "Generates", "the", "event", "set", "corresponding", "to", "a", "particular", "branch" ]
python
train
scarface-4711/denonavr
denonavr/denonavr.py
https://github.com/scarface-4711/denonavr/blob/59a136e27b43cb1d1e140cf67705087b3aa377cd/denonavr/denonavr.py#L1611-L1622
def power_off(self): """Turn off receiver via HTTP get command.""" try: if self.send_get_command(self._urls.command_power_standby): self._power = POWER_STANDBY self._state = STATE_OFF return True else: return False except requests.exceptions.RequestException: _LOGGER.error("Connection error: power off command not sent.") return False
[ "def", "power_off", "(", "self", ")", ":", "try", ":", "if", "self", ".", "send_get_command", "(", "self", ".", "_urls", ".", "command_power_standby", ")", ":", "self", ".", "_power", "=", "POWER_STANDBY", "self", ".", "_state", "=", "STATE_OFF", "return",...
Turn off receiver via HTTP get command.
[ "Turn", "off", "receiver", "via", "HTTP", "get", "command", "." ]
python
train
shmir/PyIxExplorer
ixexplorer/ixe_port.py
https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L344-L352
def set_phy_mode(self, mode=IxePhyMode.ignore): """ Set phy mode to copper or fiber. :param mode: requested PHY mode. """ if isinstance(mode, IxePhyMode): if mode.value: self.api.call_rc('port setPhyMode {} {}'.format(mode.value, self.uri)) else: self.api.call_rc('port setPhyMode {} {}'.format(mode, self.uri))
[ "def", "set_phy_mode", "(", "self", ",", "mode", "=", "IxePhyMode", ".", "ignore", ")", ":", "if", "isinstance", "(", "mode", ",", "IxePhyMode", ")", ":", "if", "mode", ".", "value", ":", "self", ".", "api", ".", "call_rc", "(", "'port setPhyMode {} {}'"...
Set phy mode to copper or fiber. :param mode: requested PHY mode.
[ "Set", "phy", "mode", "to", "copper", "or", "fiber", ".", ":", "param", "mode", ":", "requested", "PHY", "mode", "." ]
python
train
AbdealiJK/pycolorname
pycolorname/color_system.py
https://github.com/AbdealiJK/pycolorname/blob/d535de3d340a1673906cb484cc4c49c87d296ec0/pycolorname/color_system.py#L35-L70
def load(self, filename=None, refresh=False): """ Try to load the data from a pre existing data file if it exists. If the data file does not exist, refresh the data and save it in the data file for future use. The data file is a json file. :param filename: The filename to save or fetch the data from. :param refresh: Whether to force refresh the data or not """ filename = filename or self.data_file() dirname = os.path.dirname(filename) if refresh is False: try: data = None with open(filename) as fp: data = json.load(fp) self.clear() self.update(data) return except (ValueError, IOError): # Refresh data if reading gave errors pass data = self.refresh() self.clear() self.update(data) if not os.path.isdir(dirname): os.makedirs(dirname) with open(filename, 'w') as fp: json.dump(data, fp, sort_keys=True, indent=2, separators=(',', ': '))
[ "def", "load", "(", "self", ",", "filename", "=", "None", ",", "refresh", "=", "False", ")", ":", "filename", "=", "filename", "or", "self", ".", "data_file", "(", ")", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", ...
Try to load the data from a pre existing data file if it exists. If the data file does not exist, refresh the data and save it in the data file for future use. The data file is a json file. :param filename: The filename to save or fetch the data from. :param refresh: Whether to force refresh the data or not
[ "Try", "to", "load", "the", "data", "from", "a", "pre", "existing", "data", "file", "if", "it", "exists", ".", "If", "the", "data", "file", "does", "not", "exist", "refresh", "the", "data", "and", "save", "it", "in", "the", "data", "file", "for", "fu...
python
train
quantumlib/Cirq
cirq/ops/pauli_string_raw_types.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/ops/pauli_string_raw_types.py#L42-L49
def map_qubits(self: TSelf_PauliStringGateOperation, qubit_map: Dict[raw_types.Qid, raw_types.Qid] ) -> TSelf_PauliStringGateOperation: """Return an equivalent operation on new qubits with its Pauli string mapped to new qubits. new_pauli_string = self.pauli_string.map_qubits(qubit_map) """
[ "def", "map_qubits", "(", "self", ":", "TSelf_PauliStringGateOperation", ",", "qubit_map", ":", "Dict", "[", "raw_types", ".", "Qid", ",", "raw_types", ".", "Qid", "]", ")", "->", "TSelf_PauliStringGateOperation", ":" ]
Return an equivalent operation on new qubits with its Pauli string mapped to new qubits. new_pauli_string = self.pauli_string.map_qubits(qubit_map)
[ "Return", "an", "equivalent", "operation", "on", "new", "qubits", "with", "its", "Pauli", "string", "mapped", "to", "new", "qubits", "." ]
python
train
jsommers/switchyard
switchyard/lib/socket/socketemu.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/socket/socketemu.py#L314-L336
def bind(self, address): ''' Alter the local address with which this socket is associated. The address parameter is a 2-tuple consisting of an IP address and port number. NB: this method fails and returns -1 if the requested port to bind to is already in use but does *not* check that the address is valid. ''' portset = _gather_ports().union(ApplicationLayer._emuports()) if address[1] in portset: log_warn("Port is already in use.") return -1 oldid = self._sockid() # block firewall port # set stack to only allow packets through for addr/port self._local_addr = _normalize_addrs(address) # update firewall and pcap filters self.__set_fw_rules() ApplicationLayer._registry_update(self, oldid) return 0
[ "def", "bind", "(", "self", ",", "address", ")", ":", "portset", "=", "_gather_ports", "(", ")", ".", "union", "(", "ApplicationLayer", ".", "_emuports", "(", ")", ")", "if", "address", "[", "1", "]", "in", "portset", ":", "log_warn", "(", "\"Port is a...
Alter the local address with which this socket is associated. The address parameter is a 2-tuple consisting of an IP address and port number. NB: this method fails and returns -1 if the requested port to bind to is already in use but does *not* check that the address is valid.
[ "Alter", "the", "local", "address", "with", "which", "this", "socket", "is", "associated", ".", "The", "address", "parameter", "is", "a", "2", "-", "tuple", "consisting", "of", "an", "IP", "address", "and", "port", "number", "." ]
python
train
astropy/photutils
photutils/extern/sigma_clipping.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/extern/sigma_clipping.py#L315-L385
def _sigmaclip_withaxis(self, data, axis=None, masked=True, return_bounds=False, copy=True): """ Sigma clip the data when ``axis`` is specified. In this case, we replace clipped values with NaNs as placeholder values. """ # float array type is needed to insert nans into the array filtered_data = data.astype(float) # also makes a copy # remove invalid values bad_mask = ~np.isfinite(filtered_data) if np.any(bad_mask): filtered_data[bad_mask] = np.nan warnings.warn('Input data contains invalid values (NaNs or ' 'infs), which were automatically clipped.', AstropyUserWarning) # remove masked values and convert to plain ndarray if isinstance(filtered_data, np.ma.MaskedArray): filtered_data = np.ma.masked_invalid(filtered_data).astype(float) filtered_data = filtered_data.filled(np.nan) # convert negative axis/axes if not isiterable(axis): axis = (axis,) axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis) # define the shape of min/max arrays so that they can be broadcast # with the data mshape = tuple(1 if dim in axis else size for dim, size in enumerate(filtered_data.shape)) nchanged = 1 iteration = 0 while nchanged != 0 and (iteration < self.maxiters): iteration += 1 n_nan = np.count_nonzero(np.isnan(filtered_data)) self._compute_bounds(filtered_data, axis=axis) if not np.isscalar(self._min_value): self._min_value = self._min_value.reshape(mshape) self._max_value = self._max_value.reshape(mshape) with np.errstate(invalid='ignore'): filtered_data[(filtered_data < self._min_value) | (filtered_data > self._max_value)] = np.nan nchanged = n_nan - np.count_nonzero(np.isnan(filtered_data)) self._niterations = iteration if masked: # create an output masked array if copy: filtered_data = np.ma.masked_invalid(filtered_data) else: # ignore RuntimeWarnings for comparisons with NaN data values with np.errstate(invalid='ignore'): out = np.ma.masked_invalid(data, copy=False) filtered_data = np.ma.masked_where(np.logical_or( out < self._min_value, out > self._max_value), out, copy=False) if return_bounds: return filtered_data, self._min_value, self._max_value else: return filtered_data
[ "def", "_sigmaclip_withaxis", "(", "self", ",", "data", ",", "axis", "=", "None", ",", "masked", "=", "True", ",", "return_bounds", "=", "False", ",", "copy", "=", "True", ")", ":", "# float array type is needed to insert nans into the array", "filtered_data", "="...
Sigma clip the data when ``axis`` is specified. In this case, we replace clipped values with NaNs as placeholder values.
[ "Sigma", "clip", "the", "data", "when", "axis", "is", "specified", "." ]
python
train
secdev/scapy
scapy/layers/ipsec.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/ipsec.py#L562-L598
def verify(self, pkt, key): """ Check that the integrity check value (icv) of a packet is valid. @param pkt: a packet that contains a valid encrypted ESP or AH layer @param key: the authentication key, a byte string @raise IPSecIntegrityError: if the integrity check fails """ if not self.mac or self.icv_size == 0: return mac = self.new_mac(key) pkt_icv = 'not found' computed_icv = 'not computed' if isinstance(pkt, ESP): pkt_icv = pkt.data[len(pkt.data) - self.icv_size:] clone = pkt.copy() clone.data = clone.data[:len(clone.data) - self.icv_size] elif pkt.haslayer(AH): if len(pkt[AH].icv) != self.icv_size: # Fill padding since we know the actual icv_size pkt[AH].padding = pkt[AH].icv[self.icv_size:] pkt[AH].icv = pkt[AH].icv[:self.icv_size] pkt_icv = pkt[AH].icv clone = zero_mutable_fields(pkt.copy(), sending=False) mac.update(raw(clone)) computed_icv = mac.finalize()[:self.icv_size] # XXX: Cannot use mac.verify because the ICV can be truncated if pkt_icv != computed_icv: raise IPSecIntegrityError('pkt_icv=%r, computed_icv=%r' % (pkt_icv, computed_icv))
[ "def", "verify", "(", "self", ",", "pkt", ",", "key", ")", ":", "if", "not", "self", ".", "mac", "or", "self", ".", "icv_size", "==", "0", ":", "return", "mac", "=", "self", ".", "new_mac", "(", "key", ")", "pkt_icv", "=", "'not found'", "computed_...
Check that the integrity check value (icv) of a packet is valid. @param pkt: a packet that contains a valid encrypted ESP or AH layer @param key: the authentication key, a byte string @raise IPSecIntegrityError: if the integrity check fails
[ "Check", "that", "the", "integrity", "check", "value", "(", "icv", ")", "of", "a", "packet", "is", "valid", "." ]
python
train
angr/angr
angr/engines/successors.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/engines/successors.py#L406-L493
def _eval_target_jumptable(state, ip, limit): """ A *very* fast method to evaluate symbolic jump targets if they are a) concrete targets, or b) targets coming from jump tables. :param state: A SimState instance. :param ip: The AST of the instruction pointer to evaluate. :param limit: The maximum number of concrete IPs. :return: A list of conditions and the corresponding concrete IPs, or None which indicates fallback is necessary. :rtype: list or None """ if ip.symbolic is False: return [ (claripy.ast.bool.true, ip) ] # concrete # Detect whether ip is in the form of "if a == 1 then addr_0 else if a == 2 then addr_1 else ..." cond_and_targets = [ ] # tuple of (condition, target) ip_ = ip # Handle the outer Reverse outer_reverse = False if ip_.op == "Reverse": ip_ = ip_.args[0] outer_reverse = True fallback = False target_variable = None concretes = set() reached_sentinel = False for cond, target in claripy.reverse_ite_cases(ip_): # We must fully unpack the entire AST to make sure it indeed complies with the form above if reached_sentinel: # We should not have any other value beyond the sentinel - maybe one of the possible targets happens to # be the same as the sentinel value? fallback = True break if target.symbolic is False and state.solver.eval(target) == DUMMY_SYMBOLIC_READ_VALUE: # Ignore the dummy value, which acts as the sentinel of this ITE tree reached_sentinel = True continue if cond.op != "__eq__": # We only support equivalence right now. Fallback fallback = True break if cond.args[0].symbolic is True and cond.args[1].symbolic is False: variable, value = cond.args elif cond.args[0].symbolic is False and cond.args[1].symbolic is True: value, variable = cond.args else: # Cannot determine variable and value. Fallback fallback = True break if target_variable is None: target_variable = variable elif target_variable is not variable: # it's checking a different variable. Fallback fallback = True break # Make sure the conditions are mutually exclusive value_concrete = state.solver.eval(value) if value_concrete in concretes: # oops... the conditions are not mutually exclusive fallback = True break concretes.add(value_concrete) if target.symbolic is True: # Cannot handle symbolic targets. Fallback fallback = True break cond_and_targets.append((cond, target if not outer_reverse else state.solver.Reverse(target))) if reached_sentinel is False: # huh? fallback = True if fallback: return None else: return cond_and_targets[ : limit]
[ "def", "_eval_target_jumptable", "(", "state", ",", "ip", ",", "limit", ")", ":", "if", "ip", ".", "symbolic", "is", "False", ":", "return", "[", "(", "claripy", ".", "ast", ".", "bool", ".", "true", ",", "ip", ")", "]", "# concrete", "# Detect whether...
A *very* fast method to evaluate symbolic jump targets if they are a) concrete targets, or b) targets coming from jump tables. :param state: A SimState instance. :param ip: The AST of the instruction pointer to evaluate. :param limit: The maximum number of concrete IPs. :return: A list of conditions and the corresponding concrete IPs, or None which indicates fallback is necessary. :rtype: list or None
[ "A", "*", "very", "*", "fast", "method", "to", "evaluate", "symbolic", "jump", "targets", "if", "they", "are", "a", ")", "concrete", "targets", "or", "b", ")", "targets", "coming", "from", "jump", "tables", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/__init__.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L1026-L1040
def __disclaimer(opt=""): """ Print the disclaimers once. If they've already been shown, skip over. :return none: """ global settings if opt is "update": print("Disclaimer: LiPD files may be updated and modified to adhere to standards\n") settings["note_update"] = False if opt is "validate": print("Note: Use lipd.validate() or www.LiPD.net/create " "to ensure that your new LiPD file(s) are valid") settings["note_validate"] = False return
[ "def", "__disclaimer", "(", "opt", "=", "\"\"", ")", ":", "global", "settings", "if", "opt", "is", "\"update\"", ":", "print", "(", "\"Disclaimer: LiPD files may be updated and modified to adhere to standards\\n\"", ")", "settings", "[", "\"note_update\"", "]", "=", "...
Print the disclaimers once. If they've already been shown, skip over. :return none:
[ "Print", "the", "disclaimers", "once", ".", "If", "they", "ve", "already", "been", "shown", "skip", "over", "." ]
python
train
gabstopper/smc-python
smc/elements/network.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/network.py#L92-L107
def create(cls, name, ip_range, comment=None): """ Create an AddressRange element :param str name: Name of element :param str iprange: iprange of element :param str comment: comment (optional) :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: AddressRange """ json = {'name': name, 'ip_range': ip_range, 'comment': comment} return ElementCreator(cls, json)
[ "def", "create", "(", "cls", ",", "name", ",", "ip_range", ",", "comment", "=", "None", ")", ":", "json", "=", "{", "'name'", ":", "name", ",", "'ip_range'", ":", "ip_range", ",", "'comment'", ":", "comment", "}", "return", "ElementCreator", "(", "cls"...
Create an AddressRange element :param str name: Name of element :param str iprange: iprange of element :param str comment: comment (optional) :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: AddressRange
[ "Create", "an", "AddressRange", "element" ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/pip/req/req_uninstall.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/req/req_uninstall.py#L90-L132
def remove(self, auto_confirm=False): """Remove paths in ``self.paths`` with confirmation (unless ``auto_confirm`` is True).""" if not self._can_uninstall(): return if not self.paths: logger.info( "Can't uninstall '%s'. No files were found to uninstall.", self.dist.project_name, ) return logger.info( 'Uninstalling %s-%s:', self.dist.project_name, self.dist.version ) with indent_log(): paths = sorted(self.compact(self.paths)) if auto_confirm: response = 'y' else: for path in paths: logger.info(path) response = ask('Proceed (y/n)? ', ('y', 'n')) if self._refuse: logger.info('Not removing or modifying (outside of prefix):') for path in self.compact(self._refuse): logger.info(path) if response == 'y': self.save_dir = tempfile.mkdtemp(suffix='-uninstall', prefix='pip-') for path in paths: new_path = self._stash(path) logger.debug('Removing file or directory %s', path) self._moved_paths.append(path) renames(path, new_path) for pth in self.pth.values(): pth.remove() logger.info( 'Successfully uninstalled %s-%s', self.dist.project_name, self.dist.version )
[ "def", "remove", "(", "self", ",", "auto_confirm", "=", "False", ")", ":", "if", "not", "self", ".", "_can_uninstall", "(", ")", ":", "return", "if", "not", "self", ".", "paths", ":", "logger", ".", "info", "(", "\"Can't uninstall '%s'. No files were found t...
Remove paths in ``self.paths`` with confirmation (unless ``auto_confirm`` is True).
[ "Remove", "paths", "in", "self", ".", "paths", "with", "confirmation", "(", "unless", "auto_confirm", "is", "True", ")", "." ]
python
test
teffland/pytorch-monitor
build/lib/pytorch_monitor/monitor.py
https://github.com/teffland/pytorch-monitor/blob/56dce63ba85c0f9faf9df11df78161ee60f5fae9/build/lib/pytorch_monitor/monitor.py#L46-L53
def remove_grad_hooks(module, input): """ Remove gradient hooks to all of the parameters and monitored vars """ for hook in list(module.param_hooks.keys()): module.param_hooks[hook].remove() module.param_hooks.pop(hook) for hook in list(module.var_hooks.keys()): module.var_hooks[hook].remove() module.var_hooks.pop(hook)
[ "def", "remove_grad_hooks", "(", "module", ",", "input", ")", ":", "for", "hook", "in", "list", "(", "module", ".", "param_hooks", ".", "keys", "(", ")", ")", ":", "module", ".", "param_hooks", "[", "hook", "]", ".", "remove", "(", ")", "module", "."...
Remove gradient hooks to all of the parameters and monitored vars
[ "Remove", "gradient", "hooks", "to", "all", "of", "the", "parameters", "and", "monitored", "vars" ]
python
train
panosl/django-currencies
currencies/management/commands/_yahoofinance.py
https://github.com/panosl/django-currencies/blob/8d4c6c202ad7c4cc06263ab2c1b1f969bbe99acd/currencies/management/commands/_yahoofinance.py#L155-L171
def get_info(self, code): """Return a dict of information about the currency""" currency = self.get_currency(code) info = {} users = list(filter(None, currency['users'].split(','))) if users: info['Users'] = users alt = list(filter(None, currency['alternatives'].split(','))) if alt: info['Alternatives'] = alt if self.modified: info['YFUpdate'] = self.modified.isoformat() return info
[ "def", "get_info", "(", "self", ",", "code", ")", ":", "currency", "=", "self", ".", "get_currency", "(", "code", ")", "info", "=", "{", "}", "users", "=", "list", "(", "filter", "(", "None", ",", "currency", "[", "'users'", "]", ".", "split", "(",...
Return a dict of information about the currency
[ "Return", "a", "dict", "of", "information", "about", "the", "currency" ]
python
train
hvac/hvac
hvac/api/secrets_engines/identity.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/identity.py#L585-L605
def read_group(self, group_id, mount_point=DEFAULT_MOUNT_POINT): """Query the group by its identifier. Supported methods: GET: /{mount_point}/group/id/{id}. Produces: 200 application/json :param group_id: Identifier of the group. :type group_id: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: requests.Response """ api_path = '/v1/{mount_point}/group/id/{id}'.format( mount_point=mount_point, id=group_id, ) response = self._adapter.get( url=api_path, ) return response.json()
[ "def", "read_group", "(", "self", ",", "group_id", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/group/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "group_id", ",", ")", "respo...
Query the group by its identifier. Supported methods: GET: /{mount_point}/group/id/{id}. Produces: 200 application/json :param group_id: Identifier of the group. :type group_id: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: requests.Response
[ "Query", "the", "group", "by", "its", "identifier", "." ]
python
train
metagriffin/pysyncml
pysyncml/items/base.py
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/items/base.py#L101-L108
def loads(cls, data, contentType=None, version=None): ''' [OPTIONAL] Identical to :meth:`load`, except the serialized form is provided as a string representation in `data` instead of as a stream. The default implementation just wraps :meth:`load`. ''' buf = six.StringIO(data) return cls.load(buf, contentType, version)
[ "def", "loads", "(", "cls", ",", "data", ",", "contentType", "=", "None", ",", "version", "=", "None", ")", ":", "buf", "=", "six", ".", "StringIO", "(", "data", ")", "return", "cls", ".", "load", "(", "buf", ",", "contentType", ",", "version", ")"...
[OPTIONAL] Identical to :meth:`load`, except the serialized form is provided as a string representation in `data` instead of as a stream. The default implementation just wraps :meth:`load`.
[ "[", "OPTIONAL", "]", "Identical", "to", ":", "meth", ":", "load", "except", "the", "serialized", "form", "is", "provided", "as", "a", "string", "representation", "in", "data", "instead", "of", "as", "a", "stream", ".", "The", "default", "implementation", ...
python
valid
gitpython-developers/GitPython
git/repo/base.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/repo/base.py#L483-L486
def iter_trees(self, *args, **kwargs): """:return: Iterator yielding Tree objects :note: Takes all arguments known to iter_commits method""" return (c.tree for c in self.iter_commits(*args, **kwargs))
[ "def", "iter_trees", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "(", "c", ".", "tree", "for", "c", "in", "self", ".", "iter_commits", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
:return: Iterator yielding Tree objects :note: Takes all arguments known to iter_commits method
[ ":", "return", ":", "Iterator", "yielding", "Tree", "objects", ":", "note", ":", "Takes", "all", "arguments", "known", "to", "iter_commits", "method" ]
python
train
ailionx/cloudflare-ddns
cloudflare_ddns/cloudflare.py
https://github.com/ailionx/cloudflare-ddns/blob/e4808b8314e447f69fab77b5bd3880846e59adbe/cloudflare_ddns/cloudflare.py#L120-L134
def get_record(self, dns_type, name): """ Get a dns record :param dns_type: :param name: :return: """ try: record = [record for record in self.dns_records if record['type'] == dns_type and record['name'] == name][0] except IndexError: raise RecordNotFound( 'Cannot find the specified dns record in domain {domain}' .format(domain=name)) return record
[ "def", "get_record", "(", "self", ",", "dns_type", ",", "name", ")", ":", "try", ":", "record", "=", "[", "record", "for", "record", "in", "self", ".", "dns_records", "if", "record", "[", "'type'", "]", "==", "dns_type", "and", "record", "[", "'name'",...
Get a dns record :param dns_type: :param name: :return:
[ "Get", "a", "dns", "record", ":", "param", "dns_type", ":", ":", "param", "name", ":", ":", "return", ":" ]
python
train
axltxl/m2bk
m2bk/app.py
https://github.com/axltxl/m2bk/blob/980083dfd17e6e783753a946e9aa809714551141/m2bk/app.py#L187-L216
def main(argv=None): """ This is the main thread of execution :param argv: list of command line arguments """ # Exit code exit_code = 0 # First, we change main() to take an optional 'argv' # argument, which allows us to call it from the interactive # Python prompt if argv is None: argv = sys.argv try: # Bootstrap init(argv) # Perform the actual backup job make_backup_files() except Exception as e: # ... and if everything else fails _handle_except(e) exit_code = 1 finally: # All cleanup actions are taken from here shutdown() return exit_code
[ "def", "main", "(", "argv", "=", "None", ")", ":", "# Exit code", "exit_code", "=", "0", "# First, we change main() to take an optional 'argv'", "# argument, which allows us to call it from the interactive", "# Python prompt", "if", "argv", "is", "None", ":", "argv", "=", ...
This is the main thread of execution :param argv: list of command line arguments
[ "This", "is", "the", "main", "thread", "of", "execution" ]
python
train
mk-fg/feedjack
feedjack/models.py
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/models.py#L654-L675
def similar(self, threshold, **criterias): '''Find text-based field matches with similarity (1-levenshtein/length) higher than specified threshold (0 to 1, 1 being an exact match)''' # XXX: use F from https://docs.djangoproject.com/en/1.8/ref/models/expressions/ meta = self.model._meta funcs, params = list(), list() for name,val in criterias.iteritems(): name = meta.get_field(name, many_to_many=False).column name = '.'.join(it.imap(connection.ops.quote_name, (meta.db_table, name))) # Alas, pg_trgm is for containment tests, not fuzzy matches, # but it can potentially be used to find closest results as well # funcs.append( 'similarity(CAST({0}.{1} as text), CAST(%s as text))'\ # Ok, these two are just to make sure levenshtein() won't crash # w/ "argument exceeds the maximum length of N bytes error" funcs.append('octet_length({0}) <= {1}'.format(name, self.levenshtein_limit)) funcs.append('octet_length(%s) <= {0}'.format(self.levenshtein_limit)) # Then there's a possibility of division by zero... funcs.append('length({0}) > 0'.format(name)) # And if everything else fits, the comparison itself funcs.append('levenshtein({0}, %s) / CAST(length({0}) AS numeric) < %s'.format(name)) params.extend((val, val, float(1 - threshold))) return self.extra(where=funcs, params=params)
[ "def", "similar", "(", "self", ",", "threshold", ",", "*", "*", "criterias", ")", ":", "# XXX: use F from https://docs.djangoproject.com/en/1.8/ref/models/expressions/", "meta", "=", "self", ".", "model", ".", "_meta", "funcs", ",", "params", "=", "list", "(", ")"...
Find text-based field matches with similarity (1-levenshtein/length) higher than specified threshold (0 to 1, 1 being an exact match)
[ "Find", "text", "-", "based", "field", "matches", "with", "similarity", "(", "1", "-", "levenshtein", "/", "length", ")", "higher", "than", "specified", "threshold", "(", "0", "to", "1", "1", "being", "an", "exact", "match", ")" ]
python
train
SoftwareDefinedBuildings/XBOS
apps/Data_quality_analysis/Clean_Data.py
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L544-L563
def _find_outOfBound(self, data, lowBound, highBound): """ Mask for selecting data that is out of bounds. Parameters ---------- data : pd.DataFrame() Input dataframe. lowBound : float Lower bound for dataframe. highBound : float Higher bound for dataframe. Returns ------- ??? """ data = ((data < lowBound) | (data > highBound)) return data
[ "def", "_find_outOfBound", "(", "self", ",", "data", ",", "lowBound", ",", "highBound", ")", ":", "data", "=", "(", "(", "data", "<", "lowBound", ")", "|", "(", "data", ">", "highBound", ")", ")", "return", "data" ]
Mask for selecting data that is out of bounds. Parameters ---------- data : pd.DataFrame() Input dataframe. lowBound : float Lower bound for dataframe. highBound : float Higher bound for dataframe. Returns ------- ???
[ "Mask", "for", "selecting", "data", "that", "is", "out", "of", "bounds", ".", "Parameters", "----------", "data", ":", "pd", ".", "DataFrame", "()", "Input", "dataframe", ".", "lowBound", ":", "float", "Lower", "bound", "for", "dataframe", ".", "highBound", ...
python
train
annoviko/pyclustering
pyclustering/nnet/dynamic_visualizer.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/dynamic_visualizer.py#L195-L210
def set_canvas_properties(self, canvas, x_title=None, y_title=None, x_lim=None, y_lim=None, x_labels=True, y_labels=True): """! @brief Set properties for specified canvas. @param[in] canvas (uint): Index of canvas whose properties should changed. @param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed. @param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed. @param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then borders are calculated automatically. @param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated automatically. @param[in] x_labels (bool): If True then labels of X axis are displayed. @param[in] y_labels (bool): If True then labels of Y axis are displayed. """ self.__canvases[canvas] = canvas_descr(x_title, y_title, x_lim, y_lim, x_labels, y_labels);
[ "def", "set_canvas_properties", "(", "self", ",", "canvas", ",", "x_title", "=", "None", ",", "y_title", "=", "None", ",", "x_lim", "=", "None", ",", "y_lim", "=", "None", ",", "x_labels", "=", "True", ",", "y_labels", "=", "True", ")", ":", "self", ...
! @brief Set properties for specified canvas. @param[in] canvas (uint): Index of canvas whose properties should changed. @param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed. @param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed. @param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then borders are calculated automatically. @param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated automatically. @param[in] x_labels (bool): If True then labels of X axis are displayed. @param[in] y_labels (bool): If True then labels of Y axis are displayed.
[ "!" ]
python
valid
pandas-dev/pandas
pandas/io/formats/latex.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/latex.py#L40-L163
def write_result(self, buf): """ Render a DataFrame to a LaTeX tabular/longtable environment output. """ # string representation of the columns if len(self.frame.columns) == 0 or len(self.frame.index) == 0: info_line = ('Empty {name}\nColumns: {col}\nIndex: {idx}' .format(name=type(self.frame).__name__, col=self.frame.columns, idx=self.frame.index)) strcols = [[info_line]] else: strcols = self.fmt._to_str_columns() def get_col_type(dtype): if issubclass(dtype.type, np.number): return 'r' else: return 'l' # reestablish the MultiIndex that has been joined by _to_str_column if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex): out = self.frame.index.format( adjoin=False, sparsify=self.fmt.sparsify, names=self.fmt.has_index_names, na_rep=self.fmt.na_rep ) # index.format will sparsify repeated entries with empty strings # so pad these with some empty space def pad_empties(x): for pad in reversed(x): if pad: break return [x[0]] + [i if i else ' ' * len(pad) for i in x[1:]] out = (pad_empties(i) for i in out) # Add empty spaces for each column level clevels = self.frame.columns.nlevels out = [[' ' * len(i[-1])] * clevels + i for i in out] # Add the column names to the last index column cnames = self.frame.columns.names if any(cnames): new_names = [i if i else '{}' for i in cnames] out[self.frame.index.nlevels - 1][:clevels] = new_names # Get rid of old multiindex column and add new ones strcols = out + strcols[1:] column_format = self.column_format if column_format is None: dtypes = self.frame.dtypes._values column_format = ''.join(map(get_col_type, dtypes)) if self.fmt.index: index_format = 'l' * self.frame.index.nlevels column_format = index_format + column_format elif not isinstance(column_format, str): # pragma: no cover raise AssertionError('column_format must be str or unicode, ' 'not {typ}'.format(typ=type(column_format))) if not self.longtable: buf.write('\\begin{{tabular}}{{{fmt}}}\n' .format(fmt=column_format)) buf.write('\\toprule\n') else: buf.write('\\begin{{longtable}}{{{fmt}}}\n' .format(fmt=column_format)) buf.write('\\toprule\n') ilevels = self.frame.index.nlevels clevels = self.frame.columns.nlevels nlevels = clevels if self.fmt.has_index_names and self.fmt.show_index_names: nlevels += 1 strrows = list(zip(*strcols)) self.clinebuf = [] for i, row in enumerate(strrows): if i == nlevels and self.fmt.header: buf.write('\\midrule\n') # End of header if self.longtable: buf.write('\\endhead\n') buf.write('\\midrule\n') buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next ' 'page}}}} \\\\\n'.format(n=len(row))) buf.write('\\midrule\n') buf.write('\\endfoot\n\n') buf.write('\\bottomrule\n') buf.write('\\endlastfoot\n') if self.fmt.kwds.get('escape', True): # escape backslashes first crow = [(x.replace('\\', '\\textbackslash ') .replace('_', '\\_') .replace('%', '\\%').replace('$', '\\$') .replace('#', '\\#').replace('{', '\\{') .replace('}', '\\}').replace('~', '\\textasciitilde ') .replace('^', '\\textasciicircum ') .replace('&', '\\&') if (x and x != '{}') else '{}') for x in row] else: crow = [x if x else '{}' for x in row] if self.bold_rows and self.fmt.index: # bold row labels crow = ['\\textbf{{{x}}}'.format(x=x) if j < ilevels and x.strip() not in ['', '{}'] else x for j, x in enumerate(crow)] if i < clevels and self.fmt.header and self.multicolumn: # sum up columns to multicolumns crow = self._format_multicolumn(crow, ilevels) if (i >= nlevels and self.fmt.index and self.multirow and ilevels > 1): # sum up rows to multirows crow = self._format_multirow(crow, ilevels, i, strrows) buf.write(' & '.join(crow)) buf.write(' \\\\\n') if self.multirow and i < len(strrows) - 1: self._print_cline(buf, i, len(strcols)) if not self.longtable: buf.write('\\bottomrule\n') buf.write('\\end{tabular}\n') else: buf.write('\\end{longtable}\n')
[ "def", "write_result", "(", "self", ",", "buf", ")", ":", "# string representation of the columns", "if", "len", "(", "self", ".", "frame", ".", "columns", ")", "==", "0", "or", "len", "(", "self", ".", "frame", ".", "index", ")", "==", "0", ":", "info...
Render a DataFrame to a LaTeX tabular/longtable environment output.
[ "Render", "a", "DataFrame", "to", "a", "LaTeX", "tabular", "/", "longtable", "environment", "output", "." ]
python
train
CalebBell/thermo
thermo/volume.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/volume.py#L307-L372
def Bhirud_normal(T, Tc, Pc, omega): r'''Calculates saturation liquid density using the Bhirud [1]_ CSP method. Uses Critical temperature and pressure and acentric factor. The density of a liquid is given by: .. math:: &\ln \frac{P_c}{\rho RT} = \ln U^{(0)} + \omega\ln U^{(1)} &\ln U^{(0)} = 1.396 44 - 24.076T_r+ 102.615T_r^2 -255.719T_r^3+355.805T_r^4-256.671T_r^5 + 75.1088T_r^6 &\ln U^{(1)} = 13.4412 - 135.7437 T_r + 533.380T_r^2- 1091.453T_r^3+1231.43T_r^4 - 728.227T_r^5 + 176.737T_r^6 Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Pc : float Critical pressure of fluid [Pa] omega : float Acentric factor for fluid, [-] Returns ------- Vm : float Saturated liquid molar volume, [mol/m^3] Notes ----- Claimed inadequate by others. An interpolation table for ln U values are used from Tr = 0.98 - 1.000. Has terrible behavior at low reduced temperatures. Examples -------- Pentane >>> Bhirud_normal(280.0, 469.7, 33.7E5, 0.252) 0.00011249654029488583 References ---------- .. [1] Bhirud, Vasant L. "Saturated Liquid Densities of Normal Fluids." AIChE Journal 24, no. 6 (November 1, 1978): 1127-31. doi:10.1002/aic.690240630 ''' Tr = T/Tc if Tr <= 0.98: lnU0 = 1.39644 - 24.076*Tr + 102.615*Tr**2 - 255.719*Tr**3 \ + 355.805*Tr**4 - 256.671*Tr**5 + 75.1088*Tr**6 lnU1 = 13.4412 - 135.7437*Tr + 533.380*Tr**2-1091.453*Tr**3 \ + 1231.43*Tr**4 - 728.227*Tr**5 + 176.737*Tr**6 elif Tr > 1: raise Exception('Critical phase, correlation does not apply') else: lnU0 = Bhirud_normal_lnU0_interp(Tr) lnU1 = Bhirud_normal_lnU1_interp(Tr) Unonpolar = exp(lnU0 + omega*lnU1) Vm = Unonpolar*R*T/Pc return Vm
[ "def", "Bhirud_normal", "(", "T", ",", "Tc", ",", "Pc", ",", "omega", ")", ":", "Tr", "=", "T", "/", "Tc", "if", "Tr", "<=", "0.98", ":", "lnU0", "=", "1.39644", "-", "24.076", "*", "Tr", "+", "102.615", "*", "Tr", "**", "2", "-", "255.719", ...
r'''Calculates saturation liquid density using the Bhirud [1]_ CSP method. Uses Critical temperature and pressure and acentric factor. The density of a liquid is given by: .. math:: &\ln \frac{P_c}{\rho RT} = \ln U^{(0)} + \omega\ln U^{(1)} &\ln U^{(0)} = 1.396 44 - 24.076T_r+ 102.615T_r^2 -255.719T_r^3+355.805T_r^4-256.671T_r^5 + 75.1088T_r^6 &\ln U^{(1)} = 13.4412 - 135.7437 T_r + 533.380T_r^2- 1091.453T_r^3+1231.43T_r^4 - 728.227T_r^5 + 176.737T_r^6 Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Pc : float Critical pressure of fluid [Pa] omega : float Acentric factor for fluid, [-] Returns ------- Vm : float Saturated liquid molar volume, [mol/m^3] Notes ----- Claimed inadequate by others. An interpolation table for ln U values are used from Tr = 0.98 - 1.000. Has terrible behavior at low reduced temperatures. Examples -------- Pentane >>> Bhirud_normal(280.0, 469.7, 33.7E5, 0.252) 0.00011249654029488583 References ---------- .. [1] Bhirud, Vasant L. "Saturated Liquid Densities of Normal Fluids." AIChE Journal 24, no. 6 (November 1, 1978): 1127-31. doi:10.1002/aic.690240630
[ "r", "Calculates", "saturation", "liquid", "density", "using", "the", "Bhirud", "[", "1", "]", "_", "CSP", "method", ".", "Uses", "Critical", "temperature", "and", "pressure", "and", "acentric", "factor", "." ]
python
valid
RRZE-HPC/kerncraft
kerncraft/models/roofline.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/roofline.py#L339-L386
def report(self, output_file=sys.stdout): """Print human readable report of model.""" cpu_perf = self.results['cpu bottleneck']['performance throughput'] if self.verbose >= 3: print('{}'.format(pformat(self.results)), file=output_file) if self.verbose >= 1: print('Bottlenecks:', file=output_file) print(' level | a. intensity | performance | peak bandwidth | peak bandwidth kernel', file=output_file) print('--------+--------------+-----------------+-------------------+----------------------', file=output_file) print(' CPU | | {!s:>15} | |'.format( cpu_perf[self._args.unit]), file=output_file) for b in self.results['mem bottlenecks']: # Skip CPU-L1 from Roofline model if b is None: continue print('{level:>7} | {arithmetic intensity:>5.2} FLOP/B | {0!s:>15} |' ' {bandwidth!s:>17} | {bw kernel:<8}'.format( b['performance'][self._args.unit], **b), file=output_file) print('', file=output_file) print('IACA analisys:', file=output_file) print('{!s}'.format( {k: v for k, v in list(self.results['cpu bottleneck'].items()) if k not in['IACA output']}), file=output_file) if self.results['min performance']['FLOP/s'] > cpu_perf['FLOP/s']: # CPU bound print('CPU bound. {!s} due to CPU bottleneck'.format(cpu_perf[self._args.unit]), file=output_file) else: # Cache or mem bound print('Cache or mem bound.', file=output_file) bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']] print('{!s} due to {} transfer bottleneck (with bw from {} benchmark)'.format( bottleneck['performance'][self._args.unit], bottleneck['level'], bottleneck['bw kernel']), file=output_file) print('Arithmetic Intensity: {:.2f} FLOP/B'.format(bottleneck['arithmetic intensity']), file=output_file)
[ "def", "report", "(", "self", ",", "output_file", "=", "sys", ".", "stdout", ")", ":", "cpu_perf", "=", "self", ".", "results", "[", "'cpu bottleneck'", "]", "[", "'performance throughput'", "]", "if", "self", ".", "verbose", ">=", "3", ":", "print", "("...
Print human readable report of model.
[ "Print", "human", "readable", "report", "of", "model", "." ]
python
test
raiden-network/raiden
raiden/network/proxies/token_network.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/token_network.py#L562-L597
def closing_address( self, participant1: Address, participant2: Address, block_identifier: BlockSpecification, channel_identifier: ChannelID = None, ) -> Optional[Address]: """ Returns the address of the closer, if the channel is closed and not settled. None otherwise. """ try: channel_data = self._detail_channel( participant1=participant1, participant2=participant2, block_identifier=block_identifier, channel_identifier=channel_identifier, ) except RaidenRecoverableError: return None if channel_data.state >= ChannelState.SETTLED: return None participants_data = self.detail_participants( participant1=participant1, participant2=participant2, block_identifier=block_identifier, channel_identifier=channel_data.channel_identifier, ) if participants_data.our_details.is_closer: return participants_data.our_details.address elif participants_data.partner_details.is_closer: return participants_data.partner_details.address return None
[ "def", "closing_address", "(", "self", ",", "participant1", ":", "Address", ",", "participant2", ":", "Address", ",", "block_identifier", ":", "BlockSpecification", ",", "channel_identifier", ":", "ChannelID", "=", "None", ",", ")", "->", "Optional", "[", "Addre...
Returns the address of the closer, if the channel is closed and not settled. None otherwise.
[ "Returns", "the", "address", "of", "the", "closer", "if", "the", "channel", "is", "closed", "and", "not", "settled", ".", "None", "otherwise", "." ]
python
train
saltstack/salt
salt/modules/pagerduty_util.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pagerduty_util.py#L312-L331
def delete_resource(resource_name, key, identifier_fields, profile='pagerduty', subdomain=None, api_key=None): ''' delete any pagerduty resource Helper method for absent() example: delete_resource("users", key, ["id","name","email"]) # delete by id or name or email ''' resource = get_resource(resource_name, key, identifier_fields, profile, subdomain, api_key) if resource: if __opts__['test']: return 'would delete' # flush the resource_cache, because we're modifying a resource del __context__['pagerduty_util.resource_cache'][resource_name] resource_id = _get_resource_id(resource) return _query(method='DELETE', action='{0}/{1}'.format(resource_name, resource_id), profile=profile, subdomain=subdomain, api_key=api_key) else: return True
[ "def", "delete_resource", "(", "resource_name", ",", "key", ",", "identifier_fields", ",", "profile", "=", "'pagerduty'", ",", "subdomain", "=", "None", ",", "api_key", "=", "None", ")", ":", "resource", "=", "get_resource", "(", "resource_name", ",", "key", ...
delete any pagerduty resource Helper method for absent() example: delete_resource("users", key, ["id","name","email"]) # delete by id or name or email
[ "delete", "any", "pagerduty", "resource" ]
python
train
PyCQA/pylint
pylint/__init__.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/__init__.py#L15-L22
def run_pylint(): """run pylint""" from pylint.lint import Run try: Run(sys.argv[1:]) except KeyboardInterrupt: sys.exit(1)
[ "def", "run_pylint", "(", ")", ":", "from", "pylint", ".", "lint", "import", "Run", "try", ":", "Run", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", "except", "KeyboardInterrupt", ":", "sys", ".", "exit", "(", "1", ")" ]
run pylint
[ "run", "pylint" ]
python
test
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/mavproxy.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/mavproxy.py#L401-L411
def clear_zipimport_cache(): """Clear out cached entries from _zip_directory_cache. See http://www.digi.com/wiki/developer/index.php/Error_messages""" import sys, zipimport syspath_backup = list(sys.path) zipimport._zip_directory_cache.clear() # load back items onto sys.path sys.path = syspath_backup # add this too: see https://mail.python.org/pipermail/python-list/2005-May/353229.html sys.path_importer_cache.clear()
[ "def", "clear_zipimport_cache", "(", ")", ":", "import", "sys", ",", "zipimport", "syspath_backup", "=", "list", "(", "sys", ".", "path", ")", "zipimport", ".", "_zip_directory_cache", ".", "clear", "(", ")", "# load back items onto sys.path", "sys", ".", "path"...
Clear out cached entries from _zip_directory_cache. See http://www.digi.com/wiki/developer/index.php/Error_messages
[ "Clear", "out", "cached", "entries", "from", "_zip_directory_cache", ".", "See", "http", ":", "//", "www", ".", "digi", ".", "com", "/", "wiki", "/", "developer", "/", "index", ".", "php", "/", "Error_messages" ]
python
train
opendatateam/udata
udata/harvest/actions.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L138-L143
def run(ident): '''Launch or resume an harvesting for a given source if none is running''' source = get_source(ident) cls = backends.get(current_app, source.backend) backend = cls(source) backend.harvest()
[ "def", "run", "(", "ident", ")", ":", "source", "=", "get_source", "(", "ident", ")", "cls", "=", "backends", ".", "get", "(", "current_app", ",", "source", ".", "backend", ")", "backend", "=", "cls", "(", "source", ")", "backend", ".", "harvest", "(...
Launch or resume an harvesting for a given source if none is running
[ "Launch", "or", "resume", "an", "harvesting", "for", "a", "given", "source", "if", "none", "is", "running" ]
python
train
sampsyo/confuse
confuse.py
https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L644-L660
def _package_path(name): """Returns the path to the package containing the named module or None if the path could not be identified (e.g., if ``name == "__main__"``). """ loader = pkgutil.get_loader(name) if loader is None or name == '__main__': return None if hasattr(loader, 'get_filename'): filepath = loader.get_filename(name) else: # Fall back to importing the specified module. __import__(name) filepath = sys.modules[name].__file__ return os.path.dirname(os.path.abspath(filepath))
[ "def", "_package_path", "(", "name", ")", ":", "loader", "=", "pkgutil", ".", "get_loader", "(", "name", ")", "if", "loader", "is", "None", "or", "name", "==", "'__main__'", ":", "return", "None", "if", "hasattr", "(", "loader", ",", "'get_filename'", ")...
Returns the path to the package containing the named module or None if the path could not be identified (e.g., if ``name == "__main__"``).
[ "Returns", "the", "path", "to", "the", "package", "containing", "the", "named", "module", "or", "None", "if", "the", "path", "could", "not", "be", "identified", "(", "e", ".", "g", ".", "if", "name", "==", "__main__", ")", "." ]
python
train
h2oai/h2o-3
h2o-py/h2o/utils/progressbar.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/progressbar.py#L700-L703
def render(self, progress, width=None, status=None): """Render the widget.""" current_pct = int(progress * 100 + 0.1) return RenderResult(rendered="%3d%%" % current_pct, next_progress=(current_pct + 1) / 100)
[ "def", "render", "(", "self", ",", "progress", ",", "width", "=", "None", ",", "status", "=", "None", ")", ":", "current_pct", "=", "int", "(", "progress", "*", "100", "+", "0.1", ")", "return", "RenderResult", "(", "rendered", "=", "\"%3d%%\"", "%", ...
Render the widget.
[ "Render", "the", "widget", "." ]
python
test
robdmc/behold
behold/logger.py
https://github.com/robdmc/behold/blob/ac1b7707e2d7472a50d837dda78be1e23af8fce5/behold/logger.py#L236-L267
def when_values(self, **criteria): """ By default, ``Behold`` objects call ``str()`` on all variables before sending them to the output stream. This method enables you to filter on those extracted string representations. The syntax is exactly like that of the ``when_context()`` method. Here is an example. .. code-block:: python from behold import Behold, Item items = [ Item(a=1, b=2), Item(c=3, d=4), ] for item in items: # You can filter on the string representation Behold(tag='first').when_values(a='1').show(item) # Behold is smart enough to transform your criteria to strings # so this also works Behold(tag='second').when_values(a=1).show(item) # Because the string representation is not present in the local # scope, you must use Django-query-like syntax for logical # operations. Behold(tag='third').when_values(a__gte=1).show(item) """ criteria = {k: str(v) for k, v in criteria.items()} self._add_value_filters(**criteria) return self
[ "def", "when_values", "(", "self", ",", "*", "*", "criteria", ")", ":", "criteria", "=", "{", "k", ":", "str", "(", "v", ")", "for", "k", ",", "v", "in", "criteria", ".", "items", "(", ")", "}", "self", ".", "_add_value_filters", "(", "*", "*", ...
By default, ``Behold`` objects call ``str()`` on all variables before sending them to the output stream. This method enables you to filter on those extracted string representations. The syntax is exactly like that of the ``when_context()`` method. Here is an example. .. code-block:: python from behold import Behold, Item items = [ Item(a=1, b=2), Item(c=3, d=4), ] for item in items: # You can filter on the string representation Behold(tag='first').when_values(a='1').show(item) # Behold is smart enough to transform your criteria to strings # so this also works Behold(tag='second').when_values(a=1).show(item) # Because the string representation is not present in the local # scope, you must use Django-query-like syntax for logical # operations. Behold(tag='third').when_values(a__gte=1).show(item)
[ "By", "default", "Behold", "objects", "call", "str", "()", "on", "all", "variables", "before", "sending", "them", "to", "the", "output", "stream", ".", "This", "method", "enables", "you", "to", "filter", "on", "those", "extracted", "string", "representations",...
python
train
jjjake/internetarchive
internetarchive/api.py
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/api.py#L563-L579
def configure(username=None, password=None, config_file=None): """Configure internetarchive with your Archive.org credentials. :type username: str :param username: The email address associated with your Archive.org account. :type password: str :param password: Your Archive.org password. Usage: >>> from internetarchive import configure >>> configure('user@example.com', 'password') """ username = input('Email address: ') if not username else username password = getpass('Password: ') if not password else password config_file_path = config_module.write_config_file(username, password, config_file) return config_file_path
[ "def", "configure", "(", "username", "=", "None", ",", "password", "=", "None", ",", "config_file", "=", "None", ")", ":", "username", "=", "input", "(", "'Email address: '", ")", "if", "not", "username", "else", "username", "password", "=", "getpass", "("...
Configure internetarchive with your Archive.org credentials. :type username: str :param username: The email address associated with your Archive.org account. :type password: str :param password: Your Archive.org password. Usage: >>> from internetarchive import configure >>> configure('user@example.com', 'password')
[ "Configure", "internetarchive", "with", "your", "Archive", ".", "org", "credentials", "." ]
python
train
opencobra/memote
memote/suite/reporting/report.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/reporting/report.py#L114-L164
def compute_score(self): """Calculate the overall test score using the configuration.""" # LOGGER.info("Begin scoring") cases = self.get_configured_tests() | set(self.result.cases) scores = DataFrame({"score": 0.0, "max": 1.0}, index=sorted(cases)) self.result.setdefault("score", dict()) self.result["score"]["sections"] = list() # Calculate the scores for each test individually. for test, result in iteritems(self.result.cases): # LOGGER.info("Calculate score for test: '%s'.", test) # Test metric may be a dictionary for a parametrized test. metric = result["metric"] if hasattr(metric, "items"): result["score"] = test_score = dict() total = 0.0 for key, value in iteritems(metric): value = 1.0 - value total += value test_score[key] = value # For some reason there are parametrized tests without cases. if len(metric) == 0: metric = 0.0 else: metric = total / len(metric) else: metric = 1.0 - metric scores.at[test, "score"] = metric scores.loc[test, :] *= self.config["weights"].get(test, 1.0) score = 0.0 maximum = 0.0 # Calculate the scores for each section considering the individual test # case scores. for section_id, card in iteritems( self.config['cards']['scored']['sections'] ): # LOGGER.info("Calculate score for section: '%s'.", section_id) cases = card.get("cases", None) if cases is None: continue card_score = scores.loc[cases, "score"].sum() card_total = scores.loc[cases, "max"].sum() # Format results nicely to work immediately with Vega Bar Chart. section_score = {"section": section_id, "score": card_score / card_total} self.result["score"]["sections"].append(section_score) # Calculate the final score for the entire model. weight = card.get("weight", 1.0) score += card_score * weight maximum += card_total * weight self.result["score"]["total_score"] = score / maximum
[ "def", "compute_score", "(", "self", ")", ":", "# LOGGER.info(\"Begin scoring\")", "cases", "=", "self", ".", "get_configured_tests", "(", ")", "|", "set", "(", "self", ".", "result", ".", "cases", ")", "scores", "=", "DataFrame", "(", "{", "\"score\"", ":",...
Calculate the overall test score using the configuration.
[ "Calculate", "the", "overall", "test", "score", "using", "the", "configuration", "." ]
python
train
kensho-technologies/grift
grift/property_types.py
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L101-L119
def validate_resource(self, value): """Validate the network resource with exponential backoff""" def do_backoff(*args, **kwargs): """Call self._test_connection with exponential backoff, for self._max_tries attempts""" attempts = 0 while True: try: self._test_connection(*args, **kwargs) break except ValidationError: wait_secs = min(self._max_wait, 2 ** attempts) attempts += 1 if attempts < self._max_tries: time.sleep(wait_secs) else: raise do_backoff(value)
[ "def", "validate_resource", "(", "self", ",", "value", ")", ":", "def", "do_backoff", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Call self._test_connection with exponential backoff, for self._max_tries attempts\"\"\"", "attempts", "=", "0", "while", "...
Validate the network resource with exponential backoff
[ "Validate", "the", "network", "resource", "with", "exponential", "backoff" ]
python
train
jpscaletti/solution
solution/fields/splitted_datetime.py
https://github.com/jpscaletti/solution/blob/eabafd8e695bbb0209242e002dbcc05ffb327f43/solution/fields/splitted_datetime.py#L83-L90
def _str_to_datetime(self, str_value): """Parses a `YYYY-MM-DD` string into a datetime object.""" try: ldt = [int(f) for f in str_value.split('-')] dt = datetime.datetime(*ldt) except (ValueError, TypeError): return None return dt
[ "def", "_str_to_datetime", "(", "self", ",", "str_value", ")", ":", "try", ":", "ldt", "=", "[", "int", "(", "f", ")", "for", "f", "in", "str_value", ".", "split", "(", "'-'", ")", "]", "dt", "=", "datetime", ".", "datetime", "(", "*", "ldt", ")"...
Parses a `YYYY-MM-DD` string into a datetime object.
[ "Parses", "a", "YYYY", "-", "MM", "-", "DD", "string", "into", "a", "datetime", "object", "." ]
python
train
Pylons/plaster_pastedeploy
src/plaster_pastedeploy/__init__.py
https://github.com/Pylons/plaster_pastedeploy/blob/72a08f3fb6d11a0b039f381ade83f045668cfcb0/src/plaster_pastedeploy/__init__.py#L155-L174
def get_wsgi_filter(self, name=None, defaults=None): """Reads the configuration soruce and finds and loads a WSGI filter defined by the filter entry with the name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI filter to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadfilter`. :param defaults: The ``global_conf`` that will be used during filter instantiation. :return: A callable that can filter a WSGI application. """ name = self._maybe_get_default_name(name) defaults = self._get_defaults(defaults) return loadfilter( self.pastedeploy_spec, name=name, relative_to=self.relative_to, global_conf=defaults, )
[ "def", "get_wsgi_filter", "(", "self", ",", "name", "=", "None", ",", "defaults", "=", "None", ")", ":", "name", "=", "self", ".", "_maybe_get_default_name", "(", "name", ")", "defaults", "=", "self", ".", "_get_defaults", "(", "defaults", ")", "return", ...
Reads the configuration soruce and finds and loads a WSGI filter defined by the filter entry with the name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI filter to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadfilter`. :param defaults: The ``global_conf`` that will be used during filter instantiation. :return: A callable that can filter a WSGI application.
[ "Reads", "the", "configuration", "soruce", "and", "finds", "and", "loads", "a", "WSGI", "filter", "defined", "by", "the", "filter", "entry", "with", "the", "name", "name", "per", "the", "PasteDeploy", "configuration", "format", "and", "loading", "mechanism", "...
python
train
google/grr
grr/server/grr_response_server/email_alerts.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/email_alerts.py#L42-L52
def SplitEmailsAndAppendEmailDomain(self, address_list): """Splits a string of comma-separated emails, appending default domain.""" result = [] # Process email addresses, and build up a list. if isinstance(address_list, rdf_standard.DomainEmailAddress): address_list = [str(address_list)] elif isinstance(address_list, string_types): address_list = [address for address in address_list.split(",") if address] for address in address_list: result.append(self.AddEmailDomain(address)) return result
[ "def", "SplitEmailsAndAppendEmailDomain", "(", "self", ",", "address_list", ")", ":", "result", "=", "[", "]", "# Process email addresses, and build up a list.", "if", "isinstance", "(", "address_list", ",", "rdf_standard", ".", "DomainEmailAddress", ")", ":", "address_...
Splits a string of comma-separated emails, appending default domain.
[ "Splits", "a", "string", "of", "comma", "-", "separated", "emails", "appending", "default", "domain", "." ]
python
train
SmileyChris/easy-thumbnails
easy_thumbnails/utils.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/utils.py#L111-L137
def exif_orientation(im): """ Rotate and/or flip an image to respect the image's EXIF orientation data. """ try: exif = im._getexif() except Exception: # There are many ways that _getexif fails, we're just going to blanket # cover them all. exif = None if exif: orientation = exif.get(0x0112) if orientation == 2: im = im.transpose(Image.FLIP_LEFT_RIGHT) elif orientation == 3: im = im.transpose(Image.ROTATE_180) elif orientation == 4: im = im.transpose(Image.FLIP_TOP_BOTTOM) elif orientation == 5: im = im.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT) elif orientation == 6: im = im.transpose(Image.ROTATE_270) elif orientation == 7: im = im.transpose(Image.ROTATE_90).transpose(Image.FLIP_LEFT_RIGHT) elif orientation == 8: im = im.transpose(Image.ROTATE_90) return im
[ "def", "exif_orientation", "(", "im", ")", ":", "try", ":", "exif", "=", "im", ".", "_getexif", "(", ")", "except", "Exception", ":", "# There are many ways that _getexif fails, we're just going to blanket", "# cover them all.", "exif", "=", "None", "if", "exif", ":...
Rotate and/or flip an image to respect the image's EXIF orientation data.
[ "Rotate", "and", "/", "or", "flip", "an", "image", "to", "respect", "the", "image", "s", "EXIF", "orientation", "data", "." ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10702-L10724
def separate_directions(di_block): """ Separates set of directions into two modes based on principal direction Parameters _______________ di_block : block of nested dec,inc pairs Return mode_1_block,mode_2_block : two lists of nested dec,inc pairs """ ppars = doprinc(di_block) di_df = pd.DataFrame(di_block) # turn into a data frame for easy filtering di_df.columns = ['dec', 'inc'] di_df['pdec'] = ppars['dec'] di_df['pinc'] = ppars['inc'] di_df['angle'] = angle(di_df[['dec', 'inc']].values, di_df[['pdec', 'pinc']].values) mode1_df = di_df[di_df['angle'] <= 90] mode2_df = di_df[di_df['angle'] > 90] mode1 = mode1_df[['dec', 'inc']].values.tolist() mode2 = mode2_df[['dec', 'inc']].values.tolist() return mode1, mode2
[ "def", "separate_directions", "(", "di_block", ")", ":", "ppars", "=", "doprinc", "(", "di_block", ")", "di_df", "=", "pd", ".", "DataFrame", "(", "di_block", ")", "# turn into a data frame for easy filtering", "di_df", ".", "columns", "=", "[", "'dec'", ",", ...
Separates set of directions into two modes based on principal direction Parameters _______________ di_block : block of nested dec,inc pairs Return mode_1_block,mode_2_block : two lists of nested dec,inc pairs
[ "Separates", "set", "of", "directions", "into", "two", "modes", "based", "on", "principal", "direction" ]
python
train
spyder-ide/conda-manager
conda_manager/api/client_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/client_api.py#L342-L352
def set_domain(self, domain='https://api.anaconda.org'): """Reset current api domain.""" logger.debug(str((domain))) config = binstar_client.utils.get_config() config['url'] = domain binstar_client.utils.set_config(config) self._anaconda_client_api = binstar_client.utils.get_server_api( token=None, log_level=logging.NOTSET) return self.user()
[ "def", "set_domain", "(", "self", ",", "domain", "=", "'https://api.anaconda.org'", ")", ":", "logger", ".", "debug", "(", "str", "(", "(", "domain", ")", ")", ")", "config", "=", "binstar_client", ".", "utils", ".", "get_config", "(", ")", "config", "["...
Reset current api domain.
[ "Reset", "current", "api", "domain", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/shellapp.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/shellapp.py#L326-L347
def _run_cmd_line_code(self): """Run code or file specified at the command-line""" if self.code_to_run: line = self.code_to_run try: self.log.info("Running code given at command line (c=): %s" % line) self.shell.run_cell(line, store_history=False) except: self.log.warn("Error in executing line in user namespace: %s" % line) self.shell.showtraceback() # Like Python itself, ignore the second if the first of these is present elif self.file_to_run: fname = self.file_to_run try: self._exec_file(fname) except: self.log.warn("Error in executing file in user namespace: %s" % fname) self.shell.showtraceback()
[ "def", "_run_cmd_line_code", "(", "self", ")", ":", "if", "self", ".", "code_to_run", ":", "line", "=", "self", ".", "code_to_run", "try", ":", "self", ".", "log", ".", "info", "(", "\"Running code given at command line (c=): %s\"", "%", "line", ")", "self", ...
Run code or file specified at the command-line
[ "Run", "code", "or", "file", "specified", "at", "the", "command", "-", "line" ]
python
test
bsmurphy/PyKrige
pykrige/ok.py
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/ok.py#L459-L478
def _get_kriging_matrix(self, n): """Assembles the kriging matrix.""" if self.coordinates_type == 'euclidean': xy = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1) d = cdist(xy, xy, 'euclidean') elif self.coordinates_type == 'geographic': d = core.great_circle_distance(self.X_ADJUSTED[:,np.newaxis], self.Y_ADJUSTED[:,np.newaxis], self.X_ADJUSTED, self.Y_ADJUSTED) a = np.zeros((n+1, n+1)) a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d) np.fill_diagonal(a, 0.) a[n, :] = 1.0 a[:, n] = 1.0 a[n, n] = 0.0 return a
[ "def", "_get_kriging_matrix", "(", "self", ",", "n", ")", ":", "if", "self", ".", "coordinates_type", "==", "'euclidean'", ":", "xy", "=", "np", ".", "concatenate", "(", "(", "self", ".", "X_ADJUSTED", "[", ":", ",", "np", ".", "newaxis", "]", ",", "...
Assembles the kriging matrix.
[ "Assembles", "the", "kriging", "matrix", "." ]
python
train
chewse/djangorestframework-signed-permissions
signedpermissions/signing.py
https://github.com/chewse/djangorestframework-signed-permissions/blob/b1cc4c57999fc5be8361f60f0ada1d777b27feab/signedpermissions/signing.py#L35-L38
def unsign_filters_and_actions(sign, dotted_model_name): """Return the list of filters and actions for dotted_model_name.""" permissions = signing.loads(sign) return permissions.get(dotted_model_name, [])
[ "def", "unsign_filters_and_actions", "(", "sign", ",", "dotted_model_name", ")", ":", "permissions", "=", "signing", ".", "loads", "(", "sign", ")", "return", "permissions", ".", "get", "(", "dotted_model_name", ",", "[", "]", ")" ]
Return the list of filters and actions for dotted_model_name.
[ "Return", "the", "list", "of", "filters", "and", "actions", "for", "dotted_model_name", "." ]
python
train
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L2742-L2751
def eval(self, script, numkeys, *keys_and_args): """ Execute the Lua ``script``, specifying the ``numkeys`` the script will touch and the key names and argument values in ``keys_and_args``. Returns the result of the script. In practice, use the object returned by ``register_script``. This function exists purely for Redis API completion. """ return self.execute_command('EVAL', script, numkeys, *keys_and_args)
[ "def", "eval", "(", "self", ",", "script", ",", "numkeys", ",", "*", "keys_and_args", ")", ":", "return", "self", ".", "execute_command", "(", "'EVAL'", ",", "script", ",", "numkeys", ",", "*", "keys_and_args", ")" ]
Execute the Lua ``script``, specifying the ``numkeys`` the script will touch and the key names and argument values in ``keys_and_args``. Returns the result of the script. In practice, use the object returned by ``register_script``. This function exists purely for Redis API completion.
[ "Execute", "the", "Lua", "script", "specifying", "the", "numkeys", "the", "script", "will", "touch", "and", "the", "key", "names", "and", "argument", "values", "in", "keys_and_args", ".", "Returns", "the", "result", "of", "the", "script", "." ]
python
train
OpenKMIP/PyKMIP
kmip/core/objects.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/objects.py#L2903-L2974
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Read the data encoding the KeyWrappingSpecification struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. """ super(KeyWrappingSpecification, self).read( input_stream, kmip_version=kmip_version ) local_stream = BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.WRAPPING_METHOD, local_stream): self._wrapping_method = primitives.Enumeration( enum=enums.WrappingMethod, tag=enums.Tags.WRAPPING_METHOD ) self._wrapping_method.read( local_stream, kmip_version=kmip_version ) else: raise ValueError( "Invalid struct missing the wrapping method attribute." ) if self.is_tag_next( enums.Tags.ENCRYPTION_KEY_INFORMATION, local_stream ): self._encryption_key_information = EncryptionKeyInformation() self._encryption_key_information.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next( enums.Tags.MAC_SIGNATURE_KEY_INFORMATION, local_stream ): self._mac_signature_key_information = MACSignatureKeyInformation() self._mac_signature_key_information.read( local_stream, kmip_version=kmip_version ) attribute_names = [] while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_stream): attribute_name = primitives.TextString( tag=enums.Tags.ATTRIBUTE_NAME ) attribute_name.read(local_stream, kmip_version=kmip_version) attribute_names.append(attribute_name) self._attribute_names = attribute_names if self.is_tag_next(enums.Tags.ENCODING_OPTION, local_stream): self._encoding_option = primitives.Enumeration( enum=enums.EncodingOption, tag=enums.Tags.ENCODING_OPTION ) self._encoding_option.read( local_stream, kmip_version=kmip_version ) self.is_oversized(local_stream)
[ "def", "read", "(", "self", ",", "input_stream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "super", "(", "KeyWrappingSpecification", ",", "self", ")", ".", "read", "(", "input_stream", ",", "kmip_version", "=", "kmip_v...
Read the data encoding the KeyWrappingSpecification struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
[ "Read", "the", "data", "encoding", "the", "KeyWrappingSpecification", "struct", "and", "decode", "it", "into", "its", "constituent", "parts", "." ]
python
test
4Catalyzer/flask-resty
flask_resty/decorators.py
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/decorators.py#L31-L50
def request_cached_property(func): """Make the given method a per-request cached property. This caches the value on the request context rather than on the object itself, preventing problems if the object gets reused across multiple requests. """ @property @functools.wraps(func) def wrapped(self): cached_value = context.get_for_view(self, func.__name__, UNDEFINED) if cached_value is not UNDEFINED: return cached_value value = func(self) context.set_for_view(self, func.__name__, value) return value return wrapped
[ "def", "request_cached_property", "(", "func", ")", ":", "@", "property", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "self", ")", ":", "cached_value", "=", "context", ".", "get_for_view", "(", "self", ",", "func", ".", "__na...
Make the given method a per-request cached property. This caches the value on the request context rather than on the object itself, preventing problems if the object gets reused across multiple requests.
[ "Make", "the", "given", "method", "a", "per", "-", "request", "cached", "property", "." ]
python
train
mozilla/amo-validator
validator/metadata_helpers.py
https://github.com/mozilla/amo-validator/blob/0251bfbd7d93106e01ecdb6de5fcd1dc1a180664/validator/metadata_helpers.py#L53-L78
def validate_version(err, value, source): 'Tests a manifest version number' field_name = '<em:version>' if source == 'install.rdf' else 'version' err.metadata['version'] = value # May not be longer than 32 characters if len(value) > 32: err.error( ('metadata_helpers', '_test_version', 'too_long'), 'The value of {name} is too long'.format(name=field_name), 'Values supplied for {name} in the {source} file must be 32 ' 'characters or less.'.format(name=field_name, source=source), source) # Must be a valid version number. if not VERSION_PATTERN.match(value): err.error( ('metadata_helpers', '_test_version', 'invalid_format'), 'The value of {name} is invalid'.format(name=field_name), 'The values supplied for version in the {source} file is not a ' 'valid version string. It can only contain letters, numbers, and ' 'the symbols +*.-_.'.format(name=field_name, source=source), source)
[ "def", "validate_version", "(", "err", ",", "value", ",", "source", ")", ":", "field_name", "=", "'<em:version>'", "if", "source", "==", "'install.rdf'", "else", "'version'", "err", ".", "metadata", "[", "'version'", "]", "=", "value", "# May not be longer than ...
Tests a manifest version number
[ "Tests", "a", "manifest", "version", "number" ]
python
train
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1863-L1873
def on_for_rotations(self, left_speed, right_speed, rotations, brake=True, block=True): """ Rotate the motors at 'left_speed & right_speed' for 'rotations'. Speeds can be percentages or any SpeedValue implementation. If the left speed is not equal to the right speed (i.e., the robot will turn), the motor on the outside of the turn will rotate for the full ``rotations`` while the motor on the inside will have its requested distance calculated according to the expected turn. """ MoveTank.on_for_degrees(self, left_speed, right_speed, rotations * 360, brake, block)
[ "def", "on_for_rotations", "(", "self", ",", "left_speed", ",", "right_speed", ",", "rotations", ",", "brake", "=", "True", ",", "block", "=", "True", ")", ":", "MoveTank", ".", "on_for_degrees", "(", "self", ",", "left_speed", ",", "right_speed", ",", "ro...
Rotate the motors at 'left_speed & right_speed' for 'rotations'. Speeds can be percentages or any SpeedValue implementation. If the left speed is not equal to the right speed (i.e., the robot will turn), the motor on the outside of the turn will rotate for the full ``rotations`` while the motor on the inside will have its requested distance calculated according to the expected turn.
[ "Rotate", "the", "motors", "at", "left_speed", "&", "right_speed", "for", "rotations", ".", "Speeds", "can", "be", "percentages", "or", "any", "SpeedValue", "implementation", "." ]
python
train
mozilla-releng/signtool
signtool/util/archives.py
https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L186-L196
def unpackfile(filename, destdir): """Unpack a mar or exe into destdir""" if filename.endswith(".mar"): return unpackmar(filename, destdir) elif filename.endswith(".exe"): return unpackexe(filename, destdir) elif filename.endswith(".tar") or filename.endswith(".tar.gz") \ or filename.endswith(".tgz"): return unpacktar(filename, destdir) else: raise ValueError("Unknown file type: %s" % filename)
[ "def", "unpackfile", "(", "filename", ",", "destdir", ")", ":", "if", "filename", ".", "endswith", "(", "\".mar\"", ")", ":", "return", "unpackmar", "(", "filename", ",", "destdir", ")", "elif", "filename", ".", "endswith", "(", "\".exe\"", ")", ":", "re...
Unpack a mar or exe into destdir
[ "Unpack", "a", "mar", "or", "exe", "into", "destdir" ]
python
train
ymotongpoo/pysuddendeath
suddendeath/__init__.py
https://github.com/ymotongpoo/pysuddendeath/blob/2ce26d3229e60ce1f1fd5f032a7b0512dec25c5a/suddendeath/__init__.py#L50-L69
def suddendeathmessage(message): ''' suddendeathmessage returns "突然の死" like ascii art decorated message string. :param str message: random unicode mixed text :rtype: str ''' msg_len = message_length(message) header_len = msg_len // 2 + 2 footer_len = (msg_len // 2) * 2 + 1 footer_pattern = cycle(["Y", "^"]) header = "_" + "人" * header_len + "_" footer = " ̄" for _ in range(footer_len): footer += next(footer_pattern) footer += " ̄" middle = "> " + message + " <" return "\n".join([header, middle, footer])
[ "def", "suddendeathmessage", "(", "message", ")", ":", "msg_len", "=", "message_length", "(", "message", ")", "header_len", "=", "msg_len", "//", "2", "+", "2", "footer_len", "=", "(", "msg_len", "//", "2", ")", "*", "2", "+", "1", "footer_pattern", "=",...
suddendeathmessage returns "突然の死" like ascii art decorated message string. :param str message: random unicode mixed text :rtype: str
[ "suddendeathmessage", "returns", "突然の死", "like", "ascii", "art", "decorated", "message", "string", "." ]
python
train
dslackw/slpkg
slpkg/sbo/slackbuild.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/sbo/slackbuild.py#L224-L233
def sbo_version_source(self, slackbuilds): """Create sbo name with version """ sbo_versions, sources = [], [] for sbo in slackbuilds: status(0.02) sbo_ver = "{0}-{1}".format(sbo, SBoGrep(sbo).version()) sbo_versions.append(sbo_ver) sources.append(SBoGrep(sbo).source()) return [sbo_versions, sources]
[ "def", "sbo_version_source", "(", "self", ",", "slackbuilds", ")", ":", "sbo_versions", ",", "sources", "=", "[", "]", ",", "[", "]", "for", "sbo", "in", "slackbuilds", ":", "status", "(", "0.02", ")", "sbo_ver", "=", "\"{0}-{1}\"", ".", "format", "(", ...
Create sbo name with version
[ "Create", "sbo", "name", "with", "version" ]
python
train
Duke-GCB/DukeDSClient
ddsc/cmdparser.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/cmdparser.py#L98-L107
def _path_has_ok_chars(path): """ Validate path for invalid characters. :param path: str possible filesystem path :return: path if it was ok otherwise raises error """ basename = os.path.basename(path) if any([bad_char in basename for bad_char in INVALID_PATH_CHARS]): raise argparse.ArgumentTypeError("{} contains invalid characters for a directory.".format(path)) return path
[ "def", "_path_has_ok_chars", "(", "path", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "if", "any", "(", "[", "bad_char", "in", "basename", "for", "bad_char", "in", "INVALID_PATH_CHARS", "]", ")", ":", "raise", "argpar...
Validate path for invalid characters. :param path: str possible filesystem path :return: path if it was ok otherwise raises error
[ "Validate", "path", "for", "invalid", "characters", ".", ":", "param", "path", ":", "str", "possible", "filesystem", "path", ":", "return", ":", "path", "if", "it", "was", "ok", "otherwise", "raises", "error" ]
python
train
python-constraint/python-constraint
constraint/__init__.py
https://github.com/python-constraint/python-constraint/blob/e23fe9852cddddf1c3e258e03f2175df24b4c702/constraint/__init__.py#L880-L919
def forwardCheck(self, variables, domains, assignments, _unassigned=Unassigned): """ Helper method for generic forward checking Currently, this method acts only when there's a single unassigned variable. @param variables: Variables affected by that constraint, in the same order provided by the user @type variables: sequence @param domains: Dictionary mapping variables to their domains @type domains: dict @param assignments: Dictionary mapping assigned variables to their current assumed value @type assignments: dict @return: Boolean value stating if this constraint is currently broken or not @rtype: bool """ unassignedvariable = _unassigned for variable in variables: if variable not in assignments: if unassignedvariable is _unassigned: unassignedvariable = variable else: break else: if unassignedvariable is not _unassigned: # Remove from the unassigned variable domain's all # values which break our variable's constraints. domain = domains[unassignedvariable] if domain: for value in domain[:]: assignments[unassignedvariable] = value if not self(variables, domains, assignments): domain.hideValue(value) del assignments[unassignedvariable] if not domain: return False return True
[ "def", "forwardCheck", "(", "self", ",", "variables", ",", "domains", ",", "assignments", ",", "_unassigned", "=", "Unassigned", ")", ":", "unassignedvariable", "=", "_unassigned", "for", "variable", "in", "variables", ":", "if", "variable", "not", "in", "assi...
Helper method for generic forward checking Currently, this method acts only when there's a single unassigned variable. @param variables: Variables affected by that constraint, in the same order provided by the user @type variables: sequence @param domains: Dictionary mapping variables to their domains @type domains: dict @param assignments: Dictionary mapping assigned variables to their current assumed value @type assignments: dict @return: Boolean value stating if this constraint is currently broken or not @rtype: bool
[ "Helper", "method", "for", "generic", "forward", "checking" ]
python
train
Grunny/zap-cli
zapcli/helpers.py
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/helpers.py#L19-L29
def validate_ids(ctx, param, value): """Validate a list of IDs and convert them to a list.""" if not value: return None ids = [x.strip() for x in value.split(',')] for id_item in ids: if not id_item.isdigit(): raise click.BadParameter('Non-numeric value "{0}" provided for an ID.'.format(id_item)) return ids
[ "def", "validate_ids", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "not", "value", ":", "return", "None", "ids", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "value", ".", "split", "(", "','", ")", "]", "for", "id_item", ...
Validate a list of IDs and convert them to a list.
[ "Validate", "a", "list", "of", "IDs", "and", "convert", "them", "to", "a", "list", "." ]
python
train
schlamar/latexmk.py
latexmake.py
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L372-L426
def run(self): '''Run the LaTeX compilation.''' # store files self.old_dir = [] if self.opt.clean: self.old_dir = os.listdir('.') cite_counter, toc_file, gloss_files = self._read_latex_files() self.latex_run() self.read_glossaries() gloss_changed = self.makeindex_runs(gloss_files) if gloss_changed or self._is_toc_changed(toc_file): self.latex_run() if self._need_bib_run(cite_counter): self.bibtex_run() self.latex_run() while (self.latex_run_counter < MAX_RUNS): if not self.need_latex_rerun(): break self.latex_run() if self.opt.check_cite: cites = set() with open('%s.aux' % self.project_name) as fobj: aux_content = fobj.read() for match in BIBCITE_PATTERN.finditer(aux_content): name = match.groups()[0] cites.add(name) with open('%s.bib' % self.bib_file) as fobj: bib_content = fobj.read() for match in BIBENTRY_PATTERN.finditer(bib_content): name = match.groups()[0] if name not in cites: self.log.info('Bib entry not cited: "%s"' % name) if self.opt.clean: ending = '.dvi' if self.opt.pdf: ending = '.pdf' for fname in os.listdir('.'): if not (fname in self.old_dir or fname.endswith(ending)): try: os.remove(fname) except IOError: pass if self.opt.preview: self.open_preview()
[ "def", "run", "(", "self", ")", ":", "# store files", "self", ".", "old_dir", "=", "[", "]", "if", "self", ".", "opt", ".", "clean", ":", "self", ".", "old_dir", "=", "os", ".", "listdir", "(", "'.'", ")", "cite_counter", ",", "toc_file", ",", "glo...
Run the LaTeX compilation.
[ "Run", "the", "LaTeX", "compilation", "." ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L283-L289
def applet_remove_tags(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /applet-xxxx/removeTags API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FremoveTags """ return DXHTTPRequest('/%s/removeTags' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "applet_remove_tags", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/removeTags'", "%", "object_id", ",", "input_params", ",", "always_retr...
Invokes the /applet-xxxx/removeTags API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FremoveTags
[ "Invokes", "the", "/", "applet", "-", "xxxx", "/", "removeTags", "API", "method", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/learn/get_realtime_data.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/learn/get_realtime_data.py#L8-L33
def _example_stock_quote(quote_ctx): """ 获取批量报价,输出 股票名称,时间,当前价,开盘价,最高价,最低价,昨天收盘价,成交量,成交额,换手率,振幅,股票状态 """ stock_code_list = ["US.AAPL", "HK.00700"] # subscribe "QUOTE" ret_status, ret_data = quote_ctx.subscribe(stock_code_list, ft.SubType.QUOTE) if ret_status != ft.RET_OK: print("%s %s: %s" % (stock_code_list, "QUOTE", ret_data)) exit() ret_status, ret_data = quote_ctx.query_subscription() if ret_status != ft.RET_OK: print(ret_status) exit() print(ret_data) ret_status, ret_data = quote_ctx.get_stock_quote(stock_code_list) if ret_status != ft.RET_OK: print(ret_data) exit() quote_table = ret_data print("QUOTE_TABLE") print(quote_table)
[ "def", "_example_stock_quote", "(", "quote_ctx", ")", ":", "stock_code_list", "=", "[", "\"US.AAPL\"", ",", "\"HK.00700\"", "]", "# subscribe \"QUOTE\"", "ret_status", ",", "ret_data", "=", "quote_ctx", ".", "subscribe", "(", "stock_code_list", ",", "ft", ".", "Su...
获取批量报价,输出 股票名称,时间,当前价,开盘价,最高价,最低价,昨天收盘价,成交量,成交额,换手率,振幅,股票状态
[ "获取批量报价,输出", "股票名称,时间,当前价,开盘价,最高价,最低价,昨天收盘价,成交量,成交额,换手率,振幅,股票状态" ]
python
train
oscarbranson/latools
latools/latools.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3290-L3367
def trace_plots(self, analytes=None, samples=None, ranges=False, focus=None, outdir=None, filt=None, scale='log', figsize=[10, 4], stats=False, stat='nanmean', err='nanstd', subset='All_Analyses'): """ Plot analytes as a function of time. Parameters ---------- analytes : optional, array_like or str The analyte(s) to plot. Defaults to all analytes. samples: optional, array_like or str The sample(s) to plot. Defaults to all samples. ranges : bool Whether or not to show the signal/backgroudn regions identified by 'autorange'. focus : str The focus 'stage' of the analysis to plot. Can be 'rawdata', 'despiked':, 'signal', 'background', 'bkgsub', 'ratios' or 'calibrated'. outdir : str Path to a directory where you'd like the plots to be saved. Defaults to 'reports/[focus]' in your data directory. filt : str, dict or bool Either logical filter expression contained in a str, a dict of expressions specifying the filter string to use for each analyte or a boolean. Passed to `grab_filt`. scale : str If 'log', plots the data on a log scale. figsize : array_like Array of length 2 specifying figure [width, height] in inches. stats : bool Whether or not to overlay the mean and standard deviations for each trace. stat, err: str The names of the statistic and error components to plot. Deafaults to 'nanmean' and 'nanstd'. Returns ------- None """ if focus is None: focus = self.focus_stage if outdir is None: outdir = self.report_dir + '/' + focus if not os.path.isdir(outdir): os.mkdir(outdir) # if samples is not None: # subset = self.make_subset(samples) if subset is not None: samples = self._get_samples(subset) elif samples is None: samples = self.subsets['All_Analyses'] elif isinstance(samples, str): samples = [samples] with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog: for s in samples: f, a = self.data[s].tplot(analytes=analytes, figsize=figsize, scale=scale, filt=filt, ranges=ranges, stats=stats, stat=stat, err=err, focus_stage=focus) # ax = fig.axes[0] # for l, u in s.sigrng: # ax.axvspan(l, u, color='r', alpha=0.1) # for l, u in s.bkgrng: # ax.axvspan(l, u, color='k', alpha=0.1) f.savefig(outdir + '/' + s + '_traces.pdf') # TODO: on older(?) computers raises # 'OSError: [Errno 24] Too many open files' plt.close(f) prog.update() return
[ "def", "trace_plots", "(", "self", ",", "analytes", "=", "None", ",", "samples", "=", "None", ",", "ranges", "=", "False", ",", "focus", "=", "None", ",", "outdir", "=", "None", ",", "filt", "=", "None", ",", "scale", "=", "'log'", ",", "figsize", ...
Plot analytes as a function of time. Parameters ---------- analytes : optional, array_like or str The analyte(s) to plot. Defaults to all analytes. samples: optional, array_like or str The sample(s) to plot. Defaults to all samples. ranges : bool Whether or not to show the signal/backgroudn regions identified by 'autorange'. focus : str The focus 'stage' of the analysis to plot. Can be 'rawdata', 'despiked':, 'signal', 'background', 'bkgsub', 'ratios' or 'calibrated'. outdir : str Path to a directory where you'd like the plots to be saved. Defaults to 'reports/[focus]' in your data directory. filt : str, dict or bool Either logical filter expression contained in a str, a dict of expressions specifying the filter string to use for each analyte or a boolean. Passed to `grab_filt`. scale : str If 'log', plots the data on a log scale. figsize : array_like Array of length 2 specifying figure [width, height] in inches. stats : bool Whether or not to overlay the mean and standard deviations for each trace. stat, err: str The names of the statistic and error components to plot. Deafaults to 'nanmean' and 'nanstd'. Returns ------- None
[ "Plot", "analytes", "as", "a", "function", "of", "time", "." ]
python
test
iotaledger/iota.lib.py
iota/api.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/api.py#L611-L708
def get_inputs( self, start=0, stop=None, threshold=None, security_level=None, ): # type: (int, Optional[int], Optional[int], Optional[int]) -> dict """ Gets all possible inputs of a seed and returns them, along with the total balance. This is either done deterministically (by generating all addresses until :py:meth:`find_transactions` returns an empty result), or by providing a key range to search. :param start: Starting key index. Defaults to 0. :param stop: Stop before this index. Note that this parameter behaves like the ``stop`` attribute in a :py:class:`slice` object; the stop index is *not* included in the result. If ``None`` (default), then this method will not stop until it finds an unused address. :param threshold: If set, determines the minimum threshold for a successful result: - As soon as this threshold is reached, iteration will stop. - If the command runs out of addresses before the threshold is reached, an exception is raised. .. note:: This method does not attempt to "optimize" the result (e.g., smallest number of inputs, get as close to ``threshold`` as possible, etc.); it simply accumulates inputs in order until the threshold is met. If ``threshold`` is 0, the first address in the key range with a non-zero balance will be returned (if it exists). If ``threshold`` is ``None`` (default), this method will return **all** inputs in the specified key range. :param security_level: Number of iterations to use when generating new addresses (see :py:meth:`get_new_addresses`). This value must be between 1 and 3, inclusive. If not set, defaults to :py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`. :return: Dict with the following structure:: { 'inputs': List[Address], Addresses with nonzero balances that can be used as inputs. 'totalBalance': int, Aggregate balance from all matching addresses. } Note that each Address in the result has its ``balance`` attribute set. Example: .. code-block:: python response = iota.get_inputs(...) input0 = response['inputs'][0] # type: Address input0.balance # 42 :raise: - :py:class:`iota.adapter.BadApiResponse` if ``threshold`` is not met. Not applicable if ``threshold`` is ``None``. References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getinputs """ return extended.GetInputsCommand(self.adapter)( seed=self.seed, start=start, stop=stop, threshold=threshold, securityLevel=security_level )
[ "def", "get_inputs", "(", "self", ",", "start", "=", "0", ",", "stop", "=", "None", ",", "threshold", "=", "None", ",", "security_level", "=", "None", ",", ")", ":", "# type: (int, Optional[int], Optional[int], Optional[int]) -> dict", "return", "extended", ".", ...
Gets all possible inputs of a seed and returns them, along with the total balance. This is either done deterministically (by generating all addresses until :py:meth:`find_transactions` returns an empty result), or by providing a key range to search. :param start: Starting key index. Defaults to 0. :param stop: Stop before this index. Note that this parameter behaves like the ``stop`` attribute in a :py:class:`slice` object; the stop index is *not* included in the result. If ``None`` (default), then this method will not stop until it finds an unused address. :param threshold: If set, determines the minimum threshold for a successful result: - As soon as this threshold is reached, iteration will stop. - If the command runs out of addresses before the threshold is reached, an exception is raised. .. note:: This method does not attempt to "optimize" the result (e.g., smallest number of inputs, get as close to ``threshold`` as possible, etc.); it simply accumulates inputs in order until the threshold is met. If ``threshold`` is 0, the first address in the key range with a non-zero balance will be returned (if it exists). If ``threshold`` is ``None`` (default), this method will return **all** inputs in the specified key range. :param security_level: Number of iterations to use when generating new addresses (see :py:meth:`get_new_addresses`). This value must be between 1 and 3, inclusive. If not set, defaults to :py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`. :return: Dict with the following structure:: { 'inputs': List[Address], Addresses with nonzero balances that can be used as inputs. 'totalBalance': int, Aggregate balance from all matching addresses. } Note that each Address in the result has its ``balance`` attribute set. Example: .. code-block:: python response = iota.get_inputs(...) input0 = response['inputs'][0] # type: Address input0.balance # 42 :raise: - :py:class:`iota.adapter.BadApiResponse` if ``threshold`` is not met. Not applicable if ``threshold`` is ``None``. References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getinputs
[ "Gets", "all", "possible", "inputs", "of", "a", "seed", "and", "returns", "them", "along", "with", "the", "total", "balance", "." ]
python
test
rbarrois/restricted_pkg
restricted_pkg/base.py
https://github.com/rbarrois/restricted_pkg/blob/abbd3cb33ed85af02fbb531fd85dda9c1b070c85/restricted_pkg/base.py#L153-L160
def get_clean_url(self): """Retrieve the clean, full URL - including username/password.""" if self.needs_auth: self.prompt_auth() url = RepositoryURL(self.url.full_url) url.username = self.username url.password = self.password return url
[ "def", "get_clean_url", "(", "self", ")", ":", "if", "self", ".", "needs_auth", ":", "self", ".", "prompt_auth", "(", ")", "url", "=", "RepositoryURL", "(", "self", ".", "url", ".", "full_url", ")", "url", ".", "username", "=", "self", ".", "username",...
Retrieve the clean, full URL - including username/password.
[ "Retrieve", "the", "clean", "full", "URL", "-", "including", "username", "/", "password", "." ]
python
train
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L3631-L3709
def vm_netstats(vm_=None, **kwargs): ''' Return combined network counters used by the vms on this hyper in a list of dicts: :param vm_: domain name :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 .. code-block:: python [ 'your-vm': { 'rx_bytes' : 0, 'rx_packets' : 0, 'rx_errs' : 0, 'rx_drop' : 0, 'tx_bytes' : 0, 'tx_packets' : 0, 'tx_errs' : 0, 'tx_drop' : 0 }, ... ] If you pass a VM name in as an argument then it will return info for just the named VM, otherwise it will return all VMs. CLI Example: .. code-block:: bash salt '*' virt.vm_netstats ''' def _info(dom): ''' Compute network stats of a domain ''' nics = _get_nics(dom) ret = { 'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0, 'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0 } for attrs in six.itervalues(nics): if 'target' in attrs: dev = attrs['target'] stats = dom.interfaceStats(dev) ret['rx_bytes'] += stats[0] ret['rx_packets'] += stats[1] ret['rx_errs'] += stats[2] ret['rx_drop'] += stats[3] ret['tx_bytes'] += stats[4] ret['tx_packets'] += stats[5] ret['tx_errs'] += stats[6] ret['tx_drop'] += stats[7] return ret info = {} conn = __get_conn(**kwargs) if vm_: info[vm_] = _info(_get_domain(conn, vm_)) else: for domain in _get_domain(conn, iterable=True): info[domain.name()] = _info(domain) conn.close() return info
[ "def", "vm_netstats", "(", "vm_", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "_info", "(", "dom", ")", ":", "'''\n Compute network stats of a domain\n '''", "nics", "=", "_get_nics", "(", "dom", ")", "ret", "=", "{", "'rx_bytes'", "...
Return combined network counters used by the vms on this hyper in a list of dicts: :param vm_: domain name :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 .. code-block:: python [ 'your-vm': { 'rx_bytes' : 0, 'rx_packets' : 0, 'rx_errs' : 0, 'rx_drop' : 0, 'tx_bytes' : 0, 'tx_packets' : 0, 'tx_errs' : 0, 'tx_drop' : 0 }, ... ] If you pass a VM name in as an argument then it will return info for just the named VM, otherwise it will return all VMs. CLI Example: .. code-block:: bash salt '*' virt.vm_netstats
[ "Return", "combined", "network", "counters", "used", "by", "the", "vms", "on", "this", "hyper", "in", "a", "list", "of", "dicts", ":" ]
python
train
etingof/pyasn1
pyasn1/type/univ.py
https://github.com/etingof/pyasn1/blob/25cf116ef8d11bb0e08454c0f3635c9f4002c2d6/pyasn1/type/univ.py#L581-L603
def fromHexString(cls, value, internalFormat=False, prepend=None): """Create a |ASN.1| object initialized from the hex string. Parameters ---------- value: :class:`str` Text string like 'DEADBEEF' """ try: value = SizedInteger(value, 16).setBitLength(len(value) * 4) except ValueError: raise error.PyAsn1Error('%s.fromHexString() error: %s' % (cls.__name__, sys.exc_info()[1])) if prepend is not None: value = SizedInteger( (SizedInteger(prepend) << len(value)) | value ).setBitLength(len(prepend) + len(value)) if not internalFormat: value = cls(value) return value
[ "def", "fromHexString", "(", "cls", ",", "value", ",", "internalFormat", "=", "False", ",", "prepend", "=", "None", ")", ":", "try", ":", "value", "=", "SizedInteger", "(", "value", ",", "16", ")", ".", "setBitLength", "(", "len", "(", "value", ")", ...
Create a |ASN.1| object initialized from the hex string. Parameters ---------- value: :class:`str` Text string like 'DEADBEEF'
[ "Create", "a", "|ASN", ".", "1|", "object", "initialized", "from", "the", "hex", "string", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L3842-L3864
def remove_users_from_account_group(self, account_id, group_id, **kwargs): # noqa: E501 """Remove users from a group. # noqa: E501 An endpoint for removing users from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.remove_users_from_account_group(account_id, group_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str group_id: (required) :param SubjectList body: :return: UpdatedResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.remove_users_from_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501 else: (data) = self.remove_users_from_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501 return data
[ "def", "remove_users_from_account_group", "(", "self", ",", "account_id", ",", "group_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", "...
Remove users from a group. # noqa: E501 An endpoint for removing users from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.remove_users_from_account_group(account_id, group_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str group_id: (required) :param SubjectList body: :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
[ "Remove", "users", "from", "a", "group", ".", "#", "noqa", ":", "E501" ]
python
train
idlesign/django-sitetree
sitetree/sitetreeapp.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/sitetreeapp.py#L72-L83
def get_sitetree(): """Returns SiteTree (thread-singleton) object, implementing utility methods. :rtype: SiteTree """ sitetree = getattr(_THREAD_LOCAL, _THREAD_SITETREE, None) if sitetree is None: sitetree = SiteTree() setattr(_THREAD_LOCAL, _THREAD_SITETREE, sitetree) return sitetree
[ "def", "get_sitetree", "(", ")", ":", "sitetree", "=", "getattr", "(", "_THREAD_LOCAL", ",", "_THREAD_SITETREE", ",", "None", ")", "if", "sitetree", "is", "None", ":", "sitetree", "=", "SiteTree", "(", ")", "setattr", "(", "_THREAD_LOCAL", ",", "_THREAD_SITE...
Returns SiteTree (thread-singleton) object, implementing utility methods. :rtype: SiteTree
[ "Returns", "SiteTree", "(", "thread", "-", "singleton", ")", "object", "implementing", "utility", "methods", "." ]
python
test
BlackEarth/bxml
bxml/xlsx.py
https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xlsx.py#L50-L57
def sheets(self): """return the sheets of data.""" data = Dict() for src in [src for src in self.zipfile.namelist() if 'xl/worksheets/' in src]: name = os.path.splitext(os.path.basename(src))[0] xml = self.xml(src) data[name] = xml return data
[ "def", "sheets", "(", "self", ")", ":", "data", "=", "Dict", "(", ")", "for", "src", "in", "[", "src", "for", "src", "in", "self", ".", "zipfile", ".", "namelist", "(", ")", "if", "'xl/worksheets/'", "in", "src", "]", ":", "name", "=", "os", ".",...
return the sheets of data.
[ "return", "the", "sheets", "of", "data", "." ]
python
train
saltstack/salt
salt/states/influxdb08_user.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/influxdb08_user.py#L25-L90
def present(name, passwd, database=None, user=None, password=None, host=None, port=None): ''' Ensure that the cluster admin or database user is present. name The name of the user to manage passwd The password of the user database The database to create the user in user The user to connect as (must be able to create the user) password The password of the user host The host to connect to port The port to connect to ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} # check if db does not exist if database and not __salt__['influxdb08.db_exists']( database, user, password, host, port): ret['result'] = False ret['comment'] = 'Database {0} does not exist'.format(database) return ret # check if user exists if not __salt__['influxdb08.user_exists']( name, database, user, password, host, port): if __opts__['test']: ret['result'] = None ret['comment'] = 'User {0} is not present and needs to be created'\ .format(name) return ret # The user is not present, make it! if __salt__['influxdb08.user_create']( name, passwd, database, user, password, host, port): ret['comment'] = 'User {0} has been created'.format(name) ret['changes'][name] = 'Present' return ret else: ret['comment'] = 'Failed to create user {0}'.format(name) ret['result'] = False return ret # fallback ret['comment'] = 'User {0} is already present'.format(name) return ret
[ "def", "present", "(", "name", ",", "passwd", ",", "database", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'chang...
Ensure that the cluster admin or database user is present. name The name of the user to manage passwd The password of the user database The database to create the user in user The user to connect as (must be able to create the user) password The password of the user host The host to connect to port The port to connect to
[ "Ensure", "that", "the", "cluster", "admin", "or", "database", "user", "is", "present", "." ]
python
train
saltstack/salt
salt/modules/localemod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/localemod.py#L65-L97
def _localectl_status(): ''' Parse localectl status into a dict. :return: dict ''' if salt.utils.path.which('localectl') is None: raise CommandExecutionError('Unable to find "localectl"') ret = {} locale_ctl_out = (__salt__['cmd.run']('localectl status') or '').strip() ctl_key = None for line in locale_ctl_out.splitlines(): if ': ' in line: # Keys are separate with ":" and a space (!). ctl_key, ctl_data = line.split(': ') ctl_key = ctl_key.strip().lower().replace(' ', '_') else: ctl_data = line.strip() if not ctl_data: continue if ctl_key: if '=' in ctl_data: loc_set = ctl_data.split('=') if len(loc_set) == 2: if ctl_key not in ret: ret[ctl_key] = {} ret[ctl_key][loc_set[0]] = loc_set[1] else: ret[ctl_key] = {'data': None if ctl_data == 'n/a' else ctl_data} if not ret: log.debug("Unable to find any locale information inside the following data:\n%s", locale_ctl_out) raise CommandExecutionError('Unable to parse result of "localectl"') return ret
[ "def", "_localectl_status", "(", ")", ":", "if", "salt", ".", "utils", ".", "path", ".", "which", "(", "'localectl'", ")", "is", "None", ":", "raise", "CommandExecutionError", "(", "'Unable to find \"localectl\"'", ")", "ret", "=", "{", "}", "locale_ctl_out", ...
Parse localectl status into a dict. :return: dict
[ "Parse", "localectl", "status", "into", "a", "dict", ".", ":", "return", ":", "dict" ]
python
train
Azure/azure-event-hubs-python
azure/eventprocessorhost/partition_manager.py
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/partition_manager.py#L63-L69
async def stop_async(self): """ Terminiates the partition manger. """ self.cancellation_token.cancel() if self.run_task and not self.run_task.done(): await self.run_task
[ "async", "def", "stop_async", "(", "self", ")", ":", "self", ".", "cancellation_token", ".", "cancel", "(", ")", "if", "self", ".", "run_task", "and", "not", "self", ".", "run_task", ".", "done", "(", ")", ":", "await", "self", ".", "run_task" ]
Terminiates the partition manger.
[ "Terminiates", "the", "partition", "manger", "." ]
python
train
urinieto/msaf
msaf/algorithms/sf/segmenter.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/sf/segmenter.py#L116-L209
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated times for the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments. """ # Structural Features params Mp = self.config["Mp_adaptive"] # Size of the adaptive threshold for # peak picking od = self.config["offset_thres"] # Offset coefficient for adaptive # thresholding M = self.config["M_gaussian"] # Size of gaussian kernel in beats m = self.config["m_embedded"] # Number of embedded dimensions k = self.config["k_nearest"] # k*N-nearest neighbors for the # recurrence plot # Preprocess to obtain features, times, and input boundary indeces F = self._preprocess() # Normalize F = U.normalize(F, norm_type=self.config["bound_norm_feats"]) # Check size in case the track is too short if F.shape[0] > 20: if self.framesync: red = 0.1 F_copy = np.copy(F) F = librosa.util.utils.sync( F.T, np.linspace(0, F.shape[0], num=F.shape[0] * red), pad=False).T # Emedding the feature space (i.e. shingle) E = embedded_space(F, m) # plt.imshow(E.T, interpolation="nearest", aspect="auto"); plt.show() # Recurrence matrix R = librosa.segment.recurrence_matrix( E.T, k=k * int(F.shape[0]), width=1, # zeros from the diagonal metric="euclidean", sym=True).astype(np.float32) # Circular shift L = circular_shift(R) #plt.imshow(L, interpolation="nearest", cmap=plt.get_cmap("binary")) #plt.show() # Obtain structural features by filtering the lag matrix SF = gaussian_filter(L.T, M=M, axis=1) SF = gaussian_filter(L.T, M=1, axis=0) # plt.imshow(SF.T, interpolation="nearest", aspect="auto") #plt.show() # Compute the novelty curve nc = compute_nc(SF) # Find peaks in the novelty curve est_bounds = pick_peaks(nc, L=Mp, offset_denom=od) # Re-align embedded space est_bounds = np.asarray(est_bounds) + int(np.ceil(m / 2.)) if self.framesync: est_bounds /= red F = F_copy else: est_bounds = [] # Add first and last frames est_idxs = np.concatenate(([0], est_bounds, [F.shape[0] - 1])) est_idxs = np.unique(est_idxs) assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1 # Empty labels est_labels = np.ones(len(est_idxs) - 1) * - 1 # Post process estimations est_idxs, est_labels = self._postprocess(est_idxs, est_labels) # plt.figure(1) # plt.plot(nc); # [plt.axvline(p, color="m", ymin=.6) for p in est_bounds] # [plt.axvline(b, color="b", ymax=.6, ymin=.3) for b in brian_bounds] # [plt.axvline(b, color="g", ymax=.3) for b in ann_bounds] # plt.show() return est_idxs, est_labels
[ "def", "processFlat", "(", "self", ")", ":", "# Structural Features params", "Mp", "=", "self", ".", "config", "[", "\"Mp_adaptive\"", "]", "# Size of the adaptive threshold for", "# peak picking", "od", "=", "self", ".", "config", "[", "\"offset_thres\"", "]", "# O...
Main process. Returns ------- est_idxs : np.array(N) Estimated times for the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments.
[ "Main", "process", ".", "Returns", "-------", "est_idxs", ":", "np", ".", "array", "(", "N", ")", "Estimated", "times", "for", "the", "segment", "boundaries", "in", "frame", "indeces", ".", "est_labels", ":", "np", ".", "array", "(", "N", "-", "1", ")"...
python
test
bids-standard/pybids
bids/variables/variables.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L315-L352
def to_dense(self, sampling_rate): ''' Convert the current sparse column to a dense representation. Returns: A DenseRunVariable. Args: sampling_rate (int, str): Sampling rate (in Hz) to use when constructing the DenseRunVariable. Returns: A DenseRunVariable. ''' duration = int(math.ceil(sampling_rate * self.get_duration())) ts = np.zeros(duration, dtype=self.values.dtype) onsets = np.round(self.onset * sampling_rate).astype(int) durations = np.round(self.duration * sampling_rate).astype(int) run_i, start, last_ind = 0, 0, 0 for i, val in enumerate(self.values.values): if onsets[i] < last_ind: start += self.run_info[run_i].duration * sampling_rate run_i += 1 _onset = int(start + onsets[i]) _offset = int(_onset + durations[i]) if _onset >= duration: warnings.warn("The onset time of a variable seems to exceed the runs" "duration, hence runs are incremented by one internally.") ts[_onset:_offset] = val last_ind = onsets[i] run_info = list(self.run_info) return DenseRunVariable( name=self.name, values=ts, run_info=run_info, source=self.source, sampling_rate=sampling_rate)
[ "def", "to_dense", "(", "self", ",", "sampling_rate", ")", ":", "duration", "=", "int", "(", "math", ".", "ceil", "(", "sampling_rate", "*", "self", ".", "get_duration", "(", ")", ")", ")", "ts", "=", "np", ".", "zeros", "(", "duration", ",", "dtype"...
Convert the current sparse column to a dense representation. Returns: A DenseRunVariable. Args: sampling_rate (int, str): Sampling rate (in Hz) to use when constructing the DenseRunVariable. Returns: A DenseRunVariable.
[ "Convert", "the", "current", "sparse", "column", "to", "a", "dense", "representation", ".", "Returns", ":", "A", "DenseRunVariable", "." ]
python
train
ray-project/ray
python/ray/experimental/tf_utils.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/tf_utils.py#L139-L157
def set_flat(self, new_weights): """Sets the weights to new_weights, converting from a flat array. Note: You can only set all weights in the network using this function, i.e., the length of the array must match get_flat_size. Args: new_weights (np.ndarray): Flat array containing weights. """ self._check_sess() shapes = [v.get_shape().as_list() for v in self.variables.values()] arrays = unflatten(new_weights, shapes) placeholders = [ self.placeholders[k] for k, v in self.variables.items() ] self.sess.run( list(self.assignment_nodes.values()), feed_dict=dict(zip(placeholders, arrays)))
[ "def", "set_flat", "(", "self", ",", "new_weights", ")", ":", "self", ".", "_check_sess", "(", ")", "shapes", "=", "[", "v", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "for", "v", "in", "self", ".", "variables", ".", "values", "(", ")", ...
Sets the weights to new_weights, converting from a flat array. Note: You can only set all weights in the network using this function, i.e., the length of the array must match get_flat_size. Args: new_weights (np.ndarray): Flat array containing weights.
[ "Sets", "the", "weights", "to", "new_weights", "converting", "from", "a", "flat", "array", "." ]
python
train
idlesign/django-sitetree
sitetree/sitetreeapp.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/sitetreeapp.py#L1024-L1053
def filter_items(self, items, navigation_type=None): """Filters sitetree item's children if hidden and by navigation type. NB: We do not apply any filters to sitetree in admin app. :param list items: :param str|unicode navigation_type: sitetree, breadcrumbs, menu :rtype: list """ if self.current_app_is_admin(): return items items_filtered = [] context = self.current_page_context check_access = self.check_access for item in items: if item.hidden: continue if not check_access(item, context): continue if not getattr(item, 'in%s' % navigation_type, True): # Hidden for current nav type continue items_filtered.append(item) return items_filtered
[ "def", "filter_items", "(", "self", ",", "items", ",", "navigation_type", "=", "None", ")", ":", "if", "self", ".", "current_app_is_admin", "(", ")", ":", "return", "items", "items_filtered", "=", "[", "]", "context", "=", "self", ".", "current_page_context"...
Filters sitetree item's children if hidden and by navigation type. NB: We do not apply any filters to sitetree in admin app. :param list items: :param str|unicode navigation_type: sitetree, breadcrumbs, menu :rtype: list
[ "Filters", "sitetree", "item", "s", "children", "if", "hidden", "and", "by", "navigation", "type", "." ]
python
test
Yelp/pyramid_zipkin
pyramid_zipkin/tween.py
https://github.com/Yelp/pyramid_zipkin/blob/ed8581b4466e9ce93d6cf3ecfbdde5369932a80b/pyramid_zipkin/tween.py#L143-L196
def zipkin_tween(handler, registry): """ Factory for pyramid tween to handle zipkin server logging. Note that even if the request isn't sampled, Zipkin attributes are generated and pushed into threadlocal storage, so `create_http_headers_for_new_span` and `zipkin_span` will have access to the proper Zipkin state. Consumes custom create_zipkin_attr function if one is set in the pyramid registry. :param handler: pyramid request handler :param registry: pyramid app registry :returns: pyramid tween """ def tween(request): zipkin_settings = _get_settings_from_request(request) tracer = get_default_tracer() tween_kwargs = dict( service_name=zipkin_settings.service_name, span_name=zipkin_settings.span_name, zipkin_attrs=zipkin_settings.zipkin_attrs, transport_handler=zipkin_settings.transport_handler, host=zipkin_settings.host, port=zipkin_settings.port, add_logging_annotation=zipkin_settings.add_logging_annotation, report_root_timestamp=zipkin_settings.report_root_timestamp, context_stack=zipkin_settings.context_stack, max_span_batch_size=zipkin_settings.max_span_batch_size, encoding=zipkin_settings.encoding, kind=Kind.SERVER, ) if zipkin_settings.firehose_handler is not None: tween_kwargs['firehose_handler'] = zipkin_settings.firehose_handler with tracer.zipkin_span(**tween_kwargs) as zipkin_context: response = handler(request) if zipkin_settings.use_pattern_as_span_name and request.matched_route: zipkin_context.override_span_name('{} {}'.format( request.method, request.matched_route.pattern, )) zipkin_context.update_binary_annotations( get_binary_annotations(request, response), ) if zipkin_settings.post_handler_hook: zipkin_settings.post_handler_hook(request, response) return response return tween
[ "def", "zipkin_tween", "(", "handler", ",", "registry", ")", ":", "def", "tween", "(", "request", ")", ":", "zipkin_settings", "=", "_get_settings_from_request", "(", "request", ")", "tracer", "=", "get_default_tracer", "(", ")", "tween_kwargs", "=", "dict", "...
Factory for pyramid tween to handle zipkin server logging. Note that even if the request isn't sampled, Zipkin attributes are generated and pushed into threadlocal storage, so `create_http_headers_for_new_span` and `zipkin_span` will have access to the proper Zipkin state. Consumes custom create_zipkin_attr function if one is set in the pyramid registry. :param handler: pyramid request handler :param registry: pyramid app registry :returns: pyramid tween
[ "Factory", "for", "pyramid", "tween", "to", "handle", "zipkin", "server", "logging", ".", "Note", "that", "even", "if", "the", "request", "isn", "t", "sampled", "Zipkin", "attributes", "are", "generated", "and", "pushed", "into", "threadlocal", "storage", "so"...
python
train
KelSolaar/Umbra
umbra/ui/widgets/variable_QPushButton.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/variable_QPushButton.py#L175-L188
def labels(self, value): """ Setter for **self.__labels** attribute. :param value: Attribute value. :type value: tuple """ if value is not None: assert type(value) is tuple, "'{0}' attribute: '{1}' type is not 'tuple'!".format("labels", value) assert len(value) == 2, "'{0}' attribute: '{1}' length should be '2'!".format("labels", value) for index in range(len(value)): assert type(value[index]) is unicode, \ "'{0}' attribute element '{1}': '{2}' type is not 'unicode'!".format("labels", index, value) self.__labels = value
[ "def", "labels", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "tuple", ",", "\"'{0}' attribute: '{1}' type is not 'tuple'!\"", ".", "format", "(", "\"labels\"", ",", "value", ")", ...
Setter for **self.__labels** attribute. :param value: Attribute value. :type value: tuple
[ "Setter", "for", "**", "self", ".", "__labels", "**", "attribute", "." ]
python
train
gregmuellegger/django-superform
django_superform/fields.py
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L87-L96
def get_kwargs(self, form, name): """ Return the keyword arguments that are used to instantiate the formset. """ kwargs = { 'prefix': self.get_prefix(form, name), 'initial': self.get_initial(form, name), } kwargs.update(self.default_kwargs) return kwargs
[ "def", "get_kwargs", "(", "self", ",", "form", ",", "name", ")", ":", "kwargs", "=", "{", "'prefix'", ":", "self", ".", "get_prefix", "(", "form", ",", "name", ")", ",", "'initial'", ":", "self", ".", "get_initial", "(", "form", ",", "name", ")", "...
Return the keyword arguments that are used to instantiate the formset.
[ "Return", "the", "keyword", "arguments", "that", "are", "used", "to", "instantiate", "the", "formset", "." ]
python
train
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/namespace_range.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L457-L460
def get_namespace_keys(app, limit): """Get namespace keys.""" ns_query = datastore.Query('__namespace__', keys_only=True, _app=app) return list(ns_query.Run(limit=limit, batch_size=limit))
[ "def", "get_namespace_keys", "(", "app", ",", "limit", ")", ":", "ns_query", "=", "datastore", ".", "Query", "(", "'__namespace__'", ",", "keys_only", "=", "True", ",", "_app", "=", "app", ")", "return", "list", "(", "ns_query", ".", "Run", "(", "limit",...
Get namespace keys.
[ "Get", "namespace", "keys", "." ]
python
train
twisted/mantissa
xmantissa/liveform.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/liveform.py#L481-L488
def _coerceSingleRepetition(self, dataSet): """ Make a new liveform with our parameters, and get it to coerce our data for us. """ # make a liveform because there is some logic in _coerced form = LiveForm(lambda **k: None, self.parameters, self.name) return form.fromInputs(dataSet)
[ "def", "_coerceSingleRepetition", "(", "self", ",", "dataSet", ")", ":", "# make a liveform because there is some logic in _coerced", "form", "=", "LiveForm", "(", "lambda", "*", "*", "k", ":", "None", ",", "self", ".", "parameters", ",", "self", ".", "name", ")...
Make a new liveform with our parameters, and get it to coerce our data for us.
[ "Make", "a", "new", "liveform", "with", "our", "parameters", "and", "get", "it", "to", "coerce", "our", "data", "for", "us", "." ]
python
train
wavefrontHQ/python-client
wavefront_api_client/models/response_status.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/models/response_status.py#L117-L133
def result(self, result): """Sets the result of this ResponseStatus. :param result: The result of this ResponseStatus. # noqa: E501 :type: str """ if result is None: raise ValueError("Invalid value for `result`, must not be `None`") # noqa: E501 allowed_values = ["OK", "ERROR"] # noqa: E501 if result not in allowed_values: raise ValueError( "Invalid value for `result` ({0}), must be one of {1}" # noqa: E501 .format(result, allowed_values) ) self._result = result
[ "def", "result", "(", "self", ",", "result", ")", ":", "if", "result", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `result`, must not be `None`\"", ")", "# noqa: E501", "allowed_values", "=", "[", "\"OK\"", ",", "\"ERROR\"", "]", "# noqa: E...
Sets the result of this ResponseStatus. :param result: The result of this ResponseStatus. # noqa: E501 :type: str
[ "Sets", "the", "result", "of", "this", "ResponseStatus", "." ]
python
train
ray-project/ray
python/ray/node.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/node.py#L289-L306
def _prepare_socket_file(self, socket_path, default_prefix): """Prepare the socket file for raylet and plasma. This method helps to prepare a socket file. 1. Make the directory if the directory does not exist. 2. If the socket file exists, raise exception. Args: socket_path (string): the socket file to prepare. """ if socket_path is not None: if os.path.exists(socket_path): raise Exception("Socket file {} exists!".format(socket_path)) socket_dir = os.path.dirname(socket_path) try_to_create_directory(socket_dir) return socket_path return self._make_inc_temp( prefix=default_prefix, directory_name=self._sockets_dir)
[ "def", "_prepare_socket_file", "(", "self", ",", "socket_path", ",", "default_prefix", ")", ":", "if", "socket_path", "is", "not", "None", ":", "if", "os", ".", "path", ".", "exists", "(", "socket_path", ")", ":", "raise", "Exception", "(", "\"Socket file {}...
Prepare the socket file for raylet and plasma. This method helps to prepare a socket file. 1. Make the directory if the directory does not exist. 2. If the socket file exists, raise exception. Args: socket_path (string): the socket file to prepare.
[ "Prepare", "the", "socket", "file", "for", "raylet", "and", "plasma", "." ]
python
train
horazont/aioxmpp
aioxmpp/xml.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/xml.py#L1085-L1095
def make_parser(): """ Create a parser which is suitably configured for parsing an XMPP XML stream. It comes equipped with :class:`XMPPLexicalHandler`. """ p = xml.sax.make_parser() p.setFeature(xml.sax.handler.feature_namespaces, True) p.setFeature(xml.sax.handler.feature_external_ges, False) p.setProperty(xml.sax.handler.property_lexical_handler, XMPPLexicalHandler) return p
[ "def", "make_parser", "(", ")", ":", "p", "=", "xml", ".", "sax", ".", "make_parser", "(", ")", "p", ".", "setFeature", "(", "xml", ".", "sax", ".", "handler", ".", "feature_namespaces", ",", "True", ")", "p", ".", "setFeature", "(", "xml", ".", "s...
Create a parser which is suitably configured for parsing an XMPP XML stream. It comes equipped with :class:`XMPPLexicalHandler`.
[ "Create", "a", "parser", "which", "is", "suitably", "configured", "for", "parsing", "an", "XMPP", "XML", "stream", ".", "It", "comes", "equipped", "with", ":", "class", ":", "XMPPLexicalHandler", "." ]
python
train
iotile/coretools
transport_plugins/websocket/iotile_transport_websocket/generic/async_client.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/websocket/iotile_transport_websocket/generic/async_client.py#L192-L230
def register_event(self, name, callback, validator): """Register a callback to receive events. Every event with the matching name will have its payload validated using validator and then will be passed to callback if validation succeeds. Callback must be a normal callback function, coroutines are not allowed. If you need to run a coroutine you are free to schedule it from your callback. Args: name (str): The name of the event that we are listening for callback (callable): The function that should be called when a message that matches validator is received. validator (Verifier): A schema verifier that will validate a received message uniquely """ async def _validate_and_call(message): payload = message.get('payload') try: payload = validator.verify(payload) except ValidationError: self._logger.warning("Dropping invalid payload for event %s, payload=%s", name, payload) return try: result = callback(payload) if inspect.isawaitable(result): await result except: # pylint:disable=bare-except;This is a background logging routine self._logger.error("Error calling callback for event %s, payload=%s", name, payload, exc_info=True) self._manager.every_match(_validate_and_call, type="event", name=name)
[ "def", "register_event", "(", "self", ",", "name", ",", "callback", ",", "validator", ")", ":", "async", "def", "_validate_and_call", "(", "message", ")", ":", "payload", "=", "message", ".", "get", "(", "'payload'", ")", "try", ":", "payload", "=", "val...
Register a callback to receive events. Every event with the matching name will have its payload validated using validator and then will be passed to callback if validation succeeds. Callback must be a normal callback function, coroutines are not allowed. If you need to run a coroutine you are free to schedule it from your callback. Args: name (str): The name of the event that we are listening for callback (callable): The function that should be called when a message that matches validator is received. validator (Verifier): A schema verifier that will validate a received message uniquely
[ "Register", "a", "callback", "to", "receive", "events", "." ]
python
train
jaywink/federation
federation/entities/activitypub/mappers.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/entities/activitypub/mappers.py#L15-L32
def element_to_objects(payload: Dict) -> List: """ Transform an Element to a list of entities recursively. """ entities = [] cls = MAPPINGS.get(payload.get('type')) if not cls: return [] transformed = transform_attributes(payload, cls) entity = cls(**transformed) if hasattr(entity, "post_receive"): entity.post_receive() entities.append(entity) return entities
[ "def", "element_to_objects", "(", "payload", ":", "Dict", ")", "->", "List", ":", "entities", "=", "[", "]", "cls", "=", "MAPPINGS", ".", "get", "(", "payload", ".", "get", "(", "'type'", ")", ")", "if", "not", "cls", ":", "return", "[", "]", "tran...
Transform an Element to a list of entities recursively.
[ "Transform", "an", "Element", "to", "a", "list", "of", "entities", "recursively", "." ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L1930-L1974
def upload_read(infile, table): """ Reads a table from a MagIC upload (or downloaded) txt file, puts data in a list of dictionaries """ delim = 'tab' hold, magic_data, magic_record, magic_keys = [], [], {}, [] f = open(infile, "r") # # look for right table # line = f.readline()[:-1] file_type = line.split('\t')[1] if file_type == 'delimited': file_type = line.split('\t')[2] if delim == 'tab': line = f.readline()[:-1].split('\t') else: f.close() print("only tab delimitted files are supported now") return while file_type != table: while line[0][0:5] in f.readlines() != ">>>>>": pass line = f.readline()[:-1] file_type = line.split('\t')[1] if file_type == 'delimited': file_type = line.split('\t')[2] ine = f.readline()[:-1].split('\t') while line[0][0:5] in f.readlines() != ">>>>>": for key in line: magic_keys.append(key) for line in f.readlines(): rec = line[:-1].split('\t') hold.append(rec) for rec in hold: magic_record = {} if len(magic_keys) != len(rec): print("Uneven record lengths detected: ", rec) input("Return to continue.... ") for k in range(len(magic_keys)): magic_record[magic_keys[k]] = rec[k] magic_data.append(magic_record) f.close() return magic_data
[ "def", "upload_read", "(", "infile", ",", "table", ")", ":", "delim", "=", "'tab'", "hold", ",", "magic_data", ",", "magic_record", ",", "magic_keys", "=", "[", "]", ",", "[", "]", ",", "{", "}", ",", "[", "]", "f", "=", "open", "(", "infile", ",...
Reads a table from a MagIC upload (or downloaded) txt file, puts data in a list of dictionaries
[ "Reads", "a", "table", "from", "a", "MagIC", "upload", "(", "or", "downloaded", ")", "txt", "file", "puts", "data", "in", "a", "list", "of", "dictionaries" ]
python
train
arve0/leicaexperiment
leicaexperiment/experiment.py
https://github.com/arve0/leicaexperiment/blob/c0393c4d51984a506f813319efb66e54c4f2a426/leicaexperiment/experiment.py#L179-L197
def well_images(self, well_row, well_column): """Get list of paths to images in specified well. Parameters ---------- well_row : int Starts at 0. Same as --V in files. well_column : int Starts at 0. Save as --U in files. Returns ------- list of strings Paths to images or empty list if no images are found. """ return list(i for i in self.images if attribute(i, 'u') == well_column and attribute(i, 'v') == well_row)
[ "def", "well_images", "(", "self", ",", "well_row", ",", "well_column", ")", ":", "return", "list", "(", "i", "for", "i", "in", "self", ".", "images", "if", "attribute", "(", "i", ",", "'u'", ")", "==", "well_column", "and", "attribute", "(", "i", ",...
Get list of paths to images in specified well. Parameters ---------- well_row : int Starts at 0. Same as --V in files. well_column : int Starts at 0. Save as --U in files. Returns ------- list of strings Paths to images or empty list if no images are found.
[ "Get", "list", "of", "paths", "to", "images", "in", "specified", "well", "." ]
python
valid
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/device_directory/device_directory.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/device_directory/device_directory.py#L54-L77
def list_devices(self, **kwargs): """List devices in the device catalog. Example usage, listing all registered devices in the catalog: .. code-block:: python filters = { 'state': {'$eq': 'registered' } } devices = api.list_devices(order='asc', filters=filters) for idx, d in enumerate(devices): print(idx, d.id) :param int limit: The number of devices to retrieve. :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get devices after/starting at given `device_id` :param filters: Dictionary of filters to apply. :returns: a list of :py:class:`Device` objects registered in the catalog. :rtype: PaginatedResponse """ kwargs = self._verify_sort_options(kwargs) kwargs = self._verify_filters(kwargs, Device, True) api = self._get_api(device_directory.DefaultApi) return PaginatedResponse(api.device_list, lwrap_type=Device, **kwargs)
[ "def", "list_devices", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "self", ".", "_verify_sort_options", "(", "kwargs", ")", "kwargs", "=", "self", ".", "_verify_filters", "(", "kwargs", ",", "Device", ",", "True", ")", "api", "=", "s...
List devices in the device catalog. Example usage, listing all registered devices in the catalog: .. code-block:: python filters = { 'state': {'$eq': 'registered' } } devices = api.list_devices(order='asc', filters=filters) for idx, d in enumerate(devices): print(idx, d.id) :param int limit: The number of devices to retrieve. :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get devices after/starting at given `device_id` :param filters: Dictionary of filters to apply. :returns: a list of :py:class:`Device` objects registered in the catalog. :rtype: PaginatedResponse
[ "List", "devices", "in", "the", "device", "catalog", "." ]
python
train
saltstack/salt
salt/modules/pf.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pf.py#L235-L310
def table(command, table, **kwargs): ''' Apply a command on the specified table. table: Name of the table. command: Command to apply to the table. Supported commands are: - add - delete - expire - flush - kill - replace - show - test - zero Please refer to the OpenBSD `pfctl(8) <https://man.openbsd.org/pfctl#T>`_ documentation for a detailed explanation of each command. CLI example: .. code-block:: bash salt '*' pf.table expire table=spam_hosts number=300 salt '*' pf.table add table=local_hosts addresses='["127.0.0.1", "::1"]' ''' ret = {} all_commands = ['kill', 'flush', 'add', 'delete', 'expire', 'replace', 'show', 'test', 'zero'] if command not in all_commands: raise SaltInvocationError('Unknown table command: {0}'.format(command)) cmd = ['pfctl', '-t', table, '-T', command] if command in ['add', 'delete', 'replace', 'test']: cmd += kwargs.get('addresses', []) elif command == 'expire': number = kwargs.get('number', None) if not number: raise SaltInvocationError('need expire_number argument for expire command') else: cmd.append(number) result = __salt__['cmd.run_all'](cmd, output_level='trace', python_shell=False) if result['retcode'] == 0: if command == 'show': ret = {'comment': result['stdout'].split()} elif command == 'test': ret = {'comment': result['stderr'], 'matches': True} else: if re.match(r'^(0.*|no changes)', result['stderr']): ret['changes'] = False else: ret['changes'] = True ret['comment'] = result['stderr'] else: # 'test' returns a non-zero code if the address didn't match, even if # the command itself ran fine; also set 'matches' to False since not # everything matched. if command == 'test' and re.match(r'^\d+/\d+ addresses match.$', result['stderr']): ret = {'comment': result['stderr'], 'matches': False} else: raise CommandExecutionError( 'Could not apply {0} on table {1}'.format(command, table), info={'errors': [result['stderr']], 'changes': False} ) return ret
[ "def", "table", "(", "command", ",", "table", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "all_commands", "=", "[", "'kill'", ",", "'flush'", ",", "'add'", ",", "'delete'", ",", "'expire'", ",", "'replace'", ",", "'show'", ",", "'test'",...
Apply a command on the specified table. table: Name of the table. command: Command to apply to the table. Supported commands are: - add - delete - expire - flush - kill - replace - show - test - zero Please refer to the OpenBSD `pfctl(8) <https://man.openbsd.org/pfctl#T>`_ documentation for a detailed explanation of each command. CLI example: .. code-block:: bash salt '*' pf.table expire table=spam_hosts number=300 salt '*' pf.table add table=local_hosts addresses='["127.0.0.1", "::1"]'
[ "Apply", "a", "command", "on", "the", "specified", "table", "." ]
python
train
saltstack/salt
salt/modules/win_file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_file.py#L1230-L1332
def mkdir(path, owner=None, grant_perms=None, deny_perms=None, inheritance=True, reset=False): ''' Ensure that the directory is available and permissions are set. Args: path (str): The full path to the directory. owner (str): The owner of the directory. If not passed, it will be the account that created the directory, likely SYSTEM grant_perms (dict): A dictionary containing the user/group and the basic permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also set the ``applies_to`` setting here. The default is ``this_folder_subfolders_files``. Specify another ``applies_to`` setting like this: .. code-block:: yaml {'user': {'perms': 'full_control', 'applies_to': 'this_folder'}} To set advanced permissions use a list for the ``perms`` parameter, ie: .. code-block:: yaml {'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}} deny_perms (dict): A dictionary containing the user/group and permissions to deny along with the ``applies_to`` setting. Use the same format used for the ``grant_perms`` parameter. Remember, deny permissions supersede grant permissions. inheritance (bool): If True the object will inherit permissions from the parent, if ``False``, inheritance will be disabled. Inheritance setting will not apply to parent directories if they must be created. reset (bool): If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False``. .. versionadded:: 2018.3.0 Returns: bool: True if successful Raises: CommandExecutionError: If unsuccessful CLI Example: .. code-block:: bash # To grant the 'Users' group 'read & execute' permissions. salt '*' file.mkdir C:\\Temp\\ Administrators "{'Users': {'perms': 'read_execute'}}" # Locally using salt call salt-call file.mkdir C:\\Temp\\ Administrators "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}" # Specify advanced attributes with a list salt '*' file.mkdir C:\\Temp\\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder_only'}}" ''' # Make sure the drive is valid drive = os.path.splitdrive(path)[0] if not os.path.isdir(drive): raise CommandExecutionError('Drive {0} is not mapped'.format(drive)) path = os.path.expanduser(path) path = os.path.expandvars(path) if not os.path.isdir(path): try: # Make the directory os.mkdir(path) # Set owner if owner: salt.utils.win_dacl.set_owner(obj_name=path, principal=owner) # Set permissions set_perms( path=path, grant_perms=grant_perms, deny_perms=deny_perms, inheritance=inheritance, reset=reset) except WindowsError as exc: raise CommandExecutionError(exc) return True
[ "def", "mkdir", "(", "path", ",", "owner", "=", "None", ",", "grant_perms", "=", "None", ",", "deny_perms", "=", "None", ",", "inheritance", "=", "True", ",", "reset", "=", "False", ")", ":", "# Make sure the drive is valid", "drive", "=", "os", ".", "pa...
Ensure that the directory is available and permissions are set. Args: path (str): The full path to the directory. owner (str): The owner of the directory. If not passed, it will be the account that created the directory, likely SYSTEM grant_perms (dict): A dictionary containing the user/group and the basic permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also set the ``applies_to`` setting here. The default is ``this_folder_subfolders_files``. Specify another ``applies_to`` setting like this: .. code-block:: yaml {'user': {'perms': 'full_control', 'applies_to': 'this_folder'}} To set advanced permissions use a list for the ``perms`` parameter, ie: .. code-block:: yaml {'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}} deny_perms (dict): A dictionary containing the user/group and permissions to deny along with the ``applies_to`` setting. Use the same format used for the ``grant_perms`` parameter. Remember, deny permissions supersede grant permissions. inheritance (bool): If True the object will inherit permissions from the parent, if ``False``, inheritance will be disabled. Inheritance setting will not apply to parent directories if they must be created. reset (bool): If ``True`` the existing DACL will be cleared and replaced with the settings defined in this function. If ``False``, new entries will be appended to the existing DACL. Default is ``False``. .. versionadded:: 2018.3.0 Returns: bool: True if successful Raises: CommandExecutionError: If unsuccessful CLI Example: .. code-block:: bash # To grant the 'Users' group 'read & execute' permissions. salt '*' file.mkdir C:\\Temp\\ Administrators "{'Users': {'perms': 'read_execute'}}" # Locally using salt call salt-call file.mkdir C:\\Temp\\ Administrators "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}" # Specify advanced attributes with a list salt '*' file.mkdir C:\\Temp\\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder_only'}}"
[ "Ensure", "that", "the", "directory", "is", "available", "and", "permissions", "are", "set", "." ]
python
train
saltstack/salt
salt/modules/bigip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bigip.py#L162-L219
def _set_value(value): ''' A function to detect if user is trying to pass a dictionary or list. parse it and return a dictionary list or a string ''' #don't continue if already an acceptable data-type if isinstance(value, bool) or isinstance(value, dict) or isinstance(value, list): return value #check if json if value.startswith('j{') and value.endswith('}j'): value = value.replace('j{', '{') value = value.replace('}j', '}') try: return salt.utils.json.loads(value) except Exception: raise salt.exceptions.CommandExecutionError #detect list of dictionaries if '|' in value and r'\|' not in value: values = value.split('|') items = [] for value in values: items.append(_set_value(value)) return items #parse out dictionary if detected if ':' in value and r'\:' not in value: options = {} #split out pairs key_pairs = value.split(',') for key_pair in key_pairs: k = key_pair.split(':')[0] v = key_pair.split(':')[1] options[k] = v return options #try making a list elif ',' in value and r'\,' not in value: value_items = value.split(',') return value_items #just return a string else: #remove escape chars if added if r'\|' in value: value = value.replace(r'\|', '|') if r'\:' in value: value = value.replace(r'\:', ':') if r'\,' in value: value = value.replace(r'\,', ',') return value
[ "def", "_set_value", "(", "value", ")", ":", "#don't continue if already an acceptable data-type", "if", "isinstance", "(", "value", ",", "bool", ")", "or", "isinstance", "(", "value", ",", "dict", ")", "or", "isinstance", "(", "value", ",", "list", ")", ":", ...
A function to detect if user is trying to pass a dictionary or list. parse it and return a dictionary list or a string
[ "A", "function", "to", "detect", "if", "user", "is", "trying", "to", "pass", "a", "dictionary", "or", "list", ".", "parse", "it", "and", "return", "a", "dictionary", "list", "or", "a", "string" ]
python
train
szastupov/aiotg
aiotg/bot.py
https://github.com/szastupov/aiotg/blob/eed81a6a728c02120f1d730a6e8b8fe50263c010/aiotg/bot.py#L155-L196
def run(self, debug=False, reload=None): """ Convenience method for running bots in getUpdates mode :param bool debug: Enable debug logging and automatic reloading :param bool reload: Automatically reload bot on code change :Example: >>> if __name__ == '__main__': >>> bot.run() """ loop = asyncio.get_event_loop() logging.basicConfig(level=logging.DEBUG if debug else logging.INFO) if reload is None: reload = debug bot_loop = asyncio.ensure_future(self.loop()) try: if reload: loop.run_until_complete(run_with_reloader(loop, bot_loop, self.stop)) else: loop.run_until_complete(bot_loop) # User cancels except KeyboardInterrupt: logger.debug("User cancelled") bot_loop.cancel() self.stop() # Stop loop finally: if AIOHTTP_23: loop.run_until_complete(self.session.close()) logger.debug("Closing loop") loop.stop() loop.close()
[ "def", "run", "(", "self", ",", "debug", "=", "False", ",", "reload", "=", "None", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", "if", "debug", "else", "l...
Convenience method for running bots in getUpdates mode :param bool debug: Enable debug logging and automatic reloading :param bool reload: Automatically reload bot on code change :Example: >>> if __name__ == '__main__': >>> bot.run()
[ "Convenience", "method", "for", "running", "bots", "in", "getUpdates", "mode" ]
python
train
JamesRamm/longclaw
longclaw/contrib/productrequests/views.py
https://github.com/JamesRamm/longclaw/blob/8bbf2e6d703271b815ec111813c7c5d1d4e4e810/longclaw/contrib/productrequests/views.py#L11-L31
def requests_admin(request, pk): """Table display of each request for a given product. Allows the given Page pk to refer to a direct parent of the ProductVariant model or be the ProductVariant model itself. This allows for the standard longclaw product modelling philosophy where ProductVariant refers to the actual product (in the case where there is only 1 variant) or to be variants of the product page. """ page = Page.objects.get(pk=pk).specific if hasattr(page, 'variants'): requests = ProductRequest.objects.filter( variant__in=page.variants.all() ) else: requests = ProductRequest.objects.filter(variant=page) return render( request, "productrequests/requests_admin.html", {'page': page, 'requests': requests} )
[ "def", "requests_admin", "(", "request", ",", "pk", ")", ":", "page", "=", "Page", ".", "objects", ".", "get", "(", "pk", "=", "pk", ")", ".", "specific", "if", "hasattr", "(", "page", ",", "'variants'", ")", ":", "requests", "=", "ProductRequest", "...
Table display of each request for a given product. Allows the given Page pk to refer to a direct parent of the ProductVariant model or be the ProductVariant model itself. This allows for the standard longclaw product modelling philosophy where ProductVariant refers to the actual product (in the case where there is only 1 variant) or to be variants of the product page.
[ "Table", "display", "of", "each", "request", "for", "a", "given", "product", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/licensing/licensing_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/licensing/licensing_client.py#L176-L197
def get_account_entitlement_for_user(self, user_id, determine_rights=None, create_if_not_exists=None): """GetAccountEntitlementForUser. [Preview API] Get the entitlements for a user :param str user_id: The id of the user :param bool determine_rights: :param bool create_if_not_exists: :rtype: :class:`<AccountEntitlement> <azure.devops.v5_1.licensing.models.AccountEntitlement>` """ route_values = {} if user_id is not None: route_values['userId'] = self._serialize.url('user_id', user_id, 'str') query_parameters = {} if determine_rights is not None: query_parameters['determineRights'] = self._serialize.query('determine_rights', determine_rights, 'bool') if create_if_not_exists is not None: query_parameters['createIfNotExists'] = self._serialize.query('create_if_not_exists', create_if_not_exists, 'bool') response = self._send(http_method='GET', location_id='6490e566-b299-49a7-a4e4-28749752581f', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('AccountEntitlement', response)
[ "def", "get_account_entitlement_for_user", "(", "self", ",", "user_id", ",", "determine_rights", "=", "None", ",", "create_if_not_exists", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "user_id", "is", "not", "None", ":", "route_values", "[", "'u...
GetAccountEntitlementForUser. [Preview API] Get the entitlements for a user :param str user_id: The id of the user :param bool determine_rights: :param bool create_if_not_exists: :rtype: :class:`<AccountEntitlement> <azure.devops.v5_1.licensing.models.AccountEntitlement>`
[ "GetAccountEntitlementForUser", ".", "[", "Preview", "API", "]", "Get", "the", "entitlements", "for", "a", "user", ":", "param", "str", "user_id", ":", "The", "id", "of", "the", "user", ":", "param", "bool", "determine_rights", ":", ":", "param", "bool", "...
python
train
SHTOOLS/SHTOOLS
pyshtools/shclasses/slepian.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/slepian.py#L918-L943
def _to_array(self, alpha, normalization='4pi', csphase=1): """ Return the spherical harmonic coefficients of Slepian function i as an array, where i = 0 is the best concentrated function. """ if self.coeffs is None: coeffs = _np.copy(self._taper2coeffs(alpha)) else: if alpha > self.nrot - 1: raise ValueError('alpha must be less than or equal to ' + 'nrot - 1. alpha = {:d}, nrot = {:d}' .format(alpha, self.nrot)) coeffs = _shtools.SHVectorToCilm(self.coeffs[:, alpha]) if normalization == 'schmidt': for l in range(self.lmax + 1): coeffs[:, l, :l+1] *= _np.sqrt(2.0 * l + 1.0) elif normalization == 'ortho': coeffs *= _np.sqrt(4.0 * _np.pi) if csphase == -1: for m in range(self.lmax + 1): if m % 2 == 1: coeffs[:, :, m] = - coeffs[:, :, m] return coeffs
[ "def", "_to_array", "(", "self", ",", "alpha", ",", "normalization", "=", "'4pi'", ",", "csphase", "=", "1", ")", ":", "if", "self", ".", "coeffs", "is", "None", ":", "coeffs", "=", "_np", ".", "copy", "(", "self", ".", "_taper2coeffs", "(", "alpha",...
Return the spherical harmonic coefficients of Slepian function i as an array, where i = 0 is the best concentrated function.
[ "Return", "the", "spherical", "harmonic", "coefficients", "of", "Slepian", "function", "i", "as", "an", "array", "where", "i", "=", "0", "is", "the", "best", "concentrated", "function", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py#L189-L214
def _check_table(self): """Ensure that an incorrect table doesn't exist If a bad (old) table does exist, return False """ cursor = self._db.execute("PRAGMA table_info(%s)"%self.table) lines = cursor.fetchall() if not lines: # table does not exist return True types = {} keys = [] for line in lines: keys.append(line[1]) types[line[1]] = line[2] if self._keys != keys: # key mismatch self.log.warn('keys mismatch') return False for key in self._keys: if types[key] != self._types[key]: self.log.warn( 'type mismatch: %s: %s != %s'%(key,types[key],self._types[key]) ) return False return True
[ "def", "_check_table", "(", "self", ")", ":", "cursor", "=", "self", ".", "_db", ".", "execute", "(", "\"PRAGMA table_info(%s)\"", "%", "self", ".", "table", ")", "lines", "=", "cursor", ".", "fetchall", "(", ")", "if", "not", "lines", ":", "# table does...
Ensure that an incorrect table doesn't exist If a bad (old) table does exist, return False
[ "Ensure", "that", "an", "incorrect", "table", "doesn", "t", "exist" ]
python
test
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L1983-L1998
def connect_to_database_odbc_sqlserver(self, odbc_connection_string: str = None, dsn: str = None, database: str = None, user: str = None, password: str = None, server: str = "localhost", driver: str = "{SQL Server}", autocommit: bool = True) -> None: """Connects to an SQL Server database via ODBC.""" self.connect(engine=ENGINE_SQLSERVER, interface=INTERFACE_ODBC, odbc_connection_string=odbc_connection_string, dsn=dsn, database=database, user=user, password=password, host=server, driver=driver, autocommit=autocommit)
[ "def", "connect_to_database_odbc_sqlserver", "(", "self", ",", "odbc_connection_string", ":", "str", "=", "None", ",", "dsn", ":", "str", "=", "None", ",", "database", ":", "str", "=", "None", ",", "user", ":", "str", "=", "None", ",", "password", ":", "...
Connects to an SQL Server database via ODBC.
[ "Connects", "to", "an", "SQL", "Server", "database", "via", "ODBC", "." ]
python
train