repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
kobinpy/kobin
kobin/routes.py
https://github.com/kobinpy/kobin/blob/e6caff5af05db8a6e511d3de275d262466ab36a6/kobin/routes.py#L84-L102
def match_url_vars_type(url_vars, type_hints):
    """Coerce URL variables to their annotated types.

    Returns a ``(matched, typed_vars)`` pair: ``matched`` is ``False`` (with
    an empty dict) when any value cannot be converted to its hinted type.

    >>> match_url_vars_type({'user_id': '1'}, {'user_id': int})
    (True, {'user_id': 1})
    >>> match_url_vars_type({'user_id': 'foo'}, {'user_id': int})
    (False, {})
    """
    converted = {}
    try:
        for name, raw_value in url_vars.items():
            hint = type_hints.get(name)
            # str (or a missing hint) means "keep the raw string as-is".
            needs_cast = hint and hint != str
            converted[name] = hint(raw_value) if needs_cast else raw_value
    except ValueError:
        # Conversion failed (e.g. int('foo')): report a non-match.
        return False, {}
    return True, converted
[ "def", "match_url_vars_type", "(", "url_vars", ",", "type_hints", ")", ":", "typed_url_vars", "=", "{", "}", "try", ":", "for", "k", ",", "v", "in", "url_vars", ".", "items", "(", ")", ":", "arg_type", "=", "type_hints", ".", "get", "(", "k", ")", "i...
Match types of url vars. >>> match_url_vars_type({'user_id': '1'}, {'user_id': int}) (True, {'user_id': 1}) >>> match_url_vars_type({'user_id': 'foo'}, {'user_id': int}) (False, {})
[ "Match", "types", "of", "url", "vars", "." ]
python
train
xtuml/pyxtuml
bridgepoint/ooaofooa.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/ooaofooa.py#L448-L490
def mk_component(bp_model, c_c=None, derived_attributes=False):
    '''
    Create a pyxtuml meta model from a BridgePoint model.
    Optionally, restrict to classes and associations contained in the
    component c_c.

    :param bp_model: BridgePoint (ooaofooa) model to translate.
    :param c_c: optional component instance; when given, only elements
        contained in it are translated (see the filter below).
    :param derived_attributes: forwarded to mk_class; presumably controls
        whether derived attributes are included -- confirm in mk_class.
    :returns: the populated Domain instance.
    '''
    target = Domain()

    # Accept every element when no component is given, otherwise only
    # elements contained in c_c.
    c_c_filt = lambda sel: c_c is None or is_contained_in(sel, c_c)

    # Classes (O_OBJ)
    for o_obj in bp_model.select_many('O_OBJ', c_c_filt):
        mk_class(target, o_obj, derived_attributes)

    # Associations (R_REL)
    for r_rel in bp_model.select_many('R_REL', c_c_filt):
        mk_association(target, r_rel)

    # Functions (S_SYNC), registered by name
    for s_sync in bp_model.select_many('S_SYNC', c_c_filt):
        fn = mk_function(target, s_sync)
        target.add_symbol(s_sync.Name, fn)

    # Data types (S_DT): only enumerated types (via the R17 link to S_EDT)
    # are turned into symbols here.
    for s_dt in bp_model.select_many('S_DT', c_c_filt):
        s_edt = one(s_dt).S_EDT[17]()
        if s_edt:
            enum = mk_enum(s_edt)
            target.add_symbol(s_dt.Name, enum)

    # Constants: each constant set (CNST_CSP) holds symbolic constants
    # (CNST_SYC) linked over R1504.
    for cnst_csp in bp_model.select_many('CNST_CSP', c_c_filt):
        for cnst_syc in many(cnst_csp).CNST_SYC[1504]():
            value = mk_constant(cnst_syc)
            target.add_symbol(cnst_syc.Name, value)

    # Formalize after BOTH classes and associations exist, since
    # formalization references the classes created above.
    for ass in target.associations:
        ass.formalize()

    # External entities (S_EE): well-known key letters map to the builtin
    # implementations; anything else is generated from the model.
    for s_ee in bp_model.select_many('S_EE', c_c_filt):
        if s_ee.Key_Lett in ['LOG', 'ARCH', 'TIM', 'NVS', 'PERSIST']:
            target.add_symbol(s_ee.Key_Lett, getattr(builtin_ee, s_ee.Key_Lett))
        else:
            ee = mk_external_entity(target, s_ee)
            target.add_symbol(s_ee.Key_Lett, ee)

    return target
[ "def", "mk_component", "(", "bp_model", ",", "c_c", "=", "None", ",", "derived_attributes", "=", "False", ")", ":", "target", "=", "Domain", "(", ")", "c_c_filt", "=", "lambda", "sel", ":", "c_c", "is", "None", "or", "is_contained_in", "(", "sel", ",", ...
Create a pyxtuml meta model from a BridgePoint model. Optionally, restrict to classes and associations contained in the component c_c.
[ "Create", "a", "pyxtuml", "meta", "model", "from", "a", "BridgePoint", "model", ".", "Optionally", "restrict", "to", "classes", "and", "associations", "contained", "in", "the", "component", "c_c", "." ]
python
test
tisimst/pyDOE
pyDOE/doe_lhs.py
https://github.com/tisimst/pyDOE/blob/436143702507a5c8ff87b361223eee8171d6a1d7/pyDOE/doe_lhs.py#L21-L119
def lhs(n, samples=None, criterion=None, iterations=None):
    """
    Generate a latin-hypercube design

    Parameters
    ----------
    n : int
        The number of factors to generate samples for

    Optional
    --------
    samples : int
        The number of samples to generate for each factor (Default: n)
    criterion : str
        Allowable values are "center" or "c", "maximin" or "m",
        "centermaximin" or "cm", and "correlation", "correlate" or "corr".
        If no value given, the design is simply randomized.
    iterations : int
        The number of iterations in the maximin and correlations algorithms
        (Default: 5).

    Returns
    -------
    H : 2d-array
        An n-by-samples design matrix that has been normalized so factor
        values are uniformly spaced between zero and one.

    Example
    -------
    A 3-factor design (defaults to 3 samples)::

        >>> lhs(3)
        array([[ 0.40069325,  0.08118402,  0.69763298],
               [ 0.19524568,  0.41383587,  0.29947106],
               [ 0.85341601,  0.75460699,  0.360024  ]])

    A 4-factor design with 6 samples::

        >>> lhs(4, samples=6)

    A 2-factor design with 5 centered samples::

        >>> lhs(2, samples=5, criterion='center')

    A 3-factor design with 4 samples where the minimum distance between
    all samples has been maximized::

        >>> lhs(3, samples=4, criterion='maximin')

    A 4-factor design with 5 samples where the samples are as uncorrelated
    as possible (within 10 iterations)::

        >>> lhs(4, samples=5, criterion='correlate', iterations=10)

    """
    H = None

    if samples is None:
        samples = n

    if criterion is not None:
        # BUGFIX: the original validation accepted "correlation" while the
        # dispatch below only checked "correlate"/"corr", so
        # criterion='correlation' passed validation and silently returned
        # None; conversely the documented 'correlate' spelling failed
        # validation.  Both spellings are now accepted in both places.
        # (Kept as an assert for backward compatibility with callers that
        # catch AssertionError.)
        assert criterion.lower() in ('center', 'c', 'maximin', 'm',
                                     'centermaximin', 'cm', 'correlation',
                                     'correlate', 'corr'), \
            'Invalid value for "criterion": {}'.format(criterion)
    else:
        # No criterion given: plain randomized latin-hypercube.
        H = _lhsclassic(n, samples)
        criterion = 'center'

    if iterations is None:
        iterations = 5

    if H is None:
        # Dispatch on the (validated) criterion spelling.
        crit = criterion.lower()
        if crit in ('center', 'c'):
            H = _lhscentered(n, samples)
        elif crit in ('maximin', 'm'):
            H = _lhsmaximin(n, samples, iterations, 'maximin')
        elif crit in ('centermaximin', 'cm'):
            H = _lhsmaximin(n, samples, iterations, 'centermaximin')
        elif crit in ('correlate', 'correlation', 'corr'):
            H = _lhscorrelate(n, samples, iterations)

    return H
[ "def", "lhs", "(", "n", ",", "samples", "=", "None", ",", "criterion", "=", "None", ",", "iterations", "=", "None", ")", ":", "H", "=", "None", "if", "samples", "is", "None", ":", "samples", "=", "n", "if", "criterion", "is", "not", "None", ":", ...
Generate a latin-hypercube design Parameters ---------- n : int The number of factors to generate samples for Optional -------- samples : int The number of samples to generate for each factor (Default: n) criterion : str Allowable values are "center" or "c", "maximin" or "m", "centermaximin" or "cm", and "correlation" or "corr". If no value given, the design is simply randomized. iterations : int The number of iterations in the maximin and correlations algorithms (Default: 5). Returns ------- H : 2d-array An n-by-samples design matrix that has been normalized so factor values are uniformly spaced between zero and one. Example ------- A 3-factor design (defaults to 3 samples):: >>> lhs(3) array([[ 0.40069325, 0.08118402, 0.69763298], [ 0.19524568, 0.41383587, 0.29947106], [ 0.85341601, 0.75460699, 0.360024 ]]) A 4-factor design with 6 samples:: >>> lhs(4, samples=6) array([[ 0.27226812, 0.02811327, 0.62792445, 0.91988196], [ 0.76945538, 0.43501682, 0.01107457, 0.09583358], [ 0.45702981, 0.76073773, 0.90245401, 0.18773015], [ 0.99342115, 0.85814198, 0.16996665, 0.65069309], [ 0.63092013, 0.22148567, 0.33616859, 0.36332478], [ 0.05276917, 0.5819198 , 0.67194243, 0.78703262]]) A 2-factor design with 5 centered samples:: >>> lhs(2, samples=5, criterion='center') array([[ 0.3, 0.5], [ 0.7, 0.9], [ 0.1, 0.3], [ 0.9, 0.1], [ 0.5, 0.7]]) A 3-factor design with 4 samples where the minimum distance between all samples has been maximized:: >>> lhs(3, samples=4, criterion='maximin') array([[ 0.02642564, 0.55576963, 0.50261649], [ 0.51606589, 0.88933259, 0.34040838], [ 0.98431735, 0.0380364 , 0.01621717], [ 0.40414671, 0.33339132, 0.84845707]]) A 4-factor design with 5 samples where the samples are as uncorrelated as possible (within 10 iterations):: >>> lhs(4, samples=5, criterion='correlate', iterations=10)
[ "Generate", "a", "latin", "-", "hypercube", "design", "Parameters", "----------", "n", ":", "int", "The", "number", "of", "factors", "to", "generate", "samples", "for", "Optional", "--------", "samples", ":", "int", "The", "number", "of", "samples", "to", "g...
python
train
RI-imaging/qpsphere
qpsphere/cnvnc.py
https://github.com/RI-imaging/qpsphere/blob/3cfa0e9fb8e81be8c820abbeccd47242e7972ac1/qpsphere/cnvnc.py#L112-L142
def bg_phase_mask_from_sim(sim, radial_clearance=1.1):
    """Return the background phase mask of a qpsphere simulation

    Parameters
    ----------
    sim: qpimage.QPImage
        Quantitative phase data simulated with qpsphere; the simulation
        keyword arguments "sim center", "sim radius", and "pixel size"
        must be present in `sim.meta`.
    radial_clearance: float
        Multiplicator to the fitted radius of the sphere; modifies
        the size of the mask; set to "1" to use the radius determined
        by :func:`qpsphere.analyze`.
        The circular area containing the phase object is set to
        `False` in the output `mask` image.

    Returns
    -------
    mask: boolean 2d np.ndarray
        The mask is `True` for background regions and `False` for
        object regions.
    """
    # Sphere geometry stored in the simulation meta data
    center_x, center_y = sim["sim center"]
    sphere_radius = sim["sim radius"]
    pixel_size = sim["pixel size"]
    # Squared distance of every pixel from the sphere center
    # (broadcast a column vector against a row vector).
    grid_x = np.arange(sim.shape[0]).reshape(-1, 1)
    grid_y = np.arange(sim.shape[1]).reshape(1, -1)
    dist_sq = (grid_x - center_x) ** 2 + (grid_y - center_y) ** 2
    # Background is everything strictly outside the enlarged sphere radius
    # (radius converted from physical units to pixels).
    limit_sq = (sphere_radius / pixel_size * radial_clearance) ** 2
    return dist_sq > limit_sq
[ "def", "bg_phase_mask_from_sim", "(", "sim", ",", "radial_clearance", "=", "1.1", ")", ":", "# Mask values around the object", "cx", ",", "cy", "=", "sim", "[", "\"sim center\"", "]", "radius", "=", "sim", "[", "\"sim radius\"", "]", "px_um", "=", "sim", "[", ...
Return the background phase mask of a qpsphere simulation Parameters ---------- sim: qpimage.QPImage Quantitative phase data simulated with qpsphere; The simulation keyword arguments "sim center", "sim radius", and "pixel size" must be present in `sim.meta`. radial_clearance: float Multiplicator to the fitted radius of the sphere; modifies the size of the mask; set to "1" to use the radius determined by :func:`qpsphere.analyze`. The circular area containing the phase object is set to `False` in the output `mask` image. Returns ------- mask: boolean 2d np.ndarray The mask is `True` for background regions and `False` for object regions.
[ "Return", "the", "background", "phase", "mask", "of", "a", "qpsphere", "simulation" ]
python
train
SoCo/SoCo
soco/discovery.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/discovery.py#L22-L185
def discover(timeout=5, include_invisible=False, interface_addr=None):
    """ Discover Sonos zones on the local network.

    Return a set of `SoCo` instances for each zone found.
    Include invisible zones (bridges and slave zones in stereo pairs if
    ``include_invisible`` is `True`. Will block for up to ``timeout`` seconds,
    after which return `None` if no zones found.

    Args:
        timeout (int, optional): block for this many seconds, at most.
            Defaults to 5.
        include_invisible (bool, optional): include invisible zones in the
            return set. Defaults to `False`.
        interface_addr (str or None): Discovery operates by sending UDP
            multicast datagrams. ``interface_addr`` is a string (dotted
            quad) representation of the network interface address to use as
            the source of the datagrams (i.e. it is a value for
            `socket.IP_MULTICAST_IF <socket>`). If `None` or not specified,
            the system default interface for UDP multicast messages will be
            used. This is probably what you want to happen. Defaults to
            `None`.

    Returns:
        set: a set of `SoCo` instances, one for each zone found, or else
        `None`.

    Note:
        There is no easy cross-platform way to find out the addresses of the
        local machine's network interfaces. You might try the
        `netifaces module <https://pypi.python.org/pypi/netifaces>`_ and some
        code like this:

            >>> from netifaces import interfaces, AF_INET, ifaddresses
            >>> data = [ifaddresses(i) for i in interfaces()]
            >>> [d[AF_INET][0]['addr'] for d in data if d.get(AF_INET)]
            ['127.0.0.1', '192.168.1.20']

        This should provide you with a list of values to try for
        interface_addr if you are having trouble finding your Sonos devices
    """

    def create_socket(interface_addr=None):
        """ A helper function for creating a socket for discover purposes.

        Create and return a socket with appropriate options set for
        multicast.
        """
        _sock = socket.socket(
            socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        # UPnP v1.0 requires a TTL of 4
        _sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
                         struct.pack("B", 4))
        if interface_addr is not None:
            # Force outgoing multicast through the requested interface.
            _sock.setsockopt(
                socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
                socket.inet_aton(interface_addr))
        return _sock

    # Standard SSDP M-SEARCH request; ST restricts responses to Sonos
    # ZonePlayer devices.
    # pylint: disable=invalid-name
    PLAYER_SEARCH = dedent("""\
        M-SEARCH * HTTP/1.1
        HOST: 239.255.255.250:1900
        MAN: "ssdp:discover"
        MX: 1
        ST: urn:schemas-upnp-org:device:ZonePlayer:1
        """).encode('utf-8')
    # Standard SSDP multicast group and port.
    MCAST_GRP = "239.255.255.250"
    MCAST_PORT = 1900

    _sockets = []
    # Use the specified interface, if any
    if interface_addr is not None:
        try:
            # inet_aton is used purely to validate the dotted-quad string.
            address = socket.inet_aton(interface_addr)
        except socket.error:
            raise ValueError("{0} is not a valid IP address string".format(
                interface_addr))
        _sockets.append(create_socket(interface_addr))
        _LOG.info("Sending discovery packets on default interface")
    else:
        # Find the local network address using a couple of different methods.
        # Create a socket for each unique address found, and one for the
        # default multicast address
        addresses = set()
        try:
            addresses.add(socket.gethostbyname(socket.gethostname()))
        except socket.error:
            pass
        try:
            addresses.add(socket.gethostbyname(socket.getfqdn()))
        except socket.error:
            pass
        for address in addresses:
            try:
                _sockets.append(create_socket(address))
            except socket.error as e:
                # Best effort: skip interfaces we cannot bind a multicast
                # socket on, but log why.
                _LOG.warning("Can't make a discovery socket for %s: %s: %s",
                             address, e.__class__.__name__, e)
        # Add a socket using the system default address
        _sockets.append(create_socket())
    # Used to be logged as:
    # list(s.getsockname()[0] for s in _sockets)
    # but getsockname fails on Windows with unconnected unbound sockets
    # https://bugs.python.org/issue1049
    _LOG.info("Sending discovery packets on %s", _sockets)

    for _ in range(0, 3):
        # Send a few times to each socket. UDP is unreliable
        for _sock in _sockets:
            _sock.sendto(really_utf8(PLAYER_SEARCH), (MCAST_GRP, MCAST_PORT))

    t0 = time.time()
    while True:
        # Check if the timeout is exceeded. We could do this check just
        # before the currently only continue statement of this loop,
        # but I feel it is safer to do it here, so that we do not forget
        # to do it if/when another continue statement is added later.
        # Note: this is sensitive to clock adjustments. AFAIK there
        # is no monotonic timer available before Python 3.3.
        t1 = time.time()
        if t1 - t0 > timeout:
            return None

        # The timeout of the select call is set to be no greater than
        # 100ms, so as not to exceed (too much) the required timeout
        # in case the loop is executed more than once.
        response, _, _ = select.select(_sockets, [], [], min(timeout, 0.1))

        # Only Zone Players should respond, given the value of ST in the
        # PLAYER_SEARCH message. However, to prevent misbehaved devices
        # on the network disrupting the discovery process, we check that
        # the response contains the "Sonos" string; otherwise we keep
        # waiting for a correct response.
        #
        # Here is a sample response from a real Sonos device (actual numbers
        # have been redacted):
        # HTTP/1.1 200 OK
        # CACHE-CONTROL: max-age = 1800
        # EXT:
        # LOCATION: http://***.***.***.***:1400/xml/device_description.xml
        # SERVER: Linux UPnP/1.0 Sonos/26.1-76230 (ZPS3)
        # ST: urn:schemas-upnp-org:device:ZonePlayer:1
        # USN: uuid:RINCON_B8*************00::urn:schemas-upnp-org:device:
        # ZonePlayer:1
        # X-RINCON-BOOTSEQ: 3
        # X-RINCON-HOUSEHOLD: Sonos_7O********************R7eU
        if response:
            for _sock in response:
                data, addr = _sock.recvfrom(1024)
                _LOG.debug(
                    'Received discovery response from %s: "%s"', addr, data
                )
                if b"Sonos" in data:
                    # Now we have an IP, we can build a SoCo instance and
                    # query that player for the topology to find the other
                    # players. It is much more efficient to rely upon the
                    # Zone Player's ability to find the others, than to wait
                    # for query responses from them ourselves.
                    zone = config.SOCO_CLASS(addr[0])
                    if include_invisible:
                        return zone.all_zones
                    else:
                        return zone.visible_zones
[ "def", "discover", "(", "timeout", "=", "5", ",", "include_invisible", "=", "False", ",", "interface_addr", "=", "None", ")", ":", "def", "create_socket", "(", "interface_addr", "=", "None", ")", ":", "\"\"\" A helper function for creating a socket for discover purpos...
Discover Sonos zones on the local network. Return a set of `SoCo` instances for each zone found. Include invisible zones (bridges and slave zones in stereo pairs if ``include_invisible`` is `True`. Will block for up to ``timeout`` seconds, after which return `None` if no zones found. Args: timeout (int, optional): block for this many seconds, at most. Defaults to 5. include_invisible (bool, optional): include invisible zones in the return set. Defaults to `False`. interface_addr (str or None): Discovery operates by sending UDP multicast datagrams. ``interface_addr`` is a string (dotted quad) representation of the network interface address to use as the source of the datagrams (i.e. it is a value for `socket.IP_MULTICAST_IF <socket>`). If `None` or not specified, the system default interface for UDP multicast messages will be used. This is probably what you want to happen. Defaults to `None`. Returns: set: a set of `SoCo` instances, one for each zone found, or else `None`. Note: There is no easy cross-platform way to find out the addresses of the local machine's network interfaces. You might try the `netifaces module <https://pypi.python.org/pypi/netifaces>`_ and some code like this: >>> from netifaces import interfaces, AF_INET, ifaddresses >>> data = [ifaddresses(i) for i in interfaces()] >>> [d[AF_INET][0]['addr'] for d in data if d.get(AF_INET)] ['127.0.0.1', '192.168.1.20'] This should provide you with a list of values to try for interface_addr if you are having trouble finding your Sonos devices
[ "Discover", "Sonos", "zones", "on", "the", "local", "network", "." ]
python
train
pysal/spglm
spglm/family.py
https://github.com/pysal/spglm/blob/1339898adcb7e1638f1da83d57aa37392525f018/spglm/family.py#L800-L831
def deviance(self, endog, mu, freq_weights=1, scale=1., axis=None):
    r'''
    Deviance function for either Bernoulli or Binomial data.

    Parameters
    ----------
    endog : array-like
        Endogenous response variable (already transformed to a probability
        if appropriate).
    mu : array
        Fitted mean response variable
    freq_weights : array-like
        1d array of frequency weights. The default is 1.
    scale : float, optional
        An optional scale argument; accepted for API compatibility and not
        used in the computation. The default is 1.

    Returns
    -------
    deviance : float
        The deviance function as defined below
    '''
    # Bernoulli case: a scalar number of trials equal to one.
    is_bernoulli = np.shape(self.n) == () and self.n == 1
    if is_bernoulli:
        hit = np.equal(endog, 1)
        # 1e-200 guards against log(0) when mu touches 0 or 1.
        loglike = (hit * np.log(mu + 1e-200) +
                   (1 - hit) * np.log(1 - mu + 1e-200))
        return -2 * np.sum(loglike * freq_weights, axis=axis)
    # Binomial case: endog is a proportion of successes over self.n trials.
    success = endog * np.log(endog / mu + 1e-200)
    failure = (1 - endog) * np.log((1 - endog) / (1 - mu) + 1e-200)
    return 2 * np.sum(self.n * freq_weights * (success + failure), axis=axis)
[ "def", "deviance", "(", "self", ",", "endog", ",", "mu", ",", "freq_weights", "=", "1", ",", "scale", "=", "1.", ",", "axis", "=", "None", ")", ":", "if", "np", ".", "shape", "(", "self", ".", "n", ")", "==", "(", ")", "and", "self", ".", "n"...
r''' Deviance function for either Bernoulli or Binomial data. Parameters ---------- endog : array-like Endogenous response variable (already transformed to a probability if appropriate). mu : array Fitted mean response variable freq_weights : array-like 1d array of frequency weights. The default is 1. scale : float, optional An optional scale argument. The default is 1. Returns -------- deviance : float The deviance function as defined below
[ "r", "Deviance", "function", "for", "either", "Bernoulli", "or", "Binomial", "data", "." ]
python
train
GNS3/gns3-server
gns3server/compute/port_manager.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/port_manager.py#L168-L184
def _check_port(host, port, socket_type): """ Check if an a port is available and raise an OSError if port is not available :returns: boolean """ if socket_type == "UDP": socket_type = socket.SOCK_DGRAM else: socket_type = socket.SOCK_STREAM for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket_type, 0, socket.AI_PASSIVE): af, socktype, proto, _, sa = res with socket.socket(af, socktype, proto) as s: s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(sa) # the port is available if bind is a success return True
[ "def", "_check_port", "(", "host", ",", "port", ",", "socket_type", ")", ":", "if", "socket_type", "==", "\"UDP\"", ":", "socket_type", "=", "socket", ".", "SOCK_DGRAM", "else", ":", "socket_type", "=", "socket", ".", "SOCK_STREAM", "for", "res", "in", "so...
Check if an a port is available and raise an OSError if port is not available :returns: boolean
[ "Check", "if", "an", "a", "port", "is", "available", "and", "raise", "an", "OSError", "if", "port", "is", "not", "available" ]
python
train
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L4417-L4445
def nvmlDeviceGetSupportedEventTypes(handle):
    r"""
    Return the bitmask of NVML event types supported by the device.

    Wraps the native entry point documented below.

    /**
     * Returns information about events supported on device
     *
     * For Fermi &tm; or newer fully supported devices.
     *
     * Events are not supported on Windows. So this function returns an
     * empty mask in \a eventTypes on Windows.
     *
     * @param device                               The identifier of the target device
     * @param eventTypes                           Reference in which to return bitmask of supported events
     *
     * @return
     *         - \ref NVML_SUCCESS                 if the eventTypes has been set
     *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
     *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a eventType is NULL
     *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
     *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
     *
     * @see nvmlEventType
     * @see nvmlDeviceRegisterEvents
     */
    nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes
    """
    # Out-parameter for the 64-bit event-type bitmask.
    c_eventTypes = c_ulonglong()
    # Resolve the native NVML function pointer lazily.
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedEventTypes")
    ret = fn(handle, byref(c_eventTypes))
    # Raises on any non-success NVML return code.
    _nvmlCheckReturn(ret)
    # NOTE(review): .value is an int; bytes_to_str appears to be the
    # module-wide py2/py3 normalization helper -- presumably it passes
    # non-bytes values through unchanged. Confirm against its definition.
    return bytes_to_str(c_eventTypes.value)
[ "def", "nvmlDeviceGetSupportedEventTypes", "(", "handle", ")", ":", "c_eventTypes", "=", "c_ulonglong", "(", ")", "fn", "=", "_nvmlGetFunctionPointer", "(", "\"nvmlDeviceGetSupportedEventTypes\"", ")", "ret", "=", "fn", "(", "handle", ",", "byref", "(", "c_eventType...
r""" /** * Returns information about events supported on device * * For Fermi &tm; or newer fully supported devices. * * Events are not supported on Windows. So this function returns an empty mask in \a eventTypes on Windows. * * @param device The identifier of the target device * @param eventTypes Reference in which to return bitmask of supported events * * @return * - \ref NVML_SUCCESS if the eventTypes has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventType is NULL * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlEventType * @see nvmlDeviceRegisterEvents */ nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes
[ "r", "/", "**", "*", "Returns", "information", "about", "events", "supported", "on", "device", "*", "*", "For", "Fermi", "&tm", ";", "or", "newer", "fully", "supported", "devices", ".", "*", "*", "Events", "are", "not", "supported", "on", "Windows", ".",...
python
train
numenta/htmresearch
htmresearch/algorithms/TM.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/TM.py#L2310-L2352
def printSegment(self): """Print segment information for verbose messaging and debugging. This uses the following format: ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75 where: 54413 - is the unique segment id True - is sequence segment 0.64801 - moving average duty cycle (24/36) - (numPositiveActivations / numTotalActivations) 101 - age, number of iterations since last activated [9,1]0.75 - synapse from column 9, cell #1, strength 0.75 [10,1]0.75 - synapse from column 10, cell #1, strength 0.75 [11,1]0.75 - synapse from column 11, cell #1, strength 0.75 """ # Segment ID print "ID:%-5d" % (self.segID), # Sequence segment or pooling segment if self.isSequenceSeg: print "True", else: print "False", # Duty cycle print "%9.7f" % (self.dutyCycle(readOnly=True)), # numPositive/totalActivations print "(%4d/%-4d)" % (self.positiveActivations, self.totalActivations), # Age print "%4d" % (self.tp.lrnIterationIdx - self.lastActiveIteration), # Print each synapses on this segment as: srcCellCol/srcCellIdx/perm # if the permanence is above connected, put [] around the synapse info # For aid in comparing to the C++ implementation, print them in sorted # order sortedSyns = sorted(self.syns) for _, synapse in enumerate(sortedSyns): print "[%d,%d]%4.2f" % (synapse[0], synapse[1], synapse[2]), print
[ "def", "printSegment", "(", "self", ")", ":", "# Segment ID", "print", "\"ID:%-5d\"", "%", "(", "self", ".", "segID", ")", ",", "# Sequence segment or pooling segment", "if", "self", ".", "isSequenceSeg", ":", "print", "\"True\"", ",", "else", ":", "print", "\...
Print segment information for verbose messaging and debugging. This uses the following format: ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75 where: 54413 - is the unique segment id True - is sequence segment 0.64801 - moving average duty cycle (24/36) - (numPositiveActivations / numTotalActivations) 101 - age, number of iterations since last activated [9,1]0.75 - synapse from column 9, cell #1, strength 0.75 [10,1]0.75 - synapse from column 10, cell #1, strength 0.75 [11,1]0.75 - synapse from column 11, cell #1, strength 0.75
[ "Print", "segment", "information", "for", "verbose", "messaging", "and", "debugging", ".", "This", "uses", "the", "following", "format", ":" ]
python
train
limix/glimix-core
glimix_core/glmm/_glmm.py
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/glmm/_glmm.py#L231-L235
def posteriori_covariance(self):
    r"""Covariance of the estimated posteriori.

    Combines the prior covariance with the EP site precisions and inverts
    the resulting precision matrix.
    """
    prior_cov = GLMM.covariance(self)
    site_tau = self._ep._posterior.tau
    # Posterior precision = prior precision + diagonal site precisions.
    precision = pinv(prior_cov) + diag(1 / site_tau)
    return pinv(precision)
[ "def", "posteriori_covariance", "(", "self", ")", ":", "K", "=", "GLMM", ".", "covariance", "(", "self", ")", "tau", "=", "self", ".", "_ep", ".", "_posterior", ".", "tau", "return", "pinv", "(", "pinv", "(", "K", ")", "+", "diag", "(", "1", "/", ...
r""" Covariance of the estimated posteriori.
[ "r", "Covariance", "of", "the", "estimated", "posteriori", "." ]
python
valid
rigetti/pyquil
pyquil/operator_estimation.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/operator_estimation.py#L746-L900
def measure_observables(qc: QuantumComputer, tomo_experiment: TomographyExperiment, n_shots: int = 10000, progress_callback=None, active_reset=False, symmetrize_readout: str = 'exhaustive', calibrate_readout: str = 'plus-eig'): """ Measure all the observables in a TomographyExperiment. :param qc: A QuantumComputer which can run quantum programs :param tomo_experiment: A suite of tomographic observables to measure :param n_shots: The number of shots to take per ExperimentSetting :param progress_callback: If not None, this function is called each time a group of settings is run with arguments ``f(i, len(tomo_experiment)`` such that the progress is ``i / len(tomo_experiment)``. :param active_reset: Whether to actively reset qubits instead of waiting several times the coherence length for qubits to decay to |0> naturally. Setting this to True is much faster but there is a ~1% error per qubit in the reset operation. Thermal noise from "traditional" reset is not routinely characterized but is of the same order. :param symmetrize_readout: Method used to symmetrize the readout errors, i.e. set p(0|1) = p(1|0). For uncorrelated readout errors, this can be achieved by randomly selecting between the POVMs {X.D1.X, X.D0.X} and {D0, D1} (where both D0 and D1 are diagonal). However, here we currently support exhaustive symmetrization and loop through all possible 2^n POVMs {X/I . POVM . X/I}^n, and obtain symmetrization more generally, i.e. set p(00|00) = p(01|01) = .. = p(11|11), as well as p(00|01) = p(01|00) etc. If this is None, no symmetrization is performed. The exhaustive method can be specified by setting this variable to 'exhaustive' (default value). Set to `None` if no symmetrization is desired. :param calibrate_readout: Method used to calibrate the readout results. Currently, the only method supported is normalizing against the operator's expectation value in its +1 eigenstate, which can be specified by setting this variable to 'plus-eig' (default value). 
The preceding symmetrization and this step together yield a more accurate estimation of the observable. Set to `None` if no calibration is desired. """ # calibration readout only works with symmetrization turned on if calibrate_readout is not None and symmetrize_readout is None: raise ValueError("Readout calibration only works with readout symmetrization turned on") # Outer loop over a collection of grouped settings for which we can simultaneously # estimate. for i, settings in enumerate(tomo_experiment): log.info(f"Collecting bitstrings for the {len(settings)} settings: {settings}") # 1.1 Prepare a state according to the amalgam of all setting.in_state total_prog = Program() if active_reset: total_prog += RESET() max_weight_in_state = _max_weight_state(setting.in_state for setting in settings) for oneq_state in max_weight_in_state.states: total_prog += _one_q_state_prep(oneq_state) # 1.2 Add in the program total_prog += tomo_experiment.program # 1.3 Measure the state according to setting.out_operator max_weight_out_op = _max_weight_operator(setting.out_operator for setting in settings) for qubit, op_str in max_weight_out_op: total_prog += _local_pauli_eig_meas(op_str, qubit) # 2. 
Symmetrization qubits = max_weight_out_op.get_qubits() if symmetrize_readout == 'exhaustive' and len(qubits) > 0: bitstrings, d_qub_idx = _exhaustive_symmetrization(qc, qubits, n_shots, total_prog) elif symmetrize_readout is None and len(qubits) > 0: total_prog_no_symm = total_prog.copy() ro = total_prog_no_symm.declare('ro', 'BIT', len(qubits)) d_qub_idx = {} for i, q in enumerate(qubits): total_prog_no_symm += MEASURE(q, ro[i]) # Keep track of qubit-classical register mapping via dict d_qub_idx[q] = i total_prog_no_symm.wrap_in_numshots_loop(n_shots) total_prog_no_symm_native = qc.compiler.quil_to_native_quil(total_prog_no_symm) total_prog_no_symm_bin = qc.compiler.native_quil_to_executable(total_prog_no_symm_native) bitstrings = qc.run(total_prog_no_symm_bin) elif len(qubits) == 0: # looks like an identity operation pass else: raise ValueError("Readout symmetrization method must be either 'exhaustive' or None") if progress_callback is not None: progress_callback(i, len(tomo_experiment)) # 3. Post-process # Inner loop over the grouped settings. They only differ in which qubits' measurements # we include in the post-processing. For example, if `settings` is Z1, Z2, Z1Z2 and we # measure (n_shots, n_qubits=2) obs_strings then the full operator value involves selecting # either the first column, second column, or both and multiplying along the row. for setting in settings: # 3.1 Get the term's coefficient so we can multiply it in later. coeff = complex(setting.out_operator.coefficient) if not np.isclose(coeff.imag, 0): raise ValueError(f"{setting}'s out_operator has a complex coefficient.") coeff = coeff.real # 3.2 Special case for measuring the "identity" operator, which doesn't make much # sense but should happen perfectly. 
if is_identity(setting.out_operator): yield ExperimentResult( setting=setting, expectation=coeff, std_err=0.0, total_counts=n_shots, ) continue # 3.3 Obtain statistics from result of experiment obs_mean, obs_var = _stats_from_measurements(bitstrings, d_qub_idx, setting, n_shots, coeff) if calibrate_readout == 'plus-eig': # 4 Readout calibration # 4.1 Obtain calibration program calibr_prog = _calibration_program(qc, tomo_experiment, setting) # 4.2 Perform symmetrization on the calibration program if symmetrize_readout == 'exhaustive': qubs_calibr = setting.out_operator.get_qubits() calibr_shots = n_shots calibr_results, d_calibr_qub_idx = _exhaustive_symmetrization(qc, qubs_calibr, calibr_shots, calibr_prog) else: raise ValueError("Readout symmetrization method must be either 'exhaustive' or None") # 4.3 Obtain statistics from the measurement process obs_calibr_mean, obs_calibr_var = _stats_from_measurements(calibr_results, d_calibr_qub_idx, setting, calibr_shots) # 4.3 Calibrate the readout results corrected_mean = obs_mean / obs_calibr_mean corrected_var = ratio_variance(obs_mean, obs_var, obs_calibr_mean, obs_calibr_var) yield ExperimentResult( setting=setting, expectation=corrected_mean.item(), std_err=np.sqrt(corrected_var).item(), total_counts=n_shots, raw_expectation=obs_mean.item(), raw_std_err=np.sqrt(obs_var).item(), calibration_expectation=obs_calibr_mean.item(), calibration_std_err=np.sqrt(obs_calibr_var).item(), calibration_counts=calibr_shots, ) elif calibrate_readout is None: # No calibration yield ExperimentResult( setting=setting, expectation=obs_mean.item(), std_err=np.sqrt(obs_var).item(), total_counts=n_shots, ) else: raise ValueError("Calibration readout method must be either 'plus-eig' or None")
[ "def", "measure_observables", "(", "qc", ":", "QuantumComputer", ",", "tomo_experiment", ":", "TomographyExperiment", ",", "n_shots", ":", "int", "=", "10000", ",", "progress_callback", "=", "None", ",", "active_reset", "=", "False", ",", "symmetrize_readout", ":"...
Measure all the observables in a TomographyExperiment. :param qc: A QuantumComputer which can run quantum programs :param tomo_experiment: A suite of tomographic observables to measure :param n_shots: The number of shots to take per ExperimentSetting :param progress_callback: If not None, this function is called each time a group of settings is run with arguments ``f(i, len(tomo_experiment)`` such that the progress is ``i / len(tomo_experiment)``. :param active_reset: Whether to actively reset qubits instead of waiting several times the coherence length for qubits to decay to |0> naturally. Setting this to True is much faster but there is a ~1% error per qubit in the reset operation. Thermal noise from "traditional" reset is not routinely characterized but is of the same order. :param symmetrize_readout: Method used to symmetrize the readout errors, i.e. set p(0|1) = p(1|0). For uncorrelated readout errors, this can be achieved by randomly selecting between the POVMs {X.D1.X, X.D0.X} and {D0, D1} (where both D0 and D1 are diagonal). However, here we currently support exhaustive symmetrization and loop through all possible 2^n POVMs {X/I . POVM . X/I}^n, and obtain symmetrization more generally, i.e. set p(00|00) = p(01|01) = .. = p(11|11), as well as p(00|01) = p(01|00) etc. If this is None, no symmetrization is performed. The exhaustive method can be specified by setting this variable to 'exhaustive' (default value). Set to `None` if no symmetrization is desired. :param calibrate_readout: Method used to calibrate the readout results. Currently, the only method supported is normalizing against the operator's expectation value in its +1 eigenstate, which can be specified by setting this variable to 'plus-eig' (default value). The preceding symmetrization and this step together yield a more accurate estimation of the observable. Set to `None` if no calibration is desired.
[ "Measure", "all", "the", "observables", "in", "a", "TomographyExperiment", "." ]
python
train
necaris/python3-openid
openid/message.py
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/message.py#L256-L268
def setOpenIDNamespace(self, openid_ns_uri, implicit): """Set the OpenID namespace URI used in this message. @raises InvalidOpenIDNamespace: if the namespace is not in L{Message.allowed_openid_namespaces} """ if isinstance(openid_ns_uri, bytes): openid_ns_uri = str(openid_ns_uri, encoding="utf-8") if openid_ns_uri not in self.allowed_openid_namespaces: raise InvalidOpenIDNamespace(openid_ns_uri) self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit) self._openid_ns_uri = openid_ns_uri
[ "def", "setOpenIDNamespace", "(", "self", ",", "openid_ns_uri", ",", "implicit", ")", ":", "if", "isinstance", "(", "openid_ns_uri", ",", "bytes", ")", ":", "openid_ns_uri", "=", "str", "(", "openid_ns_uri", ",", "encoding", "=", "\"utf-8\"", ")", "if", "ope...
Set the OpenID namespace URI used in this message. @raises InvalidOpenIDNamespace: if the namespace is not in L{Message.allowed_openid_namespaces}
[ "Set", "the", "OpenID", "namespace", "URI", "used", "in", "this", "message", "." ]
python
train
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L399-L401
def open_mask(fn:PathOrStr, div=False, convert_mode='L', after_open:Callable=None)->ImageSegment: "Return `ImageSegment` object create from mask in file `fn`. If `div`, divides pixel values by 255." return open_image(fn, div=div, convert_mode=convert_mode, cls=ImageSegment, after_open=after_open)
[ "def", "open_mask", "(", "fn", ":", "PathOrStr", ",", "div", "=", "False", ",", "convert_mode", "=", "'L'", ",", "after_open", ":", "Callable", "=", "None", ")", "->", "ImageSegment", ":", "return", "open_image", "(", "fn", ",", "div", "=", "div", ",",...
Return `ImageSegment` object create from mask in file `fn`. If `div`, divides pixel values by 255.
[ "Return", "ImageSegment", "object", "create", "from", "mask", "in", "file", "fn", ".", "If", "div", "divides", "pixel", "values", "by", "255", "." ]
python
train
deepmind/pysc2
pysc2/lib/stopwatch.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/stopwatch.py#L60-L64
def dev(self): """Standard deviation.""" if self.num == 0: return 0 return math.sqrt(max(0, self.sum_sq / self.num - (self.sum / self.num)**2))
[ "def", "dev", "(", "self", ")", ":", "if", "self", ".", "num", "==", "0", ":", "return", "0", "return", "math", ".", "sqrt", "(", "max", "(", "0", ",", "self", ".", "sum_sq", "/", "self", ".", "num", "-", "(", "self", ".", "sum", "/", "self",...
Standard deviation.
[ "Standard", "deviation", "." ]
python
train
Parquery/icontract
icontract/_recompute.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_recompute.py#L386-L394
def visit_SetComp(self, node: ast.SetComp) -> Any: """Compile the set comprehension as a function and call it.""" result = self._execute_comprehension(node=node) for generator in node.generators: self.visit(generator.iter) self.recomputed_values[node] = result return result
[ "def", "visit_SetComp", "(", "self", ",", "node", ":", "ast", ".", "SetComp", ")", "->", "Any", ":", "result", "=", "self", ".", "_execute_comprehension", "(", "node", "=", "node", ")", "for", "generator", "in", "node", ".", "generators", ":", "self", ...
Compile the set comprehension as a function and call it.
[ "Compile", "the", "set", "comprehension", "as", "a", "function", "and", "call", "it", "." ]
python
train
uw-it-aca/uw-restclients-sws
uw_sws/enrollment.py
https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/enrollment.py#L216-L228
def get_enrollment_history_by_regid(regid, verbose='true', transcriptable_course='all', changed_since_date='', include_unfinished_pce_course_reg=False): """ :return: a complete chronological list of all the enrollemnts [Enrollment], where the Enrollment object has a term element. """ return _json_to_enrollment_list( _enrollment_search(regid, verbose, transcriptable_course, changed_since_date), include_unfinished_pce_course_reg)
[ "def", "get_enrollment_history_by_regid", "(", "regid", ",", "verbose", "=", "'true'", ",", "transcriptable_course", "=", "'all'", ",", "changed_since_date", "=", "''", ",", "include_unfinished_pce_course_reg", "=", "False", ")", ":", "return", "_json_to_enrollment_list...
:return: a complete chronological list of all the enrollemnts [Enrollment], where the Enrollment object has a term element.
[ ":", "return", ":", "a", "complete", "chronological", "list", "of", "all", "the", "enrollemnts", "[", "Enrollment", "]", "where", "the", "Enrollment", "object", "has", "a", "term", "element", "." ]
python
train
etingof/pysnmp
pysnmp/smi/mibs/SNMPv2-TC.py
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/mibs/SNMPv2-TC.py#L250-L437
def prettyIn(self, value): # override asn1 type method """Implements DISPLAY-HINT parsing into base SNMP value Proper parsing seems impossible due to ambiguities. Here we are trying to do our best, but be prepared for failures on complicated DISPLAY-HINTs. Keep in mind that this parser only works with "text" input meaning `unicode` (Py2) or `str` (Py3). """ for base in inspect.getmro(self.__class__): if not issubclass(base, TextualConvention) and issubclass(base, Asn1Item): break else: raise SmiError('TEXTUAL-CONVENTION has no underlying SNMP base type') if self.displayHint and (self.__integer.isSuperTypeOf(self, matchConstraints=False) and self.getNamedValues() or self.__unsigned32.isSuperTypeOf(self, matchConstraints=False) or self.__timeticks.isSuperTypeOf(self, matchConstraints=False)): value = str(value) _ = lambda t, f=0: (t, f) displayHintType, decimalPrecision = _(*self.displayHint.split('-')) if displayHintType == 'x' and (value.startswith('0x') or value.startswith('-0x')): try: if value.startswith('-'): return base.prettyIn(self, -int(value[3:], 16)) else: return base.prettyIn(self, int(value[2:], 16)) except Exception as exc: raise SmiError( 'integer evaluation error: %s' % exc ) elif displayHintType == 'd': try: return base.prettyIn(self, int(float(value) * 10**int(decimalPrecision))) except Exception as exc: raise SmiError( 'float evaluation error: %s' % exc ) elif displayHintType == 'o' and (value.startswith('0') or value.startswith('-0')): try: return base.prettyIn(self, int(value, 8)) except Exception as exc: raise SmiError( 'octal evaluation error: %s' % exc ) elif displayHintType == 'b' and (value.startswith('B') or value.startswith('-B')): negative = value.startswith('-') if negative: value = value[2:] else: value = value[1:] value = [x != '0' and 1 or 0 for x in value] binValue = 0 while value: binValue <<= value[0] del value[0] return base.prettyIn(self, binValue) else: raise SmiError( 'Unsupported numeric type spec "%s" at %s' % 
(displayHintType, self.__class__.__name__) ) elif self.displayHint and self.__octetString.isSuperTypeOf(self, matchConstraints=False): numBase = { 'x': 16, 'd': 10, 'o': 8 } numDigits = { 'x': octets.str2octs(string.hexdigits), 'o': octets.str2octs(string.octdigits), 'd': octets.str2octs(string.digits) } # how do we know if object is initialized with display-hint # formatted text? based on "text" input maybe? # That boils down to `str` object on Py3 or `unicode` on Py2. if octets.isStringType(value) and not octets.isOctetsType(value): value = base.prettyIn(self, value) else: return base.prettyIn(self, value) outputValue = octets.str2octs('') runningValue = value displayHint = self.displayHint while runningValue and displayHint: # 1 this information is totally lost, just fail explicitly if displayHint[0] == '*': raise SmiError( 'Can\'t parse "*" in DISPLAY-HINT (%s)' % self.__class__.__name__ ) # 2 this becomes ambiguous when it comes to rendered value octetLength = '' while displayHint and displayHint[0] in string.digits: octetLength += displayHint[0] displayHint = displayHint[1:] # length is mandatory but people ignore that if not octetLength: octetLength = len(runningValue) try: octetLength = int(octetLength) except Exception: raise SmiError( 'Bad octet length: %s' % octetLength ) if not displayHint: raise SmiError( 'Short octet length: %s' % self.displayHint ) # 3 displayFormat = displayHint[0] displayHint = displayHint[1:] # 4 this is the lifesaver -- we could use it as an anchor if displayHint and displayHint[0] not in string.digits and displayHint[0] != '*': displaySep = displayHint[0] displayHint = displayHint[1:] else: displaySep = '' # 5 is probably impossible to support if displayFormat in ('a', 't'): outputValue += runningValue[:octetLength] elif displayFormat in numBase: if displaySep: guessedOctetLength = runningValue.find(octets.str2octs(displaySep)) if guessedOctetLength == -1: guessedOctetLength = len(runningValue) else: for idx in 
range(len(runningValue)): if runningValue[idx] not in numDigits[displayFormat]: guessedOctetLength = idx break else: guessedOctetLength = len(runningValue) try: num = int(octets.octs2str(runningValue[:guessedOctetLength]), numBase[displayFormat]) except Exception as exc: raise SmiError( 'Display format eval failure: %s: %s' % (runningValue[:guessedOctetLength], exc) ) num_as_bytes = [] if num: while num: num_as_bytes.append(num & 0xFF) num >>= 8 else: num_as_bytes = [0] while len(num_as_bytes) < octetLength: num_as_bytes.append(0) num_as_bytes.reverse() outputValue += octets.ints2octs(num_as_bytes) if displaySep: guessedOctetLength += 1 octetLength = guessedOctetLength else: raise SmiError( 'Unsupported display format char: %s' % displayFormat ) runningValue = runningValue[octetLength:] if not displayHint: displayHint = self.displayHint return base.prettyIn(self, outputValue) else: return base.prettyIn(self, value)
[ "def", "prettyIn", "(", "self", ",", "value", ")", ":", "# override asn1 type method", "for", "base", "in", "inspect", ".", "getmro", "(", "self", ".", "__class__", ")", ":", "if", "not", "issubclass", "(", "base", ",", "TextualConvention", ")", "and", "is...
Implements DISPLAY-HINT parsing into base SNMP value Proper parsing seems impossible due to ambiguities. Here we are trying to do our best, but be prepared for failures on complicated DISPLAY-HINTs. Keep in mind that this parser only works with "text" input meaning `unicode` (Py2) or `str` (Py3).
[ "Implements", "DISPLAY", "-", "HINT", "parsing", "into", "base", "SNMP", "value" ]
python
train
mozilla/crontabber
crontabber/mixins.py
https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/mixins.py#L118-L141
def with_resource_connection_as_argument(resource_name): """a class decorator for Crontabber Apps. This decorator will a class a _run_proxy method that passes a databsase connection as a context manager into the CronApp's run method. The connection will automatically be closed when the ConApp's run method ends. In order for this dectorator to function properly, it must be used in conjunction with previous dectorator, "with_transactional_resource" or equivalent. This decorator depends on the mechanims added by that decorator. """ connection_factory_attr_name = '%s_connection_factory' % resource_name def class_decorator(cls): def _run_proxy(self, *args, **kwargs): factory = getattr(self, connection_factory_attr_name) with factory() as connection: try: self.run(connection, *args, **kwargs) finally: factory.close_connection(connection, force=True) cls._run_proxy = _run_proxy return cls return class_decorator
[ "def", "with_resource_connection_as_argument", "(", "resource_name", ")", ":", "connection_factory_attr_name", "=", "'%s_connection_factory'", "%", "resource_name", "def", "class_decorator", "(", "cls", ")", ":", "def", "_run_proxy", "(", "self", ",", "*", "args", ","...
a class decorator for Crontabber Apps. This decorator will a class a _run_proxy method that passes a databsase connection as a context manager into the CronApp's run method. The connection will automatically be closed when the ConApp's run method ends. In order for this dectorator to function properly, it must be used in conjunction with previous dectorator, "with_transactional_resource" or equivalent. This decorator depends on the mechanims added by that decorator.
[ "a", "class", "decorator", "for", "Crontabber", "Apps", ".", "This", "decorator", "will", "a", "class", "a", "_run_proxy", "method", "that", "passes", "a", "databsase", "connection", "as", "a", "context", "manager", "into", "the", "CronApp", "s", "run", "met...
python
train
aio-libs/aioftp
aioftp/server.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/server.py#L391-L424
async def start(self, host=None, port=0, **kwargs): """ :py:func:`asyncio.coroutine` Start server. :param host: ip address to bind for listening. :type host: :py:class:`str` :param port: port number to bind for listening. :type port: :py:class:`int` :param kwargs: keyword arguments, they passed to :py:func:`asyncio.start_server` """ self._start_server_extra_arguments = kwargs self.connections = {} self.server_host = host self.server_port = port self.server = await asyncio.start_server( self.dispatcher, host, port, ssl=self.ssl, **self._start_server_extra_arguments, ) for sock in self.server.sockets: if sock.family in (socket.AF_INET, socket.AF_INET6): host, port, *_ = sock.getsockname() if not self.server_port: self.server_port = port if not self.server_host: self.server_host = host logger.info("serving on %s:%s", host, port)
[ "async", "def", "start", "(", "self", ",", "host", "=", "None", ",", "port", "=", "0", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_start_server_extra_arguments", "=", "kwargs", "self", ".", "connections", "=", "{", "}", "self", ".", "server_host"...
:py:func:`asyncio.coroutine` Start server. :param host: ip address to bind for listening. :type host: :py:class:`str` :param port: port number to bind for listening. :type port: :py:class:`int` :param kwargs: keyword arguments, they passed to :py:func:`asyncio.start_server`
[ ":", "py", ":", "func", ":", "asyncio", ".", "coroutine" ]
python
valid
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L2964-L2990
def crop(self, height, width, center_i=None, center_j=None): """Crop the image centered around center_i, center_j. Parameters ---------- height : int The height of the desired image. width : int The width of the desired image. center_i : int The center height point at which to crop. If not specified, the center of the image is used. center_j : int The center width point at which to crop. If not specified, the center of the image is used. Returns ------- :obj:`Image` A cropped Image of the same type. """ color_im_crop = self.color.crop(height, width, center_i, center_j) depth_im_crop = self.depth.crop(height, width, center_i, center_j) return RgbdImage.from_color_and_depth(color_im_crop, depth_im_crop)
[ "def", "crop", "(", "self", ",", "height", ",", "width", ",", "center_i", "=", "None", ",", "center_j", "=", "None", ")", ":", "color_im_crop", "=", "self", ".", "color", ".", "crop", "(", "height", ",", "width", ",", "center_i", ",", "center_j", ")"...
Crop the image centered around center_i, center_j. Parameters ---------- height : int The height of the desired image. width : int The width of the desired image. center_i : int The center height point at which to crop. If not specified, the center of the image is used. center_j : int The center width point at which to crop. If not specified, the center of the image is used. Returns ------- :obj:`Image` A cropped Image of the same type.
[ "Crop", "the", "image", "centered", "around", "center_i", "center_j", "." ]
python
train
stsouko/CIMtools
CIMtools/applicability_domain/leverage.py
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/leverage.py#L151-L170
def predict(self, X): """Predict inside or outside AD for X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- ad : array of shape = [n_samples] Array contains True (reaction in AD) and False (reaction residing outside AD). """ # Check is fit had been called check_is_fitted(self, ['inverse_influence_matrix']) # Check that X have correct shape X = check_array(X) return self.__find_leverages(X, self.inverse_influence_matrix) <= self.threshold_value
[ "def", "predict", "(", "self", ",", "X", ")", ":", "# Check is fit had been called", "check_is_fitted", "(", "self", ",", "[", "'inverse_influence_matrix'", "]", ")", "# Check that X have correct shape", "X", "=", "check_array", "(", "X", ")", "return", "self", "....
Predict inside or outside AD for X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- ad : array of shape = [n_samples] Array contains True (reaction in AD) and False (reaction residing outside AD).
[ "Predict", "inside", "or", "outside", "AD", "for", "X", "." ]
python
valid
theislab/scanpy
scanpy/datasets/__init__.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/datasets/__init__.py#L177-L192
def toggleswitch() -> AnnData: """Simulated toggleswitch. Data obtained simulating a simple toggleswitch `Gardner *et al.*, Nature (2000) <https://doi.org/10.1038/35002131>`__. Simulate via :func:`~scanpy.api.sim`. Returns ------- Annotated data matrix. """ filename = os.path.dirname(__file__) + '/toggleswitch.txt' adata = sc.read(filename, first_column_names=True) adata.uns['iroot'] = 0 return adata
[ "def", "toggleswitch", "(", ")", "->", "AnnData", ":", "filename", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "+", "'/toggleswitch.txt'", "adata", "=", "sc", ".", "read", "(", "filename", ",", "first_column_names", "=", "True", ")", "a...
Simulated toggleswitch. Data obtained simulating a simple toggleswitch `Gardner *et al.*, Nature (2000) <https://doi.org/10.1038/35002131>`__. Simulate via :func:`~scanpy.api.sim`. Returns ------- Annotated data matrix.
[ "Simulated", "toggleswitch", "." ]
python
train
samjabrahams/anchorhub
anchorhub/builtin/github/cstrategies.py
https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/builtin/github/cstrategies.py#L46-L64
def get(self, file_lines, index): """ Extract the specified AnchorHub tag, as well as the portion of the line that should be converted from the ATX style Markdown header. :param file_lines: List of strings corresponding to lines in a text file :param index: index of file_lines corresponding to the current line :return: [tag, string] - tag: the extracted AnchorHub tag. string - the portion of the header that should be converted into an anchor """ line = file_lines[index] start_index = line.find('# ') + 2 # Start index for header text start_tag = line.rfind(self._open) # Start index of AnchorHub tag end_tag = line.rfind(self._close) # End index of AnchorHub tag # The magic '+1' below knocks out the hash '#' character from extraction tag = line[start_tag + len(self._open) + 1:end_tag] string = line[start_index:start_tag] return [tag, string]
[ "def", "get", "(", "self", ",", "file_lines", ",", "index", ")", ":", "line", "=", "file_lines", "[", "index", "]", "start_index", "=", "line", ".", "find", "(", "'# '", ")", "+", "2", "# Start index for header text", "start_tag", "=", "line", ".", "rfin...
Extract the specified AnchorHub tag, as well as the portion of the line that should be converted from the ATX style Markdown header. :param file_lines: List of strings corresponding to lines in a text file :param index: index of file_lines corresponding to the current line :return: [tag, string] - tag: the extracted AnchorHub tag. string - the portion of the header that should be converted into an anchor
[ "Extract", "the", "specified", "AnchorHub", "tag", "as", "well", "as", "the", "portion", "of", "the", "line", "that", "should", "be", "converted", "from", "the", "ATX", "style", "Markdown", "header", "." ]
python
train
theonion/django-bulbs
bulbs/promotion/views.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/promotion/views.py#L171-L179
def perform_update(self, serializer): """creates a record in the `bulbs.promotion.PZoneHistory` :param obj: the instance saved :param created: boolean expressing if the object was newly created (`False` if updated) """ instance = serializer.save() # create history object instance.history.create(data=instance.data)
[ "def", "perform_update", "(", "self", ",", "serializer", ")", ":", "instance", "=", "serializer", ".", "save", "(", ")", "# create history object", "instance", ".", "history", ".", "create", "(", "data", "=", "instance", ".", "data", ")" ]
creates a record in the `bulbs.promotion.PZoneHistory` :param obj: the instance saved :param created: boolean expressing if the object was newly created (`False` if updated)
[ "creates", "a", "record", "in", "the", "bulbs", ".", "promotion", ".", "PZoneHistory" ]
python
train
kgori/treeCl
treeCl/tree.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/tree.py#L1091-L1114
def autocorrelated_relaxed_clock(self, root_rate, autocorrel, distribution='lognormal'): """ Attaches rates to each node according to autocorrelated lognormal model from Kishino et al.(2001), or autocorrelated exponential """ optioncheck(distribution, ['exponential', 'lognormal']) if autocorrel == 0: for node in self._tree.preorder_node_iter(): node.rate = root_rate return for node in self._tree.preorder_node_iter(): if node == self._tree.seed_node: node.rate = root_rate else: parent_rate = node.parent_node.rate bl = node.edge_length if distribution == 'lognormal': node.rate = logn_correlated_rate(parent_rate, bl, autocorrel) else: node.rate = np.random.exponential(parent_rate)
[ "def", "autocorrelated_relaxed_clock", "(", "self", ",", "root_rate", ",", "autocorrel", ",", "distribution", "=", "'lognormal'", ")", ":", "optioncheck", "(", "distribution", ",", "[", "'exponential'", ",", "'lognormal'", "]", ")", "if", "autocorrel", "==", "0"...
Attaches rates to each node according to autocorrelated lognormal model from Kishino et al.(2001), or autocorrelated exponential
[ "Attaches", "rates", "to", "each", "node", "according", "to", "autocorrelated", "lognormal", "model", "from", "Kishino", "et", "al", ".", "(", "2001", ")", "or", "autocorrelated", "exponential" ]
python
train
geophysics-ubonn/reda
lib/reda/eis/plots.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/eis/plots.py#L358-L370
def add(self, response, label=None): """add one response object to the list """ if not isinstance(response, sip_response.sip_response): raise Exception( 'can only add sip_reponse.sip_response objects' ) self.objects.append(response) if label is None: self.labels.append('na') else: self.labels.append(label)
[ "def", "add", "(", "self", ",", "response", ",", "label", "=", "None", ")", ":", "if", "not", "isinstance", "(", "response", ",", "sip_response", ".", "sip_response", ")", ":", "raise", "Exception", "(", "'can only add sip_reponse.sip_response objects'", ")", ...
add one response object to the list
[ "add", "one", "response", "object", "to", "the", "list" ]
python
train
mitsei/dlkit
dlkit/json_/assessment/mixins.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/mixins.py#L660-L666
def is_question_answered(self, question_id): """has the question matching item_id been answered and not skipped""" question_map = self._get_question_map(question_id) # will raise NotFound() if 'missingResponse' in question_map['responses'][0]: return False else: return True
[ "def", "is_question_answered", "(", "self", ",", "question_id", ")", ":", "question_map", "=", "self", ".", "_get_question_map", "(", "question_id", ")", "# will raise NotFound()", "if", "'missingResponse'", "in", "question_map", "[", "'responses'", "]", "[", "0", ...
has the question matching item_id been answered and not skipped
[ "has", "the", "question", "matching", "item_id", "been", "answered", "and", "not", "skipped" ]
python
train
slundberg/shap
shap/benchmark/metrics.py
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L297-L304
def keep_absolute_impute__roc_auc(X, y, model_generator, method_name, num_fcounts=11): """ Keep Absolute (impute) xlabel = "Max fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 19 """ return __run_measure(measures.keep_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
[ "def", "keep_absolute_impute__roc_auc", "(", "X", ",", "y", ",", "model_generator", ",", "method_name", ",", "num_fcounts", "=", "11", ")", ":", "return", "__run_measure", "(", "measures", ".", "keep_mask", ",", "X", ",", "y", ",", "model_generator", ",", "m...
Keep Absolute (impute) xlabel = "Max fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 19
[ "Keep", "Absolute", "(", "impute", ")", "xlabel", "=", "Max", "fraction", "of", "features", "kept", "ylabel", "=", "ROC", "AUC", "transform", "=", "identity", "sort_order", "=", "19" ]
python
train
heikomuller/sco-datastore
scodata/__init__.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/__init__.py#L398-L427
def experiments_predictions_attachments_download(self, experiment_id, run_id, resource_id): """Download a data file that has been attached with a successful model run. Parameters ---------- experiment_id : string Unique experiment identifier model_id : string Unique identifier of model to run resource_id : string Unique attachment identifier Returns ------- FileInfo Information about attachmed file on disk or None if attachment with given resource identifier exists """ # Get experiment to ensure that it exists if self.experiments_get(experiment_id) is None: return None attachment, mime_type = self.predictions.get_data_file_attachment( run_id, resource_id ) if attachment is None: return None # Return information about the result file return FileInfo(attachment, mime_type, os.path.basename(attachment))
[ "def", "experiments_predictions_attachments_download", "(", "self", ",", "experiment_id", ",", "run_id", ",", "resource_id", ")", ":", "# Get experiment to ensure that it exists", "if", "self", ".", "experiments_get", "(", "experiment_id", ")", "is", "None", ":", "retur...
Download a data file that has been attached with a successful model run. Parameters ---------- experiment_id : string Unique experiment identifier model_id : string Unique identifier of model to run resource_id : string Unique attachment identifier Returns ------- FileInfo Information about attachmed file on disk or None if attachment with given resource identifier exists
[ "Download", "a", "data", "file", "that", "has", "been", "attached", "with", "a", "successful", "model", "run", "." ]
python
train
rainwoodman/kdcount
kdcount/correlate.py
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L317-L326
def _update_mean_coords(self, dig, N, centers_sum, **paircoords): """ Update the mean coordinate sums """ if N is None or centers_sum is None: return N.flat[:] += utils.bincount(dig, 1., minlength=N.size) for i, dim in enumerate(self.dims): size = centers_sum[i].size centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
[ "def", "_update_mean_coords", "(", "self", ",", "dig", ",", "N", ",", "centers_sum", ",", "*", "*", "paircoords", ")", ":", "if", "N", "is", "None", "or", "centers_sum", "is", "None", ":", "return", "N", ".", "flat", "[", ":", "]", "+=", "utils", "...
Update the mean coordinate sums
[ "Update", "the", "mean", "coordinate", "sums" ]
python
train
PmagPy/PmagPy
programs/dayplot_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/dayplot_magic.py#L12-L45
def main(): """ NAME dayplot_magic.py DESCRIPTION makes 'day plots' (Day et al. 1977) and squareness/coercivity, plots 'linear mixing' curve from Dunlop and Carter-Stiglitz (2006). squareness coercivity of remanence (Neel, 1955) plots after Tauxe et al. (2002) SYNTAX dayplot_magic.py [command line options] OPTIONS -h prints help message and quits -f: specify input hysteresis file, default is specimens.txt -fmt [svg,png,jpg] format for output plots, default svg -sav saves plots and quits quietly """ args = sys.argv if "-h" in args: print(main.__doc__) sys.exit() dir_path = pmag.get_named_arg('-WD', '.') fmt = pmag.get_named_arg('-fmt', 'svg') save_plots = False interactive = True if '-sav' in sys.argv: save_plots = True interactive = False infile = pmag.get_named_arg("-f", "specimens.txt") ipmag.dayplot_magic(dir_path, infile, save=save_plots, fmt=fmt, interactive=interactive)
[ "def", "main", "(", ")", ":", "args", "=", "sys", ".", "argv", "if", "\"-h\"", "in", "args", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "dir_path", "=", "pmag", ".", "get_named_arg", "(", "'-WD'", ",", "'.'", ")...
NAME dayplot_magic.py DESCRIPTION makes 'day plots' (Day et al. 1977) and squareness/coercivity, plots 'linear mixing' curve from Dunlop and Carter-Stiglitz (2006). squareness coercivity of remanence (Neel, 1955) plots after Tauxe et al. (2002) SYNTAX dayplot_magic.py [command line options] OPTIONS -h prints help message and quits -f: specify input hysteresis file, default is specimens.txt -fmt [svg,png,jpg] format for output plots, default svg -sav saves plots and quits quietly
[ "NAME", "dayplot_magic", ".", "py" ]
python
train
fracpete/python-weka-wrapper3
python/weka/core/typeconv.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/typeconv.py#L25-L39
def string_array_to_list(a): """ Turns the Java string array into Python unicode string list. :param a: the string array to convert :type a: JB_Object :return: the string list :rtype: list """ result = [] length = javabridge.get_env().get_array_length(a) wrapped = javabridge.get_env().get_object_array_elements(a) for i in range(length): result.append(javabridge.get_env().get_string(wrapped[i])) return result
[ "def", "string_array_to_list", "(", "a", ")", ":", "result", "=", "[", "]", "length", "=", "javabridge", ".", "get_env", "(", ")", ".", "get_array_length", "(", "a", ")", "wrapped", "=", "javabridge", ".", "get_env", "(", ")", ".", "get_object_array_elemen...
Turns the Java string array into Python unicode string list. :param a: the string array to convert :type a: JB_Object :return: the string list :rtype: list
[ "Turns", "the", "Java", "string", "array", "into", "Python", "unicode", "string", "list", "." ]
python
train
dariosky/wfcli
wfcli/tossl.py
https://github.com/dariosky/wfcli/blob/87a9ed30dbd456f801135a55099f0541b0614ccb/wfcli/tossl.py#L76-L88
def get_affected_domains(self): """ Return a list of all affected domain and subdomains """ results = set() dotted_domain = ("." + self.domain) if self.domain else None for website in self.websites: for subdomain in website['subdomains']: if self.domain is None or subdomain == self.domain or \ (self.include_subdomains and subdomain.endswith(dotted_domain)): results.add(subdomain) # sort them by lenght so the shortest domain is the first results = sorted(list(results), key=lambda item: len(item)) return results
[ "def", "get_affected_domains", "(", "self", ")", ":", "results", "=", "set", "(", ")", "dotted_domain", "=", "(", "\".\"", "+", "self", ".", "domain", ")", "if", "self", ".", "domain", "else", "None", "for", "website", "in", "self", ".", "websites", ":...
Return a list of all affected domain and subdomains
[ "Return", "a", "list", "of", "all", "affected", "domain", "and", "subdomains" ]
python
train
agoragames/haigha
haigha/channel.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L214-L224
def dispatch(self, method_frame): ''' Dispatch a method. ''' klass = self._class_map.get(method_frame.class_id) if klass: klass.dispatch(method_frame) else: raise Channel.InvalidClass( "class %d is not supported on channel %d", method_frame.class_id, self.channel_id)
[ "def", "dispatch", "(", "self", ",", "method_frame", ")", ":", "klass", "=", "self", ".", "_class_map", ".", "get", "(", "method_frame", ".", "class_id", ")", "if", "klass", ":", "klass", ".", "dispatch", "(", "method_frame", ")", "else", ":", "raise", ...
Dispatch a method.
[ "Dispatch", "a", "method", "." ]
python
train
leonidessaguisagjr/unicodeutil
unicodeutil/hangulutil.py
https://github.com/leonidessaguisagjr/unicodeutil/blob/c25c882cf9cb38c123df49fad365be67e5818928/unicodeutil/hangulutil.py#L126-L167
def compose_hangul_syllable(jamo): """ Function for taking a tuple or list of Unicode scalar values representing Jamo and composing it into a Hangul syllable. If the values in the list or tuple passed in are not in the ranges of Jamo, a ValueError will be raised. The algorithm for doing the composition is described in the Unicode Standard, ch. 03, section 3.12, "Conjoining Jamo Behavior." Example: (U+1111, U+1171) -> U+D4CC (U+D4CC, U+11B6) -> U+D4DB (U+1111, U+1171, U+11B6) -> U+D4DB :param jamo: Tuple of list of Jamo to compose :return: Composed Hangul syllable """ fmt_str_invalid_sequence = "{0} does not represent a valid sequence of Jamo!" if len(jamo) == 3: l_part, v_part, t_part = jamo if not (l_part in range(0x1100, 0x1112 + 1) and v_part in range(0x1161, 0x1175 + 1) and t_part in range(0x11a8, 0x11c2 + 1)): raise ValueError(fmt_str_invalid_sequence.format(jamo)) l_index = l_part - L_BASE v_index = v_part - V_BASE t_index = t_part - T_BASE lv_index = l_index * N_COUNT + v_index * T_COUNT return S_BASE + lv_index + t_index elif len(jamo) == 2: if jamo[0] in range(0x1100, 0x1112 + 1) and jamo[1] in range(0x1161, 0x1175 + 1): l_part, v_part = jamo l_index = l_part - L_BASE v_index = v_part - V_BASE lv_index = l_index * N_COUNT + v_index * T_COUNT return S_BASE + lv_index elif _get_hangul_syllable_type(jamo[0]) == "LV" and jamo[1] in range(0x11a8, 0x11c2 + 1): lv_part, t_part = jamo t_index = t_part - T_BASE return lv_part + t_index else: raise ValueError(fmt_str_invalid_sequence.format(jamo)) else: raise ValueError(fmt_str_invalid_sequence.format(jamo))
[ "def", "compose_hangul_syllable", "(", "jamo", ")", ":", "fmt_str_invalid_sequence", "=", "\"{0} does not represent a valid sequence of Jamo!\"", "if", "len", "(", "jamo", ")", "==", "3", ":", "l_part", ",", "v_part", ",", "t_part", "=", "jamo", "if", "not", "(", ...
Function for taking a tuple or list of Unicode scalar values representing Jamo and composing it into a Hangul syllable. If the values in the list or tuple passed in are not in the ranges of Jamo, a ValueError will be raised. The algorithm for doing the composition is described in the Unicode Standard, ch. 03, section 3.12, "Conjoining Jamo Behavior." Example: (U+1111, U+1171) -> U+D4CC (U+D4CC, U+11B6) -> U+D4DB (U+1111, U+1171, U+11B6) -> U+D4DB :param jamo: Tuple of list of Jamo to compose :return: Composed Hangul syllable
[ "Function", "for", "taking", "a", "tuple", "or", "list", "of", "Unicode", "scalar", "values", "representing", "Jamo", "and", "composing", "it", "into", "a", "Hangul", "syllable", ".", "If", "the", "values", "in", "the", "list", "or", "tuple", "passed", "in...
python
train
jrmontag/STLDecompose
stldecompose/forecast_funcs.py
https://github.com/jrmontag/STLDecompose/blob/f53f89dab4b13618c1cf13f88a01e3e3dc8abdec/stldecompose/forecast_funcs.py#L61-L76
def drift(data, n=3, **kwargs): """The drift forecast for the next point is a linear extrapolation from the previous ``n`` points in the series. Args: data (np.array): Observed data, presumed to be ordered in time. n (int): period over which to calculate linear model for extrapolation Returns: float: a single-valued forecast for the next value in the series. """ yi = data[-n] yf = data[-1] slope = (yf - yi) / (n - 1) forecast = yf + slope return forecast
[ "def", "drift", "(", "data", ",", "n", "=", "3", ",", "*", "*", "kwargs", ")", ":", "yi", "=", "data", "[", "-", "n", "]", "yf", "=", "data", "[", "-", "1", "]", "slope", "=", "(", "yf", "-", "yi", ")", "/", "(", "n", "-", "1", ")", "...
The drift forecast for the next point is a linear extrapolation from the previous ``n`` points in the series. Args: data (np.array): Observed data, presumed to be ordered in time. n (int): period over which to calculate linear model for extrapolation Returns: float: a single-valued forecast for the next value in the series.
[ "The", "drift", "forecast", "for", "the", "next", "point", "is", "a", "linear", "extrapolation", "from", "the", "previous", "n", "points", "in", "the", "series", "." ]
python
train
arista-eosplus/pyeapi
pyeapi/api/abstract.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/abstract.py#L78-L97
def get_block(self, parent, config='running_config'): """ Scans the config and returns a block of code Args: parent (str): The parent string to search the config for and return the block config (str): A text config string to be searched. Default is to search the running-config of the Node. Returns: A string object that represents the block from the config. If the parent string is not found, then this method will return None. """ try: parent = r'^%s$' % parent return self.node.section(parent, config=config) except TypeError: return None
[ "def", "get_block", "(", "self", ",", "parent", ",", "config", "=", "'running_config'", ")", ":", "try", ":", "parent", "=", "r'^%s$'", "%", "parent", "return", "self", ".", "node", ".", "section", "(", "parent", ",", "config", "=", "config", ")", "exc...
Scans the config and returns a block of code Args: parent (str): The parent string to search the config for and return the block config (str): A text config string to be searched. Default is to search the running-config of the Node. Returns: A string object that represents the block from the config. If the parent string is not found, then this method will return None.
[ "Scans", "the", "config", "and", "returns", "a", "block", "of", "code" ]
python
train
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L4089-L4093
def show(self, idx): """ Print the instruction """ print(self.get_name() + " " + self.get_output(idx), end=' ')
[ "def", "show", "(", "self", ",", "idx", ")", ":", "print", "(", "self", ".", "get_name", "(", ")", "+", "\" \"", "+", "self", ".", "get_output", "(", "idx", ")", ",", "end", "=", "' '", ")" ]
Print the instruction
[ "Print", "the", "instruction" ]
python
train
da4089/simplefix
simplefix/message.py
https://github.com/da4089/simplefix/blob/10f7f165a99a03467110bee69cc7c083c3531c68/simplefix/message.py#L384-L436
def append_tz_time_only_parts(self, tag, h, m, s=None, ms=None, us=None, offset=0, header=False): """Append a field with a TZTimeOnly value from components. :param tag: Integer or string FIX tag number. :param h: Hours, in range 0 to 23. :param m: Minutes, in range 0 to 59. :param s: Optional seconds, in range 0 to 59 (60 for leap second). :param ms: Optional milliseconds, in range 0 to 999. :param us: Optional microseconds, in range 0 to 999. :param offset: Minutes east of UTC, in range -1439 to +1439. :param header: Append to FIX header if True; default to body. Formats the TZTimeOnly value from its components. If `s`, `ms` or `us` are None, the precision is truncated at that point.""" ih = int(h) if ih < 0 or ih > 23: raise ValueError("Hour value `h` (%u) out of range " "0 to 23" % ih) im = int(m) if im < 0 or im > 59: raise ValueError("Minute value `m` (%u) out of range " "0 to 59" % im) v = "%02u:%02u" % (ih, im) if s is not None: isec = int(s) if isec < 0 or isec > 60: raise ValueError("Seconds value `s` (%u) out of range " "0 to 60" % isec) v += ":%02u" % isec if ms is not None: ims = int(ms) if ims < 0 or ims > 999: raise ValueError("Milliseconds value `ms` (%u) " "out of range 0 to 999" % ims) v += ".%03u" % ims if us is not None: ius = int(us) if ius < 0 or ius > 999: raise ValueError("Microseconds value `us` (%u) " "out of range 0 to 999" % ius) v += "%03u" % ius v += self._tz_offset_string(offset) return self.append_pair(tag, v, header=header)
[ "def", "append_tz_time_only_parts", "(", "self", ",", "tag", ",", "h", ",", "m", ",", "s", "=", "None", ",", "ms", "=", "None", ",", "us", "=", "None", ",", "offset", "=", "0", ",", "header", "=", "False", ")", ":", "ih", "=", "int", "(", "h", ...
Append a field with a TZTimeOnly value from components. :param tag: Integer or string FIX tag number. :param h: Hours, in range 0 to 23. :param m: Minutes, in range 0 to 59. :param s: Optional seconds, in range 0 to 59 (60 for leap second). :param ms: Optional milliseconds, in range 0 to 999. :param us: Optional microseconds, in range 0 to 999. :param offset: Minutes east of UTC, in range -1439 to +1439. :param header: Append to FIX header if True; default to body. Formats the TZTimeOnly value from its components. If `s`, `ms` or `us` are None, the precision is truncated at that point.
[ "Append", "a", "field", "with", "a", "TZTimeOnly", "value", "from", "components", "." ]
python
train
waqasbhatti/astrobase
astrobase/lcproc/awsrun.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/awsrun.py#L194-L225
def cache_clean_handler(min_age_hours=1): """This periodically cleans up the ~/.astrobase cache to save us from disk-space doom. Parameters ---------- min_age_hours : int Files older than this number of hours from the current time will be deleted. Returns ------- Nothing. """ # find the files to delete cmd = ( "find ~ec2-user/.astrobase -type f -mmin +{mmin} -exec rm -v '{{}}' \;" ) mmin = '%.1f' % (min_age_hours*60.0) cmd = cmd.format(mmin=mmin) try: proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE) ndeleted = len(proc.stdout.decode().split('\n')) LOGWARNING('cache clean: %s files older than %s hours deleted' % (ndeleted, min_age_hours)) except Exception as e: LOGEXCEPTION('cache clean: could not delete old files')
[ "def", "cache_clean_handler", "(", "min_age_hours", "=", "1", ")", ":", "# find the files to delete", "cmd", "=", "(", "\"find ~ec2-user/.astrobase -type f -mmin +{mmin} -exec rm -v '{{}}' \\;\"", ")", "mmin", "=", "'%.1f'", "%", "(", "min_age_hours", "*", "60.0", ")", ...
This periodically cleans up the ~/.astrobase cache to save us from disk-space doom. Parameters ---------- min_age_hours : int Files older than this number of hours from the current time will be deleted. Returns ------- Nothing.
[ "This", "periodically", "cleans", "up", "the", "~", "/", ".", "astrobase", "cache", "to", "save", "us", "from", "disk", "-", "space", "doom", "." ]
python
valid
blockstack/blockstack-core
blockstack/lib/snv.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/snv.py#L58-L68
def get_bitcoind_client(): """ Connect to the bitcoind node """ bitcoind_opts = get_bitcoin_opts() bitcoind_host = bitcoind_opts['bitcoind_server'] bitcoind_port = bitcoind_opts['bitcoind_port'] bitcoind_user = bitcoind_opts['bitcoind_user'] bitcoind_passwd = bitcoind_opts['bitcoind_passwd'] return create_bitcoind_service_proxy(bitcoind_user, bitcoind_passwd, server=bitcoind_host, port=bitcoind_port)
[ "def", "get_bitcoind_client", "(", ")", ":", "bitcoind_opts", "=", "get_bitcoin_opts", "(", ")", "bitcoind_host", "=", "bitcoind_opts", "[", "'bitcoind_server'", "]", "bitcoind_port", "=", "bitcoind_opts", "[", "'bitcoind_port'", "]", "bitcoind_user", "=", "bitcoind_o...
Connect to the bitcoind node
[ "Connect", "to", "the", "bitcoind", "node" ]
python
train
tanghaibao/goatools
goatools/rpt/nts_xfrm.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/nts_xfrm.py#L15-L20
def get_set(self, fieldname): """Get all study items (e.g., geneids).""" set_items = set() for ntdata in self.nts: set_items |= getattr(ntdata, fieldname) return set_items
[ "def", "get_set", "(", "self", ",", "fieldname", ")", ":", "set_items", "=", "set", "(", ")", "for", "ntdata", "in", "self", ".", "nts", ":", "set_items", "|=", "getattr", "(", "ntdata", ",", "fieldname", ")", "return", "set_items" ]
Get all study items (e.g., geneids).
[ "Get", "all", "study", "items", "(", "e", ".", "g", ".", "geneids", ")", "." ]
python
train
casebeer/factual
factual/v2/requests.py
https://github.com/casebeer/factual/blob/f2795a8c9fd447c5d62887ae0f960481ce13be84/factual/v2/requests.py#L13-L15
def within(self, lat, lon, radius): '''Convenience method to apply a $loc/$within/$center filter. Radius is in meters.''' return self.filter(filter_helpers.within_(lat, lon, radius))
[ "def", "within", "(", "self", ",", "lat", ",", "lon", ",", "radius", ")", ":", "return", "self", ".", "filter", "(", "filter_helpers", ".", "within_", "(", "lat", ",", "lon", ",", "radius", ")", ")" ]
Convenience method to apply a $loc/$within/$center filter. Radius is in meters.
[ "Convenience", "method", "to", "apply", "a", "$loc", "/", "$within", "/", "$center", "filter", ".", "Radius", "is", "in", "meters", "." ]
python
train
tensorpack/tensorpack
examples/FasterRCNN/dataset.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/dataset.py#L49-L75
def print_coco_metrics(self, json_file): """ Args: json_file (str): path to the results json file in coco format Returns: dict: the evaluation metrics """ from pycocotools.cocoeval import COCOeval ret = {} cocoDt = self.coco.loadRes(json_file) cocoEval = COCOeval(self.coco, cocoDt, 'bbox') cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large'] for k in range(6): ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k] json_obj = json.load(open(json_file)) if len(json_obj) > 0 and 'segmentation' in json_obj[0]: cocoEval = COCOeval(self.coco, cocoDt, 'segm') cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() for k in range(6): ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k] return ret
[ "def", "print_coco_metrics", "(", "self", ",", "json_file", ")", ":", "from", "pycocotools", ".", "cocoeval", "import", "COCOeval", "ret", "=", "{", "}", "cocoDt", "=", "self", ".", "coco", ".", "loadRes", "(", "json_file", ")", "cocoEval", "=", "COCOeval"...
Args: json_file (str): path to the results json file in coco format Returns: dict: the evaluation metrics
[ "Args", ":", "json_file", "(", "str", ")", ":", "path", "to", "the", "results", "json", "file", "in", "coco", "format", "Returns", ":", "dict", ":", "the", "evaluation", "metrics" ]
python
train
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L342-L354
def set_line_text(self, line_nbr, new_text): """ Replace an entire line with ``new_text``. :param line_nbr: line number of the line to change. :param new_text: The replacement text. """ editor = self._editor text_cursor = self._move_cursor_to(line_nbr) text_cursor.select(text_cursor.LineUnderCursor) text_cursor.insertText(new_text) editor.setTextCursor(text_cursor)
[ "def", "set_line_text", "(", "self", ",", "line_nbr", ",", "new_text", ")", ":", "editor", "=", "self", ".", "_editor", "text_cursor", "=", "self", ".", "_move_cursor_to", "(", "line_nbr", ")", "text_cursor", ".", "select", "(", "text_cursor", ".", "LineUnde...
Replace an entire line with ``new_text``. :param line_nbr: line number of the line to change. :param new_text: The replacement text.
[ "Replace", "an", "entire", "line", "with", "new_text", "." ]
python
train
ubccr/pinky
pinky/perception/cycle.py
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/cycle.py#L77-L91
def set_aromatic(self): """set the cycle to be an aromatic ring""" #XXX FIX ME # this probably shouldn't be here for atom in self.atoms: atom.aromatic = 1 for bond in self.bonds: bond.aromatic = 1 bond.bondorder = 1.5 bond.bondtype = 4 bond.symbol = ":" bond.fixed = 1 self.aromatic = 1
[ "def", "set_aromatic", "(", "self", ")", ":", "#XXX FIX ME", "# this probably shouldn't be here", "for", "atom", "in", "self", ".", "atoms", ":", "atom", ".", "aromatic", "=", "1", "for", "bond", "in", "self", ".", "bonds", ":", "bond", ".", "aromatic", "=...
set the cycle to be an aromatic ring
[ "set", "the", "cycle", "to", "be", "an", "aromatic", "ring" ]
python
train
googleads/googleads-python-lib
examples/adwords/adwords_appengine_demo/handlers/api_handler.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/adwords_appengine_demo/handlers/api_handler.py#L150-L181
def GetAdGroups(self, client_customer_id, campaign_id): """Retrieves all AdGroups for the given campaign that haven't been removed. Args: client_customer_id: str Client Customer Id being used in API request. campaign_id: str id of the campaign for which to fetch ad groups. Returns: list List of AdGroup data objects. """ self.client.SetClientCustomerId(client_customer_id) selector = { 'fields': ['Id', 'Name', 'Status'], 'predicates': [ { 'field': 'CampaignId', 'operator': 'EQUALS', 'values': [campaign_id] }, { 'field': 'Status', 'operator': 'NOT_EQUALS', 'values': ['REMOVED'] } ] } adgroups = self.client.GetService('AdGroupService').get(selector) if int(adgroups['totalNumEntries']) > 0: return adgroups['entries'] else: return None
[ "def", "GetAdGroups", "(", "self", ",", "client_customer_id", ",", "campaign_id", ")", ":", "self", ".", "client", ".", "SetClientCustomerId", "(", "client_customer_id", ")", "selector", "=", "{", "'fields'", ":", "[", "'Id'", ",", "'Name'", ",", "'Status'", ...
Retrieves all AdGroups for the given campaign that haven't been removed. Args: client_customer_id: str Client Customer Id being used in API request. campaign_id: str id of the campaign for which to fetch ad groups. Returns: list List of AdGroup data objects.
[ "Retrieves", "all", "AdGroups", "for", "the", "given", "campaign", "that", "haven", "t", "been", "removed", "." ]
python
train
fitnr/convertdate
convertdate/french_republican.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/french_republican.py#L81-L95
def premier_da_la_annee(jd): '''Determine the year in the French revolutionary calendar in which a given Julian day falls. Returns Julian day number containing fall equinox (first day of the FR year)''' p = ephem.previous_fall_equinox(dublin.from_jd(jd)) previous = trunc(dublin.to_jd(p) - 0.5) + 0.5 if previous + 364 < jd: # test if current day is the equinox if the previous equinox was a long time ago n = ephem.next_fall_equinox(dublin.from_jd(jd)) nxt = trunc(dublin.to_jd(n) - 0.5) + 0.5 if nxt <= jd: return nxt return previous
[ "def", "premier_da_la_annee", "(", "jd", ")", ":", "p", "=", "ephem", ".", "previous_fall_equinox", "(", "dublin", ".", "from_jd", "(", "jd", ")", ")", "previous", "=", "trunc", "(", "dublin", ".", "to_jd", "(", "p", ")", "-", "0.5", ")", "+", "0.5",...
Determine the year in the French revolutionary calendar in which a given Julian day falls. Returns Julian day number containing fall equinox (first day of the FR year)
[ "Determine", "the", "year", "in", "the", "French", "revolutionary", "calendar", "in", "which", "a", "given", "Julian", "day", "falls", ".", "Returns", "Julian", "day", "number", "containing", "fall", "equinox", "(", "first", "day", "of", "the", "FR", "year",...
python
train
cole/aiosmtplib
src/aiosmtplib/esmtp.py
https://github.com/cole/aiosmtplib/blob/0cd00e5059005371cbdfca995feff9183a16a51f/src/aiosmtplib/esmtp.py#L189-L210
async def expn( self, address: str, timeout: DefaultNumType = _default ) -> SMTPResponse: """ Send an SMTP EXPN command, which expands a mailing list. Not many servers support this command. :raises SMTPResponseException: on unexpected server response code """ await self._ehlo_or_helo_if_needed() parsed_address = parse_address(address) async with self._command_lock: response = await self.execute_command( b"EXPN", parsed_address.encode("ascii"), timeout=timeout ) if response.code != SMTPStatus.completed: raise SMTPResponseException(response.code, response.message) return response
[ "async", "def", "expn", "(", "self", ",", "address", ":", "str", ",", "timeout", ":", "DefaultNumType", "=", "_default", ")", "->", "SMTPResponse", ":", "await", "self", ".", "_ehlo_or_helo_if_needed", "(", ")", "parsed_address", "=", "parse_address", "(", "...
Send an SMTP EXPN command, which expands a mailing list. Not many servers support this command. :raises SMTPResponseException: on unexpected server response code
[ "Send", "an", "SMTP", "EXPN", "command", "which", "expands", "a", "mailing", "list", ".", "Not", "many", "servers", "support", "this", "command", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/oneview_client.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L519-L528
def id_pools(self): """ Gets the IdPools API client. Returns: IdPools: """ if not self.__id_pools: self.__id_pools = IdPools(self.__connection) return self.__id_pools
[ "def", "id_pools", "(", "self", ")", ":", "if", "not", "self", ".", "__id_pools", ":", "self", ".", "__id_pools", "=", "IdPools", "(", "self", ".", "__connection", ")", "return", "self", ".", "__id_pools" ]
Gets the IdPools API client. Returns: IdPools:
[ "Gets", "the", "IdPools", "API", "client", "." ]
python
train
collectiveacuity/labPack
labpack/events/meetup.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/events/meetup.py#L1082-L1134
def get_member_brief(self, member_id=0): ''' a method to retrieve member profile info :param member_id: [optional] integer with member id from member profile :return: dictionary with member profile inside [json] key member_profile = self.objects.profile_brief.schema ''' # https://www.meetup.com/meetup_api/docs/members/:member_id/#get title = '%s.get_member_brief' % self.__class__.__name__ # validate inputs input_fields = { 'member_id': member_id } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct request fields url = '%s/members/' % self.endpoint params = { 'member_id': 'self' } if member_id: params['member_id'] = member_id # send request response_details = self._get_request(url, params=params) # construct method output dictionary profile_details = { 'json': {} } for key, value in response_details.items(): if not key == 'json': profile_details[key] = value # parse response if response_details['json']: if 'results' in response_details['json'].keys(): if response_details['json']['results']: details = response_details['json']['results'][0] for key, value in details.items(): if key != 'topics': profile_details['json'][key] = value profile_details['json'] = self.objects.profile_brief.ingest(**profile_details['json']) return profile_details
[ "def", "get_member_brief", "(", "self", ",", "member_id", "=", "0", ")", ":", "# https://www.meetup.com/meetup_api/docs/members/:member_id/#get\r", "title", "=", "'%s.get_member_brief'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs\r", "input_fields", ...
a method to retrieve member profile info :param member_id: [optional] integer with member id from member profile :return: dictionary with member profile inside [json] key member_profile = self.objects.profile_brief.schema
[ "a", "method", "to", "retrieve", "member", "profile", "info", ":", "param", "member_id", ":", "[", "optional", "]", "integer", "with", "member", "id", "from", "member", "profile", ":", "return", ":", "dictionary", "with", "member", "profile", "inside", "[", ...
python
train
marcomusy/vtkplotter
vtkplotter/shapes.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/shapes.py#L948-L980
def Plane(pos=(0, 0, 0), normal=(0, 0, 1), sx=1, sy=None, c="g", bc="darkgreen", alpha=1, texture=None): """ Draw a plane of size `sx` and `sy` oriented perpendicular to vector `normal` and so that it passes through point `pos`. |Plane| """ if sy is None: sy = sx ps = vtk.vtkPlaneSource() ps.SetResolution(1, 1) tri = vtk.vtkTriangleFilter() tri.SetInputConnection(ps.GetOutputPort()) tri.Update() poly = tri.GetOutput() axis = np.array(normal) / np.linalg.norm(normal) theta = np.arccos(axis[2]) phi = np.arctan2(axis[1], axis[0]) t = vtk.vtkTransform() t.PostMultiply() t.Scale(sx, sy, 1) t.RotateY(theta * 57.3) t.RotateZ(phi * 57.3) tf = vtk.vtkTransformPolyDataFilter() tf.SetInputData(poly) tf.SetTransform(t) tf.Update() pd = tf.GetOutput() actor = Actor(pd, c=c, bc=bc, alpha=alpha, texture=texture) actor.SetPosition(pos) settings.collectable_actors.append(actor) return actor
[ "def", "Plane", "(", "pos", "=", "(", "0", ",", "0", ",", "0", ")", ",", "normal", "=", "(", "0", ",", "0", ",", "1", ")", ",", "sx", "=", "1", ",", "sy", "=", "None", ",", "c", "=", "\"g\"", ",", "bc", "=", "\"darkgreen\"", ",", "alpha",...
Draw a plane of size `sx` and `sy` oriented perpendicular to vector `normal` and so that it passes through point `pos`. |Plane|
[ "Draw", "a", "plane", "of", "size", "sx", "and", "sy", "oriented", "perpendicular", "to", "vector", "normal", "and", "so", "that", "it", "passes", "through", "point", "pos", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xmultitagedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xmultitagedit.py#L79-L97
def createEditor( self, parent, option, index ): """ Overloads the create editor method to assign the parent's completer to any line edit created. :param parent | <QWidget> option | <QStyleOption> index | <QModelIndex> :return <QWidget> || None """ multi_tag = projexui.ancestor(self, XMultiTagEdit) edit = QLineEdit(parent) edit.setFrame(False) edit.setCompleter(multi_tag.completer()) edit.installEventFilter(multi_tag) return edit
[ "def", "createEditor", "(", "self", ",", "parent", ",", "option", ",", "index", ")", ":", "multi_tag", "=", "projexui", ".", "ancestor", "(", "self", ",", "XMultiTagEdit", ")", "edit", "=", "QLineEdit", "(", "parent", ")", "edit", ".", "setFrame", "(", ...
Overloads the create editor method to assign the parent's completer to any line edit created. :param parent | <QWidget> option | <QStyleOption> index | <QModelIndex> :return <QWidget> || None
[ "Overloads", "the", "create", "editor", "method", "to", "assign", "the", "parent", "s", "completer", "to", "any", "line", "edit", "created", ".", ":", "param", "parent", "|", "<QWidget", ">", "option", "|", "<QStyleOption", ">", "index", "|", "<QModelIndex",...
python
train
ForensicArtifacts/artifacts
artifacts/source_type.py
https://github.com/ForensicArtifacts/artifacts/blob/044a63bfb4448af33d085c69066c80f9505ae7ca/artifacts/source_type.py#L460-L478
def RegisterSourceType(cls, source_type_class): """Registers a source type. Source types are identified based on their type indicator. Args: source_type_class (type): source type. Raises: KeyError: if source types is already set for the corresponding type indicator. """ if source_type_class.TYPE_INDICATOR in cls._source_type_classes: raise KeyError( 'Source type already set for type: {0:s}.'.format( source_type_class.TYPE_INDICATOR)) cls._source_type_classes[source_type_class.TYPE_INDICATOR] = ( source_type_class)
[ "def", "RegisterSourceType", "(", "cls", ",", "source_type_class", ")", ":", "if", "source_type_class", ".", "TYPE_INDICATOR", "in", "cls", ".", "_source_type_classes", ":", "raise", "KeyError", "(", "'Source type already set for type: {0:s}.'", ".", "format", "(", "s...
Registers a source type. Source types are identified based on their type indicator. Args: source_type_class (type): source type. Raises: KeyError: if source types is already set for the corresponding type indicator.
[ "Registers", "a", "source", "type", "." ]
python
train
foremast/gogo-utils
src/gogoutils/generator.py
https://github.com/foremast/gogo-utils/blob/3909c2d26e49baa8ad68e6be40977d4370d7c1ca/src/gogoutils/generator.py#L161-L168
def archaius(self): """Generate archaius bucket path.""" bucket = self.format['s3_bucket'].format(**self.data) path = self.format['s3_bucket_path'].format(**self.data) archaius_name = self.format['s3_archaius_name'].format(**self.data) archaius = {'s3': archaius_name, 'bucket': bucket, 'path': path} return archaius
[ "def", "archaius", "(", "self", ")", ":", "bucket", "=", "self", ".", "format", "[", "'s3_bucket'", "]", ".", "format", "(", "*", "*", "self", ".", "data", ")", "path", "=", "self", ".", "format", "[", "'s3_bucket_path'", "]", ".", "format", "(", "...
Generate archaius bucket path.
[ "Generate", "archaius", "bucket", "path", "." ]
python
train
alerta/python-alerta-client
alertaclient/commands/cmd_signup.py
https://github.com/alerta/python-alerta-client/blob/7eb367b5fe87d5fc20b54dea8cddd7f09e251afa/alertaclient/commands/cmd_signup.py#L15-L30
def cli(obj, name, email, password, status, text): """Create new Basic Auth user.""" client = obj['client'] if not email: raise click.UsageError('Need "--email" to sign-up new user.') if not password: raise click.UsageError('Need "--password" to sign-up new user.') try: r = client.signup(name=name, email=email, password=password, status=status, attributes=None, text=text) except Exception as e: click.echo('ERROR: {}'.format(e)) sys.exit(1) if 'token' in r: click.echo('Signed Up.') else: raise AuthError
[ "def", "cli", "(", "obj", ",", "name", ",", "email", ",", "password", ",", "status", ",", "text", ")", ":", "client", "=", "obj", "[", "'client'", "]", "if", "not", "email", ":", "raise", "click", ".", "UsageError", "(", "'Need \"--email\" to sign-up new...
Create new Basic Auth user.
[ "Create", "new", "Basic", "Auth", "user", "." ]
python
train
PyCQA/pylint
pylint/checkers/variables.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/variables.py#L192-L232
def _fix_dot_imports(not_consumed): """ Try to fix imports with multiple dots, by returning a dictionary with the import names expanded. The function unflattens root imports, like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree' and 'xml.sax' respectively. """ # TODO: this should be improved in issue astroid #46 names = {} for name, stmts in not_consumed.items(): if any( isinstance(stmt, astroid.AssignName) and isinstance(stmt.assign_type(), astroid.AugAssign) for stmt in stmts ): continue for stmt in stmts: if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)): continue for imports in stmt.names: second_name = None import_module_name = imports[0] if import_module_name == "*": # In case of wildcard imports, # pick the name from inside the imported module. second_name = name else: name_matches_dotted_import = False if ( import_module_name.startswith(name) and import_module_name.find(".") > -1 ): name_matches_dotted_import = True if name_matches_dotted_import or name in imports: # Most likely something like 'xml.etree', # which will appear in the .locals as 'xml'. # Only pick the name if it wasn't consumed. second_name = import_module_name if second_name and second_name not in names: names[second_name] = stmt return sorted(names.items(), key=lambda a: a[1].fromlineno)
[ "def", "_fix_dot_imports", "(", "not_consumed", ")", ":", "# TODO: this should be improved in issue astroid #46", "names", "=", "{", "}", "for", "name", ",", "stmts", "in", "not_consumed", ".", "items", "(", ")", ":", "if", "any", "(", "isinstance", "(", "stmt",...
Try to fix imports with multiple dots, by returning a dictionary with the import names expanded. The function unflattens root imports, like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree' and 'xml.sax' respectively.
[ "Try", "to", "fix", "imports", "with", "multiple", "dots", "by", "returning", "a", "dictionary", "with", "the", "import", "names", "expanded", ".", "The", "function", "unflattens", "root", "imports", "like", "xml", "(", "when", "we", "have", "both", "xml", ...
python
test
tensorpack/tensorpack
examples/FasterRCNN/eval.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/eval.py#L108-L146
def predict_dataflow(df, model_func, tqdm_bar=None): """ Args: df: a DataFlow which produces (image, image_id) model_func: a callable from the TF model. It takes image and returns (boxes, probs, labels, [masks]) tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None, will create a new one. Returns: list of dict, in the format used by `DetectionDataset.eval_or_save_inference_results` """ df.reset_state() all_results = [] with ExitStack() as stack: # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323 if tqdm_bar is None: tqdm_bar = stack.enter_context(get_tqdm(total=df.size())) for img, img_id in df: results = predict_image(img, model_func) for r in results: # int()/float() to make it json-serializable res = { 'image_id': img_id, 'category_id': int(r.class_id), 'bbox': [round(float(x), 4) for x in r.box], 'score': round(float(r.score), 4), } # also append segmentation to results if r.mask is not None: rle = cocomask.encode( np.array(r.mask[:, :, None], order='F'))[0] rle['counts'] = rle['counts'].decode('ascii') res['segmentation'] = rle all_results.append(res) tqdm_bar.update(1) return all_results
[ "def", "predict_dataflow", "(", "df", ",", "model_func", ",", "tqdm_bar", "=", "None", ")", ":", "df", ".", "reset_state", "(", ")", "all_results", "=", "[", "]", "with", "ExitStack", "(", ")", "as", "stack", ":", "# tqdm is not quite thread-safe: https://gith...
Args: df: a DataFlow which produces (image, image_id) model_func: a callable from the TF model. It takes image and returns (boxes, probs, labels, [masks]) tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None, will create a new one. Returns: list of dict, in the format used by `DetectionDataset.eval_or_save_inference_results`
[ "Args", ":", "df", ":", "a", "DataFlow", "which", "produces", "(", "image", "image_id", ")", "model_func", ":", "a", "callable", "from", "the", "TF", "model", ".", "It", "takes", "image", "and", "returns", "(", "boxes", "probs", "labels", "[", "masks", ...
python
train
opendatateam/udata
udata/frontend/csv.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/csv.py#L114-L117
def header(self): '''Generate the CSV header row''' return (super(NestedAdapter, self).header() + [name for name, getter in self.get_nested_fields()])
[ "def", "header", "(", "self", ")", ":", "return", "(", "super", "(", "NestedAdapter", ",", "self", ")", ".", "header", "(", ")", "+", "[", "name", "for", "name", ",", "getter", "in", "self", ".", "get_nested_fields", "(", ")", "]", ")" ]
Generate the CSV header row
[ "Generate", "the", "CSV", "header", "row" ]
python
train
mbr/flask-kvsession
flask_kvsession/__init__.py
https://github.com/mbr/flask-kvsession/blob/83238b74d4e4d2ffbdfd65c1c0a00ceb4bdfd9fa/flask_kvsession/__init__.py#L60-L67
def unserialize(cls, string): """Unserializes from a string. :param string: A string created by :meth:`serialize`. """ id_s, created_s = string.split('_') return cls(int(id_s, 16), datetime.utcfromtimestamp(int(created_s, 16)))
[ "def", "unserialize", "(", "cls", ",", "string", ")", ":", "id_s", ",", "created_s", "=", "string", ".", "split", "(", "'_'", ")", "return", "cls", "(", "int", "(", "id_s", ",", "16", ")", ",", "datetime", ".", "utcfromtimestamp", "(", "int", "(", ...
Unserializes from a string. :param string: A string created by :meth:`serialize`.
[ "Unserializes", "from", "a", "string", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L9951-L9962
def mount_status_send(self, target_system, target_component, pointing_a, pointing_b, pointing_c, force_mavlink1=False): ''' Message with some status from APM to GCS about camera or antenna mount target_system : System ID (uint8_t) target_component : Component ID (uint8_t) pointing_a : pitch(deg*100) (int32_t) pointing_b : roll(deg*100) (int32_t) pointing_c : yaw(deg*100) (int32_t) ''' return self.send(self.mount_status_encode(target_system, target_component, pointing_a, pointing_b, pointing_c), force_mavlink1=force_mavlink1)
[ "def", "mount_status_send", "(", "self", ",", "target_system", ",", "target_component", ",", "pointing_a", ",", "pointing_b", ",", "pointing_c", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "mount_status_encode"...
Message with some status from APM to GCS about camera or antenna mount target_system : System ID (uint8_t) target_component : Component ID (uint8_t) pointing_a : pitch(deg*100) (int32_t) pointing_b : roll(deg*100) (int32_t) pointing_c : yaw(deg*100) (int32_t)
[ "Message", "with", "some", "status", "from", "APM", "to", "GCS", "about", "camera", "or", "antenna", "mount" ]
python
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py#L231-L339
def create_table( self, parent, table_id, table, initial_splits=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> >>> # TODO: Initialize `table_id`: >>> table_id = '' >>> >>> # TODO: Initialize `table`: >>> table = {} >>> >>> response = client.create_table(parent, table_id, table) Args: parent (str): The unique name of the instance in which to create the table. Values are of the form ``projects/<project>/instances/<instance>``. table_id (str): The name by which the new table should be referred to within the parent instance, e.g., ``foobar`` rather than ``<parent>/tables/foobar``. table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): The Table to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Table` initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, ``s1`` and ``s2``, three tablets will be created, spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. 
Example: - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - initial\_split\_keys := ``["apple", "customer_1", "customer_2", "other"]`` - Key assignment: - Tablet 1 ``[, apple) => {"a"}.`` - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - Tablet 5 ``[other, ) => {"other", "zz"}.`` If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Split` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "create_table" not in self._inner_api_calls: self._inner_api_calls[ "create_table" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_table, default_retry=self._method_configs["CreateTable"].retry, default_timeout=self._method_configs["CreateTable"].timeout, client_info=self._client_info, ) request = bigtable_table_admin_pb2.CreateTableRequest( parent=parent, table_id=table_id, table=table, initial_splits=initial_splits ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["create_table"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "create_table", "(", "self", ",", "parent", ",", "table_id", ",", "table", ",", "initial_splits", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_c...
Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> >>> # TODO: Initialize `table_id`: >>> table_id = '' >>> >>> # TODO: Initialize `table`: >>> table = {} >>> >>> response = client.create_table(parent, table_id, table) Args: parent (str): The unique name of the instance in which to create the table. Values are of the form ``projects/<project>/instances/<instance>``. table_id (str): The name by which the new table should be referred to within the parent instance, e.g., ``foobar`` rather than ``<parent>/tables/foobar``. table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): The Table to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Table` initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, ``s1`` and ``s2``, three tablets will be created, spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. 
Example: - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - initial\_split\_keys := ``["apple", "customer_1", "customer_2", "other"]`` - Key assignment: - Tablet 1 ``[, apple) => {"a"}.`` - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - Tablet 5 ``[other, ) => {"other", "zz"}.`` If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Split` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Creates", "a", "new", "table", "in", "the", "specified", "instance", ".", "The", "table", "can", "be", "created", "with", "a", "full", "set", "of", "initial", "column", "families", "specified", "in", "the", "request", "." ]
python
train
inasafe/inasafe
safe/gui/tools/wizard/step_fc20_hazlayer_from_canvas.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc20_hazlayer_from_canvas.py#L72-L90
def selected_canvas_hazlayer(self): """Obtain the canvas layer selected by user. :returns: The currently selected map layer in the list. :rtype: QgsMapLayer """ if self.lstCanvasHazLayers.selectedItems(): item = self.lstCanvasHazLayers.currentItem() else: return None try: layer_id = item.data(Qt.UserRole) except (AttributeError, NameError): layer_id = None # noinspection PyArgumentList layer = QgsProject.instance().mapLayer(layer_id) return layer
[ "def", "selected_canvas_hazlayer", "(", "self", ")", ":", "if", "self", ".", "lstCanvasHazLayers", ".", "selectedItems", "(", ")", ":", "item", "=", "self", ".", "lstCanvasHazLayers", ".", "currentItem", "(", ")", "else", ":", "return", "None", "try", ":", ...
Obtain the canvas layer selected by user. :returns: The currently selected map layer in the list. :rtype: QgsMapLayer
[ "Obtain", "the", "canvas", "layer", "selected", "by", "user", "." ]
python
train
hubo1016/vlcp
vlcp/utils/connector.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/connector.py#L474-L482
async def run_async_task(self, container, asynctask, newthread = True): "Run asynctask(sender) in task pool, call sender(events) to send customized events, return result" e = TaskEvent(self, async_task = asynctask, newthread = newthread) await container.wait_for_send(e) ev = await TaskDoneEvent.createMatcher(e) if hasattr(ev, 'exception'): raise ev.exception else: return ev.result
[ "async", "def", "run_async_task", "(", "self", ",", "container", ",", "asynctask", ",", "newthread", "=", "True", ")", ":", "e", "=", "TaskEvent", "(", "self", ",", "async_task", "=", "asynctask", ",", "newthread", "=", "newthread", ")", "await", "containe...
Run asynctask(sender) in task pool, call sender(events) to send customized events, return result
[ "Run", "asynctask", "(", "sender", ")", "in", "task", "pool", "call", "sender", "(", "events", ")", "to", "send", "customized", "events", "return", "result" ]
python
train
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_rep.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_rep.py#L105-L120
def get_nodes(self, node_type=""): """ Gets all nodes of requested type. If no type is provided by user, all nodes in BPMN diagram graph are returned. Returns a dictionary, where key is an ID of node, value is a dictionary of all node attributes. :param node_type: string with valid BPMN XML tag name (e.g. 'task', 'sequenceFlow'). """ tmp_nodes = self.diagram_graph.nodes(True) if node_type == "": return tmp_nodes else: nodes = [] for node in tmp_nodes: if node[1][consts.Consts.type] == node_type: nodes.append(node) return nodes
[ "def", "get_nodes", "(", "self", ",", "node_type", "=", "\"\"", ")", ":", "tmp_nodes", "=", "self", ".", "diagram_graph", ".", "nodes", "(", "True", ")", "if", "node_type", "==", "\"\"", ":", "return", "tmp_nodes", "else", ":", "nodes", "=", "[", "]", ...
Gets all nodes of requested type. If no type is provided by user, all nodes in BPMN diagram graph are returned. Returns a dictionary, where key is an ID of node, value is a dictionary of all node attributes. :param node_type: string with valid BPMN XML tag name (e.g. 'task', 'sequenceFlow').
[ "Gets", "all", "nodes", "of", "requested", "type", ".", "If", "no", "type", "is", "provided", "by", "user", "all", "nodes", "in", "BPMN", "diagram", "graph", "are", "returned", ".", "Returns", "a", "dictionary", "where", "key", "is", "an", "ID", "of", ...
python
train
uber/rides-python-sdk
uber_rides/request.py
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/request.py#L154-L190
def _build_headers(self, method, auth_session): """Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises UberIllegalState (ApiError) Raised if headers are invalid. """ token_type = auth_session.token_type if auth_session.server_token: token = auth_session.server_token else: token = auth_session.oauth2credential.access_token if not self._authorization_headers_valid(token_type, token): message = 'Invalid token_type or token.' raise UberIllegalState(message) headers = { 'Authorization': ' '.join([token_type, token]), 'X-Uber-User-Agent': 'Python Rides SDK v{}'.format(LIB_VERSION), } if method in http.BODY_METHODS: headers.update(http.DEFAULT_CONTENT_HEADERS) return headers
[ "def", "_build_headers", "(", "self", ",", "method", ",", "auth_session", ")", ":", "token_type", "=", "auth_session", ".", "token_type", "if", "auth_session", ".", "server_token", ":", "token", "=", "auth_session", ".", "server_token", "else", ":", "token", "...
Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises UberIllegalState (ApiError) Raised if headers are invalid.
[ "Create", "headers", "for", "the", "request", "." ]
python
train
bububa/pyTOP
pyTOP/packages/requests/packages/urllib3/_collections.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/packages/urllib3/_collections.py#L50-L56
def _invalidate_entry(self, key): "If exists: Invalidate old entry and return it." old_entry = self.access_lookup.get(key) if old_entry: old_entry.is_valid = False return old_entry
[ "def", "_invalidate_entry", "(", "self", ",", "key", ")", ":", "old_entry", "=", "self", ".", "access_lookup", ".", "get", "(", "key", ")", "if", "old_entry", ":", "old_entry", ".", "is_valid", "=", "False", "return", "old_entry" ]
If exists: Invalidate old entry and return it.
[ "If", "exists", ":", "Invalidate", "old", "entry", "and", "return", "it", "." ]
python
train
SuperCowPowers/workbench
workbench/workers/pe_peid.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/pe_peid.py#L31-L43
def execute(self, input_data): ''' Execute the PEIDWorker ''' raw_bytes = input_data['sample']['raw_bytes'] # Have the PE File module process the file try: pefile_handle = pefile.PE(data=raw_bytes, fast_load=False) except (AttributeError, pefile.PEFormatError), error: return {'error': str(error), 'match_list': []} # Now get information from PEID module peid_match = self.peid_features(pefile_handle) return {'match_list': peid_match}
[ "def", "execute", "(", "self", ",", "input_data", ")", ":", "raw_bytes", "=", "input_data", "[", "'sample'", "]", "[", "'raw_bytes'", "]", "# Have the PE File module process the file", "try", ":", "pefile_handle", "=", "pefile", ".", "PE", "(", "data", "=", "r...
Execute the PEIDWorker
[ "Execute", "the", "PEIDWorker" ]
python
train
eandersson/amqpstorm
amqpstorm/management/connection.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/connection.py#L32-L52
def close(self, connection, reason='Closed via management api'): """Close Connection. :param str connection: Connection name :param str reason: Reason for closing connection. :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: None """ close_payload = json.dumps({ 'name': connection, 'reason': reason }) connection = quote(connection, '') return self.http_client.delete(API_CONNECTION % connection, payload=close_payload, headers={ 'X-Reason': reason })
[ "def", "close", "(", "self", ",", "connection", ",", "reason", "=", "'Closed via management api'", ")", ":", "close_payload", "=", "json", ".", "dumps", "(", "{", "'name'", ":", "connection", ",", "'reason'", ":", "reason", "}", ")", "connection", "=", "qu...
Close Connection. :param str connection: Connection name :param str reason: Reason for closing connection. :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: None
[ "Close", "Connection", "." ]
python
train
CI-WATER/gsshapy
gsshapy/modeling/framework.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L29-L37
def replace_file(from_file, to_file): """ Replaces to_file with from_file """ try: os.remove(to_file) except OSError: pass copy(from_file, to_file)
[ "def", "replace_file", "(", "from_file", ",", "to_file", ")", ":", "try", ":", "os", ".", "remove", "(", "to_file", ")", "except", "OSError", ":", "pass", "copy", "(", "from_file", ",", "to_file", ")" ]
Replaces to_file with from_file
[ "Replaces", "to_file", "with", "from_file" ]
python
train
aviaryan/python-gsearch
gsearch/googlesearch.py
https://github.com/aviaryan/python-gsearch/blob/fba2f42fbf4c2672b72d05b53120adcb25ba8b69/gsearch/googlesearch.py#L101-L130
def search(query, num_results=10): """ searches google for :query and returns a list of tuples of the format (name, url) """ data = download(query, num_results) results = re.findall(r'\<h3.*?\>.*?\<\/h3\>', data, re.IGNORECASE) if results is None or len(results) == 0: print('No results where found. Did the rate limit exceed?') return [] # search has results links = [] for r in results: mtch = re.match(r'.*?a\s*?href=\"(.*?)\".*?\>(.*?)\<\/a\>.*$', r, flags=re.IGNORECASE) if mtch is None: continue # parse url url = mtch.group(1) # clean url https://github.com/aviaryan/pythons/blob/master/Others/GoogleSearchLinks.py url = re.sub(r'^.*?=', '', url, count=1) # prefixed over urls \url=q? url = re.sub(r'\&amp.*$', '', url, count=1) # suffixed google things url = unquote(url) # url = re.sub(r'\%.*$', '', url) # NOT SAFE, causes issues with Youtube watch url # parse name name = prune_html(mtch.group(2)) name = convert_unicode(name) # append to links if is_url(url): # can be google images result links.append((name, url)) return links
[ "def", "search", "(", "query", ",", "num_results", "=", "10", ")", ":", "data", "=", "download", "(", "query", ",", "num_results", ")", "results", "=", "re", ".", "findall", "(", "r'\\<h3.*?\\>.*?\\<\\/h3\\>'", ",", "data", ",", "re", ".", "IGNORECASE", ...
searches google for :query and returns a list of tuples of the format (name, url)
[ "searches", "google", "for", ":", "query", "and", "returns", "a", "list", "of", "tuples", "of", "the", "format", "(", "name", "url", ")" ]
python
train
Esri/ArcREST
src/arcrest/ags/_uploads.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_uploads.py#L62-L91
def upload(self, filePath, description=None): """ This operation uploads an item to the server. Each uploaded item is identified by a unique itemID. Since this request uploads a file, it must be a multi-part request as per IETF RFC1867. All uploaded items are subjected to the deletion rules set on the upload directory by the administrator of the server. Additionally, the administrator can explicitly delete an item as each uploaded item shows up in the list of all the uploaded items in Site Directory. Users can provide arguments to the upload operation as query parameters. The parameter details are provided in the parameters listed below. Inputs: filePath - The file to be uploaded. description - An optional description for the uploaded item. """ params = { "f" : "json"} if description is not None: params['description'] = str(description) url = self._url + "/upload" files = {} files['file'] = filePath return self._post(url=url, param_dict=params, files=files, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "upload", "(", "self", ",", "filePath", ",", "description", "=", "None", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "if", "description", "is", "not", "None", ":", "params", "[", "'description'", "]", "=", "str", "(", "descripti...
This operation uploads an item to the server. Each uploaded item is identified by a unique itemID. Since this request uploads a file, it must be a multi-part request as per IETF RFC1867. All uploaded items are subjected to the deletion rules set on the upload directory by the administrator of the server. Additionally, the administrator can explicitly delete an item as each uploaded item shows up in the list of all the uploaded items in Site Directory. Users can provide arguments to the upload operation as query parameters. The parameter details are provided in the parameters listed below. Inputs: filePath - The file to be uploaded. description - An optional description for the uploaded item.
[ "This", "operation", "uploads", "an", "item", "to", "the", "server", ".", "Each", "uploaded", "item", "is", "identified", "by", "a", "unique", "itemID", ".", "Since", "this", "request", "uploads", "a", "file", "it", "must", "be", "a", "multi", "-", "part...
python
train
Kyria/EsiPy
esipy/security.py
https://github.com/Kyria/EsiPy/blob/06407a0218a126678f80d8a7e8a67b9729327865/esipy/security.py#L275-L292
def update_token(self, response_json, **kwargs): """ Update access_token, refresh_token and token_expiry from the response body. The response must be converted to a json object before being passed as a parameter :param response_json: the response body to use. :param token_identifier: the user identifier for the token """ self.token_identifier = kwargs.pop( 'token_identifier', self.token_identifier ) self.access_token = response_json['access_token'] self.token_expiry = int(time.time()) + response_json['expires_in'] if 'refresh_token' in response_json: self.refresh_token = response_json['refresh_token']
[ "def", "update_token", "(", "self", ",", "response_json", ",", "*", "*", "kwargs", ")", ":", "self", ".", "token_identifier", "=", "kwargs", ".", "pop", "(", "'token_identifier'", ",", "self", ".", "token_identifier", ")", "self", ".", "access_token", "=", ...
Update access_token, refresh_token and token_expiry from the response body. The response must be converted to a json object before being passed as a parameter :param response_json: the response body to use. :param token_identifier: the user identifier for the token
[ "Update", "access_token", "refresh_token", "and", "token_expiry", "from", "the", "response", "body", ".", "The", "response", "must", "be", "converted", "to", "a", "json", "object", "before", "being", "passed", "as", "a", "parameter", ":", "param", "response_json...
python
train
Parsl/parsl
parsl/providers/aws/aws.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/providers/aws/aws.py#L434-L502
def spin_up_instance(self, command, job_name): """Start an instance in the VPC in the first available subnet. N instances will be started if nodes_per_block > 1. Not supported. We only do 1 node per block. Parameters ---------- command : str Command string to execute on the node. job_name : str Name associated with the instances. """ command = Template(template_string).substitute(jobname=job_name, user_script=command, linger=str(self.linger).lower(), worker_init=self.worker_init) instance_type = self.instance_type subnet = self.sn_ids[0] ami_id = self.image_id total_instances = len(self.instances) if float(self.spot_max_bid) > 0: spot_options = { 'MarketType': 'spot', 'SpotOptions': { 'MaxPrice': str(self.spot_max_bid), 'SpotInstanceType': 'one-time', 'InstanceInterruptionBehavior': 'terminate' } } else: spot_options = {} if total_instances > self.max_nodes: logger.warn("Exceeded instance limit ({}). Cannot continue\n".format(self.max_nodes)) return [None] try: tag_spec = [{"ResourceType": "instance", "Tags": [{'Key': 'Name', 'Value': job_name}]}] instance = self.ec2.create_instances( MinCount=1, MaxCount=1, InstanceType=instance_type, ImageId=ami_id, KeyName=self.key_name, SubnetId=subnet, SecurityGroupIds=[self.sg_id], TagSpecifications=tag_spec, InstanceMarketOptions=spot_options, InstanceInitiatedShutdownBehavior='terminate', IamInstanceProfile={'Arn': self.iam_instance_profile_arn}, UserData=command ) except ClientError as e: print(e) logger.error(e.response) return [None] except Exception as e: logger.error("Request for EC2 resources failed : {0}".format(e)) return [None] self.instances.append(instance[0].id) logger.info( "Started up 1 instance {} . Instance type:{}".format(instance[0].id, instance_type) ) return instance
[ "def", "spin_up_instance", "(", "self", ",", "command", ",", "job_name", ")", ":", "command", "=", "Template", "(", "template_string", ")", ".", "substitute", "(", "jobname", "=", "job_name", ",", "user_script", "=", "command", ",", "linger", "=", "str", "...
Start an instance in the VPC in the first available subnet. N instances will be started if nodes_per_block > 1. Not supported. We only do 1 node per block. Parameters ---------- command : str Command string to execute on the node. job_name : str Name associated with the instances.
[ "Start", "an", "instance", "in", "the", "VPC", "in", "the", "first", "available", "subnet", "." ]
python
valid
saltstack/salt
salt/states/boto_secgroup.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_secgroup.py#L666-L761
def _tags_present(name, tags, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None): ''' helper function to validate tags are correct ''' ret = {'result': True, 'comment': '', 'changes': {}} if tags: sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key, keyid=keyid, profile=profile, vpc_id=vpc_id, vpc_name=vpc_name) if not sg: ret['comment'] = '{0} security group configuration could not be retrieved.'.format(name) ret['result'] = False return ret tags_to_add = tags tags_to_update = {} tags_to_remove = [] if sg.get('tags'): for existing_tag in sg['tags']: if existing_tag not in tags: if existing_tag not in tags_to_remove: tags_to_remove.append(existing_tag) else: if tags[existing_tag] != sg['tags'][existing_tag]: tags_to_update[existing_tag] = tags[existing_tag] tags_to_add.pop(existing_tag) if tags_to_remove: if __opts__['test']: msg = 'The following tag{0} set to be removed: {1}.'.format( ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove)) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: temp_ret = __salt__['boto_secgroup.delete_tags'](tags_to_remove, name=name, group_id=None, vpc_name=vpc_name, vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile) if not temp_ret: ret['result'] = False ret['comment'] = ' '.join([ ret['comment'], 'Error attempting to delete tags {0}.'.format(tags_to_remove) ]) return ret if 'old' not in ret['changes']: ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}}) for rem_tag in tags_to_remove: ret['changes']['old']['tags'][rem_tag] = sg['tags'][rem_tag] if tags_to_add or tags_to_update: if __opts__['test']: if tags_to_add: msg = 'The following tag{0} set to be added: {1}.'.format( ('s are' if len(tags_to_add.keys()) > 1 else ' is'), ', '.join(tags_to_add.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None if tags_to_update: msg = 'The following tag {0} set to be 
updated: {1}.'.format( ('values are' if len(tags_to_update.keys()) > 1 else 'value is'), ', '.join(tags_to_update.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: all_tag_changes = dictupdate.update(tags_to_add, tags_to_update) temp_ret = __salt__['boto_secgroup.set_tags'](all_tag_changes, name=name, group_id=None, vpc_name=vpc_name, vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile) if not temp_ret: ret['result'] = False msg = 'Error attempting to set tags.' ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}}) if 'new' not in ret['changes']: ret['changes'] = dictupdate.update(ret['changes'], {'new': {'tags': {}}}) for tag in all_tag_changes: ret['changes']['new']['tags'][tag] = tags[tag] if 'tags' in sg: if sg['tags']: if tag in sg['tags']: ret['changes']['old']['tags'][tag] = sg['tags'][tag] if not tags_to_update and not tags_to_remove and not tags_to_add: ret['comment'] = ' '.join([ret['comment'], 'Tags are already set.']) return ret
[ "def", "_tags_present", "(", "name", ",", "tags", ",", "vpc_id", "=", "None", ",", "vpc_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "ret", "=", "{", "'...
helper function to validate tags are correct
[ "helper", "function", "to", "validate", "tags", "are", "correct" ]
python
train
sdispater/orator
orator/orm/factory.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/factory.py#L98-L111
def register_as(self, klass, name, callback): """ Register a class with a function. :param klass: The class :type klass: class :param callback: The callable :type callback: callable :param name: The short name :type name: str """ return self.register(klass, callback, name)
[ "def", "register_as", "(", "self", ",", "klass", ",", "name", ",", "callback", ")", ":", "return", "self", ".", "register", "(", "klass", ",", "callback", ",", "name", ")" ]
Register a class with a function. :param klass: The class :type klass: class :param callback: The callable :type callback: callable :param name: The short name :type name: str
[ "Register", "a", "class", "with", "a", "function", "." ]
python
train
Clinical-Genomics/trailblazer
trailblazer/cli/clean.py
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/clean.py#L14-L29
def clean(context, days_ago, yes): """Clean up files from "old" analyses runs.""" number_of_days_ago = dt.datetime.now() - dt.timedelta(days=days_ago) analyses = context.obj['store'].analyses( status='completed', before=number_of_days_ago, deleted=False, ) for analysis_obj in analyses: LOG.debug(f"checking analysis: {analysis_obj.family} ({analysis_obj.id})") latest_analysis = context.obj['store'].analyses(family=analysis_obj.family).first() if analysis_obj != latest_analysis: print(click.style(f"{analysis_obj.family}: family has been re-started", fg='yellow')) else: print(f"delete analysis: {analysis_obj.family} ({analysis_obj.id})") context.invoke(delete, analysis_id=analysis_obj.id, yes=yes)
[ "def", "clean", "(", "context", ",", "days_ago", ",", "yes", ")", ":", "number_of_days_ago", "=", "dt", ".", "datetime", ".", "now", "(", ")", "-", "dt", ".", "timedelta", "(", "days", "=", "days_ago", ")", "analyses", "=", "context", ".", "obj", "["...
Clean up files from "old" analyses runs.
[ "Clean", "up", "files", "from", "old", "analyses", "runs", "." ]
python
train
ivanyu/idx2numpy
idx2numpy/converters.py
https://github.com/ivanyu/idx2numpy/blob/9b88698314973226212181d1747dfad6c6974e51/idx2numpy/converters.py#L142-L149
def convert_to_string(ndarr): """ Writes the contents of the numpy.ndarray ndarr to bytes in IDX format and returns it. """ with contextlib.closing(BytesIO()) as bytesio: _internal_write(bytesio, ndarr) return bytesio.getvalue()
[ "def", "convert_to_string", "(", "ndarr", ")", ":", "with", "contextlib", ".", "closing", "(", "BytesIO", "(", ")", ")", "as", "bytesio", ":", "_internal_write", "(", "bytesio", ",", "ndarr", ")", "return", "bytesio", ".", "getvalue", "(", ")" ]
Writes the contents of the numpy.ndarray ndarr to bytes in IDX format and returns it.
[ "Writes", "the", "contents", "of", "the", "numpy", ".", "ndarray", "ndarr", "to", "bytes", "in", "IDX", "format", "and", "returns", "it", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/wcs_functions.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/wcs_functions.py#L170-L186
def create_CD(orient, scale, cx=None, cy=None): """ Create a (un?)distorted CD matrix from the basic inputs. The 'cx' and 'cy' parameters, if given, provide the X and Y coefficients of the distortion as returned by reading the IDCTAB. Only the first 2 elements are used and should correspond to the 'OC[X/Y]10' and 'OC[X/Y]11' terms in that order as read from the expanded SIP headers. The units of 'scale' should be 'arcseconds/pixel' of the reference pixel. The value of 'orient' should be the absolute orientation on the sky of the reference pixel. """ cxymat = np.array([[cx[1],cx[0]],[cy[1],cy[0]]]) rotmat = fileutil.buildRotMatrix(orient)*scale/3600. new_cd = np.dot(rotmat,cxymat) return new_cd
[ "def", "create_CD", "(", "orient", ",", "scale", ",", "cx", "=", "None", ",", "cy", "=", "None", ")", ":", "cxymat", "=", "np", ".", "array", "(", "[", "[", "cx", "[", "1", "]", ",", "cx", "[", "0", "]", "]", ",", "[", "cy", "[", "1", "]"...
Create a (un?)distorted CD matrix from the basic inputs. The 'cx' and 'cy' parameters, if given, provide the X and Y coefficients of the distortion as returned by reading the IDCTAB. Only the first 2 elements are used and should correspond to the 'OC[X/Y]10' and 'OC[X/Y]11' terms in that order as read from the expanded SIP headers. The units of 'scale' should be 'arcseconds/pixel' of the reference pixel. The value of 'orient' should be the absolute orientation on the sky of the reference pixel.
[ "Create", "a", "(", "un?", ")", "distorted", "CD", "matrix", "from", "the", "basic", "inputs", "." ]
python
train
materials-data-facility/toolbox
mdf_toolbox/toolbox.py
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L257-L361
def confidential_login(credentials=None, client_id=None, client_secret=None, services=None, make_clients=True, token_dir=DEFAULT_CRED_PATH): """Log in to Globus services as a confidential client (a client with its own login information). Arguments: credentials (str or dict): A string filename, string JSON, or dictionary with credential and config information. By default, uses the ``DEFAULT_CRED_FILENAME`` and token_dir. Contains ``client_id``, ``client_secret``, and ``services`` as defined below. client_id (str): The ID of the client. client_secret (str): The client's secret for authentication. services (list of str): Services to authenticate with. make_clients (bool): If ``True``, will make and return appropriate clients with generated tokens. If ``False``, will only return authorizers. **Default**: ``True``. token_dir (str): The path to the directory to save tokens in and look for credentials by default. **Default**: ``DEFAULT_CRED_PATH``. Returns: dict: The clients and authorizers requested, indexed by service name. 
""" DEFAULT_CRED_FILENAME = "confidential_globus_login.json" # Read credentials if supplied if credentials: if type(credentials) is str: try: with open(credentials) as cred_file: creds = json.load(cred_file) except IOError: try: creds = json.loads(credentials) except ValueError: raise ValueError("Credentials unreadable or missing") elif type(credentials) is dict: creds = credentials else: try: with open(os.path.join(os.getcwd(), DEFAULT_CRED_FILENAME)) as cred_file: creds = json.load(cred_file) except IOError: try: with open(os.path.join(token_dir, DEFAULT_CRED_FILENAME)) as cred_file: creds = json.load(cred_file) except IOError: raise ValueError("Credentials/configuration must be passed as a " "filename string, JSON string, or dictionary, or provided " "in '{}' or '{}'.".format(DEFAULT_CRED_FILENAME, token_dir)) client_id = creds.get("client_id") client_secret = creds.get("client_secret") services = creds.get("services", services) if not client_id or not client_secret: raise ValueError("A client_id and client_secret are required.") if not services: services = [] elif isinstance(services, str): services = [services] conf_client = globus_sdk.ConfidentialAppAuthClient(client_id, client_secret) servs = [] for serv in services: serv = serv.lower().strip() if type(serv) is str: servs += serv.split(" ") else: servs += list(serv) # Translate services into scopes as possible scopes = [KNOWN_SCOPES.get(sc, sc) for sc in servs] # Make authorizers with every returned token all_authorizers = {} for scope in scopes: # TODO: Allow non-CC authorizers? try: all_authorizers[scope] = globus_sdk.ClientCredentialsAuthorizer(conf_client, scope) except Exception as e: print("Error: Cannot create authorizer for scope '{}' ({})".format(scope, str(e))) returnables = {} # Populate clients and named services # Only translate back services - if user provides scope directly, don't translate back # ex. 
transfer => urn:transfer.globus.org:all => transfer, # but urn:transfer.globus.org:all !=> transfer for service in servs: token_key = KNOWN_SCOPES.get(service) # If the .by_resource_server key (token key) for the service was returned if token_key in all_authorizers.keys(): # If there is an applicable client (all clients have known token key) # Pop from all_authorizers to remove from final return value if make_clients and KNOWN_CLIENTS.get(service): try: returnables[service] = KNOWN_CLIENTS[service]( authorizer=all_authorizers.pop(token_key), http_timeout=STD_TIMEOUT) except globus_sdk.GlobusAPIError as e: print("Error: Unable to create {} client: {}".format(service, e.message)) # If no applicable client, just translate the key else: returnables[service] = all_authorizers.pop(token_key) # Add authorizers not associated with service to returnables returnables.update(all_authorizers) return returnables
[ "def", "confidential_login", "(", "credentials", "=", "None", ",", "client_id", "=", "None", ",", "client_secret", "=", "None", ",", "services", "=", "None", ",", "make_clients", "=", "True", ",", "token_dir", "=", "DEFAULT_CRED_PATH", ")", ":", "DEFAULT_CRED_...
Log in to Globus services as a confidential client (a client with its own login information). Arguments: credentials (str or dict): A string filename, string JSON, or dictionary with credential and config information. By default, uses the ``DEFAULT_CRED_FILENAME`` and token_dir. Contains ``client_id``, ``client_secret``, and ``services`` as defined below. client_id (str): The ID of the client. client_secret (str): The client's secret for authentication. services (list of str): Services to authenticate with. make_clients (bool): If ``True``, will make and return appropriate clients with generated tokens. If ``False``, will only return authorizers. **Default**: ``True``. token_dir (str): The path to the directory to save tokens in and look for credentials by default. **Default**: ``DEFAULT_CRED_PATH``. Returns: dict: The clients and authorizers requested, indexed by service name.
[ "Log", "in", "to", "Globus", "services", "as", "a", "confidential", "client", "(", "a", "client", "with", "its", "own", "login", "information", ")", "." ]
python
train
jrfonseca/xdot.py
xdot/ui/pen.py
https://github.com/jrfonseca/xdot.py/blob/6248c81c21a0fe825089311b17f2c302eea614a2/xdot/ui/pen.py#L46-L50
def copy(self): """Create a copy of this pen.""" pen = Pen() pen.__dict__ = self.__dict__.copy() return pen
[ "def", "copy", "(", "self", ")", ":", "pen", "=", "Pen", "(", ")", "pen", ".", "__dict__", "=", "self", ".", "__dict__", ".", "copy", "(", ")", "return", "pen" ]
Create a copy of this pen.
[ "Create", "a", "copy", "of", "this", "pen", "." ]
python
test
LogicalDash/LiSE
ELiDE/ELiDE/board/spot.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/spot.py#L81-L89
def push_pos(self, *args): """Set my current position, expressed as proportions of the board's width and height, into the ``_x`` and ``_y`` keys of the entity in my ``proxy`` property, such that it will be recorded in the database. """ self.proxy['_x'] = self.x / self.board.width self.proxy['_y'] = self.y / self.board.height
[ "def", "push_pos", "(", "self", ",", "*", "args", ")", ":", "self", ".", "proxy", "[", "'_x'", "]", "=", "self", ".", "x", "/", "self", ".", "board", ".", "width", "self", ".", "proxy", "[", "'_y'", "]", "=", "self", ".", "y", "/", "self", "....
Set my current position, expressed as proportions of the board's width and height, into the ``_x`` and ``_y`` keys of the entity in my ``proxy`` property, such that it will be recorded in the database.
[ "Set", "my", "current", "position", "expressed", "as", "proportions", "of", "the", "board", "s", "width", "and", "height", "into", "the", "_x", "and", "_y", "keys", "of", "the", "entity", "in", "my", "proxy", "property", "such", "that", "it", "will", "be...
python
train
acroz/pylivy
livy/session.py
https://github.com/acroz/pylivy/blob/14fc65e19434c51ec959c92acb0925b87a6e3569/livy/session.py#L193-L200
def state(self) -> SessionState: """The state of the managed Spark session.""" if self.session_id is None: raise ValueError("session not yet started") session = self.client.get_session(self.session_id) if session is None: raise ValueError("session not found - it may have been shut down") return session.state
[ "def", "state", "(", "self", ")", "->", "SessionState", ":", "if", "self", ".", "session_id", "is", "None", ":", "raise", "ValueError", "(", "\"session not yet started\"", ")", "session", "=", "self", ".", "client", ".", "get_session", "(", "self", ".", "s...
The state of the managed Spark session.
[ "The", "state", "of", "the", "managed", "Spark", "session", "." ]
python
train
m110/climb
climb/core.py
https://github.com/m110/climb/blob/0a35dfb94df48f85963490fbe0514c2ea80bff34/climb/core.py#L32-L71
def run(self): """Loops and executes commands in interactive mode.""" if self._skip_delims: delims = readline.get_completer_delims() for delim in self._skip_delims: delims = delims.replace(delim, '') readline.set_completer_delims(delims) readline.parse_and_bind("tab: complete") readline.set_completer(self._completer.complete) if self._history_file: # Ensure history file exists if not os.path.isfile(self._history_file): open(self._history_file, 'w').close() readline.read_history_file(self._history_file) self._running = True try: while self._running: try: command = input(self._format_prompt()) if command: result = self.execute(*shlex.split(command)) if result: print(result) except CLIException as exc: print(exc) except (KeyboardInterrupt, EOFError): self._running = False print() except Exception as exc: if self._verbose: traceback.print_exc() else: print(exc) finally: if self._history_file: readline.write_history_file(self._history_file)
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "_skip_delims", ":", "delims", "=", "readline", ".", "get_completer_delims", "(", ")", "for", "delim", "in", "self", ".", "_skip_delims", ":", "delims", "=", "delims", ".", "replace", "(", "delim", ...
Loops and executes commands in interactive mode.
[ "Loops", "and", "executes", "commands", "in", "interactive", "mode", "." ]
python
train
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/bccache.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/bccache.py#L72-L81
def write_bytecode(self, f): """Dump the bytecode into the file or file like object passed.""" if self.code is None: raise TypeError('can\'t write empty bucket') f.write(bc_magic) pickle.dump(self.checksum, f, 2) if isinstance(f, file): marshal.dump(self.code, f) else: f.write(marshal.dumps(self.code))
[ "def", "write_bytecode", "(", "self", ",", "f", ")", ":", "if", "self", ".", "code", "is", "None", ":", "raise", "TypeError", "(", "'can\\'t write empty bucket'", ")", "f", ".", "write", "(", "bc_magic", ")", "pickle", ".", "dump", "(", "self", ".", "c...
Dump the bytecode into the file or file like object passed.
[ "Dump", "the", "bytecode", "into", "the", "file", "or", "file", "like", "object", "passed", "." ]
python
train
singularityhub/singularity-python
singularity/analysis/reproduce/levels.py
https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/levels.py#L155-L164
def make_level_set(level): '''make level set will convert one level into a set''' new_level = dict() for key,value in level.items(): if isinstance(value,list): new_level[key] = set(value) else: new_level[key] = value return new_level
[ "def", "make_level_set", "(", "level", ")", ":", "new_level", "=", "dict", "(", ")", "for", "key", ",", "value", "in", "level", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "new_level", "[", "key", "]", "=",...
make level set will convert one level into a set
[ "make", "level", "set", "will", "convert", "one", "level", "into", "a", "set" ]
python
train
ArchiveTeam/wpull
wpull/document/htmlparse/lxml_.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/document/htmlparse/lxml_.py#L189-L208
def parse_doctype(cls, file, encoding=None): '''Get the doctype from the document. Returns: str, None ''' if encoding: lxml_encoding = to_lxml_encoding(encoding) or 'latin1' else: lxml_encoding = encoding try: parser = lxml.etree.XMLParser(encoding=lxml_encoding, recover=True) tree = lxml.etree.parse( io.BytesIO(wpull.util.peek_file(file)), parser=parser ) if tree.getroot() is not None: return tree.docinfo.doctype except lxml.etree.LxmlError: pass
[ "def", "parse_doctype", "(", "cls", ",", "file", ",", "encoding", "=", "None", ")", ":", "if", "encoding", ":", "lxml_encoding", "=", "to_lxml_encoding", "(", "encoding", ")", "or", "'latin1'", "else", ":", "lxml_encoding", "=", "encoding", "try", ":", "pa...
Get the doctype from the document. Returns: str, None
[ "Get", "the", "doctype", "from", "the", "document", "." ]
python
train
Kozea/cairocffi
cairocffi/fonts.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L16-L20
def _encode_string(string): """Return a byte string, encoding Unicode with UTF-8.""" if not isinstance(string, bytes): string = string.encode('utf8') return ffi.new('char[]', string)
[ "def", "_encode_string", "(", "string", ")", ":", "if", "not", "isinstance", "(", "string", ",", "bytes", ")", ":", "string", "=", "string", ".", "encode", "(", "'utf8'", ")", "return", "ffi", ".", "new", "(", "'char[]'", ",", "string", ")" ]
Return a byte string, encoding Unicode with UTF-8.
[ "Return", "a", "byte", "string", "encoding", "Unicode", "with", "UTF", "-", "8", "." ]
python
train
markdrago/pgsanity
pgsanity/pgsanity.py
https://github.com/markdrago/pgsanity/blob/3bd391be1c5f0e2ce041652bdaf1d6b54424c6d8/pgsanity/pgsanity.py#L46-L55
def check_string(sql_string, add_semicolon=False): """ Check whether a string is valid PostgreSQL. Returns a boolean indicating validity and a message from ecpg, which will be an empty string if the input was valid, or a description of the problem otherwise. """ prepped_sql = sqlprep.prepare_sql(sql_string, add_semicolon=add_semicolon) success, msg = ecpg.check_syntax(prepped_sql) return success, msg
[ "def", "check_string", "(", "sql_string", ",", "add_semicolon", "=", "False", ")", ":", "prepped_sql", "=", "sqlprep", ".", "prepare_sql", "(", "sql_string", ",", "add_semicolon", "=", "add_semicolon", ")", "success", ",", "msg", "=", "ecpg", ".", "check_synta...
Check whether a string is valid PostgreSQL. Returns a boolean indicating validity and a message from ecpg, which will be an empty string if the input was valid, or a description of the problem otherwise.
[ "Check", "whether", "a", "string", "is", "valid", "PostgreSQL", ".", "Returns", "a", "boolean", "indicating", "validity", "and", "a", "message", "from", "ecpg", "which", "will", "be", "an", "empty", "string", "if", "the", "input", "was", "valid", "or", "a"...
python
train
Pipoline/rocket-python
rocketchat/api.py
https://github.com/Pipoline/rocket-python/blob/643ece8a9db106922e019984a859ca04283262ff/rocketchat/api.py#L39-L43
def get_private_rooms(self, **kwargs): """ Get a listing of all private rooms with their names and IDs """ return GetPrivateRooms(settings=self.settings, **kwargs).call(**kwargs)
[ "def", "get_private_rooms", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "GetPrivateRooms", "(", "settings", "=", "self", ".", "settings", ",", "*", "*", "kwargs", ")", ".", "call", "(", "*", "*", "kwargs", ")" ]
Get a listing of all private rooms with their names and IDs
[ "Get", "a", "listing", "of", "all", "private", "rooms", "with", "their", "names", "and", "IDs" ]
python
train
django-leonardo/django-leonardo
leonardo/decorators.py
https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/decorators.py#L53-L68
def _decorate_urlconf(urlpatterns, decorator=require_auth, *args, **kwargs): '''Decorate all urlpatterns by specified decorator''' if isinstance(urlpatterns, (list, tuple)): for pattern in urlpatterns: if getattr(pattern, 'callback', None): pattern._callback = decorator( pattern.callback, *args, **kwargs) if getattr(pattern, 'url_patterns', []): _decorate_urlconf( pattern.url_patterns, decorator, *args, **kwargs) else: if getattr(urlpatterns, 'callback', None): urlpatterns._callback = decorator( urlpatterns.callback, *args, **kwargs)
[ "def", "_decorate_urlconf", "(", "urlpatterns", ",", "decorator", "=", "require_auth", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "urlpatterns", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "pattern", "in", "...
Decorate all urlpatterns by specified decorator
[ "Decorate", "all", "urlpatterns", "by", "specified", "decorator" ]
python
train
Azure/msrest-for-python
msrest/serialization.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/serialization.py#L1580-L1599
def deserialize_unicode(data): """Preserve unicode objects in Python 2, otherwise return data as a string. :param str data: response string to be deserialized. :rtype: str or unicode """ # We might be here because we have an enum modeled as string, # and we try to deserialize a partial dict with enum inside if isinstance(data, Enum): return data # Consider this is real string try: if isinstance(data, unicode): return data except NameError: return str(data) else: return str(data)
[ "def", "deserialize_unicode", "(", "data", ")", ":", "# We might be here because we have an enum modeled as string,", "# and we try to deserialize a partial dict with enum inside", "if", "isinstance", "(", "data", ",", "Enum", ")", ":", "return", "data", "# Consider this is real ...
Preserve unicode objects in Python 2, otherwise return data as a string. :param str data: response string to be deserialized. :rtype: str or unicode
[ "Preserve", "unicode", "objects", "in", "Python", "2", "otherwise", "return", "data", "as", "a", "string", "." ]
python
train
fastai/fastai
fastai/data_block.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L668-L670
def load_empty(cls, path:PathOrStr, fn:PathOrStr): "Load the state in `fn` to create an empty `LabelList` for inference." return cls.load_state(path, pickle.load(open(Path(path)/fn, 'rb')))
[ "def", "load_empty", "(", "cls", ",", "path", ":", "PathOrStr", ",", "fn", ":", "PathOrStr", ")", ":", "return", "cls", ".", "load_state", "(", "path", ",", "pickle", ".", "load", "(", "open", "(", "Path", "(", "path", ")", "/", "fn", ",", "'rb'", ...
Load the state in `fn` to create an empty `LabelList` for inference.
[ "Load", "the", "state", "in", "fn", "to", "create", "an", "empty", "LabelList", "for", "inference", "." ]
python
train
vimalkvn/riboplot
riboplot/ribocore.py
https://github.com/vimalkvn/riboplot/blob/914515df54eccc2e726ba71e751c3260f2066d97/riboplot/ribocore.py#L350-L363
def check_rna_file(rna_file): """Check if bedtools is available and if the given RNA-Seq bam file is valid. """ try: subprocess.check_output(['bedtools', '--version']) except OSError: log.error('Could not find bedtools in PATH. bedtools is required ' 'for generating RNA coverage plot.') raise # Is this a valid BAM file? i.e., can pysam read it? try: is_bam_valid(rna_file) except ValueError: log.error('The given RNASeq BAM file is not valid') raise
[ "def", "check_rna_file", "(", "rna_file", ")", ":", "try", ":", "subprocess", ".", "check_output", "(", "[", "'bedtools'", ",", "'--version'", "]", ")", "except", "OSError", ":", "log", ".", "error", "(", "'Could not find bedtools in PATH. bedtools is required '", ...
Check if bedtools is available and if the given RNA-Seq bam file is valid.
[ "Check", "if", "bedtools", "is", "available", "and", "if", "the", "given", "RNA", "-", "Seq", "bam", "file", "is", "valid", "." ]
python
train
saltstack/salt
salt/modules/dockermod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dockermod.py#L1325-L1366
def connected(name, verbose=False): ''' .. versionadded:: 2018.3.0 Return a list of running containers attached to the specified network name Network name verbose : False If ``True``, return extended info about each container (IP configuration, etc.) CLI Example: .. code-block:: bash salt myminion docker.connected net_name ''' containers = inspect_network(name).get('Containers', {}) ret = {} for cid, cinfo in six.iteritems(containers): # The Containers dict is keyed by container ID, but we want the results # to be keyed by container name, so we need to pop off the Name and # then add the Id key to the cinfo dict. try: name = cinfo.pop('Name') except (KeyError, AttributeError): # Should never happen log.warning( '\'Name\' key not present in container definition for ' 'container ID \'%s\' within inspect results for Docker ' 'network \'%s\'. Full container definition: %s', cid, name, cinfo ) continue else: cinfo['Id'] = cid ret[name] = cinfo if not verbose: return list(ret) return ret
[ "def", "connected", "(", "name", ",", "verbose", "=", "False", ")", ":", "containers", "=", "inspect_network", "(", "name", ")", ".", "get", "(", "'Containers'", ",", "{", "}", ")", "ret", "=", "{", "}", "for", "cid", ",", "cinfo", "in", "six", "."...
.. versionadded:: 2018.3.0 Return a list of running containers attached to the specified network name Network name verbose : False If ``True``, return extended info about each container (IP configuration, etc.) CLI Example: .. code-block:: bash salt myminion docker.connected net_name
[ "..", "versionadded", "::", "2018", ".", "3", ".", "0" ]
python
train
chimera0/accel-brain-code
Generative-Adversarial-Networks/pygan/generativemodel/conditionalgenerativemodel/conditional_convolutional_model.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Generative-Adversarial-Networks/pygan/generativemodel/conditionalgenerativemodel/conditional_convolutional_model.py#L190-L206
def learn(self, grad_arr, fix_opt_flag=False): ''' Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. fix_opt_flag: If `False`, no optimization in this model will be done. Returns: `np.ndarray` of delta or gradients. ''' channel = grad_arr.shape[1] // 2 grad_arr = self.__deconvolution_model.learn(grad_arr[:, :channel], fix_opt_flag=fix_opt_flag) delta_arr = self.__cnn.back_propagation(grad_arr) if fix_opt_flag is False: self.__cnn.optimize(self.__learning_rate, 1) return delta_arr
[ "def", "learn", "(", "self", ",", "grad_arr", ",", "fix_opt_flag", "=", "False", ")", ":", "channel", "=", "grad_arr", ".", "shape", "[", "1", "]", "//", "2", "grad_arr", "=", "self", ".", "__deconvolution_model", ".", "learn", "(", "grad_arr", "[", ":...
Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. fix_opt_flag: If `False`, no optimization in this model will be done. Returns: `np.ndarray` of delta or gradients.
[ "Update", "this", "Discriminator", "by", "ascending", "its", "stochastic", "gradient", ".", "Args", ":", "grad_arr", ":", "np", ".", "ndarray", "of", "gradients", ".", "fix_opt_flag", ":", "If", "False", "no", "optimization", "in", "this", "model", "will", "...
python
train
google/grr
grr/client/grr_response_client/windows/installers.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/windows/installers.py#L235-L255
def InstallNanny(self): """Install the nanny program.""" # We need to copy the nanny sections to the registry to ensure the # service is correctly configured. new_config = config.CONFIG.MakeNewConfig() new_config.SetWriteBack(config.CONFIG["Config.writeback"]) for option in self.nanny_options: new_config.Set(option, config.CONFIG.Get(option)) new_config.Write() args = [ config.CONFIG["Nanny.binary"], "--service_key", config.CONFIG["Client.config_key"], "install" ] logging.debug("Calling %s", (args,)) output = subprocess.check_output( args, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE) logging.debug("%s", output)
[ "def", "InstallNanny", "(", "self", ")", ":", "# We need to copy the nanny sections to the registry to ensure the", "# service is correctly configured.", "new_config", "=", "config", ".", "CONFIG", ".", "MakeNewConfig", "(", ")", "new_config", ".", "SetWriteBack", "(", "con...
Install the nanny program.
[ "Install", "the", "nanny", "program", "." ]
python
train
sassoo/goldman
goldman/validators/__init__.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/validators/__init__.py#L24-L32
def validate_uuid(value): """ UUID 128-bit validator """ if value and not isinstance(value, UUID): try: return UUID(str(value), version=4) except (AttributeError, ValueError): raise ValidationError('not a valid UUID') return value
[ "def", "validate_uuid", "(", "value", ")", ":", "if", "value", "and", "not", "isinstance", "(", "value", ",", "UUID", ")", ":", "try", ":", "return", "UUID", "(", "str", "(", "value", ")", ",", "version", "=", "4", ")", "except", "(", "AttributeError...
UUID 128-bit validator
[ "UUID", "128", "-", "bit", "validator" ]
python
train
saltstack/salt
salt/modules/lxc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L2882-L2945
def set_password(name, users, password, encrypted=True, path=None): ''' .. versionchanged:: 2015.5.0 Function renamed from ``set_pass`` to ``set_password``. Additionally, this function now supports (and defaults to using) a password hash instead of a plaintext password. Set the password of one or more system users inside containers users Comma-separated list (or python list) of users to change password password Password to set for the specified user(s) encrypted : True If true, ``password`` must be a password hash. Set to ``False`` to set a plaintext password (not recommended). .. versionadded:: 2015.5.0 path path to the container parent directory default: /var/lib/lxc (system) .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' lxc.set_pass container-name root '$6$uJ2uAyLU$KoI67t8As/0fXtJOPcHKGXmUpcoYUcVR2K6x93walnShTCQvjRwq25yIkiCBOqgbfdKQSFnAo28/ek6716vEV1' salt '*' lxc.set_pass container-name root foo encrypted=False ''' def _bad_user_input(): raise SaltInvocationError('Invalid input for \'users\' parameter') if not isinstance(users, list): try: users = users.split(',') except AttributeError: _bad_user_input() if not users: _bad_user_input() failed_users = [] for user in users: result = retcode(name, 'chpasswd{0}'.format(' -e' if encrypted else ''), stdin=':'.join((user, password)), python_shell=False, path=path, chroot_fallback=True, output_loglevel='quiet') if result != 0: failed_users.append(user) if failed_users: raise CommandExecutionError( 'Password change failed for the following user(s): {0}' .format(', '.join(failed_users)) ) return True
[ "def", "set_password", "(", "name", ",", "users", ",", "password", ",", "encrypted", "=", "True", ",", "path", "=", "None", ")", ":", "def", "_bad_user_input", "(", ")", ":", "raise", "SaltInvocationError", "(", "'Invalid input for \\'users\\' parameter'", ")", ...
.. versionchanged:: 2015.5.0 Function renamed from ``set_pass`` to ``set_password``. Additionally, this function now supports (and defaults to using) a password hash instead of a plaintext password. Set the password of one or more system users inside containers users Comma-separated list (or python list) of users to change password password Password to set for the specified user(s) encrypted : True If true, ``password`` must be a password hash. Set to ``False`` to set a plaintext password (not recommended). .. versionadded:: 2015.5.0 path path to the container parent directory default: /var/lib/lxc (system) .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' lxc.set_pass container-name root '$6$uJ2uAyLU$KoI67t8As/0fXtJOPcHKGXmUpcoYUcVR2K6x93walnShTCQvjRwq25yIkiCBOqgbfdKQSFnAo28/ek6716vEV1' salt '*' lxc.set_pass container-name root foo encrypted=False
[ "..", "versionchanged", "::", "2015", ".", "5", ".", "0", "Function", "renamed", "from", "set_pass", "to", "set_password", ".", "Additionally", "this", "function", "now", "supports", "(", "and", "defaults", "to", "using", ")", "a", "password", "hash", "inste...
python
train
F5Networks/f5-common-python
f5-sdk-dist/build_pkgs.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5-sdk-dist/build_pkgs.py#L351-L366
def store_json(obj, destination): """store_json Takes in a json-portable object and a filesystem-based destination and stores the json-portable object as JSON into the filesystem-based destination. This is blind, dumb, and stupid; thus, it can fail if the object is more complex than simple dict, list, int, str, etc. type object structures. """ with open(destination, 'r+') as FH: fcntl.lockf(FH, fcntl.LOCK_EX) json_in = json.loads(FH.read()) json_in.update(obj) # obj overwrites items in json_in... FH.seek(0) FH.write(json.dumps(json_in, sort_keys=True, indent=4, separators=(',', ': ')))
[ "def", "store_json", "(", "obj", ",", "destination", ")", ":", "with", "open", "(", "destination", ",", "'r+'", ")", "as", "FH", ":", "fcntl", ".", "lockf", "(", "FH", ",", "fcntl", ".", "LOCK_EX", ")", "json_in", "=", "json", ".", "loads", "(", "F...
store_json Takes in a json-portable object and a filesystem-based destination and stores the json-portable object as JSON into the filesystem-based destination. This is blind, dumb, and stupid; thus, it can fail if the object is more complex than simple dict, list, int, str, etc. type object structures.
[ "store_json" ]
python
train