Column            Type     Range / distinct values
repo              string   length 7 to 55
path              string   length 4 to 223
url               string   length 87 to 315
code              string   length 75 to 104k
code_tokens       list     -
docstring         string   length 1 to 46.9k
docstring_tokens  list     -
language          string   1 distinct value
partition         string   3 distinct values
avg_line_len      float64  7.91 to 980
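With the schema above, basic row-level invariants can be checked directly; a minimal sketch using pandas, assuming the rows below were exported to a hypothetical rows.jsonl file (one JSON object per line):

import pandas as pd

# 'rows.jsonl' is a hypothetical JSON Lines export of the rows below.
df = pd.read_json('rows.jsonl', lines=True)

# Invariants implied by the schema table above.
assert df['language'].nunique() == 1                       # 1 distinct value
assert set(df['partition']) <= {'train', 'valid', 'test'}  # 3 distinct values
assert df['avg_line_len'].between(7.91, 980).all()

print(df.groupby('partition')['avg_line_len'].describe())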
incuna/django-user-management
user_management/api/views.py
https://github.com/incuna/django-user-management/blob/6784e33191d4eff624d2cf2df9ca01db4f23c9c6/user_management/api/views.py#L277-L287
def initial(self, request, *args, **kwargs):
    """Disallow users other than the user whose email is being reset."""
    email = request.data.get('email')
    if request.user.is_authenticated() and email != request.user.email:
        raise PermissionDenied()
    return super(ResendConfirmationEmail, self).initial(
        request, *args, **kwargs
    )
[ "def", "initial", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "email", "=", "request", ".", "data", ".", "get", "(", "'email'", ")", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", "and", "emai...
Disallow users other than the user whose email is being reset.
[ "Disallow", "users", "other", "than", "the", "user", "whose", "email", "is", "being", "reset", "." ]
python
test
36.272727
wandb/client
wandb/vendor/prompt_toolkit/buffer.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer.py#L700-L706
def go_to_history(self, index):
    """
    Go to this item in the history.
    """
    if index < len(self._working_lines):
        self.working_index = index
        self.cursor_position = len(self.text)
[ "def", "go_to_history", "(", "self", ",", "index", ")", ":", "if", "index", "<", "len", "(", "self", ".", "_working_lines", ")", ":", "self", ".", "working_index", "=", "index", "self", ".", "cursor_position", "=", "len", "(", "self", ".", "text", ")" ...
Go to this item in the history.
[ "Go", "to", "this", "item", "in", "the", "history", "." ]
python
train
31.857143
kwikteam/phy
phy/io/array.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L472-L498
def _spikes_per_cluster(spike_clusters, spike_ids=None):
    """Return a dictionary {cluster: list_of_spikes}."""
    if spike_clusters is None or not len(spike_clusters):
        return {}
    if spike_ids is None:
        spike_ids = np.arange(len(spike_clusters)).astype(np.int64)
    # NOTE: this sort method is stable, so spike ids are increasing
    # among any cluster. Therefore we don't have to sort again down here,
    # when creating the spikes_in_clusters dictionary.
    rel_spikes = np.argsort(spike_clusters, kind='mergesort')
    abs_spikes = spike_ids[rel_spikes]
    spike_clusters = spike_clusters[rel_spikes]

    diff = np.empty_like(spike_clusters)
    diff[0] = 1
    diff[1:] = np.diff(spike_clusters)

    idx = np.nonzero(diff > 0)[0]
    clusters = spike_clusters[idx]

    # NOTE: we don't have to sort abs_spikes[...] here because the argsort
    # using 'mergesort' above is stable.
    spikes_in_clusters = {clusters[i]: abs_spikes[idx[i]:idx[i + 1]]
                          for i in range(len(clusters) - 1)}
    spikes_in_clusters[clusters[-1]] = abs_spikes[idx[-1]:]

    return spikes_in_clusters
[ "def", "_spikes_per_cluster", "(", "spike_clusters", ",", "spike_ids", "=", "None", ")", ":", "if", "spike_clusters", "is", "None", "or", "not", "len", "(", "spike_clusters", ")", ":", "return", "{", "}", "if", "spike_ids", "is", "None", ":", "spike_ids", ...
Return a dictionary {cluster: list_of_spikes}.
[ "Return", "a", "dictionary", "{", "cluster", ":", "list_of_spikes", "}", "." ]
python
train
41.037037
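A minimal usage sketch for the _spikes_per_cluster helper above (run inside its module; inputs are NumPy arrays):

import numpy as np

spike_clusters = np.array([1, 1, 2, 1, 2])
spikes = _spikes_per_cluster(spike_clusters)
# The stable mergesort keeps spike ids increasing within each cluster:
# {1: array([0, 1, 3]), 2: array([2, 4])} (keys are numpy integers)
print(spikes)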
msoulier/tftpy
tftpy/TftpStates.py
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L371-L407
def handle(self, pkt, raddress, rport):
    "Handle an initial WRQ packet as a server."
    log.debug("In TftpStateServerRecvWRQ.handle")
    sendoack = self.serverInitial(pkt, raddress, rport)
    path = self.full_path
    if self.context.upload_open:
        f = self.context.upload_open(path, self.context)
        if f is None:
            self.sendError(TftpErrors.AccessViolation)
            raise TftpException("Dynamic path %s not permitted" % path)
        else:
            self.context.fileobj = f
    else:
        log.info("Opening file %s for writing" % path)
        if os.path.exists(path):
            # FIXME: correct behavior?
            log.warning("File %s exists already, overwriting..." % (
                self.context.file_to_transfer))
        # FIXME: I think we should upload to a temp file and not overwrite
        # the existing file until the file is successfully uploaded.
        self.make_subdirs()
        self.context.fileobj = open(path, "wb")

    # Options negotiation.
    if sendoack:
        log.debug("Sending OACK to client")
        self.sendOACK()
    else:
        log.debug("No requested options, expecting transfer to begin...")
        self.sendACK()
    # Whether we're sending an oack or not, we're expecting a DAT for
    # block 1
    self.context.next_block = 1
    # We may have sent an OACK, but we're expecting a DAT as the response
    # to either the OACK or an ACK, so lets unconditionally use the
    # TftpStateExpectDAT state.
    return TftpStateExpectDAT(self.context)
[ "def", "handle", "(", "self", ",", "pkt", ",", "raddress", ",", "rport", ")", ":", "log", ".", "debug", "(", "\"In TftpStateServerRecvWRQ.handle\"", ")", "sendoack", "=", "self", ".", "serverInitial", "(", "pkt", ",", "raddress", ",", "rport", ")", "path",...
Handle an initial WRQ packet as a server.
[ "Handle", "an", "initial", "WRQ", "packet", "as", "a", "server", "." ]
python
train
44.351351
ValvePython/steam
steam/client/__init__.py
https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/client/__init__.py#L236-L251
def wait_msg(self, event, timeout=None, raises=None):
    """Wait for a message, similar to :meth:`.wait_event`

    :param event: :class:`.EMsg` or job id
    :param timeout: seconds to wait before timeout
    :type timeout: :class:`int`
    :param raises: On timeout when ``False`` returns :class:`None`, else raise :class:`gevent.Timeout`
    :type raises: :class:`bool`
    :return: returns a message or :class:`None`
    :rtype: :class:`None`, or `proto message`
    :raises: ``gevent.Timeout``
    """
    resp = self.wait_event(event, timeout, raises)

    if resp is not None:
        return resp[0]
[ "def", "wait_msg", "(", "self", ",", "event", ",", "timeout", "=", "None", ",", "raises", "=", "None", ")", ":", "resp", "=", "self", ".", "wait_event", "(", "event", ",", "timeout", ",", "raises", ")", "if", "resp", "is", "not", "None", ":", "retu...
Wait for a message, similar to :meth:`.wait_event` :param event: :class:`.EMsg` or job id :param timeout: seconds to wait before timeout :type timeout: :class:`int` :param raises: On timeout when ``False`` returns :class:`None`, else raise :class:`gevent.Timeout` :type raises: :class:`bool` :return: returns a message or :class:`None` :rtype: :class:`None`, or `proto message` :raises: ``gevent.Timeout``
[ "Wait", "for", "a", "message", "similiar", "to", ":", "meth", ":", ".", "wait_event" ]
python
train
40.25
osrg/ryu
ryu/services/protocols/bgp/bgpspeaker.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/bgpspeaker.py#L688-L836
def evpn_prefix_add(self, route_type, route_dist, esi=0,
                    ethernet_tag_id=None, mac_addr=None, ip_addr=None,
                    ip_prefix=None, gw_ip_addr=None, vni=None,
                    next_hop=None, tunnel_type=None, pmsi_tunnel_type=None,
                    redundancy_mode=None):
    """ This method adds a new EVPN route to be advertised.

    ``route_type`` specifies one of the EVPN route type names.
    This parameter must be one of the following.

    - EVPN_ETH_AUTO_DISCOVERY = 'eth_ad'
    - EVPN_MAC_IP_ADV_ROUTE = 'mac_ip_adv'
    - EVPN_MULTICAST_ETAG_ROUTE = 'multicast_etag'
    - EVPN_ETH_SEGMENT = 'eth_seg'
    - EVPN_IP_PREFIX_ROUTE = 'ip_prefix'

    ``route_dist`` specifies a route distinguisher value.

    ``esi`` is a value to specify the Ethernet Segment Identifier.
    0 is the default and denotes a single-homed site.
    If you want to advertise an esi other than 0,
    it must be set as a dictionary type.
    If esi is a dictionary type, the 'type' key must be set
    and specifies the ESI type.
    For the supported ESI types, see :py:mod:`ryu.lib.packet.bgp.EvpnEsi`.
    The remaining arguments are the same as those for the corresponding
    class.

    ``ethernet_tag_id`` specifies the Ethernet Tag ID.

    ``mac_addr`` specifies a MAC address to advertise.

    ``ip_addr`` specifies an IPv4 or IPv6 address to advertise.

    ``ip_prefix`` specifies an IPv4 or IPv6 prefix to advertise.

    ``gw_ip_addr`` specifies an IPv4 or IPv6 address of a gateway to
    advertise.

    ``vni`` specifies a Virtual Network Identifier for VXLAN
    or a Virtual Subnet Identifier for NVGRE.
    If tunnel_type is not TUNNEL_TYPE_VXLAN or TUNNEL_TYPE_NVGRE,
    this field is ignored.

    ``next_hop`` specifies the next hop address for this prefix.

    ``tunnel_type`` specifies the data plane encapsulation type to
    advertise. By default, this attribute is not advertised.
    The supported encapsulation types are the following.

    - TUNNEL_TYPE_VXLAN = 'vxlan'
    - TUNNEL_TYPE_NVGRE = 'nvgre'

    ``pmsi_tunnel_type`` specifies the type of the PMSI tunnel attribute
    used to encode the multicast tunnel identifier.
    This attribute is advertised only if route_type is
    EVPN_MULTICAST_ETAG_ROUTE and is not advertised by default.
    This attribute can also carry vni if tunnel_type is specified.
    The supported PMSI tunnel types are the following.

    - PMSI_TYPE_NO_TUNNEL_INFO = 0
    - PMSI_TYPE_INGRESS_REP = 6

    ``redundancy_mode`` specifies a redundancy mode type.
    This attribute is advertised only if route_type is
    EVPN_ETH_AUTO_DISCOVERY and is not advertised by default.
    The supported redundancy mode types are the following.

    - REDUNDANCY_MODE_ALL_ACTIVE = 'all_active'
    - REDUNDANCY_MODE_SINGLE_ACTIVE = 'single_active'
    """
    func_name = 'evpn_prefix.add_local'

    # Check the default values
    if not next_hop:
        next_hop = '0.0.0.0'

    # Set required arguments
    kwargs = {EVPN_ROUTE_TYPE: route_type,
              ROUTE_DISTINGUISHER: route_dist,
              NEXT_HOP: next_hop}

    # Set optional arguments
    if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
        kwargs[TUNNEL_TYPE] = tunnel_type
    elif tunnel_type is not None:
        raise ValueError('Unsupported tunnel type: %s' % tunnel_type)

    # Set route type specific arguments
    if route_type == EVPN_ETH_AUTO_DISCOVERY:
        kwargs.update({
            EVPN_ESI: esi,
            EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
        })
        if vni is not None:
            kwargs[EVPN_VNI] = vni
        # Set Redundancy Mode Attribute arguments
        if redundancy_mode in [
                REDUNDANCY_MODE_ALL_ACTIVE,
                REDUNDANCY_MODE_SINGLE_ACTIVE]:
            kwargs[REDUNDANCY_MODE] = redundancy_mode
        elif redundancy_mode is not None:
            raise ValueError('Unsupported Redundancy Mode: %s' %
                             redundancy_mode)
    elif route_type == EVPN_MAC_IP_ADV_ROUTE:
        kwargs.update({
            EVPN_ESI: esi,
            EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            MAC_ADDR: mac_addr,
            IP_ADDR: ip_addr,
        })
        # Set tunnel type specific arguments
        if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
            kwargs[EVPN_VNI] = vni
    elif route_type == EVPN_MULTICAST_ETAG_ROUTE:
        kwargs.update({
            EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            IP_ADDR: ip_addr,
        })
        # Set tunnel type specific arguments
        if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
            kwargs[EVPN_VNI] = vni
        # Set PMSI Tunnel Attribute arguments
        if pmsi_tunnel_type in [
                PMSI_TYPE_NO_TUNNEL_INFO,
                PMSI_TYPE_INGRESS_REP]:
            kwargs[PMSI_TUNNEL_TYPE] = pmsi_tunnel_type
        elif pmsi_tunnel_type is not None:
            raise ValueError('Unsupported PMSI tunnel type: %s' %
                             pmsi_tunnel_type)
    elif route_type == EVPN_ETH_SEGMENT:
        kwargs.update({
            EVPN_ESI: esi,
            IP_ADDR: ip_addr,
        })
    elif route_type == EVPN_IP_PREFIX_ROUTE:
        kwargs.update({
            EVPN_ESI: esi,
            EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            IP_PREFIX: ip_prefix,
            GW_IP_ADDR: gw_ip_addr,
        })
        # Set tunnel type specific arguments
        if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
            kwargs[EVPN_VNI] = vni
    else:
        raise ValueError('Unsupported EVPN route type: %s' % route_type)

    call(func_name, **kwargs)
[ "def", "evpn_prefix_add", "(", "self", ",", "route_type", ",", "route_dist", ",", "esi", "=", "0", ",", "ethernet_tag_id", "=", "None", ",", "mac_addr", "=", "None", ",", "ip_addr", "=", "None", ",", "ip_prefix", "=", "None", ",", "gw_ip_addr", "=", "Non...
This method adds a new EVPN route to be advertised. ``route_type`` specifies one of the EVPN route type names. This parameter must be one of the following. - EVPN_ETH_AUTO_DISCOVERY = 'eth_ad' - EVPN_MAC_IP_ADV_ROUTE = 'mac_ip_adv' - EVPN_MULTICAST_ETAG_ROUTE = 'multicast_etag' - EVPN_ETH_SEGMENT = 'eth_seg' - EVPN_IP_PREFIX_ROUTE = 'ip_prefix' ``route_dist`` specifies a route distinguisher value. ``esi`` is a value to specify the Ethernet Segment Identifier. 0 is the default and denotes a single-homed site. If you want to advertise an esi other than 0, it must be set as a dictionary type. If esi is a dictionary type, the 'type' key must be set and specifies the ESI type. For the supported ESI types, see :py:mod:`ryu.lib.packet.bgp.EvpnEsi`. The remaining arguments are the same as those for the corresponding class. ``ethernet_tag_id`` specifies the Ethernet Tag ID. ``mac_addr`` specifies a MAC address to advertise. ``ip_addr`` specifies an IPv4 or IPv6 address to advertise. ``ip_prefix`` specifies an IPv4 or IPv6 prefix to advertise. ``gw_ip_addr`` specifies an IPv4 or IPv6 address of a gateway to advertise. ``vni`` specifies a Virtual Network Identifier for VXLAN or a Virtual Subnet Identifier for NVGRE. If tunnel_type is not TUNNEL_TYPE_VXLAN or TUNNEL_TYPE_NVGRE, this field is ignored. ``next_hop`` specifies the next hop address for this prefix. ``tunnel_type`` specifies the data plane encapsulation type to advertise. By default, this attribute is not advertised. The supported encapsulation types are the following. - TUNNEL_TYPE_VXLAN = 'vxlan' - TUNNEL_TYPE_NVGRE = 'nvgre' ``pmsi_tunnel_type`` specifies the type of the PMSI tunnel attribute used to encode the multicast tunnel identifier. This attribute is advertised only if route_type is EVPN_MULTICAST_ETAG_ROUTE and is not advertised by default. This attribute can also carry vni if tunnel_type is specified. The supported PMSI tunnel types are the following. - PMSI_TYPE_NO_TUNNEL_INFO = 0 - PMSI_TYPE_INGRESS_REP = 6 ``redundancy_mode`` specifies a redundancy mode type. This attribute is advertised only if route_type is EVPN_ETH_AUTO_DISCOVERY and is not advertised by default. The supported redundancy mode types are the following. - REDUNDANCY_MODE_ALL_ACTIVE = 'all_active' - REDUNDANCY_MODE_SINGLE_ACTIVE = 'single_active'
[ "This", "method", "adds", "a", "new", "EVPN", "route", "to", "be", "advertised", "." ]
python
train
40.489933
JohnVinyard/zounds
zounds/nputil/npx.py
https://github.com/JohnVinyard/zounds/blob/337b3f98753d09eaab1c72dcd37bb852a3fa5ac6/zounds/nputil/npx.py#L128-L190
def windowed(a, windowsize, stepsize=None, dopad=False):
    """
    Parameters
        a - the input array to restructure into overlapping windows
        windowsize - the size of each window of samples
        stepsize - the number of samples to shift the window each step.
                   If not specified, this defaults to windowsize
        dopad - If false (default), leftover samples are returned separately.
                If true, the input array is padded with zeros so that all
                samples are used.
    """
    if windowsize < 1:
        raise ValueError('windowsize must be greater than or equal to one')

    if stepsize is None:
        stepsize = windowsize

    if stepsize < 1:
        raise ValueError('stepsize must be greater than or equal to one')

    if not a.flags['C_CONTIGUOUS']:
        a = a.copy()

    if windowsize == 1 and stepsize == 1:
        # A windowsize and stepsize of one mean that no windowing is
        # necessary. Return the array unchanged.
        return np.zeros((0,) + a.shape[1:], dtype=a.dtype), a

    if windowsize == 1 and stepsize > 1:
        return np.zeros(0, dtype=a.dtype), a[::stepsize]

    # the original length of the input array
    l = a.shape[0]

    if dopad:
        p = _wpad(l, windowsize, stepsize)
        # pad the array with enough zeros so that there are no leftover
        # samples
        a = pad(a, p)
        # no leftovers; an empty array
        leftover = np.zeros((0,) + a.shape[1:], dtype=a.dtype)
    else:
        # cut the array so that any leftover samples are returned separately
        c, lc = _wcut(l, windowsize, stepsize)
        leftover = a[lc:]
        a = a[:c]

    if 0 == a.shape[0]:
        return leftover, np.zeros(a.shape, dtype=a.dtype)

    n = 1 + (a.shape[0] - windowsize) // stepsize
    s = a.strides[0]
    newshape = (n, windowsize) + a.shape[1:]
    newstrides = (stepsize * s, s) + a.strides[1:]
    out = np.ndarray.__new__(
        np.ndarray, strides=newstrides, shape=newshape,
        buffer=a, dtype=a.dtype)
    return leftover, out
[ "def", "windowed", "(", "a", ",", "windowsize", ",", "stepsize", "=", "None", ",", "dopad", "=", "False", ")", ":", "if", "windowsize", "<", "1", ":", "raise", "ValueError", "(", "'windowsize must be greater than or equal to one'", ")", "if", "stepsize", "is",...
Parameters a - the input array to restructure into overlapping windows windowsize - the size of each window of samples stepsize - the number of samples to shift the window each step. If not specified, this defaults to windowsize dopad - If false (default), leftover samples are returned separately. If true, the input array is padded with zeros so that all samples are used.
[ "Parameters", "a", "-", "the", "input", "array", "to", "restructure", "into", "overlapping", "windows", "windowsize", "-", "the", "size", "of", "each", "window", "of", "samples", "stepsize", "-", "the", "number", "of", "samples", "to", "shift", "the", "windo...
python
train
32.730159
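A usage sketch for windowed above. It must run inside its module, since the _wcut/_wpad helpers are defined there and not shown; this example assumes _wcut trims only samples that cannot fill a complete window:

import numpy as np

a = np.arange(10)
leftover, windows = windowed(a, windowsize=4, stepsize=2)
# windows is a strided (4, 4) view:
# [[0 1 2 3], [2 3 4 5], [4 5 6 7], [6 7 8 9]]; leftover is empty here.
print(windows.shape, leftover.shape)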
yyuu/botornado
boto/sdb/db/manager/xmlmanager.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/sdb/db/manager/xmlmanager.py#L306-L326
def get_props_from_doc(self, cls, id, doc):
    """
    Pull out the properties from this document
    Returns the class, the properties in a hash, and the id if provided as a tuple
    :return: (cls, props, id)
    """
    obj_node = doc.getElementsByTagName('object')[0]
    if not cls:
        class_name = obj_node.getAttribute('class')
        cls = find_class(class_name)
    if not id:
        id = obj_node.getAttribute('id')
    props = {}
    for prop_node in obj_node.getElementsByTagName('property'):
        prop_name = prop_node.getAttribute('name')
        prop = cls.find_property(prop_name)
        value = self.decode_value(prop, prop_node)
        value = prop.make_value_from_datastore(value)
        if value is not None:
            props[prop.name] = value
    return (cls, props, id)
[ "def", "get_props_from_doc", "(", "self", ",", "cls", ",", "id", ",", "doc", ")", ":", "obj_node", "=", "doc", ".", "getElementsByTagName", "(", "'object'", ")", "[", "0", "]", "if", "not", "cls", ":", "class_name", "=", "obj_node", ".", "getAttribute", ...
Pull out the properties from this document Returns the class, the properties in a hash, and the id if provided as a tuple :return: (cls, props, id)
[ "Pull", "out", "the", "properties", "from", "this", "document", "Returns", "the", "class", "the", "properties", "in", "a", "hash", "and", "the", "id", "if", "provided", "as", "a", "tuple", ":", "return", ":", "(", "cls", "props", "id", ")" ]
python
train
41.095238
saltstack/salt
salt/modules/status.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/status.py#L296-L441
def cpustats():
    '''
    Return the CPU stats for this minion

    .. versionchanged:: 2016.11.4
        Added support for AIX

    .. versionchanged:: 2018.3.0
        Added support for OpenBSD

    CLI Example:

    .. code-block:: bash

        salt '*' status.cpustats
    '''
    def linux_cpustats():
        '''
        linux specific implementation of cpustats
        '''
        ret = {}
        try:
            with salt.utils.files.fopen('/proc/stat', 'r') as fp_:
                stats = salt.utils.stringutils.to_unicode(fp_.read())
        except IOError:
            pass
        else:
            for line in stats.splitlines():
                if not line:
                    continue
                comps = line.split()
                if comps[0] == 'cpu':
                    ret[comps[0]] = {'idle': _number(comps[4]),
                                     'iowait': _number(comps[5]),
                                     'irq': _number(comps[6]),
                                     'nice': _number(comps[2]),
                                     'softirq': _number(comps[7]),
                                     'steal': _number(comps[8]),
                                     'system': _number(comps[3]),
                                     'user': _number(comps[1])}
                elif comps[0] == 'intr':
                    ret[comps[0]] = {'total': _number(comps[1]),
                                     'irqs': [_number(x) for x in comps[2:]]}
                elif comps[0] == 'softirq':
                    ret[comps[0]] = {'total': _number(comps[1]),
                                     'softirqs': [_number(x) for x in comps[2:]]}
                else:
                    ret[comps[0]] = _number(comps[1])
        return ret

    def freebsd_cpustats():
        '''
        freebsd specific implementation of cpustats
        '''
        vmstat = __salt__['cmd.run']('vmstat -P').splitlines()
        vm0 = vmstat[0].split()
        cpu0loc = vm0.index('cpu0')
        vm1 = vmstat[1].split()
        usloc = vm1.index('us')
        vm2 = vmstat[2].split()
        cpuctr = 0
        ret = {}
        for cpu in vm0[cpu0loc:]:
            ret[cpu] = {'us': _number(vm2[usloc + 3 * cpuctr]),
                        'sy': _number(vm2[usloc + 1 + 3 * cpuctr]),
                        'id': _number(vm2[usloc + 2 + 3 * cpuctr])}
            cpuctr += 1
        return ret

    def sunos_cpustats():
        '''
        sunos specific implementation of cpustats
        '''
        mpstat = __salt__['cmd.run']('mpstat 1 2').splitlines()
        fields = mpstat[0].split()
        ret = {}
        for cpu in mpstat:
            if cpu.startswith('CPU'):
                continue
            cpu = cpu.split()
            ret[_number(cpu[0])] = {}
            for i in range(1, len(fields) - 1):
                ret[_number(cpu[0])][fields[i]] = _number(cpu[i])
        return ret

    def aix_cpustats():
        '''
        AIX specific implementation of cpustats
        '''
        ret = {}
        ret['mpstat'] = []
        procn = None
        fields = []
        for line in __salt__['cmd.run']('mpstat -a').splitlines():
            if not line:
                continue
            procn = len(ret['mpstat'])
            if line.startswith('System'):
                comps = line.split(':')
                ret['mpstat'].append({})
                ret['mpstat'][procn]['system'] = {}
                cpu_comps = comps[1].split()
                for i in range(0, len(cpu_comps)):
                    cpu_vals = cpu_comps[i].split('=')
                    ret['mpstat'][procn]['system'][cpu_vals[0]] = cpu_vals[1]
            if line.startswith('cpu'):
                fields = line.split()
                continue
            if fields:
                cpustat = line.split()
                ret[_number(cpustat[0])] = {}
                for i in range(1, len(fields) - 1):
                    ret[_number(cpustat[0])][fields[i]] = _number(cpustat[i])
        return ret

    def openbsd_cpustats():
        '''
        openbsd specific implementation of cpustats
        '''
        systat = __salt__['cmd.run']('systat -s 2 -B cpu').splitlines()
        fields = systat[3].split()
        ret = {}
        for cpu in systat[4:]:
            cpu_line = cpu.split()
            cpu_idx = cpu_line[0]
            ret[cpu_idx] = {}
            for idx, field in enumerate(fields[1:]):
                ret[cpu_idx][field] = cpu_line[idx + 1]
        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_cpustats,
        'FreeBSD': freebsd_cpustats,
        'OpenBSD': openbsd_cpustats,
        'SunOS': sunos_cpustats,
        'AIX': aix_cpustats,
    }

    errmsg = 'This method is unsupported on the current operating system!'
    return get_version.get(__grains__['kernel'], lambda: errmsg)()
[ "def", "cpustats", "(", ")", ":", "def", "linux_cpustats", "(", ")", ":", "'''\n linux specific implementation of cpustats\n '''", "ret", "=", "{", "}", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "'/proc/stat'", ",",...
Return the CPU stats for this minion .. versionchanged:: 2016.11.4 Added support for AIX .. versionchanged:: 2018.3.0 Added support for OpenBSD CLI Example: .. code-block:: bash salt '*' status.cpustats
[ "Return", "the", "CPU", "stats", "for", "this", "minion" ]
python
train
32.739726
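The Linux branch above splits each /proc/stat line on whitespace and maps fixed columns to names; a standalone sketch of that parse with a hard-coded sample line (so it runs on any platform):

sample = 'cpu  4705 356 584 3699 23 23 0 0 0 0'
comps = sample.split()
cpu = {'user': int(comps[1]), 'nice': int(comps[2]),
       'system': int(comps[3]), 'idle': int(comps[4]),
       'iowait': int(comps[5]), 'irq': int(comps[6]),
       'softirq': int(comps[7]), 'steal': int(comps[8])}
print(cpu)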
angr/angr
angr/analyses/cdg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cdg.py#L84-L112
def _construct(self):
    """
    Construct a control dependence graph.

    This implementation is based on figure 6 of the paper An Efficient Method
    of Computing Static Single Assignment Form by Ron Cytron et al.
    """
    self._acyclic_cfg = self._cfg.copy()
    # TODO: Cycle-removing is not needed - confirm it later
    # The CFG we use should be acyclic!
    # self._acyclic_cfg.remove_cycles()

    # Pre-process the acyclic CFG
    self._pre_process_cfg()

    # Construct post-dominator tree
    self._pd_construct()

    self._graph = networkx.DiGraph()

    # Construct the reversed dominance frontier mapping
    rdf = compute_dominance_frontier(self._normalized_cfg, self._post_dom)

    for y in self._cfg.graph.nodes():
        if y not in rdf:
            continue
        for x in rdf[y]:
            self._graph.add_edge(x, y)
[ "def", "_construct", "(", "self", ")", ":", "self", ".", "_acyclic_cfg", "=", "self", ".", "_cfg", ".", "copy", "(", ")", "# TODO: Cycle-removing is not needed - confirm it later", "# The CFG we use should be acyclic!", "#self._acyclic_cfg.remove_cycles()", "# Pre-process the...
Construct a control dependence graph. This implementation is based on figure 6 of the paper An Efficient Method of Computing Static Single Assignment Form by Ron Cytron et al.
[ "Construct", "a", "control", "dependence", "graph", "." ]
python
train
31.103448
annoviko/pyclustering
pyclustering/cluster/optics.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/optics.py#L682-L713
def __update_order_seed(self, optic_descriptor, neighbors_descriptors, order_seed):
    """!
    @brief Update sorted list of reachable objects (from core-object) that should be processed using neighbors of core-object.

    @param[in] optic_descriptor (optics_descriptor): Core-object whose neighbors should be analysed.
    @param[in] neighbors_descriptors (list): List of neighbors of core-object.
    @param[in|out] order_seed (list): List of sorted object in line with reachable distance.

    """
    for neighbor_descriptor in neighbors_descriptors:
        index_neighbor = neighbor_descriptor[0]
        current_reachable_distance = neighbor_descriptor[1]

        if self.__optics_objects[index_neighbor].processed is not True:
            reachable_distance = max(current_reachable_distance, optic_descriptor.core_distance)
            if self.__optics_objects[index_neighbor].reachability_distance is None:
                self.__optics_objects[index_neighbor].reachability_distance = reachable_distance

                # insert element in queue O(n) - worst case.
                index_insertion = len(order_seed)
                for index_seed in range(0, len(order_seed)):
                    if reachable_distance < order_seed[index_seed].reachability_distance:
                        index_insertion = index_seed
                        break

                order_seed.insert(index_insertion, self.__optics_objects[index_neighbor])
            else:
                if reachable_distance < self.__optics_objects[index_neighbor].reachability_distance:
                    self.__optics_objects[index_neighbor].reachability_distance = reachable_distance
                    order_seed.sort(key=lambda obj: obj.reachability_distance)
[ "def", "__update_order_seed", "(", "self", ",", "optic_descriptor", ",", "neighbors_descriptors", ",", "order_seed", ")", ":", "for", "neighbor_descriptor", "in", "neighbors_descriptors", ":", "index_neighbor", "=", "neighbor_descriptor", "[", "0", "]", "current_reachab...
! @brief Update sorted list of reachable objects (from core-object) that should be processed using neighbors of core-object. @param[in] optic_descriptor (optics_descriptor): Core-object whose neighbors should be analysed. @param[in] neighbors_descriptors (list): List of neighbors of core-object. @param[in|out] order_seed (list): List of sorted object in line with reachable distance.
[ "!" ]
python
valid
60.21875
Jammy2211/PyAutoLens
autolens/data/ccd.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/ccd.py#L728-L906
def load_ccd_data_from_fits(image_path, pixel_scale, image_hdu=0,
                            resized_ccd_shape=None, resized_ccd_origin_pixels=None,
                            resized_ccd_origin_arcsec=None,
                            psf_path=None, psf_hdu=0, resized_psf_shape=None,
                            renormalize_psf=True,
                            noise_map_path=None, noise_map_hdu=0,
                            noise_map_from_image_and_background_noise_map=False,
                            convert_noise_map_from_weight_map=False,
                            convert_noise_map_from_inverse_noise_map=False,
                            background_noise_map_path=None, background_noise_map_hdu=0,
                            convert_background_noise_map_from_weight_map=False,
                            convert_background_noise_map_from_inverse_noise_map=False,
                            poisson_noise_map_path=None, poisson_noise_map_hdu=0,
                            poisson_noise_map_from_image=False,
                            convert_poisson_noise_map_from_weight_map=False,
                            convert_poisson_noise_map_from_inverse_noise_map=False,
                            exposure_time_map_path=None, exposure_time_map_hdu=0,
                            exposure_time_map_from_single_value=None,
                            exposure_time_map_from_inverse_noise_map=False,
                            background_sky_map_path=None, background_sky_map_hdu=0,
                            convert_from_electrons=False, gain=None, convert_from_adus=False,
                            lens_name=None):
    """Factory for loading the ccd data from .fits files, as well as computing properties like the noise-map,
    exposure-time map, etc. from the ccd-data.

    This factory also includes a number of routines for converting the ccd-data from units not supported by PyAutoLens \
    (e.g. adus, electrons) to electrons per second.

    Parameters
    ----------
    lens_name
    image_path : str
        The path to the image .fits file containing the image (e.g. '/path/to/image.fits')
    pixel_scale : float
        The size of each pixel in arc seconds.
    image_hdu : int
        The hdu the image is contained in the .fits file specified by *image_path*.
    resized_ccd_shape : (int, int) | None
        If input, the ccd arrays that are image sized (e.g. the image, noise-maps) are resized to these dimensions.
    resized_ccd_origin_pixels : (int, int) | None
        If the ccd arrays are resized, this defines a new origin (in pixels) around which recentering occurs.
    resized_ccd_origin_arcsec : (float, float) | None
        If the ccd arrays are resized, this defines a new origin (in arc-seconds) around which recentering occurs.
    psf_path : str
        The path to the psf .fits file containing the psf (e.g. '/path/to/psf.fits')
    psf_hdu : int
        The hdu the psf is contained in the .fits file specified by *psf_path*.
    resized_psf_shape : (int, int) | None
        If input, the psf is resized to these dimensions.
    renormalize_psf : bool
        If True, the PSF is renormalized such that all elements sum to 1.0.
    noise_map_path : str
        The path to the noise_map .fits file containing the noise_map (e.g. '/path/to/noise_map.fits')
    noise_map_hdu : int
        The hdu the noise_map is contained in the .fits file specified by *noise_map_path*.
    noise_map_from_image_and_background_noise_map : bool
        If True, the noise-map is computed from the observed image and background noise-map \
        (see NoiseMap.from_image_and_background_noise_map).
    convert_noise_map_from_weight_map : bool
        If True, the noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \
        *NoiseMap.from_weight_map).
    convert_noise_map_from_inverse_noise_map : bool
        If True, the noise-map loaded from the .fits file is converted from an inverse noise-map to a noise-map (see \
        *NoiseMap.from_inverse_noise_map).
    background_noise_map_path : str
        The path to the background_noise_map .fits file containing the background noise-map \
        (e.g. '/path/to/background_noise_map.fits')
    background_noise_map_hdu : int
        The hdu the background_noise_map is contained in the .fits file specified by *background_noise_map_path*.
    convert_background_noise_map_from_weight_map : bool
        If True, the background noise-map loaded from the .fits file is converted from a weight-map to a noise-map \
        (see *NoiseMap.from_weight_map).
    convert_background_noise_map_from_inverse_noise_map : bool
        If True, the background noise-map loaded from the .fits file is converted from an inverse noise-map to a \
        noise-map (see *NoiseMap.from_inverse_noise_map).
    poisson_noise_map_path : str
        The path to the poisson_noise_map .fits file containing the Poisson noise-map \
        (e.g. '/path/to/poisson_noise_map.fits')
    poisson_noise_map_hdu : int
        The hdu the poisson_noise_map is contained in the .fits file specified by *poisson_noise_map_path*.
    poisson_noise_map_from_image : bool
        If True, the Poisson noise-map is estimated using the image.
    convert_poisson_noise_map_from_weight_map : bool
        If True, the Poisson noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \
        *NoiseMap.from_weight_map).
    convert_poisson_noise_map_from_inverse_noise_map : bool
        If True, the Poisson noise-map loaded from the .fits file is converted from an inverse noise-map to a \
        noise-map (see *NoiseMap.from_inverse_noise_map).
    exposure_time_map_path : str
        The path to the exposure_time_map .fits file containing the exposure time map \
        (e.g. '/path/to/exposure_time_map.fits')
    exposure_time_map_hdu : int
        The hdu the exposure_time_map is contained in the .fits file specified by *exposure_time_map_path*.
    exposure_time_map_from_single_value : float
        The exposure time of the ccd imaging, which is used to compute the exposure-time map as a single value \
        (see *ExposureTimeMap.from_single_value*).
    exposure_time_map_from_inverse_noise_map : bool
        If True, the exposure-time map is computed from the background noise_map map \
        (see *ExposureTimeMap.from_background_noise_map*)
    background_sky_map_path : str
        The path to the background_sky_map .fits file containing the background sky map \
        (e.g. '/path/to/background_sky_map.fits').
    background_sky_map_hdu : int
        The hdu the background_sky_map is contained in the .fits file specified by *background_sky_map_path*.
    convert_from_electrons : bool
        If True, the input unblurred_image_1d are in units of electrons and all converted to electrons / second \
        using the exposure time map.
    gain : float
        The image gain, used for converting from ADUs.
    convert_from_adus : bool
        If True, the input unblurred_image_1d are in units of adus and all converted to electrons / second \
        using the exposure time map and gain.
    """

    image = load_image(image_path=image_path, image_hdu=image_hdu, pixel_scale=pixel_scale)

    background_noise_map = load_background_noise_map(
        background_noise_map_path=background_noise_map_path,
        background_noise_map_hdu=background_noise_map_hdu,
        pixel_scale=pixel_scale,
        convert_background_noise_map_from_weight_map=convert_background_noise_map_from_weight_map,
        convert_background_noise_map_from_inverse_noise_map=convert_background_noise_map_from_inverse_noise_map)

    if background_noise_map is not None:
        inverse_noise_map = 1.0 / background_noise_map
    else:
        inverse_noise_map = None

    exposure_time_map = load_exposure_time_map(
        exposure_time_map_path=exposure_time_map_path,
        exposure_time_map_hdu=exposure_time_map_hdu,
        pixel_scale=pixel_scale, shape=image.shape,
        exposure_time=exposure_time_map_from_single_value,
        exposure_time_map_from_inverse_noise_map=exposure_time_map_from_inverse_noise_map,
        inverse_noise_map=inverse_noise_map)

    poisson_noise_map = load_poisson_noise_map(
        poisson_noise_map_path=poisson_noise_map_path,
        poisson_noise_map_hdu=poisson_noise_map_hdu,
        pixel_scale=pixel_scale,
        convert_poisson_noise_map_from_weight_map=convert_poisson_noise_map_from_weight_map,
        convert_poisson_noise_map_from_inverse_noise_map=convert_poisson_noise_map_from_inverse_noise_map,
        image=image, exposure_time_map=exposure_time_map,
        poisson_noise_map_from_image=poisson_noise_map_from_image,
        convert_from_electrons=convert_from_electrons, gain=gain,
        convert_from_adus=convert_from_adus)

    noise_map = load_noise_map(
        noise_map_path=noise_map_path, noise_map_hdu=noise_map_hdu, pixel_scale=pixel_scale,
        image=image, background_noise_map=background_noise_map,
        exposure_time_map=exposure_time_map,
        convert_noise_map_from_weight_map=convert_noise_map_from_weight_map,
        convert_noise_map_from_inverse_noise_map=convert_noise_map_from_inverse_noise_map,
        noise_map_from_image_and_background_noise_map=noise_map_from_image_and_background_noise_map,
        convert_from_electrons=convert_from_electrons, gain=gain,
        convert_from_adus=convert_from_adus)

    psf = load_psf(psf_path=psf_path, psf_hdu=psf_hdu, pixel_scale=pixel_scale,
                   renormalize=renormalize_psf)

    background_sky_map = load_background_sky_map(
        background_sky_map_path=background_sky_map_path,
        background_sky_map_hdu=background_sky_map_hdu,
        pixel_scale=pixel_scale)

    image = CCDData(image=image, pixel_scale=pixel_scale, psf=psf, noise_map=noise_map,
                    background_noise_map=background_noise_map,
                    poisson_noise_map=poisson_noise_map,
                    exposure_time_map=exposure_time_map,
                    background_sky_map=background_sky_map,
                    gain=gain, name=lens_name)

    if resized_ccd_shape is not None:
        image = image.new_ccd_data_with_resized_arrays(
            new_shape=resized_ccd_shape,
            new_centre_pixels=resized_ccd_origin_pixels,
            new_centre_arcsec=resized_ccd_origin_arcsec)

    if resized_psf_shape is not None:
        image = image.new_ccd_data_with_resized_psf(new_shape=resized_psf_shape)

    if convert_from_electrons:
        image = image.new_ccd_data_converted_from_electrons()
    elif convert_from_adus:
        image = image.new_ccd_data_converted_from_adus(gain=gain)

    return image
[ "def", "load_ccd_data_from_fits", "(", "image_path", ",", "pixel_scale", ",", "image_hdu", "=", "0", ",", "resized_ccd_shape", "=", "None", ",", "resized_ccd_origin_pixels", "=", "None", ",", "resized_ccd_origin_arcsec", "=", "None", ",", "psf_path", "=", "None", ...
Factory for loading the ccd data from .fits files, as well as computing properties like the noise-map, exposure-time map, etc. from the ccd-data. This factory also includes a number of routines for converting the ccd-data from units not supported by PyAutoLens \ (e.g. adus, electrons) to electrons per second. Parameters ---------- lens_name image_path : str The path to the image .fits file containing the image (e.g. '/path/to/image.fits') pixel_scale : float The size of each pixel in arc seconds. image_hdu : int The hdu the image is contained in the .fits file specified by *image_path*. resized_ccd_shape : (int, int) | None If input, the ccd arrays that are image sized (e.g. the image, noise-maps) are resized to these dimensions. resized_ccd_origin_pixels : (int, int) | None If the ccd arrays are resized, this defines a new origin (in pixels) around which recentering occurs. resized_ccd_origin_arcsec : (float, float) | None If the ccd arrays are resized, this defines a new origin (in arc-seconds) around which recentering occurs. psf_path : str The path to the psf .fits file containing the psf (e.g. '/path/to/psf.fits') psf_hdu : int The hdu the psf is contained in the .fits file specified by *psf_path*. resized_psf_shape : (int, int) | None If input, the psf is resized to these dimensions. renormalize_psf : bool If True, the PSF is renormalized such that all elements sum to 1.0. noise_map_path : str The path to the noise_map .fits file containing the noise_map (e.g. '/path/to/noise_map.fits') noise_map_hdu : int The hdu the noise_map is contained in the .fits file specified by *noise_map_path*. noise_map_from_image_and_background_noise_map : bool If True, the noise-map is computed from the observed image and background noise-map \ (see NoiseMap.from_image_and_background_noise_map). convert_noise_map_from_weight_map : bool If True, the noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \ *NoiseMap.from_weight_map). convert_noise_map_from_inverse_noise_map : bool If True, the noise-map loaded from the .fits file is converted from an inverse noise-map to a noise-map (see \ *NoiseMap.from_inverse_noise_map). background_noise_map_path : str The path to the background_noise_map .fits file containing the background noise-map \ (e.g. '/path/to/background_noise_map.fits') background_noise_map_hdu : int The hdu the background_noise_map is contained in the .fits file specified by *background_noise_map_path*. convert_background_noise_map_from_weight_map : bool If True, the background noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \ *NoiseMap.from_weight_map). convert_background_noise_map_from_inverse_noise_map : bool If True, the background noise-map loaded from the .fits file is converted from an inverse noise-map to a \ noise-map (see *NoiseMap.from_inverse_noise_map). poisson_noise_map_path : str The path to the poisson_noise_map .fits file containing the Poisson noise-map \ (e.g. '/path/to/poisson_noise_map.fits') poisson_noise_map_hdu : int The hdu the poisson_noise_map is contained in the .fits file specified by *poisson_noise_map_path*. poisson_noise_map_from_image : bool If True, the Poisson noise-map is estimated using the image. convert_poisson_noise_map_from_weight_map : bool If True, the Poisson noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \ *NoiseMap.from_weight_map). convert_poisson_noise_map_from_inverse_noise_map : bool If True, the Poisson noise-map loaded from the .fits file is converted from an inverse noise-map to a \ noise-map (see *NoiseMap.from_inverse_noise_map). exposure_time_map_path : str The path to the exposure_time_map .fits file containing the exposure time map \ (e.g. '/path/to/exposure_time_map.fits') exposure_time_map_hdu : int The hdu the exposure_time_map is contained in the .fits file specified by *exposure_time_map_path*. exposure_time_map_from_single_value : float The exposure time of the ccd imaging, which is used to compute the exposure-time map as a single value \ (see *ExposureTimeMap.from_single_value*). exposure_time_map_from_inverse_noise_map : bool If True, the exposure-time map is computed from the background noise_map map \ (see *ExposureTimeMap.from_background_noise_map*) background_sky_map_path : str The path to the background_sky_map .fits file containing the background sky map \ (e.g. '/path/to/background_sky_map.fits'). background_sky_map_hdu : int The hdu the background_sky_map is contained in the .fits file specified by *background_sky_map_path*. convert_from_electrons : bool If True, the input unblurred_image_1d are in units of electrons and all converted to electrons / second using the exposure \ time map. gain : float The image gain, used for converting from ADUs. convert_from_adus : bool If True, the input unblurred_image_1d are in units of adus and all converted to electrons / second using the exposure \ time map and gain.
[ "Factory", "for", "loading", "the", "ccd", "data", "from", ".", "fits", "files", "as", "well", "as", "computing", "properties", "like", "the", "noise", "-", "map", "exposure", "-", "time", "map", "etc", ".", "from", "the", "ccd", "-", "data", "." ]
python
valid
64.798883
jart/fabulous
fabulous/utils.py
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/utils.py#L100-L115
def dimensions(self):
    """Returns terminal dimensions

    Don't save this information for long periods of time because
    the user might resize their terminal.

    :return: Returns ``(width, height)``.  If there's no terminal
             to be found, we'll just return ``(79, 40)``.
    """
    try:
        call = fcntl.ioctl(self.termfd, termios.TIOCGWINSZ, "\000" * 8)
    except IOError:
        return (79, 40)
    else:
        height, width = struct.unpack("hhhh", call)[:2]
        return (width, height)
[ "def", "dimensions", "(", "self", ")", ":", "try", ":", "call", "=", "fcntl", ".", "ioctl", "(", "self", ".", "termfd", ",", "termios", ".", "TIOCGWINSZ", ",", "\"\\000\"", "*", "8", ")", "except", "IOError", ":", "return", "(", "79", ",", "40", ")...
Returns terminal dimensions Don't save this information for long periods of time because the user might resize their terminal. :return: Returns ``(width, height)``. If there's no terminal to be found, we'll just return ``(79, 40)``.
[ "Returns", "terminal", "dimensions" ]
python
train
34.75
klmitch/requiem
requiem/headers.py
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/headers.py#L63-L66
def get(self, k, d=None):
    """Override dict.get() to title-case keys."""
    return super(HeaderDict, self).get(k.title(), d)
[ "def", "get", "(", "self", ",", "k", ",", "d", "=", "None", ")", ":", "return", "super", "(", "HeaderDict", ",", "self", ")", ".", "get", "(", "k", ".", "title", "(", ")", ",", "d", ")" ]
Override dict.get() to title-case keys.
[ "Override", "dict", ".", "get", "()", "to", "title", "-", "case", "keys", "." ]
python
train
33.5
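A self-contained sketch of the title-casing pattern behind HeaderDict.get above; the __setitem__ override is an assumption here (the rest of the class is not shown in this row):

class HeaderDict(dict):
    """A dict that title-cases keys so header lookups are case-insensitive."""

    def __setitem__(self, k, v):
        super(HeaderDict, self).__setitem__(k.title(), v)

    def get(self, k, d=None):
        """Override dict.get() to title-case keys."""
        return super(HeaderDict, self).get(k.title(), d)

headers = HeaderDict()
headers['content-type'] = 'application/json'
print(headers.get('CONTENT-TYPE'))  # application/json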
Robpol86/libnl
libnl/genl/mngt.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/genl/mngt.py#L206-L232
def genl_register_family(ops):
    """Register Generic Netlink family and associated commands.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L164

    Registers the specified Generic Netlink family definition together with all associated commands. After
    registration, received Generic Netlink messages can be passed to genl_handle_msg() which will validate the
    messages, look for a matching command and call the respective callback function automatically.

    Positional arguments:
    ops -- Generic Netlink family definition (genl_ops class instance).

    Returns:
    0 on success or a negative error code.
    """
    if not ops.o_name or (ops.o_cmds and ops.o_ncmds <= 0):
        return -NLE_INVAL

    if ops.o_id and lookup_family(ops.o_id):
        return -NLE_EXIST

    if lookup_family_by_name(ops.o_name):
        return -NLE_EXIST

    nl_list_add_tail(ops.o_list, genl_ops_list)
    return 0
[ "def", "genl_register_family", "(", "ops", ")", ":", "if", "not", "ops", ".", "o_name", "or", "(", "ops", ".", "o_cmds", "and", "ops", ".", "o_ncmds", "<=", "0", ")", ":", "return", "-", "NLE_INVAL", "if", "ops", ".", "o_id", "and", "lookup_family", ...
Register Generic Netlink family and associated commands. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L164 Registers the specified Generic Netlink family definition together with all associated commands. After registration, received Generic Netlink messages can be passed to genl_handle_msg() which will validate the messages, look for a matching command and call the respective callback function automatically. Positional arguments: ops -- Generic Netlink family definition (genl_ops class instance). Returns: 0 on success or a negative error code.
[ "Register", "Generic", "Netlink", "family", "and", "associated", "commands", "." ]
python
train
33.925926
JosuaKrause/quick_server
quick_server/quick_server.py
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L511-L633
def get_post_file(self, hdr, f_in, clen, post, files):
    """Reads from a multipart/form-data."""
    lens = {
        'clen': clen,
        'push': [],
    }
    prefix = "boundary="
    if not hdr.startswith(prefix):
        return None
    boundary = hdr[len(prefix):].strip().encode('utf8')
    if not boundary:
        return None
    boundary = b'--' + boundary
    raw_boundary = b'\r\n' + boundary
    end_boundary = boundary + b'--'

    def push_back(line):
        ln = BytesIO()
        ln.write(line)
        ln.flush()
        ln.seek(0)
        lens['clen'] += len(line)
        lens['push'].append(ln)

    def read_line():
        line = b''
        while not line.endswith(b'\n') and lens['push']:
            br = lens['push'].pop()
            line += br.readline()
            tmp = br.read(1)
            if tmp != b'':
                br.seek(br.tell() - 1)
                lens['push'].append(br)
        if not line.endswith(b'\n'):
            line += f_in.readline(lens['clen'])
            lens['clen'] -= len(line)
            if line == b'' or lens['clen'] < 0:
                raise ValueError("Unexpected EOF")
        return line.strip()

    def read(length):
        res = b''
        while len(res) < length and lens['push']:
            br = lens['push'].pop()
            res += br.read(length - len(res))
            tmp = br.read(1)
            if tmp != b'':
                br.seek(br.tell() - 1)
                lens['push'].append(br)
        if len(res) < length:
            res += f_in.read(length - len(res))
            lens['clen'] -= len(res)
            if res == b'' or lens['clen'] < 0:
                raise ValueError("Unexpected EOF")
        return res

    def parse_file():
        f = BytesIO()
        buff_size = 10 * 1024

        def write_buff(buff):
            if f.tell() + len(buff) > self.server.max_file_size:
                raise PreventDefaultResponse(
                    413, "Uploaded file is too large! {0} > {1}".format(
                        f.tell() + len(buff), self.server.max_file_size))
            f.write(buff)
            f.flush()

        buff = b""
        while True:
            buff += read(min(lens['clen'], buff_size))
            bix = buff.find(raw_boundary)
            if bix >= 0:
                write_buff(buff[:bix])
                push_back(buff[bix + len(raw_boundary) - len(boundary):])
                break
            out_split = max(len(buff) - len(raw_boundary), 0)
            if out_split > 0:
                write_buff(buff[:out_split])
                buff = buff[out_split:]
        f.seek(0)
        return f

    def parse_field():
        return parse_file().read().decode('utf8')

    while True:
        line = read_line()
        if line == end_boundary:
            if lens['clen'] > 0:
                raise ValueError(
                    "Expected EOF got: {0}".format(
                        repr(f_in.read(lens['clen']))))
            return
        if line != boundary:
            raise ValueError(
                "Expected boundary got: {0}".format(repr(line)))
        headers = {}
        while True:
            line = read_line()
            if not line:
                break
            key, value = line.split(b':', 1)
            headers[key.lower()] = value.strip()
        name = None
        if b'content-disposition' in headers:
            cdis = headers[b'content-disposition']
            if not cdis.startswith(b'form-data'):
                raise ValueError(
                    "Unknown content-disposition: {0}".format(repr(cdis)))
            name_field = b'name="'
            ix = cdis.find(name_field)
            if ix >= 0:
                name = cdis[ix + len(name_field):]
                name = name[:name.index(b'"')].decode('utf8')
        ctype = None
        if b'content-type' in headers:
            ctype = headers[b'content-type']
        # b'application/octet-stream':
        # we treat all files the same
        if ctype is not None:
            files[name] = parse_file()
        else:
            post[name] = parse_field()
[ "def", "get_post_file", "(", "self", ",", "hdr", ",", "f_in", ",", "clen", ",", "post", ",", "files", ")", ":", "lens", "=", "{", "'clen'", ":", "clen", ",", "'push'", ":", "[", "]", ",", "}", "prefix", "=", "\"boundary=\"", "if", "not", "hdr", "...
Reads from a multipart/form-data.
[ "Reads", "from", "a", "multipart", "/", "form", "-", "data", "." ]
python
train
35.829268
a10networks/a10-neutron-lbaas
a10_neutron_lbaas/v1/handler_hm.py
https://github.com/a10networks/a10-neutron-lbaas/blob/ff834c295c8019874ca4b209d864367e40cc9881/a10_neutron_lbaas/v1/handler_hm.py#L81-L87
def dissociate(self, c, context, hm, pool_id):
    """Remove a pool association, and the healthmonitor if it's the last one"""
    self._dissociate(c, context, hm, pool_id)
    pools = hm.get("pools", [])
    if not any(p for p in pools if p.get("pool_id") != pool_id):
        self._delete_unused(c, context, hm)
[ "def", "dissociate", "(", "self", ",", "c", ",", "context", ",", "hm", ",", "pool_id", ")", ":", "self", ".", "_dissociate", "(", "c", ",", "context", ",", "hm", ",", "pool_id", ")", "pools", "=", "hm", ".", "get", "(", "\"pools\"", ",", "[", "]"...
Remove a pool association, and the healthmonitor if it's the last one
[ "Remove", "a", "pool", "association", "and", "the", "healthmonitor", "if", "its", "the", "last", "one" ]
python
train
46.714286
mediawiki-utilities/python-mwreverts
mwreverts/utilities/revdocs2reverts.py
https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/utilities/revdocs2reverts.py#L54-L114
def revdocs2reverts(rev_docs, radius=defaults.RADIUS, use_sha1=False,
                    resort=False, verbose=False):
    """
    Converts a sequence of page-partitioned revision documents into a
    sequence of reverts.

    :Params:
        rev_docs : `iterable` ( `dict` )
            a page-partitioned sequence of revision documents
        radius : `int`
            The maximum number of revisions that a revert can reference.
        use_sha1 : `bool`
            Use the sha1 field as the checksum for comparison.
        resort : `bool`
            If True, re-sort the revisions of each page.
        verbose : `bool`
            Print dots and stuff
    """
    page_rev_docs = groupby(rev_docs, lambda rd: rd.get('page'))

    for page_doc, rev_docs in page_rev_docs:

        if verbose:
            sys.stderr.write(page_doc.get('title') + ": ")
            sys.stderr.flush()

        if resort:
            if verbose:
                sys.stderr.write("(sorting) ")
                sys.stderr.flush()
            rev_docs = sorted(
                rev_docs, key=lambda r: (r.get('timestamp'), r.get('id')))

        detector = Detector(radius=radius)

        for rev_doc in rev_docs:
            if not use_sha1 and 'text' not in rev_doc:
                logger.warn("Skipping {0}: 'text' field not found in {0}"
                            .format(rev_doc['id'], rev_doc))
                continue

            if use_sha1:
                checksum = rev_doc.get('sha1') or DummyChecksum()
            elif 'text' in rev_doc:
                text_bytes = bytes(rev_doc['text'], 'utf8', 'replace')
                checksum = hashlib.sha1(text_bytes).digest()

            revert = detector.process(checksum, rev_doc)
            if revert:
                yield revert.to_json()
                if verbose:
                    sys.stderr.write("r")
                    sys.stderr.flush()
            else:
                if verbose:
                    sys.stderr.write(".")
                    sys.stderr.flush()

        if verbose:
            sys.stderr.write("\n")
            sys.stderr.flush()
[ "def", "revdocs2reverts", "(", "rev_docs", ",", "radius", "=", "defaults", ".", "RADIUS", ",", "use_sha1", "=", "False", ",", "resort", "=", "False", ",", "verbose", "=", "False", ")", ":", "page_rev_docs", "=", "groupby", "(", "rev_docs", ",", "lambda", ...
Converts a sequence of page-partitioned revision documents into a sequence of reverts. :Params: rev_docs : `iterable` ( `dict` ) a page-partitioned sequence of revision documents radius : `int` The maximum number of revisions that a revert can reference. use_sha1 : `bool` Use the sha1 field as the checksum for comparison. resort : `bool` If True, re-sort the revisions of each page. verbose : `bool` Print dots and stuff
[ "Converts", "a", "sequence", "of", "page", "-", "partitioned", "revision", "documents", "into", "a", "sequence", "of", "reverts", "." ]
python
train
33.786885
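The page partitioning in revdocs2reverts above relies on itertools.groupby over a stream that is already ordered by page; a tiny illustration of that pattern (plain strings stand in for page documents):

from itertools import groupby

rev_docs = [{'page': 'A', 'id': 1}, {'page': 'A', 'id': 2},
            {'page': 'B', 'id': 3}]
for page, docs in groupby(rev_docs, lambda rd: rd.get('page')):
    print(page, [d['id'] for d in docs])
# A [1, 2]
# B [3]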
grycap/RADL
radl/radl.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl.py#L1326-L1344
def check(self, radl):
    """Check the features in this network."""

    SIMPLE_FEATURES = {
        "host": (str, None),
        "credentials.username": (str, None),
        "credentials.password": (str, None),
        "credentials.private_key": (str, None)
    }
    self.check_simple(SIMPLE_FEATURES, radl)

    if not self.getHost():
        raise RADLParseException("Ansible host must have a host", line=self.line)
    (username, password, private_key) = self.getCredentialValues()
    if not username:
        raise RADLParseException("Ansible host must have a credentials.username", line=self.line)
    if not password and not private_key:
        raise RADLParseException("Ansible host must have a credentials.password or credentials.private_key",
                                 line=self.line)
[ "def", "check", "(", "self", ",", "radl", ")", ":", "SIMPLE_FEATURES", "=", "{", "\"host\"", ":", "(", "str", ",", "None", ")", ",", "\"credentials.username\"", ":", "(", "str", ",", "None", ")", ",", "\"credentials.password\"", ":", "(", "str", ",", "...
Check the features in this network.
[ "Check", "the", "features", "in", "this", "network", "." ]
python
train
44.789474
abingham/spor
src/spor/repository/repository.py
https://github.com/abingham/spor/blob/673c8c36c99a4b9ea882f002bfb529f1eca89126/src/spor/repository/repository.py#L148-L171
def _find_root_dir(path, spor_dir):
    """Search for a spor repo containing `path`.

    This searches for `spor_dir` in directories dominating `path`. If a
    directory containing `spor_dir` is found, then that directory is returned
    as a `pathlib.Path`.

    Returns: The dominating directory containing `spor_dir` as a
        `pathlib.Path`.

    Raises:
        ValueError: No repository is found.
    """
    start_path = pathlib.Path(os.getcwd() if path is None else path)

    paths = [start_path] + list(start_path.parents)

    for path in paths:
        data_dir = path / spor_dir
        if data_dir.exists() and data_dir.is_dir():
            return path

    raise ValueError('No spor repository found')
[ "def", "_find_root_dir", "(", "path", ",", "spor_dir", ")", ":", "start_path", "=", "pathlib", ".", "Path", "(", "os", ".", "getcwd", "(", ")", "if", "path", "is", "None", "else", "path", ")", "paths", "=", "[", "start_path", "]", "+", "list", "(", ...
Search for a spor repo containing `path`. This searches for `spor_dir` in directories dominating `path`. If a directory containing `spor_dir` is found, then that directory is returned as a `pathlib.Path`. Returns: The dominating directory containing `spor_dir` as a `pathlib.Path`. Raises: ValueError: No repository is found.
[ "Search", "for", "a", "spor", "repo", "containing", "path", "." ]
python
train
29
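A quick usage sketch for _find_root_dir above (run where the function is defined; the '.spor' directory name is just an illustration):

import pathlib
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    repo = pathlib.Path(tmp) / 'repo'
    (repo / '.spor').mkdir(parents=True)
    deep = repo / 'src' / 'deep'
    deep.mkdir(parents=True)
    # Walks deep, deep/.., and so on until a directory holding .spor is found.
    print(_find_root_dir(deep, '.spor'))  # .../repo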
not-na/peng3d
peng3d/actor/player.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/actor/player.py#L155-L165
def registerEventHandlers(self):
    """
    Registers the up and down handlers.

    Also registers a scheduled function every 60th of a second, causing pyglet to redraw your window with 60fps.
    """
    # Crouch/fly down
    self.peng.keybinds.add(self.peng.cfg["controls.controls.crouch"],
                           "peng3d:actor.%s.player.controls.crouch" % self.actor.uuid,
                           self.on_crouch_down, False)
    # Jump/fly up
    self.peng.keybinds.add(self.peng.cfg["controls.controls.jump"],
                           "peng3d:actor.%s.player.controls.jump" % self.actor.uuid,
                           self.on_jump_down, False)
    pyglet.clock.schedule_interval(self.update, 1.0 / 60)
[ "def", "registerEventHandlers", "(", "self", ")", ":", "# Crouch/fly down", "self", ".", "peng", ".", "keybinds", ".", "add", "(", "self", ".", "peng", ".", "cfg", "[", "\"controls.controls.crouch\"", "]", ",", "\"peng3d:actor.%s.player.controls.crouch\"", "%", "s...
Registers the up and down handlers. Also registers a scheduled function every 60th of a second, causing pyglet to redraw your window with 60fps.
[ "Registers", "the", "up", "and", "down", "handlers", ".", "Also", "registers", "a", "scheduled", "function", "every", "60th", "of", "a", "second", "causing", "pyglet", "to", "redraw", "your", "window", "with", "60fps", "." ]
python
test
57.363636
xtrementl/focus
focus/plugin/registration.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/registration.py#L181-L203
def setup_sudo_access(plugin):
    """ Injects a `run_root` method into the provided plugin instance that
        forks a shell command using sudo. Used for command plugin needs.

        `plugin`
            ``Plugin`` instance.
        """

    def run_root(self, command):
        """ Executes a shell command as root.

            `command`
                Shell command string.

            Returns boolean.
            """
        try:
            return not (common.shell_process('sudo ' + command) is None)
        except KeyboardInterrupt:  # user cancelled
            return False

    plugin.run_root = types.MethodType(run_root, plugin)
[ "def", "setup_sudo_access", "(", "plugin", ")", ":", "def", "run_root", "(", "self", ",", "command", ")", ":", "\"\"\" Executes a shell command as root.\n\n `command`\n Shell command string.\n\n Returns boolean.\n \"\"\"", "try", ":", ...
Injects a `run_root` method into the provided plugin instance that forks a shell command using sudo. Used for command plugin needs. `plugin` ``Plugin`` instance.
[ "Injects", "a", "run_root", "method", "into", "the", "provided", "plugin", "instance", "that", "forks", "a", "shell", "command", "using", "sudo", ".", "Used", "for", "command", "plugin", "needs", "." ]
python
train
27.304348
saltstack/salt
salt/utils/functools.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/functools.py#L15-L39
def namespaced_function(function, global_dict, defaults=None, preserve_context=False):
    '''
    Redefine (clone) a function under a different globals() namespace scope

        preserve_context:
            Allow keeping the context taken from original namespace,
            and extend it with globals() taken from
            new targeted namespace.
    '''
    if defaults is None:
        defaults = function.__defaults__

    if preserve_context:
        _global_dict = function.__globals__.copy()
        _global_dict.update(global_dict)
        global_dict = _global_dict

    new_namespaced_function = types.FunctionType(
        function.__code__,
        global_dict,
        name=function.__name__,
        argdefs=defaults,
        closure=function.__closure__
    )
    new_namespaced_function.__dict__.update(function.__dict__)
    return new_namespaced_function
[ "def", "namespaced_function", "(", "function", ",", "global_dict", ",", "defaults", "=", "None", ",", "preserve_context", "=", "False", ")", ":", "if", "defaults", "is", "None", ":", "defaults", "=", "function", ".", "__defaults__", "if", "preserve_context", "...
Redefine (clone) a function under a different globals() namespace scope

preserve_context: Allow keeping the context taken from original namespace, and extend it with globals() taken from new targeted namespace.
[ "Redefine", "(", "clone", ")", "a", "function", "under", "a", "different", "globals", "()", "namespace", "scope" ]
python
train
34.16
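A hedged, self-contained sketch of the cloning idea behind namespaced_function: rebuilding a function object against a different globals() dict changes which module-level names it resolves. The names below are illustrative.

import types

def greet():
    return 'hello ' + TARGET  # TARGET is looked up in the function's globals

clone = types.FunctionType(
    greet.__code__,
    {'TARGET': 'world'},  # the replacement globals namespace
    name=greet.__name__,
)
print(clone())  # -> 'hello world'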
ninuxorg/nodeshot
nodeshot/core/websockets/handlers.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/websockets/handlers.py#L21-L36
def add_client(self, user_id=None):
    """
    Adds current instance to public or private channel.
    If user_id is specified it will be added to the private channel;
    if user_id is not specified it will be added to the public one instead.
    """
    if user_id is None:
        # generate a random uuid if it's an unauthenticated client
        self.channel = 'public'
        user_id = uuid.uuid1().hex
    else:
        self.channel = 'private'
    self.id = user_id
    self.channels[self.channel][self.id] = self
    print('Client connected to the %s channel.' % self.channel)
[ "def", "add_client", "(", "self", ",", "user_id", "=", "None", ")", ":", "if", "user_id", "is", "None", ":", "# generate a random uuid if it's an unauthenticated client", "self", ".", "channel", "=", "'public'", "user_id", "=", "uuid", ".", "uuid1", "(", ")", ...
Adds current instance to public or private channel. If user_id is specified it will be added to the private channel; if user_id is not specified it will be added to the public one instead.
[ "Adds", "current", "instance", "to", "public", "or", "private", "channel", ".", "If", "user_id", "is", "specified", "it", "will", "be", "added", "to", "the", "private", "channel", "If", "user_id", "is", "not", "specified", "it", "will", "be", "added", "to"...
python
train
39.75
bfrog/whizzer
whizzer/client.py
https://github.com/bfrog/whizzer/blob/a1e43084b3ac8c1f3fb4ada081777cdbf791fd77/whizzer/client.py#L162-L178
def _connect(self, sock, addr, timeout):
    """Start watching the socket for it to be writable."""
    if self.connection:
        raise SocketClientConnectedError()

    if self.connector:
        raise SocketClientConnectingError()

    self.connect_deferred = Deferred(self.loop)
    self.sock = sock
    self.addr = addr
    self.connector = Connector(self.loop, sock, addr, timeout)
    self.connector.deferred.add_callback(self._connected)
    self.connector.deferred.add_errback(self._connect_failed)
    self.connector.start()
    return self.connect_deferred
[ "def", "_connect", "(", "self", ",", "sock", ",", "addr", ",", "timeout", ")", ":", "if", "self", ".", "connection", ":", "raise", "SocketClientConnectedError", "(", ")", "if", "self", ".", "connector", ":", "raise", "SocketClientConnectingError", "(", ")", ...
Start watching the socket for it to be writable.
[ "Start", "watching", "the", "socket", "for", "it", "to", "be", "writtable", "." ]
python
train
35.647059
kwikteam/phy
phy/gui/qt.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/gui/qt.py#L162-L166
def stop(self): """Stop the current timer if there is one and cancel the async call.""" if self._timer: self._timer.stop() self._timer.deleteLater()
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_timer", ":", "self", ".", "_timer", ".", "stop", "(", ")", "self", ".", "_timer", ".", "deleteLater", "(", ")" ]
Stop the current timer if there is one and cancel the async call.
[ "Stop", "the", "current", "timer", "if", "there", "is", "one", "and", "cancel", "the", "async", "call", "." ]
python
train
36.8
pyviz/imagen
imagen/colorspaces.py
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L49-L62
def _threeDdot_simple(M,a): "Return Ma, where M is a 3x3 transformation matrix, for each pixel" result = np.empty(a.shape,dtype=a.dtype) for i in range(a.shape[0]): for j in range(a.shape[1]): A = np.array([a[i,j,0],a[i,j,1],a[i,j,2]]).reshape((3,1)) L = np.dot(M,A) result[i,j,0] = L[0] result[i,j,1] = L[1] result[i,j,2] = L[2] return result
[ "def", "_threeDdot_simple", "(", "M", ",", "a", ")", ":", "result", "=", "np", ".", "empty", "(", "a", ".", "shape", ",", "dtype", "=", "a", ".", "dtype", ")", "for", "i", "in", "range", "(", "a", ".", "shape", "[", "0", "]", ")", ":", "for",...
Return Ma, where M is a 3x3 transformation matrix, for each pixel
[ "Return", "Ma", "where", "M", "is", "a", "3x3", "transformation", "matrix", "for", "each", "pixel" ]
python
train
29.785714
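The per-pixel loop in _threeDdot_simple is a single tensor contraction; a minimal NumPy sketch of the vectorized equivalent (not part of the original module):

import numpy as np

def threeDdot_vectorized(M, a):
    # result[i, j] = M @ a[i, j] for every pixel at once
    return np.einsum('kl,ijl->ijk', M, a)

# Quick self-check against random data
M = np.random.rand(3, 3)
a = np.random.rand(4, 5, 3)
assert np.allclose(threeDdot_vectorized(M, a), a @ M.T)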
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/interactive.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/interactive.py#L1768-L1793
def do_bd(self, arg): """ [~process] bd <address> - disable a code breakpoint [~thread] bd <address> - disable a hardware breakpoint [~process] bd <address-address> - disable a memory breakpoint [~process] bd <address> <size> - disable a memory breakpoint """ token_list = self.split_tokens(arg, 1, 2) pid, tid, address, size = self.input_breakpoint(token_list) debug = self.debug found = False if size is None: if tid is not None: if debug.has_hardware_breakpoint(tid, address): debug.disable_hardware_breakpoint(tid, address) found = True if pid is not None: if debug.has_code_breakpoint(pid, address): debug.disable_code_breakpoint(pid, address) found = True else: if debug.has_page_breakpoint(pid, address): debug.disable_page_breakpoint(pid, address) found = True if not found: print("Error: breakpoint not found.")
[ "def", "do_bd", "(", "self", ",", "arg", ")", ":", "token_list", "=", "self", ".", "split_tokens", "(", "arg", ",", "1", ",", "2", ")", "pid", ",", "tid", ",", "address", ",", "size", "=", "self", ".", "input_breakpoint", "(", "token_list", ")", "d...
[~process] bd <address> - disable a code breakpoint [~thread] bd <address> - disable a hardware breakpoint [~process] bd <address-address> - disable a memory breakpoint [~process] bd <address> <size> - disable a memory breakpoint
[ "[", "~process", "]", "bd", "<address", ">", "-", "disable", "a", "code", "breakpoint", "[", "~thread", "]", "bd", "<address", ">", "-", "disable", "a", "hardware", "breakpoint", "[", "~process", "]", "bd", "<address", "-", "address", ">", "-", "disable"...
python
train
41.961538
DigitalGlobe/gbdxtools
gbdxtools/answerfactory.py
https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/answerfactory.py#L200-L215
def delete(self, project_id): ''' Deletes a project by id Args: project_id: The project id to delete Returns: Nothing ''' self.logger.debug('Deleting project by id: ' + project_id) url = '%(base_url)s/%(project_id)s' % { 'base_url': self.base_url, 'project_id': project_id } r = self.gbdx_connection.delete(url) r.raise_for_status()
[ "def", "delete", "(", "self", ",", "project_id", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Deleting project by id: '", "+", "project_id", ")", "url", "=", "'%(base_url)s/%(project_id)s'", "%", "{", "'base_url'", ":", "self", ".", "base_url", ",", ...
Deletes a project by id Args: project_id: The project id to delete Returns: Nothing
[ "Deletes", "a", "project", "by", "id" ]
python
valid
27.3125
crate/crate-python
src/crate/client/sqlalchemy/types.py
https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/sqlalchemy/types.py#L77-L87
def coerce(cls, key, value): "Convert plain dictionaries to MutableDict." if not isinstance(value, MutableDict): if isinstance(value, dict): return MutableDict(value) # this call will raise ValueError return Mutable.coerce(key, value) else: return value
[ "def", "coerce", "(", "cls", ",", "key", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "MutableDict", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "MutableDict", "(", "value", ")", "# this call wi...
Convert plain dictionaries to MutableDict.
[ "Convert", "plain", "dictionaries", "to", "MutableDict", "." ]
python
train
30.272727
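A small, self-contained analogue of the coerce() contract above (SQLAlchemy normally invokes coerce itself whenever a column value is assigned); TrackedDict is a hypothetical stand-in for MutableDict:

class TrackedDict(dict):
    @classmethod
    def coerce(cls, key, value):
        if isinstance(value, cls):
            return value        # already tracked: pass through unchanged
        if isinstance(value, dict):
            return cls(value)   # wrap plain dicts in the tracked type
        raise ValueError('%r is not dict-like' % (value,))

d = TrackedDict.coerce('data', {'a': 1})
assert isinstance(d, TrackedDict) and d['a'] == 1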
dronekit/dronekit-python
dronekit/__init__.py
https://github.com/dronekit/dronekit-python/blob/91c147fa61f521f5fff5d0cee06d07ed93614af8/dronekit/__init__.py#L2587-L2617
def rotate(self, pitch, roll, yaw): """ Rotate the gimbal to a specific vector. .. code-block:: python #Point the gimbal straight down vehicle.gimbal.rotate(-90, 0, 0) :param pitch: Gimbal pitch in degrees relative to the vehicle (see diagram for :ref:`attitude <figure_attitude>`). A value of 0 represents a camera pointed straight ahead relative to the front of the vehicle, while -90 points the camera straight down. :param roll: Gimbal roll in degrees relative to the vehicle (see diagram for :ref:`attitude <figure_attitude>`). :param yaw: Gimbal yaw in degrees relative to *global frame* (0 is North, 90 is West, 180 is South etc.) """ msg = self._vehicle.message_factory.mount_configure_encode( 0, 1, # target system, target component mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING, #mount_mode 1, # stabilize roll 1, # stabilize pitch 1, # stabilize yaw ) self._vehicle.send_mavlink(msg) msg = self._vehicle.message_factory.mount_control_encode( 0, 1, # target system, target component pitch * 100, # pitch is in centidegrees roll * 100, # roll yaw * 100, # yaw is in centidegrees 0 # save position ) self._vehicle.send_mavlink(msg)
[ "def", "rotate", "(", "self", ",", "pitch", ",", "roll", ",", "yaw", ")", ":", "msg", "=", "self", ".", "_vehicle", ".", "message_factory", ".", "mount_configure_encode", "(", "0", ",", "1", ",", "# target system, target component", "mavutil", ".", "mavlink"...
Rotate the gimbal to a specific vector. .. code-block:: python #Point the gimbal straight down vehicle.gimbal.rotate(-90, 0, 0) :param pitch: Gimbal pitch in degrees relative to the vehicle (see diagram for :ref:`attitude <figure_attitude>`). A value of 0 represents a camera pointed straight ahead relative to the front of the vehicle, while -90 points the camera straight down. :param roll: Gimbal roll in degrees relative to the vehicle (see diagram for :ref:`attitude <figure_attitude>`). :param yaw: Gimbal yaw in degrees relative to *global frame* (0 is North, 90 is West, 180 is South etc.)
[ "Rotate", "the", "gimbal", "to", "a", "specific", "vector", "." ]
python
train
45.193548
urinieto/msaf
msaf/algorithms/olda/segmenter.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/olda/segmenter.py#L265-L300
def processFlat(self):
    """Main process for flat segmentation.

    Returns
    -------
    est_idxs : np.array(N)
        Estimated times for the segment boundaries in frame indices.
    est_labels : np.array(N-1)
        Estimated labels for the segments.
    """
    # Preprocess to obtain features and duration
    F, dur = features(self.file_struct, self.annot_beats, self.framesync)

    try:
        # Load and apply transform
        W = load_transform(self.config["transform"])
        F = W.dot(F)

        # Get Segments
        kmin, kmax = get_num_segs(dur)
        est_idxs = get_segments(F, kmin=kmin, kmax=kmax)
    except:
        # The audio file is too short, only beginning and end
        logging.warning("Audio file too short! "
                        "Only start and end boundaries.")
        est_idxs = [0, F.shape[1] - 1]

    # Make sure that the first and last boundaries are included
    assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[1] - 1

    # Empty labels
    est_labels = np.ones(len(est_idxs) - 1) * -1

    # Post process estimations
    est_idxs, est_labels = self._postprocess(est_idxs, est_labels)

    return est_idxs, est_labels
[ "def", "processFlat", "(", "self", ")", ":", "# Preprocess to obtain features and duration", "F", ",", "dur", "=", "features", "(", "self", ".", "file_struct", ",", "self", ".", "annot_beats", ",", "self", ".", "framesync", ")", "try", ":", "# Load and apply tra...
Main process for flat segmentation.

Returns
-------
est_idxs : np.array(N)
    Estimated times for the segment boundaries in frame indices.
est_labels : np.array(N-1)
    Estimated labels for the segments.
[ "Main", "process", "for", "flat", "segmentation", ".", "Returns", "-------", "est_idxs", ":", "np", ".", "array", "(", "N", ")", "Estimated", "times", "for", "the", "segment", "boundaries", "in", "frame", "indeces", ".", "est_labels", ":", "np", ".", "arra...
python
test
34.972222
GGiecold/Concurrent_AP
Concurrent_AP.py
https://github.com/GGiecold/Concurrent_AP/blob/d4cebe06268b5d520352a83cadb2f7520650460c/Concurrent_AP.py#L537-L562
def compute_similarities(hdf5_file, data, N_processes): """Compute a matrix of pairwise L2 Euclidean distances among samples from 'data'. This computation is to be done in parallel by 'N_processes' distinct processes. Those processes (which are instances of the class 'Similarities_worker') are prevented from simultaneously accessing the HDF5 data structure at 'hdf5_file' through the use of a multiprocessing.Lock object. """ slice_queue = multiprocessing.JoinableQueue() pid_list = [] for i in range(N_processes): worker = Similarities_worker(hdf5_file, '/aff_prop_group/similarities', data, slice_queue) worker.daemon = True worker.start() pid_list.append(worker.pid) for rows_slice in chunk_generator(data.shape[0], 2 * N_processes): slice_queue.put(rows_slice) slice_queue.join() slice_queue.close() terminate_processes(pid_list) gc.collect()
[ "def", "compute_similarities", "(", "hdf5_file", ",", "data", ",", "N_processes", ")", ":", "slice_queue", "=", "multiprocessing", ".", "JoinableQueue", "(", ")", "pid_list", "=", "[", "]", "for", "i", "in", "range", "(", "N_processes", ")", ":", "worker", ...
Compute a matrix of pairwise L2 Euclidean distances among samples from 'data'. This computation is to be done in parallel by 'N_processes' distinct processes. Those processes (which are instances of the class 'Similarities_worker') are prevented from simultaneously accessing the HDF5 data structure at 'hdf5_file' through the use of a multiprocessing.Lock object.
[ "Compute", "a", "matrix", "of", "pairwise", "L2", "Euclidean", "distances", "among", "samples", "from", "data", ".", "This", "computation", "is", "to", "be", "done", "in", "parallel", "by", "N_processes", "distinct", "processes", ".", "Those", "processes", "("...
python
train
38.615385
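A single-process sketch of the computation each Similarities_worker performs: filling one row slice of the pairwise L2 distance matrix. The HDF5 store, queue, and locking are omitted, and the helper name is illustrative.

import numpy as np

def fill_similarity_slice(out, data, rows_slice):
    # L2 distances from the rows in `rows_slice` to every sample
    chunk = data[rows_slice]
    diff = chunk[:, None, :] - data[None, :, :]
    out[rows_slice] = np.sqrt((diff ** 2).sum(axis=-1))

data = np.random.rand(10, 4)
out = np.empty((10, 10))
for rows in (slice(0, 5), slice(5, 10)):  # stand-in for the chunk generator
    fill_similarity_slice(out, data, rows)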
djgagne/hagelslag
hagelslag/evaluation/GridEvaluator.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/GridEvaluator.py#L127-L137
def dilate_obs(self, dilation_radius): """ Use a dilation filter to grow positive observation areas by a specified number of grid points :param dilation_radius: Number of times to dilate the grid. :return: """ for s in self.size_thresholds: self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].shape) for t in range(self.dilated_obs[s].shape[0]): self.dilated_obs[s][t][binary_dilation(self.window_obs[self.mrms_variable][t] >= s, iterations=dilation_radius)] = 1
[ "def", "dilate_obs", "(", "self", ",", "dilation_radius", ")", ":", "for", "s", "in", "self", ".", "size_thresholds", ":", "self", ".", "dilated_obs", "[", "s", "]", "=", "np", ".", "zeros", "(", "self", ".", "window_obs", "[", "self", ".", "mrms_varia...
Use a dilation filter to grow positive observation areas by a specified number of grid points :param dilation_radius: Number of times to dilate the grid. :return:
[ "Use", "a", "dilation", "filter", "to", "grow", "positive", "observation", "areas", "by", "a", "specified", "number", "of", "grid", "points" ]
python
train
50.545455
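A standalone illustration of the dilation step, assuming SciPy's binary_dilation (the same routine the module above relies on):

import numpy as np
from scipy.ndimage import binary_dilation

obs = np.zeros((7, 7), dtype=bool)
obs[3, 3] = True
# Grow the single positive cell by two grid points in each direction.
grown = binary_dilation(obs, iterations=2)
print(grown.sum())  # 13 cells under the default cross-shaped structure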
mathiasertl/django-ca
ca/django_ca/utils.py
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L766-L783
def shlex_split(s, sep): """Split a character on the given set of characters. Example:: >>> shlex_split('foo,bar', ', ') ['foo', 'bar'] >>> shlex_split('foo\\\\,bar1', ',') # escape a separator ['foo,bar1'] >>> shlex_split('"foo,bar", bla', ', ') ['foo,bar', 'bla'] >>> shlex_split('foo,"bar,bla"', ',') ['foo', 'bar,bla'] """ lex = shlex.shlex(s, posix=True) lex.whitespace = sep lex.whitespace_split = True return [l for l in lex]
[ "def", "shlex_split", "(", "s", ",", "sep", ")", ":", "lex", "=", "shlex", ".", "shlex", "(", "s", ",", "posix", "=", "True", ")", "lex", ".", "whitespace", "=", "sep", "lex", ".", "whitespace_split", "=", "True", "return", "[", "l", "for", "l", ...
Split a character on the given set of characters. Example:: >>> shlex_split('foo,bar', ', ') ['foo', 'bar'] >>> shlex_split('foo\\\\,bar1', ',') # escape a separator ['foo,bar1'] >>> shlex_split('"foo,bar", bla', ', ') ['foo,bar', 'bla'] >>> shlex_split('foo,"bar,bla"', ',') ['foo', 'bar,bla']
[ "Split", "a", "character", "on", "the", "given", "set", "of", "characters", "." ]
python
train
28.333333
nephics/mat4py
mat4py/loadmat.py
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/loadmat.py#L398-L471
def loadmat(filename, meta=False): """Load data from MAT-file: data = loadmat(filename, meta=False) The filename argument is either a string with the filename, or a file like object. The returned parameter ``data`` is a dict with the variables found in the MAT file. Call ``loadmat`` with parameter meta=True to include meta data, such as file header information and list of globals. A ``ParseError`` exception is raised if the MAT-file is corrupt or contains a data type that cannot be parsed. """ if isinstance(filename, basestring): fd = open(filename, 'rb') else: fd = filename # Check mat file format is version 5 # For 5 format we need to read an integer in the header. # Bytes 124 through 128 contain a version integer and an # endian test string fd.seek(124) tst_str = fd.read(4) little_endian = (tst_str[2:4] == b'IM') endian = '' if (sys.byteorder == 'little' and little_endian) or \ (sys.byteorder == 'big' and not little_endian): # no byte swapping same endian pass elif sys.byteorder == 'little': # byte swapping endian = '>' else: # byte swapping endian = '<' maj_ind = int(little_endian) # major version number maj_val = ord(tst_str[maj_ind]) if ispy2 else tst_str[maj_ind] if maj_val != 1: raise ParseError('Can only read from Matlab level 5 MAT-files') # the minor version number (unused value) # min_val = ord(tst_str[1 - maj_ind]) if ispy2 else tst_str[1 - maj_ind] mdict = {} if meta: # read the file header fd.seek(0) mdict['__header__'] = read_file_header(fd, endian) mdict['__globals__'] = [] # read data elements while not eof(fd): hdr, next_position, fd_var = read_var_header(fd, endian) name = hdr['name'] if name in mdict: raise ParseError('Duplicate variable name "{}" in mat file.' .format(name)) # read the matrix mdict[name] = read_var_array(fd_var, endian, hdr) if meta and hdr['is_global']: mdict['__globals__'].append(name) # move on to next entry in file fd.seek(next_position) fd.close() return mdict
[ "def", "loadmat", "(", "filename", ",", "meta", "=", "False", ")", ":", "if", "isinstance", "(", "filename", ",", "basestring", ")", ":", "fd", "=", "open", "(", "filename", ",", "'rb'", ")", "else", ":", "fd", "=", "filename", "# Check mat file format i...
Load data from MAT-file: data = loadmat(filename, meta=False) The filename argument is either a string with the filename, or a file like object. The returned parameter ``data`` is a dict with the variables found in the MAT file. Call ``loadmat`` with parameter meta=True to include meta data, such as file header information and list of globals. A ``ParseError`` exception is raised if the MAT-file is corrupt or contains a data type that cannot be parsed.
[ "Load", "data", "from", "MAT", "-", "file", ":" ]
python
valid
30.364865
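A short, hypothetical usage sketch of the loader above, assuming 'results.mat' is a level-5 MAT-file on disk and loadmat is in scope:

data = loadmat('results.mat')
print(sorted(k for k in data))           # variable names found in the file
meta = loadmat('results.mat', meta=True)
print(meta['__header__'], meta['__globals__'])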
DeepHorizons/iarm
iarm_kernel/iarmkernel.py
https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm_kernel/iarmkernel.py#L256-L283
def magic_help(self, line): """ Print out the help for magics Usage: Call help with no arguments to list all magics, or call it with a magic to print out it's help info. `%help` or `%help run """ line = line.strip() if not line: for magic in self.magics: stream_content = {'name': 'stdout', 'text': "%{}\n".format(magic)} self.send_response(self.iopub_socket, 'stream', stream_content) elif line in self.magics: # its a magic stream_content = {'name': 'stdout', 'text': "{}\n{}".format(line, self.magics[line].__doc__)} self.send_response(self.iopub_socket, 'stream', stream_content) elif line in self.interpreter.ops: # it's an instruction stream_content = {'name': 'stdout', 'text': "{}\n{}".format(line, self.interpreter.ops[line].__doc__)} self.send_response(self.iopub_socket, 'stream', stream_content) else: stream_content = {'name': 'stderr', 'text': "'{}' not a known magic or instruction".format(line)} self.send_response(self.iopub_socket, 'stream', stream_content)
[ "def", "magic_help", "(", "self", ",", "line", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "for", "magic", "in", "self", ".", "magics", ":", "stream_content", "=", "{", "'name'", ":", "'stdout'", ",", "'text'", ...
Print out the help for magics Usage: Call help with no arguments to list all magics, or call it with a magic to print out it's help info. `%help` or `%help run
[ "Print", "out", "the", "help", "for", "magics" ]
python
train
42.928571
Nic30/hwtGraph
hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py#L170-L216
def mergeSplitsOnInterfaces(root: LNode): """ collect all split/concatenation nodes and group them by target interface """ for ch in root.children: if ch.children: mergeSplitsOnInterfaces(ch) ctx = MergeSplitsOnInterfacesCtx() for ch in root.children: srcPorts = None try: if ch.name == "CONCAT": p = single(ch.east, lambda x: True) e = single(p.outgoingEdges, lambda x: True) srcPorts = e.dsts elif ch.name == "SLICE": p = single(ch.west, lambda x: True) e = single(p.incomingEdges, lambda x: True) srcPorts = e.srcs except (DuplicitValueExc, NoValueExc): continue if srcPorts is not None: for srcPort in srcPorts: if isinstance(srcPort.parent, LPort): # only for non primitive ports rootPort = getRootIntfPort(srcPort) ctx.register(rootPort, ch, e) # join them if it is possible for srcPort, splitsAndConcats in ctx.iterPortSplits(): if len(splitsAndConcats) <= 1: continue name = "SPLIT" if srcPort.direction == PortType.OUTPUT else "CONCAT" newSplitNode = root.addNode(name) copyPort(srcPort, newSplitNode, True, "") n = splitsAndConcats[0][0] for i in range(max(len(n.west), len(n.east))): copyPort( srcPort, newSplitNode, False, "[%d]" % i) reconnectPorts(root, srcPort, splitsAndConcats, newSplitNode)
[ "def", "mergeSplitsOnInterfaces", "(", "root", ":", "LNode", ")", ":", "for", "ch", "in", "root", ".", "children", ":", "if", "ch", ".", "children", ":", "mergeSplitsOnInterfaces", "(", "ch", ")", "ctx", "=", "MergeSplitsOnInterfacesCtx", "(", ")", "for", ...
collect all split/concatenation nodes and group them by target interface
[ "collect", "all", "split", "/", "concatenation", "nodes", "and", "group", "them", "by", "target", "interface" ]
python
train
34.744681
ANTsX/ANTsPy
ants/contrib/sampling/affine2d.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/contrib/sampling/affine2d.py#L62-L101
def transform(self, X=None, y=None):
    """
    Transform an image using an Affine transform with the given
    translation parameters. Return the transform if X=None.

    Arguments
    ---------
    X : ANTsImage
        Image to transform

    y : ANTsImage (optional)
        Another image to transform

    Returns
    -------
    ANTsImage if y is None, else a tuple of ANTsImage types

    Examples
    --------
    >>> import ants
    >>> img = ants.image_read(ants.get_data('r16'))
    >>> tx = ants.contrib.Translate2D(translation=(10,0))
    >>> img2_x = tx.transform(img)
    >>> tx = ants.contrib.Translate2D(translation=(-10,0)) # other direction
    >>> img2_x = tx.transform(img)
    >>> tx = ants.contrib.Translate2D(translation=(0,10))
    >>> img2_z = tx.transform(img)
    >>> tx = ants.contrib.Translate2D(translation=(10,10))
    >>> img2 = tx.transform(img)
    """
    # unpack the translation parameters
    translation_x, translation_y = self.translation

    translation_matrix = np.array([[1, 0, translation_x],
                                   [0, 1, translation_y]])
    self.tx.set_parameters(translation_matrix)

    if self.lazy or X is None:
        return self.tx
    else:
        return self.tx.apply_to_image(X, reference=self.reference)
[ "def", "transform", "(", "self", ",", "X", "=", "None", ",", "y", "=", "None", ")", ":", "# convert to radians and unpack", "translation_x", ",", "translation_y", "=", "self", ".", "translation", "translation_matrix", "=", "np", ".", "array", "(", "[", "[", ...
Transform an image using an Affine transform with the given translation parameters. Return the transform if X=None. Arguments --------- X : ANTsImage Image to transform y : ANTsImage (optional) Another image to transform Returns ------- ANTsImage if y is None, else a tuple of ANTsImage types Examples -------- >>> import ants >>> img = ants.image_read(ants.get_data('r16')) >>> tx = ants.contrib.Translate2D(translation=(10,0)) >>> img2_x = tx.transform(img) >>> tx = ants.contrib.Translate2D(translation=(-10,0)) # other direction >>> img2_x = tx.transform(img) >>> tx = ants.contrib.Translate2D(translation=(0,10)) >>> img2_z = tx.transform(img) >>> tx = ants.contrib.Translate2D(translation=(10,10)) >>> img2 = tx.transform(img)
[ "Transform", "an", "image", "using", "an", "Affine", "transform", "with", "the", "given", "translation", "parameters", ".", "Return", "the", "transform", "if", "X", "=", "None", "." ]
python
train
34.275
roanuz/py-cricket
src/pycricket_storagehandler.py
https://github.com/roanuz/py-cricket/blob/fa47fe2e92915fc58db38898213e974742af55d4/src/pycricket_storagehandler.py#L56-L73
def set_value(self, key, value):
    """
    Set key value to the file. The function stores the key and value
    in dictionary format. If the key already exists, it is updated
    with the new value in the file.

    Args:
        key : cache key
        value : cache value
    """
    file_cache = self.read_file()
    if file_cache:
        file_cache[key] = value
    else:
        file_cache = {}
        file_cache[key] = value
    self.update_file(file_cache)
[ "def", "set_value", "(", "self", ",", "key", ",", "value", ")", ":", "file_cache", "=", "self", ".", "read_file", "(", ")", "if", "file_cache", ":", "file_cache", "[", "key", "]", "=", "value", "else", ":", "file_cache", "=", "{", "}", "file_cache", ...
Set key value to the file. The function stores the key and value in dictionary format. If the key already exists, it is updated with the new value in the file.

Args:
    key : cache key
    value : cache value
[ "Set", "key", "value", "to", "the", "file", "." ]
python
train
28.277778
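A minimal, self-contained version of the same file-backed cache pattern using the json module (the real class goes through its own read_file/update_file helpers; the path is illustrative):

import json
import os

CACHE_PATH = 'cache.json'

def set_value(key, value):
    # Read the existing mapping (if any), set one key, write it back.
    cache = {}
    if os.path.exists(CACHE_PATH):
        with open(CACHE_PATH) as fh:
            cache = json.load(fh)
    cache[key] = value
    with open(CACHE_PATH, 'w') as fh:
        json.dump(cache, fh)

set_value('season_key', '2019')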
yuce/pyswip
pyswip/core.py
https://github.com/yuce/pyswip/blob/f7c1f1e8c3a13b90bd775861d374788a8b5677d8/pyswip/core.py#L234-L271
def _findSwiplLin(): """ This function uses several heuristics to guess where SWI-Prolog is installed in Linuxes. :returns: A tuple of (path to the swipl so, path to the resource file) :returns type: ({str, None}, {str, None}) """ # Maybe the exec is on path? (path, swiHome) = _findSwiplFromExec() if path is not None: return (path, swiHome) # If it is not, use find_library path = _findSwiplPathFromFindLib() if path is not None: return (path, swiHome) # Our last try: some hardcoded paths. paths = ['/lib', '/usr/lib', '/usr/local/lib', '.', './lib'] names = ['libswipl.so', 'libpl.so'] path = None for name in names: for try_ in paths: try_ = os.path.join(try_, name) if os.path.exists(try_): path = try_ break if path is not None: return (path, swiHome) return (None, None)
[ "def", "_findSwiplLin", "(", ")", ":", "# Maybe the exec is on path?", "(", "path", ",", "swiHome", ")", "=", "_findSwiplFromExec", "(", ")", "if", "path", "is", "not", "None", ":", "return", "(", "path", ",", "swiHome", ")", "# If it is not, use find_library",...
This function uses several heuristics to guess where SWI-Prolog is installed in Linuxes. :returns: A tuple of (path to the swipl so, path to the resource file) :returns type: ({str, None}, {str, None})
[ "This", "function", "uses", "several", "heuristics", "to", "guess", "where", "SWI", "-", "Prolog", "is", "installed", "in", "Linuxes", "." ]
python
train
24.473684
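The middle heuristic above delegates to ctypes' own search; a brief standalone look at that step:

from ctypes.util import find_library

# find_library consults the standard system locations (e.g. the ld
# cache on Linux) and returns a library name such as 'libswipl.so.8',
# or None when nothing matches.
for name in ('swipl', 'pl'):
    found = find_library(name)
    if found is not None:
        print('found', found)
        break
else:
    print('SWI-Prolog not on the default search path')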
postmanlabs/httpbin
httpbin/helpers.py
https://github.com/postmanlabs/httpbin/blob/f8ec666b4d1b654e4ff6aedd356f510dcac09f83/httpbin/helpers.py#L127-L139
def get_headers(hide_env=True): """Returns headers dict from request context.""" headers = dict(request.headers.items()) if hide_env and ('show_env' not in request.args): for key in ENV_HEADERS: try: del headers[key] except KeyError: pass return CaseInsensitiveDict(headers.items())
[ "def", "get_headers", "(", "hide_env", "=", "True", ")", ":", "headers", "=", "dict", "(", "request", ".", "headers", ".", "items", "(", ")", ")", "if", "hide_env", "and", "(", "'show_env'", "not", "in", "request", ".", "args", ")", ":", "for", "key"...
Returns headers dict from request context.
[ "Returns", "headers", "dict", "from", "request", "context", "." ]
python
train
27.153846
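A quick demonstration of the case-insensitive mapping the helper returns, assuming the CaseInsensitiveDict from requests.structures (one common source of that class):

from requests.structures import CaseInsensitiveDict

headers = CaseInsensitiveDict({'Content-Type': 'application/json'})
print(headers['content-type'])   # lookups ignore key case
headers['X-Api-Key'] = 'secret'
print('x-api-key' in headers)    # True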
secynic/ipwhois
ipwhois/utils.py
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/utils.py#L264-L358
def ipv4_is_defined(address): """ The function for checking if an IPv4 address is defined (does not need to be resolved). Args: address (:obj:`str`): An IPv4 address. Returns: namedtuple: :is_defined (bool): True if given address is defined, otherwise False :ietf_name (str): IETF assignment name if given address is defined, otherwise '' :ietf_rfc (str): IETF assignment RFC if given address is defined, otherwise '' """ # Initialize the IP address object. query_ip = IPv4Address(str(address)) # Initialize the results named tuple results = namedtuple('ipv4_is_defined_results', 'is_defined, ietf_name, ' 'ietf_rfc') # This Network if query_ip in IPv4Network('0.0.0.0/8'): return results(True, 'This Network', 'RFC 1122, Section 3.2.1.3') # Loopback elif query_ip.is_loopback: return results(True, 'Loopback', 'RFC 1122, Section 3.2.1.3') # Link Local elif query_ip.is_link_local: return results(True, 'Link Local', 'RFC 3927') # IETF Protocol Assignments elif query_ip in IPv4Network('192.0.0.0/24'): return results(True, 'IETF Protocol Assignments', 'RFC 5736') # TEST-NET-1 elif query_ip in IPv4Network('192.0.2.0/24'): return results(True, 'TEST-NET-1', 'RFC 5737') # 6to4 Relay Anycast elif query_ip in IPv4Network('192.88.99.0/24'): return results(True, '6to4 Relay Anycast', 'RFC 3068') # Network Interconnect Device Benchmark Testing elif query_ip in IPv4Network('198.18.0.0/15'): return (results(True, 'Network Interconnect Device Benchmark Testing', 'RFC 2544')) # TEST-NET-2 elif query_ip in IPv4Network('198.51.100.0/24'): return results(True, 'TEST-NET-2', 'RFC 5737') # TEST-NET-3 elif query_ip in IPv4Network('203.0.113.0/24'): return results(True, 'TEST-NET-3', 'RFC 5737') # Multicast elif query_ip.is_multicast: return results(True, 'Multicast', 'RFC 3171') # Limited Broadcast elif query_ip in IPv4Network('255.255.255.255/32'): return results(True, 'Limited Broadcast', 'RFC 919, Section 7') # Private-Use Networks elif query_ip.is_private: return results(True, 'Private-Use Networks', 'RFC 1918') # New IANA Reserved # TODO: Someone needs to find the RFC for this elif query_ip in IPv4Network('198.97.38.0/24'): return results(True, 'IANA Reserved', '') return results(False, '', '')
[ "def", "ipv4_is_defined", "(", "address", ")", ":", "# Initialize the IP address object.", "query_ip", "=", "IPv4Address", "(", "str", "(", "address", ")", ")", "# Initialize the results named tuple", "results", "=", "namedtuple", "(", "'ipv4_is_defined_results'", ",", ...
The function for checking if an IPv4 address is defined (does not need to be resolved). Args: address (:obj:`str`): An IPv4 address. Returns: namedtuple: :is_defined (bool): True if given address is defined, otherwise False :ietf_name (str): IETF assignment name if given address is defined, otherwise '' :ietf_rfc (str): IETF assignment RFC if given address is defined, otherwise ''
[ "The", "function", "for", "checking", "if", "an", "IPv4", "address", "is", "defined", "(", "does", "not", "need", "to", "be", "resolved", ")", "." ]
python
train
27.021053
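Several branches above lean on the stdlib ipaddress module; a brief self-contained demonstration of those properties:

from ipaddress import IPv4Address, IPv4Network

addr = IPv4Address('192.0.2.10')
print(addr in IPv4Network('192.0.2.0/24'))    # True: TEST-NET-1
print(IPv4Address('127.0.0.1').is_loopback)   # True
print(IPv4Address('224.0.0.1').is_multicast)  # True
print(IPv4Address('10.0.0.1').is_private)     # True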
AltSchool/dynamic-rest
dynamic_rest/filters.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/filters.py#L471-L608
def _build_queryset(
        self,
        serializer=None,
        filters=None,
        queryset=None,
        requirements=None,
        extra_filters=None,
        disable_prefetches=False,
):
    """Build a queryset that pulls in all data required by this request.

    Handles nested prefetching of related data and deferring fields
    at the queryset level.

    Arguments:
        serializer: An optional serializer to use a base for the queryset.
            If no serializer is passed, the `get_serializer` method will
            be used to initialize the base serializer for the viewset.
        filters: An optional TreeMap of nested filters.
        queryset: An optional base queryset.
        requirements: An optional TreeMap of nested requirements.
    """

    is_root_level = False
    if not serializer:
        serializer = self.view.get_serializer()
        is_root_level = True

    queryset = self._get_queryset(queryset=queryset, serializer=serializer)

    model = getattr(serializer.Meta, 'model', None)

    if not model:
        return queryset

    prefetches = {}

    # build a nested Prefetch queryset
    # based on request parameters and serializer fields
    fields = serializer.fields

    if requirements is None:
        requirements = TreeMap()

    self._get_implicit_requirements(
        fields,
        requirements
    )

    if filters is None:
        filters = self._get_requested_filters()

    # build nested Prefetch queryset
    self._build_requested_prefetches(
        prefetches,
        requirements,
        model,
        fields,
        filters
    )

    # build remaining prefetches out of internal requirements
    # that are not already covered by request requirements
    self._build_implicit_prefetches(
        model,
        prefetches,
        requirements
    )

    # use requirements at this level to limit fields selected
    # only do this for GET requests where we are not requesting the
    # entire fieldset
    if (
        '*' not in requirements and
        not self.view.is_update() and
        not self.view.is_delete()
    ):
        id_fields = getattr(serializer, 'get_id_fields', lambda: [])()
        # only include local model fields
        only = [
            field for field in set(
                id_fields + list(requirements.keys())
            ) if is_model_field(model, field) and
            not is_field_remote(model, field)
        ]
        queryset = queryset.only(*only)

    # add request filters
    query = self._filters_to_query(
        includes=filters.get('_include'),
        excludes=filters.get('_exclude'),
        serializer=serializer
    )

    # add additional filters specified by calling view
    if extra_filters:
        query = extra_filters if not query else extra_filters & query

    if query:
        # Convert internal django ValidationError to
        # APIException-based one in order to resolve validation error
        # from 500 status code to 400.
        try:
            queryset = queryset.filter(query)
        except InternalValidationError as e:
            raise ValidationError(
                dict(e) if hasattr(e, 'error_dict') else list(e)
            )
        except Exception as e:
            # Some other Django error in parsing the filter.
            # Very likely a bad query, so throw a ValidationError.
            err_msg = getattr(e, 'message', '')
            raise ValidationError(err_msg)

    # A serializer can have this optional function
    # to dynamically apply additional filters on
    # any queries that will use that serializer
    # You could use this to have (for example) different
    # serializers for different subsets of a model or to
    # implement permissions which work even in sideloads
    if hasattr(serializer, 'filter_queryset'):
        queryset = self._serializer_filter(
            serializer=serializer,
            queryset=queryset
        )

    # add prefetches and remove duplicates if necessary
    prefetch = prefetches.values()
    if prefetch and not disable_prefetches:
        queryset = queryset.prefetch_related(*prefetch)
    elif isinstance(queryset, Manager):
        queryset = queryset.all()
    if has_joins(queryset) or not is_root_level:
        queryset = queryset.distinct()

    if self.DEBUG:
        queryset._using_prefetches = prefetches
    return queryset
[ "def", "_build_queryset", "(", "self", ",", "serializer", "=", "None", ",", "filters", "=", "None", ",", "queryset", "=", "None", ",", "requirements", "=", "None", ",", "extra_filters", "=", "None", ",", "disable_prefetches", "=", "False", ",", ")", ":", ...
Build a queryset that pulls in all data required by this request. Handles nested prefetching of related data and deferring fields at the queryset level. Arguments: serializer: An optional serializer to use a base for the queryset. If no serializer is passed, the `get_serializer` method will be used to initialize the base serializer for the viewset. filters: An optional TreeMap of nested filters. queryset: An optional base queryset. requirements: An optional TreeMap of nested requirements.
[ "Build", "a", "queryset", "that", "pulls", "in", "all", "data", "required", "by", "this", "request", "." ]
python
train
33.927536
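A hedged sketch of the Django ORM calls the builder composes — only(), filter(), Prefetch, distinct() — using hypothetical Book/Author models rather than dynamic-rest's real code path (it will not run outside a configured Django project):

from django.db.models import Prefetch, Q

queryset = (
    Book.objects
    .only('id', 'title')                   # defer columns not required
    .filter(Q(title__icontains='django'))  # request filters
    .prefetch_related(
        Prefetch('authors', queryset=Author.objects.only('id', 'name'))
    )
    .distinct()                            # dedupe rows after joins
)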
saltstack/salt
salt/states/onyx.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/onyx.py#L307-L379
def replace(name, repl, full_match=False): ''' Replace all instances of a string or full line in the running config name String to replace repl The replacement text full_match Whether `name` will match the full line or only a subset of the line. Defaults to False. When False, .* is added around `name` for matching in the `show run` config. Examples: .. code-block:: yaml replace snmp string: onyx.replace: - name: randoSNMPstringHERE - repl: NEWrandoSNMPstringHERE replace full snmp string: onyx.replace: - name: ^snmp-server community randoSNMPstringHERE group network-operator$ - repl: snmp-server community NEWrandoSNMPstringHERE group network-operator - full_match: True .. note:: The first example will replace the SNMP string on both the group and the ACL, so you will not lose the ACL setting. Because the second is an exact match of the line, when the group is removed, the ACL is removed, but not readded, because it was not matched. ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} if full_match is False: search = '^.*{0}.*$'.format(name) else: search = name matches = __salt__['onyx.cmd']('find', search) if not matches: ret['result'] = True ret['comment'] = 'Nothing found to replace' return ret if __opts__['test'] is True: ret['result'] = None ret['comment'] = 'Configs will be changed' ret['changes']['old'] = matches ret['changes']['new'] = [re.sub(name, repl, match) for match in matches] return ret ret['changes'] = __salt__['onyx.cmd']('replace', name, repl, full_match=full_match) matches = __salt__['onyx.cmd']('find', search) if matches: ret['result'] = False ret['comment'] = 'Failed to replace all instances of "{0}"'.format(name) else: ret['result'] = True ret['comment'] = 'Successfully replaced all instances of "{0}" with "{1}"'.format(name, repl) return ret
[ "def", "replace", "(", "name", ",", "repl", ",", "full_match", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "if", "full_match", "is...
Replace all instances of a string or full line in the running config name String to replace repl The replacement text full_match Whether `name` will match the full line or only a subset of the line. Defaults to False. When False, .* is added around `name` for matching in the `show run` config. Examples: .. code-block:: yaml replace snmp string: onyx.replace: - name: randoSNMPstringHERE - repl: NEWrandoSNMPstringHERE replace full snmp string: onyx.replace: - name: ^snmp-server community randoSNMPstringHERE group network-operator$ - repl: snmp-server community NEWrandoSNMPstringHERE group network-operator - full_match: True .. note:: The first example will replace the SNMP string on both the group and the ACL, so you will not lose the ACL setting. Because the second is an exact match of the line, when the group is removed, the ACL is removed, but not readded, because it was not matched.
[ "Replace", "all", "instances", "of", "a", "string", "or", "full", "line", "in", "the", "running", "config" ]
python
train
29.465753
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L6018-L6027
def libvlc_video_get_adjust_float(p_mi, option): '''Get float adjust option. @param p_mi: libvlc media player instance. @param option: adjust option to get, values of libvlc_video_adjust_option_t. @version: LibVLC 1.1.1 and later. ''' f = _Cfunctions.get('libvlc_video_get_adjust_float', None) or \ _Cfunction('libvlc_video_get_adjust_float', ((1,), (1,),), None, ctypes.c_float, MediaPlayer, ctypes.c_uint) return f(p_mi, option)
[ "def", "libvlc_video_get_adjust_float", "(", "p_mi", ",", "option", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_video_get_adjust_float'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_get_adjust_float'", ",", "(", "(", "1", ",", ")",...
Get float adjust option. @param p_mi: libvlc media player instance. @param option: adjust option to get, values of libvlc_video_adjust_option_t. @version: LibVLC 1.1.1 and later.
[ "Get", "float", "adjust", "option", "." ]
python
train
47.7
Garee/pytodoist
pytodoist/todoist.py
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/todoist.py#L105-L132
def register(full_name, email, password, lang=None, timezone=None): """Register a new Todoist account. :param full_name: The user's full name. :type full_name: str :param email: The user's email address. :type email: str :param password: The user's password. :type password: str :param lang: The user's language. :type lang: str :param timezone: The user's timezone. :type timezone: str :return: The Todoist user. :rtype: :class:`pytodoist.todoist.User` >>> from pytodoist import todoist >>> user = todoist.register('John Doe', 'john.doe@gmail.com', 'password') >>> print(user.full_name) John Doe """ response = API.register(email, full_name, password, lang=lang, timezone=timezone) _fail_if_contains_errors(response) user_json = response.json() user = User(user_json) user.password = password return user
[ "def", "register", "(", "full_name", ",", "email", ",", "password", ",", "lang", "=", "None", ",", "timezone", "=", "None", ")", ":", "response", "=", "API", ".", "register", "(", "email", ",", "full_name", ",", "password", ",", "lang", "=", "lang", ...
Register a new Todoist account. :param full_name: The user's full name. :type full_name: str :param email: The user's email address. :type email: str :param password: The user's password. :type password: str :param lang: The user's language. :type lang: str :param timezone: The user's timezone. :type timezone: str :return: The Todoist user. :rtype: :class:`pytodoist.todoist.User` >>> from pytodoist import todoist >>> user = todoist.register('John Doe', 'john.doe@gmail.com', 'password') >>> print(user.full_name) John Doe
[ "Register", "a", "new", "Todoist", "account", "." ]
python
train
32.285714
xray7224/PyPump
pypump/models/collection.py
https://github.com/xray7224/PyPump/blob/f921f691c39fe021f4fd124b6bc91718c9e49b4a/pypump/models/collection.py#L83-L106
def remove(self, obj):
    """ Removes a member from the collection.

    :param obj: Object to remove.

    Example:
        >>> mycollection.remove(pump.Person('bob@example.org'))
    """
    activity = {
        "verb": "remove",
        "object": {
            "objectType": obj.object_type,
            "id": obj.id
        },
        "target": {
            "objectType": self.object_type,
            "id": self.id
        }
    }

    self._post_activity(activity)

    # Remove the cache so it's re-generated next time it's needed
    self._members = None
[ "def", "remove", "(", "self", ",", "obj", ")", ":", "activity", "=", "{", "\"verb\"", ":", "\"remove\"", ",", "\"object\"", ":", "{", "\"objectType\"", ":", "obj", ".", "object_type", ",", "\"id\"", ":", "obj", ".", "id", "}", ",", "\"target\"", ":", ...
Removes a member from the collection. :param obj: Object to remove. Example: >>> mycollection.remove(pump.Person('bob@example.org'))
[ "Removes", "a", "member", "from", "the", "collection", "." ]
python
train
25.666667
prompt-toolkit/pymux
pymux/pipes/win32.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/pipes/win32.py#L196-L205
def wait_for_event(event):
    """
    Wraps a win32 event into a `Future` and waits for it.
    """
    f = Future()

    def ready():
        get_event_loop().remove_win32_handle(event)
        f.set_result(None)

    get_event_loop().add_win32_handle(event, ready)
    return f
[ "def", "wait_for_event", "(", "event", ")", ":", "f", "=", "Future", "(", ")", "def", "ready", "(", ")", ":", "get_event_loop", "(", ")", ".", "remove_win32_handle", "(", "event", ")", "f", ".", "set_result", "(", "None", ")", "get_event_loop", "(", ")...
Wraps a win32 event into a `Future` and waits for it.
[ "Wraps", "a", "win32", "event", "into", "a", "Future", "and", "wait", "for", "it", "." ]
python
train
26.8
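The same callback-to-Future bridging pattern, sketched with the stdlib asyncio loop instead of prompt_toolkit's Win32 event loop:

import asyncio

async def wait_for_signal():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()

    def ready():
        # Fired by a callback-based source; resolves the future once.
        if not fut.done():
            fut.set_result(None)

    loop.call_later(0.1, ready)  # stand-in for the external event
    await fut

asyncio.run(wait_for_signal())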
priestc/giotto
giotto/views/__init__.py
https://github.com/priestc/giotto/blob/d4c26380caefa7745bb27135e315de830f7254d3/giotto/views/__init__.py#L231-L246
def partial_jinja_template(template_name, name='data', mimetype="text/html"): """ Partial render of jinja templates. This is useful if you want to re-render the template in the output middleware phase. These templates are rendered in a way that all undefined variables will be kept in the emplate intact. """ def partial_jinja_renderer(result, errors): template = get_jinja_template(template_name) old = template.environment.undefined template.environment.undefined = DebugUndefined context = {name: result or Mock(), 'errors': errors} rendered = template.render(**context) template.environment.undefined = old return {'body': rendered, 'mimetype': mimetype} return partial_jinja_renderer
[ "def", "partial_jinja_template", "(", "template_name", ",", "name", "=", "'data'", ",", "mimetype", "=", "\"text/html\"", ")", ":", "def", "partial_jinja_renderer", "(", "result", ",", "errors", ")", ":", "template", "=", "get_jinja_template", "(", "template_name"...
Partial render of jinja templates. This is useful if you want to re-render the template in the output middleware phase. These templates are rendered in a way that all undefined variables will be kept in the emplate intact.
[ "Partial", "render", "of", "jinja", "templates", ".", "This", "is", "useful", "if", "you", "want", "to", "re", "-", "render", "the", "template", "in", "the", "output", "middleware", "phase", ".", "These", "templates", "are", "rendered", "in", "a", "way", ...
python
train
47.625
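A self-contained look at the DebugUndefined trick used above: undefined names survive the first render untouched, so the output can be rendered again later with the remaining context.

from jinja2 import DebugUndefined, Environment

env = Environment(undefined=DebugUndefined)
first = env.from_string('Hello {{ name }} from {{ site }}').render(name='Ann')
print(first)   # 'Hello Ann from {{ site }}'
second = env.from_string(first).render(site='giotto')
print(second)  # 'Hello Ann from giotto'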
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L450-L466
def err(r): """ Input: { return - return code error - error text } Output: Nothing; quits program """ import sys rc=r['return'] re=r['error'] out('Error: '+re) sys.exit(rc)
[ "def", "err", "(", "r", ")", ":", "import", "sys", "rc", "=", "r", "[", "'return'", "]", "re", "=", "r", "[", "'error'", "]", "out", "(", "'Error: '", "+", "re", ")", "sys", ".", "exit", "(", "rc", ")" ]
Input: { return - return code error - error text } Output: Nothing; quits program
[ "Input", ":", "{", "return", "-", "return", "code", "error", "-", "error", "text", "}" ]
python
train
13.941176
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor_pool.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor_pool.py#L854-L870
def _MakeEnumValueDescriptor(self, value_proto, index):
    """Creates an enum value descriptor object from an enum value proto.

    Args:
      value_proto: The proto describing the enum value.
      index: The index of the enum value.

    Returns:
      An initialized EnumValueDescriptor object.
    """

    return descriptor.EnumValueDescriptor(
        name=value_proto.name,
        index=index,
        number=value_proto.number,
        options=_OptionsOrNone(value_proto),
        type=None)
[ "def", "_MakeEnumValueDescriptor", "(", "self", ",", "value_proto", ",", "index", ")", ":", "return", "descriptor", ".", "EnumValueDescriptor", "(", "name", "=", "value_proto", ".", "name", ",", "index", "=", "index", ",", "number", "=", "value_proto", ".", ...
Creates an enum value descriptor object from an enum value proto.

Args:
  value_proto: The proto describing the enum value.
  index: The index of the enum value.

Returns:
  An initialized EnumValueDescriptor object.
[ "Creates", "a", "enum", "value", "descriptor", "object", "from", "a", "enum", "value", "proto", "." ]
python
train
28.529412
nimeshkverma/mongo_joins
mongojoin/mongocollection.py
https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/mongocollection.py#L97-L115
def bulk_cursor_execute(self, bulk_cursor): """ Executes the bulk_cursor :param bulk_cursor: Cursor to perform bulk operations :type bulk_cursor: pymongo bulk cursor object :returns: pymongo bulk cursor object (for bulk operations) """ try: result = bulk_cursor.execute() except BulkWriteError as bwe: msg = "bulk_cursor_execute: Exception in executing Bulk cursor to mongo with {error}".format( error=str(bwe)) raise Exception(msg) except Exception as e: msg = "Mongo Bulk cursor could not be fetched, Error: {error}".format( error=str(e)) raise Exception(msg)
[ "def", "bulk_cursor_execute", "(", "self", ",", "bulk_cursor", ")", ":", "try", ":", "result", "=", "bulk_cursor", ".", "execute", "(", ")", "except", "BulkWriteError", "as", "bwe", ":", "msg", "=", "\"bulk_cursor_execute: Exception in executing Bulk cursor to mongo w...
Executes the bulk_cursor :param bulk_cursor: Cursor to perform bulk operations :type bulk_cursor: pymongo bulk cursor object :returns: pymongo bulk cursor object (for bulk operations)
[ "Executes", "the", "bulk_cursor" ]
python
train
38.157895
vsudilov/flask-consulate
flask_consulate/consul.py
https://github.com/vsudilov/flask-consulate/blob/514f8754e7186f960237ed2836206993d5d3d3b6/flask_consulate/consul.py#L62-L75
def _create_session(self, test_connection=False): """ Create a consulate.session object, and query for its leader to ensure that the connection is made. :param test_connection: call .leader() to ensure that the connection is valid :type test_connection: bool :return consulate.Session instance """ session = consulate.Session(host=self.host, port=self.port) if test_connection: session.status.leader() return session
[ "def", "_create_session", "(", "self", ",", "test_connection", "=", "False", ")", ":", "session", "=", "consulate", ".", "Session", "(", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ")", "if", "test_connection", ":", "session"...
Create a consulate.session object, and query for its leader to ensure that the connection is made. :param test_connection: call .leader() to ensure that the connection is valid :type test_connection: bool :return consulate.Session instance
[ "Create", "a", "consulate", ".", "session", "object", "and", "query", "for", "its", "leader", "to", "ensure", "that", "the", "connection", "is", "made", "." ]
python
train
36.285714
SheffieldML/GPy
GPy/inference/latent_function_inference/vardtc_svi_multiout.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/inference/latent_function_inference/vardtc_svi_multiout.py#L42-L206
def inference(self, kern_r, kern_c, Xr, Xc, Zr, Zc, likelihood, Y, qU_mean ,qU_var_r, qU_var_c):
    """
    The SVI-VarDTC inference
    """

    N, D, Mr, Mc, Qr, Qc = Y.shape[0], Y.shape[1], Zr.shape[0], Zc.shape[0], Zr.shape[1], Zc.shape[1]

    uncertain_inputs_r = isinstance(Xr, VariationalPosterior)
    uncertain_inputs_c = isinstance(Xc, VariationalPosterior)
    uncertain_outputs = isinstance(Y, VariationalPosterior)

    beta = 1./likelihood.variance

    psi0_r, psi1_r, psi2_r = self.gatherPsiStat(kern_r, Xr, Zr, uncertain_inputs_r)
    psi0_c, psi1_c, psi2_c = self.gatherPsiStat(kern_c, Xc, Zc, uncertain_inputs_c)

    #======================================================================
    # Compute Common Components
    #======================================================================

    Kuu_r = kern_r.K(Zr).copy()
    diag.add(Kuu_r, self.const_jitter)
    Lr = jitchol(Kuu_r)

    Kuu_c = kern_c.K(Zc).copy()
    diag.add(Kuu_c, self.const_jitter)
    Lc = jitchol(Kuu_c)

    mu, Sr, Sc = qU_mean, qU_var_r, qU_var_c
    LSr = jitchol(Sr)
    LSc = jitchol(Sc)

    LcInvMLrInvT = dtrtrs(Lc,dtrtrs(Lr,mu.T)[0].T)[0]
    LcInvPsi2_cLcInvT = backsub_both_sides(Lc, psi2_c,'right')
    LrInvPsi2_rLrInvT = backsub_both_sides(Lr, psi2_r,'right')
    LcInvLSc = dtrtrs(Lc, LSc)[0]
    LrInvLSr = dtrtrs(Lr, LSr)[0]
    LcInvScLcInvT = tdot(LcInvLSc)
    LrInvSrLrInvT = tdot(LrInvLSr)
    LcInvPsi1_cT = dtrtrs(Lc, psi1_c.T)[0]
    LrInvPsi1_rT = dtrtrs(Lr, psi1_r.T)[0]

    tr_LrInvPsi2_rLrInvT_LrInvSrLrInvT = (LrInvPsi2_rLrInvT*LrInvSrLrInvT).sum()
    tr_LcInvPsi2_cLcInvT_LcInvScLcInvT = (LcInvPsi2_cLcInvT*LcInvScLcInvT).sum()
    tr_LrInvSrLrInvT = np.square(LrInvLSr).sum()
    tr_LcInvScLcInvT = np.square(LcInvLSc).sum()
    tr_LrInvPsi2_rLrInvT = np.trace(LrInvPsi2_rLrInvT)
    tr_LcInvPsi2_cLcInvT = np.trace(LcInvPsi2_cLcInvT)

    #======================================================================
    # Compute log-likelihood
    #======================================================================

    logL_A = - np.square(Y).sum() \
           - (LcInvMLrInvT.T.dot(LcInvPsi2_cLcInvT).dot(LcInvMLrInvT)*LrInvPsi2_rLrInvT).sum() \
           - tr_LrInvPsi2_rLrInvT_LrInvSrLrInvT* tr_LcInvPsi2_cLcInvT_LcInvScLcInvT \
           + 2 * (Y * LcInvPsi1_cT.T.dot(LcInvMLrInvT).dot(LrInvPsi1_rT)).sum() - psi0_c * psi0_r \
           + tr_LrInvPsi2_rLrInvT * tr_LcInvPsi2_cLcInvT

    logL = -N*D/2.*(np.log(2.*np.pi)-np.log(beta)) + beta/2.* logL_A \
           -Mc * (np.log(np.diag(Lr)).sum()-np.log(np.diag(LSr)).sum()) -Mr * (np.log(np.diag(Lc)).sum()-np.log(np.diag(LSc)).sum()) \
           - np.square(LcInvMLrInvT).sum()/2. - tr_LrInvSrLrInvT * tr_LcInvScLcInvT/2. + Mr*Mc/2.

    #======================================================================
    # Compute dL_dKuu
    #======================================================================

    tmp = beta* LcInvPsi2_cLcInvT.dot(LcInvMLrInvT).dot(LrInvPsi2_rLrInvT).dot(LcInvMLrInvT.T) \
        + beta* tr_LrInvPsi2_rLrInvT_LrInvSrLrInvT * LcInvPsi2_cLcInvT.dot(LcInvScLcInvT) \
        - beta* LcInvMLrInvT.dot(LrInvPsi1_rT).dot(Y.T).dot(LcInvPsi1_cT.T) \
        - beta/2. * tr_LrInvPsi2_rLrInvT* LcInvPsi2_cLcInvT - Mr/2.*np.eye(Mc) \
        + tdot(LcInvMLrInvT)/2. + tr_LrInvSrLrInvT/2. * LcInvScLcInvT

    dL_dKuu_c = backsub_both_sides(Lc, tmp, 'left')
    dL_dKuu_c += dL_dKuu_c.T
    dL_dKuu_c *= 0.5

    tmp = beta* LcInvMLrInvT.T.dot(LcInvPsi2_cLcInvT).dot(LcInvMLrInvT).dot(LrInvPsi2_rLrInvT) \
        + beta* tr_LcInvPsi2_cLcInvT_LcInvScLcInvT * LrInvPsi2_rLrInvT.dot(LrInvSrLrInvT) \
        - beta* LrInvPsi1_rT.dot(Y.T).dot(LcInvPsi1_cT.T).dot(LcInvMLrInvT) \
        - beta/2. * tr_LcInvPsi2_cLcInvT * LrInvPsi2_rLrInvT - Mc/2.*np.eye(Mr) \
        + tdot(LcInvMLrInvT.T)/2. + tr_LcInvScLcInvT/2. * LrInvSrLrInvT

    dL_dKuu_r = backsub_both_sides(Lr, tmp, 'left')
    dL_dKuu_r += dL_dKuu_r.T
    dL_dKuu_r *= 0.5

    #======================================================================
    # Compute dL_dthetaL
    #======================================================================

    dL_dthetaL = -D*N*beta/2. - logL_A*beta*beta/2.

    #======================================================================
    # Compute dL_dqU
    #======================================================================

    tmp = -beta * LcInvPsi2_cLcInvT.dot(LcInvMLrInvT).dot(LrInvPsi2_rLrInvT)\
        + beta* LcInvPsi1_cT.dot(Y).dot(LrInvPsi1_rT.T) - LcInvMLrInvT
    dL_dqU_mean = dtrtrs(Lc, dtrtrs(Lr, tmp.T, trans=1)[0].T, trans=1)[0]

    LScInv = dtrtri(LSc)
    tmp = -beta/2.*tr_LrInvPsi2_rLrInvT_LrInvSrLrInvT * LcInvPsi2_cLcInvT -tr_LrInvSrLrInvT/2.*np.eye(Mc)
    dL_dqU_var_c = backsub_both_sides(Lc, tmp, 'left') + tdot(LScInv.T) * Mr/2.
    LSrInv = dtrtri(LSr)
    tmp = -beta/2.*tr_LcInvPsi2_cLcInvT_LcInvScLcInvT * LrInvPsi2_rLrInvT -tr_LcInvScLcInvT/2.*np.eye(Mr)
    dL_dqU_var_r = backsub_both_sides(Lr, tmp, 'left') + tdot(LSrInv.T) * Mc/2.

    #======================================================================
    # Compute the Posterior distribution of inducing points p(u|Y)
    #======================================================================

    post = PosteriorMultioutput(LcInvMLrInvT=LcInvMLrInvT, LcInvScLcInvT=LcInvScLcInvT,
                                LrInvSrLrInvT=LrInvSrLrInvT, Lr=Lr, Lc=Lc, kern_r=kern_r, Xr=Xr, Zr=Zr)

    #======================================================================
    # Compute dL_dpsi
    #======================================================================

    dL_dpsi0_r = - psi0_c * beta/2. * np.ones((D,))
    dL_dpsi0_c = - psi0_r * beta/2. * np.ones((N,))

    dL_dpsi1_c = beta * dtrtrs(Lc, (Y.dot(LrInvPsi1_rT.T).dot(LcInvMLrInvT.T)).T, trans=1)[0].T
    dL_dpsi1_r = beta * dtrtrs(Lr, (Y.T.dot(LcInvPsi1_cT.T).dot(LcInvMLrInvT)).T, trans=1)[0].T

    tmp = beta/2.*(-LcInvMLrInvT.dot(LrInvPsi2_rLrInvT).dot(LcInvMLrInvT.T) - tr_LrInvPsi2_rLrInvT_LrInvSrLrInvT * LcInvScLcInvT +tr_LrInvPsi2_rLrInvT *np.eye(Mc))
    dL_dpsi2_c = backsub_both_sides(Lc, tmp, 'left')
    tmp = beta/2.*(-LcInvMLrInvT.T.dot(LcInvPsi2_cLcInvT).dot(LcInvMLrInvT) - tr_LcInvPsi2_cLcInvT_LcInvScLcInvT * LrInvSrLrInvT +tr_LcInvPsi2_cLcInvT *np.eye(Mr))
    dL_dpsi2_r = backsub_both_sides(Lr, tmp, 'left')

    if not uncertain_inputs_r:
        dL_dpsi1_r += psi1_r.dot(dL_dpsi2_r+dL_dpsi2_r.T)
    if not uncertain_inputs_c:
        dL_dpsi1_c += psi1_c.dot(dL_dpsi2_c+dL_dpsi2_c.T)

    grad_dict = {
        'dL_dthetaL':dL_dthetaL,
        'dL_dqU_mean':dL_dqU_mean,
        'dL_dqU_var_c':dL_dqU_var_c,
        'dL_dqU_var_r':dL_dqU_var_r,
        'dL_dKuu_c': dL_dKuu_c,
        'dL_dKuu_r': dL_dKuu_r,
    }

    if uncertain_inputs_c:
        grad_dict['dL_dpsi0_c'] = dL_dpsi0_c
        grad_dict['dL_dpsi1_c'] = dL_dpsi1_c
        grad_dict['dL_dpsi2_c'] = dL_dpsi2_c
    else:
        grad_dict['dL_dKdiag_c'] = dL_dpsi0_c
        grad_dict['dL_dKfu_c'] = dL_dpsi1_c

    if uncertain_inputs_r:
        grad_dict['dL_dpsi0_r'] = dL_dpsi0_r
        grad_dict['dL_dpsi1_r'] = dL_dpsi1_r
        grad_dict['dL_dpsi2_r'] = dL_dpsi2_r
    else:
        grad_dict['dL_dKdiag_r'] = dL_dpsi0_r
        grad_dict['dL_dKfu_r'] = dL_dpsi1_r

    return post, logL, grad_dict
[ "def", "inference", "(", "self", ",", "kern_r", ",", "kern_c", ",", "Xr", ",", "Xc", ",", "Zr", ",", "Zc", ",", "likelihood", ",", "Y", ",", "qU_mean", ",", "qU_var_r", ",", "qU_var_c", ")", ":", "N", ",", "D", ",", "Mr", ",", "Mc", ",", "Qr", ...
The SVI-VarDTC inference
[ "The", "SVI", "-", "VarDTC", "inference" ]
python
train
46.478788
rehandalal/therapist
therapist/cli.py
https://github.com/rehandalal/therapist/blob/1995a7e396eea2ec8685bb32a779a4110b459b1f/therapist/cli.py#L145-L195
def uninstall(**kwargs): """Uninstall the current pre-commit hook.""" force = kwargs.get('force') restore_legacy = kwargs.get('restore_legacy') colorama.init(strip=kwargs.get('no_color')) git_dir = current_git_dir() if git_dir is None: output(NOT_GIT_REPO_MSG) exit(1) hook_path = os.path.join(git_dir, 'hooks', 'pre-commit') if not os.path.isfile(hook_path): output(NO_HOOK_INSTALLED_MSG) exit(0) hook_hash = identify_hook(hook_path) if hook_hash: if not force: if not click.confirm(CONFIRM_UNINSTALL_HOOK_MSG, default=False): output(UNINSTALL_ABORTED_MSG) exit(1) else: output(CURRENT_HOOK_NOT_THERAPIST_MSG) exit(1) legacy_hook_path = os.path.join(git_dir, 'hooks', 'pre-commit.legacy') if os.path.isfile(legacy_hook_path): if not force and not restore_legacy: output(LEGACY_HOOK_EXISTS_MSG) restore_legacy = click.confirm(CONFIRM_RESTORE_LEGACY_HOOK_MSG, default=True) if restore_legacy: output(COPYING_LEGACY_HOOK_MSG, end='') shutil.copy2(legacy_hook_path, hook_path) os.remove(legacy_hook_path) output(DONE_COPYING_LEGACY_HOOK_MSG) exit(0) else: if force or click.confirm('Would you like to remove the legacy hook?', default=False): output(REMOVING_LEGACY_HOOK_MSG, end='') os.remove(legacy_hook_path) output(DONE_REMOVING_LEGACY_HOOK_MSG) output(UNINSTALLING_HOOK_MSG, end='') os.remove(hook_path) output(DONE_UNINSTALLING_HOOK_MSG)
[ "def", "uninstall", "(", "*", "*", "kwargs", ")", ":", "force", "=", "kwargs", ".", "get", "(", "'force'", ")", "restore_legacy", "=", "kwargs", ".", "get", "(", "'restore_legacy'", ")", "colorama", ".", "init", "(", "strip", "=", "kwargs", ".", "get",...
Uninstall the current pre-commit hook.
[ "Uninstall", "the", "current", "pre", "-", "commit", "hook", "." ]
python
train
32
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L638-L701
def length_of_geographical_area_code(numobj): """Return length of the geographical area code for a number. Gets the length of the geographical area code from the PhoneNumber object passed in, so that clients could use it to split a national significant number into geographical area code and subscriber number. It works in such a way that the resultant subscriber number should be diallable, at least on some devices. An example of how this could be used: >>> import phonenumbers >>> numobj = phonenumbers.parse("16502530000", "US") >>> nsn = phonenumbers.national_significant_number(numobj) >>> ac_len = phonenumbers.length_of_geographical_area_code(numobj) >>> if ac_len > 0: ... area_code = nsn[:ac_len] ... subscriber_number = nsn[ac_len:] ... else: ... area_code = "" ... subscriber_number = nsn N.B.: area code is a very ambiguous concept, so the I18N team generally recommends against using it for most purposes, but recommends using the more general national_number instead. Read the following carefully before deciding to use this method: - geographical area codes change over time, and this method honors those changes; therefore, it doesn't guarantee the stability of the result it produces. - subscriber numbers may not be diallable from all devices (notably mobile devices, which typically require the full national_number to be dialled in most countries). - most non-geographical numbers have no area codes, including numbers from non-geographical entities. - some geographical numbers have no area codes. Arguments: numobj -- The PhoneNumber object to find the length of the area code form. Returns the length of area code of the PhoneNumber object passed in. """ metadata = PhoneMetadata.metadata_for_region(region_code_for_number(numobj), None) if metadata is None: return 0 # If a country doesn't use a national prefix, and this number doesn't have # an Italian leading zero, we assume it is a closed dialling plan with no # area codes. if metadata.national_prefix is None and not numobj.italian_leading_zero: return 0 ntype = number_type(numobj) country_code = numobj.country_code if (ntype == PhoneNumberType.MOBILE and (country_code in _GEO_MOBILE_COUNTRIES_WITHOUT_MOBILE_AREA_CODES)): # Note this is a rough heuristic; it doesn't cover Indonesia well, for # example, where area codes are present for some mobile phones but not # for others. We have no better way of representing this in the # metadata at this point. return 0 if not is_number_type_geographical(ntype, country_code): return 0 return length_of_national_destination_code(numobj)
[ "def", "length_of_geographical_area_code", "(", "numobj", ")", ":", "metadata", "=", "PhoneMetadata", ".", "metadata_for_region", "(", "region_code_for_number", "(", "numobj", ")", ",", "None", ")", "if", "metadata", "is", "None", ":", "return", "0", "# If a count...
Return length of the geographical area code for a number. Gets the length of the geographical area code from the PhoneNumber object passed in, so that clients could use it to split a national significant number into geographical area code and subscriber number. It works in such a way that the resultant subscriber number should be diallable, at least on some devices. An example of how this could be used: >>> import phonenumbers >>> numobj = phonenumbers.parse("16502530000", "US") >>> nsn = phonenumbers.national_significant_number(numobj) >>> ac_len = phonenumbers.length_of_geographical_area_code(numobj) >>> if ac_len > 0: ... area_code = nsn[:ac_len] ... subscriber_number = nsn[ac_len:] ... else: ... area_code = "" ... subscriber_number = nsn N.B.: area code is a very ambiguous concept, so the I18N team generally recommends against using it for most purposes, but recommends using the more general national_number instead. Read the following carefully before deciding to use this method: - geographical area codes change over time, and this method honors those changes; therefore, it doesn't guarantee the stability of the result it produces. - subscriber numbers may not be diallable from all devices (notably mobile devices, which typically require the full national_number to be dialled in most countries). - most non-geographical numbers have no area codes, including numbers from non-geographical entities. - some geographical numbers have no area codes. Arguments: numobj -- The PhoneNumber object to find the length of the area code form. Returns the length of area code of the PhoneNumber object passed in.
[ "Return", "length", "of", "the", "geographical", "area", "code", "for", "a", "number", "." ]
python
train
43.671875
google/grr
grr/server/grr_response_server/databases/mysql_hunts.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_hunts.py#L152-L206
def _HuntObjectFromRow(self, row): """Generates a flow object from a database row.""" ( create_time, last_update_time, creator, duration_micros, client_rate, client_limit, hunt_state, hunt_state_comment, init_start_time, last_start_time, num_clients_at_start_time, description, body, ) = row hunt_obj = rdf_hunt_objects.Hunt.FromSerializedString(body) hunt_obj.duration = rdfvalue.Duration.FromMicroseconds(duration_micros) hunt_obj.create_time = mysql_utils.TimestampToRDFDatetime(create_time) hunt_obj.last_update_time = mysql_utils.TimestampToRDFDatetime( last_update_time) # Checks below are needed for hunts that were written to the database before # respective fields became part of F1 schema. if creator is not None: hunt_obj.creator = creator if client_rate is not None: hunt_obj.client_rate = client_rate if client_limit is not None: hunt_obj.client_limit = client_limit if hunt_state is not None: hunt_obj.hunt_state = hunt_state if hunt_state_comment is not None: hunt_obj.hunt_state_comment = hunt_state_comment if init_start_time is not None: hunt_obj.init_start_time = mysql_utils.TimestampToRDFDatetime( init_start_time) if last_start_time is not None: hunt_obj.last_start_time = mysql_utils.TimestampToRDFDatetime( last_start_time) if num_clients_at_start_time is not None: hunt_obj.num_clients_at_start_time = num_clients_at_start_time if description is not None: hunt_obj.description = description return hunt_obj
[ "def", "_HuntObjectFromRow", "(", "self", ",", "row", ")", ":", "(", "create_time", ",", "last_update_time", ",", "creator", ",", "duration_micros", ",", "client_rate", ",", "client_limit", ",", "hunt_state", ",", "hunt_state_comment", ",", "init_start_time", ",",...
Generates a hunt object from a database row.
[ "Generates", "a", "flow", "object", "from", "a", "database", "row", "." ]
python
train
29.981818
elektito/finglish
finglish/f2p.py
https://github.com/elektito/finglish/blob/3d6953d7ad385f860fac4b9110da4205326e4de5/finglish/f2p.py#L166-L194
def f2p_list(phrase, max_word_size=15, cutoff=3): """Convert a phrase from Finglish to Persian. phrase: The phrase to convert. max_word_size: Maximum size of the words to consider. Words larger than this will be kept unchanged. cutoff: The cut-off point. For each word, there could be many possibilities. By default 3 of these possibilities are considered for each word. This number can be changed by this argument. Returns a list of lists, each sub-list contains a number of possibilities for each word as a pair of (word, confidence) values. """ # split the phrase into words results = [w for w in sep_regex.split(phrase) if w] # return an empty list if no words if results == []: return [] # convert each word separately results = [f2p_word(w, max_word_size, cutoff) for w in results] return results
[ "def", "f2p_list", "(", "phrase", ",", "max_word_size", "=", "15", ",", "cutoff", "=", "3", ")", ":", "# split the phrase into words", "results", "=", "[", "w", "for", "w", "in", "sep_regex", ".", "split", "(", "phrase", ")", "if", "w", "]", "# return an...
Convert a phrase from Finglish to Persian. phrase: The phrase to convert. max_word_size: Maximum size of the words to consider. Words larger than this will be kept unchanged. cutoff: The cut-off point. For each word, there could be many possibilities. By default 3 of these possibilities are considered for each word. This number can be changed by this argument. Returns a list of lists, each sub-list contains a number of possibilities for each word as a pair of (word, confidence) values.
[ "Convert", "a", "phrase", "from", "Finglish", "to", "Persian", "." ]
python
train
29.758621
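A minimal usage sketch for the `f2p_list` record above; the import path follows the record's `path` field, and the input phrase and printed candidates are illustrative, not actual library output.

```python
# Hypothetical usage of finglish's f2p_list; outputs are illustrative.
from finglish.f2p import f2p_list

candidates = f2p_list("salam donya", max_word_size=15, cutoff=3)
# One sub-list per word, each with up to `cutoff` (word, confidence) pairs.
for word_options in candidates:
    best_word, confidence = word_options[0]
    print(best_word, confidence)
```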
HarveyHunt/i3situation
i3situation/core/status.py
https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/status.py#L141-L153
def handle_events(self): """ An event handler that processes events from stdin and calls the on_click function of the respective object. This function is run in another thread, so as to not stall the main thread. """ for event in sys.stdin: if event.startswith('['): continue name = json.loads(event.lstrip(','))['name'] for obj in self.loader.objects: if obj.output_options['name'] == name: obj.on_click(json.loads(event.lstrip(',')))
[ "def", "handle_events", "(", "self", ")", ":", "for", "event", "in", "sys", ".", "stdin", ":", "if", "event", ".", "startswith", "(", "'['", ")", ":", "continue", "name", "=", "json", ".", "loads", "(", "event", ".", "lstrip", "(", "','", ")", ")",...
An event handler that processes events from stdin and calls the on_click function of the respective object. This function is run in another thread, so as to not stall the main thread.
[ "An", "event", "handler", "that", "processes", "events", "from", "stdin", "and", "calls", "the", "on_click", "function", "of", "the", "respective", "object", ".", "This", "function", "is", "run", "in", "another", "thread", "so", "as", "to", "not", "stall", ...
python
train
43
openearth/bmi-python
bmi/wrapper.py
https://github.com/openearth/bmi-python/blob/2f53f24d45515eb0711c2d28ddd6c1582045248f/bmi/wrapper.py#L113-L115
def struct2dict(struct): """convert a ctypes structure to a dictionary""" return {x: getattr(struct, x) for x in dict(struct._fields_).keys()}
[ "def", "struct2dict", "(", "struct", ")", ":", "return", "{", "x", ":", "getattr", "(", "struct", ",", "x", ")", "for", "x", "in", "dict", "(", "struct", ".", "_fields_", ")", ".", "keys", "(", ")", "}" ]
convert a ctypes structure to a dictionary
[ "convert", "a", "ctypes", "structure", "to", "a", "dictionary" ]
python
train
49.333333
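Because `struct2dict` depends only on `ctypes` metadata, its behaviour is easy to verify standalone; the `Point` structure below is made up for illustration.

```python
import ctypes

def struct2dict(struct):
    """convert a ctypes structure to a dictionary"""
    return {x: getattr(struct, x) for x in dict(struct._fields_).keys()}

class Point(ctypes.Structure):  # hypothetical example structure
    _fields_ = [("x", ctypes.c_double), ("y", ctypes.c_double)]

print(struct2dict(Point(1.5, -2.0)))  # {'x': 1.5, 'y': -2.0}
```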
hosford42/xcs
xcs/_python_bitstrings.py
https://github.com/hosford42/xcs/blob/183bdd0dd339e19ded3be202f86e1b38bdb9f1e5/xcs/_python_bitstrings.py#L283-L298
def count(self): """Returns the number of bits set to True in the bit string. Usage: assert BitString('00110').count() == 2 Arguments: None Return: An int, the number of bits with value 1. """ result = 0 bits = self._bits while bits: result += bits % 2 bits >>= 1 return result
[ "def", "count", "(", "self", ")", ":", "result", "=", "0", "bits", "=", "self", ".", "_bits", "while", "bits", ":", "result", "+=", "bits", "%", "2", "bits", ">>=", "1", "return", "result" ]
Returns the number of bits set to True in the bit string. Usage: assert BitString('00110').count() == 2 Arguments: None Return: An int, the number of bits with value 1.
[ "Returns", "the", "number", "of", "bits", "set", "to", "True", "in", "the", "bit", "string", "." ]
python
train
24
sdispater/cachy
cachy/tagged_cache.py
https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/tagged_cache.py#L110-L122
def decrement(self, key, value=1): """ Decrement the value of an item in the cache. :param key: The cache key :type key: str :param value: The decrement value :type value: int :rtype: int or bool """ return self._store.decrement(self.tagged_item_key(key), value)
[ "def", "decrement", "(", "self", ",", "key", ",", "value", "=", "1", ")", ":", "self", ".", "_store", ".", "decrement", "(", "self", ".", "tagged_item_key", "(", "key", ")", ",", "value", ")" ]
Decrement the value of an item in the cache. :param key: The cache key :type key: str :param value: The decrement value :type value: int :rtype: int or bool
[ "Decrement", "the", "value", "of", "an", "item", "in", "the", "cache", "." ]
python
train
24.461538
madmaze/pytesseract
src/pytesseract.py
https://github.com/madmaze/pytesseract/blob/dd7dffc227480e9de024e99a5e10e7664f42b2de/src/pytesseract.py#L360-L381
def image_to_data(image, lang=None, config='', nice=0, output_type=Output.STRING): ''' Returns string containing box boundaries, confidences, and other information. Requires Tesseract 3.05+ ''' if get_tesseract_version() < '3.05': raise TSVNotSupported() config = '{} {}'.format('-c tessedit_create_tsv=1', config.strip()).strip() args = [image, 'tsv', lang, config, nice] return { Output.BYTES: lambda: run_and_get_output(*(args + [True])), Output.DATAFRAME: lambda: get_pandas_output(args + [True]), Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), Output.STRING: lambda: run_and_get_output(*args), }[output_type]()
[ "def", "image_to_data", "(", "image", ",", "lang", "=", "None", ",", "config", "=", "''", ",", "nice", "=", "0", ",", "output_type", "=", "Output", ".", "STRING", ")", ":", "if", "get_tesseract_version", "(", ")", "<", "'3.05'", ":", "raise", "TSVNotSu...
Returns string containing box boundaries, confidences, and other information. Requires Tesseract 3.05+
[ "Returns", "string", "containing", "box", "boundaries", "confidences", "and", "other", "information", ".", "Requires", "Tesseract", "3", ".", "05", "+" ]
python
train
35
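A short usage sketch for `image_to_data`; the file name is a placeholder, while the call shape and the `Output.DICT` keys follow pytesseract's documented TSV columns.

```python
# Hypothetical usage; 'receipt.png' is a placeholder file name.
from PIL import Image
import pytesseract
from pytesseract import Output

data = pytesseract.image_to_data(Image.open('receipt.png'), output_type=Output.DICT)
# DICT output maps TSV column names to lists, e.g. data['text'] and data['conf'].
for text, conf in zip(data['text'], data['conf']):
    if text.strip():
        print(text, conf)
```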
Cognexa/cxflow
cxflow/cli/ls.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L50-L71
def walk_train_dirs(root_dir: str) -> Iterable[Tuple[str, Iterable[str]]]: """ Modify os.walk with the following: - return only root_dir and sub-dirs - return only training sub-dirs - stop recursion at training dirs :param root_dir: root dir to be walked :return: generator of (root_dir, training sub-dirs) pairs """ if is_train_dir(root_dir): yield '', [root_dir] return for dir_, subdirs, _ in os.walk(root_dir, topdown=True): # filter train sub-dirs train_subdirs = [subdir for subdir in subdirs if is_train_dir(path.join(dir_, subdir))] # stop the recursion at the train sub-dirs for subdir in train_subdirs: subdirs.remove(subdir) yield dir_, train_subdirs
[ "def", "walk_train_dirs", "(", "root_dir", ":", "str", ")", "->", "Iterable", "[", "Tuple", "[", "str", ",", "Iterable", "[", "str", "]", "]", "]", ":", "if", "is_train_dir", "(", "root_dir", ")", ":", "yield", "''", ",", "[", "root_dir", "]", "retur...
Modify os.walk with the following: - return only root_dir and sub-dirs - return only training sub-dirs - stop recursion at training dirs :param root_dir: root dir to be walked :return: generator of (root_dir, training sub-dirs) pairs
[ "Modify", "os", ".", "walk", "with", "the", "following", ":", "-", "return", "only", "root_dir", "and", "sub", "-", "dirs", "-", "return", "only", "training", "sub", "-", "dirs", "-", "stop", "recursion", "at", "training", "dirs" ]
python
train
34.681818
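A hedged usage sketch for `walk_train_dirs`; the import follows the record's `path` field and `./log` is a placeholder root directory.

```python
import os.path as path
from cxflow.cli.ls import walk_train_dirs  # import path assumed from the record

for root, train_dirs in walk_train_dirs('./log'):
    for train_dir in train_dirs:
        print(path.join(root, train_dir))  # one line per discovered training dir
```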
Kaggle/kaggle-api
kaggle/api/kaggle_api_extended.py
https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2507-L2513
def read(self, *args, **kwargs): """ read the buffer, passing named and non named arguments to the io.BufferedReader function. """ buf = io.BufferedReader.read(self, *args, **kwargs) self.increment(len(buf)) return buf
[ "def", "read", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "buf", "=", "io", ".", "BufferedReader", ".", "read", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "increment", "(", "len", "(", "buf"...
read the buffer, passing named and non named arguments to the io.BufferedReader function.
[ "read", "the", "buffer", "passing", "named", "and", "non", "named", "arguments", "to", "the", "io", ".", "BufferedReader", "function", "." ]
python
train
37.714286
iron-io/iron_core_python
iron_core.py
https://github.com/iron-io/iron_core_python/blob/f09a160a854912efcb75a810702686bc25b74fa8/iron_core.py#L339-L354
def patch(self, url, body="", headers={}, retry=True): """Execute an HTTP PATCH request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. body -- A string or file object to send as the body of the request. Defaults to an empty string. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True. """ return self.request(url=url, method="PATCH", body=body, headers=headers, retry=retry)
[ "def", "patch", "(", "self", ",", "url", ",", "body", "=", "\"\"", ",", "headers", "=", "{", "}", ",", "retry", "=", "True", ")", ":", "return", "self", ".", "request", "(", "url", "=", "url", ",", "method", "=", "\"PATCH\"", ",", "body", "=", ...
Execute an HTTP PATCH request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. body -- A string or file object to send as the body of the request. Defaults to an empty string. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True.
[ "Execute", "an", "HTTP", "PATCH", "request", "and", "return", "a", "dict", "containing", "the", "response", "and", "the", "response", "status", "code", "." ]
python
train
49.5625
mozilla-services/python-dockerflow
src/dockerflow/flask/app.py
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L194-L201
def _got_request_exception(self, sender, exception, **extra): """ The signal handler for the got_request_exception signal. """ extra = self.summary_extra() extra['errno'] = 500 self.summary_logger.error(str(exception), extra=extra) g._has_exception = True
[ "def", "_got_request_exception", "(", "self", ",", "sender", ",", "exception", ",", "*", "*", "extra", ")", ":", "extra", "=", "self", ".", "summary_extra", "(", ")", "extra", "[", "'errno'", "]", "=", "500", "self", ".", "summary_logger", ".", "error", ...
The signal handler for the got_request_exception signal.
[ "The", "signal", "handler", "for", "the", "got_request_exception", "signal", "." ]
python
train
38
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L255-L265
def missing_datetimes(self, finite_datetimes): """ Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow. """ return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
[ "def", "missing_datetimes", "(", "self", ",", "finite_datetimes", ")", ":", "return", "[", "d", "for", "d", "in", "finite_datetimes", "if", "not", "self", ".", "_instantiate_task_cls", "(", "self", ".", "datetime_to_parameter", "(", "d", ")", ")", ".", "comp...
Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow.
[ "Override", "in", "subclasses", "to", "do", "bulk", "checks", "." ]
python
train
37.454545
saltstack/salt
salt/minion.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2224-L2229
def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts)
[ "def", "matchers_refresh", "(", "self", ")", ":", "log", ".", "debug", "(", "'Refreshing matchers.'", ")", "self", ".", "matchers", "=", "salt", ".", "loader", ".", "matchers", "(", "self", ".", "opts", ")" ]
Refresh the matchers
[ "Refresh", "the", "matchers" ]
python
train
28.833333
edx/xblock-utils
xblockutils/studio_editable.py
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L472-L491
def author_preview_view(self, context): """ View for previewing contents in studio. """ children_contents = [] fragment = Fragment() for child_id in self.children: child = self.runtime.get_block(child_id) child_fragment = self._render_child_fragment(child, context, 'preview_view') fragment.add_frag_resources(child_fragment) children_contents.append(child_fragment.content) render_context = { 'block': self, 'children_contents': children_contents } render_context.update(context) fragment.add_content(self.loader.render_template(self.CHILD_PREVIEW_TEMPLATE, render_context)) return fragment
[ "def", "author_preview_view", "(", "self", ",", "context", ")", ":", "children_contents", "=", "[", "]", "fragment", "=", "Fragment", "(", ")", "for", "child_id", "in", "self", ".", "children", ":", "child", "=", "self", ".", "runtime", ".", "get_block", ...
View for previewing contents in studio.
[ "View", "for", "previewing", "contents", "in", "studio", "." ]
python
train
36.7
openego/eTraGo
etrago/tools/utilities.py
https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/utilities.py#L102-L212
def geolocation_buses(network, session): """ If geopandas is installed: Use Geometries of buses x/y(lon/lat) and Polygons of Countries from RenpassGisParameterRegion in order to locate the buses Else: Use coordinats of buses to locate foreign buses, which is less accurate. Parameters ---------- network_etrago: : class: `etrago.tools.io.NetworkScenario` eTraGo network object compiled by: meth: `etrago.appl.etrago` session: : sqlalchemy: `sqlalchemy.orm.session.Session < orm/session_basics.html >` SQLAlchemy session to the OEDB """ if geopandas: # Start db connetion # get renpassG!S scenario data RenpassGISRegion = RenpassGisParameterRegion # Define regions region_id = ['DE', 'DK', 'FR', 'BE', 'LU', 'AT', 'NO', 'PL', 'CH', 'CZ', 'SE', 'NL'] query = session.query(RenpassGISRegion.gid, RenpassGISRegion.u_region_id, RenpassGISRegion.stat_level, RenpassGISRegion.geom, RenpassGISRegion.geom_point) # get regions by query and filter Regions = [(gid, u_region_id, stat_level, geoalchemy2.shape.to_shape( geom), geoalchemy2.shape.to_shape(geom_point)) for gid, u_region_id, stat_level, geom, geom_point in query.filter(RenpassGISRegion.u_region_id. in_(region_id)).all()] crs = {'init': 'epsg:4326'} # transform lon lat to shapely Points and create GeoDataFrame points = [Point(xy) for xy in zip(network.buses.x, network.buses.y)] bus = gpd.GeoDataFrame(network.buses, crs=crs, geometry=points) # Transform Countries Polygons as Regions region = pd.DataFrame( Regions, columns=['id', 'country', 'stat_level', 'Polygon', 'Point']) re = gpd.GeoDataFrame(region, crs=crs, geometry=region['Polygon']) # join regions and buses by geometry which intersects busC = gpd.sjoin(bus, re, how='inner', op='intersects') # busC # Drop non used columns busC = busC.drop(['index_right', 'Point', 'id', 'Polygon', 'stat_level', 'geometry'], axis=1) # add busC to eTraGo.buses network.buses['country_code'] = busC['country'] network.buses.country_code[network.buses.country_code.isnull()] = 'DE' # close session session.close() else: buses_by_country(network) transborder_lines_0 = network.lines[network.lines['bus0'].isin( network.buses.index[network.buses['country_code'] != 'DE'])].index transborder_lines_1 = network.lines[network.lines['bus1'].isin( network.buses.index[network.buses['country_code']!= 'DE'])].index #set country tag for lines network.lines.loc[transborder_lines_0, 'country'] = \ network.buses.loc[network.lines.loc[transborder_lines_0, 'bus0'].\ values,'country_code'].values network.lines.loc[transborder_lines_1, 'country'] = \ network.buses.loc[network.lines.loc[transborder_lines_1, 'bus1'].\ values,'country_code'].values network.lines['country'].fillna('DE', inplace=True) doubles = list(set(transborder_lines_0.intersection(transborder_lines_1))) for line in doubles: c_bus0 = network.buses.loc[network.lines.loc[line, 'bus0'], 'country_code'] c_bus1 = network.buses.loc[network.lines.loc[line, 'bus1'], 'country_code'] network.lines.loc[line, 'country'] = '{}{}'.format(c_bus0, c_bus1) transborder_links_0 = network.links[network.links['bus0'].isin( network.buses.index[network.buses['country_code']!= 'DE'])].index transborder_links_1 = network.links[network.links['bus1'].isin( network.buses.index[network.buses['country_code'] != 'DE'])].index #set country tag for links network.links.loc[transborder_links_0, 'country'] = \ network.buses.loc[network.links.loc[transborder_links_0, 'bus0'].\ values, 'country_code'].values network.links.loc[transborder_links_1, 'country'] = \ network.buses.loc[network.links.loc[transborder_links_1, 'bus1'].\ values, 'country_code'].values network.links['country'].fillna('DE', inplace=True) doubles = list(set(transborder_links_0.intersection(transborder_links_1))) for link in doubles: c_bus0 = network.buses.loc[ network.links.loc[link, 'bus0'], 'country_code'] c_bus1 = network.buses.loc[ network.links.loc[link, 'bus1'], 'country_code'] network.links.loc[link, 'country'] = '{}{}'.format(c_bus0, c_bus1) return network
[ "def", "geolocation_buses", "(", "network", ",", "session", ")", ":", "if", "geopandas", ":", "# Start db connetion", "# get renpassG!S scenario data", "RenpassGISRegion", "=", "RenpassGisParameterRegion", "# Define regions", "region_id", "=", "[", "'DE'", ",", "'DK'", ...
If geopandas is installed: Use Geometries of buses x/y(lon/lat) and Polygons of Countries from RenpassGisParameterRegion in order to locate the buses Else: Use coordinates of buses to locate foreign buses, which is less accurate. Parameters ---------- network_etrago: : class: `etrago.tools.io.NetworkScenario` eTraGo network object compiled by: meth: `etrago.appl.etrago` session: : sqlalchemy: `sqlalchemy.orm.session.Session < orm/session_basics.html >` SQLAlchemy session to the OEDB
[ "If", "geopandas", "is", "installed", ":", "Use", "Geometries", "of", "buses", "x", "/", "y", "(", "lon", "/", "lat", ")", "and", "Polygons", "of", "Countries", "from", "RenpassGisParameterRegion", "in", "order", "to", "locate", "the", "buses" ]
python
train
44.117117
okfn-brasil/serenata-toolbox
serenata_toolbox/chamber_of_deputies/presences_dataset.py
https://github.com/okfn-brasil/serenata-toolbox/blob/47b14725e8ed3a53fb52190a2ba5f29182a16959/serenata_toolbox/chamber_of_deputies/presences_dataset.py#L33-L55
def fetch(self, deputies, start_date, end_date): """ :param deputies: (pandas.DataFrame) a dataframe with deputies data :param date_start: (str) date in the format dd/mm/yyyy :param date_end: (str) date in the format dd/mm/yyyy """ log.debug("Fetching data for {} deputies from {} -> {}".format(len(deputies), start_date, end_date)) records = self._all_presences(deputies, start_date, end_date) df = pd.DataFrame(records, columns=( 'term', 'congressperson_document', 'congressperson_name', 'party', 'state', 'date', 'present_on_day', 'justification', 'session', 'presence' )) return self._translate(df)
[ "def", "fetch", "(", "self", ",", "deputies", ",", "start_date", ",", "end_date", ")", ":", "log", ".", "debug", "(", "\"Fetching data for {} deputies from {} -> {}\"", ".", "format", "(", "len", "(", "deputies", ")", ",", "start_date", ",", "end_date", ")", ...
:param deputies: (pandas.DataFrame) a dataframe with deputies data :param date_start: (str) date in the format dd/mm/yyyy :param date_end: (str) date in the format dd/mm/yyyy
[ ":", "param", "deputies", ":", "(", "pandas", ".", "DataFrame", ")", "a", "dataframe", "with", "deputies", "data", ":", "param", "date_start", ":", "(", "str", ")", "date", "in", "the", "format", "dd", "/", "mm", "/", "yyyy", ":", "param", "date_end", ...
python
valid
34
tomduck/pandoc-eqnos
pandoc_eqnos.py
https://github.com/tomduck/pandoc-eqnos/blob/a0e2b5684d2024ea96049ed2cff3acf4ab47c541/pandoc_eqnos.py#L213-L263
def process(meta): """Saves metadata fields in global variables and returns a few computed fields.""" # pylint: disable=global-statement global capitalize global use_cleveref_default global plusname global starname global numbersections # Read in the metadata fields and do some checking for name in ['eqnos-cleveref', 'xnos-cleveref', 'cleveref']: # 'xnos-cleveref' enables cleveref in all 3 of fignos/eqnos/tablenos # 'cleveref' is deprecated if name in meta: use_cleveref_default = check_bool(get_meta(meta, name)) break for name in ['eqnos-capitalize', 'eqnos-capitalise', 'xnos-capitalize', 'xnos-capitalise']: # 'eqnos-capitalise' is an alternative spelling # 'xnos-capitalise' enables capitalise in all 3 of fignos/eqnos/tablenos # 'xnos-capitalize' is an alternative spelling if name in meta: capitalize = check_bool(get_meta(meta, name)) break if 'eqnos-plus-name' in meta: tmp = get_meta(meta, 'eqnos-plus-name') if isinstance(tmp, list): plusname = tmp else: plusname[0] = tmp assert len(plusname) == 2 for name in plusname: assert isinstance(name, STRTYPES) if 'eqnos-star-name' in meta: tmp = get_meta(meta, 'eqnos-star-name') if isinstance(tmp, list): starname = tmp else: starname[0] = tmp assert len(starname) == 2 for name in starname: assert isinstance(name, STRTYPES) if 'xnos-number-sections' in meta: numbersections = check_bool(get_meta(meta, 'xnos-number-sections'))
[ "def", "process", "(", "meta", ")", ":", "# pylint: disable=global-statement", "global", "capitalize", "global", "use_cleveref_default", "global", "plusname", "global", "starname", "global", "numbersections", "# Read in the metadata fields and do some checking", "for", "name", ...
Saves metadata fields in global variables and returns a few computed fields.
[ "Saves", "metadata", "fields", "in", "global", "variables", "and", "returns", "a", "few", "computed", "fields", "." ]
python
train
33
opendatateam/udata
udata/harvest/actions.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L236-L276
def attach(domain, filename): '''Attach existing dataset to their harvest remote id before harvesting. The expected csv file format is the following: - a column with header "local" and the local IDs or slugs - a column with header "remote" and the remote IDs The delimiter should be ";". columns order and extras columns does not matter ''' count = 0 errors = 0 with open(filename) as csvfile: reader = csv.DictReader(csvfile, delimiter=b';', quotechar=b'"') for row in reader: try: dataset = Dataset.objects.get(id=ObjectId(row['local'])) except: # noqa (Never stop on failure) log.warning('Unable to attach dataset : %s', row['local']) errors += 1 continue # Detach previously attached dataset Dataset.objects(**{ 'extras__harvest:domain': domain, 'extras__harvest:remote_id': row['remote'] }).update(**{ 'unset__extras__harvest:domain': True, 'unset__extras__harvest:remote_id': True }) dataset.extras['harvest:domain'] = domain dataset.extras['harvest:remote_id'] = row['remote'] dataset.last_modified = datetime.now() dataset.save() count += 1 return AttachResult(count, errors)
[ "def", "attach", "(", "domain", ",", "filename", ")", ":", "count", "=", "0", "errors", "=", "0", "with", "open", "(", "filename", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "DictReader", "(", "csvfile", ",", "delimiter", "=", "b';'", ",",...
Attach existing dataset to their harvest remote id before harvesting. The expected csv file format is the following: - a column with header "local" and the local IDs or slugs - a column with header "remote" and the remote IDs The delimiter should be ";". Column order and extra columns do not matter
[ "Attach", "existing", "dataset", "to", "their", "harvest", "remote", "id", "before", "harvesting", "." ]
python
train
35
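A hedged sketch of the CSV layout the `attach` docstring above describes; the domain, the local ObjectIds, and the remote IDs are made up, and the import path is assumed from the record.

```python
# Hypothetical input matching the documented format: ';' delimiter,
# 'local' and 'remote' headers; the IDs below are made up.
csv_content = '"local";"remote"\n' \
              '"5f1a2b3c4d5e6f7a8b9c0d1e";"dataset-42"\n' \
              '"5f1a2b3c4d5e6f7a8b9c0d1f";"dataset-43"\n'
with open('attachments.csv', 'w') as f:
    f.write(csv_content)

from udata.harvest.actions import attach  # import path assumed from the record
result = attach('data.example.org', 'attachments.csv')
print(result)  # an AttachResult(count, errors) per the code above
```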
andrea-cuttone/geoplotlib
geoplotlib/core.py
https://github.com/andrea-cuttone/geoplotlib/blob/a1c355bccec91cabd157569fad6daf53cf7687a1/geoplotlib/core.py#L759-L768
def screen_to_latlon(self, x, y): """ Return the latitude and longitude corresponding to a screen point :param x: screen x :param y: screen y :return: latitude and longitude at x,y """ xtile = 1. * x / TILE_SIZE + self.xtile ytile = 1. * y / TILE_SIZE + self.ytile return self.num2deg(xtile, ytile, self.zoom)
[ "def", "screen_to_latlon", "(", "self", ",", "x", ",", "y", ")", ":", "xtile", "=", "1.", "*", "x", "/", "TILE_SIZE", "+", "self", ".", "xtile", "ytile", "=", "1.", "*", "y", "/", "TILE_SIZE", "+", "self", ".", "ytile", "return", "self", ".", "nu...
Return the latitude and longitude corresponding to a screen point :param x: screen x :param y: screen y :return: latitude and longitude at x,y
[ "Return", "the", "latitude", "and", "longitude", "corresponding", "to", "a", "screen", "point", ":", "param", "x", ":", "screen", "x", ":", "param", "y", ":", "screen", "y", ":", "return", ":", "latitude", "and", "longitude", "at", "x", "y" ]
python
train
37.2
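`screen_to_latlon` defers to the standard slippy-map tile maths; for reference, the usual `num2deg` inverse mapping can be written standalone (this is the common OSM formula, not code copied from geoplotlib).

```python
import math

def num2deg(xtile, ytile, zoom):
    # Standard OSM slippy-map conversion from tile coordinates to lat/lon.
    n = 2.0 ** zoom
    lon_deg = xtile / n * 360.0 - 180.0
    lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
    return math.degrees(lat_rad), lon_deg

print(num2deg(0.5, 0.5, 0))  # (0.0, 0.0): the tile centre at zoom 0 is the origin
```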
jbittel/django-mama-cas
mama_cas/models.py
https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L141-L149
def consume_tickets(self, user): """ Consume all valid ``Ticket``s for a specified user. This is run when the user logs out to ensure all issued tickets are no longer valid for future authentication attempts. """ for ticket in self.filter(user=user, consumed__isnull=True, expires__gt=now()): ticket.consume()
[ "def", "consume_tickets", "(", "self", ",", "user", ")", ":", "for", "ticket", "in", "self", ".", "filter", "(", "user", "=", "user", ",", "consumed__isnull", "=", "True", ",", "expires__gt", "=", "now", "(", ")", ")", ":", "ticket", ".", "consume", ...
Consume all valid ``Ticket``s for a specified user. This is run when the user logs out to ensure all issued tickets are no longer valid for future authentication attempts.
[ "Consume", "all", "valid", "Ticket", "s", "for", "a", "specified", "user", ".", "This", "is", "run", "when", "the", "user", "logs", "out", "to", "ensure", "all", "issued", "tickets", "are", "no", "longer", "valid", "for", "future", "authentication", "attem...
python
train
43.888889
hyperledger/sawtooth-core
validator/sawtooth_validator/networking/interconnect.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/networking/interconnect.py#L481-L590
def setup(self, socket_type, complete_or_error_queue): """Setup the asyncio event loop. Args: socket_type (int from zmq.*): One of zmq.DEALER or zmq.ROUTER complete_or_error_queue (queue.Queue): A way to propagate errors back to the calling thread. Needed since this function is directly used in Thread. Returns: None """ try: if self._secured: if self._server_public_key is None or \ self._server_private_key is None: raise LocalConfigurationError( "Attempting to start socket in secure mode, " "but complete server keys were not provided") self._event_loop = zmq.asyncio.ZMQEventLoop() asyncio.set_event_loop(self._event_loop) self._context = zmq.asyncio.Context() self._socket = self._context.socket(socket_type) self._socket.set(zmq.TCP_KEEPALIVE, 1) self._socket.set(zmq.TCP_KEEPALIVE_IDLE, self._connection_timeout) self._socket.set(zmq.TCP_KEEPALIVE_INTVL, self._heartbeat_interval) if socket_type == zmq.DEALER: self._socket.identity = "{}-{}".format( self._zmq_identity, hashlib.sha512(uuid.uuid4().hex.encode() ).hexdigest()[:23]).encode('ascii') if self._secured: # Generate ephemeral certificates for this connection public_key, secretkey = zmq.curve_keypair() self._socket.curve_publickey = public_key self._socket.curve_secretkey = secretkey self._socket.curve_serverkey = self._server_public_key self._socket.connect(self._address) elif socket_type == zmq.ROUTER: if self._secured: auth = AsyncioAuthenticator(self._context) self._auth = auth auth.start() auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) self._socket.curve_secretkey = self._server_private_key self._socket.curve_publickey = self._server_public_key self._socket.curve_server = True try: self._socket.bind(self._address) except zmq.error.ZMQError as e: raise LocalConfigurationError( "Can't bind to {}: {}".format(self._address, str(e))) else: LOGGER.info("Listening on %s", self._address) self._dispatcher.add_send_message(self._connection, self.send_message) self._dispatcher.add_send_last_message(self._connection, self.send_last_message) asyncio.ensure_future(self._remove_expired_futures(), loop=self._event_loop) asyncio.ensure_future(self._receive_message(), loop=self._event_loop) asyncio.ensure_future(self._dispatch_message(), loop=self._event_loop) self._dispatcher_queue = asyncio.Queue() if self._monitor: self._monitor_fd = "inproc://monitor.s-{}".format( _generate_id()[0:5]) self._monitor_sock = self._socket.get_monitor_socket( zmq.EVENT_DISCONNECTED, addr=self._monitor_fd) asyncio.ensure_future(self._monitor_disconnects(), loop=self._event_loop) except Exception as e: # Put the exception on the queue where in start we are waiting # for it. complete_or_error_queue.put_nowait(e) self._close_sockets() raise if self._heartbeat: asyncio.ensure_future(self._do_heartbeat(), loop=self._event_loop) # Put a 'complete with the setup tasks' sentinel on the queue. complete_or_error_queue.put_nowait(_STARTUP_COMPLETE_SENTINEL) asyncio.ensure_future(self._notify_started(), loop=self._event_loop) self._event_loop.run_forever() # event_loop.stop called elsewhere will cause the loop to break out # of run_forever then it can be closed and the context destroyed. self._event_loop.close() self._close_sockets()
[ "def", "setup", "(", "self", ",", "socket_type", ",", "complete_or_error_queue", ")", ":", "try", ":", "if", "self", ".", "_secured", ":", "if", "self", ".", "_server_public_key", "is", "None", "or", "self", ".", "_server_private_key", "is", "None", ":", "...
Setup the asyncio event loop. Args: socket_type (int from zmq.*): One of zmq.DEALER or zmq.ROUTER complete_or_error_queue (queue.Queue): A way to propagate errors back to the calling thread. Needed since this function is directly used in Thread. Returns: None
[ "Setup", "the", "asyncio", "event", "loop", "." ]
python
train
42.327273
nickstenning/tagalog
tagalog/io.py
https://github.com/nickstenning/tagalog/blob/c6847a957dc4f96836a5cf13c4eb664fccafaac2/tagalog/io.py#L24-L68
def lines(fp): """ Read lines of UTF-8 from the file-like object given in ``fp``, making sure that when reading from STDIN, reads are at most line-buffered. UTF-8 decoding errors are handled silently. Invalid characters are replaced by U+FFFD REPLACEMENT CHARACTER. Line endings are normalised to newlines by Python's universal newlines feature. Returns an iterator yielding lines. """ if fp.fileno() == sys.stdin.fileno(): close = True try: # Python 3 fp = open(fp.fileno(), mode='r', buffering=BUF_LINEBUFFERED, errors='replace') decode = False except TypeError: fp = os.fdopen(fp.fileno(), 'rU', BUF_LINEBUFFERED) decode = True else: close = False try: # only decode if the fp doesn't already have an encoding decode = (fp.encoding != UTF8) except AttributeError: # fp has been opened in binary mode decode = True try: while 1: l = fp.readline() if l: if decode: l = l.decode(UTF8, 'replace') yield l else: break finally: if close: fp.close()
[ "def", "lines", "(", "fp", ")", ":", "if", "fp", ".", "fileno", "(", ")", "==", "sys", ".", "stdin", ".", "fileno", "(", ")", ":", "close", "=", "True", "try", ":", "# Python 3", "fp", "=", "open", "(", "fp", ".", "fileno", "(", ")", ",", "mo...
Read lines of UTF-8 from the file-like object given in ``fp``, making sure that when reading from STDIN, reads are at most line-buffered. UTF-8 decoding errors are handled silently. Invalid characters are replaced by U+FFFD REPLACEMENT CHARACTER. Line endings are normalised to newlines by Python's universal newlines feature. Returns an iterator yielding lines.
[ "Read", "lines", "of", "UTF", "-", "8", "from", "the", "file", "-", "like", "object", "given", "in", "fp", "making", "sure", "that", "when", "reading", "from", "STDIN", "reads", "are", "at", "most", "line", "-", "buffered", "." ]
python
train
27.466667
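Typical use of `lines` is iterating over STDIN; a minimal sketch, assuming the import path matches the record.

```python
import sys
from tagalog.io import lines  # import path assumed from the record

# Echo STDIN upper-cased; decoding errors are already handled by lines().
for line in lines(sys.stdin):
    sys.stdout.write(line.upper())
```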
twisted/twistedchecker
twistedchecker/core/runner.py
https://github.com/twisted/twistedchecker/blob/80060e1c07cf5d67d747dbec8ec0e5ee913e8929/twistedchecker/core/runner.py#L181-L190
def restrictCheckers(self, allowedMessages): """ Unregister useless checkers to speed up twistedchecker. @param allowedMessages: output messages allowed in twistedchecker """ uselessCheckers = self.findUselessCheckers(allowedMessages) # Unregister these checkers for checker in uselessCheckers: self.unregisterChecker(checker)
[ "def", "restrictCheckers", "(", "self", ",", "allowedMessages", ")", ":", "uselessCheckers", "=", "self", ".", "findUselessCheckers", "(", "allowedMessages", ")", "# Unregister these checkers", "for", "checker", "in", "uselessCheckers", ":", "self", ".", "unregisterCh...
Unregister useless checkers to speed up twistedchecker. @param allowedMessages: output messages allowed in twistedchecker
[ "Unregister", "useless", "checkers", "to", "speed", "up", "twistedchecker", "." ]
python
train
38.6
EnigmaBridge/client.py
ebclient/eb_create_uo.py
https://github.com/EnigmaBridge/client.py/blob/0fafe3902da394da88e9f960751d695ca65bbabd/ebclient/eb_create_uo.py#L446-L454
def set_flags(self): """ Set flags representing generation way accordingly - commkeys are client generated, app key is server generated. :return: """ offset = self.template['flagoffset']//8 # comm keys provided? self.tpl_buff = bytes_transform(self.tpl_buff, offset+1, offset+2, lambda x: self.set_flag_bit(x))
[ "def", "set_flags", "(", "self", ")", ":", "offset", "=", "self", ".", "template", "[", "'flagoffset'", "]", "//", "8", "# comm keys provided?", "self", ".", "tpl_buff", "=", "bytes_transform", "(", "self", ".", "tpl_buff", ",", "offset", "+", "1", ",", ...
Set flags representing the generation mode accordingly - commkeys are client generated, app key is server generated. :return:
[ "Set", "flags", "representing", "generation", "way", "accordingly", "-", "commkeys", "are", "client", "generated", "app", "key", "is", "server", "generated", ".", ":", "return", ":" ]
python
train
39.888889
ergoithz/browsepy
browsepy/compat.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/compat.py#L297-L312
def re_escape(pattern, chars=frozenset("()[]{}?*+|^$\\.-#")): ''' Escape all special regex characters in pattern. Logic taken from regex module. :param pattern: regex pattern to escape :type patterm: str :returns: escaped pattern :rtype: str ''' escape = '\\{}'.format return ''.join( escape(c) if c in chars or c.isspace() else '\\000' if c == '\x00' else c for c in pattern )
[ "def", "re_escape", "(", "pattern", ",", "chars", "=", "frozenset", "(", "\"()[]{}?*+|^$\\\\.-#\"", ")", ")", ":", "escape", "=", "'\\\\{}'", ".", "format", "return", "''", ".", "join", "(", "escape", "(", "c", ")", "if", "c", "in", "chars", "or", "c",...
Escape all special regex characters in pattern. Logic taken from regex module. :param pattern: regex pattern to escape :type pattern: str :returns: escaped pattern :rtype: str
[ "Escape", "all", "special", "regex", "characters", "in", "pattern", ".", "Logic", "taken", "from", "regex", "module", "." ]
python
train
27.1875
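Since the full body of `re_escape` appears in the record above, its behaviour can be checked directly; a standalone demo:

```python
import re

def re_escape(pattern, chars=frozenset("()[]{}?*+|^$\\.-#")):
    # Copied from the record above: escape regex metacharacters and whitespace.
    escape = '\\{}'.format
    return ''.join(
        escape(c) if c in chars or c.isspace() else
        '\\000' if c == '\x00' else
        c
        for c in pattern
    )

escaped = re_escape('file (1).txt')
print(escaped)                        # file\ \(1\)\.txt
assert re.match(escaped, 'file (1).txt')
```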
cdgriffith/Reusables
reusables/namespace.py
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/namespace.py#L220-L241
def list(self, item, default=None, spliter=",", strip=True, mod=None): """ Return value of key as a list :param item: key of value to transform :param mod: function to map against list :param default: value to return if item does not exist :param spliter: character to split str on :param strip: clean the list with the `strip` :return: list of items """ try: item = self.__getattr__(item) except AttributeError as err: if default is not None: return default raise err if strip: item = item.lstrip("[").rstrip("]") out = [x.strip() if strip else x for x in item.split(spliter)] if mod: return list(map(mod, out)) return out
[ "def", "list", "(", "self", ",", "item", ",", "default", "=", "None", ",", "spliter", "=", "\",\"", ",", "strip", "=", "True", ",", "mod", "=", "None", ")", ":", "try", ":", "item", "=", "self", ".", "__getattr__", "(", "item", ")", "except", "At...
Return value of key as a list :param item: key of value to transform :param mod: function to map against list :param default: value to return if item does not exist :param spliter: character to split str on :param strip: clean the list with the `strip` :return: list of items
[ "Return", "value", "of", "key", "as", "a", "list" ]
python
train
35.954545
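A hedged usage sketch for the `list` helper; it assumes `Namespace` is importable from `reusables` and accepts keyword attributes, which is not shown in the record.

```python
from reusables import Namespace  # assumed export and constructor behaviour

ns = Namespace(colors="[red, green, blue]", ports="8080,8081")
print(ns.list("colors"))               # ['red', 'green', 'blue']
print(ns.list("ports", mod=int))       # [8080, 8081]
print(ns.list("missing", default=[]))  # [] -- default returned on AttributeError
```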
PyCQA/pylint
pylint/checkers/base.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/base.py#L717-L724
def visit_unaryop(self, node): """check use of the non-existent ++ and -- operator operator""" if ( (node.op in "+-") and isinstance(node.operand, astroid.UnaryOp) and (node.operand.op == node.op) ): self.add_message("nonexistent-operator", node=node, args=node.op * 2)
[ "def", "visit_unaryop", "(", "self", ",", "node", ")", ":", "if", "(", "(", "node", ".", "op", "in", "\"+-\"", ")", "and", "isinstance", "(", "node", ".", "operand", ",", "astroid", ".", "UnaryOp", ")", "and", "(", "node", ".", "operand", ".", "op"...
check use of the non-existent ++ and -- operators
[ "check", "use", "of", "the", "non", "-", "existent", "++", "and", "--", "operator", "operator" ]
python
test
41.75
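The check above works because Python parses `++x` as two nested unary plus operations rather than an increment, which the standard `ast` module confirms:

```python
import ast

node = ast.parse("++count").body[0].value
# UnaryOp(op=UAdd, operand=UnaryOp(op=UAdd, operand=Name('count')))
print(type(node).__name__, type(node.operand).__name__)  # UnaryOp UnaryOp
```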
yvesalexandre/bandicoot
bandicoot/io.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L607-L721
def read_orange(user_id, records_path, antennas_path=None, attributes_path=None, recharges_path=None, network=False, describe=True, warnings=True, errors=False): """ Load user records from a CSV file in *orange* format: ``call_record_type;basic_service;user_msisdn;call_partner_identity;datetime;call_duration;longitude;latitude`` ``basic_service`` takes one of the following values: - 11: telephony; - 12: emergency calls; - 21: short message (in) - 22: short message (out) Parameters ---------- user_id : str ID of the user (filename) records_path : str Path of the directory all the user files. antennas_path : str, optional Path of the CSV file containing (antenna_id, latitude, longitude) values. This allows antennas to be mapped to their locations. attributes_path : str, optional Path of the directory containing attributes files (``key, value`` CSV file). Attributes can for instance be variables such as like, age, or gender. Attributes can be helpful to compute specific metrics. network : bool, optional If network is True, bandicoot loads the network of the user's correspondants from the same path. Defaults to False. describe : boolean If describe is True, it will print a description of the loaded user to the standard output. errors : boolean If errors is True, returns a tuple (user, errors), where user is the user object and errors are the records which could not be loaded. """ def _parse(reader): records = [] antennas = dict() for row in reader: direction = 'out' if row['call_record_type'] == '1' else 'in' interaction = 'call' if row[ 'basic_service'] in ['11', '12'] else 'text' contact = row['call_partner_identity'] date = datetime.strptime(row['datetime'], "%Y-%m-%d %H:%M:%S") call_duration = float(row['call_duration']) if row[ 'call_duration'] != "" else None lon, lat = float(row['longitude']), float(row['latitude']) latlon = (lat, lon) antenna = None for key, value in antennas.items(): if latlon == value: antenna = key break if antenna is None: antenna = len(antennas) + 1 antennas[antenna] = latlon position = Position(antenna=antenna, location=latlon) record = Record(direction=direction, interaction=interaction, correspondent_id=contact, call_duration=call_duration, datetime=date, position=position) records.append(record) return records, antennas user_records = os.path.join(records_path, user_id + ".csv") fields = ['call_record_type', 'basic_service', 'user_msisdn', 'call_partner_identity', 'datetime', 'call_duration', 'longitude', 'latitude'] with open(user_records, 'r') as f: reader = csv.DictReader(f, delimiter=";", fieldnames=fields) records, antennas = _parse(reader) attributes = None if attributes_path is not None: user_attributes = os.path.join(attributes_path, user_id + '.csv') attributes = _load_attributes(user_attributes) recharges = None if recharges_path is not None: user_recharges = os.path.join(recharges_path, user_id + '.csv') recharges = _load_recharges(user_recharges) user, bad_records = load(user_id, records, antennas, attributes, recharges, antennas_path, attributes_path, recharges_path, describe=False, warnings=warnings) if network is True: user.network = _read_network(user, records_path, attributes_path, read_orange, antennas_path, warnings) user.recompute_missing_neighbors() if describe: user.describe() if errors: return user, bad_records return user
[ "def", "read_orange", "(", "user_id", ",", "records_path", ",", "antennas_path", "=", "None", ",", "attributes_path", "=", "None", ",", "recharges_path", "=", "None", ",", "network", "=", "False", ",", "describe", "=", "True", ",", "warnings", "=", "True", ...
Load user records from a CSV file in *orange* format: ``call_record_type;basic_service;user_msisdn;call_partner_identity;datetime;call_duration;longitude;latitude`` ``basic_service`` takes one of the following values: - 11: telephony; - 12: emergency calls; - 21: short message (in) - 22: short message (out) Parameters ---------- user_id : str ID of the user (filename) records_path : str Path of the directory containing all the user files. antennas_path : str, optional Path of the CSV file containing (antenna_id, latitude, longitude) values. This allows antennas to be mapped to their locations. attributes_path : str, optional Path of the directory containing attributes files (``key, value`` CSV file). Attributes can for instance be variables such as like, age, or gender. Attributes can be helpful to compute specific metrics. network : bool, optional If network is True, bandicoot loads the network of the user's correspondents from the same path. Defaults to False. describe : boolean If describe is True, it will print a description of the loaded user to the standard output. errors : boolean If errors is True, returns a tuple (user, errors), where user is the user object and errors are the records which could not be loaded.
[ "Load", "user", "records", "from", "a", "CSV", "file", "in", "*", "orange", "*", "format", ":" ]
python
train
36.13913
horazont/aiosasl
aiosasl/__init__.py
https://github.com/horazont/aiosasl/blob/af58bf30f688757e58af6e87892d35a8ce798482/aiosasl/__init__.py#L316-L340
def from_reply(cls, state): """ Comptaibility layer for old :class:`SASLInterface` implementations. Accepts the follwing set of :class:`SASLState` or strings and maps the strings to :class:`SASLState` elements as follows: ``"challenge"`` :member:`SASLState.CHALLENGE` ``"failue"`` :member:`SASLState.FAILURE` ``"success"`` :member:`SASLState.SUCCESS` """ if state in (SASLState.FAILURE, SASLState.SUCCESS, SASLState.CHALLENGE): return state if state in ("failure", "success", "challenge"): return SASLState(state) else: raise RuntimeError("invalid SASL state", state)
[ "def", "from_reply", "(", "cls", ",", "state", ")", ":", "if", "state", "in", "(", "SASLState", ".", "FAILURE", ",", "SASLState", ".", "SUCCESS", ",", "SASLState", ".", "CHALLENGE", ")", ":", "return", "state", "if", "state", "in", "(", "\"failure\"", ...
Compatibility layer for old :class:`SASLInterface` implementations. Accepts the following set of :class:`SASLState` or strings and maps the strings to :class:`SASLState` elements as follows: ``"challenge"`` :member:`SASLState.CHALLENGE` ``"failure"`` :member:`SASLState.FAILURE` ``"success"`` :member:`SASLState.SUCCESS`
[ "Comptaibility", "layer", "for", "old", ":", "class", ":", "SASLInterface", "implementations", "." ]
python
test
29.88
osrg/ryu
ryu/lib/packet/packet_utils.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/packet/packet_utils.py#L106-L138
def fletcher_checksum(data, offset): """ Fletcher Checksum -- Refer to RFC1008 calling with offset == _FLETCHER_CHECKSUM_VALIDATE will validate the checksum without modifying the buffer; a valid checksum returns 0. """ c0 = 0 c1 = 0 pos = 0 length = len(data) data = bytearray(data) data[offset:offset + 2] = [0] * 2 while pos < length: tlen = min(length - pos, _MODX) for d in data[pos:pos + tlen]: c0 += d c1 += c0 c0 %= 255 c1 %= 255 pos += tlen x = ((length - offset - 1) * c0 - c1) % 255 if x <= 0: x += 255 y = 510 - c0 - x if y > 255: y -= 255 data[offset] = x data[offset + 1] = y return (x << 8) | (y & 0xff)
[ "def", "fletcher_checksum", "(", "data", ",", "offset", ")", ":", "c0", "=", "0", "c1", "=", "0", "pos", "=", "0", "length", "=", "len", "(", "data", ")", "data", "=", "bytearray", "(", "data", ")", "data", "[", "offset", ":", "offset", "+", "2",...
Fletcher Checksum -- Refer to RFC1008 calling with offset == _FLETCHER_CHECKSUM_VALIDATE will validate the checksum without modifying the buffer; a valid checksum returns 0.
[ "Fletcher", "Checksum", "--", "Refer", "to", "RFC1008" ]
python
train
22.575758
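A hedged sketch of embedding the checksum computed above; the payload bytes and offset are made up, and the caller writes the result back because `fletcher_checksum` operates on a copy of the buffer.

```python
# Hypothetical usage; payload and checksum offset are made up.
from ryu.lib.packet import packet_utils

buf = bytearray(b'\x01\x02\x03\x00\x00\x04\x05')  # bytes 3-4 reserved for checksum
csum = packet_utils.fletcher_checksum(buf, 3)
buf[3] = (csum >> 8) & 0xff
buf[4] = csum & 0xff
print(buf.hex())
```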
jbaiter/gphoto2-cffi
gphoto2cffi/gphoto2.py
https://github.com/jbaiter/gphoto2-cffi/blob/2876d15a58174bd24613cd4106a3ef0cefd48050/gphoto2cffi/gphoto2.py#L468-L511
def set(self, value): """ Update value of the option. Only possible for options with :py:attr:`readonly` set to `False`. If :py:attr:`type` is `choice`, the value must be one of the :py:attr:`choices`. If :py:attr:`type` is `range`, the value must be in the range described by :py:attr:`range`. :param value: Value to set """ if self.readonly: raise ValueError("Option is read-only.") val_p = None if self.type == 'selection': if value not in self.choices: raise ValueError("Invalid choice (valid: {0})".format( repr(self.choices))) val_p = ffi.new("const char[]", value.encode()) elif self.type == 'text': if not isinstance(value, basestring): raise ValueError("Value must be a string.") val_p = ffi.new("char**") val_p[0] = ffi.new("char[]", value.encode()) elif self.type == 'range': if value < self.range.min or value > self.range.max: raise ValueError("Value exceeds valid range ({0}-{1}." .format(self.range.min, self.range.max)) if value % self.range.step: raise ValueError("Value can only be changed in steps of {0}." .format(self.range.step)) val_p = ffi.new("float*") val_p[0] = value elif self.type == 'toggle': if not isinstance(value, bool): raise ValueError("Value must be bool.") val_p = ffi.new("int*") val_p[0] = int(value) elif self.type == 'date': val_p = ffi.new("int*") val_p[0] = value lib.gp_widget_set_value(self._widget, val_p) lib.gp_camera_set_config(self._cam._cam, self._root, self._cam._ctx) self.value = value
[ "def", "set", "(", "self", ",", "value", ")", ":", "if", "self", ".", "readonly", ":", "raise", "ValueError", "(", "\"Option is read-only.\"", ")", "val_p", "=", "None", "if", "self", ".", "type", "==", "'selection'", ":", "if", "value", "not", "in", "...
Update value of the option.

Only possible for options with :py:attr:`readonly` set to `False`.
If :py:attr:`type` is `choice`, the value must be one of the
:py:attr:`choices`.
If :py:attr:`type` is `range`, the value must be in the range described
by :py:attr:`range`.

:param value: Value to set
[ "Update", "value", "of", "the", "option", "." ]
python
train
43.272727
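A hypothetical usage sketch; the camera setup and the 'imgsettings'/'iso' option path are assumptions, not taken from the record:

import gphoto2cffi as gp

cam = gp.Camera()                        # assumed: first detected camera
opt = cam.config['imgsettings']['iso']   # hypothetical option path
if not opt.readonly and '800' in opt.choices:
    opt.set('800')                       # validated against opt.choices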
Azure/azure-cosmos-python
azure/cosmos/execution_context/base_execution_context.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/execution_context/base_execution_context.py#L85-L104
def next(self):
    """Returns the next query result.

    :return:
        The next query result.
    :rtype: dict
    :raises StopIteration: If no more result is left.
    """
    if self._has_finished:
        raise StopIteration

    if not len(self._buffer):
        results = self.fetch_next_block()
        self._buffer.extend(results)

    if not len(self._buffer):
        raise StopIteration

    return self._buffer.popleft()
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_has_finished", ":", "raise", "StopIteration", "if", "not", "len", "(", "self", ".", "_buffer", ")", ":", "results", "=", "self", ".", "fetch_next_block", "(", ")", "self", ".", "_buffer", ".", ...
Returns the next query result.

:return:
    The next query result.
:rtype: dict
:raises StopIteration: If no more result is left.
[ "Returns", "the", "next", "query", "result", ".", ":", "return", ":", "The", "next", "query", "result", ".", ":", "rtype", ":", "dict", ":", "raises", "StopIteration", ":", "If", "no", "more", "result", "is", "left", "." ]
python
train
26.3
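A drain-loop sketch; `ctx` stands in for any execution-context instance implementing the interface above:

results = []
while True:
    try:
        results.append(ctx.next())  # fetches a new block when the buffer empties
    except StopIteration:
        break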
awslabs/serverless-application-model
samtranslator/model/__init__.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/__init__.py#L129-L140
def _validate_logical_id(cls, logical_id):
    """Validates that the provided logical id is an alphanumeric string.

    :param str logical_id: the logical id to validate
    :returns: True if the logical id is valid
    :rtype: bool
    :raises InvalidResourceException: if the logical id is invalid
    """
    pattern = re.compile(r'^[A-Za-z0-9]+$')
    if logical_id is not None and pattern.match(logical_id):
        return True
    raise InvalidResourceException(logical_id, "Logical ids must be alphanumeric.")
[ "def", "_validate_logical_id", "(", "cls", ",", "logical_id", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r'^[A-Za-z0-9]+$'", ")", "if", "logical_id", "is", "not", "None", "and", "pattern", ".", "match", "(", "logical_id", ")", ":", "return", "Tr...
Validates that the provided logical id is an alphanumeric string.

:param str logical_id: the logical id to validate
:returns: True if the logical id is valid
:rtype: bool
:raises InvalidResourceException: if the logical id is invalid
[ "Validates", "that", "the", "provided", "logical", "id", "is", "an", "alphanumeric", "string", "." ]
python
train
44.25
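A behaviour sketch; `Resource` stands in for the class that owns this classmethod (not shown in the record):

Resource._validate_logical_id('MyFunction123')  # -> True
Resource._validate_logical_id('my-function')    # raises InvalidResourceException
Resource._validate_logical_id(None)             # raises InvalidResourceException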
jason-weirather/py-seq-tools
seqtools/old_graph.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/old_graph.py#L25-L34
def get_report(self):
    """ describe the graph

    :returns: report
    :rtype: string
    """
    ostr = ''
    ostr += "Nodes: " + str(len(self.__nodes.keys())) + "\n"
    ostr += "Edges: " + str(len(self.__edges.keys())) + "\n"
    return ostr
[ "def", "get_report", "(", "self", ")", ":", "ostr", "=", "''", "ostr", "+=", "\"Nodes: \"", "+", "str", "(", "len", "(", "self", ".", "__nodes", ".", "keys", "(", ")", ")", ")", "+", "\"\\n\"", "ostr", "+=", "\"Edges: \"", "+", "str", "(", "len", ...
describe the graph

:returns: report
:rtype: string
[ "describe", "the", "graph" ]
python
train
23.2
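A usage sketch, assuming `g` is a populated Graph instance from this module:

print(g.get_report())
# Nodes: 3
# Edges: 2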
heikomuller/sco-datastore
scodata/datastore.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/datastore.py#L418-L454
def delete_object(self, identifier, erase=False):
    """Delete the entry with given identifier in the database. Returns the
    handle for the deleted object or None if object identifier is unknown.
    If the read-only property of the object is set to true a ValueError is
    raised.

    Parameters
    ----------
    identifier : string
        Unique object identifier
    erase : Boolean, optional
        If true, the record will be deleted from the database. Otherwise,
        the active flag will be set to False to support provenance
        tracking.

    Returns
    -------
    (Sub-class of)ObjectHandle
    """
    # Get object to ensure that it exists.
    db_object = self.get_object(identifier)
    # Set active flag to False if object exists.
    if db_object is None:
        return None
    # Check whether the read-only property is set to true
    if PROPERTY_READONLY in db_object.properties:
        if db_object.properties[PROPERTY_READONLY]:
            raise ValueError('cannot delete read-only resource')
    if erase:
        # Erase object from database
        self.collection.delete_many({"_id": identifier})
    else:
        # Delete object with given identifier by setting active flag
        # to False
        self.collection.update_one({"_id": identifier},
                                   {'$set': {'active': False}})
    # Return retrieved object or None if it didn't exist.
    return db_object
[ "def", "delete_object", "(", "self", ",", "identifier", ",", "erase", "=", "False", ")", ":", "# Get object to ensure that it exists.", "db_object", "=", "self", ".", "get_object", "(", "identifier", ")", "# Set active flag to False if object exists.", "if", "db_object"...
Delete the entry with given identifier in the database. Returns the handle
for the deleted object or None if object identifier is unknown. If the
read-only property of the object is set to true a ValueError is raised.

Parameters
----------
identifier : string
    Unique object identifier
erase : Boolean, optional
    If true, the record will be deleted from the database. Otherwise, the
    active flag will be set to False to support provenance tracking.

Returns
-------
(Sub-class of)ObjectHandle
[ "Delete", "the", "entry", "with", "given", "identifier", "in", "the", "database", ".", "Returns", "the", "handle", "for", "the", "deleted", "object", "or", "None", "if", "object", "identifier", "is", "unknown", "." ]
python
train
40.378378
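A usage sketch; `store` is assumed to be a datastore instance exposing this method:

handle = store.delete_object('obj-001')              # soft delete: active -> False
if handle is None:
    print('unknown identifier')
handle = store.delete_object('obj-002', erase=True)  # hard delete from MongoDB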
rwl/godot
godot/xdot_parser.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/xdot_parser.py#L314-L324
def _proc_ellipse(self, tokens, filled):
    """ Returns the components of an ellipse. """
    component = Ellipse(pen=self.pen,
                        x_origin=tokens["x0"], y_origin=tokens["y0"],
                        e_width=tokens["w"], e_height=tokens["h"],
                        filled=filled)
    return component
[ "def", "_proc_ellipse", "(", "self", ",", "tokens", ",", "filled", ")", ":", "component", "=", "Ellipse", "(", "pen", "=", "self", ".", "pen", ",", "x_origin", "=", "tokens", "[", "\"x0\"", "]", ",", "y_origin", "=", "tokens", "[", "\"y0\"", "]", ","...
Returns the components of an ellipse.
[ "Returns", "the", "components", "of", "an", "ellipse", "." ]
python
test
36.090909
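An invocation sketch; the token keys mirror what the xdot grammar is expected to supply, and `parser` stands in for the surrounding parser instance:

tokens = {"x0": 10.0, "y0": 20.0, "w": 5.0, "h": 3.0}
ellipse = parser._proc_ellipse(tokens, filled=True)  # Ellipse drawn with parser.pen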
phoebe-project/phoebe2
phoebe/parameters/constraint.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/constraint.py#L158-L162
def requiv_to_pot_contact(requiv, q, sma, compno=1):
    """
    TODO: add documentation
    """
    return ConstraintParameter(
        requiv._bundle,
        "requiv_to_pot_contact({}, {}, {}, {})".format(
            _get_expr(requiv), _get_expr(q), _get_expr(sma), compno))
[ "def", "requiv_to_pot_contact", "(", "requiv", ",", "q", ",", "sma", ",", "compno", "=", "1", ")", ":", "return", "ConstraintParameter", "(", "requiv", ".", "_bundle", ",", "\"requiv_to_pot_contact({}, {}, {}, {})\"", ".", "format", "(", "_get_expr", "(", "requi...
TODO: add documentation
[ "TODO", ":", "add", "documentation" ]
python
train
48.8
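An invocation sketch only; the arguments are assumed to be PHOEBE parameter objects (placeholders here), following how sibling constraint helpers in this module are called:

expr = requiv_to_pot_contact(requiv_param, q_param, sma_param, compno=1)
# expr is a ConstraintParameter wrapping the expression string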
jepegit/cellpy
cellpy/parameters/prmreader.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/parameters/prmreader.py#L93-L156
def _get_prm_file(file_name=None, search_order=None):
    """returns name of the prm file"""
    if file_name is not None:
        if os.path.isfile(file_name):
            return file_name
        else:
            logger.info("Could not find the prm-file")

    default_name = prms._prm_default_name
    prm_globtxt = prms._prm_globtxt
    script_dir = os.path.abspath(os.path.dirname(__file__))

    search_path = dict()
    search_path["curdir"] = os.path.abspath(os.path.dirname(sys.argv[0]))
    search_path["filedir"] = script_dir
    search_path["userdir"] = os.path.expanduser("~")

    if search_order is None:
        search_order = ["userdir", ]  # ["curdir", "filedir", "userdir", ]
    else:
        search_order = search_order

    # The default name for the prm file is at the moment in the script-dir,
    # while default searching is in the userdir (yes, I know):
    prm_default = os.path.join(script_dir, default_name)

    # -searching-----------------------
    search_dict = OrderedDict()
    for key in search_order:
        search_dict[key] = [None, None]
        prm_directory = search_path[key]
        default_file = os.path.join(prm_directory, default_name)
        if os.path.isfile(default_file):
            # noinspection PyTypeChecker
            search_dict[key][0] = default_file
        prm_globtxt_full = os.path.join(prm_directory, prm_globtxt)
        user_files = glob.glob(prm_globtxt_full)
        for f in user_files:
            if os.path.basename(f) != os.path.basename(default_file):
                search_dict[key][1] = f
                break

    # -selecting----------------------
    prm_file = None
    for key, file_list in search_dict.items():
        if file_list[-1]:
            prm_file = file_list[-1]
            break
        else:
            if not prm_file:
                prm_file = file_list[0]

    if prm_file:
        prm_filename = prm_file
    else:
        prm_filename = prm_default

    return prm_filename
[ "def", "_get_prm_file", "(", "file_name", "=", "None", ",", "search_order", "=", "None", ")", ":", "if", "file_name", "is", "not", "None", ":", "if", "os", ".", "path", ".", "isfile", "(", "file_name", ")", ":", "return", "file_name", "else", ":", "log...
returns name of the prm file
[ "returns", "name", "of", "the", "prm", "file" ]
python
train
30.09375
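A search-order sketch; the paths are illustrative:

prm_path = _get_prm_file()                        # user dir first, else the default
prm_path = _get_prm_file('/tmp/my_cellpy.conf')   # explicit file wins if it exists
prm_path = _get_prm_file(search_order=['curdir', 'userdir'])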
toastdriven/restless
restless/dj.py
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/dj.py#L116-L133
def urls(cls, name_prefix=None):
    """
    A convenience method for hooking up the URLs.

    This automatically adds a list & a detail endpoint to your URLconf.

    :param name_prefix: (Optional) A prefix for the URL's name (for
        resolving). The default is ``None``, which will autocreate a prefix
        based on the class name. Ex: ``BlogPostResource`` ->
        ``api_blogpost_list``
    :type name_prefix: string

    :returns: A list of ``url`` objects for ``include(...)``
    """
    return [
        url(r'^$', cls.as_list(),
            name=cls.build_url_name('list', name_prefix)),
        url(r'^(?P<pk>[\w-]+)/$', cls.as_detail(),
            name=cls.build_url_name('detail', name_prefix)),
    ]
[ "def", "urls", "(", "cls", ",", "name_prefix", "=", "None", ")", ":", "return", "[", "url", "(", "r'^$'", ",", "cls", ".", "as_list", "(", ")", ",", "name", "=", "cls", ".", "build_url_name", "(", "'list'", ",", "name_prefix", ")", ")", ",", "url",...
A convenience method for hooking up the URLs.

This automatically adds a list & a detail endpoint to your URLconf.

:param name_prefix: (Optional) A prefix for the URL's name (for resolving).
    The default is ``None``, which will autocreate a prefix based on the
    class name. Ex: ``BlogPostResource`` -> ``api_blogpost_list``
:type name_prefix: string

:returns: A list of ``url`` objects for ``include(...)``
[ "A", "convenience", "method", "for", "hooking", "up", "the", "URLs", "." ]
python
train
41
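A URLconf sketch matching the docstring's example; `BlogPostResource` and its module are hypothetical:

from django.conf.urls import include, url

from .resources import BlogPostResource  # hypothetical module

urlpatterns = [
    url(r'^api/posts/', include(BlogPostResource.urls())),
]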
materialsproject/pymatgen-db
matgendb/vv/util.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/vv/util.py#L161-L181
def dict_expand(o):
    """Expand keys in a dict with '.' in them into sub-dictionaries, e.g.
       {'a.b.c': 'foo'} ==> {'a': {'b': {'c': 'foo'}}}
    """
    r = {}
    for k, v in o.items():
        if isinstance(k, str):
            k = k.replace('$', '_')
        if "." in k:
            sub_r, keys = r, k.split('.')
            # create sub-dicts until last part of key
            for k2 in keys[:-1]:
                sub_r[k2] = {}
                sub_r = sub_r[k2]  # descend
            # assign last part of key to value
            sub_r[keys[-1]] = v
        else:
            r[k] = v
    return r
[ "def", "dict_expand", "(", "o", ")", ":", "r", "=", "{", "}", "for", "k", ",", "v", "in", "o", ".", "items", "(", ")", ":", "if", "isinstance", "(", "k", ",", "str", ")", ":", "k", "=", "k", ".", "replace", "(", "'$'", ",", "'_'", ")", "i...
Expand keys in a dict with '.' in them into sub-dictionaries, e.g.
{'a.b.c': 'foo'} ==> {'a': {'b': {'c': 'foo'}}}
[ "Expand", "keys", "in", "a", "dict", "with", ".", "in", "them", "into", "sub", "-", "dictionaries", "e", ".", "g", "." ]
python
train
32.190476
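A behaviour sketch derived from the code above:

dict_expand({'a.b.c': 'foo'})    # -> {'a': {'b': {'c': 'foo'}}}
dict_expand({'x': 1, 'y.z': 2})  # -> {'x': 1, 'y': {'z': 2}}
dict_expand({'a$b': 3})          # '$' is rewritten to '_': {'a_b': 3}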
apache/incubator-superset
superset/models/helpers.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L68-L73
def _unique_constrains(cls):
    """Get all (single column and multi column) unique constraints"""
    unique = [{c.name for c in u.columns} for u in cls.__table_args__
              if isinstance(u, UniqueConstraint)]
    unique.extend({c.name} for c in cls.__table__.columns if c.unique)
    return unique
[ "def", "_unique_constrains", "(", "cls", ")", ":", "unique", "=", "[", "{", "c", ".", "name", "for", "c", "in", "u", ".", "columns", "}", "for", "u", "in", "cls", ".", "__table_args__", "if", "isinstance", "(", "u", ",", "UniqueConstraint", ")", "]",...
Get all (single column and multi column) unique constraints
[ "Get", "all", "(", "single", "column", "and", "multi", "column", ")", "unique", "constraints" ]
python
train
53.666667
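A sketch with a toy SQLAlchemy declarative model; applying the helper to it directly is an assumption for illustration:

from sqlalchemy import Column, Integer, String, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    __table_args__ = (UniqueConstraint('email', 'tenant'),)
    id = Column(Integer, primary_key=True)
    email = Column(String)
    tenant = Column(String)
    handle = Column(String, unique=True)

# _unique_constrains(User) -> [{'email', 'tenant'}, {'handle'}]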
astropy/photutils
photutils/aperture/core.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/core.py#L601-L647
def _to_pixel_params(self, wcs, mode='all'):
    """
    Convert the sky aperture parameters to those for a pixel
    aperture.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The world coordinate system (WCS) transformation to use.

    mode : {'all', 'wcs'}, optional
        Whether to do the transformation including distortions
        (``'all'``; default) or only the core WCS transformation
        (``'wcs'``).

    Returns
    -------
    pixel_params : dict
        A dictionary of parameters for an equivalent pixel aperture.
    """
    pixel_params = {}

    x, y = skycoord_to_pixel(self.positions, wcs, mode=mode)
    pixel_params['positions'] = np.array([x, y]).transpose()

    # The aperture object must have a single value for each shape
    # parameter so we must use a single pixel scale for all positions.
    # Here, we define the scale at the WCS CRVAL position.
    crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs),
                     unit=wcs.wcs.cunit)
    scale, angle = pixel_scale_angle_at_skycoord(crval, wcs)

    params = self._params[:]
    theta_key = 'theta'
    if theta_key in self._params:
        pixel_params[theta_key] = (self.theta + angle).to(u.radian).value
        params.remove(theta_key)

    param_vals = [getattr(self, param) for param in params]
    if param_vals[0].unit.physical_type == 'angle':
        for param, param_val in zip(params, param_vals):
            pixel_params[param] = (param_val / scale).to(u.pixel).value
    else:  # pixels
        for param, param_val in zip(params, param_vals):
            pixel_params[param] = param_val.value

    return pixel_params
[ "def", "_to_pixel_params", "(", "self", ",", "wcs", ",", "mode", "=", "'all'", ")", ":", "pixel_params", "=", "{", "}", "x", ",", "y", "=", "skycoord_to_pixel", "(", "self", ".", "positions", ",", "wcs", ",", "mode", "=", "mode", ")", "pixel_params", ...
Convert the sky aperture parameters to those for a pixel aperture.

Parameters
----------
wcs : `~astropy.wcs.WCS`
    The world coordinate system (WCS) transformation to use.

mode : {'all', 'wcs'}, optional
    Whether to do the transformation including distortions (``'all'``;
    default) or only the core WCS transformation (``'wcs'``).

Returns
-------
pixel_params : dict
    A dictionary of parameters for an equivalent pixel aperture.
[ "Convert", "the", "sky", "aperture", "parameters", "to", "those", "for", "a", "pixel", "aperture", "." ]
python
train
38.148936
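A public-API sketch; the sky-to-pixel conversion above runs internally when a sky aperture is converted, and `wcs` is assumed to come from an image header:

import astropy.units as u
from astropy.coordinates import SkyCoord
from photutils.aperture import SkyCircularAperture

pos = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg)
sky_aper = SkyCircularAperture(pos, r=0.5 * u.arcsec)
pix_aper = sky_aper.to_pixel(wcs)  # calls _to_pixel_params under the hood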
xflows/rdm
rdm/db/context.py
https://github.com/xflows/rdm/blob/d984e2a0297e5fa8d799953bbd0dba79b05d403d/rdm/db/context.py#L138-L153
def rows(self, table, cols):
    '''
    Fetches rows from the local cache or from the db if there's no cache.

    :param table: table name to select
    :cols: list of columns to select
    :return: list of rows
    :rtype: list
    '''
    if self.orng_tables:
        data = []
        for ex in self.orng_tables[table]:
            data.append([ex[str(col)] for col in cols])
        return data
    else:
        return self.fetch(table, cols)
[ "def", "rows", "(", "self", ",", "table", ",", "cols", ")", ":", "if", "self", ".", "orng_tables", ":", "data", "=", "[", "]", "for", "ex", "in", "self", ".", "orng_tables", "[", "table", "]", ":", "data", ".", "append", "(", "[", "ex", "[", "s...
Fetches rows from the local cache or from the db if there's no cache.

:param table: table name to select
:cols: list of columns to select
:return: list of rows
:rtype: list
[ "Fetches", "rows", "from", "the", "local", "cache", "or", "from", "the", "db", "if", "there", "s", "no", "cache", "." ]
python
train
31.625
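A usage sketch; `ctx` is assumed to be a context instance from this module with a populated cache or a live DB connection:

for row in ctx.rows('customers', ['id', 'name']):
    print(row)  # e.g. [1, 'Alice']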