repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
O365/python-o365
O365/message.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/message.py#L613-L638
def reply(self, to_all=True):
    """Create a draft reply to this message.

    :param bool to_all: reply to every recipient instead of only the sender
    :return: the newly created reply message, or None if the request failed
    :rtype: Message
    """
    # A reply can only target a message that already exists in the cloud
    # and is not itself an unsent draft.
    if not self.object_id or self.__is_draft:
        raise RuntimeError("Can't reply to this message")

    endpoint = 'create_reply_all' if to_all else 'create_reply'
    url = self.build_url(
        self._endpoints.get(endpoint).format(id=self.object_id))

    response = self.con.post(url)
    if not response:
        return None

    # Everything received from cloud must be passed as self._cloud_data_key
    return self.__class__(parent=self, **{self._cloud_data_key: response.json()})
[ "def", "reply", "(", "self", ",", "to_all", "=", "True", ")", ":", "if", "not", "self", ".", "object_id", "or", "self", ".", "__is_draft", ":", "raise", "RuntimeError", "(", "\"Can't reply to this message\"", ")", "if", "to_all", ":", "url", "=", "self", ...
Creates a new message that is a reply to this message :param bool to_all: whether or not to replies to all the recipients instead to just the sender :return: new message :rtype: Message
[ "Creates", "a", "new", "message", "that", "is", "a", "reply", "to", "this", "message" ]
python
train
pdkit/pdkit
pdkit/tremor_processor.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/tremor_processor.py#L94-L115
def filter_signal(self, data_frame, ts='mag_sum_acc'):
    """High-pass filter one column of a data frame.

    Implements the filtering suggested in :cite:`Kassavetis2015`: a
    `Butterworth <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html>`_
    high pass filter is designed from the processor settings and then
    applied along one dimension with a
    `digital filter <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html>`_.

    :param data_frame: the input data frame
    :type data_frame: pandas.DataFrame
    :param ts: time series name of data frame to filter
    :type ts: str
    :return data_frame: adds a column named 'filtered_signal' to the data frame
    :rtype data_frame: pandas.DataFrame
    """
    # scipy expects the cutoff as a fraction of the Nyquist rate (fs / 2).
    normalised_cutoff = 2 * self.cutoff_frequency / self.sampling_frequency
    b, a = signal.butter(self.filter_order, normalised_cutoff, 'high', analog=False)
    data_frame['filtered_signal'] = signal.lfilter(b, a, data_frame[ts].values)
    logging.debug("filter signal")
    return data_frame
[ "def", "filter_signal", "(", "self", ",", "data_frame", ",", "ts", "=", "'mag_sum_acc'", ")", ":", "b", ",", "a", "=", "signal", ".", "butter", "(", "self", ".", "filter_order", ",", "2", "*", "self", ".", "cutoff_frequency", "/", "self", ".", "samplin...
This method filters a data frame signal as suggested in :cite:`Kassavetis2015`. First step is to high \ pass filter the data frame using a \ `Butterworth <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html>`_ \ digital and analog filter. Then this method filters the data frame along one-dimension using a \ `digital filter <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html>`_. :param data_frame: the input data frame :type data_frame: pandas.DataFrame :param ts: time series name of data frame to filter :type ts: str :return data_frame: adds a column named 'filtered_signal' to the data frame :rtype data_frame: pandas.DataFrame
[ "This", "method", "filters", "a", "data", "frame", "signal", "as", "suggested", "in", ":", "cite", ":", "Kassavetis2015", ".", "First", "step", "is", "to", "high", "\\", "pass", "filter", "the", "data", "frame", "using", "a", "\\", "Butterworth", "<https",...
python
train
rootpy/rootpy
rootpy/plotting/utils.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/utils.py#L151-L194
def _limits_helper(x1, x2, a, b, snap=False): """ Given x1, x2, a, b, where: x1 - x0 x3 - x2 a = ------- , b = ------- x3 - x0 x3 - x0 determine the points x0 and x3: x0 x1 x2 x3 |----------|-----------------|--------| """ if x2 < x1: raise ValueError("x2 < x1") if a + b >= 1: raise ValueError("a + b >= 1") if a < 0: raise ValueError("a < 0") if b < 0: raise ValueError("b < 0") if snap: if x1 >= 0: x1 = 0 a = 0 elif x2 <= 0: x2 = 0 b = 0 if x1 == x2 == 0: # garbage in garbage out return 0., 1. elif x1 == x2: # garbage in garbage out return x1 - 1., x1 + 1. if a == 0 and b == 0: return x1, x2 elif a == 0: return x1, (x2 - b * x1) / (1 - b) elif b == 0: return (x1 - a * x2) / (1 - a), x2 x0 = ((b / a) * x1 + x2 - (x2 - x1) / (1 - a - b)) / (1 + b / a) x3 = (x2 - x1) / (1 - a - b) + x0 return x0, x3
[ "def", "_limits_helper", "(", "x1", ",", "x2", ",", "a", ",", "b", ",", "snap", "=", "False", ")", ":", "if", "x2", "<", "x1", ":", "raise", "ValueError", "(", "\"x2 < x1\"", ")", "if", "a", "+", "b", ">=", "1", ":", "raise", "ValueError", "(", ...
Given x1, x2, a, b, where: x1 - x0 x3 - x2 a = ------- , b = ------- x3 - x0 x3 - x0 determine the points x0 and x3: x0 x1 x2 x3 |----------|-----------------|--------|
[ "Given", "x1", "x2", "a", "b", "where", ":" ]
python
train
3ll3d00d/vibe
backend/src/analyser/common/signal.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/analyser/common/signal.py#L155-L177
def spectrogram(self, ref=None, segmentLengthMultiplier=1, window='hann'):
    """
    analyses the source to generate a spectrogram
    :param ref: the reference value for dB purposes; when given, the
        spectrum is converted to dB via librosa.
    :param segmentLengthMultiplier: allow for increased resolution.
    :param window: window function passed through to scipy.
    :return:
        f : ndarray
        Array of sample frequencies.
        t : ndarray
        Array of time slices.
        Sxx : ndarray
        linear spectrum values (dB values when ref is supplied).
    """
    # NOTE(review): scipy.signal.spectrogram returns (frequencies, times,
    # Sxx).  The original unpacked these as (t, f, Sxx), so the local
    # names and the docstring were swapped even though the tuple actually
    # returned to callers is unchanged.  Names and doc now match reality.
    f, t, Sxx = signal.spectrogram(self.samples,
                                   self.fs,
                                   window=window,
                                   nperseg=self.getSegmentLength() * segmentLengthMultiplier,
                                   detrend=False,
                                   scaling='spectrum')
    # 'spectrum' scaling yields power; take the root for linear amplitude.
    Sxx = np.sqrt(Sxx)
    if ref is not None:
        Sxx = librosa.amplitude_to_db(Sxx, ref)
    return f, t, Sxx
[ "def", "spectrogram", "(", "self", ",", "ref", "=", "None", ",", "segmentLengthMultiplier", "=", "1", ",", "window", "=", "'hann'", ")", ":", "t", ",", "f", ",", "Sxx", "=", "signal", ".", "spectrogram", "(", "self", ".", "samples", ",", "self", ".",...
analyses the source to generate a spectrogram :param ref: the reference value for dB purposes. :param segmentLengthMultiplier: allow for increased resolution. :return: t : ndarray Array of time slices. f : ndarray Array of sample frequencies. Pxx : ndarray linear spectrum values.
[ "analyses", "the", "source", "to", "generate", "a", "spectrogram", ":", "param", "ref", ":", "the", "reference", "value", "for", "dB", "purposes", ".", ":", "param", "segmentLengthMultiplier", ":", "allow", "for", "increased", "resolution", ".", ":", "return",...
python
train
HiPERCAM/hcam_widgets
hcam_widgets/hcam.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/hcam.py#L1804-L1812
def act(self):
    """
    Carries out the action associated with the Unfreeze button
    """
    glob = get_root(self).globals
    # Re-enable editing of the instrument and run parameter panels ...
    glob.ipars.unfreeze()
    glob.rpars.unfreeze()
    # ... allow loading applications again, then grey this button out.
    glob.observe.load.enable()
    self.disable()
[ "def", "act", "(", "self", ")", ":", "g", "=", "get_root", "(", "self", ")", ".", "globals", "g", ".", "ipars", ".", "unfreeze", "(", ")", "g", ".", "rpars", ".", "unfreeze", "(", ")", "g", ".", "observe", ".", "load", ".", "enable", "(", ")", ...
Carries out the action associated with the Unfreeze button
[ "Carries", "out", "the", "action", "associated", "with", "the", "Unfreeze", "button" ]
python
train
boriel/zxbasic
arch/zx48k/backend/__f16.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__f16.py#L21-L45
def f16(op):
    """ Returns a floating point operand converted to 32 bits unsigned int.
        Negative numbers are returned in 2 complement.

        The result is returned in a tuple (DE, HL) => High16 (Int part),
        Low16 (Decimal part)
    """
    value = float(op)
    sign = value < 0
    magnitude = -value if sign else value

    # Split into 16.16 fixed point: integer word and scaled fraction word.
    int_part = int(magnitude)
    low = int((magnitude - int_part) * 2 ** 16) & 0xFFFF
    high = int_part & 0xFFFF

    if sign:  # Do C2: two's complement of the combined 32-bit value
        combined = (((high ^ 0xFFFF) << 16) | (low ^ 0xFFFF)) + 1
        low = combined & 0xFFFF
        high = (combined >> 16) & 0xFFFF

    return (high, low)
[ "def", "f16", "(", "op", ")", ":", "op", "=", "float", "(", "op", ")", "negative", "=", "op", "<", "0", "if", "negative", ":", "op", "=", "-", "op", "DE", "=", "int", "(", "op", ")", "HL", "=", "int", "(", "(", "op", "-", "DE", ")", "*", ...
Returns a floating point operand converted to 32 bits unsigned int. Negative numbers are returned in 2 complement. The result is returned in a tuple (DE, HL) => High16 (Int part), Low16 (Decimal part)
[ "Returns", "a", "floating", "point", "operand", "converted", "to", "32", "bits", "unsigned", "int", ".", "Negative", "numbers", "are", "returned", "in", "2", "complement", "." ]
python
train
LionelAuroux/pyrser
pyrser/parsing/base.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/parsing/base.py#L304-L312
def read_until_eof(self) -> bool:
    """Consume all the stream. Same as EOF in BNF."""
    if not self.read_eof():
        # TODO: read ALL
        self._stream.save_context()
        # Advance one position at a time until the end of the stream.
        while not self.read_eof():
            self._stream.incpos()
        return self._stream.validate_context()
    # Already exhausted: nothing to consume.
    return True
[ "def", "read_until_eof", "(", "self", ")", "->", "bool", ":", "if", "self", ".", "read_eof", "(", ")", ":", "return", "True", "# TODO: read ALL", "self", ".", "_stream", ".", "save_context", "(", ")", "while", "not", "self", ".", "read_eof", "(", ")", ...
Consume all the stream. Same as EOF in BNF.
[ "Consume", "all", "the", "stream", ".", "Same", "as", "EOF", "in", "BNF", "." ]
python
test
mgaitan/waliki
waliki/acl.py
https://github.com/mgaitan/waliki/blob/5baaf6f043275920a1174ff233726f7ff4bfb5cf/waliki/acl.py#L19-L45
def check_perms(perms, user, slug, raise_exception=False):
    """a helper user to check if a user has the permissions for a given slug"""
    required = {perms} if isinstance(perms, string_types) else set(perms)

    # Explicit ACL rules for this slug take precedence over everything else.
    allowed_users = ACLRule.get_users_for(required, slug)
    if allowed_users:
        return user in allowed_users

    # Permissions granted to anonymous users cover everybody.
    if required.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS)):
        return True
    # Logged-in users get an extra set of default permissions.
    if is_authenticated(user) and required.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)):
        return True
    # First check if the user has the permission (even anon users)
    if user.has_perms(['waliki.%s' % p for p in required]):
        return True
    # In case the 403 handler should be called raise the exception
    if raise_exception:
        raise PermissionDenied
    # As the last resort, show the login form
    return False
[ "def", "check_perms", "(", "perms", ",", "user", ",", "slug", ",", "raise_exception", "=", "False", ")", ":", "if", "isinstance", "(", "perms", ",", "string_types", ")", ":", "perms", "=", "{", "perms", "}", "else", ":", "perms", "=", "set", "(", "pe...
a helper user to check if a user has the permissions for a given slug
[ "a", "helper", "user", "to", "check", "if", "a", "user", "has", "the", "permissions", "for", "a", "given", "slug" ]
python
train
secdev/scapy
scapy/utils6.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/utils6.py#L462-L482
def in6_ptoc(addr):
    """
    Converts an IPv6 address in printable representation to RFC
    1924 Compact Representation ;-)

    Returns None on error.
    """
    # Parse the address into four 32-bit words; bail out on bad input.
    try:
        words = struct.unpack("!IIII", inet_pton(socket.AF_INET6, addr))
    except Exception:
        return None

    # Assemble the words into a single 128-bit integer.
    value = 0
    for word in words:
        value = (value << 32) | word

    # Repeated division by 85 yields the base-85 digits, least
    # significant first; reverse for the final string.
    digits = []
    while value:
        value, idx = divmod(value, 85)
        digits.append(_rfc1924map[idx])
    return "".join(reversed(digits))
[ "def", "in6_ptoc", "(", "addr", ")", ":", "try", ":", "d", "=", "struct", ".", "unpack", "(", "\"!IIII\"", ",", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "addr", ")", ")", "except", "Exception", ":", "return", "None", "res", "=", "0", "m", ...
Converts an IPv6 address in printable representation to RFC 1924 Compact Representation ;-) Returns None on error.
[ "Converts", "an", "IPv6", "address", "in", "printable", "representation", "to", "RFC", "1924", "Compact", "Representation", ";", "-", ")", "Returns", "None", "on", "error", "." ]
python
train
UCSBarchlab/PyRTL
pyrtl/rtllib/adders.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/adders.py#L84-L94
def carrysave_adder(a, b, c, final_adder=ripple_add):
    """ Adds three wirevectors up in an efficient manner
    :param WireVector a, b, c : the three wires to add up
    :param function final_adder : The adder to use to do the final addition
    :return: a wirevector with length 2 longer than the largest input
    """
    # Equalise the widths so the bitwise operations line up.
    a, b, c = libutils.match_bitwidth(a, b, c)
    # Carry-save stage: per-bit sums and carries computed independently.
    sum_bits = a ^ b ^ c
    carry_bits = (a | b) & (a | c) & (b | c)  # majority of the three bits
    # Carries feed the next bit position, hence the [1:] / bit-0 split.
    return pyrtl.concat(final_adder(sum_bits[1:], carry_bits), sum_bits[0])
[ "def", "carrysave_adder", "(", "a", ",", "b", ",", "c", ",", "final_adder", "=", "ripple_add", ")", ":", "a", ",", "b", ",", "c", "=", "libutils", ".", "match_bitwidth", "(", "a", ",", "b", ",", "c", ")", "partial_sum", "=", "a", "^", "b", "^", ...
Adds three wirevectors up in an efficient manner :param WireVector a, b, c : the three wires to add up :param function final_adder : The adder to use to do the final addition :return: a wirevector with length 2 longer than the largest input
[ "Adds", "three", "wirevectors", "up", "in", "an", "efficient", "manner", ":", "param", "WireVector", "a", "b", "c", ":", "the", "three", "wires", "to", "add", "up", ":", "param", "function", "final_adder", ":", "The", "adder", "to", "use", "to", "do", ...
python
train
hid-io/layouts-python
layouts/__init__.py
https://github.com/hid-io/layouts-python/blob/b347578bfb4198fd812ecd7a2d9c7e551a856280/layouts/__init__.py#L217-L246
def dict_merge(self, merge_to, merge_in):
    '''
    Recursively merges two dicts

    Overwrites any non-dictionary items
    merge_to <- merge_in
    Modifies merge_to dictionary

    @param merge_to: Base dictionary to merge into
    @param merge_in: Dictionary that may overwrite elements in merge_in
    '''
    for key, incoming in merge_in.items():
        # Absent or None/Null entries are simply (shallow) copied over.
        if merge_to.get(key) is None:
            merge_to[key] = copy.copy(incoming)
            continue

        # Overwriting an existing entry: the types must agree.
        if not isinstance(incoming, type(merge_to[key])):
            raise MergeException('Types do not match! {}: {} != {}'.format(key, type(incoming), type(merge_to[key])))

        # Dictionaries merge recursively; everything else is replaced.
        if isinstance(incoming, dict):
            self.dict_merge(merge_to[key], incoming)
        else:
            merge_to[key] = copy.copy(incoming)
[ "def", "dict_merge", "(", "self", ",", "merge_to", ",", "merge_in", ")", ":", "for", "key", ",", "value", "in", "merge_in", ".", "items", "(", ")", ":", "# Just add, if the key doesn't exist yet", "# Or if set to None/Null", "if", "key", "not", "in", "merge_to",...
Recursively merges two dicts Overwrites any non-dictionary items merge_to <- merge_in Modifies merge_to dictionary @param merge_to: Base dictionary to merge into @param merge_in: Dictionary that may overwrite elements in merge_in
[ "Recursively", "merges", "two", "dicts" ]
python
train
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L450-L466
def err(r):
    """
    Input:  {
              return - return code
              error - error text
            }

    Output: Nothing; quits program
    """
    import sys

    # Print the error text through the kernel output helper, then
    # terminate the process with the recorded return code.
    code = r['return']
    text = r['error']
    out('Error: ' + text)
    sys.exit(code)
[ "def", "err", "(", "r", ")", ":", "import", "sys", "rc", "=", "r", "[", "'return'", "]", "re", "=", "r", "[", "'error'", "]", "out", "(", "'Error: '", "+", "re", ")", "sys", ".", "exit", "(", "rc", ")" ]
Input: { return - return code error - error text } Output: Nothing; quits program
[ "Input", ":", "{", "return", "-", "return", "code", "error", "-", "error", "text", "}" ]
python
train
iotile/coretools
iotilecore/iotile/core/hw/auth/auth_chain.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/auth/auth_chain.py#L48-L79
def encrypt_report(self, device_id, root, data, **kwargs):
    """Encrypt a buffer of report data on behalf of a device.

    Each registered sub-provider is tried in priority order; the first
    one that succeeds wins.

    Args:
        device_id (int): The id of the device that we should encrypt for
        root (int): The root key type that should be used to generate the report
        data (bytearray): The data that we should encrypt.
        **kwargs: Root-key specific arguments; typically report_id (int)
            and sent_timestamp (int), used to derive the per-report
            signing and encryption key from the root key.

    Returns:
        dict: The encrypted data (always a bytearray under the 'data'
        key) plus any provider-specific metadata.

    Raises:
        NotFoundError: If no sub-provider is able to encrypt the data.
    """
    for _, provider in self.providers:
        try:
            return provider.encrypt_report(device_id, root, data, **kwargs)
        except NotFoundError:
            # This provider can't handle the request; try the next one.
            pass

    raise NotFoundError("encrypt_report method is not implemented in any sub_providers")
[ "def", "encrypt_report", "(", "self", ",", "device_id", ",", "root", ",", "data", ",", "*", "*", "kwargs", ")", ":", "for", "_priority", ",", "provider", "in", "self", ".", "providers", ":", "try", ":", "return", "provider", ".", "encrypt_report", "(", ...
Encrypt a buffer of report data on behalf of a device. Args: device_id (int): The id of the device that we should encrypt for root (int): The root key type that should be used to generate the report data (bytearray): The data that we should encrypt. **kwargs: There are additional specific keyword args that are required depending on the root key used. Typically, you must specify - report_id (int): The report id - sent_timestamp (int): The sent timestamp of the report These two bits of information are used to construct the per report signing and encryption key from the specific root key type. Returns: dict: The encrypted data and any associated metadata about the data. The data itself must always be a bytearray stored under the 'data' key, however additional keys may be present depending on the encryption method used. Raises: NotFoundError: If the auth provider is not able to encrypt the data.
[ "Encrypt", "a", "buffer", "of", "report", "data", "on", "behalf", "of", "a", "device", "." ]
python
train
asphalt-framework/asphalt-templating
asphalt/templating/api.py
https://github.com/asphalt-framework/asphalt-templating/blob/e5f836290820aa295b048b17b96d3896d5f1eeac/asphalt/templating/api.py#L53-L65
def render(self, template: str, **vars) -> str:
    """
    Render the named template.

    The current context is injected as the ``ctx`` template variable
    unless the caller supplied one explicitly.

    :param template: name of the template file
    :param vars: extra template variables
    :return: the rendered results
    """
    if 'ctx' not in vars:
        vars['ctx'] = self._ctx
    return self._renderer.render(template, **vars)
[ "def", "render", "(", "self", ",", "template", ":", "str", ",", "*", "*", "vars", ")", "->", "str", ":", "vars", ".", "setdefault", "(", "'ctx'", ",", "self", ".", "_ctx", ")", "return", "self", ".", "_renderer", ".", "render", "(", "template", ","...
Render the named template. The current context will be available to the template as the ``ctx`` variable. :param template: name of the template file :param vars: extra template variables :return: the rendered results
[ "Render", "the", "named", "template", "." ]
python
train
python-xlib/python-xlib
Xlib/rdb.py
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/rdb.py#L332-L341
def output(self):
    """output()

    Return the resource database in text representation.
    """
    # Hold the lock while walking the database so concurrent updates
    # cannot corrupt the traversal.  The original acquired and released
    # manually with no try/finally, leaking the lock if output_db
    # raised; the with-statement guarantees release on every path.
    with self.lock:
        return output_db('', self.db)
[ "def", "output", "(", "self", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "text", "=", "output_db", "(", "''", ",", "self", ".", "db", ")", "self", ".", "lock", ".", "release", "(", ")", "return", "text" ]
output() Return the resource database in text representation.
[ "output", "()" ]
python
train
google/grumpy
third_party/pypy/datetime.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/datetime.py#L1670-L1679
def utcoffset(self):
    """Return the timezone offset in minutes east of UTC (negative west of
    UTC)."""
    tz = self._tzinfo
    if tz is None:
        # Naive object: no offset is defined.
        return None
    # Validate whatever the tzinfo implementation handed back ...
    offset = _check_utc_offset("utcoffset", tz.utcoffset(self))
    # ... and normalise the minute count into a timedelta.
    if offset is not None:
        offset = timedelta._create(0, offset * 60, 0, True)
    return offset
[ "def", "utcoffset", "(", "self", ")", ":", "if", "self", ".", "_tzinfo", "is", "None", ":", "return", "None", "offset", "=", "self", ".", "_tzinfo", ".", "utcoffset", "(", "self", ")", "offset", "=", "_check_utc_offset", "(", "\"utcoffset\"", ",", "offse...
Return the timezone offset in minutes east of UTC (negative west of UTC).
[ "Return", "the", "timezone", "offset", "in", "minutes", "east", "of", "UTC", "(", "negative", "west", "of", "UTC", ")", "." ]
python
valid
Tanganelli/CoAPthon3
coapthon/messages/message.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/messages/message.py#L505-L518
def observe(self):
    """
    Check if the request is an observing request.

    :return: 0, if the request is an observing request
    """
    for option in self.options:
        if option.number == defines.OptionRegistry.OBSERVE.number:
            # An OBSERVE option with no payload counts as value 0.
            return 0 if option.value is None else option.value
    # No OBSERVE option present at all.
    return None
[ "def", "observe", "(", "self", ")", ":", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "number", "==", "defines", ".", "OptionRegistry", ".", "OBSERVE", ".", "number", ":", "# if option.value is None:", "# return 0", "if", "opti...
Check if the request is an observing request. :return: 0, if the request is an observing request
[ "Check", "if", "the", "request", "is", "an", "observing", "request", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L1353-L1362
def insertPeer(self, peer):
    """
    Accepts a peer datamodel object and adds it to the registry.
    """
    try:
        url = peer.getUrl()
        attrs = json.dumps(peer.getAttributes())
        models.Peer.create(url=url, attributes=attrs)
    except Exception as e:
        # Surface any storage/serialisation failure as a repo manager error.
        raise exceptions.RepoManagerException(e)
[ "def", "insertPeer", "(", "self", ",", "peer", ")", ":", "try", ":", "models", ".", "Peer", ".", "create", "(", "url", "=", "peer", ".", "getUrl", "(", ")", ",", "attributes", "=", "json", ".", "dumps", "(", "peer", ".", "getAttributes", "(", ")", ...
Accepts a peer datamodel object and adds it to the registry.
[ "Accepts", "a", "peer", "datamodel", "object", "and", "adds", "it", "to", "the", "registry", "." ]
python
train
noahbenson/pimms
pimms/util.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L488-L496
def curry(f, *args0, **kwargs0):
    '''
    curry(f, ...) yields a function equivalent to f with all following
    arguments and keyword arguments passed. This is much like the partial
    function, but yields a function instead of a partial object and thus is
    suitable for use with pimms lazy maps.
    '''
    def _curried(*more_args, **more_kwargs):
        # Positional arguments accumulate left-to-right; keyword maps are
        # merged so later calls can override the curried defaults.
        return f(*(args0 + more_args), **merge(kwargs0, more_kwargs))
    return _curried
[ "def", "curry", "(", "f", ",", "*", "args0", ",", "*", "*", "kwargs0", ")", ":", "def", "curried_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "f", "(", "*", "(", "args0", "+", "args", ")", ",", "*", "*", "merge", "(", ...
curry(f, ...) yields a function equivalent to f with all following arguments and keyword arguments passed. This is much like the partial function, but yields a function instead of a partial object and thus is suitable for use with pimms lazy maps.
[ "curry", "(", "f", "...", ")", "yields", "a", "function", "equivalent", "to", "f", "with", "all", "following", "arguments", "and", "keyword", "arguments", "passed", ".", "This", "is", "much", "like", "the", "partial", "function", "but", "yields", "a", "fun...
python
train
bunq/sdk_python
bunq/sdk/model/generated/object_.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/object_.py#L5429-L5443
def is_all_field_none(self):
    """
    :rtype: bool
    """
    # True only when every tracked field is unset.
    return (self._country is None
            and self._tax_number is None
            and self._status is None)
[ "def", "is_all_field_none", "(", "self", ")", ":", "if", "self", ".", "_country", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_tax_number", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_status", "is", "not", "No...
:rtype: bool
[ ":", "rtype", ":", "bool" ]
python
train
bokeh/bokeh
bokeh/models/sources.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/models/sources.py#L325-L343
def add(self, data, name=None):
    ''' Appends a new column of data to the data source.

    Args:
        data (seq) : new data to add

        name (str, optional) : column name to use.
            If not supplied, generate a name of the form "Series ####"

    Returns:
        str:  the column name used

    '''
    if name is None:
        # Find the first unused auto-generated name, starting the probe
        # at the current column count.
        idx = len(self.data)
        while "Series %d" % idx in self.data:
            idx += 1
        name = "Series %d" % idx
    self.data[name] = data
    return name
[ "def", "add", "(", "self", ",", "data", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "n", "=", "len", "(", "self", ".", "data", ")", "while", "\"Series %d\"", "%", "n", "in", "self", ".", "data", ":", "n", "+=", "1", "...
Appends a new column of data to the data source. Args: data (seq) : new data to add name (str, optional) : column name to use. If not supplied, generate a name of the form "Series ####" Returns: str: the column name used
[ "Appends", "a", "new", "column", "of", "data", "to", "the", "data", "source", "." ]
python
train
twilio/twilio-python
twilio/rest/preview/deployed_devices/fleet/device.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/deployed_devices/fleet/device.py#L494-L512
def update(self, friendly_name=values.unset, identity=values.unset,
           deployment_sid=values.unset, enabled=values.unset):
    """
    Update the DeviceInstance

    :param unicode friendly_name: A human readable description for this Device.
    :param unicode identity: An identifier of the Device user.
    :param unicode deployment_sid: The unique SID of the Deployment group.
    :param bool enabled: The enabled

    :returns: Updated DeviceInstance
    :rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
    """
    # Pure delegation: forward every field to the context proxy.
    params = {
        'friendly_name': friendly_name,
        'identity': identity,
        'deployment_sid': deployment_sid,
        'enabled': enabled,
    }
    return self._proxy.update(**params)
[ "def", "update", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "identity", "=", "values", ".", "unset", ",", "deployment_sid", "=", "values", ".", "unset", ",", "enabled", "=", "values", ".", "unset", ")", ":", "return", "self", ...
Update the DeviceInstance :param unicode friendly_name: A human readable description for this Device. :param unicode identity: An identifier of the Device user. :param unicode deployment_sid: The unique SID of the Deployment group. :param bool enabled: The enabled :returns: Updated DeviceInstance :rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
[ "Update", "the", "DeviceInstance" ]
python
train
python-useful-helpers/advanced-descriptors
advanced_descriptors/log_on_access.py
https://github.com/python-useful-helpers/advanced-descriptors/blob/17ee4a35b3bfcb4adf4ed2f41e75c4c6b71cb003/advanced_descriptors/log_on_access.py#L300-L305
def logger(self, logger: typing.Union[logging.Logger, str, None]) -> None:
    """Logger instance to use as override."""
    resolved = logger
    # Anything that is neither None nor already a Logger is treated as a
    # logger name and resolved through the logging registry.
    if not (logger is None or isinstance(logger, logging.Logger)):
        resolved = logging.getLogger(logger)
    self.__logger = resolved
[ "def", "logger", "(", "self", ",", "logger", ":", "typing", ".", "Union", "[", "logging", ".", "Logger", ",", "str", ",", "None", "]", ")", "->", "None", ":", "if", "logger", "is", "None", "or", "isinstance", "(", "logger", ",", "logging", ".", "Lo...
Logger instance to use as override.
[ "Logger", "instance", "to", "use", "as", "override", "." ]
python
test
saltstack/salt
salt/modules/boto_asg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_asg.py#L814-L858
def get_instances(name, lifecycle_state="InService", health_status="Healthy",
                  attribute="private_ip_address", attributes=None, region=None,
                  key=None, keyid=None, profile=None):
    '''
    return attribute of all instances in the named autoscale group.

    CLI example::

        salt-call boto_asg.get_instances my_autoscale_group_name
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ec2_conn = _get_ec2_conn(region=region, key=key, keyid=keyid, profile=profile)

    # Look up the ASG, backing off when the AWS API throttles us.
    retries = 30
    while True:
        try:
            asgs = conn.get_all_groups(names=[name])
            break
        except boto.exception.BotoServerError as e:
            if retries and e.code == 'Throttling':
                log.debug('Throttled by AWS API, retrying in 5 seconds...')
                time.sleep(5)
                retries -= 1
                continue
            log.error(e)
            return False

    if len(asgs) != 1:
        log.debug("name '%s' returns multiple ASGs: %s",
                  name, [asg.name for asg in asgs])
        return False
    asg = asgs[0]

    # match lifecycle_state and health_status (None means "don't filter")
    instance_ids = [
        i.instance_id for i in asg.instances
        if (lifecycle_state is None or i.lifecycle_state == lifecycle_state)
        and (health_status is None or i.health_status == health_status)
    ]

    # get full instance info, so that we can return the attribute
    instances = ec2_conn.get_only_instances(instance_ids=instance_ids)
    if attributes:
        return [[_convert_attribute(instance, attr) for attr in attributes]
                for instance in instances]
    # properly handle case when not all instances have the requested attribute
    return [_convert_attribute(instance, attribute)
            for instance in instances if getattr(instance, attribute)]
[ "def", "get_instances", "(", "name", ",", "lifecycle_state", "=", "\"InService\"", ",", "health_status", "=", "\"Healthy\"", ",", "attribute", "=", "\"private_ip_address\"", ",", "attributes", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", "...
return attribute of all instances in the named autoscale group. CLI example:: salt-call boto_asg.get_instances my_autoscale_group_name
[ "return", "attribute", "of", "all", "instances", "in", "the", "named", "autoscale", "group", "." ]
python
train
pbrisk/timewave
timewave/consumers.py
https://github.com/pbrisk/timewave/blob/cf641391d1607a424042724c8b990d43ee270ef6/timewave/consumers.py#L182-L192
def get(self, queue_get): """ get to given consumer states. This function is used for merging of results of parallelized MC. The first state is used for merging in place. The states must be disjoint. :param object queue_get: second consumer state """ for (c, cs) in izip(self.consumers, queue_get): c.get(cs) self.result = [c.result for c in self.consumers]
[ "def", "get", "(", "self", ",", "queue_get", ")", ":", "for", "(", "c", ",", "cs", ")", "in", "izip", "(", "self", ".", "consumers", ",", "queue_get", ")", ":", "c", ".", "get", "(", "cs", ")", "self", ".", "result", "=", "[", "c", ".", "resu...
get to given consumer states. This function is used for merging of results of parallelized MC. The first state is used for merging in place. The states must be disjoint. :param object queue_get: second consumer state
[ "get", "to", "given", "consumer", "states", ".", "This", "function", "is", "used", "for", "merging", "of", "results", "of", "parallelized", "MC", ".", "The", "first", "state", "is", "used", "for", "merging", "in", "place", ".", "The", "states", "must", "...
python
train
hardbyte/python-can
can/io/sqlite.py
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/io/sqlite.py#L79-L85
def read_all(self): """Fetches all messages in the database. :rtype: Generator[can.Message] """ result = self._cursor.execute("SELECT * FROM {}".format(self.table_name)).fetchall() return (SqliteReader._assemble_message(frame) for frame in result)
[ "def", "read_all", "(", "self", ")", ":", "result", "=", "self", ".", "_cursor", ".", "execute", "(", "\"SELECT * FROM {}\"", ".", "format", "(", "self", ".", "table_name", ")", ")", ".", "fetchall", "(", ")", "return", "(", "SqliteReader", ".", "_assemb...
Fetches all messages in the database. :rtype: Generator[can.Message]
[ "Fetches", "all", "messages", "in", "the", "database", "." ]
python
train
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L770-L786
def execute_command(self, *args, **options): "Execute a command and return a parsed response" pool = self.connection_pool command_name = args[0] connection = pool.get_connection(command_name, **options) try: connection.send_command(*args) return self.parse_response(connection, command_name, **options) except (ConnectionError, TimeoutError) as e: connection.disconnect() if not (connection.retry_on_timeout and isinstance(e, TimeoutError)): raise connection.send_command(*args) return self.parse_response(connection, command_name, **options) finally: pool.release(connection)
[ "def", "execute_command", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "pool", "=", "self", ".", "connection_pool", "command_name", "=", "args", "[", "0", "]", "connection", "=", "pool", ".", "get_connection", "(", "command_name", "...
Execute a command and return a parsed response
[ "Execute", "a", "command", "and", "return", "a", "parsed", "response" ]
python
train
Jajcus/pyxmpp2
pyxmpp2/ext/dataforms.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/dataforms.py#L98-L118
def _new_from_xml(cls, xmlnode): """Create a new `Option` object from an XML element. :Parameters: - `xmlnode`: the XML element. :Types: - `xmlnode`: `libxml2.xmlNode` :return: the object created. :returntype: `Option` """ label = from_utf8(xmlnode.prop("label")) child = xmlnode.children value = None for child in xml_element_ns_iter(xmlnode.children, DATAFORM_NS): if child.name == "value": value = from_utf8(child.getContent()) break if value is None: raise BadRequestProtocolError("No value in <option/> element") return cls(value, label)
[ "def", "_new_from_xml", "(", "cls", ",", "xmlnode", ")", ":", "label", "=", "from_utf8", "(", "xmlnode", ".", "prop", "(", "\"label\"", ")", ")", "child", "=", "xmlnode", ".", "children", "value", "=", "None", "for", "child", "in", "xml_element_ns_iter", ...
Create a new `Option` object from an XML element. :Parameters: - `xmlnode`: the XML element. :Types: - `xmlnode`: `libxml2.xmlNode` :return: the object created. :returntype: `Option`
[ "Create", "a", "new", "Option", "object", "from", "an", "XML", "element", "." ]
python
valid
ff0000/scarlet
scarlet/cms/base_views.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/base_views.py#L319-L349
def customize_form_widgets(self, form_class, fields=None): """ Hook for customizing widgets for a form_class. This is needed for forms that specify their own fields causing the default db_field callback to not be run for that field. Default implementation checks for APIModelChoiceWidgets or APIManyChoiceWidgets and runs the update_links method on them. Passing the admin_site and request being used. Returns a new class that contains the field with the initialized custom widget. """ attrs = {} if fields: fields = set(fields) for k, f in form_class.base_fields.items(): if fields and not k in fields: continue if isinstance(f.widget, widgets.APIModelChoiceWidget) \ or isinstance(f.widget, widgets.APIManyChoiceWidget): field = copy.deepcopy(f) field.widget.update_links(self.request, self.bundle.admin_site) attrs[k] = field if attrs: form_class = type(form_class.__name__, (form_class,), attrs) return form_class
[ "def", "customize_form_widgets", "(", "self", ",", "form_class", ",", "fields", "=", "None", ")", ":", "attrs", "=", "{", "}", "if", "fields", ":", "fields", "=", "set", "(", "fields", ")", "for", "k", ",", "f", "in", "form_class", ".", "base_fields", ...
Hook for customizing widgets for a form_class. This is needed for forms that specify their own fields causing the default db_field callback to not be run for that field. Default implementation checks for APIModelChoiceWidgets or APIManyChoiceWidgets and runs the update_links method on them. Passing the admin_site and request being used. Returns a new class that contains the field with the initialized custom widget.
[ "Hook", "for", "customizing", "widgets", "for", "a", "form_class", ".", "This", "is", "needed", "for", "forms", "that", "specify", "their", "own", "fields", "causing", "the", "default", "db_field", "callback", "to", "not", "be", "run", "for", "that", "field"...
python
train
rndmcnlly/ansunit
ansunit/__init__.py
https://github.com/rndmcnlly/ansunit/blob/3d45e22ab1ae131b6eda25d5ae2ead2c5cfee02a/ansunit/__init__.py#L97-L108
def flatten_spec(spec, prefix,joiner=" :: "): """Flatten a canonical specification with nesting into one without nesting. When building unique names, concatenate the given prefix to the local test name without the "Test " tag.""" if any(filter(operator.methodcaller("startswith","Test"),spec.keys())): flat_spec = {} for (k,v) in spec.items(): flat_spec.update(flatten_spec(v,prefix + joiner + k[5:])) return flat_spec else: return {"Test "+prefix: spec}
[ "def", "flatten_spec", "(", "spec", ",", "prefix", ",", "joiner", "=", "\" :: \"", ")", ":", "if", "any", "(", "filter", "(", "operator", ".", "methodcaller", "(", "\"startswith\"", ",", "\"Test\"", ")", ",", "spec", ".", "keys", "(", ")", ")", ")", ...
Flatten a canonical specification with nesting into one without nesting. When building unique names, concatenate the given prefix to the local test name without the "Test " tag.
[ "Flatten", "a", "canonical", "specification", "with", "nesting", "into", "one", "without", "nesting", ".", "When", "building", "unique", "names", "concatenate", "the", "given", "prefix", "to", "the", "local", "test", "name", "without", "the", "Test", "tag", "....
python
train
5j9/wikitextparser
wikitextparser/_wikitext.py
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L394-L403
def _insert_update(self, index: int, length: int) -> None: """Update self._type_to_spans according to the added length.""" ss, se = self._span for spans in self._type_to_spans.values(): for span in spans: if index < span[1] or span[1] == index == se: span[1] += length # index is before s, or at s but not on self_span if index < span[0] or span[0] == index != ss: span[0] += length
[ "def", "_insert_update", "(", "self", ",", "index", ":", "int", ",", "length", ":", "int", ")", "->", "None", ":", "ss", ",", "se", "=", "self", ".", "_span", "for", "spans", "in", "self", ".", "_type_to_spans", ".", "values", "(", ")", ":", "for",...
Update self._type_to_spans according to the added length.
[ "Update", "self", ".", "_type_to_spans", "according", "to", "the", "added", "length", "." ]
python
test
ministryofjustice/money-to-prisoners-common
mtp_common/build_tasks/tasks.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/tasks.py#L220-L238
def bundle_stylesheets(context: Context): """ Compiles stylesheets """ args = [ '--output', context.app.scss_build_path, '--output-style', 'compressed', ] if context.verbosity == 0: args.append('--quiet') if not context.use_colour: args.append('--no-color') for path in context.app.scss_include_paths: args.append('--include-path') args.append(path) return_code = 0 for source_file in context.app.scss_source_file_set.paths_for_shell(separator=None): return_code = context.node_tool('node-sass', *args + [source_file]) or return_code return return_code
[ "def", "bundle_stylesheets", "(", "context", ":", "Context", ")", ":", "args", "=", "[", "'--output'", ",", "context", ".", "app", ".", "scss_build_path", ",", "'--output-style'", ",", "'compressed'", ",", "]", "if", "context", ".", "verbosity", "==", "0", ...
Compiles stylesheets
[ "Compiles", "stylesheets" ]
python
train
Hackerfleet/hfos
hfos/schemata/defaultform.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/schemata/defaultform.py#L117-L127
def fieldset(title, items, options=None): """A field set with a title and sub items""" result = { 'title': title, 'type': 'fieldset', 'items': items } if options is not None: result.update(options) return result
[ "def", "fieldset", "(", "title", ",", "items", ",", "options", "=", "None", ")", ":", "result", "=", "{", "'title'", ":", "title", ",", "'type'", ":", "'fieldset'", ",", "'items'", ":", "items", "}", "if", "options", "is", "not", "None", ":", "result...
A field set with a title and sub items
[ "A", "field", "set", "with", "a", "title", "and", "sub", "items" ]
python
train
roanuz/py-cricket
src/pycricket.py
https://github.com/roanuz/py-cricket/blob/fa47fe2e92915fc58db38898213e974742af55d4/src/pycricket.py#L404-L416
def get_fantasy_points(self, match_key): """ Calling Fantasy Points API Arg: match_key: key of the match Return: json data """ fantasy_points_url = self.api_path_v3 + "fantasy-match-points/" + match_key + "/" response = self.get_response(fantasy_points_url) return response
[ "def", "get_fantasy_points", "(", "self", ",", "match_key", ")", ":", "fantasy_points_url", "=", "self", ".", "api_path_v3", "+", "\"fantasy-match-points/\"", "+", "match_key", "+", "\"/\"", "response", "=", "self", ".", "get_response", "(", "fantasy_points_url", ...
Calling Fantasy Points API Arg: match_key: key of the match Return: json data
[ "Calling", "Fantasy", "Points", "API" ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/yu_2013.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/yu_2013.py#L296-L356
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # Check that the requested standard deviation type is available assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) # # Set parameters magn = rup.mag epi = dists.repi theta = dists.azimuth # # Convert Mw into Ms if magn < 6.58: mag = (magn - 0.59) / 0.86 else: mag = (magn + 2.42) / 1.28 # # Set coefficients coeff = self.COEFFS[imt] a1ca, a1cb, a1cc, a1cd, a1ce, a2ca, a2cb, a2cc, a2cd, a2ce = \ gc(coeff, mag) # # Get correction coefficients. Here for each site we find the # the geometry of the ellipses ras = [] for epi, theta in zip(dists.repi, dists.azimuth): res = get_ras(epi, theta, mag, coeff) ras.append(res) ras = np.array(ras) rbs = rbf(ras, coeff, mag) # # Compute values of ground motion for the two cases. The value of # 225 is hardcoded under the assumption that the hypocentral depth # corresponds to 15 km (i.e. 15**2) mean1 = (a1ca + a1cb * mag + a1cc * np.log((ras**2+225)**0.5 + a1cd * np.exp(a1ce * mag))) mean2 = (a2ca + a2cb * mag + a2cc * np.log((rbs**2+225)**0.5 + a2cd * np.exp(a2ce * mag))) # # Get distances x = (mean1 * np.sin(np.radians(dists.azimuth)))**2 y = (mean2 * np.cos(np.radians(dists.azimuth)))**2 mean = mean1 * mean2 / np.sqrt(x+y) if imt.name == "PGA": mean = np.exp(mean)/g/100 elif imt.name == "PGV": mean = np.exp(mean) else: raise ValueError('Unsupported IMT') # # Get the standard deviation stddevs = self._compute_std(coeff, stddev_types, len(dists.repi)) # # Return results return np.log(mean), stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# Check that the requested standard deviation type is available", "assert", "all", "(", "stddev_type", "in", "self", ".", "DEFINED_FOR_STAND...
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
Gandi/gandi.cli
gandi/cli/commands/vm.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/vm.py#L32-L52
def list(gandi, state, id, limit, datacenter): """List virtual machines.""" options = { 'items_per_page': limit, } if state: options['state'] = state if datacenter: options['datacenter_id'] = gandi.datacenter.usable_id(datacenter) output_keys = ['hostname', 'state'] if id: output_keys.append('id') result = gandi.iaas.list(options) for num, vm in enumerate(result): if num: gandi.separator_line() output_vm(gandi, vm, [], output_keys) return result
[ "def", "list", "(", "gandi", ",", "state", ",", "id", ",", "limit", ",", "datacenter", ")", ":", "options", "=", "{", "'items_per_page'", ":", "limit", ",", "}", "if", "state", ":", "options", "[", "'state'", "]", "=", "state", "if", "datacenter", ":...
List virtual machines.
[ "List", "virtual", "machines", "." ]
python
train
saltstack/salt
salt/modules/cron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cron.py#L248-L267
def _write_cron_lines(user, lines): ''' Takes a list of lines to be committed to a user's crontab and writes it ''' lines = [salt.utils.stringutils.to_str(_l) for _l in lines] path = salt.utils.files.mkstemp() if _check_instance_uid_match(user) or __grains__.get('os_family') in ('Solaris', 'AIX'): # In some cases crontab command should be executed as user rather than root with salt.utils.files.fpopen(path, 'w+', uid=__salt__['file.user_to_uid'](user), mode=0o600) as fp_: fp_.writelines(lines) ret = __salt__['cmd.run_all'](_get_cron_cmdstr(path), runas=user, python_shell=False) else: with salt.utils.files.fpopen(path, 'w+', mode=0o600) as fp_: fp_.writelines(lines) ret = __salt__['cmd.run_all'](_get_cron_cmdstr(path, user), python_shell=False) os.remove(path) return ret
[ "def", "_write_cron_lines", "(", "user", ",", "lines", ")", ":", "lines", "=", "[", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "_l", ")", "for", "_l", "in", "lines", "]", "path", "=", "salt", ".", "utils", ".", "files", ".", "mks...
Takes a list of lines to be committed to a user's crontab and writes it
[ "Takes", "a", "list", "of", "lines", "to", "be", "committed", "to", "a", "user", "s", "crontab", "and", "writes", "it" ]
python
train
mcs07/ChemDataExtractor
chemdataextractor/nlp/tokenize.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L565-L571
def _is_saccharide_arrow(self, before, after): """Return True if the arrow is in a chemical name.""" if (before and after and before[-1].isdigit() and after[0].isdigit() and before.rstrip('0123456789').endswith('(') and after.lstrip('0123456789').startswith(')-')): return True else: return False
[ "def", "_is_saccharide_arrow", "(", "self", ",", "before", ",", "after", ")", ":", "if", "(", "before", "and", "after", "and", "before", "[", "-", "1", "]", ".", "isdigit", "(", ")", "and", "after", "[", "0", "]", ".", "isdigit", "(", ")", "and", ...
Return True if the arrow is in a chemical name.
[ "Return", "True", "if", "the", "arrow", "is", "in", "a", "chemical", "name", "." ]
python
train
smdabdoub/phylotoast
bin/diversity.py
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L106-L119
def write_diversity_metrics(data, sample_ids, fp=None): """ Given a dictionary of diversity calculations (keyed by method) write out the data to a file. """ if fp is None: fp = "./diversity_data.txt" with open(fp, "w") as outf: out = csv.writer(outf, delimiter="\t") out.writerow(["SampleID", "Group", "Calculation"]) for group, d in data.iteritems(): for sid, value in d.iteritems(): out.writerow([sid, group, value])
[ "def", "write_diversity_metrics", "(", "data", ",", "sample_ids", ",", "fp", "=", "None", ")", ":", "if", "fp", "is", "None", ":", "fp", "=", "\"./diversity_data.txt\"", "with", "open", "(", "fp", ",", "\"w\"", ")", "as", "outf", ":", "out", "=", "csv"...
Given a dictionary of diversity calculations (keyed by method) write out the data to a file.
[ "Given", "a", "dictionary", "of", "diversity", "calculations", "(", "keyed", "by", "method", ")", "write", "out", "the", "data", "to", "a", "file", "." ]
python
train
Erotemic/ubelt
ubelt/util_memoize.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_memoize.py#L58-L100
def _make_signature_key(args, kwargs): """ Transforms function args into a key that can be used by the cache CommandLine: xdoctest -m ubelt.util_memoize _make_signature_key Example: >>> args = (4, [1, 2]) >>> kwargs = {'a': 'b'} >>> key = _make_signature_key(args, kwargs) >>> print('key = {!r}'.format(key)) >>> # Some mutable types cannot be handled by ub.hash_data >>> import pytest >>> import six >>> if six.PY2: >>> import collections as abc >>> else: >>> from collections import abc >>> with pytest.raises(TypeError): >>> _make_signature_key((4, [1, 2], {1: 2, 'a': 'b'}), kwargs={}) >>> class Dummy(abc.MutableSet): >>> def __contains__(self, item): return None >>> def __iter__(self): return iter([]) >>> def __len__(self): return 0 >>> def add(self, item, loc): return None >>> def discard(self, item): return None >>> with pytest.raises(TypeError): >>> _make_signature_key((Dummy(),), kwargs={}) """ kwitems = kwargs.items() # TODO: we should check if Python is at least 3.7 and sort by kwargs # keys otherwise. Should we use hash_data for key generation if (sys.version_info.major, sys.version_info.minor) < (3, 7): # nocover # We can sort because they keys are gaurenteed to be strings kwitems = sorted(kwitems) kwitems = tuple(kwitems) try: key = _hashable(args), _hashable(kwitems) except TypeError: raise TypeError('Signature is not hashable: args={} kwargs{}'.format(args, kwargs)) return key
[ "def", "_make_signature_key", "(", "args", ",", "kwargs", ")", ":", "kwitems", "=", "kwargs", ".", "items", "(", ")", "# TODO: we should check if Python is at least 3.7 and sort by kwargs", "# keys otherwise. Should we use hash_data for key generation", "if", "(", "sys", ".",...
Transforms function args into a key that can be used by the cache CommandLine: xdoctest -m ubelt.util_memoize _make_signature_key Example: >>> args = (4, [1, 2]) >>> kwargs = {'a': 'b'} >>> key = _make_signature_key(args, kwargs) >>> print('key = {!r}'.format(key)) >>> # Some mutable types cannot be handled by ub.hash_data >>> import pytest >>> import six >>> if six.PY2: >>> import collections as abc >>> else: >>> from collections import abc >>> with pytest.raises(TypeError): >>> _make_signature_key((4, [1, 2], {1: 2, 'a': 'b'}), kwargs={}) >>> class Dummy(abc.MutableSet): >>> def __contains__(self, item): return None >>> def __iter__(self): return iter([]) >>> def __len__(self): return 0 >>> def add(self, item, loc): return None >>> def discard(self, item): return None >>> with pytest.raises(TypeError): >>> _make_signature_key((Dummy(),), kwargs={})
[ "Transforms", "function", "args", "into", "a", "key", "that", "can", "be", "used", "by", "the", "cache" ]
python
valid
political-memory/django-representatives
representatives/contrib/francedata/import_representatives.py
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/contrib/francedata/import_representatives.py#L218-L270
def add_mandates(self, representative, rep_json): ''' Create mandates from rep data based on variant configuration ''' # Mandate in country group for party constituency if rep_json.get('parti_ratt_financier'): constituency, _ = Constituency.objects.get_or_create( name=rep_json.get('parti_ratt_financier'), country=self.france) group, _ = self.touch_model(model=Group, abbreviation=self.france.code, kind='country', name=self.france.name) _create_mandate(representative, group, constituency, 'membre') # Configurable mandates for mdef in self.variant['mandates']: if mdef.get('chamber', False): chamber = self.chamber else: chamber = None if 'from' in mdef: elems = mdef['from'](rep_json) else: elems = [rep_json] for elem in elems: name = _get_mdef_item(mdef, 'name', elem, '') abbr = _get_mdef_item(mdef, 'abbr', elem, '') group, _ = self.touch_model(model=Group, abbreviation=abbr, kind=mdef['kind'], chamber=chamber, name=name) role = _get_mdef_item(mdef, 'role', elem, 'membre') start = _get_mdef_item(mdef, 'start', elem, None) if start is not None: start = _parse_date(start) end = _get_mdef_item(mdef, 'end', elem, None) if end is not None: end = _parse_date(end) _create_mandate(representative, group, self.ch_constituency, role, start, end) logger.debug( '%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'], mdef['kind'], role, name, abbr, start, end))
[ "def", "add_mandates", "(", "self", ",", "representative", ",", "rep_json", ")", ":", "# Mandate in country group for party constituency", "if", "rep_json", ".", "get", "(", "'parti_ratt_financier'", ")", ":", "constituency", ",", "_", "=", "Constituency", ".", "obj...
Create mandates from rep data based on variant configuration
[ "Create", "mandates", "from", "rep", "data", "based", "on", "variant", "configuration" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/traitlets.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/traitlets.py#L245-L267
def set_default_value(self, obj): """Set the default value on a per instance basis. This method is called by :meth:`instance_init` to create and validate the default value. The creation and validation of default values must be delayed until the parent :class:`HasTraits` class has been instantiated. """ # Check for a deferred initializer defined in the same class as the # trait declaration or above. mro = type(obj).mro() meth_name = '_%s_default' % self.name for cls in mro[:mro.index(self.this_class)+1]: if meth_name in cls.__dict__: break else: # We didn't find one. Do static initialization. dv = self.get_default_value() newdv = self._validate(obj, dv) obj._trait_values[self.name] = newdv return # Complete the dynamic initialization. obj._trait_dyn_inits[self.name] = cls.__dict__[meth_name]
[ "def", "set_default_value", "(", "self", ",", "obj", ")", ":", "# Check for a deferred initializer defined in the same class as the", "# trait declaration or above.", "mro", "=", "type", "(", "obj", ")", ".", "mro", "(", ")", "meth_name", "=", "'_%s_default'", "%", "s...
Set the default value on a per instance basis. This method is called by :meth:`instance_init` to create and validate the default value. The creation and validation of default values must be delayed until the parent :class:`HasTraits` class has been instantiated.
[ "Set", "the", "default", "value", "on", "a", "per", "instance", "basis", "." ]
python
test
lago-project/lago
lago/plugins/cli.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/plugins/cli.py#L243-L282
def cli_plugin_add_help(help): """ Decorator generator that adds the cli help to the cli plugin based on the decorated function Args: help (str): help string for the cli plugin Returns: function: Decorator that builds or extends the cliplugin for the decorated function, setting the given help Examples: >>> @cli_plugin_add_help('my help string') ... def test(**kwargs): ... print 'test' ... >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test.help my help string >>> @cli_plugin_add_help('my help string') ... @cli_plugin() ... def test(**kwargs): ... print 'test' >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test.help my help string """ def decorator(func): if not isinstance(func, CLIPluginFuncWrapper): func = CLIPluginFuncWrapper(do_run=func) func.set_help(help) return func return decorator
[ "def", "cli_plugin_add_help", "(", "help", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "not", "isinstance", "(", "func", ",", "CLIPluginFuncWrapper", ")", ":", "func", "=", "CLIPluginFuncWrapper", "(", "do_run", "=", "func", ")", "func", "."...
Decorator generator that adds the cli help to the cli plugin based on the decorated function Args: help (str): help string for the cli plugin Returns: function: Decorator that builds or extends the cliplugin for the decorated function, setting the given help Examples: >>> @cli_plugin_add_help('my help string') ... def test(**kwargs): ... print 'test' ... >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test.help my help string >>> @cli_plugin_add_help('my help string') ... @cli_plugin() ... def test(**kwargs): ... print 'test' >>> print test.__class__ <class 'cli.CLIPluginFuncWrapper'> >>> print test.help my help string
[ "Decorator", "generator", "that", "adds", "the", "cli", "help", "to", "the", "cli", "plugin", "based", "on", "the", "decorated", "function" ]
python
train
NASA-AMMOS/AIT-Core
ait/core/geom.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/geom.py#L671-L697
def contains (self, p): """Returns True if point is contained inside this Polygon, False otherwise. This method uses the Ray Casting algorithm. Examples: >>> p = Polygon() >>> p.vertices = [Point(1, 1), Point(1, -1), Point(-1, -1), Point(-1, 1)] >>> p.contains( Point(0, 0) ) True >>> p.contains( Point(2, 3) ) False """ inside = False if p in self.bounds(): for s in self.segments(): if ((s.p.y > p.y) != (s.q.y > p.y) and (p.x < (s.q.x - s.p.x) * (p.y - s.p.y) / (s.q.y - s.p.y) + s.p.x)): inside = not inside return inside
[ "def", "contains", "(", "self", ",", "p", ")", ":", "inside", "=", "False", "if", "p", "in", "self", ".", "bounds", "(", ")", ":", "for", "s", "in", "self", ".", "segments", "(", ")", ":", "if", "(", "(", "s", ".", "p", ".", "y", ">", "p", ...
Returns True if point is contained inside this Polygon, False otherwise. This method uses the Ray Casting algorithm. Examples: >>> p = Polygon() >>> p.vertices = [Point(1, 1), Point(1, -1), Point(-1, -1), Point(-1, 1)] >>> p.contains( Point(0, 0) ) True >>> p.contains( Point(2, 3) ) False
[ "Returns", "True", "if", "point", "is", "contained", "inside", "this", "Polygon", "False", "otherwise", "." ]
python
train
hynek/environ_config
src/environ/_environ_config.py
https://github.com/hynek/environ_config/blob/d61c0822bf0b6516932534e0496bd3f360a3e5e2/src/environ/_environ_config.py#L156-L198
def _generate_help_dicts(config_cls, _prefix=None): """ Generate dictionaries for use in building help strings. Every dictionary includes the keys... var_name: The env var that should be set to populate the value. required: A bool, True if the var is required, False if it's optional. Conditionally, the following are included... default: Included if an optional variable has a default set help_str: Included if the var uses the help kwarg to provide additional context for the value. Conditional key inclusion is meant to differentiate between exclusion vs explicitly setting a value to None. """ help_dicts = [] if _prefix is None: _prefix = config_cls._prefix for a in attr.fields(config_cls): try: ce = a.metadata[CNF_KEY] except KeyError: continue if ce.sub_cls is None: # Base case for "leaves". if ce.name is None: var_name = "_".join((_prefix, a.name)).upper() else: var_name = ce.name req = ce.default == RAISE help_dict = {"var_name": var_name, "required": req} if not req: help_dict["default"] = ce.default if ce.help is not None: help_dict["help_str"] = ce.help help_dicts.append(help_dict) else: # Construct the new prefix and recurse. help_dicts += _generate_help_dicts( ce.sub_cls, _prefix="_".join((_prefix, a.name)).upper() ) return help_dicts
[ "def", "_generate_help_dicts", "(", "config_cls", ",", "_prefix", "=", "None", ")", ":", "help_dicts", "=", "[", "]", "if", "_prefix", "is", "None", ":", "_prefix", "=", "config_cls", ".", "_prefix", "for", "a", "in", "attr", ".", "fields", "(", "config_...
Generate dictionaries for use in building help strings. Every dictionary includes the keys... var_name: The env var that should be set to populate the value. required: A bool, True if the var is required, False if it's optional. Conditionally, the following are included... default: Included if an optional variable has a default set help_str: Included if the var uses the help kwarg to provide additional context for the value. Conditional key inclusion is meant to differentiate between exclusion vs explicitly setting a value to None.
[ "Generate", "dictionaries", "for", "use", "in", "building", "help", "strings", "." ]
python
train
jupyter-widgets/ipywidgets
ipywidgets/widgets/widget_float.py
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/widget_float.py#L68-L73
def _validate_value(self, proposal): """Cap and floor value""" value = proposal['value'] if self.base ** self.min > value or self.base ** self.max < value: value = min(max(value, self.base ** self.min), self.base ** self.max) return value
[ "def", "_validate_value", "(", "self", ",", "proposal", ")", ":", "value", "=", "proposal", "[", "'value'", "]", "if", "self", ".", "base", "**", "self", ".", "min", ">", "value", "or", "self", ".", "base", "**", "self", ".", "max", "<", "value", "...
Cap and floor value
[ "Cap", "and", "floor", "value" ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L149-L183
def __collectFilterTerms( self, mapping, item = None, level = 0 ): """ Generates a list of filter terms based on the column data for the \ items in the tree. If no parent is supplied, then the top level items \ will be used, otherwise the children will be searched through. The \ level value will drive how far down the tree we will look for the terms. :param mapping | {<int> column: <set> values, ..} item | <QtGui.QTreeWidgetItem> || None level | <int> """ if not mapping: return max_level = self.maximumFilterLevel() if max_level != None and level > max_level: return False if not item: for i in range(self.topLevelItemCount()): self.__collectFilterTerms(mapping, self.topLevelItem(i)) else: # add the item data to the mapping for index in mapping.keys(): text = nativestring(item.text(index)) if not text: continue mapping[index].add(text) for c in range(item.childCount()): self.__collectFilterTerms(mapping, item.child(c), level + 1)
[ "def", "__collectFilterTerms", "(", "self", ",", "mapping", ",", "item", "=", "None", ",", "level", "=", "0", ")", ":", "if", "not", "mapping", ":", "return", "max_level", "=", "self", ".", "maximumFilterLevel", "(", ")", "if", "max_level", "!=", "None",...
Generates a list of filter terms based on the column data for the \ items in the tree. If no parent is supplied, then the top level items \ will be used, otherwise the children will be searched through. The \ level value will drive how far down the tree we will look for the terms. :param mapping | {<int> column: <set> values, ..} item | <QtGui.QTreeWidgetItem> || None level | <int>
[ "Generates", "a", "list", "of", "filter", "terms", "based", "on", "the", "column", "data", "for", "the", "\\", "items", "in", "the", "tree", ".", "If", "no", "parent", "is", "supplied", "then", "the", "top", "level", "items", "\\", "will", "be", "used"...
python
train
ray-project/ray
python/ray/autoscaler/aws/node_provider.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/autoscaler/aws/node_provider.py#L18-L24
def to_aws_format(tags): """Convert the Ray node name tag to the AWS-specific 'Name' tag.""" if TAG_RAY_NODE_NAME in tags: tags["Name"] = tags[TAG_RAY_NODE_NAME] del tags[TAG_RAY_NODE_NAME] return tags
[ "def", "to_aws_format", "(", "tags", ")", ":", "if", "TAG_RAY_NODE_NAME", "in", "tags", ":", "tags", "[", "\"Name\"", "]", "=", "tags", "[", "TAG_RAY_NODE_NAME", "]", "del", "tags", "[", "TAG_RAY_NODE_NAME", "]", "return", "tags" ]
Convert the Ray node name tag to the AWS-specific 'Name' tag.
[ "Convert", "the", "Ray", "node", "name", "tag", "to", "the", "AWS", "-", "specific", "Name", "tag", "." ]
python
train
sirfoga/pyhal
hal/strings/utils.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/strings/utils.py#L33-L42
def get_average_length_of_string(strings): """Computes average length of words :param strings: list of words :return: Average length of word on list """ if not strings: return 0 return sum(len(word) for word in strings) / len(strings)
[ "def", "get_average_length_of_string", "(", "strings", ")", ":", "if", "not", "strings", ":", "return", "0", "return", "sum", "(", "len", "(", "word", ")", "for", "word", "in", "strings", ")", "/", "len", "(", "strings", ")" ]
Computes average length of words :param strings: list of words :return: Average length of word on list
[ "Computes", "average", "length", "of", "words" ]
python
train
Robpol86/libnl
libnl/cache_mngt.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/cache_mngt.py#L97-L123
def nl_cache_mngt_register(ops): """Register a set of cache operations. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L252 Called by users of caches to announce the availability of a certain cache type. Positional arguments: ops -- cache operations (nl_cache_ops class instance). Returns: 0 on success or a negative error code. """ global cache_ops if not ops.co_name or not ops.co_obj_ops: return -NLE_INVAL with cache_ops_lock: if _nl_cache_ops_lookup(ops.co_name): return -NLE_EXIST ops.co_refcnt = 0 ops.co_next = cache_ops cache_ops = ops _LOGGER.debug('Registered cache operations {0}'.format(ops.co_name)) return 0
[ "def", "nl_cache_mngt_register", "(", "ops", ")", ":", "global", "cache_ops", "if", "not", "ops", ".", "co_name", "or", "not", "ops", ".", "co_obj_ops", ":", "return", "-", "NLE_INVAL", "with", "cache_ops_lock", ":", "if", "_nl_cache_ops_lookup", "(", "ops", ...
Register a set of cache operations. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L252 Called by users of caches to announce the availability of a certain cache type. Positional arguments: ops -- cache operations (nl_cache_ops class instance). Returns: 0 on success or a negative error code.
[ "Register", "a", "set", "of", "cache", "operations", "." ]
python
train
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1172-L1197
def _build_dummy_calls(self): """ Generate false if branch with dummy calls Requires kerncraft.h to be included, which defines dummy(...) and var_false. :return: dummy statement """ # Make sure nothing gets removed by inserting dummy calls dummy_calls = [] for d in self.kernel_ast.block_items: # Only consider toplevel declarations from kernel ast if type(d) is not c_ast.Decl: continue if type(d.type) is c_ast.ArrayDecl: dummy_calls.append(c_ast.FuncCall( c_ast.ID('dummy'), c_ast.ExprList([c_ast.ID(d.name)]))) else: dummy_calls.append(c_ast.FuncCall( c_ast.ID('dummy'), c_ast.ExprList([c_ast.UnaryOp('&', c_ast.ID(d.name))]))) dummy_stmt = c_ast.If( cond=c_ast.ID('var_false'), iftrue=c_ast.Compound(dummy_calls), iffalse=None) return dummy_stmt
[ "def", "_build_dummy_calls", "(", "self", ")", ":", "# Make sure nothing gets removed by inserting dummy calls", "dummy_calls", "=", "[", "]", "for", "d", "in", "self", ".", "kernel_ast", ".", "block_items", ":", "# Only consider toplevel declarations from kernel ast", "if"...
Generate false if branch with dummy calls Requires kerncraft.h to be included, which defines dummy(...) and var_false. :return: dummy statement
[ "Generate", "false", "if", "branch", "with", "dummy", "calls" ]
python
test
aliyun/aliyun-odps-python-sdk
odps/console.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/console.py#L498-L539
def _write_with_fallback(s, write, fileobj): """Write the supplied string with the given write function like ``write(s)``, but use a writer for the locale's preferred encoding in case of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or 'latin-1'. """ if IPythonIOStream is not None and isinstance(fileobj, IPythonIOStream): # If the output stream is an IPython.utils.io.IOStream object that's # not going to be very helpful to us since it doesn't raise any # exceptions when an error occurs writing to its underlying stream. # There's no advantage to us using IOStream.write directly though; # instead just write directly to its underlying stream: write = fileobj.stream.write try: write(s) return write except UnicodeEncodeError: # Let's try the next approach... pass enc = locale.getpreferredencoding() try: Writer = codecs.getwriter(enc) except LookupError: Writer = codecs.getwriter(_DEFAULT_ENCODING) f = Writer(fileobj) write = f.write try: write(s) return write except UnicodeEncodeError: Writer = codecs.getwriter('latin-1') f = Writer(fileobj) write = f.write # If this doesn't work let the exception bubble up; I'm out of ideas write(s) return write
[ "def", "_write_with_fallback", "(", "s", ",", "write", ",", "fileobj", ")", ":", "if", "IPythonIOStream", "is", "not", "None", "and", "isinstance", "(", "fileobj", ",", "IPythonIOStream", ")", ":", "# If the output stream is an IPython.utils.io.IOStream object that's", ...
Write the supplied string with the given write function like ``write(s)``, but use a writer for the locale's preferred encoding in case of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or 'latin-1'.
[ "Write", "the", "supplied", "string", "with", "the", "given", "write", "function", "like", "write", "(", "s", ")", "but", "use", "a", "writer", "for", "the", "locale", "s", "preferred", "encoding", "in", "case", "of", "a", "UnicodeEncodeError", ".", "Faili...
python
train
sergiocorreia/panflute
examples/panflute/gabc.py
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/examples/panflute/gabc.py#L120-L157
def gabc(elem, doc): """Handle gabc file inclusion and gabc code block.""" if type(elem) == Code and "gabc" in elem.classes: if doc.format == "latex": if elem.identifier == "": label = "" else: label = '\\label{' + elem.identifier + '}' return latex( "\n\\smallskip\n{%\n" + latexsnippet('\\gregorioscore{' + elem.text + '}', elem.attributes) + "%\n}" + label ) else: infile = elem.text + ( '.gabc' if '.gabc' not in elem.text else '' ) with open(infile, 'r') as doc: code = doc.read().split('%%\n')[1] return Image(png( elem.text, latexsnippet('\\gregorioscore', elem.attributes) )) elif type(elem) == CodeBlock and "gabc" in elem.classes: if doc.format == "latex": if elem.identifier == "": label = "" else: label = '\\label{' + elem.identifier + '}' return latexblock( "\n\\smallskip\n{%\n" + latexsnippet('\\gabcsnippet{' + elem.text + '}', elem.attributes) + "%\n}" + label ) else: return Para(Image(url=png(elem.text, latexsnippet('\\gabcsnippet', elem.attributes))))
[ "def", "gabc", "(", "elem", ",", "doc", ")", ":", "if", "type", "(", "elem", ")", "==", "Code", "and", "\"gabc\"", "in", "elem", ".", "classes", ":", "if", "doc", ".", "format", "==", "\"latex\"", ":", "if", "elem", ".", "identifier", "==", "\"\"",...
Handle gabc file inclusion and gabc code block.
[ "Handle", "gabc", "file", "inclusion", "and", "gabc", "code", "block", "." ]
python
train
hydpy-dev/hydpy
hydpy/auxs/armatools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/armatools.py#L586-L591
def norm_coefs(self): """Multiply all coefficients by the same factor, so that their sum becomes one.""" sum_coefs = self.sum_coefs self.ar_coefs /= sum_coefs self.ma_coefs /= sum_coefs
[ "def", "norm_coefs", "(", "self", ")", ":", "sum_coefs", "=", "self", ".", "sum_coefs", "self", ".", "ar_coefs", "/=", "sum_coefs", "self", ".", "ma_coefs", "/=", "sum_coefs" ]
Multiply all coefficients by the same factor, so that their sum becomes one.
[ "Multiply", "all", "coefficients", "by", "the", "same", "factor", "so", "that", "their", "sum", "becomes", "one", "." ]
python
train
google/brotli
research/brotlidump.py
https://github.com/google/brotli/blob/4b2b2d4f83ffeaac7708e44409fe34896a01a278/research/brotlidump.py#L1190-L1200
def explanation(self, index, extra): """ >>> d = DistanceAlphabet('D', NPOSTFIX=2, NDIRECT=10) >>> d[55].explanation(13) '11[1101]01-5: [0]+240' """ extraBits = self.extraBits(index) extraString = '[{:0{}b}]'.format(extra, extraBits) return '{0}: [{1[0]}]{1[1]:+d}'.format( self.mnemonic(index, True).replace('x'*(extraBits or 1), extraString), self.value(index, extra))
[ "def", "explanation", "(", "self", ",", "index", ",", "extra", ")", ":", "extraBits", "=", "self", ".", "extraBits", "(", "index", ")", "extraString", "=", "'[{:0{}b}]'", ".", "format", "(", "extra", ",", "extraBits", ")", "return", "'{0}: [{1[0]}]{1[1]:+d}'...
>>> d = DistanceAlphabet('D', NPOSTFIX=2, NDIRECT=10) >>> d[55].explanation(13) '11[1101]01-5: [0]+240'
[ ">>>", "d", "=", "DistanceAlphabet", "(", "D", "NPOSTFIX", "=", "2", "NDIRECT", "=", "10", ")", ">>>", "d", "[", "55", "]", ".", "explanation", "(", "13", ")", "11", "[", "1101", "]", "01", "-", "5", ":", "[", "0", "]", "+", "240" ]
python
test
danielhrisca/asammdf
asammdf/blocks/utils.py
https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L1178-L1237
def master_using_raster(mdf, raster, endpoint=False): """ get single master based on the raster Parameters ---------- mdf : asammdf.MDF measurement object raster : float new raster endpoint=False : bool include maximum time stamp in the new master Returns ------- master : np.array new master """ if not raster: master = np.array([], dtype='<f8') else: t_min = [] t_max = [] for i, group in enumerate(mdf.groups): cycles_nr = group.channel_group.cycles_nr if cycles_nr: master_min = mdf.get_master( i, record_offset=0, record_count=1, ) if len(master_min): t_min.append(master_min[0]) mdf._master_channel_cache.clear() master_max = mdf.get_master( i, record_offset=cycles_nr-1, record_count=1, ) if len(master_max): t_max.append(master_max[0]) mdf._master_channel_cache.clear() if t_min: t_min = np.amin(t_min) t_max = np.amax(t_max) num = float(np.float32((t_max - t_min) / raster)) if int(num) == num: master = np.linspace(t_min, t_max, int(num) + 1) else: master = np.arange(t_min, t_max, raster) if endpoint: master = np.concatenate([master, [t_max]]) else: master = np.array([], dtype='<f8') return master
[ "def", "master_using_raster", "(", "mdf", ",", "raster", ",", "endpoint", "=", "False", ")", ":", "if", "not", "raster", ":", "master", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "'<f8'", ")", "else", ":", "t_min", "=", "[", "]", ...
get single master based on the raster Parameters ---------- mdf : asammdf.MDF measurement object raster : float new raster endpoint=False : bool include maximum time stamp in the new master Returns ------- master : np.array new master
[ "get", "single", "master", "based", "on", "the", "raster" ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L403-L409
def database_add_tags(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /database-xxxx/addTags API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FaddTags """ return DXHTTPRequest('/%s/addTags' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "database_add_tags", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/addTags'", "%", "object_id", ",", "input_params", ",", "always_retry", ...
Invokes the /database-xxxx/addTags API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FaddTags
[ "Invokes", "the", "/", "database", "-", "xxxx", "/", "addTags", "API", "method", "." ]
python
train
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L286-L307
def single_registers(op): """ Given a list of registers like ['a', 'bc', 'h', 'hl'] returns a set of single registers: ['a', 'b', 'c', 'h', 'l']. Non register parameters, like numbers will be ignored. """ result = set() if isinstance(op, str): op = [op] for x in op: if is_8bit_register(x): result = result.union([x]) elif x == 'sp': result.add(x) elif x == 'af': result = result.union(['a', 'f']) elif x == "af'": result = result.union(["a'", "f'"]) elif is_16bit_register(x): # Must be a 16bit reg or we have an internal error! result = result.union([LO16(x), HI16(x)]) return list(result)
[ "def", "single_registers", "(", "op", ")", ":", "result", "=", "set", "(", ")", "if", "isinstance", "(", "op", ",", "str", ")", ":", "op", "=", "[", "op", "]", "for", "x", "in", "op", ":", "if", "is_8bit_register", "(", "x", ")", ":", "result", ...
Given a list of registers like ['a', 'bc', 'h', 'hl'] returns a set of single registers: ['a', 'b', 'c', 'h', 'l']. Non register parameters, like numbers will be ignored.
[ "Given", "a", "list", "of", "registers", "like", "[", "a", "bc", "h", "hl", "]", "returns", "a", "set", "of", "single", "registers", ":", "[", "a", "b", "c", "h", "l", "]", ".", "Non", "register", "parameters", "like", "numbers", "will", "be", "ign...
python
train
apple/turicreate
src/unity/python/turicreate/toolkits/evaluation.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L87-L245
def log_loss(targets, predictions, index_map=None): r""" Compute the logloss for the given targets and the given predicted probabilities. This quantity is defined to be the negative of the sum of the log probability of each observation, normalized by the number of observations: .. math:: \textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N} (y_i \log(p_i) + (1-y_i)\log(1-p_i)) , where y_i is the i'th target value and p_i is the i'th predicted probability. For multiclass situations, the definition is a slight generalization of the above: .. math:: \textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N} \sum_{j \in 1, \ldots, L} (y_{ij} \log(p_{ij})) , where :math:`L` is the number of classes and :math:`y_{ij}` indicates that observation `i` has class label `j`. Parameters ---------- targets : SArray Ground truth class labels. This can either contain integers or strings. predictions : SArray The predicted probability that corresponds to each target value. For binary classification, the probability corresponds to the probability of the "positive" label being predicted. For multi-class classification, the predictions are expected to be an array of predictions for each class. index_map : dict[int], [None (default)] For binary classification, a dictionary mapping the two target labels to either 0 (negative) or 1 (positive). For multi-class classification, a dictionary mapping potential target labels to the associated index into the vectors in ``predictions``. Returns ------- out : float The log_loss. See Also -------- accuracy Notes ----- - For binary classification, when the target label is of type "string", then the labels are sorted alphanumerically and the largest label is chosen as the "positive" label. For example, if the classifier labels are {"cat", "dog"}, then "dog" is chosen as the positive label for the binary classification case. This behavior can be overridden by providing an explicit ``index_map``. 
- For multi-class classification, when the target label is of type "string", then the probability vector is assumed to be a vector of probabilities of classes as sorted alphanumerically. Hence, for the probability vector [0.1, 0.2, 0.7] for a dataset with classes "cat", "dog", and "rat"; the 0.1 corresponds to "cat", the 0.2 to "dog" and the 0.7 to "rat". This behavior can be overridden by providing an explicit ``index_map``. - Logloss is undefined when a probability value p = 0, or p = 1. Hence, probabilities are clipped to max(EPSILON, min(1 - EPSILON, p)) where EPSILON = 1e-15. References ---------- https://www.kaggle.com/wiki/LogLoss Examples -------- .. sourcecode:: python import turicreate as tc targets = tc.SArray([0, 1, 1, 0]) predictions = tc.SArray([0.1, 0.35, 0.7, 0.99]) log_loss = tc.evaluation.log_loss(targets, predictions) For binary classification, when the target label is of type "string", then the labels are sorted alphanumerically and the largest label is chosen as the "positive" label. .. sourcecode:: python import turicreate as tc targets = tc.SArray(["cat", "dog", "dog", "cat"]) predictions = tc.SArray([0.1, 0.35, 0.7, 0.99]) log_loss = tc.evaluation.log_loss(targets, predictions) In the multi-class setting, log-loss requires a vector of probabilities (that sum to 1) for each class label in the input dataset. In this example, there are three classes [0, 1, 2], and the vector of probabilities correspond to the probability of prediction for each of the three classes. .. sourcecode:: python target = tc.SArray([ 1, 0, 2, 1]) predictions = tc.SArray([[.1, .8, 0.1], [.9, .1, 0.0], [.8, .1, 0.1], [.3, .6, 0.1]]) log_loss = tc.evaluation.log_loss(targets, predictions) For multi-class classification, when the target label is of type "string", then the probability vector is assumed to be a vector of probabilities of class as sorted alphanumerically. 
Hence, for the probability vector [0.1, 0.2, 0.7] for a dataset with classes "cat", "dog", and "rat"; the 0.1 corresponds to "cat", the 0.2 to "dog" and the 0.7 to "rat". .. sourcecode:: python target = tc.SArray([ "dog", "cat", "foosa", "dog"]) predictions = tc.SArray([[.1, .8, 0.1], [.9, .1, 0.0], [.8, .1, 0.1], [.3, .6, 0.1]]) log_loss = tc.evaluation.log_loss(targets, predictions) If the probability vectors contain predictions for labels not present among the targets, an explicit index map must be provided. .. sourcecode:: python target = tc.SArray([ "dog", "cat", "cat", "dog"]) predictions = tc.SArray([[.1, .8, 0.1], [.9, .1, 0.0], [.8, .1, 0.1], [.3, .6, 0.1]]) index_map = {"cat": 0, "dog": 1, "foosa": 2} log_loss = tc.evaluation.log_loss(targets, predictions, index_map=index_map) """ _supervised_evaluation_error_checking(targets, predictions) _check_prob_and_prob_vector(predictions) _check_target_not_float(targets) _check_index_map(index_map) multiclass = predictions.dtype not in [float, int] opts = {} if index_map is not None: opts['index_map'] = index_map if multiclass: result = _turicreate.extensions._supervised_streaming_evaluator(targets, predictions, "multiclass_logloss", opts) else: result = _turicreate.extensions._supervised_streaming_evaluator(targets, predictions, "binary_logloss", opts) return result
[ "def", "log_loss", "(", "targets", ",", "predictions", ",", "index_map", "=", "None", ")", ":", "_supervised_evaluation_error_checking", "(", "targets", ",", "predictions", ")", "_check_prob_and_prob_vector", "(", "predictions", ")", "_check_target_not_float", "(", "t...
r""" Compute the logloss for the given targets and the given predicted probabilities. This quantity is defined to be the negative of the sum of the log probability of each observation, normalized by the number of observations: .. math:: \textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N} (y_i \log(p_i) + (1-y_i)\log(1-p_i)) , where y_i is the i'th target value and p_i is the i'th predicted probability. For multiclass situations, the definition is a slight generalization of the above: .. math:: \textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N} \sum_{j \in 1, \ldots, L} (y_{ij} \log(p_{ij})) , where :math:`L` is the number of classes and :math:`y_{ij}` indicates that observation `i` has class label `j`. Parameters ---------- targets : SArray Ground truth class labels. This can either contain integers or strings. predictions : SArray The predicted probability that corresponds to each target value. For binary classification, the probability corresponds to the probability of the "positive" label being predicted. For multi-class classification, the predictions are expected to be an array of predictions for each class. index_map : dict[int], [None (default)] For binary classification, a dictionary mapping the two target labels to either 0 (negative) or 1 (positive). For multi-class classification, a dictionary mapping potential target labels to the associated index into the vectors in ``predictions``. Returns ------- out : float The log_loss. See Also -------- accuracy Notes ----- - For binary classification, when the target label is of type "string", then the labels are sorted alphanumerically and the largest label is chosen as the "positive" label. For example, if the classifier labels are {"cat", "dog"}, then "dog" is chosen as the positive label for the binary classification case. This behavior can be overridden by providing an explicit ``index_map``. 
- For multi-class classification, when the target label is of type "string", then the probability vector is assumed to be a vector of probabilities of classes as sorted alphanumerically. Hence, for the probability vector [0.1, 0.2, 0.7] for a dataset with classes "cat", "dog", and "rat"; the 0.1 corresponds to "cat", the 0.2 to "dog" and the 0.7 to "rat". This behavior can be overridden by providing an explicit ``index_map``. - Logloss is undefined when a probability value p = 0, or p = 1. Hence, probabilities are clipped to max(EPSILON, min(1 - EPSILON, p)) where EPSILON = 1e-15. References ---------- https://www.kaggle.com/wiki/LogLoss Examples -------- .. sourcecode:: python import turicreate as tc targets = tc.SArray([0, 1, 1, 0]) predictions = tc.SArray([0.1, 0.35, 0.7, 0.99]) log_loss = tc.evaluation.log_loss(targets, predictions) For binary classification, when the target label is of type "string", then the labels are sorted alphanumerically and the largest label is chosen as the "positive" label. .. sourcecode:: python import turicreate as tc targets = tc.SArray(["cat", "dog", "dog", "cat"]) predictions = tc.SArray([0.1, 0.35, 0.7, 0.99]) log_loss = tc.evaluation.log_loss(targets, predictions) In the multi-class setting, log-loss requires a vector of probabilities (that sum to 1) for each class label in the input dataset. In this example, there are three classes [0, 1, 2], and the vector of probabilities correspond to the probability of prediction for each of the three classes. .. sourcecode:: python target = tc.SArray([ 1, 0, 2, 1]) predictions = tc.SArray([[.1, .8, 0.1], [.9, .1, 0.0], [.8, .1, 0.1], [.3, .6, 0.1]]) log_loss = tc.evaluation.log_loss(targets, predictions) For multi-class classification, when the target label is of type "string", then the probability vector is assumed to be a vector of probabilities of class as sorted alphanumerically. 
Hence, for the probability vector [0.1, 0.2, 0.7] for a dataset with classes "cat", "dog", and "rat"; the 0.1 corresponds to "cat", the 0.2 to "dog" and the 0.7 to "rat". .. sourcecode:: python target = tc.SArray([ "dog", "cat", "foosa", "dog"]) predictions = tc.SArray([[.1, .8, 0.1], [.9, .1, 0.0], [.8, .1, 0.1], [.3, .6, 0.1]]) log_loss = tc.evaluation.log_loss(targets, predictions) If the probability vectors contain predictions for labels not present among the targets, an explicit index map must be provided. .. sourcecode:: python target = tc.SArray([ "dog", "cat", "cat", "dog"]) predictions = tc.SArray([[.1, .8, 0.1], [.9, .1, 0.0], [.8, .1, 0.1], [.3, .6, 0.1]]) index_map = {"cat": 0, "dog": 1, "foosa": 2} log_loss = tc.evaluation.log_loss(targets, predictions, index_map=index_map)
[ "r", "Compute", "the", "logloss", "for", "the", "given", "targets", "and", "the", "given", "predicted", "probabilities", ".", "This", "quantity", "is", "defined", "to", "be", "the", "negative", "of", "the", "sum", "of", "the", "log", "probability", "of", "...
python
train
fishtown-analytics/dbt
core/dbt/task/deps.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/task/deps.py#L259-L279
def _checkout(self, project): """Performs a shallow clone of the repository into the downloads directory. This function can be called repeatedly. If the project has already been checked out at this version, it will be a no-op. Returns the path to the checked out directory.""" if len(self.version) != 1: dbt.exceptions.raise_dependency_error( 'Cannot checkout repository until the version is pinned.') try: dir_ = dbt.clients.git.clone_and_checkout( self.git, DOWNLOADS_PATH, branch=self.version[0], dirname=self._checkout_name) except dbt.exceptions.ExecutableError as exc: if exc.cmd and exc.cmd[0] == 'git': logger.error( 'Make sure git is installed on your machine. More ' 'information: ' 'https://docs.getdbt.com/docs/package-management' ) raise return os.path.join(DOWNLOADS_PATH, dir_)
[ "def", "_checkout", "(", "self", ",", "project", ")", ":", "if", "len", "(", "self", ".", "version", ")", "!=", "1", ":", "dbt", ".", "exceptions", ".", "raise_dependency_error", "(", "'Cannot checkout repository until the version is pinned.'", ")", "try", ":", ...
Performs a shallow clone of the repository into the downloads directory. This function can be called repeatedly. If the project has already been checked out at this version, it will be a no-op. Returns the path to the checked out directory.
[ "Performs", "a", "shallow", "clone", "of", "the", "repository", "into", "the", "downloads", "directory", ".", "This", "function", "can", "be", "called", "repeatedly", ".", "If", "the", "project", "has", "already", "been", "checked", "out", "at", "this", "ver...
python
train
matrix-org/matrix-python-sdk
matrix_client/room.py
https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/room.py#L405-L412
def set_room_name(self, name): """Return True if room name successfully changed.""" try: self.client.api.set_room_name(self.room_id, name) self.name = name return True except MatrixRequestError: return False
[ "def", "set_room_name", "(", "self", ",", "name", ")", ":", "try", ":", "self", ".", "client", ".", "api", ".", "set_room_name", "(", "self", ".", "room_id", ",", "name", ")", "self", ".", "name", "=", "name", "return", "True", "except", "MatrixRequest...
Return True if room name successfully changed.
[ "Return", "True", "if", "room", "name", "successfully", "changed", "." ]
python
train
inveniosoftware/invenio-files-rest
invenio_files_rest/views.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/views.py#L413-L422
def get(self, bucket=None, versions=missing, uploads=missing): """Get list of objects in the bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response. """ if uploads is not missing: return self.multipart_listuploads(bucket) else: return self.listobjects(bucket, versions)
[ "def", "get", "(", "self", ",", "bucket", "=", "None", ",", "versions", "=", "missing", ",", "uploads", "=", "missing", ")", ":", "if", "uploads", "is", "not", "missing", ":", "return", "self", ".", "multipart_listuploads", "(", "bucket", ")", "else", ...
Get list of objects in the bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response.
[ "Get", "list", "of", "objects", "in", "the", "bucket", "." ]
python
train
usc-isi-i2/etk
etk/extractors/table_extractor.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/table_extractor.py#L463-L474
def gen_html(row_list): """ Return html table string from a list of data rows """ table = "<table>" for row in row_list: table += "<tr>" cells = row["cells"] for c in cells: t = c['cell'] if c else '' table += t table += "</tr>" table += "</table>" return table
[ "def", "gen_html", "(", "row_list", ")", ":", "table", "=", "\"<table>\"", "for", "row", "in", "row_list", ":", "table", "+=", "\"<tr>\"", "cells", "=", "row", "[", "\"cells\"", "]", "for", "c", "in", "cells", ":", "t", "=", "c", "[", "'cell'", "]", ...
Return html table string from a list of data rows
[ "Return", "html", "table", "string", "from", "a", "list", "of", "data", "rows" ]
python
train
sods/paramz
paramz/model.py
https://github.com/sods/paramz/blob/ae6fc6274b70fb723d91e48fc5026a9bc5a06508/paramz/model.py#L284-L408
def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, df_tolerance=1e-12): """ Check the gradient of the ,odel by comparing to a numerical estimate. If the verbose flag is passed, individual components are tested (and printed) :param verbose: If True, print a "full" checking of each parameter :type verbose: bool :param step: The size of the step around which to linearise the objective :type step: float (default 1e-6) :param tolerance: the tolerance allowed (see note) :type tolerance: float (default 1e-3) Note:- The gradient is considered correct if the ratio of the analytical and numerical gradients is within <tolerance> of unity. The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients. If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually not accurate enough for the tests (shown with blue). """ if not self._model_initialized_: import warnings warnings.warn("This model has not been initialized, try model.inititialize_model()", RuntimeWarning) return False x = self.optimizer_array.copy() if not verbose: # make sure only to test the selected parameters if target_param is None: transformed_index = np.arange(len(x)) else: transformed_index = self._raveled_index_for_transformed(target_param) if transformed_index.size == 0: print("No free parameters to check") return True # just check the global ratio dx = np.zeros(x.shape) dx[transformed_index] = step * (np.sign(np.random.uniform(-1, 1, transformed_index.size)) if transformed_index.size != 2 else 1.) 
# evaulate around the point x f1 = self._objective(x + dx) f2 = self._objective(x - dx) gradient = self._grads(x) dx = dx[transformed_index] gradient = gradient[transformed_index] denominator = (2 * np.dot(dx, gradient)) global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator) global_diff = np.abs(f1 - f2) < tolerance and np.allclose(gradient, 0, atol=tolerance) if global_ratio is np.nan: # pragma: no cover global_ratio = 0 return np.abs(1. - global_ratio) < tolerance or global_diff else: # check the gradient of each parameter individually, and do some pretty printing try: names = self.parameter_names_flat() except NotImplementedError: names = ['Variable %i' % i for i in range(len(x))] # Prepare for pretty-printing header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical', 'dF_ratio'] max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])]) float_len = 10 cols = [max_names] cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))]) cols = np.array(cols) + 5 header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))] header_string = list(map(lambda x: '|'.join(x), [header_string])) separator = '-' * len(header_string[0]) print('\n'.join([header_string[0], separator])) if target_param is None: target_param = self transformed_index = self._raveled_index_for_transformed(target_param) if transformed_index.size == 0: print("No free parameters to check") return True gradient = self._grads(x).copy() np.where(gradient == 0, 1e-312, gradient) ret = True for xind in zip(transformed_index): xx = x.copy() xx[xind] += step f1 = float(self._objective(xx)) xx[xind] -= 2.*step f2 = float(self._objective(xx)) #Avoid divide by zero, if any of the values are above 1e-15, otherwise both values are essentiall #the same if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15: df_ratio = np.abs((f1 - f2) / min(f1, f2)) else: # pragma: no cover df_ratio = 1.0 df_unstable = df_ratio < 
df_tolerance numerical_gradient = (f1 - f2) / (2. * step) if np.all(gradient[xind] == 0): # pragma: no cover ratio = (f1 - f2) == gradient[xind] else: ratio = (f1 - f2) / (2. * step * gradient[xind]) difference = np.abs(numerical_gradient - gradient[xind]) if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance: formatted_name = "\033[92m {0} \033[0m".format(names[xind]) ret &= True else: # pragma: no cover formatted_name = "\033[91m {0} \033[0m".format(names[xind]) ret &= False if df_unstable: # pragma: no cover formatted_name = "\033[94m {0} \033[0m".format(names[xind]) r = '%.6f' % float(ratio) d = '%.6f' % float(difference) g = '%.6f' % gradient[xind] ng = '%.6f' % float(numerical_gradient) df = '%1.e' % float(df_ratio) grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5]) print(grad_string) self.optimizer_array = x return ret
[ "def", "_checkgrad", "(", "self", ",", "target_param", "=", "None", ",", "verbose", "=", "False", ",", "step", "=", "1e-6", ",", "tolerance", "=", "1e-3", ",", "df_tolerance", "=", "1e-12", ")", ":", "if", "not", "self", ".", "_model_initialized_", ":", ...
Check the gradient of the ,odel by comparing to a numerical estimate. If the verbose flag is passed, individual components are tested (and printed) :param verbose: If True, print a "full" checking of each parameter :type verbose: bool :param step: The size of the step around which to linearise the objective :type step: float (default 1e-6) :param tolerance: the tolerance allowed (see note) :type tolerance: float (default 1e-3) Note:- The gradient is considered correct if the ratio of the analytical and numerical gradients is within <tolerance> of unity. The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients. If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually not accurate enough for the tests (shown with blue).
[ "Check", "the", "gradient", "of", "the", "odel", "by", "comparing", "to", "a", "numerical", "estimate", ".", "If", "the", "verbose", "flag", "is", "passed", "individual", "components", "are", "tested", "(", "and", "printed", ")" ]
python
train
cltk/cltk
cltk/text_reuse/levenshtein.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/text_reuse/levenshtein.py#L124-L141
def ratio(string_a, string_b): """At the most basic level, return a Levenshtein distance ratio via fuzzywuzzy. :param string_a: str :param string_b: str :return: float """ from cltk.utils.cltk_logger import logger try: from fuzzywuzzy import fuzz except ImportError as imp_err: # pragma: no cover message = "'fuzzywuzzy' library required for this module: %s. Install with `pip install fuzzywuzzy python-Levenshtein`" % imp_err logger.error(message) print(message) raise ImportError return fuzz.ratio(string_a, string_b) / 100
[ "def", "ratio", "(", "string_a", ",", "string_b", ")", ":", "from", "cltk", ".", "utils", ".", "cltk_logger", "import", "logger", "try", ":", "from", "fuzzywuzzy", "import", "fuzz", "except", "ImportError", "as", "imp_err", ":", "# pragma: no cover", "message"...
At the most basic level, return a Levenshtein distance ratio via fuzzywuzzy. :param string_a: str :param string_b: str :return: float
[ "At", "the", "most", "basic", "level", "return", "a", "Levenshtein", "distance", "ratio", "via", "fuzzywuzzy", ".", ":", "param", "string_a", ":", "str", ":", "param", "string_b", ":", "str", ":", "return", ":", "float" ]
python
train
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/components/author_picker.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/author_picker.py#L246-L253
def bind(cls): """ Bind the buttons to adapter's event handler. """ super(cls, cls).bind() cls.search_btn_el.bind("click", cls.start) cls.input_el.bind("keypress", func_on_enter(cls.start))
[ "def", "bind", "(", "cls", ")", ":", "super", "(", "cls", ",", "cls", ")", ".", "bind", "(", ")", "cls", ".", "search_btn_el", ".", "bind", "(", "\"click\"", ",", "cls", ".", "start", ")", "cls", ".", "input_el", ".", "bind", "(", "\"keypress\"", ...
Bind the buttons to adapter's event handler.
[ "Bind", "the", "buttons", "to", "adapter", "s", "event", "handler", "." ]
python
train
NoneGG/aredis
aredis/commands/sets.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/sets.py#L110-L116
async def sunionstore(self, dest, keys, *args): """ Store the union of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. """ args = list_or_args(keys, args) return await self.execute_command('SUNIONSTORE', dest, *args)
[ "async", "def", "sunionstore", "(", "self", ",", "dest", ",", "keys", ",", "*", "args", ")", ":", "args", "=", "list_or_args", "(", "keys", ",", "args", ")", "return", "await", "self", ".", "execute_command", "(", "'SUNIONSTORE'", ",", "dest", ",", "*"...
Store the union of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set.
[ "Store", "the", "union", "of", "sets", "specified", "by", "keys", "into", "a", "new", "set", "named", "dest", ".", "Returns", "the", "number", "of", "keys", "in", "the", "new", "set", "." ]
python
train
amzn/ion-python
amazon/ion/reader_binary.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_binary.py#L838-L870
def raw_reader(queue=None): """Returns a raw binary reader co-routine. Args: queue (Optional[BufferQueue]): The buffer read data for parsing, if ``None`` a new one will be created. Yields: IonEvent: parse events, will have an event type of ``INCOMPLETE`` if data is needed in the middle of a value or ``STREAM_END`` if there is no data **and** the parser is not in the middle of parsing a value. Receives :class:`DataEvent`, with :class:`ReadEventType` of ``NEXT`` or ``SKIP`` to iterate over values, or ``DATA`` if the last event was a ``INCOMPLETE`` or ``STREAM_END`` event type. ``SKIP`` is only allowed within a container. A reader is *in* a container when the ``CONTAINER_START`` event type is encountered and *not in* a container when the ``CONTAINER_END`` event type for that container is encountered. """ if queue is None: queue = BufferQueue() ctx = _HandlerContext( position=0, limit=None, queue=queue, field_name=None, annotations=None, depth=0, whence=None ) return reader_trampoline(_container_handler(None, ctx))
[ "def", "raw_reader", "(", "queue", "=", "None", ")", ":", "if", "queue", "is", "None", ":", "queue", "=", "BufferQueue", "(", ")", "ctx", "=", "_HandlerContext", "(", "position", "=", "0", ",", "limit", "=", "None", ",", "queue", "=", "queue", ",", ...
Returns a raw binary reader co-routine. Args: queue (Optional[BufferQueue]): The buffer read data for parsing, if ``None`` a new one will be created. Yields: IonEvent: parse events, will have an event type of ``INCOMPLETE`` if data is needed in the middle of a value or ``STREAM_END`` if there is no data **and** the parser is not in the middle of parsing a value. Receives :class:`DataEvent`, with :class:`ReadEventType` of ``NEXT`` or ``SKIP`` to iterate over values, or ``DATA`` if the last event was a ``INCOMPLETE`` or ``STREAM_END`` event type. ``SKIP`` is only allowed within a container. A reader is *in* a container when the ``CONTAINER_START`` event type is encountered and *not in* a container when the ``CONTAINER_END`` event type for that container is encountered.
[ "Returns", "a", "raw", "binary", "reader", "co", "-", "routine", "." ]
python
train
pycontribs/pyrax
pyrax/autoscale.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L485-L495
def update_launch_metadata(self, scaling_group, metadata): """ Adds the given metadata dict to the existing metadata for the scaling group's launch configuration. """ if not isinstance(scaling_group, ScalingGroup): scaling_group = self.get(scaling_group) curr_meta = scaling_group.launchConfiguration.get("args", {}).get( "server", {}).get("metadata", {}) curr_meta.update(metadata) return self.update_launch_config(scaling_group, metadata=curr_meta)
[ "def", "update_launch_metadata", "(", "self", ",", "scaling_group", ",", "metadata", ")", ":", "if", "not", "isinstance", "(", "scaling_group", ",", "ScalingGroup", ")", ":", "scaling_group", "=", "self", ".", "get", "(", "scaling_group", ")", "curr_meta", "="...
Adds the given metadata dict to the existing metadata for the scaling group's launch configuration.
[ "Adds", "the", "given", "metadata", "dict", "to", "the", "existing", "metadata", "for", "the", "scaling", "group", "s", "launch", "configuration", "." ]
python
train
quantmind/dynts
dynts/api/timeseries.py
https://github.com/quantmind/dynts/blob/21ac57c648bfec402fa6b1fe569496cf098fb5e8/dynts/api/timeseries.py#L119-L128
def getalgo(self, operation, name): '''Return the algorithm for *operation* named *name*''' if operation not in self._algorithms: raise NotAvailable('{0} not registered.'.format(operation)) oper = self._algorithms[operation] try: return oper[name] except KeyError: raise NotAvailable('{0} algorithm {1} not registered.' .format(operation, name))
[ "def", "getalgo", "(", "self", ",", "operation", ",", "name", ")", ":", "if", "operation", "not", "in", "self", ".", "_algorithms", ":", "raise", "NotAvailable", "(", "'{0} not registered.'", ".", "format", "(", "operation", ")", ")", "oper", "=", "self", ...
Return the algorithm for *operation* named *name*
[ "Return", "the", "algorithm", "for", "*", "operation", "*", "named", "*", "name", "*" ]
python
train
saltstack/salt
salt/cloud/clouds/gogrid.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gogrid.py#L367-L382
def list_common_lookups(kwargs=None, call=None): ''' List common lookups for a particular type of item .. versionadded:: 2015.8.0 ''' if kwargs is None: kwargs = {} args = {} if 'lookup' in kwargs: args['lookup'] = kwargs['lookup'] response = _query('common', 'lookup/list', args=args) return response
[ "def", "list_common_lookups", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "args", "=", "{", "}", "if", "'lookup'", "in", "kwargs", ":", "args", "[", "'lookup'", "]", "=...
List common lookups for a particular type of item .. versionadded:: 2015.8.0
[ "List", "common", "lookups", "for", "a", "particular", "type", "of", "item" ]
python
train
sony/nnabla
python/src/nnabla/models/utils.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/models/utils.py#L41-L50
def get_model_url_base(): ''' Returns a root folder for models. ''' url_base = get_model_url_base_from_env() if url_base is not None: logger.info('NNBLA_MODELS_URL_BASE is set as {}.'.format(url_base)) else: url_base = 'https://nnabla.org/pretrained-models/nnp_models/' return url_base
[ "def", "get_model_url_base", "(", ")", ":", "url_base", "=", "get_model_url_base_from_env", "(", ")", "if", "url_base", "is", "not", "None", ":", "logger", ".", "info", "(", "'NNBLA_MODELS_URL_BASE is set as {}.'", ".", "format", "(", "url_base", ")", ")", "else...
Returns a root folder for models.
[ "Returns", "a", "root", "folder", "for", "models", "." ]
python
train
fananimi/pyzk
zk/base.py
https://github.com/fananimi/pyzk/blob/1a765d616526efdcb4c9adfcc9b1d10f6ed8b938/zk/base.py#L739-L751
def set_time(self, timestamp): """ set Device time (pass datetime object) :param timestamp: python datetime object """ command = const.CMD_SET_TIME command_string = pack(b'I', self.__encode_time(timestamp)) cmd_response = self.__send_command(command, command_string) if cmd_response.get('status'): return True else: raise ZKErrorResponse("can't set time")
[ "def", "set_time", "(", "self", ",", "timestamp", ")", ":", "command", "=", "const", ".", "CMD_SET_TIME", "command_string", "=", "pack", "(", "b'I'", ",", "self", ".", "__encode_time", "(", "timestamp", ")", ")", "cmd_response", "=", "self", ".", "__send_c...
set Device time (pass datetime object) :param timestamp: python datetime object
[ "set", "Device", "time", "(", "pass", "datetime", "object", ")" ]
python
train
mongodb/mongo-python-driver
pymongo/client_session.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/client_session.py#L504-L527
def start_transaction(self, read_concern=None, write_concern=None, read_preference=None): """Start a multi-statement transaction. Takes the same arguments as :class:`TransactionOptions`. .. versionadded:: 3.7 """ self._check_ended() if self._in_transaction: raise InvalidOperation("Transaction already in progress") read_concern = self._inherit_option("read_concern", read_concern) write_concern = self._inherit_option("write_concern", write_concern) read_preference = self._inherit_option( "read_preference", read_preference) self._transaction.opts = TransactionOptions( read_concern, write_concern, read_preference) self._transaction.reset() self._transaction.state = _TxnState.STARTING self._start_retryable_write() return _TransactionContext(self)
[ "def", "start_transaction", "(", "self", ",", "read_concern", "=", "None", ",", "write_concern", "=", "None", ",", "read_preference", "=", "None", ")", ":", "self", ".", "_check_ended", "(", ")", "if", "self", ".", "_in_transaction", ":", "raise", "InvalidOp...
Start a multi-statement transaction. Takes the same arguments as :class:`TransactionOptions`. .. versionadded:: 3.7
[ "Start", "a", "multi", "-", "statement", "transaction", "." ]
python
train
Qiskit/qiskit-terra
qiskit/qasm/node/gate.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qasm/node/gate.py#L53-L60
def qasm(self, prec=15): """Return the corresponding OPENQASM string.""" string = "gate " + self.name if self.arguments is not None: string += "(" + self.arguments.qasm(prec) + ")" string += " " + self.bitlist.qasm(prec) + "\n" string += "{\n" + self.body.qasm(prec) + "}" return string
[ "def", "qasm", "(", "self", ",", "prec", "=", "15", ")", ":", "string", "=", "\"gate \"", "+", "self", ".", "name", "if", "self", ".", "arguments", "is", "not", "None", ":", "string", "+=", "\"(\"", "+", "self", ".", "arguments", ".", "qasm", "(", ...
Return the corresponding OPENQASM string.
[ "Return", "the", "corresponding", "OPENQASM", "string", "." ]
python
test
romanz/trezor-agent
libagent/ssh/protocol.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/protocol.py#L108-L118
def list_pubs(self, buf): """SSH v2 public keys are serialized and returned.""" assert not buf.read() keys = self.conn.parse_public_keys() code = util.pack('B', msg_code('SSH2_AGENT_IDENTITIES_ANSWER')) num = util.pack('L', len(keys)) log.debug('available keys: %s', [k['name'] for k in keys]) for i, k in enumerate(keys): log.debug('%2d) %s', i+1, k['fingerprint']) pubs = [util.frame(k['blob']) + util.frame(k['name']) for k in keys] return util.frame(code, num, *pubs)
[ "def", "list_pubs", "(", "self", ",", "buf", ")", ":", "assert", "not", "buf", ".", "read", "(", ")", "keys", "=", "self", ".", "conn", ".", "parse_public_keys", "(", ")", "code", "=", "util", ".", "pack", "(", "'B'", ",", "msg_code", "(", "'SSH2_A...
SSH v2 public keys are serialized and returned.
[ "SSH", "v2", "public", "keys", "are", "serialized", "and", "returned", "." ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L293-L332
def to_bqstorage(self): """Construct a BigQuery Storage API representation of this table. Install the ``google-cloud-bigquery-storage`` package to use this feature. If the ``table_id`` contains a partition identifier (e.g. ``my_table$201812``) or a snapshot identifier (e.g. ``mytable@1234567890``), it is ignored. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions` to filter rows by partition. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers` to select a specific snapshot to read from. Returns: google.cloud.bigquery_storage_v1beta1.types.TableReference: A reference to this table in the BigQuery Storage API. Raises: ValueError: If the :mod:`google.cloud.bigquery_storage_v1beta1` module cannot be imported. """ if bigquery_storage_v1beta1 is None: raise ValueError(_NO_BQSTORAGE_ERROR) table_ref = bigquery_storage_v1beta1.types.TableReference() table_ref.project_id = self._project table_ref.dataset_id = self._dataset_id table_id = self._table_id if "@" in table_id: table_id = table_id.split("@")[0] if "$" in table_id: table_id = table_id.split("$")[0] table_ref.table_id = table_id return table_ref
[ "def", "to_bqstorage", "(", "self", ")", ":", "if", "bigquery_storage_v1beta1", "is", "None", ":", "raise", "ValueError", "(", "_NO_BQSTORAGE_ERROR", ")", "table_ref", "=", "bigquery_storage_v1beta1", ".", "types", ".", "TableReference", "(", ")", "table_ref", "."...
Construct a BigQuery Storage API representation of this table. Install the ``google-cloud-bigquery-storage`` package to use this feature. If the ``table_id`` contains a partition identifier (e.g. ``my_table$201812``) or a snapshot identifier (e.g. ``mytable@1234567890``), it is ignored. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions` to filter rows by partition. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers` to select a specific snapshot to read from. Returns: google.cloud.bigquery_storage_v1beta1.types.TableReference: A reference to this table in the BigQuery Storage API. Raises: ValueError: If the :mod:`google.cloud.bigquery_storage_v1beta1` module cannot be imported.
[ "Construct", "a", "BigQuery", "Storage", "API", "representation", "of", "this", "table", "." ]
python
train
xeroc/python-graphenelib
graphenecommon/wallet.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/wallet.py#L260-L269
def getAccounts(self): """ Return all accounts installed in the wallet database """ pubkeys = self.getPublicKeys() accounts = [] for pubkey in pubkeys: # Filter those keys not for our network if pubkey[: len(self.prefix)] == self.prefix: accounts.extend(self.getAccountsFromPublicKey(pubkey)) return accounts
[ "def", "getAccounts", "(", "self", ")", ":", "pubkeys", "=", "self", ".", "getPublicKeys", "(", ")", "accounts", "=", "[", "]", "for", "pubkey", "in", "pubkeys", ":", "# Filter those keys not for our network", "if", "pubkey", "[", ":", "len", "(", "self", ...
Return all accounts installed in the wallet database
[ "Return", "all", "accounts", "installed", "in", "the", "wallet", "database" ]
python
valid
wmayner/pyphi
pyphi/validate.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/validate.py#L177-L186
def subsystem(s): """Validate a |Subsystem|. Checks its state and cut. """ node_states(s.state) cut(s.cut, s.cut_indices) if config.VALIDATE_SUBSYSTEM_STATES: state_reachable(s) return True
[ "def", "subsystem", "(", "s", ")", ":", "node_states", "(", "s", ".", "state", ")", "cut", "(", "s", ".", "cut", ",", "s", ".", "cut_indices", ")", "if", "config", ".", "VALIDATE_SUBSYSTEM_STATES", ":", "state_reachable", "(", "s", ")", "return", "True...
Validate a |Subsystem|. Checks its state and cut.
[ "Validate", "a", "|Subsystem|", "." ]
python
train
noirbizarre/flask-fs
flask_fs/backends/s3.py
https://github.com/noirbizarre/flask-fs/blob/092e9327384b8411c9bb38ca257ecb558584d201/flask_fs/backends/s3.py#L89-L99
def get_metadata(self, filename): '''Fetch all availabe metadata''' obj = self.bucket.Object(filename) checksum = 'md5:{0}'.format(obj.e_tag[1:-1]) mime = obj.content_type.split(';', 1)[0] if obj.content_type else None return { 'checksum': checksum, 'size': obj.content_length, 'mime': mime, 'modified': obj.last_modified, }
[ "def", "get_metadata", "(", "self", ",", "filename", ")", ":", "obj", "=", "self", ".", "bucket", ".", "Object", "(", "filename", ")", "checksum", "=", "'md5:{0}'", ".", "format", "(", "obj", ".", "e_tag", "[", "1", ":", "-", "1", "]", ")", "mime",...
Fetch all availabe metadata
[ "Fetch", "all", "availabe", "metadata" ]
python
train
openego/ding0
ding0/grid/mv_grid/mv_connect.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/mv_connect.py#L583-L601
def parametrize_lines(mv_grid): """ Set unparametrized branches to default branch type Args ---- mv_grid: MVGridDing0 MV grid instance Notes ----- During the connection process of satellites, new branches are created - these have to be parametrized. """ for branch in mv_grid.graph_edges(): if branch['branch'].kind is None: branch['branch'].kind = mv_grid.default_branch_kind if branch['branch'].type is None: branch['branch'].type = mv_grid.default_branch_type
[ "def", "parametrize_lines", "(", "mv_grid", ")", ":", "for", "branch", "in", "mv_grid", ".", "graph_edges", "(", ")", ":", "if", "branch", "[", "'branch'", "]", ".", "kind", "is", "None", ":", "branch", "[", "'branch'", "]", ".", "kind", "=", "mv_grid"...
Set unparametrized branches to default branch type Args ---- mv_grid: MVGridDing0 MV grid instance Notes ----- During the connection process of satellites, new branches are created - these have to be parametrized.
[ "Set", "unparametrized", "branches", "to", "default", "branch", "type", "Args", "----", "mv_grid", ":", "MVGridDing0", "MV", "grid", "instance" ]
python
train
slightlynybbled/tk_tools
tk_tools/canvas.py
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L579-L595
def to_red(self, on: bool=False): """ Change the LED to red (on or off) :param on: True or False :return: None """ self._on = on if on: self._load_new(led_red_on) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_red(False)) else: self._load_new(led_red) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_red(True))
[ "def", "to_red", "(", "self", ",", "on", ":", "bool", "=", "False", ")", ":", "self", ".", "_on", "=", "on", "if", "on", ":", "self", ".", "_load_new", "(", "led_red_on", ")", "if", "self", ".", "_toggle_on_click", ":", "self", ".", "_canvas", ".",...
Change the LED to red (on or off) :param on: True or False :return: None
[ "Change", "the", "LED", "to", "red", "(", "on", "or", "off", ")", ":", "param", "on", ":", "True", "or", "False", ":", "return", ":", "None" ]
python
train
privacee/freelan-configurator
freelan_configurator/freelan_cfg.py
https://github.com/privacee/freelan-configurator/blob/7c070f8958454792f870ef0d195a7f5da36edb5a/freelan_configurator/freelan_cfg.py#L192-L247
def validate(self): """Validation of configuration to check for required values""" if not self.server.enabled: if self.security.signature_certificate_file is self.security.defaults['signature_certificate_file']: print("ISSUE: If you are not configuring a server, you need to set 'signature_certificate_file'") if self.security.signature_private_key_file is self.security.defaults['signature_private_key_file']: print("ISSUE: If you are not configuring a server, you need to set 'signature_private_key_file'") else: if self.client.enabled: print("ISSUE: Client and server enabled at the same time?") if self.server.protocol is self.server.defaults['protocol']: if self.server.server_certificate_file is self.server.defaults['server_certificate_file'] or \ self.server.server_private_key_file is self.server.defaults['server_private_key_file']: print("ISSUE: 'server_certificate_file' and/or 'server_private_key_file' are not configured and will be auto-generated.") if self.server.certification_authority_certificate_file is self.server.defaults['certification_authority_certificate_file'] or \ self.server.certification_authority_private_key_file is self.server.defaults['certification_authority_private_key_file']: print("ISSUE: 'certification_authority_certificate_file' and/or 'certification_authority_private_key_file' are not configured and will be auto-generated - this is NOT recommended.") if self.server.authentication_script is self.server.defaults['authentication_script']: print("ISSUE: No 'authentication_script' has been provided and all authentication requests will be rejected!") if self.client.enabled: if self.client.server_endpoint is self.client.defaults['server_endpoint']: print("ISSUE: You are running in client mode, but you are using a default server address.") if not self.client.disable_peer_verification is self.client.defaults['disable_peer_verification'] or \ not self.client.disable_host_verification is self.client.defaults['disable_host_verification']: print("ISSUE: 
Disabling peer/host verification is NOT recommended - AT ALL.") if self.client.username is self.client.defaults['username'] or \ self.client.password is self.client.defaults['password']: print("ISSUE: No username and/or password has been configured for a client.") if self.fscp.contact is self.fscp.defaults['contact']: if not self.server.enabled and not self.client.enabled: print("ISSUE: You have not defined any contact points while you are neither running as server nor client.") ## hostname_resolution_protocol=ipv4/ipv6 ## ipv4_address_prefix_length=9.0.0.1/24 ## ipv6_address_prefix_length=2aa1::1/8 if self.security.authority_certificate_file is self.security.defaults['authority_certificate_file']: print("ISSUE: You need to set 'authority_certificate_file'") if self.tap_adapter.ipv4_address_prefix_length is self.tap_adapter.defaults['ipv4_address_prefix_length']: print("ISSUE: You are using the default network address - make sure you set a different ip for every machine 'ipv4_address_prefix_length'")
[ "def", "validate", "(", "self", ")", ":", "if", "not", "self", ".", "server", ".", "enabled", ":", "if", "self", ".", "security", ".", "signature_certificate_file", "is", "self", ".", "security", ".", "defaults", "[", "'signature_certificate_file'", "]", ":"...
Validation of configuration to check for required values
[ "Validation", "of", "configuration", "to", "check", "for", "required", "values" ]
python
train
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L1716-L1741
def delete_authorization(self, authorization_id): """Deletes the ``Authorization`` identified by the given ``Id``. arg: authorization_id (osid.id.Id): the ``Id`` of the ``Authorization`` to delete raise: NotFound - an ``Authorization`` was not found identified by the given ``Id`` raise: NullArgument - ``authorization_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.delete_resource_template collection = JSONClientValidated('authorization', collection='Authorization', runtime=self._runtime) if not isinstance(authorization_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') authorization_map = collection.find_one( dict({'_id': ObjectId(authorization_id.get_identifier())}, **self._view_filter())) objects.Authorization(osid_object_map=authorization_map, runtime=self._runtime, proxy=self._proxy)._delete() collection.delete_one({'_id': ObjectId(authorization_id.get_identifier())})
[ "def", "delete_authorization", "(", "self", ",", "authorization_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.delete_resource_template", "collection", "=", "JSONClientValidated", "(", "'authorization'", ",", "collection", "=", "'Authoriza...
Deletes the ``Authorization`` identified by the given ``Id``. arg: authorization_id (osid.id.Id): the ``Id`` of the ``Authorization`` to delete raise: NotFound - an ``Authorization`` was not found identified by the given ``Id`` raise: NullArgument - ``authorization_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Deletes", "the", "Authorization", "identified", "by", "the", "given", "Id", "." ]
python
train
ray-project/ray
python/ray/experimental/streaming/communication.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/communication.py#L205-L233
def _flush(self, close=False): """Flushes remaining output records in the output queues to plasma. None is used as special type of record that is propagated from sources to sink to notify that the end of data in a stream. Attributes: close (bool): A flag denoting whether the channel should be also marked as 'closed' (True) or not (False) after flushing. """ for channel in self.forward_channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.shuffle_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.shuffle_key_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes() for channels in self.round_robin_channels: for channel in channels: if close is True: channel.queue.put_next(None) channel.queue._flush_writes()
[ "def", "_flush", "(", "self", ",", "close", "=", "False", ")", ":", "for", "channel", "in", "self", ".", "forward_channels", ":", "if", "close", "is", "True", ":", "channel", ".", "queue", ".", "put_next", "(", "None", ")", "channel", ".", "queue", "...
Flushes remaining output records in the output queues to plasma. None is used as special type of record that is propagated from sources to sink to notify that the end of data in a stream. Attributes: close (bool): A flag denoting whether the channel should be also marked as 'closed' (True) or not (False) after flushing.
[ "Flushes", "remaining", "output", "records", "in", "the", "output", "queues", "to", "plasma", "." ]
python
train
numenta/nupic
src/nupic/algorithms/fdrutilities.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1311-L1399
def predictionExtent(inputs, resets, outputs, minOverlapPct=100.0): """ Computes the predictive ability of a temporal memory (TM). This routine returns a value which is the average number of time steps of prediction provided by the TM. It accepts as input the inputs, outputs, and resets provided to the TM as well as a 'minOverlapPct' used to evalulate whether or not a prediction is a good enough match to the actual input. The 'outputs' are the pooling outputs of the TM. This routine treats each output as a "manifold" that includes the active columns that should be present in the next N inputs. It then looks at each successive input and sees if it's active columns are within the manifold. For each output sample, it computes how many time steps it can go forward on the input before the input overlap with the manifold is less then 'minOverlapPct'. It returns the average number of time steps calculated for each output. Parameters: ----------------------------------------------- inputs: The inputs to the TM. Row 0 contains the inputs from time step 0, row 1 from time step 1, etc. resets: The reset input to the TM. Element 0 contains the reset from time step 0, element 1 from time step 1, etc. outputs: The pooling outputs from the TM. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. minOverlapPct: How much each input's columns must overlap with the pooling output's columns to be considered a valid prediction. retval: (Average number of time steps of prediction over all output samples, Average number of time steps of prediction when we aren't cut short by the end of the sequence, List containing frequency counts of each encountered prediction time) """ # List of how many times we encountered each prediction amount. Element 0 # is how many times we successfully predicted 0 steps in advance, element 1 # is how many times we predicted 1 step in advance, etc. 
predCounts = None # Total steps of prediction over all samples predTotal = 0 # Total number of samples nSamples = len(outputs) # Total steps of prediction for samples at the start of the sequence, or # for samples whose prediction runs aren't cut short by the end of the # sequence. predTotalNotLimited = 0 nSamplesNotLimited = 0 # Compute how many cells/column we have nCols = len(inputs[0]) nCellsPerCol = len(outputs[0]) // nCols # Evalulate prediction for each output sample for idx in xrange(nSamples): # What are the active columns for this output? activeCols = outputs[idx].reshape(nCols, nCellsPerCol).max(axis=1) # How many steps of prediction do we have? steps = 0 while (idx+steps+1 < nSamples) and (resets[idx+steps+1] == 0): overlap = numpy.logical_and(inputs[idx+steps+1], activeCols) overlapPct = 100.0 * float(overlap.sum()) / inputs[idx+steps+1].sum() if overlapPct >= minOverlapPct: steps += 1 else: break # print "idx:", idx, "steps:", steps # Accumulate into our total predCounts = _accumulateFrequencyCounts([steps], predCounts) predTotal += steps # If this sample was not cut short by the end of the sequence, include # it into the "NotLimited" runs if resets[idx] or \ ((idx+steps+1 < nSamples) and (not resets[idx+steps+1])): predTotalNotLimited += steps nSamplesNotLimited += 1 # Return results return (float(predTotal) / nSamples, float(predTotalNotLimited) / nSamplesNotLimited, predCounts)
[ "def", "predictionExtent", "(", "inputs", ",", "resets", ",", "outputs", ",", "minOverlapPct", "=", "100.0", ")", ":", "# List of how many times we encountered each prediction amount. Element 0", "# is how many times we successfully predicted 0 steps in advance, element 1", "# is h...
Computes the predictive ability of a temporal memory (TM). This routine returns a value which is the average number of time steps of prediction provided by the TM. It accepts as input the inputs, outputs, and resets provided to the TM as well as a 'minOverlapPct' used to evalulate whether or not a prediction is a good enough match to the actual input. The 'outputs' are the pooling outputs of the TM. This routine treats each output as a "manifold" that includes the active columns that should be present in the next N inputs. It then looks at each successive input and sees if it's active columns are within the manifold. For each output sample, it computes how many time steps it can go forward on the input before the input overlap with the manifold is less then 'minOverlapPct'. It returns the average number of time steps calculated for each output. Parameters: ----------------------------------------------- inputs: The inputs to the TM. Row 0 contains the inputs from time step 0, row 1 from time step 1, etc. resets: The reset input to the TM. Element 0 contains the reset from time step 0, element 1 from time step 1, etc. outputs: The pooling outputs from the TM. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. minOverlapPct: How much each input's columns must overlap with the pooling output's columns to be considered a valid prediction. retval: (Average number of time steps of prediction over all output samples, Average number of time steps of prediction when we aren't cut short by the end of the sequence, List containing frequency counts of each encountered prediction time)
[ "Computes", "the", "predictive", "ability", "of", "a", "temporal", "memory", "(", "TM", ")", ".", "This", "routine", "returns", "a", "value", "which", "is", "the", "average", "number", "of", "time", "steps", "of", "prediction", "provided", "by", "the", "TM...
python
valid
polyaxon/polyaxon
polyaxon/scheduler/spawners/templates/project_jobs/pods.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/scheduler/spawners/templates/project_jobs/pods.py#L41-L90
def get_project_pod_spec(volume_mounts, volumes, image, command, args, ports, env_vars=None, env_from=None, container_name=None, resources=None, node_selector=None, affinity=None, tolerations=None, image_pull_policy=None, restart_policy=None, service_account_name=None): """Pod spec to be used to create pods for project: tensorboard, notebooks.""" volume_mounts = to_list(volume_mounts, check_none=True) volumes = to_list(volumes, check_none=True) gpu_volume_mounts, gpu_volumes = get_gpu_volumes_def(resources) volume_mounts += gpu_volume_mounts volumes += gpu_volumes ports = [client.V1ContainerPort(container_port=port) for port in ports] pod_container = get_pod_container( volume_mounts=volume_mounts, image=image, command=command, args=args, ports=ports, env_vars=env_vars, env_from=env_from, container_name=container_name, resources=resources, image_pull_policy=image_pull_policy) containers = [pod_container] if service_account_name and not conf.get('K8S_RBAC_ENABLED'): service_account_name = None return client.V1PodSpec(restart_policy=restart_policy, security_context=get_security_context(), service_account_name=service_account_name, containers=containers, volumes=volumes, node_selector=node_selector, affinity=affinity, tolerations=tolerations)
[ "def", "get_project_pod_spec", "(", "volume_mounts", ",", "volumes", ",", "image", ",", "command", ",", "args", ",", "ports", ",", "env_vars", "=", "None", ",", "env_from", "=", "None", ",", "container_name", "=", "None", ",", "resources", "=", "None", ","...
Pod spec to be used to create pods for project: tensorboard, notebooks.
[ "Pod", "spec", "to", "be", "used", "to", "create", "pods", "for", "project", ":", "tensorboard", "notebooks", "." ]
python
train
nsavch/python-xonotic-db
xon_db/cli.py
https://github.com/nsavch/python-xonotic-db/blob/339fe4c2c74880fd66712ae32789d7e9ae3e8f02/xon_db/cli.py#L65-L71
def remove_cts_record(file_name, map, position): """ Remove cts record on MAP and POSITION """ db = XonoticDB.load_path(file_name) db.remove_cts_record(map, position) db.save(file_name)
[ "def", "remove_cts_record", "(", "file_name", ",", "map", ",", "position", ")", ":", "db", "=", "XonoticDB", ".", "load_path", "(", "file_name", ")", "db", ".", "remove_cts_record", "(", "map", ",", "position", ")", "db", ".", "save", "(", "file_name", "...
Remove cts record on MAP and POSITION
[ "Remove", "cts", "record", "on", "MAP", "and", "POSITION" ]
python
train
matthieugouel/gibica
gibica/parser.py
https://github.com/matthieugouel/gibica/blob/65f937f7a6255078cc22eb7691a2897466032909/gibica/parser.py#L102-L117
def parameters(self): """ parameters: '(' logical_or_expr (',' logical_or_expr)* ')' """ nodes = [] self._process(Nature.LPAREN) while self.token.nature != Nature.RPAREN: nodes.append(Parameters(variable=self.logical_or_expr())) if self.token.nature == Nature.COMMA: self._process(Nature.COMMA) self._process(Nature.RPAREN) return nodes
[ "def", "parameters", "(", "self", ")", ":", "nodes", "=", "[", "]", "self", ".", "_process", "(", "Nature", ".", "LPAREN", ")", "while", "self", ".", "token", ".", "nature", "!=", "Nature", ".", "RPAREN", ":", "nodes", ".", "append", "(", "Parameters...
parameters: '(' logical_or_expr (',' logical_or_expr)* ')'
[ "parameters", ":", "(", "logical_or_expr", "(", "logical_or_expr", ")", "*", ")" ]
python
train
ema/pycodicefiscale
codicefiscale.py
https://github.com/ema/pycodicefiscale/blob/4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4/codicefiscale.py#L147-L176
def build(surname, name, birthday, sex, municipality): """``build(surname, name, birthday, sex, municipality) -> string`` Computes the fiscal code for the given person data. eg: build('Rocca', 'Emanuele', datetime.datetime(1983, 11, 18), 'M', 'D969') -> RCCMNL83S18D969H """ # RCCMNL output = __surname_triplet(surname) + __name_triplet(name) # RCCMNL83 output += str(birthday.year)[2:] # RCCMNL83S output += MONTHSCODE[birthday.month - 1] # RCCMNL83S18 output += "%02d" % (sex.upper() == 'M' and birthday.day or 40 + birthday.day) # RCCMNL83S18D969 output += municipality # RCCMNL83S18D969H output += control_code(output) assert isvalid(output) return output
[ "def", "build", "(", "surname", ",", "name", ",", "birthday", ",", "sex", ",", "municipality", ")", ":", "# RCCMNL", "output", "=", "__surname_triplet", "(", "surname", ")", "+", "__name_triplet", "(", "name", ")", "# RCCMNL83", "output", "+=", "str", "(",...
``build(surname, name, birthday, sex, municipality) -> string`` Computes the fiscal code for the given person data. eg: build('Rocca', 'Emanuele', datetime.datetime(1983, 11, 18), 'M', 'D969') -> RCCMNL83S18D969H
[ "build", "(", "surname", "name", "birthday", "sex", "municipality", ")", "-", ">", "string" ]
python
valid
softlayer/softlayer-python
SoftLayer/managers/hardware.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/hardware.py#L459-L523
def _generate_create_dict(self, size=None, hostname=None, domain=None, location=None, os=None, port_speed=None, ssh_keys=None, post_uri=None, hourly=True, no_public=False, extras=None): """Translates arguments into a dictionary for creating a server.""" extras = extras or [] package = self._get_package() location = _get_location(package, location) prices = [] for category in ['pri_ip_addresses', 'vpn_management', 'remote_management']: prices.append(_get_default_price_id(package['items'], option=category, hourly=hourly, location=location)) prices.append(_get_os_price_id(package['items'], os, location=location)) prices.append(_get_bandwidth_price_id(package['items'], hourly=hourly, no_public=no_public, location=location)) prices.append(_get_port_speed_price_id(package['items'], port_speed, no_public, location=location)) for extra in extras: prices.append(_get_extra_price_id(package['items'], extra, hourly, location=location)) hardware = { 'hostname': hostname, 'domain': domain, } order = { 'hardware': [hardware], 'location': location['keyname'], 'prices': [{'id': price} for price in prices], 'packageId': package['id'], 'presetId': _get_preset_id(package, size), 'useHourlyPricing': hourly, } if post_uri: order['provisionScripts'] = [post_uri] if ssh_keys: order['sshKeys'] = [{'sshKeyIds': ssh_keys}] return order
[ "def", "_generate_create_dict", "(", "self", ",", "size", "=", "None", ",", "hostname", "=", "None", ",", "domain", "=", "None", ",", "location", "=", "None", ",", "os", "=", "None", ",", "port_speed", "=", "None", ",", "ssh_keys", "=", "None", ",", ...
Translates arguments into a dictionary for creating a server.
[ "Translates", "arguments", "into", "a", "dictionary", "for", "creating", "a", "server", "." ]
python
train
awslabs/serverless-application-model
samtranslator/model/eventsources/cloudwatchlogs.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/eventsources/cloudwatchlogs.py#L18-L36
def to_cloudformation(self, **kwargs): """Returns the CloudWatch Logs Subscription Filter and Lambda Permission to which this CloudWatch Logs event source corresponds. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this push event expands :rtype: list """ function = kwargs.get('function') if not function: raise TypeError("Missing required keyword argument: function") source_arn = self.get_source_arn() permission = self._construct_permission(function, source_arn=source_arn) subscription_filter = self.get_subscription_filter(function, permission) resources = [permission, subscription_filter] return resources
[ "def", "to_cloudformation", "(", "self", ",", "*", "*", "kwargs", ")", ":", "function", "=", "kwargs", ".", "get", "(", "'function'", ")", "if", "not", "function", ":", "raise", "TypeError", "(", "\"Missing required keyword argument: function\"", ")", "source_ar...
Returns the CloudWatch Logs Subscription Filter and Lambda Permission to which this CloudWatch Logs event source corresponds. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this push event expands :rtype: list
[ "Returns", "the", "CloudWatch", "Logs", "Subscription", "Filter", "and", "Lambda", "Permission", "to", "which", "this", "CloudWatch", "Logs", "event", "source", "corresponds", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_span.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_span.py#L12-L22
def monitor_session_session_number(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span") session = ET.SubElement(monitor, "session") session_number = ET.SubElement(session, "session-number") session_number.text = kwargs.pop('session_number') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "monitor_session_session_number", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "monitor", "=", "ET", ".", "SubElement", "(", "config", ",", "\"monitor\"", ",", "xmlns", "=", "\"urn:broc...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
OSSOS/MOP
src/ossos/core/ossos/astrom.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/astrom.py#L675-L687
def _original_frame(self, x, y): """ Return x/y in the original frame, based on a guess as much as anything. :param x: x pixel coordinate :type x: float :param y: y pixel coordinate :type y: float :return: x,y :rtype: float, float """ if self._inverted: return self.obs.naxis1 - x, self.obs.naxis2 - y return x, y
[ "def", "_original_frame", "(", "self", ",", "x", ",", "y", ")", ":", "if", "self", ".", "_inverted", ":", "return", "self", ".", "obs", ".", "naxis1", "-", "x", ",", "self", ".", "obs", ".", "naxis2", "-", "y", "return", "x", ",", "y" ]
Return x/y in the original frame, based on a guess as much as anything. :param x: x pixel coordinate :type x: float :param y: y pixel coordinate :type y: float :return: x,y :rtype: float, float
[ "Return", "x", "/", "y", "in", "the", "original", "frame", "based", "on", "a", "guess", "as", "much", "as", "anything", ".", ":", "param", "x", ":", "x", "pixel", "coordinate", ":", "type", "x", ":", "float", ":", "param", "y", ":", "y", "pixel", ...
python
train
kensho-technologies/graphql-compiler
graphql_compiler/query_formatting/match_formatting.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/match_formatting.py#L59-L75
def _safe_match_list(inner_type, argument_value): """Represent the list of "inner_type" objects in MATCH form.""" stripped_type = strip_non_null_from_type(inner_type) if isinstance(stripped_type, GraphQLList): raise GraphQLInvalidArgumentError(u'MATCH does not currently support nested lists, ' u'but inner type was {}: ' u'{}'.format(inner_type, argument_value)) if not isinstance(argument_value, list): raise GraphQLInvalidArgumentError(u'Attempting to represent a non-list as a list: ' u'{}'.format(argument_value)) components = ( _safe_match_argument(stripped_type, x) for x in argument_value ) return u'[' + u','.join(components) + u']'
[ "def", "_safe_match_list", "(", "inner_type", ",", "argument_value", ")", ":", "stripped_type", "=", "strip_non_null_from_type", "(", "inner_type", ")", "if", "isinstance", "(", "stripped_type", ",", "GraphQLList", ")", ":", "raise", "GraphQLInvalidArgumentError", "("...
Represent the list of "inner_type" objects in MATCH form.
[ "Represent", "the", "list", "of", "inner_type", "objects", "in", "MATCH", "form", "." ]
python
train
urinieto/msaf
msaf/algorithms/interface.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/interface.py#L102-L123
def _postprocess(self, est_idxs, est_labels): """Post processes the estimations from the algorithm, removing empty segments and making sure the lenghts of the boundaries and labels match.""" # Make sure we are using the previously input bounds, if any if self.in_bound_idxs is not None: F = self._preprocess() est_labels = U.synchronize_labels(self.in_bound_idxs, est_idxs, est_labels, F.shape[0]) est_idxs = self.in_bound_idxs # Remove empty segments if needed est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels) assert len(est_idxs) - 1 == len(est_labels), "Number of boundaries " \ "(%d) and number of labels(%d) don't match" % (len(est_idxs), len(est_labels)) # Make sure the indeces are integers est_idxs = np.asarray(est_idxs, dtype=int) return est_idxs, est_labels
[ "def", "_postprocess", "(", "self", ",", "est_idxs", ",", "est_labels", ")", ":", "# Make sure we are using the previously input bounds, if any", "if", "self", ".", "in_bound_idxs", "is", "not", "None", ":", "F", "=", "self", ".", "_preprocess", "(", ")", "est_lab...
Post processes the estimations from the algorithm, removing empty segments and making sure the lenghts of the boundaries and labels match.
[ "Post", "processes", "the", "estimations", "from", "the", "algorithm", "removing", "empty", "segments", "and", "making", "sure", "the", "lenghts", "of", "the", "boundaries", "and", "labels", "match", "." ]
python
test
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/frontend_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/frontend_widget.py#L674-L687
def _complete(self): """ Performs completion at the current cursor location. """ context = self._get_context() if context: # Send the completion request to the kernel msg_id = self.kernel_manager.shell_channel.complete( '.'.join(context), # text self._get_input_buffer_cursor_line(), # line self._get_input_buffer_cursor_column(), # cursor_pos self.input_buffer) # block pos = self._get_cursor().position() info = self._CompletionRequest(msg_id, pos) self._request_info['complete'] = info
[ "def", "_complete", "(", "self", ")", ":", "context", "=", "self", ".", "_get_context", "(", ")", "if", "context", ":", "# Send the completion request to the kernel", "msg_id", "=", "self", ".", "kernel_manager", ".", "shell_channel", ".", "complete", "(", "'.'"...
Performs completion at the current cursor location.
[ "Performs", "completion", "at", "the", "current", "cursor", "location", "." ]
python
test
Stranger6667/postmarker
postmarker/models/stats.py
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L26-L30
def spam(self, tag=None, fromdate=None, todate=None): """ Gets a total count of recipients who have marked your email as spam. """ return self.call("GET", "/stats/outbound/spam", tag=tag, fromdate=fromdate, todate=todate)
[ "def", "spam", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/spam\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fr...
Gets a total count of recipients who have marked your email as spam.
[ "Gets", "a", "total", "count", "of", "recipients", "who", "have", "marked", "your", "email", "as", "spam", "." ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/__init__.py#L201-L222
def _set_load_balancing(self, v, load=False): """ Setter method for load_balancing, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/load_balancing (container) If this variable is read-only (config: false) in the source YANG file, then _set_load_balancing is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_load_balancing() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=load_balancing.load_balancing, is_container='container', presence=False, yang_name="load-balancing", rest_name="load-balancing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Load balancing'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """load_balancing must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=load_balancing.load_balancing, is_container='container', presence=False, yang_name="load-balancing", rest_name="load-balancing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Load balancing'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)""", }) self.__load_balancing = t if hasattr(self, '_set'): self._set()
[ "def", "_set_load_balancing", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
Setter method for load_balancing, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/load_balancing (container) If this variable is read-only (config: false) in the source YANG file, then _set_load_balancing is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_load_balancing() directly.
[ "Setter", "method", "for", "load_balancing", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "interface", "/", "ve", "/", "ipv6", "/", "ipv6_local_anycast_gateway", "/", "load_balancing", "(", "container", ")", "If", "this", "variable", "is", "read...
python
train
dw/mitogen
mitogen/compat/pkgutil.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/compat/pkgutil.py#L468-L481
def find_loader(fullname): """Find a PEP 302 "loader" object for fullname If fullname contains dots, path must be the containing package's __path__. Returns None if the module cannot be found or imported. This function uses iter_importers(), and is thus subject to the same limitations regarding platform-specific special import locations such as the Windows registry. """ for importer in iter_importers(fullname): loader = importer.find_module(fullname) if loader is not None: return loader return None
[ "def", "find_loader", "(", "fullname", ")", ":", "for", "importer", "in", "iter_importers", "(", "fullname", ")", ":", "loader", "=", "importer", ".", "find_module", "(", "fullname", ")", "if", "loader", "is", "not", "None", ":", "return", "loader", "retur...
Find a PEP 302 "loader" object for fullname If fullname contains dots, path must be the containing package's __path__. Returns None if the module cannot be found or imported. This function uses iter_importers(), and is thus subject to the same limitations regarding platform-specific special import locations such as the Windows registry.
[ "Find", "a", "PEP", "302", "loader", "object", "for", "fullname" ]
python
train