repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
apple/turicreate
src/unity/python/turicreate/extensions.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/extensions.py#L654-L734
def _build_native_function_call(fn):
    """
    Interpret fn as a native function call description.

    fn may be a toolkit extension function itself (e.g. tc.extensions.add)
    or a simple lambda closure wrapping one (e.g. lambda x:
    tc.extensions.add(5)). On success a _Closure object describing the
    call is returned so it can be handed to C++; on failure an exception
    is raised.
    """
    # Direct case: fn itself is a registered toolkit function.
    fn_name = _get_toolkit_function_name_from_function(fn)
    if fn_name != "":
        # Build an "identity" argument list: slot i maps to lambda arg i.
        params = _get_argument_list_from_toolkit_function_name(fn_name)
        return _Closure(fn_name, [[0, idx] for idx in range(len(params))])

    # Not a native function; try to interpret it as a simple lambda capture.
    from .util.lambda_closure_capture import translate
    from .util.lambda_closure_capture import Parameter

    lam = translate(fn)

    # Resolve the function the lambda actually calls.
    target = _descend_namespace(lam.caller_globals, lam.closure_fn_name)
    fn_name = _get_toolkit_function_name_from_function(target)
    if fn_name == "":
        raise RuntimeError("Lambda does not contain a native function")

    params = _get_argument_list_from_toolkit_function_name(fn_name)

    # Start with placeholders; [-1, name] marks a still-unbound slot.
    call_args = [[-1, name] for name in params]

    # Positional arguments bind by position.
    for pos, value in enumerate(lam.positional_args):
        if type(value) is Parameter:
            # A pass-through lambda parameter: record its input index.
            call_args[pos] = [0, lam.input_arg_names.index(value.name)]
        else:
            # A captured constant value.
            call_args[pos] = [1, value]

    # Named arguments bind through the toolkit parameter list.
    for key in lam.named_args:
        value = lam.named_args[key]
        slot = params.index(key)
        if type(value) is Parameter:
            call_args[slot] = [0, lam.input_arg_names.index(value.name)]
        else:
            call_args[slot] = [1, value]

    # Every slot must have been bound one way or the other.
    for entry in call_args:
        if entry[0] == -1:
            raise RuntimeError("Incomplete function specification")

    # Captured values that are themselves functions get broken down
    # recursively when possible; failures just leave the raw value.
    import inspect
    for pos in range(len(call_args)):
        if call_args[pos][0] == 1 and inspect.isfunction(call_args[pos][1]):
            try:
                call_args[pos][1] = _build_native_function_call(call_args[pos][1])
            except:
                pass

    return _Closure(fn_name, call_args)
[ "def", "_build_native_function_call", "(", "fn", ")", ":", "# See if fn is the native function itself", "native_function_name", "=", "_get_toolkit_function_name_from_function", "(", "fn", ")", "if", "native_function_name", "!=", "\"\"", ":", "# yup!", "# generate an \"identity\...
If fn can be interpreted and handled as a native function: i.e. fn is one of the extensions, or fn is a simple lambda closure using one of the extensions. fn = tc.extensions.add fn = lambda x: tc.extensions.add(5) Then, this returns a closure object, which describes the function call which can then be passed to C++. Returns a _Closure object on success, raises an exception on failure.
[ "If", "fn", "can", "be", "interpreted", "and", "handled", "as", "a", "native", "function", ":", "i", ".", "e", ".", "fn", "is", "one", "of", "the", "extensions", "or", "fn", "is", "a", "simple", "lambda", "closure", "using", "one", "of", "the", "exte...
python
train
inveniosoftware/invenio-webhooks
invenio_webhooks/models.py
https://github.com/inveniosoftware/invenio-webhooks/blob/f407cb2245464543ee474a81189fb9d3978bdde5/invenio_webhooks/models.py#L181-L189
def status(self, event): """Return a tuple with current processing status code and message.""" result = AsyncResult(str(event.id)) return ( self.CELERY_STATES_TO_HTTP.get(result.state), result.info.get('message') if result.state in self.CELERY_RESULT_INFO_FOR and result.info else event.response.get('message') )
[ "def", "status", "(", "self", ",", "event", ")", ":", "result", "=", "AsyncResult", "(", "str", "(", "event", ".", "id", ")", ")", "return", "(", "self", ".", "CELERY_STATES_TO_HTTP", ".", "get", "(", "result", ".", "state", ")", ",", "result", ".", ...
Return a tuple with current processing status code and message.
[ "Return", "a", "tuple", "with", "current", "processing", "status", "code", "and", "message", "." ]
python
train
uchicago-cs/deepdish
deepdish/image.py
https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/image.py#L176-L194
def save(path, im): """ Saves an image to file. If the image is type float, it will assume to have values in [0, 1]. Parameters ---------- path : str Path to which the image will be saved. im : ndarray (image) Image. """ from PIL import Image if im.dtype == np.uint8: pil_im = Image.fromarray(im) else: pil_im = Image.fromarray((im*255).astype(np.uint8)) pil_im.save(path)
[ "def", "save", "(", "path", ",", "im", ")", ":", "from", "PIL", "import", "Image", "if", "im", ".", "dtype", "==", "np", ".", "uint8", ":", "pil_im", "=", "Image", ".", "fromarray", "(", "im", ")", "else", ":", "pil_im", "=", "Image", ".", "froma...
Saves an image to file. If the image is type float, it will assume to have values in [0, 1]. Parameters ---------- path : str Path to which the image will be saved. im : ndarray (image) Image.
[ "Saves", "an", "image", "to", "file", "." ]
python
train
ckoepp/TwitterSearch
TwitterSearch/TwitterSearch.py
https://github.com/ckoepp/TwitterSearch/blob/627b9f519d49faf6b83859717f9082b3b2622aaf/TwitterSearch/TwitterSearch.py#L309-L325
def search_next_results(self): """ Triggers the search for more results using the Twitter API. \ Raises exception if no further results can be found. \ See `Advanced usage <advanced_usage.html>`_ for example :returns: ``True`` if there are more results available \ within the Twitter Search API :raises: TwitterSearchException """ if not self.__next_max_id: raise TwitterSearchException(1011) self.send_search( "%s&max_id=%i" % (self._start_url, self.__next_max_id) ) return True
[ "def", "search_next_results", "(", "self", ")", ":", "if", "not", "self", ".", "__next_max_id", ":", "raise", "TwitterSearchException", "(", "1011", ")", "self", ".", "send_search", "(", "\"%s&max_id=%i\"", "%", "(", "self", ".", "_start_url", ",", "self", "...
Triggers the search for more results using the Twitter API. \ Raises exception if no further results can be found. \ See `Advanced usage <advanced_usage.html>`_ for example :returns: ``True`` if there are more results available \ within the Twitter Search API :raises: TwitterSearchException
[ "Triggers", "the", "search", "for", "more", "results", "using", "the", "Twitter", "API", ".", "\\", "Raises", "exception", "if", "no", "further", "results", "can", "be", "found", ".", "\\", "See", "Advanced", "usage", "<advanced_usage", ".", "html", ">", "...
python
train
portantier/habu
habu/cli/cmd_ping.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_ping.py#L20-L74
def cmd_ping(ip, interface, count, timeout, wait, verbose): """The classic ping tool that send ICMP echo requests. \b # habu.ping 8.8.8.8 IP / ICMP 8.8.8.8 > 192.168.0.5 echo-reply 0 / Padding IP / ICMP 8.8.8.8 > 192.168.0.5 echo-reply 0 / Padding IP / ICMP 8.8.8.8 > 192.168.0.5 echo-reply 0 / Padding IP / ICMP 8.8.8.8 > 192.168.0.5 echo-reply 0 / Padding """ if interface: conf.iface = interface conf.verb = False conf.L3socket=L3RawSocket layer3 = IP() layer3.dst = ip layer3.tos = 0 layer3.id = 1 layer3.flags = 0 layer3.frag = 0 layer3.ttl = 64 layer3.proto = 1 # icmp layer4 = ICMP() layer4.type = 8 # echo-request layer4.code = 0 layer4.id = 0 layer4.seq = 0 pkt = layer3 / layer4 counter = 0 while True: ans = sr1(pkt, timeout=timeout) if ans: if verbose: ans.show() else: print(ans.summary()) del(ans) else: print('Timeout') counter += 1 if count != 0 and counter == count: break sleep(wait) return True
[ "def", "cmd_ping", "(", "ip", ",", "interface", ",", "count", ",", "timeout", ",", "wait", ",", "verbose", ")", ":", "if", "interface", ":", "conf", ".", "iface", "=", "interface", "conf", ".", "verb", "=", "False", "conf", ".", "L3socket", "=", "L3R...
The classic ping tool that send ICMP echo requests. \b # habu.ping 8.8.8.8 IP / ICMP 8.8.8.8 > 192.168.0.5 echo-reply 0 / Padding IP / ICMP 8.8.8.8 > 192.168.0.5 echo-reply 0 / Padding IP / ICMP 8.8.8.8 > 192.168.0.5 echo-reply 0 / Padding IP / ICMP 8.8.8.8 > 192.168.0.5 echo-reply 0 / Padding
[ "The", "classic", "ping", "tool", "that", "send", "ICMP", "echo", "requests", "." ]
python
train
ampl/amplpy
amplpy/ampl.py
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/ampl.py#L326-L335
def isBusy(self): """ Returns true if the underlying engine is doing an async operation. """ # return self._impl.isBusy() if self._lock.acquire(False): self._lock.release() return False else: return True
[ "def", "isBusy", "(", "self", ")", ":", "# return self._impl.isBusy()", "if", "self", ".", "_lock", ".", "acquire", "(", "False", ")", ":", "self", ".", "_lock", ".", "release", "(", ")", "return", "False", "else", ":", "return", "True" ]
Returns true if the underlying engine is doing an async operation.
[ "Returns", "true", "if", "the", "underlying", "engine", "is", "doing", "an", "async", "operation", "." ]
python
train
wolverdude/GenSON
genson/schema/builder.py
https://github.com/wolverdude/GenSON/blob/76552d23cf9202e8e7c262cb018eb3cb3df686b9/genson/schema/builder.py#L33-L56
def add_schema(self, schema): """ Merge in a JSON schema. This can be a ``dict`` or another ``SchemaBuilder`` :param schema: a JSON Schema .. note:: There is no schema validation. If you pass in a bad schema, you might get back a bad schema. """ if isinstance(schema, SchemaBuilder): schema_uri = schema.schema_uri schema = schema.to_schema() if schema_uri is None: del schema['$schema'] elif isinstance(schema, SchemaNode): schema = schema.to_schema() if '$schema' in schema: self.schema_uri = self.schema_uri or schema['$schema'] schema = dict(schema) del schema['$schema'] self._root_node.add_schema(schema)
[ "def", "add_schema", "(", "self", ",", "schema", ")", ":", "if", "isinstance", "(", "schema", ",", "SchemaBuilder", ")", ":", "schema_uri", "=", "schema", ".", "schema_uri", "schema", "=", "schema", ".", "to_schema", "(", ")", "if", "schema_uri", "is", "...
Merge in a JSON schema. This can be a ``dict`` or another ``SchemaBuilder`` :param schema: a JSON Schema .. note:: There is no schema validation. If you pass in a bad schema, you might get back a bad schema.
[ "Merge", "in", "a", "JSON", "schema", ".", "This", "can", "be", "a", "dict", "or", "another", "SchemaBuilder" ]
python
train
AndrewAnnex/SpiceyPy
getspice.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/getspice.py#L153-L180
def _distribution_info(self): """Creates the distribution name and the expected extension for the CSPICE package and returns it. :return (distribution, extension) tuple where distribution is the best guess from the strings available within the platform_urls list of strings, and extension is either "zip" or "tar.Z" depending on whether we are dealing with a Windows platform or else. :rtype: tuple (str, str) :raises: KeyError if the (system, machine) tuple does not correspond to any of the supported SpiceyPy environments. """ print('Gathering information...') system = platform.system() # Cygwin system is CYGWIN-NT-xxx. system = 'cygwin' if 'CYGWIN' in system else system processor = platform.processor() machine = '64bit' if sys.maxsize > 2 ** 32 else '32bit' print('SYSTEM: ', system) print('PROCESSOR:', processor) print('MACHINE: ', machine) return self._dists[(system, machine)]
[ "def", "_distribution_info", "(", "self", ")", ":", "print", "(", "'Gathering information...'", ")", "system", "=", "platform", ".", "system", "(", ")", "# Cygwin system is CYGWIN-NT-xxx.", "system", "=", "'cygwin'", "if", "'CYGWIN'", "in", "system", "else", "syst...
Creates the distribution name and the expected extension for the CSPICE package and returns it. :return (distribution, extension) tuple where distribution is the best guess from the strings available within the platform_urls list of strings, and extension is either "zip" or "tar.Z" depending on whether we are dealing with a Windows platform or else. :rtype: tuple (str, str) :raises: KeyError if the (system, machine) tuple does not correspond to any of the supported SpiceyPy environments.
[ "Creates", "the", "distribution", "name", "and", "the", "expected", "extension", "for", "the", "CSPICE", "package", "and", "returns", "it", "." ]
python
train
aestrivex/bctpy
bct/algorithms/motifs.py
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/motifs.py#L744-L843
def motif4struct_wei(W):
    '''
    Structural motifs are patterns of local connectivity. Motif frequency
    is the frequency of occurrence of motifs around a node. Motif intensity
    and coherence are weighted generalizations of motif frequency.

    Parameters
    ----------
    W : NxN np.ndarray
        weighted directed connection matrix (all weights between 0 and 1)

    Returns
    -------
    I : 199xN np.ndarray
        motif intensity matrix
    Q : 199xN np.ndarray
        motif coherence matrix
    F : 199xN np.ndarray
        motif frequency matrix

    Notes
    -----
    Average intensity and coherence are given by I./F and Q./F.
    '''
    from scipy import io
    import os
    # Load the precomputed 4-node motif lookup tables shipped with the
    # package (motiflib is a module-level constant; the .mat layout is
    # assumed from the keys used below -- m4/m4n/id4/n4).
    fname = os.path.join(os.path.dirname(__file__), motiflib)
    mot = io.loadmat(fname)
    m4 = mot['m4']
    m4n = mot['m4n']
    id4 = mot['id4'].squeeze()
    n4 = mot['n4'].squeeze()

    n = len(W)
    I = np.zeros((199, n))  # intensity
    Q = np.zeros((199, n))  # coherence
    F = np.zeros((199, n))  # frequency

    A = binarize(W, copy=True)  # ensure A is binary
    As = np.logical_or(A, A.T)  # symmetrized adjmat

    # Enumerate each connected 4-node subgraph {u, v1, v2, v3} exactly
    # once by only considering neighbors with a higher node index.
    for u in range(n - 3):
        # v1: neighbors of u (>u)
        V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
        for v1 in np.where(V1)[0]:
            V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
            V2[V1] = 0  # not already in V1
            # and all neighbors of u (>v1)
            V2 = np.logical_or(
                np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
            for v2 in np.where(V2)[0]:
                vz = np.max((v1, v2))  # vz: largest rank node
                # v3: all neighbors of v2 (>u)
                V3 = np.append(np.zeros((u,), dtype=int), As[v2, u + 1:n + 1])
                V3[V2] = 0  # not already in V1 and V2
                # and all neighbors of v1 (>v2)
                V3 = np.logical_or(
                    np.append(np.zeros((v2,)), As[v1, v2 + 1:n + 1]), V3)
                V3[V1] = 0  # not already in V1
                # and all neighbors of u (>vz)
                V3 = np.logical_or(
                    np.append(np.zeros((vz,)), As[u, vz + 1:n + 1]), V3)
                for v3 in np.where(V3)[0]:
                    # Binary edge pattern of the 12 possible directed edges
                    # among the 4 nodes, encoded as a base-10 number that
                    # indexes into the motif table m4n.
                    a = np.array((A[v1, u], A[v2, u], A[v3, u], A[u, v1],
                                  A[v2, v1], A[v3, v1], A[u, v2], A[v1, v2],
                                  A[v3, v2], A[u, v3], A[v1, v3], A[v2, v3]))
                    s = np.uint64(
                        np.sum(np.power(10, np.arange(11, -1, -1)) * a))
                    # print np.shape(s),np.shape(m4n)
                    ix = np.squeeze(s == m4n)
                    # Corresponding edge weights for the same 12 slots.
                    w = np.array((W[v1, u], W[v2, u], W[v3, u], W[u, v1],
                                  W[v2, v1], W[v3, v1], W[u, v2], W[v1, v2],
                                  W[v3, v2], W[u, v3], W[v1, v3], W[v2, v3]))
                    M = w * m4[ix, :]
                    id = id4[ix] - 1
                    l = n4[ix]
                    x = np.sum(M, axis=1) / l  # arithmetic mean
                    M[M == 0] = 1  # enable geometric mean
                    i = np.prod(M, axis=1)**(1 / l)  # intensity
                    q = i / x  # coherence
                    # then add to cumulative count
                    I[id, u] += i
                    I[id, v1] += i
                    I[id, v2] += i
                    I[id, v3] += i
                    Q[id, u] += q
                    Q[id, v1] += q
                    Q[id, v2] += q
                    Q[id, v3] += q
                    F[id, u] += 1
                    F[id, v1] += 1
                    F[id, v2] += 1
                    F[id, v3] += 1
    return I, Q, F
[ "def", "motif4struct_wei", "(", "W", ")", ":", "from", "scipy", "import", "io", "import", "os", "fname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "motiflib", ")", "mot", "=", "io", ".",...
Structural motifs are patterns of local connectivity. Motif frequency is the frequency of occurrence of motifs around a node. Motif intensity and coherence are weighted generalizations of motif frequency. Parameters ---------- W : NxN np.ndarray weighted directed connection matrix (all weights between 0 and 1) Returns ------- I : 199xN np.ndarray motif intensity matrix Q : 199xN np.ndarray motif coherence matrix F : 199xN np.ndarray motif frequency matrix Notes ----- Average intensity and coherence are given by I./F and Q./F.
[ "Structural", "motifs", "are", "patterns", "of", "local", "connectivity", ".", "Motif", "frequency", "is", "the", "frequency", "of", "occurrence", "of", "motifs", "around", "a", "node", ".", "Motif", "intensity", "and", "coherence", "are", "weighted", "generaliz...
python
train
django-extensions/django-extensions
django_extensions/compat.py
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/compat.py#L33-L40
def get_template_setting(template_key, default=None): """ Read template settings """ templates_var = getattr(settings, 'TEMPLATES', None) if templates_var: for tdict in templates_var: if template_key in tdict: return tdict[template_key] return default
[ "def", "get_template_setting", "(", "template_key", ",", "default", "=", "None", ")", ":", "templates_var", "=", "getattr", "(", "settings", ",", "'TEMPLATES'", ",", "None", ")", "if", "templates_var", ":", "for", "tdict", "in", "templates_var", ":", "if", "...
Read template settings
[ "Read", "template", "settings" ]
python
train
pycontribs/pyrax
pyrax/clouddatabases.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L625-L631
def update(self, name=None, password=None, host=None): """ Allows you to change one or more of the user's username, password, or host. """ return self.manager.update(self, name=name, password=password, host=host)
[ "def", "update", "(", "self", ",", "name", "=", "None", ",", "password", "=", "None", ",", "host", "=", "None", ")", ":", "return", "self", ".", "manager", ".", "update", "(", "self", ",", "name", "=", "name", ",", "password", "=", "password", ",",...
Allows you to change one or more of the user's username, password, or host.
[ "Allows", "you", "to", "change", "one", "or", "more", "of", "the", "user", "s", "username", "password", "or", "host", "." ]
python
train
kristianfoerster/melodist
melodist/station.py
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L340-L383
def disaggregate_radiation(self, method='pot_rad', pot_rad=None): """ Disaggregate solar radiation. Parameters ---------- method : str, optional Disaggregation method. ``pot_rad`` Calculates potential clear-sky hourly radiation and scales it according to the mean daily radiation. (Default) ``pot_rad_via_ssd`` Calculates potential clear-sky hourly radiation and scales it according to the observed daily sunshine duration. ``pot_rad_via_bc`` Calculates potential clear-sky hourly radiation and scales it according to daily minimum and maximum temperature. ``mean_course`` Hourly radiation follows an observed average course (calculated for each month). pot_rad : Series, optional Hourly values of potential solar radiation. If ``None``, calculated internally. """ if self.sun_times is None: self.calc_sun_times() if pot_rad is None and method != 'mean_course': pot_rad = melodist.potential_radiation(self.data_disagg.index, self.lon, self.lat, self.timezone) self.data_disagg.glob = melodist.disaggregate_radiation( self.data_daily, sun_times=self.sun_times, pot_rad=pot_rad, method=method, angstr_a=self.statistics.glob.angstroem.a, angstr_b=self.statistics.glob.angstroem.b, bristcamp_a=self.statistics.glob.bristcamp.a, bristcamp_c=self.statistics.glob.bristcamp.c, mean_course=self.statistics.glob.mean_course )
[ "def", "disaggregate_radiation", "(", "self", ",", "method", "=", "'pot_rad'", ",", "pot_rad", "=", "None", ")", ":", "if", "self", ".", "sun_times", "is", "None", ":", "self", ".", "calc_sun_times", "(", ")", "if", "pot_rad", "is", "None", "and", "metho...
Disaggregate solar radiation. Parameters ---------- method : str, optional Disaggregation method. ``pot_rad`` Calculates potential clear-sky hourly radiation and scales it according to the mean daily radiation. (Default) ``pot_rad_via_ssd`` Calculates potential clear-sky hourly radiation and scales it according to the observed daily sunshine duration. ``pot_rad_via_bc`` Calculates potential clear-sky hourly radiation and scales it according to daily minimum and maximum temperature. ``mean_course`` Hourly radiation follows an observed average course (calculated for each month). pot_rad : Series, optional Hourly values of potential solar radiation. If ``None``, calculated internally.
[ "Disaggregate", "solar", "radiation", "." ]
python
train
hyperledger/indy-sdk
wrappers/python/indy/anoncreds.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/anoncreds.py#L127-L208
async def issuer_create_and_store_revoc_reg(wallet_handle: int,
                                            issuer_did: str,
                                            revoc_def_type: Optional[str],
                                            tag: str,
                                            cred_def_id: str,
                                            config_json: str,
                                            tails_writer_handle: int) -> (str, str, str):
    """
    Create a new revocation registry for the given credential definition as
    tuple of entities:
    - Revocation registry definition that encapsulates credentials
      definition reference, revocation type specific configuration and
      secrets used for credentials revocation
    - Revocation registry state that stores the information about revoked
      entities in a non-disclosing way. The state can be represented as
      ordered list of revocation registry entries were each entry
      represents the list of revocation or issuance operations.

    Revocation registry definition entity contains private and public
    parts. Private part will be stored in the wallet. Public part will be
    returned as json intended to be shared with all anoncreds workflow
    actors usually by publishing REVOC_REG_DEF transaction to Indy
    distributed ledger.

    Revocation registry state is stored on the wallet and also intended to
    be shared as the ordered list of REVOC_REG_ENTRY transactions. This
    call initializes the state in the wallet and returns the initial entry.

    Some revocation registry types (for example, 'CL_ACCUM') can require
    generation of binary blob called tails used to hide information about
    revoked credentials in public revocation registry and intended to be
    distributed out of leger (REVOC_REG_DEF transaction will still contain
    uri and hash of tails). This call requires access to pre-configured
    blob storage writer instance handle that will allow to write generated
    tails.

    :param wallet_handle: wallet handler (created by open_wallet).
    :param issuer_did: a DID of the issuer signing transaction to the Ledger
    :param revoc_def_type: revocation registry type (optional, default
        value depends on credential definition type). Supported types are:
        - 'CL_ACCUM': Type-3 pairing based accumulator. Default for 'CL'
          credential definition type
    :param tag: allows to distinct between revocation registries for the
        same issuer and credential definition
    :param cred_def_id: id of stored in ledger credential definition
    :param config_json: type-specific configuration of revocation registry
        as json:
        - 'CL_ACCUM':
            "issuance_type": (optional) type of issuance. Currently
            supported:
                1) ISSUANCE_BY_DEFAULT: all indices are assumed to be
                   issued and initial accumulator is calculated over all
                   indices; Revocation Registry is updated only during
                   revocation.
                2) ISSUANCE_ON_DEMAND: nothing is issued initially
                   accumulator is 1 (used by default);
            "max_cred_num": maximum number of credentials the new registry
            can process (optional, default 100000)
        }
    :param tails_writer_handle: pre-configured blob storage writer handle
        used to write the generated tails file
    :return:
        revoc_reg_id: identifier of created revocation registry definition
        revoc_reg_def_json: public part of revocation registry definition
        revoc_reg_entry_json: revocation registry entry that defines
        initial state of revocation registry
    """
    logger = logging.getLogger(__name__)
    logger.debug("issuer_create_and_store_revoc_reg: >>> wallet_handle: %r, issuer_did: %r, revoc_def_type: %r,"
                 " tag: %r, cred_def_id: %r, config_json: %r, tails_writer_handle: %r",
                 wallet_handle,
                 issuer_did,
                 revoc_def_type,
                 tag,
                 cred_def_id,
                 config_json,
                 tails_writer_handle)

    # The C callback is created once and cached on the function object so
    # repeated calls reuse the same ctypes callback instance.
    if not hasattr(issuer_create_and_store_revoc_reg, "cb"):
        logger.debug("issuer_create_and_store_revoc_reg: Creating callback")
        issuer_create_and_store_revoc_reg.cb = create_cb(
            CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p, c_char_p))

    # Marshal Python arguments into ctypes values for the native call.
    # revoc_def_type is optional and passed as NULL when None.
    c_wallet_handle = c_int32(wallet_handle)
    c_issuer_did = c_char_p(issuer_did.encode('utf-8'))
    c_revoc_def_type = c_char_p(revoc_def_type.encode('utf-8')) if revoc_def_type is not None else None
    c_tag = c_char_p(tag.encode('utf-8'))
    c_cred_def_id = c_char_p(cred_def_id.encode('utf-8'))
    c_config_json = c_char_p(config_json.encode('utf-8'))
    c_tails_writer_handle = c_int32(tails_writer_handle)

    (rev_reg_id, rev_reg_def_json, rev_reg_entry_json) = await do_call('indy_issuer_create_and_store_revoc_reg',
                                                                       c_wallet_handle,
                                                                       c_issuer_did,
                                                                       c_revoc_def_type,
                                                                       c_tag,
                                                                       c_cred_def_id,
                                                                       c_config_json,
                                                                       c_tails_writer_handle,
                                                                       issuer_create_and_store_revoc_reg.cb)

    # Native layer returns bytes; decode to str for the Python API.
    res = (rev_reg_id.decode(), rev_reg_def_json.decode(), rev_reg_entry_json.decode())

    logger.debug("issuer_create_and_store_revoc_reg: <<< res: %r", res)
    return res
[ "async", "def", "issuer_create_and_store_revoc_reg", "(", "wallet_handle", ":", "int", ",", "issuer_did", ":", "str", ",", "revoc_def_type", ":", "Optional", "[", "str", "]", ",", "tag", ":", "str", ",", "cred_def_id", ":", "str", ",", "config_json", ":", "s...
Create a new revocation registry for the given credential definition as tuple of entities: - Revocation registry definition that encapsulates credentials definition reference, revocation type specific configuration and secrets used for credentials revocation - Revocation registry state that stores the information about revoked entities in a non-disclosing way. The state can be represented as ordered list of revocation registry entries were each entry represents the list of revocation or issuance operations. Revocation registry definition entity contains private and public parts. Private part will be stored in the wallet. Public part will be returned as json intended to be shared with all anoncreds workflow actors usually by publishing REVOC_REG_DEF transaction to Indy distributed ledger. Revocation registry state is stored on the wallet and also intended to be shared as the ordered list of REVOC_REG_ENTRY transactions. This call initializes the state in the wallet and returns the initial entry. Some revocation registry types (for example, 'CL_ACCUM') can require generation of binary blob called tails used to hide information about revoked credentials in public revocation registry and intended to be distributed out of leger (REVOC_REG_DEF transaction will still contain uri and hash of tails). This call requires access to pre-configured blob storage writer instance handle that will allow to write generated tails. :param wallet_handle: wallet handler (created by open_wallet). :param issuer_did: a DID of the issuer signing transaction to the Ledger :param revoc_def_type: revocation registry type (optional, default value depends on credential definition type). Supported types are: - 'CL_ACCUM': Type-3 pairing based accumulator. 
Default for 'CL' credential definition type :param tag: allows to distinct between revocation registries for the same issuer and credential definition :param cred_def_id: id of stored in ledger credential definition :param config_json: type-specific configuration of revocation registry as json: - 'CL_ACCUM': "issuance_type": (optional) type of issuance. Currently supported: 1) ISSUANCE_BY_DEFAULT: all indices are assumed to be issued and initial accumulator is calculated over all indices; Revocation Registry is updated only during revocation. 2) ISSUANCE_ON_DEMAND: nothing is issued initially accumulator is 1 (used by default); "max_cred_num": maximum number of credentials the new registry can process (optional, default 100000) } :param tails_writer_handle: :return: revoc_reg_id: identifier of created revocation registry definition revoc_reg_def_json: public part of revocation registry definition revoc_reg_entry_json: revocation registry entry that defines initial state of revocation registry
[ "Create", "a", "new", "revocation", "registry", "for", "the", "given", "credential", "definition", "as", "tuple", "of", "entities", ":", "-", "Revocation", "registry", "definition", "that", "encapsulates", "credentials", "definition", "reference", "revocation", "typ...
python
train
sdispater/pendulum
pendulum/tz/zoneinfo/reader.py
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/tz/zoneinfo/reader.py#L35-L46
def read_for(self, timezone): # type: (str) -> Timezone """ Read the zoneinfo structure for a given timezone name. :param timezone: The timezone. """ try: file_path = pytzdata.tz_path(timezone) except TimezoneNotFound: raise InvalidTimezone(timezone) return self.read(file_path)
[ "def", "read_for", "(", "self", ",", "timezone", ")", ":", "# type: (str) -> Timezone", "try", ":", "file_path", "=", "pytzdata", ".", "tz_path", "(", "timezone", ")", "except", "TimezoneNotFound", ":", "raise", "InvalidTimezone", "(", "timezone", ")", "return",...
Read the zoneinfo structure for a given timezone name. :param timezone: The timezone.
[ "Read", "the", "zoneinfo", "structure", "for", "a", "given", "timezone", "name", "." ]
python
train
odlgroup/odl
odl/util/utility.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L456-L511
def complex_dtype(dtype, default=None): """Return complex counterpart of ``dtype`` if existing, else ``default``. Parameters ---------- dtype : Real or complex floating point data type. It can be given in any way the `numpy.dtype` constructor understands. default : Object to be returned if no complex counterpart is found for ``dtype``, except for ``None``, in which case an error is raised. Returns ------- complex_dtype : `numpy.dtype` The complex counterpart of ``dtype``. Raises ------ ValueError if there is no complex counterpart to the given data type and ``default == None``. Examples -------- Convert scalar dtypes: >>> complex_dtype(float) dtype('complex128') >>> complex_dtype('float32') dtype('complex64') >>> complex_dtype(complex) dtype('complex128') Dtypes with shape are also supported: >>> complex_dtype(np.dtype((float, (3,)))) dtype(('<c16', (3,))) >>> complex_dtype(('float32', (3,))) dtype(('<c8', (3,))) """ dtype, dtype_in = np.dtype(dtype), dtype if is_complex_floating_dtype(dtype): return dtype try: complex_base_dtype = TYPE_MAP_R2C[dtype.base] except KeyError: if default is not None: return default else: raise ValueError('no complex counterpart exists for `dtype` {}' ''.format(dtype_repr(dtype_in))) else: return np.dtype((complex_base_dtype, dtype.shape))
[ "def", "complex_dtype", "(", "dtype", ",", "default", "=", "None", ")", ":", "dtype", ",", "dtype_in", "=", "np", ".", "dtype", "(", "dtype", ")", ",", "dtype", "if", "is_complex_floating_dtype", "(", "dtype", ")", ":", "return", "dtype", "try", ":", "...
Return complex counterpart of ``dtype`` if existing, else ``default``. Parameters ---------- dtype : Real or complex floating point data type. It can be given in any way the `numpy.dtype` constructor understands. default : Object to be returned if no complex counterpart is found for ``dtype``, except for ``None``, in which case an error is raised. Returns ------- complex_dtype : `numpy.dtype` The complex counterpart of ``dtype``. Raises ------ ValueError if there is no complex counterpart to the given data type and ``default == None``. Examples -------- Convert scalar dtypes: >>> complex_dtype(float) dtype('complex128') >>> complex_dtype('float32') dtype('complex64') >>> complex_dtype(complex) dtype('complex128') Dtypes with shape are also supported: >>> complex_dtype(np.dtype((float, (3,)))) dtype(('<c16', (3,))) >>> complex_dtype(('float32', (3,))) dtype(('<c8', (3,)))
[ "Return", "complex", "counterpart", "of", "dtype", "if", "existing", "else", "default", "." ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/jira.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/jira.py#L336-L344
def get_comments(self, issue_id): """Retrieve all the comments of a given issue. :param issue_id: ID of the issue """ url = urijoin(self.base_url, self.RESOURCE, self.VERSION_API, self.ISSUE, issue_id, self.COMMENT) comments = self.get_items(DEFAULT_DATETIME, url, expand_fields=False) return comments
[ "def", "get_comments", "(", "self", ",", "issue_id", ")", ":", "url", "=", "urijoin", "(", "self", ".", "base_url", ",", "self", ".", "RESOURCE", ",", "self", ".", "VERSION_API", ",", "self", ".", "ISSUE", ",", "issue_id", ",", "self", ".", "COMMENT", ...
Retrieve all the comments of a given issue. :param issue_id: ID of the issue
[ "Retrieve", "all", "the", "comments", "of", "a", "given", "issue", "." ]
python
test
priestc/giotto
giotto/contrib/auth/models.py
https://github.com/priestc/giotto/blob/d4c26380caefa7745bb27135e315de830f7254d3/giotto/contrib/auth/models.py#L10-L18
def basic_register(username, password, password2): """ Register a user and session, and then return the session_key and user. """ if password != password2: raise InvalidInput(password={'message': "Passwords do not match"}, username={'value': username}) user = User.objects.create_user(username, password) return create_session(user.username, password)
[ "def", "basic_register", "(", "username", ",", "password", ",", "password2", ")", ":", "if", "password", "!=", "password2", ":", "raise", "InvalidInput", "(", "password", "=", "{", "'message'", ":", "\"Passwords do not match\"", "}", ",", "username", "=", "{",...
Register a user and session, and then return the session_key and user.
[ "Register", "a", "user", "and", "session", "and", "then", "return", "the", "session_key", "and", "user", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/builtin_trap.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/builtin_trap.py#L91-L96
def remove_builtin(self, key, orig): """Remove an added builtin and re-set the original.""" if orig is BuiltinUndefined: del __builtin__.__dict__[key] else: __builtin__.__dict__[key] = orig
[ "def", "remove_builtin", "(", "self", ",", "key", ",", "orig", ")", ":", "if", "orig", "is", "BuiltinUndefined", ":", "del", "__builtin__", ".", "__dict__", "[", "key", "]", "else", ":", "__builtin__", ".", "__dict__", "[", "key", "]", "=", "orig" ]
Remove an added builtin and re-set the original.
[ "Remove", "an", "added", "builtin", "and", "re", "-", "set", "the", "original", "." ]
python
test
swistakm/graceful
src/graceful/serializers.py
https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/serializers.py#L325-L341
def set_attribute(self, obj, attr, value): """Set value of attribute in given object instance. Reason for existence of this method is the fact that 'attribute' can be also a object's key if it is a dict or any other kind of mapping. Args: obj (object): object instance to modify attr (str): attribute (or key) to change value: value to set """ # if this is any mutable mapping then instead of attributes use keys if isinstance(obj, MutableMapping): obj[attr] = value else: setattr(obj, attr, value)
[ "def", "set_attribute", "(", "self", ",", "obj", ",", "attr", ",", "value", ")", ":", "# if this is any mutable mapping then instead of attributes use keys", "if", "isinstance", "(", "obj", ",", "MutableMapping", ")", ":", "obj", "[", "attr", "]", "=", "value", ...
Set value of attribute in given object instance. Reason for existence of this method is the fact that 'attribute' can be also a object's key if it is a dict or any other kind of mapping. Args: obj (object): object instance to modify attr (str): attribute (or key) to change value: value to set
[ "Set", "value", "of", "attribute", "in", "given", "object", "instance", "." ]
python
train
pycontribs/jira
jira/resources.py
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/resources.py#L621-L641
def update(self, object, globalId=None, application=None, relationship=None): """Update a RemoteLink. 'object' is required. For definitions of the allowable fields for 'object' and the keyword arguments 'globalId', 'application' and 'relationship', see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links. :param object: the link details to add (see the above link for details) :param globalId: unique ID for the link (see the above link for details) :param application: application information for the link (see the above link for details) :param relationship: relationship description for the link (see the above link for details) """ data = { 'object': object} if globalId is not None: data['globalId'] = globalId if application is not None: data['application'] = application if relationship is not None: data['relationship'] = relationship super(RemoteLink, self).update(**data)
[ "def", "update", "(", "self", ",", "object", ",", "globalId", "=", "None", ",", "application", "=", "None", ",", "relationship", "=", "None", ")", ":", "data", "=", "{", "'object'", ":", "object", "}", "if", "globalId", "is", "not", "None", ":", "dat...
Update a RemoteLink. 'object' is required. For definitions of the allowable fields for 'object' and the keyword arguments 'globalId', 'application' and 'relationship', see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links. :param object: the link details to add (see the above link for details) :param globalId: unique ID for the link (see the above link for details) :param application: application information for the link (see the above link for details) :param relationship: relationship description for the link (see the above link for details)
[ "Update", "a", "RemoteLink", ".", "object", "is", "required", "." ]
python
train
deepmipt/DeepPavlov
deeppavlov/metrics/fmeasure.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/fmeasure.py#L40-L56
def round_f1(y_true, y_predicted): """ Calculates F1 (binary) measure. Args: y_true: list of true values y_predicted: list of predicted values Returns: F1 score """ try: predictions = [np.round(x) for x in y_predicted] except TypeError: predictions = y_predicted return f1_score(y_true, predictions)
[ "def", "round_f1", "(", "y_true", ",", "y_predicted", ")", ":", "try", ":", "predictions", "=", "[", "np", ".", "round", "(", "x", ")", "for", "x", "in", "y_predicted", "]", "except", "TypeError", ":", "predictions", "=", "y_predicted", "return", "f1_sco...
Calculates F1 (binary) measure. Args: y_true: list of true values y_predicted: list of predicted values Returns: F1 score
[ "Calculates", "F1", "(", "binary", ")", "measure", "." ]
python
test
mozilla-releng/scriptworker
scriptworker/constants.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/constants.py#L463-L475
def get_reversed_statuses(context): """Return a mapping of exit codes to status strings. Args: context (scriptworker.context.Context): the scriptworker context Returns: dict: the mapping of exit codes to status strings. """ _rev = {v: k for k, v in STATUSES.items()} _rev.update(dict(context.config['reversed_statuses'])) return _rev
[ "def", "get_reversed_statuses", "(", "context", ")", ":", "_rev", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "STATUSES", ".", "items", "(", ")", "}", "_rev", ".", "update", "(", "dict", "(", "context", ".", "config", "[", "'reversed_statuse...
Return a mapping of exit codes to status strings. Args: context (scriptworker.context.Context): the scriptworker context Returns: dict: the mapping of exit codes to status strings.
[ "Return", "a", "mapping", "of", "exit", "codes", "to", "status", "strings", "." ]
python
train
evhub/coconut
coconut/compiler/compiler.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L810-L824
def leading_whitespace(self, inputstring): """Count leading whitespace.""" count = 0 for i, c in enumerate(inputstring): if c == " ": count += 1 elif c == "\t": count += tabworth - (i % tabworth) else: break if self.indchar is None: self.indchar = c elif c != self.indchar: self.strict_err_or_warn("found mixing of tabs and spaces", inputstring, i) return count
[ "def", "leading_whitespace", "(", "self", ",", "inputstring", ")", ":", "count", "=", "0", "for", "i", ",", "c", "in", "enumerate", "(", "inputstring", ")", ":", "if", "c", "==", "\" \"", ":", "count", "+=", "1", "elif", "c", "==", "\"\\t\"", ":", ...
Count leading whitespace.
[ "Count", "leading", "whitespace", "." ]
python
train
rocky/python-uncompyle6
uncompyle6/scanner.py
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/scanner.py#L476-L480
def restrict_to_parent(self, target, parent): """Restrict target to parent structure boundaries.""" if not (parent['start'] < target < parent['end']): target = parent['end'] return target
[ "def", "restrict_to_parent", "(", "self", ",", "target", ",", "parent", ")", ":", "if", "not", "(", "parent", "[", "'start'", "]", "<", "target", "<", "parent", "[", "'end'", "]", ")", ":", "target", "=", "parent", "[", "'end'", "]", "return", "targe...
Restrict target to parent structure boundaries.
[ "Restrict", "target", "to", "parent", "structure", "boundaries", "." ]
python
train
elsampsa/valkka-live
valkka/mvision/multiprocess.py
https://github.com/elsampsa/valkka-live/blob/218bb2ecf71c516c85b1b6e075454bba13090cd8/valkka/mvision/multiprocess.py#L150-L163
def preRun_(self): """Create the shared memory client immediately after fork """ self.report("preRun_") super().preRun_() self.client = ShmemRGBClient( name=self.shmem_name, n_ringbuffer=self.n_buffer, # size of ring buffer width=self.image_dimensions[0], height=self.image_dimensions[1], # client timeouts if nothing has been received in 1000 milliseconds mstimeout=1000, verbose=False )
[ "def", "preRun_", "(", "self", ")", ":", "self", ".", "report", "(", "\"preRun_\"", ")", "super", "(", ")", ".", "preRun_", "(", ")", "self", ".", "client", "=", "ShmemRGBClient", "(", "name", "=", "self", ".", "shmem_name", ",", "n_ringbuffer", "=", ...
Create the shared memory client immediately after fork
[ "Create", "the", "shared", "memory", "client", "immediately", "after", "fork" ]
python
train
ml31415/numpy-groupies
numpy_groupies/aggregate_numba.py
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L208-L226
def callable(self, nans=False): """Compile a jitted function and loop it over the sorted data.""" jitfunc = nb.njit(self.func, nogil=True) def _loop(sortidx, group_idx, a, ret): size = len(ret) group_idx_srt = group_idx[sortidx] a_srt = a[sortidx] indices = step_indices(group_idx_srt) for i in range(len(indices) - 1): start_idx, stop_idx = indices[i], indices[i + 1] ri = group_idx_srt[start_idx] if ri < 0: raise ValueError("negative indices not supported") if ri >= size: raise ValueError("one or more indices in group_idx are too large") ret[ri] = jitfunc(a_srt[start_idx:stop_idx]) return nb.njit(_loop, nogil=True)
[ "def", "callable", "(", "self", ",", "nans", "=", "False", ")", ":", "jitfunc", "=", "nb", ".", "njit", "(", "self", ".", "func", ",", "nogil", "=", "True", ")", "def", "_loop", "(", "sortidx", ",", "group_idx", ",", "a", ",", "ret", ")", ":", ...
Compile a jitted function and loop it over the sorted data.
[ "Compile", "a", "jitted", "function", "and", "loop", "it", "over", "the", "sorted", "data", "." ]
python
train
undertheseanlp/languageflow
languageflow/file_utils.py
https://github.com/undertheseanlp/languageflow/blob/1436e0bf72803e02ccf727f41e8fc85ba167d9fe/languageflow/file_utils.py#L102-L153
def get_from_cache(url: str, cache_dir: Path = None) -> Path: """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ cache_dir.mkdir(parents=True, exist_ok=True) filename = re.sub(r'.+/', '', url) # get cache path to put the file cache_path = cache_dir / filename if cache_path.exists(): return cache_path # make HEAD request to check ETag response = requests.head(url) if response.status_code != 200: if "www.dropbox.com" in url: # dropbox return code 301, so we ignore this error pass else: raise IOError("HEAD request failed for url {}".format(url)) # add ETag to filename if it exists # etag = response.headers.get("ETag") if not cache_path.exists(): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. fd, temp_filename = tempfile.mkstemp() logger.info("%s not found in cache, downloading to %s", url, temp_filename) # GET file object req = requests.get(url, stream=True) content_length = req.headers.get('Content-Length') total = int(content_length) if content_length is not None else None progress = Tqdm.tqdm(unit="B", total=total) with open(temp_filename, 'wb') as temp_file: for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() logger.info("copying %s to cache at %s", temp_filename, cache_path) shutil.copyfile(temp_filename, str(cache_path)) logger.info("removing temp file %s", temp_filename) os.close(fd) os.remove(temp_filename) return cache_path
[ "def", "get_from_cache", "(", "url", ":", "str", ",", "cache_dir", ":", "Path", "=", "None", ")", "->", "Path", ":", "cache_dir", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "filename", "=", "re", ".", "sub", "(", ...
Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file.
[ "Given", "a", "URL", "look", "for", "the", "corresponding", "dataset", "in", "the", "local", "cache", ".", "If", "it", "s", "not", "there", "download", "it", ".", "Then", "return", "the", "path", "to", "the", "cached", "file", "." ]
python
valid
ozak/georasters
georasters/georasters.py
https://github.com/ozak/georasters/blob/0612bd91bb2a2cb2f1d59ba89c1ff131dae27d70/georasters/georasters.py#L990-L1004
def pysal_Join_Counts(self, **kwargs): """ Compute join count statistics for GeoRaster Usage: geo.pysal_Join_Counts(permutations = 1000, rook=True) arguments passed to raster_weights() and pysal.Join_Counts See help(gr.raster_weights), help(pysal.Join_Counts) for options """ if self.weights is None: self.raster_weights(**kwargs) rasterf = self.raster.flatten() rasterf = rasterf[rasterf.mask==False] self.Join_Counts = pysal.Join_Counts(rasterf, self.weights, **kwargs)
[ "def", "pysal_Join_Counts", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "weights", "is", "None", ":", "self", ".", "raster_weights", "(", "*", "*", "kwargs", ")", "rasterf", "=", "self", ".", "raster", ".", "flatten", "(", ")",...
Compute join count statistics for GeoRaster Usage: geo.pysal_Join_Counts(permutations = 1000, rook=True) arguments passed to raster_weights() and pysal.Join_Counts See help(gr.raster_weights), help(pysal.Join_Counts) for options
[ "Compute", "join", "count", "statistics", "for", "GeoRaster" ]
python
train
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L1195-L1202
def get_cdn_log_retention(self, container): """ Returns the status of the setting for CDN log retention for the specified container. """ resp, resp_body = self.api.cdn_request("/%s" % utils.get_name(container), method="HEAD") return resp.headers.get("x-log-retention").lower() == "true"
[ "def", "get_cdn_log_retention", "(", "self", ",", "container", ")", ":", "resp", ",", "resp_body", "=", "self", ".", "api", ".", "cdn_request", "(", "\"/%s\"", "%", "utils", ".", "get_name", "(", "container", ")", ",", "method", "=", "\"HEAD\"", ")", "re...
Returns the status of the setting for CDN log retention for the specified container.
[ "Returns", "the", "status", "of", "the", "setting", "for", "CDN", "log", "retention", "for", "the", "specified", "container", "." ]
python
train
pyQode/pyqode.core
pyqode/core/widgets/tabs.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/tabs.py#L428-L441
def _on_dirty_changed(self, dirty): """ Adds a star in front of a dirtt tab and emits dirty_changed. """ try: title = self._current._tab_name index = self.indexOf(self._current) if dirty: self.setTabText(index, "* " + title) else: self.setTabText(index, title) except AttributeError: pass self.dirty_changed.emit(dirty)
[ "def", "_on_dirty_changed", "(", "self", ",", "dirty", ")", ":", "try", ":", "title", "=", "self", ".", "_current", ".", "_tab_name", "index", "=", "self", ".", "indexOf", "(", "self", ".", "_current", ")", "if", "dirty", ":", "self", ".", "setTabText"...
Adds a star in front of a dirtt tab and emits dirty_changed.
[ "Adds", "a", "star", "in", "front", "of", "a", "dirtt", "tab", "and", "emits", "dirty_changed", "." ]
python
train
MIT-LCP/wfdb-python
wfdb/io/_signal.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L754-L894
def _rd_segment(file_name, dir_name, pb_dir, fmt, n_sig, sig_len, byte_offset, samps_per_frame, skew, sampfrom, sampto, channels, smooth_frames, ignore_skew): """ Read the digital samples from a single segment record's associated dat file(s). Parameters ---------- file_name : list The names of the dat files to be read. dir_name : str The full directory where the dat file(s) are located, if the dat file(s) are local. pb_dir : str The physiobank directory where the dat file(s) are located, if the dat file(s) are remote. fmt : list The formats of the dat files n_sig : int The number of signals contained in the dat file sig_len : int The signal length (per channel) of the dat file byte_offset : int The byte offset of the dat file samps_per_frame : list The samples/frame for each signal of the dat file skew : list The skew for the signals of the dat file sampfrom : int The starting sample number to be read from the signals sampto : int The final sample number to be read from the signals smooth_frames : bool Whether to smooth channels with multiple samples/frame ignore_skew : bool, optional Used when reading records with at least one skewed signal. Specifies whether to apply the skew to align the signals in the output variable (False), or to ignore the skew field and load in all values contained in the dat files unaligned (True). Returns ------- signals : numpy array, or list The signals read from the dat file(s). A 2d numpy array is returned if the signals have uniform samples/frame or if `smooth_frames` is True. Otherwise a list of 1d numpy arrays is returned. Notes ----- 'channels', 'sampfrom', 'sampto', 'smooth_frames', and 'ignore_skew' are user desired input fields. 
All other parameters are specifications of the segment """ # Avoid changing outer variables byte_offset = byte_offset[:] samps_per_frame = samps_per_frame[:] skew = skew[:] # Set defaults for empty fields for i in range(n_sig): if byte_offset[i] == None: byte_offset[i] = 0 if samps_per_frame[i] == None: samps_per_frame[i] = 1 if skew[i] == None: skew[i] = 0 # If skew is to be ignored, set all to 0 if ignore_skew: skew = [0]*n_sig # Get the set of dat files, and the # channels that belong to each file. file_name, datchannel = describe_list_indices(file_name) # Some files will not be read depending on input channels. # Get the the wanted fields only. w_file_name = [] # one scalar per dat file w_fmt = {} # one scalar per dat file w_byte_offset = {} # one scalar per dat file w_samps_per_frame = {} # one list per dat file w_skew = {} # one list per dat file w_channel = {} # one list per dat file for fn in file_name: # intersecting dat channels between the input channels and the channels of the file idc = [c for c in datchannel[fn] if c in channels] # There is at least one wanted channel in the dat file if idc != []: w_file_name.append(fn) w_fmt[fn] = fmt[datchannel[fn][0]] w_byte_offset[fn] = byte_offset[datchannel[fn][0]] w_samps_per_frame[fn] = [samps_per_frame[c] for c in datchannel[fn]] w_skew[fn] = [skew[c] for c in datchannel[fn]] w_channel[fn] = idc # Wanted dat channels, relative to the dat file itself r_w_channel = {} # The channels in the final output array that correspond to the read channels in each dat file out_dat_channel = {} for fn in w_channel: r_w_channel[fn] = [c - min(datchannel[fn]) for c in w_channel[fn]] out_dat_channel[fn] = [channels.index(c) for c in w_channel[fn]] # Signals with multiple samples/frame are smoothed, or all signals have 1 sample/frame. 
# Return uniform numpy array if smooth_frames or sum(samps_per_frame) == n_sig: # Figure out the largest required dtype for the segment to minimize memory usage max_dtype = _np_dtype(_fmt_res(fmt, max_res=True), discrete=True) # Allocate signal array. Minimize dtype signals = np.zeros([sampto-sampfrom, len(channels)], dtype=max_dtype) # Read each wanted dat file and store signals for fn in w_file_name: signals[:, out_dat_channel[fn]] = _rd_dat_signals(fn, dir_name, pb_dir, w_fmt[fn], len(datchannel[fn]), sig_len, w_byte_offset[fn], w_samps_per_frame[fn], w_skew[fn], sampfrom, sampto, smooth_frames)[:, r_w_channel[fn]] # Return each sample in signals with multiple samples/frame, without smoothing. # Return a list of numpy arrays for each signal. else: signals = [None] * len(channels) for fn in w_file_name: # Get the list of all signals contained in the dat file datsignals = _rd_dat_signals(fn, dir_name, pb_dir, w_fmt[fn], len(datchannel[fn]), sig_len, w_byte_offset[fn], w_samps_per_frame[fn], w_skew[fn], sampfrom, sampto, smooth_frames) # Copy over the wanted signals for cn in range(len(out_dat_channel[fn])): signals[out_dat_channel[fn][cn]] = datsignals[r_w_channel[fn][cn]] return signals
[ "def", "_rd_segment", "(", "file_name", ",", "dir_name", ",", "pb_dir", ",", "fmt", ",", "n_sig", ",", "sig_len", ",", "byte_offset", ",", "samps_per_frame", ",", "skew", ",", "sampfrom", ",", "sampto", ",", "channels", ",", "smooth_frames", ",", "ignore_ske...
Read the digital samples from a single segment record's associated dat file(s). Parameters ---------- file_name : list The names of the dat files to be read. dir_name : str The full directory where the dat file(s) are located, if the dat file(s) are local. pb_dir : str The physiobank directory where the dat file(s) are located, if the dat file(s) are remote. fmt : list The formats of the dat files n_sig : int The number of signals contained in the dat file sig_len : int The signal length (per channel) of the dat file byte_offset : int The byte offset of the dat file samps_per_frame : list The samples/frame for each signal of the dat file skew : list The skew for the signals of the dat file sampfrom : int The starting sample number to be read from the signals sampto : int The final sample number to be read from the signals smooth_frames : bool Whether to smooth channels with multiple samples/frame ignore_skew : bool, optional Used when reading records with at least one skewed signal. Specifies whether to apply the skew to align the signals in the output variable (False), or to ignore the skew field and load in all values contained in the dat files unaligned (True). Returns ------- signals : numpy array, or list The signals read from the dat file(s). A 2d numpy array is returned if the signals have uniform samples/frame or if `smooth_frames` is True. Otherwise a list of 1d numpy arrays is returned. Notes ----- 'channels', 'sampfrom', 'sampto', 'smooth_frames', and 'ignore_skew' are user desired input fields. All other parameters are specifications of the segment
[ "Read", "the", "digital", "samples", "from", "a", "single", "segment", "record", "s", "associated", "dat", "file", "(", "s", ")", "." ]
python
train
ska-sa/purr
Purr/Editors.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Editors.py#L436-L468
def addDataProducts(self, dps): """Adds new data products to listview. dps is a list of DP objects. Returns True if new (non-quiet) DPs are added, or if existing non-quiet dps are updated. (this usually tells the main window to wake up) """ busy = Purr.BusyIndicator() wakeup = False # build up list of items to be inserted itemlist = [] for dp in dps: item = self.dpitems.get(dp.sourcepath) # If item already exists, it needs to be moved to its new position # If takeTopLevelItem() returns None, then item was already removed (this shouldn't happen, # but let's be defensive), and we make a new one anyway. if item and self.takeTopLevelItem(self.indexOfTopLevelItem(item)): itemlist.append(item) else: itemlist.append(self._makeDPItem(None, dp)) wakeup = wakeup or not (dp.ignored or dp.quiet) # if these DPs were added as a result of a drag-and-drop, we need to insert them in FRONT of the dropped-on item if self._dropped_on: index = self.indexOfTopLevelItem(self._dropped_on) # else insert at end (after=None) else: index = self.topLevelItemCount() if itemlist: self.insertTopLevelItems(index, itemlist) self.emit(SIGNAL("updated")) for item in itemlist: # ensure combobox widgets are made self._itemComboBox(item, self.ColAction) self._itemComboBox(item, self.ColRender) return wakeup
[ "def", "addDataProducts", "(", "self", ",", "dps", ")", ":", "busy", "=", "Purr", ".", "BusyIndicator", "(", ")", "wakeup", "=", "False", "# build up list of items to be inserted", "itemlist", "=", "[", "]", "for", "dp", "in", "dps", ":", "item", "=", "sel...
Adds new data products to listview. dps is a list of DP objects. Returns True if new (non-quiet) DPs are added, or if existing non-quiet dps are updated. (this usually tells the main window to wake up)
[ "Adds", "new", "data", "products", "to", "listview", ".", "dps", "is", "a", "list", "of", "DP", "objects", ".", "Returns", "True", "if", "new", "(", "non", "-", "quiet", ")", "DPs", "are", "added", "or", "if", "existing", "non", "-", "quiet", "dps", ...
python
train
crdoconnor/commandlib
commandlib/command.py
https://github.com/crdoconnor/commandlib/blob/b630364fd7b0d189b388e22a7f43235d182e12e4/commandlib/command.py#L161-L168
def with_trailing_args(self, *arguments): """ Return new Command object that will be run with specified trailing arguments. """ new_command = copy.deepcopy(self) new_command._trailing_args = [str(arg) for arg in arguments] return new_command
[ "def", "with_trailing_args", "(", "self", ",", "*", "arguments", ")", ":", "new_command", "=", "copy", ".", "deepcopy", "(", "self", ")", "new_command", ".", "_trailing_args", "=", "[", "str", "(", "arg", ")", "for", "arg", "in", "arguments", "]", "retur...
Return new Command object that will be run with specified trailing arguments.
[ "Return", "new", "Command", "object", "that", "will", "be", "run", "with", "specified", "trailing", "arguments", "." ]
python
train
ToFuProject/tofu
tofu/data/_core.py
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L904-L921
def set_dtreat_indt(self, t=None, indt=None): """ Store the desired index array for the time vector If an array of indices (refering to self.ddataRef['t'] is not provided, uses self.select_t(t=t) to produce it """ lC = [indt is not None, t is not None] if all(lC): msg = "Please provide either t or indt (or none)!" raise Exception(msg) if lC[1]: ind = self.select_t(t=t, out=bool) else: ind = _format_ind(indt, n=self._ddataRef['nt']) self._dtreat['indt'] = ind self._ddata['uptodate'] = False
[ "def", "set_dtreat_indt", "(", "self", ",", "t", "=", "None", ",", "indt", "=", "None", ")", ":", "lC", "=", "[", "indt", "is", "not", "None", ",", "t", "is", "not", "None", "]", "if", "all", "(", "lC", ")", ":", "msg", "=", "\"Please provide eit...
Store the desired index array for the time vector If an array of indices (refering to self.ddataRef['t'] is not provided, uses self.select_t(t=t) to produce it
[ "Store", "the", "desired", "index", "array", "for", "the", "time", "vector" ]
python
train
pycontribs/pyrax
pyrax/clouddatabases.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L483-L488
def grant_user_access(self, user, db_names, strict=True): """ Gives access to the databases listed in `db_names` to the user. """ return self._user_manager.grant_user_access(user, db_names, strict=strict)
[ "def", "grant_user_access", "(", "self", ",", "user", ",", "db_names", ",", "strict", "=", "True", ")", ":", "return", "self", ".", "_user_manager", ".", "grant_user_access", "(", "user", ",", "db_names", ",", "strict", "=", "strict", ")" ]
Gives access to the databases listed in `db_names` to the user.
[ "Gives", "access", "to", "the", "databases", "listed", "in", "db_names", "to", "the", "user", "." ]
python
train
ozgurgunes/django-manifest
manifest/accounts/utils.py
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/utils.py#L113-L127
def get_datetime_now(): """ Returns datetime object with current point in time. In Django 1.4+ it uses Django's django.utils.timezone.now() which returns an aware or naive datetime that represents the current point in time when ``USE_TZ`` in project's settings is True or False respectively. In older versions of Django it uses datetime.datetime.now(). """ try: from django.utils import timezone return timezone.now() except ImportError: return datetime.datetime.now()
[ "def", "get_datetime_now", "(", ")", ":", "try", ":", "from", "django", ".", "utils", "import", "timezone", "return", "timezone", ".", "now", "(", ")", "except", "ImportError", ":", "return", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Returns datetime object with current point in time. In Django 1.4+ it uses Django's django.utils.timezone.now() which returns an aware or naive datetime that represents the current point in time when ``USE_TZ`` in project's settings is True or False respectively. In older versions of Django it uses datetime.datetime.now().
[ "Returns", "datetime", "object", "with", "current", "point", "in", "time", "." ]
python
train
summanlp/textrank
summa/preprocessing/snowball.py
https://github.com/summanlp/textrank/blob/6844bbe8c4b2b468020ae0dfd6574a743f9ad442/summa/preprocessing/snowball.py#L237-L275
def _rv_standard(self, word, vowels): """ Return the standard interpretation of the string region RV. If the second letter is a consonant, RV is the region after the next following vowel. If the first two letters are vowels, RV is the region after the next following consonant. Otherwise, RV is the region after the third letter. :param word: The word whose region RV is determined. :type word: str or unicode :param vowels: The vowels of the respective language that are used to determine the region RV. :type vowels: unicode :return: the region RV for the respective word. :rtype: unicode :note: This helper method is invoked by the respective stem method of the subclasses ItalianStemmer, PortugueseStemmer, RomanianStemmer, and SpanishStemmer. It is not to be invoked directly! """ rv = "" if len(word) >= 2: if word[1] not in vowels: for i in range(2, len(word)): if word[i] in vowels: rv = word[i+1:] break elif word[:2] in vowels: for i in range(2, len(word)): if word[i] not in vowels: rv = word[i+1:] break else: rv = word[3:] return rv
[ "def", "_rv_standard", "(", "self", ",", "word", ",", "vowels", ")", ":", "rv", "=", "\"\"", "if", "len", "(", "word", ")", ">=", "2", ":", "if", "word", "[", "1", "]", "not", "in", "vowels", ":", "for", "i", "in", "range", "(", "2", ",", "le...
Return the standard interpretation of the string region RV. If the second letter is a consonant, RV is the region after the next following vowel. If the first two letters are vowels, RV is the region after the next following consonant. Otherwise, RV is the region after the third letter. :param word: The word whose region RV is determined. :type word: str or unicode :param vowels: The vowels of the respective language that are used to determine the region RV. :type vowels: unicode :return: the region RV for the respective word. :rtype: unicode :note: This helper method is invoked by the respective stem method of the subclasses ItalianStemmer, PortugueseStemmer, RomanianStemmer, and SpanishStemmer. It is not to be invoked directly!
[ "Return", "the", "standard", "interpretation", "of", "the", "string", "region", "RV", "." ]
python
train
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L918-L961
def flip_axis_multi(x, axis, is_random=False): """Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly, Parameters ----------- x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). others : args See ``tl.prepro.flip_axis``. Returns ------- numpy.array A list of processed images. """ if is_random: factor = np.random.uniform(-1, 1) if factor > 0: # x = np.asarray(x).swapaxes(axis, 0) # x = x[::-1, ...] # x = x.swapaxes(0, axis) # return x results = [] for data in x: data = np.asarray(data).swapaxes(axis, 0) data = data[::-1, ...] data = data.swapaxes(0, axis) results.append(data) return np.asarray(results) else: return np.asarray(x) else: # x = np.asarray(x).swapaxes(axis, 0) # x = x[::-1, ...] # x = x.swapaxes(0, axis) # return x results = [] for data in x: data = np.asarray(data).swapaxes(axis, 0) data = data[::-1, ...] data = data.swapaxes(0, axis) results.append(data) return np.asarray(results)
[ "def", "flip_axis_multi", "(", "x", ",", "axis", ",", "is_random", "=", "False", ")", ":", "if", "is_random", ":", "factor", "=", "np", ".", "random", ".", "uniform", "(", "-", "1", ",", "1", ")", "if", "factor", ">", "0", ":", "# x = np.asarray(x).s...
Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly, Parameters ----------- x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). others : args See ``tl.prepro.flip_axis``. Returns ------- numpy.array A list of processed images.
[ "Flip", "the", "axises", "of", "multiple", "images", "together", "such", "as", "flip", "left", "and", "right", "up", "and", "down", "randomly", "or", "non", "-", "randomly" ]
python
valid
Kyria/EsiPy
esipy/utils.py
https://github.com/Kyria/EsiPy/blob/06407a0218a126678f80d8a7e8a67b9729327865/esipy/utils.py#L74-L88
def generate_code_challenge(verifier): """ source: https://github.com/openstack/deb-python-oauth2client Creates a 'code_challenge' as described in section 4.2 of RFC 7636 by taking the sha256 hash of the verifier and then urlsafe base64-encoding it. Args: verifier: bytestring, representing a code_verifier as generated by generate_code_verifier(). Returns: Bytestring, representing a urlsafe base64-encoded sha256 hash digest, without '=' padding. """ digest = hashlib.sha256(verifier.encode('utf-8')).digest() return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('utf-8')
[ "def", "generate_code_challenge", "(", "verifier", ")", ":", "digest", "=", "hashlib", ".", "sha256", "(", "verifier", ".", "encode", "(", "'utf-8'", ")", ")", ".", "digest", "(", ")", "return", "base64", ".", "urlsafe_b64encode", "(", "digest", ")", ".", ...
source: https://github.com/openstack/deb-python-oauth2client Creates a 'code_challenge' as described in section 4.2 of RFC 7636 by taking the sha256 hash of the verifier and then urlsafe base64-encoding it. Args: verifier: bytestring, representing a code_verifier as generated by generate_code_verifier(). Returns: Bytestring, representing a urlsafe base64-encoded sha256 hash digest, without '=' padding.
[ "source", ":", "https", ":", "//", "github", ".", "com", "/", "openstack", "/", "deb", "-", "python", "-", "oauth2client", "Creates", "a", "code_challenge", "as", "described", "in", "section", "4", ".", "2", "of", "RFC", "7636", "by", "taking", "the", ...
python
train
peri-source/peri
scripts/does_matter/common.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/scripts/does_matter/common.py#L68-L127
def perfect_platonic_per_pixel(N, R, scale=11, pos=None, zscale=1.0, returnpix=None): """ Create a perfect platonic sphere of a given radius R by supersampling by a factor scale on a grid of size N. Scale must be odd. We are able to perfectly position these particles up to 1/scale. Therefore, let's only allow those types of shifts for now, but return the actual position used for the placement. """ # enforce odd scale size if scale % 2 != 1: scale += 1 if pos is None: # place the default position in the center of the grid pos = np.array([(N-1)/2.0]*3) # limit positions to those that are exact on the size 1./scale # positions have the form (d = divisions): # p = N + m/d s = 1.0/scale f = zscale**2 i = pos.astype('int') p = i + s*((pos - i)/s).astype('int') pos = p + 1e-10 # unfortunately needed to break ties # make the output arrays image = np.zeros((N,)*3) x,y,z = np.meshgrid(*(xrange(N),)*3, indexing='ij') # for each real pixel in the image, integrate a bunch of superres pixels for x0,y0,z0 in zip(x.flatten(),y.flatten(),z.flatten()): # short-circuit things that are just too far away! ddd = np.sqrt(f*(x0-pos[0])**2 + (y0-pos[1])**2 + (z0-pos[2])**2) if ddd > R + 4: image[x0,y0,z0] = 0.0 continue # otherwise, build the local mesh and count the volume xp,yp,zp = np.meshgrid( *(np.linspace(i-0.5+s/2, i+0.5-s/2, scale, endpoint=True) for i in (x0,y0,z0)), indexing='ij' ) ddd = np.sqrt(f*(xp-pos[0])**2 + (yp-pos[1])**2 + (zp-pos[2])**2) if returnpix is not None and returnpix == [x0,y0,z0]: outpix = 1.0 * (ddd < R) vol = (1.0*(ddd < R) + 0.0*(ddd == R)).sum() image[x0,y0,z0] = vol / float(scale**3) #vol_true = 4./3*np.pi*R**3 #vol_real = image.sum() #print vol_true, vol_real, (vol_true - vol_real)/vol_true if returnpix: return image, pos, outpix return image, pos
[ "def", "perfect_platonic_per_pixel", "(", "N", ",", "R", ",", "scale", "=", "11", ",", "pos", "=", "None", ",", "zscale", "=", "1.0", ",", "returnpix", "=", "None", ")", ":", "# enforce odd scale size", "if", "scale", "%", "2", "!=", "1", ":", "scale",...
Create a perfect platonic sphere of a given radius R by supersampling by a factor scale on a grid of size N. Scale must be odd. We are able to perfectly position these particles up to 1/scale. Therefore, let's only allow those types of shifts for now, but return the actual position used for the placement.
[ "Create", "a", "perfect", "platonic", "sphere", "of", "a", "given", "radius", "R", "by", "supersampling", "by", "a", "factor", "scale", "on", "a", "grid", "of", "size", "N", ".", "Scale", "must", "be", "odd", "." ]
python
valid
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L1373-L1408
def info(self, buf=None): """ Concise summary of a Dataset variables and attributes. Parameters ---------- buf : writable buffer, defaults to sys.stdout See Also -------- pandas.DataFrame.assign netCDF's ncdump """ if buf is None: # pragma: no cover buf = sys.stdout lines = [] lines.append('xarray.Dataset {') lines.append('dimensions:') for name, size in self.dims.items(): lines.append('\t{name} = {size} ;'.format(name=name, size=size)) lines.append('\nvariables:') for name, da in self.variables.items(): dims = ', '.join(da.dims) lines.append('\t{type} {name}({dims}) ;'.format( type=da.dtype, name=name, dims=dims)) for k, v in da.attrs.items(): lines.append('\t\t{name}:{k} = {v} ;'.format(name=name, k=k, v=v)) lines.append('\n// global attributes:') for k, v in self.attrs.items(): lines.append('\t:{k} = {v} ;'.format(k=k, v=v)) lines.append('}') buf.write('\n'.join(lines))
[ "def", "info", "(", "self", ",", "buf", "=", "None", ")", ":", "if", "buf", "is", "None", ":", "# pragma: no cover", "buf", "=", "sys", ".", "stdout", "lines", "=", "[", "]", "lines", ".", "append", "(", "'xarray.Dataset {'", ")", "lines", ".", "appe...
Concise summary of a Dataset variables and attributes. Parameters ---------- buf : writable buffer, defaults to sys.stdout See Also -------- pandas.DataFrame.assign netCDF's ncdump
[ "Concise", "summary", "of", "a", "Dataset", "variables", "and", "attributes", "." ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/schema.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/schema.py#L164-L189
def getElementDeclaration(cls, namespaceURI, name, isref=False, lazy=False): '''Grab an element declaration, returns a typecode instance representation or a typecode class definition. An element reference has its own facets, and is local so it will not be cached. Parameters: namespaceURI -- name -- isref -- if element reference, return class definition. ''' key = (namespaceURI, name) if isref: klass = cls.elements.get(key,None) if klass is not None and lazy is True: return _Mirage(klass) return klass typecode = cls.element_typecode_cache.get(key, None) if typecode is None: tcls = cls.elements.get(key,None) if tcls is not None: typecode = cls.element_typecode_cache[key] = tcls() typecode.typed = False return typecode
[ "def", "getElementDeclaration", "(", "cls", ",", "namespaceURI", ",", "name", ",", "isref", "=", "False", ",", "lazy", "=", "False", ")", ":", "key", "=", "(", "namespaceURI", ",", "name", ")", "if", "isref", ":", "klass", "=", "cls", ".", "elements", ...
Grab an element declaration, returns a typecode instance representation or a typecode class definition. An element reference has its own facets, and is local so it will not be cached. Parameters: namespaceURI -- name -- isref -- if element reference, return class definition.
[ "Grab", "an", "element", "declaration", "returns", "a", "typecode", "instance", "representation", "or", "a", "typecode", "class", "definition", ".", "An", "element", "reference", "has", "its", "own", "facets", "and", "is", "local", "so", "it", "will", "not", ...
python
train
HazyResearch/fonduer
src/fonduer/learning/classifier.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/classifier.py#L333-L362
def marginals(self, X): """ Compute the marginals for the given candidates X. Note: split into batches to avoid OOM errors. :param X: The input data which is a (list of Candidate objects, a sparse matrix of corresponding features) pair or a list of (Candidate, features) pairs. :type X: pair or list """ nn.Module.train(self, False) if self._check_input(X): X = self._preprocess_data(X) dataloader = DataLoader( MultiModalDataset(X), batch_size=self.settings["batch_size"], collate_fn=self._collate_fn(), shuffle=False, ) marginals = torch.Tensor([]) for X_batch in dataloader: marginal = self._non_cuda(self._calc_logits(X_batch)) marginals = torch.cat((marginals, marginal), 0) return F.softmax(marginals, dim=-1).detach().numpy()
[ "def", "marginals", "(", "self", ",", "X", ")", ":", "nn", ".", "Module", ".", "train", "(", "self", ",", "False", ")", "if", "self", ".", "_check_input", "(", "X", ")", ":", "X", "=", "self", ".", "_preprocess_data", "(", "X", ")", "dataloader", ...
Compute the marginals for the given candidates X. Note: split into batches to avoid OOM errors. :param X: The input data which is a (list of Candidate objects, a sparse matrix of corresponding features) pair or a list of (Candidate, features) pairs. :type X: pair or list
[ "Compute", "the", "marginals", "for", "the", "given", "candidates", "X", ".", "Note", ":", "split", "into", "batches", "to", "avoid", "OOM", "errors", "." ]
python
train
peri-source/peri
peri/opt/addsubtract.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/addsubtract.py#L213-L243
def should_particle_exist(absent_err, present_err, absent_d, present_d, im_change_frac=0.2, min_derr=0.1): """ Checks whether or not adding a particle should be present. Parameters ---------- absent_err : Float The state error without the particle. present_err : Float The state error with the particle. absent_d : numpy.ndarray The state residuals without the particle. present_d : numpy.ndarray The state residuals with the particle. im_change_frac : Float, optional How good the change in error needs to be relative to the change in the residuals. Default is 0.2; i.e. return False if the error does not decrease by 0.2 x the change in the residuals. min_derr : Float, optional The minimal improvement in error. Default is 0.1 Returns ------- Bool True if the errors is better with the particle present. """ delta_im = np.ravel(present_d - absent_d) im_change = np.dot(delta_im, delta_im) err_cutoff = max([im_change_frac * im_change, min_derr]) return (absent_err - present_err) >= err_cutoff
[ "def", "should_particle_exist", "(", "absent_err", ",", "present_err", ",", "absent_d", ",", "present_d", ",", "im_change_frac", "=", "0.2", ",", "min_derr", "=", "0.1", ")", ":", "delta_im", "=", "np", ".", "ravel", "(", "present_d", "-", "absent_d", ")", ...
Checks whether or not adding a particle should be present. Parameters ---------- absent_err : Float The state error without the particle. present_err : Float The state error with the particle. absent_d : numpy.ndarray The state residuals without the particle. present_d : numpy.ndarray The state residuals with the particle. im_change_frac : Float, optional How good the change in error needs to be relative to the change in the residuals. Default is 0.2; i.e. return False if the error does not decrease by 0.2 x the change in the residuals. min_derr : Float, optional The minimal improvement in error. Default is 0.1 Returns ------- Bool True if the errors is better with the particle present.
[ "Checks", "whether", "or", "not", "adding", "a", "particle", "should", "be", "present", "." ]
python
valid
inveniosoftware/invenio-oauth2server
invenio_oauth2server/views/settings.py
https://github.com/inveniosoftware/invenio-oauth2server/blob/7033d3495c1a2b830e101e43918e92a37bbb49f2/invenio_oauth2server/views/settings.py#L183-L188
def client_reset(client): """Reset client's secret.""" if request.form.get('reset') == 'yes': client.reset_client_secret() db.session.commit() return redirect(url_for('.client_view', client_id=client.client_id))
[ "def", "client_reset", "(", "client", ")", ":", "if", "request", ".", "form", ".", "get", "(", "'reset'", ")", "==", "'yes'", ":", "client", ".", "reset_client_secret", "(", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "redirect", "(",...
Reset client's secret.
[ "Reset", "client", "s", "secret", "." ]
python
train
scanny/python-pptx
pptx/shapes/freeform.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/freeform.py#L293-L298
def new(cls, freeform_builder, x, y): """Return a new _LineSegment object ending at point *(x, y)*. Both *x* and *y* are rounded to the nearest integer before use. """ return cls(freeform_builder, int(round(x)), int(round(y)))
[ "def", "new", "(", "cls", ",", "freeform_builder", ",", "x", ",", "y", ")", ":", "return", "cls", "(", "freeform_builder", ",", "int", "(", "round", "(", "x", ")", ")", ",", "int", "(", "round", "(", "y", ")", ")", ")" ]
Return a new _LineSegment object ending at point *(x, y)*. Both *x* and *y* are rounded to the nearest integer before use.
[ "Return", "a", "new", "_LineSegment", "object", "ending", "at", "point", "*", "(", "x", "y", ")", "*", "." ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/conano.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/conano.py#L148-L162
def is_valid(self, tree): """ returns true, iff the order of the tokens in the graph are the same as in the Conano file (converted to plain text). """ conano_plaintext = etree.tostring(tree, encoding='utf8', method='text') token_str_list = conano_plaintext.split() for i, plain_token in enumerate(token_str_list): graph_token = self.node[self.tokens[i]][self.ns+':token'] if ensure_unicode(plain_token) != graph_token: sys.stderr.write( "Conano tokenizations don't match: {0} vs. {1} " "({2})".format(plain_token, graph_token)) return False return True
[ "def", "is_valid", "(", "self", ",", "tree", ")", ":", "conano_plaintext", "=", "etree", ".", "tostring", "(", "tree", ",", "encoding", "=", "'utf8'", ",", "method", "=", "'text'", ")", "token_str_list", "=", "conano_plaintext", ".", "split", "(", ")", "...
returns true, iff the order of the tokens in the graph are the same as in the Conano file (converted to plain text).
[ "returns", "true", "iff", "the", "order", "of", "the", "tokens", "in", "the", "graph", "are", "the", "same", "as", "in", "the", "Conano", "file", "(", "converted", "to", "plain", "text", ")", "." ]
python
train
zhebrak/raftos
raftos/state.py
https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L42-L57
def validate_commit_index(func): """Apply to State Machine everything up to commit index""" @functools.wraps(func) def wrapped(self, *args, **kwargs): for not_applied in range(self.log.last_applied + 1, self.log.commit_index + 1): self.state_machine.apply(self.log[not_applied]['command']) self.log.last_applied += 1 try: self.apply_future.set_result(not_applied) except (asyncio.futures.InvalidStateError, AttributeError): pass return func(self, *args, **kwargs) return wrapped
[ "def", "validate_commit_index", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "not_applied", "in", "range", "(", "self", ".", "log",...
Apply to State Machine everything up to commit index
[ "Apply", "to", "State", "Machine", "everything", "up", "to", "commit", "index" ]
python
train
CI-WATER/gsshapy
gsshapy/base/file_base.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/base/file_base.py#L37-L80
def read(self, directory, filename, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None, **kwargs): """ Generic read file into database method. Args: directory (str): Directory containing the file to be read. filename (str): Name of the file which will be read (e.g.: 'example.prj'). session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects. Defaults to False. spatialReferenceID (int, optional): Integer id of spatial reference system for the model. Required if spatial is True. Defaults to srid 4236. replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if the file you are reading contains replacement parameters. """ # Read parameter derivatives path = os.path.join(directory, filename) filename_split = filename.split('.') name = filename_split[0] # Default file extension extension = '' if len(filename_split) >= 2: extension = filename_split[-1] if os.path.isfile(path): # Add self to session session.add(self) # Read self._read(directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile, **kwargs) # Commit to database self._commit(session, self.COMMIT_ERROR_MESSAGE) else: # Rollback the session if the file doesn't exist session.rollback() # Print warning log.warning('Could not find file named {0}. File not read.'.format(filename))
[ "def", "read", "(", "self", ",", "directory", ",", "filename", ",", "session", ",", "spatial", "=", "False", ",", "spatialReferenceID", "=", "4236", ",", "replaceParamFile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Read parameter derivatives", "pat...
Generic read file into database method. Args: directory (str): Directory containing the file to be read. filename (str): Name of the file which will be read (e.g.: 'example.prj'). session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects. Defaults to False. spatialReferenceID (int, optional): Integer id of spatial reference system for the model. Required if spatial is True. Defaults to srid 4236. replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if the file you are reading contains replacement parameters.
[ "Generic", "read", "file", "into", "database", "method", "." ]
python
train
csparpa/pyowm
pyowm/commons/frontlinkedlist.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/commons/frontlinkedlist.py#L203-L215
def contains(self, data): """ Checks if the provided data is stored in at least one node of the list. :param data: the seeked data :type data: object :returns: a boolean """ for item in self: if item.data() == data: return True return False
[ "def", "contains", "(", "self", ",", "data", ")", ":", "for", "item", "in", "self", ":", "if", "item", ".", "data", "(", ")", "==", "data", ":", "return", "True", "return", "False" ]
Checks if the provided data is stored in at least one node of the list. :param data: the seeked data :type data: object :returns: a boolean
[ "Checks", "if", "the", "provided", "data", "is", "stored", "in", "at", "least", "one", "node", "of", "the", "list", "." ]
python
train
Damgaard/PyImgur
pyimgur/__init__.py
https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L989-L993
def get_image(self, id): """Return a Image object representing the image with the given id.""" url = self._base_url + "/3/image/{0}".format(id) resp = self._send_request(url) return Image(resp, self)
[ "def", "get_image", "(", "self", ",", "id", ")", ":", "url", "=", "self", ".", "_base_url", "+", "\"/3/image/{0}\"", ".", "format", "(", "id", ")", "resp", "=", "self", ".", "_send_request", "(", "url", ")", "return", "Image", "(", "resp", ",", "self...
Return a Image object representing the image with the given id.
[ "Return", "a", "Image", "object", "representing", "the", "image", "with", "the", "given", "id", "." ]
python
train
edwards-lab/MVtest
meanvar/mvresult.py
https://github.com/edwards-lab/MVtest/blob/fe8cf627464ef59d68b7eda628a19840d033882f/meanvar/mvresult.py#L68-L110
def print_header(self, f=sys.stdout, verbose=False): """Prints header to f (will write header based on verbose) :param f: stream to print output :param verbose: print all data or only the most important parts? """ self.var_count = 2 + len(self.covar_labels) if verbose: header = [ "Chr", "Pos", "RSID", "Phenotype", "N", "Ref_allele", "Eff_allele", "Eff_Allele_Freq", "P-Value", "LM_PValue" ] for var in ["Intercept","Geno"] + self.covar_labels: for t in ["mean", "mean_stder", "mean_pval", "var", "var_stder", "var_pval"]: header.append("%s_%s" % (var.lower(), t)) print >> f, "\t".join(header) else: print >> f, "\t".join([ "Chr", "Pos", "RSID", "Phenotype", "N", "Ref_allele", "Eff_allele", "Eff_Allele_Freq", "P-Value", "geno_mean", "geno_mean_stderr", "geno_mean_pval", "geno_var", "geno_var_stderr", "geno_var_pval", ])
[ "def", "print_header", "(", "self", ",", "f", "=", "sys", ".", "stdout", ",", "verbose", "=", "False", ")", ":", "self", ".", "var_count", "=", "2", "+", "len", "(", "self", ".", "covar_labels", ")", "if", "verbose", ":", "header", "=", "[", "\"Chr...
Prints header to f (will write header based on verbose) :param f: stream to print output :param verbose: print all data or only the most important parts?
[ "Prints", "header", "to", "f", "(", "will", "write", "header", "based", "on", "verbose", ")" ]
python
train
restran/mountains
mountains/ssh/__init__.py
https://github.com/restran/mountains/blob/a97fee568b112f4e10d878f815d0db3dd0a98d74/mountains/ssh/__init__.py#L219-L232
def get(self, remote_file, local_file): """ 下载文件 :param remote_file: :param local_file: :return: """ sftp = self.get_sftp() try: sftp.get(remote_file, local_file) except Exception as e: logger.error('下载文件失败') logger.error('remote: %s, local: %s' % (remote_file, local_file)) logger.error(e)
[ "def", "get", "(", "self", ",", "remote_file", ",", "local_file", ")", ":", "sftp", "=", "self", ".", "get_sftp", "(", ")", "try", ":", "sftp", ".", "get", "(", "remote_file", ",", "local_file", ")", "except", "Exception", "as", "e", ":", "logger", "...
下载文件 :param remote_file: :param local_file: :return:
[ "下载文件", ":", "param", "remote_file", ":", ":", "param", "local_file", ":", ":", "return", ":" ]
python
train
saltstack/salt
salt/netapi/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/__init__.py#L163-L176
def runner_async(self, fun, **kwargs): ''' Run `runner modules <all-salt.runners>` asynchronously Wraps :py:meth:`salt.runner.RunnerClient.cmd_async`. Note that runner functions must be called using keyword arguments. Positional arguments are not supported. :return: event data and a job ID for the executed function. ''' kwargs['fun'] = fun runner = salt.runner.RunnerClient(self.opts) return runner.cmd_async(kwargs)
[ "def", "runner_async", "(", "self", ",", "fun", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'fun'", "]", "=", "fun", "runner", "=", "salt", ".", "runner", ".", "RunnerClient", "(", "self", ".", "opts", ")", "return", "runner", ".", "cmd_async"...
Run `runner modules <all-salt.runners>` asynchronously Wraps :py:meth:`salt.runner.RunnerClient.cmd_async`. Note that runner functions must be called using keyword arguments. Positional arguments are not supported. :return: event data and a job ID for the executed function.
[ "Run", "runner", "modules", "<all", "-", "salt", ".", "runners", ">", "asynchronously" ]
python
train
rodynnz/xccdf
src/xccdf/models/tailoring.py
https://github.com/rodynnz/xccdf/blob/1b9dc2f06b5cce8db2a54c5f95a8f6bcf5cb6981/src/xccdf/models/tailoring.py#L80-L124
def load_children(self): """ Load the subelements from the xml_element in its correspondent classes. :returns: List of child objects. :rtype: list :raises CardinalityException: If there is more than one Version child. :raises CardinalityException: If there is no Version child. :raises CardinalityException: If there is no Profile element. """ # Containers children = list() statuses = list() version = None profiles = list() # Element load for element in self.xml_element: uri, tag = Element.get_namespace_and_tag(element.tag) if tag == 'version': if version is None: version = TailoringVersion(element) else: error_msg = 'version element found more than once' raise CardinalityException(error_msg) elif tag == 'status': statuses.append(Status(element)) elif tag == 'Profile': profiles.append(Profile(element)) # Element validation if version is None: error_msg = 'version element is required' raise CardinalityException(error_msg) if len(profiles) <= 0: error_msg = 'Profile element is required at least once' raise CardinalityException(error_msg) # List construction children.extend(statuses) if version is not None: children.append(version) children.extend(profiles) return children
[ "def", "load_children", "(", "self", ")", ":", "# Containers", "children", "=", "list", "(", ")", "statuses", "=", "list", "(", ")", "version", "=", "None", "profiles", "=", "list", "(", ")", "# Element load", "for", "element", "in", "self", ".", "xml_el...
Load the subelements from the xml_element in its correspondent classes. :returns: List of child objects. :rtype: list :raises CardinalityException: If there is more than one Version child. :raises CardinalityException: If there is no Version child. :raises CardinalityException: If there is no Profile element.
[ "Load", "the", "subelements", "from", "the", "xml_element", "in", "its", "correspondent", "classes", "." ]
python
train
jobovy/galpy
galpy/util/bovy_plot.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_plot.py#L88-L119
def bovy_end_print(filename,**kwargs): """ NAME: bovy_end_print PURPOSE: saves the current figure(s) to filename INPUT: filename - filename for plot (with extension) OPTIONAL INPUTS: format - file-format OUTPUT: (none) HISTORY: 2009-12-23 - Written - Bovy (NYU) """ if 'format' in kwargs: pyplot.savefig(filename,**kwargs) else: pyplot.savefig(filename,format=re.split(r'\.',filename)[-1],**kwargs) pyplot.close()
[ "def", "bovy_end_print", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "if", "'format'", "in", "kwargs", ":", "pyplot", ".", "savefig", "(", "filename", ",", "*", "*", "kwargs", ")", "else", ":", "pyplot", ".", "savefig", "(", "filename", ",", ...
NAME: bovy_end_print PURPOSE: saves the current figure(s) to filename INPUT: filename - filename for plot (with extension) OPTIONAL INPUTS: format - file-format OUTPUT: (none) HISTORY: 2009-12-23 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
devassistant/devassistant
devassistant/yaml_assistant_loader.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/yaml_assistant_loader.py#L137-L170
def get_assistants_file_hierarchy(cls, dirs):
    """Build the assistants file hierarchy for the given directories.

    Every ``*.yaml`` file found directly in ``dirs`` becomes one
    assistant entry; when the same file name appears in several
    directories, the first directory wins. For each ``{name}.yaml``
    the method recurses into the ``{name}`` subdirectory of *all*
    given directories to build that assistant's subhierarchy.

    Args:
        dirs: directories to search

    Returns:
        dict mapping assistant name to
        ``{'source': <path to its yaml>, 'subhierarchy': <nested dict>}``
    """
    hierarchy = {}
    for directory in [d for d in dirs if os.path.exists(d)]:
        for fname in [f for f in os.listdir(directory) if f.endswith('.yaml')]:
            name = fname[:-5]
            if name in hierarchy:
                # Already provided by an earlier directory -- first wins.
                continue
            sub_dirs = [os.path.join(d, name) for d in dirs]
            hierarchy[name] = {
                'source': os.path.join(directory, fname),
                'subhierarchy': cls.get_assistants_file_hierarchy(sub_dirs),
            }
    return hierarchy
[ "def", "get_assistants_file_hierarchy", "(", "cls", ",", "dirs", ")", ":", "result", "=", "{", "}", "for", "d", "in", "filter", "(", "lambda", "d", ":", "os", ".", "path", ".", "exists", "(", "d", ")", ",", "dirs", ")", ":", "for", "f", "in", "fi...
Returns assistants file hierarchy structure (see below) representing assistant hierarchy in given directories. It works like this: 1. It goes through all *.yaml files in all given directories and adds them into hierarchy (if there are two files with same name in more directories, the file from first directory wins). 2. For each {name}.yaml file, it calls itself recursively for {name} subdirectories of all given directories. Args: dirs: directories to search Returns: hierarchy structure that looks like this: {'assistant1': {'source': '/path/to/assistant1.yaml', 'subhierarchy': {<hierarchy of subassistants>}}, 'assistant2': {'source': '/path/to/assistant2.yaml', 'subhierarchy': {<another hierarchy of subassistants}} }
[ "Returns", "assistants", "file", "hierarchy", "structure", "(", "see", "below", ")", "representing", "assistant", "hierarchy", "in", "given", "directories", "." ]
python
train
acutesoftware/AIKIF
aikif/dataTools/cls_sql_code_generator.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_sql_code_generator.py#L211-L217
def update_old_to_new(self, col, old_val, new_val):
    """
    Append an UPDATE statement that sets COL to NEW_VAL on all rows
    where COL currently equals OLD_VAL.

    e.g. update_old_to_new("NAME", "The University of Adelaide", "University of Adelaide")
    will generate
    UPDATE table op SET op.NAME = 'University of Adelaide'
    WHERE op.NAME = 'The University of Adelaide';

    Single quotes in old_val / new_val are doubled (standard SQL string
    escaping) so that values such as "O'Brien" no longer produce broken
    statements, as the previous raw concatenation did.
    """
    safe_new = new_val.replace("'", "''")
    safe_old = old_val.replace("'", "''")
    self.sql_text += ("UPDATE " + self.fact_table + " SET " + col
                      + " = '" + safe_new + "' WHERE " + col
                      + " = '" + safe_old + "'; \n")
[ "def", "update_old_to_new", "(", "self", ",", "col", ",", "old_val", ",", "new_val", ")", ":", "self", ".", "sql_text", "+=", "\"UPDATE \"", "+", "self", ".", "fact_table", "+", "\" SET \"", "+", "col", "+", "\" = '\"", "+", "new_val", "+", "\"' WHERE \"",...
simply updates all rows and sets COL to NEW_VAL where col = old_val e.g. update_old_to_new("NAME", "The University of Adelaide", "University of Adelaide") will generate UPDATE table op SET op.NAME = 'University of Adelaide' WHERE op.NAME = 'The University of Adelaide';
[ "simply", "updates", "all", "rows", "and", "sets", "COL", "to", "NEW_VAL", "where", "col", "=", "old_val", "e", ".", "g", ".", "update_old_to_new", "(", "NAME", "The", "University", "of", "Adelaide", "University", "of", "Adelaide", ")", "will", "generate", ...
python
train
python-rope/rope
rope/base/project.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/project.py#L235-L238
def get_python_files(self):
    """Return every resource in the project that is a Python file."""
    is_py = self.pycore.is_python_file
    result = []
    for resource in self.get_files():
        if is_py(resource):
            result.append(resource)
    return result
[ "def", "get_python_files", "(", "self", ")", ":", "return", "[", "resource", "for", "resource", "in", "self", ".", "get_files", "(", ")", "if", "self", ".", "pycore", ".", "is_python_file", "(", "resource", ")", "]" ]
Returns all python files available in the project
[ "Returns", "all", "python", "files", "available", "in", "the", "project" ]
python
train
saltstack/salt
salt/cloud/clouds/linode.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1472-L1502
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data

    full
        If True, attach the raw node record under the ``extra`` key.

    Returns a dict keyed by node label with id/image/name/size/state and,
    when available, private/public IP lists.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])
        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']

        this_node['state'] = _get_status_descr_by_id(int(node['STATUS']))

        # Direct dict lookup instead of scanning every IP mapping for
        # every node (the previous code was O(nodes * ips)).
        node_ips = ips.get(linode_id)
        if node_ips is not None:
            this_node['private_ips'] = node_ips['private_ips']
            this_node['public_ips'] = node_ips['public_ips']

        if full:
            this_node['extra'] = node

        ret[node['LABEL']] = this_node

    return ret
[ "def", "_list_linodes", "(", "full", "=", "False", ")", ":", "nodes", "=", "_query", "(", "'linode'", ",", "'list'", ")", "[", "'DATA'", "]", "ips", "=", "get_ips", "(", ")", "ret", "=", "{", "}", "for", "node", "in", "nodes", ":", "this_node", "="...
Helper function to format and parse linode data
[ "Helper", "function", "to", "format", "and", "parse", "linode", "data" ]
python
train
solvebio/solvebio-python
solvebio/utils/tabulate.py
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L508-L520
def _line_segment_with_colons(linefmt, align, colwidth): """Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).""" fill = linefmt.hline w = colwidth if align in ["right", "decimal"]: return (fill[0] * (w - 1)) + ":" elif align == "center": return ":" + (fill[0] * (w - 2)) + ":" elif align == "left": return ":" + (fill[0] * (w - 1)) else: return fill[0] * w
[ "def", "_line_segment_with_colons", "(", "linefmt", ",", "align", ",", "colwidth", ")", ":", "fill", "=", "linefmt", ".", "hline", "w", "=", "colwidth", "if", "align", "in", "[", "\"right\"", ",", "\"decimal\"", "]", ":", "return", "(", "fill", "[", "0",...
Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).
[ "Return", "a", "segment", "of", "a", "horizontal", "line", "with", "optional", "colons", "which", "indicate", "column", "s", "alignment", "(", "as", "in", "pipe", "output", "format", ")", "." ]
python
test
mk-fg/feedjack
feedjack/fjcloud.py
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/fjcloud.py#L42-L50
def getquery(query):
    '''Performs a query and get the results.

    Best-effort: any database error yields an empty list rather than
    propagating (preserving the original bare-except contract, but now
    catching only Exception so SystemExit/KeyboardInterrupt pass through).
    '''
    data = list()
    try:
        conn = connection.cursor()
        try:
            conn.execute(query)
            data = conn.fetchall()
        finally:
            # The original leaked the cursor when execute/fetchall raised;
            # close it on every path.
            conn.close()
    except Exception:
        data = list()
    return data
[ "def", "getquery", "(", "query", ")", ":", "try", ":", "conn", "=", "connection", ".", "cursor", "(", ")", "conn", ".", "execute", "(", "query", ")", "data", "=", "conn", ".", "fetchall", "(", ")", "conn", ".", "close", "(", ")", "except", ":", "...
Performs a query and get the results.
[ "Performs", "a", "query", "and", "get", "the", "results", "." ]
python
train
totalgood/nlpia
src/nlpia/futil.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L346-L355
def read_json(filepath, intkeys=True, intvalues=True):
    """ read text from filepath (`open(find_filepath(expand_filepath(fp)))`) then json.loads()

    >>> read_json('HTTP_1.1  Status Code Definitions.html.json')
    {'100': 'Continue',
     '101': 'Switching Protocols',...
    """
    # Close the handle deterministically -- the original left it to the GC.
    # NOTE(review): assumes ensure_open returns a file-like object usable as
    # a context manager -- TODO confirm against its definition.
    with ensure_open(find_filepath(filepath), mode='rt') as fin:
        d = json.load(fin)
    return update_dict_types(d, update_keys=intkeys, update_values=intvalues)
[ "def", "read_json", "(", "filepath", ",", "intkeys", "=", "True", ",", "intvalues", "=", "True", ")", ":", "d", "=", "json", ".", "load", "(", "ensure_open", "(", "find_filepath", "(", "filepath", ")", ",", "mode", "=", "'rt'", ")", ")", "d", "=", ...
read text from filepath (`open(find_filepath(expand_filepath(fp)))`) then json.loads() >>> read_json('HTTP_1.1 Status Code Definitions.html.json') {'100': 'Continue', '101': 'Switching Protocols',...
[ "read", "text", "from", "filepath", "(", "open", "(", "find_filepath", "(", "expand_filepath", "(", "fp", ")))", ")", "then", "json", ".", "loads", "()", ">>>", "read_json", "(", "HTTP_1", ".", "1", "Status", "Code", "Definitions", ".", "html", ".", "jso...
python
train
arviz-devs/arviz
arviz/data/io_pystan.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/data/io_pystan.py#L270-L275
def sample_stats_prior_to_xarray(self):
    """Extract sample_stats_prior from prior."""
    stats = get_sample_stats_stan3(self.prior, model=self.prior_model)
    return dict_to_dataset(
        stats, library=self.stan, coords=self.coords, dims=self.dims)
[ "def", "sample_stats_prior_to_xarray", "(", "self", ")", ":", "prior", "=", "self", ".", "prior", "prior_model", "=", "self", ".", "prior_model", "data", "=", "get_sample_stats_stan3", "(", "prior", ",", "model", "=", "prior_model", ")", "return", "dict_to_datas...
Extract sample_stats_prior from prior.
[ "Extract", "sample_stats_prior", "from", "prior", "." ]
python
train
jhermann/rituals
src/rituals/acts/documentation.py
https://github.com/jhermann/rituals/blob/1534f50d81e19bbbe799e2eba0acdefbce047c06/src/rituals/acts/documentation.py#L295-L311
def _to_pypi(self, docs_base, release):
    """Upload to PyPI.

    Returns the redirect location on a 301 response, else None.
    """
    redirect_url = None
    with self._zipped(docs_base) as handle:
        reply = requests.post(
            self.params['url'],
            auth=get_pypi_auth(),
            allow_redirects=False,
            files=dict(content=(self.cfg.project.name + '.zip', handle, 'application/zip')),
            data={':action': 'doc_upload', 'name': self.cfg.project.name})
        if 200 <= reply.status_code < 300:
            notify.info("{status_code} {reason}".format(**vars(reply)))
        elif reply.status_code == 301:
            # Moved: the docs landing page is reported via the redirect.
            redirect_url = reply.headers['location']
        else:
            details = self.cfg.copy()
            details.update(self.params)
            details.update(vars(reply))
            notify.error("{status_code} {reason} for POST to {url}".format(**details))
    return redirect_url
[ "def", "_to_pypi", "(", "self", ",", "docs_base", ",", "release", ")", ":", "url", "=", "None", "with", "self", ".", "_zipped", "(", "docs_base", ")", "as", "handle", ":", "reply", "=", "requests", ".", "post", "(", "self", ".", "params", "[", "'url'...
Upload to PyPI.
[ "Upload", "to", "PyPI", "." ]
python
valid
monarch-initiative/dipper
dipper/sources/OMIA.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L327-L360
def process_classes(self, limit):
    """
    After all species have been processed .
    Loop through the xml file and process the articles, breed, genes,
    phenes, and phenotype-grouping classes. We add elements to the graph,
    and store the id-to-label in the label_hash dict,
    along with the internal key-to-external id in the id_hash dict.
    The latter are referenced in the association processing functions.

    :param limit: row cap forwarded to each table processor
        (presumably limits rows handled per table -- TODO confirm)
    :return: None
    """
    myfile = '/'.join((self.rawdir, self.files['data']['file']))
    fh = gzip.open(myfile, 'rb')
    filereader = io.TextIOWrapper(fh, newline="")
    filereader.readline()  # remove the xml declaration line
    # iterparse is not deprecated
    # Stream the (potentially large) XML element-by-element; each
    # process_xml_table call only reacts to its own table tag.
    for event, elem in ET.iterparse(filereader):
        self.process_xml_table(elem, 'Articles', self._process_article_row, limit)
        self.process_xml_table(elem, 'Breed', self._process_breed_row, limit)
        self.process_xml_table(elem, 'Genes_gb', self._process_gene_row, limit)
        self.process_xml_table(
            elem, 'OMIA_Group', self._process_omia_group_row, limit)
        self.process_xml_table(elem, 'Phene', self._process_phene_row, limit)
        self.process_xml_table(
            elem, 'Omim_Xref', self._process_omia_omim_map, limit)
    fh.close()

    # post-process the omia-omim associations to filter out the genes
    # (keep only phenotypes/diseases)
    self.clean_up_omim_genes()
    return
[ "def", "process_classes", "(", "self", ",", "limit", ")", ":", "myfile", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'data'", "]", "[", "'file'", "]", ")", ")", "fh", "=", "gzip", ".", "open", "(", ...
After all species have been processed . Loop through the xml file and process the articles, breed, genes, phenes, and phenotype-grouping classes. We add elements to the graph, and store the id-to-label in the label_hash dict, along with the internal key-to-external id in the id_hash dict. The latter are referenced in the association processing functions. :param limit: :return:
[ "After", "all", "species", "have", "been", "processed", ".", "Loop", "through", "the", "xml", "file", "and", "process", "the", "articles", "breed", "genes", "phenes", "and", "phenotype", "-", "grouping", "classes", ".", "We", "add", "elements", "to", "the", ...
python
train
KE-works/pykechain
pykechain/client.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L992-L1041
def _create_activity2(self, parent, name, activity_type=ActivityType.TASK):
    """Create a new activity.

    .. important::
        This function creates activities for KE-chain versions later than 2.9.0-135
        In effect where the module 'wim' has version '>=2.0.0'.
        The version of 'wim' in KE-chain can be found in the property
        :attr:`Client.app_versions`

    In WIM2 the type of the activity is called activity_type

    :param parent: parent under which to create the activity
    :type parent: basestring or :class:`models.Activity2`
    :param name: new activity name
    :type name: basestring
    :param activity_type: type of activity: TASK (default) or PROCESS
    :type activity_type: basestring
    :return: the created :class:`models.Activity2`
    :raises APIError: When the object could not be created
    :raises IllegalArgumentError: When an incorrect activitytype or parent is provided
    """
    # WIM1: activity_class, WIM2: activity_type
    if self.match_app_version(label='wim', version='<2.0.0', default=True):
        raise APIError('This method is only compatible with versions of KE-chain where the internal `wim` module '
                       'has a version >=2.0.0. Use the `Client.create_activity()` method.')

    if activity_type and activity_type not in ActivityType.values():
        raise IllegalArgumentError("Please provide accepted activity_type (provided:{} accepted:{})".
                                   format(activity_type, ActivityType.values()))

    # Normalise the parent to a UUID string.
    if isinstance(parent, (Activity, Activity2)):
        parent_id = parent.id
    elif is_uuid(parent):
        parent_id = parent
    else:
        raise IllegalArgumentError("Please provide either an activity object or a UUID")

    payload = {
        "name": name,
        "parent_id": parent_id,
        "activity_type": activity_type
    }

    response = self._request('POST', self._build_url('activities'), data=payload,
                             params=API_EXTRA_PARAMS['activities'])

    if response.status_code != requests.codes.created:  # pragma: no cover
        raise APIError("Could not create activity")

    return Activity2(response.json()['results'][0], client=self)
[ "def", "_create_activity2", "(", "self", ",", "parent", ",", "name", ",", "activity_type", "=", "ActivityType", ".", "TASK", ")", ":", "# WIM1: activity_class, WIM2: activity_type", "if", "self", ".", "match_app_version", "(", "label", "=", "'wim'", ",", "version"...
Create a new activity. .. important:: This function creates activities for KE-chain versions later than 2.9.0-135 In effect where the module 'wim' has version '>=2.0.0'. The version of 'wim' in KE-chain can be found in the property :attr:`Client.app_versions` In WIM2 the type of the activity is called activity_type :param parent: parent under which to create the activity :type parent: basestring or :class:`models.Activity2` :param name: new activity name :type name: basestring :param activity_type: type of activity: TASK (default) or PROCESS :type activity_type: basestring :return: the created :class:`models.Activity2` :raises APIError: When the object could not be created :raises IllegalArgumentError: When an incorrect activitytype or parent is provided
[ "Create", "a", "new", "activity", "." ]
python
train
openthread/openthread
tools/harness-thci/OpenThread.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L1058-L1071
def getNetworkFragmentID(self): """get current partition id of Thread Network Partition from LeaderData Returns: The Thread network Partition Id """ print '%s call getNetworkFragmentID' % self.port if not self.__isOpenThreadRunning(): print 'OpenThread is not running' return None leaderData = [] leaderData = self.__sendCommand('leaderdata') return int(leaderData[0].split()[2], 16)
[ "def", "getNetworkFragmentID", "(", "self", ")", ":", "print", "'%s call getNetworkFragmentID'", "%", "self", ".", "port", "if", "not", "self", ".", "__isOpenThreadRunning", "(", ")", ":", "print", "'OpenThread is not running'", "return", "None", "leaderData", "=", ...
get current partition id of Thread Network Partition from LeaderData Returns: The Thread network Partition Id
[ "get", "current", "partition", "id", "of", "Thread", "Network", "Partition", "from", "LeaderData" ]
python
train
inveniosoftware/invenio-userprofiles
invenio_userprofiles/views.py
https://github.com/inveniosoftware/invenio-userprofiles/blob/4c682e7d67a4cab8dc38472a31fa1c34cbba03dd/invenio_userprofiles/views.py#L133-L163
def handle_profile_form(form):
    """Handle profile update form.

    Validates the submitted form, updates the current user's profile
    (username and full name) and -- when email editing is enabled and the
    address changed -- the account email, then flashes a status message.

    :param form: the profile form, re-bound to ``request.form`` below.
    """
    form.process(formdata=request.form)
    if form.validate_on_submit():
        email_changed = False
        # Nested transaction: profile and email changes commit together.
        with db.session.begin_nested():
            # Update profile.
            current_userprofile.username = form.username.data
            current_userprofile.full_name = form.full_name.data
            db.session.add(current_userprofile)

            # Update email
            if current_app.config['USERPROFILES_EMAIL_ENABLED'] and \
               form.email.data != current_user.email:
                current_user.email = form.email.data
                # Clearing confirmed_at forces re-verification of the
                # new address.
                current_user.confirmed_at = None
                db.session.add(current_user)
                email_changed = True
        db.session.commit()

        if email_changed:
            send_confirmation_instructions(current_user)
            # NOTE: Flash message after successful update of profile.
            flash(_('Profile was updated. We have sent a verification '
                    'email to %(email)s. Please check it.',
                    email=current_user.email),
                  category='success')
        else:
            # NOTE: Flash message after successful update of profile.
            flash(_('Profile was updated.'), category='success')
[ "def", "handle_profile_form", "(", "form", ")", ":", "form", ".", "process", "(", "formdata", "=", "request", ".", "form", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "email_changed", "=", "False", "with", "db", ".", "session", ".", "beg...
Handle profile update form.
[ "Handle", "profile", "update", "form", "." ]
python
train
mailgun/talon
talon/signature/extraction.py
https://github.com/mailgun/talon/blob/cdd84563dd329c4f887591807870d10015e0c7a7/talon/signature/extraction.py#L66-L98
def _mark_lines(lines, sender):
    """Mark message lines with markers to distinguish signature lines.

    Markers:

    * e - empty line
    * s - line identified as signature
    * t - other i.e. ordinary text line

    >>> mark_message_lines(['Some text', '', 'Bob'], 'Bob')
    'tes'
    """
    global EXTRACTOR

    candidate = get_signature_candidate(lines)

    # Start by marking every line as ordinary text.
    markers = ['t'] * len(lines)

    # Only lines belonging to the candidate are re-marked, bottom up.
    # Candidate indices are offset relative to the full message.
    offset = len(lines) - len(candidate)
    for idx, line in reversed(list(enumerate(candidate))):
        pos = offset + idx
        if not line.strip():
            markers[pos] = 'e'
        elif is_signature_line(line, sender, EXTRACTOR):
            markers[pos] = 's'

    return ''.join(markers)
[ "def", "_mark_lines", "(", "lines", ",", "sender", ")", ":", "global", "EXTRACTOR", "candidate", "=", "get_signature_candidate", "(", "lines", ")", "# at first consider everything to be text no signature", "markers", "=", "list", "(", "'t'", "*", "len", "(", "lines"...
Mark message lines with markers to distinguish signature lines. Markers: * e - empty line * s - line identified as signature * t - other i.e. ordinary text line >>> mark_message_lines(['Some text', '', 'Bob'], 'Bob') 'tes'
[ "Mark", "message", "lines", "with", "markers", "to", "distinguish", "signature", "lines", "." ]
python
train
pgmpy/pgmpy
pgmpy/factors/distributions/GaussianDistribution.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/distributions/GaussianDistribution.py#L126-L149
def precision_matrix(self):
    """
    Returns the precision matrix of the distribution.

    Precision is defined as the inverse of the variance. This method
    returns the inverse matrix of the covariance, computing it lazily
    and caching the result on the instance.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.distributions import GaussianDistribution as GD
    >>> dis = GD(variables=['x1', 'x2', 'x3'],
    ...          mean=[1, -3, 4],
    ...          cov=[[4, 2, -2],
    ...               [2, 5, -5],
    ...               [-2, -5, 8]])
    >>> dis.precision_matrix
    array([[ 0.3125    , -0.125     ,  0.        ],
           [-0.125     ,  0.58333333,  0.33333333],
           [ 0.        ,  0.33333333,  0.33333333]])
    """
    cached = self._precision_matrix
    if cached is None:
        cached = np.linalg.inv(self.covariance)
        self._precision_matrix = cached
    return cached
[ "def", "precision_matrix", "(", "self", ")", ":", "if", "self", ".", "_precision_matrix", "is", "None", ":", "self", ".", "_precision_matrix", "=", "np", ".", "linalg", ".", "inv", "(", "self", ".", "covariance", ")", "return", "self", ".", "_precision_mat...
Returns the precision matrix of the distribution. Precision is defined as the inverse of the variance. This method returns the inverse matrix of the covariance. Examples -------- >>> import numpy as np >>> from pgmpy.factors.distributions import GaussianDistribution as GD >>> dis = GD(variables=['x1', 'x2', 'x3'], ... mean=[1, -3, 4], ... cov=[[4, 2, -2], ... [2, 5, -5], ... [-2, -5, 8]])) >>> dis.precision_matrix array([[ 0.3125 , -0.125 , 0. ], [-0.125 , 0.58333333, 0.33333333], [ 0. , 0.33333333, 0.33333333]])
[ "Returns", "the", "precision", "matrix", "of", "the", "distribution", "." ]
python
train
tBaxter/activity-monitor
activity_monitor/managers.py
https://github.com/tBaxter/activity-monitor/blob/be6c6edc7c6b4141923b47376502cde0f785eb68/activity_monitor/managers.py#L41-L45
def get_for_model(self, model):
    """
    Return a QuerySet of only items of a certain type.
    """
    ctype = ContentType.objects.get_for_model(model)
    return self.filter(content_type=ctype)
[ "def", "get_for_model", "(", "self", ",", "model", ")", ":", "return", "self", ".", "filter", "(", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "model", ")", ")" ]
Return a QuerySet of only items of a certain type.
[ "Return", "a", "QuerySet", "of", "only", "items", "of", "a", "certain", "type", "." ]
python
train
uktrade/directory-validators
directory_validators/company.py
https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/company.py#L142-L156
def case_study_social_link_linkedin(value):
    """
    Confirms that the social media url is pointed at the correct domain.

    Args:
        value (string): The url to check.

    Raises:
        django.forms.ValidationError

    """
    netloc = parse.urlparse(value.lower()).netloc
    # Drop an optional port before comparing hosts.
    host = netloc.split(':')[0]
    # Accept linkedin.com itself or any of its subdomains. The previous
    # endswith('linkedin.com') check also accepted look-alike hosts such
    # as 'evil-linkedin.com'.
    if host != 'linkedin.com' and not host.endswith('.linkedin.com'):
        raise ValidationError(MESSAGE_NOT_LINKEDIN)
[ "def", "case_study_social_link_linkedin", "(", "value", ")", ":", "parsed", "=", "parse", ".", "urlparse", "(", "value", ".", "lower", "(", ")", ")", "if", "not", "parsed", ".", "netloc", ".", "endswith", "(", "'linkedin.com'", ")", ":", "raise", "Validati...
Confirms that the social media url is pointed at the correct domain. Args: value (string): The url to check. Raises: django.forms.ValidationError
[ "Confirms", "that", "the", "social", "media", "url", "is", "pointed", "at", "the", "correct", "domain", "." ]
python
train
SMTG-UCL/sumo
sumo/electronic_structure/dos.py
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/electronic_structure/dos.py#L212-L267
def get_element_pdos(dos, element, sites, lm_orbitals=None, orbitals=None):
    """Get the projected density of states for an element.

    Args:
        dos (:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The
            density of states.
        element (str): Element symbol. E.g. 'Zn'.
        sites (tuple): The atomic indices over which to sum the density of
            states, as a :obj:`tuple`. Indices are zero based for each
            element. For example, ``(0, 1, 2)`` will sum the density of
            states for the 1st, 2nd and 3rd sites of the element specified.
        lm_orbitals (:obj:`tuple`, optional): The orbitals to decompose into
            their lm contributions (e.g. p -> px, py, pz). Should be provided
            as a :obj:`tuple` of :obj:`str`. For example, ``('p')``, will
            extract the projected density of states for the px, py, and pz
            orbitals. Defaults to ``None``.
        orbitals (:obj:`tuple`, optional): The orbitals to extract from the
            projected density of states. Should be provided as a
            :obj:`tuple` of :obj:`str`. For example, ``('s', 'px', 'dx2')``
            will extract the s, px, and dx2 orbitals, only. If ``None``, all
            orbitals will be extracted. Defaults to ``None``.

    Returns:
        dict: The projected density of states. Formatted as a :obj:`dict`
        mapping the orbitals to
        :obj:`~pymatgen.electronic_structure.dos.Dos` objects.

        For example::

            {
                's': Dos,
                'p': Dos
            }
    """
    el_dos = {}
    for site in sites:
        # build a list of which orbitals we are after
        # start with s, p, and d orbitals only
        # Keep an orbital unless excluded by `orbitals`, or superseded by
        # an lm decomposition requested through `lm_orbitals`.
        spd = [orb for orb in dos.get_element_spd_dos(element).keys()
               if ((orbitals and orb.name in orbitals) or not orbitals)
               and ((lm_orbitals and orb.name not in lm_orbitals)
                    or not lm_orbitals)]

        # now add any lm decomposed orbitals
        lm = [orb for orb in Orbital
              if lm_orbitals and orb.name[0] in lm_orbitals]

        # extract the data
        # Sum contributions across the requested sites, orbital by orbital.
        for orb in spd:
            pdos = dos.get_site_spd_dos(site)[orb]
            el_dos[orb.name] = (el_dos[orb.name] + pdos
                                if orb.name in el_dos else pdos)

        for orb in lm:
            pdos = dos.get_site_orbital_dos(site, orb)
            el_dos[orb.name] = (el_dos[orb.name] + pdos
                                if orb.name in el_dos else pdos)

    return el_dos
[ "def", "get_element_pdos", "(", "dos", ",", "element", ",", "sites", ",", "lm_orbitals", "=", "None", ",", "orbitals", "=", "None", ")", ":", "el_dos", "=", "{", "}", "for", "site", "in", "sites", ":", "# build a list of which orbitals we are after", "# start ...
Get the projected density of states for an element. Args: dos (:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The density of states. element (str): Element symbol. E.g. 'Zn'. sites (tuple): The atomic indices over which to sum the density of states, as a :obj:`tuple`. Indices are zero based for each element. For example, ``(0, 1, 2)`` will sum the density of states for the 1st, 2nd and 3rd sites of the element specified. lm_orbitals (:obj:`tuple`, optional): The orbitals to decompose into their lm contributions (e.g. p -> px, py, pz). Should be provided as a :obj:`tuple` of :obj:`str`. For example, ``('p')``, will extract the projected density of states for the px, py, and pz orbitals. Defaults to ``None``. orbitals (:obj:`tuple`, optional): The orbitals to extract from the projected density of states. Should be provided as a :obj:`tuple` of :obj:`str`. For example, ``('s', 'px', 'dx2')`` will extract the s, px, and dx2 orbitals, only. If ``None``, all orbitals will be extracted. Defaults to ``None``. Returns: dict: The projected density of states. Formatted as a :obj:`dict` mapping the orbitals to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For example:: { 's': Dos, 'p': Dos }
[ "Get", "the", "projected", "density", "of", "states", "for", "an", "element", "." ]
python
train
tensorflow/tensorboard
tensorboard/program.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L358-L410
def with_port_scanning(cls):
    """Create a server factory that performs port scanning.

    This function returns a callable whose signature matches the
    specification of `TensorBoardServer.__init__`, using `cls` as an
    underlying implementation. It passes through `flags` unchanged except
    in the case that `flags.port is None`, in which case it repeatedly
    instantiates the underlying server with new port suggestions.

    Args:
      cls: A valid implementation of `TensorBoardServer`. This class's
        initializer should raise a `TensorBoardPortInUseError` upon
        failing to bind to a port when it is expected that binding to
        another nearby port might succeed.

        The initializer for `cls` will only ever be invoked with `flags`
        such that `flags.port is not None`.

    Returns:
      A function that implements the `__init__` contract of
      `TensorBoardServer`.
    """
    def init(wsgi_app, flags):
        # base_port: what's the first port to which we should try to bind?
        # should_scan: if that fails, shall we try additional ports?
        # max_attempts: how many ports shall we try?
        should_scan = flags.port is None
        base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port

        if base_port > 0xFFFF:
            raise TensorBoardServerException(
                'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF)
            )
        # Computed exactly once (the original assigned this identical
        # value both before and after the range check).
        max_attempts = 10 if should_scan else 1
        # Clamp so that base_port + max_attempts never exceeds 0x10000.
        base_port = min(base_port + max_attempts, 0x10000) - max_attempts

        for port in xrange(base_port, base_port + max_attempts):
            subflags = argparse.Namespace(**vars(flags))
            subflags.port = port
            try:
                return cls(wsgi_app=wsgi_app, flags=subflags)
            except TensorBoardPortInUseError:
                if not should_scan:
                    raise
        # All attempts failed to bind.
        raise TensorBoardServerException(
            'TensorBoard could not bind to any port around %s '
            '(tried %d times)' % (base_port, max_attempts))
    return init
[ "def", "with_port_scanning", "(", "cls", ")", ":", "def", "init", "(", "wsgi_app", ",", "flags", ")", ":", "# base_port: what's the first port to which we should try to bind?", "# should_scan: if that fails, shall we try additional ports?", "# max_attempts: how many ports shall we tr...
Create a server factory that performs port scanning. This function returns a callable whose signature matches the specification of `TensorBoardServer.__init__`, using `cls` as an underlying implementation. It passes through `flags` unchanged except in the case that `flags.port is None`, in which case it repeatedly instantiates the underlying server with new port suggestions. Args: cls: A valid implementation of `TensorBoardServer`. This class's initializer should raise a `TensorBoardPortInUseError` upon failing to bind to a port when it is expected that binding to another nearby port might succeed. The initializer for `cls` will only ever be invoked with `flags` such that `flags.port is not None`. Returns: A function that implements the `__init__` contract of `TensorBoardServer`.
[ "Create", "a", "server", "factory", "that", "performs", "port", "scanning", "." ]
python
train
e3krisztian/pyrene
pyrene/shell.py
https://github.com/e3krisztian/pyrene/blob/ad9f2fb979f06930399c9c8214c3fe3c2d6efa06/pyrene/shell.py#L192-L222
def do_copy(self, line): ''' Copy packages between repos copy SOURCE DESTINATION Where SOURCE can be either LOCAL-FILE or REPO:PACKAGE-SPEC DESTINATION can be either a REPO: or a directory. ''' words = line.split() source, destination = words destination_repo = self._get_destination_repo(destination) local_file_source = ':' not in source if local_file_source: destination_repo.upload_packages([source]) else: source_repo_name, _, package_spec = source.partition(':') try: source_repo = self.network.get_repo(source_repo_name) except UnknownRepoError: raise ShellError( 'Unknown repository {}'.format(source_repo_name) ) # copy between repos with the help of temporary storage try: source_repo.download_packages(package_spec, self.__temp_dir) destination_repo.upload_packages(self.__temp_dir.files) finally: self.__temp_dir.clear()
[ "def", "do_copy", "(", "self", ",", "line", ")", ":", "words", "=", "line", ".", "split", "(", ")", "source", ",", "destination", "=", "words", "destination_repo", "=", "self", ".", "_get_destination_repo", "(", "destination", ")", "local_file_source", "=", ...
Copy packages between repos copy SOURCE DESTINATION Where SOURCE can be either LOCAL-FILE or REPO:PACKAGE-SPEC DESTINATION can be either a REPO: or a directory.
[ "Copy", "packages", "between", "repos" ]
python
train
saltstack/salt
salt/modules/win_iis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_iis.py#L1503-L1544
def remove_app(name, site): ''' Remove an IIS application. Args: name (str): The application name. site (str): The IIS site name. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.remove_app name='app0' site='site0' ''' current_apps = list_apps(site) if name not in current_apps: log.debug('Application already absent: %s', name) return True ps_cmd = ['Remove-WebApplication', '-Name', "'{0}'".format(name), '-Site', "'{0}'".format(site)] cmd_ret = _srvmgr(ps_cmd) if cmd_ret['retcode'] != 0: msg = 'Unable to remove application: {0}\nError: {1}' \ ''.format(name, cmd_ret['stderr']) raise CommandExecutionError(msg) new_apps = list_apps(site) if name not in new_apps: log.debug('Application removed successfully: %s', name) return True log.error('Unable to remove application: %s', name) return False
[ "def", "remove_app", "(", "name", ",", "site", ")", ":", "current_apps", "=", "list_apps", "(", "site", ")", "if", "name", "not", "in", "current_apps", ":", "log", ".", "debug", "(", "'Application already absent: %s'", ",", "name", ")", "return", "True", "...
Remove an IIS application. Args: name (str): The application name. site (str): The IIS site name. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.remove_app name='app0' site='site0'
[ "Remove", "an", "IIS", "application", "." ]
python
train
mardix/Mocha
mocha/contrib/views/auth.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/views/auth.py#L337-L366
def reset_password(self, action_token, signed_data): """Reset the user password. It was triggered by LOST-PASSWORD """ try: action = "reset-password" user = get_user_by_action_token(action, action_token) if not user or not user.signed_data_match(signed_data, action): raise mocha_exc.AppError("Verification Invalid!") if request.method == "POST": password = request.form.get("password", "").strip() password_confirm = request.form.get("password_confirm", "").strip() if not password or password != password_confirm: raise exceptions.AuthError( "Password is missing or passwords don't match") user.change_password(password) user.set_email_verified(True) session_set_require_password_change(False) flash_success("Password updated successfully!") return redirect(__options__.get("login_view") or self.login) return {"action_token": action_token, "signed_data": signed_data} except (mocha_exc.AppError, exceptions.AuthError) as ex: flash_error(str(ex)) except Exception as e: logging.exception(e) flash_error("Unable to reset password") return redirect(self.login)
[ "def", "reset_password", "(", "self", ",", "action_token", ",", "signed_data", ")", ":", "try", ":", "action", "=", "\"reset-password\"", "user", "=", "get_user_by_action_token", "(", "action", ",", "action_token", ")", "if", "not", "user", "or", "not", "user"...
Reset the user password. It was triggered by LOST-PASSWORD
[ "Reset", "the", "user", "password", ".", "It", "was", "triggered", "by", "LOST", "-", "PASSWORD" ]
python
train
joke2k/faker
faker/providers/date_time/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/date_time/__init__.py#L1574-L1587
def date_between(self, start_date='-30y', end_date='today'): """ Get a Date object based on a random date between two given dates. Accepts date strings that can be recognized by strtotime(). :param start_date Defaults to 30 years ago :param end_date Defaults to "today" :example Date('1999-02-02') :return Date """ start_date = self._parse_date(start_date) end_date = self._parse_date(end_date) return self.date_between_dates(date_start=start_date, date_end=end_date)
[ "def", "date_between", "(", "self", ",", "start_date", "=", "'-30y'", ",", "end_date", "=", "'today'", ")", ":", "start_date", "=", "self", ".", "_parse_date", "(", "start_date", ")", "end_date", "=", "self", ".", "_parse_date", "(", "end_date", ")", "retu...
Get a Date object based on a random date between two given dates. Accepts date strings that can be recognized by strtotime(). :param start_date Defaults to 30 years ago :param end_date Defaults to "today" :example Date('1999-02-02') :return Date
[ "Get", "a", "Date", "object", "based", "on", "a", "random", "date", "between", "two", "given", "dates", ".", "Accepts", "date", "strings", "that", "can", "be", "recognized", "by", "strtotime", "()", "." ]
python
train
ianmiell/shutit
shutit_pexpect.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L1926-L2155
def get_distro_info(self, loglevel=logging.DEBUG): """Get information about which distro we are using, placing it in the environment object. Fails if distro could not be determined. Should be called with the container is started up, and uses as core info as possible. Note: if the install type is apt, it issues the following: - apt-get update - apt-get install -y -qq lsb-release """ shutit = self.shutit install_type = '' distro = '' distro_version = '' if shutit.build['distro_override'] != '': key = shutit.build['distro_override'] distro = shutit.build['distro_override'] install_type = package_map.INSTALL_TYPE_MAP[key] distro_version = '' if install_type == 'apt' and shutit.build['delivery'] in ('docker','dockerfile'): if not self.command_available('lsb_release'): if not shutit.get_current_shutit_pexpect_session_environment().build['apt_update_done'] and self.whoami() == 'root': shutit.get_current_shutit_pexpect_session_environment().build['apt_update_done'] = True self.send(ShutItSendSpec(self, send='DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -qq lsb-release', loglevel=loglevel, ignore_background=True)) d = self.lsb_release() install_type = d['install_type'] distro = d['distro'] distro_version = d['distro_version'] elif install_type == 'yum' and shutit.build['delivery'] in ('docker', 'dockerfile'): if self.file_exists('/etc/redhat-release'): output = self.send_and_get_output(' command cat /etc/redhat-release', echo=False, loglevel=loglevel) if re.match('^centos.*$', output.lower()) or re.match('^red hat.*$', output.lower()) or re.match('^fedora.*$', output.lower()) or True: self.send_and_match_output('yum install -y -t redhat-lsb', 'Complete!', loglevel=loglevel) else: if not self.command_available('lsb_release'): self.send(ShutItSendSpec(self, send='yum install -y lsb-release', loglevel=loglevel, ignore_background=True)) install_type = d['install_type'] distro = d['distro'] distro_version = d['distro_version'] elif install_type == 'apk' 
and shutit.build['delivery'] in ('docker','dockerfile'): if not shutit.get_current_shutit_pexpect_session_environment().build['apk_update_done'] and self.whoami() == 'root': self.send(ShutItSendSpec(self, send='apk -q update', ignore_background=True, loglevel=logging.INFO)) shutit.get_current_shutit_pexpect_session_environment().build['apk_update_done'] = True self.send(ShutItSendSpec(self, send='apk -q add bash', ignore_background=True, loglevel=loglevel)) install_type = 'apk' distro = 'alpine' distro_version = '1.0' elif install_type == 'pacman' and shutit.build['delivery'] in ('docker','dockerfile') and self.whoami() == 'root': if not shutit.get_current_shutit_pexpect_session_environment().build['pacman_update_done']: shutit.get_current_shutit_pexpect_session_environment().build['pacman_update_done'] = True self.send(ShutItSendSpec(self, send='pacman -Syy', ignore_background=True, loglevel=logging.INFO)) install_type = d['install_type'] distro = d['distro'] distro_version = '1.0' elif install_type == 'emerge' and shutit.build['delivery'] in ('docker','dockerfile'): if not shutit.get_current_shutit_pexpect_session_environment().build['emerge_update_done'] and self.whoami() == 'root': # Takes bloody ages! 
#self.send(ShutItSendSpec(self,send='emerge --sync', loglevel=loglevel,timeout=9999,ignore_background=True)) pass install_type = 'emerge' distro = 'gentoo' distro_version = '1.0' elif install_type == 'docker' and shutit.build['delivery'] in ('docker','dockerfile'): distro = 'coreos' distro_version = '1.0' elif self.command_available('lsb_release'): d = self.lsb_release() install_type = d['install_type'] distro = d['distro'] distro_version = d['distro_version'] else: issue_output = self.send_and_get_output(' command cat /etc/issue', echo=False, ignore_background=True, loglevel=loglevel).lower() if not re.match('.*No such file.*',issue_output): for key in package_map.INSTALL_TYPE_MAP: if issue_output.find(key) != -1: distro = key install_type = package_map.INSTALL_TYPE_MAP[key] break elif self.file_exists('/cygdrive'): distro = 'cygwin' install_type = 'apt-cyg' if install_type == '' or distro == '': if self.file_exists('/etc/os-release'): os_name = self.send_and_get_output(' command cat /etc/os-release | grep ^NAME', echo=False, ignore_background=True, loglevel=loglevel).lower() if os_name.find('centos') != -1: distro = 'centos' install_type = 'yum' elif os_name.find('red hat') != -1: distro = 'red hat' install_type = 'yum' elif os_name.find('fedora') != -1: # TODO: distinguish with dnf - fedora 23+? search for dnf in here distro = 'fedora' install_type = 'yum' elif os_name.find('gentoo') != -1: distro = 'gentoo' install_type = 'emerge' elif os_name.find('coreos') != -1: distro = 'coreos' install_type = 'docker' else: uname_output = self.send_and_get_output(" command uname -a | awk '{print $1}'", echo=False, ignore_background=True, loglevel=loglevel) if uname_output == 'Darwin': distro = 'osx' install_type = 'brew' if not self.command_available('brew'): shutit.fail('ShutiIt requires brew be installed. 
See http://brew.sh for details on installation.') # pragma: no cover if not self.file_exists('/tmp/shutit_brew_list'): if self.whoami() != 'root': self.send(ShutItSendSpec(self, send=' brew list > .shutit_brew_list', echo=False, ignore_background=True, loglevel=loglevel)) else: pass for package in ('coreutils','findutils','gnu-tar','gnu-sed','gawk','gnutls','gnu-indent','gnu-getopt'): if self.send_and_get_output(' command cat .shutit_brew_list | grep -w ' + package, echo=False, loglevel=loglevel) == '': self.send(ShutItSendSpec(self, send='brew install ' + package, ignore_background=True, loglevel=loglevel)) self.send(ShutItSendSpec(self, send='rm -f .shutit_brew_list', echo=False, ignore_background=True, loglevel=loglevel)) if uname_output[:6] == 'CYGWIN': distro = 'cygwin' install_type = 'apt-cyg' if install_type == '' or distro == '': shutit.fail('Could not determine Linux distro information. ' + 'Please inform ShutIt maintainers at https://github.com/ianmiell/shutit', shutit_pexpect_child=self.pexpect_child) # pragma: no cover # The call to self.package_installed with lsb-release above # may fail if it doesn't know the install type, so # if we've determined that now if install_type == 'apt' and shutit.build['delivery'] in ('docker','dockerfile'): if not self.command_available('lsb_release'): if not shutit.get_current_shutit_pexpect_session_environment().build['apt_update_done'] and self.whoami() == 'root': shutit.get_current_shutit_pexpect_session_environment().build['apt_update_done'] = True self.send(ShutItSendSpec(self, send='DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -qq lsb-release', loglevel=loglevel, ignore_background=True)) self.send(ShutItSendSpec(self, send='DEBIAN_FRONTEND=noninteractive apt-get install -y -qq lsb-release', loglevel=loglevel, ignore_background=True)) d = self.lsb_release() install_type = d['install_type'] distro = d['distro'] distro_version = d['distro_version'] elif install_type == 'yum' and 
shutit.build['delivery'] in ('docker','dockerfile'): if self.file_exists('/etc/redhat-release'): output = self.send_and_get_output(' command cat /etc/redhat-release', echo=False, loglevel=loglevel) if re.match('^centos.*$', output.lower()) or re.match('^red hat.*$', output.lower()) or re.match('^fedora.*$', output.lower()) or True: self.send_and_match_output('yum install -y -t redhat-lsb', 'Complete!', loglevel=loglevel) else: if not self.command_available('lsb_release'): self.send(ShutItSendSpec(self, send='yum install -y lsb-release', ignore_background=True, loglevel=loglevel)) d = self.lsb_release() install_type = d['install_type'] distro = d['distro'] distro_version = d['distro_version'] elif install_type == 'apk' and shutit.build['delivery'] in ('docker','dockerfile'): if not shutit.get_current_shutit_pexpect_session_environment().build['apk_update_done'] and self.whoami() == 'root': self.send(ShutItSendSpec(self, send='apk -q update', ignore_background=True, loglevel=logging.INFO)) shutit.get_current_shutit_pexpect_session_environment().build['apk_update_done'] = True self.send(ShutItSendSpec(self, send='apk -q add bash', ignore_background=True, loglevel=loglevel)) install_type = 'apk' distro = 'alpine' distro_version = '1.0' elif install_type == 'emerge' and shutit.build['delivery'] in ('docker','dockerfile'): if not shutit.get_current_shutit_pexpect_session_environment().build['emerge_update_done'] and self.whoami() == 'root': # Takes bloody ages! #self.send(ShutItSendSpec(self,send='emerge --sync', loglevel=logging.INFO,ignore_background=True)) pass install_type = 'emerge' distro = 'gentoo' distro_version = '1.0' # We should have the distro info now, let's assign to target config # if this is not a one-off. self.current_environment.install_type = install_type self.current_environment.distro = distro self.current_environment.distro_version = distro_version return True
[ "def", "get_distro_info", "(", "self", ",", "loglevel", "=", "logging", ".", "DEBUG", ")", ":", "shutit", "=", "self", ".", "shutit", "install_type", "=", "''", "distro", "=", "''", "distro_version", "=", "''", "if", "shutit", ".", "build", "[", "'distro...
Get information about which distro we are using, placing it in the environment object. Fails if distro could not be determined. Should be called with the container is started up, and uses as core info as possible. Note: if the install type is apt, it issues the following: - apt-get update - apt-get install -y -qq lsb-release
[ "Get", "information", "about", "which", "distro", "we", "are", "using", "placing", "it", "in", "the", "environment", "object", "." ]
python
train
gmr/email-normalize
email_normalize.py
https://github.com/gmr/email-normalize/blob/407d8271285e40afd77c94ee2dd2180dfdedffdb/email_normalize.py#L103-L137
def normalize(email_address, resolve=True): """Return the normalized email address, removing :param str email_address: The normalized email address :param bool resolve: Resolve the domain :rtype: str """ address = utils.parseaddr(email_address) local_part, domain_part = address[1].lower().split('@') # Plus addressing is supported by Microsoft domains and FastMail if domain_part in MICROSOFT_DOMAINS: if '+' in local_part: local_part = local_part.split('+')[0] # GMail supports plus addressing and throw-away period delimiters elif _is_gmail(domain_part, resolve): local_part = local_part.replace('.', '').split('+')[0] # Yahoo domain handling of - is like plus addressing elif _is_yahoo(domain_part, resolve): if '-' in local_part: local_part = local_part.split('-')[0] # FastMail has domain part username aliasing and plus addressing elif _is_fastmail(domain_part, resolve): domain_segments = domain_part.split('.') if len(domain_segments) > 2: local_part = domain_segments[0] domain_part = '.'.join(domain_segments[1:]) elif '+' in local_part: local_part = local_part.split('+')[0] return '@'.join([local_part, domain_part])
[ "def", "normalize", "(", "email_address", ",", "resolve", "=", "True", ")", ":", "address", "=", "utils", ".", "parseaddr", "(", "email_address", ")", "local_part", ",", "domain_part", "=", "address", "[", "1", "]", ".", "lower", "(", ")", ".", "split", ...
Return the normalized email address, removing :param str email_address: The normalized email address :param bool resolve: Resolve the domain :rtype: str
[ "Return", "the", "normalized", "email", "address", "removing" ]
python
train
six8/pytailer
src/tailer/__init__.py
https://github.com/six8/pytailer/blob/8f78431b9d2e63077d7f7150264869506c890024/src/tailer/__init__.py#L153-L182
def follow(self, delay=1.0): """\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035 """ trailing = True while 1: where = self.file.tell() line = self.file.readline() if line: if trailing and line in self.line_terminators: # This is just the line terminator added to the end of the file # before a new line, ignore. trailing = False continue if line[-1] in self.line_terminators: line = line[:-1] if line[-1:] == '\r\n' and '\r\n' in self.line_terminators: # found crlf line = line[:-1] trailing = False yield line else: trailing = True self.seek(where) time.sleep(delay)
[ "def", "follow", "(", "self", ",", "delay", "=", "1.0", ")", ":", "trailing", "=", "True", "while", "1", ":", "where", "=", "self", ".", "file", ".", "tell", "(", ")", "line", "=", "self", ".", "file", ".", "readline", "(", ")", "if", "line", "...
\ Iterator generator that returns lines as data is added to the file. Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
[ "\\", "Iterator", "generator", "that", "returns", "lines", "as", "data", "is", "added", "to", "the", "file", "." ]
python
train
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/Validation.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/Validation.py#L298-L306
def __search_text_check_convert(cls, text): """Converts and keeps only words in text deemed to be valid""" text = cls.check_convert_string(text, name='text', no_leading_trailing_whitespace=False) if len(text) > VALIDATION_META_SEARCH_TEXT: raise ValueError("Search text can contain at most %d characters" % VALIDATION_META_SEARCH_TEXT) text = ' '.join(_PATTERN_WORDS.findall(text)) if not text: raise ValueError('Search text must contain at least one non-whitespace term (word)') return text
[ "def", "__search_text_check_convert", "(", "cls", ",", "text", ")", ":", "text", "=", "cls", ".", "check_convert_string", "(", "text", ",", "name", "=", "'text'", ",", "no_leading_trailing_whitespace", "=", "False", ")", "if", "len", "(", "text", ")", ">", ...
Converts and keeps only words in text deemed to be valid
[ "Converts", "and", "keeps", "only", "words", "in", "text", "deemed", "to", "be", "valid" ]
python
train
treycucco/bidon
bidon/spreadsheet/excel.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/spreadsheet/excel.py#L78-L81
def _parse_date(self, cell_value): """Attempts to parse a cell_value as a date.""" date_tuple = xlrd.xldate_as_tuple(cell_value, self.raw_sheet.book.datemode) return self.tuple_to_datetime(date_tuple)
[ "def", "_parse_date", "(", "self", ",", "cell_value", ")", ":", "date_tuple", "=", "xlrd", ".", "xldate_as_tuple", "(", "cell_value", ",", "self", ".", "raw_sheet", ".", "book", ".", "datemode", ")", "return", "self", ".", "tuple_to_datetime", "(", "date_tup...
Attempts to parse a cell_value as a date.
[ "Attempts", "to", "parse", "a", "cell_value", "as", "a", "date", "." ]
python
train
noxdafox/clipspy
clips/modules.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/modules.py#L102-L109
def modules(self): """Iterates over the defined Modules.""" defmodule = lib.EnvGetNextDefmodule(self._env, ffi.NULL) while defmodule != ffi.NULL: yield Module(self._env, defmodule) defmodule = lib.EnvGetNextDefmodule(self._env, defmodule)
[ "def", "modules", "(", "self", ")", ":", "defmodule", "=", "lib", ".", "EnvGetNextDefmodule", "(", "self", ".", "_env", ",", "ffi", ".", "NULL", ")", "while", "defmodule", "!=", "ffi", ".", "NULL", ":", "yield", "Module", "(", "self", ".", "_env", ",...
Iterates over the defined Modules.
[ "Iterates", "over", "the", "defined", "Modules", "." ]
python
train
tensorflow/probability
tensorflow_probability/examples/vq_vae.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vq_vae.py#L259-L301
def add_ema_control_dependencies(vector_quantizer, one_hot_assignments, codes, commitment_loss, decay): """Add control dependencies to the commmitment loss to update the codebook. Args: vector_quantizer: An instance of the VectorQuantizer class. one_hot_assignments: The one-hot vectors corresponding to the matched codebook entry for each code in the batch. codes: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook. commitment_loss: The commitment loss from comparing the encoder outputs to their neighboring codebook entries. decay: Decay factor for exponential moving average. Returns: commitment_loss: Commitment loss with control dependencies. """ # Use an exponential moving average to update the codebook. updated_ema_count = moving_averages.assign_moving_average( vector_quantizer.ema_count, tf.reduce_sum(input_tensor=one_hot_assignments, axis=[0, 1]), decay, zero_debias=False) updated_ema_means = moving_averages.assign_moving_average( vector_quantizer.ema_means, tf.reduce_sum( input_tensor=tf.expand_dims(codes, 2) * tf.expand_dims(one_hot_assignments, 3), axis=[0, 1]), decay, zero_debias=False) # Add small value to avoid dividing by zero. perturbed_ema_count = updated_ema_count + 1e-5 with tf.control_dependencies([commitment_loss]): update_means = tf.compat.v1.assign( vector_quantizer.codebook, updated_ema_means / perturbed_ema_count[..., tf.newaxis]) with tf.control_dependencies([update_means]): return tf.identity(commitment_loss)
[ "def", "add_ema_control_dependencies", "(", "vector_quantizer", ",", "one_hot_assignments", ",", "codes", ",", "commitment_loss", ",", "decay", ")", ":", "# Use an exponential moving average to update the codebook.", "updated_ema_count", "=", "moving_averages", ".", "assign_mov...
Add control dependencies to the commmitment loss to update the codebook. Args: vector_quantizer: An instance of the VectorQuantizer class. one_hot_assignments: The one-hot vectors corresponding to the matched codebook entry for each code in the batch. codes: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook. commitment_loss: The commitment loss from comparing the encoder outputs to their neighboring codebook entries. decay: Decay factor for exponential moving average. Returns: commitment_loss: Commitment loss with control dependencies.
[ "Add", "control", "dependencies", "to", "the", "commmitment", "loss", "to", "update", "the", "codebook", "." ]
python
test
DLR-RM/RAFCON
source/rafcon/core/execution/execution_engine.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/execution/execution_engine.py#L280-L298
def run_to_selected_state(self, path, state_machine_id=None): """Execute the state machine until a specific state. This state won't be executed. This is an asynchronous task """ if self.state_machine_manager.get_active_state_machine() is not None: self.state_machine_manager.get_active_state_machine().root_state.recursively_resume_states() if not self.finished_or_stopped(): logger.debug("Resume execution engine and run to selected state!") self.run_to_states = [] self.run_to_states.append(path) self.set_execution_mode(StateMachineExecutionStatus.RUN_TO_SELECTED_STATE) else: logger.debug("Start execution engine and run to selected state!") if state_machine_id is not None: self.state_machine_manager.active_state_machine_id = state_machine_id self.set_execution_mode(StateMachineExecutionStatus.RUN_TO_SELECTED_STATE) self.run_to_states = [] self.run_to_states.append(path) self._run_active_state_machine()
[ "def", "run_to_selected_state", "(", "self", ",", "path", ",", "state_machine_id", "=", "None", ")", ":", "if", "self", ".", "state_machine_manager", ".", "get_active_state_machine", "(", ")", "is", "not", "None", ":", "self", ".", "state_machine_manager", ".", ...
Execute the state machine until a specific state. This state won't be executed. This is an asynchronous task
[ "Execute", "the", "state", "machine", "until", "a", "specific", "state", ".", "This", "state", "won", "t", "be", "executed", ".", "This", "is", "an", "asynchronous", "task" ]
python
train
jupyter-widgets/ipywidgets
ipywidgets/widgets/widget_selectioncontainer.py
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/widget_selectioncontainer.py#L33-L46
def set_title(self, index, title): """Sets the title of a container page. Parameters ---------- index : int Index of the container page title : unicode New title """ # JSON dictionaries have string keys, so we convert index to a string index = unicode_type(int(index)) self._titles[index] = title self.send_state('_titles')
[ "def", "set_title", "(", "self", ",", "index", ",", "title", ")", ":", "# JSON dictionaries have string keys, so we convert index to a string", "index", "=", "unicode_type", "(", "int", "(", "index", ")", ")", "self", ".", "_titles", "[", "index", "]", "=", "tit...
Sets the title of a container page. Parameters ---------- index : int Index of the container page title : unicode New title
[ "Sets", "the", "title", "of", "a", "container", "page", "." ]
python
train
72squared/redpipe
redpipe/keyspaces.py
https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L121-L132
def redis_key(cls, key): """ Get the key we pass to redis. If no namespace is declared, it will use the class name. :param key: str the name of the redis key :return: str """ keyspace = cls.keyspace tpl = cls.keyspace_template key = "%s" % key if keyspace is None else tpl % (keyspace, key) return cls.keyparse.encode(key)
[ "def", "redis_key", "(", "cls", ",", "key", ")", ":", "keyspace", "=", "cls", ".", "keyspace", "tpl", "=", "cls", ".", "keyspace_template", "key", "=", "\"%s\"", "%", "key", "if", "keyspace", "is", "None", "else", "tpl", "%", "(", "keyspace", ",", "k...
Get the key we pass to redis. If no namespace is declared, it will use the class name. :param key: str the name of the redis key :return: str
[ "Get", "the", "key", "we", "pass", "to", "redis", ".", "If", "no", "namespace", "is", "declared", "it", "will", "use", "the", "class", "name", "." ]
python
train
zeromake/aiko
aiko/worker.py
https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/worker.py#L25-L36
def init_process(self) -> None: """ GunicornWorker 初始化回调 """ default_loop = asyncio.get_event_loop() if default_loop.is_running(): default_loop.close() self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) else: self.loop = default_loop super().init_process()
[ "def", "init_process", "(", "self", ")", "->", "None", ":", "default_loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "if", "default_loop", ".", "is_running", "(", ")", ":", "default_loop", ".", "close", "(", ")", "self", ".", "loop", "=", "asynci...
GunicornWorker 初始化回调
[ "GunicornWorker", "初始化回调" ]
python
train
openvax/topiary
topiary/sequence_helpers.py
https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/sequence_helpers.py#L19-L50
def protein_subsequences_around_mutations(effects, padding_around_mutation): """ From each effect get a mutant protein sequence and pull out a subsequence around the mutation (based on the given padding). Returns a dictionary of subsequences and a dictionary of subsequence start offsets. """ protein_subsequences = {} protein_subsequence_start_offsets = {} for effect in effects: protein_sequence = effect.mutant_protein_sequence # some effects will lack a mutant protein sequence since # they are either silent or unpredictable if protein_sequence: mutation_start = effect.aa_mutation_start_offset mutation_end = effect.aa_mutation_end_offset seq_start_offset = max( 0, mutation_start - padding_around_mutation) # some pseudogenes have stop codons in the reference sequence, # if we try to use them for epitope prediction we should trim # the sequence to not include the stop character '*' first_stop_codon_index = protein_sequence.find("*") if first_stop_codon_index < 0: first_stop_codon_index = len(protein_sequence) seq_end_offset = min( first_stop_codon_index, mutation_end + padding_around_mutation) subsequence = protein_sequence[seq_start_offset:seq_end_offset] protein_subsequences[effect] = subsequence protein_subsequence_start_offsets[effect] = seq_start_offset return protein_subsequences, protein_subsequence_start_offsets
[ "def", "protein_subsequences_around_mutations", "(", "effects", ",", "padding_around_mutation", ")", ":", "protein_subsequences", "=", "{", "}", "protein_subsequence_start_offsets", "=", "{", "}", "for", "effect", "in", "effects", ":", "protein_sequence", "=", "effect",...
From each effect get a mutant protein sequence and pull out a subsequence around the mutation (based on the given padding). Returns a dictionary of subsequences and a dictionary of subsequence start offsets.
[ "From", "each", "effect", "get", "a", "mutant", "protein", "sequence", "and", "pull", "out", "a", "subsequence", "around", "the", "mutation", "(", "based", "on", "the", "given", "padding", ")", ".", "Returns", "a", "dictionary", "of", "subsequences", "and", ...
python
train
juju/charm-helpers
charmhelpers/fetch/__init__.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/fetch/__init__.py#L75-L79
def base_url(self, url): """Return url without querystring or fragment""" parts = list(self.parse_url(url)) parts[4:] = ['' for i in parts[4:]] return urlunparse(parts)
[ "def", "base_url", "(", "self", ",", "url", ")", ":", "parts", "=", "list", "(", "self", ".", "parse_url", "(", "url", ")", ")", "parts", "[", "4", ":", "]", "=", "[", "''", "for", "i", "in", "parts", "[", "4", ":", "]", "]", "return", "urlun...
Return url without querystring or fragment
[ "Return", "url", "without", "querystring", "or", "fragment" ]
python
train
thespacedoctor/fundamentals
fundamentals/times.py
https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/times.py#L18-L39
def get_now_sql_datetime():
    """
    *A datetime stamp in MySQL format: ``YYYY-MM-DDTHH:MM:SS``*

    **Return:**
        - ``now`` -- current time and date in MySQL format

    **Usage:**

        .. code-block:: python

            from fundamentals import times
            now = times.get_now_sql_datetime()
            print(now)

            # OUT: 2016-03-18T11:08:23
    """
    # Previously imported `date` and `time` as well, but only `datetime`
    # is used; the intermediate reassignment of `now` was also redundant.
    from datetime import datetime
    return datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
[ "def", "get_now_sql_datetime", "(", ")", ":", "## > IMPORTS ##", "from", "datetime", "import", "datetime", ",", "date", ",", "time", "now", "=", "datetime", ".", "now", "(", ")", "now", "=", "now", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "return"...
*A datetime stamp in MySQL format: ``YYYY-MM-DDTHH:MM:SS``* **Return:** - ``now`` -- current time and date in MySQL format **Usage:** .. code-block:: python from fundamentals import times now = times.get_now_sql_datetime() print now # OUT: 2016-03-18T11:08:23
[ "*", "A", "datetime", "stamp", "in", "MySQL", "format", ":", "YYYY", "-", "MM", "-", "DDTHH", ":", "MM", ":", "SS", "*" ]
python
train
biocore/burrito-fillings
bfillings/bwa.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L614-L635
def create_bwa_index_from_fasta_file(fasta_in, params=None):
    """Create a BWA index from an input fasta file.

    fasta_in: the input fasta file from which to create the index

    params: dict of bwa index specific paramters

    This method returns a dictionary where the keys are the various
    output suffixes (.amb, .ann, .bwt, .pac, .sa) and the values
    are open file objects.

    The index prefix will be the same as fasta_in, unless the -p parameter
    is passed in params.
    """
    # Build the app controller with the caller's parameters (if any) and
    # invoke it on the input fasta file.
    index_app = BWA_index(params if params is not None else {})
    return index_app({'fasta_in': fasta_in})
[ "def", "create_bwa_index_from_fasta_file", "(", "fasta_in", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "# Instantiate the app controller", "index", "=", "BWA_index", "(", "params", ")", "# call the application...
Create a BWA index from an input fasta file. fasta_in: the input fasta file from which to create the index params: dict of bwa index specific paramters This method returns a dictionary where the keys are the various output suffixes (.amb, .ann, .bwt, .pac, .sa) and the values are open file objects. The index prefix will be the same as fasta_in, unless the -p parameter is passed in params.
[ "Create", "a", "BWA", "index", "from", "an", "input", "fasta", "file", "." ]
python
train
vslutov/turingmarkov
turingmarkov/turing.py
https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/turing.py#L175-L185
def build_machine(lines):
    """Build machine from list of lines.

    The first line supplies the machine's alphabet (whitespace-split);
    each subsequent non-blank line defines a state. Raises SyntaxError
    on an empty input list.
    """
    if lines == []:
        raise SyntaxError('Empty file')
    machine = Machine(lines[0].split())
    for row in lines[1:]:
        if row.strip():
            machine.add_state(row)
    machine.check()
    return machine
[ "def", "build_machine", "(", "lines", ")", ":", "if", "lines", "==", "[", "]", ":", "raise", "SyntaxError", "(", "'Empty file'", ")", "else", ":", "machine", "=", "Machine", "(", "lines", "[", "0", "]", ".", "split", "(", ")", ")", "for", "line", "...
Build machine from list of lines.
[ "Build", "machine", "from", "list", "of", "lines", "." ]
python
train
rigetti/quantumflow
quantumflow/circuits.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/circuits.py#L227-L257
def ccnot_circuit(qubits: Qubits) -> Circuit:
    """Standard decomposition of CCNOT (Toffoli) gate into six CNOT gates
    (Plus Hadamard and T gates.) [Nielsen2000]_

    .. [Nielsen2000]
        M. A. Nielsen and I. L. Chuang, Quantum Computation and Quantum
        Information, Cambridge University Press (2000).
    """
    if len(qubits) != 3:
        raise ValueError('Expected 3 qubits')
    q0, q1, q2 = qubits
    # Fixed gate sequence of the textbook decomposition; order matters.
    sequence = [
        H(q2),
        CNOT(q1, q2),
        T(q2).H,
        CNOT(q0, q2),
        T(q2),
        CNOT(q1, q2),
        T(q2).H,
        CNOT(q0, q2),
        T(q1),
        T(q2),
        H(q2),
        CNOT(q0, q1),
        T(q0),
        T(q1).H,
        CNOT(q0, q1),
    ]
    circ = Circuit()
    for gate in sequence:
        circ += gate
    return circ
[ "def", "ccnot_circuit", "(", "qubits", ":", "Qubits", ")", "->", "Circuit", ":", "if", "len", "(", "qubits", ")", "!=", "3", ":", "raise", "ValueError", "(", "'Expected 3 qubits'", ")", "q0", ",", "q1", ",", "q2", "=", "qubits", "circ", "=", "Circuit",...
Standard decomposition of CCNOT (Toffoli) gate into six CNOT gates (Plus Hadamard and T gates.) [Nielsen2000]_ .. [Nielsen2000] M. A. Nielsen and I. L. Chuang, Quantum Computation and Quantum Information, Cambridge University Press (2000).
[ "Standard", "decomposition", "of", "CCNOT", "(", "Toffoli", ")", "gate", "into", "six", "CNOT", "gates", "(", "Plus", "Hadamard", "and", "T", "gates", ".", ")", "[", "Nielsen2000", "]", "_" ]
python
train
CloverHealth/temple
temple/update.py
https://github.com/CloverHealth/temple/blob/d7b75da2459f72ba74d6f3b6e1ab95c3d1b92ccd/temple/update.py#L152-L162
def _needs_new_cc_config_for_update(old_template, old_version, new_template, new_version): """ Given two templates and their respective versions, return True if a new cookiecutter config needs to be obtained from the user """ if old_template != new_template: return True else: return _cookiecutter_configs_have_changed(new_template, old_version, new_version)
[ "def", "_needs_new_cc_config_for_update", "(", "old_template", ",", "old_version", ",", "new_template", ",", "new_version", ")", ":", "if", "old_template", "!=", "new_template", ":", "return", "True", "else", ":", "return", "_cookiecutter_configs_have_changed", "(", "...
Given two templates and their respective versions, return True if a new cookiecutter config needs to be obtained from the user
[ "Given", "two", "templates", "and", "their", "respective", "versions", "return", "True", "if", "a", "new", "cookiecutter", "config", "needs", "to", "be", "obtained", "from", "the", "user" ]
python
valid
arviz-devs/arviz
arviz/stats/stats.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/stats/stats.py#L564-L611
def _gpdfit(x): """Estimate the parameters for the Generalized Pareto Distribution (GPD). Empirical Bayes estimate for the parameters of the generalized Pareto distribution given the data. Parameters ---------- x : array sorted 1D data array Returns ------- k : float estimated shape parameter sigma : float estimated scale parameter """ prior_bs = 3 prior_k = 10 len_x = len(x) m_est = 30 + int(len_x ** 0.5) b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5)) b_ary /= prior_bs * x[int(len_x / 4 + 0.5) - 1] b_ary += 1 / x[-1] k_ary = np.log1p(-b_ary[:, None] * x).mean(axis=1) # pylint: disable=no-member len_scale = len_x * (np.log(-(b_ary / k_ary)) - k_ary - 1) weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1) # remove negligible weights real_idxs = weights >= 10 * np.finfo(float).eps if not np.all(real_idxs): weights = weights[real_idxs] b_ary = b_ary[real_idxs] # normalise weights weights /= weights.sum() # posterior mean for b b_post = np.sum(b_ary * weights) # estimate for k k_post = np.log1p(-b_post * x).mean() # pylint: disable=invalid-unary-operand-type,no-member # add prior for k_post k_post = (len_x * k_post + prior_k * 0.5) / (len_x + prior_k) sigma = -k_post / b_post return k_post, sigma
[ "def", "_gpdfit", "(", "x", ")", ":", "prior_bs", "=", "3", "prior_k", "=", "10", "len_x", "=", "len", "(", "x", ")", "m_est", "=", "30", "+", "int", "(", "len_x", "**", "0.5", ")", "b_ary", "=", "1", "-", "np", ".", "sqrt", "(", "m_est", "/"...
Estimate the parameters for the Generalized Pareto Distribution (GPD). Empirical Bayes estimate for the parameters of the generalized Pareto distribution given the data. Parameters ---------- x : array sorted 1D data array Returns ------- k : float estimated shape parameter sigma : float estimated scale parameter
[ "Estimate", "the", "parameters", "for", "the", "Generalized", "Pareto", "Distribution", "(", "GPD", ")", "." ]
python
train
eaton-lab/toytree
toytree/etemini.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L1333-L1347
def sort_descendants(self, attr="name"):
    """
    Sort the children of every internal node into a deterministic order.

    For each non-leaf node, its children are sorted by the string form of
    the sorted list of descendant ``attr`` values (default: node names),
    so trees containing the same node names always end up with their
    branches in the same order. Note that if duplicated names are present,
    extra criteria would be needed to break ties deterministically.

    Modifies the tree in place; returns None.

    NOTE(review): an earlier docstring claimed nodes are then labeled with
    ascending numbers stored in ``node._nid`` — no such labeling happens in
    this method; confirm whether a caller is expected to do it.
    """
    # Cache, for every node, the list of `attr` values of its leaves so
    # the sort key below does not re-traverse subtrees repeatedly.
    node2content = self.get_cached_content(store_attr=attr, container_type=list)

    for n in self.traverse():
        if not n.is_leaf():
            n.children.sort(key=lambda x: str(sorted(node2content[x])))
[ "def", "sort_descendants", "(", "self", ",", "attr", "=", "\"name\"", ")", ":", "node2content", "=", "self", ".", "get_cached_content", "(", "store_attr", "=", "attr", ",", "container_type", "=", "list", ")", "for", "n", "in", "self", ".", "traverse", "(",...
This function sort the branches of a given tree by considerening node names. After the tree is sorted, nodes are labeled using ascendent numbers. This can be used to ensure that nodes in a tree with the same node names are always labeled in the same way. Note that if duplicated names are present, extra criteria should be added to sort nodes. Unique id is stored as a node._nid attribute
[ "This", "function", "sort", "the", "branches", "of", "a", "given", "tree", "by", "considerening", "node", "names", ".", "After", "the", "tree", "is", "sorted", "nodes", "are", "labeled", "using", "ascendent", "numbers", ".", "This", "can", "be", "used", "t...
python
train
J535D165/recordlinkage
recordlinkage/adapters.py
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/adapters.py#L57-L78
def _prob_match(self, features): """Compute match probabilities. Parameters ---------- features : numpy.ndarray The data to train the model on. Returns ------- numpy.ndarray The match probabilties. """ # compute the probabilities probs = self.kernel.predict_proba(features) # get the position of match probabilities classes = list(self.kernel.classes_) match_class_position = classes.index(1) return probs[:, match_class_position]
[ "def", "_prob_match", "(", "self", ",", "features", ")", ":", "# compute the probabilities", "probs", "=", "self", ".", "kernel", ".", "predict_proba", "(", "features", ")", "# get the position of match probabilities", "classes", "=", "list", "(", "self", ".", "ke...
Compute match probabilities. Parameters ---------- features : numpy.ndarray The data to train the model on. Returns ------- numpy.ndarray The match probabilties.
[ "Compute", "match", "probabilities", "." ]
python
train