repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
wbond/oscrypto
oscrypto/_openssl/asymmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_openssl/asymmetric.py#L729-L755
def _load_key(private_object):
    """
    Loads a private key into a PrivateKey object

    :param private_object:
        An asn1crypto.keys.PrivateKeyInfo object

    :return:
        A PrivateKey object
    """
    # OpenSSL 0.9.8 (version_info < (1,)) cannot handle SHA-2-based DSA
    # keys, so fail early with a descriptive error instead of a cryptic
    # library failure.
    if libcrypto_version_info < (1,) and private_object.algorithm == 'dsa' and private_object.hash_algo == 'sha2':
        raise AsymmetricKeyError(pretty_message(
            '''
            OpenSSL 0.9.8 only supports DSA keys based on SHA1 (2048 bits or
            less) - this key is based on SHA2 and is %s bits
            ''',
            private_object.bit_size
        ))

    # Serialize the unwrapped (algorithm-specific) key to DER and hand the
    # bytes to OpenSSL, which auto-detects the key type.
    source = private_object.unwrap().dump()

    buffer = buffer_from_bytes(source)
    evp_pkey = libcrypto.d2i_AutoPrivateKey(null(), buffer_pointer(buffer), len(source))
    if is_null(evp_pkey):
        handle_openssl_error(0)
    return PrivateKey(evp_pkey, private_object)
[ "def", "_load_key", "(", "private_object", ")", ":", "if", "libcrypto_version_info", "<", "(", "1", ",", ")", "and", "private_object", ".", "algorithm", "==", "'dsa'", "and", "private_object", ".", "hash_algo", "==", "'sha2'", ":", "raise", "AsymmetricKeyError",...
Loads a private key into a PrivateKey object :param private_object: An asn1crypto.keys.PrivateKeyInfo object :return: A PrivateKey object
[ "Loads", "a", "private", "key", "into", "a", "PrivateKey", "object" ]
python
valid
allenai/allennlp
allennlp/tools/drop_eval.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/drop_eval.py#L150-L164
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]:
    """
    Takes an answer JSON blob from the DROP data release and converts it into strings used for
    evaluation.
    """
    # A non-empty "number" field takes precedence over spans and dates.
    if answer.get("number"):
        return (str(answer["number"]),), "number"
    # A non-empty span list: answer kind distinguishes single vs. multiple spans.
    if answer.get("spans"):
        spans = tuple(answer["spans"])
        kind = "span" if len(spans) == 1 else "spans"
        return spans, kind
    if "date" in answer:
        date = answer["date"]
        text = "{0} {1} {2}".format(date["day"], date["month"], date["year"])
        return (text,), "date"
    raise ValueError(f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}")
[ "def", "answer_json_to_strings", "(", "answer", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Tuple", "[", "Tuple", "[", "str", ",", "...", "]", ",", "str", "]", ":", "if", "\"number\"", "in", "answer", "and", "answer", "[", "\"number\"", "]"...
Takes an answer JSON blob from the DROP data release and converts it into strings used for evaluation.
[ "Takes", "an", "answer", "JSON", "blob", "from", "the", "DROP", "data", "release", "and", "converts", "it", "into", "strings", "used", "for", "evaluation", "." ]
python
train
sdispater/backpack
backpack/collections/base_collection.py
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L145-L155
def diff(self, items):
    """
    Diff the collections with the given items

    :param items: The items to diff with
    :type items: mixed

    :return: A Collection instance
    :rtype: Collection
    """
    # Keep every element of this collection that does not appear in the
    # given items, preserving the original order.
    remaining = []
    for element in self.items:
        if element not in items:
            remaining.append(element)
    return self.__class__(remaining)
[ "def", "diff", "(", "self", ",", "items", ")", ":", "return", "self", ".", "__class__", "(", "[", "i", "for", "i", "in", "self", ".", "items", "if", "i", "not", "in", "items", "]", ")" ]
Diff the collections with the given items :param items: The items to diff with :type items: mixed :return: A Collection instance :rtype: Collection
[ "Diff", "the", "collections", "with", "the", "given", "items" ]
python
train
pyroscope/pyrocore
src/pyrocore/torrent/formatting.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/torrent/formatting.py#L41-L47
def fmt_sz(intval):
    """ Format a byte sized value.

    Returns a human-readable size string, or a right-aligned "N/A"
    placeholder when the value cannot be formatted as a number.
    """
    try:
        return fmt.human_size(intval)
    except (ValueError, TypeError):
        # Pad the placeholder to the width of a formatted size so table
        # columns stay aligned.
        return "N/A".rjust(len(fmt.human_size(0)))
[ "def", "fmt_sz", "(", "intval", ")", ":", "try", ":", "return", "fmt", ".", "human_size", "(", "intval", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "\"N/A\"", ".", "rjust", "(", "len", "(", "fmt", ".", "human_size", "(", ...
Format a byte sized value.
[ "Format", "a", "byte", "sized", "value", "." ]
python
train
mattjj/pylds
pylds/laplace.py
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L53-L65
def hessian_local_log_likelihood(self, x):
    """
    return d^2/dxt^2 log p(y | x) for each time bin

    Optionally override this in base classes
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # Autodiff Hessian of the per-timestep log likelihood w.r.t. its
    # first argument (the latent state at that time bin).
    hfun = hessian(self.local_log_likelihood)

    # One (D x D) Hessian block per time bin; only the block-diagonal is
    # computed — presumably the likelihood factorizes over time (confirm).
    H_diag = np.zeros((T, D, D))
    for t in range(T):
        H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
    return H_diag
[ "def", "hessian_local_log_likelihood", "(", "self", ",", "x", ")", ":", "T", ",", "D", "=", "self", ".", "T", ",", "self", ".", "D_latent", "assert", "x", ".", "shape", "==", "(", "T", ",", "D", ")", "hfun", "=", "hessian", "(", "self", ".", "loc...
return d^2/dxt^2 log p(y | x) for each time bin Optionally override this in base classes
[ "return", "d^2", "/", "dxt^2", "log", "p", "(", "y", "|", "x", ")", "for", "each", "time", "bin", "Optionally", "override", "this", "in", "base", "classes" ]
python
train
wdm0006/sklearn-extensions
sklearn_extensions/extreme_learning_machines/random_layer.py
https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L296-L326
def _compute_centers(self, X, sparse, rs): """Generate RBF centers""" # use supplied centers if present centers = self._get_user_components('centers') # use points taken uniformly from the bounding # hyperrectangle if (centers is None): n_features = X.shape[1] if (sparse): fxr = range(n_features) cols = [X.getcol(i) for i in fxr] min_dtype = X.dtype.type(1.0e10) sp_min = lambda col: np.minimum(min_dtype, np.min(col.data)) min_Xs = np.array(map(sp_min, cols)) max_dtype = X.dtype.type(-1.0e10) sp_max = lambda col: np.maximum(max_dtype, np.max(col.data)) max_Xs = np.array(map(sp_max, cols)) else: min_Xs = X.min(axis=0) max_Xs = X.max(axis=0) spans = max_Xs - min_Xs ctrs_size = (self.n_hidden, n_features) centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size) self.components_['centers'] = centers
[ "def", "_compute_centers", "(", "self", ",", "X", ",", "sparse", ",", "rs", ")", ":", "# use supplied centers if present", "centers", "=", "self", ".", "_get_user_components", "(", "'centers'", ")", "# use points taken uniformly from the bounding", "# hyperrectangle", "...
Generate RBF centers
[ "Generate", "RBF", "centers" ]
python
train
Nic30/hwtGraph
hwtGraph/elk/fromHwt/resolveSharedConnections.py
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/resolveSharedConnections.py#L6-L84
def portTryReduce(root: LNode, port: LPort):
    """
    Check if majority of children is connected to same port
    if it is the case reduce children and connect this port instead children

    :note: use reduceUselessAssignments, extractSplits, flattenTrees
        before this function to maximize it's effect
    """
    if not port.children:
        return

    # Reduce bottom-up: children first, then this port.
    for p in port.children:
        portTryReduce(root, p)

    # Map of candidate target port -> list of (child, edge) pairs that
    # connect to it, filled in by countDirectlyConnected.
    target_nodes = {}
    ch_cnt = countDirectlyConnected(port, target_nodes)
    if not target_nodes:
        # disconnected port
        return

    # Pick the target shared by the most children.
    new_target, children_edge_to_destroy = max(target_nodes.items(),
                                               key=lambda x: len(x[1]))
    cnt = len(children_edge_to_destroy)
    if cnt < ch_cnt / 2 or cnt == 1 and ch_cnt == 2:
        # too small to few shared connection to reduce
        return

    children_to_destroy = set()
    on_target_children_to_destroy = set()
    for child, edge in children_edge_to_destroy:
        # Resolve the port on the other side of the edge.
        if child.direction == PortType.OUTPUT:
            target_ch = edge.dsts
        elif child.direction == PortType.INPUT:
            target_ch = edge.srcs
        else:
            raise ValueError(child.direction)

        if len(target_ch) != 1:
            raise NotImplementedError("multiple connected nodes", target_ch)

        target_ch = target_ch[0]
        try:
            assert target_ch.parent is new_target, (
                target_ch, target_ch.parent, new_target)
        except AssertionError:
            # Dump diagnostic context before re-raising.
            print('Wrong target:\n', edge.src, "\n", edge.dst, "\n",
                  target_ch.parent, "\n", new_target)
            raise

        # Detach the edge endpoint on the appropriate side.
        if child.direction == PortType.OUTPUT:
            edge.removeTarget(target_ch)
        elif child.direction == PortType.INPUT:
            edge.removeTarget(child)

        # Drop the edge entirely once either side is empty.
        if not edge.srcs or not edge.dsts:
            edge.remove()

        if not target_ch.incomingEdges and not target_ch.outgoingEdges:
            # disconnect selected children from this port and target
            on_target_children_to_destroy.add(target_ch)

        if not child.incomingEdges and not child.outgoingEdges:
            children_to_destroy.add(child)

    # destroy children of new target and this port if possible
    port.children = [
        ch for ch in port.children if ch not in children_to_destroy]
    new_target.children = [
        ch for ch in new_target.children
        if ch not in on_target_children_to_destroy]

    # connect this port to new target as it was connected by children before
    # [TODO] names for new edges
    if port.direction == PortType.OUTPUT:
        root.addEdge(port, new_target)
    elif port.direction == PortType.INPUT:
        root.addEdge(new_target, port)
    else:
        raise NotImplementedError(port.direction)
[ "def", "portTryReduce", "(", "root", ":", "LNode", ",", "port", ":", "LPort", ")", ":", "if", "not", "port", ".", "children", ":", "return", "for", "p", "in", "port", ".", "children", ":", "portTryReduce", "(", "root", ",", "p", ")", "target_nodes", ...
Check if majority of children is connected to same port if it is the case reduce children and connect this port instead children :note: use reduceUselessAssignments, extractSplits, flattenTrees before this function to maximize it's effect
[ "Check", "if", "majority", "of", "children", "is", "connected", "to", "same", "port", "if", "it", "is", "the", "case", "reduce", "children", "and", "connect", "this", "port", "instead", "children" ]
python
train
quantumlib/Cirq
cirq/sim/sampler.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/sim/sampler.py#L25-L42
def run(
        self,
        program: Union[circuits.Circuit, schedules.Schedule],
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
        repetitions: int = 1,
) -> study.TrialResult:
    """Samples from the given Circuit or Schedule.

    Args:
        program: The circuit or schedule to simulate.
        param_resolver: Parameters to run with the program.
        repetitions: The number of repetitions to simulate.

    Returns:
        TrialResult for a run.
    """
    # Delegate to the sweep API with a single resolver and return the
    # first (and only) trial result.
    return self.run_sweep(program, study.ParamResolver(param_resolver),
                          repetitions)[0]
[ "def", "run", "(", "self", ",", "program", ":", "Union", "[", "circuits", ".", "Circuit", ",", "schedules", ".", "Schedule", "]", ",", "param_resolver", ":", "'study.ParamResolverOrSimilarType'", "=", "None", ",", "repetitions", ":", "int", "=", "1", ",", ...
Samples from the given Circuit or Schedule. Args: program: The circuit or schedule to simulate. param_resolver: Parameters to run with the program. repetitions: The number of repetitions to simulate. Returns: TrialResult for a run.
[ "Samples", "from", "the", "given", "Circuit", "or", "Schedule", "." ]
python
train
n1analytics/python-paillier
phe/command_line.py
https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/phe/command_line.py#L139-L165
def decrypt(private, ciphertext, output):
    """Decrypt ciphertext with private key.

    Requires PRIVATE key file and the CIPHERTEXT encrypted with
    the corresponding public key.
    """
    # The private key JSON embeds its public key under 'pub'.
    privatekeydata = json.load(private)
    assert 'pub' in privatekeydata
    pub = load_public_key(privatekeydata['pub'])

    log("Loading private key")
    private_key_error = "Invalid private key"
    # NOTE(review): `assert` is stripped under `python -O`; explicit
    # raises would be safer for input validation.
    assert 'key_ops' in privatekeydata, private_key_error
    assert "decrypt" in privatekeydata['key_ops'], private_key_error
    assert 'p' in privatekeydata, private_key_error
    assert 'q' in privatekeydata, private_key_error
    assert privatekeydata['kty'] == 'DAJ', private_key_error

    # p and q are the Paillier key's primes, base64-encoded in the JSON.
    _p = phe.util.base64_to_int(privatekeydata['p'])
    _q = phe.util.base64_to_int(privatekeydata['q'])

    private_key = phe.PaillierPrivateKey(pub, _p, _q)

    log("Decrypting ciphertext")
    enc = load_encrypted_number(ciphertext, pub)
    out = private_key.decrypt(enc)

    print(out, file=output)
[ "def", "decrypt", "(", "private", ",", "ciphertext", ",", "output", ")", ":", "privatekeydata", "=", "json", ".", "load", "(", "private", ")", "assert", "'pub'", "in", "privatekeydata", "pub", "=", "load_public_key", "(", "privatekeydata", "[", "'pub'", "]",...
Decrypt ciphertext with private key. Requires PRIVATE key file and the CIPHERTEXT encrypted with the corresponding public key.
[ "Decrypt", "ciphertext", "with", "private", "key", "." ]
python
train
raphaelvallat/pingouin
pingouin/pandas.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pandas.py#L18-L22
def _anova(self, dv=None, between=None, detailed=False, export_filename=None):
    """Return one-way and two-way ANOVA.

    Thin DataFrame-method wrapper: forwards the calling object as
    ``data`` to :func:`anova`; see that function for parameter details.
    """
    aov = anova(data=self, dv=dv, between=between, detailed=detailed,
                export_filename=export_filename)
    return aov
[ "def", "_anova", "(", "self", ",", "dv", "=", "None", ",", "between", "=", "None", ",", "detailed", "=", "False", ",", "export_filename", "=", "None", ")", ":", "aov", "=", "anova", "(", "data", "=", "self", ",", "dv", "=", "dv", ",", "between", ...
Return one-way and two-way ANOVA.
[ "Return", "one", "-", "way", "and", "two", "-", "way", "ANOVA", "." ]
python
train
alimanfoo/csvvalidator
csvvalidator.py
https://github.com/alimanfoo/csvvalidator/blob/50a86eefdc549c48f65a91a5c0a66099010ee65d/csvvalidator.py#L293-L324
def add_record_predicate(self, record_predicate,
                         code=RECORD_PREDICATE_FALSE,
                         message=MESSAGES[RECORD_PREDICATE_FALSE],
                         modulus=1):
    """
    Add a record predicate function.

    N.B., everything you can do with record predicates can also be done
    with record check functions, whether you use one or the other is a
    matter of style.

    Arguments
    ---------

    `record_predicate` - a function that accepts a single argument (a
    record as a dictionary of values indexed by field name) and returns
    False if the value is not valid

    `code` - problem code to report if a record is not valid, defaults to
    `RECORD_PREDICATE_FALSE`

    `message` - problem message to report if a record is not valid

    `modulus` - apply the check to every nth record, defaults to 1 (check
    every record)

    """
    assert callable(record_predicate), 'record predicate must be a callable function'
    # Store the predicate with its reporting configuration as one entry.
    entry = (record_predicate, code, message, modulus)
    self._record_predicates.append(entry)
[ "def", "add_record_predicate", "(", "self", ",", "record_predicate", ",", "code", "=", "RECORD_PREDICATE_FALSE", ",", "message", "=", "MESSAGES", "[", "RECORD_PREDICATE_FALSE", "]", ",", "modulus", "=", "1", ")", ":", "assert", "callable", "(", "record_predicate",...
Add a record predicate function. N.B., everything you can do with record predicates can also be done with record check functions, whether you use one or the other is a matter of style. Arguments --------- `record_predicate` - a function that accepts a single argument (a record as a dictionary of values indexed by field name) and returns False if the value is not valid `code` - problem code to report if a record is not valid, defaults to `RECORD_PREDICATE_FALSE` `message` - problem message to report if a record is not valid `modulus` - apply the check to every nth record, defaults to 1 (check every record)
[ "Add", "a", "record", "predicate", "function", "." ]
python
valid
kmerkmer/pymer
pymer/markov.py
https://github.com/kmerkmer/pymer/blob/c22802436b3756a2e92829c9b234bde6217b683a/pymer/markov.py#L80-L92
def P(self):
    """Sparse [k-1]x[k-1] transition frequency matrix"""
    # Return the memoised matrix when it has already been built.
    if self._P is not None:
        return self._P

    matrix = sparse.lil_matrix((self.n, self.n))
    # Mask w/ all bits set hi within the (k-1)-mer state range.
    state_mask = self.n - 1
    for state in range(self.n):
        for symbol in range(len(self.alphabet)):
            # Successor state: shift in the 2-bit symbol code and drop
            # any bits that overflow the state range.
            successor = (state << 2 | symbol) & state_mask
            matrix[state, successor] = self.transitions[state, symbol]

    self._P = matrix
    return matrix
[ "def", "P", "(", "self", ")", ":", "if", "self", ".", "_P", "is", "not", "None", ":", "return", "self", ".", "_P", "sparse_P", "=", "sparse", ".", "lil_matrix", "(", "(", "self", ".", "n", ",", "self", ".", "n", ")", ")", "alpha_size", "=", "le...
Sparse [k-1]x[k-1] transition frequency matrix
[ "Sparse", "[", "k", "-", "1", "]", "x", "[", "k", "-", "1", "]", "transition", "frequency", "matrix" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe.py#L245-L258
def fcoe_fcoe_map_fcoe_map_cee_map_fcoe_map_cee_map_leaf(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe") fcoe_map = ET.SubElement(fcoe, "fcoe-map") fcoe_map_name_key = ET.SubElement(fcoe_map, "fcoe-map-name") fcoe_map_name_key.text = kwargs.pop('fcoe_map_name') fcoe_map_cee_map = ET.SubElement(fcoe_map, "fcoe-map-cee-map") fcoe_map_cee_map_leaf = ET.SubElement(fcoe_map_cee_map, "fcoe-map-cee-map-leaf") fcoe_map_cee_map_leaf.text = kwargs.pop('fcoe_map_cee_map_leaf') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcoe_fcoe_map_fcoe_map_cee_map_fcoe_map_cee_map_leaf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe", "=", "ET", ".", "SubElement", "(", "config", ",", "\"fcoe\"", ",", "xmlns", "=...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
CivicSpleen/ambry
ambry/orm/partition.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/partition.py#L790-L796
def analysis(self):
    """Return an AnalysisPartition proxy, which wraps this partition to
    provide access to dataframes, shapely shapes and other analysis
    services"""
    # Unwrap an existing proxy so AnalysisPartition always wraps the
    # underlying partition object directly.
    if isinstance(self, PartitionProxy):
        return AnalysisPartition(self._obj)
    else:
        return AnalysisPartition(self)
[ "def", "analysis", "(", "self", ")", ":", "if", "isinstance", "(", "self", ",", "PartitionProxy", ")", ":", "return", "AnalysisPartition", "(", "self", ".", "_obj", ")", "else", ":", "return", "AnalysisPartition", "(", "self", ")" ]
Return an AnalysisPartition proxy, which wraps this partition to provide acess to dataframes, shapely shapes and other analysis services
[ "Return", "an", "AnalysisPartition", "proxy", "which", "wraps", "this", "partition", "to", "provide", "acess", "to", "dataframes", "shapely", "shapes", "and", "other", "analysis", "services" ]
python
train
django-fluent/django-fluent-contents
fluent_contents/cache.py
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/cache.py#L7-L22
def get_rendering_cache_key(placeholder_name, contentitem):
    """
    Return a cache key for the content item output.

    .. seealso::

        The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>`
        function can be used to remove the cache keys of a retrieved object.
    """
    # Unsaved items have no primary key, hence nothing cacheable.
    if not contentitem.pk:
        return None
    plugin_name = contentitem.plugin.type_name  # always the upcasted name.
    item_pk = contentitem.pk  # already unique per language_code
    return "contentitem.@{0}.{1}.{2}".format(placeholder_name, plugin_name, item_pk)
[ "def", "get_rendering_cache_key", "(", "placeholder_name", ",", "contentitem", ")", ":", "if", "not", "contentitem", ".", "pk", ":", "return", "None", "return", "\"contentitem.@{0}.{1}.{2}\"", ".", "format", "(", "placeholder_name", ",", "contentitem", ".", "plugin"...
Return a cache key for the content item output. .. seealso:: The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function can be used to remove the cache keys of a retrieved object.
[ "Return", "a", "cache", "key", "for", "the", "content", "item", "output", "." ]
python
train
brunobord/meuhdb
meuhdb/core.py
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L171-L175
def insert(self, value):
    "Insert value in the keystore. Return the UUID key."
    # Generate a fresh random key so inserts never collide.
    new_key = str(uuid4())
    self.set(new_key, value)
    return new_key
[ "def", "insert", "(", "self", ",", "value", ")", ":", "key", "=", "str", "(", "uuid4", "(", ")", ")", "self", ".", "set", "(", "key", ",", "value", ")", "return", "key" ]
Insert value in the keystore. Return the UUID key.
[ "Insert", "value", "in", "the", "keystore", ".", "Return", "the", "UUID", "key", "." ]
python
train
alixedi/palal
palal/survey.py
https://github.com/alixedi/palal/blob/325359f66ac48a9f96efea0489aec353f8a40837/palal/survey.py#L28-L31
def _stdin_(p): """Takes input from user. Works for Python 2 and 3.""" _v = sys.version[0] return input(p) if _v is '3' else raw_input(p)
[ "def", "_stdin_", "(", "p", ")", ":", "_v", "=", "sys", ".", "version", "[", "0", "]", "return", "input", "(", "p", ")", "if", "_v", "is", "'3'", "else", "raw_input", "(", "p", ")" ]
Takes input from user. Works for Python 2 and 3.
[ "Takes", "input", "from", "user", ".", "Works", "for", "Python", "2", "and", "3", "." ]
python
train
chaimleib/intervaltree
intervaltree/intervaltree.py
https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/intervaltree.py#L622-L643
def split_overlaps(self):
    """
    Finds all intervals with overlapping ranges and splits them
    along the range boundaries.

    Completes in worst-case O(n^2*log n) time (many interval boundaries
    are inside many intervals), best-case O(n*log n) time (small number
    of overlaps << n per interval).
    """
    if not self:
        return
    # Exactly two boundary points means a single span: nothing overlaps.
    if len(self.boundary_table) == 2:
        return

    bounds = sorted(self.boundary_table)  # get bound locations

    # Re-slice every interval into the atomic [lbound, ubound) pieces it
    # covers, keeping each original interval's data on every piece.
    new_ivs = set()
    for lbound, ubound in zip(bounds[:-1], bounds[1:]):
        for iv in self[lbound]:
            new_ivs.add(Interval(lbound, ubound, iv.data))

    # Rebuild the tree in place from the split pieces.
    self.__init__(new_ivs)
[ "def", "split_overlaps", "(", "self", ")", ":", "if", "not", "self", ":", "return", "if", "len", "(", "self", ".", "boundary_table", ")", "==", "2", ":", "return", "bounds", "=", "sorted", "(", "self", ".", "boundary_table", ")", "# get bound locations", ...
Finds all intervals with overlapping ranges and splits them along the range boundaries. Completes in worst-case O(n^2*log n) time (many interval boundaries are inside many intervals), best-case O(n*log n) time (small number of overlaps << n per interval).
[ "Finds", "all", "intervals", "with", "overlapping", "ranges", "and", "splits", "them", "along", "the", "range", "boundaries", "." ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/minmatch.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/minmatch.py#L204-L210
def getallkeys(self, key, failobj=None):
    """Returns a list of the full key names (not the items) for all
    the matching values for key.  The list will contain a single
    entry for unambiguous matches and multiple entries for ambiguous
    matches."""
    # Build the minimum-match table lazily on first use.
    if self.mmkeys is None:
        self._mmInit()
    matches = self.mmkeys.get(key, failobj)
    return matches
[ "def", "getallkeys", "(", "self", ",", "key", ",", "failobj", "=", "None", ")", ":", "if", "self", ".", "mmkeys", "is", "None", ":", "self", ".", "_mmInit", "(", ")", "return", "self", ".", "mmkeys", ".", "get", "(", "key", ",", "failobj", ")" ]
Returns a list of the full key names (not the items) for all the matching values for key. The list will contain a single entry for unambiguous matches and multiple entries for ambiguous matches.
[ "Returns", "a", "list", "of", "the", "full", "key", "names", "(", "not", "the", "items", ")", "for", "all", "the", "matching", "values", "for", "key", ".", "The", "list", "will", "contain", "a", "single", "entry", "for", "unambiguous", "matches", "and", ...
python
train
onnx/onnxmltools
onnxmltools/convert/libsvm/_parse.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/libsvm/_parse.py#L11-L36
def _parse_libsvm_simple_model(scope, model, inputs):
    '''
    This function handles all non-pipeline models.

    :param scope: Scope object
    :param model: A libsvm object (e.g., OneHotEncoder and LogisticRegression)
    :param inputs: A list of variables
    :return: A list of output variables which will be passed to next stage
    '''
    # svm_type 0/1 are the libsvm classifiers: two outputs, predicted
    # label plus per-class probabilities.
    if model.get_svm_type() in (0, 1):
        label_variable = scope.declare_local_variable('label', FloatTensorType())
        probability_map_variable = scope.declare_local_variable('probabilities', FloatTensorType())
        this_operator = scope.declare_local_operator("LibSvmSVC", model)
        this_operator.inputs = inputs
        this_operator.outputs.append(label_variable)
        this_operator.outputs.append(probability_map_variable)
    # svm_type 3/4 are the libsvm regressors: a single output tensor.
    elif model.get_svm_type() in (4, 3):
        # We assume that all scikit-learn operator can only produce a single float tensor.
        variable = scope.declare_local_variable('variable', FloatTensorType())
        this_operator = scope.declare_local_operator("LibSvmSVR", model)
        this_operator.inputs = inputs
        this_operator.outputs.append(variable)
    else:
        raise ValueError("Unknown SVM type '{0}'".format(model.get_svm_type()))
    return this_operator.outputs
[ "def", "_parse_libsvm_simple_model", "(", "scope", ",", "model", ",", "inputs", ")", ":", "if", "model", ".", "get_svm_type", "(", ")", "in", "(", "0", ",", "1", ")", ":", "label_variable", "=", "scope", ".", "declare_local_variable", "(", "'label'", ",", ...
This function handles all non-pipeline models. :param scope: Scope object :param model: A libsvm object (e.g., OneHotEncoder and LogisticRegression) :param inputs: A list of variables :return: A list of output variables which will be passed to next stage
[ "This", "function", "handles", "all", "non", "-", "pipeline", "models", "." ]
python
train
pybluez/pybluez
macos/_lightbluecommon.py
https://github.com/pybluez/pybluez/blob/e0dc4093dcbaa3ecb3fa24f8ccf22bbfe6b57fc9/macos/_lightbluecommon.py#L73-L89
def _isbtaddr(address): """ Returns whether the given address is a valid bluetooth address. For example, "00:0e:6d:7b:a2:0a" is a valid address. Returns False if the argument is None or is not a string. """ # Define validity regex. Accept either ":" or "-" as separators. global _validbtaddr if _validbtaddr is None: import re _validbtaddr = re.compile("((\d|[a-f]){2}(:|-)){5}(\d|[a-f]){2}", re.IGNORECASE) import types if not isinstance(address, str): return False return _validbtaddr.match(address) is not None
[ "def", "_isbtaddr", "(", "address", ")", ":", "# Define validity regex. Accept either \":\" or \"-\" as separators.", "global", "_validbtaddr", "if", "_validbtaddr", "is", "None", ":", "import", "re", "_validbtaddr", "=", "re", ".", "compile", "(", "\"((\\d|[a-f]){2}(:|-)...
Returns whether the given address is a valid bluetooth address. For example, "00:0e:6d:7b:a2:0a" is a valid address. Returns False if the argument is None or is not a string.
[ "Returns", "whether", "the", "given", "address", "is", "a", "valid", "bluetooth", "address", ".", "For", "example", "00", ":", "0e", ":", "6d", ":", "7b", ":", "a2", ":", "0a", "is", "a", "valid", "address", "." ]
python
train
tensorflow/hub
tensorflow_hub/feature_column.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L33-L80
def text_embedding_column(key, module_spec, trainable=False):
    """Uses a Module to construct a dense representation from a text feature.

    This feature column can be used on an input feature whose values are
    strings of arbitrary size.

    The result of this feature column is the result of passing its `input`
    through the module `m` instantiated from `module_spec`, as per
    `result = m(input)`. The `result` must have dtype float32 and shape
    `[batch_size, num_features]` with a known value of num_features.

    Example:

    ```python
      comment = text_embedding_column("comment", "/tmp/text-module")
      feature_columns = [comment, ...]
      ...
      features = {
        "comment": np.array(["wow, much amazing", "so easy", ...]),
        ...
      }
      labels = np.array([[1], [0], ...])
      # If running TF 2.x, use `tf.compat.v1.estimator.inputs.numpy_input_fn`
      input_fn = tf.estimator.inputs.numpy_input_fn(features, labels,
                                                    shuffle=True)
      estimator = tf.estimator.DNNClassifier(hidden_units, feature_columns)
      estimator.train(input_fn, max_steps=100)
    ```

    Args:
      key: A string or `_FeatureColumn` identifying the text feature.
      module_spec: A ModuleSpec defining the Module to instantiate or a path
        where to load a ModuleSpec via `load_module_spec`
      trainable: Whether or not the Module is trainable. False by default,
        meaning the pre-trained weights are frozen. This is different from the
        ordinary tf.feature_column.embedding_column(), but that one is intended
        for training from scratch.

    Returns:
      `_DenseColumn` that converts from text input.

    Raises:
      ValueError: if module_spec is not suitable for use in this feature
        column.
    """
    # Resolve a path into a ModuleSpec and fail fast if the module does
    # not satisfy the text-embedding contract before building the column.
    module_spec = module.as_module_spec(module_spec)
    _check_module_is_text_embedding(module_spec)
    return _TextEmbeddingColumn(key=key, module_spec=module_spec,
                                trainable=trainable)
[ "def", "text_embedding_column", "(", "key", ",", "module_spec", ",", "trainable", "=", "False", ")", ":", "module_spec", "=", "module", ".", "as_module_spec", "(", "module_spec", ")", "_check_module_is_text_embedding", "(", "module_spec", ")", "return", "_TextEmbedd...
Uses a Module to construct a dense representation from a text feature. This feature column can be used on an input feature whose values are strings of arbitrary size. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m(input)`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python comment = text_embedding_column("comment", "/tmp/text-module") feature_columns = [comment, ...] ... features = { "comment": np.array(["wow, much amazing", "so easy", ...]), ... } labels = np.array([[1], [0], ...]) # If running TF 2.x, use `tf.compat.v1.estimator.inputs.numpy_input_fn` input_fn = tf.estimator.inputs.numpy_input_fn(features, labels, shuffle=True) estimator = tf.estimator.DNNClassifier(hidden_units, feature_columns) estimator.train(input_fn, max_steps=100) ``` Args: key: A string or `_FeatureColumn` identifying the text feature. module_spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec via `load_module_spec` trainable: Whether or not the Module is trainable. False by default, meaning the pre-trained weights are frozen. This is different from the ordinary tf.feature_column.embedding_column(), but that one is intended for training from scratch. Returns: `_DenseColumn` that converts from text input. Raises: ValueError: if module_spec is not suitable for use in this feature column.
[ "Uses", "a", "Module", "to", "construct", "a", "dense", "representation", "from", "a", "text", "feature", "." ]
python
train
xflows/rdm
rdm/db/converters.py
https://github.com/xflows/rdm/blob/d984e2a0297e5fa8d799953bbd0dba79b05d403d/rdm/db/converters.py#L51-L64
def mode(self, predicate, args, recall=1, head=False): ''' Emits mode declarations in Aleph-like format. :param predicate: predicate name :param args: predicate arguments with input/output specification, e.g.: >>> [('+', 'train'), ('-', 'car')] :param recall: recall setting (see `Aleph manual <http://www.cs.ox.ac.uk/activities/machinelearning/Aleph/aleph>`_) :param head: set to True for head clauses ''' return ':- mode%s(%s, %s(%s)).' % ( 'h' if head else 'b', str(recall), predicate, ','.join([t + arg for t, arg in args]))
[ "def", "mode", "(", "self", ",", "predicate", ",", "args", ",", "recall", "=", "1", ",", "head", "=", "False", ")", ":", "return", "':- mode%s(%s, %s(%s)).'", "%", "(", "'h'", "if", "head", "else", "'b'", ",", "str", "(", "recall", ")", ",", "predica...
Emits mode declarations in Aleph-like format. :param predicate: predicate name :param args: predicate arguments with input/output specification, e.g.: >>> [('+', 'train'), ('-', 'car')] :param recall: recall setting (see `Aleph manual <http://www.cs.ox.ac.uk/activities/machinelearning/Aleph/aleph>`_) :param head: set to True for head clauses
[ "Emits", "mode", "declarations", "in", "Aleph", "-", "like", "format", "." ]
python
train
CI-WATER/mapkit
mapkit/RasterConverter.py
https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterConverter.py#L1097-L1107
def isNumber(self, value): """ Validate whether a value is a number or not """ try: str(value) float(value) return True except ValueError: return False
[ "def", "isNumber", "(", "self", ",", "value", ")", ":", "try", ":", "str", "(", "value", ")", "float", "(", "value", ")", "return", "True", "except", "ValueError", ":", "return", "False" ]
Validate whether a value is a number or not
[ "Validate", "whether", "a", "value", "is", "a", "number", "or", "not" ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/common.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/common.py#L23-L53
def compile_graphql_to_match(schema, graphql_string, type_equivalence_hints=None): """Compile the GraphQL input using the schema into a MATCH query and associated metadata. Args: schema: GraphQL schema object describing the schema of the graph to be queried graphql_string: the GraphQL query to compile to MATCH, as a string type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: a CompilationResult object """ lowering_func = ir_lowering_match.lower_ir query_emitter_func = emit_match.emit_code_from_ir return _compile_graphql_generic( MATCH_LANGUAGE, lowering_func, query_emitter_func, schema, graphql_string, type_equivalence_hints, None)
[ "def", "compile_graphql_to_match", "(", "schema", ",", "graphql_string", ",", "type_equivalence_hints", "=", "None", ")", ":", "lowering_func", "=", "ir_lowering_match", ".", "lower_ir", "query_emitter_func", "=", "emit_match", ".", "emit_code_from_ir", "return", "_comp...
Compile the GraphQL input using the schema into a MATCH query and associated metadata. Args: schema: GraphQL schema object describing the schema of the graph to be queried graphql_string: the GraphQL query to compile to MATCH, as a string type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: a CompilationResult object
[ "Compile", "the", "GraphQL", "input", "using", "the", "schema", "into", "a", "MATCH", "query", "and", "associated", "metadata", "." ]
python
train
jeffh/rpi_courses
rpi_courses/scheduler.py
https://github.com/jeffh/rpi_courses/blob/c97176f73f866f112c785910ebf3ff8a790e8e9a/rpi_courses/scheduler.py#L128-L134
def create_variables(self, courses): """Internal use. Creates all variables in the problem instance for the given courses. If given a dict of {course: sections}, will use the provided sections. """ has_sections = isinstance(courses, dict) for course in courses: self.p.add_variable(course, courses.get(course, []) if has_sections else self.get_sections(course))
[ "def", "create_variables", "(", "self", ",", "courses", ")", ":", "has_sections", "=", "isinstance", "(", "courses", ",", "dict", ")", "for", "course", "in", "courses", ":", "self", ".", "p", ".", "add_variable", "(", "course", ",", "courses", ".", "get"...
Internal use. Creates all variables in the problem instance for the given courses. If given a dict of {course: sections}, will use the provided sections.
[ "Internal", "use", ".", "Creates", "all", "variables", "in", "the", "problem", "instance", "for", "the", "given", "courses", ".", "If", "given", "a", "dict", "of", "{", "course", ":", "sections", "}", "will", "use", "the", "provided", "sections", "." ]
python
train
melizalab/arf
arf.py
https://github.com/melizalab/arf/blob/71746d9edbe7993a783d4acaf84b9631f3230283/arf.py#L204-L238
def check_file_version(file): """Check the ARF version attribute of file for compatibility. Raises DeprecationWarning for backwards-incompatible files, FutureWarning for (potentially) forwards-incompatible files, and UserWarning for files that may not have been created by an ARF library. Returns the version for the file """ from distutils.version import StrictVersion as Version try: ver = file.attrs.get('arf_version', None) if ver is None: ver = file.attrs['arf_library_version'] except KeyError: raise UserWarning( "Unable to determine ARF version for {0.filename};" "created by another program?".format(file)) try: # if the attribute is stored as a string, it's ascii-encoded ver = ver.decode("ascii") except (LookupError, AttributeError): pass # should be backwards compatible after 1.1 file_version = Version(ver) if file_version < Version('1.1'): raise DeprecationWarning( "ARF library {} may have trouble reading file " "version {} (< 1.1)".format(version, file_version)) elif file_version >= Version('3.0'): raise FutureWarning( "ARF library {} may be incompatible with file " "version {} (>= 3.0)".format(version, file_version)) return file_version
[ "def", "check_file_version", "(", "file", ")", ":", "from", "distutils", ".", "version", "import", "StrictVersion", "as", "Version", "try", ":", "ver", "=", "file", ".", "attrs", ".", "get", "(", "'arf_version'", ",", "None", ")", "if", "ver", "is", "Non...
Check the ARF version attribute of file for compatibility. Raises DeprecationWarning for backwards-incompatible files, FutureWarning for (potentially) forwards-incompatible files, and UserWarning for files that may not have been created by an ARF library. Returns the version for the file
[ "Check", "the", "ARF", "version", "attribute", "of", "file", "for", "compatibility", "." ]
python
train
nderkach/airbnb-python
airbnb/api.py
https://github.com/nderkach/airbnb-python/blob/0b3ed69518e41383eca93ae11b24247f3cc69a27/airbnb/api.py#L186-L201
def get_calendar(self, listing_id, starting_month=datetime.datetime.now().month, starting_year=datetime.datetime.now().year, calendar_months=12): """ Get availability calendar for a given listing """ params = { 'year': str(starting_year), 'listing_id': str(listing_id), '_format': 'with_conditions', 'count': str(calendar_months), 'month': str(starting_month) } r = self._session.get(API_URL + "/calendar_months", params=params) r.raise_for_status() return r.json()
[ "def", "get_calendar", "(", "self", ",", "listing_id", ",", "starting_month", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "month", ",", "starting_year", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", ",", "calendar_...
Get availability calendar for a given listing
[ "Get", "availability", "calendar", "for", "a", "given", "listing" ]
python
train
zencoder/zencoder-py
zencoder/core.py
https://github.com/zencoder/zencoder-py/blob/9d762e33e2bb2edadb0e5da0bb80a61e27636426/zencoder/core.py#L329-L350
def create(self, input=None, live_stream=False, outputs=None, options=None): """ Creates a transcoding job. Here are some examples:: job.create('s3://zencodertesting/test.mov') job.create(live_stream=True) job.create(input='http://example.com/input.mov', outputs=({'label': 'test output'},)) https://app.zencoder.com/docs/api/jobs/create """ data = {"input": input, "test": self.test} if outputs: data['outputs'] = outputs if options: data.update(options) if live_stream: data['live_stream'] = live_stream return self.post(self.base_url, body=json.dumps(data))
[ "def", "create", "(", "self", ",", "input", "=", "None", ",", "live_stream", "=", "False", ",", "outputs", "=", "None", ",", "options", "=", "None", ")", ":", "data", "=", "{", "\"input\"", ":", "input", ",", "\"test\"", ":", "self", ".", "test", "...
Creates a transcoding job. Here are some examples:: job.create('s3://zencodertesting/test.mov') job.create(live_stream=True) job.create(input='http://example.com/input.mov', outputs=({'label': 'test output'},)) https://app.zencoder.com/docs/api/jobs/create
[ "Creates", "a", "transcoding", "job", ".", "Here", "are", "some", "examples", "::" ]
python
train
mushkevych/scheduler
launch.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/launch.py#L139-L145
def install_virtualenv_p3(root, python_version): """ Install virtual environment for Python 3.3+; removing the old one if it exists """ import venv builder = venv.EnvBuilder(system_site_packages=False, clear=True, symlinks=False, upgrade=False) builder.create(root) ret_code = subprocess.call([VE_SCRIPT, PROJECT_ROOT, root, python_version]) sys.exit(ret_code)
[ "def", "install_virtualenv_p3", "(", "root", ",", "python_version", ")", ":", "import", "venv", "builder", "=", "venv", ".", "EnvBuilder", "(", "system_site_packages", "=", "False", ",", "clear", "=", "True", ",", "symlinks", "=", "False", ",", "upgrade", "=...
Install virtual environment for Python 3.3+; removing the old one if it exists
[ "Install", "virtual", "environment", "for", "Python", "3", ".", "3", "+", ";", "removing", "the", "old", "one", "if", "it", "exists" ]
python
train
spyder-ide/spyder
spyder/plugins/ipythonconsole/widgets/shell.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/shell.py#L473-L476
def _context_menu_make(self, pos): """Reimplement the IPython context menu""" menu = super(ShellWidget, self)._context_menu_make(pos) return self.ipyclient.add_actions_to_context_menu(menu)
[ "def", "_context_menu_make", "(", "self", ",", "pos", ")", ":", "menu", "=", "super", "(", "ShellWidget", ",", "self", ")", ".", "_context_menu_make", "(", "pos", ")", "return", "self", ".", "ipyclient", ".", "add_actions_to_context_menu", "(", "menu", ")" ]
Reimplement the IPython context menu
[ "Reimplement", "the", "IPython", "context", "menu" ]
python
train
bcbio/bcbio-nextgen
bcbio/rnaseq/bcbiornaseq.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/bcbiornaseq.py#L49-L65
def rmarkdown_draft(filename, template, package): """ create a draft rmarkdown file from an installed template """ if file_exists(filename): return filename draft_template = Template( 'rmarkdown::draft("$filename", template="$template", package="$package", edit=FALSE)' ) draft_string = draft_template.substitute( filename=filename, template=template, package=package) report_dir = os.path.dirname(filename) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", "-e", draft_string], "Creating bcbioRNASeq quality control template.") do.run(["sed", "-i", "s/YYYY-MM-DD\///g", filename], "Editing bcbioRNAseq quality control template.") return filename
[ "def", "rmarkdown_draft", "(", "filename", ",", "template", ",", "package", ")", ":", "if", "file_exists", "(", "filename", ")", ":", "return", "filename", "draft_template", "=", "Template", "(", "'rmarkdown::draft(\"$filename\", template=\"$template\", package=\"$package...
create a draft rmarkdown file from an installed template
[ "create", "a", "draft", "rmarkdown", "file", "from", "an", "installed", "template" ]
python
train
Azure/Azure-MachineLearning-ClientLibrary-Python
azureml/services.py
https://github.com/Azure/Azure-MachineLearning-ClientLibrary-Python/blob/d1211b289747671898eb063013e0dc53d3c80acd/azureml/services.py#L896-L945
def publish(func_or_workspace_id, workspace_id_or_token = None, workspace_token_or_none = None, files=(), endpoint=None): '''publishes a callable function or decorates a function to be published. Returns a callable, iterable object. Calling the object will invoke the published service. Iterating the object will give the API URL, API key, and API help url. To define a function which will be published to Azure you can simply decorate it with the @publish decorator. This will publish the service, and then future calls to the function will run against the operationalized version of the service in the cloud. >>> @publish(workspace_id, workspace_token) >>> def func(a, b): >>> return a + b After publishing you can then invoke the function using: func.service(1, 2) Or continue to invoke the function locally: func(1, 2) You can also just call publish directly to publish a function: >>> def func(a, b): return a + b >>> >>> res = publish(func, workspace_id, workspace_token) >>> >>> url, api_key, help_url = res >>> res(2, 3) 5 >>> url, api_key, help_url = res.url, res.api_key, res.help_url The returned result will be the published service. You can specify a list of files which should be published along with the function. The resulting files will be stored in a subdirectory called 'Script Bundle'. The list of files can be one of: (('file1.txt', None), ) # file is read from disk (('file1.txt', b'contents'), ) # file contents are provided ('file1.txt', 'file2.txt') # files are read from disk, written with same filename ((('file1.txt', 'destname.txt'), None), ) # file is read from disk, written with different destination name The various formats for each filename can be freely mixed and matched. 
''' if not callable(func_or_workspace_id): def do_publish(func): func.service = _publish_worker(func, files, func_or_workspace_id, workspace_id_or_token, endpoint) return func return do_publish return _publish_worker(func_or_workspace_id, files, workspace_id_or_token, workspace_token_or_none, endpoint)
[ "def", "publish", "(", "func_or_workspace_id", ",", "workspace_id_or_token", "=", "None", ",", "workspace_token_or_none", "=", "None", ",", "files", "=", "(", ")", ",", "endpoint", "=", "None", ")", ":", "if", "not", "callable", "(", "func_or_workspace_id", ")...
publishes a callable function or decorates a function to be published. Returns a callable, iterable object. Calling the object will invoke the published service. Iterating the object will give the API URL, API key, and API help url. To define a function which will be published to Azure you can simply decorate it with the @publish decorator. This will publish the service, and then future calls to the function will run against the operationalized version of the service in the cloud. >>> @publish(workspace_id, workspace_token) >>> def func(a, b): >>> return a + b After publishing you can then invoke the function using: func.service(1, 2) Or continue to invoke the function locally: func(1, 2) You can also just call publish directly to publish a function: >>> def func(a, b): return a + b >>> >>> res = publish(func, workspace_id, workspace_token) >>> >>> url, api_key, help_url = res >>> res(2, 3) 5 >>> url, api_key, help_url = res.url, res.api_key, res.help_url The returned result will be the published service. You can specify a list of files which should be published along with the function. The resulting files will be stored in a subdirectory called 'Script Bundle'. The list of files can be one of: (('file1.txt', None), ) # file is read from disk (('file1.txt', b'contents'), ) # file contents are provided ('file1.txt', 'file2.txt') # files are read from disk, written with same filename ((('file1.txt', 'destname.txt'), None), ) # file is read from disk, written with different destination name The various formats for each filename can be freely mixed and matched.
[ "publishes", "a", "callable", "function", "or", "decorates", "a", "function", "to", "be", "published", "." ]
python
test
ANTsX/ANTsPy
ants/contrib/sampling/affine3d.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/contrib/sampling/affine3d.py#L382-L435
def transform(self, X=None, y=None): """ Transform an image using an Affine transform with the given rotation parameters. Return the transform if X=None. Arguments --------- X : ANTsImage Image to transform y : ANTsImage (optional) Another image to transform Returns ------- ANTsImage if y is None, else a tuple of ANTsImage types Examples -------- >>> import ants >>> img = ants.image_read(ants.get_data('ch2')) >>> tx = ants.contrib.Rotate3D(rotation=(10,-5,12)) >>> img2 = tx.transform(img) """ # unpack zoom range rotation_x, rotation_y, rotation_z = self.rotation # Rotation about X axis theta_x = math.pi / 180 * rotation_x rotate_matrix_x = np.array([[1, 0, 0, 0], [0, math.cos(theta_x), -math.sin(theta_x), 0], [0, math.sin(theta_x), math.cos(theta_x), 0], [0,0,0,1]]) # Rotation about Y axis theta_y = math.pi / 180 * rotation_y rotate_matrix_y = np.array([[math.cos(theta_y), 0, math.sin(theta_y), 0], [0, 1, 0, 0], [-math.sin(theta_y), 0, math.cos(theta_y), 0], [0,0,0,1]]) # Rotation about Z axis theta_z = math.pi / 180 * rotation_z rotate_matrix_z = np.array([[math.cos(theta_z), -math.sin(theta_z), 0, 0], [math.sin(theta_z), math.cos(theta_z), 0, 0], [0, 0, 1, 0], [0,0,0,1]]) rotate_matrix = rotate_matrix_x.dot(rotate_matrix_y).dot(rotate_matrix_z)[:3,:] self.tx.set_parameters(rotate_matrix) if self.lazy or X is None: return self.tx else: return self.tx.apply_to_image(X, reference=self.reference)
[ "def", "transform", "(", "self", ",", "X", "=", "None", ",", "y", "=", "None", ")", ":", "# unpack zoom range", "rotation_x", ",", "rotation_y", ",", "rotation_z", "=", "self", ".", "rotation", "# Rotation about X axis", "theta_x", "=", "math", ".", "pi", ...
Transform an image using an Affine transform with the given rotation parameters. Return the transform if X=None. Arguments --------- X : ANTsImage Image to transform y : ANTsImage (optional) Another image to transform Returns ------- ANTsImage if y is None, else a tuple of ANTsImage types Examples -------- >>> import ants >>> img = ants.image_read(ants.get_data('ch2')) >>> tx = ants.contrib.Rotate3D(rotation=(10,-5,12)) >>> img2 = tx.transform(img)
[ "Transform", "an", "image", "using", "an", "Affine", "transform", "with", "the", "given", "rotation", "parameters", ".", "Return", "the", "transform", "if", "X", "=", "None", "." ]
python
train
zenotech/SysScribe
sysscribe/__init__.py
https://github.com/zenotech/SysScribe/blob/8cabfc9718e7ccc6d217fbcfc158dd255b28c9b1/sysscribe/__init__.py#L58-L72
def netdevs(): ''' RX and TX bytes for each of the network devices ''' with open('/proc/net/dev') as f: net_dump = f.readlines() device_data={} data = namedtuple('data',['rx','tx']) for line in net_dump[2:]: line = line.split(':') if line[0].strip() != 'lo': device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0), float(line[1].split()[8])/(1024.0*1024.0)) return device_data
[ "def", "netdevs", "(", ")", ":", "with", "open", "(", "'/proc/net/dev'", ")", "as", "f", ":", "net_dump", "=", "f", ".", "readlines", "(", ")", "device_data", "=", "{", "}", "data", "=", "namedtuple", "(", "'data'", ",", "[", "'rx'", ",", "'tx'", "...
RX and TX bytes for each of the network devices
[ "RX", "and", "TX", "bytes", "for", "each", "of", "the", "network", "devices" ]
python
train
woolfson-group/isambard
isambard/ampal/analyse_protein.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/analyse_protein.py#L264-L289
def cc_to_local_params(pitch, radius, oligo): """Returns local parameters for an oligomeric assembly. Parameters ---------- pitch : float Pitch of assembly radius : float Radius of assembly oligo : int Oligomeric state of assembly Returns ------- pitchloc : float Local pitch of assembly (between 2 adjacent component helices) rloc : float Local radius of assembly alphaloc : float Local pitch-angle of assembly """ rloc = numpy.sin(numpy.pi / oligo) * radius alpha = numpy.arctan((2 * numpy.pi * radius) / pitch) alphaloc = numpy.cos((numpy.pi / 2) - ((numpy.pi) / oligo)) * alpha pitchloc = (2 * numpy.pi * rloc) / numpy.tan(alphaloc) return pitchloc, rloc, numpy.rad2deg(alphaloc)
[ "def", "cc_to_local_params", "(", "pitch", ",", "radius", ",", "oligo", ")", ":", "rloc", "=", "numpy", ".", "sin", "(", "numpy", ".", "pi", "/", "oligo", ")", "*", "radius", "alpha", "=", "numpy", ".", "arctan", "(", "(", "2", "*", "numpy", ".", ...
Returns local parameters for an oligomeric assembly. Parameters ---------- pitch : float Pitch of assembly radius : float Radius of assembly oligo : int Oligomeric state of assembly Returns ------- pitchloc : float Local pitch of assembly (between 2 adjacent component helices) rloc : float Local radius of assembly alphaloc : float Local pitch-angle of assembly
[ "Returns", "local", "parameters", "for", "an", "oligomeric", "assembly", "." ]
python
train
KnightConan/sspdatatables
src/sspdatatables/utils/decorator.py
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/decorator.py#L41-L59
def generate_error_json_response(error_dict, error_response_context=None): """ Intends to build an error json response. If the error_response_context is None, then we generate this response using data tables format :param error_dict: str/dict: contains the error message(s) :param error_response_context: None/dict: context dictionary to render, if error occurs :return: JsonResponse """ response = error_dict if isinstance(error_dict, str): response = {"error": response} if error_response_context is None: error_response_context = { 'draw': 0, 'recordsTotal': 0, 'recordsFiltered': 0, 'data': [] } response.update(error_response_context) return JsonResponse(response)
[ "def", "generate_error_json_response", "(", "error_dict", ",", "error_response_context", "=", "None", ")", ":", "response", "=", "error_dict", "if", "isinstance", "(", "error_dict", ",", "str", ")", ":", "response", "=", "{", "\"error\"", ":", "response", "}", ...
Intends to build an error json response. If the error_response_context is None, then we generate this response using data tables format :param error_dict: str/dict: contains the error message(s) :param error_response_context: None/dict: context dictionary to render, if error occurs :return: JsonResponse
[ "Intends", "to", "build", "an", "error", "json", "response", ".", "If", "the", "error_response_context", "is", "None", "then", "we", "generate", "this", "response", "using", "data", "tables", "format" ]
python
train
ibis-project/ibis
ibis/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/client.py#L164-L186
def execute(self, expr, params=None, limit='default', **kwargs): """ Compile and execute Ibis expression using this backend client interface, returning results in-memory in the appropriate object type Parameters ---------- expr : Expr limit : int, default None For expressions yielding result yets; retrieve at most this number of values/rows. Overrides any limit already set on the expression. params : not yet implemented Returns ------- output : input type dependent Table expressions: pandas.DataFrame Array expressions: pandas.Series Scalar expressions: Python scalar value """ query_ast = self._build_ast_ensure_limit(expr, limit, params=params) result = self._execute_query(query_ast, **kwargs) return result
[ "def", "execute", "(", "self", ",", "expr", ",", "params", "=", "None", ",", "limit", "=", "'default'", ",", "*", "*", "kwargs", ")", ":", "query_ast", "=", "self", ".", "_build_ast_ensure_limit", "(", "expr", ",", "limit", ",", "params", "=", "params"...
Compile and execute Ibis expression using this backend client interface, returning results in-memory in the appropriate object type Parameters ---------- expr : Expr limit : int, default None For expressions yielding result yets; retrieve at most this number of values/rows. Overrides any limit already set on the expression. params : not yet implemented Returns ------- output : input type dependent Table expressions: pandas.DataFrame Array expressions: pandas.Series Scalar expressions: Python scalar value
[ "Compile", "and", "execute", "Ibis", "expression", "using", "this", "backend", "client", "interface", "returning", "results", "in", "-", "memory", "in", "the", "appropriate", "object", "type" ]
python
train
aio-libs/aiohttp
aiohttp/multipart.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/multipart.py#L407-L413
async def json(self, *, encoding: Optional[str]=None) -> Any: """Like read(), but assumes that body parts contains JSON data.""" data = await self.read(decode=True) if not data: return None encoding = encoding or self.get_charset(default='utf-8') return json.loads(data.decode(encoding))
[ "async", "def", "json", "(", "self", ",", "*", ",", "encoding", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Any", ":", "data", "=", "await", "self", ".", "read", "(", "decode", "=", "True", ")", "if", "not", "data", ":", "return", ...
Like read(), but assumes that body parts contains JSON data.
[ "Like", "read", "()", "but", "assumes", "that", "body", "parts", "contains", "JSON", "data", "." ]
python
train
jeffh/describe
describe/spec/runners.py
https://github.com/jeffh/describe/blob/6a33ffecc3340b57e60bc8a7095521882ff9a156/describe/spec/runners.py#L110-L118
def _execute_example_group(self): "Handles the execution of Example Group" for example in self.example: runner = self.__class__(example, self.formatter) runner.is_root_runner = False successes, failures, skipped = runner.run(self.context) self.num_successes += successes self.num_failures += failures self.num_skipped += skipped
[ "def", "_execute_example_group", "(", "self", ")", ":", "for", "example", "in", "self", ".", "example", ":", "runner", "=", "self", ".", "__class__", "(", "example", ",", "self", ".", "formatter", ")", "runner", ".", "is_root_runner", "=", "False", "succes...
Handles the execution of Example Group
[ "Handles", "the", "execution", "of", "Example", "Group" ]
python
train
theonion/django-bulbs
bulbs/videos/models.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/videos/models.py#L118-L136
def get_hub_url(self): """gets a canonical path to the detail page of the video on the hub :return: the path to the consumer ui detail page of the video :rtype: str """ url = getattr(settings, "VIDEOHUB_VIDEO_URL", self.DEFAULT_VIDEOHUB_VIDEO_URL) # slugify needs ascii ascii_title = "" if isinstance(self.title, str): ascii_title = self.title elif six.PY2 and isinstance(self.title, six.text_type): # Legacy unicode conversion ascii_title = self.title.encode('ascii', 'replace') path = slugify("{}-{}".format(ascii_title, self.id)) return url.format(path)
[ "def", "get_hub_url", "(", "self", ")", ":", "url", "=", "getattr", "(", "settings", ",", "\"VIDEOHUB_VIDEO_URL\"", ",", "self", ".", "DEFAULT_VIDEOHUB_VIDEO_URL", ")", "# slugify needs ascii", "ascii_title", "=", "\"\"", "if", "isinstance", "(", "self", ".", "t...
gets a canonical path to the detail page of the video on the hub :return: the path to the consumer ui detail page of the video :rtype: str
[ "gets", "a", "canonical", "path", "to", "the", "detail", "page", "of", "the", "video", "on", "the", "hub" ]
python
train
Gandi/gandi.cli
gandi/cli/commands/webacc.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/webacc.py#L154-L185
def create(gandi, name, datacenter, backend, port, vhost, algorithm, ssl_enable, zone_alter, ssl, private_key, poll_cert): """ Create a webaccelerator """ try: gandi.datacenter.is_opened(datacenter, 'iaas') except DatacenterLimited as exc: gandi.echo('/!\ Datacenter %s will be closed on %s, ' 'please consider using another datacenter.' % (datacenter, exc.date)) backends = backend for backend in backends: # Check if a port is set for each backend, else set a default port if 'port' not in backend: if not port: backend['port'] = click.prompt('Please set a port for ' 'backends. If you want to set ' 'different port for each ' 'backend, use `-b ip:port`', type=int) else: backend['port'] = port if vhost and not gandi.hostedcert.activate_ssl(vhost, ssl, private_key, poll_cert): return result = gandi.webacc.create(name, datacenter, backends, vhost, algorithm, ssl_enable, zone_alter) return result
[ "def", "create", "(", "gandi", ",", "name", ",", "datacenter", ",", "backend", ",", "port", ",", "vhost", ",", "algorithm", ",", "ssl_enable", ",", "zone_alter", ",", "ssl", ",", "private_key", ",", "poll_cert", ")", ":", "try", ":", "gandi", ".", "dat...
Create a webaccelerator
[ "Create", "a", "webaccelerator" ]
python
train
joke2k/django-faker
django_faker/templatetags/fakers.py
https://github.com/joke2k/django-faker/blob/345e3eebcf636e2566d9890ae7b35788ebdb5173/django_faker/templatetags/fakers.py#L96-L108
def do_fake_filter( formatter, arg=None ): """ call a faker format uses: {{ 'randomElement'|fake:mylist }} {% if 'boolean'|fake:30 %} .. {% endif %} {% for word in 'words'|fake:times %}{{ word }}\n{% endfor %} """ args = [] if not arg is None: args.append(arg) return Faker.getGenerator().format( formatter, *args )
[ "def", "do_fake_filter", "(", "formatter", ",", "arg", "=", "None", ")", ":", "args", "=", "[", "]", "if", "not", "arg", "is", "None", ":", "args", ".", "append", "(", "arg", ")", "return", "Faker", ".", "getGenerator", "(", ")", ".", "format", "("...
call a faker format uses: {{ 'randomElement'|fake:mylist }} {% if 'boolean'|fake:30 %} .. {% endif %} {% for word in 'words'|fake:times %}{{ word }}\n{% endfor %}
[ "call", "a", "faker", "format", "uses", ":" ]
python
train
locationlabs/mockredis
mockredis/client.py
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L424-L446
def msetnx(self, *args, **kwargs): """ Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful. """ if args: if len(args) != 1 or not isinstance(args[0], dict): raise RedisError('MSETNX requires **kwargs or a single dict arg') mapping = args[0] else: mapping = kwargs if len(mapping) == 0: raise ResponseError("wrong number of arguments for 'msetnx' command") for key in mapping.keys(): if self._encode(key) in self.redis: return False for key, value in mapping.items(): self.set(key, value) return True
[ "def", "msetnx", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", ":", "if", "len", "(", "args", ")", "!=", "1", "or", "not", "isinstance", "(", "args", "[", "0", "]", ",", "dict", ")", ":", "raise", "RedisError",...
Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful.
[ "Sets", "key", "/", "values", "based", "on", "a", "mapping", "if", "none", "of", "the", "keys", "are", "already", "set", ".", "Mapping", "can", "be", "supplied", "as", "a", "single", "dictionary", "argument", "or", "as", "kwargs", ".", "Returns", "a", ...
python
train
push-things/django-th
django_th/publish.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/django_th/publish.py#L26-L35
def update_trigger(self, service): """ update the date when occurs the trigger :param service: service object to update """ now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ') TriggerService.objects.filter(id=service.id).update(date_triggered=now, consumer_failed=0, provider_failed=0, )
[ "def", "update_trigger", "(", "self", ",", "service", ")", ":", "now", "=", "arrow", ".", "utcnow", "(", ")", ".", "to", "(", "settings", ".", "TIME_ZONE", ")", ".", "format", "(", "'YYYY-MM-DD HH:mm:ssZZ'", ")", "TriggerService", ".", "objects", ".", "f...
update the date when occurs the trigger :param service: service object to update
[ "update", "the", "date", "when", "occurs", "the", "trigger", ":", "param", "service", ":", "service", "object", "to", "update" ]
python
train
pikepdf/pikepdf
src/pikepdf/_methods.py
https://github.com/pikepdf/pikepdf/blob/07154f4dec007e2e9c0c6a8c07b964fd06bc5f77/src/pikepdf/_methods.py#L76-L83
def _single_page_pdf(page): """Construct a single page PDF from the provided page in memory""" pdf = Pdf.new() pdf.pages.append(page) bio = BytesIO() pdf.save(bio) bio.seek(0) return bio.read()
[ "def", "_single_page_pdf", "(", "page", ")", ":", "pdf", "=", "Pdf", ".", "new", "(", ")", "pdf", ".", "pages", ".", "append", "(", "page", ")", "bio", "=", "BytesIO", "(", ")", "pdf", ".", "save", "(", "bio", ")", "bio", ".", "seek", "(", "0",...
Construct a single page PDF from the provided page in memory
[ "Construct", "a", "single", "page", "PDF", "from", "the", "provided", "page", "in", "memory" ]
python
train
senaite/senaite.core
bika/lims/workflow/worksheet/guards.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/worksheet/guards.py#L26-L57
def guard_submit(obj): """Returns if 'submit' transition can be applied to the worksheet passed in. By default, the target state for the 'submit' transition for a worksheet is 'to_be_verified', so this guard returns true if all the analyses assigned to the worksheet have already been submitted. Those analyses that are in a non-valid state (cancelled, inactive) are dismissed in the evaluation, but at least one analysis must be in an active state (and submitted) for this guard to return True. Otherwise, always returns False. Note this guard depends entirely on the current status of the children. """ analyses = obj.getAnalyses() if not analyses: # An empty worksheet cannot be submitted return False can_submit = False for analysis in obj.getAnalyses(): # Dismiss analyses that are not active if not api.is_active(analysis): continue # Dismiss analyses that have been rejected or retracted if api.get_workflow_status_of(analysis) in ["rejected", "retracted"]: continue # Worksheet cannot be submitted if there is one analysis not submitted can_submit = ISubmitted.providedBy(analysis) if not can_submit: # No need to look further return False # This prevents the submission of the worksheet if all its analyses are in # a detached status (rejected, retracted or cancelled) return can_submit
[ "def", "guard_submit", "(", "obj", ")", ":", "analyses", "=", "obj", ".", "getAnalyses", "(", ")", "if", "not", "analyses", ":", "# An empty worksheet cannot be submitted", "return", "False", "can_submit", "=", "False", "for", "analysis", "in", "obj", ".", "ge...
Returns if 'submit' transition can be applied to the worksheet passed in. By default, the target state for the 'submit' transition for a worksheet is 'to_be_verified', so this guard returns true if all the analyses assigned to the worksheet have already been submitted. Those analyses that are in a non-valid state (cancelled, inactive) are dismissed in the evaluation, but at least one analysis must be in an active state (and submitted) for this guard to return True. Otherwise, always returns False. Note this guard depends entirely on the current status of the children.
[ "Returns", "if", "submit", "transition", "can", "be", "applied", "to", "the", "worksheet", "passed", "in", ".", "By", "default", "the", "target", "state", "for", "the", "submit", "transition", "for", "a", "worksheet", "is", "to_be_verified", "so", "this", "g...
python
train
allenai/allennlp
scripts/convert_openie_to_conll.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/scripts/convert_openie_to_conll.py#L186-L196
def parse_element(raw_element: str) -> List[Element]: """ Parse a raw element into text and indices (integers). """ elements = [regex.match("^(([a-zA-Z]+)\(([^;]+),List\(([^;]*)\)\))$", elem.lstrip().rstrip()) for elem in raw_element.split(';')] return [interpret_element(*elem.groups()[1:]) for elem in elements if elem]
[ "def", "parse_element", "(", "raw_element", ":", "str", ")", "->", "List", "[", "Element", "]", ":", "elements", "=", "[", "regex", ".", "match", "(", "\"^(([a-zA-Z]+)\\(([^;]+),List\\(([^;]*)\\)\\))$\"", ",", "elem", ".", "lstrip", "(", ")", ".", "rstrip", ...
Parse a raw element into text and indices (integers).
[ "Parse", "a", "raw", "element", "into", "text", "and", "indices", "(", "integers", ")", "." ]
python
train
NeuralEnsemble/lazyarray
lazyarray.py
https://github.com/NeuralEnsemble/lazyarray/blob/391a4cef3be85309c36adac0c17824de3d82f5be/lazyarray.py#L540-L549
def _build_ufunc(func): """Return a ufunc that works with lazy arrays""" def larray_compatible_ufunc(x): if isinstance(x, larray): y = deepcopy(x) y.apply(func) return y else: return func(x) return larray_compatible_ufunc
[ "def", "_build_ufunc", "(", "func", ")", ":", "def", "larray_compatible_ufunc", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "larray", ")", ":", "y", "=", "deepcopy", "(", "x", ")", "y", ".", "apply", "(", "func", ")", "return", "y", "els...
Return a ufunc that works with lazy arrays
[ "Return", "a", "ufunc", "that", "works", "with", "lazy", "arrays" ]
python
train
mk-fg/txu1
txu1/api_v1.py
https://github.com/mk-fg/txu1/blob/0326e9105f3cf9efa17a3d2ed1dd5606e0ad57d6/txu1/api_v1.py#L61-L64
def force_bytes(bytes_or_unicode, encoding='utf-8', errors='backslashreplace'): 'Convert passed string type to bytes, if necessary.' if isinstance(bytes_or_unicode, bytes): return bytes_or_unicode return bytes_or_unicode.encode(encoding, errors)
[ "def", "force_bytes", "(", "bytes_or_unicode", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'backslashreplace'", ")", ":", "if", "isinstance", "(", "bytes_or_unicode", ",", "bytes", ")", ":", "return", "bytes_or_unicode", "return", "bytes_or_unicode", "."...
Convert passed string type to bytes, if necessary.
[ "Convert", "passed", "string", "type", "to", "bytes", "if", "necessary", "." ]
python
train
LionelAuroux/pyrser
pyrser/type_system/symbol.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/type_system/symbol.py#L39-L49
def get_scope_list(self) -> list: """ Return the list of all contained scope from global to local """ # by default only return scoped name lstparent = [self] p = self.get_parent() while p is not None: lstparent.append(p) p = p.get_parent() return lstparent
[ "def", "get_scope_list", "(", "self", ")", "->", "list", ":", "# by default only return scoped name", "lstparent", "=", "[", "self", "]", "p", "=", "self", ".", "get_parent", "(", ")", "while", "p", "is", "not", "None", ":", "lstparent", ".", "append", "("...
Return the list of all contained scope from global to local
[ "Return", "the", "list", "of", "all", "contained", "scope", "from", "global", "to", "local" ]
python
test
senaite/senaite.core
bika/lims/workflow/analysis/events.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/analysis/events.py#L85-L114
def after_submit(analysis): """Method triggered after a 'submit' transition for the analysis passed in is performed. Promotes the submit transition to the Worksheet to which the analysis belongs to. Note that for the worksheet there is already a guard that assures the transition to the worksheet will only be performed if all analyses within the worksheet have already been transitioned. This function is called automatically by bika.lims.workfow.AfterTransitionEventHandler """ # Mark this analysis as ISubmitted alsoProvides(analysis, ISubmitted) # Promote to analyses this analysis depends on promote_to_dependencies(analysis, "submit") # TODO: REFLEX TO REMOVE # Do all the reflex rules process if IRequestAnalysis.providedBy(analysis): analysis._reflex_rule_process('submit') # Promote transition to worksheet ws = analysis.getWorksheet() if ws: doActionFor(ws, 'submit') push_reindex_to_actions_pool(ws) # Promote transition to Analysis Request if IRequestAnalysis.providedBy(analysis): doActionFor(analysis.getRequest(), 'submit') reindex_request(analysis)
[ "def", "after_submit", "(", "analysis", ")", ":", "# Mark this analysis as ISubmitted", "alsoProvides", "(", "analysis", ",", "ISubmitted", ")", "# Promote to analyses this analysis depends on", "promote_to_dependencies", "(", "analysis", ",", "\"submit\"", ")", "# TODO: REFL...
Method triggered after a 'submit' transition for the analysis passed in is performed. Promotes the submit transition to the Worksheet to which the analysis belongs to. Note that for the worksheet there is already a guard that assures the transition to the worksheet will only be performed if all analyses within the worksheet have already been transitioned. This function is called automatically by bika.lims.workfow.AfterTransitionEventHandler
[ "Method", "triggered", "after", "a", "submit", "transition", "for", "the", "analysis", "passed", "in", "is", "performed", ".", "Promotes", "the", "submit", "transition", "to", "the", "Worksheet", "to", "which", "the", "analysis", "belongs", "to", ".", "Note", ...
python
train
RetailMeNotSandbox/acky
acky/s3.py
https://github.com/RetailMeNotSandbox/acky/blob/fcd4d092c42892ede7c924cafc41e9cf4be3fb9f/acky/s3.py#L60-L73
def create(self, url): """Create a bucket, directory, or empty file.""" bucket, obj_key = _parse_url(url) if not bucket: raise InvalidURL(url, "You must specify a bucket and (optional) path") if obj_key: target = "/".join((bucket, obj_key)) else: target = bucket return self.call("CreateBucket", bucket=target)
[ "def", "create", "(", "self", ",", "url", ")", ":", "bucket", ",", "obj_key", "=", "_parse_url", "(", "url", ")", "if", "not", "bucket", ":", "raise", "InvalidURL", "(", "url", ",", "\"You must specify a bucket and (optional) path\"", ")", "if", "obj_key", "...
Create a bucket, directory, or empty file.
[ "Create", "a", "bucket", "directory", "or", "empty", "file", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/__init__.py#L97-L123
def _set_mep_id(self, v, load=False):
    """
    Setter method for mep_id, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/mep_id (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mep_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mep_id() directly.
    """
    # mep_id is a list key: direct assignment is only allowed while the
    # parent list entry is being loaded, otherwise the list index would
    # no longer match the stored key.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")

    # Unwrap values that carry their underlying YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap with the full restriction chain (uint32 further limited
        # to 1..8191) so out-of-range values are rejected here.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="mep-id", rest_name="mep-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DECIMAL :: <1-8191>', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mep_id must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="mep-id", rest_name="mep-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DECIMAL :: <1-8191>', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='uint32', is_config=True)""",
        })

    self.__mep_id = t
    # Propagate the change if the generated class defines a _set hook.
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_mep_id", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "parent", "=", "getattr", "(", "self", ",", "\"_parent\"", ",", "None", ")", "if", "parent", "is", "not", "None", "and", "load", "is", "False", ":", "raise", "Attribute...
Setter method for mep_id, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/mep_id (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_mep_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mep_id() directly.
[ "Setter", "method", "for", "mep_id", "mapped", "from", "YANG", "variable", "/", "protocol", "/", "cfm", "/", "domain_name", "/", "ma_name", "/", "cfm_ma_sub_commands", "/", "mep", "/", "mep_id", "(", "uint32", ")", "If", "this", "variable", "is", "read", "...
python
train
xi/ldif3
ldif3.py
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L116-L124
def _needs_base64_encoding(self, attr_type, attr_value): """Return True if attr_value has to be base-64 encoded. This is the case because of special chars or because attr_type is in self._base64_attrs """ return attr_type.lower() in self._base64_attrs or \ isinstance(attr_value, bytes) or \ UNSAFE_STRING_RE.search(attr_value) is not None
[ "def", "_needs_base64_encoding", "(", "self", ",", "attr_type", ",", "attr_value", ")", ":", "return", "attr_type", ".", "lower", "(", ")", "in", "self", ".", "_base64_attrs", "or", "isinstance", "(", "attr_value", ",", "bytes", ")", "or", "UNSAFE_STRING_RE", ...
Return True if attr_value has to be base-64 encoded. This is the case because of special chars or because attr_type is in self._base64_attrs
[ "Return", "True", "if", "attr_value", "has", "to", "be", "base", "-", "64", "encoded", "." ]
python
train
ejeschke/ginga
ginga/ImageView.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L353-L384
def set_window_size(self, width, height): """Report the size of the window to display the image. **Callbacks** Will call any callbacks registered for the ``'configure'`` event. Callbacks should have a method signature of:: (viewer, width, height, ...) .. note:: This is called by the subclass with ``width`` and ``height`` as soon as the actual dimensions of the allocated window are known. Parameters ---------- width : int The width of the window in pixels. height : int The height of the window in pixels. """ self._imgwin_wd = int(width) self._imgwin_ht = int(height) self._ctr_x = width // 2 self._ctr_y = height // 2 self.logger.debug("widget resized to %dx%d" % (width, height)) self.make_callback('configure', width, height) self.redraw(whence=0)
[ "def", "set_window_size", "(", "self", ",", "width", ",", "height", ")", ":", "self", ".", "_imgwin_wd", "=", "int", "(", "width", ")", "self", ".", "_imgwin_ht", "=", "int", "(", "height", ")", "self", ".", "_ctr_x", "=", "width", "//", "2", "self",...
Report the size of the window to display the image. **Callbacks** Will call any callbacks registered for the ``'configure'`` event. Callbacks should have a method signature of:: (viewer, width, height, ...) .. note:: This is called by the subclass with ``width`` and ``height`` as soon as the actual dimensions of the allocated window are known. Parameters ---------- width : int The width of the window in pixels. height : int The height of the window in pixels.
[ "Report", "the", "size", "of", "the", "window", "to", "display", "the", "image", "." ]
python
train
airspeed-velocity/asv
asv/extern/asizeof.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L1031-L1037
def kwds(self): '''Return all attributes as keywords dict. ''' # no dict(refs=self.refs, ..., kind=self.kind) in Python 2.0 return _kwds(base=self.base, item=self.item, leng=self.leng, refs=self.refs, both=self.both, kind=self.kind, type=self.type)
[ "def", "kwds", "(", "self", ")", ":", "# no dict(refs=self.refs, ..., kind=self.kind) in Python 2.0", "return", "_kwds", "(", "base", "=", "self", ".", "base", ",", "item", "=", "self", ".", "item", ",", "leng", "=", "self", ".", "leng", ",", "refs", "=", ...
Return all attributes as keywords dict.
[ "Return", "all", "attributes", "as", "keywords", "dict", "." ]
python
train
pybel/pybel
src/pybel/struct/grouping/provenance.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/grouping/provenance.py#L15-L32
def get_subgraphs_by_citation(graph): """Stratify the graph based on citations. :type graph: pybel.BELGraph :rtype: dict[tuple[str,str],pybel.BELGraph] """ rv = defaultdict(graph.fresh_copy) for u, v, key, data in graph.edges(keys=True, data=True): if CITATION not in data: continue dk = data[CITATION][CITATION_TYPE], data[CITATION][CITATION_REFERENCE] rv[dk].add_edge(u, v, key=key, **data) cleanup(graph, rv) return dict(rv)
[ "def", "get_subgraphs_by_citation", "(", "graph", ")", ":", "rv", "=", "defaultdict", "(", "graph", ".", "fresh_copy", ")", "for", "u", ",", "v", ",", "key", ",", "data", "in", "graph", ".", "edges", "(", "keys", "=", "True", ",", "data", "=", "True"...
Stratify the graph based on citations. :type graph: pybel.BELGraph :rtype: dict[tuple[str,str],pybel.BELGraph]
[ "Stratify", "the", "graph", "based", "on", "citations", "." ]
python
train
Yelp/kafka-utils
kafka_utils/kafka_check/commands/replication_factor.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/commands/replication_factor.py#L78-L107
def _prepare_output(topics_with_wrong_rf, verbose): """Returns dict with 'raw' and 'message' keys filled.""" out = {} topics_count = len(topics_with_wrong_rf) out['raw'] = { 'topics_with_wrong_replication_factor_count': topics_count, } if topics_count == 0: out['message'] = 'All topics have proper replication factor.' else: out['message'] = ( "{0} topic(s) have replication factor lower than specified min ISR + 1." ).format(topics_count) if verbose: lines = ( "replication_factor={replication_factor} is lower than min_isr={min_isr} + 1 for {topic}" .format( min_isr=topic['min_isr'], topic=topic['topic'], replication_factor=topic['replication_factor'], ) for topic in topics_with_wrong_rf ) out['verbose'] = "Topics:\n" + "\n".join(lines) if verbose: out['raw']['topics'] = topics_with_wrong_rf return out
[ "def", "_prepare_output", "(", "topics_with_wrong_rf", ",", "verbose", ")", ":", "out", "=", "{", "}", "topics_count", "=", "len", "(", "topics_with_wrong_rf", ")", "out", "[", "'raw'", "]", "=", "{", "'topics_with_wrong_replication_factor_count'", ":", "topics_co...
Returns dict with 'raw' and 'message' keys filled.
[ "Returns", "dict", "with", "raw", "and", "message", "keys", "filled", "." ]
python
train
mlperf/training
translation/tensorflow/transformer/utils/tokenizer.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/tokenizer.py#L372-L391
def _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length): """Splits a token into subtokens defined in the subtoken dict.""" ret = [] start = 0 token_len = len(token) while start < token_len: # Find the longest subtoken, so iterate backwards. for end in xrange(min(token_len, start + max_subtoken_length), start, -1): subtoken = token[start:end] if subtoken in subtoken_dict: ret.append(subtoken) start = end break else: # Did not break # If there is no possible encoding of the escaped token then one of the # characters in the token is not in the alphabet. This should be # impossible and would be indicative of a bug. raise ValueError("Was unable to split token \"%s\" into subtokens." % token) return ret
[ "def", "_split_token_to_subtokens", "(", "token", ",", "subtoken_dict", ",", "max_subtoken_length", ")", ":", "ret", "=", "[", "]", "start", "=", "0", "token_len", "=", "len", "(", "token", ")", "while", "start", "<", "token_len", ":", "# Find the longest subt...
Splits a token into subtokens defined in the subtoken dict.
[ "Splits", "a", "token", "into", "subtokens", "defined", "in", "the", "subtoken", "dict", "." ]
python
train
hubo1016/namedstruct
namedstruct/namedstruct.py
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L366-L373
def _create_embedded_indices(self): ''' Create indices for all the embedded structs. For parser internal use. ''' try: self._target._embedded_indices.update(((k,(self,v)) for k,v in getattr(self._parser.typedef, 'inline_names', {}).items())) except AttributeError: pass
[ "def", "_create_embedded_indices", "(", "self", ")", ":", "try", ":", "self", ".", "_target", ".", "_embedded_indices", ".", "update", "(", "(", "(", "k", ",", "(", "self", ",", "v", ")", ")", "for", "k", ",", "v", "in", "getattr", "(", "self", "."...
Create indices for all the embedded structs. For parser internal use.
[ "Create", "indices", "for", "all", "the", "embedded", "structs", ".", "For", "parser", "internal", "use", "." ]
python
train
BlueBrain/NeuroM
neurom/morphmath.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L387-L419
def principal_direction_extent(points): '''Calculate the extent of a set of 3D points. The extent is defined as the maximum distance between the projections on the principal directions of the covariance matrix of the points. Parameter: points : a 2D numpy array of points Returns: extents : the extents for each of the eigenvectors of the cov matrix eigs : eigenvalues of the covariance matrix eigv : respective eigenvectors of the covariance matrix ''' # center the points around 0.0 points = np.copy(points) points -= np.mean(points, axis=0) # principal components _, eigv = pca(points) extent = np.zeros(3) for i in range(eigv.shape[1]): # orthogonal projection onto the direction of the v component scalar_projs = np.sort(np.array([np.dot(p, eigv[:, i]) for p in points])) extent[i] = scalar_projs[-1] if scalar_projs[0] < 0.: extent -= scalar_projs[0] return extent
[ "def", "principal_direction_extent", "(", "points", ")", ":", "# center the points around 0.0", "points", "=", "np", ".", "copy", "(", "points", ")", "points", "-=", "np", ".", "mean", "(", "points", ",", "axis", "=", "0", ")", "# principal components", "_", ...
Calculate the extent of a set of 3D points. The extent is defined as the maximum distance between the projections on the principal directions of the covariance matrix of the points. Parameter: points : a 2D numpy array of points Returns: extents : the extents for each of the eigenvectors of the cov matrix eigs : eigenvalues of the covariance matrix eigv : respective eigenvectors of the covariance matrix
[ "Calculate", "the", "extent", "of", "a", "set", "of", "3D", "points", "." ]
python
train
hydpy-dev/hydpy
hydpy/models/hbranch/hbranch_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/hbranch/hbranch_model.py#L11-L94
def calc_outputs_v1(self):
    """Performs the actual interpolation or extrapolation.

    Required control parameters:
      |XPoints|
      |YPoints|

    Required derived parameter:
      |NmbPoints|
      |NmbBranches|

    Required flux sequence:
      |Input|

    Calculated flux sequence:
      |Outputs|

    Examples:

        As a simple example, assume a weir directing all discharge into
        `branch1` until the capacity limit of 2 m³/s is reached.  The
        discharge exceeding this threshold is directed into `branch2`:

        >>> from hydpy.models.hbranch import *
        >>> parameterstep()
        >>> xpoints(0., 2., 4.)
        >>> ypoints(branch1=[0., 2., 2.],
        ...         branch2=[0., 0., 2.])
        >>> model.parameters.update()

        Low discharge example (linear interpolation between the first two
        supporting point pairs):

        >>> fluxes.input = 1.
        >>> model.calc_outputs_v1()
        >>> fluxes.outputs
        outputs(branch1=1.0, branch2=0.0)

        Medium discharge example (linear interpolation between the second
        two supporting point pairs):

        >>> fluxes.input = 3.
        >>> model.calc_outputs_v1()
        >>> print(fluxes.outputs)
        outputs(branch1=2.0, branch2=1.0)

        High discharge example (linear extrapolation beyond the second two
        supporting point pairs):

        >>> fluxes.input = 5.
        >>> model.calc_outputs_v1()
        >>> fluxes.outputs
        outputs(branch1=2.0, branch2=3.0)

        Non-monotonous relationships and balance violations are allowed,
        e.g.:

        >>> xpoints(0., 2., 4., 6.)
        >>> ypoints(branch1=[0., 2., 0., 0.],
        ...         branch2=[0., 0., 2., 4.])
        >>> model.parameters.update()
        >>> fluxes.input = 7.
        >>> model.calc_outputs_v1()
        >>> fluxes.outputs
        outputs(branch1=0.0, branch2=5.0)
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    # Search for the index of the two relevant x points...
    # (pdx ends up at the first x point greater than the input; if the
    # input exceeds every x point the loop finishes without breaking and
    # the last segment is reused, which yields linear extrapolation below.)
    for pdx in range(1, der.nmbpoints):
        if con.xpoints[pdx] > flu.input:
            break
    # ...and use it for linear interpolation (or extrapolation).
    # Each branch interpolates on its own y series over the shared
    # [xpoints[pdx-1], xpoints[pdx]] interval.
    for bdx in range(der.nmbbranches):
        flu.outputs[bdx] = (
            (flu.input-con.xpoints[pdx-1]) *
            (con.ypoints[bdx, pdx]-con.ypoints[bdx, pdx-1]) /
            (con.xpoints[pdx]-con.xpoints[pdx-1]) +
            con.ypoints[bdx, pdx-1])
[ "def", "calc_outputs_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".",...
Performs the actual interpolation or extrapolation. Required control parameters: |XPoints| |YPoints| Required derived parameter: |NmbPoints| |NmbBranches| Required flux sequence: |Input| Calculated flux sequence: |Outputs| Examples: As a simple example, assume a weir directing all discharge into `branch1` until the capacity limit of 2 m³/s is reached. The discharge exceeding this threshold is directed into `branch2`: >>> from hydpy.models.hbranch import * >>> parameterstep() >>> xpoints(0., 2., 4.) >>> ypoints(branch1=[0., 2., 2.], ... branch2=[0., 0., 2.]) >>> model.parameters.update() Low discharge example (linear interpolation between the first two supporting point pairs): >>> fluxes.input = 1. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=1.0, branch2=0.0) Medium discharge example (linear interpolation between the second two supporting point pairs): >>> fluxes.input = 3. >>> model.calc_outputs_v1() >>> print(fluxes.outputs) outputs(branch1=2.0, branch2=1.0) High discharge example (linear extrapolation beyond the second two supporting point pairs): >>> fluxes.input = 5. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=2.0, branch2=3.0) Non-monotonous relationships and balance violations are allowed, e.g.: >>> xpoints(0., 2., 4., 6.) >>> ypoints(branch1=[0., 2., 0., 0.], ... branch2=[0., 0., 2., 4.]) >>> model.parameters.update() >>> fluxes.input = 7. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=0.0, branch2=5.0)
[ "Performs", "the", "actual", "interpolation", "or", "extrapolation", "." ]
python
train
ManiacalLabs/BiblioPixel
bibliopixel/layout/matrix.py
https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/layout/matrix.py#L299-L307
def fillRoundRect(self, x, y, w, h, r, color=None, aa=False): """ Draw a rounded rectangle with top-left corner at (x, y), width w, height h, and corner radius r :param aa: if True, use Bresenham's algorithm for line drawing; otherwise use Xiaolin Wu's algorithm """ md.fill_round_rect(self.set, x, y, w, h, r, color, aa)
[ "def", "fillRoundRect", "(", "self", ",", "x", ",", "y", ",", "w", ",", "h", ",", "r", ",", "color", "=", "None", ",", "aa", "=", "False", ")", ":", "md", ".", "fill_round_rect", "(", "self", ".", "set", ",", "x", ",", "y", ",", "w", ",", "...
Draw a rounded rectangle with top-left corner at (x, y), width w, height h, and corner radius r :param aa: if True, use Bresenham's algorithm for line drawing; otherwise use Xiaolin Wu's algorithm
[ "Draw", "a", "rounded", "rectangle", "with", "top", "-", "left", "corner", "at", "(", "x", "y", ")", "width", "w", "height", "h", "and", "corner", "radius", "r" ]
python
valid
MinchinWeb/colourettu
colourettu/_colour.py
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/colourettu/_colour.py#L288-L377
def contrast(colour1, colour2): r"""Determines the contrast between two colours. Args: colour1 (colourettu.Colour): a colour colour2 (colourettu.Colour): a second colour Contrast the difference in (perceived) brightness between colours. Values vary between 1:1 (a given colour on itself) and 21:1 (white on black). To compute contrast, two colours are required. .. code:: pycon >>> colourettu.contrast("#FFF", "#FFF") # white on white 1.0 >>> colourettu.contrast(c1, "#000") # black on white 20.999999999999996 >>> colourettu.contrast(c4, c5) 4.363552233203198 ``contrast`` can also be called on an already existing colour, but a second colour needs to be provided: .. code:: pycon >>> c4.contrast(c5) 4.363552233203198 .. note:: Uses the formula: \\[ contrast = \\frac{lum_1 + 0.05}{lum_2 + 0.05} \\] **Use of Contrast** For Basic readability, the ANSI standard is a contrast of 3:1 between the text and it's background. The W3C proposes this as a minimum accessibility standard for regular text under 18pt and bold text under 14pt. This is referred to as the *A* standard. The W3C defines a higher *AA* standard with a minimum contrast of 4.5:1. This is approximately equivalent to 20/40 vision, and is common for those over 80. The W3C define an even higher *AAA* standard with a 7:1 minimum contrast. This would be equivalent to 20/80 vision. Generally, it is assumed that those with vision beyond this would access the web with the use of assistive technologies. If needed, these constants are stored in the library. .. code:: pycon >>> colourettu.A_contrast 3.0 >>> colourettu.AA_contrast 4.5 >>> colourettu.AAA_contrast 7.0 I've also found mention that if the contrast is *too* great, this can also cause readability problems when reading longer passages. This is confirmed by personal experience, but I have been (yet) unable to find any quantitative research to this effect. 
""" colour_for_type = Colour() if type(colour1) is type(colour_for_type): mycolour1 = colour1 else: try: mycolour1 = Colour(colour1) except: raise TypeError("colour1 must be a colourettu.colour") if type(colour2) is type(colour_for_type): mycolour2 = colour2 else: try: mycolour2 = Colour(colour2) except: raise TypeError("colour2 must be a colourettu.colour") lum1 = mycolour1.luminance() lum2 = mycolour2.luminance() minlum = min(lum1, lum2) maxlum = max(lum1, lum2) return (maxlum + 0.05) / (minlum + 0.05)
[ "def", "contrast", "(", "colour1", ",", "colour2", ")", ":", "colour_for_type", "=", "Colour", "(", ")", "if", "type", "(", "colour1", ")", "is", "type", "(", "colour_for_type", ")", ":", "mycolour1", "=", "colour1", "else", ":", "try", ":", "mycolour1",...
r"""Determines the contrast between two colours. Args: colour1 (colourettu.Colour): a colour colour2 (colourettu.Colour): a second colour Contrast the difference in (perceived) brightness between colours. Values vary between 1:1 (a given colour on itself) and 21:1 (white on black). To compute contrast, two colours are required. .. code:: pycon >>> colourettu.contrast("#FFF", "#FFF") # white on white 1.0 >>> colourettu.contrast(c1, "#000") # black on white 20.999999999999996 >>> colourettu.contrast(c4, c5) 4.363552233203198 ``contrast`` can also be called on an already existing colour, but a second colour needs to be provided: .. code:: pycon >>> c4.contrast(c5) 4.363552233203198 .. note:: Uses the formula: \\[ contrast = \\frac{lum_1 + 0.05}{lum_2 + 0.05} \\] **Use of Contrast** For Basic readability, the ANSI standard is a contrast of 3:1 between the text and it's background. The W3C proposes this as a minimum accessibility standard for regular text under 18pt and bold text under 14pt. This is referred to as the *A* standard. The W3C defines a higher *AA* standard with a minimum contrast of 4.5:1. This is approximately equivalent to 20/40 vision, and is common for those over 80. The W3C define an even higher *AAA* standard with a 7:1 minimum contrast. This would be equivalent to 20/80 vision. Generally, it is assumed that those with vision beyond this would access the web with the use of assistive technologies. If needed, these constants are stored in the library. .. code:: pycon >>> colourettu.A_contrast 3.0 >>> colourettu.AA_contrast 4.5 >>> colourettu.AAA_contrast 7.0 I've also found mention that if the contrast is *too* great, this can also cause readability problems when reading longer passages. This is confirmed by personal experience, but I have been (yet) unable to find any quantitative research to this effect.
[ "r", "Determines", "the", "contrast", "between", "two", "colours", "." ]
python
train
pyrogram/pyrogram
pyrogram/client/methods/bots/set_game_score.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/bots/set_game_score.py#L27-L92
def set_game_score( self, user_id: Union[int, str], score: int, force: bool = None, disable_edit_message: bool = None, chat_id: Union[int, str] = None, message_id: int = None ): # inline_message_id: str = None): TODO Add inline_message_id """Use this method to set the score of the specified user in a game. Args: user_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). score (``int``): New score, must be non-negative. force (``bool``, *optional*): Pass True, if the high score is allowed to decrease. This can be useful when fixing mistakes or banning cheaters. disable_edit_message (``bool``, *optional*): Pass True, if the game message should not be automatically edited to include the current scoreboard. chat_id (``int`` | ``str``, *optional*): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). Required if inline_message_id is not specified. message_id (``int``, *optional*): Identifier of the sent message. Required if inline_message_id is not specified. Returns: On success, if the message was sent by the bot, returns the edited :obj:`Message <pyrogram.Message>`, otherwise returns True. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. :class:`BotScoreNotModified` if the new score is not greater than the user's current score in the chat and force is False. 
""" r = self.send( functions.messages.SetGameScore( peer=self.resolve_peer(chat_id), score=score, id=message_id, user_id=self.resolve_peer(user_id), force=force or None, edit_message=not disable_edit_message or None ) ) for i in r.updates: if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)): return pyrogram.Message._parse( self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats} ) return True
[ "def", "set_game_score", "(", "self", ",", "user_id", ":", "Union", "[", "int", ",", "str", "]", ",", "score", ":", "int", ",", "force", ":", "bool", "=", "None", ",", "disable_edit_message", ":", "bool", "=", "None", ",", "chat_id", ":", "Union", "[...
Use this method to set the score of the specified user in a game. Args: user_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). score (``int``): New score, must be non-negative. force (``bool``, *optional*): Pass True, if the high score is allowed to decrease. This can be useful when fixing mistakes or banning cheaters. disable_edit_message (``bool``, *optional*): Pass True, if the game message should not be automatically edited to include the current scoreboard. chat_id (``int`` | ``str``, *optional*): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). Required if inline_message_id is not specified. message_id (``int``, *optional*): Identifier of the sent message. Required if inline_message_id is not specified. Returns: On success, if the message was sent by the bot, returns the edited :obj:`Message <pyrogram.Message>`, otherwise returns True. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. :class:`BotScoreNotModified` if the new score is not greater than the user's current score in the chat and force is False.
[ "Use", "this", "method", "to", "set", "the", "score", "of", "the", "specified", "user", "in", "a", "game", "." ]
python
train
oscarbranson/latools
latools/helpers/helpers.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L220-L237
def bool_2_indices(a): """ Convert boolean array into a 2D array of (start, stop) pairs. """ if any(a): lims = [] lims.append(np.where(a[:-1] != a[1:])[0]) if a[0]: lims.append([0]) if a[-1]: lims.append([len(a) - 1]) lims = np.concatenate(lims) lims.sort() return np.reshape(lims, (lims.size // 2, 2)) else: return None
[ "def", "bool_2_indices", "(", "a", ")", ":", "if", "any", "(", "a", ")", ":", "lims", "=", "[", "]", "lims", ".", "append", "(", "np", ".", "where", "(", "a", "[", ":", "-", "1", "]", "!=", "a", "[", "1", ":", "]", ")", "[", "0", "]", "...
Convert boolean array into a 2D array of (start, stop) pairs.
[ "Convert", "boolean", "array", "into", "a", "2D", "array", "of", "(", "start", "stop", ")", "pairs", "." ]
python
test
volafiled/python-volapi
volapi/handler.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/handler.py#L194-L199
def _handle_changed_config(self, change): """Handle configuration changes""" key, value = change.get("key"), change.get("value") self.room.config.update({key: value}) self.conn.enqueue_data("config", self.room.config)
[ "def", "_handle_changed_config", "(", "self", ",", "change", ")", ":", "key", ",", "value", "=", "change", ".", "get", "(", "\"key\"", ")", ",", "change", ".", "get", "(", "\"value\"", ")", "self", ".", "room", ".", "config", ".", "update", "(", "{",...
Handle configuration changes
[ "Handle", "configuration", "changes" ]
python
train
MartijnBraam/pyElectronics
electronics/devices/hmc5883l.py
https://github.com/MartijnBraam/pyElectronics/blob/a20878c9fa190135f1e478e9ea0b54ca43ff308e/electronics/devices/hmc5883l.py#L128-L150
def gauss(self): """ Get the magnetometer values as gauss for each axis as a tuple (x,y,z) :example: >>> sensor = HMC5883L(gw) >>> sensor.gauss() (16.56, 21.2888, 26.017599999999998) """ raw = self.raw() factors = { 1370: 0.73, 1090: 0.92, 820: 1.22, 660: 1.52, 440: 2.27, 390: 2.56, 330: 3.03, 230: 4.35 } factor = factors[self.resolution] / 100 return raw[0] * factor, raw[1] * factor, raw[2] * factor
[ "def", "gauss", "(", "self", ")", ":", "raw", "=", "self", ".", "raw", "(", ")", "factors", "=", "{", "1370", ":", "0.73", ",", "1090", ":", "0.92", ",", "820", ":", "1.22", ",", "660", ":", "1.52", ",", "440", ":", "2.27", ",", "390", ":", ...
Get the magnetometer values as gauss for each axis as a tuple (x,y,z) :example: >>> sensor = HMC5883L(gw) >>> sensor.gauss() (16.56, 21.2888, 26.017599999999998)
[ "Get", "the", "magnetometer", "values", "as", "gauss", "for", "each", "axis", "as", "a", "tuple", "(", "x", "y", "z", ")" ]
python
train
kstaniek/condoor
condoor/actions.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/actions.py#L154-L161
def a_expected_prompt(ctx): """Update driver, config mode and hostname when received an expected prompt.""" prompt = ctx.ctrl.match.group(0) ctx.device.update_driver(prompt) ctx.device.update_config_mode() ctx.device.update_hostname() ctx.finished = True return True
[ "def", "a_expected_prompt", "(", "ctx", ")", ":", "prompt", "=", "ctx", ".", "ctrl", ".", "match", ".", "group", "(", "0", ")", "ctx", ".", "device", ".", "update_driver", "(", "prompt", ")", "ctx", ".", "device", ".", "update_config_mode", "(", ")", ...
Update driver, config mode and hostname when received an expected prompt.
[ "Update", "driver", "config", "mode", "and", "hostname", "when", "received", "an", "expected", "prompt", "." ]
python
train
klahnakoski/pyLibrary
pyLibrary/env/flask_wrappers.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/pyLibrary/env/flask_wrappers.py#L68-L114
def dockerflow(flask_app, backend_check): """ ADD ROUTING TO HANDLE DOCKERFLOW APP REQUIREMENTS (see https://github.com/mozilla-services/Dockerflow#containerized-app-requirements) :param flask_app: THE (Flask) APP :param backend_check: METHOD THAT WILL CHECK THE BACKEND IS WORKING AND RAISE AN EXCEPTION IF NOT :return: """ global VERSION_JSON try: VERSION_JSON = File("version.json").read_bytes() @cors_wrapper def version(): return Response( VERSION_JSON, status=200, headers={ "Content-Type": "application/json" } ) @cors_wrapper def heartbeat(): try: backend_check() return Response(status=200) except Exception as e: Log.warning("heartbeat failure", cause=e) return Response( unicode2utf8(value2json(e)), status=500, headers={ "Content-Type": "application/json" } ) @cors_wrapper def lbheartbeat(): return Response(status=200) flask_app.add_url_rule(str('/__version__'), None, version, defaults={}, methods=[str('GET'), str('POST')]) flask_app.add_url_rule(str('/__heartbeat__'), None, heartbeat, defaults={}, methods=[str('GET'), str('POST')]) flask_app.add_url_rule(str('/__lbheartbeat__'), None, lbheartbeat, defaults={}, methods=[str('GET'), str('POST')]) except Exception as e: Log.error("Problem setting up listeners for dockerflow", cause=e)
[ "def", "dockerflow", "(", "flask_app", ",", "backend_check", ")", ":", "global", "VERSION_JSON", "try", ":", "VERSION_JSON", "=", "File", "(", "\"version.json\"", ")", ".", "read_bytes", "(", ")", "@", "cors_wrapper", "def", "version", "(", ")", ":", "return...
ADD ROUTING TO HANDLE DOCKERFLOW APP REQUIREMENTS (see https://github.com/mozilla-services/Dockerflow#containerized-app-requirements) :param flask_app: THE (Flask) APP :param backend_check: METHOD THAT WILL CHECK THE BACKEND IS WORKING AND RAISE AN EXCEPTION IF NOT :return:
[ "ADD", "ROUTING", "TO", "HANDLE", "DOCKERFLOW", "APP", "REQUIREMENTS", "(", "see", "https", ":", "//", "github", ".", "com", "/", "mozilla", "-", "services", "/", "Dockerflow#containerized", "-", "app", "-", "requirements", ")", ":", "param", "flask_app", ":...
python
train
mitsei/dlkit
dlkit/json_/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L1732-L1753
def get_objective_bank_ids_by_objective(self, objective_id): """Gets the list of ``ObjectiveBank`` ``Ids`` mapped to an ``Objective``. arg: objective_id (osid.id.Id): ``Id`` of an ``Objective`` return: (osid.id.IdList) - list of objective bank ``Ids`` raise: NotFound - ``objective_id`` is not found raise: NullArgument - ``objective_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_bin_ids_by_resource mgr = self._get_provider_manager('LEARNING', local=True) lookup_session = mgr.get_objective_lookup_session(proxy=self._proxy) lookup_session.use_federated_objective_bank_view() objective = lookup_session.get_objective(objective_id) id_list = [] for idstr in objective._my_map['assignedObjectiveBankIds']: id_list.append(Id(idstr)) return IdList(id_list)
[ "def", "get_objective_bank_ids_by_objective", "(", "self", ",", "objective_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_bin_ids_by_resource", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'LEARNING'", ",", "local", "=", "...
Gets the list of ``ObjectiveBank`` ``Ids`` mapped to an ``Objective``. arg: objective_id (osid.id.Id): ``Id`` of an ``Objective`` return: (osid.id.IdList) - list of objective bank ``Ids`` raise: NotFound - ``objective_id`` is not found raise: NullArgument - ``objective_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "list", "of", "ObjectiveBank", "Ids", "mapped", "to", "an", "Objective", "." ]
python
train
xtrementl/focus
focus/parser/parser.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L324-L338
def _rule_value(self): """ Parses the production rule:: value : TERM (',' TERM)* Returns list of string terms. """ terms = [self._get_token()] # consume additional terms if available while self._lookahead_token() == ',': self._get_token() # chomp the comma terms.append(self._get_token()) return terms
[ "def", "_rule_value", "(", "self", ")", ":", "terms", "=", "[", "self", ".", "_get_token", "(", ")", "]", "# consume additional terms if available", "while", "self", ".", "_lookahead_token", "(", ")", "==", "','", ":", "self", ".", "_get_token", "(", ")", ...
Parses the production rule:: value : TERM (',' TERM)* Returns list of string terms.
[ "Parses", "the", "production", "rule", "::", "value", ":", "TERM", "(", "TERM", ")", "*" ]
python
train
sony/nnabla
python/src/nnabla/experimental/graph_converters/batch_normalization_folded.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/graph_converters/batch_normalization_folded.py#L41-L94
def convert(self, vroot, entry_variables): """ All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. """ self.graph_info = GraphInfo(vroot) self.entry_variables = entry_variables with nn.parameter_scope(self.name): # Function loop in the forward order for t, func in enumerate(self.graph_info.funcs): # TODO: error check # Batch normalization check, then skip if func.name == "BatchNormalization": i0 = func.inputs[0] bn_func = func # Test mode check if bn_func.info.args["batch_stat"] == False: # `Target Func -> BN` check from BN if i0.parent.info.type_name in self.inner_prod_functions: nn.logger.info("{} is skipped.".format(func.name)) continue # `Target Func -> BN` conversion if func.name in self.inner_prod_functions: inner_prod_func = func o0 = inner_prod_func.outputs[0] fs = self.graph_info.variable_to_funcs[o0] # No branch check #TODO: branching check (really needed?) if fs is not None and len(fs) == 1: # `Target Func -> BN` check bn_func = fs[0] if bn_func.name == "BatchNormalization": # Test mode check if bn_func.info.args["batch_stat"] == False: # Perform `Target Func -> BN` conversion nn.logger.info("BatchNormalization parameters are folded to " "the preceding convolution.") o = self._inner_prod_bn_conversion( inner_prod_func, bn_func) continue # Identity conversion o = self._identity_conversion(func) self.end_variable = o return self.end_variable
[ "def", "convert", "(", "self", ",", "vroot", ",", "entry_variables", ")", ":", "self", ".", "graph_info", "=", "GraphInfo", "(", "vroot", ")", "self", ".", "entry_variables", "=", "entry_variables", "with", "nn", ".", "parameter_scope", "(", "self", ".", "...
All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
[ "All", "functions", "are", "replaced", "with", "the", "same", "new", "function", "." ]
python
train
datacamp/pythonwhat
pythonwhat/checks/has_funcs.py
https://github.com/datacamp/pythonwhat/blob/ffbf7f8436a51f77c22f3bed75ba3bc37a5c666f/pythonwhat/checks/has_funcs.py#L537-L575
def has_output(state, text, pattern=True, no_output_msg=None): """Search student output for a pattern. Among the student and solution process, the student submission and solution code as a string, the ``Ex()`` state also contains the output that a student generated with his or her submission. With ``has_output()``, you can access this output and match it against a regular or fixed expression. Args: text (str): the text that is searched for pattern (bool): if True (default), the text is treated as a pattern. If False, it is treated as plain text. no_output_msg (str): feedback message to be displayed if the output is not found. :Example: As an example, suppose we want a student to print out a sentence: :: # Print the "This is some ... stuff" print("This is some weird stuff") The following SCT tests whether the student prints out ``This is some weird stuff``: :: # Using exact string matching Ex().has_output("This is some weird stuff", pattern = False) # Using a regular expression (more robust) # pattern = True is the default msg = "Print out ``This is some ... stuff`` to the output, " + \\ "fill in ``...`` with a word you like." Ex().has_output(r"This is some \w* stuff", no_output_msg = msg) """ if not no_output_msg: no_output_msg = "You did not output the correct things." _msg = state.build_message(no_output_msg) state.do_test(StringContainsTest(state.raw_student_output, text, pattern, _msg)) return state
[ "def", "has_output", "(", "state", ",", "text", ",", "pattern", "=", "True", ",", "no_output_msg", "=", "None", ")", ":", "if", "not", "no_output_msg", ":", "no_output_msg", "=", "\"You did not output the correct things.\"", "_msg", "=", "state", ".", "build_mes...
Search student output for a pattern. Among the student and solution process, the student submission and solution code as a string, the ``Ex()`` state also contains the output that a student generated with his or her submission. With ``has_output()``, you can access this output and match it against a regular or fixed expression. Args: text (str): the text that is searched for pattern (bool): if True (default), the text is treated as a pattern. If False, it is treated as plain text. no_output_msg (str): feedback message to be displayed if the output is not found. :Example: As an example, suppose we want a student to print out a sentence: :: # Print the "This is some ... stuff" print("This is some weird stuff") The following SCT tests whether the student prints out ``This is some weird stuff``: :: # Using exact string matching Ex().has_output("This is some weird stuff", pattern = False) # Using a regular expression (more robust) # pattern = True is the default msg = "Print out ``This is some ... stuff`` to the output, " + \\ "fill in ``...`` with a word you like." Ex().has_output(r"This is some \w* stuff", no_output_msg = msg)
[ "Search", "student", "output", "for", "a", "pattern", "." ]
python
test
FNNDSC/pfmisc
pfmisc/C_snode.py
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L1108-L1114
def treeNode_metaSet(self, astr_path, **kwargs): """ Sets the metaData_print bit on node at <astr_path>. """ self.cdnode(astr_path) self.snode_current.metaData_print(self.b_printMetaData) return {'status': True}
[ "def", "treeNode_metaSet", "(", "self", ",", "astr_path", ",", "*", "*", "kwargs", ")", ":", "self", ".", "cdnode", "(", "astr_path", ")", "self", ".", "snode_current", ".", "metaData_print", "(", "self", ".", "b_printMetaData", ")", "return", "{", "'statu...
Sets the metaData_print bit on node at <astr_path>.
[ "Sets", "the", "metaData_print", "bit", "on", "node", "at", "<astr_path", ">", "." ]
python
train
snipsco/snipsmanagercore
snipsmanagercore/intent_parser.py
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/intent_parser.py#L136-L169
def parse_instant_time(slot): """ Parse a slot into an InstantTime object. Sample response: { "entity": "snips/datetime", "range": { "end": 36, "start": 28 }, "rawValue": "tomorrow", "slotName": "weatherForecastStartDatetime", "value": { "grain": "Day", "kind": "InstantTime", "precision": "Exact", "value": "2017-09-15 00:00:00 +00:00" } } :param slot: a intent slot. :return: a parsed InstantTime object, or None. """ date = IntentParser.get_dict_value(slot, ['value', 'value']) if not date: return None date = parse(date) if not date: return None grain = InstantTime.parse_grain( IntentParser.get_dict_value(slot, ['value', 'grain'])) return InstantTime(date, grain)
[ "def", "parse_instant_time", "(", "slot", ")", ":", "date", "=", "IntentParser", ".", "get_dict_value", "(", "slot", ",", "[", "'value'", ",", "'value'", "]", ")", "if", "not", "date", ":", "return", "None", "date", "=", "parse", "(", "date", ")", "if"...
Parse a slot into an InstantTime object. Sample response: { "entity": "snips/datetime", "range": { "end": 36, "start": 28 }, "rawValue": "tomorrow", "slotName": "weatherForecastStartDatetime", "value": { "grain": "Day", "kind": "InstantTime", "precision": "Exact", "value": "2017-09-15 00:00:00 +00:00" } } :param slot: a intent slot. :return: a parsed InstantTime object, or None.
[ "Parse", "a", "slot", "into", "an", "InstantTime", "object", "." ]
python
train
OCA/vertical-hotel
hotel/models/hotel.py
https://github.com/OCA/vertical-hotel/blob/a01442e92b5ea1fda7fb9e6180b3211e8749a35a/hotel/models/hotel.py#L247-L256
def isroom_change(self): ''' Based on isroom, status will be updated. ---------------------------------------- @param self: object pointer ''' if self.isroom is False: self.status = 'occupied' if self.isroom is True: self.status = 'available'
[ "def", "isroom_change", "(", "self", ")", ":", "if", "self", ".", "isroom", "is", "False", ":", "self", ".", "status", "=", "'occupied'", "if", "self", ".", "isroom", "is", "True", ":", "self", ".", "status", "=", "'available'" ]
Based on isroom, status will be updated. ---------------------------------------- @param self: object pointer
[ "Based", "on", "isroom", "status", "will", "be", "updated", ".", "----------------------------------------" ]
python
train
aliyun/aliyun-odps-python-sdk
odps/df/expr/collections.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/collections.py#L922-L955
def pivot(expr, rows, columns, values=None): """ Produce ‘pivot’ table based on 3 columns of this DataFrame. Uses unique values from rows / columns and fills with values. :param expr: collection :param rows: use to make new collection's grouped rows :param columns: use to make new collection's columns :param values: values to use for populating new collection's values :return: collection :Example: >>> df.pivot(rows='id', columns='category') >>> df.pivot(rows='id', columns='category', values='sale') >>> df.pivot(rows=['id', 'id2'], columns='category', values='sale') """ rows = [expr._get_field(r) for r in utils.to_list(rows)] columns = [expr._get_field(c) for c in utils.to_list(columns)] if values: values = utils.to_list(values) else: names = set(c.name for c in rows + columns) values = [n for n in expr.schema.names if n not in names] if not values: raise ValueError('No values found for pivot') values = [expr._get_field(v) for v in values] if len(columns) > 1: raise ValueError('More than one `columns` are not supported yet') return PivotCollectionExpr(_input=expr, _group=rows, _columns=columns, _values=values)
[ "def", "pivot", "(", "expr", ",", "rows", ",", "columns", ",", "values", "=", "None", ")", ":", "rows", "=", "[", "expr", ".", "_get_field", "(", "r", ")", "for", "r", "in", "utils", ".", "to_list", "(", "rows", ")", "]", "columns", "=", "[", "...
Produce ‘pivot’ table based on 3 columns of this DataFrame. Uses unique values from rows / columns and fills with values. :param expr: collection :param rows: use to make new collection's grouped rows :param columns: use to make new collection's columns :param values: values to use for populating new collection's values :return: collection :Example: >>> df.pivot(rows='id', columns='category') >>> df.pivot(rows='id', columns='category', values='sale') >>> df.pivot(rows=['id', 'id2'], columns='category', values='sale')
[ "Produce", "‘pivot’", "table", "based", "on", "3", "columns", "of", "this", "DataFrame", ".", "Uses", "unique", "values", "from", "rows", "/", "columns", "and", "fills", "with", "values", "." ]
python
train
llazzaro/django-scheduler
schedule/models/events.py
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/models/events.py#L513-L521
def create_relation(self, event, content_object, distinction=''): """ Creates a relation between event and content_object. See EventRelation for help on distinction. """ return EventRelation.objects.create( event=event, distinction=distinction, content_object=content_object)
[ "def", "create_relation", "(", "self", ",", "event", ",", "content_object", ",", "distinction", "=", "''", ")", ":", "return", "EventRelation", ".", "objects", ".", "create", "(", "event", "=", "event", ",", "distinction", "=", "distinction", ",", "content_o...
Creates a relation between event and content_object. See EventRelation for help on distinction.
[ "Creates", "a", "relation", "between", "event", "and", "content_object", ".", "See", "EventRelation", "for", "help", "on", "distinction", "." ]
python
train
debrouwere/google-analytics
googleanalytics/query.py
https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L469-L484
def description(self): """ A list of the metrics this query will ask for. """ if 'metrics' in self.raw: metrics = self.raw['metrics'] head = metrics[0:-1] or metrics[0:1] text = ", ".join(head) if len(metrics) > 1: tail = metrics[-1] text = text + " and " + tail else: text = 'n/a' return text
[ "def", "description", "(", "self", ")", ":", "if", "'metrics'", "in", "self", ".", "raw", ":", "metrics", "=", "self", ".", "raw", "[", "'metrics'", "]", "head", "=", "metrics", "[", "0", ":", "-", "1", "]", "or", "metrics", "[", "0", ":", "1", ...
A list of the metrics this query will ask for.
[ "A", "list", "of", "the", "metrics", "this", "query", "will", "ask", "for", "." ]
python
train
timothydmorton/isochrones
isochrones/starmodel_old.py
https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel_old.py#L254-L325
def lnlike(self, p): """Log-likelihood of model at given parameters :param p: mass, log10(age), feh, [distance, A_V (extinction)]. Final two should only be provided if ``self.fit_for_distance`` is ``True``; that is, apparent magnitudes are provided. :return: log-likelihood. Will be -np.inf if values out of range. """ if not self._props_cleaned: self._clean_props() if not self.use_emcee: fit_for_distance = True mass, age, feh, dist, AV = (p[0], p[1], p[2], p[3], p[4]) else: if len(p)==5: fit_for_distance = True mass,age,feh,dist,AV = p elif len(p)==3: fit_for_distance = False mass,age,feh = p if mass < self.ic.minmass or mass > self.ic.maxmass \ or age < self.ic.minage or age > self.ic.maxage \ or feh < self.ic.minfeh or feh > self.ic.maxfeh: return -np.inf if fit_for_distance: if dist < 0 or AV < 0 or dist > self.max_distance: return -np.inf if AV > self.maxAV: return -np.inf if self.min_logg is not None: logg = self.ic.logg(mass,age,feh) if logg < self.min_logg: return -np.inf logl = 0 for prop in self.properties.keys(): try: val,err = self.properties[prop] except TypeError: #property not appropriate for fitting (e.g. no error provided) continue if prop in self.ic.bands: if not fit_for_distance: raise ValueError('must fit for mass, age, feh, dist, A_V if apparent magnitudes provided.') mod = self.ic.mag[prop](mass,age,feh) + 5*np.log10(dist) - 5 A = AV*EXTINCTION[prop] mod += A elif re.search('delta_',prop): continue elif prop=='feh': mod = feh elif prop=='parallax': mod = 1./dist * 1000 else: mod = getattr(self.ic,prop)(mass,age,feh) logl += -(val-mod)**2/(2*err**2) + np.log(1/(err*np.sqrt(2*np.pi))) if np.isnan(logl): logl = -np.inf return logl
[ "def", "lnlike", "(", "self", ",", "p", ")", ":", "if", "not", "self", ".", "_props_cleaned", ":", "self", ".", "_clean_props", "(", ")", "if", "not", "self", ".", "use_emcee", ":", "fit_for_distance", "=", "True", "mass", ",", "age", ",", "feh", ","...
Log-likelihood of model at given parameters :param p: mass, log10(age), feh, [distance, A_V (extinction)]. Final two should only be provided if ``self.fit_for_distance`` is ``True``; that is, apparent magnitudes are provided. :return: log-likelihood. Will be -np.inf if values out of range.
[ "Log", "-", "likelihood", "of", "model", "at", "given", "parameters" ]
python
train
pkgw/pwkit
pwkit/synphot.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/synphot.py#L464-L471
def register_pivot_wavelength(self, telescope, band, wlen): """Register precomputed pivot wavelengths.""" if (telescope, band) in self._pivot_wavelengths: raise AlreadyDefinedError('pivot wavelength for %s/%s already ' 'defined', telescope, band) self._note(telescope, band) self._pivot_wavelengths[telescope,band] = wlen return self
[ "def", "register_pivot_wavelength", "(", "self", ",", "telescope", ",", "band", ",", "wlen", ")", ":", "if", "(", "telescope", ",", "band", ")", "in", "self", ".", "_pivot_wavelengths", ":", "raise", "AlreadyDefinedError", "(", "'pivot wavelength for %s/%s already...
Register precomputed pivot wavelengths.
[ "Register", "precomputed", "pivot", "wavelengths", "." ]
python
train
bwohlberg/sporco
sporco/admm/ccmod.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/ccmod.py#L288-L297
def uinit(self, ushape): """Return initialiser for working variable U""" if self.opt['Y0'] is None: return np.zeros(ushape, dtype=self.dtype) else: # If initial Y is non-zero, initial U is chosen so that # the relevant dual optimality criterion (see (3.10) in # boyd-2010-distributed) is satisfied. return self.Y
[ "def", "uinit", "(", "self", ",", "ushape", ")", ":", "if", "self", ".", "opt", "[", "'Y0'", "]", "is", "None", ":", "return", "np", ".", "zeros", "(", "ushape", ",", "dtype", "=", "self", ".", "dtype", ")", "else", ":", "# If initial Y is non-zero, ...
Return initialiser for working variable U
[ "Return", "initialiser", "for", "working", "variable", "U" ]
python
train
mopidy/mopidy-gmusic
mopidy_gmusic/translator.py
https://github.com/mopidy/mopidy-gmusic/blob/bbfe876d2a7e4f0f4f9308193bb988936bdfd5c3/mopidy_gmusic/translator.py#L24-L30
def artist_to_ref(artist): """Convert a mopidy artist to a mopidy ref.""" if artist.name: name = artist.name else: name = 'Unknown artist' return Ref.directory(uri=artist.uri, name=name)
[ "def", "artist_to_ref", "(", "artist", ")", ":", "if", "artist", ".", "name", ":", "name", "=", "artist", ".", "name", "else", ":", "name", "=", "'Unknown artist'", "return", "Ref", ".", "directory", "(", "uri", "=", "artist", ".", "uri", ",", "name", ...
Convert a mopidy artist to a mopidy ref.
[ "Convert", "a", "mopidy", "artist", "to", "a", "mopidy", "ref", "." ]
python
train
elliterate/capybara.py
capybara/window.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/window.py#L58-L63
def current(self): """ bool: Whether this window is the window in which commands are being executed. """ try: return self.driver.current_window_handle == self.handle except self.driver.no_such_window_error: return False
[ "def", "current", "(", "self", ")", ":", "try", ":", "return", "self", ".", "driver", ".", "current_window_handle", "==", "self", ".", "handle", "except", "self", ".", "driver", ".", "no_such_window_error", ":", "return", "False" ]
bool: Whether this window is the window in which commands are being executed.
[ "bool", ":", "Whether", "this", "window", "is", "the", "window", "in", "which", "commands", "are", "being", "executed", "." ]
python
test
scott-griffiths/bitstring
bitstring.py
https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L2161-L2164
def _set(self, pos): """Set bit at pos to 1.""" assert 0 <= pos < self.len self._datastore.setbit(pos)
[ "def", "_set", "(", "self", ",", "pos", ")", ":", "assert", "0", "<=", "pos", "<", "self", ".", "len", "self", ".", "_datastore", ".", "setbit", "(", "pos", ")" ]
Set bit at pos to 1.
[ "Set", "bit", "at", "pos", "to", "1", "." ]
python
train
hazelcast/hazelcast-python-client
hazelcast/proxy/queue.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/queue.py#L229-L241
def remove_all(self, items): """ Removes all of the elements of the specified collection from this queue. :param items: (Collection), the specified collection. :return: (bool), ``true`` if the call changed this queue, ``false`` otherwise. """ check_not_none(items, "Value can't be None") data_items = [] for item in items: check_not_none(item, "Value can't be None") data_items.append(self._to_data(item)) return self._encode_invoke(queue_compare_and_remove_all_codec, data_list=data_items)
[ "def", "remove_all", "(", "self", ",", "items", ")", ":", "check_not_none", "(", "items", ",", "\"Value can't be None\"", ")", "data_items", "=", "[", "]", "for", "item", "in", "items", ":", "check_not_none", "(", "item", ",", "\"Value can't be None\"", ")", ...
Removes all of the elements of the specified collection from this queue. :param items: (Collection), the specified collection. :return: (bool), ``true`` if the call changed this queue, ``false`` otherwise.
[ "Removes", "all", "of", "the", "elements", "of", "the", "specified", "collection", "from", "this", "queue", "." ]
python
train
noobermin/lspreader
lspreader/lspreader.py
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/lspreader.py#L65-L152
def get_header(file,**kw): '''gets the header for the .p4 file, note that this Advanced the file position to the end of the header. Returns the size of the header, and the size of the header, if the header keyword is true. ''' if type(file) == str: #if called with a filename, recall with opened file. if test(kw, "gzip") and kw['gzip'] == 'guess': kw['gzip'] = re.search(r'\.gz$', file) is not None; if test(kw, "gzip"): with gzip.open(file,'rb') as f: return get_header(f,**kw); else: with open(file,'rb') as f: return get_header(f,**kw); if test(kw, "size"): size = file.tell(); header = get_dict( file,'iiss', ['dump_type','dversion', 'title','revision']); if header['dump_type'] == 1: #this is a particle dump file d = get_dict(file, 'fiiii', ['timestamp','geometry','sflagsx','sflagsy','sflagsz']); header.update(d); #reading params header['num_species'] = get_int(file); header['num_particles'] = get_int(file); nparams = get_int(file); units = [get_str(file) for i in range(nparams)]; labels = ['q', 'x', 'y', 'z', 'ux', 'uy', 'uz'] if nparams == 7: pass; elif nparams == 8 or nparams == 11: labels += ['H'] elif nparams == 10 or nparams == 11: labels += ['xi', 'yi', 'zi']; else: raise NotImplementedError( 'Not implemented for these number of parameters:{}.'.format(n)); header['params'] = list(zip(labels,units)); elif header['dump_type'] == 2 or header['dump_type'] == 3: #this is a fields file or a scalars file. 
d = get_dict(file,'fii',['timestamp','geometry','domains']); header.update(d); #reading quantities n = get_int(file); names=[get_str(file) for i in range(n)]; units=[get_str(file) for i in range(n)]; header['quantities'] = list(zip(names,units)); elif header['dump_type'] == 6: #this is a particle movie file d = get_dict(file, 'iiii', ['geometry','sflagsx','sflagsy','sflagsz']); header.update(d); #reading params n = get_int(file); flags=[bool(get_int(file)) for i in range(n)]; units=[get_str(file) for i in range(n)]; labels=['q','x','y','z','ux','uy','uz','E']; if n == 8: pass; elif n == 7: labels = labels[:-1]; elif n == 11: labels+=['xi','yi','zi']; else: raise NotImplementedError( 'Not implemented for these number of parameters:{}.'.format(n)); header['params'] = [ (label,unit) for (label,unit,flag) in zip(labels,units,flags) if flag ]; elif header['dump_type'] == 10: #this is a particle extraction file: header['geometry'] = get_int(file); #reading quantities n = get_int(file); header['quantities'] = [get_str(file) for i in range(n)]; else: raise ValueError('Unknown dump_type: {}'.format(header['dump_type'])); if test(kw,'size'): return header, file.tell()-size; return header;
[ "def", "get_header", "(", "file", ",", "*", "*", "kw", ")", ":", "if", "type", "(", "file", ")", "==", "str", ":", "#if called with a filename, recall with opened file.", "if", "test", "(", "kw", ",", "\"gzip\"", ")", "and", "kw", "[", "'gzip'", "]", "==...
gets the header for the .p4 file, note that this Advanced the file position to the end of the header. Returns the size of the header, and the size of the header, if the header keyword is true.
[ "gets", "the", "header", "for", "the", ".", "p4", "file", "note", "that", "this", "Advanced", "the", "file", "position", "to", "the", "end", "of", "the", "header", ".", "Returns", "the", "size", "of", "the", "header", "and", "the", "size", "of", "the",...
python
train
alexhayes/django-toolkit
django_toolkit/email.py
https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/email.py#L24-L37
def attach_related(self, filename=None, content=None, mimetype=None): """ Attaches a file with the given filename and content. The filename can be omitted and the mimetype is guessed, if not provided. If the first parameter is a MIMEBase subclass it is inserted directly into the resulting message attachments. """ if isinstance(filename, MIMEBase): assert content == mimetype == None self.related_attachments.append(filename) else: assert content is not None self.related_attachments.append((filename, content, mimetype))
[ "def", "attach_related", "(", "self", ",", "filename", "=", "None", ",", "content", "=", "None", ",", "mimetype", "=", "None", ")", ":", "if", "isinstance", "(", "filename", ",", "MIMEBase", ")", ":", "assert", "content", "==", "mimetype", "==", "None", ...
Attaches a file with the given filename and content. The filename can be omitted and the mimetype is guessed, if not provided. If the first parameter is a MIMEBase subclass it is inserted directly into the resulting message attachments.
[ "Attaches", "a", "file", "with", "the", "given", "filename", "and", "content", ".", "The", "filename", "can", "be", "omitted", "and", "the", "mimetype", "is", "guessed", "if", "not", "provided", "." ]
python
train
mdavidsaver/p4p
src/p4p/client/thread.py
https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/thread.py#L207-L218
def close(self): """Force close all Channels and cancel all Operations """ if self._Q is not None: for T in self._T: self._Q.interrupt() for n, T in enumerate(self._T): _log.debug('Join Context worker %d', n) T.join() _log.debug('Joined Context workers') self._Q, self._T = None, None super(Context, self).close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_Q", "is", "not", "None", ":", "for", "T", "in", "self", ".", "_T", ":", "self", ".", "_Q", ".", "interrupt", "(", ")", "for", "n", ",", "T", "in", "enumerate", "(", "self", ".", "_T"...
Force close all Channels and cancel all Operations
[ "Force", "close", "all", "Channels", "and", "cancel", "all", "Operations" ]
python
train
awslabs/aws-cfn-template-flip
cfn_clean/__init__.py
https://github.com/awslabs/aws-cfn-template-flip/blob/837576bea243e3f5efb0a20b84802371272e2d33/cfn_clean/__init__.py#L96-L112
def clean(source): """ Clean up the source: * Replace use of Fn::Join with Fn::Sub """ if isinstance(source, dict): for key, value in source.items(): if key == "Fn::Join": return convert_join(value) else: source[key] = clean(value) elif isinstance(source, list): return [clean(item) for item in source] return source
[ "def", "clean", "(", "source", ")", ":", "if", "isinstance", "(", "source", ",", "dict", ")", ":", "for", "key", ",", "value", "in", "source", ".", "items", "(", ")", ":", "if", "key", "==", "\"Fn::Join\"", ":", "return", "convert_join", "(", "value"...
Clean up the source: * Replace use of Fn::Join with Fn::Sub
[ "Clean", "up", "the", "source", ":", "*", "Replace", "use", "of", "Fn", "::", "Join", "with", "Fn", "::", "Sub" ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/askbot.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/askbot.py#L393-L462
def parse_answers(html_question): """Parse the answers of a given HTML question. The method parses the answers related with a given HTML question, as well as all the comments related to the answer. :param html_question: raw HTML question element :returns: a list with the answers """ def parse_answer_container(update_info): """Parse the answer info container of a given HTML question. The method parses the information available in the answer information container. The container can have up to 2 elements: the first one contains the information related with the user who generated the question and the date (if any). The second one contains the date of the updated, and the user who updated it (if not the same who generated the question). :param update_info: beautiful soup update_info container element :returns: an object with the parsed information """ container_info = {} created = update_info[0] answered_at = created.abbr.attrs["title"] # Convert date to UNIX timestamp container_info['added_at'] = str(str_to_datetime(answered_at).timestamp()) container_info['answered_by'] = AskbotParser.parse_user_info(created) try: update_info[1] except IndexError: pass else: updated = update_info[1] updated_at = updated.abbr.attrs["title"] # Convert date to UNIX timestamp container_info['updated_at'] = str(str_to_datetime(updated_at).timestamp()) if AskbotParser.parse_user_info(updated): container_info['updated_by'] = AskbotParser.parse_user_info(updated) return container_info answer_list = [] # Select all the answers bs_question = bs4.BeautifulSoup(html_question, "html.parser") bs_answers = bs_question.select("div.answer") for bs_answer in bs_answers: answer_id = bs_answer.attrs["data-post-id"] votes_element = bs_answer.select("div.vote-number")[0].text accepted_answer = bs_answer.select("div.answer-img-accept")[0].get('title').endswith("correct") # Select the body of the answer body = bs_answer.select("div.post-body") # Get the user information container and parse it update_info = 
body[0].select("div.post-update-info") answer_container = parse_answer_container(update_info) # Remove the update-info-container div to be able to get the body body[0].div.extract().select("div.post-update-info-container") # Override the body with a clean one body = body[0].get_text(strip=True) # Generate the answer object answer = {'id': answer_id, 'score': votes_element, 'summary': body, 'accepted': accepted_answer } # Update the object with the information in the answer container answer.update(answer_container) answer_list.append(answer) return answer_list
[ "def", "parse_answers", "(", "html_question", ")", ":", "def", "parse_answer_container", "(", "update_info", ")", ":", "\"\"\"Parse the answer info container of a given HTML question.\n\n The method parses the information available in the answer information\n container....
Parse the answers of a given HTML question. The method parses the answers related with a given HTML question, as well as all the comments related to the answer. :param html_question: raw HTML question element :returns: a list with the answers
[ "Parse", "the", "answers", "of", "a", "given", "HTML", "question", "." ]
python
test
mesowx/MesoPy
MesoPy.py
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L754-L827
def metadata(self, **kwargs): r""" Returns the metadata for a station or stations. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below for optional parameters. Arguments: ---------- complete: string, optional A value of 1 or 0. When set to 1, an extended list of metadata attributes for each returned station is provided. This result is useful for exploring the zones and regions in which a station resides. e.g. complete='1' sensorvars: string, optional A value of 1 or 0. When set to 1, a complete history of sensor variables and period of record is given for each station. e.g. sensorvars='1' obrange: string, optional Filters metadata for stations which were in operation for a specified time period. Users can specify one date or a date range. Dates are in the format of YYYYmmdd. e.g. obrange='20150101', obrange='20040101,20060101' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- A dictionary of metadata. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['token'] = self.token return self._get_response('stations/metadata', kwargs)
[ "def", "metadata", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_check_geo_param", "(", "kwargs", ")", "kwargs", "[", "'token'", "]", "=", "self", ".", "token", "return", "self", ".", "_get_response", "(", "'stations/metadata'", ",", "kw...
r""" Returns the metadata for a station or stations. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below for optional parameters. Arguments: ---------- complete: string, optional A value of 1 or 0. When set to 1, an extended list of metadata attributes for each returned station is provided. This result is useful for exploring the zones and regions in which a station resides. e.g. complete='1' sensorvars: string, optional A value of 1 or 0. When set to 1, a complete history of sensor variables and period of record is given for each station. e.g. sensorvars='1' obrange: string, optional Filters metadata for stations which were in operation for a specified time period. Users can specify one date or a date range. Dates are in the format of YYYYmmdd. e.g. obrange='20150101', obrange='20040101,20060101' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- A dictionary of metadata. Raises: ------- None.
[ "r", "Returns", "the", "metadata", "for", "a", "station", "or", "stations", ".", "Users", "must", "specify", "at", "least", "one", "geographic", "search", "parameter", "(", "stid", "state", "country", "county", "radius", "bbox", "cwa", "nwsfirezone", "gacc", ...
python
train
digidotcom/python-devicecloud
devicecloud/streams.py
https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/streams.py#L799-L824
def delete_datapoints_in_time_range(self, start_dt=None, end_dt=None): """Delete datapoints from this stream between the provided start and end times If neither a start or end time is specified, all data points in the stream will be deleted. :param start_dt: The datetime after which data points should be deleted or None if all data points from the beginning of time should be deleted. :param end_dt: The datetime before which data points should be deleted or None if all data points until the current time should be deleted. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error """ start_dt = to_none_or_dt(validate_type(start_dt, datetime.datetime, type(None))) end_dt = to_none_or_dt(validate_type(end_dt, datetime.datetime, type(None))) params = {} if start_dt is not None: params['startTime'] = isoformat(start_dt) if end_dt is not None: params['endTime'] = isoformat(end_dt) self._conn.delete("/ws/DataPoint/{stream_id}{querystring}".format( stream_id=self.get_stream_id(), querystring="?" + urllib.parse.urlencode(params) if params else "", ))
[ "def", "delete_datapoints_in_time_range", "(", "self", ",", "start_dt", "=", "None", ",", "end_dt", "=", "None", ")", ":", "start_dt", "=", "to_none_or_dt", "(", "validate_type", "(", "start_dt", ",", "datetime", ".", "datetime", ",", "type", "(", "None", ")...
Delete datapoints from this stream between the provided start and end times If neither a start or end time is specified, all data points in the stream will be deleted. :param start_dt: The datetime after which data points should be deleted or None if all data points from the beginning of time should be deleted. :param end_dt: The datetime before which data points should be deleted or None if all data points until the current time should be deleted. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
[ "Delete", "datapoints", "from", "this", "stream", "between", "the", "provided", "start", "and", "end", "times" ]
python
train
django-fluent/django-fluent-blogs
fluent_blogs/models/query.py
https://github.com/django-fluent/django-fluent-blogs/blob/86b148549a010eaca9a2ea987fe43be250e06c50/fluent_blogs/models/query.py#L187-L195
def get_category_for_slug(slug, language_code=None): """ Find the category for a given slug """ Category = get_category_model() if issubclass(Category, TranslatableModel): return Category.objects.active_translations(language_code, slug=slug).get() else: return Category.objects.get(slug=slug)
[ "def", "get_category_for_slug", "(", "slug", ",", "language_code", "=", "None", ")", ":", "Category", "=", "get_category_model", "(", ")", "if", "issubclass", "(", "Category", ",", "TranslatableModel", ")", ":", "return", "Category", ".", "objects", ".", "acti...
Find the category for a given slug
[ "Find", "the", "category", "for", "a", "given", "slug" ]
python
train
saltstack/salt
salt/modules/iosconfig.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/iosconfig.py#L308-L355
def merge_diff(initial_config=None, initial_path=None, merge_config=None, merge_path=None, saltenv='base'): ''' Return the merge diff, as text, after merging the merge config into the initial config. initial_config The initial configuration sent as text. This argument is ignored when ``initial_path`` is set. initial_path Absolute or remote path from where to load the initial configuration text. This argument allows any URI supported by :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``, ``https://``, ``s3://``, ``ftp:/``, etc. merge_config The config to be merged into the initial config, sent as text. This argument is ignored when ``merge_path`` is set. merge_path Absolute or remote path from where to load the merge configuration text. This argument allows any URI supported by :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``, ``https://``, ``s3://``, ``ftp:/``, etc. saltenv: ``base`` Salt fileserver environment from which to retrieve the file. Ignored if ``initial_path`` or ``merge_path`` is not a ``salt://`` URL. CLI Example: .. code-block:: bash salt '*' iosconfig.merge_diff initial_path=salt://path/to/running.cfg merge_path=salt://path/to/merge.cfg ''' if initial_path: initial_config = __salt__['cp.get_file_str'](initial_path, saltenv=saltenv) candidate_config = merge_text(initial_config=initial_config, merge_config=merge_config, merge_path=merge_path, saltenv=saltenv) clean_running_dict = tree(config=initial_config) clean_running = _print_config_text(clean_running_dict) return _get_diff_text(clean_running, candidate_config)
[ "def", "merge_diff", "(", "initial_config", "=", "None", ",", "initial_path", "=", "None", ",", "merge_config", "=", "None", ",", "merge_path", "=", "None", ",", "saltenv", "=", "'base'", ")", ":", "if", "initial_path", ":", "initial_config", "=", "__salt__"...
Return the merge diff, as text, after merging the merge config into the initial config. initial_config The initial configuration sent as text. This argument is ignored when ``initial_path`` is set. initial_path Absolute or remote path from where to load the initial configuration text. This argument allows any URI supported by :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``, ``https://``, ``s3://``, ``ftp:/``, etc. merge_config The config to be merged into the initial config, sent as text. This argument is ignored when ``merge_path`` is set. merge_path Absolute or remote path from where to load the merge configuration text. This argument allows any URI supported by :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``, ``https://``, ``s3://``, ``ftp:/``, etc. saltenv: ``base`` Salt fileserver environment from which to retrieve the file. Ignored if ``initial_path`` or ``merge_path`` is not a ``salt://`` URL. CLI Example: .. code-block:: bash salt '*' iosconfig.merge_diff initial_path=salt://path/to/running.cfg merge_path=salt://path/to/merge.cfg
[ "Return", "the", "merge", "diff", "as", "text", "after", "merging", "the", "merge", "config", "into", "the", "initial", "config", "." ]
python
train
clinicedc/edc-notification
edc_notification/site_notifications.py
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/site_notifications.py#L47-L56
def get(self, name):
    """Returns a Notification by name.

    Args:
        name: the registered name of the notification to look up.

    Returns:
        The Notification registered under ``name``.

    Raises:
        RegistryNotLoaded: if the registry has not been loaded yet.
        NotificationNotRegistered: if nothing is registered under
            ``name``.
    """
    if not self.loaded:
        raise RegistryNotLoaded(self)
    # Membership test instead of truthiness of the looked-up value:
    # avoids a double lookup and would not misreport a registered but
    # falsy entry as missing.
    if name not in self._registry:
        raise NotificationNotRegistered(
            f"Notification not registered. Got '{name}'."
        )
    return self._registry[name]
[ "def", "get", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "loaded", ":", "raise", "RegistryNotLoaded", "(", "self", ")", "if", "not", "self", ".", "_registry", ".", "get", "(", "name", ")", ":", "raise", "NotificationNotRegistered", "...
Returns a Notification by name.
[ "Returns", "a", "Notification", "by", "name", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/compiler_frontend.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/compiler_frontend.py#L741-L863
def _compile_root_ast_to_ir(schema, ast, type_equivalence_hints=None):
    """Compile a full GraphQL abstract syntax tree (AST) to intermediate representation.

    Args:
        schema: GraphQL schema object, obtained from the graphql library
        ast: the root GraphQL AST node for the query, obtained from the graphql library,
             and already validated against the schema for type-correctness
        type_equivalence_hints: optional dict of GraphQL type to equivalent GraphQL union

    Returns:
        IrAndMetadata named tuple, containing fields:
        - ir_blocks: a list of IR basic block objects
        - input_metadata: a dict of expected input parameters (string) -> inferred GraphQL type
        - output_metadata: a dict of output name (string) -> OutputMetadata object
        - query_metadata_table: a QueryMetadataTable with all location metadata collected
          during query processing (e.g. which locations are folded or optional)
    """
    # Queries must have exactly one root selection: the starting vertex type.
    if len(ast.selection_set.selections) != 1:
        raise GraphQLCompilationError(u'Cannot process AST with more than one root selection!')

    base_ast = ast.selection_set.selections[0]
    base_start_type = get_ast_field_name(base_ast)  # This is the type at which querying starts.

    # Validation passed, so the base_start_type must exist as a field of the root query.
    current_schema_type = get_field_type_from_schema(schema.get_query_type(), base_start_type)

    # Construct the start location of the query and its associated metadata.
    location = Location((base_start_type,))
    base_location_info = LocationInfo(
        parent_location=None,
        type=current_schema_type,
        coerced_from_type=None,
        optional_scopes_depth=0,
        recursive_scopes_depth=0,
        is_within_fold=False,
    )
    query_metadata_table = QueryMetadataTable(location, base_location_info)

    # Default argument value is empty dict
    if not type_equivalence_hints:
        type_equivalence_hints = dict()

    # Construct the starting context object. The context is threaded through (and mutated by)
    # the recursive per-vertex compilation below.
    context = {
        # 'metadata' is the QueryMetadataTable describing all the metadata collected during query
        # processing, including location metadata (e.g. which locations are folded or optional).
        'metadata': query_metadata_table,

        # 'tags' is a dict containing
        #  - location: Location where the tag was defined
        #  - optional: boolean representing whether the tag was defined within an @optional scope
        #  - type: GraphQLType of the tagged value
        'tags': dict(),

        # 'global_filters' is a list that may contain Filter blocks that are generated during
        # query processing, but apply to the global query scope and should be appended to the
        # IR blocks only after the GlobalOperationsStart block has been emitted.
        'global_filters': [],

        # 'outputs' is a dict mapping each output name to another dict which contains
        #  - location: Location where to output from
        #  - optional: boolean representing whether the output was defined within an @optional
        #              scope
        #  - type: GraphQLType of the output
        #  - fold: FoldScopeLocation object if the current output was defined within a fold scope,
        #          and None otherwise
        'outputs': dict(),

        # 'inputs' is a dict mapping input parameter names to their respective expected GraphQL
        # types, as automatically inferred by inspecting the query structure
        'inputs': dict(),

        # 'type_equivalence_hints' is a dict mapping GraphQL types to equivalent GraphQL unions
        'type_equivalence_hints': type_equivalence_hints,

        # 'type_equivalence_hints_inverse' is the inverse of type_equivalence_hints,
        # which is always invertible.
        'type_equivalence_hints_inverse': invert_dict(type_equivalence_hints),
    }

    # Add the query root basic block to the output.
    basic_blocks = [
        blocks.QueryRoot({base_start_type})
    ]

    # Ensure the GraphQL query root doesn't immediately have a fragment (type coercion).
    # Instead of starting at one type and coercing to another,
    # users should simply start at the type to which they are coercing.
    immediate_fragment = _get_inline_fragment(base_ast)
    if immediate_fragment is not None:
        msg_args = {
            'coerce_to': immediate_fragment.type_condition.name.value,
            'type_from': base_start_type,
        }
        raise GraphQLCompilationError(u'Found inline fragment coercing to type {coerce_to}, '
                                      u'immediately inside query root asking for type {type_from}. '
                                      u'This is a contrived pattern -- you should simply start '
                                      u'your query at {coerce_to}.'.format(**msg_args))

    # Ensure the GraphQL query root doesn't have any vertex directives
    # that are disallowed on the root node.
    validate_root_vertex_directives(base_ast)

    # Compile and add the basic blocks for the query's base AST vertex.
    new_basic_blocks = _compile_ast_node_to_ir(
        schema, current_schema_type, base_ast, location, context)
    basic_blocks.extend(new_basic_blocks)

    # Every @tag defined in the query must be consumed by some filter; enforce that here.
    _validate_all_tags_are_used(context['metadata'])

    # All operations after this point affect the global query scope, and are not related to
    # the "current" location in the query produced by the sequence of Traverse/Backtrack blocks.
    basic_blocks.append(blocks.GlobalOperationsStart())

    # Add any filters that apply to the global query scope.
    basic_blocks.extend(context['global_filters'])

    # Based on the outputs context data, add an output step and construct the output metadata.
    outputs_context = context['outputs']
    basic_blocks.append(_compile_output_step(outputs_context))
    output_metadata = {
        name: OutputMetadata(type=value['type'], optional=value['optional'])
        for name, value in six.iteritems(outputs_context)
    }

    return IrAndMetadata(
        ir_blocks=basic_blocks,
        input_metadata=context['inputs'],
        output_metadata=output_metadata,
        query_metadata_table=context['metadata'])
[ "def", "_compile_root_ast_to_ir", "(", "schema", ",", "ast", ",", "type_equivalence_hints", "=", "None", ")", ":", "if", "len", "(", "ast", ".", "selection_set", ".", "selections", ")", "!=", "1", ":", "raise", "GraphQLCompilationError", "(", "u'Cannot process A...
Compile a full GraphQL abstract syntax tree (AST) to intermediate representation. Args: schema: GraphQL schema object, obtained from the graphql library ast: the root GraphQL AST node for the query, obtained from the graphql library, and already validated against the schema for type-correctness type_equivalence_hints: optional dict of GraphQL type to equivalent GraphQL union Returns: IrAndMetadata named tuple, containing fields: - ir_blocks: a list of IR basic block objects - input_metadata: a dict of expected input parameters (string) -> inferred GraphQL type - output_metadata: a dict of output name (string) -> OutputMetadata object - location_types: a dict of location objects -> GraphQL type objects at that location - coerced_locations: a set of location objects indicating where type coercions have happened
[ "Compile", "a", "full", "GraphQL", "abstract", "syntax", "tree", "(", "AST", ")", "to", "intermediate", "representation", "." ]
python
train
aiven/pghoard
pghoard/restore.py
https://github.com/aiven/pghoard/blob/2994165d4ef3ff7a5669a2527346bcbfb5b3bd8a/pghoard/restore.py#L201-L238
def get_basebackup(self, arg):
    """Download a basebackup from an object store"""
    # Parse "SOURCE=TARGET" tablespace mappings into a dict; reject malformed
    # entries up front before touching configuration or storage.
    tablespace_mapping = {}
    if arg.tablespace_dir:
        try:
            tablespace_mapping = dict(entry.split("=", 1) for entry in arg.tablespace_dir)
        except ValueError:
            raise RestoreError("Invalid tablespace mapping {!r}".format(arg.tablespace_dir))

    self.config = config.read_json_config_file(arg.config, check_commands=False, check_pgdata=False)
    site = config.get_site_from_config(self.config, arg.site)

    # Bundle all restore options so the actual call site stays readable.
    restore_options = dict(
        pgdata=arg.target_dir,
        basebackup=arg.basebackup,
        site=site,
        debug=arg.debug,
        status_output_file=arg.status_output_file,
        primary_conninfo=arg.primary_conninfo,
        recovery_end_command=arg.recovery_end_command,
        recovery_target_action=arg.recovery_target_action,
        recovery_target_name=arg.recovery_target_name,
        recovery_target_time=arg.recovery_target_time,
        recovery_target_xid=arg.recovery_target_xid,
        restore_to_master=arg.restore_to_master,
        overwrite=arg.overwrite,
        tablespace_mapping=tablespace_mapping,
        tablespace_base_dir=arg.tablespace_base_dir,
    )
    try:
        self.storage = self._get_object_storage(site, arg.target_dir)
        self._get_basebackup(**restore_options)
    except RestoreError:  # pylint: disable=try-except-raise
        # Pass RestoreErrors thru
        raise
    except Exception as ex:
        if self.log_tracebacks:
            self.log.exception("Unexpected _get_basebackup failure")
        raise RestoreError("{}: {}".format(ex.__class__.__name__, ex))
[ "def", "get_basebackup", "(", "self", ",", "arg", ")", ":", "if", "not", "arg", ".", "tablespace_dir", ":", "tablespace_mapping", "=", "{", "}", "else", ":", "try", ":", "tablespace_mapping", "=", "dict", "(", "v", ".", "split", "(", "\"=\"", ",", "1",...
Download a basebackup from an object store
[ "Download", "a", "basebackup", "from", "an", "object", "store" ]
python
train