repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
sacherjj/array_devices
array_devices/array3710.py
Load.start_program
def start_program(self, turn_on_load=True):
    """
    Starts running programmed test sequence.

    :param turn_on_load: if True, also mark the load as on after the
        command is sent (only when it is currently off; `load_on`
        appears to be a side-effecting property — TODO confirm against
        the Load class).
    :return: None
    """
    # Build and send the "start program" command frame to the device.
    self.__set_buffer_start(self.CMD_START_PROG)
    self.__set_checksum()
    self.__send_buffer()
    # Turn on Load if not on
    if turn_on_load and not self.load_on:
        self.load_on = True
python
def start_program(self, turn_on_load=True): """ Starts running programmed test sequence :return: None """ self.__set_buffer_start(self.CMD_START_PROG) self.__set_checksum() self.__send_buffer() # Turn on Load if not on if turn_on_load and not self.load_on: self.load_on = True
[ "def", "start_program", "(", "self", ",", "turn_on_load", "=", "True", ")", ":", "self", ".", "__set_buffer_start", "(", "self", ".", "CMD_START_PROG", ")", "self", ".", "__set_checksum", "(", ")", "self", ".", "__send_buffer", "(", ")", "# Turn on Load if not...
Starts running programmed test sequence :return: None
[ "Starts", "running", "programmed", "test", "sequence", ":", "return", ":", "None" ]
train
https://github.com/sacherjj/array_devices/blob/ba93a081e555321125ead33cf6fc5197569ef08f/array_devices/array3710.py#L591-L601
sacherjj/array_devices
array_devices/array3710.py
Load.stop_program
def stop_program(self, turn_off_load=True):
    """
    Stops running programmed test sequence.

    :param turn_off_load: if True, also mark the load as off after the
        command is sent (only when it is currently on; `load_on`
        appears to be a side-effecting property — TODO confirm against
        the Load class).
    :return: None
    """
    # Build and send the "stop program" command frame to the device.
    self.__set_buffer_start(self.CMD_STOP_PROG)
    self.__set_checksum()
    self.__send_buffer()
    if turn_off_load and self.load_on:
        self.load_on = False
python
def stop_program(self, turn_off_load=True): """ Stops running programmed test sequence :return: None """ self.__set_buffer_start(self.CMD_STOP_PROG) self.__set_checksum() self.__send_buffer() if turn_off_load and self.load_on: self.load_on = False
[ "def", "stop_program", "(", "self", ",", "turn_off_load", "=", "True", ")", ":", "self", ".", "__set_buffer_start", "(", "self", ".", "CMD_STOP_PROG", ")", "self", ".", "__set_checksum", "(", ")", "self", ".", "__send_buffer", "(", ")", "if", "turn_off_load"...
Stops running programmed test sequence :return: None
[ "Stops", "running", "programmed", "test", "sequence", ":", "return", ":", "None" ]
train
https://github.com/sacherjj/array_devices/blob/ba93a081e555321125ead33cf6fc5197569ef08f/array_devices/array3710.py#L603-L612
wglass/lighthouse
lighthouse/check.py
Check.run
def run(self):
    """
    Executes the subclass-defined `perform()` check and records the outcome.

    The boolean outcome is appended to the `results` deque, after which the
    recent history is inspected: a passing check whose last `self.fall`
    results all failed is demoted to not passing, and a non-passing check
    whose last `self.rise` results all passed is promoted to passing.  Any
    exception raised by `perform()` is logged and recorded as a failure.
    """
    logger.debug("Running %s check", self.name)
    try:
        outcome = self.perform()
    except Exception:
        # A crashing check counts as a failed check.
        logger.exception("Error while performing %s check", self.name)
        outcome = False
    logger.debug("Result: %s", outcome)
    self.results.append(outcome)
    # Demotion is evaluated before promotion, mirroring the documented
    # rise/fall semantics.
    if self.passing and not any(self.last_n_results(self.fall)):
        logger.info(
            "%s check failed %d time(s), no longer passing.",
            self.name, self.fall,
        )
        self.passing = False
    if not self.passing and all(self.last_n_results(self.rise)):
        logger.info(
            "%s check passed %d time(s), is now passing.",
            self.name, self.rise
        )
        self.passing = True
python
def run(self): """ Calls the `perform()` method defined by subclasses and stores the result in a `results` deque. After the result is determined the `results` deque is analyzed to see if the `passing` flag should be updated. If the check was considered passing and the previous `self.fall` number of checks failed, the check is updated to not be passing. If the check was not passing and the previous `self.rise` number of checks passed, the check is updated to be considered passing. """ logger.debug("Running %s check", self.name) try: result = self.perform() except Exception: logger.exception("Error while performing %s check", self.name) result = False logger.debug("Result: %s", result) self.results.append(result) if self.passing and not any(self.last_n_results(self.fall)): logger.info( "%s check failed %d time(s), no longer passing.", self.name, self.fall, ) self.passing = False if not self.passing and all(self.last_n_results(self.rise)): logger.info( "%s check passed %d time(s), is now passing.", self.name, self.rise ) self.passing = True
[ "def", "run", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Running %s check\"", ",", "self", ".", "name", ")", "try", ":", "result", "=", "self", ".", "perform", "(", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Error ...
Calls the `perform()` method defined by subclasses and stores the result in a `results` deque. After the result is determined the `results` deque is analyzed to see if the `passing` flag should be updated. If the check was considered passing and the previous `self.fall` number of checks failed, the check is updated to not be passing. If the check was not passing and the previous `self.rise` number of checks passed, the check is updated to be considered passing.
[ "Calls", "the", "perform", "()", "method", "defined", "by", "subclasses", "and", "stores", "the", "result", "in", "a", "results", "deque", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/check.py#L60-L94
wglass/lighthouse
lighthouse/check.py
Check.last_n_results
def last_n_results(self, n):
    """
    Helper method for returning a set number of the previous check results.

    :param n: number of trailing results to return.  If `n` exceeds the
        number of stored results, all stored results are returned.
    :return: list of the most recent `n` results, oldest first.
    """
    # A deque does not support slicing, so use islice.  Clamp the start
    # index at zero: islice() raises ValueError for a negative start,
    # which the unclamped `len - n` would produce whenever n > len.
    start = max(0, len(self.results) - n)
    return list(itertools.islice(self.results, start, len(self.results)))
python
def last_n_results(self, n): """ Helper method for returning a set number of the previous check results. """ return list( itertools.islice( self.results, len(self.results) - n, len(self.results) ) )
[ "def", "last_n_results", "(", "self", ",", "n", ")", ":", "return", "list", "(", "itertools", ".", "islice", "(", "self", ".", "results", ",", "len", "(", "self", ".", "results", ")", "-", "n", ",", "len", "(", "self", ".", "results", ")", ")", "...
Helper method for returning a set number of the previous check results.
[ "Helper", "method", "for", "returning", "a", "set", "number", "of", "the", "previous", "check", "results", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/check.py#L96-L104
wglass/lighthouse
lighthouse/check.py
Check.apply_config
def apply_config(self, config):
    """
    Sets attributes based on the given config.

    Reads the `rise` and `fall` thresholds, delegates remaining settings
    to `apply_check_config()`, and resizes the `results` deque so its
    capacity is exactly max(rise, fall) — dropping the oldest results
    when shrinking and left-padding with False results when growing.
    """
    self.rise = int(config["rise"])
    self.fall = int(config["fall"])
    self.apply_check_config(config)
    target_len = max(self.rise, self.fall)
    if self.results.maxlen == target_len:
        return
    history = list(self.results)
    if len(history) > target_len:
        # Keep only the newest results.
        history = history[len(history) - target_len:]
    else:
        # Pad the front with failures until the length matches.
        history = [False] * (target_len - len(history)) + history
    self.results = deque(history, maxlen=target_len)
python
def apply_config(self, config): """ Sets attributes based on the given config. Also adjusts the `results` deque to either expand (padding itself with False results) or contract (by removing the oldest results) until it matches the required length. """ self.rise = int(config["rise"]) self.fall = int(config["fall"]) self.apply_check_config(config) if self.results.maxlen == max(self.rise, self.fall): return results = list(self.results) while len(results) > max(self.rise, self.fall): results.pop(0) while len(results) < max(self.rise, self.fall): results.insert(0, False) self.results = deque( results, maxlen=max(self.rise, self.fall) )
[ "def", "apply_config", "(", "self", ",", "config", ")", ":", "self", ".", "rise", "=", "int", "(", "config", "[", "\"rise\"", "]", ")", "self", ".", "fall", "=", "int", "(", "config", "[", "\"fall\"", "]", ")", "self", ".", "apply_check_config", "(",...
Sets attributes based on the given config. Also adjusts the `results` deque to either expand (padding itself with False results) or contract (by removing the oldest results) until it matches the required length.
[ "Sets", "attributes", "based", "on", "the", "given", "config", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/check.py#L106-L131
wglass/lighthouse
lighthouse/check.py
Check.validate_config
def validate_config(cls, config):
    """
    Validates that required config entries are present.

    Both `rise` and `fall` must be configured: they denote how many
    consecutive passes a check needs before being considered passing and
    how many consecutive failures before being considered failing.  Any
    check-specific entries are validated by `validate_check_config()`.

    :raises ValueError: if `rise` or `fall` is missing from the config.
    """
    if "rise" not in config:
        raise ValueError("No 'rise' configured")
    if "fall" not in config:
        raise ValueError("No 'fall' configured")
    cls.validate_check_config(config)
python
def validate_config(cls, config): """ Validates that required config entries are present. Each check requires a `host`, `port`, `rise` and `fall` to be configured. The rise and fall variables are integers denoting how many times a check must pass before being considered passing and how many times a check must fail before being considered failing. """ if "rise" not in config: raise ValueError("No 'rise' configured") if "fall" not in config: raise ValueError("No 'fall' configured") cls.validate_check_config(config)
[ "def", "validate_config", "(", "cls", ",", "config", ")", ":", "if", "\"rise\"", "not", "in", "config", ":", "raise", "ValueError", "(", "\"No 'rise' configured\"", ")", "if", "\"fall\"", "not", "in", "config", ":", "raise", "ValueError", "(", "\"No 'fall' con...
Validates that required config entries are present. Each check requires a `host`, `port`, `rise` and `fall` to be configured. The rise and fall variables are integers denoting how many times a check must pass before being considered passing and how many times a check must fail before being considered failing.
[ "Validates", "that", "required", "config", "entries", "are", "present", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/check.py#L134-L150
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer.vectorize
def vectorize(self, docs):
    """
    Vectorizes a list of documents using their DCS representations.

    :param docs: list of document strings.
    :return: (n_docs, n_concepts) array of concept weights, normalized
        by the global maximum weight.
    """
    doc_core_sems, all_concepts = self._extract_core_semantics(docs)
    # Map each concept to its column index once, instead of an O(n)
    # list.index() scan per (document, concept) pair.
    concept_index = {con: j for j, con in enumerate(all_concepts)}
    vecs = np.zeros((len(docs), len(all_concepts)))
    for i, core_sems in enumerate(doc_core_sems):
        for con, weight in core_sems:
            vecs[i, concept_index[con]] = weight

    # Normalize
    return vecs/np.max(vecs)
python
def vectorize(self, docs): """ Vectorizes a list of documents using their DCS representations. """ doc_core_sems, all_concepts = self._extract_core_semantics(docs) shape = (len(docs), len(all_concepts)) vecs = np.zeros(shape) for i, core_sems in enumerate(doc_core_sems): for con, weight in core_sems: j = all_concepts.index(con) vecs[i,j] = weight # Normalize return vecs/np.max(vecs)
[ "def", "vectorize", "(", "self", ",", "docs", ")", ":", "doc_core_sems", ",", "all_concepts", "=", "self", ".", "_extract_core_semantics", "(", "docs", ")", "shape", "=", "(", "len", "(", "docs", ")", ",", "len", "(", "all_concepts", ")", ")", "vecs", ...
Vectorizes a list of documents using their DCS representations.
[ "Vectorizes", "a", "list", "of", "documents", "using", "their", "DCS", "representations", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L68-L82
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._process_doc
def _process_doc(self, doc):
    """
    Applies DCS to a document to extract its core concepts and their
    weights.

    :param doc: raw document string.
    :return: list of (concept, weight) tuples for the core concepts.
    """
    # Lowercase and PoS-tag the document, converting Penn Treebank tags
    # to WordNet PoS categories.
    doc = doc.lower()
    tagged_tokens = [(t, penn_to_wordnet(t.tag_))
                     for t in spacy(doc, tag=True, parse=False, entity=False)]
    tokens = [tok for tok, _ in tagged_tokens]

    # Resolve each term to a single concept, then weight the concepts.
    term_concept_map = self._disambiguate_doc(tagged_tokens)
    concept_weights = self._weight_concepts(tokens, term_concept_map)

    # Build lexical chains and keep only the representative ones.
    chains = self._lexical_chains(doc, term_concept_map)
    core_semantics = self._core_semantics(chains, concept_weights)
    flattened = [con for chain in core_semantics for con in chain]
    return [(con, concept_weights[con]) for con in flattened]
python
def _process_doc(self, doc): """ Applies DCS to a document to extract its core concepts and their weights. """ # Prep doc = doc.lower() tagged_tokens = [(t, penn_to_wordnet(t.tag_)) for t in spacy(doc, tag=True, parse=False, entity=False)] tokens = [t for t, tag in tagged_tokens] term_concept_map = self._disambiguate_doc(tagged_tokens) concept_weights = self._weight_concepts(tokens, term_concept_map) # Compute core semantics lexical_chains = self._lexical_chains(doc, term_concept_map) core_semantics = self._core_semantics(lexical_chains, concept_weights) core_concepts = [c for chain in core_semantics for c in chain] return [(con, concept_weights[con]) for con in core_concepts]
[ "def", "_process_doc", "(", "self", ",", "doc", ")", ":", "# Prep", "doc", "=", "doc", ".", "lower", "(", ")", "tagged_tokens", "=", "[", "(", "t", ",", "penn_to_wordnet", "(", "t", ".", "tag_", ")", ")", "for", "t", "in", "spacy", "(", "doc", ",...
Applies DCS to a document to extract its core concepts and their weights.
[ "Applies", "DCS", "to", "a", "document", "to", "extract", "its", "core", "concepts", "and", "their", "weights", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L85-L101
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._disambiguate_doc
def _disambiguate_doc(self, tagged_tokens):
    """
    Takes a list of tagged tokens, representing a document, in the form:

        [(token, tag), ...]

    And returns a mapping of terms to their disambiguated concepts
    (synsets).
    """
    # Group tokens by PoS; tokens whose tag is not one of the WordNet
    # PoS categories (noun/verb/adjective/adverb) are dropped.
    pos_groups = {pos: [] for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]}
    for tok, tag in tagged_tokens:
        if tag in pos_groups:
            pos_groups[tag].append(tok)

    # Disambiguate each PoS group independently and merge the results.
    # (Named `term_concept_map` rather than `map` to avoid shadowing the
    # builtin; stale commented-out debug prints removed.)
    term_concept_map = {}
    for tag, toks in pos_groups.items():
        term_concept_map.update(self._disambiguate_pos(toks, tag))
    return term_concept_map
python
def _disambiguate_doc(self, tagged_tokens): """ Takes a list of tagged tokens, representing a document, in the form: [(token, tag), ...] And returns a mapping of terms to their disambiguated concepts (synsets). """ # Group tokens by PoS pos_groups = {pos: [] for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]} for tok, tag in tagged_tokens: if tag in pos_groups: pos_groups[tag].append(tok) #print(pos_groups) # Map of final term -> concept mappings map = {} for tag, toks in pos_groups.items(): map.update(self._disambiguate_pos(toks, tag)) #nice_map = {k: map[k].lemma_names() for k in map.keys()} #print(json.dumps(nice_map, indent=4, sort_keys=True)) return map
[ "def", "_disambiguate_doc", "(", "self", ",", "tagged_tokens", ")", ":", "# Group tokens by PoS", "pos_groups", "=", "{", "pos", ":", "[", "]", "for", "pos", "in", "[", "wn", ".", "NOUN", ",", "wn", ".", "VERB", ",", "wn", ".", "ADJ", ",", "wn", ".",...
Takes a list of tagged tokens, representing a document, in the form: [(token, tag), ...] And returns a mapping of terms to their disambiguated concepts (synsets).
[ "Takes", "a", "list", "of", "tagged", "tokens", "representing", "a", "document", "in", "the", "form", ":" ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L104-L130
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._disambiguate_pos
def _disambiguate_pos(self, terms, pos):
    """
    Disambiguates a list of tokens of a given PoS.

    For each term, selects the candidate concept (sense) with the
    maximum aggregate similarity against the candidate concepts of all
    other terms sharing the same PoS.

    :param terms: tokens of a single part of speech.
    :param pos: the WordNet PoS shared by the terms.
    :return: dict mapping each term to its chosen synset.
    """
    # Map the terms to candidate concepts.
    # Consider only the top 3 most common senses.
    candidate_map = {term: wn.synsets(term, pos=pos)[:3] for term in terms}

    # Filter to unique concepts, back to a list for consistent ordering.
    concepts = list(set(c for cons in candidate_map.values() for c in cons))
    # Precompute an index lookup table: the original repeated O(n)
    # concepts.index() scans inside a triple loop.
    concept_index = {c: i for i, c in enumerate(concepts)}

    sim_mat = self._similarity_matrix(concepts)

    # Final map of terms to their disambiguated concepts.
    term_concept_map = {}
    for term, cons in candidate_map.items():
        # Some words may not be in WordNet and thus have no candidate
        # concepts, so skip them.
        if not cons:
            continue
        scores = []
        for con in cons:
            i = concept_index[con]
            scores_ = []
            for term_, cons_ in candidate_map.items():
                # Skip the term itself and terms without candidates.
                if term == term_ or not cons_:
                    continue
                cons_idx = [concept_index[c] for c in cons_]
                # Best similarity between this sense and any sense of
                # the other term.
                scores_.append(max(sim_mat[i, cons_idx]))
            scores.append(sum(scores_))
        term_concept_map[term] = cons[np.argmax(scores)]
    return term_concept_map
python
def _disambiguate_pos(self, terms, pos): """ Disambiguates a list of tokens of a given PoS. """ # Map the terms to candidate concepts # Consider only the top 3 most common senses candidate_map = {term: wn.synsets(term, pos=pos)[:3] for term in terms} # Filter to unique concepts concepts = set(c for cons in candidate_map.values() for c in cons) # Back to list for consistent ordering concepts = list(concepts) sim_mat = self._similarity_matrix(concepts) # Final map of terms to their disambiguated concepts map = {} # This is terrible # For each term, select the candidate concept # which has the maximum aggregate similarity score against # all other candidate concepts of all other terms sharing the same PoS for term, cons in candidate_map.items(): # Some words may not be in WordNet # and thus have no candidate concepts, so skip if not cons: continue scores = [] for con in cons: i = concepts.index(con) scores_ = [] for term_, cons_ in candidate_map.items(): # Some words may not be in WordNet # and thus have no candidate concepts, so skip if term == term_ or not cons_: continue cons_idx = [concepts.index(c) for c in cons_] top_sim = max(sim_mat[i,cons_idx]) scores_.append(top_sim) scores.append(sum(scores_)) best_idx = np.argmax(scores) map[term] = cons[best_idx] return map
[ "def", "_disambiguate_pos", "(", "self", ",", "terms", ",", "pos", ")", ":", "# Map the terms to candidate concepts", "# Consider only the top 3 most common senses", "candidate_map", "=", "{", "term", ":", "wn", ".", "synsets", "(", "term", ",", "pos", "=", "pos", ...
Disambiguates a list of tokens of a given PoS.
[ "Disambiguates", "a", "list", "of", "tokens", "of", "a", "given", "PoS", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L133-L176
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._similarity_matrix
def _similarity_matrix(self, concepts): """ Computes a semantic similarity matrix for a set of concepts. """ n_cons = len(concepts) sim_mat = np.zeros((n_cons, n_cons)) for i, c1 in enumerate(concepts): for j, c2 in enumerate(concepts): # Just build the lower triangle if i >= j: sim_mat[i,j] = self._semsim(c1, c2) if i != j else 1. return sim_mat + sim_mat.T - np.diag(sim_mat.diagonal())
python
def _similarity_matrix(self, concepts): """ Computes a semantic similarity matrix for a set of concepts. """ n_cons = len(concepts) sim_mat = np.zeros((n_cons, n_cons)) for i, c1 in enumerate(concepts): for j, c2 in enumerate(concepts): # Just build the lower triangle if i >= j: sim_mat[i,j] = self._semsim(c1, c2) if i != j else 1. return sim_mat + sim_mat.T - np.diag(sim_mat.diagonal())
[ "def", "_similarity_matrix", "(", "self", ",", "concepts", ")", ":", "n_cons", "=", "len", "(", "concepts", ")", "sim_mat", "=", "np", ".", "zeros", "(", "(", "n_cons", ",", "n_cons", ")", ")", "for", "i", ",", "c1", "in", "enumerate", "(", "concepts...
Computes a semantic similarity matrix for a set of concepts.
[ "Computes", "a", "semantic", "similarity", "matrix", "for", "a", "set", "of", "concepts", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L179-L190
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._semsim
def _semsim(self, c1, c2):
    """
    Computes the semantic similarity between two concepts.

    The semantic similarity is a combination of two sem sims:

    1. An "explicit" sem sim metric, that is, one which is directly
       encoded in the WordNet graph.  Here it is just Wu-Palmer
       similarity.
    2. An "implicit" sem sim metric.  See `_imp_semsim`.

    Note we can't use the NLTK Wu-Palmer similarity implementation
    because we need to incorporate the implicit sem sim, but it's fairly
    straightforward -- leaning on
    <http://www.nltk.org/_modules/nltk/corpus/reader/wordnet.html#Synset.wup_similarity>,
    see that for more info.

    Though...the formula in the paper includes an extra term in the
    denominator, which is wrong, so we leave it out.
    """
    # Identical concepts are maximally similar.
    if c1 == c2:
        return 1.
    # The cache is keyed on ordered pairs, so check both orderings.
    if (c1, c2) in self.concept_sims:
        return self.concept_sims[(c1, c2)]
    elif (c2, c1) in self.concept_sims:
        return self.concept_sims[(c2, c1)]
    else:
        need_root = c1._needs_root()
        subsumers = c1.lowest_common_hypernyms(c2, simulate_root=need_root)
        if not subsumers:
            # For relationships not in WordNet, fallback on just implicit semsim.
            # NOTE(review): fallback results are not cached, unlike the
            # combined score below — confirm whether that is intentional.
            return self._imp_semsim(c1, c2)
        subsumer = subsumers[0]
        # +1 because the paths include both endpoints (see the NLTK
        # wup_similarity source linked above).
        depth = subsumer.max_depth() + 1
        len1 = c1.shortest_path_distance(subsumer, simulate_root=need_root)
        len2 = c2.shortest_path_distance(subsumer, simulate_root=need_root)
        if len1 is None or len2 is None:
            # See above
            return self._imp_semsim(c1, c2)
        len1 += depth
        len2 += depth
        # Wu-Palmer-style score, blended with the implicit similarity in
        # both numerator and denominator.
        imp_score = self._imp_semsim(c1, c2)
        sim = (2.*depth + imp_score)/(len1 + len2 + imp_score)
        self.concept_sims[(c1, c2)] = sim
        return sim
python
def _semsim(self, c1, c2): """ Computes the semantic similarity between two concepts. The semantic similarity is a combination of two sem sims: 1. An "explicit" sem sim metric, that is, one which is directly encoded in the WordNet graph. Here it is just Wu-Palmer similarity. 2. An "implicit" sem sim metric. See `_imp_semsim`. Note we can't use the NLTK Wu-Palmer similarity implementation because we need to incorporate the implicit sem sim, but it's fairly straightforward -- leaning on <http://www.nltk.org/_modules/nltk/corpus/reader/wordnet.html#Synset.wup_similarity>, see that for more info. Though...the formula in the paper includes an extra term in the denominator, which is wrong, so we leave it out. """ if c1 == c2: return 1. if (c1, c2) in self.concept_sims: return self.concept_sims[(c1, c2)] elif (c2, c1) in self.concept_sims: return self.concept_sims[(c2, c1)] else: need_root = c1._needs_root() subsumers = c1.lowest_common_hypernyms(c2, simulate_root=need_root) if not subsumers: # For relationships not in WordNet, fallback on just implicit semsim. return self._imp_semsim(c1, c2) subsumer = subsumers[0] depth = subsumer.max_depth() + 1 len1 = c1.shortest_path_distance(subsumer, simulate_root=need_root) len2 = c2.shortest_path_distance(subsumer, simulate_root=need_root) if len1 is None or len2 is None: # See above return self._imp_semsim(c1, c2) len1 += depth len2 += depth imp_score = self._imp_semsim(c1, c2) sim = (2.*depth + imp_score)/(len1 + len2 + imp_score) self.concept_sims[(c1, c2)] = sim return sim
[ "def", "_semsim", "(", "self", ",", "c1", ",", "c2", ")", ":", "if", "c1", "==", "c2", ":", "return", "1.", "if", "(", "c1", ",", "c2", ")", "in", "self", ".", "concept_sims", ":", "return", "self", ".", "concept_sims", "[", "(", "c1", ",", "c2...
Computes the semantic similarity between two concepts. The semantic similarity is a combination of two sem sims: 1. An "explicit" sem sim metric, that is, one which is directly encoded in the WordNet graph. Here it is just Wu-Palmer similarity. 2. An "implicit" sem sim metric. See `_imp_semsim`. Note we can't use the NLTK Wu-Palmer similarity implementation because we need to incorporate the implicit sem sim, but it's fairly straightforward -- leaning on <http://www.nltk.org/_modules/nltk/corpus/reader/wordnet.html#Synset.wup_similarity>, see that for more info. Though...the formula in the paper includes an extra term in the denominator, which is wrong, so we leave it out.
[ "Computes", "the", "semantic", "similarity", "between", "two", "concepts", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L193-L243
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._imp_semsim
def _imp_semsim(self, c1, c2):
    """
    Computes the "implicit" semantic similarity between two concepts.

    The paper's implicit semantic similarity metric involves iteratively
    computing string overlaps; this is a modification where we instead
    use inverse Sift4 distance (a fast approximation of Levenshtein
    distance) between the concepts' descriptions.

    Frankly ~ I don't know if this is an appropriate substitute, so I'll
    have to play around with this and see.
    """
    first = self._description(c1)
    second = self._description(c2)
    # +1 guards against division by zero for identical descriptions.
    inverse_distance = 1/(sift4(first, second) + 1)
    return math.log(inverse_distance + 1)
python
def _imp_semsim(self, c1, c2): """ The paper's implicit semantic similarity metric involves iteratively computing string overlaps; this is a modification where we instead use inverse Sift4 distance (a fast approximation of Levenshtein distance). Frankly ~ I don't know if this is an appropriate substitute, so I'll have to play around with this and see. """ desc1 = self._description(c1) desc2 = self._description(c2) raw_sim = 1/(sift4(desc1, desc2) + 1) return math.log(raw_sim + 1)
[ "def", "_imp_semsim", "(", "self", ",", "c1", ",", "c2", ")", ":", "desc1", "=", "self", ".", "_description", "(", "c1", ")", "desc2", "=", "self", ".", "_description", "(", "c2", ")", "raw_sim", "=", "1", "/", "(", "sift4", "(", "desc1", ",", "d...
The paper's implicit semantic similarity metric involves iteratively computing string overlaps; this is a modification where we instead use inverse Sift4 distance (a fast approximation of Levenshtein distance). Frankly ~ I don't know if this is an appropriate substitute, so I'll have to play around with this and see.
[ "The", "paper", "s", "implicit", "semantic", "similarity", "metric", "involves", "iteratively", "computing", "string", "overlaps", ";", "this", "is", "a", "modification", "where", "we", "instead", "use", "inverse", "Sift4", "distance", "(", "a", "fast", "approxi...
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L246-L261
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._core_semantics
def _core_semantics(self, lex_chains, concept_weights): """ Returns the n representative lexical chains for a document. """ chain_scores = [self._score_chain(lex_chain, adj_submat, concept_weights) for lex_chain, adj_submat in lex_chains] scored_chains = zip(lex_chains, chain_scores) scored_chains = sorted(scored_chains, key=lambda x: x[1], reverse=True) thresh = (self.alpha/len(lex_chains)) * sum(chain_scores) return [chain for (chain, adj_mat), score in scored_chains if score >= thresh][:self.n_chains]
python
def _core_semantics(self, lex_chains, concept_weights): """ Returns the n representative lexical chains for a document. """ chain_scores = [self._score_chain(lex_chain, adj_submat, concept_weights) for lex_chain, adj_submat in lex_chains] scored_chains = zip(lex_chains, chain_scores) scored_chains = sorted(scored_chains, key=lambda x: x[1], reverse=True) thresh = (self.alpha/len(lex_chains)) * sum(chain_scores) return [chain for (chain, adj_mat), score in scored_chains if score >= thresh][:self.n_chains]
[ "def", "_core_semantics", "(", "self", ",", "lex_chains", ",", "concept_weights", ")", ":", "chain_scores", "=", "[", "self", ".", "_score_chain", "(", "lex_chain", ",", "adj_submat", ",", "concept_weights", ")", "for", "lex_chain", ",", "adj_submat", "in", "l...
Returns the n representative lexical chains for a document.
[ "Returns", "the", "n", "representative", "lexical", "chains", "for", "a", "document", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L264-L273
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._extract_core_semantics
def _extract_core_semantics(self, docs): """ Extracts core semantics for a list of documents, returning them along with a list of all the concepts represented. """ all_concepts = [] doc_core_sems = [] for doc in docs: core_sems = self._process_doc(doc) doc_core_sems.append(core_sems) all_concepts += [con for con, weight in core_sems] return doc_core_sems, list(set(all_concepts))
python
def _extract_core_semantics(self, docs): """ Extracts core semantics for a list of documents, returning them along with a list of all the concepts represented. """ all_concepts = [] doc_core_sems = [] for doc in docs: core_sems = self._process_doc(doc) doc_core_sems.append(core_sems) all_concepts += [con for con, weight in core_sems] return doc_core_sems, list(set(all_concepts))
[ "def", "_extract_core_semantics", "(", "self", ",", "docs", ")", ":", "all_concepts", "=", "[", "]", "doc_core_sems", "=", "[", "]", "for", "doc", "in", "docs", ":", "core_sems", "=", "self", ".", "_process_doc", "(", "doc", ")", "doc_core_sems", ".", "a...
Extracts core semantics for a list of documents, returning them along with a list of all the concepts represented.
[ "Extracts", "core", "semantics", "for", "a", "list", "of", "documents", "returning", "them", "along", "with", "a", "list", "of", "all", "the", "concepts", "represented", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L276-L287
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._lexical_chains
def _lexical_chains(self, doc, term_concept_map):
    """
    Builds lexical chains, as an adjacency matrix, using a
    disambiguated term-concept map.

    :param doc: the document (not referenced in the body — presumably
        kept for interface symmetry; verify against callers).
    :param term_concept_map: mapping of terms to their synsets.
    :return: list of (concept list, adjacency sub-matrix) tuples, one
        per connected component of the concept relation graph.
    """
    # Deduplicate the concepts; list order fixes the matrix row/column
    # assignment below.
    concepts = list({c for c in term_concept_map.values()})

    # Build an adjacency matrix for the graph
    # Using the encoding:
    # 1 = identity/synonymy, 2 = hypernymy/hyponymy, 3 = meronymy, 0 = no edge
    n_cons = len(concepts)
    adj_mat = np.zeros((n_cons, n_cons))
    for i, c in enumerate(concepts):
        # TO DO can only do i >= j since the graph is undirected
        for j, c_ in enumerate(concepts):
            edge = 0
            if c == c_:
                edge = 1
            # TO DO when should simulate root be True?
            elif c_ in c._shortest_hypernym_paths(simulate_root=False).keys():
                edge = 2
            elif c in c_._shortest_hypernym_paths(simulate_root=False).keys():
                edge = 2
            elif c_ in c.member_meronyms() + c.part_meronyms() + c.substance_meronyms():
                edge = 3
            elif c in c_.member_meronyms() + c_.part_meronyms() + c_.substance_meronyms():
                edge = 3
            adj_mat[i,j] = edge

    # Group connected concepts by labels
    # (connected_components returns (n_components, labels); index [1]
    # takes the per-node label array.)
    concept_labels = connected_components(adj_mat, directed=False)[1]
    lexical_chains = [([], []) for i in range(max(concept_labels) + 1)]
    for i, concept in enumerate(concepts):
        label = concept_labels[i]
        lexical_chains[label][0].append(concept)
        lexical_chains[label][1].append(i)

    # Return the lexical chains as (concept list, adjacency sub-matrix) tuples
    return [(chain, adj_mat[indices][:,indices]) for chain, indices in lexical_chains]
python
def _lexical_chains(self, doc, term_concept_map): """ Builds lexical chains, as an adjacency matrix, using a disambiguated term-concept map. """ concepts = list({c for c in term_concept_map.values()}) # Build an adjacency matrix for the graph # Using the encoding: # 1 = identity/synonymy, 2 = hypernymy/hyponymy, 3 = meronymy, 0 = no edge n_cons = len(concepts) adj_mat = np.zeros((n_cons, n_cons)) for i, c in enumerate(concepts): # TO DO can only do i >= j since the graph is undirected for j, c_ in enumerate(concepts): edge = 0 if c == c_: edge = 1 # TO DO when should simulate root be True? elif c_ in c._shortest_hypernym_paths(simulate_root=False).keys(): edge = 2 elif c in c_._shortest_hypernym_paths(simulate_root=False).keys(): edge = 2 elif c_ in c.member_meronyms() + c.part_meronyms() + c.substance_meronyms(): edge = 3 elif c in c_.member_meronyms() + c_.part_meronyms() + c_.substance_meronyms(): edge = 3 adj_mat[i,j] = edge # Group connected concepts by labels concept_labels = connected_components(adj_mat, directed=False)[1] lexical_chains = [([], []) for i in range(max(concept_labels) + 1)] for i, concept in enumerate(concepts): label = concept_labels[i] lexical_chains[label][0].append(concept) lexical_chains[label][1].append(i) # Return the lexical chains as (concept list, adjacency sub-matrix) tuples return [(chain, adj_mat[indices][:,indices]) for chain, indices in lexical_chains]
[ "def", "_lexical_chains", "(", "self", ",", "doc", ",", "term_concept_map", ")", ":", "concepts", "=", "list", "(", "{", "c", "for", "c", "in", "term_concept_map", ".", "values", "(", ")", "}", ")", "# Build an adjacency matrix for the graph", "# Using the encod...
Builds lexical chains, as an adjacency matrix, using a disambiguated term-concept map.
[ "Builds", "lexical", "chains", "as", "an", "adjacency", "matrix", "using", "a", "disambiguated", "term", "-", "concept", "map", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L290-L330
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._score_chain
def _score_chain(self, lexical_chain, adj_submat, concept_weights): """ Computes the score for a lexical chain. """ scores = [] # Compute scores for concepts in the chain for i, c in enumerate(lexical_chain): score = concept_weights[c] * self.relation_weights[0] rel_scores = [] for j, c_ in enumerate(lexical_chain): if adj_submat[i,j] == 2: rel_scores.append(self.relation_weights[1] * concept_weights[c_]) elif adj_submat[i,j] == 3: rel_scores.append(self.relation_weights[2] * concept_weights[c_]) scores.append(score + sum(rel_scores)) # The chain's score is just the sum of its concepts' scores return sum(scores)
python
def _score_chain(self, lexical_chain, adj_submat, concept_weights): """ Computes the score for a lexical chain. """ scores = [] # Compute scores for concepts in the chain for i, c in enumerate(lexical_chain): score = concept_weights[c] * self.relation_weights[0] rel_scores = [] for j, c_ in enumerate(lexical_chain): if adj_submat[i,j] == 2: rel_scores.append(self.relation_weights[1] * concept_weights[c_]) elif adj_submat[i,j] == 3: rel_scores.append(self.relation_weights[2] * concept_weights[c_]) scores.append(score + sum(rel_scores)) # The chain's score is just the sum of its concepts' scores return sum(scores)
[ "def", "_score_chain", "(", "self", ",", "lexical_chain", ",", "adj_submat", ",", "concept_weights", ")", ":", "scores", "=", "[", "]", "# Compute scores for concepts in the chain", "for", "i", ",", "c", "in", "enumerate", "(", "lexical_chain", ")", ":", "score"...
Computes the score for a lexical chain.
[ "Computes", "the", "score", "for", "a", "lexical", "chain", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L333-L353
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._weight_concepts
def _weight_concepts(self, tokens, term_concept_map): """ Calculates weights for concepts in a document. This is just the frequency of terms which map to a concept. """ weights = {c: 0 for c in term_concept_map.values()} for t in tokens: # Skip terms that aren't one of the PoS we used if t not in term_concept_map: continue con = term_concept_map[t] weights[con] += 1 # TO DO paper doesn't mention normalizing these weights...should we? return weights
python
def _weight_concepts(self, tokens, term_concept_map): """ Calculates weights for concepts in a document. This is just the frequency of terms which map to a concept. """ weights = {c: 0 for c in term_concept_map.values()} for t in tokens: # Skip terms that aren't one of the PoS we used if t not in term_concept_map: continue con = term_concept_map[t] weights[con] += 1 # TO DO paper doesn't mention normalizing these weights...should we? return weights
[ "def", "_weight_concepts", "(", "self", ",", "tokens", ",", "term_concept_map", ")", ":", "weights", "=", "{", "c", ":", "0", "for", "c", "in", "term_concept_map", ".", "values", "(", ")", "}", "for", "t", "in", "tokens", ":", "# Skip terms that aren't one...
Calculates weights for concepts in a document. This is just the frequency of terms which map to a concept.
[ "Calculates", "weights", "for", "concepts", "in", "a", "document", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L356-L372
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._description
def _description(self, concept): """ Returns a "description" of a concept, as defined in the paper. The paper describes the description as a string, so this is a slight modification where we instead represent the definition as a list of tokens. """ if concept not in self.descriptions: lemmas = concept.lemma_names() gloss = self._gloss(concept) glosses = [self._gloss(rel) for rel in self._related(concept)] raw_desc = ' '.join(lemmas + [gloss] + glosses) desc = [w for w in raw_desc.split() if w not in stops] self.descriptions[concept] = desc return self.descriptions[concept]
python
def _description(self, concept): """ Returns a "description" of a concept, as defined in the paper. The paper describes the description as a string, so this is a slight modification where we instead represent the definition as a list of tokens. """ if concept not in self.descriptions: lemmas = concept.lemma_names() gloss = self._gloss(concept) glosses = [self._gloss(rel) for rel in self._related(concept)] raw_desc = ' '.join(lemmas + [gloss] + glosses) desc = [w for w in raw_desc.split() if w not in stops] self.descriptions[concept] = desc return self.descriptions[concept]
[ "def", "_description", "(", "self", ",", "concept", ")", ":", "if", "concept", "not", "in", "self", ".", "descriptions", ":", "lemmas", "=", "concept", ".", "lemma_names", "(", ")", "gloss", "=", "self", ".", "_gloss", "(", "concept", ")", "glosses", "...
Returns a "description" of a concept, as defined in the paper. The paper describes the description as a string, so this is a slight modification where we instead represent the definition as a list of tokens.
[ "Returns", "a", "description", "of", "a", "concept", "as", "defined", "in", "the", "paper", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L375-L391
frnsys/broca
broca/vectorize/dcs.py
DCSVectorizer._related
def _related(self, concept): """ Returns related concepts for a concept. """ return concept.hypernyms() + \ concept.hyponyms() + \ concept.member_meronyms() + \ concept.substance_meronyms() + \ concept.part_meronyms() + \ concept.member_holonyms() + \ concept.substance_holonyms() + \ concept.part_holonyms() + \ concept.attributes() + \ concept.also_sees() + \ concept.similar_tos()
python
def _related(self, concept): """ Returns related concepts for a concept. """ return concept.hypernyms() + \ concept.hyponyms() + \ concept.member_meronyms() + \ concept.substance_meronyms() + \ concept.part_meronyms() + \ concept.member_holonyms() + \ concept.substance_holonyms() + \ concept.part_holonyms() + \ concept.attributes() + \ concept.also_sees() + \ concept.similar_tos()
[ "def", "_related", "(", "self", ",", "concept", ")", ":", "return", "concept", ".", "hypernyms", "(", ")", "+", "concept", ".", "hyponyms", "(", ")", "+", "concept", ".", "member_meronyms", "(", ")", "+", "concept", ".", "substance_meronyms", "(", ")", ...
Returns related concepts for a concept.
[ "Returns", "related", "concepts", "for", "a", "concept", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/dcs.py#L401-L415
Frojd/Fabrik
fabrik/api.py
init_tasks
def init_tasks(): """ Performs basic setup before any of the tasks are run. All tasks needs to run this before continuing. It only fires once. """ # Make sure exist are set if "exists" not in env: env.exists = exists if "run" not in env: env.run = run if "cd" not in env: env.cd = cd if "max_releases" not in env: env.max_releases = 5 if "public_path" in env: public_path = env.public_path.rstrip("/") env.public_path = public_path run_hook("init_tasks")
python
def init_tasks(): """ Performs basic setup before any of the tasks are run. All tasks needs to run this before continuing. It only fires once. """ # Make sure exist are set if "exists" not in env: env.exists = exists if "run" not in env: env.run = run if "cd" not in env: env.cd = cd if "max_releases" not in env: env.max_releases = 5 if "public_path" in env: public_path = env.public_path.rstrip("/") env.public_path = public_path run_hook("init_tasks")
[ "def", "init_tasks", "(", ")", ":", "# Make sure exist are set", "if", "\"exists\"", "not", "in", "env", ":", "env", ".", "exists", "=", "exists", "if", "\"run\"", "not", "in", "env", ":", "env", ".", "run", "=", "run", "if", "\"cd\"", "not", "in", "en...
Performs basic setup before any of the tasks are run. All tasks needs to run this before continuing. It only fires once.
[ "Performs", "basic", "setup", "before", "any", "of", "the", "tasks", "are", "run", ".", "All", "tasks", "needs", "to", "run", "this", "before", "continuing", ".", "It", "only", "fires", "once", "." ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/api.py#L37-L60
Frojd/Fabrik
fabrik/api.py
setup
def setup(): """ Creates shared and upload directory then fires setup to recipes. """ init_tasks() run_hook("before_setup") # Create shared folder env.run("mkdir -p %s" % (paths.get_shared_path())) env.run("chmod 755 %s" % (paths.get_shared_path())) # Create backup folder env.run("mkdir -p %s" % (paths.get_backup_path())) env.run("chmod 750 %s" % (paths.get_backup_path())) # Create uploads folder env.run("mkdir -p %s" % (paths.get_upload_path())) env.run("chmod 775 %s" % (paths.get_upload_path())) run_hook("setup") run_hook("after_setup")
python
def setup(): """ Creates shared and upload directory then fires setup to recipes. """ init_tasks() run_hook("before_setup") # Create shared folder env.run("mkdir -p %s" % (paths.get_shared_path())) env.run("chmod 755 %s" % (paths.get_shared_path())) # Create backup folder env.run("mkdir -p %s" % (paths.get_backup_path())) env.run("chmod 750 %s" % (paths.get_backup_path())) # Create uploads folder env.run("mkdir -p %s" % (paths.get_upload_path())) env.run("chmod 775 %s" % (paths.get_upload_path())) run_hook("setup") run_hook("after_setup")
[ "def", "setup", "(", ")", ":", "init_tasks", "(", ")", "run_hook", "(", "\"before_setup\"", ")", "# Create shared folder", "env", ".", "run", "(", "\"mkdir -p %s\"", "%", "(", "paths", ".", "get_shared_path", "(", ")", ")", ")", "env", ".", "run", "(", "...
Creates shared and upload directory then fires setup to recipes.
[ "Creates", "shared", "and", "upload", "directory", "then", "fires", "setup", "to", "recipes", "." ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/api.py#L64-L86
Frojd/Fabrik
fabrik/api.py
deploy
def deploy(): """ Performs a deploy by invoking copy, then generating next release name and invoking necessary hooks. """ init_tasks() if not has_hook("copy"): return report("No copy method has been defined") if not env.exists(paths.get_shared_path()): return report("You need to run setup before running deploy") run_hook("before_deploy") release_name = int(time.time()*1000) release_path = paths.get_releases_path(release_name) env.current_release = release_path try: run_hook("copy") except Exception as e: return report("Error occurred on copy. Aborting deploy", err=e) if not env.exists(paths.get_source_path(release_name)): return report("Source path not found '%s'" % paths.get_source_path(release_name)) try: run_hook("deploy") except Exception as e: message = "Error occurred on deploy, starting rollback..." logger.error(message) logger.error(e) run_task("rollback") return report("Error occurred on deploy") # Symlink current folder paths.symlink(paths.get_source_path(release_name), paths.get_current_path()) # Clean older releases if "max_releases" in env: cleanup_releases(int(env.max_releases)) run_hook("after_deploy") if "public_path" in env: paths.symlink(paths.get_source_path(release_name), env.public_path) logger.info("Deploy complete")
python
def deploy(): """ Performs a deploy by invoking copy, then generating next release name and invoking necessary hooks. """ init_tasks() if not has_hook("copy"): return report("No copy method has been defined") if not env.exists(paths.get_shared_path()): return report("You need to run setup before running deploy") run_hook("before_deploy") release_name = int(time.time()*1000) release_path = paths.get_releases_path(release_name) env.current_release = release_path try: run_hook("copy") except Exception as e: return report("Error occurred on copy. Aborting deploy", err=e) if not env.exists(paths.get_source_path(release_name)): return report("Source path not found '%s'" % paths.get_source_path(release_name)) try: run_hook("deploy") except Exception as e: message = "Error occurred on deploy, starting rollback..." logger.error(message) logger.error(e) run_task("rollback") return report("Error occurred on deploy") # Symlink current folder paths.symlink(paths.get_source_path(release_name), paths.get_current_path()) # Clean older releases if "max_releases" in env: cleanup_releases(int(env.max_releases)) run_hook("after_deploy") if "public_path" in env: paths.symlink(paths.get_source_path(release_name), env.public_path) logger.info("Deploy complete")
[ "def", "deploy", "(", ")", ":", "init_tasks", "(", ")", "if", "not", "has_hook", "(", "\"copy\"", ")", ":", "return", "report", "(", "\"No copy method has been defined\"", ")", "if", "not", "env", ".", "exists", "(", "paths", ".", "get_shared_path", "(", "...
Performs a deploy by invoking copy, then generating next release name and invoking necessary hooks.
[ "Performs", "a", "deploy", "by", "invoking", "copy", "then", "generating", "next", "release", "name", "and", "invoking", "necessary", "hooks", "." ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/api.py#L90-L144
Frojd/Fabrik
fabrik/api.py
rollback
def rollback(): """ Rolls back to previous release """ init_tasks() run_hook("before_rollback") # Remove current version current_release = paths.get_current_release_path() if current_release: env.run("rm -rf %s" % current_release) # Restore previous version old_release = paths.get_current_release_name() if old_release: paths.symlink(paths.get_source_path(old_release), paths.get_current_path()) run_hook("rollback") run_hook("after_rollback") logger.info("Rollback complete")
python
def rollback(): """ Rolls back to previous release """ init_tasks() run_hook("before_rollback") # Remove current version current_release = paths.get_current_release_path() if current_release: env.run("rm -rf %s" % current_release) # Restore previous version old_release = paths.get_current_release_name() if old_release: paths.symlink(paths.get_source_path(old_release), paths.get_current_path()) run_hook("rollback") run_hook("after_rollback") logger.info("Rollback complete")
[ "def", "rollback", "(", ")", ":", "init_tasks", "(", ")", "run_hook", "(", "\"before_rollback\"", ")", "# Remove current version", "current_release", "=", "paths", ".", "get_current_release_path", "(", ")", "if", "current_release", ":", "env", ".", "run", "(", "...
Rolls back to previous release
[ "Rolls", "back", "to", "previous", "release" ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/api.py#L148-L171
Frojd/Fabrik
fabrik/api.py
cleanup_releases
def cleanup_releases(limit=5): """ Removes older releases. """ init_tasks() max_versions = limit + 1 env.run("ls -dt %s/*/ | tail -n +%s | xargs rm -rf" % ( paths.get_releases_path(), max_versions) )
python
def cleanup_releases(limit=5): """ Removes older releases. """ init_tasks() max_versions = limit + 1 env.run("ls -dt %s/*/ | tail -n +%s | xargs rm -rf" % ( paths.get_releases_path(), max_versions) )
[ "def", "cleanup_releases", "(", "limit", "=", "5", ")", ":", "init_tasks", "(", ")", "max_versions", "=", "limit", "+", "1", "env", ".", "run", "(", "\"ls -dt %s/*/ | tail -n +%s | xargs rm -rf\"", "%", "(", "paths", ".", "get_releases_path", "(", ")", ",", ...
Removes older releases.
[ "Removes", "older", "releases", "." ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/api.py#L175-L187
darkfeline/animanager
animanager/sqlite/cachedview.py
CachedView.from_db
def from_db(cls, db, force=False): """Make instance from database. For performance, this caches the episode types for the database. The `force` parameter can be used to bypass this. """ if force or db not in cls._cache: cls._cache[db] = cls._new_from_db(db) return cls._cache[db]
python
def from_db(cls, db, force=False): """Make instance from database. For performance, this caches the episode types for the database. The `force` parameter can be used to bypass this. """ if force or db not in cls._cache: cls._cache[db] = cls._new_from_db(db) return cls._cache[db]
[ "def", "from_db", "(", "cls", ",", "db", ",", "force", "=", "False", ")", ":", "if", "force", "or", "db", "not", "in", "cls", ".", "_cache", ":", "cls", ".", "_cache", "[", "db", "]", "=", "cls", ".", "_new_from_db", "(", "db", ")", "return", "...
Make instance from database. For performance, this caches the episode types for the database. The `force` parameter can be used to bypass this.
[ "Make", "instance", "from", "database", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/sqlite/cachedview.py#L61-L70
ihgazni2/edict
check.py
pipe_shell_cmds
def pipe_shell_cmds(shell_CMDs): ''' shell_CMDs = {} shell_CMDs[1] = 'netstat -n' shell_CMDs[2] = "awk {'print $6'}" ''' len = shell_CMDs.__len__() p = {} p[1] = subprocess.Popen(shlex.split(shell_CMDs[1]), stdout=subprocess.PIPE,stderr=subprocess.PIPE) for i in range(2,len): p[i] = subprocess.Popen(shlex.split(shell_CMDs[i]), stdin=p[i-1].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE) if(len > 1): p[len] = subprocess.Popen(shlex.split(shell_CMDs[len]), stdin=p[len-1].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE) result = p[len].communicate() if(len > 1): for i in range(2,len+1): returncode = p[i].wait() else: returncode = p[len].wait() return(result)
python
def pipe_shell_cmds(shell_CMDs): ''' shell_CMDs = {} shell_CMDs[1] = 'netstat -n' shell_CMDs[2] = "awk {'print $6'}" ''' len = shell_CMDs.__len__() p = {} p[1] = subprocess.Popen(shlex.split(shell_CMDs[1]), stdout=subprocess.PIPE,stderr=subprocess.PIPE) for i in range(2,len): p[i] = subprocess.Popen(shlex.split(shell_CMDs[i]), stdin=p[i-1].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE) if(len > 1): p[len] = subprocess.Popen(shlex.split(shell_CMDs[len]), stdin=p[len-1].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE) result = p[len].communicate() if(len > 1): for i in range(2,len+1): returncode = p[i].wait() else: returncode = p[len].wait() return(result)
[ "def", "pipe_shell_cmds", "(", "shell_CMDs", ")", ":", "len", "=", "shell_CMDs", ".", "__len__", "(", ")", "p", "=", "{", "}", "p", "[", "1", "]", "=", "subprocess", ".", "Popen", "(", "shlex", ".", "split", "(", "shell_CMDs", "[", "1", "]", ")", ...
shell_CMDs = {} shell_CMDs[1] = 'netstat -n' shell_CMDs[2] = "awk {'print $6'}"
[ "shell_CMDs", "=", "{}", "shell_CMDs", "[", "1", "]", "=", "netstat", "-", "n", "shell_CMDs", "[", "2", "]", "=", "awk", "{", "print", "$6", "}" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/check.py#L7-L26
frnsys/broca
broca/tokenize/util.py
prune
def prune(tdocs): """ Prune terms which are totally subsumed by a phrase This could be better if it just removes the individual keywords that occur in a phrase for each time that phrase occurs. """ all_terms = set([t for toks in tdocs for t in toks]) terms = set() phrases = set() for t in all_terms: if gram_size(t) > 1: phrases.add(t) else: terms.add(t) # Identify candidates for redundant terms (1-gram terms found in a phrase) redundant = set() for t in terms: if any(t in ph for ph in phrases): redundant.add(t) # Search all documents to check that these terms occur # only in a phrase. If not, remove it as a candidate. # This could be more efficient cleared = set() for t in redundant: if any(check_term(d, term=t) for d in tdocs): cleared.add(t) redundant = redundant.difference(cleared) pruned_tdocs = [] for doc in tdocs: pruned_tdocs.append([t for t in doc if t not in redundant]) return pruned_tdocs
python
def prune(tdocs): """ Prune terms which are totally subsumed by a phrase This could be better if it just removes the individual keywords that occur in a phrase for each time that phrase occurs. """ all_terms = set([t for toks in tdocs for t in toks]) terms = set() phrases = set() for t in all_terms: if gram_size(t) > 1: phrases.add(t) else: terms.add(t) # Identify candidates for redundant terms (1-gram terms found in a phrase) redundant = set() for t in terms: if any(t in ph for ph in phrases): redundant.add(t) # Search all documents to check that these terms occur # only in a phrase. If not, remove it as a candidate. # This could be more efficient cleared = set() for t in redundant: if any(check_term(d, term=t) for d in tdocs): cleared.add(t) redundant = redundant.difference(cleared) pruned_tdocs = [] for doc in tdocs: pruned_tdocs.append([t for t in doc if t not in redundant]) return pruned_tdocs
[ "def", "prune", "(", "tdocs", ")", ":", "all_terms", "=", "set", "(", "[", "t", "for", "toks", "in", "tdocs", "for", "t", "in", "toks", "]", ")", "terms", "=", "set", "(", ")", "phrases", "=", "set", "(", ")", "for", "t", "in", "all_terms", ":"...
Prune terms which are totally subsumed by a phrase This could be better if it just removes the individual keywords that occur in a phrase for each time that phrase occurs.
[ "Prune", "terms", "which", "are", "totally", "subsumed", "by", "a", "phrase" ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/util.py#L4-L40
nickhand/classylss
setup.py
build_CLASS
def build_CLASS(prefix): """ Function to dowwnload CLASS from github and and build the library """ # latest class version and download link args = (package_basedir, package_basedir, CLASS_VERSION, os.path.abspath(prefix)) command = 'sh %s/depends/install_class.sh %s %s %s' %args ret = os.system(command) if ret != 0: raise ValueError("could not build CLASS v%s" %CLASS_VERSION)
python
def build_CLASS(prefix): """ Function to dowwnload CLASS from github and and build the library """ # latest class version and download link args = (package_basedir, package_basedir, CLASS_VERSION, os.path.abspath(prefix)) command = 'sh %s/depends/install_class.sh %s %s %s' %args ret = os.system(command) if ret != 0: raise ValueError("could not build CLASS v%s" %CLASS_VERSION)
[ "def", "build_CLASS", "(", "prefix", ")", ":", "# latest class version and download link", "args", "=", "(", "package_basedir", ",", "package_basedir", ",", "CLASS_VERSION", ",", "os", ".", "path", ".", "abspath", "(", "prefix", ")", ")", "command", "=", "'sh %s...
Function to dowwnload CLASS from github and and build the library
[ "Function", "to", "dowwnload", "CLASS", "from", "github", "and", "and", "build", "the", "library" ]
train
https://github.com/nickhand/classylss/blob/b297cb25bc47ffed845470fe1c052346ea96cddd/setup.py#L32-L42
SylvanasSun/FishFishJump
fish_searcher/views/search.py
format_number
def format_number(number): """ >>> format_number(1) 1 >>> format_number(22) 22 >>> format_number(333) 333 >>> format_number(4444) '4,444' >>> format_number(55555) '55,555' >>> format_number(666666) '666,666' >>> format_number(7777777) '7,777,777' """ char_list = list(str(number)) length = len(char_list) if length <= 3: return number result = '' if length % 3 != 0: while len(char_list) % 3 != 0: c = char_list[0] result += c char_list.remove(c) result += ',' i = 0 while len(char_list) > 0: c = char_list[0] result += c char_list.remove(c) i += 1 if i % 3 == 0: result += ',' return result[0:-1] if result[-1] == ',' else result
python
def format_number(number): """ >>> format_number(1) 1 >>> format_number(22) 22 >>> format_number(333) 333 >>> format_number(4444) '4,444' >>> format_number(55555) '55,555' >>> format_number(666666) '666,666' >>> format_number(7777777) '7,777,777' """ char_list = list(str(number)) length = len(char_list) if length <= 3: return number result = '' if length % 3 != 0: while len(char_list) % 3 != 0: c = char_list[0] result += c char_list.remove(c) result += ',' i = 0 while len(char_list) > 0: c = char_list[0] result += c char_list.remove(c) i += 1 if i % 3 == 0: result += ',' return result[0:-1] if result[-1] == ',' else result
[ "def", "format_number", "(", "number", ")", ":", "char_list", "=", "list", "(", "str", "(", "number", ")", ")", "length", "=", "len", "(", "char_list", ")", "if", "length", "<=", "3", ":", "return", "number", "result", "=", "''", "if", "length", "%",...
>>> format_number(1) 1 >>> format_number(22) 22 >>> format_number(333) 333 >>> format_number(4444) '4,444' >>> format_number(55555) '55,555' >>> format_number(666666) '666,666' >>> format_number(7777777) '7,777,777'
[ ">>>", "format_number", "(", "1", ")", "1", ">>>", "format_number", "(", "22", ")", "22", ">>>", "format_number", "(", "333", ")", "333", ">>>", "format_number", "(", "4444", ")", "4", "444", ">>>", "format_number", "(", "55555", ")", "55", "555", ">>>...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_searcher/views/search.py#L89-L128
SylvanasSun/FishFishJump
fish_searcher/views/search.py
generate_key
def generate_key(url, page_number): """ >>> url_a = 'http://localhost:5009/search?keywords=a' >>> generate_key(url_a, 10) 'http://localhost:5009/search?keywords=a&page=10' >>> url_b = 'http://localhost:5009/search?keywords=b&page=1' >>> generate_key(url_b, 10) 'http://localhost:5009/search?keywords=b&page=10' """ index = url.rfind('page') if index != -1: result = url[0:index] result += 'page=%s' % page_number else: result = url result += '&page=%s' % page_number return result
python
def generate_key(url, page_number): """ >>> url_a = 'http://localhost:5009/search?keywords=a' >>> generate_key(url_a, 10) 'http://localhost:5009/search?keywords=a&page=10' >>> url_b = 'http://localhost:5009/search?keywords=b&page=1' >>> generate_key(url_b, 10) 'http://localhost:5009/search?keywords=b&page=10' """ index = url.rfind('page') if index != -1: result = url[0:index] result += 'page=%s' % page_number else: result = url result += '&page=%s' % page_number return result
[ "def", "generate_key", "(", "url", ",", "page_number", ")", ":", "index", "=", "url", ".", "rfind", "(", "'page'", ")", "if", "index", "!=", "-", "1", ":", "result", "=", "url", "[", "0", ":", "index", "]", "result", "+=", "'page=%s'", "%", "page_n...
>>> url_a = 'http://localhost:5009/search?keywords=a' >>> generate_key(url_a, 10) 'http://localhost:5009/search?keywords=a&page=10' >>> url_b = 'http://localhost:5009/search?keywords=b&page=1' >>> generate_key(url_b, 10) 'http://localhost:5009/search?keywords=b&page=10'
[ ">>>", "url_a", "=", "http", ":", "//", "localhost", ":", "5009", "/", "search?keywords", "=", "a", ">>>", "generate_key", "(", "url_a", "10", ")", "http", ":", "//", "localhost", ":", "5009", "/", "search?keywords", "=", "a&page", "=", "10", ">>>", "u...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_searcher/views/search.py#L131-L147
SylvanasSun/FishFishJump
fish_searcher/views/search.py
generate_pagination
def generate_pagination(total_page_num, current_page_num): """ >>> PAGE_SIZE = 10 >>> generate_pagination(total_page_num=9, current_page_num=1) {'start': 1, 'end': 9, 'current': 1} >>> generate_pagination(total_page_num=20, current_page_num=12) {'start': 8, 'end': 17, 'current': 12} >>> generate_pagination(total_page_num=20, current_page_num=4) {'start': 1, 'end': 10, 'current': 4} >>> generate_pagination(total_page_num=16, current_page_num=14) {'start': 7, 'end': 16, 'current': 14} """ pagination = {'start': 1, 'end': PAGE_SIZE, 'current': current_page_num} if total_page_num <= PAGE_SIZE: pagination['end'] = total_page_num else: # base on front four and back five pagination['start'] = current_page_num - 4 pagination['end'] = current_page_num + 5 if pagination['start'] < 1: pagination['start'] = 1 pagination['end'] = PAGE_SIZE if pagination['end'] > total_page_num: pagination['end'] = total_page_num pagination['start'] = total_page_num - 9 return pagination
python
def generate_pagination(total_page_num, current_page_num): """ >>> PAGE_SIZE = 10 >>> generate_pagination(total_page_num=9, current_page_num=1) {'start': 1, 'end': 9, 'current': 1} >>> generate_pagination(total_page_num=20, current_page_num=12) {'start': 8, 'end': 17, 'current': 12} >>> generate_pagination(total_page_num=20, current_page_num=4) {'start': 1, 'end': 10, 'current': 4} >>> generate_pagination(total_page_num=16, current_page_num=14) {'start': 7, 'end': 16, 'current': 14} """ pagination = {'start': 1, 'end': PAGE_SIZE, 'current': current_page_num} if total_page_num <= PAGE_SIZE: pagination['end'] = total_page_num else: # base on front four and back five pagination['start'] = current_page_num - 4 pagination['end'] = current_page_num + 5 if pagination['start'] < 1: pagination['start'] = 1 pagination['end'] = PAGE_SIZE if pagination['end'] > total_page_num: pagination['end'] = total_page_num pagination['start'] = total_page_num - 9 return pagination
[ "def", "generate_pagination", "(", "total_page_num", ",", "current_page_num", ")", ":", "pagination", "=", "{", "'start'", ":", "1", ",", "'end'", ":", "PAGE_SIZE", ",", "'current'", ":", "current_page_num", "}", "if", "total_page_num", "<=", "PAGE_SIZE", ":", ...
>>> PAGE_SIZE = 10 >>> generate_pagination(total_page_num=9, current_page_num=1) {'start': 1, 'end': 9, 'current': 1} >>> generate_pagination(total_page_num=20, current_page_num=12) {'start': 8, 'end': 17, 'current': 12} >>> generate_pagination(total_page_num=20, current_page_num=4) {'start': 1, 'end': 10, 'current': 4} >>> generate_pagination(total_page_num=16, current_page_num=14) {'start': 7, 'end': 16, 'current': 14}
[ ">>>", "PAGE_SIZE", "=", "10", ">>>", "generate_pagination", "(", "total_page_num", "=", "9", "current_page_num", "=", "1", ")", "{", "start", ":", "1", "end", ":", "9", "current", ":", "1", "}", ">>>", "generate_pagination", "(", "total_page_num", "=", "2...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_searcher/views/search.py#L204-L233
concordusapps/python-hummus
examples/image_and_text.py
main
def main(filename): """ Creates a PDF by embedding the first page from the given image and writes some text to it. @param[in] filename The source filename of the image to embed. """ # Prepare font. font_family = 'arial' font = Font(font_family, bold=True) if not font: raise RuntimeError('No font found for %r' % font_family) # Initialize PDF document on a stream. with Document('output.pdf') as document: # Initialize a new page and begin its context. with document.Page() as ctx: # Open the image to embed. with Image(filename) as embed: # Set the media box for the page to the same as the # image to embed. ctx.box = embed.box # Embed the image. ctx.embed(embed) # Write some text. ctx.add(Text('Hello World', font, size=14, x=100, y=60))
python
def main(filename): """ Creates a PDF by embedding the first page from the given image and writes some text to it. @param[in] filename The source filename of the image to embed. """ # Prepare font. font_family = 'arial' font = Font(font_family, bold=True) if not font: raise RuntimeError('No font found for %r' % font_family) # Initialize PDF document on a stream. with Document('output.pdf') as document: # Initialize a new page and begin its context. with document.Page() as ctx: # Open the image to embed. with Image(filename) as embed: # Set the media box for the page to the same as the # image to embed. ctx.box = embed.box # Embed the image. ctx.embed(embed) # Write some text. ctx.add(Text('Hello World', font, size=14, x=100, y=60))
[ "def", "main", "(", "filename", ")", ":", "# Prepare font.", "font_family", "=", "'arial'", "font", "=", "Font", "(", "font_family", ",", "bold", "=", "True", ")", "if", "not", "font", ":", "raise", "RuntimeError", "(", "'No font found for %r'", "%", "font_f...
Creates a PDF by embedding the first page from the given image and writes some text to it. @param[in] filename The source filename of the image to embed.
[ "Creates", "a", "PDF", "by", "embedding", "the", "first", "page", "from", "the", "given", "image", "and", "writes", "some", "text", "to", "it", "." ]
train
https://github.com/concordusapps/python-hummus/blob/44ef9e1103c97e623155ca80fd0cf76aaabd9a08/examples/image_and_text.py#L7-L39
apetrynet/pyfilemail
pyfilemail/users.py
User.login
def login(self, password): """Login to filemail as the current user. :param password: :type password: ``str`` """ method, url = get_URL('login') payload = { 'apikey': self.config.get('apikey'), 'username': self.username, 'password': password, 'source': 'Desktop' } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
python
def login(self, password): """Login to filemail as the current user. :param password: :type password: ``str`` """ method, url = get_URL('login') payload = { 'apikey': self.config.get('apikey'), 'username': self.username, 'password': password, 'source': 'Desktop' } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
[ "def", "login", "(", "self", ",", "password", ")", ":", "method", ",", "url", "=", "get_URL", "(", "'login'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'username'", ":", "self", ".", "...
Login to filemail as the current user. :param password: :type password: ``str``
[ "Login", "to", "filemail", "as", "the", "current", "user", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L79-L99
apetrynet/pyfilemail
pyfilemail/users.py
User.logout
def logout(self): """Logout of filemail and closing the session.""" # Check if all transfers are complete before logout self.transfers_complete payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } method, url = get_URL('logout') res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: self.session.cookies['logintoken'] = None return True hellraiser(res)
python
def logout(self): """Logout of filemail and closing the session.""" # Check if all transfers are complete before logout self.transfers_complete payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } method, url = get_URL('logout') res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: self.session.cookies['logintoken'] = None return True hellraiser(res)
[ "def", "logout", "(", "self", ")", ":", "# Check if all transfers are complete before logout", "self", ".", "transfers_complete", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'logintoken'", ":", "self", "."...
Logout of filemail and closing the session.
[ "Logout", "of", "filemail", "and", "closing", "the", "session", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L102-L120
apetrynet/pyfilemail
pyfilemail/users.py
User.transfers_complete
def transfers_complete(self): """Check if all transfers are completed.""" for transfer in self.transfers: if not transfer.is_complete: error = { 'errorcode': 4003, 'errormessage': 'You must complete transfer before logout.' } hellraiser(error)
python
def transfers_complete(self): """Check if all transfers are completed.""" for transfer in self.transfers: if not transfer.is_complete: error = { 'errorcode': 4003, 'errormessage': 'You must complete transfer before logout.' } hellraiser(error)
[ "def", "transfers_complete", "(", "self", ")", ":", "for", "transfer", "in", "self", ".", "transfers", ":", "if", "not", "transfer", ".", "is_complete", ":", "error", "=", "{", "'errorcode'", ":", "4003", ",", "'errormessage'", ":", "'You must complete transfe...
Check if all transfers are completed.
[ "Check", "if", "all", "transfers", "are", "completed", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L123-L132
apetrynet/pyfilemail
pyfilemail/users.py
User.get_sent
def get_sent(self, expired=False, for_all=False): """Retreve information on previously sent transfers. :param expired: Whether or not to return expired transfers. :param for_all: Get transfers for all users. Requires a Filemail Business account. :type for_all: bool :type expired: bool :rtype: ``list`` of :class:`pyfilemail.Transfer` objects """ method, url = get_URL('get_sent') payload = { 'apikey': self.session.cookies.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'getexpired': expired, 'getforallusers': for_all } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return self._restore_transfers(res) hellraiser(res.json())
python
def get_sent(self, expired=False, for_all=False): """Retreve information on previously sent transfers. :param expired: Whether or not to return expired transfers. :param for_all: Get transfers for all users. Requires a Filemail Business account. :type for_all: bool :type expired: bool :rtype: ``list`` of :class:`pyfilemail.Transfer` objects """ method, url = get_URL('get_sent') payload = { 'apikey': self.session.cookies.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'getexpired': expired, 'getforallusers': for_all } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return self._restore_transfers(res) hellraiser(res.json())
[ "def", "get_sent", "(", "self", ",", "expired", "=", "False", ",", "for_all", "=", "False", ")", ":", "method", ",", "url", "=", "get_URL", "(", "'get_sent'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "session", ".", "cookies", ".", "ge...
Retreve information on previously sent transfers. :param expired: Whether or not to return expired transfers. :param for_all: Get transfers for all users. Requires a Filemail Business account. :type for_all: bool :type expired: bool :rtype: ``list`` of :class:`pyfilemail.Transfer` objects
[ "Retreve", "information", "on", "previously", "sent", "transfers", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L135-L160
apetrynet/pyfilemail
pyfilemail/users.py
User.get_user_info
def get_user_info(self, save_to_config=True): """Get user info and settings from Filemail. :param save_to_config: Whether or not to save settings to config file :type save_to_config: ``bool`` :rtype: ``dict`` containig user information and default settings. """ method, url = get_URL('user_get') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: settings = res.json()['user'] if save_to_config: self.config.update(settings) return settings hellraiser(res)
python
def get_user_info(self, save_to_config=True): """Get user info and settings from Filemail. :param save_to_config: Whether or not to save settings to config file :type save_to_config: ``bool`` :rtype: ``dict`` containig user information and default settings. """ method, url = get_URL('user_get') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: settings = res.json()['user'] if save_to_config: self.config.update(settings) return settings hellraiser(res)
[ "def", "get_user_info", "(", "self", ",", "save_to_config", "=", "True", ")", ":", "method", ",", "url", "=", "get_URL", "(", "'user_get'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'login...
Get user info and settings from Filemail. :param save_to_config: Whether or not to save settings to config file :type save_to_config: ``bool`` :rtype: ``dict`` containig user information and default settings.
[ "Get", "user", "info", "and", "settings", "from", "Filemail", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L163-L188
apetrynet/pyfilemail
pyfilemail/users.py
User.update_user_info
def update_user_info(self, **kwargs): """Update user info and settings. :param \*\*kwargs: settings to be merged with :func:`User.get_configfile` setings and sent to Filemail. :rtype: ``bool`` """ if kwargs: self.config.update(kwargs) method, url = get_URL('user_update') res = getattr(self.session, method)(url, params=self.config) if res.status_code == 200: return True hellraiser(res)
python
def update_user_info(self, **kwargs): """Update user info and settings. :param \*\*kwargs: settings to be merged with :func:`User.get_configfile` setings and sent to Filemail. :rtype: ``bool`` """ if kwargs: self.config.update(kwargs) method, url = get_URL('user_update') res = getattr(self.session, method)(url, params=self.config) if res.status_code == 200: return True hellraiser(res)
[ "def", "update_user_info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "self", ".", "config", ".", "update", "(", "kwargs", ")", "method", ",", "url", "=", "get_URL", "(", "'user_update'", ")", "res", "=", "getattr", "(", "se...
Update user info and settings. :param \*\*kwargs: settings to be merged with :func:`User.get_configfile` setings and sent to Filemail. :rtype: ``bool``
[ "Update", "user", "info", "and", "settings", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L191-L209
apetrynet/pyfilemail
pyfilemail/users.py
User.get_received
def get_received(self, age=None, for_all=True): """Retrieve a list of transfers sent to you or your company from other people. :param age: between 1 and 90 days. :param for_all: If ``True`` will return received files for all users in the same business. (Available for business account members only). :type age: ``int`` :type for_all: ``bool`` :rtype: ``list`` of :class:`Transfer` objects. """ method, url = get_URL('received_get') if age: if not isinstance(age, int) or age < 0 or age > 90: raise FMBaseError('Age must be <int> between 0-90') past = datetime.utcnow() - timedelta(days=age) age = timegm(past.utctimetuple()) payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'getForAllUsers': for_all, 'from': age } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return self._restore_transfers(res) hellraiser(res)
python
def get_received(self, age=None, for_all=True): """Retrieve a list of transfers sent to you or your company from other people. :param age: between 1 and 90 days. :param for_all: If ``True`` will return received files for all users in the same business. (Available for business account members only). :type age: ``int`` :type for_all: ``bool`` :rtype: ``list`` of :class:`Transfer` objects. """ method, url = get_URL('received_get') if age: if not isinstance(age, int) or age < 0 or age > 90: raise FMBaseError('Age must be <int> between 0-90') past = datetime.utcnow() - timedelta(days=age) age = timegm(past.utctimetuple()) payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'getForAllUsers': for_all, 'from': age } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return self._restore_transfers(res) hellraiser(res)
[ "def", "get_received", "(", "self", ",", "age", "=", "None", ",", "for_all", "=", "True", ")", ":", "method", ",", "url", "=", "get_URL", "(", "'received_get'", ")", "if", "age", ":", "if", "not", "isinstance", "(", "age", ",", "int", ")", "or", "a...
Retrieve a list of transfers sent to you or your company from other people. :param age: between 1 and 90 days. :param for_all: If ``True`` will return received files for all users in the same business. (Available for business account members only). :type age: ``int`` :type for_all: ``bool`` :rtype: ``list`` of :class:`Transfer` objects.
[ "Retrieve", "a", "list", "of", "transfers", "sent", "to", "you", "or", "your", "company", "from", "other", "people", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L212-L246
apetrynet/pyfilemail
pyfilemail/users.py
User._restore_transfers
def _restore_transfers(self, response): """Restore transfers from josn retreived Filemail :param response: response object from request :rtype: ``list`` with :class:`Transfer` objects """ transfers = [] for transfer_data in response.json()['transfers']: transfer = Transfer(self, _restore=True) transfer.transfer_info.update(transfer_data) transfer.get_files() transfers.append(transfer) return transfers
python
def _restore_transfers(self, response): """Restore transfers from josn retreived Filemail :param response: response object from request :rtype: ``list`` with :class:`Transfer` objects """ transfers = [] for transfer_data in response.json()['transfers']: transfer = Transfer(self, _restore=True) transfer.transfer_info.update(transfer_data) transfer.get_files() transfers.append(transfer) return transfers
[ "def", "_restore_transfers", "(", "self", ",", "response", ")", ":", "transfers", "=", "[", "]", "for", "transfer_data", "in", "response", ".", "json", "(", ")", "[", "'transfers'", "]", ":", "transfer", "=", "Transfer", "(", "self", ",", "_restore", "="...
Restore transfers from josn retreived Filemail :param response: response object from request :rtype: ``list`` with :class:`Transfer` objects
[ "Restore", "transfers", "from", "josn", "retreived", "Filemail", ":", "param", "response", ":", "response", "object", "from", "request", ":", "rtype", ":", "list", "with", ":", "class", ":", "Transfer", "objects" ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L248-L261
apetrynet/pyfilemail
pyfilemail/users.py
User.get_contact
def get_contact(self, email): """Get Filemail contact based on email. :param email: address of contact :type email: ``str``, ``unicode`` :rtype: ``dict`` with contact information """ contacts = self.get_contacts() for contact in contacts: if contact['email'] == email: return contact msg = 'No contact with email: "{email}" found.' raise FMBaseError(msg.format(email=email))
python
def get_contact(self, email): """Get Filemail contact based on email. :param email: address of contact :type email: ``str``, ``unicode`` :rtype: ``dict`` with contact information """ contacts = self.get_contacts() for contact in contacts: if contact['email'] == email: return contact msg = 'No contact with email: "{email}" found.' raise FMBaseError(msg.format(email=email))
[ "def", "get_contact", "(", "self", ",", "email", ")", ":", "contacts", "=", "self", ".", "get_contacts", "(", ")", "for", "contact", "in", "contacts", ":", "if", "contact", "[", "'email'", "]", "==", "email", ":", "return", "contact", "msg", "=", "'No ...
Get Filemail contact based on email. :param email: address of contact :type email: ``str``, ``unicode`` :rtype: ``dict`` with contact information
[ "Get", "Filemail", "contact", "based", "on", "email", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L286-L300
apetrynet/pyfilemail
pyfilemail/users.py
User.update_contact
def update_contact(self, contact): """Update name and/or email for contact. :param contact: with updated info :type contact: ``dict`` :rtype: ``bool`` """ if not isinstance(contact, dict): raise AttributeError('contact must be a <dict>') method, url = get_URL('contacts_update') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'contactid': contact.get('contactid'), 'name': contact.get('name'), 'email': contact.get('email') } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
python
def update_contact(self, contact): """Update name and/or email for contact. :param contact: with updated info :type contact: ``dict`` :rtype: ``bool`` """ if not isinstance(contact, dict): raise AttributeError('contact must be a <dict>') method, url = get_URL('contacts_update') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'contactid': contact.get('contactid'), 'name': contact.get('name'), 'email': contact.get('email') } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
[ "def", "update_contact", "(", "self", ",", "contact", ")", ":", "if", "not", "isinstance", "(", "contact", ",", "dict", ")", ":", "raise", "AttributeError", "(", "'contact must be a <dict>'", ")", "method", ",", "url", "=", "get_URL", "(", "'contacts_update'",...
Update name and/or email for contact. :param contact: with updated info :type contact: ``dict`` :rtype: ``bool``
[ "Update", "name", "and", "/", "or", "email", "for", "contact", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L303-L329
apetrynet/pyfilemail
pyfilemail/users.py
User.get_group
def get_group(self, name): """Get contact group by name :param name: name of group :type name: ``str``, ``unicode`` :rtype: ``dict`` with group data """ groups = self.get_groups() for group in groups: if group['contactgroupname'] == name: return group msg = 'No group named: "{name}" found.' raise FMBaseError(msg.format(name=name))
python
def get_group(self, name): """Get contact group by name :param name: name of group :type name: ``str``, ``unicode`` :rtype: ``dict`` with group data """ groups = self.get_groups() for group in groups: if group['contactgroupname'] == name: return group msg = 'No group named: "{name}" found.' raise FMBaseError(msg.format(name=name))
[ "def", "get_group", "(", "self", ",", "name", ")", ":", "groups", "=", "self", ".", "get_groups", "(", ")", "for", "group", "in", "groups", ":", "if", "group", "[", "'contactgroupname'", "]", "==", "name", ":", "return", "group", "msg", "=", "'No group...
Get contact group by name :param name: name of group :type name: ``str``, ``unicode`` :rtype: ``dict`` with group data
[ "Get", "contact", "group", "by", "name" ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L408-L422
apetrynet/pyfilemail
pyfilemail/users.py
User.delete_group
def delete_group(self, name): """Delete contact group :param name: of group :type name: ``str``, ``unicode`` :rtype: ``bool`` """ group = self.get_group(name) method, url = get_URL('group_delete') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'contactgroupid': group['contactgroupid'] } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
python
def delete_group(self, name): """Delete contact group :param name: of group :type name: ``str``, ``unicode`` :rtype: ``bool`` """ group = self.get_group(name) method, url = get_URL('group_delete') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'contactgroupid': group['contactgroupid'] } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
[ "def", "delete_group", "(", "self", ",", "name", ")", ":", "group", "=", "self", ".", "get_group", "(", "name", ")", "method", ",", "url", "=", "get_URL", "(", "'group_delete'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "...
Delete contact group :param name: of group :type name: ``str``, ``unicode`` :rtype: ``bool``
[ "Delete", "contact", "group" ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L449-L472
apetrynet/pyfilemail
pyfilemail/users.py
User.rename_group
def rename_group(self, group, newname): """Rename contact group :param group: group data or name of group :param newname: of group :type group: ``str``, ``unicode``, ``dict`` :type newname: ``str``, ``unicode`` :rtype: ``bool`` """ if isinstance(group, basestring): group = self.get_contact(group) method, url = get_URL('group_update') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'contactgroupid': group['contactgroupid'], 'name': newname } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
python
def rename_group(self, group, newname): """Rename contact group :param group: group data or name of group :param newname: of group :type group: ``str``, ``unicode``, ``dict`` :type newname: ``str``, ``unicode`` :rtype: ``bool`` """ if isinstance(group, basestring): group = self.get_contact(group) method, url = get_URL('group_update') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'contactgroupid': group['contactgroupid'], 'name': newname } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
[ "def", "rename_group", "(", "self", ",", "group", ",", "newname", ")", ":", "if", "isinstance", "(", "group", ",", "basestring", ")", ":", "group", "=", "self", ".", "get_contact", "(", "group", ")", "method", ",", "url", "=", "get_URL", "(", "'group_u...
Rename contact group :param group: group data or name of group :param newname: of group :type group: ``str``, ``unicode``, ``dict`` :type newname: ``str``, ``unicode`` :rtype: ``bool``
[ "Rename", "contact", "group" ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L475-L502
apetrynet/pyfilemail
pyfilemail/users.py
User.add_contact_to_group
def add_contact_to_group(self, contact, group): """Add contact to group :param contact: name or contact object :param group: name or group object :type contact: ``str``, ``unicode``, ``dict`` :type group: ``str``, ``unicode``, ``dict`` :rtype: ``bool`` """ if isinstance(contact, basestring): contact = self.get_contact(contact) if isinstance(group, basestring): group = self.get_group(group) method, url = get_URL('contacts_add_to_group') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'contactid': contact['contactid'], 'contactgroupid': group['contactgroupid'] } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
python
def add_contact_to_group(self, contact, group): """Add contact to group :param contact: name or contact object :param group: name or group object :type contact: ``str``, ``unicode``, ``dict`` :type group: ``str``, ``unicode``, ``dict`` :rtype: ``bool`` """ if isinstance(contact, basestring): contact = self.get_contact(contact) if isinstance(group, basestring): group = self.get_group(group) method, url = get_URL('contacts_add_to_group') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'contactid': contact['contactid'], 'contactgroupid': group['contactgroupid'] } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
[ "def", "add_contact_to_group", "(", "self", ",", "contact", ",", "group", ")", ":", "if", "isinstance", "(", "contact", ",", "basestring", ")", ":", "contact", "=", "self", ".", "get_contact", "(", "contact", ")", "if", "isinstance", "(", "group", ",", "...
Add contact to group :param contact: name or contact object :param group: name or group object :type contact: ``str``, ``unicode``, ``dict`` :type group: ``str``, ``unicode``, ``dict`` :rtype: ``bool``
[ "Add", "contact", "to", "group" ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L505-L535
apetrynet/pyfilemail
pyfilemail/users.py
User.get_company_info
def get_company_info(self): """Get company settings from Filemail :rtype: ``dict`` with company data """ method, url = get_URL('company_get') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return res.json()['company'] hellraiser(res)
python
def get_company_info(self): """Get company settings from Filemail :rtype: ``dict`` with company data """ method, url = get_URL('company_get') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return res.json()['company'] hellraiser(res)
[ "def", "get_company_info", "(", "self", ")", ":", "method", ",", "url", "=", "get_URL", "(", "'company_get'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'logintoken'", ":", "self", ".", "se...
Get company settings from Filemail :rtype: ``dict`` with company data
[ "Get", "company", "settings", "from", "Filemail" ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L571-L589
apetrynet/pyfilemail
pyfilemail/users.py
User.update_company
def update_company(self, company): """Update company settings :param company: updated settings :type company: ``dict`` :rtype: ``bool`` """ if not isinstance(company, dict): raise AttributeError('company must be a <dict>') method, url = get_URL('company_update') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } payload.update(company) res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
python
def update_company(self, company): """Update company settings :param company: updated settings :type company: ``dict`` :rtype: ``bool`` """ if not isinstance(company, dict): raise AttributeError('company must be a <dict>') method, url = get_URL('company_update') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken') } payload.update(company) res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
[ "def", "update_company", "(", "self", ",", "company", ")", ":", "if", "not", "isinstance", "(", "company", ",", "dict", ")", ":", "raise", "AttributeError", "(", "'company must be a <dict>'", ")", "method", ",", "url", "=", "get_URL", "(", "'company_update'", ...
Update company settings :param company: updated settings :type company: ``dict`` :rtype: ``bool``
[ "Update", "company", "settings" ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L592-L617
apetrynet/pyfilemail
pyfilemail/users.py
User.get_company_user
def get_company_user(self, email): """Get company user based on email. :param email: address of contact :type email: ``str``, ``unicode`` :rtype: ``dict`` with contact information """ users = self.get_company_users() for user in users: if user['email'] == email: return user msg = 'No user with email: "{email}" associated with this company.' raise FMBaseError(msg.format(email=email))
python
def get_company_user(self, email): """Get company user based on email. :param email: address of contact :type email: ``str``, ``unicode`` :rtype: ``dict`` with contact information """ users = self.get_company_users() for user in users: if user['email'] == email: return user msg = 'No user with email: "{email}" associated with this company.' raise FMBaseError(msg.format(email=email))
[ "def", "get_company_user", "(", "self", ",", "email", ")", ":", "users", "=", "self", ".", "get_company_users", "(", ")", "for", "user", "in", "users", ":", "if", "user", "[", "'email'", "]", "==", "email", ":", "return", "user", "msg", "=", "'No user ...
Get company user based on email. :param email: address of contact :type email: ``str``, ``unicode`` :rtype: ``dict`` with contact information
[ "Get", "company", "user", "based", "on", "email", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L641-L655
apetrynet/pyfilemail
pyfilemail/users.py
User.company_add_user
def company_add_user(self, email, name, password, receiver, admin): """Add a user to the company account. :param email: :param name: :param password: Pass without storing in plain text :param receiver: Can user receive files :param admin: :type email: ``str`` or ``unicode`` :type name: ``str`` or ``unicode`` :type password: ``str`` or ``unicode`` :type receiver: ``bool`` :type admin: ``bool`` :rtype: ``bool`` """ method, url = get_URL('company_add_user') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'email': email, 'name': name, 'password': password, 'canreceivefiles': receiver, 'admin': admin } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
python
def company_add_user(self, email, name, password, receiver, admin): """Add a user to the company account. :param email: :param name: :param password: Pass without storing in plain text :param receiver: Can user receive files :param admin: :type email: ``str`` or ``unicode`` :type name: ``str`` or ``unicode`` :type password: ``str`` or ``unicode`` :type receiver: ``bool`` :type admin: ``bool`` :rtype: ``bool`` """ method, url = get_URL('company_add_user') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'email': email, 'name': name, 'password': password, 'canreceivefiles': receiver, 'admin': admin } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
[ "def", "company_add_user", "(", "self", ",", "email", ",", "name", ",", "password", ",", "receiver", ",", "admin", ")", ":", "method", ",", "url", "=", "get_URL", "(", "'company_add_user'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config"...
Add a user to the company account. :param email: :param name: :param password: Pass without storing in plain text :param receiver: Can user receive files :param admin: :type email: ``str`` or ``unicode`` :type name: ``str`` or ``unicode`` :type password: ``str`` or ``unicode`` :type receiver: ``bool`` :type admin: ``bool`` :rtype: ``bool``
[ "Add", "a", "user", "to", "the", "company", "account", "." ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L658-L691
apetrynet/pyfilemail
pyfilemail/users.py
User.update_company_user
def update_company_user(self, email, userdata): """Update a company users settings :param email: current email address of user :param userdata: updated settings :type email: ``str`` or ``unicode`` :type userdata: ``dict`` :rtype: ``bool`` """ if not isinstance(userdata, dict): raise AttributeError('userdata must be a <dict>') method, url = get_URL('company_update_user') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'useremail': email } payload.update(userdata) res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
python
def update_company_user(self, email, userdata): """Update a company users settings :param email: current email address of user :param userdata: updated settings :type email: ``str`` or ``unicode`` :type userdata: ``dict`` :rtype: ``bool`` """ if not isinstance(userdata, dict): raise AttributeError('userdata must be a <dict>') method, url = get_URL('company_update_user') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'useremail': email } payload.update(userdata) res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
[ "def", "update_company_user", "(", "self", ",", "email", ",", "userdata", ")", ":", "if", "not", "isinstance", "(", "userdata", ",", "dict", ")", ":", "raise", "AttributeError", "(", "'userdata must be a <dict>'", ")", "method", ",", "url", "=", "get_URL", "...
Update a company users settings :param email: current email address of user :param userdata: updated settings :type email: ``str`` or ``unicode`` :type userdata: ``dict`` :rtype: ``bool``
[ "Update", "a", "company", "users", "settings" ]
train
https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/users.py#L694-L722
davebridges/mousedb
mousedb/views.py
home
def home(request): """This view generates the data for the home page. This login restricted view passes dictionaries containing the current cages, animals and strains as well as the totals for each. This data is passed to the template home.html""" cage_list = Animal.objects.values("Cage").distinct() cage_list_current = cage_list.filter(Alive=True) animal_list = Animal.objects.all() animal_list_current = animal_list.filter(Alive=True) strain_list = animal_list.values("Strain").distinct() strain_list_current = animal_list_current.values("Strain").distinct() return render(request, 'home.html', {'animal_list':animal_list, 'animal_list_current':animal_list_current, 'strain_list':strain_list, 'strain_list_current':strain_list_current, 'cage_list':cage_list, 'cage_list_current':cage_list_current})
python
def home(request): """This view generates the data for the home page. This login restricted view passes dictionaries containing the current cages, animals and strains as well as the totals for each. This data is passed to the template home.html""" cage_list = Animal.objects.values("Cage").distinct() cage_list_current = cage_list.filter(Alive=True) animal_list = Animal.objects.all() animal_list_current = animal_list.filter(Alive=True) strain_list = animal_list.values("Strain").distinct() strain_list_current = animal_list_current.values("Strain").distinct() return render(request, 'home.html', {'animal_list':animal_list, 'animal_list_current':animal_list_current, 'strain_list':strain_list, 'strain_list_current':strain_list_current, 'cage_list':cage_list, 'cage_list_current':cage_list_current})
[ "def", "home", "(", "request", ")", ":", "cage_list", "=", "Animal", ".", "objects", ".", "values", "(", "\"Cage\"", ")", ".", "distinct", "(", ")", "cage_list_current", "=", "cage_list", ".", "filter", "(", "Alive", "=", "True", ")", "animal_list", "=",...
This view generates the data for the home page. This login restricted view passes dictionaries containing the current cages, animals and strains as well as the totals for each. This data is passed to the template home.html
[ "This", "view", "generates", "the", "data", "for", "the", "home", "page", ".", "This", "login", "restricted", "view", "passes", "dictionaries", "containing", "the", "current", "cages", "animals", "and", "strains", "as", "well", "as", "the", "totals", "for", ...
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/views.py#L33-L43
elifesciences/proofreader-python
proofreader/utils/print_table.py
PrintTable.col_widths
def col_widths(self): # type: () -> defaultdict """Get MAX possible width of each column in the table. :return: defaultdict """ _widths = defaultdict(int) all_rows = [self.headers] all_rows.extend(self._rows) for row in all_rows: for idx, col in enumerate(row): _col_l = len(col) if _col_l > _widths[idx]: _widths[idx] = _col_l return _widths
python
def col_widths(self): # type: () -> defaultdict """Get MAX possible width of each column in the table. :return: defaultdict """ _widths = defaultdict(int) all_rows = [self.headers] all_rows.extend(self._rows) for row in all_rows: for idx, col in enumerate(row): _col_l = len(col) if _col_l > _widths[idx]: _widths[idx] = _col_l return _widths
[ "def", "col_widths", "(", "self", ")", ":", "# type: () -> defaultdict", "_widths", "=", "defaultdict", "(", "int", ")", "all_rows", "=", "[", "self", ".", "headers", "]", "all_rows", ".", "extend", "(", "self", ".", "_rows", ")", "for", "row", "in", "al...
Get MAX possible width of each column in the table. :return: defaultdict
[ "Get", "MAX", "possible", "width", "of", "each", "column", "in", "the", "table", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/utils/print_table.py#L22-L39
elifesciences/proofreader-python
proofreader/utils/print_table.py
PrintTable._marker_line
def _marker_line(self): # type: () -> str """Generate a correctly sized marker line. e.g. '+------------------+---------+----------+---------+' :return: str """ output = '' for col in sorted(self.col_widths): line = self.COLUMN_MARK + (self.DASH * (self.col_widths[col] + self.PADDING * 2)) output += line output += self.COLUMN_MARK + '\n' return output
python
def _marker_line(self): # type: () -> str """Generate a correctly sized marker line. e.g. '+------------------+---------+----------+---------+' :return: str """ output = '' for col in sorted(self.col_widths): line = self.COLUMN_MARK + (self.DASH * (self.col_widths[col] + self.PADDING * 2)) output += line output += self.COLUMN_MARK + '\n' return output
[ "def", "_marker_line", "(", "self", ")", ":", "# type: () -> str", "output", "=", "''", "for", "col", "in", "sorted", "(", "self", ".", "col_widths", ")", ":", "line", "=", "self", ".", "COLUMN_MARK", "+", "(", "self", ".", "DASH", "*", "(", "self", ...
Generate a correctly sized marker line. e.g. '+------------------+---------+----------+---------+' :return: str
[ "Generate", "a", "correctly", "sized", "marker", "line", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/utils/print_table.py#L41-L57
elifesciences/proofreader-python
proofreader/utils/print_table.py
PrintTable._row_to_str
def _row_to_str(self, row): # type: (List[str]) -> str """Converts a list of strings to a correctly spaced and formatted row string. e.g. ['some', 'foo', 'bar'] --> '| some | foo | bar |' :param row: list :return: str """ _row_text = '' for col, width in self.col_widths.items(): _row_text += self.COLUMN_SEP l_pad, r_pad = self._split_int(width - len(row[col])) _row_text += '{0}{1}{2}'.format(' ' * (l_pad + self.PADDING), row[col], ' ' * (r_pad + self.PADDING)) _row_text += self.COLUMN_SEP + '\n' return _row_text
python
def _row_to_str(self, row): # type: (List[str]) -> str """Converts a list of strings to a correctly spaced and formatted row string. e.g. ['some', 'foo', 'bar'] --> '| some | foo | bar |' :param row: list :return: str """ _row_text = '' for col, width in self.col_widths.items(): _row_text += self.COLUMN_SEP l_pad, r_pad = self._split_int(width - len(row[col])) _row_text += '{0}{1}{2}'.format(' ' * (l_pad + self.PADDING), row[col], ' ' * (r_pad + self.PADDING)) _row_text += self.COLUMN_SEP + '\n' return _row_text
[ "def", "_row_to_str", "(", "self", ",", "row", ")", ":", "# type: (List[str]) -> str", "_row_text", "=", "''", "for", "col", ",", "width", "in", "self", ".", "col_widths", ".", "items", "(", ")", ":", "_row_text", "+=", "self", ".", "COLUMN_SEP", "l_pad", ...
Converts a list of strings to a correctly spaced and formatted row string. e.g. ['some', 'foo', 'bar'] --> '| some | foo | bar |' :param row: list :return: str
[ "Converts", "a", "list", "of", "strings", "to", "a", "correctly", "spaced", "and", "formatted", "row", "string", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/utils/print_table.py#L64-L86
elifesciences/proofreader-python
proofreader/utils/print_table.py
PrintTable._table_to_str
def _table_to_str(self): # type: () -> str """Return single formatted table string. :return: str """ _marker_line = self._marker_line() output = _marker_line + self._row_to_str(self.headers) + _marker_line for row in self._rows: output += self._row_to_str(row) output += _marker_line return output
python
def _table_to_str(self): # type: () -> str """Return single formatted table string. :return: str """ _marker_line = self._marker_line() output = _marker_line + self._row_to_str(self.headers) + _marker_line for row in self._rows: output += self._row_to_str(row) output += _marker_line return output
[ "def", "_table_to_str", "(", "self", ")", ":", "# type: () -> str", "_marker_line", "=", "self", ".", "_marker_line", "(", ")", "output", "=", "_marker_line", "+", "self", ".", "_row_to_str", "(", "self", ".", "headers", ")", "+", "_marker_line", "for", "row...
Return single formatted table string. :return: str
[ "Return", "single", "formatted", "table", "string", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/utils/print_table.py#L94-L108
frnsys/broca
broca/distance/sift4.py
sift4
def sift4(s1, s2, max_offset=5): """ This is an implementation of general Sift4. """ t1, t2 = list(s1), list(s2) l1, l2 = len(t1), len(t2) if not s1: return l2 if not s2: return l1 # Cursors for each string c1, c2 = 0, 0 # Largest common subsequence lcss = 0 # Local common substring local_cs = 0 # Number of transpositions ('ab' vs 'ba') trans = 0 # Offset pair array, for computing the transpositions offsets = [] while c1 < l1 and c2 < l2: if t1[c1] == t2[c2]: local_cs += 1 # Check if current match is a transposition is_trans = False i = 0 while i < len(offsets): ofs = offsets[i] if c1 <= ofs['c1'] or c2 <= ofs['c2']: is_trans = abs(c2-c1) >= abs(ofs['c2'] - ofs['c1']) if is_trans: trans += 1 elif not ofs['trans']: ofs['trans'] = True trans += 1 break elif c1 > ofs['c2'] and c2 > ofs['c1']: del offsets[i] else: i += 1 offsets.append({ 'c1': c1, 'c2': c2, 'trans': is_trans }) else: lcss += local_cs local_cs = 0 if c1 != c2: c1 = c2 = min(c1, c2) for i in range(max_offset): if c1 + i >= l1 and c2 + i >= l2: break elif c1 + i < l1 and s1[c1+i] == s2[c2]: c1 += i - 1 c2 -= 1 break elif c2 + i < l2 and s1[c1] == s2[c2 + i]: c2 += i - 1 c1 -= 1 break c1 += 1 c2 += 1 if c1 >= l1 or c2 >= l2: lcss += local_cs local_cs = 0 c1 = c2 = min(c1, c2) lcss += local_cs return round(max(l1, l2) - lcss + trans)
python
def sift4(s1, s2, max_offset=5): """ This is an implementation of general Sift4. """ t1, t2 = list(s1), list(s2) l1, l2 = len(t1), len(t2) if not s1: return l2 if not s2: return l1 # Cursors for each string c1, c2 = 0, 0 # Largest common subsequence lcss = 0 # Local common substring local_cs = 0 # Number of transpositions ('ab' vs 'ba') trans = 0 # Offset pair array, for computing the transpositions offsets = [] while c1 < l1 and c2 < l2: if t1[c1] == t2[c2]: local_cs += 1 # Check if current match is a transposition is_trans = False i = 0 while i < len(offsets): ofs = offsets[i] if c1 <= ofs['c1'] or c2 <= ofs['c2']: is_trans = abs(c2-c1) >= abs(ofs['c2'] - ofs['c1']) if is_trans: trans += 1 elif not ofs['trans']: ofs['trans'] = True trans += 1 break elif c1 > ofs['c2'] and c2 > ofs['c1']: del offsets[i] else: i += 1 offsets.append({ 'c1': c1, 'c2': c2, 'trans': is_trans }) else: lcss += local_cs local_cs = 0 if c1 != c2: c1 = c2 = min(c1, c2) for i in range(max_offset): if c1 + i >= l1 and c2 + i >= l2: break elif c1 + i < l1 and s1[c1+i] == s2[c2]: c1 += i - 1 c2 -= 1 break elif c2 + i < l2 and s1[c1] == s2[c2 + i]: c2 += i - 1 c1 -= 1 break c1 += 1 c2 += 1 if c1 >= l1 or c2 >= l2: lcss += local_cs local_cs = 0 c1 = c2 = min(c1, c2) lcss += local_cs return round(max(l1, l2) - lcss + trans)
[ "def", "sift4", "(", "s1", ",", "s2", ",", "max_offset", "=", "5", ")", ":", "t1", ",", "t2", "=", "list", "(", "s1", ")", ",", "list", "(", "s2", ")", "l1", ",", "l2", "=", "len", "(", "t1", ")", ",", "len", "(", "t2", ")", "if", "not", ...
This is an implementation of general Sift4.
[ "This", "is", "an", "implementation", "of", "general", "Sift4", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/distance/sift4.py#L14-L97
davebridges/mousedb
mousedb/data/models.py
Cohort.save
def save(self, *args, **kwargs): '''The slug field is auto-populated during the save from the name field.''' if not self.id: self.slug = slugify(self.name) super(Cohort, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): '''The slug field is auto-populated during the save from the name field.''' if not self.id: self.slug = slugify(self.name) super(Cohort, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "id", ":", "self", ".", "slug", "=", "slugify", "(", "self", ".", "name", ")", "super", "(", "Cohort", ",", "self", ")", ".", "save", "...
The slug field is auto-populated during the save from the name field.
[ "The", "slug", "field", "is", "auto", "-", "populated", "during", "the", "save", "from", "the", "name", "field", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/models.py#L264-L268
darkfeline/animanager
animanager/files/picker.py
FilePicker.pick
def pick(self, filenames: Iterable[str]) -> str: """Pick one filename based on priority rules.""" filenames = sorted(filenames, reverse=True) # e.g., v2 before v1 for priority in sorted(self.rules.keys(), reverse=True): patterns = self.rules[priority] for pattern in patterns: for filename in filenames: if pattern.search(filename): return filename return filenames[0]
python
def pick(self, filenames: Iterable[str]) -> str: """Pick one filename based on priority rules.""" filenames = sorted(filenames, reverse=True) # e.g., v2 before v1 for priority in sorted(self.rules.keys(), reverse=True): patterns = self.rules[priority] for pattern in patterns: for filename in filenames: if pattern.search(filename): return filename return filenames[0]
[ "def", "pick", "(", "self", ",", "filenames", ":", "Iterable", "[", "str", "]", ")", "->", "str", ":", "filenames", "=", "sorted", "(", "filenames", ",", "reverse", "=", "True", ")", "# e.g., v2 before v1", "for", "priority", "in", "sorted", "(", "self",...
Pick one filename based on priority rules.
[ "Pick", "one", "filename", "based", "on", "priority", "rules", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/files/picker.py#L61-L70
frnsys/broca
broca/knowledge/tf.py
train_tf
def train_tf(tokens_stream, out=None, **kwargs): """ Train a map of term frequencies on a list of files (parallelized). """ print('Counting terms...') results = parallel(count_tf, tokens_stream, n_jobs=-1) print('Merging...') tf = merge(results) if out is not None: with open(out, 'w') as f: json.dump(tf, f) return tf
python
def train_tf(tokens_stream, out=None, **kwargs): """ Train a map of term frequencies on a list of files (parallelized). """ print('Counting terms...') results = parallel(count_tf, tokens_stream, n_jobs=-1) print('Merging...') tf = merge(results) if out is not None: with open(out, 'w') as f: json.dump(tf, f) return tf
[ "def", "train_tf", "(", "tokens_stream", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "print", "(", "'Counting terms...'", ")", "results", "=", "parallel", "(", "count_tf", ",", "tokens_stream", ",", "n_jobs", "=", "-", "1", ")", "print", ...
Train a map of term frequencies on a list of files (parallelized).
[ "Train", "a", "map", "of", "term", "frequencies", "on", "a", "list", "of", "files", "(", "parallelized", ")", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/knowledge/tf.py#L7-L21
frnsys/broca
broca/knowledge/tf.py
count_tf
def count_tf(tokens_stream): """ Count term frequencies for a single file. """ tf = defaultdict(int) for tokens in tokens_stream: for token in tokens: tf[token] += 1 return tf
python
def count_tf(tokens_stream): """ Count term frequencies for a single file. """ tf = defaultdict(int) for tokens in tokens_stream: for token in tokens: tf[token] += 1 return tf
[ "def", "count_tf", "(", "tokens_stream", ")", ":", "tf", "=", "defaultdict", "(", "int", ")", "for", "tokens", "in", "tokens_stream", ":", "for", "token", "in", "tokens", ":", "tf", "[", "token", "]", "+=", "1", "return", "tf" ]
Count term frequencies for a single file.
[ "Count", "term", "frequencies", "for", "a", "single", "file", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/knowledge/tf.py#L24-L32
wglass/lighthouse
lighthouse/cluster.py
Cluster.validate_config
def validate_config(cls, config): """ Validates a config dictionary parsed from a cluster config file. Checks that a discovery method is defined and that at least one of the balancers in the config are installed and available. """ if "discovery" not in config: raise ValueError("No discovery method defined.") installed_balancers = Balancer.get_installed_classes().keys() if not any([balancer in config for balancer in installed_balancers]): raise ValueError("No available balancer configs defined.")
python
def validate_config(cls, config): """ Validates a config dictionary parsed from a cluster config file. Checks that a discovery method is defined and that at least one of the balancers in the config are installed and available. """ if "discovery" not in config: raise ValueError("No discovery method defined.") installed_balancers = Balancer.get_installed_classes().keys() if not any([balancer in config for balancer in installed_balancers]): raise ValueError("No available balancer configs defined.")
[ "def", "validate_config", "(", "cls", ",", "config", ")", ":", "if", "\"discovery\"", "not", "in", "config", ":", "raise", "ValueError", "(", "\"No discovery method defined.\"", ")", "installed_balancers", "=", "Balancer", ".", "get_installed_classes", "(", ")", "...
Validates a config dictionary parsed from a cluster config file. Checks that a discovery method is defined and that at least one of the balancers in the config are installed and available.
[ "Validates", "a", "config", "dictionary", "parsed", "from", "a", "cluster", "config", "file", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/cluster.py#L26-L39
wglass/lighthouse
lighthouse/cluster.py
Cluster.apply_config
def apply_config(self, config): """ Sets the `discovery` and `meta_cluster` attributes, as well as the configured + available balancer attributes from a given validated config. """ self.discovery = config["discovery"] self.meta_cluster = config.get("meta_cluster") for balancer_name in Balancer.get_installed_classes().keys(): if balancer_name in config: setattr(self, balancer_name, config[balancer_name])
python
def apply_config(self, config): """ Sets the `discovery` and `meta_cluster` attributes, as well as the configured + available balancer attributes from a given validated config. """ self.discovery = config["discovery"] self.meta_cluster = config.get("meta_cluster") for balancer_name in Balancer.get_installed_classes().keys(): if balancer_name in config: setattr(self, balancer_name, config[balancer_name])
[ "def", "apply_config", "(", "self", ",", "config", ")", ":", "self", ".", "discovery", "=", "config", "[", "\"discovery\"", "]", "self", ".", "meta_cluster", "=", "config", ".", "get", "(", "\"meta_cluster\"", ")", "for", "balancer_name", "in", "Balancer", ...
Sets the `discovery` and `meta_cluster` attributes, as well as the configured + available balancer attributes from a given validated config.
[ "Sets", "the", "discovery", "and", "meta_cluster", "attributes", "as", "well", "as", "the", "configured", "+", "available", "balancer", "attributes", "from", "a", "given", "validated", "config", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/cluster.py#L41-L51
darkfeline/animanager
animanager/commands/search.py
command
def command(state, args): """Search Animanager database.""" args = parser.parse_args(args[1:]) where_queries = [] params = {} if args.watching or args.available: where_queries.append('regexp IS NOT NULL') if args.query: where_queries.append('title LIKE :title') params['title'] = _compile_sql_query(args.query) if not where_queries: print('Must include at least one filter.') return where_query = ' AND '.join(where_queries) logger.debug('Search where %s with params %s', where_query, params) results = list() all_files = [ filename for filename in _find_files(state.config['anime'].getpath('watchdir')) if _is_video(filename) ] for anime in query.select.select(state.db, where_query, params): logger.debug('For anime %s with regexp %s', anime.aid, anime.regexp) if anime.regexp is not None: anime_files = AnimeFiles(anime.regexp, all_files) logger.debug('Found files %s', anime_files.filenames) query.files.cache_files(state.db, anime.aid, anime_files) available = anime_files.available_string(anime.watched_episodes) else: available = '' if not args.available or available: results.append(( anime.aid, anime.title, anime.type, '{}/{}'.format(anime.watched_episodes, anime.episodecount), 'yes' if anime.complete else '', available, )) state.results['db'].set(results) state.results['db'].print()
python
def command(state, args): """Search Animanager database.""" args = parser.parse_args(args[1:]) where_queries = [] params = {} if args.watching or args.available: where_queries.append('regexp IS NOT NULL') if args.query: where_queries.append('title LIKE :title') params['title'] = _compile_sql_query(args.query) if not where_queries: print('Must include at least one filter.') return where_query = ' AND '.join(where_queries) logger.debug('Search where %s with params %s', where_query, params) results = list() all_files = [ filename for filename in _find_files(state.config['anime'].getpath('watchdir')) if _is_video(filename) ] for anime in query.select.select(state.db, where_query, params): logger.debug('For anime %s with regexp %s', anime.aid, anime.regexp) if anime.regexp is not None: anime_files = AnimeFiles(anime.regexp, all_files) logger.debug('Found files %s', anime_files.filenames) query.files.cache_files(state.db, anime.aid, anime_files) available = anime_files.available_string(anime.watched_episodes) else: available = '' if not args.available or available: results.append(( anime.aid, anime.title, anime.type, '{}/{}'.format(anime.watched_episodes, anime.episodecount), 'yes' if anime.complete else '', available, )) state.results['db'].set(results) state.results['db'].print()
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "where_queries", "=", "[", "]", "params", "=", "{", "}", "if", "args", ".", "watching", "or", "args", ".", "ava...
Search Animanager database.
[ "Search", "Animanager", "database", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/search.py#L29-L68
darkfeline/animanager
animanager/commands/search.py
_is_video
def _is_video(filepath) -> bool: """Check filename extension to see if it's a video file.""" if os.path.exists(filepath): # Could be broken symlink extension = os.path.splitext(filepath)[1] return extension in ('.mkv', '.mp4', '.avi') else: return False
python
def _is_video(filepath) -> bool: """Check filename extension to see if it's a video file.""" if os.path.exists(filepath): # Could be broken symlink extension = os.path.splitext(filepath)[1] return extension in ('.mkv', '.mp4', '.avi') else: return False
[ "def", "_is_video", "(", "filepath", ")", "->", "bool", ":", "if", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "# Could be broken symlink", "extension", "=", "os", ".", "path", ".", "splitext", "(", "filepath", ")", "[", "1", "]", "ret...
Check filename extension to see if it's a video file.
[ "Check", "filename", "extension", "to", "see", "if", "it", "s", "a", "video", "file", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/search.py#L83-L89
darkfeline/animanager
animanager/commands/search.py
_find_files
def _find_files(dirpath: str) -> 'Iterable[str]': """Find files recursively. Returns a generator that yields paths in no particular order. """ for dirpath, dirnames, filenames in os.walk(dirpath, topdown=True, followlinks=True): if os.path.basename(dirpath).startswith('.'): del dirnames[:] for filename in filenames: yield os.path.join(dirpath, filename)
python
def _find_files(dirpath: str) -> 'Iterable[str]': """Find files recursively. Returns a generator that yields paths in no particular order. """ for dirpath, dirnames, filenames in os.walk(dirpath, topdown=True, followlinks=True): if os.path.basename(dirpath).startswith('.'): del dirnames[:] for filename in filenames: yield os.path.join(dirpath, filename)
[ "def", "_find_files", "(", "dirpath", ":", "str", ")", "->", "'Iterable[str]'", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "dirpath", ",", "topdown", "=", "True", ",", "followlinks", "=", "True", ")", ":", "...
Find files recursively. Returns a generator that yields paths in no particular order.
[ "Find", "files", "recursively", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/search.py#L92-L102
wglass/lighthouse
lighthouse/peer.py
Peer.current
def current(cls): """ Helper method for getting the current peer of whichever host we're running on. """ name = socket.getfqdn() ip = socket.gethostbyname(name) return cls(name, ip)
python
def current(cls): """ Helper method for getting the current peer of whichever host we're running on. """ name = socket.getfqdn() ip = socket.gethostbyname(name) return cls(name, ip)
[ "def", "current", "(", "cls", ")", ":", "name", "=", "socket", ".", "getfqdn", "(", ")", "ip", "=", "socket", ".", "gethostbyname", "(", "name", ")", "return", "cls", "(", "name", ",", "ip", ")" ]
Helper method for getting the current peer of whichever host we're running on.
[ "Helper", "method", "for", "getting", "the", "current", "peer", "of", "whichever", "host", "we", "re", "running", "on", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/peer.py#L26-L34
wglass/lighthouse
lighthouse/peer.py
Peer.serialize
def serialize(self): """ Serializes the Peer data as a simple JSON map string. """ return json.dumps({ "name": self.name, "ip": self.ip, "port": self.port }, sort_keys=True)
python
def serialize(self): """ Serializes the Peer data as a simple JSON map string. """ return json.dumps({ "name": self.name, "ip": self.ip, "port": self.port }, sort_keys=True)
[ "def", "serialize", "(", "self", ")", ":", "return", "json", ".", "dumps", "(", "{", "\"name\"", ":", "self", ".", "name", ",", "\"ip\"", ":", "self", ".", "ip", ",", "\"port\"", ":", "self", ".", "port", "}", ",", "sort_keys", "=", "True", ")" ]
Serializes the Peer data as a simple JSON map string.
[ "Serializes", "the", "Peer", "data", "as", "a", "simple", "JSON", "map", "string", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/peer.py#L36-L44
wglass/lighthouse
lighthouse/peer.py
Peer.deserialize
def deserialize(cls, value): """ Generates a Peer instance via a JSON string of the sort generated by `Peer.deserialize`. The `name` and `ip` keys are required to be present in the JSON map, if the `port` key is not present the default is used. """ parsed = json.loads(value) if "name" not in parsed: raise ValueError("No peer name.") if "ip" not in parsed: raise ValueError("No peer IP.") if "port" not in parsed: parsed["port"] = DEFAULT_PEER_PORT return cls(parsed["name"], parsed["ip"], parsed["port"])
python
def deserialize(cls, value): """ Generates a Peer instance via a JSON string of the sort generated by `Peer.deserialize`. The `name` and `ip` keys are required to be present in the JSON map, if the `port` key is not present the default is used. """ parsed = json.loads(value) if "name" not in parsed: raise ValueError("No peer name.") if "ip" not in parsed: raise ValueError("No peer IP.") if "port" not in parsed: parsed["port"] = DEFAULT_PEER_PORT return cls(parsed["name"], parsed["ip"], parsed["port"])
[ "def", "deserialize", "(", "cls", ",", "value", ")", ":", "parsed", "=", "json", ".", "loads", "(", "value", ")", "if", "\"name\"", "not", "in", "parsed", ":", "raise", "ValueError", "(", "\"No peer name.\"", ")", "if", "\"ip\"", "not", "in", "parsed", ...
Generates a Peer instance via a JSON string of the sort generated by `Peer.deserialize`. The `name` and `ip` keys are required to be present in the JSON map, if the `port` key is not present the default is used.
[ "Generates", "a", "Peer", "instance", "via", "a", "JSON", "string", "of", "the", "sort", "generated", "by", "Peer", ".", "deserialize", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/peer.py#L47-L64
frnsys/broca
broca/pipeline/cryo.py
CryoEncoder.default
def default(self, obj): """ if input object is a ndarray it will be converted into a dict holding dtype, shape and the data base64 encoded """ if isinstance(obj, np.ndarray): data_b64 = base64.b64encode(obj.data).decode('utf-8') return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape) elif sps.issparse(obj): data_b64 = base64.b64encode(obj.data).decode('utf-8') return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape, indices=obj.indices, indptr=obj.indptr) elif hasattr(obj, '__dict__'): return obj.__dict__ # Let the base class default method raise the TypeError return json.JSONEncoder.default(self, obj)
python
def default(self, obj): """ if input object is a ndarray it will be converted into a dict holding dtype, shape and the data base64 encoded """ if isinstance(obj, np.ndarray): data_b64 = base64.b64encode(obj.data).decode('utf-8') return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape) elif sps.issparse(obj): data_b64 = base64.b64encode(obj.data).decode('utf-8') return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape, indices=obj.indices, indptr=obj.indptr) elif hasattr(obj, '__dict__'): return obj.__dict__ # Let the base class default method raise the TypeError return json.JSONEncoder.default(self, obj)
[ "def", "default", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "np", ".", "ndarray", ")", ":", "data_b64", "=", "base64", ".", "b64encode", "(", "obj", ".", "data", ")", ".", "decode", "(", "'utf-8'", ")", "return", "dict...
if input object is a ndarray it will be converted into a dict holding dtype, shape and the data base64 encoded
[ "if", "input", "object", "is", "a", "ndarray", "it", "will", "be", "converted", "into", "a", "dict", "holding", "dtype", "shape", "and", "the", "data", "base64", "encoded" ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/pipeline/cryo.py#L20-L39
elifesciences/proofreader-python
proofreader/license_checker/package.py
Package._extract_meta_value
def _extract_meta_value(self, tag): # type: (str, List[str]) -> str """Find a target value by `tag` from given meta data. :param tag: str :param meta_data: list :return: str """ try: return [l[len(tag):] for l in self.meta_data if l.startswith(tag)][0] except IndexError: return '* Not Found *'
python
def _extract_meta_value(self, tag): # type: (str, List[str]) -> str """Find a target value by `tag` from given meta data. :param tag: str :param meta_data: list :return: str """ try: return [l[len(tag):] for l in self.meta_data if l.startswith(tag)][0] except IndexError: return '* Not Found *'
[ "def", "_extract_meta_value", "(", "self", ",", "tag", ")", ":", "# type: (str, List[str]) -> str", "try", ":", "return", "[", "l", "[", "len", "(", "tag", ")", ":", "]", "for", "l", "in", "self", ".", "meta_data", "if", "l", ".", "startswith", "(", "t...
Find a target value by `tag` from given meta data. :param tag: str :param meta_data: list :return: str
[ "Find", "a", "target", "value", "by", "tag", "from", "given", "meta", "data", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/license_checker/package.py#L16-L27
gkmngrgn/radpress
radpress/readers/__init__.py
get_markup_choices
def get_markup_choices(): """ Receives available markup options as list. """ available_reader_list = [] module_dir = os.path.realpath(os.path.dirname(__file__)) module_names = filter( lambda x: x.endswith('_reader.py'), os.listdir(module_dir)) for module_name in module_names: markup = module_name.split('_')[0] reader = get_reader(markup=markup) if reader.enabled is True: available_reader_list.append((markup, reader.name)) return available_reader_list
python
def get_markup_choices(): """ Receives available markup options as list. """ available_reader_list = [] module_dir = os.path.realpath(os.path.dirname(__file__)) module_names = filter( lambda x: x.endswith('_reader.py'), os.listdir(module_dir)) for module_name in module_names: markup = module_name.split('_')[0] reader = get_reader(markup=markup) if reader.enabled is True: available_reader_list.append((markup, reader.name)) return available_reader_list
[ "def", "get_markup_choices", "(", ")", ":", "available_reader_list", "=", "[", "]", "module_dir", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "module_names", "=", "filter", "(", "lambda", "x...
Receives available markup options as list.
[ "Receives", "available", "markup", "options", "as", "list", "." ]
train
https://github.com/gkmngrgn/radpress/blob/2ed3b97f94e722479601832ffc40ea2135cda916/radpress/readers/__init__.py#L36-L52
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.apply_config
def apply_config(self, config): """ Takes the given config dictionary and sets the hosts and base_path attributes. If the kazoo client connection is established, its hosts list is updated to the newly configured value. """ self.hosts = config["hosts"] old_base_path = self.base_path self.base_path = config["path"] if not self.connected.is_set(): return logger.debug("Setting ZK hosts to %s", self.hosts) self.client.set_hosts(",".join(self.hosts)) if old_base_path and old_base_path != self.base_path: logger.critical( "ZNode base path changed!" + " Lighthouse will need to be restarted" + " to watch the right znodes" )
python
def apply_config(self, config): """ Takes the given config dictionary and sets the hosts and base_path attributes. If the kazoo client connection is established, its hosts list is updated to the newly configured value. """ self.hosts = config["hosts"] old_base_path = self.base_path self.base_path = config["path"] if not self.connected.is_set(): return logger.debug("Setting ZK hosts to %s", self.hosts) self.client.set_hosts(",".join(self.hosts)) if old_base_path and old_base_path != self.base_path: logger.critical( "ZNode base path changed!" + " Lighthouse will need to be restarted" + " to watch the right znodes" )
[ "def", "apply_config", "(", "self", ",", "config", ")", ":", "self", ".", "hosts", "=", "config", "[", "\"hosts\"", "]", "old_base_path", "=", "self", ".", "base_path", "self", ".", "base_path", "=", "config", "[", "\"path\"", "]", "if", "not", "self", ...
Takes the given config dictionary and sets the hosts and base_path attributes. If the kazoo client connection is established, its hosts list is updated to the newly configured value.
[ "Takes", "the", "given", "config", "dictionary", "and", "sets", "the", "hosts", "and", "base_path", "attributes", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L65-L87
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.connect
def connect(self): """ Creates a new KazooClient and establishes a connection. Passes the client the `handle_connection_change` method as a callback to fire when the Zookeeper connection changes state. """ self.client = client.KazooClient(hosts=",".join(self.hosts)) self.client.add_listener(self.handle_connection_change) self.client.start_async()
python
def connect(self): """ Creates a new KazooClient and establishes a connection. Passes the client the `handle_connection_change` method as a callback to fire when the Zookeeper connection changes state. """ self.client = client.KazooClient(hosts=",".join(self.hosts)) self.client.add_listener(self.handle_connection_change) self.client.start_async()
[ "def", "connect", "(", "self", ")", ":", "self", ".", "client", "=", "client", ".", "KazooClient", "(", "hosts", "=", "\",\"", ".", "join", "(", "self", ".", "hosts", ")", ")", "self", ".", "client", ".", "add_listener", "(", "self", ".", "handle_con...
Creates a new KazooClient and establishes a connection. Passes the client the `handle_connection_change` method as a callback to fire when the Zookeeper connection changes state.
[ "Creates", "a", "new", "KazooClient", "and", "establishes", "a", "connection", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L89-L99
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.disconnect
def disconnect(self): """ Stops and closes the kazoo connection. """ logger.info("Disconnecting from Zookeeper.") self.client.stop() self.client.close()
python
def disconnect(self): """ Stops and closes the kazoo connection. """ logger.info("Disconnecting from Zookeeper.") self.client.stop() self.client.close()
[ "def", "disconnect", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Disconnecting from Zookeeper.\"", ")", "self", ".", "client", ".", "stop", "(", ")", "self", ".", "client", ".", "close", "(", ")" ]
Stops and closes the kazoo connection.
[ "Stops", "and", "closes", "the", "kazoo", "connection", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L101-L107
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.handle_connection_change
def handle_connection_change(self, state): """ Callback for handling changes in the kazoo client's connection state. If the connection becomes lost or suspended, the `connected` Event is cleared. Other given states imply that the connection is established so `connected` is set. """ if state == client.KazooState.LOST: if not self.shutdown.is_set(): logger.info("Zookeeper session lost!") self.connected.clear() elif state == client.KazooState.SUSPENDED: logger.info("Zookeeper connection suspended!") self.connected.clear() else: logger.info("Zookeeper connection (re)established.") self.connected.set()
python
def handle_connection_change(self, state): """ Callback for handling changes in the kazoo client's connection state. If the connection becomes lost or suspended, the `connected` Event is cleared. Other given states imply that the connection is established so `connected` is set. """ if state == client.KazooState.LOST: if not self.shutdown.is_set(): logger.info("Zookeeper session lost!") self.connected.clear() elif state == client.KazooState.SUSPENDED: logger.info("Zookeeper connection suspended!") self.connected.clear() else: logger.info("Zookeeper connection (re)established.") self.connected.set()
[ "def", "handle_connection_change", "(", "self", ",", "state", ")", ":", "if", "state", "==", "client", ".", "KazooState", ".", "LOST", ":", "if", "not", "self", ".", "shutdown", ".", "is_set", "(", ")", ":", "logger", ".", "info", "(", "\"Zookeeper sessi...
Callback for handling changes in the kazoo client's connection state. If the connection becomes lost or suspended, the `connected` Event is cleared. Other given states imply that the connection is established so `connected` is set.
[ "Callback", "for", "handling", "changes", "in", "the", "kazoo", "client", "s", "connection", "state", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L109-L126
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.start_watching
def start_watching(self, cluster, callback): """ Initiates the "watching" of a cluster's associated znode. This is done via kazoo's ChildrenWatch object. When a cluster's znode's child nodes are updated, a callback is fired and we update the cluster's `nodes` attribute based on the existing child znodes and fire a passed-in callback with no arguments once done. If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL` seconds before trying again as long as no ChildrenWatch exists for the given cluster yet and we are not in the process of shutting down. """ logger.debug("starting to watch cluster %s", cluster.name) wait_on_any(self.connected, self.shutdown) logger.debug("done waiting on (connected, shutdown)") znode_path = "/".join([self.base_path, cluster.name]) self.stop_events[znode_path] = threading.Event() def should_stop(): return ( znode_path not in self.stop_events or self.stop_events[znode_path].is_set() or self.shutdown.is_set() ) while not should_stop(): try: if self.client.exists(znode_path): break except exceptions.ConnectionClosedError: break wait_on_any( self.stop_events[znode_path], self.shutdown, timeout=NO_NODE_INTERVAL ) logger.debug("setting up ChildrenWatch for %s", znode_path) @self.client.ChildrenWatch(znode_path) def watch(children): if should_stop(): return False logger.debug("znode children changed! (%s)", znode_path) new_nodes = [] for child in children: child_path = "/".join([znode_path, child]) try: new_nodes.append( Node.deserialize(self.client.get(child_path)[0]) ) except ValueError: logger.exception("Invalid node at path '%s'", child) continue cluster.nodes = new_nodes callback()
python
def start_watching(self, cluster, callback): """ Initiates the "watching" of a cluster's associated znode. This is done via kazoo's ChildrenWatch object. When a cluster's znode's child nodes are updated, a callback is fired and we update the cluster's `nodes` attribute based on the existing child znodes and fire a passed-in callback with no arguments once done. If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL` seconds before trying again as long as no ChildrenWatch exists for the given cluster yet and we are not in the process of shutting down. """ logger.debug("starting to watch cluster %s", cluster.name) wait_on_any(self.connected, self.shutdown) logger.debug("done waiting on (connected, shutdown)") znode_path = "/".join([self.base_path, cluster.name]) self.stop_events[znode_path] = threading.Event() def should_stop(): return ( znode_path not in self.stop_events or self.stop_events[znode_path].is_set() or self.shutdown.is_set() ) while not should_stop(): try: if self.client.exists(znode_path): break except exceptions.ConnectionClosedError: break wait_on_any( self.stop_events[znode_path], self.shutdown, timeout=NO_NODE_INTERVAL ) logger.debug("setting up ChildrenWatch for %s", znode_path) @self.client.ChildrenWatch(znode_path) def watch(children): if should_stop(): return False logger.debug("znode children changed! (%s)", znode_path) new_nodes = [] for child in children: child_path = "/".join([znode_path, child]) try: new_nodes.append( Node.deserialize(self.client.get(child_path)[0]) ) except ValueError: logger.exception("Invalid node at path '%s'", child) continue cluster.nodes = new_nodes callback()
[ "def", "start_watching", "(", "self", ",", "cluster", ",", "callback", ")", ":", "logger", ".", "debug", "(", "\"starting to watch cluster %s\"", ",", "cluster", ".", "name", ")", "wait_on_any", "(", "self", ".", "connected", ",", "self", ".", "shutdown", ")...
Initiates the "watching" of a cluster's associated znode. This is done via kazoo's ChildrenWatch object. When a cluster's znode's child nodes are updated, a callback is fired and we update the cluster's `nodes` attribute based on the existing child znodes and fire a passed-in callback with no arguments once done. If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL` seconds before trying again as long as no ChildrenWatch exists for the given cluster yet and we are not in the process of shutting down.
[ "Initiates", "the", "watching", "of", "a", "cluster", "s", "associated", "znode", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L128-L189
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.stop_watching
def stop_watching(self, cluster): """ Causes the thread that launched the watch of the cluster path to end by setting the proper stop event found in `self.stop_events`. """ znode_path = "/".join([self.base_path, cluster.name]) if znode_path in self.stop_events: self.stop_events[znode_path].set()
python
def stop_watching(self, cluster): """ Causes the thread that launched the watch of the cluster path to end by setting the proper stop event found in `self.stop_events`. """ znode_path = "/".join([self.base_path, cluster.name]) if znode_path in self.stop_events: self.stop_events[znode_path].set()
[ "def", "stop_watching", "(", "self", ",", "cluster", ")", ":", "znode_path", "=", "\"/\"", ".", "join", "(", "[", "self", ".", "base_path", ",", "cluster", ".", "name", "]", ")", "if", "znode_path", "in", "self", ".", "stop_events", ":", "self", ".", ...
Causes the thread that launched the watch of the cluster path to end by setting the proper stop event found in `self.stop_events`.
[ "Causes", "the", "thread", "that", "launched", "the", "watch", "of", "the", "cluster", "path", "to", "end", "by", "setting", "the", "proper", "stop", "event", "found", "in", "self", ".", "stop_events", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L191-L198
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.report_up
def report_up(self, service, port): """ Report the given service's present node as up by creating/updating its respective znode in Zookeeper and setting the znode's data to the serialized representation of the node. Waits for zookeeper to be connected before taking any action. """ wait_on_any(self.connected, self.shutdown) node = Node.current(service, port) path = self.path_of(service, node) data = node.serialize().encode() znode = self.client.exists(path) if not znode: logger.debug("ZNode at %s does not exist, creating new one.", path) self.client.create(path, value=data, ephemeral=True, makepath=True) elif znode.owner_session_id != self.client.client_id[0]: logger.debug("ZNode at %s not owned by us, recreating.", path) txn = self.client.transaction() txn.delete(path) txn.create(path, value=data, ephemeral=True) txn.commit() else: logger.debug("Setting node value to %r", data) self.client.set(path, data)
python
def report_up(self, service, port): """ Report the given service's present node as up by creating/updating its respective znode in Zookeeper and setting the znode's data to the serialized representation of the node. Waits for zookeeper to be connected before taking any action. """ wait_on_any(self.connected, self.shutdown) node = Node.current(service, port) path = self.path_of(service, node) data = node.serialize().encode() znode = self.client.exists(path) if not znode: logger.debug("ZNode at %s does not exist, creating new one.", path) self.client.create(path, value=data, ephemeral=True, makepath=True) elif znode.owner_session_id != self.client.client_id[0]: logger.debug("ZNode at %s not owned by us, recreating.", path) txn = self.client.transaction() txn.delete(path) txn.create(path, value=data, ephemeral=True) txn.commit() else: logger.debug("Setting node value to %r", data) self.client.set(path, data)
[ "def", "report_up", "(", "self", ",", "service", ",", "port", ")", ":", "wait_on_any", "(", "self", ".", "connected", ",", "self", ".", "shutdown", ")", "node", "=", "Node", ".", "current", "(", "service", ",", "port", ")", "path", "=", "self", ".", ...
Report the given service's present node as up by creating/updating its respective znode in Zookeeper and setting the znode's data to the serialized representation of the node. Waits for zookeeper to be connected before taking any action.
[ "Report", "the", "given", "service", "s", "present", "node", "as", "up", "by", "creating", "/", "updating", "its", "respective", "znode", "in", "Zookeeper", "and", "setting", "the", "znode", "s", "data", "to", "the", "serialized", "representation", "of", "th...
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L200-L228
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.report_down
def report_down(self, service, port): """ Reports the given service's present node as down by deleting the node's znode in Zookeeper if the znode is present. Waits for the Zookeeper connection to be established before further action is taken. """ wait_on_any(self.connected, self.shutdown) node = Node.current(service, port) path = self.path_of(service, node) try: logger.debug("Deleting znode at %s", path) self.client.delete(path) except exceptions.NoNodeError: pass
python
def report_down(self, service, port): """ Reports the given service's present node as down by deleting the node's znode in Zookeeper if the znode is present. Waits for the Zookeeper connection to be established before further action is taken. """ wait_on_any(self.connected, self.shutdown) node = Node.current(service, port) path = self.path_of(service, node) try: logger.debug("Deleting znode at %s", path) self.client.delete(path) except exceptions.NoNodeError: pass
[ "def", "report_down", "(", "self", ",", "service", ",", "port", ")", ":", "wait_on_any", "(", "self", ".", "connected", ",", "self", ".", "shutdown", ")", "node", "=", "Node", ".", "current", "(", "service", ",", "port", ")", "path", "=", "self", "."...
Reports the given service's present node as down by deleting the node's znode in Zookeeper if the znode is present. Waits for the Zookeeper connection to be established before further action is taken.
[ "Reports", "the", "given", "service", "s", "present", "node", "as", "down", "by", "deleting", "the", "node", "s", "znode", "in", "Zookeeper", "if", "the", "znode", "is", "present", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L230-L247
wglass/lighthouse
lighthouse/zookeeper.py
ZookeeperDiscovery.path_of
def path_of(self, service, node): """ Helper method for determining the Zookeeper path for a given cluster member node. """ return "/".join([self.base_path, service.name, node.name])
python
def path_of(self, service, node): """ Helper method for determining the Zookeeper path for a given cluster member node. """ return "/".join([self.base_path, service.name, node.name])
[ "def", "path_of", "(", "self", ",", "service", ",", "node", ")", ":", "return", "\"/\"", ".", "join", "(", "[", "self", ".", "base_path", ",", "service", ".", "name", ",", "node", ".", "name", "]", ")" ]
Helper method for determining the Zookeeper path for a given cluster member node.
[ "Helper", "method", "for", "determining", "the", "Zookeeper", "path", "for", "a", "given", "cluster", "member", "node", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/zookeeper.py#L249-L254
django-de/django-simple-ratings
ratings/templatetags/ratings_tags.py
rating_score
def rating_score(obj, user): """ Returns the score a user has given an object """ if not user.is_authenticated() or not hasattr(obj, '_ratings_field'): return False ratings_descriptor = getattr(obj, obj._ratings_field) try: rating = ratings_descriptor.get(user=user).score except ratings_descriptor.model.DoesNotExist: rating = None return rating
python
def rating_score(obj, user): """ Returns the score a user has given an object """ if not user.is_authenticated() or not hasattr(obj, '_ratings_field'): return False ratings_descriptor = getattr(obj, obj._ratings_field) try: rating = ratings_descriptor.get(user=user).score except ratings_descriptor.model.DoesNotExist: rating = None return rating
[ "def", "rating_score", "(", "obj", ",", "user", ")", ":", "if", "not", "user", ".", "is_authenticated", "(", ")", "or", "not", "hasattr", "(", "obj", ",", "'_ratings_field'", ")", ":", "return", "False", "ratings_descriptor", "=", "getattr", "(", "obj", ...
Returns the score a user has given an object
[ "Returns", "the", "score", "a", "user", "has", "given", "an", "object" ]
train
https://github.com/django-de/django-simple-ratings/blob/f876f1284943b4913d865757f5d46b4515e26683/ratings/templatetags/ratings_tags.py#L9-L22
django-de/django-simple-ratings
ratings/templatetags/ratings_tags.py
rate_url
def rate_url(obj, score=1): """ Generates a link to "rate" the given object with the provided score - this can be used as a form target or for POSTing via Ajax. """ return reverse('ratings_rate_object', args=( ContentType.objects.get_for_model(obj).pk, obj.pk, score, ))
python
def rate_url(obj, score=1): """ Generates a link to "rate" the given object with the provided score - this can be used as a form target or for POSTing via Ajax. """ return reverse('ratings_rate_object', args=( ContentType.objects.get_for_model(obj).pk, obj.pk, score, ))
[ "def", "rate_url", "(", "obj", ",", "score", "=", "1", ")", ":", "return", "reverse", "(", "'ratings_rate_object'", ",", "args", "=", "(", "ContentType", ".", "objects", ".", "get_for_model", "(", "obj", ")", ".", "pk", ",", "obj", ".", "pk", ",", "s...
Generates a link to "rate" the given object with the provided score - this can be used as a form target or for POSTing via Ajax.
[ "Generates", "a", "link", "to", "rate", "the", "given", "object", "with", "the", "provided", "score", "-", "this", "can", "be", "used", "as", "a", "form", "target", "or", "for", "POSTing", "via", "Ajax", "." ]
train
https://github.com/django-de/django-simple-ratings/blob/f876f1284943b4913d865757f5d46b4515e26683/ratings/templatetags/ratings_tags.py#L34-L43
django-de/django-simple-ratings
ratings/templatetags/ratings_tags.py
unrate_url
def unrate_url(obj): """ Generates a link to "un-rate" the given object - this can be used as a form target or for POSTing via Ajax. """ return reverse('ratings_unrate_object', args=( ContentType.objects.get_for_model(obj).pk, obj.pk, ))
python
def unrate_url(obj): """ Generates a link to "un-rate" the given object - this can be used as a form target or for POSTing via Ajax. """ return reverse('ratings_unrate_object', args=( ContentType.objects.get_for_model(obj).pk, obj.pk, ))
[ "def", "unrate_url", "(", "obj", ")", ":", "return", "reverse", "(", "'ratings_unrate_object'", ",", "args", "=", "(", "ContentType", ".", "objects", ".", "get_for_model", "(", "obj", ")", ".", "pk", ",", "obj", ".", "pk", ",", ")", ")" ]
Generates a link to "un-rate" the given object - this can be used as a form target or for POSTing via Ajax.
[ "Generates", "a", "link", "to", "un", "-", "rate", "the", "given", "object", "-", "this", "can", "be", "used", "as", "a", "form", "target", "or", "for", "POSTing", "via", "Ajax", "." ]
train
https://github.com/django-de/django-simple-ratings/blob/f876f1284943b4913d865757f5d46b4515e26683/ratings/templatetags/ratings_tags.py#L47-L55
darkfeline/animanager
animanager/commands/reset.py
command
def command(state, args): """Reset anime watched episodes.""" args = parser.parse_args(args[1:]) aid = state.results.parse_aid(args.aid, default_key='db') query.update.reset(state.db, aid, args.episode)
python
def command(state, args): """Reset anime watched episodes.""" args = parser.parse_args(args[1:]) aid = state.results.parse_aid(args.aid, default_key='db') query.update.reset(state.db, aid, args.episode)
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "aid", "=", "state", ".", "results", ".", "parse_aid", "(", "args", ".", "aid", ",", "default_key", "=", "'db'", ...
Reset anime watched episodes.
[ "Reset", "anime", "watched", "episodes", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/reset.py#L22-L26
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_service.py
cancel_job
def cancel_job(agent, project_name, job_id): """ cancel a job. If the job is pending, it will be removed. If the job is running, it will be terminated. """ prevstate = agent.cancel(project_name, job_id)['prevstate'] if prevstate == 'pending': sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_ID, (job_id,))
python
def cancel_job(agent, project_name, job_id): """ cancel a job. If the job is pending, it will be removed. If the job is running, it will be terminated. """ prevstate = agent.cancel(project_name, job_id)['prevstate'] if prevstate == 'pending': sqllite_agent.execute(ScrapydJobExtInfoSQLSet.DELETE_BY_ID, (job_id,))
[ "def", "cancel_job", "(", "agent", ",", "project_name", ",", "job_id", ")", ":", "prevstate", "=", "agent", ".", "cancel", "(", "project_name", ",", "job_id", ")", "[", "'prevstate'", "]", "if", "prevstate", "==", "'pending'", ":", "sqllite_agent", ".", "e...
cancel a job. If the job is pending, it will be removed. If the job is running, it will be terminated.
[ "cancel", "a", "job", ".", "If", "the", "job", "is", "pending", "it", "will", "be", "removed", ".", "If", "the", "job", "is", "running", "it", "will", "be", "terminated", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_service.py#L56-L63
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_service.py
packing_job_ext_info
def packing_job_ext_info(job_lsit_DO): """ Packing additional information of the job into the job_list_DO(JobListDO) """ ext_info = sqllite_agent.execute(ScrapydJobExtInfoSQLSet.SELECT_BY_ID, (job_lsit_DO.job_id,)) if ext_info is None or len(ext_info) <= 0: return ext_info = ext_info[0] job_lsit_DO.args = ext_info[1] job_lsit_DO.priority = ext_info[2] job_lsit_DO.creation_time = ext_info[3] job_lsit_DO.logs_name = str_to_list(ext_info[4], ',') job_lsit_DO.logs_url = str_to_list(ext_info[5], ',')
python
def packing_job_ext_info(job_lsit_DO): """ Packing additional information of the job into the job_list_DO(JobListDO) """ ext_info = sqllite_agent.execute(ScrapydJobExtInfoSQLSet.SELECT_BY_ID, (job_lsit_DO.job_id,)) if ext_info is None or len(ext_info) <= 0: return ext_info = ext_info[0] job_lsit_DO.args = ext_info[1] job_lsit_DO.priority = ext_info[2] job_lsit_DO.creation_time = ext_info[3] job_lsit_DO.logs_name = str_to_list(ext_info[4], ',') job_lsit_DO.logs_url = str_to_list(ext_info[5], ',')
[ "def", "packing_job_ext_info", "(", "job_lsit_DO", ")", ":", "ext_info", "=", "sqllite_agent", ".", "execute", "(", "ScrapydJobExtInfoSQLSet", ".", "SELECT_BY_ID", ",", "(", "job_lsit_DO", ".", "job_id", ",", ")", ")", "if", "ext_info", "is", "None", "or", "le...
Packing additional information of the job into the job_list_DO(JobListDO)
[ "Packing", "additional", "information", "of", "the", "job", "into", "the", "job_list_DO", "(", "JobListDO", ")" ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_service.py#L66-L77
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_service.py
get_all_job_list
def get_all_job_list(agent): """ Get all job list by each project name then return three job list on the base of different status(pending,running,finished). """ project_list = agent.get_project_list() if project_list['status'] == 'error': raise ScrapydTimeoutException project_list = project_list['projects'] pending_job_list = [] running_job_list = [] finished_job_list = [] for project_name in project_list: job_list = agent.get_job_list(project_name) # Extract latest version project_version = agent.get_version_list(project_name)['versions'][-1:] for pending_job in job_list['pending']: pending_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=pending_job['id'], spider_name=pending_job['spider'], job_status=JobStatus.PENDING )) for running_job in job_list['running']: running_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=running_job['id'], spider_name=running_job['spider'], start_time=running_job['start_time'], job_status=JobStatus.RUNNING )) for finished_job in job_list['finished']: finished_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=finished_job['id'], spider_name=finished_job['spider'], start_time=finished_job['start_time'], end_time=finished_job['end_time'], job_status=JobStatus.FINISHED )) return pending_job_list, running_job_list, finished_job_list
python
def get_all_job_list(agent): """ Get all job list by each project name then return three job list on the base of different status(pending,running,finished). """ project_list = agent.get_project_list() if project_list['status'] == 'error': raise ScrapydTimeoutException project_list = project_list['projects'] pending_job_list = [] running_job_list = [] finished_job_list = [] for project_name in project_list: job_list = agent.get_job_list(project_name) # Extract latest version project_version = agent.get_version_list(project_name)['versions'][-1:] for pending_job in job_list['pending']: pending_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=pending_job['id'], spider_name=pending_job['spider'], job_status=JobStatus.PENDING )) for running_job in job_list['running']: running_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=running_job['id'], spider_name=running_job['spider'], start_time=running_job['start_time'], job_status=JobStatus.RUNNING )) for finished_job in job_list['finished']: finished_job_list.append(JobListDO(project_name=project_name, project_version=project_version, job_id=finished_job['id'], spider_name=finished_job['spider'], start_time=finished_job['start_time'], end_time=finished_job['end_time'], job_status=JobStatus.FINISHED )) return pending_job_list, running_job_list, finished_job_list
[ "def", "get_all_job_list", "(", "agent", ")", ":", "project_list", "=", "agent", ".", "get_project_list", "(", ")", "if", "project_list", "[", "'status'", "]", "==", "'error'", ":", "raise", "ScrapydTimeoutException", "project_list", "=", "project_list", "[", "'...
Get all job list by each project name then return three job list on the base of different status(pending,running,finished).
[ "Get", "all", "job", "list", "by", "each", "project", "name", "then", "return", "three", "job", "list", "on", "the", "base", "of", "different", "status", "(", "pending", "running", "finished", ")", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_service.py#L121-L162
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_service.py
get_job_amounts
def get_job_amounts(agent, project_name, spider_name=None): """ Get amounts that pending job amount, running job amount, finished job amount. """ job_list = agent.get_job_list(project_name) pending_job_list = job_list['pending'] running_job_list = job_list['running'] finished_job_list = job_list['finished'] job_amounts = {} if spider_name is None: job_amounts['pending'] = len(pending_job_list) job_amounts['running'] = len(running_job_list) job_amounts['finished'] = len(finished_job_list) else: job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name]) job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name]) job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name]) return job_amounts
python
def get_job_amounts(agent, project_name, spider_name=None): """ Get amounts that pending job amount, running job amount, finished job amount. """ job_list = agent.get_job_list(project_name) pending_job_list = job_list['pending'] running_job_list = job_list['running'] finished_job_list = job_list['finished'] job_amounts = {} if spider_name is None: job_amounts['pending'] = len(pending_job_list) job_amounts['running'] = len(running_job_list) job_amounts['finished'] = len(finished_job_list) else: job_amounts['pending'] = len([j for j in pending_job_list if j['spider'] == spider_name]) job_amounts['running'] = len([j for j in running_job_list if j['spider'] == spider_name]) job_amounts['finished'] = len([j for j in finished_job_list if j['spider'] == spider_name]) return job_amounts
[ "def", "get_job_amounts", "(", "agent", ",", "project_name", ",", "spider_name", "=", "None", ")", ":", "job_list", "=", "agent", ".", "get_job_list", "(", "project_name", ")", "pending_job_list", "=", "job_list", "[", "'pending'", "]", "running_job_list", "=", ...
Get amounts that pending job amount, running job amount, finished job amount.
[ "Get", "amounts", "that", "pending", "job", "amount", "running", "job", "amount", "finished", "job", "amount", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_service.py#L212-L230
gbiggs/rtctree
rtctree/directory.py
corba_name_to_string
def corba_name_to_string(name): '''Convert a CORBA CosNaming.Name to a string.''' parts = [] if type(name) is not list and type(name) is not tuple: raise NotCORBANameError(name) if len(name) == 0: raise NotCORBANameError(name) for nc in name: if not nc.kind: parts.append(nc.id) else: parts.append('{0}.{1}'.format(nc.id, nc.kind)) return '/'.join(parts)
python
def corba_name_to_string(name): '''Convert a CORBA CosNaming.Name to a string.''' parts = [] if type(name) is not list and type(name) is not tuple: raise NotCORBANameError(name) if len(name) == 0: raise NotCORBANameError(name) for nc in name: if not nc.kind: parts.append(nc.id) else: parts.append('{0}.{1}'.format(nc.id, nc.kind)) return '/'.join(parts)
[ "def", "corba_name_to_string", "(", "name", ")", ":", "parts", "=", "[", "]", "if", "type", "(", "name", ")", "is", "not", "list", "and", "type", "(", "name", ")", "is", "not", "tuple", ":", "raise", "NotCORBANameError", "(", "name", ")", "if", "len"...
Convert a CORBA CosNaming.Name to a string.
[ "Convert", "a", "CORBA", "CosNaming", ".", "Name", "to", "a", "string", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/directory.py#L195-L208
gbiggs/rtctree
rtctree/directory.py
Directory.reparse
def reparse(self): '''Reparse all children of this directory. This effectively rebuilds the tree below this node. This operation takes an unbounded time to complete; if there are a lot of objects registered below this directory's context, they will all need to be parsed. ''' self._remove_all_children() self._parse_context(self._context, self.orb)
python
def reparse(self): '''Reparse all children of this directory. This effectively rebuilds the tree below this node. This operation takes an unbounded time to complete; if there are a lot of objects registered below this directory's context, they will all need to be parsed. ''' self._remove_all_children() self._parse_context(self._context, self.orb)
[ "def", "reparse", "(", "self", ")", ":", "self", ".", "_remove_all_children", "(", ")", "self", ".", "_parse_context", "(", "self", ".", "_context", ",", "self", ".", "orb", ")" ]
Reparse all children of this directory. This effectively rebuilds the tree below this node. This operation takes an unbounded time to complete; if there are a lot of objects registered below this directory's context, they will all need to be parsed.
[ "Reparse", "all", "children", "of", "this", "directory", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/directory.py#L57-L68
gbiggs/rtctree
rtctree/directory.py
Directory.unbind
def unbind(self, name): '''Unbind an object from the context represented by this directory. Warning: this is a dangerous operation. You may unlink an entire section of the tree and be unable to recover it. Be careful what you unbind. The name should be in the format used in paths. For example, 'manager.mgr' or 'ConsoleIn0.rtc'. ''' with self._mutex: id, sep, kind = name.rpartition('.') if not id: id = kind kind = '' name = CosNaming.NameComponent(id=str(id), kind=str(kind)) try: self.context.unbind([name]) except CosNaming.NamingContext.NotFound: raise exceptions.BadPathError(name)
python
def unbind(self, name): '''Unbind an object from the context represented by this directory. Warning: this is a dangerous operation. You may unlink an entire section of the tree and be unable to recover it. Be careful what you unbind. The name should be in the format used in paths. For example, 'manager.mgr' or 'ConsoleIn0.rtc'. ''' with self._mutex: id, sep, kind = name.rpartition('.') if not id: id = kind kind = '' name = CosNaming.NameComponent(id=str(id), kind=str(kind)) try: self.context.unbind([name]) except CosNaming.NamingContext.NotFound: raise exceptions.BadPathError(name)
[ "def", "unbind", "(", "self", ",", "name", ")", ":", "with", "self", ".", "_mutex", ":", "id", ",", "sep", ",", "kind", "=", "name", ".", "rpartition", "(", "'.'", ")", "if", "not", "id", ":", "id", "=", "kind", "kind", "=", "''", "name", "=", ...
Unbind an object from the context represented by this directory. Warning: this is a dangerous operation. You may unlink an entire section of the tree and be unable to recover it. Be careful what you unbind. The name should be in the format used in paths. For example, 'manager.mgr' or 'ConsoleIn0.rtc'.
[ "Unbind", "an", "object", "from", "the", "context", "represented", "by", "this", "directory", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/directory.py#L70-L90
frnsys/broca
broca/tokenize/lemma.py
LemmaTokenizer.tokenize
def tokenize(self, docs): """ Tokenizes a document, using a lemmatizer. Args: | doc (str) -- the text document to process. Returns: | list -- the list of tokens. """ if self.n_jobs == 1: return [self._tokenize(doc) for doc in docs] else: return parallel(self._tokenize, docs, self.n_jobs)
python
def tokenize(self, docs): """ Tokenizes a document, using a lemmatizer. Args: | doc (str) -- the text document to process. Returns: | list -- the list of tokens. """ if self.n_jobs == 1: return [self._tokenize(doc) for doc in docs] else: return parallel(self._tokenize, docs, self.n_jobs)
[ "def", "tokenize", "(", "self", ",", "docs", ")", ":", "if", "self", ".", "n_jobs", "==", "1", ":", "return", "[", "self", ".", "_tokenize", "(", "doc", ")", "for", "doc", "in", "docs", "]", "else", ":", "return", "parallel", "(", "self", ".", "_...
Tokenizes a document, using a lemmatizer. Args: | doc (str) -- the text document to process. Returns: | list -- the list of tokens.
[ "Tokenizes", "a", "document", "using", "a", "lemmatizer", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/lemma.py#L17-L29
ltalirz/aiida-gudhi
examples/cli.py
main
def main(codelabel, submit): """Command line interface for testing and submitting calculations. This script extends submit.py, adding flexibility in the selected code/computer. Run './cli.py --help' to see options. """ code = Code.get_from_string(codelabel) # set up calculation calc = code.new_calc() calc.label = "compute rips from distance matrix" calc.set_max_wallclock_seconds(1 * 60) calc.set_withmpi(False) calc.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1}) # Prepare input parameters from aiida.orm import DataFactory Parameters = DataFactory('gudhi.rdm') parameters = Parameters(dict={'max-edge-length': 4.2}) calc.use_parameters(parameters) SinglefileData = DataFactory('singlefile') distance_matrix = SinglefileData( file=os.path.join(gt.TEST_DIR, 'sample_distance.matrix')) calc.use_distance_matrix(distance_matrix) if submit: calc.store_all() calc.submit() print("submitted calculation; calc=Calculation(uuid='{}') # ID={}"\ .format(calc.uuid,calc.dbnode.pk)) else: subfolder, script_filename = calc.submit_test() path = os.path.relpath(subfolder.abspath) print("submission test successful") print("Find remote folder in {}".format(path)) print("In order to actually submit, add '--submit'")
python
def main(codelabel, submit): """Command line interface for testing and submitting calculations. This script extends submit.py, adding flexibility in the selected code/computer. Run './cli.py --help' to see options. """ code = Code.get_from_string(codelabel) # set up calculation calc = code.new_calc() calc.label = "compute rips from distance matrix" calc.set_max_wallclock_seconds(1 * 60) calc.set_withmpi(False) calc.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1}) # Prepare input parameters from aiida.orm import DataFactory Parameters = DataFactory('gudhi.rdm') parameters = Parameters(dict={'max-edge-length': 4.2}) calc.use_parameters(parameters) SinglefileData = DataFactory('singlefile') distance_matrix = SinglefileData( file=os.path.join(gt.TEST_DIR, 'sample_distance.matrix')) calc.use_distance_matrix(distance_matrix) if submit: calc.store_all() calc.submit() print("submitted calculation; calc=Calculation(uuid='{}') # ID={}"\ .format(calc.uuid,calc.dbnode.pk)) else: subfolder, script_filename = calc.submit_test() path = os.path.relpath(subfolder.abspath) print("submission test successful") print("Find remote folder in {}".format(path)) print("In order to actually submit, add '--submit'")
[ "def", "main", "(", "codelabel", ",", "submit", ")", ":", "code", "=", "Code", ".", "get_from_string", "(", "codelabel", ")", "# set up calculation", "calc", "=", "code", ".", "new_calc", "(", ")", "calc", ".", "label", "=", "\"compute rips from distance matri...
Command line interface for testing and submitting calculations. This script extends submit.py, adding flexibility in the selected code/computer. Run './cli.py --help' to see options.
[ "Command", "line", "interface", "for", "testing", "and", "submitting", "calculations", "." ]
train
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/examples/cli.py#L10-L47
darkfeline/animanager
animanager/commands/register.py
command
def command(state, args): """Register watching regexp for an anime.""" args = parser.parse_args(args[1:]) aid = state.results.parse_aid(args.aid, default_key='db') if args.query: # Use regexp provided by user. regexp = '.*'.join(args.query) else: # Make default regexp. title = query.select.lookup(state.db, aid, fields=['title']).title # Replace non-word, non-whitespace with whitespace. regexp = re.sub(r'[^\w\s]', ' ', title) # Split on whitespace and join with wildcard regexp. regexp = '.*?'.join(re.escape(x) for x in regexp.split()) # Append episode matching regexp. regexp = '.*?'.join(( regexp, r'\b(?P<ep>[0-9]+)(v[0-9]+)?', )) query.files.set_regexp(state.db, aid, regexp)
python
def command(state, args): """Register watching regexp for an anime.""" args = parser.parse_args(args[1:]) aid = state.results.parse_aid(args.aid, default_key='db') if args.query: # Use regexp provided by user. regexp = '.*'.join(args.query) else: # Make default regexp. title = query.select.lookup(state.db, aid, fields=['title']).title # Replace non-word, non-whitespace with whitespace. regexp = re.sub(r'[^\w\s]', ' ', title) # Split on whitespace and join with wildcard regexp. regexp = '.*?'.join(re.escape(x) for x in regexp.split()) # Append episode matching regexp. regexp = '.*?'.join(( regexp, r'\b(?P<ep>[0-9]+)(v[0-9]+)?', )) query.files.set_regexp(state.db, aid, regexp)
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "aid", "=", "state", ".", "results", ".", "parse_aid", "(", "args", ".", "aid", ",", "default_key", "=", "'db'", ...
Register watching regexp for an anime.
[ "Register", "watching", "regexp", "for", "an", "anime", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/register.py#L25-L44
alvarogzp/python-sqlite-framework
sqlite_framework/sql/statement/builder/alter_table.py
AlterTable.from_definition
def from_definition(self, table: Table, version: int): """Add all columns from the table added in the specified version""" self.table(table) self.add_columns(*table.columns.get_with_version(version)) return self
python
def from_definition(self, table: Table, version: int): """Add all columns from the table added in the specified version""" self.table(table) self.add_columns(*table.columns.get_with_version(version)) return self
[ "def", "from_definition", "(", "self", ",", "table", ":", "Table", ",", "version", ":", "int", ")", ":", "self", ".", "table", "(", "table", ")", "self", ".", "add_columns", "(", "*", "table", ".", "columns", ".", "get_with_version", "(", "version", ")...
Add all columns from the table added in the specified version
[ "Add", "all", "columns", "from", "the", "table", "added", "in", "the", "specified", "version" ]
train
https://github.com/alvarogzp/python-sqlite-framework/blob/29db97a64f95cfe13eb7bae1d00b624b5a37b152/sqlite_framework/sql/statement/builder/alter_table.py#L21-L25
knowmalware/camcrypt
camcrypt/__init__.py
CamCrypt.keygen
def keygen(self, keyBitLength, rawKey): """ This must be called on the object before any encryption or decryption can take place. Provide it the key bit length, which must be 128, 192, or 256, and the key, which may be a sequence of bytes or a simple string. Does not return any value. Raises an exception if the arguments are not sane. """ if keyBitLength not in ACCEPTABLE_KEY_LENGTHS: raise Exception("keyBitLength must be 128, 192, or 256") self.bitlen = keyBitLength if len(rawKey) <= 0 or len(rawKey) > self.bitlen/8: raise Exception("rawKey must be less than or equal to keyBitLength/8 (%d) characters long" % (self.bitlen/8)) rawKey = zero_pad(rawKey, self.bitlen/8) keytable = ctypes.create_string_buffer(TABLE_BYTE_LEN) self.ekeygen(self.bitlen, rawKey, keytable) self.keytable = keytable self.initialized = True
python
def keygen(self, keyBitLength, rawKey): """ This must be called on the object before any encryption or decryption can take place. Provide it the key bit length, which must be 128, 192, or 256, and the key, which may be a sequence of bytes or a simple string. Does not return any value. Raises an exception if the arguments are not sane. """ if keyBitLength not in ACCEPTABLE_KEY_LENGTHS: raise Exception("keyBitLength must be 128, 192, or 256") self.bitlen = keyBitLength if len(rawKey) <= 0 or len(rawKey) > self.bitlen/8: raise Exception("rawKey must be less than or equal to keyBitLength/8 (%d) characters long" % (self.bitlen/8)) rawKey = zero_pad(rawKey, self.bitlen/8) keytable = ctypes.create_string_buffer(TABLE_BYTE_LEN) self.ekeygen(self.bitlen, rawKey, keytable) self.keytable = keytable self.initialized = True
[ "def", "keygen", "(", "self", ",", "keyBitLength", ",", "rawKey", ")", ":", "if", "keyBitLength", "not", "in", "ACCEPTABLE_KEY_LENGTHS", ":", "raise", "Exception", "(", "\"keyBitLength must be 128, 192, or 256\"", ")", "self", ".", "bitlen", "=", "keyBitLength", "...
This must be called on the object before any encryption or decryption can take place. Provide it the key bit length, which must be 128, 192, or 256, and the key, which may be a sequence of bytes or a simple string. Does not return any value. Raises an exception if the arguments are not sane.
[ "This", "must", "be", "called", "on", "the", "object", "before", "any", "encryption", "or", "decryption", "can", "take", "place", ".", "Provide", "it", "the", "key", "bit", "length", "which", "must", "be", "128", "192", "or", "256", "and", "the", "key", ...
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/__init__.py#L54-L71
knowmalware/camcrypt
camcrypt/__init__.py
CamCrypt.encrypt
def encrypt(self, plainText): """Encrypt an arbitrary-length block of data. NOTE: This function formerly worked only on 16-byte blocks of `plainText`. code that assumed this should still work fine, but can optionally be modified to call `encrypt_block` instead. Args: plainText (str): data to encrypt. If the data is not a multiple of 16 bytes long, it will be padded with null (0x00) bytes until it is. Returns: encrypted data. Note that this will always be a multiple of 16 bytes long. """ encryptedResult = '' for index in range(0, len(plainText), BLOCK_SIZE): block = plainText[index:index + BLOCK_SIZE] # Pad to required length if needed if len(block) < BLOCK_SIZE: block = zero_pad(block, BLOCK_SIZE) encryptedResult += self.encrypt_block(block) return encryptedResult
python
def encrypt(self, plainText): """Encrypt an arbitrary-length block of data. NOTE: This function formerly worked only on 16-byte blocks of `plainText`. code that assumed this should still work fine, but can optionally be modified to call `encrypt_block` instead. Args: plainText (str): data to encrypt. If the data is not a multiple of 16 bytes long, it will be padded with null (0x00) bytes until it is. Returns: encrypted data. Note that this will always be a multiple of 16 bytes long. """ encryptedResult = '' for index in range(0, len(plainText), BLOCK_SIZE): block = plainText[index:index + BLOCK_SIZE] # Pad to required length if needed if len(block) < BLOCK_SIZE: block = zero_pad(block, BLOCK_SIZE) encryptedResult += self.encrypt_block(block) return encryptedResult
[ "def", "encrypt", "(", "self", ",", "plainText", ")", ":", "encryptedResult", "=", "''", "for", "index", "in", "range", "(", "0", ",", "len", "(", "plainText", ")", ",", "BLOCK_SIZE", ")", ":", "block", "=", "plainText", "[", "index", ":", "index", "...
Encrypt an arbitrary-length block of data. NOTE: This function formerly worked only on 16-byte blocks of `plainText`. code that assumed this should still work fine, but can optionally be modified to call `encrypt_block` instead. Args: plainText (str): data to encrypt. If the data is not a multiple of 16 bytes long, it will be padded with null (0x00) bytes until it is. Returns: encrypted data. Note that this will always be a multiple of 16 bytes long.
[ "Encrypt", "an", "arbitrary", "-", "length", "block", "of", "data", "." ]
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/__init__.py#L73-L95
knowmalware/camcrypt
camcrypt/__init__.py
CamCrypt.decrypt
def decrypt(self, cipherText): """Decrypt an arbitrary-length block of data. NOTE: This function formerly worked only on 16-byte blocks of `cipherText`. code that assumed this should still work fine, but can optionally be modified to call `decrypt_block` instead. Args: cipherText (str): data to decrypt. If the data is not a multiple of 16 bytes long, it will be padded with null (0x00) bytes until it is. WARNING: This is almost certainty never need to happen for correctly-encrypted data. Returns: decrypted data. Note that this will always be a multiple of 16 bytes long. If the original data was not a multiple of 16 bytes, the result will contain trailing null bytes, which can be removed with `.rstrip('\x00')` """ decryptedResult = '' for index in range(0, len(cipherText), BLOCK_SIZE): block = cipherText[index:index + BLOCK_SIZE] # Pad to required length if needed if len(block) < BLOCK_SIZE: block = zero_pad(block, BLOCK_SIZE) decryptedResult += self.decrypt_block(block) return decryptedResult
python
def decrypt(self, cipherText): """Decrypt an arbitrary-length block of data. NOTE: This function formerly worked only on 16-byte blocks of `cipherText`. code that assumed this should still work fine, but can optionally be modified to call `decrypt_block` instead. Args: cipherText (str): data to decrypt. If the data is not a multiple of 16 bytes long, it will be padded with null (0x00) bytes until it is. WARNING: This is almost certainty never need to happen for correctly-encrypted data. Returns: decrypted data. Note that this will always be a multiple of 16 bytes long. If the original data was not a multiple of 16 bytes, the result will contain trailing null bytes, which can be removed with `.rstrip('\x00')` """ decryptedResult = '' for index in range(0, len(cipherText), BLOCK_SIZE): block = cipherText[index:index + BLOCK_SIZE] # Pad to required length if needed if len(block) < BLOCK_SIZE: block = zero_pad(block, BLOCK_SIZE) decryptedResult += self.decrypt_block(block) return decryptedResult
[ "def", "decrypt", "(", "self", ",", "cipherText", ")", ":", "decryptedResult", "=", "''", "for", "index", "in", "range", "(", "0", ",", "len", "(", "cipherText", ")", ",", "BLOCK_SIZE", ")", ":", "block", "=", "cipherText", "[", "index", ":", "index", ...
Decrypt an arbitrary-length block of data. NOTE: This function formerly worked only on 16-byte blocks of `cipherText`. code that assumed this should still work fine, but can optionally be modified to call `decrypt_block` instead. Args: cipherText (str): data to decrypt. If the data is not a multiple of 16 bytes long, it will be padded with null (0x00) bytes until it is. WARNING: This is almost certainty never need to happen for correctly-encrypted data. Returns: decrypted data. Note that this will always be a multiple of 16 bytes long. If the original data was not a multiple of 16 bytes, the result will contain trailing null bytes, which can be removed with `.rstrip('\x00')`
[ "Decrypt", "an", "arbitrary", "-", "length", "block", "of", "data", "." ]
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/__init__.py#L97-L123