repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
jbloomlab/phydms
phydmslib/simulate.py
simulateAlignment
def simulateAlignment(model, treeFile, alignmentPrefix, randomSeed=False):
    """Simulate an alignment given a model and tree (units = subs/site).

    Simulations done using `pyvolve`.

    Args:
        `model` (`phydmslib.models.Models` object)
            The model used for the simulations. Only models that can be
            passed to `pyvolve.Partitions` are supported.
        `treeFile` (str)
            Name of newick file used to simulate the sequences.
            The branch lengths should be in substitutions per site,
            which is the default units for all `phydms` outputs.
        `alignmentPrefix` (str)
            Prefix for the files created by `pyvolve`.
        `randomSeed` (int or `False`)
            Seed for the random number generator; `False` (the default)
            means no explicit seeding.

    The result of this function is a simulated FASTA alignment
    file with the name having the prefix given by `alignmentPrefix`
    and the suffix `'_simulatedalignment.fasta'`.
    """
    # `is not False` (rather than `!= False`) so that a seed of 0 is honored
    # instead of being silently ignored (0 == False in Python).
    if randomSeed is not False:
        random.seed(randomSeed)

    # Transform the branch lengths by dividing by the model `branchScale`
    # so that pyvolve's branch-length units match the model's scaling.
    tree = Bio.Phylo.read(treeFile, 'newick')
    for node in tree.get_terminals() + tree.get_nonterminals():
        if (node.branch_length is None) and (node == tree.root):
            # the root typically has no branch length; give it a tiny one
            node.branch_length = 1e-06
        else:
            node.branch_length /= model.branchScale
    # Write the rescaled tree to a temporary file for pyvolve to read.
    fd, temp_path = mkstemp()
    Bio.Phylo.write(tree, temp_path, 'newick')
    os.close(fd)
    pyvolve_tree = pyvolve.read_tree(file=temp_path)
    os.remove(temp_path)

    # Make the `pyvolve` partition
    partitions = pyvolvePartitions(model)

    # Simulate the alignment; info/rate files are pyvolve side products
    # that we delete afterwards.
    alignment = '{0}_simulatedalignment.fasta'.format(alignmentPrefix)
    info = '_temp_{0}info.txt'.format(alignmentPrefix)
    rates = '_temp_{0}_ratefile.txt'.format(alignmentPrefix)
    evolver = pyvolve.Evolver(partitions=partitions, tree=pyvolve_tree)
    evolver(seqfile=alignment, infofile=info, ratefile=rates)
    for f in [rates, info, "custom_matrix_frequencies.txt"]:
        if os.path.isfile(f):
            os.remove(f)
    assert os.path.isfile(alignment)
python
def simulateAlignment(model, treeFile, alignmentPrefix, randomSeed=False): """ Simulate an alignment given a model and tree (units = subs/site). Simulations done using `pyvolve`. Args: `model` (`phydmslib.models.Models` object) The model used for the simulations. Only models that can be passed to `pyvolve.Partitions` are supported. `treeFile` (str) Name of newick file used to simulate the sequences. The branch lengths should be in substitutions per site, which is the default units for all `phydms` outputs. `alignmentPrefix` Prefix for the files created by `pyvolve`. The result of this function is a simulated FASTA alignment file with the name having the prefix giving by `alignmentPrefix` and the suffix `'_simulatedalignment.fasta'`. """ if randomSeed == False: pass else: random.seed(randomSeed) #Transform the branch lengths by dividing by the model `branchScale` tree = Bio.Phylo.read(treeFile, 'newick') for node in tree.get_terminals() + tree.get_nonterminals(): if (node.branch_length == None) and (node == tree.root): node.branch_length = 1e-06 else: node.branch_length /= model.branchScale fd, temp_path = mkstemp() Bio.Phylo.write(tree, temp_path, 'newick') os.close(fd) pyvolve_tree = pyvolve.read_tree(file=temp_path) os.remove(temp_path) #Make the `pyvolve` partition partitions = pyvolvePartitions(model) #Simulate the alignment alignment = '{0}_simulatedalignment.fasta'.format(alignmentPrefix) info = '_temp_{0}info.txt'.format(alignmentPrefix) rates = '_temp_{0}_ratefile.txt'.format(alignmentPrefix) evolver = pyvolve.Evolver(partitions=partitions, tree=pyvolve_tree) evolver(seqfile=alignment, infofile=info, ratefile=rates) for f in [rates,info, "custom_matrix_frequencies.txt"]: if os.path.isfile(f): os.remove(f) assert os.path.isfile(alignment)
[ "def", "simulateAlignment", "(", "model", ",", "treeFile", ",", "alignmentPrefix", ",", "randomSeed", "=", "False", ")", ":", "if", "randomSeed", "==", "False", ":", "pass", "else", ":", "random", ".", "seed", "(", "randomSeed", ")", "#Transform the branch len...
Simulate an alignment given a model and tree (units = subs/site). Simulations done using `pyvolve`. Args: `model` (`phydmslib.models.Models` object) The model used for the simulations. Only models that can be passed to `pyvolve.Partitions` are supported. `treeFile` (str) Name of newick file used to simulate the sequences. The branch lengths should be in substitutions per site, which is the default units for all `phydms` outputs. `alignmentPrefix` Prefix for the files created by `pyvolve`. The result of this function is a simulated FASTA alignment file with the name having the prefix giving by `alignmentPrefix` and the suffix `'_simulatedalignment.fasta'`.
[ "Simulate", "an", "alignment", "given", "a", "model", "and", "tree", "(", "units", "=", "subs", "/", "site", ")", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/simulate.py#L97-L150
wangsix/vmo
vmo/VMO/oracle.py
_create_oracle
def _create_oracle(oracle_type, **kwargs):
    """A routine for creating a factor oracle.

    `oracle_type == 'f'` yields a plain factor oracle (`FO`); every other
    value falls back to a variable-markov oracle (`MO`).  The original
    `elif oracle_type == 'a'` branch was identical to the `else` branch,
    so the two are merged.
    """
    if oracle_type == 'f':
        return FO(**kwargs)
    return MO(**kwargs)
python
def _create_oracle(oracle_type, **kwargs): """A routine for creating a factor oracle.""" if oracle_type == 'f': return FO(**kwargs) elif oracle_type == 'a': return MO(**kwargs) else: return MO(**kwargs)
[ "def", "_create_oracle", "(", "oracle_type", ",", "*", "*", "kwargs", ")", ":", "if", "oracle_type", "==", "'f'", ":", "return", "FO", "(", "*", "*", "kwargs", ")", "elif", "oracle_type", "==", "'a'", ":", "return", "MO", "(", "*", "*", "kwargs", ")"...
A routine for creating a factor oracle.
[ "A", "routine", "for", "creating", "a", "factor", "oracle", "." ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/VMO/oracle.py#L656-L663
wangsix/vmo
vmo/VMO/oracle.py
FactorOracle.segment
def segment(self):
    """A non-overlap version of Compror.

    Incrementally extends ``self.seg`` (a list of ``(length, position)``
    codewords) so segments never overlap, and returns the full list.
    Resumes from the last recorded segment if one exists.

    NOTE(review): the index arithmetic below mirrors the Compror coding
    scheme over the oracle's suffix links (``self.sfx``) and longest
    repeated suffix lengths (``self.lrs``) — presumably as defined for
    factor oracles; verify against the Compror reference before changing.
    """
    if not self.seg:
        j = 0
    else:
        # resume after the previously encoded prefix
        j = self.seg[-1][1]
        last_len = self.seg[-1][0]
        if last_len + j > self.n_states:
            # nothing new to encode yet
            return
    i = j
    while j < self.n_states - 1:
        # advance i while the repeated-suffix length keeps covering [j, i]
        # (double-negated form kept verbatim: loop while i < n_states-1
        # and lrs[i+1] >= i - j + 1)
        while not (not (i < self.n_states - 1) or not (self.lrs[i + 1] >= i - j + 1)):
            i += 1
        if i == j:
            # no extension possible: emit a single-symbol codeword
            i += 1
            self.seg.append((0, i))
        else:
            if (self.sfx[i] + self.lrs[i]) <= i:
                # the repeated block lies entirely before i: one codeword
                self.seg.append((i - j, self.sfx[i] - i + j + 1))
            else:
                # the repeated block would overlap itself; split it
                _i = j + i - self.sfx[i]
                self.seg.append((_i - j, self.sfx[i] - i + j + 1))
                _j = _i
                # same extension scan as above, restricted to [_j, i]
                while not (not (_i < i) or not (self.lrs[_i + 1] - self.lrs[_j] >= _i - _j + 1)):
                    _i += 1
                if _i == _j:
                    _i += 1
                    self.seg.append((0, _i))
                else:
                    self.seg.append((_i - _j, self.sfx[_i] - _i + _j + 1))
        j = i
    return self.seg
python
def segment(self): """An non-overlap version Compror""" if not self.seg: j = 0 else: j = self.seg[-1][1] last_len = self.seg[-1][0] if last_len + j > self.n_states: return i = j while j < self.n_states - 1: while not (not (i < self.n_states - 1) or not (self.lrs[i + 1] >= i - j + 1)): i += 1 if i == j: i += 1 self.seg.append((0, i)) else: if (self.sfx[i] + self.lrs[i]) <= i: self.seg.append((i - j, self.sfx[i] - i + j + 1)) else: _i = j + i - self.sfx[i] self.seg.append((_i - j, self.sfx[i] - i + j + 1)) _j = _i while not (not (_i < i) or not (self.lrs[_i + 1] - self.lrs[_j] >= _i - _j + 1)): _i += 1 if _i == _j: _i += 1 self.seg.append((0, _i)) else: self.seg.append((_i - _j, self.sfx[_i] - _i + _j + 1)) j = i return self.seg
[ "def", "segment", "(", "self", ")", ":", "if", "not", "self", ".", "seg", ":", "j", "=", "0", "else", ":", "j", "=", "self", ".", "seg", "[", "-", "1", "]", "[", "1", "]", "last_len", "=", "self", ".", "seg", "[", "-", "1", "]", "[", "0",...
An non-overlap version Compror
[ "An", "non", "-", "overlap", "version", "Compror" ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/VMO/oracle.py#L210-L244
wangsix/vmo
vmo/VMO/oracle.py
FactorOracle._ir_cum2
def _ir_cum2(self, alpha=1.0):
    """Cumulative information rate, variant 2.

    Returns ``(ir, h0, h1)`` where ``h0`` is the log of the running count
    of states whose suffix link points to the root, ``h1`` is a per-state
    complexity term normalized by Compror block lengths, and
    ``ir = alpha * h0 - h1`` clipped at zero.

    NOTE(review): the loop below reads ``self.code[i][0]`` in the test but
    ``code[i][0]`` in the else-branch — presumably ``self.code`` is set by
    ``self.encode()`` and the two agree, but confirm; if not, one of the
    two is a bug.
    """
    code, _ = self.encode()
    N = self.n_states
    BL = np.zeros(N - 1)  # BL is the block length of compror codewords
    # h0: log2 of the cumulative number of "novel" states (sfx == 0)
    h0 = np.log2(np.cumsum(
        [1.0 if sfx == 0 else 0.0 for sfx in self.sfx[1:]])
    )
    """
    h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.lrs[1:])]) h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.max_lrs[1:])]) h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.avg_lrs[1:])])
    """
    # h1 uses max_lrs (the alternatives above were kept as a record of
    # formulas previously experimented with)
    h1 = np.array([np.log2(i + 1) if m == 0
                   else np.log2(i + 1) + np.log2(m)
                   for i, m in enumerate(self.max_lrs[1:])])
    # expand each codeword into per-state block lengths
    j = 0
    for i in range(len(code)):
        if self.code[i][0] == 0:
            BL[j] = 1
            j += 1
        else:
            L = code[i][0]
            BL[j:j + L] = L  # range(1,L+1)
            j = j + L
    h1 = h1 / BL
    ir = alpha * h0 - h1
    ir[ir < 0] = 0  # Really a HACK here!!!!!
    return ir, h0, h1
python
def _ir_cum2(self, alpha=1.0): code, _ = self.encode() N = self.n_states BL = np.zeros(N - 1) # BL is the block length of compror codewords h0 = np.log2(np.cumsum( [1.0 if sfx == 0 else 0.0 for sfx in self.sfx[1:]]) ) """ h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.lrs[1:])]) h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.max_lrs[1:])]) h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.avg_lrs[1:])]) """ h1 = np.array([np.log2(i + 1) if m == 0 else np.log2(i + 1) + np.log2(m) for i, m in enumerate(self.max_lrs[1:])]) j = 0 for i in range(len(code)): if self.code[i][0] == 0: BL[j] = 1 j += 1 else: L = code[i][0] BL[j:j + L] = L # range(1,L+1) j = j + L h1 = h1 / BL ir = alpha * h0 - h1 ir[ir < 0] = 0 # Really a HACK here!!!!! return ir, h0, h1
[ "def", "_ir_cum2", "(", "self", ",", "alpha", "=", "1.0", ")", ":", "code", ",", "_", "=", "self", ".", "encode", "(", ")", "N", "=", "self", ".", "n_states", "BL", "=", "np", ".", "zeros", "(", "N", "-", "1", ")", "# BL is the block length of comp...
h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.lrs[1:])]) h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.max_lrs[1:])]) h1 = np.array([h if m == 0 else h+np.log2(m) for h,m in zip(h0,self.avg_lrs[1:])])
[ "h1", "=", "np", ".", "array", "(", "[", "h", "if", "m", "==", "0", "else", "h", "+", "np", ".", "log2", "(", "m", ")", "for", "h", "m", "in", "zip", "(", "h0", "self", ".", "lrs", "[", "1", ":", "]", ")", "]", ")", "h1", "=", "np", "...
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/VMO/oracle.py#L318-L351
wangsix/vmo
vmo/VMO/oracle.py
FO.accept
def accept(self, context):
    """Check whether the oracle can accept the given context.

    Args:
        context: a sequence of the same type as the oracle data.

    Returns:
        bAccepted: 1 if the whole sequence is accepted, else 0.
        _next: the state reached when acceptance succeeded or failed.
    """
    state = 0
    for symbol in context:
        # symbols reachable by forward transitions from the current state
        candidates = [self.data[j] for j in self.trn[state]]
        try:
            pos = candidates.index(symbol)
        except ValueError:
            # no outgoing transition labeled with this symbol
            return 0, state
        state = self.trn[state][pos]
    return 1, state
python
def accept(self, context): """ Check if the context could be accepted by the oracle Args: context: s sequence same type as the oracle data Returns: bAccepted: whether the sequence is accepted or not _next: the state where the sequence is accepted """ _next = 0 for _s in context: _data = [self.data[j] for j in self.trn[_next]] if _s in _data: _next = self.trn[_next][_data.index(_s)] else: return 0, _next return 1, _next
[ "def", "accept", "(", "self", ",", "context", ")", ":", "_next", "=", "0", "for", "_s", "in", "context", ":", "_data", "=", "[", "self", ".", "data", "[", "j", "]", "for", "j", "in", "self", ".", "trn", "[", "_next", "]", "]", "if", "_s", "in...
Check if the context could be accepted by the oracle Args: context: s sequence same type as the oracle data Returns: bAccepted: whether the sequence is accepted or not _next: the state where the sequence is accepted
[ "Check", "if", "the", "context", "could", "be", "accepted", "by", "the", "oracle", "Args", ":", "context", ":", "s", "sequence", "same", "type", "as", "the", "oracle", "data", "Returns", ":", "bAccepted", ":", "whether", "the", "sequence", "is", "accepted"...
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/VMO/oracle.py#L475-L492
wangsix/vmo
vmo/VMO/oracle.py
MO.add_state
def add_state(self, new_data, method='inc'):
    """Create new state and update related links and compressed state.

    Appends `new_data` as state ``i = n_states``, then walks suffix links
    backwards from state ``i-1`` looking for a prior state whose outgoing
    transitions are within ``self.params['threshold']`` distance of the
    new observation; that determines the new state's suffix link, lrs,
    and latent-cluster assignment.

    `method` selects the suffix search: ``'inc'`` stops at the first
    match, ``'complete'`` collects all candidates and picks the closest.
    """
    # grow the per-state link structures for the new state
    self.sfx.append(0)
    self.rsfx.append([])
    self.trn.append([])
    self.lrs.append(0)

    # Experiment with pointer-based
    self.f_array.add(new_data)

    self.n_states += 1

    i = self.n_states - 1

    # assign new transition from state i-1 to i
    self.trn[i - 1].append(i)
    k = self.sfx[i - 1]
    pi_1 = i - 1

    # iteratively backtrack suffixes from state i-1
    if method == 'inc':
        suffix_candidate = 0
    elif method == 'complete':
        suffix_candidate = []  # list of (state, distance) candidates
    else:
        suffix_candidate = 0

    while k is not None:
        # distance from new_data to every transition target of state k
        if self.params['dfunc'] == 'other':
            # dvec = self.dfunc_handle([new_data],
            #                          self.f_array[self.trn[k]])[0]
            dvec = dist.cdist([new_data], self.f_array[self.trn[k]],
                              metric=self.params['dfunc_handle'])[0]
        else:
            dvec = dist.cdist([new_data], self.f_array[self.trn[k]],
                              metric=self.params['dfunc'])[0]
        I = np.where(dvec < self.params['threshold'])[0]

        if len(I) == 0:  # if no transition from suffix
            self.trn[k].append(i)  # Add new forward link to unvisited state
            pi_1 = k
            if method != 'complete':
                k = self.sfx[k]
        else:
            if method == 'inc':
                # first (or closest, if several) in-threshold transition wins
                if I.shape[0] == 1:
                    suffix_candidate = self.trn[k][I[0]]
                else:
                    suffix_candidate = self.trn[k][I[np.argmin(dvec[I])]]
                break
            elif method == 'complete':
                # record the best candidate at this suffix level; keep going
                suffix_candidate.append((self.trn[k][I[np.argmin(dvec[I])]],
                                         np.min(dvec)))
            else:
                suffix_candidate = self.trn[k][I[np.argmin(dvec[I])]]
                break

        if method == 'complete':
            k = self.sfx[k]

    if method == 'complete':
        if not suffix_candidate:
            # no match anywhere: new state starts its own latent cluster
            self.sfx[i] = 0
            self.lrs[i] = 0
            self.latent.append([i])
            self.data.append(len(self.latent) - 1)
        else:
            # pick the globally closest candidate across all suffix levels
            sorted_suffix_candidates = sorted(suffix_candidate,
                                              key=lambda suffix: suffix[1])
            self.sfx[i] = sorted_suffix_candidates[0][0]
            self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1
            self.latent[self.data[self.sfx[i]]].append(i)
            self.data.append(self.data[self.sfx[i]])
    else:
        if k is None:
            # suffix chain exhausted without a match: new latent cluster
            self.sfx[i] = 0
            self.lrs[i] = 0
            self.latent.append([i])
            self.data.append(len(self.latent) - 1)
        else:
            # join the cluster of the matched suffix state
            self.sfx[i] = suffix_candidate
            self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1
            self.latent[self.data[self.sfx[i]]].append(i)
            self.data.append(self.data[self.sfx[i]])

    # Temporary adjustment
    k = self._find_better(i, self.data[i - self.lrs[i]])
    if k is not None:
        self.lrs[i] += 1
        self.sfx[i] = k
    self.rsfx[self.sfx[i]].append(i)

    # maintain running max and running average of lrs
    if self.lrs[i] > self.max_lrs[i - 1]:
        self.max_lrs.append(self.lrs[i])
    else:
        self.max_lrs.append(self.max_lrs[i - 1])

    self.avg_lrs.append(self.avg_lrs[i - 1] * ((i - 1.0) / (self.n_states - 1.0)) +
                        self.lrs[i] * (1.0 / (self.n_states - 1.0)))
python
def add_state(self, new_data, method='inc'): """Create new state and update related links and compressed state""" self.sfx.append(0) self.rsfx.append([]) self.trn.append([]) self.lrs.append(0) # Experiment with pointer-based self.f_array.add(new_data) self.n_states += 1 i = self.n_states - 1 # assign new transition from state i-1 to i self.trn[i - 1].append(i) k = self.sfx[i - 1] pi_1 = i - 1 # iteratively backtrack suffixes from state i-1 if method == 'inc': suffix_candidate = 0 elif method == 'complete': suffix_candidate = [] else: suffix_candidate = 0 while k is not None: if self.params['dfunc'] == 'other': # dvec = self.dfunc_handle([new_data], # self.f_array[self.trn[k]])[0] dvec = dist.cdist([new_data], self.f_array[self.trn[k]], metric=self.params['dfunc_handle'])[0] else: dvec = dist.cdist([new_data], self.f_array[self.trn[k]], metric=self.params['dfunc'])[0] I = np.where(dvec < self.params['threshold'])[0] if len(I) == 0: # if no transition from suffix self.trn[k].append(i) # Add new forward link to unvisited state pi_1 = k if method != 'complete': k = self.sfx[k] else: if method == 'inc': if I.shape[0] == 1: suffix_candidate = self.trn[k][I[0]] else: suffix_candidate = self.trn[k][I[np.argmin(dvec[I])]] break elif method == 'complete': suffix_candidate.append((self.trn[k][I[np.argmin(dvec[I])]], np.min(dvec))) else: suffix_candidate = self.trn[k][I[np.argmin(dvec[I])]] break if method == 'complete': k = self.sfx[k] if method == 'complete': if not suffix_candidate: self.sfx[i] = 0 self.lrs[i] = 0 self.latent.append([i]) self.data.append(len(self.latent) - 1) else: sorted_suffix_candidates = sorted(suffix_candidate, key=lambda suffix: suffix[1]) self.sfx[i] = sorted_suffix_candidates[0][0] self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1 self.latent[self.data[self.sfx[i]]].append(i) self.data.append(self.data[self.sfx[i]]) else: if k is None: self.sfx[i] = 0 self.lrs[i] = 0 self.latent.append([i]) self.data.append(len(self.latent) - 1) else: 
self.sfx[i] = suffix_candidate self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1 self.latent[self.data[self.sfx[i]]].append(i) self.data.append(self.data[self.sfx[i]]) # Temporary adjustment k = self._find_better(i, self.data[i - self.lrs[i]]) if k is not None: self.lrs[i] += 1 self.sfx[i] = k self.rsfx[self.sfx[i]].append(i) if self.lrs[i] > self.max_lrs[i - 1]: self.max_lrs.append(self.lrs[i]) else: self.max_lrs.append(self.max_lrs[i - 1]) self.avg_lrs.append(self.avg_lrs[i - 1] * ((i - 1.0) / (self.n_states - 1.0)) + self.lrs[i] * (1.0 / (self.n_states - 1.0)))
[ "def", "add_state", "(", "self", ",", "new_data", ",", "method", "=", "'inc'", ")", ":", "self", ".", "sfx", ".", "append", "(", "0", ")", "self", ".", "rsfx", ".", "append", "(", "[", "]", ")", "self", ".", "trn", ".", "append", "(", "[", "]",...
Create new state and update related links and compressed state
[ "Create", "new", "state", "and", "update", "related", "links", "and", "compressed", "state" ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/VMO/oracle.py#L528-L629
ubccr/pinky
pinky/perception/cycle.py
Cycle.rotate
def rotate(self, atom):
    """(atom)->start the cycle at position atom, assumes that atom
    is in the cycle"""
    try:
        offset = self.atoms.index(atom)
    except ValueError:
        raise CycleError("atom %s not in cycle"%(atom))
    # rotate both parallel lists by the same offset so atoms and
    # bonds stay aligned
    self.atoms, self.bonds = (
        self.atoms[offset:] + self.atoms[:offset],
        self.bonds[offset:] + self.bonds[:offset],
    )
python
def rotate(self, atom): """(atom)->start the cycle at position atom, assumes that atom is in the cycle""" try: index = self.atoms.index(atom) except ValueError: raise CycleError("atom %s not in cycle"%(atom)) self.atoms = self.atoms[index:] + self.atoms[:index] self.bonds = self.bonds[index:] + self.bonds[:index]
[ "def", "rotate", "(", "self", ",", "atom", ")", ":", "try", ":", "index", "=", "self", ".", "atoms", ".", "index", "(", "atom", ")", "except", "ValueError", ":", "raise", "CycleError", "(", "\"atom %s not in cycle\"", "%", "(", "atom", ")", ")", "self"...
(atom)->start the cycle at position atom, assumes that atom is in the cycle
[ "(", "atom", ")", "-", ">", "start", "the", "cycle", "at", "position", "atom", "assumes", "that", "atom", "is", "in", "the", "cycle" ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/cycle.py#L63-L72
ubccr/pinky
pinky/perception/cycle.py
Cycle.set_aromatic
def set_aromatic(self):
    """Mark this cycle as an aromatic ring.

    Sets the aromatic flag on the cycle, on every member atom, and on
    every member bond (with bond order 1.5, type 4, symbol ':').
    """
    #XXX FIX ME
    # this probably shouldn't be here
    for member in self.atoms:
        member.aromatic = 1
    for link in self.bonds:
        link.aromatic = 1
        link.bondorder = 1.5
        link.bondtype = 4
        link.symbol = ":"
        link.fixed = 1
    self.aromatic = 1
python
def set_aromatic(self): """set the cycle to be an aromatic ring""" #XXX FIX ME # this probably shouldn't be here for atom in self.atoms: atom.aromatic = 1 for bond in self.bonds: bond.aromatic = 1 bond.bondorder = 1.5 bond.bondtype = 4 bond.symbol = ":" bond.fixed = 1 self.aromatic = 1
[ "def", "set_aromatic", "(", "self", ")", ":", "#XXX FIX ME", "# this probably shouldn't be here", "for", "atom", "in", "self", ".", "atoms", ":", "atom", ".", "aromatic", "=", "1", "for", "bond", "in", "self", ".", "bonds", ":", "bond", ".", "aromatic", "=...
set the cycle to be an aromatic ring
[ "set", "the", "cycle", "to", "be", "an", "aromatic", "ring" ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/cycle.py#L77-L91
shawalli/psycopg2-pgevents
psycopg2_pgevents/debug.py
set_debug
def set_debug(enabled: bool):
    """Enable or disable debug logs for the entire package.

    Parameters
    ----------
    enabled: bool
        Whether debug should be enabled or not.
    """
    global _DEBUG_ENABLED

    if enabled:
        # flip the flag first so the confirmation message is emitted
        _DEBUG_ENABLED = True
        log('Enabling debug output...', logger_name=_LOGGER_NAME)
    else:
        # log while debug is still on, then flip the flag
        log('Disabling debug output...', logger_name=_LOGGER_NAME)
        _DEBUG_ENABLED = False
python
def set_debug(enabled: bool): """Enable or disable debug logs for the entire package. Parameters ---------- enabled: bool Whether debug should be enabled or not. """ global _DEBUG_ENABLED if not enabled: log('Disabling debug output...', logger_name=_LOGGER_NAME) _DEBUG_ENABLED = False else: _DEBUG_ENABLED = True log('Enabling debug output...', logger_name=_LOGGER_NAME)
[ "def", "set_debug", "(", "enabled", ":", "bool", ")", ":", "global", "_DEBUG_ENABLED", "if", "not", "enabled", ":", "log", "(", "'Disabling debug output...'", ",", "logger_name", "=", "_LOGGER_NAME", ")", "_DEBUG_ENABLED", "=", "False", "else", ":", "_DEBUG_ENAB...
Enable or disable debug logs for the entire package. Parameters ---------- enabled: bool Whether debug should be enabled or not.
[ "Enable", "or", "disable", "debug", "logs", "for", "the", "entire", "package", "." ]
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/debug.py#L14-L30
shawalli/psycopg2-pgevents
psycopg2_pgevents/debug.py
_create_logger
def _create_logger(name: str, level: int) -> Generator[logging.Logger, None, None]: """Create a context-based logger. Parameters ---------- name: str Name of logger to use when logging. level: int Logging level, one of logging's levels (e.g. INFO, ERROR, etc.). Returns ------- logging.Logger Named logger that may be used for logging. """ # Get logger logger = logging.getLogger(name) # Set logger level old_level = logger.level logger.setLevel(level) # Setup handler and add to logger handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter('%(asctime)s %(levelname)-5s [%(name)s]: %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) yield logger # Reset logger level logger.setLevel(old_level) # Remove handler from logger logger.removeHandler(handler) handler.close()
python
def _create_logger(name: str, level: int) -> Generator[logging.Logger, None, None]: """Create a context-based logger. Parameters ---------- name: str Name of logger to use when logging. level: int Logging level, one of logging's levels (e.g. INFO, ERROR, etc.). Returns ------- logging.Logger Named logger that may be used for logging. """ # Get logger logger = logging.getLogger(name) # Set logger level old_level = logger.level logger.setLevel(level) # Setup handler and add to logger handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter('%(asctime)s %(levelname)-5s [%(name)s]: %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) yield logger # Reset logger level logger.setLevel(old_level) # Remove handler from logger logger.removeHandler(handler) handler.close()
[ "def", "_create_logger", "(", "name", ":", "str", ",", "level", ":", "int", ")", "->", "Generator", "[", "logging", ".", "Logger", ",", "None", ",", "None", "]", ":", "# Get logger", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "# Set ...
Create a context-based logger. Parameters ---------- name: str Name of logger to use when logging. level: int Logging level, one of logging's levels (e.g. INFO, ERROR, etc.). Returns ------- logging.Logger Named logger that may be used for logging.
[ "Create", "a", "context", "-", "based", "logger", ".", "Parameters", "----------", "name", ":", "str", "Name", "of", "logger", "to", "use", "when", "logging", ".", "level", ":", "int", "Logging", "level", "one", "of", "logging", "s", "levels", "(", "e", ...
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/debug.py#L34-L68
shawalli/psycopg2-pgevents
psycopg2_pgevents/debug.py
log
def log(message: str, *args: str, category: str='info', logger_name: str='pgevents'):
    """Log a message to the given logger.

    If debug has not been enabled, this method will not log a message.

    Parameters
    ----------
    message: str
        Message, with or without formatters, to print.
    args: Any
        Arguments to use with the message. args must either be a series
        of arguments that match up with anonymous formatters
        (i.e. "%<FORMAT-CHARACTER>") in the format string, or a dictionary
        with key-value pairs that match up with named formatters
        (i.e. "%(key)s") in the format string.
    category: str
        Logger method to use (e.g. 'info', 'error').
    logger_name: str
        Name of logger to which the message should be logged.
    """
    global _DEBUG_ENABLED

    # with debug off, log above CRITICAL so nothing is ever emitted
    level = logging.INFO if _DEBUG_ENABLED else (logging.CRITICAL + 1)

    with _create_logger(logger_name, level) as logger:
        log_fn = getattr(logger, category, None)
        if log_fn is None:
            raise ValueError('Invalid log category "{}"'.format(category))
        log_fn(message, *args)
python
def log(message: str, *args: str, category: str='info', logger_name: str='pgevents'): """Log a message to the given logger. If debug has not been enabled, this method will not log a message. Parameters ---------- message: str Message, with or without formatters, to print. args: Any Arguments to use with the message. args must either be a series of arguments that match up with anonymous formatters (i.e. "%<FORMAT-CHARACTER>") in the format string, or a dictionary with key-value pairs that match up with named formatters (i.e. "%(key)s") in the format string. logger_name: str Name of logger to which the message should be logged. """ global _DEBUG_ENABLED if _DEBUG_ENABLED: level = logging.INFO else: level = logging.CRITICAL + 1 with _create_logger(logger_name, level) as logger: log_fn = getattr(logger, category, None) if log_fn is None: raise ValueError('Invalid log category "{}"'.format(category)) log_fn(message, *args)
[ "def", "log", "(", "message", ":", "str", ",", "*", "args", ":", "str", ",", "category", ":", "str", "=", "'info'", ",", "logger_name", ":", "str", "=", "'pgevents'", ")", ":", "global", "_DEBUG_ENABLED", "if", "_DEBUG_ENABLED", ":", "level", "=", "log...
Log a message to the given logger. If debug has not been enabled, this method will not log a message. Parameters ---------- message: str Message, with or without formatters, to print. args: Any Arguments to use with the message. args must either be a series of arguments that match up with anonymous formatters (i.e. "%<FORMAT-CHARACTER>") in the format string, or a dictionary with key-value pairs that match up with named formatters (i.e. "%(key)s") in the format string. logger_name: str Name of logger to which the message should be logged.
[ "Log", "a", "message", "to", "the", "given", "logger", "." ]
train
https://github.com/shawalli/psycopg2-pgevents/blob/bf04c05839a27c56834b26748d227c71cd87257c/psycopg2_pgevents/debug.py#L71-L102
ubccr/pinky
pinky/perception/figueras.py
sssr
def sssr(molecule):
    """molecule -> generate the molecule.cycles that contain the
    smallest set of smallest rings.

    NOTE(review): Python 2-only code — uses ``dict.has_key`` and relies
    on ``dict.keys()``/``map`` returning lists; porting to Python 3
    requires changes beyond documentation.
    """
    results = {}    # sorted-handle tuple -> (ring handles, bonds)
    lookup = {}     # atom handle -> atom
    fullSet = {}    # handles of atoms still under consideration
    oatoms = {}     # atom handle -> mutable copy of neighbor list
    # XXX FIX ME
    # copy atom.oatoms to atom._oatoms
    # atom._oatoms will be modified my the routine
    for atom in molecule.atoms:
        atom.rings = []
        fullSet[atom.handle] = 1
        lookup[atom.handle] = atom
        oatoms[atom.handle] = atom.oatoms[:]

    for bond in molecule.bonds:
        bond.rings = []

    trimSet = []
    while fullSet:
        nodesN2 = []
        minimum, minimum_degree = None, 100000
        # find the N2 atoms and remove atoms with degree 0
        for atomID in fullSet.keys():
            atom = lookup[atomID]
            degree = len(oatoms[atom.handle])
            if degree == 0:
                del fullSet[atomID]  #fullSet.remove(atomID)
            elif degree == 2:
                nodesN2.append(atom)

            # keep track of the minimum degree
            if (degree > 0) and ((not minimum) or (degree < minimum_degree)):
                minimum, minimum_degree = atom, degree

        if not minimum:
            # nothing to do! (i.e. can't have a ring)
            break

        if minimum_degree == 1:
            # these cannot be in rings so trim and remove
            # my version of trimming
            for oatom in oatoms[minimum.handle]:
                oatoms[oatom.handle].remove(minimum)
            oatoms[minimum.handle] = []
            del fullSet[minimum.handle]
        elif minimum_degree == 2:
            # find the rings!
            startNodes = []
            for atom in nodesN2:
                ring, bonds = getRing(atom, fullSet, lookup, oatoms)
                if ring:
                    # canonical key: sorted tuple of ring handles
                    rlookup = ring[:]
                    rlookup.sort()
                    rlookup = tuple(rlookup)
                    if (not results.has_key(rlookup)):# not in results):
                        results[rlookup] = ring, bonds
                        startNodes.append(atom)
            # in case we didn't get a ring remove the head of the nodesN2
            startNodes = startNodes or [nodesN2[0]]
            for atom in startNodes:
                # again, my version of trimming
                if oatoms[atom.handle]:
                    oatom = oatoms[atom.handle].pop()
                    oatoms[oatom.handle].remove(atom)
        elif minimum_degree > 2:
            # no N2 nodes so remove the "optimum" edge to create
            # N2 nodes in the next go-around.
            ring, bonds = getRing(minimum, fullSet, lookup, oatoms)
            if ring:
                key = ring[:]
                key.sort()
                key = tuple(key)
                if not results.has_key(key):
                    results[key] = ring, bonds
                atoms = map(lookup.get, ring)
                atoms, bonds = toposort(atoms, bonds)
                checkEdges(atoms, lookup, oatoms)
            else:
                del fullSet[minimum.handle]
        else:
            raise ShouldntGetHereError

    # assign the ring index to the atom
    rings = []
    index = 0
    # transform the handles back to atoms
    for result, bonds in results.values():
        ring = []
        for atomID in result:
            atom = lookup[atomID]
            assert atom.handle == atomID
            ring.append(atom)
        rings.append((ring, bonds))
        index = index + 1

    molecule.rings = rings

    potentialCycles = []
    index = 0
    for atoms, bonds in rings:
        # due to the dictionaries used in getRing
        # the atoms are not in the order found
        # we need to topologically sort these
        # for the cycle
        atoms, bonds = toposort(atoms, bonds)
        potentialCycles.append((atoms, bonds))

    rings = potentialCycles#checkRings(potentialCycles)
    molecule.rings = rings
    molecule.cycles = [Cycle(atoms, bonds) for atoms, bonds in rings]
    return molecule
python
def sssr(molecule): """molecule -> generate the molecule.cycles that contain the smallest set of smallest rings""" results = {} lookup = {} fullSet = {} oatoms = {} # XXX FIX ME # copy atom.oatoms to atom._oatoms # atom._oatoms will be modified my the routine for atom in molecule.atoms: atom.rings = [] fullSet[atom.handle] = 1 lookup[atom.handle] = atom oatoms[atom.handle] = atom.oatoms[:] for bond in molecule.bonds: bond.rings = [] trimSet = [] while fullSet: nodesN2 = [] minimum, minimum_degree = None, 100000 # find the N2 atoms and remove atoms with degree 0 for atomID in fullSet.keys(): atom = lookup[atomID] degree = len(oatoms[atom.handle]) if degree == 0: del fullSet[atomID] #fullSet.remove(atomID) elif degree == 2: nodesN2.append(atom) # keep track of the minimum degree if (degree > 0) and ( (not minimum) or (degree < minimum_degree)): minimum, minimum_degree = atom, degree if not minimum: # nothing to do! (i.e. can't have a ring) break if minimum_degree == 1: # these cannot be in rings so trim and remove # my version of trimming for oatom in oatoms[minimum.handle]: oatoms[oatom.handle].remove(minimum) oatoms[minimum.handle] = [] del fullSet[minimum.handle] elif minimum_degree == 2: # find the rings! startNodes = [] for atom in nodesN2: ring, bonds = getRing(atom, fullSet, lookup, oatoms) if ring: rlookup = ring[:] rlookup.sort() rlookup = tuple(rlookup) if (not results.has_key(rlookup)):# not in results): results[rlookup] = ring, bonds startNodes.append(atom) # in case we didn't get a ring remove the head of the nodesN2 startNodes = startNodes or [nodesN2[0]] for atom in startNodes: # again, my version of trimming if oatoms[atom.handle]: oatom = oatoms[atom.handle].pop() oatoms[oatom.handle].remove(atom) elif minimum_degree > 2: # no N2 nodes so remove the "optimum" edge to create # N2 nodes in the next go-around. 
ring, bonds = getRing(minimum, fullSet, lookup, oatoms) if ring: key = ring[:] key.sort() key = tuple(key) if not results.has_key(key): results[key] = ring, bonds atoms = map(lookup.get, ring) atoms, bonds = toposort(atoms, bonds) checkEdges(atoms, lookup, oatoms) else: del fullSet[minimum.handle] else: raise ShouldntGetHereError # assign the ring index to the atom rings = [] index = 0 # transform the handles back to atoms for result, bonds in results.values(): ring = [] for atomID in result: atom = lookup[atomID] assert atom.handle == atomID ring.append(atom) rings.append((ring, bonds)) index = index + 1 molecule.rings = rings potentialCycles = [] index = 0 for atoms, bonds in rings: # due to the dictionaries used in getRing # the atoms are not in the order found # we need to topologically sort these # for the cycle atoms, bonds = toposort(atoms, bonds) potentialCycles.append((atoms, bonds)) rings = potentialCycles#checkRings(potentialCycles) molecule.rings = rings molecule.cycles = [Cycle(atoms, bonds) for atoms, bonds in rings] return molecule
[ "def", "sssr", "(", "molecule", ")", ":", "results", "=", "{", "}", "lookup", "=", "{", "}", "fullSet", "=", "{", "}", "oatoms", "=", "{", "}", "# XXX FIX ME", "# copy atom.oatoms to atom._oatoms", "# atom._oatoms will be modified my the routine", "for", "atom", ...
molecule -> generate the molecule.cycles that contain the smallest set of smallest rings
[ "molecule", "-", ">", "generate", "the", "molecule", ".", "cycles", "that", "contain", "the", "smallest", "set", "of", "smallest", "rings" ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/figueras.py#L56-L178
ubccr/pinky
pinky/perception/figueras.py
toposort
def toposort(initialAtoms, initialBonds): """initialAtoms, initialBonds -> atoms, bonds Given the list of atoms and bonds in a ring return the topologically sorted atoms and bonds. That is each atom is connected to the following atom and each bond is connected to the following bond in the following manner a1 - b1 - a2 - b2 - ... """ atoms = [] a_append = atoms.append bonds = [] b_append = bonds.append # for the atom and bond hashes # we ignore the first atom since we # would have deleted it from the hash anyway ahash = {} bhash = {} for atom in initialAtoms[1:]: ahash[atom.handle] = 1 for bond in initialBonds: bhash[bond.handle] = bond next = initialAtoms[0] a_append(next) # do until all the atoms are gone while ahash: # traverse to all the connected atoms for atom in next.oatoms: # both the bond and the atom have to be # in our list of atoms and bonds to use # ugg, nested if's... There has to be a # better control structure if ahash.has_key(atom.handle): bond = next.findbond(atom) assert bond # but wait! the bond has to be in our # list of bonds we can use! if bhash.has_key(bond.handle): a_append(atom) b_append(bond) del ahash[atom.handle] next = atom break else: raise RingException("Atoms are not in ring") assert len(initialAtoms) == len(atoms) assert len(bonds) == len(atoms) - 1 lastBond = atoms[0].findbond(atoms[-1]) assert lastBond b_append(lastBond) return atoms, bonds
python
def toposort(initialAtoms, initialBonds): """initialAtoms, initialBonds -> atoms, bonds Given the list of atoms and bonds in a ring return the topologically sorted atoms and bonds. That is each atom is connected to the following atom and each bond is connected to the following bond in the following manner a1 - b1 - a2 - b2 - ... """ atoms = [] a_append = atoms.append bonds = [] b_append = bonds.append # for the atom and bond hashes # we ignore the first atom since we # would have deleted it from the hash anyway ahash = {} bhash = {} for atom in initialAtoms[1:]: ahash[atom.handle] = 1 for bond in initialBonds: bhash[bond.handle] = bond next = initialAtoms[0] a_append(next) # do until all the atoms are gone while ahash: # traverse to all the connected atoms for atom in next.oatoms: # both the bond and the atom have to be # in our list of atoms and bonds to use # ugg, nested if's... There has to be a # better control structure if ahash.has_key(atom.handle): bond = next.findbond(atom) assert bond # but wait! the bond has to be in our # list of bonds we can use! if bhash.has_key(bond.handle): a_append(atom) b_append(bond) del ahash[atom.handle] next = atom break else: raise RingException("Atoms are not in ring") assert len(initialAtoms) == len(atoms) assert len(bonds) == len(atoms) - 1 lastBond = atoms[0].findbond(atoms[-1]) assert lastBond b_append(lastBond) return atoms, bonds
[ "def", "toposort", "(", "initialAtoms", ",", "initialBonds", ")", ":", "atoms", "=", "[", "]", "a_append", "=", "atoms", ".", "append", "bonds", "=", "[", "]", "b_append", "=", "bonds", ".", "append", "# for the atom and bond hashes", "# we ignore the first atom...
initialAtoms, initialBonds -> atoms, bonds Given the list of atoms and bonds in a ring return the topologically sorted atoms and bonds. That is each atom is connected to the following atom and each bond is connected to the following bond in the following manner a1 - b1 - a2 - b2 - ...
[ "initialAtoms", "initialBonds", "-", ">", "atoms", "bonds", "Given", "the", "list", "of", "atoms", "and", "bonds", "in", "a", "ring", "return", "the", "topologically", "sorted", "atoms", "and", "bonds", ".", "That", "is", "each", "atom", "is", "connected", ...
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/figueras.py#L180-L234
ubccr/pinky
pinky/perception/figueras.py
getRing
def getRing(startAtom, atomSet, lookup, oatoms): """getRing(startAtom, atomSet, lookup, oatoms)->atoms, bonds starting at startAtom do a bfs traversal through the atoms in atomSet and return the smallest ring found returns (), () on failure note: atoms and bonds are not returned in traversal order""" path = {} bpaths = {} for atomID in atomSet.keys(): # initially the paths are empty path[atomID] = None bpaths[atomID] = [] q = [] handle = startAtom.handle for atom in oatoms[handle]: q.append((atom, handle)) path[atom.handle] = {atom.handle:1, handle:1} bpaths[atom.handle] = [startAtom.findbond(atom)] qIndex = 0 lenQ = len(q) while qIndex < lenQ: current, sourceHandle = q[qIndex] handle = current.handle qIndex += 1 for next in oatoms[handle]: m = next.handle if m != sourceHandle: if not atomSet.has_key(m): return (), () if path.get(m, None): intersections = 0 for atom in path[handle].keys(): if path[m].has_key(atom): intersections = intersections + 1 sharedAtom = atom if intersections == 1: del path[handle][sharedAtom] path[handle].update(path[m]) result = path[handle].keys() bond = next.findbond(current) # assert bond not in bpaths[handle] and bond not in bpaths[m] bonds = bpaths[handle] + bpaths[m] + [bond] return result, bonds else: path[m] = path[handle].copy() path[m][m] = 1 bond = next.findbond(current) # assert bond not in bpaths[m] and bond not in bpaths[handle] bpaths[m] = bpaths[handle] + [next.findbond(current)] q.append((next, handle)) lenQ = lenQ + 1 return (), ()
python
def getRing(startAtom, atomSet, lookup, oatoms): """getRing(startAtom, atomSet, lookup, oatoms)->atoms, bonds starting at startAtom do a bfs traversal through the atoms in atomSet and return the smallest ring found returns (), () on failure note: atoms and bonds are not returned in traversal order""" path = {} bpaths = {} for atomID in atomSet.keys(): # initially the paths are empty path[atomID] = None bpaths[atomID] = [] q = [] handle = startAtom.handle for atom in oatoms[handle]: q.append((atom, handle)) path[atom.handle] = {atom.handle:1, handle:1} bpaths[atom.handle] = [startAtom.findbond(atom)] qIndex = 0 lenQ = len(q) while qIndex < lenQ: current, sourceHandle = q[qIndex] handle = current.handle qIndex += 1 for next in oatoms[handle]: m = next.handle if m != sourceHandle: if not atomSet.has_key(m): return (), () if path.get(m, None): intersections = 0 for atom in path[handle].keys(): if path[m].has_key(atom): intersections = intersections + 1 sharedAtom = atom if intersections == 1: del path[handle][sharedAtom] path[handle].update(path[m]) result = path[handle].keys() bond = next.findbond(current) # assert bond not in bpaths[handle] and bond not in bpaths[m] bonds = bpaths[handle] + bpaths[m] + [bond] return result, bonds else: path[m] = path[handle].copy() path[m][m] = 1 bond = next.findbond(current) # assert bond not in bpaths[m] and bond not in bpaths[handle] bpaths[m] = bpaths[handle] + [next.findbond(current)] q.append((next, handle)) lenQ = lenQ + 1 return (), ()
[ "def", "getRing", "(", "startAtom", ",", "atomSet", ",", "lookup", ",", "oatoms", ")", ":", "path", "=", "{", "}", "bpaths", "=", "{", "}", "for", "atomID", "in", "atomSet", ".", "keys", "(", ")", ":", "# initially the paths are empty", "path", "[", "a...
getRing(startAtom, atomSet, lookup, oatoms)->atoms, bonds starting at startAtom do a bfs traversal through the atoms in atomSet and return the smallest ring found returns (), () on failure note: atoms and bonds are not returned in traversal order
[ "getRing", "(", "startAtom", "atomSet", "lookup", "oatoms", ")", "-", ">", "atoms", "bonds", "starting", "at", "startAtom", "do", "a", "bfs", "traversal", "through", "the", "atoms", "in", "atomSet", "and", "return", "the", "smallest", "ring", "found" ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/figueras.py#L236-L297
ubccr/pinky
pinky/perception/figueras.py
checkEdges
def checkEdges(ringSet, lookup, oatoms): """atoms, lookup -> ring atoms must be in the order of traversal around a ring! break an optimal non N2 node and return the largest ring found """ bondedAtoms = map( None, ringSet[:-1], ringSet[1:] ) bondedAtoms += [ (ringSet[-1], ringSet[0]) ] # form a lookup for the ringSet list atomSet = {} for atomID in ringSet: atomSet[atomID] = 1 results = [] # for each bond in the ring, break it and find the smallest # rings starting on either side of the bond # keep the largest but rememeber to add the bond back at the # end for atom1, atom2 in bondedAtoms: # break a single edge in the ring handle1 = atom1.handle handle2 = atom2.handle oatoms1 = oatoms[handle1] oatoms2 = oatoms[handle2] index1 = oatoms1.index(atom2) index2 = oatoms2.index(atom1) # break the bond del oatoms1[index1] del oatoms2[index2] ring1 = getRing(atom1, atomSet, lookup, oatoms) ring2 = getRing(atom2, atomSet, lookup, oatoms) # keep the larger of the two rings if len(ring1) > len(ring2): results.append((len(ring1), handle1, handle2, ring1)) else: results.append((len(ring2), handle2, handle1, ring2)) # retie the bond oatoms1.insert(index1, atom2) oatoms2.insert(index2, atom1) if not results: return None # find the smallest ring size, incidentHandle, adjacentHandle, smallestRing = min(results) # dereference the handles incident, adjacent = lookup[incidentHandle], lookup[adjacentHandle] # break the bond between the incident and adjacent atoms oatomsI = oatoms[incidentHandle] oatomsA = oatoms[adjacentHandle] assert incident in oatomsA assert adjacent in oatomsI oatomsI.remove(adjacent) oatomsA.remove(incident)
python
def checkEdges(ringSet, lookup, oatoms): """atoms, lookup -> ring atoms must be in the order of traversal around a ring! break an optimal non N2 node and return the largest ring found """ bondedAtoms = map( None, ringSet[:-1], ringSet[1:] ) bondedAtoms += [ (ringSet[-1], ringSet[0]) ] # form a lookup for the ringSet list atomSet = {} for atomID in ringSet: atomSet[atomID] = 1 results = [] # for each bond in the ring, break it and find the smallest # rings starting on either side of the bond # keep the largest but rememeber to add the bond back at the # end for atom1, atom2 in bondedAtoms: # break a single edge in the ring handle1 = atom1.handle handle2 = atom2.handle oatoms1 = oatoms[handle1] oatoms2 = oatoms[handle2] index1 = oatoms1.index(atom2) index2 = oatoms2.index(atom1) # break the bond del oatoms1[index1] del oatoms2[index2] ring1 = getRing(atom1, atomSet, lookup, oatoms) ring2 = getRing(atom2, atomSet, lookup, oatoms) # keep the larger of the two rings if len(ring1) > len(ring2): results.append((len(ring1), handle1, handle2, ring1)) else: results.append((len(ring2), handle2, handle1, ring2)) # retie the bond oatoms1.insert(index1, atom2) oatoms2.insert(index2, atom1) if not results: return None # find the smallest ring size, incidentHandle, adjacentHandle, smallestRing = min(results) # dereference the handles incident, adjacent = lookup[incidentHandle], lookup[adjacentHandle] # break the bond between the incident and adjacent atoms oatomsI = oatoms[incidentHandle] oatomsA = oatoms[adjacentHandle] assert incident in oatomsA assert adjacent in oatomsI oatomsI.remove(adjacent) oatomsA.remove(incident)
[ "def", "checkEdges", "(", "ringSet", ",", "lookup", ",", "oatoms", ")", ":", "bondedAtoms", "=", "map", "(", "None", ",", "ringSet", "[", ":", "-", "1", "]", ",", "ringSet", "[", "1", ":", "]", ")", "bondedAtoms", "+=", "[", "(", "ringSet", "[", ...
atoms, lookup -> ring atoms must be in the order of traversal around a ring! break an optimal non N2 node and return the largest ring found
[ "atoms", "lookup", "-", ">", "ring", "atoms", "must", "be", "in", "the", "order", "of", "traversal", "around", "a", "ring!", "break", "an", "optimal", "non", "N2", "node", "and", "return", "the", "largest", "ring", "found" ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/figueras.py#L300-L365
jbloomlab/phydms
phydmslib/parsearguments.py
NonNegativeInt
def NonNegativeInt(n): """If *n* is non-negative integer returns it, otherwise an error. >>> print("%d" % NonNegativeInt('8')) 8 >>> NonNegativeInt('8.1') Traceback (most recent call last): ... ValueError: 8.1 is not an integer >>> print("%d" % NonNegativeInt('0')) 0 >>> NonNegativeInt('-1') Traceback (most recent call last): ... ValueError: -1 is not non-negative """ if not isinstance(n, str): raise ValueError('%r is not a string' % n) try: n = int(n) except: raise ValueError('%s is not an integer' % n) if n < 0: raise ValueError('%d is not non-negative' % n) else: return n
python
def NonNegativeInt(n): """If *n* is non-negative integer returns it, otherwise an error. >>> print("%d" % NonNegativeInt('8')) 8 >>> NonNegativeInt('8.1') Traceback (most recent call last): ... ValueError: 8.1 is not an integer >>> print("%d" % NonNegativeInt('0')) 0 >>> NonNegativeInt('-1') Traceback (most recent call last): ... ValueError: -1 is not non-negative """ if not isinstance(n, str): raise ValueError('%r is not a string' % n) try: n = int(n) except: raise ValueError('%s is not an integer' % n) if n < 0: raise ValueError('%d is not non-negative' % n) else: return n
[ "def", "NonNegativeInt", "(", "n", ")", ":", "if", "not", "isinstance", "(", "n", ",", "str", ")", ":", "raise", "ValueError", "(", "'%r is not a string'", "%", "n", ")", "try", ":", "n", "=", "int", "(", "n", ")", "except", ":", "raise", "ValueError...
If *n* is non-negative integer returns it, otherwise an error. >>> print("%d" % NonNegativeInt('8')) 8 >>> NonNegativeInt('8.1') Traceback (most recent call last): ... ValueError: 8.1 is not an integer >>> print("%d" % NonNegativeInt('0')) 0 >>> NonNegativeInt('-1') Traceback (most recent call last): ... ValueError: -1 is not non-negative
[ "If", "*", "n", "*", "is", "non", "-", "negative", "integer", "returns", "it", "otherwise", "an", "error", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L34-L63
jbloomlab/phydms
phydmslib/parsearguments.py
IntGreaterThanZero
def IntGreaterThanZero(n): """If *n* is an integer > 0, returns it, otherwise an error.""" try: n = int(n) except: raise ValueError("%s is not an integer" % n) if n <= 0: raise ValueError("%d is not > 0" % n) else: return n
python
def IntGreaterThanZero(n): """If *n* is an integer > 0, returns it, otherwise an error.""" try: n = int(n) except: raise ValueError("%s is not an integer" % n) if n <= 0: raise ValueError("%d is not > 0" % n) else: return n
[ "def", "IntGreaterThanZero", "(", "n", ")", ":", "try", ":", "n", "=", "int", "(", "n", ")", "except", ":", "raise", "ValueError", "(", "\"%s is not an integer\"", "%", "n", ")", "if", "n", "<=", "0", ":", "raise", "ValueError", "(", "\"%d is not > 0\"",...
If *n* is an integer > 0, returns it, otherwise an error.
[ "If", "*", "n", "*", "is", "an", "integer", ">", "0", "returns", "it", "otherwise", "an", "error", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L65-L74
jbloomlab/phydms
phydmslib/parsearguments.py
IntGreaterThanOne
def IntGreaterThanOne(n): """If *n* is an integer > 1, returns it, otherwise an error.""" try: n = int(n) except: raise ValueError("%s is not an integer" % n) if n <= 1: raise ValueError("%d is not > 1" % n) else: return n
python
def IntGreaterThanOne(n): """If *n* is an integer > 1, returns it, otherwise an error.""" try: n = int(n) except: raise ValueError("%s is not an integer" % n) if n <= 1: raise ValueError("%d is not > 1" % n) else: return n
[ "def", "IntGreaterThanOne", "(", "n", ")", ":", "try", ":", "n", "=", "int", "(", "n", ")", "except", ":", "raise", "ValueError", "(", "\"%s is not an integer\"", "%", "n", ")", "if", "n", "<=", "1", ":", "raise", "ValueError", "(", "\"%d is not > 1\"", ...
If *n* is an integer > 1, returns it, otherwise an error.
[ "If", "*", "n", "*", "is", "an", "integer", ">", "1", "returns", "it", "otherwise", "an", "error", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L76-L85
jbloomlab/phydms
phydmslib/parsearguments.py
FloatGreaterThanEqualToZero
def FloatGreaterThanEqualToZero(x): """If *x* is a float >= 0, returns it, otherwise raises and error. >>> print('%.1f' % FloatGreaterThanEqualToZero('1.5')) 1.5 >>> print('%.1f' % FloatGreaterThanEqualToZero('-1.1')) Traceback (most recent call last): ... ValueError: -1.1 not float greater than or equal to zero """ try: x = float(x) except: raise ValueError("%r not float greater than or equal to zero" % x) if x >= 0: return x else: raise ValueError("%r not float greater than or equal to zero" % x)
python
def FloatGreaterThanEqualToZero(x): """If *x* is a float >= 0, returns it, otherwise raises and error. >>> print('%.1f' % FloatGreaterThanEqualToZero('1.5')) 1.5 >>> print('%.1f' % FloatGreaterThanEqualToZero('-1.1')) Traceback (most recent call last): ... ValueError: -1.1 not float greater than or equal to zero """ try: x = float(x) except: raise ValueError("%r not float greater than or equal to zero" % x) if x >= 0: return x else: raise ValueError("%r not float greater than or equal to zero" % x)
[ "def", "FloatGreaterThanEqualToZero", "(", "x", ")", ":", "try", ":", "x", "=", "float", "(", "x", ")", "except", ":", "raise", "ValueError", "(", "\"%r not float greater than or equal to zero\"", "%", "x", ")", "if", "x", ">=", "0", ":", "return", "x", "e...
If *x* is a float >= 0, returns it, otherwise raises and error. >>> print('%.1f' % FloatGreaterThanEqualToZero('1.5')) 1.5 >>> print('%.1f' % FloatGreaterThanEqualToZero('-1.1')) Traceback (most recent call last): ... ValueError: -1.1 not float greater than or equal to zero
[ "If", "*", "x", "*", "is", "a", "float", ">", "=", "0", "returns", "it", "otherwise", "raises", "and", "error", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L87-L105
jbloomlab/phydms
phydmslib/parsearguments.py
FloatBetweenZeroAndOne
def FloatBetweenZeroAndOne(x): """Returns *x* only if *0 <= x <= 1*, otherwise raises error.""" x = float(x) if 0 <= x <= 1: return x else: raise ValueError("{0} not a float between 0 and 1.".format(x))
python
def FloatBetweenZeroAndOne(x): """Returns *x* only if *0 <= x <= 1*, otherwise raises error.""" x = float(x) if 0 <= x <= 1: return x else: raise ValueError("{0} not a float between 0 and 1.".format(x))
[ "def", "FloatBetweenZeroAndOne", "(", "x", ")", ":", "x", "=", "float", "(", "x", ")", "if", "0", "<=", "x", "<=", "1", ":", "return", "x", "else", ":", "raise", "ValueError", "(", "\"{0} not a float between 0 and 1.\"", ".", "format", "(", "x", ")", "...
Returns *x* only if *0 <= x <= 1*, otherwise raises error.
[ "Returns", "*", "x", "*", "only", "if", "*", "0", "<", "=", "x", "<", "=", "1", "*", "otherwise", "raises", "error", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L137-L143
jbloomlab/phydms
phydmslib/parsearguments.py
diffPrefsPrior
def diffPrefsPrior(priorstring): """Parses `priorstring` and returns `prior` tuple.""" assert isinstance(priorstring, str) prior = priorstring.split(',') if len(prior) == 3 and prior[0] == 'invquadratic': [c1, c2] = [float(x) for x in prior[1 : ]] assert c1 > 0 and c2 > 0, "C1 and C2 must be > 1 for invquadratic prior" return ('invquadratic', c1, c2) else: raise ValueError("Invalid diffprefsprior: {0}".format(priorstring))
python
def diffPrefsPrior(priorstring): """Parses `priorstring` and returns `prior` tuple.""" assert isinstance(priorstring, str) prior = priorstring.split(',') if len(prior) == 3 and prior[0] == 'invquadratic': [c1, c2] = [float(x) for x in prior[1 : ]] assert c1 > 0 and c2 > 0, "C1 and C2 must be > 1 for invquadratic prior" return ('invquadratic', c1, c2) else: raise ValueError("Invalid diffprefsprior: {0}".format(priorstring))
[ "def", "diffPrefsPrior", "(", "priorstring", ")", ":", "assert", "isinstance", "(", "priorstring", ",", "str", ")", "prior", "=", "priorstring", ".", "split", "(", "','", ")", "if", "len", "(", "prior", ")", "==", "3", "and", "prior", "[", "0", "]", ...
Parses `priorstring` and returns `prior` tuple.
[ "Parses", "priorstring", "and", "returns", "prior", "tuple", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L146-L155
jbloomlab/phydms
phydmslib/parsearguments.py
ExistingFileOrNone
def ExistingFileOrNone(fname): """Like `Existingfile`, but if `fname` is string "None" then return `None`.""" if os.path.isfile(fname): return fname elif fname.lower() == 'none': return None else: raise ValueError("%s must specify a valid file name or 'None'" % fname)
python
def ExistingFileOrNone(fname): """Like `Existingfile`, but if `fname` is string "None" then return `None`.""" if os.path.isfile(fname): return fname elif fname.lower() == 'none': return None else: raise ValueError("%s must specify a valid file name or 'None'" % fname)
[ "def", "ExistingFileOrNone", "(", "fname", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "return", "fname", "elif", "fname", ".", "lower", "(", ")", "==", "'none'", ":", "return", "None", "else", ":", "raise", "ValueError", ...
Like `Existingfile`, but if `fname` is string "None" then return `None`.
[ "Like", "Existingfile", "but", "if", "fname", "is", "string", "None", "then", "return", "None", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L165-L172
jbloomlab/phydms
phydmslib/parsearguments.py
ModelOption
def ModelOption(model): """Returns *model* if a valid choice. Returns the string if it specifies a ``YNGKP_`` model variant. Returns *('ExpCM', prefsfile)* if it specifies an ``ExpCM_`` model. """ yngkpmatch = re.compile('^YNGKP_M[{0}]$'.format(''.join([m[1 : ] for m in yngkp_modelvariants]))) if yngkpmatch.search(model): return model elif len(model) > 6 and model[ : 6] == 'ExpCM_': fname = model[6 : ] if os.path.isfile(fname): return ('ExpCM', fname) else: raise ValueError("ExpCM_ must be followed by the name of an existing file. You specified the following, which is not an existing file: %s" % fname) else: raise ValueError("Invalid model")
python
def ModelOption(model): """Returns *model* if a valid choice. Returns the string if it specifies a ``YNGKP_`` model variant. Returns *('ExpCM', prefsfile)* if it specifies an ``ExpCM_`` model. """ yngkpmatch = re.compile('^YNGKP_M[{0}]$'.format(''.join([m[1 : ] for m in yngkp_modelvariants]))) if yngkpmatch.search(model): return model elif len(model) > 6 and model[ : 6] == 'ExpCM_': fname = model[6 : ] if os.path.isfile(fname): return ('ExpCM', fname) else: raise ValueError("ExpCM_ must be followed by the name of an existing file. You specified the following, which is not an existing file: %s" % fname) else: raise ValueError("Invalid model")
[ "def", "ModelOption", "(", "model", ")", ":", "yngkpmatch", "=", "re", ".", "compile", "(", "'^YNGKP_M[{0}]$'", ".", "format", "(", "''", ".", "join", "(", "[", "m", "[", "1", ":", "]", "for", "m", "in", "yngkp_modelvariants", "]", ")", ")", ")", "...
Returns *model* if a valid choice. Returns the string if it specifies a ``YNGKP_`` model variant. Returns *('ExpCM', prefsfile)* if it specifies an ``ExpCM_`` model.
[ "Returns", "*", "model", "*", "if", "a", "valid", "choice", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L175-L192
jbloomlab/phydms
phydmslib/parsearguments.py
PhyDMSPrepAlignmentParser
def PhyDMSPrepAlignmentParser(): """Returns *argparse.ArgumentParser* for ``phydms_prepalignment``.""" parser = ArgumentParserNoArgHelp(formatter_class=ArgumentDefaultsRawDescriptionFormatter, description='\n'.join([ "Prepare alignment of protein-coding DNA sequences.\n", "Steps:", " * Any sequences specified by '--purgeseqs' are removed.", " * Sequences not of length divisible by 3 are removed.", " * Sequences with ambiguous nucleotides are removed.", " * Sequences with non-terminal stop codons are removed;", " terminal stop codons are trimmed.", " * Sequences that do not encode unique proteins are removed", " unless they are specified for retention by '--keepseqs'.", " * A multiple sequence alignment is built using MAFFT.", " This step is skipped if you specify '--prealigned'.", " * Sites gapped in reference sequence are stripped.", " * Sequences with too little protein identity to reference", " sequence are removed, counting both mismatches and unstripped", " gaps as differences. Identity cutoff set by '--minidentity'.", " * Sequences too similar to other sequences are removed. An", " effort is made to keep one representative of sequences found", " many times in input set. Uniqueness threshold set ", " by '--minuniqueness'. You can specify sequences to not", " remove via '--keepseqs'.", " * Problematic characters in header names are replaced by", " underscores. 
This is any space, comma, colon, semicolon", " parenthesis, bracket, single quote, or double quote.", " * An alignment is written, as well as a plot with same root", " but extension '.pdf' that shows divergence from reference", " of all sequences retained and purged due to identity or", " uniqueness.\n", phydmslib.__acknowledgments__, 'Version {0}'.format(phydmslib.__version__), 'Full documentation at {0}'.format(phydmslib.__url__), ])) parser.add_argument('inseqs', type=ExistingFile, help="FASTA file giving input coding sequences.") parser.add_argument('alignment', help="Name of created output FASTA alignment. PDF plot has same root, but extension '.pdf'.") parser.add_argument('refseq', help="Reference sequence in 'inseqs': specify substring found ONLY in header for that sequence.") parser.set_defaults(prealigned=False) parser.add_argument('--prealigned', action='store_true', dest='prealigned', help="Sequences in 'inseqs' are already aligned, do NOT re-align.") parser.add_argument('--mafft', help="Path to MAFFT (http://mafft.cbrc.jp/alignment/software/).", default='mafft') parser.add_argument('--minidentity', type=FloatBetweenZeroAndOne, help="Purge sequences with <= this protein identity to 'refseq'.", default=0.7) parser.add_argument('--minuniqueness', type=IntGreaterThanZero, default=2, help="Require each sequence to have >= this many protein differences relative to other sequences.") parser.add_argument('--purgeseqs', nargs='*', help="Specify sequences to always purge. Any sequences with any of the substrings specified here are always removed. The substrings can either be passed as repeated arguments here, or as the name of an existing file which has one substring per line.") parser.add_argument('--keepseqs', nargs='*', help="Do not purge any of these sequences for lack of identity or uniqueness. 
Specified in the same fashion as for '--purgeseqs'.") parser.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=phydmslib.__version__)) return parser
python
def PhyDMSPrepAlignmentParser(): """Returns *argparse.ArgumentParser* for ``phydms_prepalignment``.""" parser = ArgumentParserNoArgHelp(formatter_class=ArgumentDefaultsRawDescriptionFormatter, description='\n'.join([ "Prepare alignment of protein-coding DNA sequences.\n", "Steps:", " * Any sequences specified by '--purgeseqs' are removed.", " * Sequences not of length divisible by 3 are removed.", " * Sequences with ambiguous nucleotides are removed.", " * Sequences with non-terminal stop codons are removed;", " terminal stop codons are trimmed.", " * Sequences that do not encode unique proteins are removed", " unless they are specified for retention by '--keepseqs'.", " * A multiple sequence alignment is built using MAFFT.", " This step is skipped if you specify '--prealigned'.", " * Sites gapped in reference sequence are stripped.", " * Sequences with too little protein identity to reference", " sequence are removed, counting both mismatches and unstripped", " gaps as differences. Identity cutoff set by '--minidentity'.", " * Sequences too similar to other sequences are removed. An", " effort is made to keep one representative of sequences found", " many times in input set. Uniqueness threshold set ", " by '--minuniqueness'. You can specify sequences to not", " remove via '--keepseqs'.", " * Problematic characters in header names are replaced by", " underscores. 
This is any space, comma, colon, semicolon", " parenthesis, bracket, single quote, or double quote.", " * An alignment is written, as well as a plot with same root", " but extension '.pdf' that shows divergence from reference", " of all sequences retained and purged due to identity or", " uniqueness.\n", phydmslib.__acknowledgments__, 'Version {0}'.format(phydmslib.__version__), 'Full documentation at {0}'.format(phydmslib.__url__), ])) parser.add_argument('inseqs', type=ExistingFile, help="FASTA file giving input coding sequences.") parser.add_argument('alignment', help="Name of created output FASTA alignment. PDF plot has same root, but extension '.pdf'.") parser.add_argument('refseq', help="Reference sequence in 'inseqs': specify substring found ONLY in header for that sequence.") parser.set_defaults(prealigned=False) parser.add_argument('--prealigned', action='store_true', dest='prealigned', help="Sequences in 'inseqs' are already aligned, do NOT re-align.") parser.add_argument('--mafft', help="Path to MAFFT (http://mafft.cbrc.jp/alignment/software/).", default='mafft') parser.add_argument('--minidentity', type=FloatBetweenZeroAndOne, help="Purge sequences with <= this protein identity to 'refseq'.", default=0.7) parser.add_argument('--minuniqueness', type=IntGreaterThanZero, default=2, help="Require each sequence to have >= this many protein differences relative to other sequences.") parser.add_argument('--purgeseqs', nargs='*', help="Specify sequences to always purge. Any sequences with any of the substrings specified here are always removed. The substrings can either be passed as repeated arguments here, or as the name of an existing file which has one substring per line.") parser.add_argument('--keepseqs', nargs='*', help="Do not purge any of these sequences for lack of identity or uniqueness. 
Specified in the same fashion as for '--purgeseqs'.") parser.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=phydmslib.__version__)) return parser
[ "def", "PhyDMSPrepAlignmentParser", "(", ")", ":", "parser", "=", "ArgumentParserNoArgHelp", "(", "formatter_class", "=", "ArgumentDefaultsRawDescriptionFormatter", ",", "description", "=", "'\\n'", ".", "join", "(", "[", "\"Prepare alignment of protein-coding DNA sequences.\...
Returns *argparse.ArgumentParser* for ``phydms_prepalignment``.
[ "Returns", "*", "argparse", ".", "ArgumentParser", "*", "for", "phydms_prepalignment", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L195-L241
jbloomlab/phydms
phydmslib/parsearguments.py
PhyDMSLogoPlotParser
def PhyDMSLogoPlotParser(): """Returns `argparse.ArgumentParser` for ``phydms_logoplot``.""" parser = ArgumentParserNoArgHelp(description= "Make logo plot of preferences or differential preferences. " "Uses weblogo (http://weblogo.threeplusone.com/). " "{0} Version {1}. Full documentation at {2}".format( phydmslib.__acknowledgments__, phydmslib.__version__, phydmslib.__url__), formatter_class=argparse.ArgumentDefaultsHelpFormatter) group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--prefs', type=ExistingFile, help="File with " "amino-acid preferences; same format as input to 'phydms'.") group.add_argument('--diffprefs', type=ExistingFile, help="File with " "differential preferences; in format output by 'phydms'.") parser.add_argument('outfile', help='Name of created PDF logo plot.') parser.add_argument('--stringency', type=FloatGreaterThanEqualToZero, default=1, help="Stringency parameter to re-scale prefs.") parser.add_argument('--nperline', type=IntGreaterThanZero, default=70, help="Number of sites per line.") parser.add_argument('--numberevery', type=IntGreaterThanZero, default=10, help="Number sites at this interval.") parser.add_argument('--mapmetric', default='functionalgroup', choices=['kd', 'mw', 'charge', 'functionalgroup'], help='Metric used to color ' 'amino-acid letters. kd = Kyte-Doolittle hydrophobicity; ' 'mw = molecular weight; functionalgroup = divide in 7 ' 'groups; charge = charge at neutral pH.') parser.add_argument('--colormap', type=str, default='jet', help="Name of `matplotlib` color map for amino acids " "when `--mapmetric` is 'kd' or 'mw'.") parser.add_argument('--diffprefheight', type=FloatGreaterThanZero, default=1.0, help="Height of diffpref logo in each direction.") parser.add_argument('--omegabysite', help="Overlay omega on " "logo plot. 
Specify '*_omegabysite.txt' file from 'phydms'.", type=ExistingFileOrNone) parser.add_argument('--minP', type=FloatGreaterThanZero, default=1e-4, help="Min plotted P-value for '--omegabysite' overlay.") parser.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=phydmslib.__version__)) return parser
python
def PhyDMSLogoPlotParser(): """Returns `argparse.ArgumentParser` for ``phydms_logoplot``.""" parser = ArgumentParserNoArgHelp(description= "Make logo plot of preferences or differential preferences. " "Uses weblogo (http://weblogo.threeplusone.com/). " "{0} Version {1}. Full documentation at {2}".format( phydmslib.__acknowledgments__, phydmslib.__version__, phydmslib.__url__), formatter_class=argparse.ArgumentDefaultsHelpFormatter) group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--prefs', type=ExistingFile, help="File with " "amino-acid preferences; same format as input to 'phydms'.") group.add_argument('--diffprefs', type=ExistingFile, help="File with " "differential preferences; in format output by 'phydms'.") parser.add_argument('outfile', help='Name of created PDF logo plot.') parser.add_argument('--stringency', type=FloatGreaterThanEqualToZero, default=1, help="Stringency parameter to re-scale prefs.") parser.add_argument('--nperline', type=IntGreaterThanZero, default=70, help="Number of sites per line.") parser.add_argument('--numberevery', type=IntGreaterThanZero, default=10, help="Number sites at this interval.") parser.add_argument('--mapmetric', default='functionalgroup', choices=['kd', 'mw', 'charge', 'functionalgroup'], help='Metric used to color ' 'amino-acid letters. kd = Kyte-Doolittle hydrophobicity; ' 'mw = molecular weight; functionalgroup = divide in 7 ' 'groups; charge = charge at neutral pH.') parser.add_argument('--colormap', type=str, default='jet', help="Name of `matplotlib` color map for amino acids " "when `--mapmetric` is 'kd' or 'mw'.") parser.add_argument('--diffprefheight', type=FloatGreaterThanZero, default=1.0, help="Height of diffpref logo in each direction.") parser.add_argument('--omegabysite', help="Overlay omega on " "logo plot. 
Specify '*_omegabysite.txt' file from 'phydms'.", type=ExistingFileOrNone) parser.add_argument('--minP', type=FloatGreaterThanZero, default=1e-4, help="Min plotted P-value for '--omegabysite' overlay.") parser.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=phydmslib.__version__)) return parser
[ "def", "PhyDMSLogoPlotParser", "(", ")", ":", "parser", "=", "ArgumentParserNoArgHelp", "(", "description", "=", "\"Make logo plot of preferences or differential preferences. \"", "\"Uses weblogo (http://weblogo.threeplusone.com/). \"", "\"{0} Version {1}. Full documentation at {2}\"", "...
Returns `argparse.ArgumentParser` for ``phydms_logoplot``.
[ "Returns", "argparse", ".", "ArgumentParser", "for", "phydms_logoplot", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L244-L282
jbloomlab/phydms
phydmslib/parsearguments.py
PhyDMSComprehensiveParser
def PhyDMSComprehensiveParser(): """Returns *argparse.ArgumentParser* for ``phdyms_comprehensive`` script.""" parser = ArgumentParserNoArgHelp(description=("Comprehensive phylogenetic " "model comparison and detection of selection informed by deep " "mutational scanning data. This program runs 'phydms' repeatedly " "to compare substitution models and detect selection. " "{0} Version {1}. Full documentation at {2}").format( phydmslib.__acknowledgments__, phydmslib.__version__, phydmslib.__url__), formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('outprefix', help='Output file prefix.', type=str) parser.add_argument('alignment', help='Existing FASTA file with aligned ' 'codon sequences.', type=ExistingFile) parser.add_argument('prefsfiles', help='Existing files with site-specific ' 'amino-acid preferences.', type=ExistingFile, nargs='+') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--raxml', help="Path to RAxML (e.g., 'raxml')") group.add_argument('--tree', type=ExistingFile, help="Existing Newick file giving input tree.") parser.add_argument('--ncpus', default=-1, help='Use this many CPUs; -1 ' 'means all available.', type=int) parser.add_argument('--brlen', choices=['scale', 'optimize'], default='optimize', help=("How to handle branch lengths: " "scale by single parameter or optimize each one")) parser.set_defaults(omegabysite=False) parser.add_argument('--omegabysite', dest='omegabysite', action='store_true', help="Fit omega (dN/dS) for each site.") parser.set_defaults(diffprefsbysite=False) parser.add_argument('--diffprefsbysite', dest='diffprefsbysite', action='store_true', help="Fit differential preferences for " "each site.") parser.set_defaults(gammaomega=False) parser.add_argument('--gammaomega', dest='gammaomega', action=\ 'store_true', help="Fit ExpCM with gamma distributed omega.") parser.set_defaults(gammabeta=False) parser.add_argument('--gammabeta', dest='gammabeta', action=\ 'store_true', 
help="Fit ExpCM with gamma distributed beta.") parser.set_defaults(noavgprefs=False) parser.add_argument('--no-avgprefs', dest='noavgprefs', action='store_true', help="No fitting of models with preferences averaged across sites " "for ExpCM.") parser.set_defaults(randprefs=False) parser.add_argument('--randprefs', dest='randprefs', action='store_true', help="Include ExpCM models with randomized preferences.") parser.add_argument('-v', '--version', action='version', version= '%(prog)s {version}'.format(version=phydmslib.__version__)) return parser
python
def PhyDMSComprehensiveParser(): """Returns *argparse.ArgumentParser* for ``phdyms_comprehensive`` script.""" parser = ArgumentParserNoArgHelp(description=("Comprehensive phylogenetic " "model comparison and detection of selection informed by deep " "mutational scanning data. This program runs 'phydms' repeatedly " "to compare substitution models and detect selection. " "{0} Version {1}. Full documentation at {2}").format( phydmslib.__acknowledgments__, phydmslib.__version__, phydmslib.__url__), formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('outprefix', help='Output file prefix.', type=str) parser.add_argument('alignment', help='Existing FASTA file with aligned ' 'codon sequences.', type=ExistingFile) parser.add_argument('prefsfiles', help='Existing files with site-specific ' 'amino-acid preferences.', type=ExistingFile, nargs='+') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--raxml', help="Path to RAxML (e.g., 'raxml')") group.add_argument('--tree', type=ExistingFile, help="Existing Newick file giving input tree.") parser.add_argument('--ncpus', default=-1, help='Use this many CPUs; -1 ' 'means all available.', type=int) parser.add_argument('--brlen', choices=['scale', 'optimize'], default='optimize', help=("How to handle branch lengths: " "scale by single parameter or optimize each one")) parser.set_defaults(omegabysite=False) parser.add_argument('--omegabysite', dest='omegabysite', action='store_true', help="Fit omega (dN/dS) for each site.") parser.set_defaults(diffprefsbysite=False) parser.add_argument('--diffprefsbysite', dest='diffprefsbysite', action='store_true', help="Fit differential preferences for " "each site.") parser.set_defaults(gammaomega=False) parser.add_argument('--gammaomega', dest='gammaomega', action=\ 'store_true', help="Fit ExpCM with gamma distributed omega.") parser.set_defaults(gammabeta=False) parser.add_argument('--gammabeta', dest='gammabeta', action=\ 'store_true', 
help="Fit ExpCM with gamma distributed beta.") parser.set_defaults(noavgprefs=False) parser.add_argument('--no-avgprefs', dest='noavgprefs', action='store_true', help="No fitting of models with preferences averaged across sites " "for ExpCM.") parser.set_defaults(randprefs=False) parser.add_argument('--randprefs', dest='randprefs', action='store_true', help="Include ExpCM models with randomized preferences.") parser.add_argument('-v', '--version', action='version', version= '%(prog)s {version}'.format(version=phydmslib.__version__)) return parser
[ "def", "PhyDMSComprehensiveParser", "(", ")", ":", "parser", "=", "ArgumentParserNoArgHelp", "(", "description", "=", "(", "\"Comprehensive phylogenetic \"", "\"model comparison and detection of selection informed by deep \"", "\"mutational scanning data. This program runs 'phydms' repea...
Returns *argparse.ArgumentParser* for ``phdyms_comprehensive`` script.
[ "Returns", "*", "argparse", ".", "ArgumentParser", "*", "for", "phdyms_comprehensive", "script", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L285-L331
jbloomlab/phydms
phydmslib/parsearguments.py
PhyDMSParser
def PhyDMSParser(): """Returns *argparse.ArgumentParser* for ``phydms`` script.""" parser = ArgumentParserNoArgHelp(description=('Phylogenetic analysis ' 'informed by deep mutational scanning data. {0} Version {1}. Full' ' documentation at {2}').format(phydmslib.__acknowledgments__, phydmslib.__version__, phydmslib.__url__), formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('alignment', type=ExistingFile, help='Existing FASTA file with aligned codon sequences.') parser.add_argument('tree', type=ExistingFile, help="Existing Newick file giving input tree.") parser.add_argument('model', type=ModelOption, help=("Substitution model: ExpCM_<prefsfile> or YNGKP_<m> (" "where <m> is {0}). For ExpCM, <prefsfile> has first " "column labeled 'site' and others labeled by 1-letter " "amino-acid code.").format(', '.join(yngkp_modelvariants))) parser.add_argument('outprefix', help='Output file prefix.', type=str) parser.add_argument('--brlen', choices=['scale', 'optimize'], default='optimize', help=("How to handle branch lengths: " "scale by single parameter or optimize each one")) parser.set_defaults(gammaomega=False) parser.add_argument('--gammaomega', action='store_true', dest='gammaomega', help="Omega for ExpCM from gamma " "distribution rather than single value. 
To achieve " "same for YNGKP, use 'model' of YNGKP_M5.") parser.set_defaults(gammabeta=False) parser.add_argument('--gammabeta', action='store_true', dest='gammabeta', help="Beta for ExpCM from gamma " "distribution rather than single value.") parser.set_defaults(omegabysite=False) parser.add_argument('--omegabysite', dest='omegabysite', action='store_true', help="Fit omega (dN/dS) for each site.") parser.set_defaults(omegabysite_fixsyn=False) parser.add_argument('--omegabysite_fixsyn', dest='omegabysite_fixsyn', action='store_true', help="For '--omegabysite', assign all " "sites same dS rather than fit for each site.") parser.set_defaults(diffprefsbysite=False) parser.add_argument('--diffprefsbysite', dest='diffprefsbysite', action='store_true', help="Fit differential preferences " "for each site.") parser.add_argument('--diffprefsprior', default='invquadratic,150,0.5', type=diffPrefsPrior, help="Regularizing prior for " "'--diffprefsbysite': 'invquadratic,C1,C2' is prior in " "Bloom, Biology Direct, 12:1.") parser.set_defaults(fitphi=False) parser.add_argument('--fitphi', action='store_true', dest='fitphi', help='Fit ExpCM phi rather than setting so stationary ' 'state matches alignment frequencies.') parser.set_defaults(randprefs=False) parser.add_argument('--randprefs', dest='randprefs', action='store_true', help="Randomize preferences among sites for ExpCM.") parser.set_defaults(avgprefs=False) parser.add_argument('--avgprefs', dest='avgprefs', action='store_true', help="Average preferences across sites for ExpCM.") parser.add_argument('--divpressure', type=ExistingFileOrNone, help=("Known diversifying pressure at sites: file with column 1 " "= position, column 2 = diversification pressure; columns space-, " "tab-, or comma-delimited.")) parser.add_argument('--ncpus', default=1, type=int, help='Use this many CPUs; -1 means all available.') parser.add_argument('--fitprefsmethod', choices=[1, 2], default=2, help='Implementation to we use when fitting prefs.', 
type=int) parser.add_argument('--ncats', default=4, type=IntGreaterThanOne, help='Number of categories for gamma-distribution.') parser.add_argument('--minbrlen', type=FloatGreaterThanZero, default=phydmslib.constants.ALMOST_ZERO, help="Adjust all branch lengths in starting 'tree' to >= this.") parser.add_argument('--minpref', default=0.002, type=FloatGreaterThanZero, help="Adjust all preferences in ExpCM 'prefsfile' to >= this.") parser.add_argument('--seed', type=int, default=1, help="Random number seed.") parser.add_argument('--initparams', type=ExistingFile, help="Initialize " "model params from this file, which should be format of " "'*_modelparams.txt' file created by 'phydms' with this model.") parser.set_defaults(profile=False) parser.add_argument('--profile', dest='profile', action='store_true', help="Profile likelihood maximization, write pstats files. " "For code-development purposes.") parser.set_defaults(opt_details=False) parser.add_argument('--opt_details', dest='opt_details', action='store_true', help='Print details about optimization') parser.set_defaults(nograd=False) parser.add_argument('--nograd', dest='nograd', action='store_true', help="Do not use gradients for likelihood maximization.") parser.add_argument('-v', '--version', action='version', version=( ('%(prog)s {version}'.format(version=phydmslib.__version__)))) return parser
python
def PhyDMSParser(): """Returns *argparse.ArgumentParser* for ``phydms`` script.""" parser = ArgumentParserNoArgHelp(description=('Phylogenetic analysis ' 'informed by deep mutational scanning data. {0} Version {1}. Full' ' documentation at {2}').format(phydmslib.__acknowledgments__, phydmslib.__version__, phydmslib.__url__), formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('alignment', type=ExistingFile, help='Existing FASTA file with aligned codon sequences.') parser.add_argument('tree', type=ExistingFile, help="Existing Newick file giving input tree.") parser.add_argument('model', type=ModelOption, help=("Substitution model: ExpCM_<prefsfile> or YNGKP_<m> (" "where <m> is {0}). For ExpCM, <prefsfile> has first " "column labeled 'site' and others labeled by 1-letter " "amino-acid code.").format(', '.join(yngkp_modelvariants))) parser.add_argument('outprefix', help='Output file prefix.', type=str) parser.add_argument('--brlen', choices=['scale', 'optimize'], default='optimize', help=("How to handle branch lengths: " "scale by single parameter or optimize each one")) parser.set_defaults(gammaomega=False) parser.add_argument('--gammaomega', action='store_true', dest='gammaomega', help="Omega for ExpCM from gamma " "distribution rather than single value. 
To achieve " "same for YNGKP, use 'model' of YNGKP_M5.") parser.set_defaults(gammabeta=False) parser.add_argument('--gammabeta', action='store_true', dest='gammabeta', help="Beta for ExpCM from gamma " "distribution rather than single value.") parser.set_defaults(omegabysite=False) parser.add_argument('--omegabysite', dest='omegabysite', action='store_true', help="Fit omega (dN/dS) for each site.") parser.set_defaults(omegabysite_fixsyn=False) parser.add_argument('--omegabysite_fixsyn', dest='omegabysite_fixsyn', action='store_true', help="For '--omegabysite', assign all " "sites same dS rather than fit for each site.") parser.set_defaults(diffprefsbysite=False) parser.add_argument('--diffprefsbysite', dest='diffprefsbysite', action='store_true', help="Fit differential preferences " "for each site.") parser.add_argument('--diffprefsprior', default='invquadratic,150,0.5', type=diffPrefsPrior, help="Regularizing prior for " "'--diffprefsbysite': 'invquadratic,C1,C2' is prior in " "Bloom, Biology Direct, 12:1.") parser.set_defaults(fitphi=False) parser.add_argument('--fitphi', action='store_true', dest='fitphi', help='Fit ExpCM phi rather than setting so stationary ' 'state matches alignment frequencies.') parser.set_defaults(randprefs=False) parser.add_argument('--randprefs', dest='randprefs', action='store_true', help="Randomize preferences among sites for ExpCM.") parser.set_defaults(avgprefs=False) parser.add_argument('--avgprefs', dest='avgprefs', action='store_true', help="Average preferences across sites for ExpCM.") parser.add_argument('--divpressure', type=ExistingFileOrNone, help=("Known diversifying pressure at sites: file with column 1 " "= position, column 2 = diversification pressure; columns space-, " "tab-, or comma-delimited.")) parser.add_argument('--ncpus', default=1, type=int, help='Use this many CPUs; -1 means all available.') parser.add_argument('--fitprefsmethod', choices=[1, 2], default=2, help='Implementation to we use when fitting prefs.', 
type=int) parser.add_argument('--ncats', default=4, type=IntGreaterThanOne, help='Number of categories for gamma-distribution.') parser.add_argument('--minbrlen', type=FloatGreaterThanZero, default=phydmslib.constants.ALMOST_ZERO, help="Adjust all branch lengths in starting 'tree' to >= this.") parser.add_argument('--minpref', default=0.002, type=FloatGreaterThanZero, help="Adjust all preferences in ExpCM 'prefsfile' to >= this.") parser.add_argument('--seed', type=int, default=1, help="Random number seed.") parser.add_argument('--initparams', type=ExistingFile, help="Initialize " "model params from this file, which should be format of " "'*_modelparams.txt' file created by 'phydms' with this model.") parser.set_defaults(profile=False) parser.add_argument('--profile', dest='profile', action='store_true', help="Profile likelihood maximization, write pstats files. " "For code-development purposes.") parser.set_defaults(opt_details=False) parser.add_argument('--opt_details', dest='opt_details', action='store_true', help='Print details about optimization') parser.set_defaults(nograd=False) parser.add_argument('--nograd', dest='nograd', action='store_true', help="Do not use gradients for likelihood maximization.") parser.add_argument('-v', '--version', action='version', version=( ('%(prog)s {version}'.format(version=phydmslib.__version__)))) return parser
[ "def", "PhyDMSParser", "(", ")", ":", "parser", "=", "ArgumentParserNoArgHelp", "(", "description", "=", "(", "'Phylogenetic analysis '", "'informed by deep mutational scanning data. {0} Version {1}. Full'", "' documentation at {2}'", ")", ".", "format", "(", "phydmslib", "."...
Returns *argparse.ArgumentParser* for ``phydms`` script.
[ "Returns", "*", "argparse", ".", "ArgumentParser", "*", "for", "phydms", "script", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L334-L419
jbloomlab/phydms
phydmslib/parsearguments.py
ArgumentParserNoArgHelp.error
def error(self, message): """Prints error message, then help.""" sys.stderr.write('error: %s\n\n' % message) self.print_help() sys.exit(2)
python
def error(self, message): """Prints error message, then help.""" sys.stderr.write('error: %s\n\n' % message) self.print_help() sys.exit(2)
[ "def", "error", "(", "self", ",", "message", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'error: %s\\n\\n'", "%", "message", ")", "self", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "2", ")" ]
Prints error message, then help.
[ "Prints", "error", "message", "then", "help", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/parsearguments.py#L19-L23
all-umass/graphs
graphs/base/pairs.py
EdgePairGraph.symmetrize
def symmetrize(self, method=None, copy=False): '''Symmetrizes (ignores method). Returns a copy if copy=True.''' if copy: return SymmEdgePairGraph(self._pairs.copy(), num_vertices=self._num_vertices) shape = (self._num_vertices, self._num_vertices) flat_inds = np.union1d(np.ravel_multi_index(self._pairs.T, shape), np.ravel_multi_index(self._pairs.T[::-1], shape)) self._pairs = np.transpose(np.unravel_index(flat_inds, shape)) return self
python
def symmetrize(self, method=None, copy=False): '''Symmetrizes (ignores method). Returns a copy if copy=True.''' if copy: return SymmEdgePairGraph(self._pairs.copy(), num_vertices=self._num_vertices) shape = (self._num_vertices, self._num_vertices) flat_inds = np.union1d(np.ravel_multi_index(self._pairs.T, shape), np.ravel_multi_index(self._pairs.T[::-1], shape)) self._pairs = np.transpose(np.unravel_index(flat_inds, shape)) return self
[ "def", "symmetrize", "(", "self", ",", "method", "=", "None", ",", "copy", "=", "False", ")", ":", "if", "copy", ":", "return", "SymmEdgePairGraph", "(", "self", ".", "_pairs", ".", "copy", "(", ")", ",", "num_vertices", "=", "self", ".", "_num_vertice...
Symmetrizes (ignores method). Returns a copy if copy=True.
[ "Symmetrizes", "(", "ignores", "method", ")", ".", "Returns", "a", "copy", "if", "copy", "=", "True", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/pairs.py#L100-L109
all-umass/graphs
graphs/base/pairs.py
SymmEdgePairGraph.remove_edges
def remove_edges(self, from_idx, to_idx, symmetric=False, copy=False): '''Removes all from->to and to->from edges. Note: the symmetric kwarg is unused.''' flat_inds = self._pairs.dot((self._num_vertices, 1)) # convert to sorted order and flatten to_remove = (np.minimum(from_idx, to_idx) * self._num_vertices + np.maximum(from_idx, to_idx)) mask = np.in1d(flat_inds, to_remove, invert=True) res = self.copy() if copy else self res._pairs = res._pairs[mask] res._offdiag_mask = res._offdiag_mask[mask] return res
python
def remove_edges(self, from_idx, to_idx, symmetric=False, copy=False): '''Removes all from->to and to->from edges. Note: the symmetric kwarg is unused.''' flat_inds = self._pairs.dot((self._num_vertices, 1)) # convert to sorted order and flatten to_remove = (np.minimum(from_idx, to_idx) * self._num_vertices + np.maximum(from_idx, to_idx)) mask = np.in1d(flat_inds, to_remove, invert=True) res = self.copy() if copy else self res._pairs = res._pairs[mask] res._offdiag_mask = res._offdiag_mask[mask] return res
[ "def", "remove_edges", "(", "self", ",", "from_idx", ",", "to_idx", ",", "symmetric", "=", "False", ",", "copy", "=", "False", ")", ":", "flat_inds", "=", "self", ".", "_pairs", ".", "dot", "(", "(", "self", ".", "_num_vertices", ",", "1", ")", ")", ...
Removes all from->to and to->from edges. Note: the symmetric kwarg is unused.
[ "Removes", "all", "from", "-", ">", "to", "and", "to", "-", ">", "from", "edges", ".", "Note", ":", "the", "symmetric", "kwarg", "is", "unused", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/pairs.py#L156-L167
all-umass/graphs
graphs/base/base.py
Graph.add_edges
def add_edges(self, from_idx, to_idx, weight=1, symmetric=False, copy=False): '''Adds all from->to edges. weight may be a scalar or 1d array. If symmetric=True, also adds to->from edges with the same weights.''' raise NotImplementedError()
python
def add_edges(self, from_idx, to_idx, weight=1, symmetric=False, copy=False): '''Adds all from->to edges. weight may be a scalar or 1d array. If symmetric=True, also adds to->from edges with the same weights.''' raise NotImplementedError()
[ "def", "add_edges", "(", "self", ",", "from_idx", ",", "to_idx", ",", "weight", "=", "1", ",", "symmetric", "=", "False", ",", "copy", "=", "False", ")", ":", "raise", "NotImplementedError", "(", ")" ]
Adds all from->to edges. weight may be a scalar or 1d array. If symmetric=True, also adds to->from edges with the same weights.
[ "Adds", "all", "from", "-", ">", "to", "edges", ".", "weight", "may", "be", "a", "scalar", "or", "1d", "array", ".", "If", "symmetric", "=", "True", "also", "adds", "to", "-", ">", "from", "edges", "with", "the", "same", "weights", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L48-L51
all-umass/graphs
graphs/base/base.py
Graph.add_self_edges
def add_self_edges(self, weight=None, copy=False): '''Adds all i->i edges. weight may be a scalar or 1d array.''' ii = np.arange(self.num_vertices()) return self.add_edges(ii, ii, weight=weight, symmetric=False, copy=copy)
python
def add_self_edges(self, weight=None, copy=False): '''Adds all i->i edges. weight may be a scalar or 1d array.''' ii = np.arange(self.num_vertices()) return self.add_edges(ii, ii, weight=weight, symmetric=False, copy=copy)
[ "def", "add_self_edges", "(", "self", ",", "weight", "=", "None", ",", "copy", "=", "False", ")", ":", "ii", "=", "np", ".", "arange", "(", "self", ".", "num_vertices", "(", ")", ")", "return", "self", ".", "add_edges", "(", "ii", ",", "ii", ",", ...
Adds all i->i edges. weight may be a scalar or 1d array.
[ "Adds", "all", "i", "-", ">", "i", "edges", ".", "weight", "may", "be", "a", "scalar", "or", "1d", "array", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L77-L80
all-umass/graphs
graphs/base/base.py
Graph.reweight
def reweight(self, weight, edges=None, copy=False): '''Replaces existing edge weights. weight may be a scalar or 1d array. edges is a mask or index array that specifies a subset of edges to modify''' if not self.is_weighted(): warnings.warn('Cannot supply weights for unweighted graph; ' 'ignoring call to reweight') return self if edges is None: return self._update_edges(weight, copy=copy) ii, jj = self.pairs()[edges].T return self.add_edges(ii, jj, weight=weight, symmetric=False, copy=copy)
python
def reweight(self, weight, edges=None, copy=False): '''Replaces existing edge weights. weight may be a scalar or 1d array. edges is a mask or index array that specifies a subset of edges to modify''' if not self.is_weighted(): warnings.warn('Cannot supply weights for unweighted graph; ' 'ignoring call to reweight') return self if edges is None: return self._update_edges(weight, copy=copy) ii, jj = self.pairs()[edges].T return self.add_edges(ii, jj, weight=weight, symmetric=False, copy=copy)
[ "def", "reweight", "(", "self", ",", "weight", ",", "edges", "=", "None", ",", "copy", "=", "False", ")", ":", "if", "not", "self", ".", "is_weighted", "(", ")", ":", "warnings", ".", "warn", "(", "'Cannot supply weights for unweighted graph; '", "'ignoring ...
Replaces existing edge weights. weight may be a scalar or 1d array. edges is a mask or index array that specifies a subset of edges to modify
[ "Replaces", "existing", "edge", "weights", ".", "weight", "may", "be", "a", "scalar", "or", "1d", "array", ".", "edges", "is", "a", "mask", "or", "index", "array", "that", "specifies", "a", "subset", "of", "edges", "to", "modify" ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L82-L92
all-umass/graphs
graphs/base/base.py
Graph.reweight_by_distance
def reweight_by_distance(self, coords, metric='l2', copy=False): '''Replaces existing edge weights by distances between connected vertices. The new weight of edge (i,j) is given by: metric(coords[i], coords[j]). coords : (num_vertices x d) array of coordinates, in vertex order metric : str or callable, see sklearn.metrics.pairwise.paired_distances''' if not self.is_weighted(): warnings.warn('Cannot supply weights for unweighted graph; ' 'ignoring call to reweight_by_distance') return self # TODO: take advantage of symmetry of metric function ii, jj = self.pairs().T if metric == 'precomputed': assert coords.ndim == 2 and coords.shape[0] == coords.shape[1] d = coords[ii,jj] else: d = paired_distances(coords[ii], coords[jj], metric=metric) return self._update_edges(d, copy=copy)
python
def reweight_by_distance(self, coords, metric='l2', copy=False): '''Replaces existing edge weights by distances between connected vertices. The new weight of edge (i,j) is given by: metric(coords[i], coords[j]). coords : (num_vertices x d) array of coordinates, in vertex order metric : str or callable, see sklearn.metrics.pairwise.paired_distances''' if not self.is_weighted(): warnings.warn('Cannot supply weights for unweighted graph; ' 'ignoring call to reweight_by_distance') return self # TODO: take advantage of symmetry of metric function ii, jj = self.pairs().T if metric == 'precomputed': assert coords.ndim == 2 and coords.shape[0] == coords.shape[1] d = coords[ii,jj] else: d = paired_distances(coords[ii], coords[jj], metric=metric) return self._update_edges(d, copy=copy)
[ "def", "reweight_by_distance", "(", "self", ",", "coords", ",", "metric", "=", "'l2'", ",", "copy", "=", "False", ")", ":", "if", "not", "self", ".", "is_weighted", "(", ")", ":", "warnings", ".", "warn", "(", "'Cannot supply weights for unweighted graph; '", ...
Replaces existing edge weights by distances between connected vertices. The new weight of edge (i,j) is given by: metric(coords[i], coords[j]). coords : (num_vertices x d) array of coordinates, in vertex order metric : str or callable, see sklearn.metrics.pairwise.paired_distances
[ "Replaces", "existing", "edge", "weights", "by", "distances", "between", "connected", "vertices", ".", "The", "new", "weight", "of", "edge", "(", "i", "j", ")", "is", "given", "by", ":", "metric", "(", "coords", "[", "i", "]", "coords", "[", "j", "]", ...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L94-L110
all-umass/graphs
graphs/base/base.py
Graph.degree
def degree(self, kind='out', weighted=True): '''Returns an array of vertex degrees. kind : either 'in' or 'out', useful for directed graphs weighted : controls whether to count edges or sum their weights ''' if kind == 'out': axis = 1 adj = self.matrix('dense', 'csc') else: axis = 0 adj = self.matrix('dense', 'csr') if not weighted and self.is_weighted(): # With recent numpy and a dense matrix, could do: # d = np.count_nonzero(adj, axis=axis) d = (adj!=0).sum(axis=axis) else: d = adj.sum(axis=axis) return np.asarray(d).ravel()
python
def degree(self, kind='out', weighted=True): '''Returns an array of vertex degrees. kind : either 'in' or 'out', useful for directed graphs weighted : controls whether to count edges or sum their weights ''' if kind == 'out': axis = 1 adj = self.matrix('dense', 'csc') else: axis = 0 adj = self.matrix('dense', 'csr') if not weighted and self.is_weighted(): # With recent numpy and a dense matrix, could do: # d = np.count_nonzero(adj, axis=axis) d = (adj!=0).sum(axis=axis) else: d = adj.sum(axis=axis) return np.asarray(d).ravel()
[ "def", "degree", "(", "self", ",", "kind", "=", "'out'", ",", "weighted", "=", "True", ")", ":", "if", "kind", "==", "'out'", ":", "axis", "=", "1", "adj", "=", "self", ".", "matrix", "(", "'dense'", ",", "'csc'", ")", "else", ":", "axis", "=", ...
Returns an array of vertex degrees. kind : either 'in' or 'out', useful for directed graphs weighted : controls whether to count edges or sum their weights
[ "Returns", "an", "array", "of", "vertex", "degrees", ".", "kind", ":", "either", "in", "or", "out", "useful", "for", "directed", "graphs", "weighted", ":", "controls", "whether", "to", "count", "edges", "or", "sum", "their", "weights" ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L119-L137
all-umass/graphs
graphs/base/base.py
Graph.to_igraph
def to_igraph(self, weighted=None): '''Converts this Graph object to an igraph-compatible object. Requires the python-igraph library.''' # Import here to avoid ImportErrors when igraph isn't available. import igraph ig = igraph.Graph(n=self.num_vertices(), edges=self.pairs().tolist(), directed=self.is_directed()) if weighted is not False and self.is_weighted(): ig.es['weight'] = self.edge_weights() return ig
python
def to_igraph(self, weighted=None): '''Converts this Graph object to an igraph-compatible object. Requires the python-igraph library.''' # Import here to avoid ImportErrors when igraph isn't available. import igraph ig = igraph.Graph(n=self.num_vertices(), edges=self.pairs().tolist(), directed=self.is_directed()) if weighted is not False and self.is_weighted(): ig.es['weight'] = self.edge_weights() return ig
[ "def", "to_igraph", "(", "self", ",", "weighted", "=", "None", ")", ":", "# Import here to avoid ImportErrors when igraph isn't available.", "import", "igraph", "ig", "=", "igraph", ".", "Graph", "(", "n", "=", "self", ".", "num_vertices", "(", ")", ",", "edges"...
Converts this Graph object to an igraph-compatible object. Requires the python-igraph library.
[ "Converts", "this", "Graph", "object", "to", "an", "igraph", "-", "compatible", "object", ".", "Requires", "the", "python", "-", "igraph", "library", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L139-L148
all-umass/graphs
graphs/base/base.py
Graph.to_graph_tool
def to_graph_tool(self): '''Converts this Graph object to a graph_tool-compatible object. Requires the graph_tool library. Note that the internal ordering of graph_tool seems to be column-major.''' # Import here to avoid ImportErrors when graph_tool isn't available. import graph_tool gt = graph_tool.Graph(directed=self.is_directed()) gt.add_edge_list(self.pairs()) if self.is_weighted(): weights = gt.new_edge_property('double') for e,w in zip(gt.edges(), self.edge_weights()): weights[e] = w gt.edge_properties['weight'] = weights return gt
python
def to_graph_tool(self): '''Converts this Graph object to a graph_tool-compatible object. Requires the graph_tool library. Note that the internal ordering of graph_tool seems to be column-major.''' # Import here to avoid ImportErrors when graph_tool isn't available. import graph_tool gt = graph_tool.Graph(directed=self.is_directed()) gt.add_edge_list(self.pairs()) if self.is_weighted(): weights = gt.new_edge_property('double') for e,w in zip(gt.edges(), self.edge_weights()): weights[e] = w gt.edge_properties['weight'] = weights return gt
[ "def", "to_graph_tool", "(", "self", ")", ":", "# Import here to avoid ImportErrors when graph_tool isn't available.", "import", "graph_tool", "gt", "=", "graph_tool", ".", "Graph", "(", "directed", "=", "self", ".", "is_directed", "(", ")", ")", "gt", ".", "add_edg...
Converts this Graph object to a graph_tool-compatible object. Requires the graph_tool library. Note that the internal ordering of graph_tool seems to be column-major.
[ "Converts", "this", "Graph", "object", "to", "a", "graph_tool", "-", "compatible", "object", ".", "Requires", "the", "graph_tool", "library", ".", "Note", "that", "the", "internal", "ordering", "of", "graph_tool", "seems", "to", "be", "column", "-", "major", ...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L150-L163
all-umass/graphs
graphs/base/base.py
Graph.to_networkx
def to_networkx(self, directed=None): '''Converts this Graph object to a networkx-compatible object. Requires the networkx library.''' import networkx as nx directed = directed if directed is not None else self.is_directed() cls = nx.DiGraph if directed else nx.Graph adj = self.matrix() if ss.issparse(adj): return nx.from_scipy_sparse_matrix(adj, create_using=cls()) return nx.from_numpy_matrix(adj, create_using=cls())
python
def to_networkx(self, directed=None): '''Converts this Graph object to a networkx-compatible object. Requires the networkx library.''' import networkx as nx directed = directed if directed is not None else self.is_directed() cls = nx.DiGraph if directed else nx.Graph adj = self.matrix() if ss.issparse(adj): return nx.from_scipy_sparse_matrix(adj, create_using=cls()) return nx.from_numpy_matrix(adj, create_using=cls())
[ "def", "to_networkx", "(", "self", ",", "directed", "=", "None", ")", ":", "import", "networkx", "as", "nx", "directed", "=", "directed", "if", "directed", "is", "not", "None", "else", "self", ".", "is_directed", "(", ")", "cls", "=", "nx", ".", "DiGra...
Converts this Graph object to a networkx-compatible object. Requires the networkx library.
[ "Converts", "this", "Graph", "object", "to", "a", "networkx", "-", "compatible", "object", ".", "Requires", "the", "networkx", "library", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L165-L174
jesford/cluster-lensing
clusterlensing/cofm.py
_check_inputs
def _check_inputs(z, m): """Check inputs are arrays of same length or array and a scalar.""" try: nz = len(z) z = np.array(z) except TypeError: z = np.array([z]) nz = len(z) try: nm = len(m) m = np.array(m) except TypeError: m = np.array([m]) nm = len(m) if (z < 0).any() or (m < 0).any(): raise ValueError('z and m must be positive') if nz != nm and nz > 1 and nm > 1: raise ValueError('z and m arrays must be either equal in length, \ OR of different length with one of length 1.') else: if type(z) != np.ndarray: z = np.array(z) if type(m) != np.ndarray: m = np.array(m) return z, m
python
def _check_inputs(z, m): """Check inputs are arrays of same length or array and a scalar.""" try: nz = len(z) z = np.array(z) except TypeError: z = np.array([z]) nz = len(z) try: nm = len(m) m = np.array(m) except TypeError: m = np.array([m]) nm = len(m) if (z < 0).any() or (m < 0).any(): raise ValueError('z and m must be positive') if nz != nm and nz > 1 and nm > 1: raise ValueError('z and m arrays must be either equal in length, \ OR of different length with one of length 1.') else: if type(z) != np.ndarray: z = np.array(z) if type(m) != np.ndarray: m = np.array(m) return z, m
[ "def", "_check_inputs", "(", "z", ",", "m", ")", ":", "try", ":", "nz", "=", "len", "(", "z", ")", "z", "=", "np", ".", "array", "(", "z", ")", "except", "TypeError", ":", "z", "=", "np", ".", "array", "(", "[", "z", "]", ")", "nz", "=", ...
Check inputs are arrays of same length or array and a scalar.
[ "Check", "inputs", "are", "arrays", "of", "same", "length", "or", "array", "and", "a", "scalar", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/cofm.py#L16-L44
jesford/cluster-lensing
clusterlensing/cofm.py
c_Prada
def c_Prada(z, m, h=h, Om_M=Om_M, Om_L=Om_L): """Concentration from c(M) relation published in Prada et al. (2012). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Om_M : float, optional Matter density parameter. Default is from Planck13. Om_L : float, optional Cosmological constant density parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. Notes ---------- This c(M) relation is somewhat controversial, due to its upturn in concentration for high masses (normally we expect concentration to decrease with increasing mass). See the reference below for discussion. References ---------- Calculation based on results of N-body simulations presented in: F. Prada, A.A. Klypin, A.J. Cuesta, J.E. Betancort-Rijo, and J. Primack, "Halo concentrations in the standard Lambda cold dark matter cosmology," Monthly Notices of the Royal Astronomical Society, Volume 423, Issue 4, pp. 3018-3030, 2012. """ z, m = _check_inputs(z, m) # EQ 13 x = (1. / (1. + z)) * (Om_L / Om_M)**(1. / 3.) # EQ 12 intEQ12 = np.zeros(len(x)) # integral for i in range(len(x)): # v is integration variable temp = integrate.quad(lambda v: (v / (1 + v**3.))**(1.5), 0, x[i]) intEQ12[i] = temp[0] Da = 2.5 * ((Om_M / Om_L)**(1. / 3.)) * (np.sqrt(1. + x**3.) / (x**(1.5))) * intEQ12 # EQ 23 y = (1.e+12) / (h * m) sigma = Da * (16.9 * y**0.41) / (1. + (1.102 * y**0.2) + (6.22 * y**0.333)) # EQ 21 & 22 (constants) c0 = 3.681 c1 = 5.033 alpha = 6.948 x0 = 0.424 s0 = 1.047 # sigma_0^-1 s1 = 1.646 # sigma_1^-1 beta = 7.386 x1 = 0.526 # EQ 19 & 20 cmin = c0 + (c1 - c0) * ((1. / np.pi) * np.arctan(alpha * (x - x0)) + 0.5) smin = s0 + (s1 - s0) * ((1. / np.pi) * np.arctan(beta * (x - x1)) + 0.5) # EQ 18 cmin1393 = c0 + (c1 - c0) * ((1. 
/ np.pi) * np.arctan(alpha * (1.393 - x0)) + 0.5) smin1393 = s0 + (s1 - s0) * ((1. / np.pi) * np.arctan(beta * (1.393 - x1)) + 0.5) B0 = cmin / cmin1393 B1 = smin / smin1393 # EQ 15 sigma_prime = B1 * sigma # EQ 17 A = 2.881 b = 1.257 c = 1.022 d = 0.06 # EQ 16 Cs = A * ((sigma_prime / b)**c + 1.) * np.exp(d / (sigma_prime**2.)) # EQ 14 concentration = B0 * Cs return concentration
python
def c_Prada(z, m, h=h, Om_M=Om_M, Om_L=Om_L): """Concentration from c(M) relation published in Prada et al. (2012). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Om_M : float, optional Matter density parameter. Default is from Planck13. Om_L : float, optional Cosmological constant density parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. Notes ---------- This c(M) relation is somewhat controversial, due to its upturn in concentration for high masses (normally we expect concentration to decrease with increasing mass). See the reference below for discussion. References ---------- Calculation based on results of N-body simulations presented in: F. Prada, A.A. Klypin, A.J. Cuesta, J.E. Betancort-Rijo, and J. Primack, "Halo concentrations in the standard Lambda cold dark matter cosmology," Monthly Notices of the Royal Astronomical Society, Volume 423, Issue 4, pp. 3018-3030, 2012. """ z, m = _check_inputs(z, m) # EQ 13 x = (1. / (1. + z)) * (Om_L / Om_M)**(1. / 3.) # EQ 12 intEQ12 = np.zeros(len(x)) # integral for i in range(len(x)): # v is integration variable temp = integrate.quad(lambda v: (v / (1 + v**3.))**(1.5), 0, x[i]) intEQ12[i] = temp[0] Da = 2.5 * ((Om_M / Om_L)**(1. / 3.)) * (np.sqrt(1. + x**3.) / (x**(1.5))) * intEQ12 # EQ 23 y = (1.e+12) / (h * m) sigma = Da * (16.9 * y**0.41) / (1. + (1.102 * y**0.2) + (6.22 * y**0.333)) # EQ 21 & 22 (constants) c0 = 3.681 c1 = 5.033 alpha = 6.948 x0 = 0.424 s0 = 1.047 # sigma_0^-1 s1 = 1.646 # sigma_1^-1 beta = 7.386 x1 = 0.526 # EQ 19 & 20 cmin = c0 + (c1 - c0) * ((1. / np.pi) * np.arctan(alpha * (x - x0)) + 0.5) smin = s0 + (s1 - s0) * ((1. / np.pi) * np.arctan(beta * (x - x1)) + 0.5) # EQ 18 cmin1393 = c0 + (c1 - c0) * ((1. 
/ np.pi) * np.arctan(alpha * (1.393 - x0)) + 0.5) smin1393 = s0 + (s1 - s0) * ((1. / np.pi) * np.arctan(beta * (1.393 - x1)) + 0.5) B0 = cmin / cmin1393 B1 = smin / smin1393 # EQ 15 sigma_prime = B1 * sigma # EQ 17 A = 2.881 b = 1.257 c = 1.022 d = 0.06 # EQ 16 Cs = A * ((sigma_prime / b)**c + 1.) * np.exp(d / (sigma_prime**2.)) # EQ 14 concentration = B0 * Cs return concentration
[ "def", "c_Prada", "(", "z", ",", "m", ",", "h", "=", "h", ",", "Om_M", "=", "Om_M", ",", "Om_L", "=", "Om_L", ")", ":", "z", ",", "m", "=", "_check_inputs", "(", "z", ",", "m", ")", "# EQ 13", "x", "=", "(", "1.", "/", "(", "1.", "+", "z"...
Concentration from c(M) relation published in Prada et al. (2012). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Om_M : float, optional Matter density parameter. Default is from Planck13. Om_L : float, optional Cosmological constant density parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. Notes ---------- This c(M) relation is somewhat controversial, due to its upturn in concentration for high masses (normally we expect concentration to decrease with increasing mass). See the reference below for discussion. References ---------- Calculation based on results of N-body simulations presented in: F. Prada, A.A. Klypin, A.J. Cuesta, J.E. Betancort-Rijo, and J. Primack, "Halo concentrations in the standard Lambda cold dark matter cosmology," Monthly Notices of the Royal Astronomical Society, Volume 423, Issue 4, pp. 3018-3030, 2012.
[ "Concentration", "from", "c", "(", "M", ")", "relation", "published", "in", "Prada", "et", "al", ".", "(", "2012", ")", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/cofm.py#L47-L140
jesford/cluster-lensing
clusterlensing/cofm.py
c_DuttonMaccio
def c_DuttonMaccio(z, m, h=h): """Concentration from c(M) relation in Dutton & Maccio (2014). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Calculation from Planck-based results of simulations presented in: A.A. Dutton & A.V. Maccio, "Cold dark matter haloes in the Planck era: evolution of structural parameters for Einasto and NFW profiles," Monthly Notices of the Royal Astronomical Society, Volume 441, Issue 4, p.3359-3374, 2014. """ z, m = _check_inputs(z, m) a = 0.52 + 0.385 * np.exp(-0.617 * (z**1.21)) # EQ 10 b = -0.101 + 0.026 * z # EQ 11 logc200 = a + b * np.log10(m * h / (10.**12)) # EQ 7 concentration = 10.**logc200 return concentration
python
def c_DuttonMaccio(z, m, h=h): """Concentration from c(M) relation in Dutton & Maccio (2014). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Calculation from Planck-based results of simulations presented in: A.A. Dutton & A.V. Maccio, "Cold dark matter haloes in the Planck era: evolution of structural parameters for Einasto and NFW profiles," Monthly Notices of the Royal Astronomical Society, Volume 441, Issue 4, p.3359-3374, 2014. """ z, m = _check_inputs(z, m) a = 0.52 + 0.385 * np.exp(-0.617 * (z**1.21)) # EQ 10 b = -0.101 + 0.026 * z # EQ 11 logc200 = a + b * np.log10(m * h / (10.**12)) # EQ 7 concentration = 10.**logc200 return concentration
[ "def", "c_DuttonMaccio", "(", "z", ",", "m", ",", "h", "=", "h", ")", ":", "z", ",", "m", "=", "_check_inputs", "(", "z", ",", "m", ")", "a", "=", "0.52", "+", "0.385", "*", "np", ".", "exp", "(", "-", "0.617", "*", "(", "z", "**", "1.21", ...
Concentration from c(M) relation in Dutton & Maccio (2014). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Calculation from Planck-based results of simulations presented in: A.A. Dutton & A.V. Maccio, "Cold dark matter haloes in the Planck era: evolution of structural parameters for Einasto and NFW profiles," Monthly Notices of the Royal Astronomical Society, Volume 441, Issue 4, p.3359-3374, 2014.
[ "Concentration", "from", "c", "(", "M", ")", "relation", "in", "Dutton", "&", "Maccio", "(", "2014", ")", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/cofm.py#L143-L179
jesford/cluster-lensing
clusterlensing/cofm.py
c_Duffy
def c_Duffy(z, m, h=h): """Concentration from c(M) relation published in Duffy et al. (2008). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Results from N-body simulations using WMAP5 cosmology, presented in: A.R. Duffy, J. Schaye, S.T. Kay, and C. Dalla Vecchia, "Dark matter halo concentrations in the Wilkinson Microwave Anisotropy Probe year 5 cosmology," Monthly Notices of the Royal Astronomical Society, Volume 390, Issue 1, pp. L64-L68, 2008. This calculation uses the parameters corresponding to the NFW model, the '200' halo definition, and the 'full' sample of halos spanning z = 0-2. This means the values of fitted parameters (A,B,C) = (5.71, -0.084,-0.47) in Table 1 of Duffy et al. (2008). """ z, m = _check_inputs(z, m) M_pivot = 2.e12 / h # [M_solar] A = 5.71 B = -0.084 C = -0.47 concentration = A * ((m / M_pivot)**B) * (1 + z)**C return concentration
python
def c_Duffy(z, m, h=h): """Concentration from c(M) relation published in Duffy et al. (2008). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Results from N-body simulations using WMAP5 cosmology, presented in: A.R. Duffy, J. Schaye, S.T. Kay, and C. Dalla Vecchia, "Dark matter halo concentrations in the Wilkinson Microwave Anisotropy Probe year 5 cosmology," Monthly Notices of the Royal Astronomical Society, Volume 390, Issue 1, pp. L64-L68, 2008. This calculation uses the parameters corresponding to the NFW model, the '200' halo definition, and the 'full' sample of halos spanning z = 0-2. This means the values of fitted parameters (A,B,C) = (5.71, -0.084,-0.47) in Table 1 of Duffy et al. (2008). """ z, m = _check_inputs(z, m) M_pivot = 2.e12 / h # [M_solar] A = 5.71 B = -0.084 C = -0.47 concentration = A * ((m / M_pivot)**B) * (1 + z)**C return concentration
[ "def", "c_Duffy", "(", "z", ",", "m", ",", "h", "=", "h", ")", ":", "z", ",", "m", "=", "_check_inputs", "(", "z", ",", "m", ")", "M_pivot", "=", "2.e12", "/", "h", "# [M_solar]", "A", "=", "5.71", "B", "=", "-", "0.084", "C", "=", "-", "0....
Concentration from c(M) relation published in Duffy et al. (2008). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Results from N-body simulations using WMAP5 cosmology, presented in: A.R. Duffy, J. Schaye, S.T. Kay, and C. Dalla Vecchia, "Dark matter halo concentrations in the Wilkinson Microwave Anisotropy Probe year 5 cosmology," Monthly Notices of the Royal Astronomical Society, Volume 390, Issue 1, pp. L64-L68, 2008. This calculation uses the parameters corresponding to the NFW model, the '200' halo definition, and the 'full' sample of halos spanning z = 0-2. This means the values of fitted parameters (A,B,C) = (5.71, -0.084,-0.47) in Table 1 of Duffy et al. (2008).
[ "Concentration", "from", "c", "(", "M", ")", "relation", "published", "in", "Duffy", "et", "al", ".", "(", "2008", ")", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/cofm.py#L182-L224
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Label._set_label
def _set_label(self, which, label, **kwargs): """Private method for setting labels. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `xlabel`/`ylabel`, and `title`. label (str): The label to be added. fontsize (int, optional): Fontsize for associated label. Default is None. """ prop_default = { 'fontsize': 18, } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) setattr(self.label, which, label) setattr(self.label, which + '_kwargs', kwargs) return
python
def _set_label(self, which, label, **kwargs): """Private method for setting labels. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `xlabel`/`ylabel`, and `title`. label (str): The label to be added. fontsize (int, optional): Fontsize for associated label. Default is None. """ prop_default = { 'fontsize': 18, } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) setattr(self.label, which, label) setattr(self.label, which + '_kwargs', kwargs) return
[ "def", "_set_label", "(", "self", ",", "which", ",", "label", ",", "*", "*", "kwargs", ")", ":", "prop_default", "=", "{", "'fontsize'", ":", "18", ",", "}", "for", "prop", ",", "default", "in", "prop_default", ".", "items", "(", ")", ":", "kwargs", ...
Private method for setting labels. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `xlabel`/`ylabel`, and `title`. label (str): The label to be added. fontsize (int, optional): Fontsize for associated label. Default is None.
[ "Private", "method", "for", "setting", "labels", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L50-L71
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Limits._set_axis_limits
def _set_axis_limits(self, which, lims, d, scale, reverse=False): """Private method for setting axis limits. Sets the axis limits on each axis for an individual plot. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lims (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False. """ setattr(self.limits, which + 'lims', lims) setattr(self.limits, 'd' + which, d) setattr(self.limits, which + 'scale', scale) if reverse: setattr(self.limits, 'reverse_' + which + '_axis', True) return
python
def _set_axis_limits(self, which, lims, d, scale, reverse=False): """Private method for setting axis limits. Sets the axis limits on each axis for an individual plot. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lims (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False. """ setattr(self.limits, which + 'lims', lims) setattr(self.limits, 'd' + which, d) setattr(self.limits, which + 'scale', scale) if reverse: setattr(self.limits, 'reverse_' + which + '_axis', True) return
[ "def", "_set_axis_limits", "(", "self", ",", "which", ",", "lims", ",", "d", ",", "scale", ",", "reverse", "=", "False", ")", ":", "setattr", "(", "self", ".", "limits", ",", "which", "+", "'lims'", ",", "lims", ")", "setattr", "(", "self", ".", "l...
Private method for setting axis limits. Sets the axis limits on each axis for an individual plot. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lims (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False.
[ "Private", "method", "for", "setting", "axis", "limits", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L214-L234
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Limits.set_xlim
def set_xlim(self, xlims, dx, xscale, reverse=False): """Set x limits for plot. This will set the limits for the x axis for the specific plot. Args: xlims (len-2 list of floats): The limits for the axis. dx (float): Amount to increment by between the limits. xscale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False. """ self._set_axis_limits('x', xlims, dx, xscale, reverse) return
python
def set_xlim(self, xlims, dx, xscale, reverse=False): """Set x limits for plot. This will set the limits for the x axis for the specific plot. Args: xlims (len-2 list of floats): The limits for the axis. dx (float): Amount to increment by between the limits. xscale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False. """ self._set_axis_limits('x', xlims, dx, xscale, reverse) return
[ "def", "set_xlim", "(", "self", ",", "xlims", ",", "dx", ",", "xscale", ",", "reverse", "=", "False", ")", ":", "self", ".", "_set_axis_limits", "(", "'x'", ",", "xlims", ",", "dx", ",", "xscale", ",", "reverse", ")", "return" ]
Set x limits for plot. This will set the limits for the x axis for the specific plot. Args: xlims (len-2 list of floats): The limits for the axis. dx (float): Amount to increment by between the limits. xscale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False.
[ "Set", "x", "limits", "for", "plot", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L236-L250
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Limits.set_ylim
def set_ylim(self, xlims, dx, xscale, reverse=False): """Set y limits for plot. This will set the limits for the y axis for the specific plot. Args: ylims (len-2 list of floats): The limits for the axis. dy (float): Amount to increment by between the limits. yscale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False. """ self._set_axis_limits('y', xlims, dx, xscale, reverse) return
python
def set_ylim(self, xlims, dx, xscale, reverse=False): """Set y limits for plot. This will set the limits for the y axis for the specific plot. Args: ylims (len-2 list of floats): The limits for the axis. dy (float): Amount to increment by between the limits. yscale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False. """ self._set_axis_limits('y', xlims, dx, xscale, reverse) return
[ "def", "set_ylim", "(", "self", ",", "xlims", ",", "dx", ",", "xscale", ",", "reverse", "=", "False", ")", ":", "self", ".", "_set_axis_limits", "(", "'y'", ",", "xlims", ",", "dx", ",", "xscale", ",", "reverse", ")", "return" ]
Set y limits for plot. This will set the limits for the y axis for the specific plot. Args: ylims (len-2 list of floats): The limits for the axis. dy (float): Amount to increment by between the limits. yscale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False.
[ "Set", "y", "limits", "for", "plot", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L252-L266
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Legend.add_legend
def add_legend(self, labels=None, **kwargs): """Specify legend for a plot. Adds labels and basic legend specifications for specific plot. For the optional Args, refer to https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html for more information. # TODO: Add legend capabilities for Loss/Gain plots. This is possible using the return_fig_ax kwarg in the main plotting function. Args: labels (list of str): String representing each item in plot that will be added to the legend. Keyword Arguments: loc (str, int, len-2 list of floats, optional): Location of legend. See matplotlib documentation for more detail. Default is None. bbox_to_anchor (2-tuple or 4-tuple of floats, optional): Specify position and size of legend box. 2-tuple will specify (x,y) coordinate of part of box specified with `loc` kwarg. 4-tuple will specify (x, y, width, height). See matplotlib documentation for more detail. Default is None. size (float, optional): Set size of legend using call to `prop` dict in legend call. See matplotlib documentaiton for more detail. Default is None. ncol (int, optional): Number of columns in the legend. Note: Other kwargs are available. See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html """ if 'size' in kwargs: if 'prop' not in kwargs: kwargs['prop'] = {'size': kwargs['size']} else: kwargs['prop']['size'] = kwargs['size'] del kwargs['size'] self.legend.add_legend = True self.legend.legend_labels = labels self.legend.legend_kwargs = kwargs return
python
def add_legend(self, labels=None, **kwargs): """Specify legend for a plot. Adds labels and basic legend specifications for specific plot. For the optional Args, refer to https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html for more information. # TODO: Add legend capabilities for Loss/Gain plots. This is possible using the return_fig_ax kwarg in the main plotting function. Args: labels (list of str): String representing each item in plot that will be added to the legend. Keyword Arguments: loc (str, int, len-2 list of floats, optional): Location of legend. See matplotlib documentation for more detail. Default is None. bbox_to_anchor (2-tuple or 4-tuple of floats, optional): Specify position and size of legend box. 2-tuple will specify (x,y) coordinate of part of box specified with `loc` kwarg. 4-tuple will specify (x, y, width, height). See matplotlib documentation for more detail. Default is None. size (float, optional): Set size of legend using call to `prop` dict in legend call. See matplotlib documentaiton for more detail. Default is None. ncol (int, optional): Number of columns in the legend. Note: Other kwargs are available. See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html """ if 'size' in kwargs: if 'prop' not in kwargs: kwargs['prop'] = {'size': kwargs['size']} else: kwargs['prop']['size'] = kwargs['size'] del kwargs['size'] self.legend.add_legend = True self.legend.legend_labels = labels self.legend.legend_kwargs = kwargs return
[ "def", "add_legend", "(", "self", ",", "labels", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "'size'", "in", "kwargs", ":", "if", "'prop'", "not", "in", "kwargs", ":", "kwargs", "[", "'prop'", "]", "=", "{", "'size'", ":", "kwargs", "[", ...
Specify legend for a plot. Adds labels and basic legend specifications for specific plot. For the optional Args, refer to https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html for more information. # TODO: Add legend capabilities for Loss/Gain plots. This is possible using the return_fig_ax kwarg in the main plotting function. Args: labels (list of str): String representing each item in plot that will be added to the legend. Keyword Arguments: loc (str, int, len-2 list of floats, optional): Location of legend. See matplotlib documentation for more detail. Default is None. bbox_to_anchor (2-tuple or 4-tuple of floats, optional): Specify position and size of legend box. 2-tuple will specify (x,y) coordinate of part of box specified with `loc` kwarg. 4-tuple will specify (x, y, width, height). See matplotlib documentation for more detail. Default is None. size (float, optional): Set size of legend using call to `prop` dict in legend call. See matplotlib documentaiton for more detail. Default is None. ncol (int, optional): Number of columns in the legend. Note: Other kwargs are available. See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html
[ "Specify", "legend", "for", "a", "plot", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L304-L347
mikekatz04/BOWIE
bowie/plotutils/forminput.py
DataImport.add_dataset
def add_dataset(self, name=None, label=None, x_column_label=None, y_column_label=None, index=None, control=False): """Add a dataset to a specific plot. This method adds a dataset to a plot. Its functional use is imperative to the plot generation. It handles adding new files as well as indexing to files that are added to other plots. All Args default to None. However, these are note the defaults in the code. See DataImportContainer attributes for defaults in code. Args: name (str, optional): Name (path) for file. Required if reading from a file (at least one). Required if file_name is not in "general". Must be ".txt" or ".hdf5". Can include path from working directory. label (str, optional): Column label in the dataset corresponding to desired SNR value. Required if reading from a file (at least one). x_column_label/y_column_label (str, optional): Column label from input file identifying x/y values. This can override setting in "general". Default is `x`/`y`. index (int, optional): Index of plot with preloaded data. Required if not loading a file. control (bool, optional): If True, this dataset is set to the control. This is needed for Ratio plots. It sets the baseline. Default is False. Raises: ValueError: If no options are passes. This means no file indication nor index. """ if name is None and label is None and index is None: raise ValueError("Attempting to add a dataset without" + "supplying index or file information.") if index is None: trans_dict = DataImportContainer() if name is not None: trans_dict.file_name = name if label is not None: trans_dict.label = label if x_column_label is not None: trans_dict.x_column_label = x_column_label if y_column_label is not None: trans_dict.y_column_label = y_column_label if control: self.control = trans_dict else: # need to append file to file list. 
if 'file' not in self.__dict__: self.file = [] self.file.append(trans_dict) else: if control: self.control = DataImportContainer() self.control.index = index else: # need to append index to index list. if 'indices' not in self.__dict__: self.indices = [] self.indices.append(index) return
python
def add_dataset(self, name=None, label=None, x_column_label=None, y_column_label=None, index=None, control=False): """Add a dataset to a specific plot. This method adds a dataset to a plot. Its functional use is imperative to the plot generation. It handles adding new files as well as indexing to files that are added to other plots. All Args default to None. However, these are note the defaults in the code. See DataImportContainer attributes for defaults in code. Args: name (str, optional): Name (path) for file. Required if reading from a file (at least one). Required if file_name is not in "general". Must be ".txt" or ".hdf5". Can include path from working directory. label (str, optional): Column label in the dataset corresponding to desired SNR value. Required if reading from a file (at least one). x_column_label/y_column_label (str, optional): Column label from input file identifying x/y values. This can override setting in "general". Default is `x`/`y`. index (int, optional): Index of plot with preloaded data. Required if not loading a file. control (bool, optional): If True, this dataset is set to the control. This is needed for Ratio plots. It sets the baseline. Default is False. Raises: ValueError: If no options are passes. This means no file indication nor index. """ if name is None and label is None and index is None: raise ValueError("Attempting to add a dataset without" + "supplying index or file information.") if index is None: trans_dict = DataImportContainer() if name is not None: trans_dict.file_name = name if label is not None: trans_dict.label = label if x_column_label is not None: trans_dict.x_column_label = x_column_label if y_column_label is not None: trans_dict.y_column_label = y_column_label if control: self.control = trans_dict else: # need to append file to file list. 
if 'file' not in self.__dict__: self.file = [] self.file.append(trans_dict) else: if control: self.control = DataImportContainer() self.control.index = index else: # need to append index to index list. if 'indices' not in self.__dict__: self.indices = [] self.indices.append(index) return
[ "def", "add_dataset", "(", "self", ",", "name", "=", "None", ",", "label", "=", "None", ",", "x_column_label", "=", "None", ",", "y_column_label", "=", "None", ",", "index", "=", "None", ",", "control", "=", "False", ")", ":", "if", "name", "is", "No...
Add a dataset to a specific plot. This method adds a dataset to a plot. Its functional use is imperative to the plot generation. It handles adding new files as well as indexing to files that are added to other plots. All Args default to None. However, these are note the defaults in the code. See DataImportContainer attributes for defaults in code. Args: name (str, optional): Name (path) for file. Required if reading from a file (at least one). Required if file_name is not in "general". Must be ".txt" or ".hdf5". Can include path from working directory. label (str, optional): Column label in the dataset corresponding to desired SNR value. Required if reading from a file (at least one). x_column_label/y_column_label (str, optional): Column label from input file identifying x/y values. This can override setting in "general". Default is `x`/`y`. index (int, optional): Index of plot with preloaded data. Required if not loading a file. control (bool, optional): If True, this dataset is set to the control. This is needed for Ratio plots. It sets the baseline. Default is False. Raises: ValueError: If no options are passes. This means no file indication nor index.
[ "Add", "a", "dataset", "to", "a", "specific", "plot", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L503-L571
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Figure.savefig
def savefig(self, output_path, **kwargs): """Save figure during generation. This method is used to save a completed figure during the main function run. It represents a call to ``matplotlib.pyplot.fig.savefig``. # TODO: Switch to kwargs for matplotlib.pyplot.savefig Args: output_path (str): Relative path to the WORKING_DIRECTORY to save the figure. Keyword Arguments: dpi (int, optional): Dots per inch of figure. Default is 200. Note: Other kwargs are available. See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html """ self.figure.save_figure = True self.figure.output_path = output_path self.figure.savefig_kwargs = kwargs return
python
def savefig(self, output_path, **kwargs): """Save figure during generation. This method is used to save a completed figure during the main function run. It represents a call to ``matplotlib.pyplot.fig.savefig``. # TODO: Switch to kwargs for matplotlib.pyplot.savefig Args: output_path (str): Relative path to the WORKING_DIRECTORY to save the figure. Keyword Arguments: dpi (int, optional): Dots per inch of figure. Default is 200. Note: Other kwargs are available. See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html """ self.figure.save_figure = True self.figure.output_path = output_path self.figure.savefig_kwargs = kwargs return
[ "def", "savefig", "(", "self", ",", "output_path", ",", "*", "*", "kwargs", ")", ":", "self", ".", "figure", ".", "save_figure", "=", "True", "self", ".", "figure", ".", "output_path", "=", "output_path", "self", ".", "figure", ".", "savefig_kwargs", "="...
Save figure during generation. This method is used to save a completed figure during the main function run. It represents a call to ``matplotlib.pyplot.fig.savefig``. # TODO: Switch to kwargs for matplotlib.pyplot.savefig Args: output_path (str): Relative path to the WORKING_DIRECTORY to save the figure. Keyword Arguments: dpi (int, optional): Dots per inch of figure. Default is 200. Note: Other kwargs are available. See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html
[ "Save", "figure", "during", "generation", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L665-L685
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Figure.set_fig_size
def set_fig_size(self, width, height=None): """Set the figure size in inches. Sets the figure size with a call to fig.set_size_inches. Default in code is 8 inches for each. Args: width (float): Dimensions for figure width in inches. height (float, optional): Dimensions for figure height in inches. Default is None. """ self.figure.figure_width = width self.figure.figure_height = height return
python
def set_fig_size(self, width, height=None): """Set the figure size in inches. Sets the figure size with a call to fig.set_size_inches. Default in code is 8 inches for each. Args: width (float): Dimensions for figure width in inches. height (float, optional): Dimensions for figure height in inches. Default is None. """ self.figure.figure_width = width self.figure.figure_height = height return
[ "def", "set_fig_size", "(", "self", ",", "width", ",", "height", "=", "None", ")", ":", "self", ".", "figure", ".", "figure_width", "=", "width", "self", ".", "figure", ".", "figure_height", "=", "height", "return" ]
Set the figure size in inches. Sets the figure size with a call to fig.set_size_inches. Default in code is 8 inches for each. Args: width (float): Dimensions for figure width in inches. height (float, optional): Dimensions for figure height in inches. Default is None.
[ "Set", "the", "figure", "size", "in", "inches", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L697-L710
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Figure.set_spacing
def set_spacing(self, space): """Set the figure spacing. Sets whether in general there is space between subplots. If all axes are shared, this can be `tight`. Default in code is `wide`. The main difference is the tick labels extend to the ends if space==`wide`. If space==`tight`, the edge tick labels are cut off for clearity. Args: space (str): Sets spacing for subplots. Either `wide` or `tight`. """ self.figure.spacing = space if 'subplots_adjust_kwargs' not in self.figure.__dict__: self.figure.subplots_adjust_kwargs = {} if space == 'wide': self.figure.subplots_adjust_kwargs['hspace'] = 0.3 self.figure.subplots_adjust_kwargs['wspace'] = 0.3 else: self.figure.subplots_adjust_kwargs['hspace'] = 0.0 self.figure.subplots_adjust_kwargs['wspace'] = 0.0 return
python
def set_spacing(self, space): """Set the figure spacing. Sets whether in general there is space between subplots. If all axes are shared, this can be `tight`. Default in code is `wide`. The main difference is the tick labels extend to the ends if space==`wide`. If space==`tight`, the edge tick labels are cut off for clearity. Args: space (str): Sets spacing for subplots. Either `wide` or `tight`. """ self.figure.spacing = space if 'subplots_adjust_kwargs' not in self.figure.__dict__: self.figure.subplots_adjust_kwargs = {} if space == 'wide': self.figure.subplots_adjust_kwargs['hspace'] = 0.3 self.figure.subplots_adjust_kwargs['wspace'] = 0.3 else: self.figure.subplots_adjust_kwargs['hspace'] = 0.0 self.figure.subplots_adjust_kwargs['wspace'] = 0.0 return
[ "def", "set_spacing", "(", "self", ",", "space", ")", ":", "self", ".", "figure", ".", "spacing", "=", "space", "if", "'subplots_adjust_kwargs'", "not", "in", "self", ".", "figure", ".", "__dict__", ":", "self", ".", "figure", ".", "subplots_adjust_kwargs", ...
Set the figure spacing. Sets whether in general there is space between subplots. If all axes are shared, this can be `tight`. Default in code is `wide`. The main difference is the tick labels extend to the ends if space==`wide`. If space==`tight`, the edge tick labels are cut off for clearity. Args: space (str): Sets spacing for subplots. Either `wide` or `tight`.
[ "Set", "the", "figure", "spacing", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L712-L735
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Figure.subplots_adjust
def subplots_adjust(self, **kwargs): """Adjust subplot spacing and dimensions. Adjust bottom, top, right, left, width in between plots, and height in between plots with a call to ``plt.subplots_adjust``. See https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots_adjust.html for more information. Keyword Arguments: bottom (float, optional): Sets position of bottom of subplots in figure coordinates. Default is 0.1. top (float, optional): Sets position of top of subplots in figure coordinates. Default is 0.85. left (float, optional): Sets position of left edge of subplots in figure coordinates. Default is 0.12. right (float, optional): Sets position of right edge of subplots in figure coordinates. Default is 0.79. wspace (float, optional): The amount of width reserved for space between subplots, It is expressed as a fraction of the average axis width. Default is 0.3. hspace (float, optional): The amount of height reserved for space between subplots, It is expressed as a fraction of the average axis width. Default is 0.3. """ prop_default = { 'bottom': 0.1, 'top': 0.85, 'right': 0.9, 'left': 0.12, 'hspace': 0.3, 'wspace': 0.3, } if 'subplots_adjust_kwargs' in self.figure.__dict__: for key, value in self.figure.subplots_adjust_kwargs.items(): prop_default[key] = value for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) self.figure.subplots_adjust_kwargs = kwargs return
python
def subplots_adjust(self, **kwargs): """Adjust subplot spacing and dimensions. Adjust bottom, top, right, left, width in between plots, and height in between plots with a call to ``plt.subplots_adjust``. See https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots_adjust.html for more information. Keyword Arguments: bottom (float, optional): Sets position of bottom of subplots in figure coordinates. Default is 0.1. top (float, optional): Sets position of top of subplots in figure coordinates. Default is 0.85. left (float, optional): Sets position of left edge of subplots in figure coordinates. Default is 0.12. right (float, optional): Sets position of right edge of subplots in figure coordinates. Default is 0.79. wspace (float, optional): The amount of width reserved for space between subplots, It is expressed as a fraction of the average axis width. Default is 0.3. hspace (float, optional): The amount of height reserved for space between subplots, It is expressed as a fraction of the average axis width. Default is 0.3. """ prop_default = { 'bottom': 0.1, 'top': 0.85, 'right': 0.9, 'left': 0.12, 'hspace': 0.3, 'wspace': 0.3, } if 'subplots_adjust_kwargs' in self.figure.__dict__: for key, value in self.figure.subplots_adjust_kwargs.items(): prop_default[key] = value for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) self.figure.subplots_adjust_kwargs = kwargs return
[ "def", "subplots_adjust", "(", "self", ",", "*", "*", "kwargs", ")", ":", "prop_default", "=", "{", "'bottom'", ":", "0.1", ",", "'top'", ":", "0.85", ",", "'right'", ":", "0.9", ",", "'left'", ":", "0.12", ",", "'hspace'", ":", "0.3", ",", "'wspace'...
Adjust subplot spacing and dimensions. Adjust bottom, top, right, left, width in between plots, and height in between plots with a call to ``plt.subplots_adjust``. See https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots_adjust.html for more information. Keyword Arguments: bottom (float, optional): Sets position of bottom of subplots in figure coordinates. Default is 0.1. top (float, optional): Sets position of top of subplots in figure coordinates. Default is 0.85. left (float, optional): Sets position of left edge of subplots in figure coordinates. Default is 0.12. right (float, optional): Sets position of right edge of subplots in figure coordinates. Default is 0.79. wspace (float, optional): The amount of width reserved for space between subplots, It is expressed as a fraction of the average axis width. Default is 0.3. hspace (float, optional): The amount of height reserved for space between subplots, It is expressed as a fraction of the average axis width. Default is 0.3.
[ "Adjust", "subplot", "spacing", "and", "dimensions", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L737-L778
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Figure.set_fig_x_label
def set_fig_x_label(self, xlabel, **kwargs): """Set overall figure x. Set label for x axis on overall figure. This is not for a specific plot. It will place the label on the figure at the left with a call to ``fig.text``. Args: xlabel (str): xlabel for entire figure. Keyword Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.01 for x and 0.51 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'center'. fontsize/size (int): The font size of the text. Default is 20. rotation (float or str): Rotation of label. Options are angle in degrees, `horizontal`, or `vertical`. Default is `vertical`. Note: Other kwargs are available. See https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext """ prop_default = { 'x': 0.01, 'y': 0.51, 'fontsize': 20, 'rotation': 'vertical', 'va': 'center', } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) self._set_fig_label('x', xlabel, **kwargs) return
python
def set_fig_x_label(self, xlabel, **kwargs): """Set overall figure x. Set label for x axis on overall figure. This is not for a specific plot. It will place the label on the figure at the left with a call to ``fig.text``. Args: xlabel (str): xlabel for entire figure. Keyword Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.01 for x and 0.51 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'center'. fontsize/size (int): The font size of the text. Default is 20. rotation (float or str): Rotation of label. Options are angle in degrees, `horizontal`, or `vertical`. Default is `vertical`. Note: Other kwargs are available. See https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext """ prop_default = { 'x': 0.01, 'y': 0.51, 'fontsize': 20, 'rotation': 'vertical', 'va': 'center', } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) self._set_fig_label('x', xlabel, **kwargs) return
[ "def", "set_fig_x_label", "(", "self", ",", "xlabel", ",", "*", "*", "kwargs", ")", ":", "prop_default", "=", "{", "'x'", ":", "0.01", ",", "'y'", ":", "0.51", ",", "'fontsize'", ":", "20", ",", "'rotation'", ":", "'vertical'", ",", "'va'", ":", "'ce...
Set overall figure x. Set label for x axis on overall figure. This is not for a specific plot. It will place the label on the figure at the left with a call to ``fig.text``. Args: xlabel (str): xlabel for entire figure. Keyword Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.01 for x and 0.51 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'center'. fontsize/size (int): The font size of the text. Default is 20. rotation (float or str): Rotation of label. Options are angle in degrees, `horizontal`, or `vertical`. Default is `vertical`. Note: Other kwargs are available. See https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext
[ "Set", "overall", "figure", "x", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L786-L823
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Figure.set_fig_y_label
def set_fig_y_label(self, ylabel, **kwargs): """Set overall figure y. Set label for y axis on overall figure. This is not for a specific plot. It will place the label on the figure at the left with a call to ``fig.text``. Args: ylabel (str): ylabel for entire figure. Keyword Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.45 for x and 0.02 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'top'. fontsize/size (int): The font size of the text. Default is 20. rotation (float or str): Rotation of label. Options are angle in degrees, `horizontal`, or `vertical`. Default is `horizontal`. Note: Other kwargs are available. See https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext """ prop_default = { 'x': 0.45, 'y': 0.02, 'fontsize': 20, 'rotation': 'horizontal', 'ha': 'center', } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) self._set_fig_label('y', ylabel, **kwargs) return
python
def set_fig_y_label(self, ylabel, **kwargs): """Set overall figure y. Set label for y axis on overall figure. This is not for a specific plot. It will place the label on the figure at the left with a call to ``fig.text``. Args: ylabel (str): ylabel for entire figure. Keyword Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.45 for x and 0.02 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'top'. fontsize/size (int): The font size of the text. Default is 20. rotation (float or str): Rotation of label. Options are angle in degrees, `horizontal`, or `vertical`. Default is `horizontal`. Note: Other kwargs are available. See https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext """ prop_default = { 'x': 0.45, 'y': 0.02, 'fontsize': 20, 'rotation': 'horizontal', 'ha': 'center', } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) self._set_fig_label('y', ylabel, **kwargs) return
[ "def", "set_fig_y_label", "(", "self", ",", "ylabel", ",", "*", "*", "kwargs", ")", ":", "prop_default", "=", "{", "'x'", ":", "0.45", ",", "'y'", ":", "0.02", ",", "'fontsize'", ":", "20", ",", "'rotation'", ":", "'horizontal'", ",", "'ha'", ":", "'...
Set overall figure y. Set label for y axis on overall figure. This is not for a specific plot. It will place the label on the figure at the left with a call to ``fig.text``. Args: ylabel (str): ylabel for entire figure. Keyword Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.45 for x and 0.02 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'top'. fontsize/size (int): The font size of the text. Default is 20. rotation (float or str): Rotation of label. Options are angle in degrees, `horizontal`, or `vertical`. Default is `horizontal`. Note: Other kwargs are available. See https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext
[ "Set", "overall", "figure", "y", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L825-L862
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Figure.set_fig_title
def set_fig_title(self, title, **kwargs): """Set overall figure title. Set title for overall figure. This is not for a specific plot. It will place the title at the top of the figure with a call to ``fig.suptitle``. Args: title (str): Figure title. Keywork Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.5 for x and 0.98 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'top'. fontsize/size (int, optional): The font size of the text. Default is 20. """ prop_default = { 'fontsize': 20, } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) self.figure.fig_title = title self.figure.fig_title_kwargs = kwargs return
python
def set_fig_title(self, title, **kwargs): """Set overall figure title. Set title for overall figure. This is not for a specific plot. It will place the title at the top of the figure with a call to ``fig.suptitle``. Args: title (str): Figure title. Keywork Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.5 for x and 0.98 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'top'. fontsize/size (int, optional): The font size of the text. Default is 20. """ prop_default = { 'fontsize': 20, } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) self.figure.fig_title = title self.figure.fig_title_kwargs = kwargs return
[ "def", "set_fig_title", "(", "self", ",", "title", ",", "*", "*", "kwargs", ")", ":", "prop_default", "=", "{", "'fontsize'", ":", "20", ",", "}", "for", "prop", ",", "default", "in", "prop_default", ".", "items", "(", ")", ":", "kwargs", "[", "prop"...
Set overall figure title. Set title for overall figure. This is not for a specific plot. It will place the title at the top of the figure with a call to ``fig.suptitle``. Args: title (str): Figure title. Keywork Arguments: x/y (float, optional): The x/y location of the text in figure coordinates. Defaults are 0.5 for x and 0.98 for y. horizontalalignment/ha (str, optional): The horizontal alignment of the text relative to (x, y). Optionas are 'center', 'left', or 'right'. Default is 'center'. verticalalignment/va (str, optional): The vertical alignment of the text relative to (x, y). Optionas are 'top', 'center', 'bottom', or 'baseline'. Default is 'top'. fontsize/size (int, optional): The font size of the text. Default is 20.
[ "Set", "overall", "figure", "title", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L864-L894
mikekatz04/BOWIE
bowie/plotutils/forminput.py
Figure.set_colorbar
def set_colorbar(self, plot_type, **kwargs): """Setup colorbar for specific type of plot. Specify a plot type to customize its corresponding colorbar in the figure. See the ColorbarContainer class attributes for more specific explanations. Args: plot_type (str): Type of plot to adjust. e.g. `Ratio` label (str, optional): Label for colorbar. Default is None. label_fontsize (int, optional): Fontsize for colorbar label. Default is None. ticks_fontsize (int, optional): Fontsize for colorbar tick labels. Default is None. pos (int, optional): Set a position for colorbar based on defaults. Default is None. colorbar_axes (len-4 list of floats): List for custom axes placement of the colorbar. See fig.add_axes from matplotlib. url: https://matplotlib.org/2.0.0/api/figure_api.html Raises: UserWarning: User calls set_colorbar without supplying any Args. This will not stop the code. """ prop_default = { 'cbar_label': None, 'cbar_ticks_fontsize': 15, 'cbar_label_fontsize': 20, 'cbar_axes': [], 'cbar_ticks': [], 'cbar_tick_labels': [], 'cbar_pos': 'use_default', 'cbar_label_pad': None, } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop[5:], default) if prop[5:] in kwargs: del kwargs[prop[5:]] if 'colorbars' not in self.figure.__dict__: self.figure.colorbars = {} self.figure.colorbars[plot_type] = kwargs return
python
def set_colorbar(self, plot_type, **kwargs): """Setup colorbar for specific type of plot. Specify a plot type to customize its corresponding colorbar in the figure. See the ColorbarContainer class attributes for more specific explanations. Args: plot_type (str): Type of plot to adjust. e.g. `Ratio` label (str, optional): Label for colorbar. Default is None. label_fontsize (int, optional): Fontsize for colorbar label. Default is None. ticks_fontsize (int, optional): Fontsize for colorbar tick labels. Default is None. pos (int, optional): Set a position for colorbar based on defaults. Default is None. colorbar_axes (len-4 list of floats): List for custom axes placement of the colorbar. See fig.add_axes from matplotlib. url: https://matplotlib.org/2.0.0/api/figure_api.html Raises: UserWarning: User calls set_colorbar without supplying any Args. This will not stop the code. """ prop_default = { 'cbar_label': None, 'cbar_ticks_fontsize': 15, 'cbar_label_fontsize': 20, 'cbar_axes': [], 'cbar_ticks': [], 'cbar_tick_labels': [], 'cbar_pos': 'use_default', 'cbar_label_pad': None, } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop[5:], default) if prop[5:] in kwargs: del kwargs[prop[5:]] if 'colorbars' not in self.figure.__dict__: self.figure.colorbars = {} self.figure.colorbars[plot_type] = kwargs return
[ "def", "set_colorbar", "(", "self", ",", "plot_type", ",", "*", "*", "kwargs", ")", ":", "prop_default", "=", "{", "'cbar_label'", ":", "None", ",", "'cbar_ticks_fontsize'", ":", "15", ",", "'cbar_label_fontsize'", ":", "20", ",", "'cbar_axes'", ":", "[", ...
Setup colorbar for specific type of plot. Specify a plot type to customize its corresponding colorbar in the figure. See the ColorbarContainer class attributes for more specific explanations. Args: plot_type (str): Type of plot to adjust. e.g. `Ratio` label (str, optional): Label for colorbar. Default is None. label_fontsize (int, optional): Fontsize for colorbar label. Default is None. ticks_fontsize (int, optional): Fontsize for colorbar tick labels. Default is None. pos (int, optional): Set a position for colorbar based on defaults. Default is None. colorbar_axes (len-4 list of floats): List for custom axes placement of the colorbar. See fig.add_axes from matplotlib. url: https://matplotlib.org/2.0.0/api/figure_api.html Raises: UserWarning: User calls set_colorbar without supplying any Args. This will not stop the code.
[ "Setup", "colorbar", "for", "specific", "type", "of", "plot", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L896-L939
mikekatz04/BOWIE
bowie/plotutils/forminput.py
General.set_all_file_column_labels
def set_all_file_column_labels(self, xlabel=None, ylabel=None): """Indicate general x,y column labels. This sets the general x and y column labels into data files for all plots. It can be overridden for specific plots. Args: xlabel/ylabel (str, optional): String indicating column label for x,y values into the data files. Default is None. Raises: UserWarning: If xlabel and ylabel are both not specified, The user will be alerted, but the code will not stop. """ if xlabel is not None: self.general.x_column_label = xlabel if ylabel is not None: self.general.y_column_label = ylabel if xlabel is None and ylabel is None: warnings.warn("is not specifying x or y lables even" + "though column labels function is called.", UserWarning) return
python
def set_all_file_column_labels(self, xlabel=None, ylabel=None): """Indicate general x,y column labels. This sets the general x and y column labels into data files for all plots. It can be overridden for specific plots. Args: xlabel/ylabel (str, optional): String indicating column label for x,y values into the data files. Default is None. Raises: UserWarning: If xlabel and ylabel are both not specified, The user will be alerted, but the code will not stop. """ if xlabel is not None: self.general.x_column_label = xlabel if ylabel is not None: self.general.y_column_label = ylabel if xlabel is None and ylabel is None: warnings.warn("is not specifying x or y lables even" + "though column labels function is called.", UserWarning) return
[ "def", "set_all_file_column_labels", "(", "self", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ")", ":", "if", "xlabel", "is", "not", "None", ":", "self", ".", "general", ".", "x_column_label", "=", "xlabel", "if", "ylabel", "is", "not", "None...
Indicate general x,y column labels. This sets the general x and y column labels into data files for all plots. It can be overridden for specific plots. Args: xlabel/ylabel (str, optional): String indicating column label for x,y values into the data files. Default is None. Raises: UserWarning: If xlabel and ylabel are both not specified, The user will be alerted, but the code will not stop.
[ "Indicate", "general", "x", "y", "column", "labels", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L1040-L1062
mikekatz04/BOWIE
bowie/plotutils/forminput.py
General._set_all_lims
def _set_all_lims(self, which, lim, d, scale, fontsize=None): """Set limits and ticks for an axis for whole figure. This will set axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lim (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for associated axis tick marks. Default is None. """ setattr(self.general, which + 'lims', lim) setattr(self.general, 'd' + which, d) setattr(self.general, which + 'scale', scale) if fontsize is not None: setattr(self.general, which + '_tick_label_fontsize', fontsize) return
python
def _set_all_lims(self, which, lim, d, scale, fontsize=None): """Set limits and ticks for an axis for whole figure. This will set axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lim (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for associated axis tick marks. Default is None. """ setattr(self.general, which + 'lims', lim) setattr(self.general, 'd' + which, d) setattr(self.general, which + 'scale', scale) if fontsize is not None: setattr(self.general, which + '_tick_label_fontsize', fontsize) return
[ "def", "_set_all_lims", "(", "self", ",", "which", ",", "lim", ",", "d", ",", "scale", ",", "fontsize", "=", "None", ")", ":", "setattr", "(", "self", ".", "general", ",", "which", "+", "'lims'", ",", "lim", ")", "setattr", "(", "self", ".", "gener...
Set limits and ticks for an axis for whole figure. This will set axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lim (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for associated axis tick marks. Default is None.
[ "Set", "limits", "and", "ticks", "for", "an", "axis", "for", "whole", "figure", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L1064-L1087
mikekatz04/BOWIE
bowie/plotutils/forminput.py
General.set_all_xlims
def set_all_xlims(self, xlim, dx, xscale, fontsize=None): """Set limits and ticks for x axis for whole figure. This will set x axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: xlim (len-2 list of floats): The limits for the axis. dx (float): Amount to increment by between the limits. xscale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for x axis tick marks. Default is None. """ self._set_all_lims('x', xlim, dx, xscale, fontsize) return
python
def set_all_xlims(self, xlim, dx, xscale, fontsize=None): """Set limits and ticks for x axis for whole figure. This will set x axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: xlim (len-2 list of floats): The limits for the axis. dx (float): Amount to increment by between the limits. xscale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for x axis tick marks. Default is None. """ self._set_all_lims('x', xlim, dx, xscale, fontsize) return
[ "def", "set_all_xlims", "(", "self", ",", "xlim", ",", "dx", ",", "xscale", ",", "fontsize", "=", "None", ")", ":", "self", ".", "_set_all_lims", "(", "'x'", ",", "xlim", ",", "dx", ",", "xscale", ",", "fontsize", ")", "return" ]
Set limits and ticks for x axis for whole figure. This will set x axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: xlim (len-2 list of floats): The limits for the axis. dx (float): Amount to increment by between the limits. xscale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for x axis tick marks. Default is None.
[ "Set", "limits", "and", "ticks", "for", "x", "axis", "for", "whole", "figure", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L1089-L1104
mikekatz04/BOWIE
bowie/plotutils/forminput.py
General.set_all_ylims
def set_all_ylims(self, ylim, dy, yscale, fontsize=None): """Set limits and ticks for y axis for whole figure. This will set y axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: ylim (len-2 list of floats): The limits for the axis. dy (float): Amount to increment by between the limits. yscale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for y axis tick marks. Default is None. """ self._set_all_lims('y', ylim, dy, yscale, fontsize) return
python
def set_all_ylims(self, ylim, dy, yscale, fontsize=None): """Set limits and ticks for y axis for whole figure. This will set y axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: ylim (len-2 list of floats): The limits for the axis. dy (float): Amount to increment by between the limits. yscale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for y axis tick marks. Default is None. """ self._set_all_lims('y', ylim, dy, yscale, fontsize) return
[ "def", "set_all_ylims", "(", "self", ",", "ylim", ",", "dy", ",", "yscale", ",", "fontsize", "=", "None", ")", ":", "self", ".", "_set_all_lims", "(", "'y'", ",", "ylim", ",", "dy", ",", "yscale", ",", "fontsize", ")", "return" ]
Set limits and ticks for y axis for whole figure. This will set y axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: ylim (len-2 list of floats): The limits for the axis. dy (float): Amount to increment by between the limits. yscale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for y axis tick marks. Default is None.
[ "Set", "limits", "and", "ticks", "for", "y", "axis", "for", "whole", "figure", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L1106-L1121
mikekatz04/BOWIE
bowie/plotutils/forminput.py
General.reverse_axis
def reverse_axis(self, axis_to_reverse): """Reverse an axis in all figure plots. This will reverse the tick marks on an axis for each plot in the figure. It can be overridden in SinglePlot class. Args: axis_to_reverse (str): Axis to reverse. Supports `x` and `y`. Raises: ValueError: The string representing the axis to reverse is not `x` or `y`. """ if axis_to_reverse.lower() == 'x': self.general.reverse_x_axis = True if axis_to_reverse.lower() == 'y': self.general.reverse_y_axis = True if axis_to_reverse.lower() != 'x' or axis_to_reverse.lower() != 'y': raise ValueError('Axis for reversing needs to be either x or y.') return
python
def reverse_axis(self, axis_to_reverse): """Reverse an axis in all figure plots. This will reverse the tick marks on an axis for each plot in the figure. It can be overridden in SinglePlot class. Args: axis_to_reverse (str): Axis to reverse. Supports `x` and `y`. Raises: ValueError: The string representing the axis to reverse is not `x` or `y`. """ if axis_to_reverse.lower() == 'x': self.general.reverse_x_axis = True if axis_to_reverse.lower() == 'y': self.general.reverse_y_axis = True if axis_to_reverse.lower() != 'x' or axis_to_reverse.lower() != 'y': raise ValueError('Axis for reversing needs to be either x or y.') return
[ "def", "reverse_axis", "(", "self", ",", "axis_to_reverse", ")", ":", "if", "axis_to_reverse", ".", "lower", "(", ")", "==", "'x'", ":", "self", ".", "general", ".", "reverse_x_axis", "=", "True", "if", "axis_to_reverse", ".", "lower", "(", ")", "==", "'...
Reverse an axis in all figure plots. This will reverse the tick marks on an axis for each plot in the figure. It can be overridden in SinglePlot class. Args: axis_to_reverse (str): Axis to reverse. Supports `x` and `y`. Raises: ValueError: The string representing the axis to reverse is not `x` or `y`.
[ "Reverse", "an", "axis", "in", "all", "figure", "plots", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L1149-L1168
mikekatz04/BOWIE
bowie/plotutils/forminput.py
MainContainer.return_dict
def return_dict(self): """Output dictionary for ``make_plot.py`` input. Iterates through the entire MainContainer class turning its contents into dictionary form. This dictionary becomes the input for ``make_plot.py``. If `print_input` attribute is True, the entire dictionary will be printed prior to returning the dicitonary. Returns: - **output_dict** (*dict*): Dicitonary for input into ``make_plot.py``. """ output_dict = {} output_dict['general'] = self._iterate_through_class(self.general.__dict__) output_dict['figure'] = self._iterate_through_class(self.figure.__dict__) if self.total_plots > 1: trans_dict = ({ str(i): self._iterate_through_class(axis.__dict__) for i, axis in enumerate(self.ax)}) output_dict['plot_info'] = trans_dict else: output_dict['plot_info'] = {'0': self._iterate_through_class(self.ax.__dict__)} if self.print_input: print(output_dict) return output_dict
python
def return_dict(self): """Output dictionary for ``make_plot.py`` input. Iterates through the entire MainContainer class turning its contents into dictionary form. This dictionary becomes the input for ``make_plot.py``. If `print_input` attribute is True, the entire dictionary will be printed prior to returning the dicitonary. Returns: - **output_dict** (*dict*): Dicitonary for input into ``make_plot.py``. """ output_dict = {} output_dict['general'] = self._iterate_through_class(self.general.__dict__) output_dict['figure'] = self._iterate_through_class(self.figure.__dict__) if self.total_plots > 1: trans_dict = ({ str(i): self._iterate_through_class(axis.__dict__) for i, axis in enumerate(self.ax)}) output_dict['plot_info'] = trans_dict else: output_dict['plot_info'] = {'0': self._iterate_through_class(self.ax.__dict__)} if self.print_input: print(output_dict) return output_dict
[ "def", "return_dict", "(", "self", ")", ":", "output_dict", "=", "{", "}", "output_dict", "[", "'general'", "]", "=", "self", ".", "_iterate_through_class", "(", "self", ".", "general", ".", "__dict__", ")", "output_dict", "[", "'figure'", "]", "=", "self"...
Output dictionary for ``make_plot.py`` input. Iterates through the entire MainContainer class turning its contents into dictionary form. This dictionary becomes the input for ``make_plot.py``. If `print_input` attribute is True, the entire dictionary will be printed prior to returning the dicitonary. Returns: - **output_dict** (*dict*): Dicitonary for input into ``make_plot.py``.
[ "Output", "dictionary", "for", "make_plot", ".", "py", "input", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L1274-L1302
mikekatz04/BOWIE
bowie/plotutils/forminput.py
MainContainer._iterate_through_class
def _iterate_through_class(self, class_dict): """Recursive function for output dictionary creation. Function will check each value in a dictionary to see if it is a class, list, or dictionary object. The idea is to turn all class objects into dictionaries. If it is a class object it will pass its ``class.__dict__`` recursively through this function again. If it is a dictionary, it will pass the dictionary recursively through this functin again. If the object is a list, it will iterate through entries checking for class or dictionary objects and pass them recursively through this function. This uses the knowledge of the list structures in the code. Args: class_dict (obj): Dictionary to iteratively check. Returns: Dictionary with all class objects turned into dictionaries. """ output_dict = {} for key in class_dict: val = class_dict[key] try: val = val.__dict__ except AttributeError: pass if type(val) is dict: val = self._iterate_through_class(val) if type(val) is list: temp_val = [] for val_i in val: try: val_i = val_i.__dict__ except AttributeError: pass if type(val_i) is dict: val_i = self._iterate_through_class(val_i) temp_val.append(val_i) val = temp_val output_dict[key] = val return output_dict
python
def _iterate_through_class(self, class_dict): """Recursive function for output dictionary creation. Function will check each value in a dictionary to see if it is a class, list, or dictionary object. The idea is to turn all class objects into dictionaries. If it is a class object it will pass its ``class.__dict__`` recursively through this function again. If it is a dictionary, it will pass the dictionary recursively through this functin again. If the object is a list, it will iterate through entries checking for class or dictionary objects and pass them recursively through this function. This uses the knowledge of the list structures in the code. Args: class_dict (obj): Dictionary to iteratively check. Returns: Dictionary with all class objects turned into dictionaries. """ output_dict = {} for key in class_dict: val = class_dict[key] try: val = val.__dict__ except AttributeError: pass if type(val) is dict: val = self._iterate_through_class(val) if type(val) is list: temp_val = [] for val_i in val: try: val_i = val_i.__dict__ except AttributeError: pass if type(val_i) is dict: val_i = self._iterate_through_class(val_i) temp_val.append(val_i) val = temp_val output_dict[key] = val return output_dict
[ "def", "_iterate_through_class", "(", "self", ",", "class_dict", ")", ":", "output_dict", "=", "{", "}", "for", "key", "in", "class_dict", ":", "val", "=", "class_dict", "[", "key", "]", "try", ":", "val", "=", "val", ".", "__dict__", "except", "Attribut...
Recursive function for output dictionary creation. Function will check each value in a dictionary to see if it is a class, list, or dictionary object. The idea is to turn all class objects into dictionaries. If it is a class object it will pass its ``class.__dict__`` recursively through this function again. If it is a dictionary, it will pass the dictionary recursively through this functin again. If the object is a list, it will iterate through entries checking for class or dictionary objects and pass them recursively through this function. This uses the knowledge of the list structures in the code. Args: class_dict (obj): Dictionary to iteratively check. Returns: Dictionary with all class objects turned into dictionaries.
[ "Recursive", "function", "for", "output", "dictionary", "creation", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L1304-L1350
mikekatz04/BOWIE
bowie/plotutils/readdata.py
ReadInData.txt_read_in
def txt_read_in(self): """Read in txt files. Method for reading in text or csv files. This uses ascii class from astropy.io for flexible input. It is slower than numpy, but has greater flexibility with less input. """ # read in data = ascii.read(self.WORKING_DIRECTORY + '/' + self.file_name) # find number of distinct x and y points. num_x_pts = len(np.unique(data[self.x_column_label])) num_y_pts = len(np.unique(data[self.y_column_label])) # create 2D arrays of x,y,z self.xvals = np.reshape(np.asarray(data[self.x_column_label]), (num_y_pts, num_x_pts)) self.yvals = np.reshape(np.asarray(data[self.y_column_label]), (num_y_pts, num_x_pts)) self.zvals = np.reshape(np.asarray(data[self.z_column_label]), (num_y_pts, num_x_pts)) return
python
def txt_read_in(self): """Read in txt files. Method for reading in text or csv files. This uses ascii class from astropy.io for flexible input. It is slower than numpy, but has greater flexibility with less input. """ # read in data = ascii.read(self.WORKING_DIRECTORY + '/' + self.file_name) # find number of distinct x and y points. num_x_pts = len(np.unique(data[self.x_column_label])) num_y_pts = len(np.unique(data[self.y_column_label])) # create 2D arrays of x,y,z self.xvals = np.reshape(np.asarray(data[self.x_column_label]), (num_y_pts, num_x_pts)) self.yvals = np.reshape(np.asarray(data[self.y_column_label]), (num_y_pts, num_x_pts)) self.zvals = np.reshape(np.asarray(data[self.z_column_label]), (num_y_pts, num_x_pts)) return
[ "def", "txt_read_in", "(", "self", ")", ":", "# read in", "data", "=", "ascii", ".", "read", "(", "self", ".", "WORKING_DIRECTORY", "+", "'/'", "+", "self", ".", "file_name", ")", "# find number of distinct x and y points.", "num_x_pts", "=", "len", "(", "np",...
Read in txt files. Method for reading in text or csv files. This uses ascii class from astropy.io for flexible input. It is slower than numpy, but has greater flexibility with less input.
[ "Read", "in", "txt", "files", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/readdata.py#L87-L107
mikekatz04/BOWIE
bowie/plotutils/readdata.py
ReadInData.hdf5_read_in
def hdf5_read_in(self): """Method for reading in hdf5 files. """ with h5py.File(self.WORKING_DIRECTORY + '/' + self.file_name) as f: # read in data = f['data'] # find number of distinct x and y points. num_x_pts = len(np.unique(data[self.x_column_label][:])) num_y_pts = len(np.unique(data[self.y_column_label][:])) # create 2D arrays of x,y,z self.xvals = np.reshape(data[self.x_column_label][:], (num_y_pts, num_x_pts)) self.yvals = np.reshape(data[self.y_column_label][:], (num_y_pts, num_x_pts)) self.zvals = np.reshape(data[self.z_column_label][:], (num_y_pts, num_x_pts)) return
python
def hdf5_read_in(self): """Method for reading in hdf5 files. """ with h5py.File(self.WORKING_DIRECTORY + '/' + self.file_name) as f: # read in data = f['data'] # find number of distinct x and y points. num_x_pts = len(np.unique(data[self.x_column_label][:])) num_y_pts = len(np.unique(data[self.y_column_label][:])) # create 2D arrays of x,y,z self.xvals = np.reshape(data[self.x_column_label][:], (num_y_pts, num_x_pts)) self.yvals = np.reshape(data[self.y_column_label][:], (num_y_pts, num_x_pts)) self.zvals = np.reshape(data[self.z_column_label][:], (num_y_pts, num_x_pts)) return
[ "def", "hdf5_read_in", "(", "self", ")", ":", "with", "h5py", ".", "File", "(", "self", ".", "WORKING_DIRECTORY", "+", "'/'", "+", "self", ".", "file_name", ")", "as", "f", ":", "# read in", "data", "=", "f", "[", "'data'", "]", "# find number of distinc...
Method for reading in hdf5 files.
[ "Method", "for", "reading", "in", "hdf5", "files", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/readdata.py#L109-L127
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/genconutils/genprocess.py
GenProcess.set_parameters
def set_parameters(self): """Setup all the parameters for the binaries to be evaluated. Grid values and store necessary parameters for input into the SNR function. """ # declare 1D arrays of both paramters if self.xscale != 'lin': self.xvals = np.logspace(np.log10(float(self.x_low)), np.log10(float(self.x_high)), self.num_x) else: self.xvals = np.linspace(float(self.x_low), float(self.x_high), self.num_x) if self.yscale != 'lin': self.yvals = np.logspace(np.log10(float(self.y_low)), np.log10(float(self.y_high)), self.num_y) else: self.yvals = np.linspace(float(self.y_low), float(self.y_high), self.num_y) self.xvals, self.yvals = np.meshgrid(self.xvals, self.yvals) self.xvals, self.yvals = self.xvals.ravel(), self.yvals.ravel() for which in ['x', 'y']: setattr(self, getattr(self, which + 'val_name'), getattr(self, which + 'vals')) self.ecc = 'eccentricity' in self.__dict__ if self.ecc: if 'observation_time' not in self.__dict__: if 'start_time' not in self.__dict__: raise ValueError('If no observation time is provided, the time before' + 'merger must be the inital starting condition.') self.observation_time = self.start_time # small number so it is not zero else: if 'spin' in self.__dict__: self.spin_1 = self.spin self.spin_2 = self.spin for key in ['redshift', 'luminosity_distance', 'comoving_distance']: if key in self.__dict__: self.dist_type = key self.z_or_dist = getattr(self, key) if self.ecc: for key in ['start_frequency', 'start_time', 'start_separation']: if key in self.__dict__: self.initial_cond_type = key.split('_')[-1] self.initial_point = getattr(self, key) # add m1 and m2 self.m1 = (self.total_mass / (1. + self.mass_ratio)) self.m2 = (self.total_mass * self.mass_ratio / (1. + self.mass_ratio)) return
python
def set_parameters(self): """Setup all the parameters for the binaries to be evaluated. Grid values and store necessary parameters for input into the SNR function. """ # declare 1D arrays of both paramters if self.xscale != 'lin': self.xvals = np.logspace(np.log10(float(self.x_low)), np.log10(float(self.x_high)), self.num_x) else: self.xvals = np.linspace(float(self.x_low), float(self.x_high), self.num_x) if self.yscale != 'lin': self.yvals = np.logspace(np.log10(float(self.y_low)), np.log10(float(self.y_high)), self.num_y) else: self.yvals = np.linspace(float(self.y_low), float(self.y_high), self.num_y) self.xvals, self.yvals = np.meshgrid(self.xvals, self.yvals) self.xvals, self.yvals = self.xvals.ravel(), self.yvals.ravel() for which in ['x', 'y']: setattr(self, getattr(self, which + 'val_name'), getattr(self, which + 'vals')) self.ecc = 'eccentricity' in self.__dict__ if self.ecc: if 'observation_time' not in self.__dict__: if 'start_time' not in self.__dict__: raise ValueError('If no observation time is provided, the time before' + 'merger must be the inital starting condition.') self.observation_time = self.start_time # small number so it is not zero else: if 'spin' in self.__dict__: self.spin_1 = self.spin self.spin_2 = self.spin for key in ['redshift', 'luminosity_distance', 'comoving_distance']: if key in self.__dict__: self.dist_type = key self.z_or_dist = getattr(self, key) if self.ecc: for key in ['start_frequency', 'start_time', 'start_separation']: if key in self.__dict__: self.initial_cond_type = key.split('_')[-1] self.initial_point = getattr(self, key) # add m1 and m2 self.m1 = (self.total_mass / (1. + self.mass_ratio)) self.m2 = (self.total_mass * self.mass_ratio / (1. + self.mass_ratio)) return
[ "def", "set_parameters", "(", "self", ")", ":", "# declare 1D arrays of both paramters", "if", "self", ".", "xscale", "!=", "'lin'", ":", "self", ".", "xvals", "=", "np", ".", "logspace", "(", "np", ".", "log10", "(", "float", "(", "self", ".", "x_low", ...
Setup all the parameters for the binaries to be evaluated. Grid values and store necessary parameters for input into the SNR function.
[ "Setup", "all", "the", "parameters", "for", "the", "binaries", "to", "be", "evaluated", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/genconutils/genprocess.py#L68-L128
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/genconutils/genprocess.py
GenProcess.run_snr
def run_snr(self): """Run the snr calculation. Takes results from ``self.set_parameters`` and other inputs and inputs these into the snr calculator. """ if self.ecc: required_kwargs = {'dist_type': self.dist_type, 'initial_cond_type': self.initial_cond_type, 'ecc': True} input_args = [self.m1, self.m2, self.z_or_dist, self.initial_point, self.eccentricity, self.observation_time] else: required_kwargs = {'dist_type': self.dist_type} input_args = [self.m1, self.m2, self.spin_1, self.spin_2, self.z_or_dist, self.start_time, self.end_time] input_kwargs = {**required_kwargs, **self.general, **self.sensitivity_input, **self.snr_input, **self.parallel_input} self.final_dict = snr(*input_args, **input_kwargs) return
python
def run_snr(self): """Run the snr calculation. Takes results from ``self.set_parameters`` and other inputs and inputs these into the snr calculator. """ if self.ecc: required_kwargs = {'dist_type': self.dist_type, 'initial_cond_type': self.initial_cond_type, 'ecc': True} input_args = [self.m1, self.m2, self.z_or_dist, self.initial_point, self.eccentricity, self.observation_time] else: required_kwargs = {'dist_type': self.dist_type} input_args = [self.m1, self.m2, self.spin_1, self.spin_2, self.z_or_dist, self.start_time, self.end_time] input_kwargs = {**required_kwargs, **self.general, **self.sensitivity_input, **self.snr_input, **self.parallel_input} self.final_dict = snr(*input_args, **input_kwargs) return
[ "def", "run_snr", "(", "self", ")", ":", "if", "self", ".", "ecc", ":", "required_kwargs", "=", "{", "'dist_type'", ":", "self", ".", "dist_type", ",", "'initial_cond_type'", ":", "self", ".", "initial_cond_type", ",", "'ecc'", ":", "True", "}", "input_arg...
Run the snr calculation. Takes results from ``self.set_parameters`` and other inputs and inputs these into the snr calculator.
[ "Run", "the", "snr", "calculation", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/genconutils/genprocess.py#L130-L157
pedroburon/tbk
tbk/webpay/logging/__init__.py
BaseHandler.event_payment
def event_payment(self, date, time, pid, commerce_id, transaction_id, request_ip, token, webpay_server): '''Record the payment event Official handler writes this information to TBK_EVN%Y%m%d file. ''' raise NotImplementedError("Logging Handler must implement event_payment")
python
def event_payment(self, date, time, pid, commerce_id, transaction_id, request_ip, token, webpay_server): '''Record the payment event Official handler writes this information to TBK_EVN%Y%m%d file. ''' raise NotImplementedError("Logging Handler must implement event_payment")
[ "def", "event_payment", "(", "self", ",", "date", ",", "time", ",", "pid", ",", "commerce_id", ",", "transaction_id", ",", "request_ip", ",", "token", ",", "webpay_server", ")", ":", "raise", "NotImplementedError", "(", "\"Logging Handler must implement event_paymen...
Record the payment event Official handler writes this information to TBK_EVN%Y%m%d file.
[ "Record", "the", "payment", "event" ]
train
https://github.com/pedroburon/tbk/blob/ecd6741e0bae06269eb4ac885c3ffcb7902ee40e/tbk/webpay/logging/__init__.py#L15-L20
pedroburon/tbk
tbk/webpay/logging/__init__.py
BaseHandler.event_confirmation
def event_confirmation(self, date, time, pid, commerce_id, transaction_id, request_ip, order_id): '''Record the confirmation event. Official handler writes this information to TBK_EVN%Y%m%d file. ''' raise NotImplementedError("Logging Handler must implement event_confirmation")
python
def event_confirmation(self, date, time, pid, commerce_id, transaction_id, request_ip, order_id): '''Record the confirmation event. Official handler writes this information to TBK_EVN%Y%m%d file. ''' raise NotImplementedError("Logging Handler must implement event_confirmation")
[ "def", "event_confirmation", "(", "self", ",", "date", ",", "time", ",", "pid", ",", "commerce_id", ",", "transaction_id", ",", "request_ip", ",", "order_id", ")", ":", "raise", "NotImplementedError", "(", "\"Logging Handler must implement event_confirmation\"", ")" ]
Record the confirmation event. Official handler writes this information to TBK_EVN%Y%m%d file.
[ "Record", "the", "confirmation", "event", "." ]
train
https://github.com/pedroburon/tbk/blob/ecd6741e0bae06269eb4ac885c3ffcb7902ee40e/tbk/webpay/logging/__init__.py#L22-L27
markfinger/python-js-host
js_host/manager.py
JSHostManager.stop
def stop(self): """ If the manager is running, tell it to stop its process """ res = self.send_request('manager/stop', post=True) if res.status_code != 200: raise UnexpectedResponse( 'Attempted to stop manager. {res_code}: {res_text}'.format( res_code=res.status_code, res_text=res.text, ) ) if settings.VERBOSITY >= verbosity.PROCESS_STOP: print('Stopped {}'.format(self.get_name())) # The request will end just before the process stops, so there is a tiny # possibility of a race condition. We delay as a precaution so that we # can be reasonably confident of the system's state. time.sleep(0.05)
python
def stop(self): """ If the manager is running, tell it to stop its process """ res = self.send_request('manager/stop', post=True) if res.status_code != 200: raise UnexpectedResponse( 'Attempted to stop manager. {res_code}: {res_text}'.format( res_code=res.status_code, res_text=res.text, ) ) if settings.VERBOSITY >= verbosity.PROCESS_STOP: print('Stopped {}'.format(self.get_name())) # The request will end just before the process stops, so there is a tiny # possibility of a race condition. We delay as a precaution so that we # can be reasonably confident of the system's state. time.sleep(0.05)
[ "def", "stop", "(", "self", ")", ":", "res", "=", "self", ".", "send_request", "(", "'manager/stop'", ",", "post", "=", "True", ")", "if", "res", ".", "status_code", "!=", "200", ":", "raise", "UnexpectedResponse", "(", "'Attempted to stop manager. {res_code}:...
If the manager is running, tell it to stop its process
[ "If", "the", "manager", "is", "running", "tell", "it", "to", "stop", "its", "process" ]
train
https://github.com/markfinger/python-js-host/blob/7727138c1eae779335d55fb4d7734698225a6322/js_host/manager.py#L12-L32
markfinger/python-js-host
js_host/manager.py
JSHostManager.stop_host
def stop_host(self, config_file): """ Stops a managed host specified by `config_file`. """ res = self.send_json_request('host/stop', data={'config': config_file}) if res.status_code != 200: raise UnexpectedResponse( 'Attempted to stop a JSHost. Response: {res_code}: {res_text}'.format( res_code=res.status_code, res_text=res.text, ) ) return res.json()
python
def stop_host(self, config_file): """ Stops a managed host specified by `config_file`. """ res = self.send_json_request('host/stop', data={'config': config_file}) if res.status_code != 200: raise UnexpectedResponse( 'Attempted to stop a JSHost. Response: {res_code}: {res_text}'.format( res_code=res.status_code, res_text=res.text, ) ) return res.json()
[ "def", "stop_host", "(", "self", ",", "config_file", ")", ":", "res", "=", "self", ".", "send_json_request", "(", "'host/stop'", ",", "data", "=", "{", "'config'", ":", "config_file", "}", ")", "if", "res", ".", "status_code", "!=", "200", ":", "raise", ...
Stops a managed host specified by `config_file`.
[ "Stops", "a", "managed", "host", "specified", "by", "config_file", "." ]
train
https://github.com/markfinger/python-js-host/blob/7727138c1eae779335d55fb4d7734698225a6322/js_host/manager.py#L69-L83
mpaolino/pypvwatts
pypvwatts/pypvwatts.py
PVWatts.get_data
def get_data(self, params={}): """ Make the request and return the deserialided JSON from the response :param params: Dictionary mapping (string) query parameters to values :type params: dict :return: JSON object with the data fetched from that URL as a JSON-format object. :rtype: (dict or array) """ if self and hasattr(self, 'proxies') and self.proxies is not None: response = requests.request('GET', url=PVWatts.PVWATTS_QUERY_URL, params=params, headers={'User-Agent': ''.join( ['pypvwatts/', VERSION, ' (Python)'])}, proxies=self.proxies) else: response = requests.request('GET', url=PVWatts.PVWATTS_QUERY_URL, params=params, headers={'User-Agent': ''.join( ['pypvwatts/', VERSION, ' (Python)'])}) if response.status_code == 403: raise PVWattsError("Forbidden, 403") return response.json()
python
def get_data(self, params={}): """ Make the request and return the deserialided JSON from the response :param params: Dictionary mapping (string) query parameters to values :type params: dict :return: JSON object with the data fetched from that URL as a JSON-format object. :rtype: (dict or array) """ if self and hasattr(self, 'proxies') and self.proxies is not None: response = requests.request('GET', url=PVWatts.PVWATTS_QUERY_URL, params=params, headers={'User-Agent': ''.join( ['pypvwatts/', VERSION, ' (Python)'])}, proxies=self.proxies) else: response = requests.request('GET', url=PVWatts.PVWATTS_QUERY_URL, params=params, headers={'User-Agent': ''.join( ['pypvwatts/', VERSION, ' (Python)'])}) if response.status_code == 403: raise PVWattsError("Forbidden, 403") return response.json()
[ "def", "get_data", "(", "self", ",", "params", "=", "{", "}", ")", ":", "if", "self", "and", "hasattr", "(", "self", ",", "'proxies'", ")", "and", "self", ".", "proxies", "is", "not", "None", ":", "response", "=", "requests", ".", "request", "(", "...
Make the request and return the deserialided JSON from the response :param params: Dictionary mapping (string) query parameters to values :type params: dict :return: JSON object with the data fetched from that URL as a JSON-format object. :rtype: (dict or array)
[ "Make", "the", "request", "and", "return", "the", "deserialided", "JSON", "from", "the", "response" ]
train
https://github.com/mpaolino/pypvwatts/blob/e5dee8ef2c2bfefba5009f3626599414039daaa9/pypvwatts/pypvwatts.py#L237-L266
ScatterHQ/machinist
machinist/_logging.py
FiniteStateLogger.receive
def receive(self, input): """ Add logging of state transitions to the wrapped state machine. @see: L{IFiniteStateMachine.receive} """ if IRichInput.providedBy(input): richInput = unicode(input) symbolInput = unicode(input.symbol()) else: richInput = None symbolInput = unicode(input) action = LOG_FSM_TRANSITION( self.logger, fsm_identifier=self.identifier, fsm_state=unicode(self.state), fsm_rich_input=richInput, fsm_input=symbolInput) with action as theAction: output = super(FiniteStateLogger, self).receive(input) theAction.addSuccessFields( fsm_next_state=unicode(self.state), fsm_output=[unicode(o) for o in output]) if self._action is not None and self._isTerminal(self.state): self._action.addSuccessFields( fsm_terminal_state=unicode(self.state)) self._action.finish() self._action = None return output
python
def receive(self, input): """ Add logging of state transitions to the wrapped state machine. @see: L{IFiniteStateMachine.receive} """ if IRichInput.providedBy(input): richInput = unicode(input) symbolInput = unicode(input.symbol()) else: richInput = None symbolInput = unicode(input) action = LOG_FSM_TRANSITION( self.logger, fsm_identifier=self.identifier, fsm_state=unicode(self.state), fsm_rich_input=richInput, fsm_input=symbolInput) with action as theAction: output = super(FiniteStateLogger, self).receive(input) theAction.addSuccessFields( fsm_next_state=unicode(self.state), fsm_output=[unicode(o) for o in output]) if self._action is not None and self._isTerminal(self.state): self._action.addSuccessFields( fsm_terminal_state=unicode(self.state)) self._action.finish() self._action = None return output
[ "def", "receive", "(", "self", ",", "input", ")", ":", "if", "IRichInput", ".", "providedBy", "(", "input", ")", ":", "richInput", "=", "unicode", "(", "input", ")", "symbolInput", "=", "unicode", "(", "input", ".", "symbol", "(", ")", ")", "else", "...
Add logging of state transitions to the wrapped state machine. @see: L{IFiniteStateMachine.receive}
[ "Add", "logging", "of", "state", "transitions", "to", "the", "wrapped", "state", "machine", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_logging.py#L81-L112
inveniosoftware/invenio-config
invenio_config/env.py
InvenioConfigEnvironment.init_app
def init_app(self, app): """Initialize Flask application.""" prefix_len = len(self.prefix) for varname, value in os.environ.items(): if not varname.startswith(self.prefix): continue # Prepare values varname = varname[prefix_len:] value = value or app.config.get(varname) # Evaluate value try: value = ast.literal_eval(value) except (SyntaxError, ValueError): pass # Set value app.config[varname] = value
python
def init_app(self, app): """Initialize Flask application.""" prefix_len = len(self.prefix) for varname, value in os.environ.items(): if not varname.startswith(self.prefix): continue # Prepare values varname = varname[prefix_len:] value = value or app.config.get(varname) # Evaluate value try: value = ast.literal_eval(value) except (SyntaxError, ValueError): pass # Set value app.config[varname] = value
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "prefix_len", "=", "len", "(", "self", ".", "prefix", ")", "for", "varname", ",", "value", "in", "os", ".", "environ", ".", "items", "(", ")", ":", "if", "not", "varname", ".", "startswith", "("...
Initialize Flask application.
[ "Initialize", "Flask", "application", "." ]
train
https://github.com/inveniosoftware/invenio-config/blob/8d1e63ac045cd9c58a3399c6b58845e6daa06102/invenio_config/env.py#L29-L47
all-umass/graphs
graphs/construction/regularized.py
smce_graph
def smce_graph(X, metric='l2', sparsity_param=10, kmax=None, keep_ratio=0.95): '''Sparse graph construction from the SMCE paper. X : 2-dimensional array-like metric : str, optional sparsity_param : float, optional kmax : int, optional keep_ratio : float, optional When <1, keep edges up to (keep_ratio * total weight) Returns a graph with asymmetric similarity weights. Call .symmetrize() and .kernelize('rbf') to convert to symmetric distances. SMCE: "Sparse Manifold Clustering and Embedding" Elhamifar & Vidal, NIPS 2011 ''' n = X.shape[0] if kmax is None: kmax = min(n-1, max(5, n // 10)) nn_dists, nn_inds = nearest_neighbors(X, metric=metric, k=kmax+1, return_dists=True) W = np.zeros((n, n)) # optimize each point separately for i, pt in enumerate(X): nbr_inds = nn_inds[i] mask = nbr_inds != i # remove self-edge nbr_inds = nbr_inds[mask] nbr_dist = nn_dists[i,mask] Y = (X[nbr_inds] - pt) / nbr_dist[:,None] # solve sparse optimization with ADMM c = _solve_admm(Y, nbr_dist/nbr_dist.sum(), sparsity_param) c = np.abs(c / nbr_dist) W[i,nbr_inds] = c / c.sum() W = ss.csr_matrix(W) if keep_ratio < 1: for i in range(n): row_data = W.data[W.indptr[i]:W.indptr[i+1]] order = np.argsort(row_data)[::-1] stop_idx = np.searchsorted(np.cumsum(row_data[order]), keep_ratio) + 1 bad_inds = order[stop_idx:] row_data[bad_inds] = 0 W.eliminate_zeros() return Graph.from_adj_matrix(W)
python
def smce_graph(X, metric='l2', sparsity_param=10, kmax=None, keep_ratio=0.95): '''Sparse graph construction from the SMCE paper. X : 2-dimensional array-like metric : str, optional sparsity_param : float, optional kmax : int, optional keep_ratio : float, optional When <1, keep edges up to (keep_ratio * total weight) Returns a graph with asymmetric similarity weights. Call .symmetrize() and .kernelize('rbf') to convert to symmetric distances. SMCE: "Sparse Manifold Clustering and Embedding" Elhamifar & Vidal, NIPS 2011 ''' n = X.shape[0] if kmax is None: kmax = min(n-1, max(5, n // 10)) nn_dists, nn_inds = nearest_neighbors(X, metric=metric, k=kmax+1, return_dists=True) W = np.zeros((n, n)) # optimize each point separately for i, pt in enumerate(X): nbr_inds = nn_inds[i] mask = nbr_inds != i # remove self-edge nbr_inds = nbr_inds[mask] nbr_dist = nn_dists[i,mask] Y = (X[nbr_inds] - pt) / nbr_dist[:,None] # solve sparse optimization with ADMM c = _solve_admm(Y, nbr_dist/nbr_dist.sum(), sparsity_param) c = np.abs(c / nbr_dist) W[i,nbr_inds] = c / c.sum() W = ss.csr_matrix(W) if keep_ratio < 1: for i in range(n): row_data = W.data[W.indptr[i]:W.indptr[i+1]] order = np.argsort(row_data)[::-1] stop_idx = np.searchsorted(np.cumsum(row_data[order]), keep_ratio) + 1 bad_inds = order[stop_idx:] row_data[bad_inds] = 0 W.eliminate_zeros() return Graph.from_adj_matrix(W)
[ "def", "smce_graph", "(", "X", ",", "metric", "=", "'l2'", ",", "sparsity_param", "=", "10", ",", "kmax", "=", "None", ",", "keep_ratio", "=", "0.95", ")", ":", "n", "=", "X", ".", "shape", "[", "0", "]", "if", "kmax", "is", "None", ":", "kmax", ...
Sparse graph construction from the SMCE paper. X : 2-dimensional array-like metric : str, optional sparsity_param : float, optional kmax : int, optional keep_ratio : float, optional When <1, keep edges up to (keep_ratio * total weight) Returns a graph with asymmetric similarity weights. Call .symmetrize() and .kernelize('rbf') to convert to symmetric distances. SMCE: "Sparse Manifold Clustering and Embedding" Elhamifar & Vidal, NIPS 2011
[ "Sparse", "graph", "construction", "from", "the", "SMCE", "paper", ".", "X", ":", "2", "-", "dimensional", "array", "-", "like", "metric", ":", "str", "optional", "sparsity_param", ":", "float", "optional", "kmax", ":", "int", "optional", "keep_ratio", ":", ...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/regularized.py#L23-L69
all-umass/graphs
graphs/construction/regularized.py
sparse_regularized_graph
def sparse_regularized_graph(X, positive=False, sparsity_param=None, kmax=None): '''Sparse Regularized Graph Construction, commonly known as an l1-graph. positive : bool, optional When True, computes the Sparse Probability Graph (SPG). sparsity_param : float, optional Controls sparsity cost in the LASSO optimization. When None, uses cross-validation to find sparsity parameters. This is very slow, but it gets good results. kmax : int, optional When None, allow all points to be edges. Otherwise, restrict to kNN set. l1-graph: "Semi-supervised Learning by Sparse Representation" Yan & Wang, SDM 2009 http://epubs.siam.org/doi/pdf/10.1137/1.9781611972795.68 SPG: "Nonnegative Sparse Coding for Discriminative Semi-supervised Learning" He et al., CVPR 2001 ''' clf, X = _l1_graph_setup(X, positive, sparsity_param) if kmax is None: W = _l1_graph_solve_full(clf, X) else: W = _l1_graph_solve_k(clf, X, kmax) return Graph.from_adj_matrix(W)
python
def sparse_regularized_graph(X, positive=False, sparsity_param=None, kmax=None): '''Sparse Regularized Graph Construction, commonly known as an l1-graph. positive : bool, optional When True, computes the Sparse Probability Graph (SPG). sparsity_param : float, optional Controls sparsity cost in the LASSO optimization. When None, uses cross-validation to find sparsity parameters. This is very slow, but it gets good results. kmax : int, optional When None, allow all points to be edges. Otherwise, restrict to kNN set. l1-graph: "Semi-supervised Learning by Sparse Representation" Yan & Wang, SDM 2009 http://epubs.siam.org/doi/pdf/10.1137/1.9781611972795.68 SPG: "Nonnegative Sparse Coding for Discriminative Semi-supervised Learning" He et al., CVPR 2001 ''' clf, X = _l1_graph_setup(X, positive, sparsity_param) if kmax is None: W = _l1_graph_solve_full(clf, X) else: W = _l1_graph_solve_k(clf, X, kmax) return Graph.from_adj_matrix(W)
[ "def", "sparse_regularized_graph", "(", "X", ",", "positive", "=", "False", ",", "sparsity_param", "=", "None", ",", "kmax", "=", "None", ")", ":", "clf", ",", "X", "=", "_l1_graph_setup", "(", "X", ",", "positive", ",", "sparsity_param", ")", "if", "kma...
Sparse Regularized Graph Construction, commonly known as an l1-graph. positive : bool, optional When True, computes the Sparse Probability Graph (SPG). sparsity_param : float, optional Controls sparsity cost in the LASSO optimization. When None, uses cross-validation to find sparsity parameters. This is very slow, but it gets good results. kmax : int, optional When None, allow all points to be edges. Otherwise, restrict to kNN set. l1-graph: "Semi-supervised Learning by Sparse Representation" Yan & Wang, SDM 2009 http://epubs.siam.org/doi/pdf/10.1137/1.9781611972795.68 SPG: "Nonnegative Sparse Coding for Discriminative Semi-supervised Learning" He et al., CVPR 2001
[ "Sparse", "Regularized", "Graph", "Construction", "commonly", "known", "as", "an", "l1", "-", "graph", ".", "positive", ":", "bool", "optional", "When", "True", "computes", "the", "Sparse", "Probability", "Graph", "(", "SPG", ")", ".", "sparsity_param", ":", ...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/regularized.py#L108-L132
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/parallel.py
ParallelContainer.prep_parallel
def prep_parallel(self, binary_args, other_args): """Prepare the parallel calculations Prepares the arguments to be run in parallel. It will divide up arrays according to num_splits. Args: binary_args (list): List of binary arguments for input into the SNR function. other_args (tuple of obj): tuple of other args for input into parallel snr function. """ if self.length < 100: raise Exception("Run this across 1 processor by setting num_processors kwarg to None.") if self.num_processors == -1: self.num_processors = mp.cpu_count() split_val = int(np.ceil(self.length/self.num_splits)) split_inds = [self.num_splits*i for i in np.arange(1, split_val)] inds_split_all = np.split(np.arange(self.length), split_inds) self.args = [] for i, ind_split in enumerate(inds_split_all): trans_args = [] for arg in binary_args: try: trans_args.append(arg[ind_split]) except TypeError: trans_args.append(arg) self.args.append((i, tuple(trans_args)) + other_args) return
python
def prep_parallel(self, binary_args, other_args): """Prepare the parallel calculations Prepares the arguments to be run in parallel. It will divide up arrays according to num_splits. Args: binary_args (list): List of binary arguments for input into the SNR function. other_args (tuple of obj): tuple of other args for input into parallel snr function. """ if self.length < 100: raise Exception("Run this across 1 processor by setting num_processors kwarg to None.") if self.num_processors == -1: self.num_processors = mp.cpu_count() split_val = int(np.ceil(self.length/self.num_splits)) split_inds = [self.num_splits*i for i in np.arange(1, split_val)] inds_split_all = np.split(np.arange(self.length), split_inds) self.args = [] for i, ind_split in enumerate(inds_split_all): trans_args = [] for arg in binary_args: try: trans_args.append(arg[ind_split]) except TypeError: trans_args.append(arg) self.args.append((i, tuple(trans_args)) + other_args) return
[ "def", "prep_parallel", "(", "self", ",", "binary_args", ",", "other_args", ")", ":", "if", "self", ".", "length", "<", "100", ":", "raise", "Exception", "(", "\"Run this across 1 processor by setting num_processors kwarg to None.\"", ")", "if", "self", ".", "num_pr...
Prepare the parallel calculations Prepares the arguments to be run in parallel. It will divide up arrays according to num_splits. Args: binary_args (list): List of binary arguments for input into the SNR function. other_args (tuple of obj): tuple of other args for input into parallel snr function.
[ "Prepare", "the", "parallel", "calculations" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/parallel.py#L45-L76
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/parallel.py
ParallelContainer.run_parallel
def run_parallel(self, para_func): """Run parallel calulation This will run the parallel calculation on self.num_processors. Args: para_func (obj): Function object to be used in parallel. Returns: (dict): Dictionary with parallel results. """ if self.timer: start_timer = time.time() # for testing # check = parallel_snr_func(*self.args[10]) # import pdb # pdb.set_trace() with mp.Pool(self.num_processors) as pool: print('start pool with {} processors: {} total processes.\n'.format( self.num_processors, len(self.args))) results = [pool.apply_async(para_func, arg) for arg in self.args] out = [r.get() for r in results] out = {key: np.concatenate([out_i[key] for out_i in out]) for key in out[0].keys()} if self.timer: print("SNR calculation time:", time.time()-start_timer) return out
python
def run_parallel(self, para_func): """Run parallel calulation This will run the parallel calculation on self.num_processors. Args: para_func (obj): Function object to be used in parallel. Returns: (dict): Dictionary with parallel results. """ if self.timer: start_timer = time.time() # for testing # check = parallel_snr_func(*self.args[10]) # import pdb # pdb.set_trace() with mp.Pool(self.num_processors) as pool: print('start pool with {} processors: {} total processes.\n'.format( self.num_processors, len(self.args))) results = [pool.apply_async(para_func, arg) for arg in self.args] out = [r.get() for r in results] out = {key: np.concatenate([out_i[key] for out_i in out]) for key in out[0].keys()} if self.timer: print("SNR calculation time:", time.time()-start_timer) return out
[ "def", "run_parallel", "(", "self", ",", "para_func", ")", ":", "if", "self", ".", "timer", ":", "start_timer", "=", "time", ".", "time", "(", ")", "# for testing", "# check = parallel_snr_func(*self.args[10])", "# import pdb", "# pdb.set_trace()", "with", "mp", "...
Run parallel calulation This will run the parallel calculation on self.num_processors. Args: para_func (obj): Function object to be used in parallel. Returns: (dict): Dictionary with parallel results.
[ "Run", "parallel", "calulation" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/parallel.py#L78-L107
calmjs/calmjs.parse
src/calmjs/parse/factory.py
RawParserUnparserFactory
def RawParserUnparserFactory(parser_name, parse_callable, *unparse_callables): """ Produces a callable object that also has callable attributes that passes its first argument to the parent callable. """ def build_unparse(f): @wraps(f) def unparse(self, source, *a, **kw): node = parse_callable(source) return f(node, *a, **kw) # a dumb and lazy docstring replacement unparse.__doc__ = f.__doc__.replace( 'ast\n The AST ', 'source\n The source ', ) return unparse def build_parse(f): @wraps(f) def parse(self, source): return f(source) parse.__name__ = parser_name parse.__qualname__ = parser_name return parse callables = {f.__name__: build_unparse(f) for f in unparse_callables} callables['__call__'] = build_parse(parse_callable) callables['__module__'] = PKGNAME return type(parser_name, (object,), callables)()
python
def RawParserUnparserFactory(parser_name, parse_callable, *unparse_callables): """ Produces a callable object that also has callable attributes that passes its first argument to the parent callable. """ def build_unparse(f): @wraps(f) def unparse(self, source, *a, **kw): node = parse_callable(source) return f(node, *a, **kw) # a dumb and lazy docstring replacement unparse.__doc__ = f.__doc__.replace( 'ast\n The AST ', 'source\n The source ', ) return unparse def build_parse(f): @wraps(f) def parse(self, source): return f(source) parse.__name__ = parser_name parse.__qualname__ = parser_name return parse callables = {f.__name__: build_unparse(f) for f in unparse_callables} callables['__call__'] = build_parse(parse_callable) callables['__module__'] = PKGNAME return type(parser_name, (object,), callables)()
[ "def", "RawParserUnparserFactory", "(", "parser_name", ",", "parse_callable", ",", "*", "unparse_callables", ")", ":", "def", "build_unparse", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "unparse", "(", "self", ",", "source", ",", "*", "a", "...
Produces a callable object that also has callable attributes that passes its first argument to the parent callable.
[ "Produces", "a", "callable", "object", "that", "also", "has", "callable", "attributes", "that", "passes", "its", "first", "argument", "to", "the", "parent", "callable", "." ]
train
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/factory.py#L55-L84
calmjs/calmjs.parse
src/calmjs/parse/factory.py
ParserUnparserFactory
def ParserUnparserFactory(module_name, *unparser_names): """ Produce a new parser/unparser object from the names provided. """ parse_callable = import_module(PKGNAME + '.parsers.' + module_name).parse unparser_module = import_module(PKGNAME + '.unparsers.' + module_name) return RawParserUnparserFactory(module_name, parse_callable, *[ getattr(unparser_module, name) for name in unparser_names])
python
def ParserUnparserFactory(module_name, *unparser_names): """ Produce a new parser/unparser object from the names provided. """ parse_callable = import_module(PKGNAME + '.parsers.' + module_name).parse unparser_module = import_module(PKGNAME + '.unparsers.' + module_name) return RawParserUnparserFactory(module_name, parse_callable, *[ getattr(unparser_module, name) for name in unparser_names])
[ "def", "ParserUnparserFactory", "(", "module_name", ",", "*", "unparser_names", ")", ":", "parse_callable", "=", "import_module", "(", "PKGNAME", "+", "'.parsers.'", "+", "module_name", ")", ".", "parse", "unparser_module", "=", "import_module", "(", "PKGNAME", "+...
Produce a new parser/unparser object from the names provided.
[ "Produce", "a", "new", "parser", "/", "unparser", "object", "from", "the", "names", "provided", "." ]
train
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/factory.py#L87-L95
all-umass/graphs
graphs/construction/downsample.py
downsample_trajectories
def downsample_trajectories(trajectories, downsampler, *args, **kwargs): '''Downsamples all points together, then re-splits into original trajectories. trajectories : list of 2-d arrays, each representing a trajectory downsampler(X, *args, **kwargs) : callable that returns indices into X ''' X = np.vstack(trajectories) traj_lengths = list(map(len, trajectories)) inds = np.sort(downsampler(X, *args, **kwargs)) new_traj = [] for stop in np.cumsum(traj_lengths): n = np.searchsorted(inds, stop) new_traj.append(X[inds[:n]]) inds = inds[n:] return new_traj
python
def downsample_trajectories(trajectories, downsampler, *args, **kwargs): '''Downsamples all points together, then re-splits into original trajectories. trajectories : list of 2-d arrays, each representing a trajectory downsampler(X, *args, **kwargs) : callable that returns indices into X ''' X = np.vstack(trajectories) traj_lengths = list(map(len, trajectories)) inds = np.sort(downsampler(X, *args, **kwargs)) new_traj = [] for stop in np.cumsum(traj_lengths): n = np.searchsorted(inds, stop) new_traj.append(X[inds[:n]]) inds = inds[n:] return new_traj
[ "def", "downsample_trajectories", "(", "trajectories", ",", "downsampler", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "X", "=", "np", ".", "vstack", "(", "trajectories", ")", "traj_lengths", "=", "list", "(", "map", "(", "len", ",", "trajectori...
Downsamples all points together, then re-splits into original trajectories. trajectories : list of 2-d arrays, each representing a trajectory downsampler(X, *args, **kwargs) : callable that returns indices into X
[ "Downsamples", "all", "points", "together", "then", "re", "-", "splits", "into", "original", "trajectories", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/downsample.py#L15-L29
all-umass/graphs
graphs/construction/downsample.py
epsilon_net
def epsilon_net(points, close_distance): '''Selects a subset of `points` to preserve graph structure while minimizing the number of points used, by removing points within `close_distance`. Returns the downsampled indices.''' num_points = points.shape[0] indices = set(range(num_points)) selected = [] while indices: idx = indices.pop() nn_inds, = nearest_neighbors(points[idx], points, epsilon=close_distance) indices.difference_update(nn_inds) selected.append(idx) return selected
python
def epsilon_net(points, close_distance): '''Selects a subset of `points` to preserve graph structure while minimizing the number of points used, by removing points within `close_distance`. Returns the downsampled indices.''' num_points = points.shape[0] indices = set(range(num_points)) selected = [] while indices: idx = indices.pop() nn_inds, = nearest_neighbors(points[idx], points, epsilon=close_distance) indices.difference_update(nn_inds) selected.append(idx) return selected
[ "def", "epsilon_net", "(", "points", ",", "close_distance", ")", ":", "num_points", "=", "points", ".", "shape", "[", "0", "]", "indices", "=", "set", "(", "range", "(", "num_points", ")", ")", "selected", "=", "[", "]", "while", "indices", ":", "idx",...
Selects a subset of `points` to preserve graph structure while minimizing the number of points used, by removing points within `close_distance`. Returns the downsampled indices.
[ "Selects", "a", "subset", "of", "points", "to", "preserve", "graph", "structure", "while", "minimizing", "the", "number", "of", "points", "used", "by", "removing", "points", "within", "close_distance", ".", "Returns", "the", "downsampled", "indices", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/downsample.py#L32-L44
all-umass/graphs
graphs/construction/downsample.py
fuzzy_c_means
def fuzzy_c_means(points, num_centers, m=2., tol=1e-4, max_iter=100, verbose=False): '''Uses Fuzzy C-Means to downsample `points`. m : aggregation parameter >1, larger implies smoother clusters Returns indices of downsampled points. ''' num_points = points.shape[0] if num_centers >= num_points: return np.arange(num_points) # randomly initialize cluster assignments matrix assn = np.random.random((points.shape[0], num_centers)) # iterate assignments until they converge for i in range(max_iter): # compute centers w = assn ** m w /= w.sum(axis=0) centers = w.T.dot(points) # calculate new assignments d = pairwise_distances(points, centers) d **= 2. / (m - 1) np.maximum(d, 1e-10, out=d) new_assn = 1. / np.einsum('ik,ij->ik', d, 1./d) # check for convergence change = np.linalg.norm(new_assn - assn) if verbose: print('At iteration %d: change = %g' % (i+1, change)) if change < tol: break assn = new_assn else: warnings.warn("fuzzy_c_means didn't converge in %d iterations" % max_iter) # find points closest to the selected cluster centers return d.argmin(axis=0)
python
def fuzzy_c_means(points, num_centers, m=2., tol=1e-4, max_iter=100, verbose=False): '''Uses Fuzzy C-Means to downsample `points`. m : aggregation parameter >1, larger implies smoother clusters Returns indices of downsampled points. ''' num_points = points.shape[0] if num_centers >= num_points: return np.arange(num_points) # randomly initialize cluster assignments matrix assn = np.random.random((points.shape[0], num_centers)) # iterate assignments until they converge for i in range(max_iter): # compute centers w = assn ** m w /= w.sum(axis=0) centers = w.T.dot(points) # calculate new assignments d = pairwise_distances(points, centers) d **= 2. / (m - 1) np.maximum(d, 1e-10, out=d) new_assn = 1. / np.einsum('ik,ij->ik', d, 1./d) # check for convergence change = np.linalg.norm(new_assn - assn) if verbose: print('At iteration %d: change = %g' % (i+1, change)) if change < tol: break assn = new_assn else: warnings.warn("fuzzy_c_means didn't converge in %d iterations" % max_iter) # find points closest to the selected cluster centers return d.argmin(axis=0)
[ "def", "fuzzy_c_means", "(", "points", ",", "num_centers", ",", "m", "=", "2.", ",", "tol", "=", "1e-4", ",", "max_iter", "=", "100", ",", "verbose", "=", "False", ")", ":", "num_points", "=", "points", ".", "shape", "[", "0", "]", "if", "num_centers...
Uses Fuzzy C-Means to downsample `points`. m : aggregation parameter >1, larger implies smoother clusters Returns indices of downsampled points.
[ "Uses", "Fuzzy", "C", "-", "Means", "to", "downsample", "points", ".", "m", ":", "aggregation", "parameter", ">", "1", "larger", "implies", "smoother", "clusters", "Returns", "indices", "of", "downsampled", "points", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/downsample.py#L47-L79
pyroscope/pyrobase
src/pyrobase/io/xmlrpc2scgi.py
transport_from_url
def transport_from_url(url): """ Create a transport for the given URL. """ if '/' not in url and ':' in url and url.rsplit(':')[-1].isdigit(): url = 'scgi://' + url url = urlparse.urlsplit(url, scheme="scgi", allow_fragments=False) # pylint: disable=redundant-keyword-arg try: transport = TRANSPORTS[url.scheme.lower()] except KeyError: if not any((url.netloc, url.query)) and url.path.isdigit(): # Support simplified "domain:port" URLs return transport_from_url("scgi://%s:%s" % (url.scheme, url.path)) else: raise URLError("Unsupported scheme in URL %r" % url.geturl()) else: return transport(url)
python
def transport_from_url(url): """ Create a transport for the given URL. """ if '/' not in url and ':' in url and url.rsplit(':')[-1].isdigit(): url = 'scgi://' + url url = urlparse.urlsplit(url, scheme="scgi", allow_fragments=False) # pylint: disable=redundant-keyword-arg try: transport = TRANSPORTS[url.scheme.lower()] except KeyError: if not any((url.netloc, url.query)) and url.path.isdigit(): # Support simplified "domain:port" URLs return transport_from_url("scgi://%s:%s" % (url.scheme, url.path)) else: raise URLError("Unsupported scheme in URL %r" % url.geturl()) else: return transport(url)
[ "def", "transport_from_url", "(", "url", ")", ":", "if", "'/'", "not", "in", "url", "and", "':'", "in", "url", "and", "url", ".", "rsplit", "(", "':'", ")", "[", "-", "1", "]", ".", "isdigit", "(", ")", ":", "url", "=", "'scgi://'", "+", "url", ...
Create a transport for the given URL.
[ "Create", "a", "transport", "for", "the", "given", "URL", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/io/xmlrpc2scgi.py#L160-L176
pyroscope/pyrobase
src/pyrobase/io/xmlrpc2scgi.py
_encode_payload
def _encode_payload(data, headers=None): "Wrap data in an SCGI request." prolog = "CONTENT_LENGTH\0%d\0SCGI\x001\0" % len(data) if headers: prolog += _encode_headers(headers) return _encode_netstring(prolog) + data
python
def _encode_payload(data, headers=None): "Wrap data in an SCGI request." prolog = "CONTENT_LENGTH\0%d\0SCGI\x001\0" % len(data) if headers: prolog += _encode_headers(headers) return _encode_netstring(prolog) + data
[ "def", "_encode_payload", "(", "data", ",", "headers", "=", "None", ")", ":", "prolog", "=", "\"CONTENT_LENGTH\\0%d\\0SCGI\\x001\\0\"", "%", "len", "(", "data", ")", "if", "headers", ":", "prolog", "+=", "_encode_headers", "(", "headers", ")", "return", "_enco...
Wrap data in an SCGI request.
[ "Wrap", "data", "in", "an", "SCGI", "request", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/io/xmlrpc2scgi.py#L194-L200
pyroscope/pyrobase
src/pyrobase/io/xmlrpc2scgi.py
_parse_headers
def _parse_headers(headers): "Get headers dict from header string." try: return dict(line.rstrip().split(": ", 1) for line in headers.splitlines() if line ) except (TypeError, ValueError) as exc: raise SCGIException("Error in SCGI headers %r (%s)" % (headers, exc,))
python
def _parse_headers(headers): "Get headers dict from header string." try: return dict(line.rstrip().split(": ", 1) for line in headers.splitlines() if line ) except (TypeError, ValueError) as exc: raise SCGIException("Error in SCGI headers %r (%s)" % (headers, exc,))
[ "def", "_parse_headers", "(", "headers", ")", ":", "try", ":", "return", "dict", "(", "line", ".", "rstrip", "(", ")", ".", "split", "(", "\": \"", ",", "1", ")", "for", "line", "in", "headers", ".", "splitlines", "(", ")", "if", "line", ")", "exce...
Get headers dict from header string.
[ "Get", "headers", "dict", "from", "header", "string", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/io/xmlrpc2scgi.py#L203-L211
pyroscope/pyrobase
src/pyrobase/io/xmlrpc2scgi.py
_parse_response
def _parse_response(resp): """ Get xmlrpc response from scgi response """ # Assume they care for standards and send us CRLF (not just LF) try: headers, payload = resp.split("\r\n\r\n", 1) except (TypeError, ValueError) as exc: raise SCGIException("No header delimiter in SCGI response of length %d (%s)" % (len(resp), exc,)) headers = _parse_headers(headers) clen = headers.get("Content-Length") if clen is not None: # Check length, just in case the transport is bogus assert len(payload) == int(clen) return payload, headers
python
def _parse_response(resp): """ Get xmlrpc response from scgi response """ # Assume they care for standards and send us CRLF (not just LF) try: headers, payload = resp.split("\r\n\r\n", 1) except (TypeError, ValueError) as exc: raise SCGIException("No header delimiter in SCGI response of length %d (%s)" % (len(resp), exc,)) headers = _parse_headers(headers) clen = headers.get("Content-Length") if clen is not None: # Check length, just in case the transport is bogus assert len(payload) == int(clen) return payload, headers
[ "def", "_parse_response", "(", "resp", ")", ":", "# Assume they care for standards and send us CRLF (not just LF)", "try", ":", "headers", ",", "payload", "=", "resp", ".", "split", "(", "\"\\r\\n\\r\\n\"", ",", "1", ")", "except", "(", "TypeError", ",", "ValueError...
Get xmlrpc response from scgi response
[ "Get", "xmlrpc", "response", "from", "scgi", "response" ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/io/xmlrpc2scgi.py#L214-L229
pyroscope/pyrobase
src/pyrobase/io/xmlrpc2scgi.py
scgi_request
def scgi_request(url, methodname, *params, **kw): """ Send a XMLRPC request over SCGI to the given URL. @param url: Endpoint URL. @param methodname: XMLRPC method name. @param params: Tuple of simple python objects. @keyword deserialize: Parse XML result? (default is True) @return: XMLRPC response, or the equivalent Python data. """ xmlreq = xmlrpclib.dumps(params, methodname) xmlresp = SCGIRequest(url).send(xmlreq) if kw.get("deserialize", True): # This fixes a bug with the Python xmlrpclib module # (has no handler for <i8> in some versions) xmlresp = xmlresp.replace("<i8>", "<i4>").replace("</i8>", "</i4>") # Return deserialized data return xmlrpclib.loads(xmlresp)[0][0] else: # Return raw XML return xmlresp
python
def scgi_request(url, methodname, *params, **kw): """ Send a XMLRPC request over SCGI to the given URL. @param url: Endpoint URL. @param methodname: XMLRPC method name. @param params: Tuple of simple python objects. @keyword deserialize: Parse XML result? (default is True) @return: XMLRPC response, or the equivalent Python data. """ xmlreq = xmlrpclib.dumps(params, methodname) xmlresp = SCGIRequest(url).send(xmlreq) if kw.get("deserialize", True): # This fixes a bug with the Python xmlrpclib module # (has no handler for <i8> in some versions) xmlresp = xmlresp.replace("<i8>", "<i4>").replace("</i8>", "</i4>") # Return deserialized data return xmlrpclib.loads(xmlresp)[0][0] else: # Return raw XML return xmlresp
[ "def", "scgi_request", "(", "url", ",", "methodname", ",", "*", "params", ",", "*", "*", "kw", ")", ":", "xmlreq", "=", "xmlrpclib", ".", "dumps", "(", "params", ",", "methodname", ")", "xmlresp", "=", "SCGIRequest", "(", "url", ")", ".", "send", "("...
Send a XMLRPC request over SCGI to the given URL. @param url: Endpoint URL. @param methodname: XMLRPC method name. @param params: Tuple of simple python objects. @keyword deserialize: Parse XML result? (default is True) @return: XMLRPC response, or the equivalent Python data.
[ "Send", "a", "XMLRPC", "request", "over", "SCGI", "to", "the", "given", "URL", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/io/xmlrpc2scgi.py#L269-L290
pyroscope/pyrobase
src/pyrobase/io/xmlrpc2scgi.py
LocalTransport.send
def send(self, data): """ Open transport, send data, and yield response chunks. """ sock = socket.socket(*self.sock_args) try: sock.connect(self.sock_addr) except socket.error as exc: raise socket.error("Can't connect to %r (%s)" % (self.url.geturl(), exc)) try: # Send request sock.send(data) # Read response while True: chunk = sock.recv(self.CHUNK_SIZE) if chunk: yield chunk else: break finally: # Clean up sock.close()
python
def send(self, data): """ Open transport, send data, and yield response chunks. """ sock = socket.socket(*self.sock_args) try: sock.connect(self.sock_addr) except socket.error as exc: raise socket.error("Can't connect to %r (%s)" % (self.url.geturl(), exc)) try: # Send request sock.send(data) # Read response while True: chunk = sock.recv(self.CHUNK_SIZE) if chunk: yield chunk else: break finally: # Clean up sock.close()
[ "def", "send", "(", "self", ",", "data", ")", ":", "sock", "=", "socket", ".", "socket", "(", "*", "self", ".", "sock_args", ")", "try", ":", "sock", ".", "connect", "(", "self", ".", "sock_addr", ")", "except", "socket", ".", "error", "as", "exc",...
Open transport, send data, and yield response chunks.
[ "Open", "transport", "send", "data", "and", "yield", "response", "chunks", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/io/xmlrpc2scgi.py#L77-L99
pyroscope/pyrobase
src/pyrobase/io/xmlrpc2scgi.py
SSHTransport.send
def send(self, data): """ Open transport, send data, and yield response chunks. """ try: proc = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as exc: raise URLError("Calling %r failed (%s)!" % (' '.join(self.cmd), exc)) else: stdout, stderr = proc.communicate(data) if proc.returncode: raise URLError("Calling %r failed with RC=%d!\n%s" % ( ' '.join(self.cmd), proc.returncode, stderr, )) yield stdout
python
def send(self, data): """ Open transport, send data, and yield response chunks. """ try: proc = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as exc: raise URLError("Calling %r failed (%s)!" % (' '.join(self.cmd), exc)) else: stdout, stderr = proc.communicate(data) if proc.returncode: raise URLError("Calling %r failed with RC=%d!\n%s" % ( ' '.join(self.cmd), proc.returncode, stderr, )) yield stdout
[ "def", "send", "(", "self", ",", "data", ")", ":", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "self", ".", "cmd", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subpr...
Open transport, send data, and yield response chunks.
[ "Open", "transport", "send", "data", "and", "yield", "response", "chunks", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/io/xmlrpc2scgi.py#L135-L148
pyroscope/pyrobase
src/pyrobase/io/xmlrpc2scgi.py
SCGIRequest.send
def send(self, data): """ Send data over scgi to URL and get response. """ start = time.time() try: scgi_resp = ''.join(self.transport.send(_encode_payload(data))) finally: self.latency = time.time() - start resp, self.resp_headers = _parse_response(scgi_resp) return resp
python
def send(self, data): """ Send data over scgi to URL and get response. """ start = time.time() try: scgi_resp = ''.join(self.transport.send(_encode_payload(data))) finally: self.latency = time.time() - start resp, self.resp_headers = _parse_response(scgi_resp) return resp
[ "def", "send", "(", "self", ",", "data", ")", ":", "start", "=", "time", ".", "time", "(", ")", "try", ":", "scgi_resp", "=", "''", ".", "join", "(", "self", ".", "transport", ".", "send", "(", "_encode_payload", "(", "data", ")", ")", ")", "fina...
Send data over scgi to URL and get response.
[ "Send", "data", "over", "scgi", "to", "URL", "and", "get", "response", "." ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/io/xmlrpc2scgi.py#L256-L266
lexsca/rollback
rollback.py
Rollback._frames
def _frames(traceback): ''' Returns generator that iterates over frames in a traceback ''' frame = traceback while frame.tb_next: frame = frame.tb_next yield frame.tb_frame return
python
def _frames(traceback): ''' Returns generator that iterates over frames in a traceback ''' frame = traceback while frame.tb_next: frame = frame.tb_next yield frame.tb_frame return
[ "def", "_frames", "(", "traceback", ")", ":", "frame", "=", "traceback", "while", "frame", ".", "tb_next", ":", "frame", "=", "frame", ".", "tb_next", "yield", "frame", ".", "tb_frame", "return" ]
Returns generator that iterates over frames in a traceback
[ "Returns", "generator", "that", "iterates", "over", "frames", "in", "a", "traceback" ]
train
https://github.com/lexsca/rollback/blob/ef9e77ff1308e1abfd9d5772060a51c10266fa36/rollback.py#L35-L43
lexsca/rollback
rollback.py
Rollback._methodInTraceback
def _methodInTraceback(self, name, traceback): ''' Returns boolean whether traceback contains method from this instance ''' foundMethod = False for frame in self._frames(traceback): this = frame.f_locals.get('self') if this is self and frame.f_code.co_name == name: foundMethod = True break return foundMethod
python
def _methodInTraceback(self, name, traceback): ''' Returns boolean whether traceback contains method from this instance ''' foundMethod = False for frame in self._frames(traceback): this = frame.f_locals.get('self') if this is self and frame.f_code.co_name == name: foundMethod = True break return foundMethod
[ "def", "_methodInTraceback", "(", "self", ",", "name", ",", "traceback", ")", ":", "foundMethod", "=", "False", "for", "frame", "in", "self", ".", "_frames", "(", "traceback", ")", ":", "this", "=", "frame", ".", "f_locals", ".", "get", "(", "'self'", ...
Returns boolean whether traceback contains method from this instance
[ "Returns", "boolean", "whether", "traceback", "contains", "method", "from", "this", "instance" ]
train
https://github.com/lexsca/rollback/blob/ef9e77ff1308e1abfd9d5772060a51c10266fa36/rollback.py#L45-L55
lexsca/rollback
rollback.py
Rollback.addStep
def addStep(self, callback, *args, **kwargs): ''' Add rollback step with optional arguments. If a rollback is triggered, each step is called in LIFO order. ''' self.steps.append((callback, args, kwargs))
python
def addStep(self, callback, *args, **kwargs): ''' Add rollback step with optional arguments. If a rollback is triggered, each step is called in LIFO order. ''' self.steps.append((callback, args, kwargs))
[ "def", "addStep", "(", "self", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "steps", ".", "append", "(", "(", "callback", ",", "args", ",", "kwargs", ")", ")" ]
Add rollback step with optional arguments. If a rollback is triggered, each step is called in LIFO order.
[ "Add", "rollback", "step", "with", "optional", "arguments", ".", "If", "a", "rollback", "is", "triggered", "each", "step", "is", "called", "in", "LIFO", "order", "." ]
train
https://github.com/lexsca/rollback/blob/ef9e77ff1308e1abfd9d5772060a51c10266fa36/rollback.py#L75-L80
lexsca/rollback
rollback.py
Rollback.doRollback
def doRollback(self): ''' Call each rollback step in LIFO order. ''' while self.steps: callback, args, kwargs = self.steps.pop() callback(*args, **kwargs)
python
def doRollback(self): ''' Call each rollback step in LIFO order. ''' while self.steps: callback, args, kwargs = self.steps.pop() callback(*args, **kwargs)
[ "def", "doRollback", "(", "self", ")", ":", "while", "self", ".", "steps", ":", "callback", ",", "args", ",", "kwargs", "=", "self", ".", "steps", ".", "pop", "(", ")", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Call each rollback step in LIFO order.
[ "Call", "each", "rollback", "step", "in", "LIFO", "order", "." ]
train
https://github.com/lexsca/rollback/blob/ef9e77ff1308e1abfd9d5772060a51c10266fa36/rollback.py#L88-L94
all-umass/graphs
graphs/construction/neighbors.py
neighbor_graph
def neighbor_graph(X, metric='euclidean', k=None, epsilon=None, weighting='none', precomputed=False): '''Build a neighbor graph from pairwise distance information. X : two-dimensional array-like Shape must either be (num_pts, num_dims) or (num_pts, num_pts). k : int, maximum number of nearest neighbors epsilon : float, maximum distance to a neighbor metric : str, type of distance metric (see sklearn.metrics) When metric='precomputed', X is a symmetric distance matrix. weighting : str, one of {'binary', 'none'} When weighting='binary', all edge weights == 1. ''' if k is None and epsilon is None: raise ValueError('Must provide `k` or `epsilon`.') if weighting not in ('binary', 'none'): raise ValueError('Invalid weighting param: %r' % weighting) # TODO: deprecate the precomputed kwarg precomputed = precomputed or (metric == 'precomputed') binary = weighting == 'binary' # Try the fast path, if possible. if not precomputed and epsilon is None: return _sparse_neighbor_graph(X, k, binary, metric) if precomputed: D = X else: D = pairwise_distances(X, metric=metric) return _slow_neighbor_graph(D, k, epsilon, binary)
python
def neighbor_graph(X, metric='euclidean', k=None, epsilon=None, weighting='none', precomputed=False): '''Build a neighbor graph from pairwise distance information. X : two-dimensional array-like Shape must either be (num_pts, num_dims) or (num_pts, num_pts). k : int, maximum number of nearest neighbors epsilon : float, maximum distance to a neighbor metric : str, type of distance metric (see sklearn.metrics) When metric='precomputed', X is a symmetric distance matrix. weighting : str, one of {'binary', 'none'} When weighting='binary', all edge weights == 1. ''' if k is None and epsilon is None: raise ValueError('Must provide `k` or `epsilon`.') if weighting not in ('binary', 'none'): raise ValueError('Invalid weighting param: %r' % weighting) # TODO: deprecate the precomputed kwarg precomputed = precomputed or (metric == 'precomputed') binary = weighting == 'binary' # Try the fast path, if possible. if not precomputed and epsilon is None: return _sparse_neighbor_graph(X, k, binary, metric) if precomputed: D = X else: D = pairwise_distances(X, metric=metric) return _slow_neighbor_graph(D, k, epsilon, binary)
[ "def", "neighbor_graph", "(", "X", ",", "metric", "=", "'euclidean'", ",", "k", "=", "None", ",", "epsilon", "=", "None", ",", "weighting", "=", "'none'", ",", "precomputed", "=", "False", ")", ":", "if", "k", "is", "None", "and", "epsilon", "is", "N...
Build a neighbor graph from pairwise distance information. X : two-dimensional array-like Shape must either be (num_pts, num_dims) or (num_pts, num_pts). k : int, maximum number of nearest neighbors epsilon : float, maximum distance to a neighbor metric : str, type of distance metric (see sklearn.metrics) When metric='precomputed', X is a symmetric distance matrix. weighting : str, one of {'binary', 'none'} When weighting='binary', all edge weights == 1.
[ "Build", "a", "neighbor", "graph", "from", "pairwise", "distance", "information", ".", "X", ":", "two", "-", "dimensional", "array", "-", "like", "Shape", "must", "either", "be", "(", "num_pts", "num_dims", ")", "or", "(", "num_pts", "num_pts", ")", ".", ...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/neighbors.py#L17-L47
all-umass/graphs
graphs/construction/neighbors.py
nearest_neighbors
def nearest_neighbors(query_pts, target_pts=None, metric='euclidean', k=None, epsilon=None, return_dists=False, precomputed=False): '''Find nearest neighbors of query points from a matrix of target points. Returns a list of indices of neighboring points, one list per query. If no target_pts are specified, distances are calculated within query_pts. When return_dists is True, returns two lists: (distances, indices) ''' if k is None and epsilon is None: raise ValueError('Must provide `k` or `epsilon`.') # TODO: deprecate the precomputed kwarg precomputed = precomputed or (metric == 'precomputed') if precomputed and target_pts is not None: raise ValueError('`target_pts` cannot be used with precomputed distances') query_pts = np.array(query_pts) if len(query_pts.shape) == 1: query_pts = query_pts.reshape((1,-1)) # ensure that the query is a 1xD row if precomputed: dists = query_pts.copy() else: dists = pairwise_distances(query_pts, Y=target_pts, metric=metric) if epsilon is not None: if k is not None: # kNN filtering _, not_nn = _min_k_indices(dists, k, inv_ind=True) dists[np.arange(dists.shape[0]), not_nn.T] = np.inf # epsilon-ball is_close = dists <= epsilon if return_dists: nnis,nnds = [],[] for i,row in enumerate(is_close): nns = np.nonzero(row)[0] nnis.append(nns) nnds.append(dists[i,nns]) return nnds, nnis return np.array([np.nonzero(row)[0] for row in is_close]) # knn nns = _min_k_indices(dists,k) if return_dists: # index each row of dists by each row of nns row_inds = np.arange(len(nns))[:,np.newaxis] nn_dists = dists[row_inds, nns] return nn_dists, nns return nns
python
def nearest_neighbors(query_pts, target_pts=None, metric='euclidean', k=None, epsilon=None, return_dists=False, precomputed=False): '''Find nearest neighbors of query points from a matrix of target points. Returns a list of indices of neighboring points, one list per query. If no target_pts are specified, distances are calculated within query_pts. When return_dists is True, returns two lists: (distances, indices) ''' if k is None and epsilon is None: raise ValueError('Must provide `k` or `epsilon`.') # TODO: deprecate the precomputed kwarg precomputed = precomputed or (metric == 'precomputed') if precomputed and target_pts is not None: raise ValueError('`target_pts` cannot be used with precomputed distances') query_pts = np.array(query_pts) if len(query_pts.shape) == 1: query_pts = query_pts.reshape((1,-1)) # ensure that the query is a 1xD row if precomputed: dists = query_pts.copy() else: dists = pairwise_distances(query_pts, Y=target_pts, metric=metric) if epsilon is not None: if k is not None: # kNN filtering _, not_nn = _min_k_indices(dists, k, inv_ind=True) dists[np.arange(dists.shape[0]), not_nn.T] = np.inf # epsilon-ball is_close = dists <= epsilon if return_dists: nnis,nnds = [],[] for i,row in enumerate(is_close): nns = np.nonzero(row)[0] nnis.append(nns) nnds.append(dists[i,nns]) return nnds, nnis return np.array([np.nonzero(row)[0] for row in is_close]) # knn nns = _min_k_indices(dists,k) if return_dists: # index each row of dists by each row of nns row_inds = np.arange(len(nns))[:,np.newaxis] nn_dists = dists[row_inds, nns] return nn_dists, nns return nns
[ "def", "nearest_neighbors", "(", "query_pts", ",", "target_pts", "=", "None", ",", "metric", "=", "'euclidean'", ",", "k", "=", "None", ",", "epsilon", "=", "None", ",", "return_dists", "=", "False", ",", "precomputed", "=", "False", ")", ":", "if", "k",...
Find nearest neighbors of query points from a matrix of target points. Returns a list of indices of neighboring points, one list per query. If no target_pts are specified, distances are calculated within query_pts. When return_dists is True, returns two lists: (distances, indices)
[ "Find", "nearest", "neighbors", "of", "query", "points", "from", "a", "matrix", "of", "target", "points", ".", "Returns", "a", "list", "of", "indices", "of", "neighboring", "points", "one", "list", "per", "query", ".", "If", "no", "target_pts", "are", "spe...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/neighbors.py#L50-L100
all-umass/graphs
graphs/construction/neighbors.py
_sparse_neighbor_graph
def _sparse_neighbor_graph(X, k, binary=False, metric='l2'): '''Construct a sparse adj matrix from a matrix of points (one per row). Non-zeros are unweighted/binary distance values, depending on the binary arg. Doesn't include self-edges.''' knn = NearestNeighbors(n_neighbors=k, metric=metric).fit(X) mode = 'connectivity' if binary else 'distance' try: adj = knn.kneighbors_graph(None, mode=mode) except IndexError: # XXX: we must be running an old (<0.16) version of sklearn # We have to hack around an old bug: if binary: adj = knn.kneighbors_graph(X, k+1, mode=mode) adj.setdiag(0) else: adj = knn.kneighbors_graph(X, k, mode=mode) return Graph.from_adj_matrix(adj)
python
def _sparse_neighbor_graph(X, k, binary=False, metric='l2'): '''Construct a sparse adj matrix from a matrix of points (one per row). Non-zeros are unweighted/binary distance values, depending on the binary arg. Doesn't include self-edges.''' knn = NearestNeighbors(n_neighbors=k, metric=metric).fit(X) mode = 'connectivity' if binary else 'distance' try: adj = knn.kneighbors_graph(None, mode=mode) except IndexError: # XXX: we must be running an old (<0.16) version of sklearn # We have to hack around an old bug: if binary: adj = knn.kneighbors_graph(X, k+1, mode=mode) adj.setdiag(0) else: adj = knn.kneighbors_graph(X, k, mode=mode) return Graph.from_adj_matrix(adj)
[ "def", "_sparse_neighbor_graph", "(", "X", ",", "k", ",", "binary", "=", "False", ",", "metric", "=", "'l2'", ")", ":", "knn", "=", "NearestNeighbors", "(", "n_neighbors", "=", "k", ",", "metric", "=", "metric", ")", ".", "fit", "(", "X", ")", "mode"...
Construct a sparse adj matrix from a matrix of points (one per row). Non-zeros are unweighted/binary distance values, depending on the binary arg. Doesn't include self-edges.
[ "Construct", "a", "sparse", "adj", "matrix", "from", "a", "matrix", "of", "points", "(", "one", "per", "row", ")", ".", "Non", "-", "zeros", "are", "unweighted", "/", "binary", "distance", "values", "depending", "on", "the", "binary", "arg", ".", "Doesn"...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/neighbors.py#L139-L155
all-umass/graphs
graphs/construction/saffron.py
saffron
def saffron(X, q=32, k=4, tangent_dim=1, curv_thresh=0.95, decay_rate=0.9, max_iter=15, verbose=False): ''' SAFFRON graph construction method. X : (n,d)-array of coordinates q : int, median number of candidate friends per vertex k : int, number of friends to select per vertex, k < q tangent_dim : int, dimensionality of manifold tangent space curv_thresh : float, tolerance to curvature, lambda in the paper decay_rate : float, controls step size per iteration, between 0 and 1 max_iter : int, cap on number of iterations verbose : bool, print goodness measure per iteration when True From "Tangent Space Guided Intelligent Neighbor Finding", by Gashler & Martinez, 2011. See http://axon.cs.byu.edu/papers/gashler2011ijcnn1.pdf ''' n = len(X) dist = pairwise_distances(X) idx = np.argpartition(dist, q)[:, q] # radius for finding candidate friends: median distance to qth neighbor r = np.median(dist[np.arange(n), idx]) # make candidate graph + weights W = neighbor_graph(dist, precomputed=True, epsilon=r).matrix('csr') # NOTE: this differs from the paper, where W.data[:] = 1 initially W.data[:] = 1 / W.data # row normalize normalize(W, norm='l1', axis=1, copy=False) # XXX: hacky densify W = W.toarray() # iterate to learn optimal weights prev_goodness = 1e-12 for it in range(max_iter): goodness = 0 S = _estimate_tangent_spaces(X, W, tangent_dim) # find aligned candidates for i, row in enumerate(W): nbrs = row.nonzero()[-1] # compute alignment scores edges = X[nbrs] - X[i] edge_norms = (edges**2).sum(axis=1) a1 = (edges.dot(S[i])**2).sum(axis=1) / edge_norms a2 = (np.einsum('ij,ijk->ik', edges, S[nbrs])**2).sum(axis=1) / edge_norms a3 = _principal_angle(S[i], S[nbrs]) ** 2 x = (np.minimum(curv_thresh, a1) * np.minimum(curv_thresh, a2) * np.minimum(curv_thresh, a3)) # decay weight of least-aligned candidates excess = x.shape[0] - k if excess > 0: bad_idx = np.argpartition(x, excess-1)[:excess] W[i, nbrs[bad_idx]] *= decay_rate W[i] /= W[i].sum() # update goodness measure (weighted 
alignment) goodness += x.dot(W[i,nbrs]) if verbose: # pragma: no cover goodness /= n print(it, goodness, goodness / prev_goodness) if goodness / prev_goodness <= 1.0001: break prev_goodness = goodness else: warnings.warn('Failed to converge after %d iterations.' % max_iter) # use the largest k weights for each row of W, weighted by original distance indptr, indices, data = [0], [], [] for i, row in enumerate(W): nbrs = row.nonzero()[-1] if len(nbrs) > k: nbrs = nbrs[np.argpartition(row[nbrs], len(nbrs)-k)[-k:]] indices.extend(nbrs) indptr.append(len(nbrs)) data.extend(dist[i, nbrs]) indptr = np.cumsum(indptr) data = np.array(data) indices = np.array(indices) W = ss.csr_matrix((data, indices, indptr), shape=W.shape) return Graph.from_adj_matrix(W)
python
def saffron(X, q=32, k=4, tangent_dim=1, curv_thresh=0.95, decay_rate=0.9, max_iter=15, verbose=False): ''' SAFFRON graph construction method. X : (n,d)-array of coordinates q : int, median number of candidate friends per vertex k : int, number of friends to select per vertex, k < q tangent_dim : int, dimensionality of manifold tangent space curv_thresh : float, tolerance to curvature, lambda in the paper decay_rate : float, controls step size per iteration, between 0 and 1 max_iter : int, cap on number of iterations verbose : bool, print goodness measure per iteration when True From "Tangent Space Guided Intelligent Neighbor Finding", by Gashler & Martinez, 2011. See http://axon.cs.byu.edu/papers/gashler2011ijcnn1.pdf ''' n = len(X) dist = pairwise_distances(X) idx = np.argpartition(dist, q)[:, q] # radius for finding candidate friends: median distance to qth neighbor r = np.median(dist[np.arange(n), idx]) # make candidate graph + weights W = neighbor_graph(dist, precomputed=True, epsilon=r).matrix('csr') # NOTE: this differs from the paper, where W.data[:] = 1 initially W.data[:] = 1 / W.data # row normalize normalize(W, norm='l1', axis=1, copy=False) # XXX: hacky densify W = W.toarray() # iterate to learn optimal weights prev_goodness = 1e-12 for it in range(max_iter): goodness = 0 S = _estimate_tangent_spaces(X, W, tangent_dim) # find aligned candidates for i, row in enumerate(W): nbrs = row.nonzero()[-1] # compute alignment scores edges = X[nbrs] - X[i] edge_norms = (edges**2).sum(axis=1) a1 = (edges.dot(S[i])**2).sum(axis=1) / edge_norms a2 = (np.einsum('ij,ijk->ik', edges, S[nbrs])**2).sum(axis=1) / edge_norms a3 = _principal_angle(S[i], S[nbrs]) ** 2 x = (np.minimum(curv_thresh, a1) * np.minimum(curv_thresh, a2) * np.minimum(curv_thresh, a3)) # decay weight of least-aligned candidates excess = x.shape[0] - k if excess > 0: bad_idx = np.argpartition(x, excess-1)[:excess] W[i, nbrs[bad_idx]] *= decay_rate W[i] /= W[i].sum() # update goodness measure (weighted 
alignment) goodness += x.dot(W[i,nbrs]) if verbose: # pragma: no cover goodness /= n print(it, goodness, goodness / prev_goodness) if goodness / prev_goodness <= 1.0001: break prev_goodness = goodness else: warnings.warn('Failed to converge after %d iterations.' % max_iter) # use the largest k weights for each row of W, weighted by original distance indptr, indices, data = [0], [], [] for i, row in enumerate(W): nbrs = row.nonzero()[-1] if len(nbrs) > k: nbrs = nbrs[np.argpartition(row[nbrs], len(nbrs)-k)[-k:]] indices.extend(nbrs) indptr.append(len(nbrs)) data.extend(dist[i, nbrs]) indptr = np.cumsum(indptr) data = np.array(data) indices = np.array(indices) W = ss.csr_matrix((data, indices, indptr), shape=W.shape) return Graph.from_adj_matrix(W)
[ "def", "saffron", "(", "X", ",", "q", "=", "32", ",", "k", "=", "4", ",", "tangent_dim", "=", "1", ",", "curv_thresh", "=", "0.95", ",", "decay_rate", "=", "0.9", ",", "max_iter", "=", "15", ",", "verbose", "=", "False", ")", ":", "n", "=", "le...
SAFFRON graph construction method. X : (n,d)-array of coordinates q : int, median number of candidate friends per vertex k : int, number of friends to select per vertex, k < q tangent_dim : int, dimensionality of manifold tangent space curv_thresh : float, tolerance to curvature, lambda in the paper decay_rate : float, controls step size per iteration, between 0 and 1 max_iter : int, cap on number of iterations verbose : bool, print goodness measure per iteration when True From "Tangent Space Guided Intelligent Neighbor Finding", by Gashler & Martinez, 2011. See http://axon.cs.byu.edu/papers/gashler2011ijcnn1.pdf
[ "SAFFRON", "graph", "construction", "method", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/saffron.py#L16-L100
all-umass/graphs
graphs/construction/saffron.py
_principal_angle
def _principal_angle(a, B): '''a is (d,t), B is (k,d,t)''' # TODO: check case for t = d-1 if a.shape[1] == 1: return a.T.dot(B)[0,:,0] # find normals that maximize distance when projected x1 = np.einsum('abc,adc->abd', B, B).dot(a) - a # b.dot(b.T).dot(a) - a x2 = np.einsum('ab,cad->cbd', a.dot(a.T), B) - B # a.dot(a.T).dot(b) - b xx = np.vstack((x1, x2)) # batch PCA (1st comp. only) xx -= xx.mean(axis=1)[:,None] c = np.einsum('abc,abd->acd', xx, xx) _, vecs = np.linalg.eigh(c) fpc = vecs[:,:,-1] fpc1 = fpc[:len(x1)] fpc2 = fpc[len(x1):] # a.dot(fpc1).dot(b.dot(fpc2)) lhs = a.dot(fpc1.T).T rhs = np.einsum('abc,ac->ab', B, fpc2) return np.einsum('ij,ij->i', lhs, rhs)
python
def _principal_angle(a, B): '''a is (d,t), B is (k,d,t)''' # TODO: check case for t = d-1 if a.shape[1] == 1: return a.T.dot(B)[0,:,0] # find normals that maximize distance when projected x1 = np.einsum('abc,adc->abd', B, B).dot(a) - a # b.dot(b.T).dot(a) - a x2 = np.einsum('ab,cad->cbd', a.dot(a.T), B) - B # a.dot(a.T).dot(b) - b xx = np.vstack((x1, x2)) # batch PCA (1st comp. only) xx -= xx.mean(axis=1)[:,None] c = np.einsum('abc,abd->acd', xx, xx) _, vecs = np.linalg.eigh(c) fpc = vecs[:,:,-1] fpc1 = fpc[:len(x1)] fpc2 = fpc[len(x1):] # a.dot(fpc1).dot(b.dot(fpc2)) lhs = a.dot(fpc1.T).T rhs = np.einsum('abc,ac->ab', B, fpc2) return np.einsum('ij,ij->i', lhs, rhs)
[ "def", "_principal_angle", "(", "a", ",", "B", ")", ":", "# TODO: check case for t = d-1", "if", "a", ".", "shape", "[", "1", "]", "==", "1", ":", "return", "a", ".", "T", ".", "dot", "(", "B", ")", "[", "0", ",", ":", ",", "0", "]", "# find norm...
a is (d,t), B is (k,d,t)
[ "a", "is", "(", "d", "t", ")", "B", "is", "(", "k", "d", "t", ")" ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/saffron.py#L116-L138