repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10678-L10699
def doeqdi(x, y, UP=False):
    """
    Takes digitized x, y data and returns the dec, inc, assuming an
    equal area projection.

    Parameters
    ----------
    x : array of digitized x from point on equal area projection
    y : array of digitized y from point on equal area projection
    UP : bool
        If True, the points are from an upper hemisphere projection.

    Returns
    -------
    dec : declination (degrees, in [0, 360))
    inc : inclination (degrees)
    """
    xp, yp = y, x  # need to switch into geographic convention
    r = np.sqrt(xp**2 + yp**2)
    z = 1. - r**2
    t = np.arcsin(z)
    # Was `UP == 1`: treat any truthy flag as upper hemisphere, which is
    # what the boolean keyword advertises.
    if UP:
        t = -t
    p = np.arctan2(yp, xp)
    dec, inc = np.degrees(p) % 360, np.degrees(t)
    return dec, inc
[ "def", "doeqdi", "(", "x", ",", "y", ",", "UP", "=", "False", ")", ":", "xp", ",", "yp", "=", "y", ",", "x", "# need to switch into geographic convention", "r", "=", "np", ".", "sqrt", "(", "xp", "**", "2", "+", "yp", "**", "2", ")", "z", "=", ...
Takes digitized x,y, data and returns the dec,inc, assuming an equal area projection Parameters __________________ x : array of digitized x from point on equal area projection y : array of digitized y from point on equal area projection UP : if True, is an upper hemisphere projection Output : dec : declination inc : inclination
[ "Takes", "digitized", "x", "y", "data", "and", "returns", "the", "dec", "inc", "assuming", "an", "equal", "area", "projection", "Parameters", "__________________", "x", ":", "array", "of", "digitized", "x", "from", "point", "on", "equal", "area", "projection",...
python
train
solvebio/solvebio-python
solvebio/cli/ipython.py
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/ipython.py#L14-L29
def launch_ipython_shell(args):  # pylint: disable=unused-argument
    """Open the SolveBio shell (IPython wrapper)"""
    try:
        import IPython  # noqa
    except ImportError:
        _print("The SolveBio Python shell requires IPython.\n"
               "To install, type: 'pip install ipython'")
        return False

    # Modern IPython (>= 5) exposes version_info; use the new shell there.
    if hasattr(IPython, "version_info") and IPython.version_info > (5, 0, 0, ''):
        return launch_ipython_5_shell(args)

    # Anything older falls back to the legacy shell with a nudge to upgrade.
    _print("WARNING: Please upgrade IPython (you are running version: {})"
           .format(IPython.__version__))
    return launch_ipython_legacy_shell(args)
[ "def", "launch_ipython_shell", "(", "args", ")", ":", "# pylint: disable=unused-argument", "try", ":", "import", "IPython", "# noqa", "except", "ImportError", ":", "_print", "(", "\"The SolveBio Python shell requires IPython.\\n\"", "\"To install, type: 'pip install ipython'\"", ...
Open the SolveBio shell (IPython wrapper)
[ "Open", "the", "SolveBio", "shell", "(", "IPython", "wrapper", ")" ]
python
test
nickmckay/LiPD-utilities
Python/lipd/lpd_noaa.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/lpd_noaa.py#L554-L578
def __reorganize_geo(self):
    """
    Concat geo value and units, and reorganize the rest.

    References geo data from self.noaa_data_sorted. Places new data into
    self.noaa_geo temporarily, and then back into self.noaa_data_sorted.
    Missing keys are logged rather than raised so conversion continues.

    :return: None
    """
    logger_lpd_noaa.info("enter reorganize_geo")
    try:
        # Geo -> Properties: remap each property key to its NOAA name.
        for k, v in self.noaa_data_sorted["Site_Information"]['properties'].items():
            noaa_key = self.__get_noaa_key(k)
            self.noaa_geo[noaa_key] = v
    except KeyError:
        # No geo properties present; not fatal, just note it.
        logger_lpd_noaa.info("reorganize_geo: KeyError: geo properties")
    try:
        # Geo -> Geometry: coordinates are handled by a dedicated helper.
        self.__reorganize_coordinates()
    except Exception:
        logger_lpd_noaa.warning("reorganize_geo: Exception: missing required data: coordinates")
    # put the temporarily organized data into the self.noaa_data_sorted
    self.noaa_data_sorted["Site_Information"] = self.noaa_geo
    return
[ "def", "__reorganize_geo", "(", "self", ")", ":", "logger_lpd_noaa", ".", "info", "(", "\"enter reorganize_geo\"", ")", "try", ":", "# Geo -> Properties", "for", "k", ",", "v", "in", "self", ".", "noaa_data_sorted", "[", "\"Site_Information\"", "]", "[", "'prope...
Concat geo value and units, and reorganize the rest References geo data from self.noaa_data_sorted Places new data into self.noaa_geo temporarily, and then back into self.noaa_data_sorted. :return:
[ "Concat", "geo", "value", "and", "units", "and", "reorganize", "the", "rest", "References", "geo", "data", "from", "self", ".", "noaa_data_sorted", "Places", "new", "data", "into", "self", ".", "noaa_geo", "temporarily", "and", "then", "back", "into", "self", ...
python
train
ozgurgunes/django-manifest
manifest/accounts/forms.py
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/forms.py#L181-L198
def clean(self):
    """
    Checks for the identification and password.

    If the combination can't be found will raise an invalid sign in
    error; otherwise the cleaned data is returned unchanged.
    """
    identification = self.cleaned_data.get('identification')
    password = self.cleaned_data.get('password')
    # Only attempt authentication when both fields were supplied;
    # per-field validators report missing values themselves.
    if not (identification and password):
        return self.cleaned_data
    self.user_cache = authenticate(identification=identification,
                                   password=password)
    if self.user_cache is None:
        raise forms.ValidationError(_(u"Please enter a correct "
                                      "username or email address and password. "
                                      "Note that both fields are case-sensitive."))
    return self.cleaned_data
[ "def", "clean", "(", "self", ")", ":", "identification", "=", "self", ".", "cleaned_data", ".", "get", "(", "'identification'", ")", "password", "=", "self", ".", "cleaned_data", ".", "get", "(", "'password'", ")", "if", "identification", "and", "password", ...
Checks for the identification and password. If the combination can't be found will raise an invalid sign in error.
[ "Checks", "for", "the", "identification", "and", "password", "." ]
python
train
pybel/pybel-tools
src/pybel_tools/assembler/html/assembler.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/assembler/html/assembler.py#L45-L98
def to_html(graph: BELGraph) -> str:
    """Render the graph as an HTML string.

    Common usage may involve writing to a file like:

    >>> from pybel.examples import sialic_acid_graph
    >>> with open('html_output.html', 'w') as file:
    ...     print(to_html(sialic_acid_graph), file=file)
    """
    # Pre-computed summary statistics for the graph; individual entries are
    # unpacked below and the whole dict is also forwarded to the template.
    context = get_network_summary_dict(graph)
    summary_dict = graph.summary_dict()
    citation_years = context['citation_years']
    function_count = context['function_count']
    relation_count = context['relation_count']
    error_count = context['error_count']
    transformations_count = context['modifications_count']
    hub_data = context['hub_data']
    disease_data = context['disease_data']
    authors_count = context['authors_count']
    variants_count = context['variants_count']
    namespaces_count = context['namespaces_count']
    confidence_count = context['confidence_count']
    # Fixed label order (missing labels default to 0) so the confidence
    # chart always renders the full scale consistently.
    confidence_data = [
        (label, confidence_count.get(label, 0))
        for label in ('None', 'Very Low', 'Low', 'Medium', 'High', 'Very High')
    ]
    template = environment.get_template('index.html')
    return template.render(
        graph=graph,
        # Node Charts
        chart_1_data=prepare_c3(function_count, 'Node Count'),
        chart_6_data=prepare_c3(namespaces_count, 'Node Count'),
        chart_5_data=prepare_c3(variants_count, 'Node Count'),
        number_variants=sum(variants_count.values()),
        number_namespaces=len(namespaces_count),
        # Edge Charts
        chart_2_data=prepare_c3(relation_count, 'Edge Count'),
        chart_4_data=prepare_c3(transformations_count, 'Edge Count') if transformations_count else None,
        number_transformations=sum(transformations_count.values()),
        # Error Charts (only rendered when there are errors)
        chart_3_data=prepare_c3(error_count, 'Error Type') if error_count else None,
        # Topology Charts
        chart_7_data=prepare_c3(hub_data, 'Degree'),
        chart_9_data=prepare_c3(disease_data, 'Degree') if disease_data else None,
        # Bibliometrics Charts
        chart_authors_count=prepare_c3(authors_count, 'Edges Contributed'),
        chart_10_data=prepare_c3_time_series(citation_years, 'Number of Articles') if citation_years else None,
        chart_confidence_count=prepare_c3(confidence_data, 'Edge Count'),
        summary_dict=summary_dict,
        # Everything else :)
        **context
    )
[ "def", "to_html", "(", "graph", ":", "BELGraph", ")", "->", "str", ":", "context", "=", "get_network_summary_dict", "(", "graph", ")", "summary_dict", "=", "graph", ".", "summary_dict", "(", ")", "citation_years", "=", "context", "[", "'citation_years'", "]", ...
Render the graph as an HTML string. Common usage may involve writing to a file like: >>> from pybel.examples import sialic_acid_graph >>> with open('html_output.html', 'w') as file: ... print(to_html(sialic_acid_graph), file=file)
[ "Render", "the", "graph", "as", "an", "HTML", "string", "." ]
python
valid
SBRG/ssbio
ssbio/pipeline/atlas3.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas3.py#L1000-L1093
def run_all2(protgroup, memornot, subsequences, base_outdir, protgroup_dict,
             protein_feathers_dir, date, errfile, impute_counts=True,
             cutoff_num_proteins=0, core_only_genes=None, length_filter_pid=.8,
             remove_correlated_feats=True, force_rerun_counts=False,
             force_rerun_percentages=False, force_rerun_pca=False):
    """run_all but ignoring observations before pca.

    Filters the protein group to the membrane/non-membrane definition,
    computes (or loads) per-strain subsequence percentage features, then runs
    PCA and pickles the fitted runner. Failures in the percentage/cleaning
    steps are appended to ``errfile`` instead of raising, so batch sweeps can
    continue past individual bad groups.
    """
    import ssbio.utils
    # Need to set multiprocessing limit for scipy/numpy stuff if
    # parallelizing anything
    import os
    os.environ['OMP_NUM_THREADS'] = '1'

    # First, filter down the protein group to the membrane/nonmembrane definition
    prots_filtered_feathers = get_protein_feather_paths(protgroup=protgroup, memornot=memornot,
                                                       protgroup_dict=protgroup_dict,
                                                       protein_feathers_dir=protein_feathers_dir,
                                                       core_only_genes=core_only_genes)
    num_proteins = len(prots_filtered_feathers)
    if num_proteins <= cutoff_num_proteins:
        return

    # Make output directories: <base>/proteome_unscaled/<memornot>/<protgroup>
    protscale = 'proteome_unscaled'
    outdir_d0 = ssbio.utils.make_dir(op.join(base_outdir, protscale))
    outdir_d1 = ssbio.utils.make_dir(op.join(outdir_d0, '-'.join(memornot)))
    outdir_final = ssbio.utils.make_dir(op.join(outdir_d1, '-'.join(protgroup)))

    if impute_counts:
        big_strain_counts_df = get_proteome_counts_impute_missing(
            prots_filtered_feathers=prots_filtered_feathers,
            outpath=op.join(outdir_final, '{}-subsequence_proteome_IMP.fthr'.format(date)),
            length_filter_pid=length_filter_pid,
            force_rerun=force_rerun_counts)
        # Divide by totals to get percentages in a new dataframe
        big_strain_percents_df = get_proteome_percentages(
            counts_df=big_strain_counts_df,
            outpath=op.join(outdir_final, '{}-subsequence_proteome_perc_IMP.fthr'.format(date)),
            force_rerun=force_rerun_percentages)
        pca_pickle = op.join(outdir_final, '{}-subsequence_pca.pckl'.format(date))
    else:
        try:
            big_strain_percents_df = get_proteome_correct_percentages(
                prots_filtered_feathers=prots_filtered_feathers,
                outpath=op.join(outdir_final, '{}-subsequence_proteome_perc_AVG.fthr'.format(date)),
                length_filter_pid=length_filter_pid,
                force_rerun=force_rerun_percentages)
            pca_pickle = op.join(outdir_final, '{}-subsequence_pca_AVG.pckl'.format(date))
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # still propagate while the error is logged for the batch run.
        except Exception:
            with open(errfile, "a") as myfile:
                myfile.write('PERCENTAGES ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
            return

    if ssbio.utils.force_rerun(flag=force_rerun_pca, outfile=pca_pickle):
        # Then, get filters for rows of the loaded feathers for interested subsequences
        keep_subsequences = get_interested_subsequences(subsequences=subsequences)

        # Some numbers: number of features
        num_feats = len(big_strain_percents_df)

        # Make an unwieldy title
        big_title = 'LOC={0}; PROTGROUP={1};\n' \
                    'NUMPROTS={2}; NUMFEATS={3}'.format('-'.join(memornot), '-'.join(protgroup),
                                                        num_proteins, num_feats)

        # Run PCA and make plots
        runner = PCAMultiROS(features_df=big_strain_percents_df, observations_df=pd.DataFrame(),
                             plot_title=big_title)
        try:
            runner.clean_data(keep_features=keep_subsequences,
                              remove_correlated_feats=remove_correlated_feats)
        except Exception:  # was a bare `except:`; see note above
            with open(errfile, "a") as myfile:
                myfile.write(
                    'CLEAN ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
            return
        runner.run_pca()
        with open(pca_pickle, 'wb') as f:
            pickle.dump(runner, f)
    else:
        with open(pca_pickle, 'rb') as f:
            runner = pickle.load(f)
[ "def", "run_all2", "(", "protgroup", ",", "memornot", ",", "subsequences", ",", "base_outdir", ",", "protgroup_dict", ",", "protein_feathers_dir", ",", "date", ",", "errfile", ",", "impute_counts", "=", "True", ",", "cutoff_num_proteins", "=", "0", ",", "core_on...
run_all but ignoring observations before pca
[ "run_all", "but", "ignoring", "observations", "before", "pca" ]
python
train
whyscream/dspam-milter
dspam/client.py
https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L241-L293
def mailfrom(self, sender=None, client_args=None):
    """
    Send LMTP MAIL FROM command, and process the server response.

    In DLMTP mode, the server expects the client to identify itself.
    Because the envelope sender is of no importance to DSPAM, the client
    is expected to send an identity and a password (dspam.conf:
    ServerPass.<ident>="<password>") instead of the actual sender. When
    you want DSPAM to deliver the message itself and need to pass the
    server an actual envelope sender for that, add the --mail-from
    parameter in client_args.

    When the server is setup in LMTP mode only (dspam.conf:
    ServerMode=standard), the envelope sender is a regular envelope
    sender, and is re-used when delivering the message after processing.

    Client args
    ===========
    When in DLMTP mode (and with proper auth credentials), the server
    accepts parameters specified by the client. These are in the form as
    they are passed to the command-line 'dspam' program. See man dspam(1)
    for details, and the process() or classify() methods in this class
    for simple examples.

    Args:
        sender -- The envelope sender to use in LMTP mode.
        client_args -- DSPAM parameters to pass to the server in DLMTP mode.

    Raises:
        DspamClientError: if both arguments are given, if client_args is
            used without DLMTP support, or if the server does not reply 250.
    """
    if sender and client_args:
        raise DspamClientError('Arguments are mutually exclusive')
    if client_args and not self.dlmtp:
        raise DspamClientError(
            'Cannot send client args, server does not support DLMTP')
    command = 'MAIL FROM:'
    if not sender:
        # No explicit sender: identify as <password>@<ident> in DLMTP
        # mode, otherwise send an empty reverse-path.
        if self.dlmtp_ident and self.dlmtp_pass:
            sender = self.dlmtp_pass + '@' + self.dlmtp_ident
        else:
            sender = ''
    command = command + '<' + sender + '>'
    if client_args:
        command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
    self._send(command + '\r\n')
    resp = self._read()
    # Anything other than a 250 reply means the server rejected MAIL FROM.
    if not resp.startswith('250'):
        raise DspamClientError(
            'Unexpected server response at MAIL FROM: ' + resp)
[ "def", "mailfrom", "(", "self", ",", "sender", "=", "None", ",", "client_args", "=", "None", ")", ":", "if", "sender", "and", "client_args", ":", "raise", "DspamClientError", "(", "'Arguments are mutually exclusive'", ")", "if", "client_args", "and", "not", "s...
Send LMTP MAIL FROM command, and process the server response. In DLMTP mode, the server expects the client to identify itself. Because the envelope sender is of no importance to DSPAM, the client is expected to send an identity and a password (dspam.conf: ServerPass.<ident>="<password>") in stead of the actual sender. When you need want DSPAM to deliver the message itself and need to pass the server an actual envelope sender for that, add the --mail-from parameter in client_args. When the server is setup in LMTP mode only (dspam.conf: ServerMode=standard), the envelope sender is a regular envelope sender, and is re-used when delivering the message after processing. Client args =========== When in DLMTP mode (and with proper auth credentials), the server accepts parameters specified by the client. These are in the form as they are passed to the command-line 'dspam' program. See man dspam(1) for details, and the process() or classify() methods in this class for simple examples. Args: sender -- The envelope sender to use in LMTP mode. client_args -- DSPAM parameters to pass to the server in DLMTP mode.
[ "Send", "LMTP", "MAIL", "FROM", "command", "and", "process", "the", "server", "response", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L422-L462
def transformer_image_decoder(targets, encoder_output, ed_attention_bias, hparams, name=None):
    """Transformer image decoder over targets with local attention.

    Args:
      targets: Tensor of shape [batch, ...], and whose size is batch * height *
        width * hparams.num_channels * hparams.hidden_size.
      encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
      ed_attention_bias: Tensor which broadcasts with shape [batch,
        hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
      hparams: HParams.
      name: string, variable scope.

    Returns:
      Tensor of shape [batch, height, width * hparams.num_channels,
      hparams.hidden_size].
    """
    with tf.variable_scope(name, default_name="transformer_dec"):
        batch_size = common_layers.shape_list(targets)[0]
        # Reshape into [batch, img_len, img_len, channels * hidden]; the
        # reshape implies square hparams.img_len x hparams.img_len targets.
        targets = tf.reshape(targets, [batch_size,
                                       hparams.img_len,
                                       hparams.img_len,
                                       hparams.num_channels * hparams.hidden_size])
        decoder_input, _, _ = cia.prepare_decoder(targets, hparams)
        decoder_output = cia.transformer_decoder_layers(
            decoder_input,
            encoder_output,
            hparams.num_decoder_layers or hparams.num_hidden_layers,
            hparams,
            attention_type=hparams.dec_attention_type,
            encoder_decoder_attention_bias=ed_attention_bias,
            name="decoder")
        # Fold the channel dimension into width for the returned layout.
        decoder_output = tf.reshape(decoder_output, [batch_size,
                                                     hparams.img_len,
                                                     hparams.img_len * hparams.num_channels,
                                                     hparams.hidden_size])
        return decoder_output
[ "def", "transformer_image_decoder", "(", "targets", ",", "encoder_output", ",", "ed_attention_bias", ",", "hparams", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"transformer_dec\"", ")", ":", ...
Transformer image decoder over targets with local attention. Args: targets: Tensor of shape [batch, ...], and whose size is batch * height * width * hparams.num_channels * hparams.hidden_size. encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size]. ed_attention_bias: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, height, width * hparams.num_channels, hparams.hidden_size].
[ "Transformer", "image", "decoder", "over", "targets", "with", "local", "attention", "." ]
python
train
Loudr/asana-hub
asana_hub/actions/pull_request.py
https://github.com/Loudr/asana-hub/blob/af996ce890ed23d8ede5bf68dcd318e3438829cb/asana_hub/actions/pull_request.py#L19-L55
def add_arguments(cls, parser):
    """Add arguments to the parser for collection in app.args.

    Args:
        parser: `argparse.ArgumentParser`. Parser.
            Arguments added here are server on self.args.
    """
    # All three options share the same store/optional-value behavior.
    shared = {'action': 'store', 'nargs': '?', 'const': ''}
    parser.add_argument(
        '-i', '--issue',
        dest='issue',
        help="[pr] issue #",
        **shared
    )
    parser.add_argument(
        '-br', '--branch',
        dest='branch',
        help="[pr] branch",
        **shared
    )
    parser.add_argument(
        '-tbr', '--target-branch',
        default='master',
        dest='target_branch',
        help="[pr] name of branch to pull changes into\n(defaults to: master)",
        **shared
    )
[ "def", "add_arguments", "(", "cls", ",", "parser", ")", ":", "parser", ".", "add_argument", "(", "'-i'", ",", "'--issue'", ",", "action", "=", "'store'", ",", "nargs", "=", "'?'", ",", "const", "=", "''", ",", "dest", "=", "'issue'", ",", "help", "="...
Add arguments to the parser for collection in app.args. Args: parser: `argparse.ArgumentParser`. Parser. Arguments added here are server on self.args.
[ "Add", "arguments", "to", "the", "parser", "for", "collection", "in", "app", ".", "args", "." ]
python
test
a1ezzz/wasp-general
wasp_general/network/clients/virtual_dir.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/clients/virtual_dir.py#L77-L85
def join_path(self, *path):
    """ Unite entries to generate a single path

    :param path: path items to unite
    :return: str
    """
    # Glue the pieces with this client's separator, then canonicalize.
    joined = self.directory_sep().join(path)
    return self.normalize_path(joined)
[ "def", "join_path", "(", "self", ",", "*", "path", ")", ":", "path", "=", "self", ".", "directory_sep", "(", ")", ".", "join", "(", "path", ")", "return", "self", ".", "normalize_path", "(", "path", ")" ]
Unite entries to generate a single path :param path: path items to unite :return: str
[ "Unite", "entries", "to", "generate", "a", "single", "path" ]
python
train
treycucco/bidon
bidon/data_table.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/data_table.py#L171-L176
def clean_value(self, value):
    """Cleans a value, using either the user provided clean_value, or cls.reduce_value."""
    cleaner = self._clean_value
    # Fall back to the class-level reducer when no custom cleaner was given.
    return cleaner(value) if cleaner else self.reduce_value(value)
[ "def", "clean_value", "(", "self", ",", "value", ")", ":", "if", "self", ".", "_clean_value", ":", "return", "self", ".", "_clean_value", "(", "value", ")", "else", ":", "return", "self", ".", "reduce_value", "(", "value", ")" ]
Cleans a value, using either the user provided clean_value, or cls.reduce_value.
[ "Cleans", "a", "value", "using", "either", "the", "user", "provided", "clean_value", "or", "cls", ".", "reduce_value", "." ]
python
train
shoebot/shoebot
shoebot/gui/var_window.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/gui/var_window.py#L197-L212
def var_deleted(self, v):
    """
    Remove the widget for a var that was deleted in the bot.

    :param v: the deleted variable; its name keys the widget map
    :return:
    """
    # Drop the bookkeeping entry and detach the widget's container row
    # (each widget lives inside its own parent container).
    widget = self.widgets.pop(v.name)
    row = widget.get_parent()
    self.container.remove(row)
    # Shrink the window to fit the remaining widgets.
    self.window.set_size_request(400, 35 * len(self.widgets))
    self.window.show_all()
[ "def", "var_deleted", "(", "self", ",", "v", ")", ":", "widget", "=", "self", ".", "widgets", "[", "v", ".", "name", "]", "# widgets are all in a single container ..", "parent", "=", "widget", ".", "get_parent", "(", ")", "self", ".", "container", ".", "re...
var was added in the bot :param v: :return:
[ "var", "was", "added", "in", "the", "bot" ]
python
valid
tomasbasham/dominos
dominos/api.py
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L102-L117
def get_menu(self, store):
    '''
    Retrieve the menu from the selected store.

    :param Store store: A store.
    :return: The store menu.
    :rtype: Menu
    '''
    # Collection-only stores have no delivery, hence the inverted flag.
    query = {
        'storeId': store.store_id,
        'menuVersion': store.menu_version,
        'collectionOnly': not store.delivery_available,
    }
    raw = self.__get('/ProductCatalog/GetStoreCatalog', params=query)
    return Menu(raw.json())
[ "def", "get_menu", "(", "self", ",", "store", ")", ":", "params", "=", "{", "'collectionOnly'", ":", "not", "store", ".", "delivery_available", ",", "'menuVersion'", ":", "store", ".", "menu_version", ",", "'storeId'", ":", "store", ".", "store_id", ",", "...
Retrieve the menu from the selected store. :param Store store: A store. :return: The store menu. :rtype: Menu
[ "Retrieve", "the", "menu", "from", "the", "selected", "store", "." ]
python
test
dpranke/pyjson5
json5/lib.py
https://github.com/dpranke/pyjson5/blob/94e2ddee6988ec09de48cdb9aad8f6ce10d7fc05/json5/lib.py#L120-L148
def dumps(obj, **kwargs):
    """Serialize ``obj`` to a JSON5-formatted ``str``.

    Strings are rendered single-quoted when possible, double-quoted when
    they contain a single quote, and fully JSON-escaped when they contain
    both quote styles. ``kwargs`` is accepted for API compatibility but
    is currently ignored. Unsupported types serialize to an empty string.
    """
    t = type(obj)
    if obj is True:
        return u'true'
    elif obj is False:
        return u'false'
    elif obj is None:  # was `obj == None` (PEP 8 E711); identity is the correct test
        return u'null'
    elif t == type('') or t == type(u''):
        single = "'" in obj
        double = '"' in obj
        if single and double:
            # Both quote styles present: defer to full JSON escaping.
            return json.dumps(obj)
        elif single:
            return '"' + obj + '"'
        else:
            return "'" + obj + "'"
    elif t is float or t is int:
        return str(obj)
    elif t is dict:
        return u'{' + u','.join([
            _dumpkey(k) + u':' + dumps(v) for k, v in obj.items()
        ]) + '}'
    elif t is list:
        return u'[' + ','.join([dumps(el) for el in obj]) + u']'
    else:  # pragma: no cover
        return u''
[ "def", "dumps", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "t", "=", "type", "(", "obj", ")", "if", "obj", "is", "True", ":", "return", "u'true'", "elif", "obj", "is", "False", ":", "return", "u'false'", "elif", "obj", "==", "None", ":", "ret...
Serialize ``obj`` to a JSON5-formatted ``str``.
[ "Serialize", "obj", "to", "a", "JSON5", "-", "formatted", "str", "." ]
python
train
Gandi/gandi.cli
gandi/cli/commands/record.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/record.py#L101-L118
def create(gandi, domain, zone_id, name, type, value, ttl):
    """Create new DNS zone record entry for a domain."""
    if not zone_id:
        # No zone supplied: look it up from the domain's info.
        result = gandi.domain.info(domain)
        zone_id = result['zone_id']
        if not zone_id:
            gandi.echo('No zone records found, domain %s doesn\'t seems to be '
                       'managed at Gandi.' % domain)
            return
    record = {'type': type, 'name': name, 'value': value}
    if ttl:
        record['ttl'] = ttl
    return gandi.record.create(zone_id, record)
[ "def", "create", "(", "gandi", ",", "domain", ",", "zone_id", ",", "name", ",", "type", ",", "value", ",", "ttl", ")", ":", "if", "not", "zone_id", ":", "result", "=", "gandi", ".", "domain", ".", "info", "(", "domain", ")", "zone_id", "=", "result...
Create new DNS zone record entry for a domain.
[ "Create", "new", "DNS", "zone", "record", "entry", "for", "a", "domain", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L4619-L4640
def validate_account_user_email(self, account_id, user_id, **kwargs):  # noqa: E501
    """Validate the user email.  # noqa: E501

    An endpoint for validating the user email.

    **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/validate-email -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.validate_account_user_email(account_id, user_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str user_id: The ID of the user whose email is validated. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the HTTP response and return only the payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous path: the underlying call returns a request thread.
        return self.validate_account_user_email_with_http_info(account_id, user_id, **kwargs)  # noqa: E501
    else:
        (data) = self.validate_account_user_email_with_http_info(account_id, user_id, **kwargs)  # noqa: E501
        return data
[ "def", "validate_account_user_email", "(", "self", ",", "account_id", ",", "user_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", ...
Validate the user email. # noqa: E501 An endpoint for validating the user email. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/validate-email -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.validate_account_user_email(account_id, user_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str user_id: The ID of the user whose email is validated. (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Validate", "the", "user", "email", ".", "#", "noqa", ":", "E501" ]
python
train
praekelt/django-livechat
livechat/models.py
https://github.com/praekelt/django-livechat/blob/22d86fb4219e5af6c83e0542aa30e5ea54e71d26/livechat/models.py#L54-L62
def get_current_live_chat(self):
    """
    Check if there is a live chat on the go, so that we should take over
    the AskMAMA page with the live chat.

    :return: the upcoming live chat if it is currently in progress,
        otherwise ``None``.
    """
    # Removed unused `now = datetime.now()` local; the in-progress check
    # is delegated entirely to chat.is_in_progress().
    chat = self.upcoming_live_chat()
    if chat and chat.is_in_progress():
        return chat
    return None
[ "def", "get_current_live_chat", "(", "self", ")", ":", "now", "=", "datetime", ".", "now", "(", ")", "chat", "=", "self", ".", "upcoming_live_chat", "(", ")", "if", "chat", "and", "chat", ".", "is_in_progress", "(", ")", ":", "return", "chat", "return", ...
Check if there is a live chat on the go, so that we should take over the AskMAMA page with the live chat.
[ "Check", "if", "there", "is", "a", "live", "chat", "on", "the", "go", "so", "that", "we", "should", "take", "over", "the", "AskMAMA", "page", "with", "the", "live", "chat", "." ]
python
train
dmwm/DBS
Server/Python/src/dbs/web/DBSMigrateModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSMigrateModel.py#L68-L93
def submit(self):
    """
    Interface for submitting a migration request.

    Required input keys:
    MIGRATION_URL: The source DBS url for migration.
    MIGRATION_INPUT: The block or dataset names to be migrated.
    """
    # Decode the raw request body into a dict before validation.
    body = request.body.read()
    indata = cjson.decode(body)
    try:
        indata = validateJSONInputNoCopy("migration_rqst", indata)
        # Stamp server-side bookkeeping fields; status 0 means "pending".
        indata.update({"creation_date": dbsUtils().getTime(),
                       "last_modification_date": dbsUtils().getTime(),
                       "create_by": dbsUtils().getCreateBy(),
                       "last_modified_by": dbsUtils().getCreateBy(),
                       "migration_status": 0})
        return self.dbsMigrate.insertMigrationRequest(indata)
    except dbsException as de:
        # Known DBS errors carry their own code/message/server detail.
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
    except Exception as ex:
        sError = "DBSMigrateModle/submit. %s\n Exception trace: \n %s." \
                 % (ex, traceback.format_exc())
        # HTTP 400-ish errors are reported as bad input; everything else
        # is treated as a server-side failure.
        if hasattr(ex, 'status') and ex.status == 400:
            dbsExceptionHandler('dbsException-invalid-input2', str(ex), self.logger.exception, sError)
        else:
            dbsExceptionHandler('dbsException-server-error', str(ex), self.logger.exception, sError)
[ "def", "submit", "(", "self", ")", ":", "body", "=", "request", ".", "body", ".", "read", "(", ")", "indata", "=", "cjson", ".", "decode", "(", "body", ")", "try", ":", "indata", "=", "validateJSONInputNoCopy", "(", "\"migration_rqst\"", ",", "indata", ...
Interface for submitting a migration request. Required input keys: MIGRATION_URL: The source DBS url for migration. MIGRATION_INPUT: The block or dataset names to be migrated.
[ "Interface", "for", "submitting", "a", "migration", "request", ".", "Required", "input", "keys", ":", "MIGRATION_URL", ":", "The", "source", "DBS", "url", "for", "migration", ".", "MIGRATION_INPUT", ":", "The", "block", "or", "dataset", "names", "to", "be", ...
python
train
PMEAL/OpenPNM
openpnm/models/geometry/throat_vector.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/geometry/throat_vector.py#L10-L34
def pore_to_pore(target): r""" Calculates throat vector as straight path between connected pores. Parameters ---------- geometry : OpenPNM Geometry object The object containing the geometrical properties of the throats Notes ----- There is an important impicit assumption here: the positive direction is taken as the direction from the pore with the lower index to the higher. This corresponds to the pores in the 1st and 2nd columns of the 'throat.conns' array as stored on the etwork. """ network = target.project.network throats = network.throats(target.name) conns = network['throat.conns'] P1 = conns[:, 0] P2 = conns[:, 1] coords = network['pore.coords'] vec = coords[P2] - coords[P1] unit_vec = tr.unit_vector(vec, axis=1) return unit_vec[throats]
[ "def", "pore_to_pore", "(", "target", ")", ":", "network", "=", "target", ".", "project", ".", "network", "throats", "=", "network", ".", "throats", "(", "target", ".", "name", ")", "conns", "=", "network", "[", "'throat.conns'", "]", "P1", "=", "conns",...
r""" Calculates throat vector as straight path between connected pores. Parameters ---------- geometry : OpenPNM Geometry object The object containing the geometrical properties of the throats Notes ----- There is an important impicit assumption here: the positive direction is taken as the direction from the pore with the lower index to the higher. This corresponds to the pores in the 1st and 2nd columns of the 'throat.conns' array as stored on the etwork.
[ "r", "Calculates", "throat", "vector", "as", "straight", "path", "between", "connected", "pores", "." ]
python
train
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/__init__.py#L405-L414
def statistics(self): """ Access the statistics :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsList :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsList """ if self._statistics is None: self._statistics = WorkspaceStatisticsList(self._version, workspace_sid=self._solution['sid'], ) return self._statistics
[ "def", "statistics", "(", "self", ")", ":", "if", "self", ".", "_statistics", "is", "None", ":", "self", ".", "_statistics", "=", "WorkspaceStatisticsList", "(", "self", ".", "_version", ",", "workspace_sid", "=", "self", ".", "_solution", "[", "'sid'", "]...
Access the statistics :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsList :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsList
[ "Access", "the", "statistics" ]
python
train
woolfson-group/isambard
isambard/ampal/base_ampal.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/base_ampal.py#L326-L350
def rmsd(self, other, backbone=False): """Calculates the RMSD between two AMPAL objects. Notes ----- No fitting operation is performs and both AMPAL objects must have the same number of atoms. Parameters ---------- other : AMPAL Object Any AMPAL object with `get_atoms` method. backbone : bool, optional Calculates RMSD of backbone only. """ assert type(self) == type(other) if backbone and hasattr(self, 'backbone'): points1 = self.backbone.get_atoms() points2 = other.backbone.get_atoms() else: points1 = self.get_atoms() points2 = other.get_atoms() points1 = [x._vector for x in points1] points2 = [x._vector for x in points2] return rmsd(points1=points1, points2=points2)
[ "def", "rmsd", "(", "self", ",", "other", ",", "backbone", "=", "False", ")", ":", "assert", "type", "(", "self", ")", "==", "type", "(", "other", ")", "if", "backbone", "and", "hasattr", "(", "self", ",", "'backbone'", ")", ":", "points1", "=", "s...
Calculates the RMSD between two AMPAL objects. Notes ----- No fitting operation is performs and both AMPAL objects must have the same number of atoms. Parameters ---------- other : AMPAL Object Any AMPAL object with `get_atoms` method. backbone : bool, optional Calculates RMSD of backbone only.
[ "Calculates", "the", "RMSD", "between", "two", "AMPAL", "objects", "." ]
python
train
tornadoweb/tornado
demos/blog/blog.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/demos/blog/blog.py#L99-L112
async def query(self, stmt, *args): """Query for a list of results. Typical usage:: results = await self.query(...) Or:: for row in await self.query(...) """ with (await self.application.db.cursor()) as cur: await cur.execute(stmt, args) return [self.row_to_obj(row, cur) for row in await cur.fetchall()]
[ "async", "def", "query", "(", "self", ",", "stmt", ",", "*", "args", ")", ":", "with", "(", "await", "self", ".", "application", ".", "db", ".", "cursor", "(", ")", ")", "as", "cur", ":", "await", "cur", ".", "execute", "(", "stmt", ",", "args", ...
Query for a list of results. Typical usage:: results = await self.query(...) Or:: for row in await self.query(...)
[ "Query", "for", "a", "list", "of", "results", "." ]
python
train
stxnext/mappet
mappet/mappet.py
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L90-L104
def setattr(self, key, value): u"""Sets an attribute on a node. >>> xml = etree.Element('root') >>> Node(xml).setattr('text', 'text2') >>> Node(xml).getattr('text') 'text2' >>> Node(xml).setattr('attr', 'val') >>> Node(xml).getattr('attr') 'val' """ if key == 'text': self._xml.text = str(value) else: self._xml.set(key, str(value))
[ "def", "setattr", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "==", "'text'", ":", "self", ".", "_xml", ".", "text", "=", "str", "(", "value", ")", "else", ":", "self", ".", "_xml", ".", "set", "(", "key", ",", "str", "(", "...
u"""Sets an attribute on a node. >>> xml = etree.Element('root') >>> Node(xml).setattr('text', 'text2') >>> Node(xml).getattr('text') 'text2' >>> Node(xml).setattr('attr', 'val') >>> Node(xml).getattr('attr') 'val'
[ "u", "Sets", "an", "attribute", "on", "a", "node", "." ]
python
train
pokerregion/poker
poker/room/pokerstars.py
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L417-L430
def add_label(self, name, color): """Add a new label. It's id will automatically be calculated.""" color_upper = color.upper() if not self._color_re.match(color_upper): raise ValueError('Invalid color: {}'.format(color)) labels_tag = self.root[0] last_id = int(labels_tag[-1].get('id')) new_id = str(last_id + 1) new_label = etree.Element('label', id=new_id, color=color_upper) new_label.text = name labels_tag.append(new_label)
[ "def", "add_label", "(", "self", ",", "name", ",", "color", ")", ":", "color_upper", "=", "color", ".", "upper", "(", ")", "if", "not", "self", ".", "_color_re", ".", "match", "(", "color_upper", ")", ":", "raise", "ValueError", "(", "'Invalid color: {}'...
Add a new label. It's id will automatically be calculated.
[ "Add", "a", "new", "label", ".", "It", "s", "id", "will", "automatically", "be", "calculated", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_adsb.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_adsb.py#L178-L218
def mavlink_packet(self, m): '''handle an incoming mavlink packet''' if m.get_type() == "ADSB_VEHICLE": id = 'ADSB-' + str(m.ICAO_address) if id not in self.threat_vehicles.keys(): # check to see if the vehicle is in the dict # if not then add it self.threat_vehicles[id] = ADSBVehicle(id=id, state=m.to_dict()) for mp in self.module_matching('map*'): from MAVProxy.modules.lib import mp_menu from MAVProxy.modules.mavproxy_map import mp_slipmap self.threat_vehicles[id].menu_item = mp_menu.MPMenuItem(name=id, returnkey=None) if m.emitter_type >= 100 and m.emitter_type-100 in obc_icons: icon = mp.map.icon(obc_icons[m.emitter_type-100]) threat_radius = get_threat_radius(m.emitter_type-100) else: icon = mp.map.icon(self.threat_vehicles[id].icon) threat_radius = 0 popup = mp_menu.MPMenuSubMenu('ADSB', items=[self.threat_vehicles[id].menu_item]) # draw the vehicle on the map mp.map.add_object(mp_slipmap.SlipIcon(id, (m.lat * 1e-7, m.lon * 1e-7), icon, layer=3, rotation=m.heading*0.01, follow=False, trail=mp_slipmap.SlipTrail(colour=(0, 255, 255)), popup_menu=popup)) if threat_radius > 0: mp.map.add_object(mp_slipmap.SlipCircle(id+":circle", 3, (m.lat * 1e-7, m.lon * 1e-7), threat_radius, (0, 255, 255), linewidth=1)) else: # the vehicle is in the dict # update the dict entry self.threat_vehicles[id].update(m.to_dict(), self.get_time()) for mp in self.module_matching('map*'): # update the map ground_alt = mp.ElevationMap.GetElevation(m.lat*1e-7, m.lon*1e-7) alt_amsl = m.altitude * 0.001 if alt_amsl > 0: alt = int(alt_amsl - ground_alt) label = str(alt) + "m" else: label = None mp.map.set_position(id, (m.lat * 1e-7, m.lon * 1e-7), rotation=m.heading*0.01, label=label, colour=(0,250,250)) mp.map.set_position(id+":circle", (m.lat * 1e-7, m.lon * 1e-7))
[ "def", "mavlink_packet", "(", "self", ",", "m", ")", ":", "if", "m", ".", "get_type", "(", ")", "==", "\"ADSB_VEHICLE\"", ":", "id", "=", "'ADSB-'", "+", "str", "(", "m", ".", "ICAO_address", ")", "if", "id", "not", "in", "self", ".", "threat_vehicle...
handle an incoming mavlink packet
[ "handle", "an", "incoming", "mavlink", "packet" ]
python
train
Yelp/kafka-utils
kafka_utils/util/monitoring.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/monitoring.py#L150-L165
def merge_offsets_metadata(topics, *offsets_responses): """Merge the offset metadata dictionaries from multiple responses. :param topics: list of topics :param offsets_responses: list of dict topic: partition: offset :returns: dict topic: partition: offset """ result = dict() for topic in topics: partition_offsets = [ response[topic] for response in offsets_responses if topic in response ] result[topic] = merge_partition_offsets(*partition_offsets) return result
[ "def", "merge_offsets_metadata", "(", "topics", ",", "*", "offsets_responses", ")", ":", "result", "=", "dict", "(", ")", "for", "topic", "in", "topics", ":", "partition_offsets", "=", "[", "response", "[", "topic", "]", "for", "response", "in", "offsets_res...
Merge the offset metadata dictionaries from multiple responses. :param topics: list of topics :param offsets_responses: list of dict topic: partition: offset :returns: dict topic: partition: offset
[ "Merge", "the", "offset", "metadata", "dictionaries", "from", "multiple", "responses", "." ]
python
train
vanheeringen-lab/gimmemotifs
gimmemotifs/stats.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/stats.py#L16-L82
def calc_stats_iterator(motifs, fg_file, bg_file, genome=None, stats=None, ncpus=None): """Calculate motif enrichment metrics. Parameters ---------- motifs : str, list or Motif instance A file with motifs in pwm format, a list of Motif instances or a single Motif instance. fg_file : str Filename of a FASTA, BED or region file with positive sequences. bg_file : str Filename of a FASTA, BED or region file with negative sequences. genome : str, optional Genome or index directory in case of BED/regions. stats : list, optional Names of metrics to calculate. See gimmemotifs.rocmetrics.__all__ for available metrics. ncpus : int, optional Number of cores to use. Returns ------- result : dict Dictionary with results where keys are motif ids and the values are dictionary with metric name and value pairs. """ if not stats: stats = rocmetrics.__all__ if isinstance(motifs, Motif): all_motifs = [motifs] else: if type([]) == type(motifs): all_motifs = motifs else: all_motifs = read_motifs(motifs, fmt="pwm") if ncpus is None: ncpus = int(MotifConfig().get_default_params()["ncpus"]) chunksize = 240 for i in range(0, len(all_motifs), chunksize): result = {} logger.debug("chunk %s of %s", (i / chunksize) + 1, len(all_motifs) // chunksize + 1) motifs = all_motifs[i:i + chunksize] fg_total = scan_to_best_match(fg_file, motifs, ncpus=ncpus, genome=genome) bg_total = scan_to_best_match(bg_file, motifs, ncpus=ncpus, genome=genome) logger.debug("calculating statistics") if ncpus == 1: it = _single_stats(motifs, stats, fg_total, bg_total) else: it = _mp_stats(motifs, stats, fg_total, bg_total, ncpus) for motif_id, s, ret in it: if motif_id not in result: result[motif_id] = {} result[motif_id][s] = ret yield result
[ "def", "calc_stats_iterator", "(", "motifs", ",", "fg_file", ",", "bg_file", ",", "genome", "=", "None", ",", "stats", "=", "None", ",", "ncpus", "=", "None", ")", ":", "if", "not", "stats", ":", "stats", "=", "rocmetrics", ".", "__all__", "if", "isins...
Calculate motif enrichment metrics. Parameters ---------- motifs : str, list or Motif instance A file with motifs in pwm format, a list of Motif instances or a single Motif instance. fg_file : str Filename of a FASTA, BED or region file with positive sequences. bg_file : str Filename of a FASTA, BED or region file with negative sequences. genome : str, optional Genome or index directory in case of BED/regions. stats : list, optional Names of metrics to calculate. See gimmemotifs.rocmetrics.__all__ for available metrics. ncpus : int, optional Number of cores to use. Returns ------- result : dict Dictionary with results where keys are motif ids and the values are dictionary with metric name and value pairs.
[ "Calculate", "motif", "enrichment", "metrics", "." ]
python
train
kivy/python-for-android
pythonforandroid/graph.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/graph.py#L127-L143
def find_order(graph): ''' Do a topological sort on the dependency graph dict. ''' while graph: # Find all items without a parent leftmost = [l for l, s in graph.items() if not s] if not leftmost: raise ValueError('Dependency cycle detected! %s' % graph) # If there is more than one, sort them for predictable order leftmost.sort() for result in leftmost: # Yield and remove them from the graph yield result graph.pop(result) for bset in graph.values(): bset.discard(result)
[ "def", "find_order", "(", "graph", ")", ":", "while", "graph", ":", "# Find all items without a parent", "leftmost", "=", "[", "l", "for", "l", ",", "s", "in", "graph", ".", "items", "(", ")", "if", "not", "s", "]", "if", "not", "leftmost", ":", "raise...
Do a topological sort on the dependency graph dict.
[ "Do", "a", "topological", "sort", "on", "the", "dependency", "graph", "dict", "." ]
python
train
thecynic/pylutron
pylutron/__init__.py
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L362-L383
def _recv(self, line): """Invoked by the connection manager to process incoming data.""" if line == '': return # Only handle query response messages, which are also sent on remote status # updates (e.g. user manually pressed a keypad button) if line[0] != Lutron.OP_RESPONSE: _LOGGER.debug("ignoring %s" % line) return parts = line[1:].split(',') cmd_type = parts[0] integration_id = int(parts[1]) args = parts[2:] if cmd_type not in self._ids: _LOGGER.info("Unknown cmd %s (%s)" % (cmd_type, line)) return ids = self._ids[cmd_type] if integration_id not in ids: _LOGGER.warning("Unknown id %d (%s)" % (integration_id, line)) return obj = ids[integration_id] handled = obj.handle_update(args)
[ "def", "_recv", "(", "self", ",", "line", ")", ":", "if", "line", "==", "''", ":", "return", "# Only handle query response messages, which are also sent on remote status", "# updates (e.g. user manually pressed a keypad button)", "if", "line", "[", "0", "]", "!=", "Lutron...
Invoked by the connection manager to process incoming data.
[ "Invoked", "by", "the", "connection", "manager", "to", "process", "incoming", "data", "." ]
python
train
uogbuji/versa
tools/py/util.py
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/util.py#L147-L164
def jsonload(model, fp): ''' Load Versa model dumped into JSON form, either raw or canonical ''' dumped_list = json.load(fp) for link in dumped_list: if len(link) == 2: sid, (s, p, o, a) = link elif len(link) == 4: #canonical (s, p, o, a) = link tt = a.get('@target-type') if tt == '@iri-ref': o = I(o) a.pop('@target-type', None) else: continue model.add(s, p, o, a) return
[ "def", "jsonload", "(", "model", ",", "fp", ")", ":", "dumped_list", "=", "json", ".", "load", "(", "fp", ")", "for", "link", "in", "dumped_list", ":", "if", "len", "(", "link", ")", "==", "2", ":", "sid", ",", "(", "s", ",", "p", ",", "o", "...
Load Versa model dumped into JSON form, either raw or canonical
[ "Load", "Versa", "model", "dumped", "into", "JSON", "form", "either", "raw", "or", "canonical" ]
python
train
i3visio/osrframework
osrframework/thirdparties/pipl_com/lib/containers.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/containers.py#L78-L85
def fields_to_dict(self): """Transform the object to a dict and return the dict.""" d = {} for container in FieldsContainer.class_container.values(): fields = getattr(self, container) if fields: d[container] = [field.to_dict() for field in fields] return d
[ "def", "fields_to_dict", "(", "self", ")", ":", "d", "=", "{", "}", "for", "container", "in", "FieldsContainer", ".", "class_container", ".", "values", "(", ")", ":", "fields", "=", "getattr", "(", "self", ",", "container", ")", "if", "fields", ":", "d...
Transform the object to a dict and return the dict.
[ "Transform", "the", "object", "to", "a", "dict", "and", "return", "the", "dict", "." ]
python
train
iotile/coretools
iotilecore/iotile/core/hw/transport/virtualadapter.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/virtualadapter.py#L219-L245
async def connect(self, conn_id, connection_string): """Asynchronously connect to a device Args: conn_id (int): A unique identifer that will refer to this connection connection_string (string): A DeviceAdapter specific string that can be used to connect to a device using this DeviceAdapter. callback (callable): A function that will be called when the connection attempt finishes as callback(conection_id, adapter_id, success: bool, failure_reason: string or None) """ id_number = int(connection_string) if id_number not in self.devices: raise DeviceAdapterError(conn_id, 'connect', 'device not found') if self._get_conn_id(connection_string) is not None: raise DeviceAdapterError(conn_id, 'connect', 'device already connected') dev = self.devices[id_number] if dev.connected: raise DeviceAdapterError(conn_id, 'connect', 'device already connected') dev.connected = True self._setup_connection(conn_id, connection_string) self._track_property(conn_id, 'device', dev)
[ "async", "def", "connect", "(", "self", ",", "conn_id", ",", "connection_string", ")", ":", "id_number", "=", "int", "(", "connection_string", ")", "if", "id_number", "not", "in", "self", ".", "devices", ":", "raise", "DeviceAdapterError", "(", "conn_id", ",...
Asynchronously connect to a device Args: conn_id (int): A unique identifer that will refer to this connection connection_string (string): A DeviceAdapter specific string that can be used to connect to a device using this DeviceAdapter. callback (callable): A function that will be called when the connection attempt finishes as callback(conection_id, adapter_id, success: bool, failure_reason: string or None)
[ "Asynchronously", "connect", "to", "a", "device" ]
python
train
casacore/python-casacore
casacore/measures/__init__.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/measures/__init__.py#L618-L634
def getvalue(self, v): """ Return a list of quantities making up the measures' value. :param v: a measure """ if not is_measure(v): raise TypeError('Incorrect input type for getvalue()') import re rx = re.compile("m\d+") out = [] keys = v.keys()[:] keys.sort() for key in keys: if re.match(rx, key): out.append(dq.quantity(v.get(key))) return out
[ "def", "getvalue", "(", "self", ",", "v", ")", ":", "if", "not", "is_measure", "(", "v", ")", ":", "raise", "TypeError", "(", "'Incorrect input type for getvalue()'", ")", "import", "re", "rx", "=", "re", ".", "compile", "(", "\"m\\d+\"", ")", "out", "="...
Return a list of quantities making up the measures' value. :param v: a measure
[ "Return", "a", "list", "of", "quantities", "making", "up", "the", "measures", "value", "." ]
python
train
Microsoft/knack
knack/output.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/output.py#L113-L142
def out(self, obj, formatter=None, out_file=None): # pylint: disable=no-self-use """ Produces the output using the command result. The method does not return a result as the output is written straight to the output file. :param obj: The command result :type obj: knack.util.CommandResultItem :param formatter: The formatter we should use for the command result :type formatter: function :param out_file: The file to write output to :type out_file: file-like object """ if not isinstance(obj, CommandResultItem): raise TypeError('Expected {} got {}'.format(CommandResultItem.__name__, type(obj))) import platform import colorama if platform.system() == 'Windows': out_file = colorama.AnsiToWin32(out_file).stream output = formatter(obj) try: print(output, file=out_file, end='') except IOError as ex: if ex.errno == errno.EPIPE: pass else: raise except UnicodeEncodeError: print(output.encode('ascii', 'ignore').decode('utf-8', 'ignore'), file=out_file, end='')
[ "def", "out", "(", "self", ",", "obj", ",", "formatter", "=", "None", ",", "out_file", "=", "None", ")", ":", "# pylint: disable=no-self-use", "if", "not", "isinstance", "(", "obj", ",", "CommandResultItem", ")", ":", "raise", "TypeError", "(", "'Expected {}...
Produces the output using the command result. The method does not return a result as the output is written straight to the output file. :param obj: The command result :type obj: knack.util.CommandResultItem :param formatter: The formatter we should use for the command result :type formatter: function :param out_file: The file to write output to :type out_file: file-like object
[ "Produces", "the", "output", "using", "the", "command", "result", ".", "The", "method", "does", "not", "return", "a", "result", "as", "the", "output", "is", "written", "straight", "to", "the", "output", "file", "." ]
python
train
orbingol/NURBS-Python
geomdl/trimming.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/trimming.py#L137-L171
def fix_trim_curves(obj): """ Fixes direction, connectivity and similar issues of the trim curves. This function works for surface trim curves consisting of a single curve. :param obj: input surface :type obj: abstract.Surface """ # Validate input if obj.pdimension != 2: raise GeomdlException("Input geometry must be a surface") # Get trims of the surface for o in obj: trims = o.trims if not trims: continue # Get parameter space bounding box parbox = get_par_box(o.domain, True) # Check and update trim curves with respect to the underlying surface updated_trims = [] for trim in trims: flag, trm = check_trim_curve(trim, parbox) if flag: if trm: cont = shortcuts.generate_container_curve() cont.add(trm) updated_trims.append(cont) else: updated_trims.append(trim) # Set updated trims obj.trims = updated_trims
[ "def", "fix_trim_curves", "(", "obj", ")", ":", "# Validate input", "if", "obj", ".", "pdimension", "!=", "2", ":", "raise", "GeomdlException", "(", "\"Input geometry must be a surface\"", ")", "# Get trims of the surface", "for", "o", "in", "obj", ":", "trims", "...
Fixes direction, connectivity and similar issues of the trim curves. This function works for surface trim curves consisting of a single curve. :param obj: input surface :type obj: abstract.Surface
[ "Fixes", "direction", "connectivity", "and", "similar", "issues", "of", "the", "trim", "curves", "." ]
python
train
apache/incubator-heron
heron/executor/src/python/heron_executor.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/executor/src/python/heron_executor.py#L774-L841
def _get_streaming_processes(self): ''' Returns the processes to handle streams, including the stream-mgr and the user code containing the stream logic of the topology ''' retval = {} instance_plans = self._get_instance_plans(self.packing_plan, self.shard) instance_info = [] for instance_plan in instance_plans: global_task_id = instance_plan.task_id component_index = instance_plan.component_index component_name = instance_plan.component_name instance_id = "container_%s_%s_%d" % (str(self.shard), component_name, global_task_id) instance_info.append((instance_id, component_name, global_task_id, component_index)) stmgr_cmd_lst = [ self.stmgr_binary, '--topology_name=%s' % self.topology_name, '--topology_id=%s' % self.topology_id, '--topologydefn_file=%s' % self.topology_defn_file, '--zkhostportlist=%s' % self.state_manager_connection, '--zkroot=%s' % self.state_manager_root, '--stmgr_id=%s' % self.stmgr_ids[self.shard], '--instance_ids=%s' % ','.join(map(lambda x: x[0], instance_info)), '--myhost=%s' % self.master_host, '--data_port=%s' % str(self.master_port), '--local_data_port=%s' % str(self.tmaster_controller_port), '--metricsmgr_port=%s' % str(self.metrics_manager_port), '--shell_port=%s' % str(self.shell_port), '--config_file=%s' % self.heron_internals_config_file, '--override_config_file=%s' % self.override_config_file, '--ckptmgr_port=%s' % str(self.checkpoint_manager_port), '--ckptmgr_id=%s' % self.ckptmgr_ids[self.shard], '--metricscachemgr_mode=%s' % self.metricscache_manager_mode.lower()] stmgr_env = self.shell_env.copy() if self.shell_env is not None else {} stmgr_cmd = Command(stmgr_cmd_lst, stmgr_env) if os.environ.get('ENABLE_HEAPCHECK') is not None: stmgr_cmd.env.update({ 'LD_PRELOAD': "/usr/lib/libtcmalloc.so", 'HEAPCHECK': "normal" }) retval[self.stmgr_ids[self.shard]] = stmgr_cmd # metricsmgr_metrics_sink_config_file = 'metrics_sinks.yaml' retval[self.metricsmgr_ids[self.shard]] = self._get_metricsmgr_cmd( self.metricsmgr_ids[self.shard], 
self.metrics_sinks_config_file, self.metrics_manager_port ) if self.is_stateful_topology: retval.update(self._get_ckptmgr_process()) if self.pkg_type == 'jar' or self.pkg_type == 'tar': retval.update(self._get_java_instance_cmd(instance_info)) elif self.pkg_type == 'pex': retval.update(self._get_python_instance_cmd(instance_info)) elif self.pkg_type == 'so': retval.update(self._get_cpp_instance_cmd(instance_info)) elif self.pkg_type == 'dylib': retval.update(self._get_cpp_instance_cmd(instance_info)) else: raise ValueError("Unrecognized package type: %s" % self.pkg_type) return retval
[ "def", "_get_streaming_processes", "(", "self", ")", ":", "retval", "=", "{", "}", "instance_plans", "=", "self", ".", "_get_instance_plans", "(", "self", ".", "packing_plan", ",", "self", ".", "shard", ")", "instance_info", "=", "[", "]", "for", "instance_p...
Returns the processes to handle streams, including the stream-mgr and the user code containing the stream logic of the topology
[ "Returns", "the", "processes", "to", "handle", "streams", "including", "the", "stream", "-", "mgr", "and", "the", "user", "code", "containing", "the", "stream", "logic", "of", "the", "topology" ]
python
valid
tensorflow/tensor2tensor
tensor2tensor/models/research/glow_ops.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L867-L925
def latent_to_dist(name, x, hparams, output_channels=None): """Map latent to the mean and log-scale of a Gaussian. Args: name: variable scope. x: 4-D Tensor of shape (NHWC) hparams: HParams. latent_architecture - can be "single_conv", "glow_nn" or "glow_resnet", default = single_conv latent_encoder_depth - int, depth of architecture, valid if latent_architecture is "glow_nn" or "glow_resnet". latent_pre_output_channels - 512, valid only when latent_architecture is "glow_nn". latent_encoder_width - 512, maximum width of the network output_channels: int, number of output channels of the mean (and std). if not provided, set it to be the output channels of x. Returns: dist: instance of tfp.distributions.Normal Raises: ValueError: If architecture not in ["single_conv", "glow_nn"] """ architecture = hparams.get("latent_architecture", "single_conv") depth = hparams.get("latent_encoder_depth", 1) pre_output_channels = hparams.get("latent_pre_output_channels", 512) width = hparams.get("latent_encoder_width", 512) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): x_shape = common_layers.shape_list(x) if output_channels is None: output_channels = x_shape[-1] if architecture == "single_conv": return single_conv_dist("single_conv", x, output_channels) if architecture == "glow_nn": mean_log_scale = x for layer in range(1, depth + 1): mid_channels = pre_output_channels // 2**(depth - layer) mean_log_scale = conv_block("glow_nn_%d" % layer, mean_log_scale, mid_channels=mid_channels) mean_log_scale = conv("glow_nn_zeros", mean_log_scale, filter_size=[3, 3], stride=[1, 1], output_channels=2*output_channels, apply_actnorm=False, conv_init="zeros") elif architecture == "glow_resnet": h = x for layer in range(depth): h3 = conv_stack("latent_resnet_%d" % layer, h, mid_channels=width, output_channels=x_shape[-1], dropout=hparams.coupling_dropout) h += h3 mean_log_scale = conv("glow_res_final", h, conv_init="zeros", output_channels=2*output_channels, apply_actnorm=False) else: raise 
ValueError("expected architecture to be single_conv or glow_nn " "got %s" % architecture) mean = mean_log_scale[:, :, :, 0::2] log_scale = mean_log_scale[:, :, :, 1::2] return tfp.distributions.Normal(mean, tf.exp(log_scale))
[ "def", "latent_to_dist", "(", "name", ",", "x", ",", "hparams", ",", "output_channels", "=", "None", ")", ":", "architecture", "=", "hparams", ".", "get", "(", "\"latent_architecture\"", ",", "\"single_conv\"", ")", "depth", "=", "hparams", ".", "get", "(", ...
Map latent to the mean and log-scale of a Gaussian. Args: name: variable scope. x: 4-D Tensor of shape (NHWC) hparams: HParams. latent_architecture - can be "single_conv", "glow_nn" or "glow_resnet", default = single_conv latent_encoder_depth - int, depth of architecture, valid if latent_architecture is "glow_nn" or "glow_resnet". latent_pre_output_channels - 512, valid only when latent_architecture is "glow_nn". latent_encoder_width - 512, maximum width of the network output_channels: int, number of output channels of the mean (and std). if not provided, set it to be the output channels of x. Returns: dist: instance of tfp.distributions.Normal Raises: ValueError: If architecture not in ["single_conv", "glow_nn"]
[ "Map", "latent", "to", "the", "mean", "and", "log", "-", "scale", "of", "a", "Gaussian", "." ]
python
train
loli/medpy
medpy/filter/IntensityRangeStandardization.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/IntensityRangeStandardization.py#L497-L506
def is_in_interval(n, l, r, border = 'included'): """ Checks whether a number is inside the interval l, r. """ if 'included' == border: return (n >= l) and (n <= r) elif 'excluded' == border: return (n > l) and (n < r) else: raise ValueError('borders must be either \'included\' or \'excluded\'')
[ "def", "is_in_interval", "(", "n", ",", "l", ",", "r", ",", "border", "=", "'included'", ")", ":", "if", "'included'", "==", "border", ":", "return", "(", "n", ">=", "l", ")", "and", "(", "n", "<=", "r", ")", "elif", "'excluded'", "==", "border", ...
Checks whether a number is inside the interval l, r.
[ "Checks", "whether", "a", "number", "is", "inside", "the", "interval", "l", "r", "." ]
python
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L66-L177
def com_google_fonts_check_monospace(ttFont, glyph_metrics_stats): """Checking correctness of monospaced metadata. There are various metadata in the OpenType spec to specify if a font is monospaced or not. If the font is not trully monospaced, then no monospaced metadata should be set (as sometimes they mistakenly are...) Monospace fonts must: * post.isFixedWidth "Set to 0 if the font is proportionally spaced, non-zero if the font is not proportionally spaced (monospaced)" www.microsoft.com/typography/otspec/post.htm * hhea.advanceWidthMax must be correct, meaning no glyph's width value is greater. www.microsoft.com/typography/otspec/hhea.htm * OS/2.panose.bProportion must be set to 9 (monospace). Spec says: "The PANOSE definition contains ten digits each of which currently describes up to sixteen variations. Windows uses bFamilyType, bSerifStyle and bProportion in the font mapper to determine family type. It also uses bProportion to determine if the font is monospaced." www.microsoft.com/typography/otspec/os2.htm#pan monotypecom-test.monotype.de/services/pan2 * OS/2.xAverageWidth must be set accurately. "OS/2.xAverageWidth IS used when rendering monospaced fonts, at least by Windows GDI" http://typedrawers.com/discussion/comment/15397/#Comment_15397 Also we should report an error for glyphs not of average width """ from fontbakery.constants import (IsFixedWidth, PANOSE_Proportion) failed = False # Note: These values are read from the dict here only to # reduce the max line length in the check implementation below: seems_monospaced = glyph_metrics_stats["seems_monospaced"] most_common_width = glyph_metrics_stats["most_common_width"] width_max = glyph_metrics_stats['width_max'] if ttFont['hhea'].advanceWidthMax != width_max: failed = True yield FAIL, Message("bad-advanceWidthMax", ("Value of hhea.advanceWidthMax" " should be set to {} but got" " {} instead." 
"").format(width_max, ttFont['hhea'].advanceWidthMax)) if seems_monospaced: if ttFont['post'].isFixedPitch == IsFixedWidth.NOT_MONOSPACED: failed = True yield FAIL, Message("mono-bad-post-isFixedPitch", ("On monospaced fonts, the value of" " post.isFixedPitch must be set to a non-zero value" " (meaning 'fixed width monospaced')," " but got {} instead." "").format(ttFont['post'].isFixedPitch)) if ttFont['OS/2'].panose.bProportion != PANOSE_Proportion.MONOSPACED: failed = True yield FAIL, Message("mono-bad-panose-proportion", ("On monospaced fonts, the value of" " OS/2.panose.bProportion must be set to {}" " (proportion: monospaced), but got" " {} instead." "").format(PANOSE_Proportion.MONOSPACED, ttFont['OS/2'].panose.bProportion)) num_glyphs = len(ttFont['glyf'].glyphs) unusually_spaced_glyphs = [ g for g in ttFont['glyf'].glyphs if g not in ['.notdef', '.null', 'NULL'] and ttFont['hmtx'].metrics[g][0] != most_common_width ] outliers_ratio = float(len(unusually_spaced_glyphs)) / num_glyphs if outliers_ratio > 0: failed = True yield WARN, Message("mono-outliers", ("Font is monospaced but {} glyphs" " ({}%) have a different width." " You should check the widths of:" " {}").format( len(unusually_spaced_glyphs), 100.0 * outliers_ratio, unusually_spaced_glyphs)) if not failed: yield PASS, Message("mono-good", ("Font is monospaced and all" " related metadata look good.")) else: # it is a non-monospaced font, so lets make sure # that all monospace-related metadata is properly unset. if ttFont['post'].isFixedPitch != IsFixedWidth.NOT_MONOSPACED: failed = True yield FAIL, Message("bad-post-isFixedPitch", ("On non-monospaced fonts, the" " post.isFixedPitch value must be set to {}" " (not monospaced), but got {} instead." 
"").format(IsFixedWidth.NOT_MONOSPACED, ttFont['post'].isFixedPitch)) if ttFont['OS/2'].panose.bProportion == PANOSE_Proportion.MONOSPACED: failed = True yield FAIL, Message("bad-panose-proportion", ("On non-monospaced fonts, the" " OS/2.panose.bProportion value can be set to " " any value except 9 (proportion: monospaced)" " which is the bad value we got in this font.")) if not failed: yield PASS, Message("good", ("Font is not monospaced and" " all related metadata look good."))
[ "def", "com_google_fonts_check_monospace", "(", "ttFont", ",", "glyph_metrics_stats", ")", ":", "from", "fontbakery", ".", "constants", "import", "(", "IsFixedWidth", ",", "PANOSE_Proportion", ")", "failed", "=", "False", "# Note: These values are read from the dict here on...
Checking correctness of monospaced metadata. There are various metadata in the OpenType spec to specify if a font is monospaced or not. If the font is not trully monospaced, then no monospaced metadata should be set (as sometimes they mistakenly are...) Monospace fonts must: * post.isFixedWidth "Set to 0 if the font is proportionally spaced, non-zero if the font is not proportionally spaced (monospaced)" www.microsoft.com/typography/otspec/post.htm * hhea.advanceWidthMax must be correct, meaning no glyph's width value is greater. www.microsoft.com/typography/otspec/hhea.htm * OS/2.panose.bProportion must be set to 9 (monospace). Spec says: "The PANOSE definition contains ten digits each of which currently describes up to sixteen variations. Windows uses bFamilyType, bSerifStyle and bProportion in the font mapper to determine family type. It also uses bProportion to determine if the font is monospaced." www.microsoft.com/typography/otspec/os2.htm#pan monotypecom-test.monotype.de/services/pan2 * OS/2.xAverageWidth must be set accurately. "OS/2.xAverageWidth IS used when rendering monospaced fonts, at least by Windows GDI" http://typedrawers.com/discussion/comment/15397/#Comment_15397 Also we should report an error for glyphs not of average width
[ "Checking", "correctness", "of", "monospaced", "metadata", "." ]
python
train
cpenv/cpenv
cpenv/api.py
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/api.py#L127-L136
def launch(module_name, *args, **kwargs): '''Activates and launches a module :param module_name: name of module to launch ''' r = resolve(module_name) r.activate() mod = r.resolved[0] mod.launch(*args, **kwargs)
[ "def", "launch", "(", "module_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "r", "=", "resolve", "(", "module_name", ")", "r", ".", "activate", "(", ")", "mod", "=", "r", ".", "resolved", "[", "0", "]", "mod", ".", "launch", "(", ...
Activates and launches a module :param module_name: name of module to launch
[ "Activates", "and", "launches", "a", "module" ]
python
valid
bigchaindb/bigchaindb
bigchaindb/fastquery.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/fastquery.py#L25-L36
def filter_spent_outputs(self, outputs): """Remove outputs that have been spent Args: outputs: list of TransactionLink """ links = [o.to_dict() for o in outputs] txs = list(query.get_spending_transactions(self.connection, links)) spends = {TransactionLink.from_dict(input_['fulfills']) for tx in txs for input_ in tx['inputs']} return [ff for ff in outputs if ff not in spends]
[ "def", "filter_spent_outputs", "(", "self", ",", "outputs", ")", ":", "links", "=", "[", "o", ".", "to_dict", "(", ")", "for", "o", "in", "outputs", "]", "txs", "=", "list", "(", "query", ".", "get_spending_transactions", "(", "self", ".", "connection", ...
Remove outputs that have been spent Args: outputs: list of TransactionLink
[ "Remove", "outputs", "that", "have", "been", "spent" ]
python
train
saulpw/visidata
visidata/vdtui.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L822-L829
def rightStatus(self, sheet): 'Compose right side of status bar.' if sheet.currentThreads: gerund = (' '+sheet.progresses[0].gerund) if sheet.progresses else '' status = '%9d %2d%%%s' % (len(sheet), sheet.progressPct, gerund) else: status = '%9d %s' % (len(sheet), sheet.rowtype) return status, 'color_status'
[ "def", "rightStatus", "(", "self", ",", "sheet", ")", ":", "if", "sheet", ".", "currentThreads", ":", "gerund", "=", "(", "' '", "+", "sheet", ".", "progresses", "[", "0", "]", ".", "gerund", ")", "if", "sheet", ".", "progresses", "else", "''", "stat...
Compose right side of status bar.
[ "Compose", "right", "side", "of", "status", "bar", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/frame.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/frame.py#L88-L93
def extract_module_locals(depth=0): """Returns (module, locals) of the funciton `depth` frames away from the caller""" f = sys._getframe(depth + 1) global_ns = f.f_globals module = sys.modules[global_ns['__name__']] return (module, f.f_locals)
[ "def", "extract_module_locals", "(", "depth", "=", "0", ")", ":", "f", "=", "sys", ".", "_getframe", "(", "depth", "+", "1", ")", "global_ns", "=", "f", ".", "f_globals", "module", "=", "sys", ".", "modules", "[", "global_ns", "[", "'__name__'", "]", ...
Returns (module, locals) of the funciton `depth` frames away from the caller
[ "Returns", "(", "module", "locals", ")", "of", "the", "funciton", "depth", "frames", "away", "from", "the", "caller" ]
python
test
geertj/gruvi
lib/gruvi/futures.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/futures.py#L506-L526
def as_completed(objects, count=None, timeout=None): """Wait for one or more waitable objects, yielding them as they become ready. This is the iterator/generator version of :func:`wait`. """ for obj in objects: if not hasattr(obj, 'add_done_callback'): raise TypeError('Expecting sequence of waitable objects') if count is None: count = len(objects) if count < 0 or count > len(objects): raise ValueError('count must be between 0 and len(objects)') if count == 0: return pending = list(objects) for obj in _wait(pending, timeout): yield obj count -= 1 if count == 0: break
[ "def", "as_completed", "(", "objects", ",", "count", "=", "None", ",", "timeout", "=", "None", ")", ":", "for", "obj", "in", "objects", ":", "if", "not", "hasattr", "(", "obj", ",", "'add_done_callback'", ")", ":", "raise", "TypeError", "(", "'Expecting ...
Wait for one or more waitable objects, yielding them as they become ready. This is the iterator/generator version of :func:`wait`.
[ "Wait", "for", "one", "or", "more", "waitable", "objects", "yielding", "them", "as", "they", "become", "ready", "." ]
python
train
ask/carrot
carrot/messaging.py
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/messaging.py#L451-L476
def iterqueue(self, limit=None, infinite=False): """Infinite iterator yielding pending messages, by using synchronous direct access to the queue (``basic_get``). :meth:`iterqueue` is used where synchronous functionality is more important than performance. If you can, use :meth:`iterconsume` instead. :keyword limit: If set, the iterator stops when it has processed this number of messages in total. :keyword infinite: Don't raise :exc:`StopIteration` if there is no messages waiting, but return ``None`` instead. If infinite you obviously shouldn't consume the whole iterator at once without using a ``limit``. :raises StopIteration: If there is no messages waiting, and the iterator is not infinite. """ for items_since_start in count(): item = self.fetch() if (not infinite and item is None) or \ (limit and items_since_start >= limit): raise StopIteration yield item
[ "def", "iterqueue", "(", "self", ",", "limit", "=", "None", ",", "infinite", "=", "False", ")", ":", "for", "items_since_start", "in", "count", "(", ")", ":", "item", "=", "self", ".", "fetch", "(", ")", "if", "(", "not", "infinite", "and", "item", ...
Infinite iterator yielding pending messages, by using synchronous direct access to the queue (``basic_get``). :meth:`iterqueue` is used where synchronous functionality is more important than performance. If you can, use :meth:`iterconsume` instead. :keyword limit: If set, the iterator stops when it has processed this number of messages in total. :keyword infinite: Don't raise :exc:`StopIteration` if there is no messages waiting, but return ``None`` instead. If infinite you obviously shouldn't consume the whole iterator at once without using a ``limit``. :raises StopIteration: If there is no messages waiting, and the iterator is not infinite.
[ "Infinite", "iterator", "yielding", "pending", "messages", "by", "using", "synchronous", "direct", "access", "to", "the", "queue", "(", "basic_get", ")", "." ]
python
train
sam-cox/pytides
pytides/tide.py
https://github.com/sam-cox/pytides/blob/63a2507299002f1979ea55a17a82561158d685f7/pytides/tide.py#L242-L254
def _times(t0, hours): """ Return a (list of) datetime(s) given an initial time and an (list of) hourly offset(s). Arguments: t0 -- initial time hours -- hourly offsets from t0 """ if not isinstance(hours, Iterable): return Tide._times(t0, [hours])[0] elif not isinstance(hours[0], datetime): return np.array([t0 + timedelta(hours=h) for h in hours]) else: return np.array(hours)
[ "def", "_times", "(", "t0", ",", "hours", ")", ":", "if", "not", "isinstance", "(", "hours", ",", "Iterable", ")", ":", "return", "Tide", ".", "_times", "(", "t0", ",", "[", "hours", "]", ")", "[", "0", "]", "elif", "not", "isinstance", "(", "hou...
Return a (list of) datetime(s) given an initial time and an (list of) hourly offset(s). Arguments: t0 -- initial time hours -- hourly offsets from t0
[ "Return", "a", "(", "list", "of", ")", "datetime", "(", "s", ")", "given", "an", "initial", "time", "and", "an", "(", "list", "of", ")", "hourly", "offset", "(", "s", ")", ".", "Arguments", ":", "t0", "--", "initial", "time", "hours", "--", "hourly...
python
train
pricingassistant/mrq
mrq/scheduler.py
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/scheduler.py#L29-L43
def check_config_integrity(self): """ Make sure the scheduler config is valid """ tasks_by_hash = {_hash_task(t): t for t in self.config_tasks} if len(tasks_by_hash) != len(self.config_tasks): raise Exception("Fatal error: there was a hash duplicate in the scheduled tasks config.") for h, task in tasks_by_hash.items(): if task.get("monthday") and not task.get("dailytime"): raise Exception("Fatal error: you can't schedule a task with 'monthday' and without 'dailytime' (%s)" % h) if task.get("weekday") and not task.get("dailytime"): raise Exception("Fatal error: you can't schedule a task with 'weekday' and without 'dailytime' (%s)" % h) if not task.get("monthday") and not task.get("weekday") and not task.get("dailytime") and not task.get("interval"): raise Exception("Fatal error: scheduler must be specified one of monthday,weekday,dailytime,interval. (%s)" % h)
[ "def", "check_config_integrity", "(", "self", ")", ":", "tasks_by_hash", "=", "{", "_hash_task", "(", "t", ")", ":", "t", "for", "t", "in", "self", ".", "config_tasks", "}", "if", "len", "(", "tasks_by_hash", ")", "!=", "len", "(", "self", ".", "config...
Make sure the scheduler config is valid
[ "Make", "sure", "the", "scheduler", "config", "is", "valid" ]
python
train
openvax/isovar
isovar/reference_context.py
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_context.py#L141-L168
def reference_contexts_for_variants( variants, context_size, transcript_id_whitelist=None): """ Extract a set of reference contexts for each variant in the collection. Parameters ---------- variants : varcode.VariantCollection context_size : int Max of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns a dictionary from variants to lists of ReferenceContext objects, sorted by max coding sequence length of any transcript. """ result = OrderedDict() for variant in variants: result[variant] = reference_contexts_for_variant( variant=variant, context_size=context_size, transcript_id_whitelist=transcript_id_whitelist) return result
[ "def", "reference_contexts_for_variants", "(", "variants", ",", "context_size", ",", "transcript_id_whitelist", "=", "None", ")", ":", "result", "=", "OrderedDict", "(", ")", "for", "variant", "in", "variants", ":", "result", "[", "variant", "]", "=", "reference...
Extract a set of reference contexts for each variant in the collection. Parameters ---------- variants : varcode.VariantCollection context_size : int Max of nucleotides to include to the left and right of the variant in the context sequence. transcript_id_whitelist : set, optional If given, then only consider transcripts whose IDs are in this set. Returns a dictionary from variants to lists of ReferenceContext objects, sorted by max coding sequence length of any transcript.
[ "Extract", "a", "set", "of", "reference", "contexts", "for", "each", "variant", "in", "the", "collection", "." ]
python
train
saltstack/salt
salt/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L1056-L1096
def check_refresh(self, data, ret): ''' Check to see if the modules for this state instance need to be updated, only update if the state is a file or a package and if it changed something. If the file function is managed check to see if the file is a possible module type, e.g. a python, pyx, or .so. Always refresh if the function is recurse, since that can lay down anything. ''' _reload_modules = False if data.get('reload_grains', False): log.debug('Refreshing grains...') self.opts['grains'] = salt.loader.grains(self.opts) _reload_modules = True if data.get('reload_pillar', False): log.debug('Refreshing pillar...') self.opts['pillar'] = self._gather_pillar() _reload_modules = True if not ret['changes']: if data.get('force_reload_modules', False): self.module_refresh() return if data.get('reload_modules', False) or _reload_modules: # User explicitly requests a reload self.module_refresh() return if data['state'] == 'file': if data['fun'] == 'managed': if data['name'].endswith( ('.py', '.pyx', '.pyo', '.pyc', '.so')): self.module_refresh() elif data['fun'] == 'recurse': self.module_refresh() elif data['fun'] == 'symlink': if 'bin' in data['name']: self.module_refresh() elif data['state'] in ('pkg', 'ports'): self.module_refresh()
[ "def", "check_refresh", "(", "self", ",", "data", ",", "ret", ")", ":", "_reload_modules", "=", "False", "if", "data", ".", "get", "(", "'reload_grains'", ",", "False", ")", ":", "log", ".", "debug", "(", "'Refreshing grains...'", ")", "self", ".", "opts...
Check to see if the modules for this state instance need to be updated, only update if the state is a file or a package and if it changed something. If the file function is managed check to see if the file is a possible module type, e.g. a python, pyx, or .so. Always refresh if the function is recurse, since that can lay down anything.
[ "Check", "to", "see", "if", "the", "modules", "for", "this", "state", "instance", "need", "to", "be", "updated", "only", "update", "if", "the", "state", "is", "a", "file", "or", "a", "package", "and", "if", "it", "changed", "something", ".", "If", "the...
python
train
PolyJIT/benchbuild
benchbuild/utils/compiler.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/compiler.py#L27-L49
def cc(project, detect_project=False): """ Return a clang that hides CFLAGS and LDFLAGS. This will generate a wrapper script in the current directory and return a complete plumbum command to it. Args: cflags: The CFLAGS we want to hide. ldflags: The LDFLAGS we want to hide. func (optional): A function that will be pickled alongside the compiler. It will be called before the actual compilation took place. This way you can intercept the compilation process with arbitrary python code. Returns (benchbuild.utils.cmd): Path to the new clang command. """ from benchbuild.utils import cmd cc_name = str(CFG["compiler"]["c"]) wrap_cc(cc_name, compiler(cc_name), project, detect_project=detect_project) return cmd["./{}".format(cc_name)]
[ "def", "cc", "(", "project", ",", "detect_project", "=", "False", ")", ":", "from", "benchbuild", ".", "utils", "import", "cmd", "cc_name", "=", "str", "(", "CFG", "[", "\"compiler\"", "]", "[", "\"c\"", "]", ")", "wrap_cc", "(", "cc_name", ",", "compi...
Return a clang that hides CFLAGS and LDFLAGS. This will generate a wrapper script in the current directory and return a complete plumbum command to it. Args: cflags: The CFLAGS we want to hide. ldflags: The LDFLAGS we want to hide. func (optional): A function that will be pickled alongside the compiler. It will be called before the actual compilation took place. This way you can intercept the compilation process with arbitrary python code. Returns (benchbuild.utils.cmd): Path to the new clang command.
[ "Return", "a", "clang", "that", "hides", "CFLAGS", "and", "LDFLAGS", "." ]
python
train
balloob/pychromecast
pychromecast/controllers/media.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/controllers/media.py#L350-L360
def _send_command(self, command): """ Send a command to the Chromecast on media channel. """ if self.status is None or self.status.media_session_id is None: self.logger.warning( "%s command requested but no session is active.", command[MESSAGE_TYPE]) return command['mediaSessionId'] = self.status.media_session_id self.send_message(command, inc_session_id=True)
[ "def", "_send_command", "(", "self", ",", "command", ")", ":", "if", "self", ".", "status", "is", "None", "or", "self", ".", "status", ".", "media_session_id", "is", "None", ":", "self", ".", "logger", ".", "warning", "(", "\"%s command requested but no sess...
Send a command to the Chromecast on media channel.
[ "Send", "a", "command", "to", "the", "Chromecast", "on", "media", "channel", "." ]
python
train
ic-labs/django-icekit
icekit/response_pages/views.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/response_pages/views.py#L44-L69
def page_not_found(request, template_name='404.html'): """ Custom page not found (404) handler. Don't raise a Http404 or anything like that in here otherwise you will cause an infinite loop. That would be bad. If no ResponsePage exists for with type ``RESPONSE_HTTP404`` then the default template render view will be used. Templates: :template:`404.html` Context: request_path The path of the requested URL (e.g., '/app/pages/bad_page/') page A ResponsePage with type ``RESPONSE_HTTP404`` if it exists. """ rendered_page = get_response_page( request, http.HttpResponseNotFound, 'icekit/response_pages/404.html', abstract_models.RESPONSE_HTTP404 ) if rendered_page is None: return defaults.page_not_found(request, template_name) return rendered_page
[ "def", "page_not_found", "(", "request", ",", "template_name", "=", "'404.html'", ")", ":", "rendered_page", "=", "get_response_page", "(", "request", ",", "http", ".", "HttpResponseNotFound", ",", "'icekit/response_pages/404.html'", ",", "abstract_models", ".", "RESP...
Custom page not found (404) handler. Don't raise a Http404 or anything like that in here otherwise you will cause an infinite loop. That would be bad. If no ResponsePage exists for with type ``RESPONSE_HTTP404`` then the default template render view will be used. Templates: :template:`404.html` Context: request_path The path of the requested URL (e.g., '/app/pages/bad_page/') page A ResponsePage with type ``RESPONSE_HTTP404`` if it exists.
[ "Custom", "page", "not", "found", "(", "404", ")", "handler", "." ]
python
train
klmitch/tendril
tendril/framers.py
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L101-L119
def _reset(self, framer): """ Reset the state for the framer. It is safe to call this method multiple times with the same framer; the ID of the framer object will be saved and the state only reset if the IDs are different. After resetting the state, the framer's ``init_state()`` method will be called. """ # Do nothing if we're already properly initialized if id(framer) == self._framer_id: return # Reset the state self._other = {} # Initialize the state and save the framer ID framer.init_state(self) self._framer_id = id(framer)
[ "def", "_reset", "(", "self", ",", "framer", ")", ":", "# Do nothing if we're already properly initialized", "if", "id", "(", "framer", ")", "==", "self", ".", "_framer_id", ":", "return", "# Reset the state", "self", ".", "_other", "=", "{", "}", "# Initialize ...
Reset the state for the framer. It is safe to call this method multiple times with the same framer; the ID of the framer object will be saved and the state only reset if the IDs are different. After resetting the state, the framer's ``init_state()`` method will be called.
[ "Reset", "the", "state", "for", "the", "framer", ".", "It", "is", "safe", "to", "call", "this", "method", "multiple", "times", "with", "the", "same", "framer", ";", "the", "ID", "of", "the", "framer", "object", "will", "be", "saved", "and", "the", "sta...
python
train
jmoiron/speedparser
speedparser/speedparser.py
https://github.com/jmoiron/speedparser/blob/e7e8d79daf73b35c9259695ad1e379476e1dfc77/speedparser/speedparser.py#L189-L196
def innertext(node): """Return the inner text of a node. If a node has no sub elements, this is just node.text. Otherwise, it's node.text + sub-element-text + node.tail.""" if not len(node): return node.text return (node.text or '') + ''.join([etree.tostring(c) for c in node]) + (node.tail or '')
[ "def", "innertext", "(", "node", ")", ":", "if", "not", "len", "(", "node", ")", ":", "return", "node", ".", "text", "return", "(", "node", ".", "text", "or", "''", ")", "+", "''", ".", "join", "(", "[", "etree", ".", "tostring", "(", "c", ")",...
Return the inner text of a node. If a node has no sub elements, this is just node.text. Otherwise, it's node.text + sub-element-text + node.tail.
[ "Return", "the", "inner", "text", "of", "a", "node", ".", "If", "a", "node", "has", "no", "sub", "elements", "this", "is", "just", "node", ".", "text", ".", "Otherwise", "it", "s", "node", ".", "text", "+", "sub", "-", "element", "-", "text", "+", ...
python
train
Microsoft/nni
tools/nni_cmd/nnictl_utils.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/nnictl_utils.py#L407-L434
def show_experiment_info(): '''show experiment information in monitor''' experiment_config = Experiments() experiment_dict = experiment_config.get_all_experiments() if not experiment_dict: print('There is no experiment running...') exit(1) update_experiment() experiment_id_list = [] for key in experiment_dict.keys(): if experiment_dict[key]['status'] != 'STOPPED': experiment_id_list.append(key) if not experiment_id_list: print_warning('There is no experiment running...') return for key in experiment_id_list: print(EXPERIMENT_MONITOR_INFO % (key, experiment_dict[key]['status'], experiment_dict[key]['port'], \ experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], get_time_interval(experiment_dict[key]['startTime'], experiment_dict[key]['endTime']))) print(TRIAL_MONITOR_HEAD) running, response = check_rest_server_quick(experiment_dict[key]['port']) if running: response = rest_get(trial_jobs_url(experiment_dict[key]['port']), REST_TIME_OUT) if response and check_response(response): content = json.loads(response.text) for index, value in enumerate(content): content[index] = convert_time_stamp_to_date(value) print(TRIAL_MONITOR_CONTENT % (content[index].get('id'), content[index].get('startTime'), content[index].get('endTime'), content[index].get('status'))) print(TRIAL_MONITOR_TAIL)
[ "def", "show_experiment_info", "(", ")", ":", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "if", "not", "experiment_dict", ":", "print", "(", "'There is no experiment running...'", ...
show experiment information in monitor
[ "show", "experiment", "information", "in", "monitor" ]
python
train
hugapi/hug
hug/api.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/api.py#L150-L158
def add_exception_handler(self, exception_type, error_handler, versions=(None, )): """Adds a error handler to the hug api""" versions = (versions, ) if not isinstance(versions, (tuple, list)) else versions if not hasattr(self, '_exception_handlers'): self._exception_handlers = {} for version in versions: placement = self._exception_handlers.setdefault(version, OrderedDict()) placement[exception_type] = (error_handler, ) + placement.get(exception_type, tuple())
[ "def", "add_exception_handler", "(", "self", ",", "exception_type", ",", "error_handler", ",", "versions", "=", "(", "None", ",", ")", ")", ":", "versions", "=", "(", "versions", ",", ")", "if", "not", "isinstance", "(", "versions", ",", "(", "tuple", ",...
Adds a error handler to the hug api
[ "Adds", "a", "error", "handler", "to", "the", "hug", "api" ]
python
train
saltstack/salt
salt/modules/zfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zfs.py#L1158-L1264
def get(*dataset, **kwargs): ''' Displays properties for the given datasets. dataset : string name of snapshot(s), filesystem(s), or volume(s) properties : string comma-separated list of properties to list, defaults to all recursive : boolean recursively list children depth : int recursively list children to depth fields : string comma-separated list of fields to include, the name and property field will always be added type : string comma-separated list of types to display, where type is one of filesystem, snapshot, volume, bookmark, or all. source : string comma-separated list of sources to display. Must be one of the following: local, default, inherited, temporary, and none. The default value is all sources. parsable : boolean display numbers in parsable (exact) values (default = True) .. versionadded:: 2018.3.0 .. note:: If no datasets are specified, then the command displays properties for all datasets on the system. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.get salt '*' zfs.get myzpool/mydataset [recursive=True|False] salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False] salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1 ''' ## Configure command # NOTE: initialize the defaults flags = ['-H'] opts = {} # NOTE: set extra config from kwargs if kwargs.get('depth', False): opts['-d'] = kwargs.get('depth') elif kwargs.get('recursive', False): flags.append('-r') fields = kwargs.get('fields', 'value,source').split(',') if 'name' in fields: # ensure name is first fields.remove('name') if 'property' in fields: # ensure property is second fields.remove('property') fields.insert(0, 'name') fields.insert(1, 'property') opts['-o'] = ",".join(fields) if kwargs.get('type', False): opts['-t'] = kwargs.get('type') if kwargs.get('source', False): opts['-s'] = kwargs.get('source') # NOTE: set property_name property_name = kwargs.get('properties', 'all') ## Get 
properties res = __salt__['cmd.run_all']( __utils__['zfs.zfs_command']( command='get', flags=flags, opts=opts, property_name=property_name, target=list(dataset), ), python_shell=False, ) ret = __utils__['zfs.parse_command_result'](res) if res['retcode'] == 0: for ds in res['stdout'].splitlines(): ds_data = OrderedDict(list(zip( fields, ds.split("\t") ))) if 'value' in ds_data: if kwargs.get('parsable', True): ds_data['value'] = __utils__['zfs.from_auto']( ds_data['property'], ds_data['value'], ) else: ds_data['value'] = __utils__['zfs.to_auto']( ds_data['property'], ds_data['value'], convert_to_human=True, ) if ds_data['name'] not in ret: ret[ds_data['name']] = OrderedDict() ret[ds_data['name']][ds_data['property']] = ds_data del ds_data['name'] del ds_data['property'] return ret
[ "def", "get", "(", "*", "dataset", ",", "*", "*", "kwargs", ")", ":", "## Configure command", "# NOTE: initialize the defaults", "flags", "=", "[", "'-H'", "]", "opts", "=", "{", "}", "# NOTE: set extra config from kwargs", "if", "kwargs", ".", "get", "(", "'d...
Displays properties for the given datasets. dataset : string name of snapshot(s), filesystem(s), or volume(s) properties : string comma-separated list of properties to list, defaults to all recursive : boolean recursively list children depth : int recursively list children to depth fields : string comma-separated list of fields to include, the name and property field will always be added type : string comma-separated list of types to display, where type is one of filesystem, snapshot, volume, bookmark, or all. source : string comma-separated list of sources to display. Must be one of the following: local, default, inherited, temporary, and none. The default value is all sources. parsable : boolean display numbers in parsable (exact) values (default = True) .. versionadded:: 2018.3.0 .. note:: If no datasets are specified, then the command displays properties for all datasets on the system. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.get salt '*' zfs.get myzpool/mydataset [recursive=True|False] salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False] salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1
[ "Displays", "properties", "for", "the", "given", "datasets", "." ]
python
train
jaywink/federation
federation/utils/diaspora.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/utils/diaspora.py#L70-L94
def retrieve_and_parse_diaspora_webfinger(handle): """ Retrieve a and parse a remote Diaspora webfinger document. :arg handle: Remote handle to retrieve :returns: dict """ try: host = handle.split("@")[1] except AttributeError: logger.warning("retrieve_and_parse_diaspora_webfinger: invalid handle given: %s", handle) return None document, code, exception = fetch_document( host=host, path="/.well-known/webfinger?resource=acct:%s" % quote(handle), ) if document: return parse_diaspora_webfinger(document) hostmeta = retrieve_diaspora_host_meta(host) if not hostmeta: return None url = hostmeta.find_link(rels="lrdd").template.replace("{uri}", quote(handle)) document, code, exception = fetch_document(url) if exception: return None return parse_diaspora_webfinger(document)
[ "def", "retrieve_and_parse_diaspora_webfinger", "(", "handle", ")", ":", "try", ":", "host", "=", "handle", ".", "split", "(", "\"@\"", ")", "[", "1", "]", "except", "AttributeError", ":", "logger", ".", "warning", "(", "\"retrieve_and_parse_diaspora_webfinger: in...
Retrieve a and parse a remote Diaspora webfinger document. :arg handle: Remote handle to retrieve :returns: dict
[ "Retrieve", "a", "and", "parse", "a", "remote", "Diaspora", "webfinger", "document", "." ]
python
train
CivicSpleen/ambry
ambry/orm/file.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/file.py#L180-L193
def dict(self): """A dict that holds key/values for all of the properties in the object. :return: """ d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('contents', 'dataset')} d['modified_datetime'] = self.modified_datetime d['modified_ago'] = self.modified_ago return d
[ "def", "dict", "(", "self", ")", ":", "d", "=", "{", "p", ".", "key", ":", "getattr", "(", "self", ",", "p", ".", "key", ")", "for", "p", "in", "self", ".", "__mapper__", ".", "attrs", "if", "p", ".", "key", "not", "in", "(", "'contents'", ",...
A dict that holds key/values for all of the properties in the object. :return:
[ "A", "dict", "that", "holds", "key", "/", "values", "for", "all", "of", "the", "properties", "in", "the", "object", "." ]
python
train
christophercrouzet/nani
nani.py
https://github.com/christophercrouzet/nani/blob/296ae50c0cdcfd3ed0cba23a4d2edea0d124bcb1/nani.py#L1051-L1059
def _get_mixin_attributes(mixins): """Retrieve the attributes for a given set of mixin classes. The attributes of each mixin class are being merged into a single dictionary. """ return {attribute: mixin.__dict__[attribute] for mixin in mixins for attribute in _MIXIN_ATTRIBUTES[mixin]}
[ "def", "_get_mixin_attributes", "(", "mixins", ")", ":", "return", "{", "attribute", ":", "mixin", ".", "__dict__", "[", "attribute", "]", "for", "mixin", "in", "mixins", "for", "attribute", "in", "_MIXIN_ATTRIBUTES", "[", "mixin", "]", "}" ]
Retrieve the attributes for a given set of mixin classes. The attributes of each mixin class are being merged into a single dictionary.
[ "Retrieve", "the", "attributes", "for", "a", "given", "set", "of", "mixin", "classes", "." ]
python
train
inveniosoftware/invenio-records
invenio_records/api.py
https://github.com/inveniosoftware/invenio-records/blob/b0b1481d04012e45cb71b5ae4019e91dde88d1e2/invenio_records/api.py#L322-L355
def revert(self, revision_id): """Revert the record to a specific revision. #. Send a signal :data:`invenio_records.signals.before_record_revert` with the current record as parameter. #. Revert the record to the revision id passed as parameter. #. Send a signal :data:`invenio_records.signals.after_record_revert` with the reverted record as parameter. :param revision_id: Specify the record revision id :returns: The :class:`Record` instance corresponding to the revision id """ if self.model is None: raise MissingModelError() revision = self.revisions[revision_id] with db.session.begin_nested(): before_record_revert.send( current_app._get_current_object(), record=self ) self.model.json = dict(revision) db.session.merge(self.model) after_record_revert.send( current_app._get_current_object(), record=self ) return self.__class__(self.model.json, model=self.model)
[ "def", "revert", "(", "self", ",", "revision_id", ")", ":", "if", "self", ".", "model", "is", "None", ":", "raise", "MissingModelError", "(", ")", "revision", "=", "self", ".", "revisions", "[", "revision_id", "]", "with", "db", ".", "session", ".", "b...
Revert the record to a specific revision. #. Send a signal :data:`invenio_records.signals.before_record_revert` with the current record as parameter. #. Revert the record to the revision id passed as parameter. #. Send a signal :data:`invenio_records.signals.after_record_revert` with the reverted record as parameter. :param revision_id: Specify the record revision id :returns: The :class:`Record` instance corresponding to the revision id
[ "Revert", "the", "record", "to", "a", "specific", "revision", "." ]
python
train
rflamary/POT
ot/optim.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/optim.py#L207-L341
def gcg(a, b, M, reg1, reg2, f, df, G0=None, numItermax=10, numInnerItermax=200, stopThr=1e-9, verbose=False, log=False): """ Solve the general regularized OT problem with the generalized conditional gradient The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg1\cdot\Omega(\gamma) + reg2\cdot f(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma\geq 0 where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`f` is the regularization term ( and df is its gradient) - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the generalized conditional gradient as discussed in [5,7]_ Parameters ---------- a : np.ndarray (ns,) samples weights in the source domain b : np.ndarray (nt,) samples in the target domain M : np.ndarray (ns,nt) loss matrix reg1 : float Entropic Regularization term >0 reg2 : float Second Regularization term >0 G0 : np.ndarray (ns,nt), optional initial guess (default is indep joint density) numItermax : int, optional Max number of iterations numInnerItermax : int, optional Max number of iterations of Sinkhorn stopThr : float, optional Stop threshol on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters References ---------- .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. 
See Also -------- ot.optim.cg : conditional gradient """ loop = 1 if log: log = {'loss': []} if G0 is None: G = np.outer(a, b) else: G = G0 def cost(G): return np.sum(M * G) + reg1 * np.sum(G * np.log(G)) + reg2 * f(G) f_val = cost(G) if log: log['loss'].append(f_val) it = 0 if verbose: print('{:5s}|{:12s}|{:8s}'.format( 'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32) print('{:5d}|{:8e}|{:8e}'.format(it, f_val, 0)) while loop: it += 1 old_fval = f_val # problem linearization Mi = M + reg2 * df(G) # solve linear program with Sinkhorn # Gc = sinkhorn_stabilized(a,b, Mi, reg1, numItermax = numInnerItermax) Gc = sinkhorn(a, b, Mi, reg1, numItermax=numInnerItermax) deltaG = Gc - G # line search dcost = Mi + reg1 * (1 + np.log(G)) # ?? alpha, fc, f_val = line_search_armijo(cost, G, deltaG, dcost, f_val) G = G + alpha * deltaG # test convergence if it >= numItermax: loop = 0 delta_fval = (f_val - old_fval) / abs(f_val) if abs(delta_fval) < stopThr: loop = 0 if log: log['loss'].append(f_val) if verbose: if it % 20 == 0: print('{:5s}|{:12s}|{:8s}'.format( 'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32) print('{:5d}|{:8e}|{:8e}'.format(it, f_val, delta_fval)) if log: return G, log else: return G
[ "def", "gcg", "(", "a", ",", "b", ",", "M", ",", "reg1", ",", "reg2", ",", "f", ",", "df", ",", "G0", "=", "None", ",", "numItermax", "=", "10", ",", "numInnerItermax", "=", "200", ",", "stopThr", "=", "1e-9", ",", "verbose", "=", "False", ",",...
Solve the general regularized OT problem with the generalized conditional gradient The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg1\cdot\Omega(\gamma) + reg2\cdot f(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma\geq 0 where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`f` is the regularization term ( and df is its gradient) - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the generalized conditional gradient as discussed in [5,7]_ Parameters ---------- a : np.ndarray (ns,) samples weights in the source domain b : np.ndarray (nt,) samples in the target domain M : np.ndarray (ns,nt) loss matrix reg1 : float Entropic Regularization term >0 reg2 : float Second Regularization term >0 G0 : np.ndarray (ns,nt), optional initial guess (default is indep joint density) numItermax : int, optional Max number of iterations numInnerItermax : int, optional Max number of iterations of Sinkhorn stopThr : float, optional Stop threshol on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters References ---------- .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. See Also -------- ot.optim.cg : conditional gradient
[ "Solve", "the", "general", "regularized", "OT", "problem", "with", "the", "generalized", "conditional", "gradient" ]
python
train
pandas-dev/pandas
pandas/core/common.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L333-L348
def apply_if_callable(maybe_callable, obj, **kwargs): """ Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs """ if callable(maybe_callable): return maybe_callable(obj, **kwargs) return maybe_callable
[ "def", "apply_if_callable", "(", "maybe_callable", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "if", "callable", "(", "maybe_callable", ")", ":", "return", "maybe_callable", "(", "obj", ",", "*", "*", "kwargs", ")", "return", "maybe_callable" ]
Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs
[ "Evaluate", "possibly", "callable", "input", "using", "obj", "and", "kwargs", "if", "it", "is", "callable", "otherwise", "return", "as", "it", "is", "." ]
python
train
zomux/deepy
examples/attention_models/baseline_model.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/examples/attention_models/baseline_model.py#L64-L81
def _refined_glimpse_sensor(self, x_t, l_p): """ Parameters: x_t - 28x28 image l_p - 2x1 focus vector Returns: 7*14 matrix """ # Turn l_p to the left-top point of rectangle l_p = l_p * 14 + 14 - 4 l_p = T.cast(T.round(l_p), "int32") l_p = l_p * (l_p >= 0) l_p = l_p * (l_p < 21) + (l_p >= 21) * 20 glimpse_1 = x_t[l_p[0]: l_p[0] + 7][:, l_p[1]: l_p[1] + 7] # glimpse_2 = theano.tensor.signal.downsample.max_pool_2d(x_t, (4,4)) # return T.concatenate([glimpse_1, glimpse_2]) return glimpse_1
[ "def", "_refined_glimpse_sensor", "(", "self", ",", "x_t", ",", "l_p", ")", ":", "# Turn l_p to the left-top point of rectangle", "l_p", "=", "l_p", "*", "14", "+", "14", "-", "4", "l_p", "=", "T", ".", "cast", "(", "T", ".", "round", "(", "l_p", ")", ...
Parameters: x_t - 28x28 image l_p - 2x1 focus vector Returns: 7*14 matrix
[ "Parameters", ":", "x_t", "-", "28x28", "image", "l_p", "-", "2x1", "focus", "vector", "Returns", ":", "7", "*", "14", "matrix" ]
python
test
newville/wxmplot
wxmplot/stackedplotframe.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/stackedplotframe.py#L56-L60
def unzoom_all(self, event=None): """ zoom out full data range """ for p in (self.panel, self.panel_bot): p.conf.zoom_lims = [] p.conf.unzoom(full=True)
[ "def", "unzoom_all", "(", "self", ",", "event", "=", "None", ")", ":", "for", "p", "in", "(", "self", ".", "panel", ",", "self", ".", "panel_bot", ")", ":", "p", ".", "conf", ".", "zoom_lims", "=", "[", "]", "p", ".", "conf", ".", "unzoom", "("...
zoom out full data range
[ "zoom", "out", "full", "data", "range" ]
python
train
iotile/coretools
iotilesensorgraph/iotile/sg/walker.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/walker.py#L351-L363
def pop(self): """Pop a reading off of this virtual stream and return it.""" if self.reading is None: raise StreamEmptyError("Pop called on virtual stream walker without any data", selector=self.selector) reading = self.reading # If we're not a constant stream, we just exhausted ourselves if self.selector.match_type != DataStream.ConstantType: self.reading = None return reading
[ "def", "pop", "(", "self", ")", ":", "if", "self", ".", "reading", "is", "None", ":", "raise", "StreamEmptyError", "(", "\"Pop called on virtual stream walker without any data\"", ",", "selector", "=", "self", ".", "selector", ")", "reading", "=", "self", ".", ...
Pop a reading off of this virtual stream and return it.
[ "Pop", "a", "reading", "off", "of", "this", "virtual", "stream", "and", "return", "it", "." ]
python
train
saltstack/salt
salt/modules/jenkinsmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/jenkinsmod.py#L69-L92
def _connect(): ''' Return server object used to interact with Jenkins. :return: server object used to interact with Jenkins ''' jenkins_url = __salt__['config.get']('jenkins.url') or \ __salt__['config.get']('jenkins:url') or \ __salt__['pillar.get']('jenkins.url') jenkins_user = __salt__['config.get']('jenkins.user') or \ __salt__['config.get']('jenkins:user') or \ __salt__['pillar.get']('jenkins.user') jenkins_password = __salt__['config.get']('jenkins.password') or \ __salt__['config.get']('jenkins:password') or \ __salt__['pillar.get']('jenkins.password') if not jenkins_url: raise SaltInvocationError('No Jenkins URL found.') return jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_password)
[ "def", "_connect", "(", ")", ":", "jenkins_url", "=", "__salt__", "[", "'config.get'", "]", "(", "'jenkins.url'", ")", "or", "__salt__", "[", "'config.get'", "]", "(", "'jenkins:url'", ")", "or", "__salt__", "[", "'pillar.get'", "]", "(", "'jenkins.url'", ")...
Return server object used to interact with Jenkins. :return: server object used to interact with Jenkins
[ "Return", "server", "object", "used", "to", "interact", "with", "Jenkins", "." ]
python
train
zomux/deepy
deepy/layers/block.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/layers/block.py#L51-L58
def register_layer(self, layer): """ Register one connected layer. :type layer: NeuralLayer """ if self.fixed: raise Exception("After a block is fixed, no more layers can be registered.") self.layers.append(layer)
[ "def", "register_layer", "(", "self", ",", "layer", ")", ":", "if", "self", ".", "fixed", ":", "raise", "Exception", "(", "\"After a block is fixed, no more layers can be registered.\"", ")", "self", ".", "layers", ".", "append", "(", "layer", ")" ]
Register one connected layer. :type layer: NeuralLayer
[ "Register", "one", "connected", "layer", ".", ":", "type", "layer", ":", "NeuralLayer" ]
python
test
ArchiveTeam/wpull
wpull/pipeline/session.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/pipeline/session.py#L150-L169
def child_url_record(self, url: str, inline: bool=False, link_type: Optional[LinkType]=None, post_data: Optional[str]=None, level: Optional[int]=None): '''Return a child URLRecord. This function is useful for testing filters before adding to table. ''' url_record = URLRecord() url_record.url = url url_record.status = Status.todo url_record.try_count = 0 url_record.level = self.url_record.level + 1 if level is None else level url_record.root_url = self.url_record.root_url or self.url_record.url url_record.parent_url = self.url_record.url url_record.inline_level = (self.url_record.inline_level or 0) + 1 if inline else 0 url_record.link_type = link_type url_record.post_data = post_data return url_record
[ "def", "child_url_record", "(", "self", ",", "url", ":", "str", ",", "inline", ":", "bool", "=", "False", ",", "link_type", ":", "Optional", "[", "LinkType", "]", "=", "None", ",", "post_data", ":", "Optional", "[", "str", "]", "=", "None", ",", "lev...
Return a child URLRecord. This function is useful for testing filters before adding to table.
[ "Return", "a", "child", "URLRecord", "." ]
python
train
ejeschke/ginga
ginga/cmap.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/cmap.py#L13288-L13295
def ginga_to_matplotlib_cmap(cm, name=None): """Convert Ginga colormap to matplotlib's.""" if name is None: name = cm.name from matplotlib.colors import ListedColormap carr = np.asarray(cm.clst) mpl_cm = ListedColormap(carr, name=name, N=len(carr)) return mpl_cm
[ "def", "ginga_to_matplotlib_cmap", "(", "cm", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "cm", ".", "name", "from", "matplotlib", ".", "colors", "import", "ListedColormap", "carr", "=", "np", ".", "asarray", "(", ...
Convert Ginga colormap to matplotlib's.
[ "Convert", "Ginga", "colormap", "to", "matplotlib", "s", "." ]
python
train
uber/tchannel-python
tchannel/messages/common.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/messages/common.py#L138-L153
def generate_checksum(message, previous_csum=0): """Generate checksum for messages with CALL_REQ, CALL_REQ_CONTINUE, CALL_RES,CALL_RES_CONTINUE types. :param message: outgoing message :param previous_csum: accumulated checksum value """ if message.message_type in CHECKSUM_MSG_TYPES: csum = compute_checksum( message.checksum[0], message.args, previous_csum, ) message.checksum = (message.checksum[0], csum)
[ "def", "generate_checksum", "(", "message", ",", "previous_csum", "=", "0", ")", ":", "if", "message", ".", "message_type", "in", "CHECKSUM_MSG_TYPES", ":", "csum", "=", "compute_checksum", "(", "message", ".", "checksum", "[", "0", "]", ",", "message", ".",...
Generate checksum for messages with CALL_REQ, CALL_REQ_CONTINUE, CALL_RES,CALL_RES_CONTINUE types. :param message: outgoing message :param previous_csum: accumulated checksum value
[ "Generate", "checksum", "for", "messages", "with", "CALL_REQ", "CALL_REQ_CONTINUE", "CALL_RES", "CALL_RES_CONTINUE", "types", "." ]
python
train
mtien/PeptideBuilder
PeptideBuilder/PeptideBuilder.py
https://github.com/mtien/PeptideBuilder/blob/7b1ddab5199432c1aabc371a34ec42dd386dfa6f/PeptideBuilder/PeptideBuilder.py#L656-L708
def makeHis(segID, N, CA, C, O, geo): '''Creates a Histidine residue''' ##R-Group CA_CB_length=geo.CA_CB_length C_CA_CB_angle=geo.C_CA_CB_angle N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle CB_CG_length=geo.CB_CG_length CA_CB_CG_angle=geo.CA_CB_CG_angle N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle CG_ND1_length=geo.CG_ND1_length CB_CG_ND1_angle=geo.CB_CG_ND1_angle CA_CB_CG_ND1_diangle=geo.CA_CB_CG_ND1_diangle CG_CD2_length=geo.CG_CD2_length CB_CG_CD2_angle=geo.CB_CG_CD2_angle CA_CB_CG_CD2_diangle=geo.CA_CB_CG_CD2_diangle ND1_CE1_length=geo.ND1_CE1_length CG_ND1_CE1_angle=geo.CG_ND1_CE1_angle CB_CG_ND1_CE1_diangle=geo.CB_CG_ND1_CE1_diangle CD2_NE2_length=geo.CD2_NE2_length CG_CD2_NE2_angle=geo.CG_CD2_NE2_angle CB_CG_CD2_NE2_diangle=geo.CB_CG_CD2_NE2_diangle carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle) CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C") carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle) CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C") nitrogen_d1= calculateCoordinates(CA, CB, CG, CG_ND1_length, CB_CG_ND1_angle, CA_CB_CG_ND1_diangle) ND1= Atom("ND1", nitrogen_d1, 0.0, 1.0, " ", " ND1", 0, "N") carbon_d2= calculateCoordinates(CA, CB, CG, CG_CD2_length, CB_CG_CD2_angle, CA_CB_CG_CD2_diangle) CD2= Atom("CD2", carbon_d2, 0.0, 1.0, " ", " CD2", 0, "C") carbon_e1= calculateCoordinates(CB, CG, ND1, ND1_CE1_length, CG_ND1_CE1_angle, CB_CG_ND1_CE1_diangle) CE1= Atom("CE1", carbon_e1, 0.0, 1.0, " ", " CE1", 0, "C") nitrogen_e2= calculateCoordinates(CB, CG, CD2, CD2_NE2_length, CG_CD2_NE2_angle, CB_CG_CD2_NE2_diangle) NE2= Atom("NE2", nitrogen_e2, 0.0, 1.0, " ", " NE2", 0, "N") ##Create Residue Data Structure res= Residue((' ', segID, ' '), "HIS", ' ') res.add(N) res.add(CA) res.add(C) res.add(O) res.add(CB) res.add(CG) res.add(ND1) res.add(CD2) res.add(CE1) res.add(NE2) return res
[ "def", "makeHis", "(", "segID", ",", "N", ",", "CA", ",", "C", ",", "O", ",", "geo", ")", ":", "##R-Group", "CA_CB_length", "=", "geo", ".", "CA_CB_length", "C_CA_CB_angle", "=", "geo", ".", "C_CA_CB_angle", "N_C_CA_CB_diangle", "=", "geo", ".", "N_C_CA_...
Creates a Histidine residue
[ "Creates", "a", "Histidine", "residue" ]
python
train
twilio/twilio-python
twilio/rest/serverless/v1/service/function/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/function/__init__.py#L260-L282
def update(self, friendly_name): """ Update the FunctionInstance :param unicode friendly_name: The friendly_name :returns: Updated FunctionInstance :rtype: twilio.rest.serverless.v1.service.function.FunctionInstance """ data = values.of({'FriendlyName': friendly_name, }) payload = self._version.update( 'POST', self._uri, data=data, ) return FunctionInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], )
[ "def", "update", "(", "self", ",", "friendly_name", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'FriendlyName'", ":", "friendly_name", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "update", "(", "'POST'", ",", "self", ".", ...
Update the FunctionInstance :param unicode friendly_name: The friendly_name :returns: Updated FunctionInstance :rtype: twilio.rest.serverless.v1.service.function.FunctionInstance
[ "Update", "the", "FunctionInstance" ]
python
train
saltstack/salt
salt/utils/schema.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schema.py#L1506-L1533
def get_definition(self): '''Returns the definition of the complex item''' serialized = super(ComplexSchemaItem, self).serialize() # Adjust entries in the serialization del serialized['definition_name'] serialized['title'] = self.definition_name properties = {} required_attr_names = [] for attr_name in self._attributes: attr = getattr(self, attr_name) if attr and isinstance(attr, BaseSchemaItem): # Remove the attribute entry added by the base serialization del serialized[attr_name] properties[attr_name] = attr.serialize() properties[attr_name]['type'] = attr.__type__ if attr.required: required_attr_names.append(attr_name) if serialized.get('properties') is None: serialized['properties'] = {} serialized['properties'].update(properties) # Assign the required array if required_attr_names: serialized['required'] = required_attr_names return serialized
[ "def", "get_definition", "(", "self", ")", ":", "serialized", "=", "super", "(", "ComplexSchemaItem", ",", "self", ")", ".", "serialize", "(", ")", "# Adjust entries in the serialization", "del", "serialized", "[", "'definition_name'", "]", "serialized", "[", "'ti...
Returns the definition of the complex item
[ "Returns", "the", "definition", "of", "the", "complex", "item" ]
python
train
annoviko/pyclustering
pyclustering/cluster/bang.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bang.py#L538-L550
def __build_directory_levels(self): """! @brief Build levels of direction if amount of level is greater than one. """ previous_level_blocks = [ self.__root ] for level in range(1, self.__levels): previous_level_blocks = self.__build_level(previous_level_blocks, level) self.__store_level_blocks(previous_level_blocks) self.__leafs = sorted(self.__leafs, key=lambda block: block.get_density())
[ "def", "__build_directory_levels", "(", "self", ")", ":", "previous_level_blocks", "=", "[", "self", ".", "__root", "]", "for", "level", "in", "range", "(", "1", ",", "self", ".", "__levels", ")", ":", "previous_level_blocks", "=", "self", ".", "__build_leve...
! @brief Build levels of direction if amount of level is greater than one.
[ "!" ]
python
valid
cronofy/pycronofy
pycronofy/client.py
https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L412-L435
def availability(self, participants=(), required_duration=(), available_periods=(), start_interval=None, buffer=()): """ Performs an availability query. :param list participants: An Array of participant groups or a dict for a single participant group. :param dict or int required_duration - An Integer representing the minimum number of minutes of availability required. :param list available_periods - An Array of available time periods dicts, each must specify a start and end Time. :param dict or int start_interval - An Interger representing the start interval minutes for the event. :param dict buffer - An Dict representing the buffer to apply to the request. :rtype: ``list`` """ options = {} options['participants'] = self.map_availability_participants( participants) options['required_duration'] = self.map_availability_required_duration( required_duration) options['buffer'] = self.map_availability_buffer(buffer) if start_interval: options['start_interval'] = self.map_availability_required_duration(start_interval) self.translate_available_periods(available_periods) options['available_periods'] = available_periods return self.request_handler.post(endpoint='availability', data=options).json()['available_periods']
[ "def", "availability", "(", "self", ",", "participants", "=", "(", ")", ",", "required_duration", "=", "(", ")", ",", "available_periods", "=", "(", ")", ",", "start_interval", "=", "None", ",", "buffer", "=", "(", ")", ")", ":", "options", "=", "{", ...
Performs an availability query. :param list participants: An Array of participant groups or a dict for a single participant group. :param dict or int required_duration - An Integer representing the minimum number of minutes of availability required. :param list available_periods - An Array of available time periods dicts, each must specify a start and end Time. :param dict or int start_interval - An Interger representing the start interval minutes for the event. :param dict buffer - An Dict representing the buffer to apply to the request. :rtype: ``list``
[ "Performs", "an", "availability", "query", ".", ":", "param", "list", "participants", ":", "An", "Array", "of", "participant", "groups", "or", "a", "dict", "for", "a", "single", "participant", "group", ".", ":", "param", "dict", "or", "int", "required_durati...
python
train
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L2460-L2479
def wallet_key_valid(self, wallet): """ Returns if a **wallet** key is valid :param wallet: Wallet to check key is valid :type wallet: str >>> rpc.wallet_key_valid( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" ... ) True """ wallet = self._process_value(wallet, 'wallet') payload = {"wallet": wallet} resp = self.call('wallet_key_valid', payload) return resp['valid'] == '1'
[ "def", "wallet_key_valid", "(", "self", ",", "wallet", ")", ":", "wallet", "=", "self", ".", "_process_value", "(", "wallet", ",", "'wallet'", ")", "payload", "=", "{", "\"wallet\"", ":", "wallet", "}", "resp", "=", "self", ".", "call", "(", "'wallet_key...
Returns if a **wallet** key is valid :param wallet: Wallet to check key is valid :type wallet: str >>> rpc.wallet_key_valid( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" ... ) True
[ "Returns", "if", "a", "**", "wallet", "**", "key", "is", "valid" ]
python
train
pyviz/holoviews
holoviews/plotting/mpl/element.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/mpl/element.py#L108-L193
def _finalize_axis(self, key, element=None, title=None, dimensions=None, ranges=None, xticks=None, yticks=None, zticks=None, xlabel=None, ylabel=None, zlabel=None): """ Applies all the axis settings before the axis or figure is returned. Only plots with zorder 0 get to apply their settings. When the number of the frame is supplied as n, this method looks up and computes the appropriate title, axis labels and axis bounds. """ if element is None: element = self._get_frame(key) self.current_frame = element if not dimensions and element and not self.subplots: el = element.traverse(lambda x: x, [Element]) if el: el = el[0] dimensions = el.nodes.dimensions() if isinstance(el, Graph) else el.dimensions() axis = self.handles['axis'] subplots = list(self.subplots.values()) if self.subplots else [] if self.zorder == 0 and key is not None: if self.bgcolor: if mpl_version <= '1.5.9': axis.set_axis_bgcolor(self.bgcolor) else: axis.set_facecolor(self.bgcolor) # Apply title title = self._format_title(key) if self.show_title and title is not None: fontsize = self._fontsize('title') if 'title' in self.handles: self.handles['title'].set_text(title) else: self.handles['title'] = axis.set_title(title, **fontsize) # Apply subplot label self._subplot_label(axis) # Apply axis options if axes are enabled if element is not None and not any(not sp._has_axes for sp in [self] + subplots): # Set axis labels if dimensions: self._set_labels(axis, dimensions, xlabel, ylabel, zlabel) else: if self.xlabel is not None: axis.set_xlabel(self.xlabel) if self.ylabel is not None: axis.set_ylabel(self.ylabel) if self.zlabel is not None and hasattr(axis, 'set_zlabel'): axis.set_zlabel(self.zlabel) if not subplots: legend = axis.get_legend() if legend: legend.set_visible(self.show_legend) self.handles["bbox_extra_artists"] += [legend] axis.xaxis.grid(self.show_grid) axis.yaxis.grid(self.show_grid) # Apply log axes if self.logx: axis.set_xscale('log') if self.logy: axis.set_yscale('log') if not 
self.projection == '3d': self._set_axis_position(axis, 'x', self.xaxis) self._set_axis_position(axis, 'y', self.yaxis) # Apply ticks if self.apply_ticks: self._finalize_ticks(axis, dimensions, xticks, yticks, zticks) # Set axes limits self._set_axis_limits(axis, element, subplots, ranges) # Apply aspects if self.aspect is not None and self.projection != 'polar' and not self.adjoined: self._set_aspect(axis, self.aspect) if not subplots and not self.drawn: self._finalize_artist(element) self._execute_hooks(element) return super(ElementPlot, self)._finalize_axis(key)
[ "def", "_finalize_axis", "(", "self", ",", "key", ",", "element", "=", "None", ",", "title", "=", "None", ",", "dimensions", "=", "None", ",", "ranges", "=", "None", ",", "xticks", "=", "None", ",", "yticks", "=", "None", ",", "zticks", "=", "None", ...
Applies all the axis settings before the axis or figure is returned. Only plots with zorder 0 get to apply their settings. When the number of the frame is supplied as n, this method looks up and computes the appropriate title, axis labels and axis bounds.
[ "Applies", "all", "the", "axis", "settings", "before", "the", "axis", "or", "figure", "is", "returned", ".", "Only", "plots", "with", "zorder", "0", "get", "to", "apply", "their", "settings", "." ]
python
train
gitpython-developers/GitPython
git/util.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/util.py#L774-L786
def _release_lock(self): """Release our lock if we have one""" if not self._has_lock(): return # if someone removed our file beforhand, lets just flag this issue # instead of failing, to make it more usable. lfp = self._lock_file_path() try: rmfile(lfp) except OSError: pass self._owns_lock = False
[ "def", "_release_lock", "(", "self", ")", ":", "if", "not", "self", ".", "_has_lock", "(", ")", ":", "return", "# if someone removed our file beforhand, lets just flag this issue", "# instead of failing, to make it more usable.", "lfp", "=", "self", ".", "_lock_file_path", ...
Release our lock if we have one
[ "Release", "our", "lock", "if", "we", "have", "one" ]
python
train
nickmckay/LiPD-utilities
Python/lipd/__init__.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L683-L709
def showDfs(d): """ Display the available data frame names in a given data frame collection :param dict d: Dataframe collection :return none: """ if "metadata" in d: print("metadata") if "paleoData" in d: try: for k, v in d["paleoData"].items(): print(k) except KeyError: pass except AttributeError: pass if "chronData" in d: try: for k, v in d["chronData"].items(): print(k) except KeyError: pass except AttributeError: pass # print("Process Complete") return
[ "def", "showDfs", "(", "d", ")", ":", "if", "\"metadata\"", "in", "d", ":", "print", "(", "\"metadata\"", ")", "if", "\"paleoData\"", "in", "d", ":", "try", ":", "for", "k", ",", "v", "in", "d", "[", "\"paleoData\"", "]", ".", "items", "(", ")", ...
Display the available data frame names in a given data frame collection :param dict d: Dataframe collection :return none:
[ "Display", "the", "available", "data", "frame", "names", "in", "a", "given", "data", "frame", "collection" ]
python
train
clalancette/pycdlib
pycdlib/utils.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/utils.py#L304-L322
def file_object_supports_binary(fp): # type: (BinaryIO) -> bool ''' A function to check whether a file-like object supports binary mode. Parameters: fp - The file-like object to check for binary mode support. Returns: True if the file-like object supports binary mode, False otherwise. ''' if hasattr(fp, 'mode'): return 'b' in fp.mode # Python 3 if sys.version_info >= (3, 0): return isinstance(fp, (io.RawIOBase, io.BufferedIOBase)) # Python 2 return isinstance(fp, (cStringIO.OutputType, cStringIO.InputType, io.RawIOBase, io.BufferedIOBase))
[ "def", "file_object_supports_binary", "(", "fp", ")", ":", "# type: (BinaryIO) -> bool", "if", "hasattr", "(", "fp", ",", "'mode'", ")", ":", "return", "'b'", "in", "fp", ".", "mode", "# Python 3", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "0"...
A function to check whether a file-like object supports binary mode. Parameters: fp - The file-like object to check for binary mode support. Returns: True if the file-like object supports binary mode, False otherwise.
[ "A", "function", "to", "check", "whether", "a", "file", "-", "like", "object", "supports", "binary", "mode", "." ]
python
train
pyout/pyout
pyout/field.py
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/field.py#L414-L447
def post_from_style(self, column_style): """Yield post-format processors based on `column_style`. Parameters ---------- column_style : dict A style where the top-level keys correspond to style attributes such as "bold" or "color". Returns ------- A generator object. """ flanks = Flanks() yield flanks.split_flanks fns = {"simple": self.by_key, "lookup": self.by_lookup, "re_lookup": self.by_re_lookup, "interval": self.by_interval_lookup} for key in self.style_types: if key not in column_style: continue vtype = value_type(column_style[key]) fn = fns[vtype] args = [key, column_style[key]] if vtype == "re_lookup": args.append(sum(getattr(re, f) for f in column_style.get("re_flags", []))) yield fn(*args) yield flanks.join_flanks
[ "def", "post_from_style", "(", "self", ",", "column_style", ")", ":", "flanks", "=", "Flanks", "(", ")", "yield", "flanks", ".", "split_flanks", "fns", "=", "{", "\"simple\"", ":", "self", ".", "by_key", ",", "\"lookup\"", ":", "self", ".", "by_lookup", ...
Yield post-format processors based on `column_style`. Parameters ---------- column_style : dict A style where the top-level keys correspond to style attributes such as "bold" or "color". Returns ------- A generator object.
[ "Yield", "post", "-", "format", "processors", "based", "on", "column_style", "." ]
python
train
BerkeleyAutomation/perception
perception/phoxi_sensor.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/phoxi_sensor.py#L225-L236
def _color_im_callback(self, msg): """Callback for handling textures (greyscale images). """ try: data = self._bridge.imgmsg_to_cv2(msg) if np.max(data) > 255.0: data = 255.0 * data / 1200.0 # Experimentally set value for white data = np.clip(data, 0., 255.0).astype(np.uint8) gsimage = GrayscaleImage(data, frame=self._frame) self._cur_color_im = gsimage.to_color() except: self._cur_color_im = None
[ "def", "_color_im_callback", "(", "self", ",", "msg", ")", ":", "try", ":", "data", "=", "self", ".", "_bridge", ".", "imgmsg_to_cv2", "(", "msg", ")", "if", "np", ".", "max", "(", "data", ")", ">", "255.0", ":", "data", "=", "255.0", "*", "data", ...
Callback for handling textures (greyscale images).
[ "Callback", "for", "handling", "textures", "(", "greyscale", "images", ")", "." ]
python
train
ampl/amplpy
amplpy/dataframe.py
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/dataframe.py#L335-L356
def fromPandas(cls, df): """ Create a :class:`~amplpy.DataFrame` from a pandas DataFrame. """ assert pd is not None if isinstance(df, pd.Series): df = pd.DataFrame(df) else: assert isinstance(df, pd.DataFrame) keys = [ key if isinstance(key, tuple) else (key,) for key in df.index.tolist() ] index = [ ('index{}'.format(i), cindex) for i, cindex in enumerate(zip(*keys)) ] columns = [ (str(cname), df[cname].tolist()) for cname in df.columns.tolist() ] return cls(index=index, columns=columns)
[ "def", "fromPandas", "(", "cls", ",", "df", ")", ":", "assert", "pd", "is", "not", "None", "if", "isinstance", "(", "df", ",", "pd", ".", "Series", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "df", ")", "else", ":", "assert", "isinstance", ...
Create a :class:`~amplpy.DataFrame` from a pandas DataFrame.
[ "Create", "a", ":", "class", ":", "~amplpy", ".", "DataFrame", "from", "a", "pandas", "DataFrame", "." ]
python
train
Gandi/gandi.cli
gandi/cli/commands/certificate.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/certificate.py#L520-L540
def delete(gandi, resource, background, force): """ Revoke the certificate. Resource can be a CN or an ID """ ids = gandi.certificate.usable_ids(resource) if len(ids) > 1: gandi.echo('Will not delete, %s is not precise enough.' % resource) gandi.echo(' * cert : ' + '\n * cert : '.join([str(id_) for id_ in ids])) return if not force: proceed = click.confirm("Are you sure to delete the certificate %s?" % resource) if not proceed: return result = gandi.certificate.delete(ids[0], background) return result
[ "def", "delete", "(", "gandi", ",", "resource", ",", "background", ",", "force", ")", ":", "ids", "=", "gandi", ".", "certificate", ".", "usable_ids", "(", "resource", ")", "if", "len", "(", "ids", ")", ">", "1", ":", "gandi", ".", "echo", "(", "'W...
Revoke the certificate. Resource can be a CN or an ID
[ "Revoke", "the", "certificate", "." ]
python
train
gbowerman/azurerm
azurerm/restfns.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L214-L249
def do_ams_post(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"): '''Do a AMS HTTP POST request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. body (str): Azure Media Services Content Body. access_token (str): A valid Azure authentication token. rformat (str): A required JSON Accept Format. ds_min_version (str): A required DS MIN Version. Returns: HTTP response. JSON body. ''' min_ds = dsversion_min content_acceptformat = json_acceptformat acceptformat = json_acceptformat if rformat == "json_only": min_ds = ds_min_version content_acceptformat = json_only_acceptformat if rformat == "xml": content_acceptformat = xml_acceptformat acceptformat = xml_acceptformat + ",application/xml" headers = {"Content-Type": content_acceptformat, "DataServiceVersion": min_ds, "MaxDataServiceVersion": dsversion_max, "Accept": acceptformat, "Accept-Charset" : charset, "Authorization": "Bearer " + access_token, "x-ms-version" : xmsversion} response = requests.post(endpoint, data=body, headers=headers, allow_redirects=False) # AMS response to the first call can be a redirect, # so we handle it here to make it transparent for the caller... if response.status_code == 301: redirected_url = ''.join([response.headers['location'], path]) response = requests.post(redirected_url, data=body, headers=headers) return response
[ "def", "do_ams_post", "(", "endpoint", ",", "path", ",", "body", ",", "access_token", ",", "rformat", "=", "\"json\"", ",", "ds_min_version", "=", "\"3.0;NetFx\"", ")", ":", "min_ds", "=", "dsversion_min", "content_acceptformat", "=", "json_acceptformat", "acceptf...
Do a AMS HTTP POST request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. body (str): Azure Media Services Content Body. access_token (str): A valid Azure authentication token. rformat (str): A required JSON Accept Format. ds_min_version (str): A required DS MIN Version. Returns: HTTP response. JSON body.
[ "Do", "a", "AMS", "HTTP", "POST", "request", "and", "return", "JSON", ".", "Args", ":", "endpoint", "(", "str", ")", ":", "Azure", "Media", "Services", "Initial", "Endpoint", ".", "path", "(", "str", ")", ":", "Azure", "Media", "Services", "Endpoint", ...
python
train
neurosynth/neurosynth
neurosynth/base/dataset.py
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/dataset.py#L683-L720
def get_ids(self, features, threshold=0.0, func=np.sum, get_weights=False): """ Returns a list of all studies in the table that meet the desired feature-based criteria. Will most commonly be used to retrieve studies that use one or more features with some minimum frequency; e.g.,: get_ids(['fear', 'anxiety'], threshold=0.001) Args: features (lists): a list of feature names to search on. threshold (float): optional float indicating threshold features must pass to be included. func (Callable): any numpy function to use for thresholding (default: sum). The function will be applied to the list of features and the result compared to the threshold. This can be used to change the meaning of the query in powerful ways. E.g,: max: any of the features have to pass threshold (i.e., max > thresh) min: all features must each individually pass threshold (i.e., min > thresh) sum: the summed weight of all features must pass threshold (i.e., sum > thresh) get_weights (bool): if True, returns a dict with ids => weights. Returns: When get_weights is false (default), returns a list of study names. When true, returns a dict, with study names as keys and feature weights as values. """ if isinstance(features, str): features = [features] features = self.search_features(features) # Expand wild cards feature_weights = self.data.ix[:, features] weights = feature_weights.apply(func, 1) above_thresh = weights[weights >= threshold] # ids_to_keep = self.ids[above_thresh] return above_thresh if get_weights else list(above_thresh.index)
[ "def", "get_ids", "(", "self", ",", "features", ",", "threshold", "=", "0.0", ",", "func", "=", "np", ".", "sum", ",", "get_weights", "=", "False", ")", ":", "if", "isinstance", "(", "features", ",", "str", ")", ":", "features", "=", "[", "features",...
Returns a list of all studies in the table that meet the desired feature-based criteria. Will most commonly be used to retrieve studies that use one or more features with some minimum frequency; e.g.,: get_ids(['fear', 'anxiety'], threshold=0.001) Args: features (lists): a list of feature names to search on. threshold (float): optional float indicating threshold features must pass to be included. func (Callable): any numpy function to use for thresholding (default: sum). The function will be applied to the list of features and the result compared to the threshold. This can be used to change the meaning of the query in powerful ways. E.g,: max: any of the features have to pass threshold (i.e., max > thresh) min: all features must each individually pass threshold (i.e., min > thresh) sum: the summed weight of all features must pass threshold (i.e., sum > thresh) get_weights (bool): if True, returns a dict with ids => weights. Returns: When get_weights is false (default), returns a list of study names. When true, returns a dict, with study names as keys and feature weights as values.
[ "Returns", "a", "list", "of", "all", "studies", "in", "the", "table", "that", "meet", "the", "desired", "feature", "-", "based", "criteria", "." ]
python
test
taxjar/taxjar-python
taxjar/client.py
https://github.com/taxjar/taxjar-python/blob/be9b30d7dc968d24e066c7c133849fee180f8d95/taxjar/client.py#L86-L89
def delete_refund(self, refund_id): """Deletes an existing refund transaction.""" request = self._delete('transactions/refunds/' + str(refund_id)) return self.responder(request)
[ "def", "delete_refund", "(", "self", ",", "refund_id", ")", ":", "request", "=", "self", ".", "_delete", "(", "'transactions/refunds/'", "+", "str", "(", "refund_id", ")", ")", "return", "self", ".", "responder", "(", "request", ")" ]
Deletes an existing refund transaction.
[ "Deletes", "an", "existing", "refund", "transaction", "." ]
python
train
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/_serialization.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/_serialization.py#L1285-L1293
def doc_from_xml(document_element_name, inner_xml): '''Wraps the specified xml in an xml root element with default azure namespaces''' xml = ''.join(['<', document_element_name, ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"', ' xmlns="http://schemas.microsoft.com/windowsazure">']) xml += inner_xml xml += ''.join(['</', document_element_name, '>']) return xml
[ "def", "doc_from_xml", "(", "document_element_name", ",", "inner_xml", ")", ":", "xml", "=", "''", ".", "join", "(", "[", "'<'", ",", "document_element_name", ",", "' xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"'", ",", "' xmlns=\"http://schemas.microsoft.com/windo...
Wraps the specified xml in an xml root element with default azure namespaces
[ "Wraps", "the", "specified", "xml", "in", "an", "xml", "root", "element", "with", "default", "azure", "namespaces" ]
python
test
xoolive/traffic
traffic/data/adsb/opensky_impala.py
https://github.com/xoolive/traffic/blob/d1a8878098f16759f6b6e0e8d8b8f32e34a680a8/traffic/data/adsb/opensky_impala.py#L526-L568
def within_bounds( self, start: timelike, stop: timelike, bounds: Union[BaseGeometry, Tuple[float, float, float, float]], ) -> Optional[pd.DataFrame]: """EXPERIMENTAL.""" start = to_datetime(start) stop = to_datetime(stop) before_hour = round_time(start, "before") after_hour = round_time(stop, "after") try: # thinking of shapely bounds attribute (in this order) # I just don't want to add the shapely dependency here west, south, east, north = bounds.bounds # type: ignore except AttributeError: west, south, east, north = bounds other_params = "and lon>={} and lon<={} ".format(west, east) other_params += "and lat>={} and lat<={} ".format(south, north) query = self.basic_request.format( columns="icao24, callsign, s.ITEM as serial, count(*) as count", other_tables=", state_vectors_data4.serials s", before_time=start.timestamp(), after_time=stop.timestamp(), before_hour=before_hour.timestamp(), after_hour=after_hour.timestamp(), other_params=other_params + "group by icao24, callsign, s.ITEM", ) logging.info(f"Sending request: {query}") df = self._impala(query, columns="icao24, callsign, serial, count") if df is None: return None df = df[df["count"] != "count"] df["count"] = df["count"].astype(int) return df
[ "def", "within_bounds", "(", "self", ",", "start", ":", "timelike", ",", "stop", ":", "timelike", ",", "bounds", ":", "Union", "[", "BaseGeometry", ",", "Tuple", "[", "float", ",", "float", ",", "float", ",", "float", "]", "]", ",", ")", "->", "Optio...
EXPERIMENTAL.
[ "EXPERIMENTAL", "." ]
python
train
dmlc/xgboost
python-package/xgboost/core.py
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1477-L1578
def get_score(self, fmap='', importance_type='weight'): """Get feature importance of each feature. Importance type can be defined as: * 'weight': the number of times a feature is used to split the data across all trees. * 'gain': the average gain across all splits the feature is used in. * 'cover': the average coverage across all splits the feature is used in. * 'total_gain': the total gain across all splits the feature is used in. * 'total_cover': the total coverage across all splits the feature is used in. .. note:: Feature importance is defined only for tree boosters Feature importance is only defined when the decision tree model is chosen as base learner (`booster=gbtree`). It is not defined for other base learner types, such as linear learners (`booster=gblinear`). Parameters ---------- fmap: str (optional) The name of feature map file. importance_type: str, default 'weight' One of the importance types defined above. """ if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}: raise ValueError('Feature importance is not defined for Booster type {}' .format(self.booster)) allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover'] if importance_type not in allowed_importance_types: msg = ("importance_type mismatch, got '{}', expected one of " + repr(allowed_importance_types)) raise ValueError(msg.format(importance_type)) # if it's weight, then omap stores the number of missing values if importance_type == 'weight': # do a simpler tree dump to save time trees = self.get_dump(fmap, with_stats=False) fmap = {} for tree in trees: for line in tree.split('\n'): # look for the opening square bracket arr = line.split('[') # if no opening bracket (leaf node), ignore this line if len(arr) == 1: continue # extract feature name from string between [] fid = arr[1].split(']')[0].split('<')[0] if fid not in fmap: # if the feature hasn't been seen yet fmap[fid] = 1 else: fmap[fid] += 1 return fmap average_over_splits = 
True if importance_type == 'total_gain': importance_type = 'gain' average_over_splits = False elif importance_type == 'total_cover': importance_type = 'cover' average_over_splits = False trees = self.get_dump(fmap, with_stats=True) importance_type += '=' fmap = {} gmap = {} for tree in trees: for line in tree.split('\n'): # look for the opening square bracket arr = line.split('[') # if no opening bracket (leaf node), ignore this line if len(arr) == 1: continue # look for the closing bracket, extract only info within that bracket fid = arr[1].split(']') # extract gain or cover from string after closing bracket g = float(fid[1].split(importance_type)[1].split(',')[0]) # extract feature name from string before closing bracket fid = fid[0].split('<')[0] if fid not in fmap: # if the feature hasn't been seen yet fmap[fid] = 1 gmap[fid] = g else: fmap[fid] += 1 gmap[fid] += g # calculate average value (gain/cover) for each feature if average_over_splits: for fid in gmap: gmap[fid] = gmap[fid] / fmap[fid] return gmap
[ "def", "get_score", "(", "self", ",", "fmap", "=", "''", ",", "importance_type", "=", "'weight'", ")", ":", "if", "getattr", "(", "self", ",", "'booster'", ",", "None", ")", "is", "not", "None", "and", "self", ".", "booster", "not", "in", "{", "'gbtr...
Get feature importance of each feature. Importance type can be defined as: * 'weight': the number of times a feature is used to split the data across all trees. * 'gain': the average gain across all splits the feature is used in. * 'cover': the average coverage across all splits the feature is used in. * 'total_gain': the total gain across all splits the feature is used in. * 'total_cover': the total coverage across all splits the feature is used in. .. note:: Feature importance is defined only for tree boosters Feature importance is only defined when the decision tree model is chosen as base learner (`booster=gbtree`). It is not defined for other base learner types, such as linear learners (`booster=gblinear`). Parameters ---------- fmap: str (optional) The name of feature map file. importance_type: str, default 'weight' One of the importance types defined above.
[ "Get", "feature", "importance", "of", "each", "feature", ".", "Importance", "type", "can", "be", "defined", "as", ":" ]
python
train
aouyar/PyMunin
pymunin/plugins/fsstats.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/fsstats.py#L110-L117
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ fs = FSinfo(self._fshost, self._fsport, self._fspass) return fs is not None
[ "def", "autoconf", "(", "self", ")", ":", "fs", "=", "FSinfo", "(", "self", ".", "_fshost", ",", "self", ".", "_fsport", ",", "self", ".", "_fspass", ")", "return", "fs", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L882-L916
def convert_permute(builder, layer, input_names, output_names, keras_layer): """ Convert a softmax layer from keras to coreml. Parameters keras_layer: layer ---------- A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = (input_names[0], output_names[0]) keras_dims = keras_layer.dims # Keras permute layer index begins at 1 if len(keras_dims) == 3: # Keras input tensor interpret as (H,W,C) x = list(_np.array(keras_dims)) arr = [2, 3, 1] # HWC in Keras arr_permuted = [arr[x[0] - 1], arr[x[1] - 1], arr[x[2] - 1]] arr_permuted = [arr_permuted[2], arr_permuted[0], arr_permuted[1]] # coreml format: channel first # add a sequence axis dim = [0] + arr_permuted dim = tuple(dim) elif len(keras_dims) == 4: # Here we use Keras converter as a place holder for inserting # permutations - the values here are not valid Keras dim parameters # but parameters we need to use to convert to CoreML model dim = keras_dims else: raise NotImplementedError('Supports only 3d permutation.') builder.add_permute(name = layer, dim=dim, input_name = input_name, output_name = output_name)
[ "def", "convert_permute", "(", "builder", ",", "layer", ",", "input_names", ",", "output_names", ",", "keras_layer", ")", ":", "input_name", ",", "output_name", "=", "(", "input_names", "[", "0", "]", ",", "output_names", "[", "0", "]", ")", "keras_dims", ...
Convert a softmax layer from keras to coreml. Parameters keras_layer: layer ---------- A keras layer object. builder: NeuralNetworkBuilder A neural network builder object.
[ "Convert", "a", "softmax", "layer", "from", "keras", "to", "coreml", "." ]
python
train
tmontaigu/pylas
pylas/headers/rawheader.py
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L159-L163
def date(self, date): """ Returns the date of file creation as a python date object """ self.creation_year = date.year self.creation_day_of_year = date.timetuple().tm_yday
[ "def", "date", "(", "self", ",", "date", ")", ":", "self", ".", "creation_year", "=", "date", ".", "year", "self", ".", "creation_day_of_year", "=", "date", ".", "timetuple", "(", ")", ".", "tm_yday" ]
Returns the date of file creation as a python date object
[ "Returns", "the", "date", "of", "file", "creation", "as", "a", "python", "date", "object" ]
python
test
debrouwere/google-analytics
googleanalytics/query.py
https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L1068-L1085
def limit(self, maximum): """ Return a new query, limited to a certain number of results. Unlike core reporting queries, you cannot specify a starting point for live queries, just the maximum results returned. ```python # first 50 query.limit(50) ``` """ self.meta['limit'] = maximum self.raw.update({ 'max_results': maximum, }) return self
[ "def", "limit", "(", "self", ",", "maximum", ")", ":", "self", ".", "meta", "[", "'limit'", "]", "=", "maximum", "self", ".", "raw", ".", "update", "(", "{", "'max_results'", ":", "maximum", ",", "}", ")", "return", "self" ]
Return a new query, limited to a certain number of results. Unlike core reporting queries, you cannot specify a starting point for live queries, just the maximum results returned. ```python # first 50 query.limit(50) ```
[ "Return", "a", "new", "query", "limited", "to", "a", "certain", "number", "of", "results", "." ]
python
train
jsommers/switchyard
switchyard/importcode.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/importcode.py#L7-L52
def import_or_die(module_name, entrypoint_names): ''' Import user code; return reference to usercode function. (str) -> function reference ''' log_debug("Importing {}".format(module_name)) module_name = os.path.abspath(module_name) if module_name.endswith('.py'): module_name,ext = os.path.splitext(module_name) modname = os.path.basename(module_name) dirname = os.path.dirname(module_name) if dirname and dirname not in sys.path: sys.path.append(dirname) # first, try to reload code if modname in sys.modules: user_module = sys.modules.get(modname) user_module = importlib.reload(user_module) # if it isn't in sys.modules, load it for the first time, or # try to. else: try: mypaths = [ x for x in sys.path if ("Cellar" not in x and "packages" not in x)] # print("Loading {} from {} ({})".format(modname, dirname, mypaths)) # user_module = importlib.import_module(modname) user_module = importlib.__import__(modname) except ImportError as e: log_failure("Fatal error: couldn't import module (error: {}) while executing {}".format(str(e), modname)) raise ImportError(e) # if there aren't any functions to call into, then the caller # just wanted the module/code to be imported, and that's it. if not entrypoint_names: return existing_names = dir(user_module) for method in entrypoint_names: if method in existing_names: return getattr(user_module, method) if len(entrypoint_names) > 1: entrypoints = "one of {}".format(', '.join(entrypoint_names)) else: entrypoints = entrypoint_names[0] raise ImportError("Required entrypoint function or symbol ({}) not found in your code".format(entrypoints))
[ "def", "import_or_die", "(", "module_name", ",", "entrypoint_names", ")", ":", "log_debug", "(", "\"Importing {}\"", ".", "format", "(", "module_name", ")", ")", "module_name", "=", "os", ".", "path", ".", "abspath", "(", "module_name", ")", "if", "module_name...
Import user code; return reference to usercode function. (str) -> function reference
[ "Import", "user", "code", ";", "return", "reference", "to", "usercode", "function", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/identity/identity_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/identity/identity_client.py#L255-L265
def create_identity(self, framework_identity_info): """CreateIdentity. :param :class:`<FrameworkIdentityInfo> <azure.devops.v5_0.identity.models.FrameworkIdentityInfo>` framework_identity_info: :rtype: :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>` """ content = self._serialize.body(framework_identity_info, 'FrameworkIdentityInfo') response = self._send(http_method='PUT', location_id='dd55f0eb-6ea2-4fe4-9ebe-919e7dd1dfb4', version='5.0', content=content) return self._deserialize('Identity', response)
[ "def", "create_identity", "(", "self", ",", "framework_identity_info", ")", ":", "content", "=", "self", ".", "_serialize", ".", "body", "(", "framework_identity_info", ",", "'FrameworkIdentityInfo'", ")", "response", "=", "self", ".", "_send", "(", "http_method",...
CreateIdentity. :param :class:`<FrameworkIdentityInfo> <azure.devops.v5_0.identity.models.FrameworkIdentityInfo>` framework_identity_info: :rtype: :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>`
[ "CreateIdentity", ".", ":", "param", ":", "class", ":", "<FrameworkIdentityInfo", ">", "<azure", ".", "devops", ".", "v5_0", ".", "identity", ".", "models", ".", "FrameworkIdentityInfo", ">", "framework_identity_info", ":", ":", "rtype", ":", ":", "class", ":"...
python
train
Esri/ArcREST
src/arcrest/manageags/_kml.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_kml.py#L51-L65
def createKMZ(self, kmz_as_json): """ Creates a KMZ file from json. See http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Create_Kmz/02r3000001tm000000/ for more information. """ kmlURL = self._url + "/createKmz" params = { "f" : "json", "kml" : kmz_as_json } return self._post(url=kmlURL, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "createKMZ", "(", "self", ",", "kmz_as_json", ")", ":", "kmlURL", "=", "self", ".", "_url", "+", "\"/createKmz\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"kml\"", ":", "kmz_as_json", "}", "return", "self", ".", "_post", "(", "url", ...
Creates a KMZ file from json. See http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Create_Kmz/02r3000001tm000000/ for more information.
[ "Creates", "a", "KMZ", "file", "from", "json", ".", "See", "http", ":", "//", "resources", ".", "arcgis", ".", "com", "/", "en", "/", "help", "/", "arcgis", "-", "rest", "-", "api", "/", "index", ".", "html#", "/", "Create_Kmz", "/", "02r3000001tm000...
python
train
peo3/cgroup-utils
cgutils/cgroup.py
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L768-L791
def get_stats(self): """ It returns a name and a value pairs of control files which are categorised in the stats group. """ stats = {} for name, cls in self.stats.items(): path = self.paths[name] if os.path.exists(path): try: stats[name] = self._PARSERS[cls](fileops.read(path)) except IOError as e: # XXX: we have to distinguish unexpected errors from the expected ones if e.errno == errno.EOPNOTSUPP: # Since 3.5 memory.memsw.* are always created even if disabled. # If disabled we will get EOPNOTSUPP when read or write them. # See commit af36f906c0f4c2ffa0482ecdf856a33dc88ae8c5 of the kernel. pass if e.errno == errno.EIO: # memory.kmem.slabinfo throws EIO until limit_in_bytes is set. pass else: raise return stats
[ "def", "get_stats", "(", "self", ")", ":", "stats", "=", "{", "}", "for", "name", ",", "cls", "in", "self", ".", "stats", ".", "items", "(", ")", ":", "path", "=", "self", ".", "paths", "[", "name", "]", "if", "os", ".", "path", ".", "exists", ...
It returns a name and a value pairs of control files which are categorised in the stats group.
[ "It", "returns", "a", "name", "and", "a", "value", "pairs", "of", "control", "files", "which", "are", "categorised", "in", "the", "stats", "group", "." ]
python
train
gwastro/pycbc
pycbc/detector.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/detector.py#L104-L118
def light_travel_time_to_detector(self, det): """ Return the light travel time from this detector Parameters ---------- det: Detector The other detector to determine the light travel time to. Returns ------- time: float The light travel time in seconds """ d = self.location - det.location return float(d.dot(d)**0.5 / constants.c.value)
[ "def", "light_travel_time_to_detector", "(", "self", ",", "det", ")", ":", "d", "=", "self", ".", "location", "-", "det", ".", "location", "return", "float", "(", "d", ".", "dot", "(", "d", ")", "**", "0.5", "/", "constants", ".", "c", ".", "value", ...
Return the light travel time from this detector Parameters ---------- det: Detector The other detector to determine the light travel time to. Returns ------- time: float The light travel time in seconds
[ "Return", "the", "light", "travel", "time", "from", "this", "detector" ]
python
train