repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
gwastro/pycbc
pycbc/inference/sampler/base.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base.py#L146-L193
def setup_output(self, output_file, force=False, injection_file=None): """Sets up the sampler's checkpoint and output files. The checkpoint file has the same name as the output file, but with ``.checkpoint`` appended to the name. A backup file will also be created. If the output file already exists, an ``OSError`` will be raised. This can be overridden by setting ``force`` to ``True``. Parameters ---------- sampler : sampler instance Sampler output_file : str Name of the output file. force : bool, optional If the output file already exists, overwrite it. injection_file : str, optional If an injection was added to the data, write its information. """ # check for backup file(s) checkpoint_file = output_file + '.checkpoint' backup_file = output_file + '.bkup' # check if we have a good checkpoint and/or backup file logging.info("Looking for checkpoint file") checkpoint_valid = validate_checkpoint_files(checkpoint_file, backup_file) # Create a new file if the checkpoint doesn't exist, or if it is # corrupted self.new_checkpoint = False # keeps track if this is a new file or not if not checkpoint_valid: logging.info("Checkpoint not found or not valid") create_new_output_file(self, checkpoint_file, force=force, injection_file=injection_file) # now the checkpoint is valid self.new_checkpoint = True # copy to backup shutil.copy(checkpoint_file, backup_file) # write the command line, startup for fn in [checkpoint_file, backup_file]: with self.io(fn, "a") as fp: fp.write_command_line() fp.write_resume_point() # store self.checkpoint_file = checkpoint_file self.backup_file = backup_file self.checkpoint_valid = checkpoint_valid
[ "def", "setup_output", "(", "self", ",", "output_file", ",", "force", "=", "False", ",", "injection_file", "=", "None", ")", ":", "# check for backup file(s)", "checkpoint_file", "=", "output_file", "+", "'.checkpoint'", "backup_file", "=", "output_file", "+", "'....
Sets up the sampler's checkpoint and output files. The checkpoint file has the same name as the output file, but with ``.checkpoint`` appended to the name. A backup file will also be created. If the output file already exists, an ``OSError`` will be raised. This can be overridden by setting ``force`` to ``True``. Parameters ---------- sampler : sampler instance Sampler output_file : str Name of the output file. force : bool, optional If the output file already exists, overwrite it. injection_file : str, optional If an injection was added to the data, write its information.
[ "Sets", "up", "the", "sampler", "s", "checkpoint", "and", "output", "files", "." ]
python
train
MDAnalysis/GridDataFormats
gridData/OpenDX.py
https://github.com/MDAnalysis/GridDataFormats/blob/3eeb0432f8cf856912436e4f3e7aba99d3c916be/gridData/OpenDX.py#L589-L594
def initialize(self): """Initialize the corresponding DXclass from the data. class = DXInitObject.initialize() """ return self.DXclasses[self.type](self.id,**self.args)
[ "def", "initialize", "(", "self", ")", ":", "return", "self", ".", "DXclasses", "[", "self", ".", "type", "]", "(", "self", ".", "id", ",", "*", "*", "self", ".", "args", ")" ]
Initialize the corresponding DXclass from the data. class = DXInitObject.initialize()
[ "Initialize", "the", "corresponding", "DXclass", "from", "the", "data", "." ]
python
valid
benfred/implicit
implicit/utils.py
https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/implicit/utils.py#L16-L31
def check_blas_config(): """ checks to see if using OpenBlas/Intel MKL. If so, warn if the number of threads isn't set to 1 (causes severe perf issues when training - can be 10x slower) """ # don't warn repeatedly global _checked_blas_config if _checked_blas_config: return _checked_blas_config = True if np.__config__.get_info('openblas_info') and os.environ.get('OPENBLAS_NUM_THREADS') != '1': logging.warning("OpenBLAS detected. Its highly recommend to set the environment variable " "'export OPENBLAS_NUM_THREADS=1' to disable its internal multithreading") if np.__config__.get_info('blas_mkl_info') and os.environ.get('MKL_NUM_THREADS') != '1': logging.warning("Intel MKL BLAS detected. Its highly recommend to set the environment " "variable 'export MKL_NUM_THREADS=1' to disable its internal " "multithreading")
[ "def", "check_blas_config", "(", ")", ":", "# don't warn repeatedly", "global", "_checked_blas_config", "if", "_checked_blas_config", ":", "return", "_checked_blas_config", "=", "True", "if", "np", ".", "__config__", ".", "get_info", "(", "'openblas_info'", ")", "and"...
checks to see if using OpenBlas/Intel MKL. If so, warn if the number of threads isn't set to 1 (causes severe perf issues when training - can be 10x slower)
[ "checks", "to", "see", "if", "using", "OpenBlas", "/", "Intel", "MKL", ".", "If", "so", "warn", "if", "the", "number", "of", "threads", "isn", "t", "set", "to", "1", "(", "causes", "severe", "perf", "issues", "when", "training", "-", "can", "be", "10...
python
train
dw/mitogen
mitogen/parent.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/parent.py#L2071-L2087
def add_route(self, target_id, stream): """ Arrange for messages whose `dst_id` is `target_id` to be forwarded on the directly connected stream for `via_id`. This method is called automatically in response to :data:`mitogen.core.ADD_ROUTE` messages, but remains public while the design has not yet settled, and situations may arise where routing is not fully automatic. """ LOG.debug('%r.add_route(%r, %r)', self, target_id, stream) assert isinstance(target_id, int) assert isinstance(stream, Stream) self._write_lock.acquire() try: self._stream_by_id[target_id] = stream finally: self._write_lock.release()
[ "def", "add_route", "(", "self", ",", "target_id", ",", "stream", ")", ":", "LOG", ".", "debug", "(", "'%r.add_route(%r, %r)'", ",", "self", ",", "target_id", ",", "stream", ")", "assert", "isinstance", "(", "target_id", ",", "int", ")", "assert", "isinsta...
Arrange for messages whose `dst_id` is `target_id` to be forwarded on the directly connected stream for `via_id`. This method is called automatically in response to :data:`mitogen.core.ADD_ROUTE` messages, but remains public while the design has not yet settled, and situations may arise where routing is not fully automatic.
[ "Arrange", "for", "messages", "whose", "dst_id", "is", "target_id", "to", "be", "forwarded", "on", "the", "directly", "connected", "stream", "for", "via_id", ".", "This", "method", "is", "called", "automatically", "in", "response", "to", ":", "data", ":", "m...
python
train
ilblackdragon/django-misc
misc/views.py
https://github.com/ilblackdragon/django-misc/blob/0accd2dc97de656a1c9e275be81e817f78a2eb9d/misc/views.py#L22-L36
def handler500(request, template_name='500.html'): """ 500 error handler. Templates: `500.html` Context: MEDIA_URL Path of static media (e.g. "media.example.org") STATIC_URL """ t = loader.get_template(template_name) # You need to create a 500.html template. return http.HttpResponseServerError(t.render(Context({ 'MEDIA_URL': settings.MEDIA_URL, 'STATIC_URL': settings.STATIC_URL })))
[ "def", "handler500", "(", "request", ",", "template_name", "=", "'500.html'", ")", ":", "t", "=", "loader", ".", "get_template", "(", "template_name", ")", "# You need to create a 500.html template.", "return", "http", ".", "HttpResponseServerError", "(", "t", ".", ...
500 error handler. Templates: `500.html` Context: MEDIA_URL Path of static media (e.g. "media.example.org") STATIC_URL
[ "500", "error", "handler", "." ]
python
train
tanghaibao/jcvi
jcvi/apps/base.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L450-L472
def set_pairs(self): """ %prog pairs <blastfile|samfile|bedfile> Report how many paired ends mapped, avg distance between paired ends, etc. Paired reads must have the same prefix, use --rclip to remove trailing part, e.g. /1, /2, or .f, .r, default behavior is to truncate until last char. """ self.set_usage(self.set_pairs.__doc__) self.add_option("--pairsfile", default=None, help="Write valid pairs to pairsfile [default: %default]") self.add_option("--nrows", default=200000, type="int", help="Only use the first n lines [default: %default]") self.set_mates() self.add_option("--pdf", default=False, action="store_true", help="Print PDF instead ASCII histogram [default: %default]") self.add_option("--bins", default=20, type="int", help="Number of bins in the histogram [default: %default]") self.add_option("--distmode", default="ss", choices=("ss", "ee"), help="Distance mode between paired reads, ss is outer distance, " \ "ee is inner distance [default: %default]")
[ "def", "set_pairs", "(", "self", ")", ":", "self", ".", "set_usage", "(", "self", ".", "set_pairs", ".", "__doc__", ")", "self", ".", "add_option", "(", "\"--pairsfile\"", ",", "default", "=", "None", ",", "help", "=", "\"Write valid pairs to pairsfile [defaul...
%prog pairs <blastfile|samfile|bedfile> Report how many paired ends mapped, avg distance between paired ends, etc. Paired reads must have the same prefix, use --rclip to remove trailing part, e.g. /1, /2, or .f, .r, default behavior is to truncate until last char.
[ "%prog", "pairs", "<blastfile|samfile|bedfile", ">" ]
python
train
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2575-L2593
def skew_normal_like(x, mu, tau, alpha): R""" Azzalini's skew-normal log-likelihood .. math:: f(x \mid \mu, \tau, \alpha) = 2 \Phi((x-\mu)\sqrt{\tau}\alpha) \phi(x,\mu,\tau) where :math:\Phi is the normal CDF and :math: \phi is the normal PDF. :Parameters: - `x` : Input data. - `mu` : Mean of the distribution. - `tau` : Precision of the distribution (> 0). - `alpha` : Shape parameter of the distribution. .. note:: See http://azzalini.stat.unipd.it/SN/ """ return flib.sn_like(x, mu, tau, alpha)
[ "def", "skew_normal_like", "(", "x", ",", "mu", ",", "tau", ",", "alpha", ")", ":", "return", "flib", ".", "sn_like", "(", "x", ",", "mu", ",", "tau", ",", "alpha", ")" ]
R""" Azzalini's skew-normal log-likelihood .. math:: f(x \mid \mu, \tau, \alpha) = 2 \Phi((x-\mu)\sqrt{\tau}\alpha) \phi(x,\mu,\tau) where :math:\Phi is the normal CDF and :math: \phi is the normal PDF. :Parameters: - `x` : Input data. - `mu` : Mean of the distribution. - `tau` : Precision of the distribution (> 0). - `alpha` : Shape parameter of the distribution. .. note:: See http://azzalini.stat.unipd.it/SN/
[ "R", "Azzalini", "s", "skew", "-", "normal", "log", "-", "likelihood" ]
python
train
jopohl/urh
src/urh/signalprocessing/Spectrogram.py
https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/signalprocessing/Spectrogram.py#L78-L98
def stft(self, samples: np.ndarray): """ Perform Short-time Fourier transform to get the spectrogram for the given samples :return: short-time Fourier transform of the given signal """ window = self.window_function(self.window_size) hop_size = self.hop_size if len(samples) < self.window_size: samples = np.append(samples, np.zeros(self.window_size - len(samples))) num_frames = max(1, (len(samples) - self.window_size) // hop_size + 1) # Get frames as numpy view with stride_tricks to save RAM # Same as: frames = [padded_samples[i*hop_size:i*hop_size+self.window_size] for i in range(num_frames)] shape = (num_frames, self.window_size) strides = (hop_size * samples.strides[-1], samples.strides[-1]) frames = np.lib.stride_tricks.as_strided(samples, shape=shape, strides=strides) result = np.fft.fft(frames * window, self.window_size) / np.atleast_1d(self.window_size) return result
[ "def", "stft", "(", "self", ",", "samples", ":", "np", ".", "ndarray", ")", ":", "window", "=", "self", ".", "window_function", "(", "self", ".", "window_size", ")", "hop_size", "=", "self", ".", "hop_size", "if", "len", "(", "samples", ")", "<", "se...
Perform Short-time Fourier transform to get the spectrogram for the given samples :return: short-time Fourier transform of the given signal
[ "Perform", "Short", "-", "time", "Fourier", "transform", "to", "get", "the", "spectrogram", "for", "the", "given", "samples", ":", "return", ":", "short", "-", "time", "Fourier", "transform", "of", "the", "given", "signal" ]
python
train
calmjs/calmjs
src/calmjs/base.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/base.py#L713-L736
def join_cwd(self, path=None): """ Join the path with the current working directory. If it is specified for this instance of the object it will be used, otherwise rely on the global value. """ if self.working_dir: logger.debug( "'%s' instance 'working_dir' set to '%s' for join_cwd", type(self).__name__, self.working_dir, ) cwd = self.working_dir else: cwd = getcwd() logger.debug( "'%s' instance 'working_dir' unset; " "default to process '%s' for join_cwd", type(self).__name__, cwd, ) if path: return join(cwd, path) return cwd
[ "def", "join_cwd", "(", "self", ",", "path", "=", "None", ")", ":", "if", "self", ".", "working_dir", ":", "logger", ".", "debug", "(", "\"'%s' instance 'working_dir' set to '%s' for join_cwd\"", ",", "type", "(", "self", ")", ".", "__name__", ",", "self", "...
Join the path with the current working directory. If it is specified for this instance of the object it will be used, otherwise rely on the global value.
[ "Join", "the", "path", "with", "the", "current", "working", "directory", ".", "If", "it", "is", "specified", "for", "this", "instance", "of", "the", "object", "it", "will", "be", "used", "otherwise", "rely", "on", "the", "global", "value", "." ]
python
train
SiLab-Bonn/basil
basil/HL/GPAC.py
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L839-L856
def set_current_limit(self, channel, value, unit='A'): '''Setting current limit Note: same limit for all channels. ''' # TODO: add units / calibration if unit == 'raw': value = value elif unit == 'A': value = int(value * 1000 * self.CURRENT_LIMIT_GAIN) elif unit == 'mA': value = int(value * self.CURRENT_LIMIT_GAIN) elif unit == 'uA': value = int(value / 1000 * self.CURRENT_LIMIT_GAIN) else: raise TypeError("Invalid unit type.") I2cAnalogChannel._set_dac_value(self, address=self.CURRENT_LIMIT_DAC_SLAVE_ADD, dac_ch=self.CURRENT_LIMIT_DAC_CH, value=value)
[ "def", "set_current_limit", "(", "self", ",", "channel", ",", "value", ",", "unit", "=", "'A'", ")", ":", "# TODO: add units / calibration", "if", "unit", "==", "'raw'", ":", "value", "=", "value", "elif", "unit", "==", "'A'", ":", "value", "=", "int", "...
Setting current limit Note: same limit for all channels.
[ "Setting", "current", "limit" ]
python
train
BlueBrain/NeuroM
neurom/io/utils.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L75-L84
def _filepath(self, name): """ File path to `name` morphology file. """ if self.file_ext is None: candidates = glob.glob(os.path.join(self.directory, name + ".*")) try: return next(filter(_is_morphology_file, candidates)) except StopIteration: raise NeuroMError("Can not find morphology file for '%s' " % name) else: return os.path.join(self.directory, name + self.file_ext)
[ "def", "_filepath", "(", "self", ",", "name", ")", ":", "if", "self", ".", "file_ext", "is", "None", ":", "candidates", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "directory", ",", "name", "+", "\".*\"", ")", ...
File path to `name` morphology file.
[ "File", "path", "to", "name", "morphology", "file", "." ]
python
train
a1ezzz/wasp-general
wasp_general/network/messenger/onion.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/messenger/onion.py#L86-L95
def add_layers(self, *layers): """ Append given layers to this onion :param layers: layer to add :return: None """ for layer in layers: if layer.name() in self.__layers.keys(): raise ValueError('Layer "%s" already exists' % layer.name()) self.__layers[layer.name()] = layer
[ "def", "add_layers", "(", "self", ",", "*", "layers", ")", ":", "for", "layer", "in", "layers", ":", "if", "layer", ".", "name", "(", ")", "in", "self", ".", "__layers", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'Layer \"%s\" already exi...
Append given layers to this onion :param layers: layer to add :return: None
[ "Append", "given", "layers", "to", "this", "onion" ]
python
train
cloudtools/stacker
stacker/lookups/handlers/dynamodb.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/lookups/handlers/dynamodb.py#L85-L126
def _lookup_key_parse(table_keys): """Return the order in which the stacks should be executed. Args: dependencies (dict): a dictionary where each key should be the fully qualified name of a stack whose value is an array of fully qualified stack names that the stack depends on. This is used to generate the order in which the stacks should be executed. Returns: dict: includes a dict of lookup types with data types ('new_keys') and a list of the lookups with without ('clean_table_keys') """ # we need to parse the key lookup passed in regex_matcher = '\[([^\]]+)]' valid_dynamodb_datatypes = ['M', 'S', 'N', 'L'] clean_table_keys = [] new_keys = [] for key in table_keys: match = re.search(regex_matcher, key) if match: # the datatypes are pulled from the dynamodb docs if match.group(1) in valid_dynamodb_datatypes: match_val = str(match.group(1)) key = key.replace(match.group(0), '') new_keys.append({match_val: key}) clean_table_keys.append(key) else: raise ValueError( ('Stacker does not support looking up the datatype: {}') .format(str(match.group(1)))) else: new_keys.append({'S': key}) clean_table_keys.append(key) key_dict = {} key_dict['new_keys'] = new_keys key_dict['clean_table_keys'] = clean_table_keys return key_dict
[ "def", "_lookup_key_parse", "(", "table_keys", ")", ":", "# we need to parse the key lookup passed in", "regex_matcher", "=", "'\\[([^\\]]+)]'", "valid_dynamodb_datatypes", "=", "[", "'M'", ",", "'S'", ",", "'N'", ",", "'L'", "]", "clean_table_keys", "=", "[", "]", ...
Return the order in which the stacks should be executed. Args: dependencies (dict): a dictionary where each key should be the fully qualified name of a stack whose value is an array of fully qualified stack names that the stack depends on. This is used to generate the order in which the stacks should be executed. Returns: dict: includes a dict of lookup types with data types ('new_keys') and a list of the lookups with without ('clean_table_keys')
[ "Return", "the", "order", "in", "which", "the", "stacks", "should", "be", "executed", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/server/server.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/server/server.py#L52-L90
def _load_streams(self): """ Reads, parses and creates streams specified in config.yaml. """ common_err_msg = 'No valid {} stream configurations found. ' specific_err_msg = {'inbound': 'No data will be received (or displayed).', 'outbound': 'No data will be published.'} err_msgs = {} for stream_type in ['inbound', 'outbound']: err_msgs[stream_type] = common_err_msg.format(stream_type) + specific_err_msg[stream_type] streams = ait.config.get('server.{}-streams'.format(stream_type)) if streams is None: log.warn(err_msgs[stream_type]) else: for index, s in enumerate(streams): try: if stream_type == 'inbound': strm = self._create_inbound_stream(s['stream']) if type(strm) == PortInputStream: self.servers.append(strm) else: self.inbound_streams.append(strm) elif stream_type == 'outbound': strm = self._create_outbound_stream(s['stream']) self.outbound_streams.append(strm) log.info('Added {} stream {}'.format(stream_type, strm)) except Exception: exc_type, value, tb = sys.exc_info() log.error('{} creating {} stream {}: {}'.format(exc_type, stream_type, index, value)) if not self.inbound_streams and not self.servers: log.warn(err_msgs['inbound']) if not self.outbound_streams: log.warn(err_msgs['outbound'])
[ "def", "_load_streams", "(", "self", ")", ":", "common_err_msg", "=", "'No valid {} stream configurations found. '", "specific_err_msg", "=", "{", "'inbound'", ":", "'No data will be received (or displayed).'", ",", "'outbound'", ":", "'No data will be published.'", "}", "err...
Reads, parses and creates streams specified in config.yaml.
[ "Reads", "parses", "and", "creates", "streams", "specified", "in", "config", ".", "yaml", "." ]
python
train
yyuu/botornado
boto/rds/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/rds/__init__.py#L943-L998
def get_all_events(self, source_identifier=None, source_type=None, start_time=None, end_time=None, max_records=None, marker=None): """ Get information about events related to your DBInstances, DBSecurityGroups and DBParameterGroups. :type source_identifier: str :param source_identifier: If supplied, the events returned will be limited to those that apply to the identified source. The value of this parameter depends on the value of source_type. If neither parameter is specified, all events in the time span will be returned. :type source_type: str :param source_type: Specifies how the source_identifier should be interpreted. Valid values are: b-instance | db-security-group | db-parameter-group | db-snapshot :type start_time: datetime :param start_time: The beginning of the time interval for events. If not supplied, all available events will be returned. :type end_time: datetime :param end_time: The ending of the time interval for events. If not supplied, all available events will be returned. :type max_records: int :param max_records: The maximum number of records to be returned. If more results are available, a MoreToken will be returned in the response that can be used to retrieve additional records. Default is 100. :type marker: str :param marker: The marker provided by a previous request. :rtype: list :return: A list of class:`boto.rds.event.Event` """ params = {} if source_identifier and source_type: params['SourceIdentifier'] = source_identifier params['SourceType'] = source_type if start_time: params['StartTime'] = start_time.isoformat() if end_time: params['EndTime'] = end_time.isoformat() if max_records: params['MaxRecords'] = max_records if marker: params['Marker'] = marker return self.get_list('DescribeEvents', params, [('Event', Event)])
[ "def", "get_all_events", "(", "self", ",", "source_identifier", "=", "None", ",", "source_type", "=", "None", ",", "start_time", "=", "None", ",", "end_time", "=", "None", ",", "max_records", "=", "None", ",", "marker", "=", "None", ")", ":", "params", "...
Get information about events related to your DBInstances, DBSecurityGroups and DBParameterGroups. :type source_identifier: str :param source_identifier: If supplied, the events returned will be limited to those that apply to the identified source. The value of this parameter depends on the value of source_type. If neither parameter is specified, all events in the time span will be returned. :type source_type: str :param source_type: Specifies how the source_identifier should be interpreted. Valid values are: b-instance | db-security-group | db-parameter-group | db-snapshot :type start_time: datetime :param start_time: The beginning of the time interval for events. If not supplied, all available events will be returned. :type end_time: datetime :param end_time: The ending of the time interval for events. If not supplied, all available events will be returned. :type max_records: int :param max_records: The maximum number of records to be returned. If more results are available, a MoreToken will be returned in the response that can be used to retrieve additional records. Default is 100. :type marker: str :param marker: The marker provided by a previous request. :rtype: list :return: A list of class:`boto.rds.event.Event`
[ "Get", "information", "about", "events", "related", "to", "your", "DBInstances", "DBSecurityGroups", "and", "DBParameterGroups", "." ]
python
train
couchbase/couchbase-python-client
couchbase/n1ql.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/n1ql.py#L194-L210
def consistent_with(self, state): """ Indicate that the query should be consistent with one or more mutations. :param state: The state of the mutations it should be consistent with. :type state: :class:`~.couchbase.mutation_state.MutationState` """ if self.consistency not in (UNBOUNDED, NOT_BOUNDED, 'at_plus'): raise TypeError( 'consistent_with not valid with other consistency options') if not state: raise TypeError('Passed empty or invalid state', state) self.consistency = 'at_plus' self._body['scan_vectors'] = state._sv
[ "def", "consistent_with", "(", "self", ",", "state", ")", ":", "if", "self", ".", "consistency", "not", "in", "(", "UNBOUNDED", ",", "NOT_BOUNDED", ",", "'at_plus'", ")", ":", "raise", "TypeError", "(", "'consistent_with not valid with other consistency options'", ...
Indicate that the query should be consistent with one or more mutations. :param state: The state of the mutations it should be consistent with. :type state: :class:`~.couchbase.mutation_state.MutationState`
[ "Indicate", "that", "the", "query", "should", "be", "consistent", "with", "one", "or", "more", "mutations", "." ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/validate.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/validate.py#L1134-L1157
def is_bool_list(value, min=None, max=None): """ Check that the value is a list of booleans. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a boolean. >>> vtor = Validator() >>> vtor.check('bool_list', ()) [] >>> vtor.check('bool_list', []) [] >>> check_res = vtor.check('bool_list', (True, False)) >>> check_res == [True, False] 1 >>> check_res = vtor.check('bool_list', [True, False]) >>> check_res == [True, False] 1 >>> vtor.check('bool_list', [True, 'a']) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_boolean(mem) for mem in is_list(value, min, max)]
[ "def", "is_bool_list", "(", "value", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "return", "[", "is_boolean", "(", "mem", ")", "for", "mem", "in", "is_list", "(", "value", ",", "min", ",", "max", ")", "]" ]
Check that the value is a list of booleans. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a boolean. >>> vtor = Validator() >>> vtor.check('bool_list', ()) [] >>> vtor.check('bool_list', []) [] >>> check_res = vtor.check('bool_list', (True, False)) >>> check_res == [True, False] 1 >>> check_res = vtor.check('bool_list', [True, False]) >>> check_res == [True, False] 1 >>> vtor.check('bool_list', [True, 'a']) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type.
[ "Check", "that", "the", "value", "is", "a", "list", "of", "booleans", "." ]
python
train
wdbm/datavision
datavision.py
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L2538-L2560
def limit_x( self, limit_lower = None, # float limit_upper = None # float ): """ get or set x limits of the current axes x_min, x_max = limit_x() # return the current limit_x limit_x(x_min, x_max) # set the limit_x to x_min, x_max """ if limit_lower is None and limit_upper is None: return self._limit_x elif hasattr(limit_lower, "__iter__"): self._limit_x = limit_lower[:2] else: self._limit_x = [limit_lower, limit_upper] if self._limit_x[0] == self._limit_x[1]: self._limit_x[1] += 1 self._limit_x[0] -= self.mod_x self._limit_x[1] += self.mod_x
[ "def", "limit_x", "(", "self", ",", "limit_lower", "=", "None", ",", "# float", "limit_upper", "=", "None", "# float", ")", ":", "if", "limit_lower", "is", "None", "and", "limit_upper", "is", "None", ":", "return", "self", ".", "_limit_x", "elif", "hasattr...
get or set x limits of the current axes x_min, x_max = limit_x() # return the current limit_x limit_x(x_min, x_max) # set the limit_x to x_min, x_max
[ "get", "or", "set", "x", "limits", "of", "the", "current", "axes" ]
python
train
osrg/ryu
ryu/lib/igmplib.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/igmplib.py#L749-L777
def _do_timeout_for_leave(self, timeout, datapath, dst, in_port): """the process when the QUERY from the switch timeout expired.""" parser = datapath.ofproto_parser dpid = datapath.id hub.sleep(timeout) outport = self._to_querier[dpid]['port'] if self._to_hosts[dpid][dst]['ports'][in_port]['out']: return del self._to_hosts[dpid][dst]['ports'][in_port] self._del_flow_entry(datapath, in_port, dst) actions = [] ports = [] for port in self._to_hosts[dpid][dst]['ports']: actions.append(parser.OFPActionOutput(port)) ports.append(port) if len(actions): self._send_event( EventMulticastGroupStateChanged( MG_MEMBER_CHANGED, dst, outport, ports)) self._set_flow_entry( datapath, actions, outport, dst) self._to_hosts[dpid][dst]['leave'] = None else: self._remove_multicast_group(datapath, outport, dst) del self._to_hosts[dpid][dst]
[ "def", "_do_timeout_for_leave", "(", "self", ",", "timeout", ",", "datapath", ",", "dst", ",", "in_port", ")", ":", "parser", "=", "datapath", ".", "ofproto_parser", "dpid", "=", "datapath", ".", "id", "hub", ".", "sleep", "(", "timeout", ")", "outport", ...
the process when the QUERY from the switch timeout expired.
[ "the", "process", "when", "the", "QUERY", "from", "the", "switch", "timeout", "expired", "." ]
python
train
blue-yonder/tsfresh
tsfresh/feature_selection/relevance.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_selection/relevance.py#L31-L191
def calculate_relevance_table(X, y, ml_task='auto', n_jobs=defaults.N_PROCESSES, chunksize=defaults.CHUNKSIZE, test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE, test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE, test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE, test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE, fdr_level=defaults.FDR_LEVEL, hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT): """ Calculate the relevance table for the features contained in feature matrix `X` with respect to target vector `y`. The relevance table is calculated for the intended machine learning task `ml_task`. To accomplish this for each feature from the input pandas.DataFrame an univariate feature significance test is conducted. Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to decide which features to keep and which to delete. We are testing :math:`H_0` = the Feature is not relevant and should not be added against :math:`H_1` = the Feature is relevant and should be kept or in other words :math:`H_0` = Target and Feature are independent / the Feature has no influence on the target :math:`H_1` = Target and Feature are associated / dependent When the target is binary this becomes :math:`H_0 = \\left( F_{\\text{target}=1} = F_{\\text{target}=0} \\right)` :math:`H_1 = \\left( F_{\\text{target}=1} \\neq F_{\\text{target}=0} \\right)` Where :math:`F` is the distribution of the target. In the same way we can state the hypothesis when the feature is binary :math:`H_0 = \\left( T_{\\text{feature}=1} = T_{\\text{feature}=0} \\right)` :math:`H_1 = \\left( T_{\\text{feature}=1} \\neq T_{\\text{feature}=0} \\right)` Here :math:`T` is the distribution of the target. TODO: And for real valued? :param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features. 
It can contain both binary or real-valued features at the same time. :type X: pandas.DataFrame :param y: Target vector which is needed to test which features are relevant. Can be binary or real-valued. :type y: pandas.Series or numpy.ndarray :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`. Defaults to `'auto'`, meaning the intended task is inferred from `y`. If `y` has a boolean, integer or object dtype, the task is assumend to be classification, else regression. :type ml_task: str :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature (currently unused) :type test_for_binary_target_binary_feature: str :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature :type test_for_binary_target_real_feature: str :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused) :type test_for_real_target_binary_feature: str :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused) :type test_for_real_target_real_feature: str :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant features among all created features. :type fdr_level: float :param hypotheses_independent: Can the significance of the features be assumed to be independent? Normally, this should be set to False as the features are never independent (e.g. mean and median) :type hypotheses_independent: bool :param n_jobs: Number of processes to use during the p-value calculation :type n_jobs: int :param chunksize: The size of one chunk that is submitted to the worker process for the parallelisation. Where one chunk is defined as a singular time series for one id and one kind. If you set the chunksize to 10, then it means that one task is to calculate all features for 10 time series. 
If it is set it to None, depending on distributor, heuristics are used to find the optimal chunksize. If you get out of memory exceptions, you can try it with the dask distributor and a smaller chunksize. :type chunksize: None or int :return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance of this particular feature. The DataFrame has the columns "Feature", "type" (binary, real or const), "p_value" (the significance of this feature as a p-value, lower means more significant) "relevant" (True if the Benjamini Hochberg procedure rejected the null hypothesis [the feature is not relevant] for this feature) :rtype: pandas.DataFrame """ if ml_task not in ['auto', 'classification', 'regression']: raise ValueError('ml_task must be one of: \'auto\', \'classification\', \'regression\'') elif ml_task == 'auto': ml_task = infer_ml_task(y) if n_jobs == 0: map_function = map else: pool = Pool(n_jobs) map_function = partial(pool.map, chunksize=chunksize) relevance_table = pd.DataFrame(index=pd.Series(X.columns, name='feature')) relevance_table['feature'] = relevance_table.index relevance_table['type'] = pd.Series( map_function(get_feature_type, [X[feature] for feature in relevance_table.index]), index=relevance_table.index ) table_real = relevance_table[relevance_table.type == 'real'].copy() table_binary = relevance_table[relevance_table.type == 'binary'].copy() table_const = relevance_table[relevance_table.type == 'constant'].copy() table_const['p_value'] = np.NaN table_const['relevant'] = False if len(table_const) == len(relevance_table): return table_const if ml_task == 'classification': tables = [] for label in y.unique(): _test_real_feature = partial(target_binary_feature_real_test, y=(y == label), test=test_for_binary_target_real_feature) _test_binary_feature = partial(target_binary_feature_binary_test, y=(y == label)) tmp = _calculate_relevance_table_for_implicit_target( table_real, table_binary, X, 
_test_real_feature, _test_binary_feature, hypotheses_independent, fdr_level, map_function ) tables.append(tmp) relevance_table = combine_relevance_tables(tables) elif ml_task == 'regression': _test_real_feature = partial(target_real_feature_real_test, y=y) _test_binary_feature = partial(target_real_feature_binary_test, y=y) relevance_table = _calculate_relevance_table_for_implicit_target( table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent, fdr_level, map_function ) relevance_table = pd.concat([relevance_table, table_const], axis=0) if n_jobs != 0: pool.close() pool.terminate() pool.join() if sum(relevance_table['relevant']) == 0: _logger.warning("No feature was found relevant for {} for fdr level = {}. " "Consider using a lower fdr level or other features.".format(ml_task, fdr_level)) return relevance_table
[ "def", "calculate_relevance_table", "(", "X", ",", "y", ",", "ml_task", "=", "'auto'", ",", "n_jobs", "=", "defaults", ".", "N_PROCESSES", ",", "chunksize", "=", "defaults", ".", "CHUNKSIZE", ",", "test_for_binary_target_binary_feature", "=", "defaults", ".", "T...
Calculate the relevance table for the features contained in feature matrix `X` with respect to target vector `y`. The relevance table is calculated for the intended machine learning task `ml_task`. To accomplish this for each feature from the input pandas.DataFrame an univariate feature significance test is conducted. Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to decide which features to keep and which to delete. We are testing :math:`H_0` = the Feature is not relevant and should not be added against :math:`H_1` = the Feature is relevant and should be kept or in other words :math:`H_0` = Target and Feature are independent / the Feature has no influence on the target :math:`H_1` = Target and Feature are associated / dependent When the target is binary this becomes :math:`H_0 = \\left( F_{\\text{target}=1} = F_{\\text{target}=0} \\right)` :math:`H_1 = \\left( F_{\\text{target}=1} \\neq F_{\\text{target}=0} \\right)` Where :math:`F` is the distribution of the target. In the same way we can state the hypothesis when the feature is binary :math:`H_0 = \\left( T_{\\text{feature}=1} = T_{\\text{feature}=0} \\right)` :math:`H_1 = \\left( T_{\\text{feature}=1} \\neq T_{\\text{feature}=0} \\right)` Here :math:`T` is the distribution of the target. TODO: And for real valued? :param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features. It can contain both binary or real-valued features at the same time. :type X: pandas.DataFrame :param y: Target vector which is needed to test which features are relevant. Can be binary or real-valued. :type y: pandas.Series or numpy.ndarray :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`. Defaults to `'auto'`, meaning the intended task is inferred from `y`. If `y` has a boolean, integer or object dtype, the task is assumend to be classification, else regression. 
:type ml_task: str :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature (currently unused) :type test_for_binary_target_binary_feature: str :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature :type test_for_binary_target_real_feature: str :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused) :type test_for_real_target_binary_feature: str :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused) :type test_for_real_target_real_feature: str :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant features among all created features. :type fdr_level: float :param hypotheses_independent: Can the significance of the features be assumed to be independent? Normally, this should be set to False as the features are never independent (e.g. mean and median) :type hypotheses_independent: bool :param n_jobs: Number of processes to use during the p-value calculation :type n_jobs: int :param chunksize: The size of one chunk that is submitted to the worker process for the parallelisation. Where one chunk is defined as a singular time series for one id and one kind. If you set the chunksize to 10, then it means that one task is to calculate all features for 10 time series. If it is set it to None, depending on distributor, heuristics are used to find the optimal chunksize. If you get out of memory exceptions, you can try it with the dask distributor and a smaller chunksize. :type chunksize: None or int :return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance of this particular feature. 
The DataFrame has the columns "Feature", "type" (binary, real or const), "p_value" (the significance of this feature as a p-value, lower means more significant) "relevant" (True if the Benjamini Hochberg procedure rejected the null hypothesis [the feature is not relevant] for this feature) :rtype: pandas.DataFrame
[ "Calculate", "the", "relevance", "table", "for", "the", "features", "contained", "in", "feature", "matrix", "X", "with", "respect", "to", "target", "vector", "y", ".", "The", "relevance", "table", "is", "calculated", "for", "the", "intended", "machine", "learn...
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/geometry/meshdata.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/geometry/meshdata.py#L403-L431
def get_face_colors(self, indexed=None): """Get the face colors Parameters ---------- indexed : str | None If indexed is None, return (Nf, 4) array of face colors. If indexed=='faces', then instead return an indexed array (Nf, 3, 4) (note this is just the same array with each color repeated three times). Returns ------- colors : ndarray The colors. """ if indexed is None: return self._face_colors elif indexed == 'faces': if (self._face_colors_indexed_by_faces is None and self._face_colors is not None): Nf = self._face_colors.shape[0] self._face_colors_indexed_by_faces = \ np.empty((Nf, 3, 4), dtype=self._face_colors.dtype) self._face_colors_indexed_by_faces[:] = \ self._face_colors.reshape(Nf, 1, 4) return self._face_colors_indexed_by_faces else: raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
[ "def", "get_face_colors", "(", "self", ",", "indexed", "=", "None", ")", ":", "if", "indexed", "is", "None", ":", "return", "self", ".", "_face_colors", "elif", "indexed", "==", "'faces'", ":", "if", "(", "self", ".", "_face_colors_indexed_by_faces", "is", ...
Get the face colors Parameters ---------- indexed : str | None If indexed is None, return (Nf, 4) array of face colors. If indexed=='faces', then instead return an indexed array (Nf, 3, 4) (note this is just the same array with each color repeated three times). Returns ------- colors : ndarray The colors.
[ "Get", "the", "face", "colors" ]
python
train
inveniosoftware/invenio-records-rest
invenio_records_rest/utils.py
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/utils.py#L60-L71
def obj_or_import_string(value, default=None): """Import string or return object. :params value: Import path or class object to instantiate. :params default: Default object to return if the import fails. :returns: The imported object. """ if isinstance(value, six.string_types): return import_string(value) elif value: return value return default
[ "def", "obj_or_import_string", "(", "value", ",", "default", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "return", "import_string", "(", "value", ")", "elif", "value", ":", "return", "value", "return"...
Import string or return object. :params value: Import path or class object to instantiate. :params default: Default object to return if the import fails. :returns: The imported object.
[ "Import", "string", "or", "return", "object", "." ]
python
train
kajala/django-jutil
jutil/format.py
https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/format.py#L72-L85
def format_xml(xml_str: str, exceptions: bool=False): """ Formats XML document as human-readable plain text. :param xml_str: str (Input XML str) :param exceptions: Raise exceptions on error :return: str (Formatted XML str) """ try: import xml.dom.minidom return xml.dom.minidom.parseString(xml_str).toprettyxml() except Exception: if exceptions: raise return xml_str
[ "def", "format_xml", "(", "xml_str", ":", "str", ",", "exceptions", ":", "bool", "=", "False", ")", ":", "try", ":", "import", "xml", ".", "dom", ".", "minidom", "return", "xml", ".", "dom", ".", "minidom", ".", "parseString", "(", "xml_str", ")", "....
Formats XML document as human-readable plain text. :param xml_str: str (Input XML str) :param exceptions: Raise exceptions on error :return: str (Formatted XML str)
[ "Formats", "XML", "document", "as", "human", "-", "readable", "plain", "text", ".", ":", "param", "xml_str", ":", "str", "(", "Input", "XML", "str", ")", ":", "param", "exceptions", ":", "Raise", "exceptions", "on", "error", ":", "return", ":", "str", ...
python
train
sourceperl/pyModbusTCP
pyModbusTCP/client.py
https://github.com/sourceperl/pyModbusTCP/blob/993f6e2f5ab52eba164be049e42cea560c3751a5/pyModbusTCP/client.py#L297-L308
def close(self): """Close TCP connection :returns: close status (True for close/None if already close) :rtype: bool or None """ if self.__sock: self.__sock.close() self.__sock = None return True else: return None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "__sock", ":", "self", ".", "__sock", ".", "close", "(", ")", "self", ".", "__sock", "=", "None", "return", "True", "else", ":", "return", "None" ]
Close TCP connection :returns: close status (True for close/None if already close) :rtype: bool or None
[ "Close", "TCP", "connection" ]
python
train
DataBiosphere/toil
src/toil/batchSystems/mesos/batchSystem.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/mesos/batchSystem.py#L679-L698
def _registerNode(self, nodeAddress, agentId, nodePort=5051): """ Called when we get communication from an agent. Remembers the information about the agent by address, and the agent address by agent ID. """ executor = self.executors.get(nodeAddress) if executor is None or executor.agentId != agentId: executor = self.ExecutorInfo(nodeAddress=nodeAddress, agentId=agentId, nodeInfo=None, lastSeen=time.time()) self.executors[nodeAddress] = executor else: executor.lastSeen = time.time() # Record the IP under the agent id self.agentsByID[agentId] = nodeAddress return executor
[ "def", "_registerNode", "(", "self", ",", "nodeAddress", ",", "agentId", ",", "nodePort", "=", "5051", ")", ":", "executor", "=", "self", ".", "executors", ".", "get", "(", "nodeAddress", ")", "if", "executor", "is", "None", "or", "executor", ".", "agent...
Called when we get communication from an agent. Remembers the information about the agent by address, and the agent address by agent ID.
[ "Called", "when", "we", "get", "communication", "from", "an", "agent", ".", "Remembers", "the", "information", "about", "the", "agent", "by", "address", "and", "the", "agent", "address", "by", "agent", "ID", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/templating.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/templating.py#L330-L334
def write_all(self): """ Write out all registered config files. """ [self.write(k) for k in six.iterkeys(self.templates)]
[ "def", "write_all", "(", "self", ")", ":", "[", "self", ".", "write", "(", "k", ")", "for", "k", "in", "six", ".", "iterkeys", "(", "self", ".", "templates", ")", "]" ]
Write out all registered config files.
[ "Write", "out", "all", "registered", "config", "files", "." ]
python
train
laginha/django-mobileesp
src/django_mobileesp/mdetect.py
https://github.com/laginha/django-mobileesp/blob/91d4babb2343b992970bdb076508d380680c8b7e/src/django_mobileesp/mdetect.py#L626-L632
def detectMeegoPhone(self): """Return detection of a Meego phone Detects a phone running the Meego OS. """ return UAgentInfo.deviceMeego in self.__userAgent \ and UAgentInfo.mobi in self.__userAgent
[ "def", "detectMeegoPhone", "(", "self", ")", ":", "return", "UAgentInfo", ".", "deviceMeego", "in", "self", ".", "__userAgent", "and", "UAgentInfo", ".", "mobi", "in", "self", ".", "__userAgent" ]
Return detection of a Meego phone Detects a phone running the Meego OS.
[ "Return", "detection", "of", "a", "Meego", "phone" ]
python
train
wakatime/wakatime
wakatime/packages/pygments/lexers/data.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L103-L118
def set_block_scalar_indent(token_class): """Set an explicit indentation level for a block scalar.""" def callback(lexer, match, context): text = match.group() context.block_scalar_indent = None if not text: return increment = match.group(1) if increment: current_indent = max(context.indent, 0) increment = int(increment) context.block_scalar_indent = current_indent + increment if text: yield match.start(), token_class, text context.pos = match.end() return callback
[ "def", "set_block_scalar_indent", "(", "token_class", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "context", ".", "block_scalar_indent", "=", "None", "if", "not", "text", ...
Set an explicit indentation level for a block scalar.
[ "Set", "an", "explicit", "indentation", "level", "for", "a", "block", "scalar", "." ]
python
train
edoburu/django-tag-parser
tag_parser/basetags.py
https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L167-L186
def validate_args(cls, tag_name, *args, **kwargs): """ Validate the syntax of the template tag. """ if cls.min_args is not None and len(args) < cls.min_args: if cls.min_args == 1: raise TemplateSyntaxError("'{0}' tag requires at least {1} argument".format(tag_name, cls.min_args)) else: raise TemplateSyntaxError("'{0}' tag requires at least {1} arguments".format(tag_name, cls.min_args)) if cls.max_args is not None and len(args) > cls.max_args: if cls.max_args == 0: if cls.allowed_kwargs: raise TemplateSyntaxError("'{0}' tag only allows keywords arguments, for example {1}=\"...\".".format(tag_name, cls.allowed_kwargs[0])) else: raise TemplateSyntaxError("'{0}' tag doesn't support any arguments".format(tag_name)) elif cls.max_args == 1: raise TemplateSyntaxError("'{0}' tag only allows {1} argument.".format(tag_name, cls.max_args)) else: raise TemplateSyntaxError("'{0}' tag only allows {1} arguments.".format(tag_name, cls.max_args))
[ "def", "validate_args", "(", "cls", ",", "tag_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "cls", ".", "min_args", "is", "not", "None", "and", "len", "(", "args", ")", "<", "cls", ".", "min_args", ":", "if", "cls", ".", "min_...
Validate the syntax of the template tag.
[ "Validate", "the", "syntax", "of", "the", "template", "tag", "." ]
python
test
Fantomas42/django-blog-zinnia
zinnia/moderator.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/moderator.py#L36-L50
def moderate(self, comment, entry, request): """ Determine if a new comment should be marked as non-public and await approval. Return ``True`` to put the comment into the moderator queue, or ``False`` to allow it to be showed up immediately. """ if self.auto_moderate_comments: return True if check_is_spam(comment, entry, request, self.spam_checker_backends): return True return False
[ "def", "moderate", "(", "self", ",", "comment", ",", "entry", ",", "request", ")", ":", "if", "self", ".", "auto_moderate_comments", ":", "return", "True", "if", "check_is_spam", "(", "comment", ",", "entry", ",", "request", ",", "self", ".", "spam_checker...
Determine if a new comment should be marked as non-public and await approval. Return ``True`` to put the comment into the moderator queue, or ``False`` to allow it to be showed up immediately.
[ "Determine", "if", "a", "new", "comment", "should", "be", "marked", "as", "non", "-", "public", "and", "await", "approval", ".", "Return", "True", "to", "put", "the", "comment", "into", "the", "moderator", "queue", "or", "False", "to", "allow", "it", "to...
python
train
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/engine_creator.py
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L260-L267
def create_oracle(username, password, host, port, database, **kwargs): # pragma: no cover """ create an engine connected to a oracle database using cx_oracle. """ return create_engine( _create_oracle(username, password, host, port, database), **kwargs )
[ "def", "create_oracle", "(", "username", ",", "password", ",", "host", ",", "port", ",", "database", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "return", "create_engine", "(", "_create_oracle", "(", "username", ",", "password", ",", "host", ","...
create an engine connected to a oracle database using cx_oracle.
[ "create", "an", "engine", "connected", "to", "a", "oracle", "database", "using", "cx_oracle", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_encoding.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_encoding.py#L59-L79
def CEscape(text, as_utf8): """Escape a bytes string for use in an ascii protocol buffer. text.encode('string_escape') does not seem to satisfy our needs as it encodes unprintable characters using two-digit hex escapes whereas our C++ unescaping function allows hex escapes to be any length. So, "\0011".encode('string_escape') ends up being "\\x011", which will be decoded in C++ as a single-character string with char code 0x11. Args: text: A byte string to be escaped as_utf8: Specifies if result should be returned in UTF-8 encoding Returns: Escaped string """ # PY3 hack: make Ord work for str and bytes: # //platforms/networking/data uses unicode here, hence basestring. Ord = ord if isinstance(text, six.string_types) else lambda x: x if as_utf8: return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text) return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)
[ "def", "CEscape", "(", "text", ",", "as_utf8", ")", ":", "# PY3 hack: make Ord work for str and bytes:", "# //platforms/networking/data uses unicode here, hence basestring.", "Ord", "=", "ord", "if", "isinstance", "(", "text", ",", "six", ".", "string_types", ")", "else",...
Escape a bytes string for use in an ascii protocol buffer. text.encode('string_escape') does not seem to satisfy our needs as it encodes unprintable characters using two-digit hex escapes whereas our C++ unescaping function allows hex escapes to be any length. So, "\0011".encode('string_escape') ends up being "\\x011", which will be decoded in C++ as a single-character string with char code 0x11. Args: text: A byte string to be escaped as_utf8: Specifies if result should be returned in UTF-8 encoding Returns: Escaped string
[ "Escape", "a", "bytes", "string", "for", "use", "in", "an", "ascii", "protocol", "buffer", "." ]
python
train
wecatch/app-turbo
turbo/util.py
https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L53-L71
def to_dict_str(origin_value, encode=None): """recursively convert dict content into string """ value = copy.deepcopy(origin_value) for k, v in value.items(): if isinstance(v, dict): value[k] = to_dict_str(v, encode) continue if isinstance(v, list): value[k] = to_list_str(v, encode) continue if encode: value[k] = encode(v) else: value[k] = default_encode(v) return value
[ "def", "to_dict_str", "(", "origin_value", ",", "encode", "=", "None", ")", ":", "value", "=", "copy", ".", "deepcopy", "(", "origin_value", ")", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "...
recursively convert dict content into string
[ "recursively", "convert", "dict", "content", "into", "string" ]
python
train
bpython/curtsies
curtsies/input.py
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/input.py#L129-L162
def _wait_for_read_ready_or_timeout(self, timeout): """Returns tuple of whether stdin is ready to read and an event. If an event is returned, that event is more pressing than reading bytes on stdin to create a keyboard input event. If stdin is ready, either there are bytes to read or a SIGTSTP triggered by dsusp has been received""" remaining_timeout = timeout t0 = time.time() while True: try: (rs, _, _) = select.select( [self.in_stream.fileno()] + self.readers, [], [], remaining_timeout) if not rs: return False, None r = rs[0] # if there's more than one, get it in the next loop if r == self.in_stream.fileno(): return True, None else: os.read(r, 1024) if self.queued_interrupting_events: return False, self.queued_interrupting_events.pop(0) elif remaining_timeout is not None: remaining_timeout = max(0, t0 + timeout - time.time()) continue else: continue except select.error: if self.sigints: return False, self.sigints.pop() if remaining_timeout is not None: remaining_timeout = max(timeout - (time.time() - t0), 0)
[ "def", "_wait_for_read_ready_or_timeout", "(", "self", ",", "timeout", ")", ":", "remaining_timeout", "=", "timeout", "t0", "=", "time", ".", "time", "(", ")", "while", "True", ":", "try", ":", "(", "rs", ",", "_", ",", "_", ")", "=", "select", ".", ...
Returns tuple of whether stdin is ready to read and an event. If an event is returned, that event is more pressing than reading bytes on stdin to create a keyboard input event. If stdin is ready, either there are bytes to read or a SIGTSTP triggered by dsusp has been received
[ "Returns", "tuple", "of", "whether", "stdin", "is", "ready", "to", "read", "and", "an", "event", "." ]
python
train
jjjake/giganews
giganews/utils.py
https://github.com/jjjake/giganews/blob/8cfb26de6c10c482a8da348d438f0ce19e477573/giganews/utils.py#L107-L129
def get_utc_iso_date(date_str): """Convert date str into a iso-formatted UTC date str, i.e.: yyyymmddhhmmss :type date_str: str :param date_str: date string to be parsed. :rtype: str :returns: iso-formatted UTC date str. """ try: utc_tuple = dateutil.parser.parse(date_str).utctimetuple() except ValueError: try: date_str = ' '.join(date_str.split(' ')[:-1]) utc_tuple = dateutil.parser.parse(date_str).utctimetuple() except ValueError: date_str = ''.join(date_str.split('(')[:-1]).strip(')') utc_tuple = dateutil.parser.parse(date_str).utctimetuple() date_object = datetime.datetime.fromtimestamp(time.mktime(utc_tuple)) utc_date_str = ''.join([x for x in date_object.isoformat() if x not in '-T:']) return utc_date_str
[ "def", "get_utc_iso_date", "(", "date_str", ")", ":", "try", ":", "utc_tuple", "=", "dateutil", ".", "parser", ".", "parse", "(", "date_str", ")", ".", "utctimetuple", "(", ")", "except", "ValueError", ":", "try", ":", "date_str", "=", "' '", ".", "join"...
Convert date str into a iso-formatted UTC date str, i.e.: yyyymmddhhmmss :type date_str: str :param date_str: date string to be parsed. :rtype: str :returns: iso-formatted UTC date str.
[ "Convert", "date", "str", "into", "a", "iso", "-", "formatted", "UTC", "date", "str", "i", ".", "e", ".", ":", "yyyymmddhhmmss" ]
python
train
tanghaibao/jcvi
jcvi/formats/gff.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L854-L878
def gb(args): """ %prog gb gffile fastafile Convert GFF3 to Genbank format. Recipe taken from: <http://www.biostars.org/p/2492/> """ from Bio.Alphabet import generic_dna try: from BCBio import GFF except ImportError: print("You need to install dep first: $ easy_install bcbio-gff", file=sys.stderr) p = OptionParser(gb.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) gff_file, fasta_file = args pf = op.splitext(gff_file)[0] out_file = pf + ".gb" fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna)) gff_iter = GFF.parse(gff_file, fasta_input) SeqIO.write(gff_iter, out_file, "genbank")
[ "def", "gb", "(", "args", ")", ":", "from", "Bio", ".", "Alphabet", "import", "generic_dna", "try", ":", "from", "BCBio", "import", "GFF", "except", "ImportError", ":", "print", "(", "\"You need to install dep first: $ easy_install bcbio-gff\"", ",", "file", "=", ...
%prog gb gffile fastafile Convert GFF3 to Genbank format. Recipe taken from: <http://www.biostars.org/p/2492/>
[ "%prog", "gb", "gffile", "fastafile" ]
python
train
IBMStreams/pypi.streamsx
streamsx/rest_primitives.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L2090-L2100
def cancel_job(self, job_id=None, job_name=None): """Cancel a running job. Args: job_id (str, optional): Identifier of job to be canceled. job_name (str, optional): Name of job to be canceled. Returns: dict: JSON response for the job cancel operation. """ return self._delegator.cancel_job(job_id=job_id, job_name = job_name)
[ "def", "cancel_job", "(", "self", ",", "job_id", "=", "None", ",", "job_name", "=", "None", ")", ":", "return", "self", ".", "_delegator", ".", "cancel_job", "(", "job_id", "=", "job_id", ",", "job_name", "=", "job_name", ")" ]
Cancel a running job. Args: job_id (str, optional): Identifier of job to be canceled. job_name (str, optional): Name of job to be canceled. Returns: dict: JSON response for the job cancel operation.
[ "Cancel", "a", "running", "job", "." ]
python
train
bitesofcode/projex
projex/plugin.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/plugin.py#L486-L499
def plugins(cls, enabled=True): """ Returns the plugins for the given class. :param enabled | <bool> || None :return [<Plugin>, ..] """ cls.loadPlugins() plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}).values() if enabled is None: return plugs return filter(lambda x: x.isEnabled() == enabled, plugs)
[ "def", "plugins", "(", "cls", ",", "enabled", "=", "True", ")", ":", "cls", ".", "loadPlugins", "(", ")", "plugs", "=", "getattr", "(", "cls", ",", "'_%s__plugins'", "%", "cls", ".", "__name__", ",", "{", "}", ")", ".", "values", "(", ")", "if", ...
Returns the plugins for the given class. :param enabled | <bool> || None :return [<Plugin>, ..]
[ "Returns", "the", "plugins", "for", "the", "given", "class", ".", ":", "param", "enabled", "|", "<bool", ">", "||", "None", ":", "return", "[", "<Plugin", ">", "..", "]" ]
python
train
senaite/senaite.core
bika/lims/browser/referencesample.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/referencesample.py#L315-L319
def get_reference_results(self): """Return a mapping of Analysis Service -> Reference Results """ referenceresults = self.context.getReferenceResults() return dict(map(lambda rr: (rr.get("uid"), rr), referenceresults))
[ "def", "get_reference_results", "(", "self", ")", ":", "referenceresults", "=", "self", ".", "context", ".", "getReferenceResults", "(", ")", "return", "dict", "(", "map", "(", "lambda", "rr", ":", "(", "rr", ".", "get", "(", "\"uid\"", ")", ",", "rr", ...
Return a mapping of Analysis Service -> Reference Results
[ "Return", "a", "mapping", "of", "Analysis", "Service", "-", ">", "Reference", "Results" ]
python
train
MultipedRobotics/pyxl320
pyxl320/utils.py
https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/utils.py#L81-L93
def read(fname): """ Reads a Json file in: file name out: length of file, dictionary """ try: with open(fname, 'r') as f: data = json.load(f) return data except IOError: raise Exception('Could not open {0!s} for reading'.format((fname)))
[ "def", "read", "(", "fname", ")", ":", "try", ":", "with", "open", "(", "fname", ",", "'r'", ")", "as", "f", ":", "data", "=", "json", ".", "load", "(", "f", ")", "return", "data", "except", "IOError", ":", "raise", "Exception", "(", "'Could not op...
Reads a Json file in: file name out: length of file, dictionary
[ "Reads", "a", "Json", "file", "in", ":", "file", "name", "out", ":", "length", "of", "file", "dictionary" ]
python
train
jrief/django-websocket-redis
ws4redis/subscriber.py
https://github.com/jrief/django-websocket-redis/blob/abcddaad2f579d71dbf375e5e34bc35eef795a81/ws4redis/subscriber.py#L71-L78
def release(self): """ New implementation to free up Redis subscriptions when websockets close. This prevents memory sap when Redis Output Buffer and Output Lists build when websockets are abandoned. """ if self._subscription and self._subscription.subscribed: self._subscription.unsubscribe() self._subscription.reset()
[ "def", "release", "(", "self", ")", ":", "if", "self", ".", "_subscription", "and", "self", ".", "_subscription", ".", "subscribed", ":", "self", ".", "_subscription", ".", "unsubscribe", "(", ")", "self", ".", "_subscription", ".", "reset", "(", ")" ]
New implementation to free up Redis subscriptions when websockets close. This prevents memory sap when Redis Output Buffer and Output Lists build when websockets are abandoned.
[ "New", "implementation", "to", "free", "up", "Redis", "subscriptions", "when", "websockets", "close", ".", "This", "prevents", "memory", "sap", "when", "Redis", "Output", "Buffer", "and", "Output", "Lists", "build", "when", "websockets", "are", "abandoned", "." ...
python
train
kappius/pyheaderfile
pyheaderfile/drive.py
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/drive.py#L51-L61
def write_cell(self, x, y, value): """ Writing value in the cell of x+1 and y+1 position :param x: line index :param y: coll index :param value: value to be written :return: """ x += 1 y += 1 self._sheet.update_cell(x, y, value)
[ "def", "write_cell", "(", "self", ",", "x", ",", "y", ",", "value", ")", ":", "x", "+=", "1", "y", "+=", "1", "self", ".", "_sheet", ".", "update_cell", "(", "x", ",", "y", ",", "value", ")" ]
Writing value in the cell of x+1 and y+1 position :param x: line index :param y: coll index :param value: value to be written :return:
[ "Writing", "value", "in", "the", "cell", "of", "x", "+", "1", "and", "y", "+", "1", "position", ":", "param", "x", ":", "line", "index", ":", "param", "y", ":", "coll", "index", ":", "param", "value", ":", "value", "to", "be", "written", ":", "re...
python
train
pandas-dev/pandas
pandas/core/arrays/timedeltas.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/timedeltas.py#L56-L102
def _td_array_cmp(cls, op): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented if _is_convertible_to_td(other) or other is NaT: try: other = Timedelta(other) except ValueError: # failed to parse as timedelta return ops.invalid_comparison(self, other, op) result = op(self.view('i8'), other.value) if isna(other): result.fill(nat_result) elif not is_list_like(other): return ops.invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: try: other = type(self)._from_sequence(other)._data except (ValueError, TypeError): return ops.invalid_comparison(self, other, op) result = op(self.view('i8'), other.view('i8')) result = com.values_from_object(result) o_mask = np.array(isna(other)) if o_mask.any(): result[o_mask] = nat_result if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
[ "def", "_td_array_cmp", "(", "cls", ",", "op", ")", ":", "opname", "=", "'__{name}__'", ".", "format", "(", "name", "=", "op", ".", "__name__", ")", "nat_result", "=", "opname", "==", "'__ne__'", "def", "wrapper", "(", "self", ",", "other", ")", ":", ...
Wrap comparison operations to convert timedelta-like to timedelta64
[ "Wrap", "comparison", "operations", "to", "convert", "timedelta", "-", "like", "to", "timedelta64" ]
python
train
senaite/senaite.core
bika/lims/browser/widgets/reflexrulewidget.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/reflexrulewidget.py#L115-L176
def _format_conditions_and_actions(self, raw_data): """ This function gets a set of actions and conditionswith the following format: {'action-0': 'repeat', 'action-1': 'repeat', 'analysisservice-0': '30cd952b0bb04a05ac27b70ada7feab2', 'analysisservice-1': '30cd952b0bb04a05ac27b70ada7feab2', 'and_or-0': 'and', 'and_or-1': 'no', 'range0-0': '12', 'range0-1': '31', 'range1-0': '12', 'range1-1': '33', 'worksheettemplate-0': '70d48adfb34c4231a145f76a858e94cf', 'setresulton-0': 'original', 'setresulton-1': 'original', 'trigger': 'submit', 'value': '', 'an_result_id-0':'rep-1', 'an_result_id-1':'rep-2'} and returns a formatted set with the conditions and actions sorted like this one: { 'conditions':[{ 'range1': 'X', 'range0': 'X', 'cond_row_idx':'X' 'and_or': 'and', 'analysisservice': '<as_uid>', }, { 'range1': 'X', 'range0': 'X', 'cond_row_idx':'X' 'and_or': 'and', 'analysisservice': '<as_uid>', }, {...}], 'trigger': 'xxx', 'actions':[ {'action':'duplicate', 'act_row_idx':'0', 'otherWS': to_another, 'analyst': 'sussan1', 'setresultdiscrete': '1', 'setresultvalue': '2', 'worksheettemplate-0': '70d48adfb34c4231a145f76a858e94cf', 'setresulton': 'original','an_result_id-0':'rep-1'}, {'action':'repeat', 'act_row_idx':'1', 'otherWS': current, 'analyst': '', ...}, ] } """ keys = raw_data.keys() # 'formatted_action_row' is the dict which will be added in the # 'value' list formatted_set = {} # Filling the dict with the values that aren't actions or conditions formatted_set['trigger'] = raw_data.get('trigger', '') # Adding the conditions list to the final dictionary formatted_set['conditions'] = self._get_sorted_conditions_list( raw_data) # Adding the actions list to the final dictionary formatted_set['actions'] = self._get_sorted_actions_list(raw_data) return formatted_set
[ "def", "_format_conditions_and_actions", "(", "self", ",", "raw_data", ")", ":", "keys", "=", "raw_data", ".", "keys", "(", ")", "# 'formatted_action_row' is the dict which will be added in the", "# 'value' list", "formatted_set", "=", "{", "}", "# Filling the dict with the...
This function gets a set of actions and conditionswith the following format: {'action-0': 'repeat', 'action-1': 'repeat', 'analysisservice-0': '30cd952b0bb04a05ac27b70ada7feab2', 'analysisservice-1': '30cd952b0bb04a05ac27b70ada7feab2', 'and_or-0': 'and', 'and_or-1': 'no', 'range0-0': '12', 'range0-1': '31', 'range1-0': '12', 'range1-1': '33', 'worksheettemplate-0': '70d48adfb34c4231a145f76a858e94cf', 'setresulton-0': 'original', 'setresulton-1': 'original', 'trigger': 'submit', 'value': '', 'an_result_id-0':'rep-1', 'an_result_id-1':'rep-2'} and returns a formatted set with the conditions and actions sorted like this one: { 'conditions':[{ 'range1': 'X', 'range0': 'X', 'cond_row_idx':'X' 'and_or': 'and', 'analysisservice': '<as_uid>', }, { 'range1': 'X', 'range0': 'X', 'cond_row_idx':'X' 'and_or': 'and', 'analysisservice': '<as_uid>', }, {...}], 'trigger': 'xxx', 'actions':[ {'action':'duplicate', 'act_row_idx':'0', 'otherWS': to_another, 'analyst': 'sussan1', 'setresultdiscrete': '1', 'setresultvalue': '2', 'worksheettemplate-0': '70d48adfb34c4231a145f76a858e94cf', 'setresulton': 'original','an_result_id-0':'rep-1'}, {'action':'repeat', 'act_row_idx':'1', 'otherWS': current, 'analyst': '', ...}, ] }
[ "This", "function", "gets", "a", "set", "of", "actions", "and", "conditionswith", "the", "following", "format", ":" ]
python
train
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L142-L162
def _add_remote_resources(resources): """Retrieve remote resources like GATK/MuTect jars present in S3. """ out = copy.deepcopy(resources) for prog, info in resources.items(): for key, val in info.items(): if key == "jar" and objectstore.is_remote(val): store_dir = utils.safe_makedir(os.path.join(os.getcwd(), "inputs", "jars", prog)) fname = objectstore.download(val, store_dir, store_dir) version_file = os.path.join(store_dir, "version.txt") if not utils.file_exists(version_file): version = install.get_gatk_jar_version(prog, fname) with open(version_file, "w") as out_handle: out_handle.write(version) else: with open(version_file) as in_handle: version = in_handle.read().strip() del out[prog][key] out[prog]["dir"] = store_dir out[prog]["version"] = version return out
[ "def", "_add_remote_resources", "(", "resources", ")", ":", "out", "=", "copy", ".", "deepcopy", "(", "resources", ")", "for", "prog", ",", "info", "in", "resources", ".", "items", "(", ")", ":", "for", "key", ",", "val", "in", "info", ".", "items", ...
Retrieve remote resources like GATK/MuTect jars present in S3.
[ "Retrieve", "remote", "resources", "like", "GATK", "/", "MuTect", "jars", "present", "in", "S3", "." ]
python
train
alpha-xone/xbbg
xbbg/core/assist.py
https://github.com/alpha-xone/xbbg/blob/70226eb19a72a08144b5d8cea9db4913200f7bc5/xbbg/core/assist.py#L59-L78
def proc_ovrds(**kwargs): """ Bloomberg overrides Args: **kwargs: overrides Returns: list of tuples Examples: >>> proc_ovrds(DVD_Start_Dt='20180101') [('DVD_Start_Dt', '20180101')] >>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True) [('DVD_Start_Dt', '20180101')] """ return [ (k, v) for k, v in kwargs.items() if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS ]
[ "def", "proc_ovrds", "(", "*", "*", "kwargs", ")", ":", "return", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "k", "not", "in", "list", "(", "ELEM_KEYS", ".", "keys", "(", ")", ")", "+", ...
Bloomberg overrides Args: **kwargs: overrides Returns: list of tuples Examples: >>> proc_ovrds(DVD_Start_Dt='20180101') [('DVD_Start_Dt', '20180101')] >>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True) [('DVD_Start_Dt', '20180101')]
[ "Bloomberg", "overrides" ]
python
valid
intel-analytics/BigDL
pyspark/bigdl/optim/optimizer.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L734-L742
def set_gradclip_const(self, min_value, max_value): """ Configure constant clipping settings. :param min_value: the minimum value to clip by :param max_value: the maxmimum value to clip by """ callBigDlFunc(self.bigdl_type, "setConstantClip", self.value, min_value, max_value)
[ "def", "set_gradclip_const", "(", "self", ",", "min_value", ",", "max_value", ")", ":", "callBigDlFunc", "(", "self", ".", "bigdl_type", ",", "\"setConstantClip\"", ",", "self", ".", "value", ",", "min_value", ",", "max_value", ")" ]
Configure constant clipping settings. :param min_value: the minimum value to clip by :param max_value: the maxmimum value to clip by
[ "Configure", "constant", "clipping", "settings", "." ]
python
test
marians/audiocalc
audiocalc/py_audiocalc.py
https://github.com/marians/audiocalc/blob/ef917b05d9dddc5a3a44165112835669ef4808d3/audiocalc/py_audiocalc.py#L19-L51
def damping(temp, relhum, freq, pres=101325): """ Calculates the damping factor for sound in dB/m depending on temperature, humidity and sound frequency. Source: http://www.sengpielaudio.com/LuftdaempfungFormel.htm temp: Temperature in degrees celsius relhum: Relative humidity as percentage, e.g. 50 freq: Sound frequency in herz pres: Atmospheric pressure in kilopascal """ temp += 273.15 # convert to kelvin pres = pres / 101325.0 # convert to relative pressure c_humid = 4.6151 - 6.8346 * pow((273.15 / temp), 1.261) hum = relhum * pow(10.0, c_humid) * pres tempr = temp / 293.15 # convert to relative air temp (re 20 deg C) frO = pres * (24.0 + 4.04e4 * hum * (0.02 + hum) / (0.391 + hum)) frN = (pres * pow(tempr, -0.5) * (9.0 + 280.0 * hum * math.exp(-4.17 * (pow(tempr, (-1.0 / 3.0)) - 1.0)))) damp = (8.686 * freq * freq * ( 1.84e-11 * (1.0 / pres) * math.sqrt(tempr) + pow(tempr, -2.5) * ( 0.01275 * (math.exp(-2239.1 / temp) * 1.0 / (frO + freq * freq / frO)) + 0.1068 * ( math.exp(-3352 / temp) * 1.0 / (frN + freq * freq / frN) ) ) ) ) return damp
[ "def", "damping", "(", "temp", ",", "relhum", ",", "freq", ",", "pres", "=", "101325", ")", ":", "temp", "+=", "273.15", "# convert to kelvin", "pres", "=", "pres", "/", "101325.0", "# convert to relative pressure", "c_humid", "=", "4.6151", "-", "6.8346", "...
Calculates the damping factor for sound in dB/m depending on temperature, humidity and sound frequency. Source: http://www.sengpielaudio.com/LuftdaempfungFormel.htm temp: Temperature in degrees celsius relhum: Relative humidity as percentage, e.g. 50 freq: Sound frequency in herz pres: Atmospheric pressure in kilopascal
[ "Calculates", "the", "damping", "factor", "for", "sound", "in", "dB", "/", "m", "depending", "on", "temperature", "humidity", "and", "sound", "frequency", ".", "Source", ":", "http", ":", "//", "www", ".", "sengpielaudio", ".", "com", "/", "LuftdaempfungForm...
python
train
crunchyroll/ef-open
efopen/ef_password.py
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_password.py#L112-L146
def generate_secret_file(file_path, pattern, service, environment, clients): """ Generate a parameter files with it's secrets encrypted in KMS Args: file_path (string): Path to the parameter file to be encrypted pattern (string): Pattern to do fuzzy string matching service (string): Service to use KMS key to encrypt file environment (string): Environment to encrypt values clients (dict): KMS AWS client that has been instantiated Returns: None Raises: IOError: If the file does not exist """ changed = False with open(file_path) as json_file: data = json.load(json_file, object_pairs_hook=OrderedDict) try: for key, value in data["params"][environment].items(): if pattern in key: if "aws:kms:decrypt" in value: print("Found match, key {} but value is encrypted already; skipping...".format(key)) else: print("Found match, encrypting key {}".format(key)) encrypted_password = ef_utils.kms_encrypt(clients['kms'], service, environment, value) data["params"][environment][key] = format_secret(encrypted_password) changed = True except KeyError: ef_utils.fail("Error env: {} does not exist in parameters file".format(environment)) if changed: with open(file_path, "w") as encrypted_file: json.dump(data, encrypted_file, indent=2, separators=(',', ': ')) # Writing new line here so it conforms to WG14 N1256 5.1.1.1 (so github doesn't complain) encrypted_file.write("\n")
[ "def", "generate_secret_file", "(", "file_path", ",", "pattern", ",", "service", ",", "environment", ",", "clients", ")", ":", "changed", "=", "False", "with", "open", "(", "file_path", ")", "as", "json_file", ":", "data", "=", "json", ".", "load", "(", ...
Generate a parameter files with it's secrets encrypted in KMS Args: file_path (string): Path to the parameter file to be encrypted pattern (string): Pattern to do fuzzy string matching service (string): Service to use KMS key to encrypt file environment (string): Environment to encrypt values clients (dict): KMS AWS client that has been instantiated Returns: None Raises: IOError: If the file does not exist
[ "Generate", "a", "parameter", "files", "with", "it", "s", "secrets", "encrypted", "in", "KMS", "Args", ":", "file_path", "(", "string", ")", ":", "Path", "to", "the", "parameter", "file", "to", "be", "encrypted", "pattern", "(", "string", ")", ":", "Patt...
python
train
praekelt/django-preferences
preferences/managers.py
https://github.com/praekelt/django-preferences/blob/724f23da45449e96feb5179cb34e3d380cf151a1/preferences/managers.py#L10-L33
def get_queryset(self): """ Return the first preferences object for the current site. If preferences do not exist create it. """ queryset = super(SingletonManager, self).get_queryset() # Get current site current_site = None if getattr(settings, 'SITE_ID', None) is not None: current_site = Site.objects.get_current() # If site found limit queryset to site. if current_site is not None: queryset = queryset.filter(sites=settings.SITE_ID) if not queryset.exists(): # Create object (for current site) if it doesn't exist. obj = self.model.objects.create() if current_site is not None: obj.sites.add(current_site) return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "queryset", "=", "super", "(", "SingletonManager", ",", "self", ")", ".", "get_queryset", "(", ")", "# Get current site", "current_site", "=", "None", "if", "getattr", "(", "settings", ",", "'SITE_ID'", ",", "Non...
Return the first preferences object for the current site. If preferences do not exist create it.
[ "Return", "the", "first", "preferences", "object", "for", "the", "current", "site", ".", "If", "preferences", "do", "not", "exist", "create", "it", "." ]
python
train
vtkiorg/vtki
vtki/examples/downloads.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/examples/downloads.py#L137-L143
def download_blood_vessels(): """data representing the bifurcation of blood vessels.""" local_path, _ = _download_file('pvtu_blood_vessels/blood_vessels.zip') filename = os.path.join(local_path, 'T0000000500.pvtu') mesh = vtki.read(filename) mesh.set_active_vectors('velocity') return mesh
[ "def", "download_blood_vessels", "(", ")", ":", "local_path", ",", "_", "=", "_download_file", "(", "'pvtu_blood_vessels/blood_vessels.zip'", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "local_path", ",", "'T0000000500.pvtu'", ")", "mesh", "=", "v...
data representing the bifurcation of blood vessels.
[ "data", "representing", "the", "bifurcation", "of", "blood", "vessels", "." ]
python
train
SectorLabs/django-postgres-extra
psqlextra/manager/manager.py
https://github.com/SectorLabs/django-postgres-extra/blob/eef2ed5504d225858d4e4f5d77a838082ca6053e/psqlextra/manager/manager.py#L283-L303
def bulk_upsert(self, conflict_target: List, rows: List[Dict], index_predicate: str=None): """Creates a set of new records or updates the existing ones with the specified data. Arguments: conflict_target: Fields to pass into the ON CONFLICT clause. rows: Rows to upsert. index_predicate: The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking conflicts) """ if not rows or len(rows) <= 0: return self.on_conflict(conflict_target, ConflictAction.UPDATE, index_predicate) return self.bulk_insert(rows)
[ "def", "bulk_upsert", "(", "self", ",", "conflict_target", ":", "List", ",", "rows", ":", "List", "[", "Dict", "]", ",", "index_predicate", ":", "str", "=", "None", ")", ":", "if", "not", "rows", "or", "len", "(", "rows", ")", "<=", "0", ":", "retu...
Creates a set of new records or updates the existing ones with the specified data. Arguments: conflict_target: Fields to pass into the ON CONFLICT clause. rows: Rows to upsert. index_predicate: The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking conflicts)
[ "Creates", "a", "set", "of", "new", "records", "or", "updates", "the", "existing", "ones", "with", "the", "specified", "data", "." ]
python
test
aamalev/aiohttp_apiset
aiohttp_apiset/dispatcher.py
https://github.com/aamalev/aiohttp_apiset/blob/ba3492ce929e39be1325d506b727a8bfb34e7b33/aiohttp_apiset/dispatcher.py#L67-L69
def url_for(self, *args, **kwargs): """Construct url for route with additional params.""" return yarl.URL(self.url(parts=kwargs))
[ "def", "url_for", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "yarl", ".", "URL", "(", "self", ".", "url", "(", "parts", "=", "kwargs", ")", ")" ]
Construct url for route with additional params.
[ "Construct", "url", "for", "route", "with", "additional", "params", "." ]
python
train
codeforamerica/epa_python
epa/gics/gics.py
https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/gics/gics.py#L51-L58
def construction(self, column=None, value=None, **kwargs): """ Identifies monetary, descriptive, and milestone information for Wastewater Treatment construction grants. >>> GICS().construction('complete_percent', 91) """ return self._resolve_call('GIC_CONSTRUCTION', column, value, **kwargs)
[ "def", "construction", "(", "self", ",", "column", "=", "None", ",", "value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resolve_call", "(", "'GIC_CONSTRUCTION'", ",", "column", ",", "value", ",", "*", "*", "kwargs", ")" ]
Identifies monetary, descriptive, and milestone information for Wastewater Treatment construction grants. >>> GICS().construction('complete_percent', 91)
[ "Identifies", "monetary", "descriptive", "and", "milestone", "information", "for", "Wastewater", "Treatment", "construction", "grants", "." ]
python
train
rycus86/ghost-client
ghost_client/models.py
https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L199-L215
def update(self, id, **kwargs): """ Updates an existing resource. :param id: The ID of the resource :param kwargs: The properties of the resource to change :return: The updated item returned by the API wrapped as a `Model` object """ response = self.ghost.execute_put('%s/%s/' % (self._type_name, id), json={ self._type_name: [ kwargs ] }) return self._model_type(response.get(self._type_name)[0])
[ "def", "update", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "ghost", ".", "execute_put", "(", "'%s/%s/'", "%", "(", "self", ".", "_type_name", ",", "id", ")", ",", "json", "=", "{", "self", ".", "_typ...
Updates an existing resource. :param id: The ID of the resource :param kwargs: The properties of the resource to change :return: The updated item returned by the API wrapped as a `Model` object
[ "Updates", "an", "existing", "resource", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/plugins/auth/saml2_auth.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/plugins/auth/saml2_auth.py#L98-L119
def prepare_request(settings): """ Prepare SAML request """ # Set the ACS url and binding method settings["sp"]["assertionConsumerService"] = { "url": web.ctx.homedomain + web.ctx.homepath + "/auth/callback/" + settings["id"], "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" } # If server is behind proxys or balancers use the HTTP_X_FORWARDED fields data = web.input() return { 'https': 'on' if web.ctx.protocol == 'https' else 'off', 'http_host': web.ctx.environ["SERVER_NAME"], 'server_port': web.ctx.environ["SERVER_PORT"], 'script_name': web.ctx.homepath, 'get_data': data.copy(), 'post_data': data.copy(), # Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144 # 'lowercase_urlencoding': True, 'query_string': web.ctx.query }
[ "def", "prepare_request", "(", "settings", ")", ":", "# Set the ACS url and binding method", "settings", "[", "\"sp\"", "]", "[", "\"assertionConsumerService\"", "]", "=", "{", "\"url\"", ":", "web", ".", "ctx", ".", "homedomain", "+", "web", ".", "ctx", ".", ...
Prepare SAML request
[ "Prepare", "SAML", "request" ]
python
train
sdispater/orator
orator/utils/url.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/utils/url.py#L122-L142
def get_dialect(self): """Return the SQLAlchemy database dialect class corresponding to this URL's driver name. """ if "+" not in self.drivername: name = self.drivername else: name = self.drivername.replace("+", ".") cls = registry.load(name) # check for legacy dialects that # would return a module with 'dialect' as the # actual class if ( hasattr(cls, "dialect") and isinstance(cls.dialect, type) and issubclass(cls.dialect, Dialect) ): return cls.dialect else: return cls
[ "def", "get_dialect", "(", "self", ")", ":", "if", "\"+\"", "not", "in", "self", ".", "drivername", ":", "name", "=", "self", ".", "drivername", "else", ":", "name", "=", "self", ".", "drivername", ".", "replace", "(", "\"+\"", ",", "\".\"", ")", "cl...
Return the SQLAlchemy database dialect class corresponding to this URL's driver name.
[ "Return", "the", "SQLAlchemy", "database", "dialect", "class", "corresponding", "to", "this", "URL", "s", "driver", "name", "." ]
python
train
CivicSpleen/ckcache
ckcache/filesystem.py
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/filesystem.py#L771-L779
def list(self, path=None, with_metadata=False): '''get a list of all of the files in the repository''' path = path.strip('/') if path else '' if self.upstream: return self.upstream.list(path, with_metadata=with_metadata) else: raise NotImplementedError()
[ "def", "list", "(", "self", ",", "path", "=", "None", ",", "with_metadata", "=", "False", ")", ":", "path", "=", "path", ".", "strip", "(", "'/'", ")", "if", "path", "else", "''", "if", "self", ".", "upstream", ":", "return", "self", ".", "upstream...
get a list of all of the files in the repository
[ "get", "a", "list", "of", "all", "of", "the", "files", "in", "the", "repository" ]
python
train
pytroll/satpy
satpy/scene.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/scene.py#L1201-L1214
def save_dataset(self, dataset_id, filename=None, writer=None, overlay=None, compute=True, **kwargs): """Save the *dataset_id* to file using *writer* (default: geotiff).""" if writer is None and filename is None: writer = 'geotiff' elif writer is None: writer = self.get_writer_by_ext(os.path.splitext(filename)[1]) writer, save_kwargs = load_writer(writer, ppp_config_dir=self.ppp_config_dir, filename=filename, **kwargs) return writer.save_dataset(self[dataset_id], overlay=overlay, compute=compute, **save_kwargs)
[ "def", "save_dataset", "(", "self", ",", "dataset_id", ",", "filename", "=", "None", ",", "writer", "=", "None", ",", "overlay", "=", "None", ",", "compute", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "writer", "is", "None", "and", "filena...
Save the *dataset_id* to file using *writer* (default: geotiff).
[ "Save", "the", "*", "dataset_id", "*", "to", "file", "using", "*", "writer", "*", "(", "default", ":", "geotiff", ")", "." ]
python
train
bluecap-se/yarr.client
yarr_client/app.py
https://github.com/bluecap-se/yarr.client/blob/ec0c053fe8a7e92eb27b68b7a135caecde30c81d/yarr_client/app.py#L16-L43
def configurate_app(config_file=''): """ Configures Flask app :param config_file: Absolute path to Py config file, optional :returns: App object, host and port """ # Load config app.config.from_pyfile('defaults.py') app.config.from_pyfile(config_file, silent=True) if app.config.get('MINIFY_HTML', False): app.jinja_env.add_extension('flask_utils.jinja2htmlcompress.HTMLCompress') # Setup web assets assets = Environment(app) js = Bundle('common.js', filters='jsmin', output='gen/main.%(version)s.js') css = Bundle('common.css', filters='cssmin', output='gen/main.%(version)s.css') assets.register('js_all', js) assets.register('css_all', css) # Set host and port port = app.config.get('PORT', 5000) host = app.config.get('HOST', '127.0.0.1') return app, host, port
[ "def", "configurate_app", "(", "config_file", "=", "''", ")", ":", "# Load config", "app", ".", "config", ".", "from_pyfile", "(", "'defaults.py'", ")", "app", ".", "config", ".", "from_pyfile", "(", "config_file", ",", "silent", "=", "True", ")", "if", "a...
Configures Flask app :param config_file: Absolute path to Py config file, optional :returns: App object, host and port
[ "Configures", "Flask", "app" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L1377-L1399
def initialise(self): """ Initialise this data repository, creating any necessary directories and file paths. """ self._checkWriteMode() self._createSystemTable() self._createNetworkTables() self._createOntologyTable() self._createReferenceSetTable() self._createReferenceTable() self._createDatasetTable() self._createReadGroupSetTable() self._createReadGroupTable() self._createCallSetTable() self._createVariantSetTable() self._createVariantAnnotationSetTable() self._createFeatureSetTable() self._createContinuousSetTable() self._createBiosampleTable() self._createIndividualTable() self._createPhenotypeAssociationSetTable() self._createRnaQuantificationSetTable()
[ "def", "initialise", "(", "self", ")", ":", "self", ".", "_checkWriteMode", "(", ")", "self", ".", "_createSystemTable", "(", ")", "self", ".", "_createNetworkTables", "(", ")", "self", ".", "_createOntologyTable", "(", ")", "self", ".", "_createReferenceSetTa...
Initialise this data repository, creating any necessary directories and file paths.
[ "Initialise", "this", "data", "repository", "creating", "any", "necessary", "directories", "and", "file", "paths", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/ext/dataforms.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/dataforms.py#L81-L95
def complete_xml_element(self, xmlnode, doc): """Complete the XML node with `self` content. :Parameters: - `xmlnode`: XML node with the element being built. It has already right name and namespace, but no attributes or content. - `doc`: document to which the element belongs. :Types: - `xmlnode`: `libxml2.xmlNode` - `doc`: `libxml2.xmlDoc`""" _unused = doc if self.label is not None: xmlnode.setProp("label", self.label.encode("utf-8")) xmlnode.newTextChild(xmlnode.ns(), "value", self.value.encode("utf-8")) return xmlnode
[ "def", "complete_xml_element", "(", "self", ",", "xmlnode", ",", "doc", ")", ":", "_unused", "=", "doc", "if", "self", ".", "label", "is", "not", "None", ":", "xmlnode", ".", "setProp", "(", "\"label\"", ",", "self", ".", "label", ".", "encode", "(", ...
Complete the XML node with `self` content. :Parameters: - `xmlnode`: XML node with the element being built. It has already right name and namespace, but no attributes or content. - `doc`: document to which the element belongs. :Types: - `xmlnode`: `libxml2.xmlNode` - `doc`: `libxml2.xmlDoc`
[ "Complete", "the", "XML", "node", "with", "self", "content", "." ]
python
valid
locationlabs/mockredis
mockredis/client.py
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L1344-L1356
def call(self, command, *args): """ Sends call to the function, whose name is specified by command. Used by Script invocations and normalizes calls using standard Redis arguments to use the expected redis-py arguments. """ command = self._normalize_command_name(command) args = self._normalize_command_args(command, *args) redis_function = getattr(self, command) value = redis_function(*args) return self._normalize_command_response(command, value)
[ "def", "call", "(", "self", ",", "command", ",", "*", "args", ")", ":", "command", "=", "self", ".", "_normalize_command_name", "(", "command", ")", "args", "=", "self", ".", "_normalize_command_args", "(", "command", ",", "*", "args", ")", "redis_function...
Sends call to the function, whose name is specified by command. Used by Script invocations and normalizes calls using standard Redis arguments to use the expected redis-py arguments.
[ "Sends", "call", "to", "the", "function", "whose", "name", "is", "specified", "by", "command", "." ]
python
train
celiao/rtsimple
rtsimple/lists.py
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L153-L168
def dvds_current_releases(self, **kwargs): """Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('dvds_current_releases') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "dvds_current_releases", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'dvds_current_releases'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_v...
Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
[ "Gets", "the", "upcoming", "movies", "from", "the", "API", "." ]
python
train
mcs07/ChemDataExtractor
chemdataextractor/biblio/bibtex.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/biblio/bibtex.py#L130-L169
def _parse_value(self): """Parse a value. Digits, definitions, and the contents of double quotes or curly brackets.""" val = [] while True: t = self._next_token() if t == '"': brac_counter = 0 while True: t = self._next_token(skipws=False) if t == '{': brac_counter += 1 if t == '}': brac_counter -= 1 if t == '"' and brac_counter <= 0: break else: val.append(t) elif t == '{': brac_counter = 0 while True: t = self._next_token(skipws=False) if t == '{': brac_counter += 1 if t == '}': brac_counter -= 1 if brac_counter < 0: break else: val.append(t) elif re.match(r'\w', t): val.extend([self.definitions.get(t, t), ' ']) elif t.isdigit(): val.append([t, ' ']) elif t == '#': pass else: break value = ' '.join(''.join(val).split()) return value
[ "def", "_parse_value", "(", "self", ")", ":", "val", "=", "[", "]", "while", "True", ":", "t", "=", "self", ".", "_next_token", "(", ")", "if", "t", "==", "'\"'", ":", "brac_counter", "=", "0", "while", "True", ":", "t", "=", "self", ".", "_next_...
Parse a value. Digits, definitions, and the contents of double quotes or curly brackets.
[ "Parse", "a", "value", ".", "Digits", "definitions", "and", "the", "contents", "of", "double", "quotes", "or", "curly", "brackets", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L2065-L2073
def report_validation(self, status): """Return run total value.""" # only one fail/pass count per profile if status: self.report['results']['validations']['pass'] += 1 else: self.report['results']['validations']['fail'] += 1 if self.selected_profile.name not in self.report['results']['failed_profiles']: self.report['results']['failed_profiles'].append(self.selected_profile.name)
[ "def", "report_validation", "(", "self", ",", "status", ")", ":", "# only one fail/pass count per profile", "if", "status", ":", "self", ".", "report", "[", "'results'", "]", "[", "'validations'", "]", "[", "'pass'", "]", "+=", "1", "else", ":", "self", ".",...
Return run total value.
[ "Return", "run", "total", "value", "." ]
python
train
CityOfZion/neo-python
neo/Core/TX/Transaction.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/Transaction.py#L384-L409
def NetworkFee(self): """ Get the network fee. Returns: Fixed8: """ if self._network_fee is None: input = Fixed8(0) for coin_ref in self.References.values(): if coin_ref.AssetId == GetBlockchain().SystemCoin().Hash: input = input + coin_ref.Value output = Fixed8(0) for tx_output in self.outputs: if tx_output.AssetId == GetBlockchain().SystemCoin().Hash: output = output + tx_output.Value self._network_fee = input - output - self.SystemFee() # logger.info("Determined network fee to be %s " % (self.__network_fee.value)) return self._network_fee
[ "def", "NetworkFee", "(", "self", ")", ":", "if", "self", ".", "_network_fee", "is", "None", ":", "input", "=", "Fixed8", "(", "0", ")", "for", "coin_ref", "in", "self", ".", "References", ".", "values", "(", ")", ":", "if", "coin_ref", ".", "AssetId...
Get the network fee. Returns: Fixed8:
[ "Get", "the", "network", "fee", "." ]
python
train
vpelletier/python-hidraw
hidraw/__init__.py
https://github.com/vpelletier/python-hidraw/blob/af6527160d2c0c0f61d737f383e35fd767ce25be/hidraw/__init__.py#L120-L134
def getFeatureReport(self, report_num=0, length=63): """ Receive a feature report. Blocks, unless you configured provided file (descriptor) to be non-blocking. """ length += 1 buf = bytearray(length) buf[0] = report_num self._ioctl( _HIDIOCGFEATURE(length), (ctypes.c_char * length).from_buffer(buf), True, ) return buf
[ "def", "getFeatureReport", "(", "self", ",", "report_num", "=", "0", ",", "length", "=", "63", ")", ":", "length", "+=", "1", "buf", "=", "bytearray", "(", "length", ")", "buf", "[", "0", "]", "=", "report_num", "self", ".", "_ioctl", "(", "_HIDIOCGF...
Receive a feature report. Blocks, unless you configured provided file (descriptor) to be non-blocking.
[ "Receive", "a", "feature", "report", ".", "Blocks", "unless", "you", "configured", "provided", "file", "(", "descriptor", ")", "to", "be", "non", "-", "blocking", "." ]
python
train
marl/jams
jams/core.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/core.py#L1065-L1087
def to_interval_values(self): '''Extract observation data in a `mir_eval`-friendly format. Returns ------- intervals : np.ndarray [shape=(n, 2), dtype=float] Start- and end-times of all valued intervals `intervals[i, :] = [time[i], time[i] + duration[i]]` labels : list List view of value field. ''' ints, vals = [], [] for obs in self.data: ints.append([obs.time, obs.time + obs.duration]) vals.append(obs.value) if not ints: return np.empty(shape=(0, 2), dtype=float), [] return np.array(ints), vals
[ "def", "to_interval_values", "(", "self", ")", ":", "ints", ",", "vals", "=", "[", "]", ",", "[", "]", "for", "obs", "in", "self", ".", "data", ":", "ints", ".", "append", "(", "[", "obs", ".", "time", ",", "obs", ".", "time", "+", "obs", ".", ...
Extract observation data in a `mir_eval`-friendly format. Returns ------- intervals : np.ndarray [shape=(n, 2), dtype=float] Start- and end-times of all valued intervals `intervals[i, :] = [time[i], time[i] + duration[i]]` labels : list List view of value field.
[ "Extract", "observation", "data", "in", "a", "mir_eval", "-", "friendly", "format", "." ]
python
valid
davenquinn/Attitude
attitude/geom/transform.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/geom/transform.py#L3-L9
def rotate_2D(angle): """ Returns a 2x2 transformation matrix to rotate by an angle in two dimensions """ return N.array([[N.cos(angle),-N.sin(angle)], [N.sin(angle),N.cos(angle)]])
[ "def", "rotate_2D", "(", "angle", ")", ":", "return", "N", ".", "array", "(", "[", "[", "N", ".", "cos", "(", "angle", ")", ",", "-", "N", ".", "sin", "(", "angle", ")", "]", ",", "[", "N", ".", "sin", "(", "angle", ")", ",", "N", ".", "c...
Returns a 2x2 transformation matrix to rotate by an angle in two dimensions
[ "Returns", "a", "2x2", "transformation", "matrix", "to", "rotate", "by", "an", "angle", "in", "two", "dimensions" ]
python
train
PSPC-SPAC-buyandsell/von_agent
von_agent/tails.py
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/tails.py#L125-L137
def links(base_dir: str, issuer_did: str = None) -> set: """ Return set of all paths to symbolic links (rev reg ids) associating their respective tails files, in specified base tails directory, on input issuer DID if specified. :param base_dir: base directory for tails files, thereafter split by cred def id :param issuer_did: issuer DID of interest :return: set of paths to symbolic links associating tails files """ return {join(dp, f) for dp, dn, fn in walk(base_dir) for f in fn if islink(join(dp, f)) and (not issuer_did or f.startswith('{}:4:'.format(issuer_did)))}
[ "def", "links", "(", "base_dir", ":", "str", ",", "issuer_did", ":", "str", "=", "None", ")", "->", "set", ":", "return", "{", "join", "(", "dp", ",", "f", ")", "for", "dp", ",", "dn", ",", "fn", "in", "walk", "(", "base_dir", ")", "for", "f", ...
Return set of all paths to symbolic links (rev reg ids) associating their respective tails files, in specified base tails directory, on input issuer DID if specified. :param base_dir: base directory for tails files, thereafter split by cred def id :param issuer_did: issuer DID of interest :return: set of paths to symbolic links associating tails files
[ "Return", "set", "of", "all", "paths", "to", "symbolic", "links", "(", "rev", "reg", "ids", ")", "associating", "their", "respective", "tails", "files", "in", "specified", "base", "tails", "directory", "on", "input", "issuer", "DID", "if", "specified", "." ]
python
train
instana/python-sensor
instana/recorder.py
https://github.com/instana/python-sensor/blob/58aecb90924c48bafcbc4f93bd9b7190980918bc/instana/recorder.py#L52-L64
def report_spans(self): """ Periodically report the queued spans """ logger.debug("Span reporting thread is now alive") def span_work(): queue_size = self.queue.qsize() if queue_size > 0 and instana.singletons.agent.can_send(): response = instana.singletons.agent.report_traces(self.queued_spans()) if response: logger.debug("reported %d spans" % queue_size) return True every(2, span_work, "Span Reporting")
[ "def", "report_spans", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Span reporting thread is now alive\"", ")", "def", "span_work", "(", ")", ":", "queue_size", "=", "self", ".", "queue", ".", "qsize", "(", ")", "if", "queue_size", ">", "0", "and...
Periodically report the queued spans
[ "Periodically", "report", "the", "queued", "spans" ]
python
train
napalm-automation/napalm-logs
napalm_logs/auth.py
https://github.com/napalm-automation/napalm-logs/blob/4b89100a6e4f994aa004f3ea42a06dc803a7ccb0/napalm_logs/auth.py#L123-L138
def verify_cert(self): ''' Checks that the provided cert and key are valid and usable ''' log.debug('Verifying the %s certificate, keyfile: %s', self.certificate, self.keyfile) try: ssl.create_default_context().load_cert_chain(self.certificate, keyfile=self.keyfile) except ssl.SSLError: error_string = 'SSL certificate and key do not match' log.error(error_string) raise SSLMismatchException(error_string) except IOError: log.error('Unable to open either certificate or key file') raise log.debug('Certificate looks good.')
[ "def", "verify_cert", "(", "self", ")", ":", "log", ".", "debug", "(", "'Verifying the %s certificate, keyfile: %s'", ",", "self", ".", "certificate", ",", "self", ".", "keyfile", ")", "try", ":", "ssl", ".", "create_default_context", "(", ")", ".", "load_cert...
Checks that the provided cert and key are valid and usable
[ "Checks", "that", "the", "provided", "cert", "and", "key", "are", "valid", "and", "usable" ]
python
train
christophertbrown/bioscripts
ctbBio/transform.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L38-L55
def pertotal(table, option): """ calculate percent of total """ if option == 'table': total = sum([i for line in table for i in line]) t = [] for row in table: t_row = [] if option != 'table': total = sum(row) for i in row: if total == 0: t_row.append(0) else: t_row.append(i/total*100) t.append(t_row) return t
[ "def", "pertotal", "(", "table", ",", "option", ")", ":", "if", "option", "==", "'table'", ":", "total", "=", "sum", "(", "[", "i", "for", "line", "in", "table", "for", "i", "in", "line", "]", ")", "t", "=", "[", "]", "for", "row", "in", "table...
calculate percent of total
[ "calculate", "percent", "of", "total" ]
python
train
wbond/asn1crypto
asn1crypto/x509.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/x509.py#L730-L836
def _ldap_string_prep(self, string): """ Implements the internationalized string preparation algorithm from RFC 4518. https://tools.ietf.org/html/rfc4518#section-2 :param string: A unicode string to prepare :return: A prepared unicode string, ready for comparison """ # Map step string = re.sub('[\u00ad\u1806\u034f\u180b-\u180d\ufe0f-\uff00\ufffc]+', '', string) string = re.sub('[\u0009\u000a\u000b\u000c\u000d\u0085]', ' ', string) if sys.maxunicode == 0xffff: # Some installs of Python 2.7 don't support 8-digit unicode escape # ranges, so we have to break them into pieces # Original was: \U0001D173-\U0001D17A and \U000E0020-\U000E007F string = re.sub('\ud834[\udd73-\udd7a]|\udb40[\udc20-\udc7f]|\U000e0001', '', string) else: string = re.sub('[\U0001D173-\U0001D17A\U000E0020-\U000E007F\U000e0001]', '', string) string = re.sub( '[\u0000-\u0008\u000e-\u001f\u007f-\u0084\u0086-\u009f\u06dd\u070f\u180e\u200c-\u200f' '\u202a-\u202e\u2060-\u2063\u206a-\u206f\ufeff\ufff9-\ufffb]+', '', string ) string = string.replace('\u200b', '') string = re.sub('[\u00a0\u1680\u2000-\u200a\u2028-\u2029\u202f\u205f\u3000]', ' ', string) string = ''.join(map(stringprep.map_table_b2, string)) # Normalize step string = unicodedata.normalize('NFKC', string) # Prohibit step for char in string: if stringprep.in_table_a1(char): raise ValueError(unwrap( ''' X.509 Name objects may not contain unassigned code points ''' )) if stringprep.in_table_c8(char): raise ValueError(unwrap( ''' X.509 Name objects may not contain change display or zzzzdeprecated characters ''' )) if stringprep.in_table_c3(char): raise ValueError(unwrap( ''' X.509 Name objects may not contain private use characters ''' )) if stringprep.in_table_c4(char): raise ValueError(unwrap( ''' X.509 Name objects may not contain non-character code points ''' )) if stringprep.in_table_c5(char): raise ValueError(unwrap( ''' X.509 Name objects may not contain surrogate code points ''' )) if char == '\ufffd': raise ValueError(unwrap( ''' X.509 
Name objects may not contain the replacement character ''' )) # Check bidirectional step - here we ensure that we are not mixing # left-to-right and right-to-left text in the string has_r_and_al_cat = False has_l_cat = False for char in string: if stringprep.in_table_d1(char): has_r_and_al_cat = True elif stringprep.in_table_d2(char): has_l_cat = True if has_r_and_al_cat: first_is_r_and_al = stringprep.in_table_d1(string[0]) last_is_r_and_al = stringprep.in_table_d1(string[-1]) if has_l_cat or not first_is_r_and_al or not last_is_r_and_al: raise ValueError(unwrap( ''' X.509 Name object contains a malformed bidirectional sequence ''' )) # Insignificant space handling step string = ' ' + re.sub(' +', ' ', string).strip() + ' ' return string
[ "def", "_ldap_string_prep", "(", "self", ",", "string", ")", ":", "# Map step", "string", "=", "re", ".", "sub", "(", "'[\\u00ad\\u1806\\u034f\\u180b-\\u180d\\ufe0f-\\uff00\\ufffc]+'", ",", "''", ",", "string", ")", "string", "=", "re", ".", "sub", "(", "'[\\u00...
Implements the internationalized string preparation algorithm from RFC 4518. https://tools.ietf.org/html/rfc4518#section-2 :param string: A unicode string to prepare :return: A prepared unicode string, ready for comparison
[ "Implements", "the", "internationalized", "string", "preparation", "algorithm", "from", "RFC", "4518", ".", "https", ":", "//", "tools", ".", "ietf", ".", "org", "/", "html", "/", "rfc4518#section", "-", "2" ]
python
train
juju/charm-helpers
charmhelpers/contrib/hardening/host/checks/pam.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hardening/host/checks/pam.py#L38-L55
def get_audits(): """Get OS hardening PAM authentication audits. :returns: dictionary of audits """ audits = [] settings = utils.get_settings('os') if settings['auth']['pam_passwdqc_enable']: audits.append(PasswdqcPAM('/etc/passwdqc.conf')) if settings['auth']['retries']: audits.append(Tally2PAM('/usr/share/pam-configs/tally2')) else: audits.append(DeletedFile('/usr/share/pam-configs/tally2')) return audits
[ "def", "get_audits", "(", ")", ":", "audits", "=", "[", "]", "settings", "=", "utils", ".", "get_settings", "(", "'os'", ")", "if", "settings", "[", "'auth'", "]", "[", "'pam_passwdqc_enable'", "]", ":", "audits", ".", "append", "(", "PasswdqcPAM", "(", ...
Get OS hardening PAM authentication audits. :returns: dictionary of audits
[ "Get", "OS", "hardening", "PAM", "authentication", "audits", "." ]
python
train
aio-libs/aioredis
aioredis/commands/sorted_set.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/sorted_set.py#L226-L242
def zremrangebylex(self, key, min=b'-', max=b'+', include_min=True, include_max=True): """Remove all members in a sorted set between the given lexicographical range. :raises TypeError: if min is not bytes :raises TypeError: if max is not bytes """ if not isinstance(min, bytes): # FIXME raise TypeError("min argument must be bytes") if not isinstance(max, bytes): # FIXME raise TypeError("max argument must be bytes") if not min == b'-': min = (b'[' if include_min else b'(') + min if not max == b'+': max = (b'[' if include_max else b'(') + max return self.execute(b'ZREMRANGEBYLEX', key, min, max)
[ "def", "zremrangebylex", "(", "self", ",", "key", ",", "min", "=", "b'-'", ",", "max", "=", "b'+'", ",", "include_min", "=", "True", ",", "include_max", "=", "True", ")", ":", "if", "not", "isinstance", "(", "min", ",", "bytes", ")", ":", "# FIXME", ...
Remove all members in a sorted set between the given lexicographical range. :raises TypeError: if min is not bytes :raises TypeError: if max is not bytes
[ "Remove", "all", "members", "in", "a", "sorted", "set", "between", "the", "given", "lexicographical", "range", "." ]
python
train
klmitch/bark
bark/format.py
https://github.com/klmitch/bark/blob/6e0e002d55f01fee27e3e45bb86e30af1bfeef36/bark/format.py#L153-L172
def set_code(self, idx): """ Sets a code to be filtered on for the conversion. Note that this also sets the 'code_last' attribute and configures to ignore the remaining characters of the code. :param idx: The index at which the code _begins_. :returns: True if the code is valid, False otherwise. """ code = self.format[idx:idx + 3] if len(code) < 3 or not code.isdigit(): return False self.codes.append(int(code)) self.ignore = 2 self.code_last = True return True
[ "def", "set_code", "(", "self", ",", "idx", ")", ":", "code", "=", "self", ".", "format", "[", "idx", ":", "idx", "+", "3", "]", "if", "len", "(", "code", ")", "<", "3", "or", "not", "code", ".", "isdigit", "(", ")", ":", "return", "False", "...
Sets a code to be filtered on for the conversion. Note that this also sets the 'code_last' attribute and configures to ignore the remaining characters of the code. :param idx: The index at which the code _begins_. :returns: True if the code is valid, False otherwise.
[ "Sets", "a", "code", "to", "be", "filtered", "on", "for", "the", "conversion", ".", "Note", "that", "this", "also", "sets", "the", "code_last", "attribute", "and", "configures", "to", "ignore", "the", "remaining", "characters", "of", "the", "code", "." ]
python
train
ArchiveTeam/wpull
wpull/warc/recorder.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/warc/recorder.py#L106-L111
def _check_journals_and_maybe_raise(self): '''Check if any journal files exist and raise an error.''' files = list(glob.glob(self._prefix_filename + '*-wpullinc')) if files: raise OSError('WARC file {} is incomplete.'.format(files[0]))
[ "def", "_check_journals_and_maybe_raise", "(", "self", ")", ":", "files", "=", "list", "(", "glob", ".", "glob", "(", "self", ".", "_prefix_filename", "+", "'*-wpullinc'", ")", ")", "if", "files", ":", "raise", "OSError", "(", "'WARC file {} is incomplete.'", ...
Check if any journal files exist and raise an error.
[ "Check", "if", "any", "journal", "files", "exist", "and", "raise", "an", "error", "." ]
python
train
KyleJamesWalker/yamlsettings
yamlsettings/yamldict.py
https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/yamldict.py#L276-L286
def dump_all(data_list, stream=None, **kwargs): """ Serialize YAMLDict into a YAML stream. If stream is None, return the produced string instead. """ return yaml.dump_all( data_list, stream=stream, Dumper=YAMLDictDumper, **kwargs )
[ "def", "dump_all", "(", "data_list", ",", "stream", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "yaml", ".", "dump_all", "(", "data_list", ",", "stream", "=", "stream", ",", "Dumper", "=", "YAMLDictDumper", ",", "*", "*", "kwargs", ")" ]
Serialize YAMLDict into a YAML stream. If stream is None, return the produced string instead.
[ "Serialize", "YAMLDict", "into", "a", "YAML", "stream", ".", "If", "stream", "is", "None", "return", "the", "produced", "string", "instead", "." ]
python
train
quantmind/pulsar
pulsar/apps/__init__.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/__init__.py#L321-L353
def load_config(self): """Load the application configuration from a file and/or from the command line. Called during application initialisation. The parameters overriding order is the following: * default parameters. * the key-valued params passed in the initialisation. * the parameters in the optional configuration file * the parameters passed in the command line. """ # get the actor if available and override default cfg values with those # from the actor actor = get_actor() if actor and actor.is_running(): # actor available and running. # Unless argv is set, skip parsing if self.argv is None: self.console_parsed = False # copy global settings self.cfg.copy_globals(actor.cfg) # for name in list(self.cfg.params): if name in self.cfg.settings: value = self.cfg.params.pop(name) if value is not None: self.cfg.set(name, value) # parse console args if self.console_parsed: self.cfg.parse_command_line(self.argv) else: self.cfg.params.update(self.cfg.import_from_module())
[ "def", "load_config", "(", "self", ")", ":", "# get the actor if available and override default cfg values with those", "# from the actor", "actor", "=", "get_actor", "(", ")", "if", "actor", "and", "actor", ".", "is_running", "(", ")", ":", "# actor available and running...
Load the application configuration from a file and/or from the command line. Called during application initialisation. The parameters overriding order is the following: * default parameters. * the key-valued params passed in the initialisation. * the parameters in the optional configuration file * the parameters passed in the command line.
[ "Load", "the", "application", "configuration", "from", "a", "file", "and", "/", "or", "from", "the", "command", "line", "." ]
python
train
ryan-roemer/django-cloud-browser
cloud_browser/cloud/base.py
https://github.com/ryan-roemer/django-cloud-browser/blob/b06cdd24885a6309e843ed924dbf1705b67e7f48/cloud_browser/cloud/base.py#L119-L124
def native_container(self): """Native container object.""" if self.__native is None: self.__native = self._get_container() return self.__native
[ "def", "native_container", "(", "self", ")", ":", "if", "self", ".", "__native", "is", "None", ":", "self", ".", "__native", "=", "self", ".", "_get_container", "(", ")", "return", "self", ".", "__native" ]
Native container object.
[ "Native", "container", "object", "." ]
python
train
proycon/clam
clam/common/data.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L789-L801
def profiler(profiles, projectpath,parameters,serviceid,servicename,serviceurl,printdebug=None): """Given input files and parameters, produce metadata for outputfiles. Returns a list of matched profiles (empty if none match), and a program.""" parameters = sanitizeparameters(parameters) matched = [] program = Program(projectpath) for profile in profiles: if profile.match(projectpath, parameters)[0]: matched.append(profile) program.update( profile.generate(projectpath,parameters,serviceid,servicename,serviceurl) ) return matched, program
[ "def", "profiler", "(", "profiles", ",", "projectpath", ",", "parameters", ",", "serviceid", ",", "servicename", ",", "serviceurl", ",", "printdebug", "=", "None", ")", ":", "parameters", "=", "sanitizeparameters", "(", "parameters", ")", "matched", "=", "[", ...
Given input files and parameters, produce metadata for outputfiles. Returns a list of matched profiles (empty if none match), and a program.
[ "Given", "input", "files", "and", "parameters", "produce", "metadata", "for", "outputfiles", ".", "Returns", "a", "list", "of", "matched", "profiles", "(", "empty", "if", "none", "match", ")", "and", "a", "program", "." ]
python
train
LonamiWebs/Telethon
telethon_generator/docswriter.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_generator/docswriter.py#L118-L197
def write_code(self, tlobject): """Writes the code for the given 'tlobject' properly formatted with hyperlinks """ self.write('<pre>---{}---\n', 'functions' if tlobject.is_function else 'types') # Write the function or type and its ID if tlobject.namespace: self.write(tlobject.namespace) self.write('.') self.write('{}#{:08x}', tlobject.name, tlobject.id) # Write all the arguments (or do nothing if there's none) for arg in tlobject.args: self.write(' ') add_link = not arg.generic_definition and not arg.is_generic # "Opening" modifiers if arg.generic_definition: self.write('{') # Argument name self.write(arg.name) self.write(':') # "Opening" modifiers if arg.is_flag: self.write('flags.{}?', arg.flag_index) if arg.is_generic: self.write('!') if arg.is_vector: self.write('<a href="{}">Vector</a>&lt;', self.type_to_path('vector')) # Argument type if arg.type: if add_link: self.write('<a href="{}">', self.type_to_path(arg.type)) self.write(arg.type) if add_link: self.write('</a>') else: self.write('#') # "Closing" modifiers if arg.is_vector: self.write('&gt;') if arg.generic_definition: self.write('}') # Now write the resulting type (result from a function/type) self.write(' = ') generic_name = next((arg.name for arg in tlobject.args if arg.generic_definition), None) if tlobject.result == generic_name: # Generic results cannot have any link self.write(tlobject.result) else: if re.search('^vector<', tlobject.result, re.IGNORECASE): # Notice that we don't simply make up the "Vector" part, # because some requests (as of now, only FutureSalts), # use a lower type name for it (see #81) vector, inner = tlobject.result.split('<') inner = inner.strip('>') self.write('<a href="{}">{}</a>&lt;', self.type_to_path(vector), vector) self.write('<a href="{}">{}</a>&gt;', self.type_to_path(inner), inner) else: self.write('<a href="{}">{}</a>', self.type_to_path(tlobject.result), tlobject.result) self.write('</pre>')
[ "def", "write_code", "(", "self", ",", "tlobject", ")", ":", "self", ".", "write", "(", "'<pre>---{}---\\n'", ",", "'functions'", "if", "tlobject", ".", "is_function", "else", "'types'", ")", "# Write the function or type and its ID", "if", "tlobject", ".", "names...
Writes the code for the given 'tlobject' properly formatted with hyperlinks
[ "Writes", "the", "code", "for", "the", "given", "tlobject", "properly", "formatted", "with", "hyperlinks" ]
python
train
AtomHash/evernode
evernode/classes/render.py
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/render.py#L46-L62
def compile(self, name, folder=None, data=None):
    """Render the named Jinja template and cache the result.

    The rendered output is stored in ``self.templates`` under the
    separator-stripped template name.

    :param name: template file name; any path separators are removed.
    :param folder: optional sub-folder to look the template up in.
    :param data: optional dict of variables passed to the template.

    If the template cannot be found, the error is re-raised only when
    the application runs in debug mode; otherwise it is ignored.
    """
    # Flatten the name so it cannot escape into other directories.
    key = name.replace(os.sep, "")
    subdir = "" if folder is None else folder
    path = os.path.join(subdir.strip(os.sep), key)
    context = data if data is not None else {}
    try:
        rendered = self.jinja.get_template(path).render(context)
    except TemplateNotFound as template_error:
        # Missing templates are only fatal while debugging.
        if current_app.config['DEBUG']:
            raise template_error
    else:
        self.templates[key] = rendered
[ "def", "compile", "(", "self", ",", "name", ",", "folder", "=", "None", ",", "data", "=", "None", ")", ":", "template_name", "=", "name", ".", "replace", "(", "os", ".", "sep", ",", "\"\"", ")", "if", "folder", "is", "None", ":", "folder", "=", "...
renders template_name + self.extension file with data using jinja
[ "renders", "template_name", "+", "self", ".", "extension", "file", "with", "data", "using", "jinja" ]
python
train
renweizhukov/pytwis
pytwis/pytwis.py
https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L188-L271
def register(self, username, password):
    """Register a new user.

    Parameters
    ----------
    username: str
        The username.
    password: str
        The password.

    Returns
    -------
    bool
        True if the new user is successfully registered, False otherwise.
    result
        An empty dict if the new user is successfully registered, a dict
        containing the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_INVALID_USERNAME
    -  ERROR_WEAK_PASSWORD
    -  ERROR_USERNAME_ALREADY_EXISTS.format(username)
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check the username.
    if not Pytwis._check_username(username):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
        return (False, result)

    # Check the password.
    if not Pytwis._check_password(password):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
        return (False, result)

    # Update the username-to-userid mapping.
    with self._rc.pipeline() as pipe:
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # multiple clients are registering with the same username.
                pipe.watch(pytwis_constants.USERS_KEY)
                username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
                if username_exists:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
                    return (False, result)

                # Get the next user-id. If the key "next_user_id" doesn't exist,
                # it will be created and initialized as 0, and then incremented by 1.
                userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)

                # Set the username-to-userid pair in USERS_HASH_KEY.
                pipe.multi()
                pipe.hset(pytwis_constants.USERS_KEY, username, userid)
                pipe.execute()
                break
            except WatchError:
                # Another client touched the watched key between WATCH and
                # EXEC; restart the optimistic-locking transaction.
                continue

        # Generate the authentication secret.
        auth_secret = secrets.token_hex()
        userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)

        # Generate the password hash.
        # The format of the password hash looks like "method$salt$hash".
        password_hash = generate_password_hash(password, method=\
            pytwis_constants.PASSWORD_HASH_METHOD)

        pipe.multi()

        # Update the authentication_secret-to-userid mapping.
        pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)

        # Create the user profile.
        pipe.hmset(userid_profile_key,
                   {pytwis_constants.USERNAME_KEY: username,
                    pytwis_constants.PASSWORD_HASH_KEY: password_hash,
                    pytwis_constants.AUTH_KEY: auth_secret})
        pipe.execute()

    return (True, result)
[ "def", "register", "(", "self", ",", "username", ",", "password", ")", ":", "result", "=", "{", "pytwis_constants", ".", "ERROR_KEY", ":", "None", "}", "# Check the username.", "if", "not", "Pytwis", ".", "_check_username", "(", "username", ")", ":", "result...
Register a new user. Parameters ---------- username: str The username. password: str The password. Returns ------- bool True if the new user is successfully registered, False otherwise. result An empty dict if the new user is successfully registered, a dict containing the error string with the key ERROR_KEY otherwise. Note ---- Possible error strings are listed as below: - ERROR_USERNAME_ALREADY_EXISTS.format(username) - ERROR_WEAK_PASSWORD
[ "Register", "a", "new", "user", "." ]
python
train
grundic/yagocd
yagocd/client.py
https://github.com/grundic/yagocd/blob/4c75336ae6f107c8723d37b15e52169151822127/yagocd/client.py#L310-L318
def properties(self):
    """Lazily create and return the :class:`PropertyManager`,
    which is used to manage properties of the jobs.

    :rtype: yagocd.resources.property.PropertyManager
    """
    manager = self._property_manager
    if manager is None:
        # First access: build the manager bound to our session.
        manager = PropertyManager(session=self._session)
        self._property_manager = manager
    return manager
[ "def", "properties", "(", "self", ")", ":", "if", "self", ".", "_property_manager", "is", "None", ":", "self", ".", "_property_manager", "=", "PropertyManager", "(", "session", "=", "self", ".", "_session", ")", "return", "self", ".", "_property_manager" ]
Property for accessing :class:`PropertyManager` instance, which is used to manage properties of the jobs. :rtype: yagocd.resources.property.PropertyManager
[ "Property", "for", "accessing", ":", "class", ":", "PropertyManager", "instance", "which", "is", "used", "to", "manage", "properties", "of", "the", "jobs", "." ]
python
train
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L5055-L5079
def replace_some(ol, value, *indexes, **kwargs):
    '''
        from elist.elist import *
        ol = [1,'a',3,'a',5,'a',6,'a']
        id(ol)
        new = replace_some(ol,'AAA',1,3,7)
        ol
        new
        id(ol)
        id(new)
        ####
        ol = [1,'a',3,'a',5,'a',6,'a']
        id(ol)
        rslt = replace_some(ol,'AAA',1,3,7,mode="original")
        ol
        rslt
        id(ol)
        id(rslt)
    '''
    # mode="new" (default) returns a copy; mode="original" edits in place.
    mode = kwargs.get('mode', "new")
    return replace_seqs(ol, value, list(indexes), mode=mode)
[ "def", "replace_some", "(", "ol", ",", "value", ",", "*", "indexes", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'mode'", "in", "kwargs", ")", ":", "mode", "=", "kwargs", "[", "\"mode\"", "]", "else", ":", "mode", "=", "\"new\"", "indexes", "=",...
from elist.elist import * ol = [1,'a',3,'a',5,'a',6,'a'] id(ol) new = replace_some(ol,'AAA',1,3,7) ol new id(ol) id(new) #### ol = [1,'a',3,'a',5,'a',6,'a'] id(ol) rslt = replace_some(ol,'AAA',1,3,7,mode="original") ol rslt id(ol) id(rslt)
[ "from", "elist", ".", "elist", "import", "*", "ol", "=", "[", "1", "a", "3", "a", "5", "a", "6", "a", "]", "id", "(", "ol", ")", "new", "=", "replace_some", "(", "ol", "AAA", "1", "3", "7", ")", "ol", "new", "id", "(", "ol", ")", "id", "(...
python
valid
alephdata/memorious
memorious/logic/crawler.py
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/crawler.py#L48-L63
def check_due(self):
    """Return True when this crawler is due to run again.

    A crawler is due when it is enabled, not currently running, has a
    schedule interval (``delta``), and either has never run or last ran
    longer than one interval ago.
    """
    # Disabled or already-running crawlers are never due.
    if self.disabled or self.is_running:
        return False
    # Without a schedule interval there is nothing to be due for.
    if self.delta is None:
        return False
    # Read last_run once in case it is a computed property.
    previous = self.last_run
    if previous is None:
        # Never executed before: due immediately.
        return True
    return datetime.utcnow() > previous + self.delta
[ "def", "check_due", "(", "self", ")", ":", "if", "self", ".", "disabled", ":", "return", "False", "if", "self", ".", "is_running", ":", "return", "False", "if", "self", ".", "delta", "is", "None", ":", "return", "False", "last_run", "=", "self", ".", ...
Check if the last execution of this crawler is older than the scheduled interval.
[ "Check", "if", "the", "last", "execution", "of", "this", "crawler", "is", "older", "than", "the", "scheduled", "interval", "." ]
python
train
restran/mountains
mountains/utils/string_utils.py
https://github.com/restran/mountains/blob/a97fee568b112f4e10d878f815d0db3dd0a98d74/mountains/utils/string_utils.py#L7-L17
def fixed_length_split(s, width):
    """
    Split a string into consecutive fixed-width chunks.

    The last chunk may be shorter when ``len(s)`` is not an exact
    multiple of ``width``.

    :param s: the string to split.
    :param width: chunk length in characters (positive integer).
    :return: list of substrings, in original order.
    """
    # Equivalent regex approach would drop a short trailing chunk:
    #   re.findall(r'.{%s}' % width, s)
    length = len(s)
    return [s[pos:pos + width] for pos in range(0, length, width)]
[ "def", "fixed_length_split", "(", "s", ",", "width", ")", ":", "# 使用正则的方法", "# import re", "# split = re.findall(r'.{%s}' % width, string)", "return", "[", "s", "[", "x", ":", "x", "+", "width", "]", "for", "x", "in", "range", "(", "0", ",", "len", "(", "s...
固定长度分割字符串 :param s: :param width: :return:
[ "固定长度分割字符串", ":", "param", "s", ":", ":", "param", "width", ":", ":", "return", ":" ]
python
train
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L326-L333
def bpopleft(self, timeout=0):
    """
    Remove and return the first item of the list, blocking until an
    item becomes available or the timeout elapses (0 means wait
    forever, which is the default). Returns None on timeout.
    """
    # BLPOP yields a (key, value) pair, or None when it times out.
    popped = self.database.blpop(self.key, timeout)
    if popped is None:
        return None
    return popped[1]
[ "def", "bpopleft", "(", "self", ",", "timeout", "=", "0", ")", ":", "ret", "=", "self", ".", "database", ".", "blpop", "(", "self", ".", "key", ",", "timeout", ")", "if", "ret", "is", "not", "None", ":", "return", "ret", "[", "1", "]" ]
Remove the first item from the list, blocking until an item becomes available or timeout is reached (0 for no timeout, default).
[ "Remove", "the", "first", "item", "from", "the", "list", "blocking", "until", "an", "item", "becomes", "available", "or", "timeout", "is", "reached", "(", "0", "for", "no", "timeout", "default", ")", "." ]
python
train
Erotemic/utool
utool/util_alg.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1653-L1661
def choose(n, k):
    """
    N choose k — the binomial coefficient (combinations without
    replacement), computed exactly with integers.

    Previously delegated to ``scipy.misc.comb``, which was removed from
    SciPy (use ``scipy.special.comb`` there); this stdlib version keeps
    the same contract: an exact int result, and 0 when ``k > n`` or
    ``k < 0``.

    :param n: total number of items (non-negative integer).
    :param k: number of items chosen.
    :return: exact integer binomial coefficient.
    """
    import math
    # Match scipy's exact=True behavior: out-of-range k yields 0.
    if k < 0 or k > n:
        return 0
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
[ "def", "choose", "(", "n", ",", "k", ")", ":", "import", "scipy", ".", "misc", "return", "scipy", ".", "misc", ".", "comb", "(", "n", ",", "k", ",", "exact", "=", "True", ",", "repetition", "=", "False", ")" ]
N choose k binomial combination (without replacement) scipy.special.binom
[ "N", "choose", "k" ]
python
train
meraki-analytics/merakicommons
merakicommons/container.py
https://github.com/meraki-analytics/merakicommons/blob/d0c8ade8f4619a34ca488336c44722ed1aeaf7ef/merakicommons/container.py#L175-L182
def _search_generator(self, item: Any) -> Generator[Any, None, None]: """A helper method for `self.search` that returns a generator rather than a list.""" results = 0 for x in self.enumerate(item): yield x results += 1 if results == 0: raise SearchError(str(item))
[ "def", "_search_generator", "(", "self", ",", "item", ":", "Any", ")", "->", "Generator", "[", "Any", ",", "None", ",", "None", "]", ":", "results", "=", "0", "for", "x", "in", "self", ".", "enumerate", "(", "item", ")", ":", "yield", "x", "results...
A helper method for `self.search` that returns a generator rather than a list.
[ "A", "helper", "method", "for", "self", ".", "search", "that", "returns", "a", "generator", "rather", "than", "a", "list", "." ]
python
train
ph4r05/monero-serialize
monero_serialize/xmrobj.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrobj.py#L158-L171
async def dump_message_field(obj, msg, field, field_archiver=None):
    """
    Dumps a message field to the object. Field is defined by the
    message field specification: a tuple of (name, type, *params).

    :param obj: destination object receiving the dumped value.
    :param msg: source message the field value is read from.
    :param field: field specification tuple.
    :param field_archiver: optional archiver callable; defaults to
        :func:`dump_field`.
    :return: whatever the archiver returns.
    """
    fname = field[0]
    ftype = field[1]
    params = field[2:]
    # Missing attributes are archived as None.
    fvalue = getattr(msg, fname, None)
    archiver = field_archiver or dump_field
    return await archiver(eref(obj, fname, True), fvalue, ftype, params)
[ "async", "def", "dump_message_field", "(", "obj", ",", "msg", ",", "field", ",", "field_archiver", "=", "None", ")", ":", "fname", ",", "ftype", ",", "params", "=", "field", "[", "0", "]", ",", "field", "[", "1", "]", ",", "field", "[", "2", ":", ...
Dumps a message field to the object. Field is defined by the message field specification. :param obj: :param msg: :param field: :param field_archiver: :return:
[ "Dumps", "a", "message", "field", "to", "the", "object", ".", "Field", "is", "defined", "by", "the", "message", "field", "specification", "." ]
python
train
galactics/beyond
beyond/orbits/forms.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/orbits/forms.py#L93-L109
def _keplerian_to_cartesian(cls, coord, center): """Conversion from Keplerian to Cartesian coordinates """ a, e, i, Ω, ω, ν = coord p = a * (1 - e ** 2) r = p / (1 + e * cos(ν)) h = sqrt(center.µ * p) x = r * (cos(Ω) * cos(ω + ν) - sin(Ω) * sin(ω + ν) * cos(i)) y = r * (sin(Ω) * cos(ω + ν) + cos(Ω) * sin(ω + ν) * cos(i)) z = r * sin(i) * sin(ω + ν) vx = x * h * e / (r * p) * sin(ν) - h / r * (cos(Ω) * sin(ω + ν) + sin(Ω) * cos(ω + ν) * cos(i)) vy = y * h * e / (r * p) * sin(ν) - h / r * (sin(Ω) * sin(ω + ν) - cos(Ω) * cos(ω + ν) * cos(i)) vz = z * h * e / (r * p) * sin(ν) + h / r * sin(i) * cos(ω + ν) return np.array([x, y, z, vx, vy, vz], dtype=float)
[ "def", "_keplerian_to_cartesian", "(", "cls", ",", "coord", ",", "center", ")", ":", "a", ",", "e", ",", "i", ",", "Ω,", " ", ", ", "ν", "= ", "o", "rd", "p", "=", "a", "*", "(", "1", "-", "e", "**", "2", ")", "r", "=", "p", "/", "(", "1"...
Conversion from Keplerian to Cartesian coordinates
[ "Conversion", "from", "Keplerian", "to", "Cartesian", "coordinates" ]
python
train
gamechanger/mongothon
mongothon/events.py
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L26-L32
def apply(self, event, document, *args, **kwargs):
    """
    Invoke, in registration order, every middleware function registered
    for the given event, passing each the document (plus any extra
    positional/keyword arguments). Unknown events are a no-op.
    """
    handlers = self._handler_dict.get(event, [])
    for handler in handlers:
        handler(document, *args, **kwargs)
[ "def", "apply", "(", "self", ",", "event", ",", "document", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "fn", "in", "self", ".", "_handler_dict", ".", "get", "(", "event", ",", "[", "]", ")", ":", "fn", "(", "document", ",", "*"...
Applies all middleware functions registered against the given event in order to the given document.
[ "Applies", "all", "middleware", "functions", "registered", "against", "the", "given", "event", "in", "order", "to", "the", "given", "document", "." ]
python
train
pip-services3-python/pip-services3-components-python
pip_services3_components/connect/MemoryDiscovery.py
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/connect/MemoryDiscovery.py#L97-L112
def resolve_one(self, correlation_id, key):
    """
    Resolves a single connection parameters by its key.

    :param correlation_id: (optional) transaction id to trace execution
                           through call chain (not used by the lookup
                           itself, kept for interface consistency).

    :param key: a key to uniquely identify the connection.

    :return: the first matching connection, or None when the key is
             unknown or carries no connection.
    """
    # Fix: compare against None with `is not None` (identity), not `!=`,
    # and short-circuit with next() instead of a manual break loop.
    return next(
        (item.connection for item in self._items
         if item.key == key and item.connection is not None),
        None)
[ "def", "resolve_one", "(", "self", ",", "correlation_id", ",", "key", ")", ":", "connection", "=", "None", "for", "item", "in", "self", ".", "_items", ":", "if", "item", ".", "key", "==", "key", "and", "item", ".", "connection", "!=", "None", ":", "c...
Resolves a single connection parameters by its key. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the connection. :return: a resolved connection.
[ "Resolves", "a", "single", "connection", "parameters", "by", "its", "key", "." ]
python
train
bio2bel/bio2bel
src/bio2bel/manager/connection_manager.py
https://github.com/bio2bel/bio2bel/blob/d80762d891fa18b248709ff0b0f97ebb65ec64c2/src/bio2bel/manager/connection_manager.py#L82-L87
def _get_connection(cls, connection: Optional[str] = None) -> str:
    """Build the default connection string for this module.

    Thin wrapper around :func:`bio2bel.utils.get_connection` that
    supplies this class's :data:`module_name`.
    """
    module = cls.module_name
    return get_connection(module, connection=connection)
[ "def", "_get_connection", "(", "cls", ",", "connection", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "str", ":", "return", "get_connection", "(", "cls", ".", "module_name", ",", "connection", "=", "connection", ")" ]
Get a default connection string. Wraps :func:`bio2bel.utils.get_connection` and passing this class's :data:`module_name` to it.
[ "Get", "a", "default", "connection", "string", "." ]
python
valid
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3760-L3850
def masked_local_attention_2d(q,
                              k,
                              v,
                              query_shape=(8, 16),
                              memory_flange=(8, 16),
                              name=None):
  """Strided block local self-attention.

    Each position in a query block can attend to all the generated
    queries in the query block, which are generated in raster scan,
    and positions that are generated to the left and top. The shapes
    are specified by query shape and memory flange. Note that if you're
    using this function, you do not need to right shift. Right shifting
    happens inside this function separately for each block.

  Args:
    q: a Tensor with shape [batch, heads, h, w, depth_k]
    k: a Tensor with shape [batch, heads, h, w, depth_k]
    v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
      implementation, depth_v must be equal to depth_k.
    query_shape: an tuple indicating the height and width of each query block.
      query_shape = block_shape
    memory_flange: an integer indicating how much to look in height and width
      from each query block.
      memory shape = query_shape + (block_flange[0], 2*block_flange[1])
    name: an optional string

  Returns:
    a Tensor of shape [batch, heads, h, w, depth_v]
  """
  with tf.variable_scope(
      name, default_name="local_masked_self_attention_2d", values=[q, k, v]):
    v_shape = common_layers.shape_list(v)

    # Pad query to ensure multiple of corresponding lengths.
    q = pad_to_multiple_2d(q, query_shape)

    # Set up query blocks.
    q_indices = gather_indices_2d(q, query_shape, query_shape)
    q_new = gather_blocks_2d(q, q_indices)

    # Set up key and value blocks.
    # The "flange" is the memory region above/left of each query block;
    # the "center" is the block itself.
    k_flange, k_center = get_memory_region(k, query_shape, memory_flange,
                                           q_indices)
    v_flange, v_center = get_memory_region(v, query_shape, memory_flange,
                                           q_indices)
    if k_flange is not None:
      k_new = tf.concat([k_flange, k_center], axis=3)
      v_new = tf.concat([v_flange, v_center], axis=3)
    else:
      k_new = k_center
      v_new = v_center

    # Set up the masks.
    query_elements = np.prod(query_shape)
    padding_mask = None
    if k_flange is not None:
      # Padded flange positions get a large negative bias so softmax
      # effectively ignores them.
      padding_mask = tf.expand_dims(
          embedding_to_padding(k_flange) * -1e9, axis=-2)
      padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1])

    # Causal (lower-triangular) bias within the center block implements
    # the per-block right-shift mentioned in the docstring.
    # NOTE(review): np.prod(query_elements) re-reduces an already-scalar
    # product — harmless, but the inner np.prod is redundant.
    center_attention_bias = attention_bias_lower_triangle(
        np.prod(query_elements))
    center_attention_bias = tf.reshape(
        center_attention_bias, [1, 1, 1, query_elements, query_elements])
    v_center_shape = common_layers.shape_list(v_center)
    center_attention_bias = tf.tile(
        center_attention_bias,
        [v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1])

    if padding_mask is not None:
      # Combine the mask for padding and visible region.
      attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4)
    else:
      attention_bias = center_attention_bias

    output = dot_product_attention(
        q_new,
        k_new,
        v_new,
        attention_bias,
        dropout_rate=0.,
        name="masked_local_2d",
        make_image_summary=False)
    # Put representations back into original shapes.
    padded_q_shape = common_layers.shape_list(q)
    output = scatter_blocks_2d(output, q_indices, padded_q_shape)

    # Remove the padding if introduced.
    output = tf.slice(output, [0, 0, 0, 0, 0],
                      [-1, -1, v_shape[2], v_shape[3], -1])
    return output
[ "def", "masked_local_attention_2d", "(", "q", ",", "k", ",", "v", ",", "query_shape", "=", "(", "8", ",", "16", ")", ",", "memory_flange", "=", "(", "8", ",", "16", ")", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", ...
Strided block local self-attention. Each position in a query block can attend to all the generated queries in the query block, which are generated in raster scan, and positions that are generated to the left and top. The shapes are specified by query shape and memory flange. Note that if you're using this function, you do not need to right shift. Right shifting happens inside this function separately for each block. Args: q: a Tensor with shape [batch, heads, h, w, depth_k] k: a Tensor with shape [batch, heads, h, w, depth_k] v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current implementation, depth_v must be equal to depth_k. query_shape: an tuple indicating the height and width of each query block. query_shape = block_shape memory_flange: an integer indicating how much to look in height and width from each query block. memory shape = query_shape + (block_flange[0], 2*block_flange[1]) name: an optional string Returns: a Tensor of shape [batch, heads, h, w, depth_v]
[ "Strided", "block", "local", "self", "-", "attention", "." ]
python
train
jalanb/pysyte
pysyte/decorators.py
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/decorators.py#L6-L16
def _represent_arguments(*arguments, **keyword_arguments): """Represent the aruments in a form suitable as a key (hashable) And which will be recognisable to user in error messages >>> print(_represent_arguments([1, 2], **{'fred':'here'})) [1, 2], fred='here' """ argument_strings = [repr(a) for a in arguments] keyword_strings = [ '='.join((k, repr(v))) for k, v in keyword_arguments.items()] return ', '.join(argument_strings + keyword_strings)
[ "def", "_represent_arguments", "(", "*", "arguments", ",", "*", "*", "keyword_arguments", ")", ":", "argument_strings", "=", "[", "repr", "(", "a", ")", "for", "a", "in", "arguments", "]", "keyword_strings", "=", "[", "'='", ".", "join", "(", "(", "k", ...
Represent the aruments in a form suitable as a key (hashable) And which will be recognisable to user in error messages >>> print(_represent_arguments([1, 2], **{'fred':'here'})) [1, 2], fred='here'
[ "Represent", "the", "aruments", "in", "a", "form", "suitable", "as", "a", "key", "(", "hashable", ")" ]
python
train