repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
jay-johnson/network-pipeline
network_pipeline/record_packets_to_csv.py
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/record_packets_to_csv.py#L713-L724
def write_to_file(self, data_dict, output_file_path):
    """Serialize ``data_dict`` via the project's ``ppj`` helper and write it to disk.

    :param data_dict: dictionary to serialize
    :param output_file_path: destination file path (overwritten if it exists)
    """
    log.info("saving={}".format(output_file_path))
    serialized = str(ppj(data_dict))
    with open(output_file_path, "w") as out_fh:
        out_fh.write(serialized)
[ "def", "write_to_file", "(", "self", ",", "data_dict", ",", "output_file_path", ")", ":", "log", ".", "info", "(", "\"saving={}\"", ".", "format", "(", "output_file_path", ")", ")", "with", "open", "(", "output_file_path", ",", "\"w\"", ")", "as", "output_fi...
write_to_file :param data_dict: :param output_file_path:
[ "write_to_file" ]
python
train
pyvisa/pyvisa
pyvisa/ctwrapper/functions.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L1637-L1649
def set_attribute(library, session, attribute, attribute_state):
    """Sets the state of an attribute.

    Corresponds to viSetAttribute function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param attribute: Attribute for which the state is to be modified.
                      (Attributes.*)
    :param attribute_state: The state of the attribute to be set for the
                            specified object.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # Thin pass-through: delegate straight to the underlying C function.
    status = library.viSetAttribute(session, attribute, attribute_state)
    return status
[ "def", "set_attribute", "(", "library", ",", "session", ",", "attribute", ",", "attribute_state", ")", ":", "return", "library", ".", "viSetAttribute", "(", "session", ",", "attribute", ",", "attribute_state", ")" ]
Sets the state of an attribute. Corresponds to viSetAttribute function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param attribute: Attribute for which the state is to be modified. (Attributes.*) :param attribute_state: The state of the attribute to be set for the specified object. :return: return value of the library call. :rtype: :class:`pyvisa.constants.StatusCode`
[ "Sets", "the", "state", "of", "an", "attribute", "." ]
python
train
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L3338-L3379
def print_config(self, cfg, hide_password=True, history=False, module_id=None):
    """Return a string representing the config of this ShutIt run.

    :param cfg:           Nested config dict: section name -> {key: value}.
    :param hide_password: If True, replace 'password'/'passphrase' values
                          with a repeatedly re-applied SHA-512 digest.
    :param history:       If True, append a trailing comment to each line
                          stating where the value was set.
    :param module_id:     If given, only render the section with this name.
    :return:              The rendered configuration string.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    cp = self.config_parser
    s = ''
    keys1 = list(cfg.keys())
    if keys1:
        keys1.sort()
    for k in keys1:
        if module_id is not None and k != module_id:
            continue
        if isinstance(k, str) and isinstance(cfg[k], dict):
            s += '\n[' + k + ']\n'
            keys2 = list(cfg[k].keys())
            if keys2:
                keys2.sort()
            for k1 in keys2:
                line = ''
                line += k1 + ':'
                # If we want to hide passwords, we do so using a sha512
                # done an arbitrary number of times (27).
                if hide_password and (k1 == 'password' or k1 == 'passphrase'):
                    # BUG FIX: hash the value as bytes (sha512 rejects str on
                    # Python 3), and re-hash the previous digest each round;
                    # the original re-hashed the unrelated accumulator `s`.
                    p = hashlib.sha512(str(cfg[k][k1]).encode('utf-8')).hexdigest()
                    i = 27
                    while i > 0:
                        i -= 1
                        p = hashlib.sha512(p.encode('utf-8')).hexdigest()
                    line += p
                else:
                    # BUG FIX: the original tested type(x == bool) /
                    # type(x == str), which are always truthy, so every value
                    # was rendered via str(). Use real isinstance checks and
                    # keep str() as the fallback to preserve output for other
                    # value types (ints, lists, ...).
                    if isinstance(cfg[k][k1], bool):
                        line += str(cfg[k][k1])
                    elif isinstance(cfg[k][k1], str):
                        line += cfg[k][k1]
                    else:
                        line += str(cfg[k][k1])
                if history:
                    try:
                        line += (30-len(line)) * ' ' + ' # ' + cp.whereset(k, k1)
                    except Exception:
                        # Assume this is because it was never set by a config parser.
                        line += (30-len(line)) * ' ' + ' # ' + "defaults in code"
                s += line + '\n'
    return s
[ "def", "print_config", "(", "self", ",", "cfg", ",", "hide_password", "=", "True", ",", "history", "=", "False", ",", "module_id", "=", "None", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "cp", "=", "self", ".", ...
Returns a string representing the config of this ShutIt run.
[ "Returns", "a", "string", "representing", "the", "config", "of", "this", "ShutIt", "run", "." ]
python
train
xflr6/bitsets
bitsets/transform.py
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L69-L90
def unpack(n, r=32):
    """Yield r > 0 bit-length integers splitting n into chunks.

    >>> list(unpack(42, 1))
    [0, 1, 0, 1, 0, 1]

    >>> list(unpack(256, 8))
    [0, 1]

    >>> list(unpack(2, 0))
    Traceback (most recent call last):
    ...
    ValueError: unpack needs r > 0
    """
    if r < 1:
        raise ValueError('unpack needs r > 0')
    # Mask selecting the low r bits of the remaining value.
    chunk_mask = (1 << r) - 1
    remaining = n
    while remaining:
        yield remaining & chunk_mask
        remaining >>= r
[ "def", "unpack", "(", "n", ",", "r", "=", "32", ")", ":", "if", "r", "<", "1", ":", "raise", "ValueError", "(", "'unpack needs r > 0'", ")", "mask", "=", "(", "1", "<<", "r", ")", "-", "1", "while", "n", ":", "yield", "n", "&", "mask", "n", "...
Yield r > 0 bit-length integers splitting n into chunks. >>> list(unpack(42, 1)) [0, 1, 0, 1, 0, 1] >>> list(unpack(256, 8)) [0, 1] >>> list(unpack(2, 0)) Traceback (most recent call last): ... ValueError: unpack needs r > 0
[ "Yield", "r", ">", "0", "bit", "-", "length", "integers", "splitting", "n", "into", "chunks", "." ]
python
train
pdkit/pdkit
pdkit/gait_processor.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/gait_processor.py#L120-L175
def freeze_of_gait(self, x):
    """
    This method assesses freeze of gait following :cite:`g-BachlinPRMHGT10`.

    :param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
    :type x: pandas.Series
    :return freeze_time: What times do freeze of gait events occur. [measured in time (h:m:s)]
    :rtype freeze_time: numpy.ndarray
    :return freeze_index: Freeze Index is defined as the power in the "freeze" band [3-8 Hz] divided by the power in the "locomotor" band [0.5-3 Hz] [3]. [measured in Hz]
    :rtype freeze_index: numpy.ndarray
    :return locomotor_freeze_index: Locomotor freeze index is the power in the "freeze" band [3-8 Hz] added to power in the "locomotor" band [0.5-3 Hz]. [measured in Hz]
    :rtype locomotor_freeze_index: numpy.ndarray
    """
    # Resample onto the instance's uniform sampling grid before FFT analysis.
    data = self.resample_signal(x).values
    # Frequency resolution of one FFT bin for the configured window size.
    f_res = self.sampling_frequency / self.window
    # Convert the locomotor and freeze band edges (Hz) to FFT bin indices.
    f_nr_LBs = int(self.loco_band[0] / f_res)
    f_nr_LBe = int(self.loco_band[1] / f_res)
    f_nr_FBs = int(self.freeze_band[0] / f_res)
    f_nr_FBe = int(self.freeze_band[1] / f_res)
    # jPos is the (exclusive) right edge of the sliding analysis window.
    jPos = self.window + 1
    i = 0  # NOTE(review): loop counter is never read after incrementing.
    time = []
    sumLocoFreeze = []
    freezeIndex = []
    # Slide the window across the signal in steps of self.step_size.
    while jPos < len(data):
        jStart = jPos - self.window
        time.append(jPos)
        # De-mean the windowed segment so the DC component does not dominate.
        y = data[int(jStart):int(jPos)]
        y = y - np.mean(y)
        # Power spectrum of the window (|Y|^2 scaled by window length).
        Y = np.fft.fft(y, int(self.window))
        Pyy = abs(Y*Y) / self.window
        # Integrate spectral power over each band.
        areaLocoBand = numerical_integration( Pyy[f_nr_LBs-1 : f_nr_LBe], self.sampling_frequency )
        areaFreezeBand = numerical_integration( Pyy[f_nr_FBs-1 : f_nr_FBe], self.sampling_frequency )
        # Locomotor freeze index: total power in both bands.
        sumLocoFreeze.append(areaFreezeBand + areaLocoBand)
        # Freeze index: freeze-band power relative to locomotor-band power.
        freezeIndex.append(areaFreezeBand / areaLocoBand)
        jPos = jPos + self.step_size
        i = i + 1
    # Times are window end positions in samples — TODO confirm units vs docstring.
    freeze_time = np.asarray(time, dtype=np.int32)
    freeze_index = np.asarray(freezeIndex, dtype=np.float32)
    locomotor_freeze_index = np.asarray(sumLocoFreeze, dtype=np.float32)
    return freeze_time, freeze_index, locomotor_freeze_index
[ "def", "freeze_of_gait", "(", "self", ",", "x", ")", ":", "data", "=", "self", ".", "resample_signal", "(", "x", ")", ".", "values", "f_res", "=", "self", ".", "sampling_frequency", "/", "self", ".", "window", "f_nr_LBs", "=", "int", "(", "self", ".", ...
This method assess freeze of gait following :cite:`g-BachlinPRMHGT10`. :param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc. :type x: pandas.Series :return freeze_time: What times do freeze of gait events occur. [measured in time (h:m:s)] :rtype freeze_time: numpy.ndarray :return freeze_indexe: Freeze Index is defined as the power in the “freeze” band [3–8 Hz] divided by the power in the “locomotor” band [0.5–3 Hz] [3]. [measured in Hz] :rtype freeze_indexe: numpy.ndarray :return list locomotor_freeze_index: Locomotor freeze index is the power in the “freeze” band [3–8 Hz] added to power in the “locomotor” band [0.5–3 Hz]. [measured in Hz] :rtype locomotor_freeze_index: numpy.ndarray
[ "This", "method", "assess", "freeze", "of", "gait", "following", ":", "cite", ":", "g", "-", "BachlinPRMHGT10", "." ]
python
train
payplug/payplug-python
payplug/resources.py
https://github.com/payplug/payplug-python/blob/42dec9d6bff420dd0c26e51a84dd000adff04331/payplug/resources.py#L81-L91
def _initialize(self, **resource_attributes): """ Initialize a resource. Default behavior is just to set all the attributes. You may want to override this. :param resource_attributes: The resource attributes """ self._set_attributes(**resource_attributes) for attribute, attribute_type in list(self._mapper.items()): if attribute in resource_attributes and isinstance(resource_attributes[attribute], dict): setattr(self, attribute, attribute_type(**resource_attributes[attribute]))
[ "def", "_initialize", "(", "self", ",", "*", "*", "resource_attributes", ")", ":", "self", ".", "_set_attributes", "(", "*", "*", "resource_attributes", ")", "for", "attribute", ",", "attribute_type", "in", "list", "(", "self", ".", "_mapper", ".", "items", ...
Initialize a resource. Default behavior is just to set all the attributes. You may want to override this. :param resource_attributes: The resource attributes
[ "Initialize", "a", "resource", ".", "Default", "behavior", "is", "just", "to", "set", "all", "the", "attributes", ".", "You", "may", "want", "to", "override", "this", "." ]
python
train
brainiak/brainiak
brainiak/utils/utils.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L697-L777
def phase_randomize(data, voxelwise=False, random_state=None):
    """Randomize phase of time series across subjects

    For each subject, apply Fourier transform to voxel time series
    and then randomly shift the phase of each frequency before inverting
    back into the time domain. This yields time series with the same power
    spectrum (and thus the same autocorrelation) as the original time series
    but will remove any meaningful temporal relationships among time series
    across subjects. By default (voxelwise=False), the same phase shift is
    applied across all voxels; however if voxelwise=True, different random
    phase shifts are applied to each voxel. The typical input is a time by
    voxels by subjects ndarray. The first dimension is assumed to be the
    time dimension and will be phase randomized. If a 2-dimensional ndarray
    is provided, the last dimension is assumed to be subjects, and different
    phase randomizations will be applied to each subject.

    The implementation is based on the work in [Lerner2011]_ and
    [Simony2016]_.

    Parameters
    ----------
    data : ndarray (n_TRs x n_voxels x n_subjects)
        Data to be phase randomized (per subject)

    voxelwise : bool, default: False
        Apply same (False) or different (True) randomizations across voxels

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.

    Returns
    ----------
    shifted_data : ndarray (n_TRs x n_voxels x n_subjects)
        Phase-randomized time series
    """
    # Check if input is 2-dimensional
    data_ndim = data.ndim

    # Get basic shape of data
    data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)

    # Random seed to be deterministically re-randomized at each iteration
    if isinstance(random_state, np.random.RandomState):
        prng = random_state
    else:
        prng = np.random.RandomState(random_state)

    # Get randomized phase shifts. Indexing starts at 1 (not 0) because the
    # DC component (frequency 0) — and, for even lengths, the Nyquist bin —
    # carry no phase that can be randomized while keeping the output real.
    if n_TRs % 2 == 0:
        pos_freq = np.arange(1, data.shape[0] // 2)
        neg_freq = np.arange(data.shape[0] - 1, data.shape[0] // 2, -1)
    else:
        pos_freq = np.arange(1, (data.shape[0] - 1) // 2 + 1)
        neg_freq = np.arange(data.shape[0] - 1, (data.shape[0] - 1) // 2, -1)

    # BUG FIX: use np.pi instead of np.math.pi — the numpy.math alias was
    # deprecated and removed in NumPy 2.0.
    if not voxelwise:
        phase_shifts = (prng.rand(len(pos_freq), 1, n_subjects)
                        * 2 * np.pi)
    else:
        phase_shifts = (prng.rand(len(pos_freq), n_voxels, n_subjects)
                        * 2 * np.pi)

    # Fast Fourier transform along time dimension of data
    fft_data = fft(data, axis=0)

    # Shift pos and neg frequencies symmetrically, to keep signal real
    fft_data[pos_freq, :, :] *= np.exp(1j * phase_shifts)
    fft_data[neg_freq, :, :] *= np.exp(-1j * phase_shifts)

    # Inverse FFT to put data back in time domain
    shifted_data = np.real(ifft(fft_data, axis=0))

    # Go back to 2-dimensions if input was 2-dimensional
    if data_ndim == 2:
        shifted_data = shifted_data[:, 0, :]

    return shifted_data
[ "def", "phase_randomize", "(", "data", ",", "voxelwise", "=", "False", ",", "random_state", "=", "None", ")", ":", "# Check if input is 2-dimensional", "data_ndim", "=", "data", ".", "ndim", "# Get basic shape of data", "data", ",", "n_TRs", ",", "n_voxels", ",", ...
Randomize phase of time series across subjects For each subject, apply Fourier transform to voxel time series and then randomly shift the phase of each frequency before inverting back into the time domain. This yields time series with the same power spectrum (and thus the same autocorrelation) as the original time series but will remove any meaningful temporal relationships among time series across subjects. By default (voxelwise=False), the same phase shift is applied across all voxels; however if voxelwise=True, different random phase shifts are applied to each voxel. The typical input is a time by voxels by subjects ndarray. The first dimension is assumed to be the time dimension and will be phase randomized. If a 2-dimensional ndarray is provided, the last dimension is assumed to be subjects, and different phase randomizations will be applied to each subject. The implementation is based on the work in [Lerner2011]_ and [Simony2016]_. Parameters ---------- data : ndarray (n_TRs x n_voxels x n_subjects) Data to be phase randomized (per subject) voxelwise : bool, default: False Apply same (False) or different (True) randomizations across voxels random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. Returns ---------- shifted_data : ndarray (n_TRs x n_voxels x n_subjects) Phase-randomized time series
[ "Randomize", "phase", "of", "time", "series", "across", "subjects" ]
python
train
mabuchilab/QNET
src/qnet/algebra/_rules.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/_rules.py#L823-L864
def _get_common_block_structure(lhs_bs, rhs_bs):
    """For two block structures ``aa = (a1, a2, ..., an)``,
    ``bb = (b1, b2, ..., bm)`` generate the maximal common block structure so
    that every block from aa and bb is contained in exactly one block of the
    resulting structure. This is useful for determining how to apply the
    distributive law when feeding two concatenated Circuit objects into each
    other.

    Examples:
        ``(1, 1, 1), (2, 1) -> (2, 1)``
        ``(1, 1, 2, 1), (2, 1, 2) -> (2, 3)``

    Args:
        lhs_bs (tuple): first block structure
        rhs_bs (tuple): second block structure
    """
    # For convenience the arguments may also be Circuit objects.
    if isinstance(lhs_bs, Circuit):
        lhs_bs = lhs_bs.block_structure
    if isinstance(rhs_bs, Circuit):
        rhs_bs = rhs_bs.block_structure

    if sum(lhs_bs) != sum(rhs_bs):
        raise IncompatibleBlockStructures(
            'Blockstructures have different total channel numbers.')
    if len(lhs_bs) == len(rhs_bs) == 0:
        return ()

    # Grow prefixes of both structures until their channel counts agree;
    # that shared count is the next block of the common structure.
    nl = nr = 1
    left_total = sum(lhs_bs[:nl])
    right_total = sum(rhs_bs[:nr])
    while left_total != right_total:
        if left_total < right_total:
            nl += 1
            left_total = sum(lhs_bs[:nl])
        else:
            nr += 1
            right_total = sum(rhs_bs[:nr])
    # Recurse on the remaining (unconsumed) tails.
    return (left_total, ) + _get_common_block_structure(lhs_bs[nl:], rhs_bs[nr:])
[ "def", "_get_common_block_structure", "(", "lhs_bs", ",", "rhs_bs", ")", ":", "# for convenience the arguments may also be Circuit objects", "if", "isinstance", "(", "lhs_bs", ",", "Circuit", ")", ":", "lhs_bs", "=", "lhs_bs", ".", "block_structure", "if", "isinstance",...
For two block structures ``aa = (a1, a2, ..., an)``, ``bb = (b1, b2, ..., bm)`` generate the maximal common block structure so that every block from aa and bb is contained in exactly one block of the resulting structure. This is useful for determining how to apply the distributive law when feeding two concatenated Circuit objects into each other. Examples: ``(1, 1, 1), (2, 1) -> (2, 1)`` ``(1, 1, 2, 1), (2, 1, 2) -> (2, 3)`` Args: lhs_bs (tuple): first block structure rhs_bs (tuple): second block structure
[ "For", "two", "block", "structures", "aa", "=", "(", "a1", "a2", "...", "an", ")", "bb", "=", "(", "b1", "b2", "...", "bm", ")", "generate", "the", "maximal", "common", "block", "structure", "so", "that", "every", "block", "from", "aa", "and", "bb", ...
python
train
F5Networks/f5-common-python
f5-sdk-dist/scripts/configure.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5-sdk-dist/scripts/configure.py#L122-L131
def export_to_json(env):
    """export_to_json

    Store the given environment dictionary as pretty-printed JSON (sorted
    keys) in the config.JSON file under ``env['scripts']``.
    """
    destination = env['scripts'] + "/config.JSON"
    serialized = json.dumps(env, sort_keys=True, indent=4,
                            separators=(',', ': '))
    with open(destination, 'w') as out:
        out.write(serialized)
[ "def", "export_to_json", "(", "env", ")", ":", "json_fl", "=", "env", "[", "'scripts'", "]", "+", "\"/config.JSON\"", "with", "open", "(", "json_fl", ",", "'w'", ")", "as", "fh", ":", "fh", ".", "write", "(", "json", ".", "dumps", "(", "env", ",", ...
export_to_json This function takes in a dictionary object and stores it within the config.JSON file.
[ "export_to_json" ]
python
train
fredRos/pypmc
pypmc/sampler/importance_sampling.py
https://github.com/fredRos/pypmc/blob/9138b67c976f0d58edd080353d16769a47794d09/pypmc/sampler/importance_sampling.py#L158-L195
def run(self, N=1, trace_sort=False):
    '''Run the sampler, store the history of visited points into the member
    variable ``self.samples`` and the importance weights into
    ``self.weights``.

    .. seealso::
        :py:class:`pypmc.tools.History`

    :param N:
        Integer; the number of samples to be drawn.

    :param trace_sort:
        Bool; if True, return an array containing the responsible
        component of ``self.proposal`` for each sample generated during
        this run.

        .. note::
            This option only works for proposals of type
            :py:class:`pypmc.density.mixture.MixtureDensity`

        .. note::
            If True, the samples will be ordered by the components.
    '''
    # Nothing to draw for an empty run.
    if N == 0:
        return 0

    if not trace_sort:
        samples = self._get_samples(N, trace_sort=False)
        self._calculate_weights(samples, N)
        return None

    samples, origin = self._get_samples(N, trace_sort=True)
    self._calculate_weights(samples, N)
    return origin
[ "def", "run", "(", "self", ",", "N", "=", "1", ",", "trace_sort", "=", "False", ")", ":", "if", "N", "==", "0", ":", "return", "0", "if", "trace_sort", ":", "this_samples", ",", "origin", "=", "self", ".", "_get_samples", "(", "N", ",", "trace_sort...
Run the sampler, store the history of visited points into the member variable ``self.samples`` and the importance weights into ``self.weights``. .. seealso:: :py:class:`pypmc.tools.History` :param N: Integer; the number of samples to be drawn. :param trace_sort: Bool; if True, return an array containing the responsible component of ``self.proposal`` for each sample generated during this run. .. note:: This option only works for proposals of type :py:class:`pypmc.density.mixture.MixtureDensity` .. note:: If True, the samples will be ordered by the components.
[ "Run", "the", "sampler", "store", "the", "history", "of", "visited", "points", "into", "the", "member", "variable", "self", ".", "samples", "and", "the", "importance", "weights", "into", "self", ".", "weights", "." ]
python
train
click-contrib/sphinx-click
sphinx_click/ext.py
https://github.com/click-contrib/sphinx-click/blob/ec76d15697ec80e51486a6e3daa0aec60b04870f/sphinx_click/ext.py#L294-L329
def _load_module(self, module_path): """Load the module.""" # __import__ will fail on unicode, # so we ensure module path is a string here. module_path = str(module_path) try: module_name, attr_name = module_path.split(':', 1) except ValueError: # noqa raise self.error( '"{}" is not of format "module:parser"'.format(module_path)) try: mod = __import__(module_name, globals(), locals(), [attr_name]) except (Exception, SystemExit) as exc: # noqa err_msg = 'Failed to import "{}" from "{}". '.format( attr_name, module_name) if isinstance(exc, SystemExit): err_msg += 'The module appeared to call sys.exit()' else: err_msg += 'The following exception was raised:\n{}'.format( traceback.format_exc()) raise self.error(err_msg) if not hasattr(mod, attr_name): raise self.error('Module "{}" has no attribute "{}"'.format( module_name, attr_name)) parser = getattr(mod, attr_name) if not isinstance(parser, click.BaseCommand): raise self.error('"{}" of type "{}" is not derived from ' '"click.BaseCommand"'.format( type(parser), module_path)) return parser
[ "def", "_load_module", "(", "self", ",", "module_path", ")", ":", "# __import__ will fail on unicode,", "# so we ensure module path is a string here.", "module_path", "=", "str", "(", "module_path", ")", "try", ":", "module_name", ",", "attr_name", "=", "module_path", "...
Load the module.
[ "Load", "the", "module", "." ]
python
train
veripress/veripress
veripress/model/storages.py
https://github.com/veripress/veripress/blob/9e3df3a10eb1db32da596bf52118fe6acbe4b14a/veripress/model/storages.py#L451-L479
def get_pages(self, include_draft=False):
    """
    Get all custom pages (supported formats, excluding other files
    like '.js', '.css', '.html').

    :param include_draft: return draft page or not
    :return: an iterable of Page objects
    """
    pages_root = os.path.join(current_app.instance_path, 'pages')
    result = []
    for file_path in traverse_directory(pages_root, yield_dir=False):
        rel_path = os.path.relpath(file_path, pages_root)
        rel_path, ext = os.path.splitext(rel_path)
        # Skip files whose extension is not a supported source format.
        if not ext or ext == '.' or get_standard_format_name(
                ext[1:]) is None:
            continue  # pragma: no cover
        if rel_path.endswith(os.path.sep + 'index'):
            # '<dir>/index.*' maps to the directory URL itself.
            rel_path = rel_path[:-len('index')]
        else:
            rel_path += '.html'
        page = self.get_page(rel_path.replace(os.path.sep, '/'),
                             include_draft=include_draft)
        if page is not None:
            result.append(page)
    return result
[ "def", "get_pages", "(", "self", ",", "include_draft", "=", "False", ")", ":", "def", "pages_generator", "(", "pages_root_path", ")", ":", "for", "file_path", "in", "traverse_directory", "(", "pages_root_path", ",", "yield_dir", "=", "False", ")", ":", "rel_pa...
Get all custom pages (supported formats, excluding other files like '.js', '.css', '.html'). :param include_draft: return draft page or not :return: an iterable of Page objects
[ "Get", "all", "custom", "pages", "(", "supported", "formats", "excluding", "other", "files", "like", ".", "js", ".", "css", ".", "html", ")", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/vi/csiszar_divergence.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L462-L503
def t_power(logu, t, self_normalized=False, name=None):
  """The T-Power Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the T-Power Csiszar-function is:

  ```none
  f(u) = s [ u**t - 1 - t(u - 1) ]
  s = { -1   0 < t < 1
      { +1   otherwise
  ```

  When `self_normalized = False` the `- t(u - 1)` term is omitted.

  This is similar to the `amari_alpha` Csiszar-function, with the associated
  divergence being the same up to factors depending only on `t`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    t:  `Tensor` of same `dtype` as `logu` and broadcastable shape.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
      `u = exp(logu)`.
  """
  with tf.compat.v1.name_scope(name, "t_power", [logu, t]):
    logu = tf.convert_to_tensor(value=logu, name="logu")
    t = tf.convert_to_tensor(value=t, dtype=logu.dtype.base_dtype, name="t")
    # s = -1 when 0 < t < 1, and +1 otherwise.
    sign = tf.where(tf.logical_and(0. < t, t < 1.),
                    -tf.ones_like(t),
                    tf.ones_like(t))
    if self_normalized:
      # f(u) = u**t - 1 - t(u - 1), expressed via expm1 for stability.
      magnitude = tf.math.expm1(t * logu) - t * tf.math.expm1(logu)
    else:
      magnitude = tf.math.expm1(t * logu)
    return sign * magnitude
[ "def", "t_power", "(", "logu", ",", "t", ",", "self_normalized", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"t_power\"", ",", "[", "logu", ",", "t", "]", ")", "...
The T-Power Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True` the T-Power Csiszar-function is: ```none f(u) = s [ u**t - 1 - t(u - 1) ] s = { -1 0 < t < 1 { +1 otherwise ``` When `self_normalized = False` the `- t(u - 1)` term is omitted. This is similar to the `amari_alpha` Csiszar-function, with the associated divergence being the same up to factors depending only on `t`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. t: `Tensor` of same `dtype` as `logu` and broadcastable shape. self_normalized: Python `bool` indicating whether `f'(u=1)=0`. name: Python `str` name prefixed to Ops created by this function. Returns: t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`.
[ "The", "T", "-", "Power", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
python
test
respondcreate/django-versatileimagefield
versatileimagefield/versatileimagefield.py
https://github.com/respondcreate/django-versatileimagefield/blob/d41e279c39cccffafbe876c67596184704ae8877/versatileimagefield/versatileimagefield.py#L30-L122
def crop_on_centerpoint(self, image, width, height, ppoi=(0.5, 0.5)):
    """
    Return a PIL Image instance cropped from `image`.

    Image has an aspect ratio provided by dividing `width` / `height`),
    sized down to `width`x`height`. Any 'excess pixels' are trimmed away
    in respect to the pixel of `image` that corresponds to `ppoi` (Primary
    Point of Interest).

    `image`: A PIL Image instance
    `width`: Integer, width of the image to return (in pixels)
    `height`: Integer, height of the image to return (in pixels)
    `ppoi`: A 2-tuple of floats with values greater than 0 and less than 1
            These values are converted into a cartesian coordinate that
            signifies the 'center pixel' which the crop will center on
            (to trim the excess from the 'long side').

    Determines whether to trim away pixels from either the left/right or
    top/bottom sides by comparing the aspect ratio of `image` vs the
    aspect ratio of `width`x`height`.

    Will trim from the left/right sides if the aspect ratio of `image` is
    greater-than-or-equal-to the aspect ratio of `width`x`height`.

    Will trim from the top/bottom sides if the aspect ratio of `image`
    is less-than the aspect ratio of `width`x`height`.

    Similar to Kevin Cazabon's ImageOps.fit method but uses the ppoi
    value as an absolute centerpoint (as opposed as a percentage to trim
    off the 'long sides').
    """
    # Translate the fractional PPOI into an absolute pixel coordinate.
    ppoi_x_axis = int(image.size[0] * ppoi[0])
    ppoi_y_axis = int(image.size[1] * ppoi[1])
    center_pixel_coord = (ppoi_x_axis, ppoi_y_axis)
    # Calculate the aspect ratio of `image`
    orig_aspect_ratio = float(
        image.size[0]
    ) / float(
        image.size[1]
    )
    crop_aspect_ratio = float(width) / float(height)

    # Figure out if we're trimming from the left/right or top/bottom
    if orig_aspect_ratio >= crop_aspect_ratio:
        # `image` is wider than what's needed,
        # crop from left/right sides
        orig_crop_width = int(
            (crop_aspect_ratio * float(image.size[1])) + 0.5
        )
        orig_crop_height = image.size[1]
        crop_boundary_top = 0
        crop_boundary_bottom = orig_crop_height
        crop_boundary_left = center_pixel_coord[0] - (orig_crop_width // 2)
        crop_boundary_right = crop_boundary_left + orig_crop_width
        # Clamp the crop window so it never extends past the image edges.
        if crop_boundary_left < 0:
            crop_boundary_left = 0
            crop_boundary_right = crop_boundary_left + orig_crop_width
        elif crop_boundary_right > image.size[0]:
            crop_boundary_right = image.size[0]
            crop_boundary_left = image.size[0] - orig_crop_width
    else:
        # `image` is taller than what's needed,
        # crop from top/bottom sides
        orig_crop_width = image.size[0]
        orig_crop_height = int(
            (float(image.size[0]) / crop_aspect_ratio) + 0.5
        )
        crop_boundary_left = 0
        crop_boundary_right = orig_crop_width
        crop_boundary_top = center_pixel_coord[1] - (orig_crop_height // 2)
        crop_boundary_bottom = crop_boundary_top + orig_crop_height
        # Clamp the crop window so it never extends past the image edges.
        if crop_boundary_top < 0:
            crop_boundary_top = 0
            crop_boundary_bottom = crop_boundary_top + orig_crop_height
        elif crop_boundary_bottom > image.size[1]:
            crop_boundary_bottom = image.size[1]
            crop_boundary_top = image.size[1] - orig_crop_height
    # Cropping the image from the original image
    cropped_image = image.crop(
        (
            crop_boundary_left,
            crop_boundary_top,
            crop_boundary_right,
            crop_boundary_bottom
        )
    )
    # Resizing the newly cropped image to the size specified
    # (as determined by `width`x`height`).
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is
    # the long-standing equivalent alias and works on old and new Pillow.
    return cropped_image.resize(
        (width, height),
        Image.LANCZOS
    )
[ "def", "crop_on_centerpoint", "(", "self", ",", "image", ",", "width", ",", "height", ",", "ppoi", "=", "(", "0.5", ",", "0.5", ")", ")", ":", "ppoi_x_axis", "=", "int", "(", "image", ".", "size", "[", "0", "]", "*", "ppoi", "[", "0", "]", ")", ...
Return a PIL Image instance cropped from `image`. Image has an aspect ratio provided by dividing `width` / `height`), sized down to `width`x`height`. Any 'excess pixels' are trimmed away in respect to the pixel of `image` that corresponds to `ppoi` (Primary Point of Interest). `image`: A PIL Image instance `width`: Integer, width of the image to return (in pixels) `height`: Integer, height of the image to return (in pixels) `ppoi`: A 2-tuple of floats with values greater than 0 and less than 1 These values are converted into a cartesian coordinate that signifies the 'center pixel' which the crop will center on (to trim the excess from the 'long side'). Determines whether to trim away pixels from either the left/right or top/bottom sides by comparing the aspect ratio of `image` vs the aspect ratio of `width`x`height`. Will trim from the left/right sides if the aspect ratio of `image` is greater-than-or-equal-to the aspect ratio of `width`x`height`. Will trim from the top/bottom sides if the aspect ration of `image` is less-than the aspect ratio or `width`x`height`. Similar to Kevin Cazabon's ImageOps.fit method but uses the ppoi value as an absolute centerpoint (as opposed as a percentage to trim off the 'long sides').
[ "Return", "a", "PIL", "Image", "instance", "cropped", "from", "image", "." ]
python
test
bitesofcode/projexui
projexui/widgets/xorbcolumnedit/xorbcolumnedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbcolumnedit/xorbcolumnedit.py#L229-L238
def setValue(self, value):
    """
    Sets the value for this edit to the inputted value.

    :param      value | <variant>

    :return     <bool> | True if an editor was present to receive the value
    """
    editor = self._editor
    if not editor:
        return False
    editor.setValue(value)
    return True
[ "def", "setValue", "(", "self", ",", "value", ")", ":", "if", "(", "self", ".", "_editor", ")", ":", "self", ".", "_editor", ".", "setValue", "(", "value", ")", "return", "True", "return", "False" ]
Sets the value for this edit to the inputted value. :param value | <variant>
[ "Sets", "the", "value", "for", "this", "edit", "to", "the", "inputed", "value", ".", ":", "param", "value", "|", "<variant", ">" ]
python
train
annoviko/pyclustering
pyclustering/utils/__init__.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/utils/__init__.py#L976-L1018
def draw_dynamics_set(dynamics, xtitle = None, ytitle = None, xlim = None, ylim = None, xlabels = False, ylabels = False): """! @brief Draw lists of dynamics of neurons (oscillators) in the network. @param[in] dynamics (list): List of network outputs that are represented by values of output of oscillators (used by y axis). @param[in] xtitle (string): Title for Y. @param[in] ytitle (string): Title for X. @param[in] xlim (double): X limit. @param[in] ylim (double): Y limit. @param[in] xlabels (bool): If True - shows X labels. @param[in] ylabels (bool): If True - shows Y labels. """ # Calculate edge for confortable representation. number_dynamics = len(dynamics); if (number_dynamics == 1): draw_dynamics(dynamics[0][0], dynamics[0][1], xtitle, ytitle, xlim, ylim, xlabels, ylabels); return; number_cols = int(numpy.ceil(number_dynamics ** 0.5)); number_rows = int(numpy.ceil(number_dynamics / number_cols)); real_index = 0, 0; double_indexer = True; if ( (number_cols == 1) or (number_rows == 1) ): real_index = 0; double_indexer = False; (_, axarr) = plt.subplots(number_rows, number_cols); #plt.setp([ax for ax in axarr], visible = False); for dynamic in dynamics: axarr[real_index] = draw_dynamics(dynamic[0], dynamic[1], xtitle, ytitle, xlim, ylim, xlabels, ylabels, axes = axarr[real_index]); #plt.setp(axarr[real_index], visible = True); if (double_indexer is True): real_index = real_index[0], real_index[1] + 1; if (real_index[1] >= number_cols): real_index = real_index[0] + 1, 0; else: real_index += 1; plt.show();
[ "def", "draw_dynamics_set", "(", "dynamics", ",", "xtitle", "=", "None", ",", "ytitle", "=", "None", ",", "xlim", "=", "None", ",", "ylim", "=", "None", ",", "xlabels", "=", "False", ",", "ylabels", "=", "False", ")", ":", "# Calculate edge for confortable...
! @brief Draw lists of dynamics of neurons (oscillators) in the network. @param[in] dynamics (list): List of network outputs that are represented by values of output of oscillators (used by y axis). @param[in] xtitle (string): Title for Y. @param[in] ytitle (string): Title for X. @param[in] xlim (double): X limit. @param[in] ylim (double): Y limit. @param[in] xlabels (bool): If True - shows X labels. @param[in] ylabels (bool): If True - shows Y labels.
[ "!" ]
python
valid
dps/simplescheduler
simplescheduler/cli.py
https://github.com/dps/simplescheduler/blob/d633549a8b78d5c1ff37419f4970835f1c6a5947/simplescheduler/cli.py#L24-L42
def main(): """ SimpleScheduler redis parameters will be read from environment variables: REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_KEY (password) """ args = parser.parse_args() scheduler = Scheduler() print 'Start %s' % scheduler.scheduler_id scheduler.interval = args.interval if args.keepalive: scheduler.run(once=True) keepalive = Job('simplescheduler.keepalive', args=[0, scheduler.get_running_scheduler_id(), args.interval * 2]) scheduler.schedule(keepalive, long(time.time() * 1000000)) scheduler._run()
[ "def", "main", "(", ")", ":", "args", "=", "parser", ".", "parse_args", "(", ")", "scheduler", "=", "Scheduler", "(", ")", "print", "'Start %s'", "%", "scheduler", ".", "scheduler_id", "scheduler", ".", "interval", "=", "args", ".", "interval", "if", "ar...
SimpleScheduler redis parameters will be read from environment variables: REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_KEY (password)
[ "SimpleScheduler", "redis", "parameters", "will", "be", "read", "from", "environment", "variables", ":", "REDIS_HOST", "REDIS_PORT", "REDIS_DB", "REDIS_KEY", "(", "password", ")" ]
python
train
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/pylibdmtx.py
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/pylibdmtx.py#L109-L125
def _region(decoder, timeout): """A context manager for `DmtxRegion`, created and destroyed by `dmtxRegionFindNext` and `dmtxRegionDestroy`. Args: decoder (POINTER(DmtxDecode)): timeout (int or None): Yields: DmtxRegion: The next region or None, if all regions have been found. """ region = dmtxRegionFindNext(decoder, timeout) try: yield region finally: if region: dmtxRegionDestroy(byref(region))
[ "def", "_region", "(", "decoder", ",", "timeout", ")", ":", "region", "=", "dmtxRegionFindNext", "(", "decoder", ",", "timeout", ")", "try", ":", "yield", "region", "finally", ":", "if", "region", ":", "dmtxRegionDestroy", "(", "byref", "(", "region", ")",...
A context manager for `DmtxRegion`, created and destroyed by `dmtxRegionFindNext` and `dmtxRegionDestroy`. Args: decoder (POINTER(DmtxDecode)): timeout (int or None): Yields: DmtxRegion: The next region or None, if all regions have been found.
[ "A", "context", "manager", "for", "DmtxRegion", "created", "and", "destroyed", "by", "dmtxRegionFindNext", "and", "dmtxRegionDestroy", "." ]
python
train
proycon/pynlpl
pynlpl/statistics.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/statistics.py#L64-L69
def save(self, filename, addnormalised=False): """Save a frequency list to file, can be loaded later using the load method""" f = io.open(filename,'w',encoding='utf-8') for line in self.output("\t", addnormalised): f.write(line + '\n') f.close()
[ "def", "save", "(", "self", ",", "filename", ",", "addnormalised", "=", "False", ")", ":", "f", "=", "io", ".", "open", "(", "filename", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "for", "line", "in", "self", ".", "output", "(", "\"\\t\"", "...
Save a frequency list to file, can be loaded later using the load method
[ "Save", "a", "frequency", "list", "to", "file", "can", "be", "loaded", "later", "using", "the", "load", "method" ]
python
train
edx/edx-enterprise
enterprise/utils.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/utils.py#L114-L131
def get_identity_provider(provider_id): """ Get Identity Provider with given id. Return: Instance of ProviderConfig or None. """ try: from third_party_auth.provider import Registry # pylint: disable=redefined-outer-name except ImportError as exception: LOGGER.warning("Could not import Registry from third_party_auth.provider") LOGGER.warning(exception) Registry = None # pylint: disable=redefined-outer-name try: return Registry and Registry.get(provider_id) except ValueError: return None
[ "def", "get_identity_provider", "(", "provider_id", ")", ":", "try", ":", "from", "third_party_auth", ".", "provider", "import", "Registry", "# pylint: disable=redefined-outer-name", "except", "ImportError", "as", "exception", ":", "LOGGER", ".", "warning", "(", "\"Co...
Get Identity Provider with given id. Return: Instance of ProviderConfig or None.
[ "Get", "Identity", "Provider", "with", "given", "id", "." ]
python
valid
MrYsLab/pymata-aio
pymata_aio/pymata_iot.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_iot.py#L557-L566
async def stepper_step(self, command): """ This method activates a stepper motor motion. This is a FirmataPlus feature. :param command: {"method": "stepper_step", "params": [SPEED, NUMBER_OF_STEPS]} :returns:No message returned. """ speed = int(command[0]) num_steps = int(command[1]) await self.core.stepper_step(speed, num_steps)
[ "async", "def", "stepper_step", "(", "self", ",", "command", ")", ":", "speed", "=", "int", "(", "command", "[", "0", "]", ")", "num_steps", "=", "int", "(", "command", "[", "1", "]", ")", "await", "self", ".", "core", ".", "stepper_step", "(", "sp...
This method activates a stepper motor motion. This is a FirmataPlus feature. :param command: {"method": "stepper_step", "params": [SPEED, NUMBER_OF_STEPS]} :returns:No message returned.
[ "This", "method", "activates", "a", "stepper", "motor", "motion", ".", "This", "is", "a", "FirmataPlus", "feature", ".", ":", "param", "command", ":", "{", "method", ":", "stepper_step", "params", ":", "[", "SPEED", "NUMBER_OF_STEPS", "]", "}", ":", "retur...
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/address.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/address.py#L32-L47
def setUp(self): '''Look for WS-Address ''' toplist = filter(lambda wsa: wsa.ADDRESS==self.wsAddressURI, WSA_LIST) epr = 'EndpointReferenceType' for WSA in toplist+WSA_LIST: if (self.wsAddressURI is not None and self.wsAddressURI != WSA.ADDRESS) or \ _has_type_definition(WSA.ADDRESS, epr) is True: break else: raise EvaluateException,\ 'enabling wsAddressing requires the inclusion of that namespace' self.wsAddressURI = WSA.ADDRESS self.anonymousURI = WSA.ANONYMOUS self._replyTo = WSA.ANONYMOUS
[ "def", "setUp", "(", "self", ")", ":", "toplist", "=", "filter", "(", "lambda", "wsa", ":", "wsa", ".", "ADDRESS", "==", "self", ".", "wsAddressURI", ",", "WSA_LIST", ")", "epr", "=", "'EndpointReferenceType'", "for", "WSA", "in", "toplist", "+", "WSA_LI...
Look for WS-Address
[ "Look", "for", "WS", "-", "Address" ]
python
train
danpoland/pyramid-restful-framework
pyramid_restful/pagination/pagenumber.py
https://github.com/danpoland/pyramid-restful-framework/blob/4d8c9db44b1869c3d1fdd59ca304c3166473fcbb/pyramid_restful/pagination/pagenumber.py#L98-L109
def count(self): """ Returns the total number of objects, across all pages. """ try: return self.object_list.count() except (AttributeError, TypeError): # AttributeError if object_list has no count() method. # TypeError if object_list.count() requires arguments # (i.e. is of type list). return len(self.object_list)
[ "def", "count", "(", "self", ")", ":", "try", ":", "return", "self", ".", "object_list", ".", "count", "(", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "# AttributeError if object_list has no count() method.", "# TypeError if object_list.count() ...
Returns the total number of objects, across all pages.
[ "Returns", "the", "total", "number", "of", "objects", "across", "all", "pages", "." ]
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/baseviews.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/baseviews.py#L705-L728
def _init_forms(self): """ Init forms for Add and Edit """ super(BaseCRUDView, self)._init_forms() conv = GeneralModelConverter(self.datamodel) if not self.add_form: self.add_form = conv.create_form( self.label_columns, self.add_columns, self.description_columns, self.validators_columns, self.add_form_extra_fields, self.add_form_query_rel_fields, ) if not self.edit_form: self.edit_form = conv.create_form( self.label_columns, self.edit_columns, self.description_columns, self.validators_columns, self.edit_form_extra_fields, self.edit_form_query_rel_fields, )
[ "def", "_init_forms", "(", "self", ")", ":", "super", "(", "BaseCRUDView", ",", "self", ")", ".", "_init_forms", "(", ")", "conv", "=", "GeneralModelConverter", "(", "self", ".", "datamodel", ")", "if", "not", "self", ".", "add_form", ":", "self", ".", ...
Init forms for Add and Edit
[ "Init", "forms", "for", "Add", "and", "Edit" ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QASU/main.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASU/main.py#L273-L284
def QA_SU_save_index_min(engine, client=DATABASE): """save index_min Arguments: engine {[type]} -- [description] Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ engine = select_save_engine(engine) engine.QA_SU_save_index_min(client=client)
[ "def", "QA_SU_save_index_min", "(", "engine", ",", "client", "=", "DATABASE", ")", ":", "engine", "=", "select_save_engine", "(", "engine", ")", "engine", ".", "QA_SU_save_index_min", "(", "client", "=", "client", ")" ]
save index_min Arguments: engine {[type]} -- [description] Keyword Arguments: client {[type]} -- [description] (default: {DATABASE})
[ "save", "index_min" ]
python
train
fananimi/pyzk
zk/base.py
https://github.com/fananimi/pyzk/blob/1a765d616526efdcb4c9adfcc9b1d10f6ed8b938/zk/base.py#L1151-L1159
def reg_event(self, flags): """ reg events """ command = const.CMD_REG_EVENT command_string = pack ("I", flags) cmd_response = self.__send_command(command, command_string) if not cmd_response.get('status'): raise ZKErrorResponse("cant' reg events %i" % flags)
[ "def", "reg_event", "(", "self", ",", "flags", ")", ":", "command", "=", "const", ".", "CMD_REG_EVENT", "command_string", "=", "pack", "(", "\"I\"", ",", "flags", ")", "cmd_response", "=", "self", ".", "__send_command", "(", "command", ",", "command_string",...
reg events
[ "reg", "events" ]
python
train
nagius/snmp_passpersist
snmp_passpersist.py
https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L152-L170
def get_next(self,oid): """Return snmp value for the next OID.""" try: # Nested try..except because of Python 2.4 self.lock.acquire() try: # remove trailing zeroes from the oid while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data: oid = oid[:-2]; return self.get(self.data_idx[self.data_idx.index(oid)+1]) except ValueError: # Not found: try to match partial oid for real_oid in self.data_idx: if real_oid.startswith(oid): return self.get(real_oid) return "NONE" # Unknown OID except IndexError: return "NONE" # End of MIB finally: self.lock.release()
[ "def", "get_next", "(", "self", ",", "oid", ")", ":", "try", ":", "# Nested try..except because of Python 2.4", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "# remove trailing zeroes from the oid", "while", "len", "(", "oid", ")", ">", "0", "and"...
Return snmp value for the next OID.
[ "Return", "snmp", "value", "for", "the", "next", "OID", "." ]
python
train
openpaperwork/paperwork-backend
paperwork_backend/pdf/page.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/pdf/page.py#L150-L193
def __get_boxes(self): """ Get all the word boxes of this page. """ if self.__boxes is not None: return self.__boxes # Check first if there is an OCR file available boxfile = self.__get_box_path() if self.fs.exists(boxfile): box_builder = pyocr.builders.LineBoxBuilder() try: with self.fs.open(boxfile, 'r') as file_desc: self.__boxes = box_builder.read_file(file_desc) return self.__boxes except IOError as exc: logger.error("Unable to get boxes for '%s': %s" % (self.doc.docid, exc)) # will fall back on pdf boxes # fall back on what libpoppler tells us txt = self.pdf_page.get_text() self.__boxes = [] layout = self.pdf_page.get_text_layout() if not layout[0]: layout = [] return self.__boxes layout = layout[1] for (line, line_rects) in custom_split( txt, layout, lambda x: x == "\n" ): words = [] for (word, word_rects) in custom_split( line, line_rects, lambda x: x.isspace() ): word_box = PdfWordBox(word, word_rects) words.append(word_box) line_box = PdfLineBox(words, line_rects) self.__boxes.append(line_box) return self.__boxes
[ "def", "__get_boxes", "(", "self", ")", ":", "if", "self", ".", "__boxes", "is", "not", "None", ":", "return", "self", ".", "__boxes", "# Check first if there is an OCR file available", "boxfile", "=", "self", ".", "__get_box_path", "(", ")", "if", "self", "."...
Get all the word boxes of this page.
[ "Get", "all", "the", "word", "boxes", "of", "this", "page", "." ]
python
train
DataDog/integrations-core
cisco_aci/datadog_checks/cisco_aci/helpers.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/cisco_aci/datadog_checks/cisco_aci/helpers.py#L98-L122
def get_event_tags_from_dn(dn): """ This grabs the event tags from the dn designator. They look like this: uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/HDl2IngrPktsAg1h """ tags = [] node = get_node_from_dn(dn) if node: tags.append("node:" + node) app = get_app_from_dn(dn) if app: tags.append("app:" + app) bd = get_bd_from_dn(dn) if bd: tags.append("bd:" + bd) cep = get_cep_from_dn(dn) if cep: tags.append("mac:" + cep) ip = get_ip_from_dn(dn) if ip: tags.append("ip:" + ip) epg = get_epg_from_dn(dn) if epg: tags.append("epg:" + epg) return tags
[ "def", "get_event_tags_from_dn", "(", "dn", ")", ":", "tags", "=", "[", "]", "node", "=", "get_node_from_dn", "(", "dn", ")", "if", "node", ":", "tags", ".", "append", "(", "\"node:\"", "+", "node", ")", "app", "=", "get_app_from_dn", "(", "dn", ")", ...
This grabs the event tags from the dn designator. They look like this: uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/HDl2IngrPktsAg1h
[ "This", "grabs", "the", "event", "tags", "from", "the", "dn", "designator", ".", "They", "look", "like", "this", ":", "uni", "/", "tn", "-", "DataDog", "/", "ap", "-", "DtDg", "-", "AP1", "-", "EcommerceApp", "/", "epg", "-", "DtDg", "-", "Ecomm", ...
python
train
ardydedase/pycouchbase
couchbase-python-cffi/couchbase_ffi/executors.py
https://github.com/ardydedase/pycouchbase/blob/6f010b4d2ef41aead2366878d0cf0b1284c0db0e/couchbase-python-cffi/couchbase_ffi/executors.py#L335-L382
def execute(self, kv, **kwargs): """ Execute the operation scheduling items as needed :param kv: An iterable of keys (or key-values, or Items) :param kwargs: Settings for the operation :return: A MultiResult object """ self._verify_iter(kv) if not len(kv): raise ArgumentError.pyexc(obj=kv, message="No items in container") if isinstance(kv, dict): is_dict = True try: kviter = kv.iteritems() except AttributeError: kviter = iter(kv.items()) else: is_dict = False kviter = iter(kv) is_itmcoll = isinstance(kv, ItemCollection) mres = kwargs.get('_MRES') if mres is None: mres = self.parent._make_mres() self.set_mres_flags(mres, kwargs) C.lcb_sched_enter(self.instance) num_items = 0 while True: # Clear the previous command object C.memset(self.c_command, 0, ffi.sizeof(self.c_command[0])) try: self._invoke_submit(kviter, is_dict, is_itmcoll, mres, kwargs) num_items += 1 except StopIteration: break except: C.lcb_sched_fail(self.instance) raise C.lcb_sched_leave(self.instance) mres._remaining += num_items # print "Execute(): mres:", mres return mres
[ "def", "execute", "(", "self", ",", "kv", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_verify_iter", "(", "kv", ")", "if", "not", "len", "(", "kv", ")", ":", "raise", "ArgumentError", ".", "pyexc", "(", "obj", "=", "kv", ",", "message", "=",...
Execute the operation scheduling items as needed :param kv: An iterable of keys (or key-values, or Items) :param kwargs: Settings for the operation :return: A MultiResult object
[ "Execute", "the", "operation", "scheduling", "items", "as", "needed", ":", "param", "kv", ":", "An", "iterable", "of", "keys", "(", "or", "key", "-", "values", "or", "Items", ")", ":", "param", "kwargs", ":", "Settings", "for", "the", "operation", ":", ...
python
train
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L1659-L1669
def _init_metadata(self, **kwargs): """Initialize form metadata""" osid_objects.OsidObjectForm._init_metadata(self, **kwargs) self._level_default = self._mdata['level']['default_id_values'][0] self._start_time_default = self._mdata['start_time']['default_date_time_values'][0] self._grade_system_default = self._mdata['grade_system']['default_id_values'][0] self._items_shuffled_default = self._mdata['items_shuffled']['default_boolean_values'][0] self._score_system_default = self._mdata['score_system']['default_id_values'][0] self._deadline_default = self._mdata['deadline']['default_date_time_values'][0] self._duration_default = self._mdata['duration']['default_duration_values'][0] self._items_sequential_default = self._mdata['items_sequential']['default_boolean_values'][0]
[ "def", "_init_metadata", "(", "self", ",", "*", "*", "kwargs", ")", ":", "osid_objects", ".", "OsidObjectForm", ".", "_init_metadata", "(", "self", ",", "*", "*", "kwargs", ")", "self", ".", "_level_default", "=", "self", ".", "_mdata", "[", "'level'", "...
Initialize form metadata
[ "Initialize", "form", "metadata" ]
python
train
yyuu/botornado
boto/s3/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/connection.py#L180-L190
def build_post_policy(self, expiration_time, conditions): """ Taken from the AWS book Python examples and modified for use with boto """ assert type(expiration_time) == time.struct_time, \ 'Policy document must include a valid expiration Time object' # Convert conditions object mappings to condition statements return '{"expiration": "%s",\n"conditions": [%s]}' % \ (time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
[ "def", "build_post_policy", "(", "self", ",", "expiration_time", ",", "conditions", ")", ":", "assert", "type", "(", "expiration_time", ")", "==", "time", ".", "struct_time", ",", "'Policy document must include a valid expiration Time object'", "# Convert conditions object ...
Taken from the AWS book Python examples and modified for use with boto
[ "Taken", "from", "the", "AWS", "book", "Python", "examples", "and", "modified", "for", "use", "with", "boto" ]
python
train
saltstack/salt
salt/beacons/wtmp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/wtmp.py#L174-L194
def _validate_time_range(trange, status, msg): ''' Check time range ''' # If trange is empty, just return the current status & msg if not trange: return status, msg if not isinstance(trange, dict): status = False msg = ('The time_range parameter for ' 'wtmp beacon must ' 'be a dictionary.') if not all(k in trange for k in ('start', 'end')): status = False msg = ('The time_range parameter for ' 'wtmp beacon must contain ' 'start & end options.') return status, msg
[ "def", "_validate_time_range", "(", "trange", ",", "status", ",", "msg", ")", ":", "# If trange is empty, just return the current status & msg", "if", "not", "trange", ":", "return", "status", ",", "msg", "if", "not", "isinstance", "(", "trange", ",", "dict", ")",...
Check time range
[ "Check", "time", "range" ]
python
train
nickjj/ansigenome
ansigenome/scan.py
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L236-L267
def gather_defaults(self): """ Return the number of default variables. """ total_defaults = 0 defaults_lines = [] if not os.path.exists(self.paths["defaults"]): # reset the defaults if no defaults were found self.defaults = "" return 0 file = open(self.paths["defaults"], "r") for line in file: if len(line) > 0: first_char = line[0] else: first_char = "" defaults_lines.append(line) if (first_char != "#" and first_char != "-" and first_char != " " and first_char != "\r" and first_char != "\n" and first_char != "\t"): total_defaults += 1 file.close() self.defaults = "".join(defaults_lines) return total_defaults
[ "def", "gather_defaults", "(", "self", ")", ":", "total_defaults", "=", "0", "defaults_lines", "=", "[", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", "[", "\"defaults\"", "]", ")", ":", "# reset the defaults if no defaults w...
Return the number of default variables.
[ "Return", "the", "number", "of", "default", "variables", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/util.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/util.py#L242-L288
def getpackage(filename): """ Find the full dotted package name for a given python source file name. Returns None if the file is not a python source file. >>> getpackage('foo.py') 'foo' >>> getpackage('biff/baf.py') 'baf' >>> getpackage('nose/util.py') 'nose.util' Works for directories too. >>> getpackage('nose') 'nose' >>> getpackage('nose/plugins') 'nose.plugins' And __init__ files stuck onto directories >>> getpackage('nose/plugins/__init__.py') 'nose.plugins' Absolute paths also work. >>> path = os.path.abspath(os.path.join('nose', 'plugins')) >>> getpackage(path) 'nose.plugins' """ src_file = src(filename) if not src_file.endswith('.py') and not ispackage(src_file): return None base, ext = os.path.splitext(os.path.basename(src_file)) if base == '__init__': mod_parts = [] else: mod_parts = [base] path, part = os.path.split(os.path.split(src_file)[0]) while part: if ispackage(os.path.join(path, part)): mod_parts.append(part) else: break path, part = os.path.split(path) mod_parts.reverse() return '.'.join(mod_parts)
[ "def", "getpackage", "(", "filename", ")", ":", "src_file", "=", "src", "(", "filename", ")", "if", "not", "src_file", ".", "endswith", "(", "'.py'", ")", "and", "not", "ispackage", "(", "src_file", ")", ":", "return", "None", "base", ",", "ext", "=", ...
Find the full dotted package name for a given python source file name. Returns None if the file is not a python source file. >>> getpackage('foo.py') 'foo' >>> getpackage('biff/baf.py') 'baf' >>> getpackage('nose/util.py') 'nose.util' Works for directories too. >>> getpackage('nose') 'nose' >>> getpackage('nose/plugins') 'nose.plugins' And __init__ files stuck onto directories >>> getpackage('nose/plugins/__init__.py') 'nose.plugins' Absolute paths also work. >>> path = os.path.abspath(os.path.join('nose', 'plugins')) >>> getpackage(path) 'nose.plugins'
[ "Find", "the", "full", "dotted", "package", "name", "for", "a", "given", "python", "source", "file", "name", ".", "Returns", "None", "if", "the", "file", "is", "not", "a", "python", "source", "file", "." ]
python
test
angr/angr
angr/procedures/stubs/format_parser.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/procedures/stubs/format_parser.py#L414-L424
def _all_spec(self): """ All specifiers and their lengths. """ base = self._mod_spec for spec in self.basic_spec: base[spec] = self.basic_spec[spec] return base
[ "def", "_all_spec", "(", "self", ")", ":", "base", "=", "self", ".", "_mod_spec", "for", "spec", "in", "self", ".", "basic_spec", ":", "base", "[", "spec", "]", "=", "self", ".", "basic_spec", "[", "spec", "]", "return", "base" ]
All specifiers and their lengths.
[ "All", "specifiers", "and", "their", "lengths", "." ]
python
train
SeattleTestbed/seash
pyreadline/modes/basemode.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/modes/basemode.py#L540-L549
def dump_functions(self, e): # () u"""Print all of the functions and their key bindings to the Readline output stream. If a numeric argument is supplied, the output is formatted in such a way that it can be made part of an inputrc file. This command is unbound by default.""" print txt="\n".join(self.rl_settings_to_string()) print txt self._print_prompt() self.finalize()
[ "def", "dump_functions", "(", "self", ",", "e", ")", ":", "# ()\r", "print", "txt", "=", "\"\\n\"", ".", "join", "(", "self", ".", "rl_settings_to_string", "(", ")", ")", "print", "txt", "self", ".", "_print_prompt", "(", ")", "self", ".", "finalize", ...
u"""Print all of the functions and their key bindings to the Readline output stream. If a numeric argument is supplied, the output is formatted in such a way that it can be made part of an inputrc file. This command is unbound by default.
[ "u", "Print", "all", "of", "the", "functions", "and", "their", "key", "bindings", "to", "the", "Readline", "output", "stream", ".", "If", "a", "numeric", "argument", "is", "supplied", "the", "output", "is", "formatted", "in", "such", "a", "way", "that", ...
python
train
briancappello/flask-unchained
flask_unchained/bundles/security/services/security_service.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/services/security_service.py#L165-L186
def change_password(self, user, password, send_email=None): """ Service method to change a user's password. Sends signal `password_changed`. :param user: The :class:`User`'s password to change. :param password: The new password. :param send_email: Whether or not to override the config option ``SECURITY_SEND_PASSWORD_CHANGED_EMAIL`` and force either sending or not sending an email. """ user.password = password self.user_manager.save(user) if send_email or (app.config.SECURITY_SEND_PASSWORD_CHANGED_EMAIL and send_email is None): self.send_mail( _('flask_unchained.bundles.security:email_subject.password_changed_notice'), to=user.email, template='security/email/password_changed_notice.html', user=user) password_changed.send(app._get_current_object(), user=user)
[ "def", "change_password", "(", "self", ",", "user", ",", "password", ",", "send_email", "=", "None", ")", ":", "user", ".", "password", "=", "password", "self", ".", "user_manager", ".", "save", "(", "user", ")", "if", "send_email", "or", "(", "app", "...
Service method to change a user's password. Sends signal `password_changed`. :param user: The :class:`User`'s password to change. :param password: The new password. :param send_email: Whether or not to override the config option ``SECURITY_SEND_PASSWORD_CHANGED_EMAIL`` and force either sending or not sending an email.
[ "Service", "method", "to", "change", "a", "user", "s", "password", "." ]
python
train
tamasgal/km3pipe
km3pipe/db.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L356-L367
def _get_json(self, url): "Get JSON-type content" content = self._get_content('jsonds/' + url) try: json_content = json.loads(content.decode()) except AttributeError: json_content = json.loads(content) if json_content['Comment']: log.warning(json_content['Comment']) if json_content['Result'] != 'OK': raise ValueError('Error while retrieving the parameter list.') return json_content['Data']
[ "def", "_get_json", "(", "self", ",", "url", ")", ":", "content", "=", "self", ".", "_get_content", "(", "'jsonds/'", "+", "url", ")", "try", ":", "json_content", "=", "json", ".", "loads", "(", "content", ".", "decode", "(", ")", ")", "except", "Att...
Get JSON-type content
[ "Get", "JSON", "-", "type", "content" ]
python
train
ralphbean/bugwarrior
bugwarrior/services/__init__.py
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/services/__init__.py#L161-L174
def validate_config(cls, service_config, target): """ Validate generic options for a particular target """ if service_config.has_option(target, 'only_if_assigned'): die("[%s] has an 'only_if_assigned' option. Should be " "'%s.only_if_assigned'." % (target, cls.CONFIG_PREFIX)) if service_config.has_option(target, 'also_unassigned'): die("[%s] has an 'also_unassigned' option. Should be " "'%s.also_unassigned'." % (target, cls.CONFIG_PREFIX)) if service_config.has_option(target, 'default_priority'): die("[%s] has a 'default_priority' option. Should be " "'%s.default_priority'." % (target, cls.CONFIG_PREFIX)) if service_config.has_option(target, 'add_tags'): die("[%s] has an 'add_tags' option. Should be " "'%s.add_tags'." % (target, cls.CONFIG_PREFIX))
[ "def", "validate_config", "(", "cls", ",", "service_config", ",", "target", ")", ":", "if", "service_config", ".", "has_option", "(", "target", ",", "'only_if_assigned'", ")", ":", "die", "(", "\"[%s] has an 'only_if_assigned' option. Should be \"", "\"'%s.only_if_assi...
Validate generic options for a particular target
[ "Validate", "generic", "options", "for", "a", "particular", "target" ]
python
test
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L113-L127
def get_access_flags_string(value): """ Transform an access flag field to the corresponding string :param value: the value of the access flags :type value: int :rtype: string """ flags = [] for k, v in ACCESS_FLAGS.items(): if (k & value) == k: flags.append(v) return " ".join(flags)
[ "def", "get_access_flags_string", "(", "value", ")", ":", "flags", "=", "[", "]", "for", "k", ",", "v", "in", "ACCESS_FLAGS", ".", "items", "(", ")", ":", "if", "(", "k", "&", "value", ")", "==", "k", ":", "flags", ".", "append", "(", "v", ")", ...
Transform an access flag field to the corresponding string :param value: the value of the access flags :type value: int :rtype: string
[ "Transform", "an", "access", "flag", "field", "to", "the", "corresponding", "string" ]
python
train
Rapptz/discord.py
discord/channel.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/channel.py#L520-L528
def members(self): """Returns a list of :class:`Member` that are currently inside this voice channel.""" ret = [] for user_id, state in self.guild._voice_states.items(): if state.channel.id == self.id: member = self.guild.get_member(user_id) if member is not None: ret.append(member) return ret
[ "def", "members", "(", "self", ")", ":", "ret", "=", "[", "]", "for", "user_id", ",", "state", "in", "self", ".", "guild", ".", "_voice_states", ".", "items", "(", ")", ":", "if", "state", ".", "channel", ".", "id", "==", "self", ".", "id", ":", ...
Returns a list of :class:`Member` that are currently inside this voice channel.
[ "Returns", "a", "list", "of", ":", "class", ":", "Member", "that", "are", "currently", "inside", "this", "voice", "channel", "." ]
python
train
saltstack/salt
salt/modules/boto_elb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elb.py#L626-L659
def get_health_check(name, region=None, key=None, keyid=None, profile=None): ''' Get the health check configured for this ELB. CLI example: .. code-block:: bash salt myminion boto_elb.get_health_check myelb ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) retries = 30 while True: try: lb = conn.get_all_load_balancers(load_balancer_names=[name]) lb = lb[0] ret = odict.OrderedDict() hc = lb.health_check ret['interval'] = hc.interval ret['target'] = hc.target ret['healthy_threshold'] = hc.healthy_threshold ret['timeout'] = hc.timeout ret['unhealthy_threshold'] = hc.unhealthy_threshold return ret except boto.exception.BotoServerError as e: if retries and e.code == 'Throttling': log.debug('Throttled by AWS API, will retry in 5 seconds.') time.sleep(5) retries -= 1 continue log.error('ELB %s not found.', name, exc_info_on_logleve=logging.DEBUG) return {}
[ "def", "get_health_check", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "key...
Get the health check configured for this ELB. CLI example: .. code-block:: bash salt myminion boto_elb.get_health_check myelb
[ "Get", "the", "health", "check", "configured", "for", "this", "ELB", "." ]
python
train
asphalt-framework/asphalt-sqlalchemy
asphalt/sqlalchemy/utils.py
https://github.com/asphalt-framework/asphalt-sqlalchemy/blob/5abb7d9977ee92299359b76496ff34624421de05/asphalt/sqlalchemy/utils.py#L8-L27
def clear_database(engine: Connectable, schemas: Iterable[str] = ()) -> None: """ Clear any tables from an existing database. :param engine: the engine or connection to use :param schemas: full list of schema names to expect (ignored for SQLite) """ assert check_argument_types() metadatas = [] all_schemas = (None,) # type: Tuple[Optional[str], ...] all_schemas += tuple(schemas) for schema in all_schemas: # Reflect the schema to get the list of the tables, views and constraints metadata = MetaData() metadata.reflect(engine, schema=schema, views=True) metadatas.append(metadata) for metadata in metadatas: metadata.drop_all(engine, checkfirst=False)
[ "def", "clear_database", "(", "engine", ":", "Connectable", ",", "schemas", ":", "Iterable", "[", "str", "]", "=", "(", ")", ")", "->", "None", ":", "assert", "check_argument_types", "(", ")", "metadatas", "=", "[", "]", "all_schemas", "=", "(", "None", ...
Clear any tables from an existing database. :param engine: the engine or connection to use :param schemas: full list of schema names to expect (ignored for SQLite)
[ "Clear", "any", "tables", "from", "an", "existing", "database", "." ]
python
train
tensorflow/cleverhans
cleverhans/attacks/bapp.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/bapp.py#L522-L536
def select_delta(dist_post_update, current_iteration, clip_max, clip_min, d, theta, constraint): """ Choose the delta at the scale of distance between x and perturbed sample. """ if current_iteration == 1: delta = 0.1 * (clip_max - clip_min) else: if constraint == 'l2': delta = np.sqrt(d) * theta * dist_post_update elif constraint == 'linf': delta = d * theta * dist_post_update return delta
[ "def", "select_delta", "(", "dist_post_update", ",", "current_iteration", ",", "clip_max", ",", "clip_min", ",", "d", ",", "theta", ",", "constraint", ")", ":", "if", "current_iteration", "==", "1", ":", "delta", "=", "0.1", "*", "(", "clip_max", "-", "cli...
Choose the delta at the scale of distance between x and perturbed sample.
[ "Choose", "the", "delta", "at", "the", "scale", "of", "distance", "between", "x", "and", "perturbed", "sample", "." ]
python
train
sorgerlab/indra
indra/sources/medscan/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/medscan/processor.py#L683-L849
def agent_from_entity(self, relation, entity_id): """Create a (potentially grounded) INDRA Agent object from a given Medscan entity describing the subject or object. Uses helper functions to convert a Medscan URN to an INDRA db_refs grounding dictionary. If the entity has properties indicating that it is a protein with a mutation or modification, then constructs the needed ModCondition or MutCondition. Parameters ---------- relation : MedscanRelation The current relation being processed entity_id : str The ID of the entity to process Returns ------- agent : indra.statements.Agent A potentially grounded INDRA agent representing this entity """ # Extract sentence tags mapping ids to the text. We refer to this # mapping only if the entity doesn't appear in the grounded entity # list tags = _extract_sentence_tags(relation.tagged_sentence) if entity_id is None: return None self.num_entities += 1 entity_id = _extract_id(entity_id) if entity_id not in relation.entities and \ entity_id not in tags: # Could not find the entity in either the list of grounded # entities of the items tagged in the sentence. Happens for # a very small percentage of the dataset. self.num_entities_not_found += 1 return None if entity_id not in relation.entities: # The entity is not in the grounded entity list # Instead, make an ungrounded entity, with TEXT corresponding to # the words with the given entity id tagged in the sentence. 
entity_data = tags[entity_id] db_refs = {'TEXT': entity_data['text']} ag = Agent(normalize_medscan_name(db_refs['TEXT']), db_refs=db_refs) return ag, entity_data['bounds'] else: entity = relation.entities[entity_id] bounds = (entity.ch_start, entity.ch_end) prop = entity.properties if len(prop.keys()) == 2 and 'Protein' in prop \ and 'Mutation' in prop: # Handle the special case where the entity is a protein # with a mutation or modification, with those details # described in the entity properties protein = prop['Protein'] assert(len(protein) == 1) protein = protein[0] mutation = prop['Mutation'] assert(len(mutation) == 1) mutation = mutation[0] db_refs, db_name = _urn_to_db_refs(protein.urn) if db_refs is None: return None db_refs['TEXT'] = protein.name if db_name is None: agent_name = db_refs['TEXT'] else: agent_name = db_name # Check mutation.type. Only some types correspond to situations # that can be represented in INDRA; return None if we cannot # map to an INDRA statement (which will block processing of # the statement in process_relation). 
if mutation.type == 'AASite': # Do not handle this # Example: # MedscanEntity(name='D1', urn='urn:agi-aa:D1', # type='AASite', properties=None) return None elif mutation.type == 'Mutation': # Convert mutation properties to an INDRA MutCondition r_old, pos, r_new = _parse_mut_string(mutation.name) if r_old is None: logger.warning('Could not parse mutation string: ' + mutation.name) # Don't create an agent return None else: try: cond = MutCondition(pos, r_old, r_new) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mutations=[cond]) return ag, bounds except BaseException: logger.warning('Could not parse mutation ' + 'string: ' + mutation.name) return None elif mutation.type == 'MethSite': # Convert methylation site information to an INDRA # ModCondition res, pos = _parse_mod_string(mutation.name) if res is None: return None cond = ModCondition('methylation', res, pos) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mods=[cond]) return ag, bounds # Example: # MedscanEntity(name='R457', # urn='urn:agi-s-llid:R457-2185', type='MethSite', # properties=None) elif mutation.type == 'PhosphoSite': # Convert phosphorylation site information to an INDRA # ModCondition res, pos = _parse_mod_string(mutation.name) if res is None: return None cond = ModCondition('phosphorylation', res, pos) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mods=[cond]) return ag, bounds # Example: # MedscanEntity(name='S455', # urn='urn:agi-s-llid:S455-47', type='PhosphoSite', # properties=None) pass elif mutation.type == 'Lysine': # Ambiguous whether this is a methylation or # demethylation; skip # Example: # MedscanEntity(name='K150', # urn='urn:agi-s-llid:K150-5624', type='Lysine', # properties=None) return None else: logger.warning('Processor currently cannot process ' + 'mutations of type ' + mutation.type) else: # Handle the more common case where we just ground the entity # without mutation or modification information db_refs, db_name = 
_urn_to_db_refs(entity.urn) if db_refs is None: return None db_refs['TEXT'] = entity.name if db_name is None: agent_name = db_refs['TEXT'] else: agent_name = db_name ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs) return ag, bounds
[ "def", "agent_from_entity", "(", "self", ",", "relation", ",", "entity_id", ")", ":", "# Extract sentence tags mapping ids to the text. We refer to this", "# mapping only if the entity doesn't appear in the grounded entity", "# list", "tags", "=", "_extract_sentence_tags", "(", "re...
Create a (potentially grounded) INDRA Agent object from a given Medscan entity describing the subject or object. Uses helper functions to convert a Medscan URN to an INDRA db_refs grounding dictionary. If the entity has properties indicating that it is a protein with a mutation or modification, then constructs the needed ModCondition or MutCondition. Parameters ---------- relation : MedscanRelation The current relation being processed entity_id : str The ID of the entity to process Returns ------- agent : indra.statements.Agent A potentially grounded INDRA agent representing this entity
[ "Create", "a", "(", "potentially", "grounded", ")", "INDRA", "Agent", "object", "from", "a", "given", "Medscan", "entity", "describing", "the", "subject", "or", "object", "." ]
python
train
openego/eDisGo
edisgo/data/import_data.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/data/import_data.py#L412-L601
def _determine_aggregated_nodes(la_centers): """Determine generation and load within load areas Parameters ---------- la_centers: list of LVLoadAreaCentre Load Area Centers are Ding0 implementations for representating areas of high population density with high demand compared to DG potential. Notes ----- Currently, MV grid loads are not considered in this aggregation function as Ding0 data does not come with loads in the MV grid level. Returns ------- :obj:`list` of dict aggregated Dict of the structure .. code: {'generation': { 'v_level': { 'subtype': { 'ids': <ids of aggregated generator>, 'capacity'} } }, 'load': { 'consumption': 'residential': <value>, 'retail': <value>, ... } 'aggregates': { 'population': int, 'geom': `shapely.Polygon` } } :obj:`list` aggr_stations List of LV stations its generation and load is aggregated """ def aggregate_generators(gen, aggr): """Aggregate generation capacity per voltage level Parameters ---------- gen: ding0.core.GeneratorDing0 Ding0 Generator object aggr: dict Aggregated generation capacity. For structure see `_determine_aggregated_nodes()`. 
Returns ------- """ if gen.v_level not in aggr['generation']: aggr['generation'][gen.v_level] = {} if gen.type not in aggr['generation'][gen.v_level]: aggr['generation'][gen.v_level][gen.type] = {} if gen.subtype not in aggr['generation'][gen.v_level][gen.type]: aggr['generation'][gen.v_level][gen.type].update( {gen.subtype: {'ids': [gen.id_db], 'capacity': gen.capacity}}) else: aggr['generation'][gen.v_level][gen.type][gen.subtype][ 'ids'].append(gen.id_db) aggr['generation'][gen.v_level][gen.type][gen.subtype][ 'capacity'] += gen.capacity return aggr def aggregate_loads(la_center, aggr): """Aggregate consumption in load area per sector Parameters ---------- la_center: LVLoadAreaCentreDing0 Load area center object from Ding0 Returns ------- """ for s in ['retail', 'industrial', 'agricultural', 'residential']: if s not in aggr['load']: aggr['load'][s] = 0 aggr['load']['retail'] += sum( [_.sector_consumption_retail for _ in la_center.lv_load_area._lv_grid_districts]) aggr['load']['industrial'] += sum( [_.sector_consumption_industrial for _ in la_center.lv_load_area._lv_grid_districts]) aggr['load']['agricultural'] += sum( [_.sector_consumption_agricultural for _ in la_center.lv_load_area._lv_grid_districts]) aggr['load']['residential'] += sum( [_.sector_consumption_residential for _ in la_center.lv_load_area._lv_grid_districts]) return aggr aggregated = {} aggr_stations = [] # ToDo: The variable generation_aggr is further used -> delete this code generation_aggr = {} for la in la_centers[0].grid.grid_district._lv_load_areas: for lvgd in la._lv_grid_districts: for gen in lvgd.lv_grid.generators(): if la.is_aggregated: generation_aggr.setdefault(gen.type, {}) generation_aggr[gen.type].setdefault(gen.subtype, {'ding0': 0}) generation_aggr[gen.type][gen.subtype].setdefault('ding0', 0) generation_aggr[gen.type][gen.subtype]['ding0'] += gen.capacity dingo_import_data = pd.DataFrame(columns=('id', 'capacity', 'agg_geno') ) for la_center in la_centers: aggr = {'generation': 
{}, 'load': {}, 'aggregates': []} # Determine aggregated generation in LV grid for lvgd in la_center.lv_load_area._lv_grid_districts: weather_cell_ids = {} for gen in lvgd.lv_grid.generators(): aggr = aggregate_generators(gen, aggr) # Get the aggregated weather cell id of the area # b if isinstance(gen, GeneratorFluctuatingDing0): if gen.weather_cell_id not in weather_cell_ids.keys(): weather_cell_ids[gen.weather_cell_id] = 1 else: weather_cell_ids[gen.weather_cell_id] += 1 dingo_import_data.loc[len(dingo_import_data)] = \ [int(gen.id_db), gen.capacity, None] # Get the weather cell id that occurs the most if there are any generators if not(list(lvgd.lv_grid.generators())): weather_cell_id = None else: if weather_cell_ids: weather_cell_id = list(weather_cell_ids.keys())[ list(weather_cell_ids.values()).index( max(weather_cell_ids.values()))] else: weather_cell_id = None for v_level in aggr['generation']: for type in aggr['generation'][v_level]: for subtype in aggr['generation'][v_level][type]: # make sure to check if there are any generators before assigning # a weather cell id if not(list(lvgd.lv_grid.generators())): pass else: aggr['generation'][v_level][type][subtype]['weather_cell_id'] = \ weather_cell_id # Determine aggregated load in MV grid # -> Implement once laods in Ding0 MV grids exist # Determine aggregated load in LV grid aggr = aggregate_loads(la_center, aggr) # Collect metadata of aggregated load areas aggr['aggregates'] = { 'population': la_center.lv_load_area.zensus_sum, 'geom': la_center.lv_load_area.geo_area} # Determine LV grids/ stations that are aggregated for _ in la_center.lv_load_area._lv_grid_districts: aggr_stations.append(_.lv_grid.station()) # add elements to lists aggregated.update({la_center.id_db: aggr}) return aggregated, aggr_stations, dingo_import_data
[ "def", "_determine_aggregated_nodes", "(", "la_centers", ")", ":", "def", "aggregate_generators", "(", "gen", ",", "aggr", ")", ":", "\"\"\"Aggregate generation capacity per voltage level\n\n Parameters\n ----------\n gen: ding0.core.GeneratorDing0\n Ding0...
Determine generation and load within load areas Parameters ---------- la_centers: list of LVLoadAreaCentre Load Area Centers are Ding0 implementations for representating areas of high population density with high demand compared to DG potential. Notes ----- Currently, MV grid loads are not considered in this aggregation function as Ding0 data does not come with loads in the MV grid level. Returns ------- :obj:`list` of dict aggregated Dict of the structure .. code: {'generation': { 'v_level': { 'subtype': { 'ids': <ids of aggregated generator>, 'capacity'} } }, 'load': { 'consumption': 'residential': <value>, 'retail': <value>, ... } 'aggregates': { 'population': int, 'geom': `shapely.Polygon` } } :obj:`list` aggr_stations List of LV stations its generation and load is aggregated
[ "Determine", "generation", "and", "load", "within", "load", "areas" ]
python
train
quantum5/2048
_2048/game.py
https://github.com/quantum5/2048/blob/93ada2e3026eaf154e1bbee943d0500c9253e66f/_2048/game.py#L313-L318
def get_tile_location(self, x, y): """Get the screen coordinate for the top-left corner of a tile.""" x1, y1 = self.origin x1 += self.BORDER + (self.BORDER + self.cell_width) * x y1 += self.BORDER + (self.BORDER + self.cell_height) * y return x1, y1
[ "def", "get_tile_location", "(", "self", ",", "x", ",", "y", ")", ":", "x1", ",", "y1", "=", "self", ".", "origin", "x1", "+=", "self", ".", "BORDER", "+", "(", "self", ".", "BORDER", "+", "self", ".", "cell_width", ")", "*", "x", "y1", "+=", "...
Get the screen coordinate for the top-left corner of a tile.
[ "Get", "the", "screen", "coordinate", "for", "the", "top", "-", "left", "corner", "of", "a", "tile", "." ]
python
train
Hackerfleet/hfos
modules/enrol/hfos/enrol/enrolmanager.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/enrol/hfos/enrol/enrolmanager.py#L357-L385
def changepassword(self, event): """An enrolled user wants to change their password""" old = event.data['old'] new = event.data['new'] uuid = event.user.uuid # TODO: Write email to notify user of password change user = objectmodels['user'].find_one({'uuid': uuid}) if std_hash(old, self.salt) == user.passhash: user.passhash = std_hash(new, self.salt) user.save() packet = { 'component': 'hfos.enrol.enrolmanager', 'action': 'changepassword', 'data': True } self.fireEvent(send(event.client.uuid, packet)) self.log('Successfully changed password for user', uuid) else: packet = { 'component': 'hfos.enrol.enrolmanager', 'action': 'changepassword', 'data': False } self.fireEvent(send(event.client.uuid, packet)) self.log('User tried to change password without supplying old one', lvl=warn)
[ "def", "changepassword", "(", "self", ",", "event", ")", ":", "old", "=", "event", ".", "data", "[", "'old'", "]", "new", "=", "event", ".", "data", "[", "'new'", "]", "uuid", "=", "event", ".", "user", ".", "uuid", "# TODO: Write email to notify user of...
An enrolled user wants to change their password
[ "An", "enrolled", "user", "wants", "to", "change", "their", "password" ]
python
train
spyder-ide/spyder
spyder/plugins/onlinehelp/widgets.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/onlinehelp/widgets.py#L126-L130
def text_to_url(self, text): """Convert text address into QUrl object""" if text.startswith('/'): text = text[1:] return QUrl(self.home_url.toString()+text+'.html')
[ "def", "text_to_url", "(", "self", ",", "text", ")", ":", "if", "text", ".", "startswith", "(", "'/'", ")", ":", "text", "=", "text", "[", "1", ":", "]", "return", "QUrl", "(", "self", ".", "home_url", ".", "toString", "(", ")", "+", "text", "+",...
Convert text address into QUrl object
[ "Convert", "text", "address", "into", "QUrl", "object" ]
python
train
LLNL/scraper
scraper/doecode/__init__.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/doecode/__init__.py#L23-L38
def process_url(url, key): """ Yields DOE CODE records from a DOE CODE .json URL response Converts a DOE CODE API .json URL response into DOE CODE projects """ logger.debug('Fetching DOE CODE JSON: %s', url) if key is None: raise ValueError('DOE CODE API Key value is missing!') response = requests.get(url, headers={"Authorization": "Basic " + key}) doecode_json = response.json() for record in doecode_json['records']: yield record
[ "def", "process_url", "(", "url", ",", "key", ")", ":", "logger", ".", "debug", "(", "'Fetching DOE CODE JSON: %s'", ",", "url", ")", "if", "key", "is", "None", ":", "raise", "ValueError", "(", "'DOE CODE API Key value is missing!'", ")", "response", "=", "req...
Yields DOE CODE records from a DOE CODE .json URL response Converts a DOE CODE API .json URL response into DOE CODE projects
[ "Yields", "DOE", "CODE", "records", "from", "a", "DOE", "CODE", ".", "json", "URL", "response", "Converts", "a", "DOE", "CODE", "API", ".", "json", "URL", "response", "into", "DOE", "CODE", "projects" ]
python
test
wbond/oscrypto
oscrypto/_tls.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_tls.py#L129-L148
def parse_alert(server_handshake_bytes): """ Parses the handshake for protocol alerts :param server_handshake_bytes: A byte string of the handshake data received from the server :return: None or an 2-element tuple of integers: 0: 1 (warning) or 2 (fatal) 1: The alert description (see https://tools.ietf.org/html/rfc5246#section-7.2) """ for record_type, _, record_data in parse_tls_records(server_handshake_bytes): if record_type != b'\x15': continue if len(record_data) != 2: return None return (int_from_bytes(record_data[0:1]), int_from_bytes(record_data[1:2])) return None
[ "def", "parse_alert", "(", "server_handshake_bytes", ")", ":", "for", "record_type", ",", "_", ",", "record_data", "in", "parse_tls_records", "(", "server_handshake_bytes", ")", ":", "if", "record_type", "!=", "b'\\x15'", ":", "continue", "if", "len", "(", "reco...
Parses the handshake for protocol alerts :param server_handshake_bytes: A byte string of the handshake data received from the server :return: None or an 2-element tuple of integers: 0: 1 (warning) or 2 (fatal) 1: The alert description (see https://tools.ietf.org/html/rfc5246#section-7.2)
[ "Parses", "the", "handshake", "for", "protocol", "alerts" ]
python
valid
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L182-L202
def account_block_count(self, account): """ Get number of blocks for a specific **account** :param account: Account to get number of blocks for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.account_block_count(account="xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3") 19 """ account = self._process_value(account, 'account') payload = {"account": account} resp = self.call('account_block_count', payload) return int(resp['block_count'])
[ "def", "account_block_count", "(", "self", ",", "account", ")", ":", "account", "=", "self", ".", "_process_value", "(", "account", ",", "'account'", ")", "payload", "=", "{", "\"account\"", ":", "account", "}", "resp", "=", "self", ".", "call", "(", "'a...
Get number of blocks for a specific **account** :param account: Account to get number of blocks for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.account_block_count(account="xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3") 19
[ "Get", "number", "of", "blocks", "for", "a", "specific", "**", "account", "**" ]
python
train
chrisspen/burlap
burlap/files.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/files.py#L334-L340
def remove(self, path, recursive=False, use_sudo=False): """ Remove a file or directory """ func = use_sudo and run_as_root or self.run options = '-r ' if recursive else '' func('/bin/rm {0}{1}'.format(options, quote(path)))
[ "def", "remove", "(", "self", ",", "path", ",", "recursive", "=", "False", ",", "use_sudo", "=", "False", ")", ":", "func", "=", "use_sudo", "and", "run_as_root", "or", "self", ".", "run", "options", "=", "'-r '", "if", "recursive", "else", "''", "func...
Remove a file or directory
[ "Remove", "a", "file", "or", "directory" ]
python
valid
numenta/nupic
src/nupic/encoders/scalar.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/scalar.py#L655-L674
def getBucketInfo(self, buckets): """ See the function description in base.py """ # Get/generate the topDown mapping table #NOTE: although variable topDownMappingM is unused, some (bad-style) actions #are executed during _getTopDownMapping() so this line must stay here topDownMappingM = self._getTopDownMapping() # The "category" is simply the bucket index category = buckets[0] encoding = self._topDownMappingM.getRow(category) # Which input value does this correspond to? if self.periodic: inputVal = (self.minval + (self.resolution / 2.0) + (category * self.resolution)) else: inputVal = self.minval + (category * self.resolution) return [EncoderResult(value=inputVal, scalar=inputVal, encoding=encoding)]
[ "def", "getBucketInfo", "(", "self", ",", "buckets", ")", ":", "# Get/generate the topDown mapping table", "#NOTE: although variable topDownMappingM is unused, some (bad-style) actions", "#are executed during _getTopDownMapping() so this line must stay here", "topDownMappingM", "=", "self"...
See the function description in base.py
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
python
valid
swisscom/cleanerversion
versions/admin.py
https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/admin.py#L144-L152
def get_readonly_fields(self, request, obj=None): """ This is required a subclass of VersionedAdmin has readonly_fields ours won't be undone """ if obj: return list(self.readonly_fields) + ['id', 'identity', 'is_current'] return self.readonly_fields
[ "def", "get_readonly_fields", "(", "self", ",", "request", ",", "obj", "=", "None", ")", ":", "if", "obj", ":", "return", "list", "(", "self", ".", "readonly_fields", ")", "+", "[", "'id'", ",", "'identity'", ",", "'is_current'", "]", "return", "self", ...
This is required a subclass of VersionedAdmin has readonly_fields ours won't be undone
[ "This", "is", "required", "a", "subclass", "of", "VersionedAdmin", "has", "readonly_fields", "ours", "won", "t", "be", "undone" ]
python
train
jonathf/chaospy
chaospy/distributions/operators/joint.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/joint.py#L148-L169
def _ppf(self, qloc, cache, **kwargs): """ Example: >>> dist = chaospy.J(chaospy.Uniform(), chaospy.Normal()) >>> print(numpy.around(dist.inv([[0.1, 0.2, 0.3], [0.3, 0.3, 0.4]]), 4)) [[ 0.1 0.2 0.3 ] [-0.5244 -0.5244 -0.2533]] >>> d0 = chaospy.Uniform() >>> dist = chaospy.J(d0, d0+chaospy.Uniform()) >>> print(numpy.around(dist.inv([[0.1, 0.2, 0.3], [0.3, 0.3, 0.4]]), 4)) [[0.1 0.2 0.3] [0.4 0.5 0.7]] """ xloc = numpy.zeros(qloc.shape) for dist in evaluation.sorted_dependencies(self, reverse=True): if dist not in self.inverse_map: continue idx = self.inverse_map[dist] qloc_ = qloc[idx].reshape(1, -1) xloc[idx] = evaluation.evaluate_inverse( dist, qloc_, cache=cache)[0] return xloc
[ "def", "_ppf", "(", "self", ",", "qloc", ",", "cache", ",", "*", "*", "kwargs", ")", ":", "xloc", "=", "numpy", ".", "zeros", "(", "qloc", ".", "shape", ")", "for", "dist", "in", "evaluation", ".", "sorted_dependencies", "(", "self", ",", "reverse", ...
Example: >>> dist = chaospy.J(chaospy.Uniform(), chaospy.Normal()) >>> print(numpy.around(dist.inv([[0.1, 0.2, 0.3], [0.3, 0.3, 0.4]]), 4)) [[ 0.1 0.2 0.3 ] [-0.5244 -0.5244 -0.2533]] >>> d0 = chaospy.Uniform() >>> dist = chaospy.J(d0, d0+chaospy.Uniform()) >>> print(numpy.around(dist.inv([[0.1, 0.2, 0.3], [0.3, 0.3, 0.4]]), 4)) [[0.1 0.2 0.3] [0.4 0.5 0.7]]
[ "Example", ":", ">>>", "dist", "=", "chaospy", ".", "J", "(", "chaospy", ".", "Uniform", "()", "chaospy", ".", "Normal", "()", ")", ">>>", "print", "(", "numpy", ".", "around", "(", "dist", ".", "inv", "(", "[[", "0", ".", "1", "0", ".", "2", "...
python
train
log2timeline/plaso
plaso/formatters/winevt_rc.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/formatters/winevt_rc.py#L246-L263
def _GetMessageFileKeys(self, event_log_provider_key): """Retrieves the message file keys. Args: event_log_provider_key (int): Event Log provider key. Yields: int: message file key. """ table_names = ['message_file_per_event_log_provider'] column_names = ['message_file_key'] condition = 'event_log_provider_key == {0:d}'.format( event_log_provider_key) generator = self._database_file.GetValues( table_names, column_names, condition) for values in generator: yield values['message_file_key']
[ "def", "_GetMessageFileKeys", "(", "self", ",", "event_log_provider_key", ")", ":", "table_names", "=", "[", "'message_file_per_event_log_provider'", "]", "column_names", "=", "[", "'message_file_key'", "]", "condition", "=", "'event_log_provider_key == {0:d}'", ".", "for...
Retrieves the message file keys. Args: event_log_provider_key (int): Event Log provider key. Yields: int: message file key.
[ "Retrieves", "the", "message", "file", "keys", "." ]
python
train
cjdrake/pyeda
pyeda/parsing/dimacs.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/dimacs.py#L385-L421
def _sat_formula(lexer, varname, fmt, nvars): """Return a DIMACS SAT formula.""" types = {IntegerToken, LPAREN} | _SAT_TOKS[fmt] tok = _expect_token(lexer, types) # INT if isinstance(tok, IntegerToken): index = tok.value if not 0 < index <= nvars: fstr = "formula literal {} outside valid range: (0, {}]" raise Error(fstr.format(index, nvars)) return ('var', (varname, ), (index, )) # '-' elif isinstance(tok, OP_not): tok = _expect_token(lexer, {IntegerToken, LPAREN}) # '-' INT if isinstance(tok, IntegerToken): index = tok.value if not 0 < index <= nvars: fstr = "formula literal {} outside valid range: (0, {}]" raise Error(fstr.format(index, nvars)) return ('not', ('var', (varname, ), (index, ))) # '-' '(' FORMULA ')' else: formula = _sat_formula(lexer, varname, fmt, nvars) _expect_token(lexer, {RPAREN}) return ('not', formula) # '(' FORMULA ')' elif isinstance(tok, LPAREN): formula = _sat_formula(lexer, varname, fmt, nvars) _expect_token(lexer, {RPAREN}) return formula # OP '(' FORMULAS ')' else: _expect_token(lexer, {LPAREN}) formulas = _formulas(lexer, varname, fmt, nvars) _expect_token(lexer, {RPAREN}) return (tok.ASTOP, ) + formulas
[ "def", "_sat_formula", "(", "lexer", ",", "varname", ",", "fmt", ",", "nvars", ")", ":", "types", "=", "{", "IntegerToken", ",", "LPAREN", "}", "|", "_SAT_TOKS", "[", "fmt", "]", "tok", "=", "_expect_token", "(", "lexer", ",", "types", ")", "# INT", ...
Return a DIMACS SAT formula.
[ "Return", "a", "DIMACS", "SAT", "formula", "." ]
python
train
NuGrid/NuGridPy
nugridpy/utils.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/utils.py#L1163-L1435
def convert_specie_naming_from_h5_to_ppn(isotope_names): ''' read isotopes names from h5 files, and convert them according to standard scheme used inside ppn and mppnp. Also Z and A are recalculated, for these species. Isomers are excluded for now, since there were recent changes in isomers name. As soon as the isomers names are settled, than Z and A provided here will be obsolete, and can be changed by usual Z and A. ''' spe_rude1 = [] spe_rude2 = [] spe_rude3 = [] for i in range(len(isotope_names)): spe_rude1.append(isotope_names[i].split('-')[0]) spe_rude2.append(isotope_names[i].split('-')[1]) # spe_rude1 is elem name and spe_rude2 is mass number. #print spe_rude1,spe_rude2 k = 0 for i in range(len(spe_rude1)): try: if int(spe_rude2[i]) < 10: spe_rude3.append(str(spe_rude1[i][0:2])+str(' ')+str(spe_rude2[i][0:3])) elif int(spe_rude2[i]) >= 10 and int(spe_rude2[i]) < 100 : spe_rude3.append(str(spe_rude1[i][0:2])+str(' ')+str(spe_rude2[i][0:3])) elif int(spe_rude2[i]) >= 100 : spe_rude3.append(str(spe_rude1[i][0:2])+str(spe_rude2[i][0:3])) except ValueError: k = k+1 None global spe spe = [] global n_array n_array = [] for i in range(len(spe_rude3)): if len(str(spe_rude1[i])) == 1: spe.append(str(spe_rude3[i][0:1])+str(' ')+str(spe_rude3[i][1:4])) else: spe.append(spe_rude3[i]) n_array.append(i) if spe[0]=='Ne 1': spe[0] = 'N 1' # spe_rude is the isotope name, in agreement with what we use in ppn, etc. # need to do this to can use other functions without changing them drastically. # here I skip isomers... global amass_int amass_int=np.zeros(len(spe_rude2)) for i in range(len(spe_rude2)-k): amass_int[i]=int(spe_rude2[i]) #print amass_int # here I have to create an array for the atomic numbers. 
# I need to this when I calculate and plot element abundances global znum_int znum_int=np.zeros(len(spe)) for i in range(len(spe)): znum_int[i] = Utils.elements_names.index(str(spe[i][0:2]).strip()) # changed by alex # if str(spe[i][0:2]) == 'H ': # znum_int[i] = 1 # elif str(spe[i][0:2]) == 'He': # znum_int[i] = 2 # elif str(spe[i][0:2]) == 'Li': # znum_int[i] = 3 # elif str(spe[i][0:2]) == 'Be': # znum_int[i] = 4 # elif str(spe[i][0:2]) == 'B ': # znum_int[i] = 5 # elif str(spe[i][0:2]) == 'C ': # znum_int[i] = 6 # elif str(spe[i][0:2]) == 'N ': # znum_int[i] = 7 # elif str(spe[i][0:2]) == 'O ': # znum_int[i] = 8 # elif str(spe[i][0:2]) == 'F ': # znum_int[i] = 9 # elif str(spe[i][0:2]) == 'Ne': # znum_int[i] = 10 # elif str(spe[i][0:2]) == 'Na': # znum_int[i] = 11 # elif str(spe[i][0:2]) == 'Mg': # znum_int[i] = 12 # elif str(spe[i][0:2]) == 'Al': # znum_int[i] = 13 # elif str(spe[i][0:2]) == 'Si': # znum_int[i] = 14 # elif str(spe[i][0:2]) == 'P ': # znum_int[i] = 15 # elif str(spe[i][0:2]) == 'S ': # znum_int[i] = 16 # elif str(spe[i][0:2]) == 'Cl': # znum_int[i] = 17 # elif str(spe[i][0:2]) == 'Ar': # znum_int[i] = 18 # elif str(spe[i][0:2]) == 'K ': # znum_int[i] = 19 # elif str(spe[i][0:2]) == 'Ca': # znum_int[i] = 20 # elif str(spe[i][0:2]) == 'Sc': # znum_int[i] = 21 # elif str(spe[i][0:2]) == 'Ti': # znum_int[i] = 22 # elif str(spe[i][0:2]) == 'V ': # znum_int[i] = 23 # elif str(spe[i][0:2]) == 'Cr': # znum_int[i] = 24 # elif str(spe[i][0:2]) == 'Mn': # znum_int[i] = 25 # elif str(spe[i][0:2]) == 'Fe': # znum_int[i] = 26 # elif str(spe[i][0:2]) == 'Co': # znum_int[i] = 27 # elif str(spe[i][0:2]) == 'Ni': # znum_int[i] = 28 # elif str(spe[i][0:2]) == 'Cu': # znum_int[i] = 29 # elif str(spe[i][0:2]) == 'Zn': # znum_int[i] = 30 # elif str(spe[i][0:2]) == 'Ga': # znum_int[i] = 31 # elif str(spe[i][0:2]) == 'Ge': # znum_int[i] = 32 # elif str(spe[i][0:2]) == 'As': # znum_int[i] = 33 # elif str(spe[i][0:2]) == 'Se': # znum_int[i] = 34 # elif str(spe[i][0:2]) 
== 'Br': # znum_int[i] = 35 # elif str(spe[i][0:2]) == 'Kr': # znum_int[i] = 36 # elif str(spe[i][0:2]) == 'Rb': # znum_int[i] = 37 # elif str(spe[i][0:2]) == 'Sr': # znum_int[i] = 38 # elif str(spe[i][0:2]) == 'Y ': # znum_int[i] = 39 # elif str(spe[i][0:2]) == 'Zr': # znum_int[i] = 40 # elif str(spe[i][0:2]) == 'Nb': # znum_int[i] = 41 # elif str(spe[i][0:2]) == 'Mo': # znum_int[i] = 42 # elif str(spe[i][0:2]) == 'Tc': # znum_int[i] = 43 # elif str(spe[i][0:2]) == 'Ru': # znum_int[i] = 44 # elif str(spe[i][0:2]) == 'Rh': # znum_int[i] = 45 # elif str(spe[i][0:2]) == 'Pd': # znum_int[i] = 46 # elif str(spe[i][0:2]) == 'Ag': # znum_int[i] = 47 # elif str(spe[i][0:2]) == 'Cd': # znum_int[i] = 48 # elif str(spe[i][0:2]) == 'In': # znum_int[i] = 49 # elif str(spe[i][0:2]) == 'Sn': # znum_int[i] = 50 # elif str(spe[i][0:2]) == 'Sb': # znum_int[i] = 51 # elif str(spe[i][0:2]) == 'Te': # znum_int[i] = 52 # elif str(spe[i][0:2]) == 'I ': # znum_int[i] = 53 # elif str(spe[i][0:2]) == 'Xe': # znum_int[i] = 54 # elif str(spe[i][0:2]) == 'Cs': # znum_int[i] = 55 # elif str(spe[i][0:2]) == 'Ba': # znum_int[i] = 56 # elif str(spe[i][0:2]) == 'La': # znum_int[i] = 57 # elif str(spe[i][0:2]) == 'Ce': # znum_int[i] = 58 # elif str(spe[i][0:2]) == 'Pr': # znum_int[i] = 59 # elif str(spe[i][0:2]) == 'Nd': # znum_int[i] = 60 # elif str(spe[i][0:2]) == 'Pm': # znum_int[i] = 61 # elif str(spe[i][0:2]) == 'Sm': # znum_int[i] = 62 # elif str(spe[i][0:2]) == 'Eu': # znum_int[i] = 63 # elif str(spe[i][0:2]) == 'Gd': # znum_int[i] = 64 # elif str(spe[i][0:2]) == 'Tb': # znum_int[i] = 65 # elif str(spe[i][0:2]) == 'Dy': # znum_int[i] = 66 # elif str(spe[i][0:2]) == 'Ho': # znum_int[i] = 67 # elif str(spe[i][0:2]) == 'Er': # znum_int[i] = 68 # elif str(spe[i][0:2]) == 'Tm': # znum_int[i] = 69 # elif str(spe[i][0:2]) == 'Yb': # znum_int[i] = 70 # elif str(spe[i][0:2]) == 'Lu': # znum_int[i] = 71 # elif str(spe[i][0:2]) == 'Hf': # znum_int[i] = 72 # elif str(spe[i][0:2]) == 'Ta': # znum_int[i] 
= 73 # elif str(spe[i][0:2]) == 'W ': # znum_int[i] = 74 # elif str(spe[i][0:2]) == 'Re': # znum_int[i] = 75 # elif str(spe[i][0:2]) == 'Os': # znum_int[i] = 76 # elif str(spe[i][0:2]) == 'Ir': # znum_int[i] = 77 # elif str(spe[i][0:2]) == 'Pt': # znum_int[i] = 78 # elif str(spe[i][0:2]) == 'Au': # znum_int[i] = 79 # elif str(spe[i][0:2]) == 'Hg': # znum_int[i] = 80 # elif str(spe[i][0:2]) == 'Tl': # znum_int[i] = 81 # elif str(spe[i][0:2]) == 'Pb': # znum_int[i] = 82 # elif str(spe[i][0:2]) == 'Bi': # znum_int[i] = 83 # elif str(spe[i][0:2]) == 'Po': # znum_int[i] = 84 # elif str(spe[i][0:2]) == 'At': # znum_int[i] = 85 # elif str(spe[i][0:2]) == 'Rn': # znum_int[i] = 86 # elif str(spe[i][0:2]) == 'Fr': # znum_int[i] = 87 # elif str(spe[i][0:2]) == 'Ra': # znum_int[i] = 88 # elif str(spe[i][0:2]) == 'Ac': # znum_int[i] = 89 # elif str(spe[i][0:2]) == 'Th': # znum_int[i] = 90 # elif str(spe[i][0:2]) == 'Pa': # znum_int[i] = 91 # elif str(spe[i][0:2]) == 'U ': # znum_int[i] = 92 # elif str(spe[i][0:2]) == 'Np': # znum_int[i] = 93 # elif str(spe[i][0:2]) == 'Pu': # znum_int[i] = 94 # elif str(spe[i][0:2]) == 'Am': # znum_int[i] = 95 # elif str(spe[i][0:2]) == 'Cm': # znum_int[i] = 96 # elif str(spe[i][0:2]) == 'Bk': # znum_int[i] = 97 # elif str(spe[i][0:2]) == 'Cf': # znum_int[i] = 98 if spe[0] == 'N 1': znum_int[0] = 0 # here the index to connect name and atomic numbers. global index_atomic_number index_atomic_number = {} for a,b in zip(spe,znum_int): index_atomic_number[a]=b
[ "def", "convert_specie_naming_from_h5_to_ppn", "(", "isotope_names", ")", ":", "spe_rude1", "=", "[", "]", "spe_rude2", "=", "[", "]", "spe_rude3", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "isotope_names", ")", ")", ":", "spe_rude1", ".",...
read isotopes names from h5 files, and convert them according to standard scheme used inside ppn and mppnp. Also Z and A are recalculated, for these species. Isomers are excluded for now, since there were recent changes in isomers name. As soon as the isomers names are settled, than Z and A provided here will be obsolete, and can be changed by usual Z and A.
[ "read", "isotopes", "names", "from", "h5", "files", "and", "convert", "them", "according", "to", "standard", "scheme", "used", "inside", "ppn", "and", "mppnp", ".", "Also", "Z", "and", "A", "are", "recalculated", "for", "these", "species", ".", "Isomers", ...
python
train
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L188-L195
def _pre_delete_hook(cls, key): """ Removes instance from index. """ if cls.searching_enabled: doc_id = cls.search_get_document_id(key) index = cls.search_get_index() index.delete(doc_id)
[ "def", "_pre_delete_hook", "(", "cls", ",", "key", ")", ":", "if", "cls", ".", "searching_enabled", ":", "doc_id", "=", "cls", ".", "search_get_document_id", "(", "key", ")", "index", "=", "cls", ".", "search_get_index", "(", ")", "index", ".", "delete", ...
Removes instance from index.
[ "Removes", "instance", "from", "index", "." ]
python
train
ten10solutions/Geist
geist/match_position_finder_helpers.py
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/match_position_finder_helpers.py#L44-L57
def normalise_correlation(image_tile_dict, transformed_array, template, normed_tolerance=1): """Calculates the normalisation coefficients of potential match positions Then normalises the correlation at these positions, and returns them if they do indeed constitute a match """ template_norm = np.linalg.norm(template) image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)])*template_norm for (x,y) in image_tile_dict.keys()} match_points = image_tile_dict.keys() # for correlation, then need to transofrm back to get correct value for division h, w = template.shape #points_from_transformed_array = [(match[0] + h - 1, match[1] + w - 1) for match in match_points] image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))} result = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance} return result.keys()
[ "def", "normalise_correlation", "(", "image_tile_dict", ",", "transformed_array", ",", "template", ",", "normed_tolerance", "=", "1", ")", ":", "template_norm", "=", "np", ".", "linalg", ".", "norm", "(", "template", ")", "image_norms", "=", "{", "(", "x", "...
Calculates the normalisation coefficients of potential match positions Then normalises the correlation at these positions, and returns them if they do indeed constitute a match
[ "Calculates", "the", "normalisation", "coefficients", "of", "potential", "match", "positions", "Then", "normalises", "the", "correlation", "at", "these", "positions", "and", "returns", "them", "if", "they", "do", "indeed", "constitute", "a", "match" ]
python
train
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L287-L299
def replace_666(meta_df, convert_neg_666): """ Replace -666, -666.0, and optionally "-666". Args: meta_df (pandas df): convert_neg_666 (bool): Returns: out_df (pandas df): updated meta_df """ if convert_neg_666: out_df = meta_df.replace([-666, "-666", -666.0], np.nan) else: out_df = meta_df.replace([-666, -666.0], "-666") return out_df
[ "def", "replace_666", "(", "meta_df", ",", "convert_neg_666", ")", ":", "if", "convert_neg_666", ":", "out_df", "=", "meta_df", ".", "replace", "(", "[", "-", "666", ",", "\"-666\"", ",", "-", "666.0", "]", ",", "np", ".", "nan", ")", "else", ":", "o...
Replace -666, -666.0, and optionally "-666". Args: meta_df (pandas df): convert_neg_666 (bool): Returns: out_df (pandas df): updated meta_df
[ "Replace", "-", "666", "-", "666", ".", "0", "and", "optionally", "-", "666", ".", "Args", ":", "meta_df", "(", "pandas", "df", ")", ":", "convert_neg_666", "(", "bool", ")", ":", "Returns", ":", "out_df", "(", "pandas", "df", ")", ":", "updated", ...
python
train
vvangelovski/django-audit-log
audit_log/models/managers.py
https://github.com/vvangelovski/django-audit-log/blob/f1bee75360a67390fbef67c110e9a245b41ebb92/audit_log/models/managers.py#L190-L231
def get_logging_fields(self, model): """ Returns a dictionary mapping of the fields that are used for keeping the acutal audit log entries. """ rel_name = '_%s_audit_log_entry'%model._meta.object_name.lower() def entry_instance_to_unicode(log_entry): try: result = '%s: %s %s at %s'%(model._meta.object_name, log_entry.object_state, log_entry.get_action_type_display().lower(), log_entry.action_date, ) except AttributeError: result = '%s %s at %s'%(model._meta.object_name, log_entry.get_action_type_display().lower(), log_entry.action_date ) return result action_user_field = LastUserField(related_name = rel_name, editable = False) #check if the manager has been attached to auth user model if [model._meta.app_label, model.__name__] == getattr(settings, 'AUTH_USER_MODEL', 'auth.User').split("."): action_user_field = LastUserField(related_name = rel_name, editable = False, to = 'self') return { 'action_id' : models.AutoField(primary_key = True), 'action_date' : models.DateTimeField(default = datetime_now, editable = False, blank=False), 'action_user' : action_user_field, 'action_type' : models.CharField(max_length = 1, editable = False, choices = ( ('I', _('Created')), ('U', _('Changed')), ('D', _('Deleted')), )), 'object_state' : LogEntryObjectDescriptor(model), '__unicode__' : entry_instance_to_unicode, }
[ "def", "get_logging_fields", "(", "self", ",", "model", ")", ":", "rel_name", "=", "'_%s_audit_log_entry'", "%", "model", ".", "_meta", ".", "object_name", ".", "lower", "(", ")", "def", "entry_instance_to_unicode", "(", "log_entry", ")", ":", "try", ":", "r...
Returns a dictionary mapping of the fields that are used for keeping the acutal audit log entries.
[ "Returns", "a", "dictionary", "mapping", "of", "the", "fields", "that", "are", "used", "for", "keeping", "the", "acutal", "audit", "log", "entries", "." ]
python
train
lk-geimfari/mimesis
mimesis/providers/generic.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/generic.py#L126-L133
def add_providers(self, *providers: Type[BaseProvider]) -> None: """Add a lot of custom providers to Generic() object. :param providers: Custom providers. :return: None """ for provider in providers: self.add_provider(provider)
[ "def", "add_providers", "(", "self", ",", "*", "providers", ":", "Type", "[", "BaseProvider", "]", ")", "->", "None", ":", "for", "provider", "in", "providers", ":", "self", ".", "add_provider", "(", "provider", ")" ]
Add a lot of custom providers to Generic() object. :param providers: Custom providers. :return: None
[ "Add", "a", "lot", "of", "custom", "providers", "to", "Generic", "()", "object", "." ]
python
train
vaexio/vaex
packages/vaex-core/vaex/functions.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1256-L1287
def str_rstrip(x, to_strip=None): """Remove trailing characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rstrip(to_strip='ing') Expression = str_rstrip(text, to_strip='ing') Length: 5 dtype: str (expression) --------------------------------- 0 Someth 1 very pretty 2 is com 3 our 4 way. """ # in c++ we give empty string the same meaning as None sl = _to_string_sequence(x).rstrip('' if to_strip is None else to_strip) if to_strip != '' else x return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "def", "str_rstrip", "(", "x", ",", "to_strip", "=", "None", ")", ":", "# in c++ we give empty string the same meaning as None", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "rstrip", "(", "''", "if", "to_strip", "is", "None", "else", "to_strip", ")", ...
Remove trailing characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rstrip(to_strip='ing') Expression = str_rstrip(text, to_strip='ing') Length: 5 dtype: str (expression) --------------------------------- 0 Someth 1 very pretty 2 is com 3 our 4 way.
[ "Remove", "trailing", "characters", "from", "a", "string", "sample", "." ]
python
test
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L1310-L1329
def _extract_asset_urls(self, asset_ids): """ Extract asset URLs along with asset ids. @param asset_ids: List of ids to get URLs for. @type assertn: [str] @return: List of dictionaries with asset URLs and ids. @rtype: [{ 'id': '<id>', 'url': '<url>' }] """ dom = get_page(self._session, OPENCOURSE_ASSET_URL, json=True, ids=quote_plus(','.join(asset_ids))) return [{'id': element['id'], 'url': element['url'].strip()} for element in dom['elements']]
[ "def", "_extract_asset_urls", "(", "self", ",", "asset_ids", ")", ":", "dom", "=", "get_page", "(", "self", ".", "_session", ",", "OPENCOURSE_ASSET_URL", ",", "json", "=", "True", ",", "ids", "=", "quote_plus", "(", "','", ".", "join", "(", "asset_ids", ...
Extract asset URLs along with asset ids. @param asset_ids: List of ids to get URLs for. @type assertn: [str] @return: List of dictionaries with asset URLs and ids. @rtype: [{ 'id': '<id>', 'url': '<url>' }]
[ "Extract", "asset", "URLs", "along", "with", "asset", "ids", "." ]
python
train
joshourisman/django-tablib
django_tablib/views.py
https://github.com/joshourisman/django-tablib/blob/85b0751fa222a0498aa186714f840b1171a150f9/django_tablib/views.py#L39-L98
def generic_export(request, model_name=None): """ Generic view configured through settings.TABLIB_MODELS Usage: 1. Add the view to ``urlpatterns`` in ``urls.py``:: url(r'export/(?P<model_name>[^/]+)/$', "django_tablib.views.generic_export"), 2. Create the ``settings.TABLIB_MODELS`` dictionary using model names as keys the allowed lookup operators as values, if any:: TABLIB_MODELS = { 'myapp.simple': None, 'myapp.related': {'simple__title': ('exact', 'iexact')}, } 3. Open ``/export/myapp.simple`` or ``/export/myapp.related/?simple__title__iexact=test`` """ if model_name not in settings.TABLIB_MODELS: raise Http404() model = get_model(*model_name.split(".", 2)) if not model: raise ImproperlyConfigured( "Model {0} is in settings.TABLIB_MODELS but" " could not be loaded".format(model_name)) qs = model._default_manager.all() # Filtering may be allowed based on TABLIB_MODELS: filter_settings = settings.TABLIB_MODELS[model_name] filters = {} for k, v in request.GET.items(): try: # Allow joins (they'll be checked below) but chop off the trailing # lookup operator: rel, lookup_type = k.rsplit("__", 1) except ValueError: rel = k lookup_type = "exact" allowed_lookups = filter_settings.get(rel, None) if allowed_lookups is None: return HttpResponseBadRequest( "Filtering on {0} is not allowed".format(rel) ) elif lookup_type not in allowed_lookups: return HttpResponseBadRequest( "{0} may only be filtered using {1}".format( k, " ".join(allowed_lookups))) else: filters[str(k)] = v if filters: qs = qs.filter(**filters) return export(request, model=model, queryset=qs)
[ "def", "generic_export", "(", "request", ",", "model_name", "=", "None", ")", ":", "if", "model_name", "not", "in", "settings", ".", "TABLIB_MODELS", ":", "raise", "Http404", "(", ")", "model", "=", "get_model", "(", "*", "model_name", ".", "split", "(", ...
Generic view configured through settings.TABLIB_MODELS Usage: 1. Add the view to ``urlpatterns`` in ``urls.py``:: url(r'export/(?P<model_name>[^/]+)/$', "django_tablib.views.generic_export"), 2. Create the ``settings.TABLIB_MODELS`` dictionary using model names as keys the allowed lookup operators as values, if any:: TABLIB_MODELS = { 'myapp.simple': None, 'myapp.related': {'simple__title': ('exact', 'iexact')}, } 3. Open ``/export/myapp.simple`` or ``/export/myapp.related/?simple__title__iexact=test``
[ "Generic", "view", "configured", "through", "settings", ".", "TABLIB_MODELS" ]
python
train
brainiak/brainiak
brainiak/funcalign/srm.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/srm.py#L385-L413
def transform_subject(self, X): """Transform a new subject using the existing model. The subject is assumed to have recieved equivalent stimulation Parameters ---------- X : 2D array, shape=[voxels, timepoints] The fMRI data of the new subject. Returns ------- w : 2D array, shape=[voxels, features] Orthogonal mapping `W_{new}` for new subject """ # Check if the model exist if hasattr(self, 'w_') is False: raise NotFittedError("The model fit has not been run yet.") # Check the number of TRs in the subject if X.shape[1] != self.s_.shape[1]: raise ValueError("The number of timepoints(TRs) does not match the" "one in the model.") w = self._update_transform_subject(X, self.s_) return w
[ "def", "transform_subject", "(", "self", ",", "X", ")", ":", "# Check if the model exist", "if", "hasattr", "(", "self", ",", "'w_'", ")", "is", "False", ":", "raise", "NotFittedError", "(", "\"The model fit has not been run yet.\"", ")", "# Check the number of TRs in...
Transform a new subject using the existing model. The subject is assumed to have recieved equivalent stimulation Parameters ---------- X : 2D array, shape=[voxels, timepoints] The fMRI data of the new subject. Returns ------- w : 2D array, shape=[voxels, features] Orthogonal mapping `W_{new}` for new subject
[ "Transform", "a", "new", "subject", "using", "the", "existing", "model", ".", "The", "subject", "is", "assumed", "to", "have", "recieved", "equivalent", "stimulation" ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/kthread.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/kthread.py#L49-L53
def __run(self): """Hacked run function, which installs the trace.""" sys.settrace(self.globaltrace) self.__run_backup() self.run = self.__run_backup
[ "def", "__run", "(", "self", ")", ":", "sys", ".", "settrace", "(", "self", ".", "globaltrace", ")", "self", ".", "__run_backup", "(", ")", "self", ".", "run", "=", "self", ".", "__run_backup" ]
Hacked run function, which installs the trace.
[ "Hacked", "run", "function", "which", "installs", "the", "trace", "." ]
python
train
SeabornGames/Table
seaborn_table/table.py
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L1659-L1673
def _html_link_cells(self): """ This will return a new table with cell linked with their columns that have <Link> in the name :return: """ new_table = self.copy() for row in new_table: for c in new_table.columns: link = '%s <Link>' % c if row.get(link, None): row[c] = '<a href="%s">%s</a>' % (row[link], row[c]) new_table.columns = [c for c in self.columns if '<Link>' not in c] return new_table
[ "def", "_html_link_cells", "(", "self", ")", ":", "new_table", "=", "self", ".", "copy", "(", ")", "for", "row", "in", "new_table", ":", "for", "c", "in", "new_table", ".", "columns", ":", "link", "=", "'%s <Link>'", "%", "c", "if", "row", ".", "get"...
This will return a new table with cell linked with their columns that have <Link> in the name :return:
[ "This", "will", "return", "a", "new", "table", "with", "cell", "linked", "with", "their", "columns", "that", "have", "<Link", ">", "in", "the", "name", ":", "return", ":" ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/gcc.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/gcc.py#L66-L94
def detect_version(env, cc): """Return the version of the GNU compiler, or None if it is not a GNU compiler.""" cc = env.subst(cc) if not cc: return None version = None #pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['-dumpversion'], pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['--version'], stdin = 'devnull', stderr = 'devnull', stdout = subprocess.PIPE) # -dumpversion was added in GCC 3.0. As long as we're supporting # GCC versions older than that, we should use --version and a # regular expression. #line = pipe.stdout.read().strip() #if line: # version = line line = SCons.Util.to_str(pipe.stdout.readline()) match = re.search(r'[0-9]+(\.[0-9]+)+', line) if match: version = match.group(0) # Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer: # So continue with reading to let the child process actually terminate. while SCons.Util.to_str(pipe.stdout.readline()): pass ret = pipe.wait() if ret != 0: return None return version
[ "def", "detect_version", "(", "env", ",", "cc", ")", ":", "cc", "=", "env", ".", "subst", "(", "cc", ")", "if", "not", "cc", ":", "return", "None", "version", "=", "None", "#pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['-dumpversion'],", "pipe", "="...
Return the version of the GNU compiler, or None if it is not a GNU compiler.
[ "Return", "the", "version", "of", "the", "GNU", "compiler", "or", "None", "if", "it", "is", "not", "a", "GNU", "compiler", "." ]
python
train
python-astrodynamics/spacetrack
spacetrack/base.py
https://github.com/python-astrodynamics/spacetrack/blob/18f63b7de989a31b983d140a11418e01bd6fd398/spacetrack/base.py#L632-L637
def get_predicates(self, class_): """Proxy ``get_predicates`` to client with stored request controller. """ return self.client.get_predicates( class_=class_, controller=self.controller)
[ "def", "get_predicates", "(", "self", ",", "class_", ")", ":", "return", "self", ".", "client", ".", "get_predicates", "(", "class_", "=", "class_", ",", "controller", "=", "self", ".", "controller", ")" ]
Proxy ``get_predicates`` to client with stored request controller.
[ "Proxy", "get_predicates", "to", "client", "with", "stored", "request", "controller", "." ]
python
train
urschrei/Circles
Circles/circles.py
https://github.com/urschrei/Circles/blob/5aab401b470935e816a28d7ba817eb72f9344672/Circles/circles.py#L30-L95
def _gccalc(lon, lat, azimuth, maxdist=None): """ Original javascript on http://williams.best.vwh.net/gccalc.htm Translated into python by Thomas Lecocq This function is a black box, because trigonometry is difficult """ glat1 = lat * np.pi / 180. glon1 = lon * np.pi / 180. s = maxdist / 1.852243 faz = azimuth * np.pi / 180. EPS = 0.00000000005 if ((np.abs(np.cos(glat1)) < EPS) and not (np.abs(np.sin(faz)) < EPS)): raise CourseException("Only North-South courses are meaningful") a = 6378.137 / 1.852243 f = 1 / 298.257223563 r = 1 - f tu = r * np.tan(glat1) sf = np.sin(faz) cf = np.cos(faz) if (cf == 0): b = 0. else: b = 2. * np.arctan2 (tu, cf) cu = 1. / np.sqrt(1 + tu * tu) su = tu * cu sa = cu * sf c2a = 1 - sa * sa x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.)) x = (x - 2.) / x c = 1. - x c = (x * x / 4. + 1.) / c d = (0.375 * x * x - 1.) * x tu = s / (r * a * c) y = tu c = y + 1 while (np.abs (y - c) > EPS): sy = np.sin(y) cy = np.cos(y) cz = np.cos(b + y) e = 2. * cz * cz - 1. c = y x = e * cy y = e + e - 1. y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) * d / 4. - cz) * sy * d + tu b = cu * cy * cf - su * sy c = r * np.sqrt(sa * sa + b * b) d = su * cy + cu * sy * cf glat2 = (np.arctan2(d, c) + np.pi) % (2*np.pi) - np.pi c = cu * cy - su * sy * cf x = np.arctan2(sy * sf, c) c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16. d = ((e * cy * c + cz) * sy * c + y) * sa glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2*np.pi)) - np.pi baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi) glon2 *= 180./np.pi glat2 *= 180./np.pi baz *= 180./np.pi return (glon2, glat2, baz)
[ "def", "_gccalc", "(", "lon", ",", "lat", ",", "azimuth", ",", "maxdist", "=", "None", ")", ":", "glat1", "=", "lat", "*", "np", ".", "pi", "/", "180.", "glon1", "=", "lon", "*", "np", ".", "pi", "/", "180.", "s", "=", "maxdist", "/", "1.852243...
Original javascript on http://williams.best.vwh.net/gccalc.htm Translated into python by Thomas Lecocq This function is a black box, because trigonometry is difficult
[ "Original", "javascript", "on", "http", ":", "//", "williams", ".", "best", ".", "vwh", ".", "net", "/", "gccalc", ".", "htm", "Translated", "into", "python", "by", "Thomas", "Lecocq", "This", "function", "is", "a", "black", "box", "because", "trigonometry...
python
train
jsvine/tinyapi
tinyapi/session.py
https://github.com/jsvine/tinyapi/blob/ac2cf0400b2a9b22bd0b1f43b36be99f5d1a787c/tinyapi/session.py#L100-L104
def get_drafts(self, **kwargs): """Same as Session.get_messages, but where ``statuses=["draft"]``.""" default_kwargs = { "order": "updated_at desc" } default_kwargs.update(kwargs) return self.get_messages(statuses=["draft"], **default_kwargs)
[ "def", "get_drafts", "(", "self", ",", "*", "*", "kwargs", ")", ":", "default_kwargs", "=", "{", "\"order\"", ":", "\"updated_at desc\"", "}", "default_kwargs", ".", "update", "(", "kwargs", ")", "return", "self", ".", "get_messages", "(", "statuses", "=", ...
Same as Session.get_messages, but where ``statuses=["draft"]``.
[ "Same", "as", "Session", ".", "get_messages", "but", "where", "statuses", "=", "[", "draft", "]", "." ]
python
train
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L2674-L2690
def store_object(self, container, obj_name, data, content_type=None, etag=None, content_encoding=None, ttl=None, return_none=False, chunk_size=None, headers=None, metadata=None, extra_info=None): """ Creates a new object in the specified container, and populates it with the given data. A StorageObject reference to the uploaded file will be returned, unless 'return_none' is set to True. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more. """ return self.create_object(container, obj_name=obj_name, data=data, content_type=content_type, etag=etag, content_encoding=content_encoding, ttl=ttl, return_none=return_none, chunk_size=chunk_size, headers=headers, metadata=metadata)
[ "def", "store_object", "(", "self", ",", "container", ",", "obj_name", ",", "data", ",", "content_type", "=", "None", ",", "etag", "=", "None", ",", "content_encoding", "=", "None", ",", "ttl", "=", "None", ",", "return_none", "=", "False", ",", "chunk_s...
Creates a new object in the specified container, and populates it with the given data. A StorageObject reference to the uploaded file will be returned, unless 'return_none' is set to True. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more.
[ "Creates", "a", "new", "object", "in", "the", "specified", "container", "and", "populates", "it", "with", "the", "given", "data", ".", "A", "StorageObject", "reference", "to", "the", "uploaded", "file", "will", "be", "returned", "unless", "return_none", "is", ...
python
train
jilljenn/tryalgo
tryalgo/dfs.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/dfs.py#L110-L136
def find_cycle(graph): """find a cycle in an undirected graph :param graph: undirected graph in listlist or listdict format :returns: list of vertices in a cycle or None :complexity: `O(|V|+|E|)` """ n = len(graph) prec = [None] * n # ancestor marks for visited vertices for u in range(n): if prec[u] is None: # unvisited vertex S = [u] # start new DFS prec[u] = u # mark root (not necessary for this algorithm) while S: u = S.pop() for v in graph[u]: # for all neighbors if v != prec[u]: # except arcs to father in DFS tree if prec[v] is not None: cycle = [v, u] # cycle found, (u,v) back edge while u != prec[v] and u != prec[u]: # directed u = prec[u] # climb up the tree cycle.append(u) return cycle else: prec[v] = u # v is new vertex in tree S.append(v) return None
[ "def", "find_cycle", "(", "graph", ")", ":", "n", "=", "len", "(", "graph", ")", "prec", "=", "[", "None", "]", "*", "n", "# ancestor marks for visited vertices", "for", "u", "in", "range", "(", "n", ")", ":", "if", "prec", "[", "u", "]", "is", "No...
find a cycle in an undirected graph :param graph: undirected graph in listlist or listdict format :returns: list of vertices in a cycle or None :complexity: `O(|V|+|E|)`
[ "find", "a", "cycle", "in", "an", "undirected", "graph" ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L194-L210
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_unnum_ip_address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail") local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name") local_interface_name_key.text = kwargs.pop('local_interface_name') remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name") remote_interface_name_key.text = kwargs.pop('remote_interface_name') remote_unnum_ip_address = ET.SubElement(lldp_neighbor_detail, "remote-unnum-ip-address") remote_unnum_ip_address.text = kwargs.pop('remote_unnum_ip_address') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_unnum_ip_address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_lldp_neighbor_detail", "=", "ET", ".", "Element", "(", "\"get_lldp_...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
openpaperwork/paperwork-backend
paperwork_backend/docsearch.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/docsearch.py#L216-L222
def del_doc(self, doc): """ Delete a document """ logger.info("Removing doc from the index: %s" % doc) doc = doc.clone() # make sure it can be serialized safely self.docsearch.index.del_doc(doc)
[ "def", "del_doc", "(", "self", ",", "doc", ")", ":", "logger", ".", "info", "(", "\"Removing doc from the index: %s\"", "%", "doc", ")", "doc", "=", "doc", ".", "clone", "(", ")", "# make sure it can be serialized safely", "self", ".", "docsearch", ".", "index...
Delete a document
[ "Delete", "a", "document" ]
python
train
pantsbuild/pants
src/python/pants/engine/legacy/structs.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/legacy/structs.py#L376-L381
def to_path_globs(self, relpath, conjunction): """Return a PathGlobs representing the included and excluded Files for these patterns.""" return PathGlobs( include=tuple(os.path.join(relpath, glob) for glob in self._file_globs), exclude=tuple(os.path.join(relpath, exclude) for exclude in self._excluded_file_globs), conjunction=conjunction)
[ "def", "to_path_globs", "(", "self", ",", "relpath", ",", "conjunction", ")", ":", "return", "PathGlobs", "(", "include", "=", "tuple", "(", "os", ".", "path", ".", "join", "(", "relpath", ",", "glob", ")", "for", "glob", "in", "self", ".", "_file_glob...
Return a PathGlobs representing the included and excluded Files for these patterns.
[ "Return", "a", "PathGlobs", "representing", "the", "included", "and", "excluded", "Files", "for", "these", "patterns", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L86-L89
def get_obj(ref): """Get object from string reference.""" oid = int(ref) return server.id2ref.get(oid) or server.id2obj[oid]
[ "def", "get_obj", "(", "ref", ")", ":", "oid", "=", "int", "(", "ref", ")", "return", "server", ".", "id2ref", ".", "get", "(", "oid", ")", "or", "server", ".", "id2obj", "[", "oid", "]" ]
Get object from string reference.
[ "Get", "object", "from", "string", "reference", "." ]
python
train
LeastAuthority/txkube
src/txkube/_swagger.py
https://github.com/LeastAuthority/txkube/blob/a7e555d00535ff787d4b1204c264780da40cf736/src/txkube/_swagger.py#L143-L155
def to_document(self): """ Serialize this specification to a JSON-compatible object representing a Swagger specification. """ return dict( info=thaw(self.info), paths=thaw(self.paths), definitions=thaw(self.definitions), securityDefinitions=thaw(self.securityDefinitions), security=thaw(self.security), swagger=thaw(self.swagger), )
[ "def", "to_document", "(", "self", ")", ":", "return", "dict", "(", "info", "=", "thaw", "(", "self", ".", "info", ")", ",", "paths", "=", "thaw", "(", "self", ".", "paths", ")", ",", "definitions", "=", "thaw", "(", "self", ".", "definitions", ")"...
Serialize this specification to a JSON-compatible object representing a Swagger specification.
[ "Serialize", "this", "specification", "to", "a", "JSON", "-", "compatible", "object", "representing", "a", "Swagger", "specification", "." ]
python
train
collectiveacuity/labPack
labpack/parsing/conversion.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/parsing/conversion.py#L20-L30
def _to_python(input_string): ''' a helper method to convert camelcase to python''' python_string = '' for i in range(len(input_string)): if not python_string: python_string += input_string[i].lower() elif input_string[i].isupper(): python_string += '_%s' % input_string[i].lower() else: python_string += input_string[i] return python_string
[ "def", "_to_python", "(", "input_string", ")", ":", "python_string", "=", "''", "for", "i", "in", "range", "(", "len", "(", "input_string", ")", ")", ":", "if", "not", "python_string", ":", "python_string", "+=", "input_string", "[", "i", "]", ".", "lowe...
a helper method to convert camelcase to python
[ "a", "helper", "method", "to", "convert", "camelcase", "to", "python" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L1135-L1155
def justify(self, column, direction): """ Make the text in a column left or right justified. @type column: int @param column: Index of the column. @type direction: int @param direction: C{-1} to justify left, C{1} to justify right. @raise IndexError: Bad column index. @raise ValueError: Bad direction value. """ if direction == -1: self.__width[column] = abs(self.__width[column]) elif direction == 1: self.__width[column] = - abs(self.__width[column]) else: raise ValueError("Bad direction value.")
[ "def", "justify", "(", "self", ",", "column", ",", "direction", ")", ":", "if", "direction", "==", "-", "1", ":", "self", ".", "__width", "[", "column", "]", "=", "abs", "(", "self", ".", "__width", "[", "column", "]", ")", "elif", "direction", "==...
Make the text in a column left or right justified. @type column: int @param column: Index of the column. @type direction: int @param direction: C{-1} to justify left, C{1} to justify right. @raise IndexError: Bad column index. @raise ValueError: Bad direction value.
[ "Make", "the", "text", "in", "a", "column", "left", "or", "right", "justified", "." ]
python
train
Jaymon/endpoints
endpoints/http.py
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L840-L848
def access_token(self): """return an Oauth 2.0 Bearer access token if it can be found""" access_token = self.get_auth_bearer() if not access_token: access_token = self.query_kwargs.get('access_token', '') if not access_token: access_token = self.body_kwargs.get('access_token', '') return access_token
[ "def", "access_token", "(", "self", ")", ":", "access_token", "=", "self", ".", "get_auth_bearer", "(", ")", "if", "not", "access_token", ":", "access_token", "=", "self", ".", "query_kwargs", ".", "get", "(", "'access_token'", ",", "''", ")", "if", "not",...
return an Oauth 2.0 Bearer access token if it can be found
[ "return", "an", "Oauth", "2", ".", "0", "Bearer", "access", "token", "if", "it", "can", "be", "found" ]
python
train
numberly/appnexus-client
appnexus/cursor.py
https://github.com/numberly/appnexus-client/blob/d6a813449ab6fd93bfbceaa937a168fa9a78b890/appnexus/cursor.py#L74-L79
def first(self): """Extract the first AppNexus object present in the response""" page = self.get_page(num_elements=1) data = self.extract_data(page) if data: return data[0]
[ "def", "first", "(", "self", ")", ":", "page", "=", "self", ".", "get_page", "(", "num_elements", "=", "1", ")", "data", "=", "self", ".", "extract_data", "(", "page", ")", "if", "data", ":", "return", "data", "[", "0", "]" ]
Extract the first AppNexus object present in the response
[ "Extract", "the", "first", "AppNexus", "object", "present", "in", "the", "response" ]
python
train
totalgood/pugnlp
src/pugnlp/util.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L460-L488
def generate_batches(sequence, batch_len=1, allow_partial=True, ignore_errors=True, verbosity=1): """Iterate through a sequence (or generator) in batches of length `batch_len` http://stackoverflow.com/a/761125/623735 >>> [batch for batch in generate_batches(range(7), 3)] [[0, 1, 2], [3, 4, 5], [6]] """ it = iter(sequence) last_value = False # An exception will be thrown by `.next()` here and caught in the loop that called this iterator/generator while not last_value: batch = [] for n in range(batch_len): try: batch += (next(it),) except StopIteration: last_value = True if batch: break else: raise StopIteration except Exception: # 'Error: new-line character seen in unquoted field - # do you need to open the file in universal-newline mode?' if verbosity > 0: print_exc() if not ignore_errors: raise yield batch
[ "def", "generate_batches", "(", "sequence", ",", "batch_len", "=", "1", ",", "allow_partial", "=", "True", ",", "ignore_errors", "=", "True", ",", "verbosity", "=", "1", ")", ":", "it", "=", "iter", "(", "sequence", ")", "last_value", "=", "False", "# An...
Iterate through a sequence (or generator) in batches of length `batch_len` http://stackoverflow.com/a/761125/623735 >>> [batch for batch in generate_batches(range(7), 3)] [[0, 1, 2], [3, 4, 5], [6]]
[ "Iterate", "through", "a", "sequence", "(", "or", "generator", ")", "in", "batches", "of", "length", "batch_len" ]
python
train
proteanhq/protean
src/protean/core/repository/factory.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/factory.py#L66-L83
def _find_entity_in_records_by_class_name(self, entity_name): """Fetch by Entity Name in values""" records = { key: value for (key, value) in self._registry.items() if value.name == entity_name } # If more than one record was found, we are dealing with the case of # an Entity name present in multiple places (packages or plugins). Throw an error # and ask for a fully qualified Entity name to be specified if len(records) > 1: raise ConfigurationError( f'Entity with name {entity_name} has been registered twice. ' f'Please use fully qualified Entity name to specify the exact Entity.') elif len(records) == 1: return next(iter(records.values())) else: raise AssertionError(f'No Entity registered with name {entity_name}')
[ "def", "_find_entity_in_records_by_class_name", "(", "self", ",", "entity_name", ")", ":", "records", "=", "{", "key", ":", "value", "for", "(", "key", ",", "value", ")", "in", "self", ".", "_registry", ".", "items", "(", ")", "if", "value", ".", "name",...
Fetch by Entity Name in values
[ "Fetch", "by", "Entity", "Name", "in", "values" ]
python
train
knipknap/SpiffWorkflow
SpiffWorkflow/specs/Join.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/specs/Join.py#L187-L202
def _start(self, my_task, force=False): """ Checks whether the preconditions for going to READY state are met. Returns True if the threshold was reached, False otherwise. Also returns the list of tasks that yet need to be completed. """ # If the threshold was already reached, there is nothing else to do. if my_task._has_state(Task.COMPLETED): return True, None if my_task._has_state(Task.READY): return True, None # Check whether we may fire. if self.split_task is None: return self._check_threshold_unstructured(my_task, force) return self._check_threshold_structured(my_task, force)
[ "def", "_start", "(", "self", ",", "my_task", ",", "force", "=", "False", ")", ":", "# If the threshold was already reached, there is nothing else to do.", "if", "my_task", ".", "_has_state", "(", "Task", ".", "COMPLETED", ")", ":", "return", "True", ",", "None", ...
Checks whether the preconditions for going to READY state are met. Returns True if the threshold was reached, False otherwise. Also returns the list of tasks that yet need to be completed.
[ "Checks", "whether", "the", "preconditions", "for", "going", "to", "READY", "state", "are", "met", ".", "Returns", "True", "if", "the", "threshold", "was", "reached", "False", "otherwise", ".", "Also", "returns", "the", "list", "of", "tasks", "that", "yet", ...
python
valid
wonambi-python/wonambi
wonambi/attr/chan.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/chan.py#L362-L395
def assign_region_to_channels(channels, anat, parc_type='aparc', max_approx=3, exclude_regions=None): """Assign a brain region based on the channel location. Parameters ---------- channels : instance of wonambi.attr.chan.Channels channels to assign regions to anat : instance of wonambi.attr.anat.Freesurfer anatomical information taken from freesurfer. parc_type : str 'aparc', 'aparc.a2009s', 'BA', 'BA.thresh', or 'aparc.DKTatlas40' 'aparc.DKTatlas40' is only for recent freesurfer versions max_approx : int, optional approximation to define position of the electrode. exclude_regions : list of str or empty list do not report regions if they contain these substrings. None means that it does not exclude any region. For example, to exclude white matter regions and unknown regions you can use exclude_regions=('White', 'WM', 'Unknown') Returns ------- instance of wonambi.attr.chan.Channels same instance as before, now Chan have attr 'region' """ for one_chan in channels.chan: one_region, approx = anat.find_brain_region(one_chan.xyz, parc_type, max_approx, exclude_regions) one_chan.attr.update({'region': one_region, 'approx': approx}) return channels
[ "def", "assign_region_to_channels", "(", "channels", ",", "anat", ",", "parc_type", "=", "'aparc'", ",", "max_approx", "=", "3", ",", "exclude_regions", "=", "None", ")", ":", "for", "one_chan", "in", "channels", ".", "chan", ":", "one_region", ",", "approx"...
Assign a brain region based on the channel location. Parameters ---------- channels : instance of wonambi.attr.chan.Channels channels to assign regions to anat : instance of wonambi.attr.anat.Freesurfer anatomical information taken from freesurfer. parc_type : str 'aparc', 'aparc.a2009s', 'BA', 'BA.thresh', or 'aparc.DKTatlas40' 'aparc.DKTatlas40' is only for recent freesurfer versions max_approx : int, optional approximation to define position of the electrode. exclude_regions : list of str or empty list do not report regions if they contain these substrings. None means that it does not exclude any region. For example, to exclude white matter regions and unknown regions you can use exclude_regions=('White', 'WM', 'Unknown') Returns ------- instance of wonambi.attr.chan.Channels same instance as before, now Chan have attr 'region'
[ "Assign", "a", "brain", "region", "based", "on", "the", "channel", "location", "." ]
python
train
Metatab/metapack
metapack/package/core.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/core.py#L325-L370
def _clean_doc(self, doc=None): """Clean the doc before writing it, removing unnecessary properties and doing other operations.""" if doc is None: doc = self.doc resources = doc['Resources'] # We don't need these anymore because all of the data written into the package is normalized. for arg in ['startline', 'headerlines', 'encoding']: for e in list(resources.args): if e.lower() == arg: resources.args.remove(e) for term in resources: term['startline'] = None term['headerlines'] = None term['encoding'] = None schema = doc['Schema'] ## FIXME! This is probably dangerous, because the section args are changing, but the children ## are not, so when these two are combined in the Term.properties() acessors, the values are off. ## Because of this, _clean_doc should be run immediately before writing the doc. for arg in ['altname', 'transform']: for e in list(schema.args): if e.lower() == arg: schema.args.remove(e) for table in self.doc.find('Root.Table'): for col in table.find('Column'): try: col.value = col['altname'].value except: pass col['altname'] = None col['transform'] = None # Remove any DSNs #for dsn_t in self.doc.find('Root.Dsn'): # self.doc.remove_term(dsn_t) return doc
[ "def", "_clean_doc", "(", "self", ",", "doc", "=", "None", ")", ":", "if", "doc", "is", "None", ":", "doc", "=", "self", ".", "doc", "resources", "=", "doc", "[", "'Resources'", "]", "# We don't need these anymore because all of the data written into the package i...
Clean the doc before writing it, removing unnecessary properties and doing other operations.
[ "Clean", "the", "doc", "before", "writing", "it", "removing", "unnecessary", "properties", "and", "doing", "other", "operations", "." ]
python
train
erichiggins/gaek
gaek/ndb_json.py
https://github.com/erichiggins/gaek/blob/eb6bbc2d2688302834f97fd97891592e8b9659f2/gaek/ndb_json.py#L162-L169
def decode(self, val): """Override of the default decode method that also uses decode_date.""" # First try the date decoder. new_val = self.decode_date(val) if val != new_val: return new_val # Fall back to the default decoder. return json.JSONDecoder.decode(self, val)
[ "def", "decode", "(", "self", ",", "val", ")", ":", "# First try the date decoder.", "new_val", "=", "self", ".", "decode_date", "(", "val", ")", "if", "val", "!=", "new_val", ":", "return", "new_val", "# Fall back to the default decoder.", "return", "json", "."...
Override of the default decode method that also uses decode_date.
[ "Override", "of", "the", "default", "decode", "method", "that", "also", "uses", "decode_date", "." ]
python
test
DataDog/integrations-core
yarn/datadog_checks/yarn/yarn.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/yarn/datadog_checks/yarn/yarn.py#L310-L319
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None): """ Set a metric """ if metric_type == GAUGE: self.gauge(metric_name, value, tags=tags, device_name=device_name) elif metric_type == INCREMENT: self.increment(metric_name, value, tags=tags, device_name=device_name) else: self.log.error('Metric type "{}" unknown'.format(metric_type))
[ "def", "_set_metric", "(", "self", ",", "metric_name", ",", "metric_type", ",", "value", ",", "tags", "=", "None", ",", "device_name", "=", "None", ")", ":", "if", "metric_type", "==", "GAUGE", ":", "self", ".", "gauge", "(", "metric_name", ",", "value",...
Set a metric
[ "Set", "a", "metric" ]
python
train
mpdavis/python-jose
jose/jwt.py
https://github.com/mpdavis/python-jose/blob/deea7600eeea47aeb1bf5053a96de51cf2b9c639/jose/jwt.py#L294-L321
def _validate_exp(claims, leeway=0): """Validates that the 'exp' claim is valid. The "exp" (expiration time) claim identifies the expiration time on or after which the JWT MUST NOT be accepted for processing. The processing of the "exp" claim requires that the current date/time MUST be before the expiration date/time listed in the "exp" claim. Implementers MAY provide for some small leeway, usually no more than a few minutes, to account for clock skew. Its value MUST be a number containing a NumericDate value. Use of this claim is OPTIONAL. Args: claims (dict): The claims dictionary to validate. leeway (int): The number of seconds of skew that is allowed. """ if 'exp' not in claims: return try: exp = int(claims['exp']) except ValueError: raise JWTClaimsError('Expiration Time claim (exp) must be an integer.') now = timegm(datetime.utcnow().utctimetuple()) if exp < (now - leeway): raise ExpiredSignatureError('Signature has expired.')
[ "def", "_validate_exp", "(", "claims", ",", "leeway", "=", "0", ")", ":", "if", "'exp'", "not", "in", "claims", ":", "return", "try", ":", "exp", "=", "int", "(", "claims", "[", "'exp'", "]", ")", "except", "ValueError", ":", "raise", "JWTClaimsError",...
Validates that the 'exp' claim is valid. The "exp" (expiration time) claim identifies the expiration time on or after which the JWT MUST NOT be accepted for processing. The processing of the "exp" claim requires that the current date/time MUST be before the expiration date/time listed in the "exp" claim. Implementers MAY provide for some small leeway, usually no more than a few minutes, to account for clock skew. Its value MUST be a number containing a NumericDate value. Use of this claim is OPTIONAL. Args: claims (dict): The claims dictionary to validate. leeway (int): The number of seconds of skew that is allowed.
[ "Validates", "that", "the", "exp", "claim", "is", "valid", "." ]
python
train
santoshphilip/eppy
eppy/idf_helpers.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idf_helpers.py#L44-L60
def getobject_use_prevfield(idf, idfobject, fieldname): """field=object_name, prev_field=object_type. Return the object""" if not fieldname.endswith("Name"): return None # test if prevfieldname ends with "Object_Type" fdnames = idfobject.fieldnames ifieldname = fdnames.index(fieldname) prevfdname = fdnames[ifieldname - 1] if not prevfdname.endswith("Object_Type"): return None objkey = idfobject[prevfdname].upper() objname = idfobject[fieldname] try: foundobj = idf.getobject(objkey, objname) except KeyError as e: return None return foundobj
[ "def", "getobject_use_prevfield", "(", "idf", ",", "idfobject", ",", "fieldname", ")", ":", "if", "not", "fieldname", ".", "endswith", "(", "\"Name\"", ")", ":", "return", "None", "# test if prevfieldname ends with \"Object_Type\"", "fdnames", "=", "idfobject", ".",...
field=object_name, prev_field=object_type. Return the object
[ "field", "=", "object_name", "prev_field", "=", "object_type", ".", "Return", "the", "object" ]
python
train
AmesCornish/buttersink
buttersink/BestDiffs.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/BestDiffs.py#L313-L322
def _prune(self): """ Get rid of all intermediate nodes that aren't needed. """ done = False while not done: done = True for node in [node for node in self.nodes.values() if node.intermediate]: if not [dep for dep in self.nodes.values() if dep.previous == node.volume]: # logger.debug("Removing unnecessary node %s", node) del self.nodes[node.volume] done = False
[ "def", "_prune", "(", "self", ")", ":", "done", "=", "False", "while", "not", "done", ":", "done", "=", "True", "for", "node", "in", "[", "node", "for", "node", "in", "self", ".", "nodes", ".", "values", "(", ")", "if", "node", ".", "intermediate",...
Get rid of all intermediate nodes that aren't needed.
[ "Get", "rid", "of", "all", "intermediate", "nodes", "that", "aren", "t", "needed", "." ]
python
train
openstack/networking-cisco
networking_cisco/plugins/cisco/cfg_agent/cfg_agent.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/cfg_agent.py#L341-L351
def hosting_devices_unassigned_from_cfg_agent(self, context, payload): """Deal with hosting devices unassigned from this config agent.""" try: if payload['hosting_device_ids']: #TODO(hareeshp): implement unassignment of hosting devices pass except KeyError as e: LOG.error("Invalid payload format for received RPC message " "`hosting_devices_unassigned_from_cfg_agent`. Error " "is %(error)s. Payload is %(payload)s", {'error': e, 'payload': payload})
[ "def", "hosting_devices_unassigned_from_cfg_agent", "(", "self", ",", "context", ",", "payload", ")", ":", "try", ":", "if", "payload", "[", "'hosting_device_ids'", "]", ":", "#TODO(hareeshp): implement unassignment of hosting devices", "pass", "except", "KeyError", "as",...
Deal with hosting devices unassigned from this config agent.
[ "Deal", "with", "hosting", "devices", "unassigned", "from", "this", "config", "agent", "." ]
python
train
henzk/featuremonkey
featuremonkey/importhooks.py
https://github.com/henzk/featuremonkey/blob/e44414fc68427bcd71ad33ec2d816da0dd78eefa/featuremonkey/importhooks.py#L156-L170
def remove(cls, module_name): """ drop a previously created guard on ``module_name`` if the module is not guarded, then this is a no-op. """ module_guards = cls._guards.get(module_name, False) if module_guards: module_guards.pop() cls._num_entries -= 1 if cls._num_entries < 1: if cls._num_entries < 0: raise Exception( 'Bug: ImportGuardHook._num_entries became negative!' ) cls._uninstall()
[ "def", "remove", "(", "cls", ",", "module_name", ")", ":", "module_guards", "=", "cls", ".", "_guards", ".", "get", "(", "module_name", ",", "False", ")", "if", "module_guards", ":", "module_guards", ".", "pop", "(", ")", "cls", ".", "_num_entries", "-="...
drop a previously created guard on ``module_name`` if the module is not guarded, then this is a no-op.
[ "drop", "a", "previously", "created", "guard", "on", "module_name", "if", "the", "module", "is", "not", "guarded", "then", "this", "is", "a", "no", "-", "op", "." ]
python
train
metagriffin/fso
fso/filesystemoverlay.py
https://github.com/metagriffin/fso/blob/c37701fbfdfde359a2044eb9420abe569a7b35e4/fso/filesystemoverlay.py#L390-L395
def fso_lexists(self, path): 'overlays os.path.lexists()' try: return self._lexists(self.deref(path, to_parent=True)) except os.error: return False
[ "def", "fso_lexists", "(", "self", ",", "path", ")", ":", "try", ":", "return", "self", ".", "_lexists", "(", "self", ".", "deref", "(", "path", ",", "to_parent", "=", "True", ")", ")", "except", "os", ".", "error", ":", "return", "False" ]
overlays os.path.lexists()
[ "overlays", "os", ".", "path", ".", "lexists", "()" ]
python
valid
eqcorrscan/EQcorrscan
eqcorrscan/core/match_filter.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3245-L3253
def _templates_match(t, family_file): """ Return True if a tribe matches a family file path. :type t: Tribe :type family_file: str :return: bool """ return t.name == family_file.split(os.sep)[-1].split('_detections.csv')[0]
[ "def", "_templates_match", "(", "t", ",", "family_file", ")", ":", "return", "t", ".", "name", "==", "family_file", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "1", "]", ".", "split", "(", "'_detections.csv'", ")", "[", "0", "]" ]
Return True if a tribe matches a family file path. :type t: Tribe :type family_file: str :return: bool
[ "Return", "True", "if", "a", "tribe", "matches", "a", "family", "file", "path", "." ]
python
train