repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
quantmind/pulsar-cloud
cloud/utils/s3.py
https://github.com/quantmind/pulsar-cloud/blob/dc2ff8ab5c9a1c2cfb1270581d30454d1a606cf9/cloud/utils/s3.py#L27-L76
async def upload_file(self, bucket, file, uploadpath=None, key=None, ContentType=None, **kw):
    """Upload a file to S3, possibly using the multi-part uploader.

    ``file`` may be a file-like object, raw bytes (with an explicit
    ``key``), or a filesystem path.  Returns the response dict,
    guaranteed to carry ``Key`` and ``Bucket`` entries.
    """
    from_disk = False
    if hasattr(file, 'read'):
        # File-like object: rewind when possible, then slurp the payload
        if hasattr(file, 'seek'):
            file.seek(0)
        file = file.read()
        size = len(file)
    elif key:
        # Raw body with an explicit key
        size = len(file)
    else:
        # Otherwise treat ``file`` as a path on disk
        from_disk = True
        size = os.stat(file).st_size
        key = os.path.basename(file)
    assert key, 'key not available'
    if not ContentType:
        ContentType, _ = mimetypes.guess_type(key)
    if uploadpath:
        if not uploadpath.endswith('/'):
            uploadpath = '%s/' % uploadpath
        key = '%s%s' % (uploadpath, key)
    params = dict(Bucket=bucket, Key=key)
    params['ContentType'] = ContentType or 'application/octet-stream'
    if from_disk and size > MULTI_PART_SIZE:
        # Large on-disk file: hand off to the multi-part uploader
        resp = await _multipart(self, file, params)
    else:
        if from_disk:
            with open(file, 'rb') as fp:
                params['Body'] = fp.read()
        else:
            params['Body'] = file
        resp = await self.put_object(**params)
    resp.setdefault('Key', key)
    resp.setdefault('Bucket', bucket)
    return resp
[ "async", "def", "upload_file", "(", "self", ",", "bucket", ",", "file", ",", "uploadpath", "=", "None", ",", "key", "=", "None", ",", "ContentType", "=", "None", ",", "*", "*", "kw", ")", ":", "is_filename", "=", "False", "if", "hasattr", "(", "file"...
Upload a file to S3 possibly using the multi-part uploader Return the key uploaded
[ "Upload", "a", "file", "to", "S3", "possibly", "using", "the", "multi", "-", "part", "uploader", "Return", "the", "key", "uploaded" ]
python
valid
30.52
kontron/python-aardvark
pyaardvark/aardvark.py
https://github.com/kontron/python-aardvark/blob/9827f669fbdc5bceb98e7d08a294b4e4e455d0d5/pyaardvark/aardvark.py#L611-L618
def spi_write(self, data):
    """Write a stream of bytes to a SPI device.

    Returns the bytes simultaneously clocked back in from the device
    (SPI is full-duplex: one byte comes in for every byte sent).
    """
    tx = array.array('B', data)
    # Receive buffer must match the transmit length, zero-filled
    rx = array.array('B', [0] * len(tx))
    status = api.py_aa_spi_write(self.handle, len(tx), tx, len(rx), rx)
    _raise_error_if_negative(status)
    return bytes(rx)
[ "def", "spi_write", "(", "self", ",", "data", ")", ":", "data_out", "=", "array", ".", "array", "(", "'B'", ",", "data", ")", "data_in", "=", "array", ".", "array", "(", "'B'", ",", "(", "0", ",", ")", "*", "len", "(", "data_out", ")", ")", "re...
Write a stream of bytes to a SPI device.
[ "Write", "a", "stream", "of", "bytes", "to", "a", "SPI", "device", "." ]
python
train
44
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_deterministic_params.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L96-L106
def next_frame_ae():
    """Conv autoencoder hparams for next-frame prediction."""
    hp = next_frame_basic_deterministic()
    # Swap the input modality for the bitwise video autoencoder pair
    hp.bottom["inputs"] = modalities.video_bitwise_bottom
    hp.top["inputs"] = modalities.video_top
    # Smaller/compressed architecture than the deterministic baseline
    hp.hidden_size = 256
    hp.batch_size = 8
    hp.num_hidden_layers = 4
    hp.num_compress_steps = 4
    hp.dropout = 0.4
    return hp
[ "def", "next_frame_ae", "(", ")", ":", "hparams", "=", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "bottom", "[", "\"inputs\"", "]", "=", "modalities", ".", "video_bitwise_bottom", "hparams", ".", "top", "[", "\"inputs\"", "]", "=", "modalities",...
Conv autoencoder.
[ "Conv", "autoencoder", "." ]
python
train
31.636364
mattja/distob
distob/arrays.py
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/arrays.py#L1446-L1478
def transpose(a, axes=None):
    """Returns a view of the array with axes transposed.

    For a 1-D array, this has no effect. For a 2-D array, this is the
    usual matrix transpose. For an n-D array, if axes are given, their
    order indicates how the axes are permuted.

    Args:
      a (array_like): Input array.
      axes (list of int, optional): By default, reverse the dimensions,
        otherwise permute the axes according to the values given.
    """
    if isinstance(a, np.ndarray):
        return np.transpose(a, axes)
    elif isinstance(a, RemoteArray):
        # BUG FIX: ``a.transpose(*axes)`` raised TypeError when axes was
        # None (cannot unpack None); default to reversed axes explicitly,
        # matching the DistArray branch below.
        if axes is None:
            axes = range(a.ndim - 1, -1, -1)
        return a.transpose(*axes)
    elif isinstance(a, Remote):
        ra = _remote_to_array(a)
        if axes is None:
            axes = range(ra.ndim - 1, -1, -1)
        return ra.transpose(*axes)
    elif isinstance(a, DistArray):
        if axes is None:
            axes = range(a.ndim - 1, -1, -1)
        axes = list(axes)
        if len(set(axes)) < len(axes):
            raise ValueError("repeated axis in transpose")
        if sorted(axes) != list(range(a.ndim)):
            raise ValueError("axes don't match array")
        distaxis = a._distaxis
        new_distaxis = axes.index(distaxis)
        new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
        return DistArray(new_subarrays, new_distaxis)
    else:
        return np.transpose(a, axes)
[ "def", "transpose", "(", "a", ",", "axes", "=", "None", ")", ":", "if", "isinstance", "(", "a", ",", "np", ".", "ndarray", ")", ":", "return", "np", ".", "transpose", "(", "a", ",", "axes", ")", "elif", "isinstance", "(", "a", ",", "RemoteArray", ...
Returns a view of the array with axes transposed. For a 1-D array, this has no effect. For a 2-D array, this is the usual matrix transpose. For an n-D array, if axes are given, their order indicates how the axes are permuted Args: a (array_like): Input array. axes (list of int, optional): By default, reverse the dimensions, otherwise permute the axes according to the values given.
[ "Returns", "a", "view", "of", "the", "array", "with", "axes", "transposed", "." ]
python
valid
37.575758
delph-in/pydelphin
delphin/itsdb.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L1951-L1968
def add_filter(self, table, cols, condition):
    """
    Add a filter.

    When reading *table*, rows in *table* will be filtered by
    filter_rows().

    Args:
        table: The table the filter applies to (None means all tables).
        cols: The columns in *table* to filter on.
        condition: The filter function.
    """
    known = table is None or table in self.relations
    if not known:
        raise ItsdbError('Cannot add filter; table "{}" is not defined '
                         'by the relations file.'
                         .format(table))
    # this is a hack, though perhaps well-motivated
    if cols is None:
        cols = [None]
    self.filters[table].append((cols, condition))
[ "def", "add_filter", "(", "self", ",", "table", ",", "cols", ",", "condition", ")", ":", "if", "table", "is", "not", "None", "and", "table", "not", "in", "self", ".", "relations", ":", "raise", "ItsdbError", "(", "'Cannot add filter; table \"{}\" is not define...
Add a filter. When reading *table*, rows in *table* will be filtered by filter_rows(). Args: table: The table the filter applies to. cols: The columns in *table* to filter on. condition: The filter function.
[ "Add", "a", "filter", ".", "When", "reading", "*", "table", "*", "rows", "in", "*", "table", "*", "will", "be", "filtered", "by", "filter_rows", "()", "." ]
python
train
40
jmoiron/micromongo
micromongo/backend.py
https://github.com/jmoiron/micromongo/blob/0d7dd1396e2f25ece6648619ccff32345bc306a1/micromongo/backend.py#L115-L127
def next(self):
    """A `next` that caches the returned results.

    Together with the slightly different `__iter__`, these cursors can
    be iterated over more than once.  Tailable cursors are passed
    straight through without caching.
    """
    if self.__tailable:
        return PymongoCursor.next(self)
    try:
        item = PymongoCursor.next(self)
    except StopIteration:
        # the underlying cursor is exhausted: the cache is now complete
        self.__fullcache = True
        raise
    else:
        self.__itercache.append(item)
        return item
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "__tailable", ":", "return", "PymongoCursor", ".", "next", "(", "self", ")", "try", ":", "ret", "=", "PymongoCursor", ".", "next", "(", "self", ")", "except", "StopIteration", ":", "self", ".", ...
A `next` that caches the returned results. Together with the slightly different `__iter__`, these cursors can be iterated over more than once.
[ "A", "next", "that", "caches", "the", "returned", "results", ".", "Together", "with", "the", "slightly", "different", "__iter__", "these", "cursors", "can", "be", "iterated", "over", "more", "than", "once", "." ]
python
train
34.230769
Esri/ArcREST
src/arcrest/ags/_gpobjects.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_gpobjects.py#L118-L129
def fromTable(table, paramName):
    """
    Converts a table to GPRecordSet object

    Inputs:
       table - path to the table
       paramName - name of the parameter
    """
    from ..common.spatial import recordset_to_json
    recordset = GPRecordSet()
    recordset.paramName = paramName
    recordset.value = json.loads(recordset_to_json(table))
    return recordset
[ "def", "fromTable", "(", "table", ",", "paramName", ")", ":", "from", ".", ".", "common", ".", "spatial", "import", "recordset_to_json", "g", "=", "GPRecordSet", "(", ")", "g", ".", "paramName", "=", "paramName", "g", ".", "value", "=", "json", ".", "l...
Converts a table to GPRecordSet object Inputs: table - path to the table paramName - name of the parameter
[ "Converts", "a", "table", "to", "GPRecordSet", "object", "Inputs", ":", "table", "-", "path", "to", "the", "table", "paramName", "-", "name", "of", "the", "parameter" ]
python
train
31.25
secdev/scapy
scapy/packet.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/packet.py#L1857-L1874
def fuzz(p, _inplace=0):
    """Transform a layer into a fuzzy layer by replacing some default values by random objects.  # noqa: E501

    Walks every layer of ``p`` (copying it first unless ``_inplace`` is
    true) and, for each field whose default is not None, installs a
    volatile random value in ``default_fields`` so each build of the
    packet differs.  Packet-list fields are fuzzed recursively in place.

    :param p: the packet (layer chain) to fuzz
    :param _inplace: when truthy, mutate ``p`` instead of a copy
    :returns: the fuzzed packet
    """
    if not _inplace:
        p = p.copy()
    q = p
    while not isinstance(q, NoPayload):
        for f in q.fields_desc:
            if isinstance(f, PacketListField):
                # BUG FIX: removed stray debug output
                # (print("fuzzing", repr(r))) that spammed stdout for
                # every sub-packet; recurse into each sub-packet in place.
                for r in getattr(q, f.name):
                    fuzz(r, _inplace=1)
            elif f.default is not None:
                if not isinstance(f, ConditionalField) or f._evalcond(q):
                    rnd = f.randval()
                    if rnd is not None:
                        q.default_fields[f.name] = rnd
        q = q.payload
    return p
[ "def", "fuzz", "(", "p", ",", "_inplace", "=", "0", ")", ":", "# noqa: E501", "if", "not", "_inplace", ":", "p", "=", "p", ".", "copy", "(", ")", "q", "=", "p", "while", "not", "isinstance", "(", "q", ",", "NoPayload", ")", ":", "for", "f", "in...
Transform a layer into a fuzzy layer by replacing some default values by random objects
[ "Transform", "a", "layer", "into", "a", "fuzzy", "layer", "by", "replacing", "some", "default", "values", "by", "random", "objects" ]
python
train
39.055556
mozilla-releng/scriptworker
scriptworker/version.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/version.py#L60-L82
def write_version(name=None, path=None):
    """Write the version info to ../version.json, for setup.py.

    Args:
        name (Optional[str]): this is for the ``write_version(name=__name__)``
            below.  That's one way to both follow the
            ``if __name__ == '__main__':`` convention but also allow for
            full coverage without ignoring parts of the file.

        path (Optional[str]): the path to write the version json to.
            Defaults to ../version.json
    """
    # Written like this for coverage purposes.
    # http://stackoverflow.com/questions/5850268/how-to-test-or-mock-if-name-main-contents/27084447#27084447
    if name not in (None, '__main__'):
        return
    if not path:
        parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        path = os.path.join(parent, "version.json")
    contents = {
        'version': __version__,
        'version_string': __version_string__,
    }
    with open(path, 'w') as filehandle:
        filehandle.write(json.dumps(contents, sort_keys=True, indent=4))
[ "def", "write_version", "(", "name", "=", "None", ",", "path", "=", "None", ")", ":", "# Written like this for coverage purposes.", "# http://stackoverflow.com/questions/5850268/how-to-test-or-mock-if-name-main-contents/27084447#27084447", "if", "name", "in", "(", "None", ",", ...
Write the version info to ../version.json, for setup.py. Args: name (Optional[str]): this is for the ``write_version(name=__name__)`` below. That's one way to both follow the ``if __name__ == '__main__':`` convention but also allow for full coverage without ignoring parts of the file. path (Optional[str]): the path to write the version json to. Defaults to ../version.json
[ "Write", "the", "version", "info", "to", "..", "/", "version", ".", "json", "for", "setup", ".", "py", "." ]
python
train
45.130435
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ipv6_access_list.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ipv6_access_list.py#L26-L41
def ipv6_acl_ipv6_access_list_standard_seq_seq_id(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF ``config`` element addressing the ``seq-id`` of a
    sequence entry in a standard IPv6 access list, then hands the tree
    to the callback (``kwargs['callback']`` or ``self._callback``).
    """
    config = ET.Element("config")
    ipv6_acl = ET.SubElement(config, "ipv6-acl", xmlns="urn:brocade.com:mgmt:brocade-ipv6-access-list")
    ipv6 = ET.SubElement(ipv6_acl, "ipv6")
    access_list = ET.SubElement(ipv6, "access-list")
    standard = ET.SubElement(access_list, "standard")
    # key element identifying the ACL
    ET.SubElement(standard, "name").text = kwargs.pop('name')
    seq = ET.SubElement(standard, "seq")
    ET.SubElement(seq, "seq-id").text = kwargs.pop('seq_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "ipv6_acl_ipv6_access_list_standard_seq_seq_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ipv6_acl", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ipv6-acl\"", ",", "xmlns", "...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
45.0625
pywbem/pywbem
pywbem/_recorder.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_recorder.py#L119-L126
def _cimdatetime_constructor(loader, node):
    """
    PyYAML constructor function for CIMDateTime objects.

    This is needed for yaml.safe_load() to support CIMDateTime.
    """
    # The scalar is the string form produced by the matching representer
    scalar = loader.construct_scalar(node)
    return CIMDateTime(scalar)
[ "def", "_cimdatetime_constructor", "(", "loader", ",", "node", ")", ":", "cimdatetime_str", "=", "loader", ".", "construct_scalar", "(", "node", ")", "cimdatetime", "=", "CIMDateTime", "(", "cimdatetime_str", ")", "return", "cimdatetime" ]
PyYAML constructor function for CIMDateTime objects. This is needed for yaml.safe_load() to support CIMDateTime.
[ "PyYAML", "constructor", "function", "for", "CIMDateTime", "objects", ".", "This", "is", "needed", "for", "yaml", ".", "safe_load", "()", "to", "support", "CIMDateTime", "." ]
python
train
36.875
edx/xblock-utils
xblockutils/studio_editable.py
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L405-L413
def preview_view(self, context):
    """
    Preview view - used by StudioContainerWithNestedXBlocksMixin to render
    nested xblocks in preview context.

    Default implementation uses author_view if available, otherwise falls
    back to student_view.  Child classes can override this method to
    control their presentation in preview context.
    """
    if hasattr(self, 'author_view'):
        renderer = self.author_view
    else:
        renderer = self.student_view
    return renderer(context)
[ "def", "preview_view", "(", "self", ",", "context", ")", ":", "view_to_render", "=", "'author_view'", "if", "hasattr", "(", "self", ",", "'author_view'", ")", "else", "'student_view'", "renderer", "=", "getattr", "(", "self", ",", "view_to_render", ")", "retur...
Preview view - used by StudioContainerWithNestedXBlocksMixin to render nested xblocks in preview context. Default implementation uses author_view if available, otherwise falls back to student_view Child classes can override this method to control their presentation in preview context
[ "Preview", "view", "-", "used", "by", "StudioContainerWithNestedXBlocksMixin", "to", "render", "nested", "xblocks", "in", "preview", "context", ".", "Default", "implementation", "uses", "author_view", "if", "available", "otherwise", "falls", "back", "to", "student_vie...
python
train
58.888889
StanfordVL/robosuite
robosuite/models/tasks/pick_place_task.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/tasks/pick_place_task.py#L88-L128
def place_objects(self):
    """Places objects randomly until no collisions or max iterations hit."""
    placed = []
    # rejection sampling: draw candidate positions until one is collision-free
    for index, (_, obj_mjcf) in enumerate(self.mujoco_objects.items()):
        radius = obj_mjcf.get_horizontal_radius()
        bottom_offset = obj_mjcf.get_bottom_offset()
        for _ in range(5000):  # per-object retry budget
            half_x = self.bin_size[0] / 2 - radius - 0.05
            half_y = self.bin_size[1] / 2 - radius - 0.05
            x = np.random.uniform(high=half_x, low=-half_x)
            y = np.random.uniform(high=half_y, low=-half_y)
            # candidate world position for this object
            candidate = self.bin_offset - bottom_offset + np.array([x, y, 0])
            # make sure objects do not overlap (Chebyshev distance check)
            overlaps = any(
                np.linalg.norm(candidate[:2] - other[:2], np.inf) <= r + radius
                for other, r in placed
            )
            if not overlaps:
                placed.append((candidate, radius))
                self.objects[index].set("pos", array_to_string(candidate))
                # random z-rotation
                self.objects[index].set("quat", array_to_string(self.sample_quat()))
                break
        else:
            # exhausted every retry for this object
            raise RandomizationError("Cannot place all objects in the bins")
[ "def", "place_objects", "(", "self", ")", ":", "placed_objects", "=", "[", "]", "index", "=", "0", "# place objects by rejection sampling", "for", "_", ",", "obj_mjcf", "in", "self", ".", "mujoco_objects", ".", "items", "(", ")", ":", "horizontal_radius", "=",...
Places objects randomly until no collisions or max iterations hit.
[ "Places", "objects", "randomly", "until", "no", "collisions", "or", "max", "iterations", "hit", "." ]
python
train
46.04878
merenlab/illumina-utils
IlluminaUtils/utils/helperfunctions.py
https://github.com/merenlab/illumina-utils/blob/246d0611f976471783b83d2aba309b0cb57210f6/IlluminaUtils/utils/helperfunctions.py#L572-L654
def visualize_qual_stats_dict_single(D, dest, title):
    """Same as visualize_qual_stats_dict, but puts all tiles together.

    Args:
        D: mapping {'1': {tile_id: {'mean': [...], 'count': [...]}},
           '2': {...}} of per-tile quality stats; pair '2' is optional.
        dest: destination path without extension (.tiff is tried first,
           falling back to .png).
        title: figure title.

    Returns:
        (mean_qualities_pair_1, mean_qualities_pair_2) averaged across
        tiles (pair 2 is all zeros when absent from ``D``).
    """
    # first find out how many cycles were there. it is going to be about 101
    # for hiseq runs, and 251 in miseq runs, but these values may change from
    # run to run. although all lanes are expected to have the identical number
    # of cycles, the following code makes sure that the number_of_cycles
    # variable holds the longest one if there is a variation between lanes.
    number_of_cycles = 0
    for pair in ['1', '2']:
        if pair not in D:
            continue
        for tile in D[pair]:
            if len(D[pair][tile]['mean']) > number_of_cycles:
                number_of_cycles = len(D[pair][tile]['mean'])

    plt.figure(figsize=(12, 8))
    plt.rcParams.update({'axes.linewidth': 0.9})
    plt.rc('grid', color='0.50', linestyle='-', linewidth=0.1)

    all_tiles = {'1': {'mean': [0] * number_of_cycles, 'count': [0] * number_of_cycles},
                 '2': {'mean': [0] * number_of_cycles, 'count': [0] * number_of_cycles}}

    # average the per-tile means and total the per-tile counts per cycle
    for i in range(0, number_of_cycles):
        means_p1 = []
        counts_p1 = []
        means_p2 = []
        counts_p2 = []
        for tile_id in D['1']:
            tile = D['1'][tile_id]
            means_p1.append(tile['mean'][i])
            counts_p1.append(tile['count'][i])
            if '2' in D and D['2']:
                tile = D['2'][tile_id]
                means_p2.append(tile['mean'][i])
                counts_p2.append(tile['count'][i])
        all_tiles['1']['mean'][i] = numpy.mean(means_p1)
        all_tiles['1']['count'][i] = sum(counts_p1)
        if '2' in D and D['2']:
            all_tiles['2']['mean'][i] = numpy.mean(means_p2)
            all_tiles['2']['count'][i] = sum(counts_p2)

    colors = cm.get_cmap('RdYlGn', lut=256)

    plt.grid(True)
    plt.subplots_adjust(left=0.02, bottom=0.03, top=0.95, right=0.98)
    # BUG FIX: range() requires integers; '/' yielded floats under Python 3
    # and made xticks raise TypeError. Use floor division.
    tick_step = number_of_cycles // 10
    plt.xticks(list(range(tick_step, number_of_cycles, tick_step)),
               rotation=90, size='xx-small')
    plt.ylim(ymin=0, ymax=42)
    plt.xlim(xmin=0, xmax=number_of_cycles - 1)
    plt.yticks(list(range(5, 41, 5)), size='xx-small')

    plt.fill_between(list(range(0, number_of_cycles)),
                     [42 for _ in range(0, number_of_cycles)],
                     y2=0, color=colors(0), alpha=0.2)
    plt.plot(all_tiles['1']['mean'], color='orange', lw=6)

    # shade the read-count drop-off along cycles, scaled to the y-range (42)
    read_number_percent_dropdown = [42 * (x / all_tiles['1']['count'][0])
                                    for x in all_tiles['1']['count']]
    if len(set(read_number_percent_dropdown)) > 1:
        plt.fill_between(list(range(0, number_of_cycles)),
                         read_number_percent_dropdown,
                         y2=0, color='black', alpha=0.08)
    # the count label was emitted on both branches originally; hoisted here
    plt.text(5, 2.5, '%s' % big_number_pretty_print(all_tiles['1']['count'][0]),
             alpha=0.5)

    if '2' in all_tiles and all_tiles['2']:
        plt.plot(all_tiles['2']['mean'], color='purple', lw=6)

    plt.figtext(0.5, 0.97, '%s' % (title), weight='black', size='xx-large',
                ha='center')

    try:
        plt.savefig(dest + '.tiff')
    except Exception:
        # some matplotlib backends cannot write TIFF; fall back to PNG
        plt.savefig(dest + '.png')

    return (all_tiles['1']['mean'], all_tiles['2']['mean'])
[ "def", "visualize_qual_stats_dict_single", "(", "D", ",", "dest", ",", "title", ")", ":", "# first find out how many cycles were there. it is going to be about 101 for", "# hiseq runs, and 251 in miseq runs, but these values may change from run to", "# run. although all lanes are expected to...
same as visualize_qual_stats_dict, but puts all tiles together.
[ "same", "as", "visualize_qual_stats_dict", "but", "puts", "all", "tiles", "together", "." ]
python
train
39.361446
fxsjy/jieba
jieba/__init__.py
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/__init__.py#L569-L589
def enable_parallel(processnum=None):
    """
    Change the module's `cut` and `cut_for_search` functions to the
    parallel version.

    Note that this only works using dt, custom Tokenizer
    instances are not supported.
    """
    global pool, dt, cut, cut_for_search
    from multiprocessing import cpu_count
    # multiprocessing fork semantics required; Windows is unsupported
    if os.name == 'nt':
        raise NotImplementedError(
            "jieba: parallel mode only supports posix system")
    from multiprocessing import Pool
    dt.check_initialized()
    if processnum is None:
        processnum = cpu_count()
    pool = Pool(processnum)
    # rebind the module-level entry points to the parallel implementations
    cut = _pcut
    cut_for_search = _pcut_for_search
[ "def", "enable_parallel", "(", "processnum", "=", "None", ")", ":", "global", "pool", ",", "dt", ",", "cut", ",", "cut_for_search", "from", "multiprocessing", "import", "cpu_count", "if", "os", ".", "name", "==", "'nt'", ":", "raise", "NotImplementedError", ...
Change the module's `cut` and `cut_for_search` functions to the parallel version. Note that this only works using dt, custom Tokenizer instances are not supported.
[ "Change", "the", "module", "s", "cut", "and", "cut_for_search", "functions", "to", "the", "parallel", "version", "." ]
python
train
30.428571
cds-astro/mocpy
mocpy/moc/moc.py
https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/moc/moc.py#L387-L411
def from_vizier_table(cls, table_id, nside=256):
    """
    Creates a `~mocpy.moc.MOC` object from a VizieR table.

    **Info**: This method is already implemented in `astroquery.cds
    <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You
    can ask to get a `mocpy.moc.MOC` object from a vizier catalog ID.

    Parameters
    ----------
    table_id : str
        table index
    nside : int, optional
        256 by default

    Returns
    -------
    result : `~mocpy.moc.MOC`
        The resulting MOC.
    """
    valid_nsides = (8, 16, 32, 64, 128, 256, 512)
    if nside not in valid_nsides:
        raise ValueError('Bad value for nside. Must be in {0}'.format(valid_nsides))
    return cls.from_ivorn('ivo://CDS/' + table_id, nside)
[ "def", "from_vizier_table", "(", "cls", ",", "table_id", ",", "nside", "=", "256", ")", ":", "nside_possible_values", "=", "(", "8", ",", "16", ",", "32", ",", "64", ",", "128", ",", "256", ",", "512", ")", "if", "nside", "not", "in", "nside_possible...
Creates a `~mocpy.moc.MOC` object from a VizieR table. **Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask to get a `mocpy.moc.MOC` object from a vizier catalog ID. Parameters ---------- table_id : str table index nside : int, optional 256 by default Returns ------- result : `~mocpy.moc.MOC` The resulting MOC.
[ "Creates", "a", "~mocpy", ".", "moc", ".", "MOC", "object", "from", "a", "VizieR", "table", "." ]
python
train
34.44
dossier/dossier.models
dossier/models/etl/interface.py
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/etl/interface.py#L152-L231
def html_to_fc(html=None, clean_html=None, clean_visible=None, encoding=None, url=None, timestamp=None, other_features=None):
    '''Build a FeatureCollection from a raw HTML page.

    `html` is expected to be a raw string received over the wire from a
    remote webserver, and `encoding`, if provided, is used to decode
    it.  Typically, encoding comes from the Content-Type header field.
    The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
    function handles character encodings.
    '''
    def add_feature(name, xs):
        # accumulate into ``fc`` (bound below, before the first call)
        if name not in fc:
            fc[name] = StringCounter()
        fc[name] += StringCounter(xs)

    timestamp = timestamp or int(time.time() * 1000)
    other_features = other_features or {}

    # normalise the cleaned-HTML inputs
    clean_html_utf8 = u''
    if clean_html is None:
        if html is None:
            clean_html = u''
        else:
            try:
                clean_html_utf8 = make_clean_html(html, encoding=encoding)
            except:
                logger.warn('dropping doc because:', exc_info=True)
                return
            clean_html = clean_html_utf8.decode('utf-8')
    if clean_visible is None or len(clean_visible) == 0:
        clean_visible = make_clean_visible(clean_html_utf8).decode('utf-8')
    elif isinstance(clean_visible, str):
        clean_visible = clean_visible.decode('utf-8')

    fc = FeatureCollection()
    fc[u'meta_raw'] = html and uni(html, encoding) or u''
    fc[u'meta_clean_html'] = clean_html
    fc[u'meta_clean_visible'] = clean_visible
    fc[u'meta_timestamp'] = unicode(timestamp)
    fc[u'meta_url'] = uni(url or u'')

    # contact-info features extracted from the visible text
    add_feature(u'icq', features.ICQs(clean_visible))
    add_feature(u'skype', features.skypes(clean_visible))
    add_feature(u'phone', features.phones(clean_visible))
    add_feature(u'email', features.emails(clean_visible))

    np_norm, normalizations = features.noun_phrases(
        cleanse(clean_visible), included_unnormalized=True)
    add_feature(u'bowNP', np_norm)
    add_feature(u'bowNP_unnorm', chain(*normalizations.values()))

    add_feature(u'image_url', features.image_urls(clean_html))
    add_feature(u'a_url', features.a_urls(clean_html))

    ## get parsed versions, extract usernames
    fc[u'img_url_path_dirs'] = features.path_dirs(fc[u'image_url'])
    fc[u'img_url_hostnames'] = features.host_names(fc[u'image_url'])
    fc[u'usernames'] = features.usernames(fc[u'image_url'])
    fc[u'a_url_path_dirs'] = features.path_dirs(fc[u'a_url'])
    fc[u'a_url_hostnames'] = features.host_names(fc[u'a_url'])
    fc[u'usernames'] += features.usernames(fc[u'a_url'])

    # beginning of treating this as a pipeline...
    xform = features.entity_names()
    fc = xform.process(fc)

    for feat_name, feat_val in other_features.iteritems():
        fc[feat_name] += StringCounter(feat_val)

    return fc
[ "def", "html_to_fc", "(", "html", "=", "None", ",", "clean_html", "=", "None", ",", "clean_visible", "=", "None", ",", "encoding", "=", "None", ",", "url", "=", "None", ",", "timestamp", "=", "None", ",", "other_features", "=", "None", ")", ":", "def",...
`html` is expected to be a raw string received over the wire from a remote webserver, and `encoding`, if provided, is used to decode it. Typically, encoding comes from the Content-Type header field. The :func:`~streamcorpus_pipeline._clean_html.make_clean_html` function handles character encodings.
[ "html", "is", "expected", "to", "be", "a", "raw", "string", "received", "over", "the", "wire", "from", "a", "remote", "webserver", "and", "encoding", "if", "provided", "is", "used", "to", "decode", "it", ".", "Typically", "encoding", "comes", "from", "the"...
python
train
36.2875
genialis/resolwe-runtime-utils
resolwe_runtime_utils.py
https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L62-L70
def save_list(key, *values):
    """Convert the given list of parameters to a JSON object.

    JSON object is of the form:
    { key: [values[0], values[1], ... ] },
    where values represent the given list of parameters.
    """
    converted = list(map(_get_json, values))
    return json.dumps({key: converted})
[ "def", "save_list", "(", "key", ",", "*", "values", ")", ":", "return", "json", ".", "dumps", "(", "{", "key", ":", "[", "_get_json", "(", "value", ")", "for", "value", "in", "values", "]", "}", ")" ]
Convert the given list of parameters to a JSON object. JSON object is of the form: { key: [values[0], values[1], ... ] }, where values represent the given list of parameters.
[ "Convert", "the", "given", "list", "of", "parameters", "to", "a", "JSON", "object", "." ]
python
train
32.555556
SmartTeleMax/iktomi
iktomi/db/files.py
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/db/files.py#L177-L190
def store(self, transient_file, persistent_file):
    '''Makes PersistentFile from TransientFile by moving the underlying
    file into the persistent location, creating parent directories as
    needed.'''
    target_dir = os.path.dirname(persistent_file.path)
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    # atomic within a filesystem: move rather than copy
    os.rename(transient_file.path, persistent_file.path)
    return persistent_file
[ "def", "store", "(", "self", ",", "transient_file", ",", "persistent_file", ")", ":", "#for i in range(5):", "# persistent_file = PersistentFile(self.persistent_root,", "# persistent_name, self)", "# if not os.path.exists(persistent_file.path):", ...
Makes PersistentFile from TransientFile
[ "Makes", "PersistentFile", "from", "TransientFile" ]
python
train
45.214286
memphis-iis/GLUDB
gludb/backends/sqlite.py
https://github.com/memphis-iis/GLUDB/blob/25692528ff6fe8184a3570f61f31f1a90088a388/gludb/backends/sqlite.py#L77-L95
def find_by_index(self, cls, index_name, value):
    """Find all rows matching index query - as per the gludb spec."""
    cur = self._conn().cursor()
    # index_name/table come from trusted class metadata; only the value
    # is parameterized
    query = 'select id,value from %s where %s = ?' % (
        cls.get_table_name(),
        index_name
    )
    results = []
    for rec_id, data in cur.execute(query, (value,)):
        obj = cls.from_data(data)
        assert rec_id == obj.id
        results.append(obj)
    cur.close()
    return results
[ "def", "find_by_index", "(", "self", ",", "cls", ",", "index_name", ",", "value", ")", ":", "cur", "=", "self", ".", "_conn", "(", ")", ".", "cursor", "(", ")", "query", "=", "'select id,value from %s where %s = ?'", "%", "(", "cls", ".", "get_table_name",...
Find all rows matching index query - as per the gludb spec.
[ "Find", "all", "rows", "matching", "index", "query", "-", "as", "per", "the", "gludb", "spec", "." ]
python
train
27.210526
treycucco/bidon
bidon/db/access/pg_advisory_lock.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/pg_advisory_lock.py#L101-L119
def _lock_fxn(direction, lock_mode, xact): """Builds a pg advisory lock function name based on various options. :direction: one of "lock" or "unlock" :lock_mode: a member of the LockMode enum :xact: a boolean, if True the lock will be automatically released at the end of the transaction and cannot be manually released. """ if direction == "unlock" or lock_mode == LockMode.wait: try_mode = "" else: try_mode = "_try" if direction == "lock" and xact: xact_mode = "_xact" else: xact_mode = "" return "pg{}_advisory{}_{}".format(try_mode, xact_mode, direction)
[ "def", "_lock_fxn", "(", "direction", ",", "lock_mode", ",", "xact", ")", ":", "if", "direction", "==", "\"unlock\"", "or", "lock_mode", "==", "LockMode", ".", "wait", ":", "try_mode", "=", "\"\"", "else", ":", "try_mode", "=", "\"_try\"", "if", "direction...
Builds a pg advisory lock function name based on various options. :direction: one of "lock" or "unlock" :lock_mode: a member of the LockMode enum :xact: a boolean, if True the lock will be automatically released at the end of the transaction and cannot be manually released.
[ "Builds", "a", "pg", "advisory", "lock", "function", "name", "based", "on", "various", "options", "." ]
python
train
31
Koed00/django-q
django_q/humanhash.py
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/humanhash.py#L94-L126
def compress(bytes, target): """ Compress a list of byte values to a fixed target length. >>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151] >>> HumanHasher.compress(bytes, 4) [205, 128, 156, 96] Attempting to compress a smaller number of bytes to a larger number is an error: >>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Fewer input bytes than requested output """ length = len(bytes) if target > length: raise ValueError("Fewer input bytes than requested output") # Split `bytes` into `target` segments. seg_size = length // target segments = [bytes[i * seg_size:(i + 1) * seg_size] for i in range(target)] # Catch any left-over bytes in the last segment. segments[-1].extend(bytes[target * seg_size:]) # Use a simple XOR checksum-like function for compression. checksum = lambda bytes: reduce(operator.xor, bytes, 0) checksums = list(map(checksum, segments)) return checksums
[ "def", "compress", "(", "bytes", ",", "target", ")", ":", "length", "=", "len", "(", "bytes", ")", "if", "target", ">", "length", ":", "raise", "ValueError", "(", "\"Fewer input bytes than requested output\"", ")", "# Split `bytes` into `target` segments.", "seg_siz...
Compress a list of byte values to a fixed target length. >>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151] >>> HumanHasher.compress(bytes, 4) [205, 128, 156, 96] Attempting to compress a smaller number of bytes to a larger number is an error: >>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Fewer input bytes than requested output
[ "Compress", "a", "list", "of", "byte", "values", "to", "a", "fixed", "target", "length", "." ]
python
train
35.575758
dhermes/bezier
src/bezier/surface.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L948-L1005
def intersect(self, other, strategy=_STRATEGY.GEOMETRIC, _verify=True): """Find the common intersection with another surface. Args: other (Surface): Other surface to intersect with. strategy (Optional[~bezier.curve.IntersectionStrategy]): The intersection algorithm to use. Defaults to geometric. _verify (Optional[bool]): Indicates if extra caution should be used to verify assumptions about the algorithm as it proceeds. Can be disabled to speed up execution time. Defaults to :data:`True`. Returns: List[Union[~bezier.curved_polygon.CurvedPolygon, \ ~bezier.surface.Surface]]: List of intersections (possibly empty). Raises: TypeError: If ``other`` is not a surface (and ``_verify=True``). NotImplementedError: If at least one of the surfaces isn't two-dimensional (and ``_verify=True``). ValueError: If ``strategy`` is not a valid :class:`.IntersectionStrategy`. """ if _verify: if not isinstance(other, Surface): raise TypeError( "Can only intersect with another surface", "Received", other, ) if self._dimension != 2 or other._dimension != 2: raise NotImplementedError( "Intersection only implemented in 2D" ) if strategy == _STRATEGY.GEOMETRIC: do_intersect = _surface_intersection.geometric_intersect elif strategy == _STRATEGY.ALGEBRAIC: do_intersect = _surface_intersection.algebraic_intersect else: raise ValueError("Unexpected strategy.", strategy) edge_infos, contained, all_edge_nodes = do_intersect( self._nodes, self._degree, other._nodes, other._degree, _verify ) if edge_infos is None: if contained: return [self] else: return [other] else: return [ _make_intersection(edge_info, all_edge_nodes) for edge_info in edge_infos ]
[ "def", "intersect", "(", "self", ",", "other", ",", "strategy", "=", "_STRATEGY", ".", "GEOMETRIC", ",", "_verify", "=", "True", ")", ":", "if", "_verify", ":", "if", "not", "isinstance", "(", "other", ",", "Surface", ")", ":", "raise", "TypeError", "(...
Find the common intersection with another surface. Args: other (Surface): Other surface to intersect with. strategy (Optional[~bezier.curve.IntersectionStrategy]): The intersection algorithm to use. Defaults to geometric. _verify (Optional[bool]): Indicates if extra caution should be used to verify assumptions about the algorithm as it proceeds. Can be disabled to speed up execution time. Defaults to :data:`True`. Returns: List[Union[~bezier.curved_polygon.CurvedPolygon, \ ~bezier.surface.Surface]]: List of intersections (possibly empty). Raises: TypeError: If ``other`` is not a surface (and ``_verify=True``). NotImplementedError: If at least one of the surfaces isn't two-dimensional (and ``_verify=True``). ValueError: If ``strategy`` is not a valid :class:`.IntersectionStrategy`.
[ "Find", "the", "common", "intersection", "with", "another", "surface", "." ]
python
train
38.241379
galactics/beyond
beyond/orbits/man.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/orbits/man.py#L47-L66
def dv(self, orb): """Computation of the velocity increment in the reference frame of the orbit Args: orb (Orbit): Return: numpy.array: Velocity increment, length 3 """ orb = orb.copy(form="cartesian") if self.frame == "QSW": mat = to_qsw(orb).T elif self.frame == "TNW": mat = to_tnw(orb).T else: mat = np.identity(3) # velocity increment in the same reference frame as the orbit return mat @ self._dv
[ "def", "dv", "(", "self", ",", "orb", ")", ":", "orb", "=", "orb", ".", "copy", "(", "form", "=", "\"cartesian\"", ")", "if", "self", ".", "frame", "==", "\"QSW\"", ":", "mat", "=", "to_qsw", "(", "orb", ")", ".", "T", "elif", "self", ".", "fra...
Computation of the velocity increment in the reference frame of the orbit Args: orb (Orbit): Return: numpy.array: Velocity increment, length 3
[ "Computation", "of", "the", "velocity", "increment", "in", "the", "reference", "frame", "of", "the", "orbit" ]
python
train
26.35
spyder-ide/spyder
spyder/preferences/shortcuts.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/shortcuts.py#L456-L461
def set_sequence_to_default(self): """Set the new sequence to the default value defined in the config.""" sequence = CONF.get_default( 'shortcuts', "{}/{}".format(self.context, self.name)) self._qsequences = sequence.split(', ') self.update_warning()
[ "def", "set_sequence_to_default", "(", "self", ")", ":", "sequence", "=", "CONF", ".", "get_default", "(", "'shortcuts'", ",", "\"{}/{}\"", ".", "format", "(", "self", ".", "context", ",", "self", ".", "name", ")", ")", "self", ".", "_qsequences", "=", "...
Set the new sequence to the default value defined in the config.
[ "Set", "the", "new", "sequence", "to", "the", "default", "value", "defined", "in", "the", "config", "." ]
python
train
49
senaite/senaite.core
bika/lims/content/samplinground.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/samplinground.py#L346-L368
def getSRTemplateInfo(self): """ Returns a dict with the SRTemplate infomration {'uid':'xxxx','id':'xxxx','title':'xxx','url':'xxx'} """ pc = getToolByName(api.portal.get(), 'portal_catalog') contentFilter = {'portal_type': 'SRTemplate', 'UID': self.sr_template} srt = pc(contentFilter) srtdict = {'uid': '', 'id': '', 'title': '', 'url': ''} if len(srt) == 1: template = srt[0].getObject() srtdict = { 'uid': template.id, 'id': template.UID(), 'title': template.title, 'url': template.absolute_url(), } else: from bika.lims import logger error = "Error when looking for sr template with uid '%s'. " logger.exception(error, self.sr_template) return srtdict
[ "def", "getSRTemplateInfo", "(", "self", ")", ":", "pc", "=", "getToolByName", "(", "api", ".", "portal", ".", "get", "(", ")", ",", "'portal_catalog'", ")", "contentFilter", "=", "{", "'portal_type'", ":", "'SRTemplate'", ",", "'UID'", ":", "self", ".", ...
Returns a dict with the SRTemplate infomration {'uid':'xxxx','id':'xxxx','title':'xxx','url':'xxx'}
[ "Returns", "a", "dict", "with", "the", "SRTemplate", "infomration", "{", "uid", ":", "xxxx", "id", ":", "xxxx", "title", ":", "xxx", "url", ":", "xxx", "}" ]
python
train
38.391304
markovmodel/PyEMMA
pyemma/coordinates/transform/nystroem_tica.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/transform/nystroem_tica.py#L578-L602
def approximate_cholesky(self, epsilon=1e-6): r""" Compute low-rank approximation to the Cholesky decomposition of target matrix. The decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive. Parameters ---------- epsilon : float, optional, default 1e-6 Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues will be cut off. Returns ------- L : ndarray((n,m), dtype=float) Cholesky matrix such that `A \approx L L^{\top}`. Number of columns :math:`m` is most at the number of columns used in the Nystroem approximation, but may be smaller depending on epsilon. """ # compute the Eigenvalues of C0 using Schur factorization Wk = self._C_k[self._columns, :] L0 = spd_inv_split(Wk, epsilon=epsilon) L = np.dot(self._C_k, L0) return L
[ "def", "approximate_cholesky", "(", "self", ",", "epsilon", "=", "1e-6", ")", ":", "# compute the Eigenvalues of C0 using Schur factorization", "Wk", "=", "self", ".", "_C_k", "[", "self", ".", "_columns", ",", ":", "]", "L0", "=", "spd_inv_split", "(", "Wk", ...
r""" Compute low-rank approximation to the Cholesky decomposition of target matrix. The decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive. Parameters ---------- epsilon : float, optional, default 1e-6 Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues will be cut off. Returns ------- L : ndarray((n,m), dtype=float) Cholesky matrix such that `A \approx L L^{\top}`. Number of columns :math:`m` is most at the number of columns used in the Nystroem approximation, but may be smaller depending on epsilon.
[ "r", "Compute", "low", "-", "rank", "approximation", "to", "the", "Cholesky", "decomposition", "of", "target", "matrix", "." ]
python
train
42.88
wandb/client
wandb/apis/internal.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L925-L964
def agent_heartbeat(self, agent_id, metrics, run_states): """Notify server about agent state, receive commands. Args: agent_id (str): agent_id metrics (dict): system metrics run_states (dict): run_id: state mapping Returns: List of commands to execute. """ mutation = gql(''' mutation Heartbeat( $id: ID!, $metrics: JSONString, $runState: JSONString ) { agentHeartbeat(input: { id: $id, metrics: $metrics, runState: $runState }) { agent { id } commands } } ''') try: response = self.gql(mutation, variable_values={ 'id': agent_id, 'metrics': json.dumps(metrics), 'runState': json.dumps(run_states)}) except Exception as e: # GQL raises exceptions with stringified python dictionaries :/ message = ast.literal_eval(e.args[0])["message"] logger.error('Error communicating with W&B: %s', message) return [] else: return json.loads(response['agentHeartbeat']['commands'])
[ "def", "agent_heartbeat", "(", "self", ",", "agent_id", ",", "metrics", ",", "run_states", ")", ":", "mutation", "=", "gql", "(", "'''\n mutation Heartbeat(\n $id: ID!,\n $metrics: JSONString,\n $runState: JSONString\n ) {\n a...
Notify server about agent state, receive commands. Args: agent_id (str): agent_id metrics (dict): system metrics run_states (dict): run_id: state mapping Returns: List of commands to execute.
[ "Notify", "server", "about", "agent", "state", "receive", "commands", "." ]
python
train
32.025
cisco-sas/kitty
kitty/data/report.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/data/report.py#L219-L239
def get_status(self): ''' Get the status of the report and its sub-reports. :rtype: str :return: report status ('passed', 'failed' or 'error') ''' status = self.get('status') if status == Report.PASSED: for sr_name in self._sub_reports: sr = self._sub_reports[sr_name] sr_status = sr.get_status() reason = sr.get('reason') if sr_status == Report.ERROR: self.error(reason) break if sr_status == Report.FAILED: self.failed(reason) break status = self.get('status') return status
[ "def", "get_status", "(", "self", ")", ":", "status", "=", "self", ".", "get", "(", "'status'", ")", "if", "status", "==", "Report", ".", "PASSED", ":", "for", "sr_name", "in", "self", ".", "_sub_reports", ":", "sr", "=", "self", ".", "_sub_reports", ...
Get the status of the report and its sub-reports. :rtype: str :return: report status ('passed', 'failed' or 'error')
[ "Get", "the", "status", "of", "the", "report", "and", "its", "sub", "-", "reports", "." ]
python
train
33.571429
ellethee/argparseinator
argparseinator/__init__.py
https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L580-L597
def exit(self, status=EXIT_OK, message=None): """ Terminate the script. """ if not self.parser: self.parser = argparse.ArgumentParser() if self.msg_on_error_only: # if msg_on_error_only is True if status != EXIT_OK: # if we have an error we'll exit with the message also. self.parser.exit(status, message) else: # else we'll exit with the status ongly self.parser.exit(status, None) else: # else if msg_on_error_only is not True # we'll exit with the status and the message self.parser.exit(status, message)
[ "def", "exit", "(", "self", ",", "status", "=", "EXIT_OK", ",", "message", "=", "None", ")", ":", "if", "not", "self", ".", "parser", ":", "self", ".", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "if", "self", ".", "msg_on_error_only", ...
Terminate the script.
[ "Terminate", "the", "script", "." ]
python
train
38.111111
rambo/python-holviapi
holviapi/utils.py
https://github.com/rambo/python-holviapi/blob/f57f44e7b0a1030786aafd6f387114abb546bb32/holviapi/utils.py#L167-L173
def iso_reference_valid_char(c, raise_error=True): """Helper to make sure the given character is valid for a reference number""" if c in ISO_REFERENCE_VALID: return True if raise_error: raise ValueError("'%s' is not in '%s'" % (c, ISO_REFERENCE_VALID)) return False
[ "def", "iso_reference_valid_char", "(", "c", ",", "raise_error", "=", "True", ")", ":", "if", "c", "in", "ISO_REFERENCE_VALID", ":", "return", "True", "if", "raise_error", ":", "raise", "ValueError", "(", "\"'%s' is not in '%s'\"", "%", "(", "c", ",", "ISO_REF...
Helper to make sure the given character is valid for a reference number
[ "Helper", "to", "make", "sure", "the", "given", "character", "is", "valid", "for", "a", "reference", "number" ]
python
valid
41.571429
raymondEhlers/pachyderm
pachyderm/yaml.py
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/yaml.py#L133-L154
def numpy_from_yaml(constructor: Constructor, data: ruamel.yaml.nodes.SequenceNode) -> np.ndarray: """ Read an array from YAML to numpy. It reads arrays registered under the tag ``!numpy_array``. Use with: .. code-block:: python >>> yaml = ruamel.yaml.YAML() >>> yaml.constructor.add_constructor("!numpy_array", yaml.numpy_from_yaml) Note: We cannot use ``yaml.register_class`` because it won't register the proper type. (It would register the type of the class, rather than of `numpy.ndarray`). Instead, we use the above approach to register this method explicitly with the representer. """ # Construct the contained values so that we properly construct int, float, etc. # We just leave this to YAML because it already stores this information. values = [constructor.construct_object(n) for n in data.value] logger.debug(f"{data}, {values}") return np.array(values)
[ "def", "numpy_from_yaml", "(", "constructor", ":", "Constructor", ",", "data", ":", "ruamel", ".", "yaml", ".", "nodes", ".", "SequenceNode", ")", "->", "np", ".", "ndarray", ":", "# Construct the contained values so that we properly construct int, float, etc.", "# We j...
Read an array from YAML to numpy. It reads arrays registered under the tag ``!numpy_array``. Use with: .. code-block:: python >>> yaml = ruamel.yaml.YAML() >>> yaml.constructor.add_constructor("!numpy_array", yaml.numpy_from_yaml) Note: We cannot use ``yaml.register_class`` because it won't register the proper type. (It would register the type of the class, rather than of `numpy.ndarray`). Instead, we use the above approach to register this method explicitly with the representer.
[ "Read", "an", "array", "from", "YAML", "to", "numpy", "." ]
python
train
42.409091
saltstack/salt
salt/returners/influxdb_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/influxdb_return.py#L313-L326
def get_minions(): ''' Return a list of minions ''' serv = _get_serv(ret=None) sql = "select distinct(id) from returns" data = serv.query(sql) ret = [] if data: for jid in data[0]['points']: ret.append(jid[1]) return ret
[ "def", "get_minions", "(", ")", ":", "serv", "=", "_get_serv", "(", "ret", "=", "None", ")", "sql", "=", "\"select distinct(id) from returns\"", "data", "=", "serv", ".", "query", "(", "sql", ")", "ret", "=", "[", "]", "if", "data", ":", "for", "jid", ...
Return a list of minions
[ "Return", "a", "list", "of", "minions" ]
python
train
18.928571
rueckstiess/mtools
mtools/mplotqueries/plottypes/base_type.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/base_type.py#L60-L64
def logevents(self): """Iterator yielding all logevents from groups dictionary.""" for key in self.groups: for logevent in self.groups[key]: yield logevent
[ "def", "logevents", "(", "self", ")", ":", "for", "key", "in", "self", ".", "groups", ":", "for", "logevent", "in", "self", ".", "groups", "[", "key", "]", ":", "yield", "logevent" ]
Iterator yielding all logevents from groups dictionary.
[ "Iterator", "yielding", "all", "logevents", "from", "groups", "dictionary", "." ]
python
train
39
portfors-lab/sparkle
sparkle/gui/plotting/raster_bounds_dlg.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/raster_bounds_dlg.py#L16-L24
def values(self): """Gets the user enter max and min values of where the raster points should appear on the y-axis :returns: (float, float) -- (min, max) y-values to bound the raster plot by """ lower = float(self.lowerSpnbx.value()) upper = float(self.upperSpnbx.value()) return (lower, upper)
[ "def", "values", "(", "self", ")", ":", "lower", "=", "float", "(", "self", ".", "lowerSpnbx", ".", "value", "(", ")", ")", "upper", "=", "float", "(", "self", ".", "upperSpnbx", ".", "value", "(", ")", ")", "return", "(", "lower", ",", "upper", ...
Gets the user enter max and min values of where the raster points should appear on the y-axis :returns: (float, float) -- (min, max) y-values to bound the raster plot by
[ "Gets", "the", "user", "enter", "max", "and", "min", "values", "of", "where", "the", "raster", "points", "should", "appear", "on", "the", "y", "-", "axis" ]
python
train
38.222222
JamesPHoughton/pysd
pysd/py_backend/functions.py
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L768-L800
def _integrate(self, time_steps, capture_elements, return_timestamps): """ Performs euler integration Parameters ---------- time_steps: iterable the time steps that the integrator progresses over capture_elements: list which model elements to capture - uses pysafe names return_timestamps: which subset of 'timesteps' should be values be returned? Returns ------- outputs: list of dictionaries """ # Todo: consider adding the timestamp to the return elements, and using that as the index outputs = [] for t2 in time_steps[1:]: if self.time() in return_timestamps: outputs.append({key: getattr(self.components, key)() for key in capture_elements}) self._euler_step(t2 - self.time()) self.time.update(t2) # this will clear the stepwise caches # need to add one more time step, because we run only the state updates in the previous # loop and thus may be one short. if self.time() in return_timestamps: outputs.append({key: getattr(self.components, key)() for key in capture_elements}) return outputs
[ "def", "_integrate", "(", "self", ",", "time_steps", ",", "capture_elements", ",", "return_timestamps", ")", ":", "# Todo: consider adding the timestamp to the return elements, and using that as the index", "outputs", "=", "[", "]", "for", "t2", "in", "time_steps", "[", "...
Performs euler integration Parameters ---------- time_steps: iterable the time steps that the integrator progresses over capture_elements: list which model elements to capture - uses pysafe names return_timestamps: which subset of 'timesteps' should be values be returned? Returns ------- outputs: list of dictionaries
[ "Performs", "euler", "integration" ]
python
train
36.848485
molpopgen/fwdpy11
fwdpy11/ezparams.py
https://github.com/molpopgen/fwdpy11/blob/7a5905f0f0a09e24ae5b0f39d22017499e81ea9e/fwdpy11/ezparams.py#L21-L62
def mslike(pop, **kwargs): """ Function to establish default parameters for a single-locus simulation for standard pop-gen modeling scenarios. :params pop: An instance of :class:`fwdpy11.DiploidPopulation` :params kwargs: Keyword arguments. """ import fwdpy11 if isinstance(pop, fwdpy11.DiploidPopulation) is False: raise ValueError("incorrect pop type: " + str(type(pop))) defaults = {'simlen': 10*pop.N, 'beg': 0.0, 'end': 1.0, 'theta': 100.0, 'pneutral': 1.0, 'rho': 100.0, 'dfe': None } for key, value in kwargs.items(): if key in defaults: defaults[key] = value import numpy as np params = {'demography': np.array([pop.N]*defaults['simlen'], dtype=np.uint32), 'nregions': [fwdpy11.Region(defaults['beg'], defaults['end'], 1.0)], 'recregions': [fwdpy11.Region(defaults['beg'], defaults['end'], 1.0)], 'rates': ((defaults['pneutral']*defaults['theta'])/(4.0*pop.N), ((1.0-defaults['pneutral'])*defaults['theta']) / (4.0*pop.N), defaults['rho']/(4.0*float(pop.N))), 'gvalue': fwdpy11.Multiplicative(2.0) } if defaults['dfe'] is None: params['sregions'] = [] else: params['sregions'] = [defaults['dfe']] return params
[ "def", "mslike", "(", "pop", ",", "*", "*", "kwargs", ")", ":", "import", "fwdpy11", "if", "isinstance", "(", "pop", ",", "fwdpy11", ".", "DiploidPopulation", ")", "is", "False", ":", "raise", "ValueError", "(", "\"incorrect pop type: \"", "+", "str", "(",...
Function to establish default parameters for a single-locus simulation for standard pop-gen modeling scenarios. :params pop: An instance of :class:`fwdpy11.DiploidPopulation` :params kwargs: Keyword arguments.
[ "Function", "to", "establish", "default", "parameters", "for", "a", "single", "-", "locus", "simulation", "for", "standard", "pop", "-", "gen", "modeling", "scenarios", "." ]
python
train
35.97619
tcalmant/ipopo
pelix/shell/parser.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/parser.py#L670-L681
def var_set(session, **kwargs): """ Sets the given variables or prints the current ones. "set answer=42" """ if not kwargs: for name, value in session.variables.items(): session.write_line("{0}={1}".format(name, value)) else: for name, value in kwargs.items(): name = name.strip() session.set(name, value) session.write_line("{0}={1}", name, value)
[ "def", "var_set", "(", "session", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ":", "for", "name", ",", "value", "in", "session", ".", "variables", ".", "items", "(", ")", ":", "session", ".", "write_line", "(", "\"{0}={1}\"", ".", "form...
Sets the given variables or prints the current ones. "set answer=42"
[ "Sets", "the", "given", "variables", "or", "prints", "the", "current", "ones", ".", "set", "answer", "=", "42" ]
python
train
38.75
simpleai-team/simpleai
samples/search/eight_puzzle.py
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/search/eight_puzzle.py#L81-L91
def result(self, state, action): '''Return the resulting state after moving a piece to the empty space. (the "action" parameter contains the piece to move) ''' rows = string_to_list(state) row_e, col_e = find_location(rows, 'e') row_n, col_n = find_location(rows, action) rows[row_e][col_e], rows[row_n][col_n] = rows[row_n][col_n], rows[row_e][col_e] return list_to_string(rows)
[ "def", "result", "(", "self", ",", "state", ",", "action", ")", ":", "rows", "=", "string_to_list", "(", "state", ")", "row_e", ",", "col_e", "=", "find_location", "(", "rows", ",", "'e'", ")", "row_n", ",", "col_n", "=", "find_location", "(", "rows", ...
Return the resulting state after moving a piece to the empty space. (the "action" parameter contains the piece to move)
[ "Return", "the", "resulting", "state", "after", "moving", "a", "piece", "to", "the", "empty", "space", ".", "(", "the", "action", "parameter", "contains", "the", "piece", "to", "move", ")" ]
python
train
39.818182
ga4gh/ga4gh-server
ga4gh/server/datamodel/variants.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/variants.py#L882-L890
def _createGaVariantAnnotation(self): """ Convenience method to set the common fields in a GA VariantAnnotation object from this variant set. """ ret = protocol.VariantAnnotation() ret.created = self._creationTime ret.variant_annotation_set_id = self.getId() return ret
[ "def", "_createGaVariantAnnotation", "(", "self", ")", ":", "ret", "=", "protocol", ".", "VariantAnnotation", "(", ")", "ret", ".", "created", "=", "self", ".", "_creationTime", "ret", ".", "variant_annotation_set_id", "=", "self", ".", "getId", "(", ")", "r...
Convenience method to set the common fields in a GA VariantAnnotation object from this variant set.
[ "Convenience", "method", "to", "set", "the", "common", "fields", "in", "a", "GA", "VariantAnnotation", "object", "from", "this", "variant", "set", "." ]
python
train
36.111111
Erotemic/utool
utool/util_num.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_num.py#L38-L75
def sigfig_str(number, sigfig): """ References: http://stackoverflow.com/questions/2663612/nicely-repr-float-in-python """ assert(sigfig > 0) try: d = decimal.Decimal(number) except TypeError: d = float_to_decimal(float(number)) sign, digits, exponent = d.as_tuple() if len(digits) < sigfig: digits = list(digits) digits.extend([0] * (sigfig - len(digits))) shift = d.adjusted() result = int(''.join(map(str, digits[:sigfig]))) # Round the result if len(digits) > sigfig and digits[sigfig] >= 5: result += 1 result = list(str(result)) # Rounding can change the length of result # If so, adjust shift shift += len(result) - sigfig # reset len of result to sigfig result = result[:sigfig] if shift >= sigfig - 1: # Tack more zeros on the end result += ['0'] * (shift - sigfig + 1) elif 0 <= shift: # Place the decimal point in between digits result.insert(shift + 1, '.') else: # Tack zeros on the front assert(shift < 0) result = ['0.'] + ['0'] * (-shift - 1) + result if sign: result.insert(0, '-') return ''.join(result)
[ "def", "sigfig_str", "(", "number", ",", "sigfig", ")", ":", "assert", "(", "sigfig", ">", "0", ")", "try", ":", "d", "=", "decimal", ".", "Decimal", "(", "number", ")", "except", "TypeError", ":", "d", "=", "float_to_decimal", "(", "float", "(", "nu...
References: http://stackoverflow.com/questions/2663612/nicely-repr-float-in-python
[ "References", ":", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "2663612", "/", "nicely", "-", "repr", "-", "float", "-", "in", "-", "python" ]
python
train
31.263158
softlayer/softlayer-python
SoftLayer/CLI/vpn/ipsec/subnet/add.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/vpn/ipsec/subnet/add.py#L33-L81
def cli(env, context_id, subnet_id, subnet_type, network_identifier): """Add a subnet to an IPSEC tunnel context. A subnet id may be specified to link to the existing tunnel context. Otherwise, a network identifier in CIDR notation should be specified, indicating that a subnet resource should first be created before associating it with the tunnel context. Note that this is only supported for remote subnets, which are also deleted upon failure to attach to a context. A separate configuration request should be made to realize changes on network devices. """ create_remote = False if subnet_id is None: if network_identifier is None: raise ArgumentError('Either a network identifier or subnet id ' 'must be provided.') if subnet_type != 'remote': raise ArgumentError('Unable to create {} subnets' .format(subnet_type)) create_remote = True manager = SoftLayer.IPSECManager(env.client) context = manager.get_tunnel_context(context_id) if create_remote: subnet = manager.create_remote_subnet(context['accountId'], identifier=network_identifier[0], cidr=network_identifier[1]) subnet_id = subnet['id'] env.out('Created subnet {}/{} #{}' .format(network_identifier[0], network_identifier[1], subnet_id)) succeeded = False if subnet_type == 'internal': succeeded = manager.add_internal_subnet(context_id, subnet_id) elif subnet_type == 'remote': succeeded = manager.add_remote_subnet(context_id, subnet_id) elif subnet_type == 'service': succeeded = manager.add_service_subnet(context_id, subnet_id) if succeeded: env.out('Added {} subnet #{}'.format(subnet_type, subnet_id)) else: raise CLIHalt('Failed to add {} subnet #{}' .format(subnet_type, subnet_id))
[ "def", "cli", "(", "env", ",", "context_id", ",", "subnet_id", ",", "subnet_type", ",", "network_identifier", ")", ":", "create_remote", "=", "False", "if", "subnet_id", "is", "None", ":", "if", "network_identifier", "is", "None", ":", "raise", "ArgumentError"...
Add a subnet to an IPSEC tunnel context. A subnet id may be specified to link to the existing tunnel context. Otherwise, a network identifier in CIDR notation should be specified, indicating that a subnet resource should first be created before associating it with the tunnel context. Note that this is only supported for remote subnets, which are also deleted upon failure to attach to a context. A separate configuration request should be made to realize changes on network devices.
[ "Add", "a", "subnet", "to", "an", "IPSEC", "tunnel", "context", "." ]
python
train
41.816327
inasafe/inasafe
safe/impact_function/impact_function.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/impact_function.py#L1259-L1367
def _compute_analysis_extent(self): """Compute the minimum extent between layers. This function will set the self._analysis_extent geometry using aggregation CRS or crs property. :return: A tuple with the status of the IF and an error message if needed. The status is PREPARE_SUCCESS if everything was fine. The status is PREPARE_FAILED_INSUFFICIENT_OVERLAP if the client should fix the analysis extent. The status is PREPARE_FAILED_BAD_CODE if something went wrong from the code. :rtype: (int, m.Message) """ exposure_extent = QgsGeometry.fromRect(self.exposure.extent()) hazard_extent = QgsGeometry.fromRect(self.hazard.extent()) if self.aggregation: analysis_crs = self.aggregation.crs() else: analysis_crs = self._crs if self.hazard.crs().authid() != analysis_crs.authid(): crs_transform = QgsCoordinateTransform( self.hazard.crs(), analysis_crs, QgsProject.instance()) hazard_extent.transform(crs_transform) if self.exposure.crs().authid() != analysis_crs.authid(): crs_transform = QgsCoordinateTransform( self.exposure.crs(), analysis_crs, QgsProject.instance()) exposure_extent.transform(crs_transform) # We check if the hazard and the exposure overlap. 
if not exposure_extent.intersects(hazard_extent): message = generate_input_error_message( tr('Layers need to overlap.'), m.Paragraph(tr( 'The exposure and the hazard layer need to overlap.')) ) return PREPARE_FAILED_INSUFFICIENT_OVERLAP, message else: hazard_exposure = exposure_extent.intersection(hazard_extent) if not self.aggregation: if self.requested_extent: user_bounding_box = QgsGeometry.fromRect(self.requested_extent) if self._crs != self.exposure.crs(): crs_transform = QgsCoordinateTransform( self._crs, self.exposure.crs(), QgsProject.instance()) user_bounding_box.transform(crs_transform) if not hazard_exposure.intersects(user_bounding_box): message = generate_input_error_message( tr('The bounding box need to overlap layers.'), m.Paragraph(tr( 'The requested analysis extent is not overlaping ' 'the exposure and the hazard.')) ) return ( PREPARE_FAILED_INSUFFICIENT_OVERLAP_REQUESTED_EXTENT, message) else: self._analysis_extent = hazard_exposure.intersection( user_bounding_box) elif self.use_exposure_view_only: self._analysis_extent = exposure_extent else: self._analysis_extent = hazard_exposure else: # We monkey patch if we use selected features only. 
self.aggregation.use_selected_features_only = ( self.use_selected_features_only) self.aggregation = create_valid_aggregation(self.aggregation) list_geometry = [] for area in self.aggregation.getFeatures(): list_geometry.append(QgsGeometry(area.geometry())) geometry = QgsGeometry.unaryUnion(list_geometry) if geometry.isMultipart(): multi_polygon = geometry.asMultiPolygon() for polygon in multi_polygon: for ring in polygon[1:]: polygon.remove(ring) self._analysis_extent = QgsGeometry.fromMultiPolygonXY( multi_polygon) else: polygon = geometry.asPolygon() for ring in polygon[1:]: polygon.remove(ring) self._analysis_extent = QgsGeometry.fromPolygonXY(polygon) is_empty = self._analysis_extent.isEmpty() is_invalid = not self._analysis_extent.isGeosValid() if is_empty or is_invalid: message = generate_input_error_message( tr('There is a problem with the aggregation layer.'), m.Paragraph(tr( 'The aggregation layer seems to have a problem. ' 'Some features might be invalid. You should check the ' 'validity of this layer or use a selection within ' 'this layer.')) ) return PREPARE_FAILED_BAD_LAYER, message return PREPARE_SUCCESS, None
[ "def", "_compute_analysis_extent", "(", "self", ")", ":", "exposure_extent", "=", "QgsGeometry", ".", "fromRect", "(", "self", ".", "exposure", ".", "extent", "(", ")", ")", "hazard_extent", "=", "QgsGeometry", ".", "fromRect", "(", "self", ".", "hazard", "....
Compute the minimum extent between layers. This function will set the self._analysis_extent geometry using aggregation CRS or crs property. :return: A tuple with the status of the IF and an error message if needed. The status is PREPARE_SUCCESS if everything was fine. The status is PREPARE_FAILED_INSUFFICIENT_OVERLAP if the client should fix the analysis extent. The status is PREPARE_FAILED_BAD_CODE if something went wrong from the code. :rtype: (int, m.Message)
[ "Compute", "the", "minimum", "extent", "between", "layers", "." ]
python
train
43.981651
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_cee_map.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_cee_map.py#L12-L21
def cee_map_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map") name = ET.SubElement(cee_map, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "cee_map_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "cee_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"cee-map\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:broca...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
37.8
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/optional_traversal.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/optional_traversal.py#L193-L266
def prune_non_existent_outputs(compound_match_query): """Remove non-existent outputs from each MatchQuery in the given CompoundMatchQuery. Each of the 2^n MatchQuery objects (except one) has been pruned to exclude some Traverse blocks, For each of these, remove the outputs (that have been implicitly pruned away) from each corresponding ConstructResult block. Args: compound_match_query: CompoundMatchQuery object containing 2^n pruned MatchQuery objects (see convert_optional_traversals_to_compound_match_query) Returns: CompoundMatchQuery with pruned ConstructResult blocks for each of the 2^n MatchQuery objects """ if len(compound_match_query.match_queries) == 1: return compound_match_query elif len(compound_match_query.match_queries) == 0: raise AssertionError(u'Received CompoundMatchQuery with ' u'an empty list of MatchQuery objects.') else: match_queries = [] for match_query in compound_match_query.match_queries: match_traversals = match_query.match_traversals output_block = match_query.output_block present_locations_tuple = _get_present_locations(match_traversals) present_locations, present_non_optional_locations = present_locations_tuple new_output_fields = {} for output_name, expression in six.iteritems(output_block.fields): if isinstance(expression, OutputContextField): # An OutputContextField as an output Expression indicates that we are not # within an @optional scope. Therefore, the location this output uses must # be in present_locations, and the output is never pruned. location_name, _ = expression.location.get_location_name() if location_name not in present_locations: raise AssertionError(u'Non-optional output location {} was not found in ' u'present_locations: {}' .format(expression.location, present_locations)) new_output_fields[output_name] = expression elif isinstance(expression, FoldedContextField): # A FoldedContextField as an output Expression indicates that we are not # within an @optional scope. 
Therefore, the location this output uses must # be in present_locations, and the output is never pruned. base_location = expression.fold_scope_location.base_location location_name, _ = base_location.get_location_name() if location_name not in present_locations: raise AssertionError(u'Folded output location {} was found in ' u'present_locations: {}' .format(base_location, present_locations)) new_output_fields[output_name] = expression elif isinstance(expression, TernaryConditional): # A TernaryConditional indicates that this output is within some optional scope. # This may be pruned away based on the contents of present_locations. location_name, _ = expression.if_true.location.get_location_name() if location_name in present_locations: if location_name in present_non_optional_locations: new_output_fields[output_name] = expression.if_true else: new_output_fields[output_name] = expression else: raise AssertionError(u'Invalid expression of type {} in output block: ' u'{}'.format(type(expression).__name__, output_block)) match_queries.append( MatchQuery( match_traversals=match_traversals, folds=match_query.folds, output_block=ConstructResult(new_output_fields), where_block=match_query.where_block, ) ) return CompoundMatchQuery(match_queries=match_queries)
[ "def", "prune_non_existent_outputs", "(", "compound_match_query", ")", ":", "if", "len", "(", "compound_match_query", ".", "match_queries", ")", "==", "1", ":", "return", "compound_match_query", "elif", "len", "(", "compound_match_query", ".", "match_queries", ")", ...
Remove non-existent outputs from each MatchQuery in the given CompoundMatchQuery. Each of the 2^n MatchQuery objects (except one) has been pruned to exclude some Traverse blocks, For each of these, remove the outputs (that have been implicitly pruned away) from each corresponding ConstructResult block. Args: compound_match_query: CompoundMatchQuery object containing 2^n pruned MatchQuery objects (see convert_optional_traversals_to_compound_match_query) Returns: CompoundMatchQuery with pruned ConstructResult blocks for each of the 2^n MatchQuery objects
[ "Remove", "non", "-", "existent", "outputs", "from", "each", "MatchQuery", "in", "the", "given", "CompoundMatchQuery", "." ]
python
train
58.256757
Jaymon/prom
prom/interface/postgres.py
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/postgres.py#L184-L262
def _get_fields(self, table_name, **kwargs): """return all the fields for the given schema""" ret = {} query_str = [] query_args = ['f', table_name] # I had to brush up on my join knowledge while writing this query # https://en.wikipedia.org/wiki/Join_(SQL) # # other helpful links # https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns # https://www.postgresql.org/docs/9.4/static/catalog-pg-attribute.html # https://www.postgresql.org/docs/9.3/static/catalog-pg-type.html # # another approach # http://dba.stackexchange.com/questions/22362/how-do-i-list-all-columns-for-a-specified-table # http://gis.stackexchange.com/questions/94049/how-to-get-the-data-type-of-each-column-from-a-postgis-table query_str.append('SELECT') query_str.append(', '.join([ 'a.attnum', 'a.attname', 'a.attnotnull', 't.typname', 'i.indisprimary', #'s.conname', #'pg_get_constraintdef(s.oid, true) as condef', 'c.relname AS confrelname', ])) query_str.append('FROM') query_str.append(' pg_attribute a') query_str.append('JOIN pg_type t ON a.atttypid = t.oid') query_str.append('LEFT JOIN pg_index i ON a.attrelid = i.indrelid') query_str.append(' AND a.attnum = any(i.indkey)') query_str.append('LEFT JOIN pg_constraint s ON a.attrelid = s.conrelid') query_str.append(' AND s.contype = {} AND a.attnum = any(s.conkey)'.format(self.val_placeholder)) query_str.append('LEFT JOIN pg_class c ON s.confrelid = c.oid') query_str.append('WHERE') query_str.append(' a.attrelid = {}::regclass'.format(self.val_placeholder)) query_str.append(' AND a.attisdropped = False') query_str.append(' AND a.attnum > 0') query_str.append('ORDER BY a.attnum ASC') query_str = os.linesep.join(query_str) fields = self.query(query_str, *query_args, **kwargs) pg_types = { "float8": float, "timestamp": datetime.datetime, "int2": int, "int4": int, "int8": long, "numeric": decimal.Decimal, "text": str, "bpchar": str, "varchar": str, "bool": bool, "date": datetime.date, "blob": bytearray, } # the rows we can set: field_type, name, 
field_required, min_size, max_size, # size, unique, pk, <foreign key info> # These keys will roughly correspond with schema.Field for row in fields: field = { "name": row["attname"], "field_type": pg_types[row["typname"]], "field_required": row["attnotnull"], "pk": bool(row["indisprimary"]), } if row["confrelname"]: # TODO -- I can't decide which name I like field["schema_table_name"] = row["confrelname"] field["ref_table_name"] = row["confrelname"] ret[field["name"]] = field return ret
[ "def", "_get_fields", "(", "self", ",", "table_name", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "query_str", "=", "[", "]", "query_args", "=", "[", "'f'", ",", "table_name", "]", "# I had to brush up on my join knowledge while writing this query",...
return all the fields for the given schema
[ "return", "all", "the", "fields", "for", "the", "given", "schema" ]
python
train
39.822785
williballenthin/python-pyqt5-hexview
hexview/tablecellstylemodels.py
https://github.com/williballenthin/python-pyqt5-hexview/blob/461feb286dfde60bdbc12b3fb772d650f4b8ba71/hexview/tablecellstylemodels.py#L101-L170
def compute_region_border(start, end): """ given the buffer start and end indices of a range, compute the border edges that should be drawn to enclose the range. this function currently assumes 0x10 length rows. the result is a dictionary from buffer index to Cell instance. the Cell instance has boolean properties "top", "bottom", "left", and "right" that describe if a border should be drawn on that side of the cell view. :rtype: Mapping[int, CellT] """ cells = defaultdict(Cell) start_row = row_number(start) end_row = row_number(end) if end % 0x10 == 0: end_row -= 1 ## topmost cells if start_row == end_row: for i in range(start, end): cells[i].top = True else: for i in range(start, row_end_index(start) + 1): cells[i].top = True # cells on second row, top left if start_row != end_row: next_row_start = row_start_index(start) + 0x10 for i in range(next_row_start, next_row_start + column_number(start)): cells[i].top = True ## bottommost cells if start_row == end_row: for i in range(start, end): cells[i].bottom = True else: for i in range(row_start_index(end), end): cells[i].bottom = True # cells on second-to-last row, bottom right if start_row != end_row: prev_row_end = row_end_index(end) - 0x10 for i in range(prev_row_end - (0x10 - column_number(end) - 1), prev_row_end + 1): cells[i].bottom = True ## leftmost cells if start_row == end_row: cells[start].left = True else: second_row_start = row_start_index(start) + 0x10 for i in range(second_row_start, row_start_index(end) + 0x10, 0x10): cells[i].left = True # cells in first row, top left if start_row != end_row: cells[start].left = True ## rightmost cells if start_row == end_row: cells[end - 1].right = True else: penultimate_row_end = row_end_index(end) - 0x10 for i in range(row_end_index(start), penultimate_row_end + 0x10, 0x10): cells[i].right = True # cells in last row, bottom right if start_row != end_row: cells[end - 1].right = True # convert back to standard dict # trick from: 
http://stackoverflow.com/a/20428703/87207 cells.default_factory = None return cells
[ "def", "compute_region_border", "(", "start", ",", "end", ")", ":", "cells", "=", "defaultdict", "(", "Cell", ")", "start_row", "=", "row_number", "(", "start", ")", "end_row", "=", "row_number", "(", "end", ")", "if", "end", "%", "0x10", "==", "0", ":...
given the buffer start and end indices of a range, compute the border edges that should be drawn to enclose the range. this function currently assumes 0x10 length rows. the result is a dictionary from buffer index to Cell instance. the Cell instance has boolean properties "top", "bottom", "left", and "right" that describe if a border should be drawn on that side of the cell view. :rtype: Mapping[int, CellT]
[ "given", "the", "buffer", "start", "and", "end", "indices", "of", "a", "range", "compute", "the", "border", "edges", "that", "should", "be", "drawn", "to", "enclose", "the", "range", "." ]
python
train
33.828571
xzased/lvm2py
lvm2py/vg.py
https://github.com/xzased/lvm2py/blob/34ce69304531a474c2fe4a4009ca445a8c103cd6/lvm2py/vg.py#L227-L238
def size(self, units="MiB"): """ Returns the volume group size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. """ self.open() size = lvm_vg_get_size(self.handle) self.close() return size_convert(size, units)
[ "def", "size", "(", "self", ",", "units", "=", "\"MiB\"", ")", ":", "self", ".", "open", "(", ")", "size", "=", "lvm_vg_get_size", "(", "self", ".", "handle", ")", "self", ".", "close", "(", ")", "return", "size_convert", "(", "size", ",", "units", ...
Returns the volume group size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
[ "Returns", "the", "volume", "group", "size", "in", "the", "given", "units", ".", "Default", "units", "are", "MiB", "." ]
python
train
29.166667
syrusakbary/promise
promise/dataloader.py
https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L233-L252
def dispatch_queue(loader): # type: (DataLoader) -> None """ Given the current state of a Loader instance, perform a batch load from its current queue. """ # Take the current loader queue, replacing it with an empty queue. queue = loader._queue loader._queue = [] # If a maxBatchSize was provided and the queue is longer, then segment the # queue into multiple batches, otherwise treat the queue as a single batch. max_batch_size = loader.max_batch_size if max_batch_size and max_batch_size < len(queue): chunks = get_chunks(queue, max_batch_size) for chunk in chunks: dispatch_queue_batch(loader, chunk) else: dispatch_queue_batch(loader, queue)
[ "def", "dispatch_queue", "(", "loader", ")", ":", "# type: (DataLoader) -> None", "# Take the current loader queue, replacing it with an empty queue.", "queue", "=", "loader", ".", "_queue", "loader", ".", "_queue", "=", "[", "]", "# If a maxBatchSize was provided and the queue...
Given the current state of a Loader instance, perform a batch load from its current queue.
[ "Given", "the", "current", "state", "of", "a", "Loader", "instance", "perform", "a", "batch", "load", "from", "its", "current", "queue", "." ]
python
train
35.85
genepattern/genepattern-python
gp/modules.py
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/modules.py#L585-L594
def _generate_base_lsid(self): """ Generates and returns a base LSID :return: """ domain = self._generate_domain() namespace = self._generate_namespace() # Return the base LSID return "urn:lsid:" + domain + ":" + namespace
[ "def", "_generate_base_lsid", "(", "self", ")", ":", "domain", "=", "self", ".", "_generate_domain", "(", ")", "namespace", "=", "self", ".", "_generate_namespace", "(", ")", "# Return the base LSID", "return", "\"urn:lsid:\"", "+", "domain", "+", "\":\"", "+", ...
Generates and returns a base LSID :return:
[ "Generates", "and", "returns", "a", "base", "LSID", ":", "return", ":" ]
python
train
27.8
boxed/mutmut
mutmut/__main__.py
https://github.com/boxed/mutmut/blob/dd3bbe9aba3168ed21b85fbfe0b654b150239697/mutmut/__main__.py#L750-L780
def compute_exit_code(config, exception=None): """Compute an exit code for mutmut mutation testing The following exit codes are available for mutmut: * 0 if all mutants were killed (OK_KILLED) * 1 if a fatal error occurred * 2 if one or more mutants survived (BAD_SURVIVED) * 4 if one or more mutants timed out (BAD_TIMEOUT) * 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS) Exit codes 1 to 8 will be bit-ORed so that it is possible to know what different mutant statuses occurred during mutation testing. :param exception: :type exception: Exception :param config: :type config: Config :return: integer noting the exit code of the mutation tests. :rtype: int """ code = 0 if exception is not None: code = code | 1 if config.surviving_mutants > 0: code = code | 2 if config.surviving_mutants_timeout > 0: code = code | 4 if config.suspicious_mutants > 0: code = code | 8 return code
[ "def", "compute_exit_code", "(", "config", ",", "exception", "=", "None", ")", ":", "code", "=", "0", "if", "exception", "is", "not", "None", ":", "code", "=", "code", "|", "1", "if", "config", ".", "surviving_mutants", ">", "0", ":", "code", "=", "c...
Compute an exit code for mutmut mutation testing The following exit codes are available for mutmut: * 0 if all mutants were killed (OK_KILLED) * 1 if a fatal error occurred * 2 if one or more mutants survived (BAD_SURVIVED) * 4 if one or more mutants timed out (BAD_TIMEOUT) * 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS) Exit codes 1 to 8 will be bit-ORed so that it is possible to know what different mutant statuses occurred during mutation testing. :param exception: :type exception: Exception :param config: :type config: Config :return: integer noting the exit code of the mutation tests. :rtype: int
[ "Compute", "an", "exit", "code", "for", "mutmut", "mutation", "testing" ]
python
valid
32.612903
atl/py-smartdc
smartdc/machine.py
https://github.com/atl/py-smartdc/blob/cc5cd5910e19004cc46e376ce035affe28fc798e/smartdc/machine.py#L724-L733
def delete(self): """ :: DELETE /:login/machines/:id/snapshots/:name Deletes the snapshot from the machine. """ _, r = self.machine.datacenter.request('DELETE', self.path) r.raise_for_status()
[ "def", "delete", "(", "self", ")", ":", "_", ",", "r", "=", "self", ".", "machine", ".", "datacenter", ".", "request", "(", "'DELETE'", ",", "self", ".", "path", ")", "r", ".", "raise_for_status", "(", ")" ]
:: DELETE /:login/machines/:id/snapshots/:name Deletes the snapshot from the machine.
[ "::", "DELETE", "/", ":", "login", "/", "machines", "/", ":", "id", "/", "snapshots", "/", ":", "name", "Deletes", "the", "snapshot", "from", "the", "machine", "." ]
python
train
26.1
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L909-L916
def simxClearIntegerSignal(clientID, signalName, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' if (sys.version_info[0] == 3) and (type(signalName) is str): signalName=signalName.encode('utf-8') return c_ClearIntegerSignal(clientID, signalName, operationMode)
[ "def", "simxClearIntegerSignal", "(", "clientID", ",", "signalName", ",", "operationMode", ")", ":", "if", "(", "sys", ".", "version_info", "[", "0", "]", "==", "3", ")", "and", "(", "type", "(", "signalName", ")", "is", "str", ")", ":", "signalName", ...
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
43
MillionIntegrals/vel
vel/rl/models/stochastic_policy_model_separate.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/stochastic_policy_model_separate.py#L85-L90
def policy(self, observations): """ Calculate only action head for given state """ input_data = self.input_block(observations) policy_base_output = self.policy_backbone(input_data) policy_params = self.action_head(policy_base_output) return policy_params
[ "def", "policy", "(", "self", ",", "observations", ")", ":", "input_data", "=", "self", ".", "input_block", "(", "observations", ")", "policy_base_output", "=", "self", ".", "policy_backbone", "(", "input_data", ")", "policy_params", "=", "self", ".", "action_...
Calculate only action head for given state
[ "Calculate", "only", "action", "head", "for", "given", "state" ]
python
train
48.166667
juju/charm-helpers
charmhelpers/contrib/storage/linux/ceph.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/ceph.py#L922-L932
def create_key_file(service, key): """Create a file containing key.""" keyfile = _keyfile_path(service) if os.path.exists(keyfile): log('Keyfile exists at %s.' % keyfile, level=WARNING) return with open(keyfile, 'w') as fd: fd.write(key) log('Created new keyfile at %s.' % keyfile, level=INFO)
[ "def", "create_key_file", "(", "service", ",", "key", ")", ":", "keyfile", "=", "_keyfile_path", "(", "service", ")", "if", "os", ".", "path", ".", "exists", "(", "keyfile", ")", ":", "log", "(", "'Keyfile exists at %s.'", "%", "keyfile", ",", "level", "...
Create a file containing key.
[ "Create", "a", "file", "containing", "key", "." ]
python
train
29.909091
opencobra/memote
memote/experimental/config.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/config.py#L67-L81
def load(self, model): """ Load all information from an experimental configuration file. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ self.load_medium(model) self.load_essentiality(model) self.load_growth(model) # self.load_experiment(config.config.get("growth"), model) return self
[ "def", "load", "(", "self", ",", "model", ")", ":", "self", ".", "load_medium", "(", "model", ")", "self", ".", "load_essentiality", "(", "model", ")", "self", ".", "load_growth", "(", "model", ")", "# self.load_experiment(config.config.get(\"growth\"), model)", ...
Load all information from an experimental configuration file. Parameters ---------- model : cobra.Model The metabolic model under investigation.
[ "Load", "all", "information", "from", "an", "experimental", "configuration", "file", "." ]
python
train
27.466667
ray-project/ray
python/ray/monitor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/monitor.py#L201-L217
def xray_driver_removed_handler(self, unused_channel, data): """Handle a notification that a driver has been removed. Args: unused_channel: The message channel. data: The message data. """ gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( data, 0) driver_data = gcs_entries.Entries(0) message = ray.gcs_utils.DriverTableData.GetRootAsDriverTableData( driver_data, 0) driver_id = message.DriverId() logger.info("Monitor: " "XRay Driver {} has been removed.".format( binary_to_hex(driver_id))) self._xray_clean_up_entries_for_driver(driver_id)
[ "def", "xray_driver_removed_handler", "(", "self", ",", "unused_channel", ",", "data", ")", ":", "gcs_entries", "=", "ray", ".", "gcs_utils", ".", "GcsTableEntry", ".", "GetRootAsGcsTableEntry", "(", "data", ",", "0", ")", "driver_data", "=", "gcs_entries", ".",...
Handle a notification that a driver has been removed. Args: unused_channel: The message channel. data: The message data.
[ "Handle", "a", "notification", "that", "a", "driver", "has", "been", "removed", "." ]
python
train
41.529412
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/svg.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/svg.py#L41-L54
def svg_to_clipboard(string): """ Copy a SVG document to the clipboard. Parameters: ----------- string : basestring A Python string containing a SVG document. """ if isinstance(string, unicode): string = string.encode('utf-8') mime_data = QtCore.QMimeData() mime_data.setData('image/svg+xml', string) QtGui.QApplication.clipboard().setMimeData(mime_data)
[ "def", "svg_to_clipboard", "(", "string", ")", ":", "if", "isinstance", "(", "string", ",", "unicode", ")", ":", "string", "=", "string", ".", "encode", "(", "'utf-8'", ")", "mime_data", "=", "QtCore", ".", "QMimeData", "(", ")", "mime_data", ".", "setDa...
Copy a SVG document to the clipboard. Parameters: ----------- string : basestring A Python string containing a SVG document.
[ "Copy", "a", "SVG", "document", "to", "the", "clipboard", "." ]
python
test
28.214286
seleniumbase/SeleniumBase
seleniumbase/fixtures/email_manager.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/email_manager.py#L241-L252
def search_for_subject(self, subject, timeout=None, content_type=None): """ Get content of emails, sent to a specific email address. @Params email - the recipient email address to search for timeout - seconds to try beore timing out content_type - type of email string to return @Returns Content of the matched email in the given content type """ return self.search(timeout=timeout, content_type=content_type, SUBJECT=subject)
[ "def", "search_for_subject", "(", "self", ",", "subject", ",", "timeout", "=", "None", ",", "content_type", "=", "None", ")", ":", "return", "self", ".", "search", "(", "timeout", "=", "timeout", ",", "content_type", "=", "content_type", ",", "SUBJECT", "=...
Get content of emails, sent to a specific email address. @Params email - the recipient email address to search for timeout - seconds to try beore timing out content_type - type of email string to return @Returns Content of the matched email in the given content type
[ "Get", "content", "of", "emails", "sent", "to", "a", "specific", "email", "address", "." ]
python
train
43.5
spacetelescope/pysynphot
pysynphot/refs.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/refs.py#L106-L129
def _set_default_refdata(): """Default refdata set on import.""" global GRAPHTABLE, COMPTABLE, THERMTABLE, PRIMARY_AREA # Component tables are defined here. try: GRAPHTABLE = _refTable(os.path.join('mtab','*_tmg.fits')) COMPTABLE = _refTable(os.path.join('mtab','*_tmc.fits')) except IOError as e: GRAPHTABLE = None COMPTABLE = None warnings.warn('No graph or component tables found; ' 'functionality will be SEVERELY crippled. ' + str(e)) try: THERMTABLE = _refTable(os.path.join('mtab','*_tmt.fits')) except IOError as e: THERMTABLE = None warnings.warn('No thermal tables found, ' 'no thermal calculations can be performed. ' + str(e)) PRIMARY_AREA = 45238.93416 # cm^2 - default to HST mirror set_default_waveset()
[ "def", "_set_default_refdata", "(", ")", ":", "global", "GRAPHTABLE", ",", "COMPTABLE", ",", "THERMTABLE", ",", "PRIMARY_AREA", "# Component tables are defined here.", "try", ":", "GRAPHTABLE", "=", "_refTable", "(", "os", ".", "path", ".", "join", "(", "'mtab'", ...
Default refdata set on import.
[ "Default", "refdata", "set", "on", "import", "." ]
python
train
35.208333
PMEAL/OpenPNM
openpnm/algorithms/FickianDiffusion.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/FickianDiffusion.py#L99-L135
def calc_effective_diffusivity(self, inlets=None, outlets=None, domain_area=None, domain_length=None): r""" This calculates the effective diffusivity in this linear transport algorithm. Parameters ---------- inlets : array_like The pores where the inlet composition boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet composition boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes. """ return self._calc_eff_prop(inlets=inlets, outlets=outlets, domain_area=domain_area, domain_length=domain_length)
[ "def", "calc_effective_diffusivity", "(", "self", ",", "inlets", "=", "None", ",", "outlets", "=", "None", ",", "domain_area", "=", "None", ",", "domain_length", "=", "None", ")", ":", "return", "self", ".", "_calc_eff_prop", "(", "inlets", "=", "inlets", ...
r""" This calculates the effective diffusivity in this linear transport algorithm. Parameters ---------- inlets : array_like The pores where the inlet composition boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet composition boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes.
[ "r", "This", "calculates", "the", "effective", "diffusivity", "in", "this", "linear", "transport", "algorithm", "." ]
python
train
42.135135
bitesofcode/projexui
projexui/widgets/xprogressfeedbackwidget/xprogressfeedbackwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xprogressfeedbackwidget/xprogressfeedbackwidget.py#L95-L104
def showMessage(self, level, message): """ Logs the inputed message for the given level. This will update both the feedback label and the details widget. :param level | <int> message | <str> """ self.uiFeedbackLBL.setText(message) self.uiLoggerEDIT.log(level, message)
[ "def", "showMessage", "(", "self", ",", "level", ",", "message", ")", ":", "self", ".", "uiFeedbackLBL", ".", "setText", "(", "message", ")", "self", ".", "uiLoggerEDIT", ".", "log", "(", "level", ",", "message", ")" ]
Logs the inputed message for the given level. This will update both the feedback label and the details widget. :param level | <int> message | <str>
[ "Logs", "the", "inputed", "message", "for", "the", "given", "level", ".", "This", "will", "update", "both", "the", "feedback", "label", "and", "the", "details", "widget", ".", ":", "param", "level", "|", "<int", ">", "message", "|", "<str", ">" ]
python
train
36.1
serge-sans-paille/pythran
pythran/backend.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L82-L124
def cxx_loop(visit): """ Decorator for loop node (For and While) to handle "else" branching. Decorated node will save flags for a goto statement used instead of usual break and add this flag at the end of the else statements. Examples -------- >> for i in xrange(12): >> if i == 5: >> break >> else: >> ... some code ... Becomes >> for(type i : xrange(12)) >> if(i==5) >> goto __no_breaking0; >> ... some code ... >> __no_breaking0; """ def loop_visitor(self, node): """ New decorate function. It push the breaking flag, run the visitor and add "else" statements. """ if not node.orelse: with pushpop(self.break_handlers, None): res = visit(self, node) return res break_handler = "__no_breaking{0}".format(id(node)) with pushpop(self.break_handlers, break_handler): res = visit(self, node) # handle the body of the for loop orelse = [self.visit(stmt) for stmt in node.orelse] orelse_label = Label(break_handler) return Block([res] + orelse + [orelse_label]) return loop_visitor
[ "def", "cxx_loop", "(", "visit", ")", ":", "def", "loop_visitor", "(", "self", ",", "node", ")", ":", "\"\"\"\n New decorate function.\n\n It push the breaking flag, run the visitor and add \"else\" statements.\n \"\"\"", "if", "not", "node", ".", "orelse",...
Decorator for loop node (For and While) to handle "else" branching. Decorated node will save flags for a goto statement used instead of usual break and add this flag at the end of the else statements. Examples -------- >> for i in xrange(12): >> if i == 5: >> break >> else: >> ... some code ... Becomes >> for(type i : xrange(12)) >> if(i==5) >> goto __no_breaking0; >> ... some code ... >> __no_breaking0;
[ "Decorator", "for", "loop", "node", "(", "For", "and", "While", ")", "to", "handle", "else", "branching", "." ]
python
train
27.674419
iotile/coretools
transport_plugins/awsiot/iotile_transport_awsiot/device_adapter.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/device_adapter.py#L471-L517
def _on_response_message(self, sequence, topic, message): """Process a response message received Args: sequence (int): The sequence number of the packet received topic (string): The topic this message was received on message (dict): The message itself """ try: conn_key = self._find_connection(topic) context = self.conns.get_context(conn_key) except ArgumentError: self._logger.warn("Dropping message that does not correspond with a known connection, message=%s", message) return if 'client' in message and message['client'] != self.name: self._logger.debug("Dropping message that is for another client %s, we are %s", message['client'], self.name) if messages.DisconnectionResponse.matches(message): self.conns.finish_disconnection(conn_key, message['success'], message.get('failure_reason', None)) elif messages.OpenInterfaceResponse.matches(message): self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None)) elif messages.RPCResponse.matches(message): rpc_message = messages.RPCResponse.verify(message) self.conns.finish_operation(conn_key, rpc_message['success'], rpc_message.get('failure_reason', None), rpc_message.get('status', None), rpc_message.get('payload', None)) elif messages.ProgressNotification.matches(message): progress_callback = context.get('progress_callback', None) if progress_callback is not None: progress_callback(message['done_count'], message['total_count']) elif messages.ScriptResponse.matches(message): if 'progress_callback' in context: del context['progress_callback'] self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None)) elif messages.DisconnectionNotification.matches(message): try: conn_key = self._find_connection(topic) conn_id = self.conns.get_connection_id(conn_key) except ArgumentError: self._logger.warn("Dropping disconnect notification that does not correspond with a known connection, topic=%s", topic) return self.conns.unexpected_disconnect(conn_key) 
self._trigger_callback('on_disconnect', self.id, conn_id) else: self._logger.warn("Invalid response message received, message=%s", message)
[ "def", "_on_response_message", "(", "self", ",", "sequence", ",", "topic", ",", "message", ")", ":", "try", ":", "conn_key", "=", "self", ".", "_find_connection", "(", "topic", ")", "context", "=", "self", ".", "conns", ".", "get_context", "(", "conn_key",...
Process a response message received Args: sequence (int): The sequence number of the packet received topic (string): The topic this message was received on message (dict): The message itself
[ "Process", "a", "response", "message", "received" ]
python
train
53.851064
isambard-uob/ampal
src/ampal/align.py
https://github.com/isambard-uob/ampal/blob/906e2afacb435ffb129b381f262ff8e7bfb324c5/src/ampal/align.py#L93-L132
def _mmc_loop(self, rounds, max_angle, max_distance, temp=298.15, stop_when=None, verbose=True): """The main Metropolis Monte Carlo loop.""" current_round = 0 while current_round < rounds: working_model = copy.deepcopy(self.polypeptide) random_vector = unit_vector(numpy.random.uniform(-1, 1, size=3)) mode = random.choice(['rotate', 'rotate', 'rotate', 'translate']) if mode == 'rotate': random_angle = numpy.random.rand() * max_angle working_model.rotate(random_angle, random_vector, working_model.centre_of_mass) else: random_translation = random_vector * (numpy.random.rand() * max_distance) working_model.translate(random_translation) proposed_energy = self.eval_fn(working_model, *self.eval_args) move_accepted = self.check_move(proposed_energy, self.current_energy, t=temp) if move_accepted: self.current_energy = proposed_energy if self.current_energy < self.best_energy: self.polypeptide = working_model self.best_energy = copy.deepcopy(self.current_energy) self.best_model = copy.deepcopy(working_model) if verbose: sys.stdout.write( '\rRound: {}, Current RMSD: {}, Proposed RMSD: {} ' '(best {}), {}. ' .format(current_round, self.float_f(self.current_energy), self.float_f(proposed_energy), self.float_f( self.best_energy), "ACCEPTED" if move_accepted else "DECLINED") ) sys.stdout.flush() current_round += 1 if stop_when: if self.best_energy <= stop_when: break return
[ "def", "_mmc_loop", "(", "self", ",", "rounds", ",", "max_angle", ",", "max_distance", ",", "temp", "=", "298.15", ",", "stop_when", "=", "None", ",", "verbose", "=", "True", ")", ":", "current_round", "=", "0", "while", "current_round", "<", "rounds", "...
The main Metropolis Monte Carlo loop.
[ "The", "main", "Metropolis", "Monte", "Carlo", "loop", "." ]
python
train
51.325
pkgw/pwkit
pwkit/numutil.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/numutil.py#L750-L771
def make_tophat_ee (lower, upper): """Return a ufunc-like tophat function on the defined range, left-exclusive and right-exclusive. Returns 1 if lower < x < upper, 0 otherwise. """ if not np.isfinite (lower): raise ValueError ('"lower" argument must be finite number; got %r' % lower) if not np.isfinite (upper): raise ValueError ('"upper" argument must be finite number; got %r' % upper) def range_tophat_ee (x): x = np.asarray (x) x1 = np.atleast_1d (x) r = ((lower < x1) & (x1 < upper)).astype (x.dtype) if x.ndim == 0: return np.asscalar (r) return r range_tophat_ee.__doc__ = ('Ranged tophat function, left-exclusive and ' 'right-exclusive. Returns 1 if %g < x < %g, ' '0 otherwise.') % (lower, upper) return range_tophat_ee
[ "def", "make_tophat_ee", "(", "lower", ",", "upper", ")", ":", "if", "not", "np", ".", "isfinite", "(", "lower", ")", ":", "raise", "ValueError", "(", "'\"lower\" argument must be finite number; got %r'", "%", "lower", ")", "if", "not", "np", ".", "isfinite", ...
Return a ufunc-like tophat function on the defined range, left-exclusive and right-exclusive. Returns 1 if lower < x < upper, 0 otherwise.
[ "Return", "a", "ufunc", "-", "like", "tophat", "function", "on", "the", "defined", "range", "left", "-", "exclusive", "and", "right", "-", "exclusive", ".", "Returns", "1", "if", "lower", "<", "x", "<", "upper", "0", "otherwise", "." ]
python
train
39.681818
librosa/librosa
librosa/display.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L791-L831
def __scale_axes(axes, ax_type, which): '''Set the axis scaling''' kwargs = dict() if which == 'x': thresh = 'linthreshx' base = 'basex' scale = 'linscalex' scaler = axes.set_xscale limit = axes.set_xlim else: thresh = 'linthreshy' base = 'basey' scale = 'linscaley' scaler = axes.set_yscale limit = axes.set_ylim # Map ticker scales if ax_type == 'mel': mode = 'symlog' kwargs[thresh] = 1000.0 kwargs[base] = 2 elif ax_type == 'log': mode = 'symlog' kwargs[base] = 2 kwargs[thresh] = core.note_to_hz('C2') kwargs[scale] = 0.5 elif ax_type in ['cqt', 'cqt_hz', 'cqt_note']: mode = 'log' kwargs[base] = 2 elif ax_type == 'tempo': mode = 'log' kwargs[base] = 2 limit(16, 480) else: return scaler(mode, **kwargs)
[ "def", "__scale_axes", "(", "axes", ",", "ax_type", ",", "which", ")", ":", "kwargs", "=", "dict", "(", ")", "if", "which", "==", "'x'", ":", "thresh", "=", "'linthreshx'", "base", "=", "'basex'", "scale", "=", "'linscalex'", "scaler", "=", "axes", "."...
Set the axis scaling
[ "Set", "the", "axis", "scaling" ]
python
test
22.02439
jrief/django-websocket-redis
ws4redis/websocket.py
https://github.com/jrief/django-websocket-redis/blob/abcddaad2f579d71dbf375e5e34bc35eef795a81/ws4redis/websocket.py#L384-L425
def encode_header(cls, fin, opcode, mask, length, flags): """ Encodes a WebSocket header. :param fin: Whether this is the final frame for this opcode. :param opcode: The opcode of the payload, see `OPCODE_*` :param mask: Whether the payload is masked. :param length: The length of the frame. :param flags: The RSV* flags. :return: A bytestring encoded header. """ first_byte = opcode second_byte = 0 if six.PY2: extra = '' else: extra = b'' if fin: first_byte |= cls.FIN_MASK if flags & cls.RSV0_MASK: first_byte |= cls.RSV0_MASK if flags & cls.RSV1_MASK: first_byte |= cls.RSV1_MASK if flags & cls.RSV2_MASK: first_byte |= cls.RSV2_MASK # now deal with length complexities if length < 126: second_byte += length elif length <= 0xffff: second_byte += 126 extra = struct.pack('!H', length) elif length <= 0xffffffffffffffff: second_byte += 127 extra = struct.pack('!Q', length) else: raise FrameTooLargeException if mask: second_byte |= cls.MASK_MASK extra += mask if six.PY3: return bytes([first_byte, second_byte]) + extra return chr(first_byte) + chr(second_byte) + extra
[ "def", "encode_header", "(", "cls", ",", "fin", ",", "opcode", ",", "mask", ",", "length", ",", "flags", ")", ":", "first_byte", "=", "opcode", "second_byte", "=", "0", "if", "six", ".", "PY2", ":", "extra", "=", "''", "else", ":", "extra", "=", "b...
Encodes a WebSocket header. :param fin: Whether this is the final frame for this opcode. :param opcode: The opcode of the payload, see `OPCODE_*` :param mask: Whether the payload is masked. :param length: The length of the frame. :param flags: The RSV* flags. :return: A bytestring encoded header.
[ "Encodes", "a", "WebSocket", "header", "." ]
python
train
33.642857
neherlab/treetime
treetime/treeanc.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeanc.py#L1086-L1117
def dict_sequence(self, node, keep_var_ambigs=False): """ For VCF-based TreeAnc objects, we do not want to store the entire sequence on every node, as they could be large. Instead, this returns the dict of variants & their positions for this sequence. This is used in place of :py:meth:`treetime.TreeAnc.expanded_sequence` for VCF-based objects throughout TreeAnc. However, users can still call :py:meth:`expanded_sequence` if they require the full sequence. Parameters ---------- node : PhyloTree.Clade Tree node Returns ------- seq : dict dict where keys are the basepair position (numbering from 0) and value is the variant call """ seq = {} node_seq = node.cseq if keep_var_ambigs and hasattr(node, "original_cseq") and node.is_terminal(): node_seq = node.original_cseq for pos in self.nonref_positions: cseqLoc = self.full_to_reduced_sequence_map[pos] base = node_seq[cseqLoc] if self.ref[pos] != base: seq[pos] = base return seq
[ "def", "dict_sequence", "(", "self", ",", "node", ",", "keep_var_ambigs", "=", "False", ")", ":", "seq", "=", "{", "}", "node_seq", "=", "node", ".", "cseq", "if", "keep_var_ambigs", "and", "hasattr", "(", "node", ",", "\"original_cseq\"", ")", "and", "n...
For VCF-based TreeAnc objects, we do not want to store the entire sequence on every node, as they could be large. Instead, this returns the dict of variants & their positions for this sequence. This is used in place of :py:meth:`treetime.TreeAnc.expanded_sequence` for VCF-based objects throughout TreeAnc. However, users can still call :py:meth:`expanded_sequence` if they require the full sequence. Parameters ---------- node : PhyloTree.Clade Tree node Returns ------- seq : dict dict where keys are the basepair position (numbering from 0) and value is the variant call
[ "For", "VCF", "-", "based", "TreeAnc", "objects", "we", "do", "not", "want", "to", "store", "the", "entire", "sequence", "on", "every", "node", "as", "they", "could", "be", "large", ".", "Instead", "this", "returns", "the", "dict", "of", "variants", "&",...
python
test
35.75
ratt-ru/PyMORESANE
pymoresane/iuwt_toolbox.py
https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/iuwt_toolbox.py#L17-L48
def estimate_threshold(in1, edge_excl=0, int_excl=0): """ This function estimates the noise using the MAD estimator. INPUTS: in1 (no default): The array from which the noise is estimated OUTPUTS: out1 An array of per-scale noise estimates. """ out1 = np.empty([in1.shape[0]]) mid = in1.shape[1]/2 if (edge_excl!=0) | (int_excl!=0): if edge_excl!=0: mask = np.zeros([in1.shape[1], in1.shape[2]]) mask[edge_excl:-edge_excl, edge_excl:-edge_excl] = 1 else: mask = np.ones([in1.shape[1], in1.shape[2]]) if int_excl!=0: mask[mid-int_excl:mid+int_excl, mid-int_excl:mid+int_excl] = 0 else: mask = np.ones([in1.shape[1], in1.shape[2]]) for i in range(in1.shape[0]): out1[i] = np.median(np.abs(in1[i,mask==1]))/0.6745 return out1
[ "def", "estimate_threshold", "(", "in1", ",", "edge_excl", "=", "0", ",", "int_excl", "=", "0", ")", ":", "out1", "=", "np", ".", "empty", "(", "[", "in1", ".", "shape", "[", "0", "]", "]", ")", "mid", "=", "in1", ".", "shape", "[", "1", "]", ...
This function estimates the noise using the MAD estimator. INPUTS: in1 (no default): The array from which the noise is estimated OUTPUTS: out1 An array of per-scale noise estimates.
[ "This", "function", "estimates", "the", "noise", "using", "the", "MAD", "estimator", "." ]
python
train
27.5
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L10981-L10990
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'enabled') and self.enabled is not None: _dict['enabled'] = self.enabled if hasattr(self, 'time_zone') and self.time_zone is not None: _dict['time_zone'] = self.time_zone if hasattr(self, 'frequency') and self.frequency is not None: _dict['frequency'] = self.frequency return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'enabled'", ")", "and", "self", ".", "enabled", "is", "not", "None", ":", "_dict", "[", "'enabled'", "]", "=", "self", ".", "enabled", "if", "hasa...
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
46
jupyterhub/nbgitpuller
nbgitpuller/pull.py
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L83-L97
def initialize_repo(self): """ Clones repository & sets up usernames. """ logging.info('Repo {} doesn\'t exist. Cloning...'.format(self.repo_dir)) clone_args = ['git', 'clone'] if self.depth and self.depth > 0: clone_args.extend(['--depth', str(self.depth)]) clone_args.extend(['--branch', self.branch_name]) clone_args.extend([self.git_url, self.repo_dir]) yield from execute_cmd(clone_args) yield from execute_cmd(['git', 'config', 'user.email', 'nbgitpuller@example.com'], cwd=self.repo_dir) yield from execute_cmd(['git', 'config', 'user.name', 'nbgitpuller'], cwd=self.repo_dir) logging.info('Repo {} initialized'.format(self.repo_dir))
[ "def", "initialize_repo", "(", "self", ")", ":", "logging", ".", "info", "(", "'Repo {} doesn\\'t exist. Cloning...'", ".", "format", "(", "self", ".", "repo_dir", ")", ")", "clone_args", "=", "[", "'git'", ",", "'clone'", "]", "if", "self", ".", "depth", ...
Clones repository & sets up usernames.
[ "Clones", "repository", "&", "sets", "up", "usernames", "." ]
python
train
49.066667
pyapi-gitlab/pyapi-gitlab
gitlab/__init__.py
https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L1578-L1593
def getrawblob(self, project_id, sha1): """ Get the raw file contents for a blob by blob SHA. :param project_id: The ID of a project :param sha1: the commit sha :return: raw blob """ request = requests.get( '{0}/{1}/repository/raw_blobs/{2}'.format(self.projects_url, project_id, sha1), verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout) if request.status_code == 200: return request.content else: return False
[ "def", "getrawblob", "(", "self", ",", "project_id", ",", "sha1", ")", ":", "request", "=", "requests", ".", "get", "(", "'{0}/{1}/repository/raw_blobs/{2}'", ".", "format", "(", "self", ".", "projects_url", ",", "project_id", ",", "sha1", ")", ",", "verify"...
Get the raw file contents for a blob by blob SHA. :param project_id: The ID of a project :param sha1: the commit sha :return: raw blob
[ "Get", "the", "raw", "file", "contents", "for", "a", "blob", "by", "blob", "SHA", "." ]
python
train
34.375
mromanello/hucitlib
knowledge_base/surfext/__init__.py
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/surfext/__init__.py#L164-L183
def get_abbreviations(self): """ Get abbreviations of the names of the author. :return: a list of strings (empty list if no abbreviations available). """ abbreviations = [] try: type_abbreviation = self.session.get_resource(BASE_URI_TYPES % "abbreviation" , self.session.get_class(surf.ns.ECRM['E55_Type'])) abbreviations = [unicode(label) for name in self.ecrm_P1_is_identified_by for abbreviation in name.ecrm_P139_has_alternative_form for label in abbreviation.rdfs_label if name.uri == surf.ns.EFRBROO['F12_Name'] and abbreviation.ecrm_P2_has_type.first == type_abbreviation] except Exception as e: logger.debug("Exception raised when getting abbreviations for %a"%self) finally: return abbreviations
[ "def", "get_abbreviations", "(", "self", ")", ":", "abbreviations", "=", "[", "]", "try", ":", "type_abbreviation", "=", "self", ".", "session", ".", "get_resource", "(", "BASE_URI_TYPES", "%", "\"abbreviation\"", ",", "self", ".", "session", ".", "get_class",...
Get abbreviations of the names of the author. :return: a list of strings (empty list if no abbreviations available).
[ "Get", "abbreviations", "of", "the", "names", "of", "the", "author", "." ]
python
train
52.7
Jammy2211/PyAutoLens
autolens/data/array/grids.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/grids.py#L237-L248
def apply_function(self, func): """Apply a function to all grid_stack in the grid-stack. This is used by the *ray-tracing* module to easily apply tracing operations to all grid_stack.""" if self.blurring is not None and self.pix is not None: return GridStack(func(self.regular), func(self.sub), func(self.blurring), func(self.pix)) elif self.blurring is None and self.pix is not None: return GridStack(func(self.regular), func(self.sub), self.blurring, func(self.pix)) elif self.blurring is not None and self.pix is None: return GridStack(func(self.regular), func(self.sub), func(self.blurring), self.pix) else: return GridStack(func(self.regular), func(self.sub), self.blurring, self.pix)
[ "def", "apply_function", "(", "self", ",", "func", ")", ":", "if", "self", ".", "blurring", "is", "not", "None", "and", "self", ".", "pix", "is", "not", "None", ":", "return", "GridStack", "(", "func", "(", "self", ".", "regular", ")", ",", "func", ...
Apply a function to all grid_stack in the grid-stack. This is used by the *ray-tracing* module to easily apply tracing operations to all grid_stack.
[ "Apply", "a", "function", "to", "all", "grid_stack", "in", "the", "grid", "-", "stack", ".", "This", "is", "used", "by", "the", "*", "ray", "-", "tracing", "*", "module", "to", "easily", "apply", "tracing", "operations", "to", "all", "grid_stack", "." ]
python
valid
65.25
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L8040-L8062
def get_method_descriptor(self, class_name, method_name, descriptor): """ Return the specific method :param class_name: the class name of the method :type class_name: string :param method_name: the name of the method :type method_name: string :param descriptor: the descriptor of the method :type descriptor: string :rtype: None or a :class:`EncodedMethod` object """ key = class_name + method_name + descriptor if self.__cache_methods is None: self.__cache_methods = {} for i in self.get_classes(): for j in i.get_methods(): self.__cache_methods[j.get_class_name() + j.get_name() + j.get_descriptor()] = j return self.__cache_methods.get(key)
[ "def", "get_method_descriptor", "(", "self", ",", "class_name", ",", "method_name", ",", "descriptor", ")", ":", "key", "=", "class_name", "+", "method_name", "+", "descriptor", "if", "self", ".", "__cache_methods", "is", "None", ":", "self", ".", "__cache_met...
Return the specific method :param class_name: the class name of the method :type class_name: string :param method_name: the name of the method :type method_name: string :param descriptor: the descriptor of the method :type descriptor: string :rtype: None or a :class:`EncodedMethod` object
[ "Return", "the", "specific", "method" ]
python
train
36.086957
tensorflow/datasets
tensorflow_datasets/image/celeba.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/celeba.py#L177-L207
def _generate_examples(self, file_id, extracted_dirs): """Yields examples.""" filedir = os.path.join(extracted_dirs["img_align_celeba"], "img_align_celeba") img_list_path = extracted_dirs["list_eval_partition"] landmarks_path = extracted_dirs["landmarks_celeba"] attr_path = extracted_dirs["list_attr_celeba"] with tf.io.gfile.GFile(img_list_path) as f: files = [ line.split()[0] for line in f.readlines() if int(line.split()[1]) == file_id ] attributes = self._process_celeba_config_file(attr_path) landmarks = self._process_celeba_config_file(landmarks_path) for file_name in sorted(files): path = os.path.join(filedir, file_name) yield { "image": path, "landmarks": { k: v for k, v in zip(landmarks[0], landmarks[1][file_name]) }, "attributes": { # atributes value are either 1 or -1, so convert to bool k: v > 0 for k, v in zip(attributes[0], attributes[1][file_name]) }, }
[ "def", "_generate_examples", "(", "self", ",", "file_id", ",", "extracted_dirs", ")", ":", "filedir", "=", "os", ".", "path", ".", "join", "(", "extracted_dirs", "[", "\"img_align_celeba\"", "]", ",", "\"img_align_celeba\"", ")", "img_list_path", "=", "extracted...
Yields examples.
[ "Yields", "examples", "." ]
python
train
34.387097
googleapis/oauth2client
oauth2client/contrib/appengine.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/appengine.py#L333-L349
def _is_ndb(self): """Determine whether the model of the instance is an NDB model. Returns: Boolean indicating whether or not the model is an NDB or DB model. """ # issubclass will fail if one of the arguments is not a class, only # need worry about new-style classes since ndb and db models are # new-style if isinstance(self._model, type): if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL): return True elif issubclass(self._model, db.Model): return False raise TypeError( 'Model class not an NDB or DB model: {0}.'.format(self._model))
[ "def", "_is_ndb", "(", "self", ")", ":", "# issubclass will fail if one of the arguments is not a class, only", "# need worry about new-style classes since ndb and db models are", "# new-style", "if", "isinstance", "(", "self", ".", "_model", ",", "type", ")", ":", "if", "_ND...
Determine whether the model of the instance is an NDB model. Returns: Boolean indicating whether or not the model is an NDB or DB model.
[ "Determine", "whether", "the", "model", "of", "the", "instance", "is", "an", "NDB", "model", "." ]
python
valid
40.235294
wandb/client
wandb/apis/internal.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L233-L270
def settings(self, key=None, section=None): """The settings overridden from the wandb/settings file. Args: key (str, optional): If provided only this setting is returned section (str, optional): If provided this section of the setting file is used, defaults to "default" Returns: A dict with the current settings { "entity": "models", "base_url": "https://api.wandb.ai", "project": None } """ if not self._settings: self._settings = self.default_settings.copy() section = section or self._settings['section'] try: if section in self.settings_parser.sections(): for option in self.settings_parser.options(section): self._settings[option] = self.settings_parser.get( section, option) except configparser.InterpolationSyntaxError: print("WARNING: Unable to parse settings file") self._settings["project"] = env.get_project( self._settings.get("project")) self._settings["entity"] = env.get_entity( self._settings.get("entity")) self._settings["base_url"] = env.get_base_url( self._settings.get("base_url")) self._settings["ignore_globs"] = env.get_ignore( self._settings.get("ignore_globs") ) return self._settings if key is None else self._settings[key]
[ "def", "settings", "(", "self", ",", "key", "=", "None", ",", "section", "=", "None", ")", ":", "if", "not", "self", ".", "_settings", ":", "self", ".", "_settings", "=", "self", ".", "default_settings", ".", "copy", "(", ")", "section", "=", "sectio...
The settings overridden from the wandb/settings file. Args: key (str, optional): If provided only this setting is returned section (str, optional): If provided this section of the setting file is used, defaults to "default" Returns: A dict with the current settings { "entity": "models", "base_url": "https://api.wandb.ai", "project": None }
[ "The", "settings", "overridden", "from", "the", "wandb", "/", "settings", "file", "." ]
python
train
41.631579
polysquare/polysquare-generic-file-linter
polysquarelinter/linter.py
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/linter.py#L774-L798
def _run_lint_on_file_stamped_args(file_path, # suppress(too-many-arguments) stamp_file_path, log_technical_terms_to, linter_functions, tool_options, fix_what_you_can): """Return tuple of args and kwargs that function would be called with.""" dictionary_path = os.path.abspath("DICTIONARY") dependencies = [file_path] if os.path.exists(dictionary_path): dependencies.append(dictionary_path) kwargs = OrderedDict() kwargs["jobstamps_dependencies"] = dependencies kwargs["jobstamps_cache_output_directory"] = stamp_file_path if log_technical_terms_to: kwargs["jobstamps_output_files"] = [log_technical_terms_to] return ((file_path, linter_functions, tool_options, fix_what_you_can), kwargs)
[ "def", "_run_lint_on_file_stamped_args", "(", "file_path", ",", "# suppress(too-many-arguments)", "stamp_file_path", ",", "log_technical_terms_to", ",", "linter_functions", ",", "tool_options", ",", "fix_what_you_can", ")", ":", "dictionary_path", "=", "os", ".", "path", ...
Return tuple of args and kwargs that function would be called with.
[ "Return", "tuple", "of", "args", "and", "kwargs", "that", "function", "would", "be", "called", "with", "." ]
python
train
37.88
fermiPy/fermipy
fermipy/hpx_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/hpx_utils.py#L658-L671
def make_energy_bounds_hdu(self, extname="EBOUNDS"): """ Builds and returns a FITs HDU with the energy bin boundries extname : The HDU extension name """ if self._ebins is None: return None cols = [fits.Column("CHANNEL", "I", array=np.arange(1, len(self._ebins + 1))), fits.Column("E_MIN", "1E", unit='keV', array=1000 * self._ebins[0:-1]), fits.Column("E_MAX", "1E", unit='keV', array=1000 * self._ebins[1:])] hdu = fits.BinTableHDU.from_columns( cols, self.make_header(), name=extname) return hdu
[ "def", "make_energy_bounds_hdu", "(", "self", ",", "extname", "=", "\"EBOUNDS\"", ")", ":", "if", "self", ".", "_ebins", "is", "None", ":", "return", "None", "cols", "=", "[", "fits", ".", "Column", "(", "\"CHANNEL\"", ",", "\"I\"", ",", "array", "=", ...
Builds and returns a FITs HDU with the energy bin boundries extname : The HDU extension name
[ "Builds", "and", "returns", "a", "FITs", "HDU", "with", "the", "energy", "bin", "boundries" ]
python
train
45.714286
eight04/pyAPNG
apng/__init__.py
https://github.com/eight04/pyAPNG/blob/b4d2927f7892a1de967b5cf57d434ed65f6a017e/apng/__init__.py#L345-L411
def to_bytes(self): """Convert the entire image to bytes. :rtype: bytes """ # grab the chunks we needs out = [PNG_SIGN] # FIXME: it's tricky to define "other_chunks". HoneyView stop the # animation if it sees chunks other than fctl or idat, so we put other # chunks to the end of the file other_chunks = [] seq = 0 # for first frame png, control = self.frames[0] # header out.append(png.hdr) # acTL out.append(make_chunk("acTL", struct.pack("!II", len(self.frames), self.num_plays))) # fcTL if control: out.append(make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes())) seq += 1 # and others... idat_chunks = [] for type_, data in png.chunks: if type_ in ("IHDR", "IEND"): continue if type_ == "IDAT": # put at last idat_chunks.append(data) continue out.append(data) out.extend(idat_chunks) # FIXME: we should do some optimization to frames... # for other frames for png, control in self.frames[1:]: # fcTL out.append( make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes()) ) seq += 1 # and others... for type_, data in png.chunks: if type_ in ("IHDR", "IEND") or type_ in CHUNK_BEFORE_IDAT: continue elif type_ == "IDAT": # convert IDAT to fdAT out.append( make_chunk("fdAT", struct.pack("!I", seq) + data[8:-4]) ) seq += 1 else: other_chunks.append(data) # end out.extend(other_chunks) out.append(png.end) return b"".join(out)
[ "def", "to_bytes", "(", "self", ")", ":", "# grab the chunks we needs", "out", "=", "[", "PNG_SIGN", "]", "# FIXME: it's tricky to define \"other_chunks\". HoneyView stop the ", "# animation if it sees chunks other than fctl or idat, so we put other", "# chunks to the end of the file", ...
Convert the entire image to bytes. :rtype: bytes
[ "Convert", "the", "entire", "image", "to", "bytes", ".", ":", "rtype", ":", "bytes" ]
python
train
22
Duke-GCB/lando-messaging
lando_messaging/workqueue.py
https://github.com/Duke-GCB/lando-messaging/blob/b90ccc79a874714e0776af8badf505bb2b56c0ec/lando_messaging/workqueue.py#L63-L70
def close(self): """ Close internal connection to AMQP if connected. """ if self.connection: logging.info("Closing connection to {}.".format(self.host)) self.connection.close() self.connection = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "connection", ":", "logging", ".", "info", "(", "\"Closing connection to {}.\"", ".", "format", "(", "self", ".", "host", ")", ")", "self", ".", "connection", ".", "close", "(", ")", "self", "."...
Close internal connection to AMQP if connected.
[ "Close", "internal", "connection", "to", "AMQP", "if", "connected", "." ]
python
train
32.5
thespacedoctor/neddy
neddy/_basesearch.py
https://github.com/thespacedoctor/neddy/blob/f32653b7d6a39a2c46c5845f83b3a29056311e5e/neddy/_basesearch.py#L237-L292
def _convert_html_to_csv( self): """ *contert html to csv* **Key Arguments:** # - **Return:** - None .. todo:: - @review: when complete, clean _convert_html_to_csv method - @review: when complete add logging """ self.log.info('starting the ``_convert_html_to_csv`` method') import codecs allData = "" regex1 = re.compile( r'.*<PRE><strong> (.*?)</strong>(.*?)</PRE></TABLE>.*', re.I | re.S) regex2 = re.compile(r'\|(\w)\|', re.I | re.S) for thisFile in self.nedResults: pathToReadFile = thisFile try: self.log.debug("attempting to open the file %s" % (pathToReadFile,)) readFile = codecs.open( pathToReadFile, encoding='utf-8', mode='r') thisData = readFile.read() readFile.close() except IOError, e: message = 'could not open the file %s' % (pathToReadFile,) self.log.critical(message) raise IOError(message) except: if pathToReadFile == None: message = 'we have no file to open' self.log.error(message) continue readFile.close() self.log.debug("regex 1 - sub") thisData = regex1.sub("\g<1>\g<2>", thisData) self.log.debug("regex 2 - sub") thisData = regex2.sub("abs(\g<1>)", thisData) self.log.debug("replace text") thisData = thisData.replace("|b|", "abs(b)") writeFile = codecs.open(pathToReadFile, encoding='utf-8', mode='w') writeFile.write(thisData) writeFile.close() self.log.info('completed the ``_convert_html_to_csv`` method') return None
[ "def", "_convert_html_to_csv", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_convert_html_to_csv`` method'", ")", "import", "codecs", "allData", "=", "\"\"", "regex1", "=", "re", ".", "compile", "(", "r'.*<PRE><strong> (.*?)</stron...
*contert html to csv* **Key Arguments:** # - **Return:** - None .. todo:: - @review: when complete, clean _convert_html_to_csv method - @review: when complete add logging
[ "*", "contert", "html", "to", "csv", "*" ]
python
train
33.839286
kinegratii/borax
borax/calendars/lunardate.py
https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/calendars/lunardate.py#L343-L346
def get_gz_cn(offset: int) -> str: """Get n-th(0-based) GanZhi """ return TextUtils.STEMS[offset % 10] + TextUtils.BRANCHES[offset % 12]
[ "def", "get_gz_cn", "(", "offset", ":", "int", ")", "->", "str", ":", "return", "TextUtils", ".", "STEMS", "[", "offset", "%", "10", "]", "+", "TextUtils", ".", "BRANCHES", "[", "offset", "%", "12", "]" ]
Get n-th(0-based) GanZhi
[ "Get", "n", "-", "th", "(", "0", "-", "based", ")", "GanZhi" ]
python
train
39.25
saltstack/salt
salt/client/ssh/ssh_py_shim.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/ssh_py_shim.py#L183-L203
def unpack_ext(ext_path): ''' Unpack the external modules. ''' modcache = os.path.join( OPTIONS.saltdir, 'running_data', 'var', 'cache', 'salt', 'minion', 'extmods') tfile = tarfile.TarFile.gzopen(ext_path) old_umask = os.umask(0o077) # pylint: disable=blacklisted-function tfile.extractall(path=modcache) tfile.close() os.umask(old_umask) # pylint: disable=blacklisted-function os.unlink(ext_path) ver_path = os.path.join(modcache, 'ext_version') ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version') shutil.move(ver_path, ver_dst)
[ "def", "unpack_ext", "(", "ext_path", ")", ":", "modcache", "=", "os", ".", "path", ".", "join", "(", "OPTIONS", ".", "saltdir", ",", "'running_data'", ",", "'var'", ",", "'cache'", ",", "'salt'", ",", "'minion'", ",", "'extmods'", ")", "tfile", "=", "...
Unpack the external modules.
[ "Unpack", "the", "external", "modules", "." ]
python
train
31
CalebBell/fluids
fluids/numerics/__init__.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/numerics/__init__.py#L941-L980
def damping_maintain_sign(x, step, damping=1.0, factor=0.5): '''Famping function which will maintain the sign of the variable being manipulated. If the step puts it at the other sign, the distance between `x` and `step` will be shortened by the multiple of `factor`; i.e. if factor is `x`, the new value of `x` will be 0 exactly. The provided `damping` is applied as well. Parameters ---------- x : float Previous value in iteration, [-] step : float Change in `x`, [-] damping : float, optional The damping factor to be applied always, [-] factor : float, optional If the calculated step changes sign, this factor will be used instead of the step, [-] Returns ------- x_new : float The new value in the iteration, [-] Notes ----- Examples -------- >>> damping_maintain_sign(100, -200, factor=.5) 50.0 ''' positive = x > 0.0 step_x = x + step if (positive and step_x < 0) or (not positive and step_x > 0.0): # print('damping') step = -factor*x return x + step*damping
[ "def", "damping_maintain_sign", "(", "x", ",", "step", ",", "damping", "=", "1.0", ",", "factor", "=", "0.5", ")", ":", "positive", "=", "x", ">", "0.0", "step_x", "=", "x", "+", "step", "if", "(", "positive", "and", "step_x", "<", "0", ")", "or", ...
Famping function which will maintain the sign of the variable being manipulated. If the step puts it at the other sign, the distance between `x` and `step` will be shortened by the multiple of `factor`; i.e. if factor is `x`, the new value of `x` will be 0 exactly. The provided `damping` is applied as well. Parameters ---------- x : float Previous value in iteration, [-] step : float Change in `x`, [-] damping : float, optional The damping factor to be applied always, [-] factor : float, optional If the calculated step changes sign, this factor will be used instead of the step, [-] Returns ------- x_new : float The new value in the iteration, [-] Notes ----- Examples -------- >>> damping_maintain_sign(100, -200, factor=.5) 50.0
[ "Famping", "function", "which", "will", "maintain", "the", "sign", "of", "the", "variable", "being", "manipulated", ".", "If", "the", "step", "puts", "it", "at", "the", "other", "sign", "the", "distance", "between", "x", "and", "step", "will", "be", "short...
python
train
27.95
bimbar/pykwb
pykwb/kwb.py
https://github.com/bimbar/pykwb/blob/3f607c064cc53b8310d22d42506ce817a5b735fe/pykwb/kwb.py#L259-L265
def _decode_temp(byte_1, byte_2): """Decode a signed short temperature as two bytes to a single number.""" temp = (byte_1 << 8) + byte_2 if (temp > 32767): temp = temp - 65536 temp = temp / 10 return temp
[ "def", "_decode_temp", "(", "byte_1", ",", "byte_2", ")", ":", "temp", "=", "(", "byte_1", "<<", "8", ")", "+", "byte_2", "if", "(", "temp", ">", "32767", ")", ":", "temp", "=", "temp", "-", "65536", "temp", "=", "temp", "/", "10", "return", "tem...
Decode a signed short temperature as two bytes to a single number.
[ "Decode", "a", "signed", "short", "temperature", "as", "two", "bytes", "to", "a", "single", "number", "." ]
python
train
35.714286
astrorafael/twisted-mqtt
mqtt/pdu.py
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L430-L445
def encode(self): ''' Encode and store an UNSUBCRIBE control packet @raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes ''' header = bytearray(1) payload = bytearray() varHeader = encode16Int(self.msgId) header[0] = 0xA2 # packet with QoS=1 for topic in self.topics: payload.extend(encodeString(topic)) # topic name header.extend(encodeLength(len(varHeader) + len(payload))) header.extend(varHeader) header.extend(payload) self.encoded = header return str(header) if PY2 else bytes(header)
[ "def", "encode", "(", "self", ")", ":", "header", "=", "bytearray", "(", "1", ")", "payload", "=", "bytearray", "(", ")", "varHeader", "=", "encode16Int", "(", "self", ".", "msgId", ")", "header", "[", "0", "]", "=", "0xA2", "# packet with QoS=1", "for...
Encode and store an UNSUBCRIBE control packet @raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
[ "Encode", "and", "store", "an", "UNSUBCRIBE", "control", "packet" ]
python
test
39.125
DocNow/twarc
twarc/client.py
https://github.com/DocNow/twarc/blob/47dd87d0c00592a4d583412c9d660ba574fc6f26/twarc/client.py#L544-L603
def replies(self, tweet, recursive=False, prune=()): """ replies returns a generator of tweets that are replies for a given tweet. It includes the original tweet. If you would like to fetch the replies to the replies use recursive=True which will do a depth-first recursive walk of the replies. It also walk up the reply chain if you supply a tweet that is itself a reply to another tweet. You can optionally supply a tuple of tweet ids to ignore during this traversal using the prune parameter. """ yield tweet # get replies to the tweet screen_name = tweet['user']['screen_name'] tweet_id = tweet['id_str'] log.info("looking for replies to: %s", tweet_id) for reply in self.search("to:%s" % screen_name, since_id=tweet_id): if reply['in_reply_to_status_id_str'] != tweet_id: continue if reply['id_str'] in prune: log.info("ignoring pruned tweet id %s", reply['id_str']) continue log.info("found reply: %s", reply["id_str"]) if recursive: if reply['id_str'] not in prune: prune = prune + (tweet_id,) for r in self.replies(reply, recursive, prune): yield r else: yield reply # if this tweet is itself a reply to another tweet get it and # get other potential replies to it reply_to_id = tweet.get('in_reply_to_status_id_str') log.info("prune=%s", prune) if recursive and reply_to_id and reply_to_id not in prune: t = self.tweet(reply_to_id) if t: log.info("found reply-to: %s", t['id_str']) prune = prune + (tweet['id_str'],) for r in self.replies(t, recursive=True, prune=prune): yield r # if this tweet is a quote go get that too whatever tweets it # may be in reply to quote_id = tweet.get('quotes_status_id_str') if recursive and quote_id and quote_id not in prune: t = self.tweet(quote_id) if t: log.info("found quote: %s", t['id_str']) prune = prune + (tweet['id_str'],) for r in self.replies(t, recursive=True, prune=prune): yield r
[ "def", "replies", "(", "self", ",", "tweet", ",", "recursive", "=", "False", ",", "prune", "=", "(", ")", ")", ":", "yield", "tweet", "# get replies to the tweet", "screen_name", "=", "tweet", "[", "'user'", "]", "[", "'screen_name'", "]", "tweet_id", "=",...
replies returns a generator of tweets that are replies for a given tweet. It includes the original tweet. If you would like to fetch the replies to the replies use recursive=True which will do a depth-first recursive walk of the replies. It also walk up the reply chain if you supply a tweet that is itself a reply to another tweet. You can optionally supply a tuple of tweet ids to ignore during this traversal using the prune parameter.
[ "replies", "returns", "a", "generator", "of", "tweets", "that", "are", "replies", "for", "a", "given", "tweet", ".", "It", "includes", "the", "original", "tweet", ".", "If", "you", "would", "like", "to", "fetch", "the", "replies", "to", "the", "replies", ...
python
train
39.5
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L503-L512
def dictlist_convert_to_string(dict_list: Iterable[Dict], key: str) -> None: """ Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a blank string, convert it to ``None``. """ for d in dict_list: d[key] = str(d[key]) if d[key] == "": d[key] = None
[ "def", "dictlist_convert_to_string", "(", "dict_list", ":", "Iterable", "[", "Dict", "]", ",", "key", ":", "str", ")", "->", "None", ":", "for", "d", "in", "dict_list", ":", "d", "[", "key", "]", "=", "str", "(", "d", "[", "key", "]", ")", "if", ...
Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a blank string, convert it to ``None``.
[ "Process", "an", "iterable", "of", "dictionaries", ".", "For", "each", "dictionary", "d", "convert", "(", "in", "place", ")", "d", "[", "key", "]", "to", "a", "string", "form", "str", "(", "d", "[", "key", "]", ")", ".", "If", "the", "result", "is"...
python
train
38.5
inveniosoftware/invenio-files-rest
invenio_files_rest/storage/pyfs.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/storage/pyfs.py#L131-L162
def pyfs_storage_factory(fileinstance=None, default_location=None, default_storage_class=None, filestorage_class=PyFSFileStorage, fileurl=None, size=None, modified=None, clean_dir=True): """Get factory function for creating a PyFS file storage instance.""" # Either the FileInstance needs to be specified or all filestorage # class parameters need to be specified assert fileinstance or (fileurl and size) if fileinstance: # FIXME: Code here should be refactored since it assumes a lot on the # directory structure where the file instances are written fileurl = None size = fileinstance.size modified = fileinstance.updated if fileinstance.uri: # Use already existing URL. fileurl = fileinstance.uri else: assert default_location # Generate a new URL. fileurl = make_path( default_location, str(fileinstance.id), 'data', current_app.config['FILES_REST_STORAGE_PATH_DIMENSIONS'], current_app.config['FILES_REST_STORAGE_PATH_SPLIT_LENGTH'], ) return filestorage_class( fileurl, size=size, modified=modified, clean_dir=clean_dir)
[ "def", "pyfs_storage_factory", "(", "fileinstance", "=", "None", ",", "default_location", "=", "None", ",", "default_storage_class", "=", "None", ",", "filestorage_class", "=", "PyFSFileStorage", ",", "fileurl", "=", "None", ",", "size", "=", "None", ",", "modif...
Get factory function for creating a PyFS file storage instance.
[ "Get", "factory", "function", "for", "creating", "a", "PyFS", "file", "storage", "instance", "." ]
python
train
41.03125
bcbio/bcbio-nextgen
bcbio/variation/validate.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L139-L184
def compare_to_rm(data): """Compare final variant calls against reference materials of known calls. """ if isinstance(data, (list, tuple)) and cwlutils.is_cwl_run(utils.to_single_data(data[0])): data = _normalize_cwl_inputs(data) toval_data = _get_validate(data) toval_data = cwlutils.unpack_tarballs(toval_data, toval_data) if toval_data: caller = _get_caller(toval_data) sample = dd.get_sample_name(toval_data) base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller)) if isinstance(toval_data["vrn_file"], (list, tuple)): raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"]) else: vrn_file = os.path.abspath(toval_data["vrn_file"]) rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data) rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"), toval_data), toval_data) rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data, prefix="validateregions-", bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep"))) rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(toval_data), data.get("genome_build"), base_dir, data) rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(toval_data), data.get("genome_build"), base_dir, data) if rm_interval_file else None) vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg") # RTG can fail on totally empty files. 
Call everything in truth set as false negatives if not vcfutils.vcf_has_variants(vrn_file): eval_files = _setup_call_false(rm_file, rm_interval_file, base_dir, toval_data, "fn") data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) # empty validation file, every call is a false positive elif not vcfutils.vcf_has_variants(rm_file): eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data, "fp") data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) elif vmethod in ["rtg", "rtg-squash-ploidy"]: eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data, vmethod) eval_files = _annotate_validations(eval_files, toval_data) data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data) elif vmethod == "hap.py": data["validate"] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data) elif vmethod == "bcbio.variation": data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, toval_data) return [[data]]
[ "def", "compare_to_rm", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ")", ")", "and", "cwlutils", ".", "is_cwl_run", "(", "utils", ".", "to_single_data", "(", "data", "[", "0", "]", ")", ")", ":", "data", ...
Compare final variant calls against reference materials of known calls.
[ "Compare", "final", "variant", "calls", "against", "reference", "materials", "of", "known", "calls", "." ]
python
train
68.413043
hyperledger/indy-node
indy_node/server/domain_req_handler.py
https://github.com/hyperledger/indy-node/blob/8fabd364eaf7d940a56df2911d9215b1e512a2de/indy_node/server/domain_req_handler.py#L930-L938
def transform_txn_for_ledger(txn): """ Some transactions need to be transformed before they can be stored in the ledger, eg. storing certain payload in another data store and only its hash in the ledger """ if get_type(txn) == ATTRIB: txn = DomainReqHandler.transform_attrib_for_ledger(txn) return txn
[ "def", "transform_txn_for_ledger", "(", "txn", ")", ":", "if", "get_type", "(", "txn", ")", "==", "ATTRIB", ":", "txn", "=", "DomainReqHandler", ".", "transform_attrib_for_ledger", "(", "txn", ")", "return", "txn" ]
Some transactions need to be transformed before they can be stored in the ledger, eg. storing certain payload in another data store and only its hash in the ledger
[ "Some", "transactions", "need", "to", "be", "transformed", "before", "they", "can", "be", "stored", "in", "the", "ledger", "eg", ".", "storing", "certain", "payload", "in", "another", "data", "store", "and", "only", "its", "hash", "in", "the", "ledger" ]
python
train
40.111111
BD2KGenomics/protect
src/protect/addons/assess_mhc_pathway.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/addons/assess_mhc_pathway.py#L27-L39
def run_mhc_gene_assessment(job, rsem_files, rna_haplotype, univ_options, reports_options): """ A wrapper for assess_mhc_genes. :param dict rsem_files: Results from running rsem :param str rna_haplotype: The job store id for the rna haplotype file :param dict univ_options: Dict of universal options used by almost all tools :param dict reports_options: Options specific to reporting modules :return: The results of running assess_mhc_genes :rtype: toil.fileStore.FileID """ return job.addChildJobFn(assess_mhc_genes, rsem_files['rsem.genes.results'], rna_haplotype, univ_options, reports_options).rv()
[ "def", "run_mhc_gene_assessment", "(", "job", ",", "rsem_files", ",", "rna_haplotype", ",", "univ_options", ",", "reports_options", ")", ":", "return", "job", ".", "addChildJobFn", "(", "assess_mhc_genes", ",", "rsem_files", "[", "'rsem.genes.results'", "]", ",", ...
A wrapper for assess_mhc_genes. :param dict rsem_files: Results from running rsem :param str rna_haplotype: The job store id for the rna haplotype file :param dict univ_options: Dict of universal options used by almost all tools :param dict reports_options: Options specific to reporting modules :return: The results of running assess_mhc_genes :rtype: toil.fileStore.FileID
[ "A", "wrapper", "for", "assess_mhc_genes", "." ]
python
train
50.769231
pycontribs/activedirectory
activedirectory/activedirectory.py
https://github.com/pycontribs/activedirectory/blob/cd491511e2ed667c3b4634a682ea012c6cbedb38/activedirectory/activedirectory.py#L253-L276
def __compress_attributes(self, dic): """ This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always return lists, even when it doesn t make sense. :param dic: :return: """ result = {} for k, v in dic.iteritems(): if isinstance(v, types.ListType) and len(v) == 1: if k not in ('msExchMailboxSecurityDescriptor', 'msExchSafeSendersHash', 'msExchBlockedSendersHash', 'replicationSignature', 'msExchSafeRecipientsHash', 'sIDHistory', 'msRTCSIP-UserRoutingGroupId', 'mSMQDigests', 'mSMQSignCertificates', 'msExchMasterAccountSid', 'msExchPreviousAccountSid', 'msExchUMPinChecksum', 'userSMIMECertificate', 'userCertificate', 'userCert', 'msExchDisabledArchiveGUID', 'msExchUMPinChecksum', 'msExchUMSpokenName', 'objectSid', 'objectGUID', 'msExchArchiveGUID', 'thumbnailPhoto', 'msExchMailboxGuid'): try: result[k] = v[0].decode('utf-8') except Exception as e: logging. error("Failed to decode attribute: %s -- %s" % (k, e)) result[k] = v[0] return result
[ "def", "__compress_attributes", "(", "self", ",", "dic", ")", ":", "result", "=", "{", "}", "for", "k", ",", "v", "in", "dic", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "v", ",", "types", ".", "ListType", ")", "and", "len", "(", "...
This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always return lists, even when it doesn t make sense. :param dic: :return:
[ "This", "will", "convert", "all", "attributes", "that", "are", "list", "with", "only", "one", "item", "string", "into", "simple", "string", ".", "It", "seems", "that", "LDAP", "always", "return", "lists", "even", "when", "it", "doesn", "t", "make", "sense"...
python
train
57.25
aouyar/PyMunin
pysysinfo/phpopc.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/phpopc.py#L72-L83
def initStats(self, extras=None): """Query and parse Web Server Status Page. @param extras: Include extra metrics, which can be computationally more expensive. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) #with open('/tmp/opcinfo.json') as f: # response = f.read() self._statusDict = json.loads(response)
[ "def", "initStats", "(", "self", ",", "extras", "=", "None", ")", ":", "url", "=", "\"%s://%s:%d/%s\"", "%", "(", "self", ".", "_proto", ",", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_monpath", ")", "response", "=", "util",...
Query and parse Web Server Status Page. @param extras: Include extra metrics, which can be computationally more expensive.
[ "Query", "and", "parse", "Web", "Server", "Status", "Page", "." ]
python
train
41.25
spacetelescope/pysynphot
pysynphot/spectrum.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/spectrum.py#L1791-L1813
def avgwave(self): """Calculate :ref:`pysynphot-formula-avgwv`. Returns ------- ans : float Average wavelength. """ mywaveunits = self.waveunits.name self.convert('angstroms') wave = self.wave thru = self.throughput self.convert(mywaveunits) num = self.trapezoidIntegration(wave, thru*wave) den = self.trapezoidIntegration(wave, thru) if 0.0 in (num, den): return 0.0 else: return num/den
[ "def", "avgwave", "(", "self", ")", ":", "mywaveunits", "=", "self", ".", "waveunits", ".", "name", "self", ".", "convert", "(", "'angstroms'", ")", "wave", "=", "self", ".", "wave", "thru", "=", "self", ".", "throughput", "self", ".", "convert", "(", ...
Calculate :ref:`pysynphot-formula-avgwv`. Returns ------- ans : float Average wavelength.
[ "Calculate", ":", "ref", ":", "pysynphot", "-", "formula", "-", "avgwv", "." ]
python
train
22.565217
twisted/axiom
benchmark/benchlib.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/benchmark/benchlib.py#L25-L30
def createSomeItems(store, itemType, values, counter): """ Create some instances of a particular type in a store. """ for i in counter: itemType(store=store, **values)
[ "def", "createSomeItems", "(", "store", ",", "itemType", ",", "values", ",", "counter", ")", ":", "for", "i", "in", "counter", ":", "itemType", "(", "store", "=", "store", ",", "*", "*", "values", ")" ]
Create some instances of a particular type in a store.
[ "Create", "some", "instances", "of", "a", "particular", "type", "in", "a", "store", "." ]
python
train
31
rix0rrr/gcl
gcl/ast.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L528-L540
def mkApplications(location, *atoms): """Make a sequence of applications from a list of tokens. atoms is a list of atoms, which will be handled left-associatively. E.g: ['foo', [], []] == foo()() ==> Application(Application('foo', []), []) """ atoms = list(atoms) while len(atoms) > 1: atoms[0:2] = [Application(location, atoms[0], atoms[1])] # Nothing left to apply return atoms[0]
[ "def", "mkApplications", "(", "location", ",", "*", "atoms", ")", ":", "atoms", "=", "list", "(", "atoms", ")", "while", "len", "(", "atoms", ")", ">", "1", ":", "atoms", "[", "0", ":", "2", "]", "=", "[", "Application", "(", "location", ",", "at...
Make a sequence of applications from a list of tokens. atoms is a list of atoms, which will be handled left-associatively. E.g: ['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
[ "Make", "a", "sequence", "of", "applications", "from", "a", "list", "of", "tokens", "." ]
python
train
30.538462