repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
carpedm20/ndrive
ndrive/models.py
ndrive.getMusicAlbumList
def getMusicAlbumList(self, tagtype = 0, startnum = 0, pagingrow = 100):
    """Request the user's music album list from the ndrive service.

    :param int tagtype: Tag type filter — semantics unclear from this code; TODO confirm.
    :param int startnum: Start index for paging.
    :param int pagingrow: Number of rows per page (default 100).
    :return: Result of ``resultManager`` on the response body —
             True on a 'success' message, False otherwise.
    """
    # NOTE(review): this posts to the 'setProperty' endpoint, which looks
    # copy-pasted from another method — presumably a music-album-list URL
    # was intended. Verify against the nurls mapping.
    url = nurls['setProperty']
    data = {'userid': self.user_id,
            'useridx': self.useridx,
            'tagtype': tagtype,
            'startnum': startnum,
            'pagingrow': pagingrow,
            }
    r = self.session.post(url = url, data = data)
    # NOTE(review): resultManager is invoked without self — confirm it is a
    # module-level function rather than a method of this class.
    return resultManager(r.text)
python
def getMusicAlbumList(self, tagtype = 0, startnum = 0, pagingrow = 100): """GetMusicAlbumList Args: tagtype = ??? startnum pagingrow Returns: ??? False: Failed to get property """ url = nurls['setProperty'] data = {'userid': self.user_id, 'useridx': self.useridx, 'tagtype': tagtype, 'startnum': startnum, 'pagingrow': pagingrow, } r = self.session.post(url = url, data = data) return resultManager(r.text)
[ "def", "getMusicAlbumList", "(", "self", ",", "tagtype", "=", "0", ",", "startnum", "=", "0", ",", "pagingrow", "=", "100", ")", ":", "url", "=", "nurls", "[", "'setProperty'", "]", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'us...
GetMusicAlbumList Args: tagtype = ??? startnum pagingrow Returns: ??? False: Failed to get property
[ "GetMusicAlbumList" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L687-L712
carpedm20/ndrive
ndrive/models.py
ndrive.resultManager
def resultManager(self, text):
    """Parse an ndrive JSON response and report success or failure.

    Known resultcode / message values from the service:
        0    = success (a null "resultvalue" simply means no result to show)
        11   = Not Exist Path
        36   = File Information Not Found
        2002 = Invalidation Cookie

    :param str text: Raw JSON response body.
    :return: True when the server reports 'success', False otherwise
             (the error message is printed as a side effect).
    """
    j = json.loads(text)
    if j['message'] != 'success':
        # Parenthesized: a valid print statement in Python 2 and a call in
        # Python 3 (the original bare print statement was Py2-only).
        print("[*] Error : " + j['message'])
        return False
    else:
        return True
python
def resultManager(self, text): """ resultcode & message: 0 = success => if "resultvalue" = null: no result to show 11 = Not Exist Path 36 = File Infomation Not Found 2002 = Invalidation Cookie """ j = json.loads(text) if j['message'] != 'success': print "[*] Error : " + j['message'] return False else: return True
[ "def", "resultManager", "(", "self", ",", "text", ")", ":", "j", "=", "json", ".", "loads", "(", "text", ")", "if", "j", "[", "'message'", "]", "!=", "'success'", ":", "print", "\"[*] Error : \"", "+", "j", "[", "'message'", "]", "return", "False", "...
resultcode & message: 0 = success => if "resultvalue" = null: no result to show 11 = Not Exist Path 36 = File Infomation Not Found 2002 = Invalidation Cookie
[ "resultcode", "&", "message", ":", "0", "=", "success", "=", ">", "if", "resultvalue", "=", "null", ":", "no", "result", "to", "show", "11", "=", "Not", "Exist", "Path", "36", "=", "File", "Infomation", "Not", "Found", "2002", "=", "Invalidation", "Coo...
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L714-L729
cebel/pyctd
src/pyctd/cli.py
update
def update(connection, force_download):
    """Refresh the local pyctd database from the upstream source."""
    options = {"connection": connection, "force_download": force_download}
    manager.database.update(**options)
python
def update(connection, force_download): """Update the database""" manager.database.update( connection=connection, force_download=force_download )
[ "def", "update", "(", "connection", ",", "force_download", ")", ":", "manager", ".", "database", ".", "update", "(", "connection", "=", "connection", ",", "force_download", "=", "force_download", ")" ]
Update the database
[ "Update", "the", "database" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/cli.py#L48-L53
cebel/pyctd
src/pyctd/cli.py
set_mysql
def set_mysql(host, user, password, db, charset):
    """Configure the SQLAlchemy connection string from MySQL settings."""
    mysql_options = {
        "host": host,
        "user": user,
        "password": password,
        "db": db,
        "charset": charset,
    }
    manager.database.set_mysql_connection(**mysql_options)
python
def set_mysql(host, user, password, db, charset): """Set the SQLAlchemy connection string with MySQL settings""" manager.database.set_mysql_connection( host=host, user=user, password=password, db=db, charset=charset )
[ "def", "set_mysql", "(", "host", ",", "user", ",", "password", ",", "db", ",", "charset", ")", ":", "manager", ".", "database", ".", "set_mysql_connection", "(", "host", "=", "host", ",", "user", "=", "user", ",", "password", "=", "password", ",", "db"...
Set the SQLAlchemy connection string with MySQL settings
[ "Set", "the", "SQLAlchemy", "connection", "string", "with", "MySQL", "settings" ]
train
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/cli.py#L69-L77
brunobord/md2ebook
md2ebook/md2ebook.py
main
def main():
    """Entry point: verify dependencies, parse CLI arguments, dispatch."""
    available_generators = check_dependencies()
    cli_args = docopt(__doc__, version='md2ebook 0.0.1-dev')
    Commander(cli_args, available_generators).handle()
python
def main(): "Main program" generators = check_dependencies() args = docopt(__doc__, version='md2ebook 0.0.1-dev') commander = Commander(args, generators) commander.handle()
[ "def", "main", "(", ")", ":", "generators", "=", "check_dependencies", "(", ")", "args", "=", "docopt", "(", "__doc__", ",", "version", "=", "'md2ebook 0.0.1-dev'", ")", "commander", "=", "Commander", "(", "args", ",", "generators", ")", "commander", ".", ...
Main program
[ "Main", "program" ]
train
https://github.com/brunobord/md2ebook/blob/31e0d06b77f2d986e6af1115c9e613dfec0591a9/md2ebook/md2ebook.py#L38-L43
limix/scipy-sugar
scipy_sugar/stats/_normalize.py
quantile_gaussianize
def quantile_gaussianize(x):
    """Normalize a sequence of values via rank and Normal c.d.f.

    Non-finite entries are passed through unchanged; finite entries are
    replaced by the Normal quantile of their rank.

    Args:
        x (array_like): sequence of values.

    Returns:
        Gaussian-normalized values.

    Example:
        .. doctest::

            >>> from scipy_sugar.stats import quantile_gaussianize
            >>> print(quantile_gaussianize([-1, 0, 2]))
            [-0.67448975  0.          0.67448975]
    """
    from scipy.stats import norm, rankdata

    values = asarray(x, float).copy()
    finite_mask = isfinite(values)
    # Negate so that the survival-function inverse below yields ascending order.
    values[finite_mask] *= -1
    result = empty_like(values)
    result[finite_mask] = rankdata(values[finite_mask])
    result[finite_mask] = norm.isf(result[finite_mask] / (sum(finite_mask) + 1))
    result[~finite_mask] = values[~finite_mask]
    return result
python
def quantile_gaussianize(x): """Normalize a sequence of values via rank and Normal c.d.f. Args: x (array_like): sequence of values. Returns: Gaussian-normalized values. Example: .. doctest:: >>> from scipy_sugar.stats import quantile_gaussianize >>> print(quantile_gaussianize([-1, 0, 2])) [-0.67448975 0. 0.67448975] """ from scipy.stats import norm, rankdata x = asarray(x, float).copy() ok = isfinite(x) x[ok] *= -1 y = empty_like(x) y[ok] = rankdata(x[ok]) y[ok] = norm.isf(y[ok] / (sum(ok) + 1)) y[~ok] = x[~ok] return y
[ "def", "quantile_gaussianize", "(", "x", ")", ":", "from", "scipy", ".", "stats", "import", "norm", ",", "rankdata", "x", "=", "asarray", "(", "x", ",", "float", ")", ".", "copy", "(", ")", "ok", "=", "isfinite", "(", "x", ")", "x", "[", "ok", "]...
Normalize a sequence of values via rank and Normal c.d.f. Args: x (array_like): sequence of values. Returns: Gaussian-normalized values. Example: .. doctest:: >>> from scipy_sugar.stats import quantile_gaussianize >>> print(quantile_gaussianize([-1, 0, 2])) [-0.67448975 0. 0.67448975]
[ "Normalize", "a", "sequence", "of", "values", "via", "rank", "and", "Normal", "c", ".", "d", ".", "f", "." ]
train
https://github.com/limix/scipy-sugar/blob/8109685b14f61cf4c7fc66e6a98f10f35cbd086c/scipy_sugar/stats/_normalize.py#L6-L32
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
Peak.apply_noise
def apply_noise(self, noise_generator, split_idx, ndigits=6):
    """Perturb each dimension's chemical shift within this peak.

    :param noise_generator: Noise generator object.
    :param int split_idx: Index selecting which peak list split parameters to use.
    :param int ndigits: Number of decimal places kept after perturbation.
    :return: None
    :rtype: :py:obj:`None`
    """
    offsets = noise_generator.generate(self.labels, split_idx)
    for dimension, offset in zip(self, offsets):
        dimension.chemshift = round(dimension.chemshift + offset, ndigits)
python
def apply_noise(self, noise_generator, split_idx, ndigits=6): """Apply noise to dimensions within a peak. :param noise_generator: Noise generator object. :param int split_idx: Index specifying which peak list split parameters to use. :return: None :rtype: :py:obj:`None` """ noise = noise_generator.generate(self.labels, split_idx) for dim, noise_value in zip(self, noise): dim.chemshift = round(dim.chemshift + noise_value, ndigits)
[ "def", "apply_noise", "(", "self", ",", "noise_generator", ",", "split_idx", ",", "ndigits", "=", "6", ")", ":", "noise", "=", "noise_generator", ".", "generate", "(", "self", ".", "labels", ",", "split_idx", ")", "for", "dim", ",", "noise_value", "in", ...
Apply noise to dimensions within a peak. :param noise_generator: Noise generator object. :param int split_idx: Index specifying which peak list split parameters to use. :return: None :rtype: :py:obj:`None`
[ "Apply", "noise", "to", "dimensions", "within", "a", "peak", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L89-L99
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
PeakList._to_sparky
def _to_sparky(self): """Save :class:`~nmrstarlib.plsimulator.PeakList` into Sparky-formatted string. :return: Peak list representation in Sparky format. :rtype: :py:class:`str` """ sparky_str = "Assignment\t\t{}\n\n".format("\t\t".join(["w" + str(i + 1) for i in range(len(self.labels))])) for peak in self: assignment_str = "-".join(peak.assignments_list) dimensions_str = "\t\t".join([str(chemshift) for chemshift in peak.chemshifts_list]) sparky_str += ("{}\t\t{}\n".format(assignment_str, dimensions_str)) return sparky_str
python
def _to_sparky(self): """Save :class:`~nmrstarlib.plsimulator.PeakList` into Sparky-formatted string. :return: Peak list representation in Sparky format. :rtype: :py:class:`str` """ sparky_str = "Assignment\t\t{}\n\n".format("\t\t".join(["w" + str(i + 1) for i in range(len(self.labels))])) for peak in self: assignment_str = "-".join(peak.assignments_list) dimensions_str = "\t\t".join([str(chemshift) for chemshift in peak.chemshifts_list]) sparky_str += ("{}\t\t{}\n".format(assignment_str, dimensions_str)) return sparky_str
[ "def", "_to_sparky", "(", "self", ")", ":", "sparky_str", "=", "\"Assignment\\t\\t{}\\n\\n\"", ".", "format", "(", "\"\\t\\t\"", ".", "join", "(", "[", "\"w\"", "+", "str", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "len", "(", "self", ...
Save :class:`~nmrstarlib.plsimulator.PeakList` into Sparky-formatted string. :return: Peak list representation in Sparky format. :rtype: :py:class:`str`
[ "Save", ":", "class", ":", "~nmrstarlib", ".", "plsimulator", ".", "PeakList", "into", "Sparky", "-", "formatted", "string", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L118-L129
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
PeakList._to_autoassign
def _to_autoassign(self): """Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string. :return: Peak list representation in AutoAssign format. :rtype: :py:class:`str` """ autoassign_str = "#Index\t\t{}\t\tIntensity\t\tWorkbook\n".format( "\t\t".join([str(i + 1) + "Dim" for i in range(len(self.labels))])) for peak_idx, peak in enumerate(self): dimensions_str = "\t\t".join([str(chemshift) for chemshift in peak.chemshifts_list]) autoassign_str += "{}\t\t{}\t\t{}\t\t{}\n".format(peak_idx+1, dimensions_str, 0, self.spectrum_name) return autoassign_str
python
def _to_autoassign(self): """Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string. :return: Peak list representation in AutoAssign format. :rtype: :py:class:`str` """ autoassign_str = "#Index\t\t{}\t\tIntensity\t\tWorkbook\n".format( "\t\t".join([str(i + 1) + "Dim" for i in range(len(self.labels))])) for peak_idx, peak in enumerate(self): dimensions_str = "\t\t".join([str(chemshift) for chemshift in peak.chemshifts_list]) autoassign_str += "{}\t\t{}\t\t{}\t\t{}\n".format(peak_idx+1, dimensions_str, 0, self.spectrum_name) return autoassign_str
[ "def", "_to_autoassign", "(", "self", ")", ":", "autoassign_str", "=", "\"#Index\\t\\t{}\\t\\tIntensity\\t\\tWorkbook\\n\"", ".", "format", "(", "\"\\t\\t\"", ".", "join", "(", "[", "str", "(", "i", "+", "1", ")", "+", "\"Dim\"", "for", "i", "in", "range", "...
Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string. :return: Peak list representation in AutoAssign format. :rtype: :py:class:`str`
[ "Save", ":", "class", ":", "~nmrstarlib", ".", "plsimulator", ".", "PeakList", "into", "AutoAssign", "-", "formatted", "string", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L131-L142
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
PeakList._to_json
def _to_json(self): """Save :class:`~nmrstarlib.plsimulator.PeakList` into JSON string. :return: Peak list representation in JSON format. :rtype: :py:class:`str` """ json_list = [{"Assignment": peak.assignments_list, "Dimensions": peak.chemshifts_list} for peak in self] return json.dumps(json_list, sort_keys=True, indent=4)
python
def _to_json(self): """Save :class:`~nmrstarlib.plsimulator.PeakList` into JSON string. :return: Peak list representation in JSON format. :rtype: :py:class:`str` """ json_list = [{"Assignment": peak.assignments_list, "Dimensions": peak.chemshifts_list} for peak in self] return json.dumps(json_list, sort_keys=True, indent=4)
[ "def", "_to_json", "(", "self", ")", ":", "json_list", "=", "[", "{", "\"Assignment\"", ":", "peak", ".", "assignments_list", ",", "\"Dimensions\"", ":", "peak", ".", "chemshifts_list", "}", "for", "peak", "in", "self", "]", "return", "json", ".", "dumps",...
Save :class:`~nmrstarlib.plsimulator.PeakList` into JSON string. :return: Peak list representation in JSON format. :rtype: :py:class:`str`
[ "Save", ":", "class", ":", "~nmrstarlib", ".", "plsimulator", ".", "PeakList", "into", "JSON", "string", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L144-L151
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
PeakList.write
def write(self, filehandle, fileformat):
    """Write :class:`~nmrstarlib.plsimulator.PeakList` data into file.

    :param filehandle: Writable file-like object (closed on success).
    :type filehandle: :py:class:`io.TextIOWrapper`
    :param str fileformat: Format to use to write data: `sparky`, `autoassign`, or `json`.
    :return: None
    :rtype: :py:obj:`None`
    :raises TypeError: If *fileformat* is not a supported format.
    :raises IOError: If *filehandle* is not writable.
    """
    try:
        if fileformat == "sparky":
            filehandle.write(self._to_sparky())
        elif fileformat == "autoassign":
            # BUGFIX: this branch previously called self._to_sparky(), so
            # "autoassign" output was silently written in Sparky format.
            filehandle.write(self._to_autoassign())
        elif fileformat == "json":
            filehandle.write(self._to_json())
        else:
            raise TypeError("Unknown file format.")
    except IOError:
        raise IOError('"filehandle" parameter must be writable.')
    filehandle.close()
python
def write(self, filehandle, fileformat): """Write :class:`~nmrstarlib.plsimulator.PeakList` data into file. :param filehandle: file-like object. :type filehandle: :py:class:`io.TextIOWrapper` :param str fileformat: Format to use to write data: `sparky`, `autoassign`, or `json`. :return: None :rtype: :py:obj:`None` """ try: if fileformat == "sparky": sparky_str = self._to_sparky() filehandle.write(sparky_str) elif fileformat == "autoassign": autoassign_str = self._to_sparky() filehandle.write(autoassign_str) elif fileformat == "json": json_str = self._to_json() filehandle.write(json_str) else: raise TypeError("Unknown file format.") except IOError: raise IOError('"filehandle" parameter must be writable.') filehandle.close()
[ "def", "write", "(", "self", ",", "filehandle", ",", "fileformat", ")", ":", "try", ":", "if", "fileformat", "==", "\"sparky\"", ":", "sparky_str", "=", "self", ".", "_to_sparky", "(", ")", "filehandle", ".", "write", "(", "sparky_str", ")", "elif", "fil...
Write :class:`~nmrstarlib.plsimulator.PeakList` data into file. :param filehandle: file-like object. :type filehandle: :py:class:`io.TextIOWrapper` :param str fileformat: Format to use to write data: `sparky`, `autoassign`, or `json`. :return: None :rtype: :py:obj:`None`
[ "Write", ":", "class", ":", "~nmrstarlib", ".", "plsimulator", ".", "PeakList", "data", "into", "file", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L153-L176
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
SequenceSite.is_sequential
def is_sequential(self):
    """Check whether the residues of this sequence site have consecutive Seq_IDs.

    :return: If sequence site is in valid sequential order (True) or not (False).
    :rtype: :py:obj:`True` or :py:obj:`False`
    """
    observed = [int(residue["Seq_ID"]) for residue in self]
    expected = range(observed[0], observed[-1] + 1)
    return tuple(observed) == tuple(expected)
python
def is_sequential(self): """Check if residues that sequence site is composed of are in sequential order. :return: If sequence site is in valid sequential order (True) or not (False). :rtype: :py:obj:`True` or :py:obj:`False` """ seq_ids = tuple(int(residue["Seq_ID"]) for residue in self) return seq_ids == tuple(range(int(seq_ids[0]), int(seq_ids[-1])+1))
[ "def", "is_sequential", "(", "self", ")", ":", "seq_ids", "=", "tuple", "(", "int", "(", "residue", "[", "\"Seq_ID\"", "]", ")", "for", "residue", "in", "self", ")", "return", "seq_ids", "==", "tuple", "(", "range", "(", "int", "(", "seq_ids", "[", "...
Check if residues that sequence site is composed of are in sequential order. :return: If sequence site is in valid sequential order (True) or not (False). :rtype: :py:obj:`True` or :py:obj:`False`
[ "Check", "if", "residues", "that", "sequence", "site", "is", "composed", "of", "are", "in", "sequential", "order", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L216-L223
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
PeakDescription.create_dimension_groups
def create_dimension_groups(dimension_positions):
    """Create list of dimension groups.

    :param zip dimension_positions: List of tuples describing dimension and its position
                                    within sequence site.
    :return: List of dimension groups.
    :rtype: :py:class:`list`
    """
    groups = []
    for group_label, position in dimension_positions:
        group = DimensionGroup(group_label, position)
        group.dimensions.extend(
            Dimension(dim_label, position)
            for dim_label in nmrstarlib.RESONANCE_CLASSES[group_label])
        groups.append(group)
    return groups
python
def create_dimension_groups(dimension_positions): """Create list of dimension groups. :param zip dimension_positions: List of tuples describing dimension and its position within sequence site. :return: List of dimension groups. :rtype: :py:class:`list` """ dimension_groups = [] for dim_group_label, position in dimension_positions: dim_group = DimensionGroup(dim_group_label, position) for dim_label in nmrstarlib.RESONANCE_CLASSES[dim_group_label]: dim_group.dimensions.append(Dimension(dim_label, position)) dimension_groups.append(dim_group) return dimension_groups
[ "def", "create_dimension_groups", "(", "dimension_positions", ")", ":", "dimension_groups", "=", "[", "]", "for", "dim_group_label", ",", "position", "in", "dimension_positions", ":", "dim_group", "=", "DimensionGroup", "(", "dim_group_label", ",", "position", ")", ...
Create list of dimension groups. :param zip dimension_positions: List of tuples describing dimension and its position within sequence site. :return: List of dimension groups. :rtype: :py:class:`list`
[ "Create", "list", "of", "dimension", "groups", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L287-L302
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
Spectrum.peak_templates
def peak_templates(self):
    """Expand every general peak description into concrete peak templates.

    :return: List of peak templates.
    :rtype: :py:class:`list`
    """
    templates = []
    for description in self:
        dimension_choices = [group.dimensions for group in description]
        templates.extend(PeakTemplate(combination)
                         for combination in product(*dimension_choices))
    return templates
python
def peak_templates(self): """Create a list of concrete peak templates from a list of general peak descriptions. :return: List of peak templates. :rtype: :py:class:`list` """ peak_templates = [] for peak_descr in self: expanded_dims = [dim_group.dimensions for dim_group in peak_descr] templates = product(*expanded_dims) for template in templates: peak_templates.append(PeakTemplate(template)) return peak_templates
[ "def", "peak_templates", "(", "self", ")", ":", "peak_templates", "=", "[", "]", "for", "peak_descr", "in", "self", ":", "expanded_dims", "=", "[", "dim_group", ".", "dimensions", "for", "dim_group", "in", "peak_descr", "]", "templates", "=", "product", "(",...
Create a list of concrete peak templates from a list of general peak descriptions. :return: List of peak templates. :rtype: :py:class:`list`
[ "Create", "a", "list", "of", "concrete", "peak", "templates", "from", "a", "list", "of", "general", "peak", "descriptions", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L322-L334
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/plsimulator.py
Spectrum.seq_site_length
def seq_site_length(self):
    """Calculate length of a single sequence site from peak-description positions.

    :return: Length of sequence site.
    :rtype: :py:class:`int`
    """
    positions = set()
    for description in self:
        positions |= set(description.relative_positions)
    return len(positions)
python
def seq_site_length(self): """Calculate length of a single sequence site based upon relative positions specified in peak descriptions. :return: Length of sequence site. :rtype: :py:class:`int` """ relative_positions_set = set() for peak_descr in self: relative_positions_set.update(peak_descr.relative_positions) return len(relative_positions_set)
[ "def", "seq_site_length", "(", "self", ")", ":", "relative_positions_set", "=", "set", "(", ")", "for", "peak_descr", "in", "self", ":", "relative_positions_set", ".", "update", "(", "peak_descr", ".", "relative_positions", ")", "return", "len", "(", "relative_p...
Calculate length of a single sequence site based upon relative positions specified in peak descriptions. :return: Length of sequence site. :rtype: :py:class:`int`
[ "Calculate", "length", "of", "a", "single", "sequence", "site", "based", "upon", "relative", "positions", "specified", "in", "peak", "descriptions", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/plsimulator.py#L337-L346
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/translator.py
StarFileToPeakList.create_spectrum
def create_spectrum(spectrum_name):
    """Initialize a spectrum together with its peak descriptions.

    :param str spectrum_name: Name of the spectrum from which peak list will be simulated.
    :return: Spectrum object.
    :rtype: :class:`~nmrstarlib.plsimulator.Spectrum`
    :raises NotImplementedError: When the spectrum name is not defined.
    """
    try:
        description = nmrstarlib.SPECTRUM_DESCRIPTIONS[spectrum_name]
    except KeyError:
        raise NotImplementedError("Experiment type is not defined.")

    spectrum = plsimulator.Spectrum(spectrum_name,
                                    description["Labels"],
                                    description["MinNumberPeaksPerSpinSystem"],
                                    description.get("ResonanceLimit", None))
    for entry in description["PeakDescriptions"]:
        spectrum.append(plsimulator.PeakDescription(entry["fraction"], entry["dimensions"]))
    return spectrum
python
def create_spectrum(spectrum_name): """Initialize spectrum and peak descriptions. :param str spectrum_name: Name of the spectrum from which peak list will be simulated. :return: Spectrum object. :rtype: :class:`~nmrstarlib.plsimulator.Spectrum` """ try: spectrum_description = nmrstarlib.SPECTRUM_DESCRIPTIONS[spectrum_name] except KeyError: raise NotImplementedError("Experiment type is not defined.") spectrum = plsimulator.Spectrum(spectrum_name, spectrum_description["Labels"], spectrum_description["MinNumberPeaksPerSpinSystem"], spectrum_description.get("ResonanceLimit", None)) for peak_descr in spectrum_description["PeakDescriptions"]: spectrum.append(plsimulator.PeakDescription(peak_descr["fraction"], peak_descr["dimensions"])) return spectrum
[ "def", "create_spectrum", "(", "spectrum_name", ")", ":", "try", ":", "spectrum_description", "=", "nmrstarlib", ".", "SPECTRUM_DESCRIPTIONS", "[", "spectrum_name", "]", "except", "KeyError", ":", "raise", "NotImplementedError", "(", "\"Experiment type is not defined.\"",...
Initialize spectrum and peak descriptions. :param str spectrum_name: Name of the spectrum from which peak list will be simulated. :return: Spectrum object. :rtype: :class:`~nmrstarlib.plsimulator.Spectrum`
[ "Initialize", "spectrum", "and", "peak", "descriptions", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/translator.py#L122-L141
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/translator.py
StarFileToPeakList.create_sequence_sites
def create_sequence_sites(chain, seq_site_length):
    """Create sequence sites using sequence ids.

    :param dict chain: Chain object that contains chemical shift values and
                       assignment information.
    :param int seq_site_length: Length of a single sequence site.
    :return: List of sequence sites.
    :rtype: :py:class:`list`
    """
    # make sure that sequence is sorted by sequence id
    ordered_ids = sorted(chain.keys(), key=int)
    # sliding windows of length seq_site_length over the ordered ids
    windows = zip(*(itertools.islice(ordered_ids, offset, None)
                    for offset in range(seq_site_length)))
    sites = []
    for window in windows:
        candidate = plsimulator.SequenceSite(chain[seq_id] for seq_id in window)
        if candidate.is_sequential():
            sites.append(candidate)
    return sites
python
def create_sequence_sites(chain, seq_site_length): """Create sequence sites using sequence ids. :param dict chain: Chain object that contains chemical shift values and assignment information. :param int seq_site_length: Length of a single sequence site. :return: List of sequence sites. :rtype: :py:class:`list` """ seq_ids = sorted(list(chain.keys()), key=int) # make sure that sequence is sorted by sequence id slices = [itertools.islice(seq_ids, i, None) for i in range(seq_site_length)] seq_site_ids = list(zip(*slices)) sequence_sites = [] for seq_site_id in seq_site_ids: seq_site = plsimulator.SequenceSite(chain[seq_id] for seq_id in seq_site_id) if seq_site.is_sequential(): sequence_sites.append(seq_site) else: continue return sequence_sites
[ "def", "create_sequence_sites", "(", "chain", ",", "seq_site_length", ")", ":", "seq_ids", "=", "sorted", "(", "list", "(", "chain", ".", "keys", "(", ")", ")", ",", "key", "=", "int", ")", "# make sure that sequence is sorted by sequence id", "slices", "=", "...
Create sequence sites using sequence ids. :param dict chain: Chain object that contains chemical shift values and assignment information. :param int seq_site_length: Length of a single sequence site. :return: List of sequence sites. :rtype: :py:class:`list`
[ "Create", "sequence", "sites", "using", "sequence", "ids", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/translator.py#L144-L164
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/translator.py
StarFileToPeakList.calculate_intervals
def calculate_intervals(chunk_sizes):
    """Turn a list of chunk sizes into (start, end) index intervals.

    :param list chunk_sizes: List of chunk sizes.
    :return: Tuple of intervals.
    :rtype: :py:class:`tuple`
    """
    # Running cumulative sums: boundaries[i] is the start of chunk i,
    # boundaries[i+1] its end.
    boundaries = [0]
    for size in chunk_sizes:
        boundaries.append(boundaries[-1] + size)
    return tuple(zip(boundaries[:-1], boundaries[1:]))
python
def calculate_intervals(chunk_sizes): """Calculate intervals for a given chunk sizes. :param list chunk_sizes: List of chunk sizes. :return: Tuple of intervals. :rtype: :py:class:`tuple` """ start_indexes = [sum(chunk_sizes[:i]) for i in range(0, len(chunk_sizes))] end_indexes = [sum(chunk_sizes[:i+1]) for i in range(0, len(chunk_sizes))] return tuple(zip(start_indexes, end_indexes))
[ "def", "calculate_intervals", "(", "chunk_sizes", ")", ":", "start_indexes", "=", "[", "sum", "(", "chunk_sizes", "[", ":", "i", "]", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "chunk_sizes", ")", ")", "]", "end_indexes", "=", "[", "s...
Calculate intervals for a given chunk sizes. :param list chunk_sizes: List of chunk sizes. :return: Tuple of intervals. :rtype: :py:class:`tuple`
[ "Calculate", "intervals", "for", "a", "given", "chunk", "sizes", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/translator.py#L167-L176
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/translator.py
StarFileToPeakList.split_by_percent
def split_by_percent(self, spin_systems_list):
    """Split list of spin systems by the percentages in ``self.plsplit``.

    :param list spin_systems_list: List of spin systems.
    :return: List of spin systems divided into sub-lists corresponding to
             specified split percentages.
    :rtype: :py:class:`list`
    """
    total = len(spin_systems_list)
    chunk_sizes = [int((percent * total) / 100) for percent in self.plsplit]
    shortfall = total - sum(chunk_sizes)
    if shortfall > 0:
        # Integer truncation can lose items; give the remainder to the
        # smallest chunk.
        chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += shortfall

    assert sum(chunk_sizes) == total, \
        "sum of chunk sizes must be equal to spin systems list length."

    return [itertools.islice(spin_systems_list, *interval)
            for interval in self.calculate_intervals(chunk_sizes)]
python
def split_by_percent(self, spin_systems_list): """Split list of spin systems by specified percentages. :param list spin_systems_list: List of spin systems. :return: List of spin systems divided into sub-lists corresponding to specified split percentages. :rtype: :py:class:`list` """ chunk_sizes = [int((i*len(spin_systems_list))/100) for i in self.plsplit] if sum(chunk_sizes) < len(spin_systems_list): difference = len(spin_systems_list) - sum(chunk_sizes) chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += difference assert sum(chunk_sizes) == len(spin_systems_list), \ "sum of chunk sizes must be equal to spin systems list length." intervals = self.calculate_intervals(chunk_sizes) chunks_of_spin_systems_by_percentage = [itertools.islice(spin_systems_list, *interval) for interval in intervals] return chunks_of_spin_systems_by_percentage
[ "def", "split_by_percent", "(", "self", ",", "spin_systems_list", ")", ":", "chunk_sizes", "=", "[", "int", "(", "(", "i", "*", "len", "(", "spin_systems_list", ")", ")", "/", "100", ")", "for", "i", "in", "self", ".", "plsplit", "]", "if", "sum", "(...
Split list of spin systems by specified percentages. :param list spin_systems_list: List of spin systems. :return: List of spin systems divided into sub-lists corresponding to specified split percentages. :rtype: :py:class:`list`
[ "Split", "list", "of", "spin", "systems", "by", "specified", "percentages", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/translator.py#L178-L195
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/translator.py
StarFileToPeakList.create_peaklist
def create_peaklist(self, spectrum, chain, chain_idx, source): """Create peak list file. :param spectrum: Spectrum object instance. :type spectrum: :class:`~nmrstarlib.plsimulator.Spectrum` :param dict chain: Chain object that contains chemical shift values and assignment information. :param int chain_idx: Protein chain index. :param str source: :class:`~nmrstarlib.nmrstarlib.StarFile` source. :return: Peak list object. :rtype: :class:`~nmrstarlib.plsimulator.PeakList` """ sequence_sites = self.create_sequence_sites(chain, spectrum.seq_site_length) spin_systems = [] peaklist = plsimulator.PeakList(spectrum.name, spectrum.labels, source, chain_idx) for seq_site in sequence_sites: spin_system = plsimulator.SpinSystem() for template in spectrum.peak_templates: peak = plsimulator.Peak(template.dimension_labels) for dim in template: chemshift = seq_site[dim.position].get(dim.label, None) assignment = "{}{}{}".format(seq_site[dim.position]["AA3Code"], seq_site[dim.position]["Seq_ID"], dim.label) if chemshift and assignment: peak_dim = plsimulator.Dimension(dim.label, dim.position, assignment, float(chemshift)) peak.append(peak_dim) else: continue if len(peak) == len(template): spin_system.append(peak) peaklist.append(peak) else: continue spin_systems.append(spin_system) if all(len(i) < spectrum.min_spin_system_peaks for i in spin_systems): return None if self.noise_generator is not None: spin_systems_chunks = self.split_by_percent(spin_systems) for split_idx, chunk in enumerate(spin_systems_chunks): for spin_system in chunk: for peak in spin_system: peak.apply_noise(self.noise_generator, split_idx) return peaklist
python
def create_peaklist(self, spectrum, chain, chain_idx, source): """Create peak list file. :param spectrum: Spectrum object instance. :type spectrum: :class:`~nmrstarlib.plsimulator.Spectrum` :param dict chain: Chain object that contains chemical shift values and assignment information. :param int chain_idx: Protein chain index. :param str source: :class:`~nmrstarlib.nmrstarlib.StarFile` source. :return: Peak list object. :rtype: :class:`~nmrstarlib.plsimulator.PeakList` """ sequence_sites = self.create_sequence_sites(chain, spectrum.seq_site_length) spin_systems = [] peaklist = plsimulator.PeakList(spectrum.name, spectrum.labels, source, chain_idx) for seq_site in sequence_sites: spin_system = plsimulator.SpinSystem() for template in spectrum.peak_templates: peak = plsimulator.Peak(template.dimension_labels) for dim in template: chemshift = seq_site[dim.position].get(dim.label, None) assignment = "{}{}{}".format(seq_site[dim.position]["AA3Code"], seq_site[dim.position]["Seq_ID"], dim.label) if chemshift and assignment: peak_dim = plsimulator.Dimension(dim.label, dim.position, assignment, float(chemshift)) peak.append(peak_dim) else: continue if len(peak) == len(template): spin_system.append(peak) peaklist.append(peak) else: continue spin_systems.append(spin_system) if all(len(i) < spectrum.min_spin_system_peaks for i in spin_systems): return None if self.noise_generator is not None: spin_systems_chunks = self.split_by_percent(spin_systems) for split_idx, chunk in enumerate(spin_systems_chunks): for spin_system in chunk: for peak in spin_system: peak.apply_noise(self.noise_generator, split_idx) return peaklist
[ "def", "create_peaklist", "(", "self", ",", "spectrum", ",", "chain", ",", "chain_idx", ",", "source", ")", ":", "sequence_sites", "=", "self", ".", "create_sequence_sites", "(", "chain", ",", "spectrum", ".", "seq_site_length", ")", "spin_systems", "=", "[", ...
Create peak list file. :param spectrum: Spectrum object instance. :type spectrum: :class:`~nmrstarlib.plsimulator.Spectrum` :param dict chain: Chain object that contains chemical shift values and assignment information. :param int chain_idx: Protein chain index. :param str source: :class:`~nmrstarlib.nmrstarlib.StarFile` source. :return: Peak list object. :rtype: :class:`~nmrstarlib.plsimulator.PeakList`
[ "Create", "peak", "list", "file", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/translator.py#L197-L245
pletzer/pnumpy
src/pnDistArray.py
distArrayFactory
def distArrayFactory(BaseClass): """ Returns a distributed array class that derives from BaseClass @param BaseClass base class, e.g. numpy.ndarray or numpy.ma.masked_array @return dist array class """ class DistArrayAny(BaseClass): """ Distributed array. Each process owns data and can expose a subset of the data to other processes. These are known as windows. Any number of windows can be exposed and the data of windows can be overlapping. Any process can access exposed windows from any other process. This relies on MPI-2 one-sided get communication. """ def __new__(cls, *args, **kwargs): return numpy.ndarray.__new__(cls, *args, **kwargs) def __init__(self, shap, dtyp): """ Constructor @param shap array shape @param dtyp numpy type """ # default communicator self.comm = MPI.COMM_WORLD # winID: {'slice': slce, # 'dataSrc': dataSrc, # 'dataDst': dataDst, # 'window': window} self.windows = {} # the type of data self.dtyp = dtyp # this process's MPI rank self.rk = self.comm.Get_rank() # number of processes self.sz = self.comm.Get_size() # mapping of numpy data types to MPI data types, # assumes that the data are of some numpy variant self.dtypMPI = None if dtyp == numpy.float64: self.dtypMPI = MPI.DOUBLE elif dtyp == numpy.float32: self.dtypeMPI = MPI.FLOAT elif dtyp == numpy.int64: self.dtypeMPI = MPI.INT64_T elif dtyp == numpy.int32: self.dtypeMPI = MPI.INT32_T elif dtyp == numpy.int16: self.dtypeMPI = MPI.INT16_T elif dtyp == numpy.int8: self.dtypeMPI = MPI.INT8_T elif dtyp == numpy.bool_: self.dtypeMPI = MPI.BYTE else: raise NotImplementedError def setComm(self, comm): """ Set communicator @param comm communicator """ self.comm = comm self.rk = self.comm.Get_rank() self.sz = self.comm.Get_size() def getMPIRank(self): """ Get the MPI rank of this process @return rank """ return self.rk def getMPISize(self): """ Get the MPI size (number of processes) of this communicator @return rank """ return self.sz def expose(self, slce, winID): """ Collective operation to expose 
a sub-set of data @param slce tuple of slice objects @param winID the data window ID """ # buffer for source data dataSrc = numpy.zeros(self[slce].shape, self.dtyp) # buffer for destination data dataDst = numpy.zeros(self[slce].shape, self.dtyp) self.windows[winID] = { 'slice': slce, 'dataSrc': dataSrc, 'dataDst': dataDst, 'dataWindow': MPI.Win.Create(dataSrc, comm=self.comm), } if hasattr(self, 'mask'): maskSrc = numpy.ones(self[slce].shape, numpy.bool_) maskDst = numpy.ones(self[slce].shape, numpy.bool_) iw = self.windows[winID] iw['maskSrc'] = maskSrc iw['maskDst'] = maskDst iw['maskWindow'] = MPI.Win.Create(maskSrc, comm=self.comm) def getMask(self, pe, winID): """ Access remote mask (collective operation) @param pe remote processing element, if None then no operation @param winID remote window @return mask array or None (if there is no mask) @note this is a no operation if there is no mask attached to the data """ if 'maskWindow' not in self.windows: # no mask, no op return None iw = self.windows[winID] slce = iw['slice'] maskSrc = iw['maskSrc'] maskDst = iw['maskDst'] # copy src mask into buffer maskSrc[...] = self.mask[slce] win = iw['maskWindow'] win.Fence(MPI.MODE_NOPUT | MPI.MODE_NOPRECEDE) if pe is not None: win.Get([maskDst, MPI.BYTE], pe) win.Fence(MPI.MODE_NOSUCCEED) return maskDst def getData(self, pe, winID): """ Access remote data (collective operation) @param pe remote processing element, if None then no operation @param winID remote window @return array """ iw = self.windows[winID] slce = iw['slice'] dataSrc = iw['dataSrc'] dataDst = iw['dataDst'] # copy src data into buffer dataSrc[...] 
= self[slce] win = iw['dataWindow'] win.Fence(MPI.MODE_NOPRECEDE) #MPI.MODE_NOPUT | MPI.MODE_NOPRECEDE) if pe is not None: win.Get([dataDst, self.dtypMPI], pe) win.Fence(MPI.MODE_NOSUCCEED) return dataDst def free(self): """ Must be called to free all exposed windows """ for iw in self.windows: self.windows[iw]['dataWindow'].Free() def reduce(self, op, initializer=None, rootPe=0): """ Collective reduce operation @param op function (e.g. lambda x,y:x+y) @param initializer the return value if there are no elements @param rootPe the root process which receives the result @return result on rootPe, None on all other processes """ if len(self) == 0: return initializer val = functools.reduce(op, self.flat) data = self.comm.gather(val, rootPe) if self.rk == rootPe: return functools.reduce(op, data) else: return None return DistArrayAny
python
def distArrayFactory(BaseClass): """ Returns a distributed array class that derives from BaseClass @param BaseClass base class, e.g. numpy.ndarray or numpy.ma.masked_array @return dist array class """ class DistArrayAny(BaseClass): """ Distributed array. Each process owns data and can expose a subset of the data to other processes. These are known as windows. Any number of windows can be exposed and the data of windows can be overlapping. Any process can access exposed windows from any other process. This relies on MPI-2 one-sided get communication. """ def __new__(cls, *args, **kwargs): return numpy.ndarray.__new__(cls, *args, **kwargs) def __init__(self, shap, dtyp): """ Constructor @param shap array shape @param dtyp numpy type """ # default communicator self.comm = MPI.COMM_WORLD # winID: {'slice': slce, # 'dataSrc': dataSrc, # 'dataDst': dataDst, # 'window': window} self.windows = {} # the type of data self.dtyp = dtyp # this process's MPI rank self.rk = self.comm.Get_rank() # number of processes self.sz = self.comm.Get_size() # mapping of numpy data types to MPI data types, # assumes that the data are of some numpy variant self.dtypMPI = None if dtyp == numpy.float64: self.dtypMPI = MPI.DOUBLE elif dtyp == numpy.float32: self.dtypeMPI = MPI.FLOAT elif dtyp == numpy.int64: self.dtypeMPI = MPI.INT64_T elif dtyp == numpy.int32: self.dtypeMPI = MPI.INT32_T elif dtyp == numpy.int16: self.dtypeMPI = MPI.INT16_T elif dtyp == numpy.int8: self.dtypeMPI = MPI.INT8_T elif dtyp == numpy.bool_: self.dtypeMPI = MPI.BYTE else: raise NotImplementedError def setComm(self, comm): """ Set communicator @param comm communicator """ self.comm = comm self.rk = self.comm.Get_rank() self.sz = self.comm.Get_size() def getMPIRank(self): """ Get the MPI rank of this process @return rank """ return self.rk def getMPISize(self): """ Get the MPI size (number of processes) of this communicator @return rank """ return self.sz def expose(self, slce, winID): """ Collective operation to expose 
a sub-set of data @param slce tuple of slice objects @param winID the data window ID """ # buffer for source data dataSrc = numpy.zeros(self[slce].shape, self.dtyp) # buffer for destination data dataDst = numpy.zeros(self[slce].shape, self.dtyp) self.windows[winID] = { 'slice': slce, 'dataSrc': dataSrc, 'dataDst': dataDst, 'dataWindow': MPI.Win.Create(dataSrc, comm=self.comm), } if hasattr(self, 'mask'): maskSrc = numpy.ones(self[slce].shape, numpy.bool_) maskDst = numpy.ones(self[slce].shape, numpy.bool_) iw = self.windows[winID] iw['maskSrc'] = maskSrc iw['maskDst'] = maskDst iw['maskWindow'] = MPI.Win.Create(maskSrc, comm=self.comm) def getMask(self, pe, winID): """ Access remote mask (collective operation) @param pe remote processing element, if None then no operation @param winID remote window @return mask array or None (if there is no mask) @note this is a no operation if there is no mask attached to the data """ if 'maskWindow' not in self.windows: # no mask, no op return None iw = self.windows[winID] slce = iw['slice'] maskSrc = iw['maskSrc'] maskDst = iw['maskDst'] # copy src mask into buffer maskSrc[...] = self.mask[slce] win = iw['maskWindow'] win.Fence(MPI.MODE_NOPUT | MPI.MODE_NOPRECEDE) if pe is not None: win.Get([maskDst, MPI.BYTE], pe) win.Fence(MPI.MODE_NOSUCCEED) return maskDst def getData(self, pe, winID): """ Access remote data (collective operation) @param pe remote processing element, if None then no operation @param winID remote window @return array """ iw = self.windows[winID] slce = iw['slice'] dataSrc = iw['dataSrc'] dataDst = iw['dataDst'] # copy src data into buffer dataSrc[...] 
= self[slce] win = iw['dataWindow'] win.Fence(MPI.MODE_NOPRECEDE) #MPI.MODE_NOPUT | MPI.MODE_NOPRECEDE) if pe is not None: win.Get([dataDst, self.dtypMPI], pe) win.Fence(MPI.MODE_NOSUCCEED) return dataDst def free(self): """ Must be called to free all exposed windows """ for iw in self.windows: self.windows[iw]['dataWindow'].Free() def reduce(self, op, initializer=None, rootPe=0): """ Collective reduce operation @param op function (e.g. lambda x,y:x+y) @param initializer the return value if there are no elements @param rootPe the root process which receives the result @return result on rootPe, None on all other processes """ if len(self) == 0: return initializer val = functools.reduce(op, self.flat) data = self.comm.gather(val, rootPe) if self.rk == rootPe: return functools.reduce(op, data) else: return None return DistArrayAny
[ "def", "distArrayFactory", "(", "BaseClass", ")", ":", "class", "DistArrayAny", "(", "BaseClass", ")", ":", "\"\"\"\n Distributed array. Each process owns data and can expose a subset\n of the data to other processes. These are known as windows. Any\n number of windows c...
Returns a distributed array class that derives from BaseClass @param BaseClass base class, e.g. numpy.ndarray or numpy.ma.masked_array @return dist array class
[ "Returns", "a", "distributed", "array", "class", "that", "derives", "from", "BaseClass" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnDistArray.py#L15-L208
pletzer/pnumpy
src/pnDistArray.py
daArray
def daArray(arry, dtype=numpy.float): """ Array constructor for numpy distributed array @param arry numpy-like array """ a = numpy.array(arry, dtype) res = DistArray(a.shape, a.dtype) res[:] = a return res
python
def daArray(arry, dtype=numpy.float): """ Array constructor for numpy distributed array @param arry numpy-like array """ a = numpy.array(arry, dtype) res = DistArray(a.shape, a.dtype) res[:] = a return res
[ "def", "daArray", "(", "arry", ",", "dtype", "=", "numpy", ".", "float", ")", ":", "a", "=", "numpy", ".", "array", "(", "arry", ",", "dtype", ")", "res", "=", "DistArray", "(", "a", ".", "shape", ",", "a", ".", "dtype", ")", "res", "[", ":", ...
Array constructor for numpy distributed array @param arry numpy-like array
[ "Array", "constructor", "for", "numpy", "distributed", "array" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnDistArray.py#L218-L226
pletzer/pnumpy
src/pnDistArray.py
daZeros
def daZeros(shap, dtype=numpy.float): """ Zero constructor for numpy distributed array @param shap the shape of the array @param dtype the numpy data type """ res = DistArray(shap, dtype) res[:] = 0 return res
python
def daZeros(shap, dtype=numpy.float): """ Zero constructor for numpy distributed array @param shap the shape of the array @param dtype the numpy data type """ res = DistArray(shap, dtype) res[:] = 0 return res
[ "def", "daZeros", "(", "shap", ",", "dtype", "=", "numpy", ".", "float", ")", ":", "res", "=", "DistArray", "(", "shap", ",", "dtype", ")", "res", "[", ":", "]", "=", "0", "return", "res" ]
Zero constructor for numpy distributed array @param shap the shape of the array @param dtype the numpy data type
[ "Zero", "constructor", "for", "numpy", "distributed", "array" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnDistArray.py#L229-L237
pletzer/pnumpy
src/pnDistArray.py
daOnes
def daOnes(shap, dtype=numpy.float): """ One constructor for numpy distributed array @param shap the shape of the array @param dtype the numpy data type """ res = DistArray(shap, dtype) res[:] = 1 return res
python
def daOnes(shap, dtype=numpy.float): """ One constructor for numpy distributed array @param shap the shape of the array @param dtype the numpy data type """ res = DistArray(shap, dtype) res[:] = 1 return res
[ "def", "daOnes", "(", "shap", ",", "dtype", "=", "numpy", ".", "float", ")", ":", "res", "=", "DistArray", "(", "shap", ",", "dtype", ")", "res", "[", ":", "]", "=", "1", "return", "res" ]
One constructor for numpy distributed array @param shap the shape of the array @param dtype the numpy data type
[ "One", "constructor", "for", "numpy", "distributed", "array" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnDistArray.py#L240-L248
pletzer/pnumpy
src/pnDistArray.py
mdaArray
def mdaArray(arry, dtype=numpy.float, mask=None): """ Array constructor for masked distributed array @param arry numpy-like array @param mask mask array (or None if all data elements are valid) """ a = numpy.array(arry, dtype) res = MaskedDistArray(a.shape, a.dtype) res[:] = a res.mask = mask return res
python
def mdaArray(arry, dtype=numpy.float, mask=None): """ Array constructor for masked distributed array @param arry numpy-like array @param mask mask array (or None if all data elements are valid) """ a = numpy.array(arry, dtype) res = MaskedDistArray(a.shape, a.dtype) res[:] = a res.mask = mask return res
[ "def", "mdaArray", "(", "arry", ",", "dtype", "=", "numpy", ".", "float", ",", "mask", "=", "None", ")", ":", "a", "=", "numpy", ".", "array", "(", "arry", ",", "dtype", ")", "res", "=", "MaskedDistArray", "(", "a", ".", "shape", ",", "a", ".", ...
Array constructor for masked distributed array @param arry numpy-like array @param mask mask array (or None if all data elements are valid)
[ "Array", "constructor", "for", "masked", "distributed", "array" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnDistArray.py#L254-L264
pletzer/pnumpy
src/pnDistArray.py
mdaZeros
def mdaZeros(shap, dtype=numpy.float, mask=None): """ Zero constructor for masked distributed array @param shap the shape of the array @param dtype the numpy data type @param mask mask array (or None if all data elements are valid) """ res = MaskedDistArray(shap, dtype) res[:] = 0 res.mask = mask return res
python
def mdaZeros(shap, dtype=numpy.float, mask=None): """ Zero constructor for masked distributed array @param shap the shape of the array @param dtype the numpy data type @param mask mask array (or None if all data elements are valid) """ res = MaskedDistArray(shap, dtype) res[:] = 0 res.mask = mask return res
[ "def", "mdaZeros", "(", "shap", ",", "dtype", "=", "numpy", ".", "float", ",", "mask", "=", "None", ")", ":", "res", "=", "MaskedDistArray", "(", "shap", ",", "dtype", ")", "res", "[", ":", "]", "=", "0", "res", ".", "mask", "=", "mask", "return"...
Zero constructor for masked distributed array @param shap the shape of the array @param dtype the numpy data type @param mask mask array (or None if all data elements are valid)
[ "Zero", "constructor", "for", "masked", "distributed", "array" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnDistArray.py#L267-L277
pletzer/pnumpy
src/pnDistArray.py
mdaOnes
def mdaOnes(shap, dtype=numpy.float, mask=None): """ One constructor for masked distributed array @param shap the shape of the array @param dtype the numpy data type @param mask mask array (or None if all data elements are valid) """ res = MaskedDistArray(shap, dtype) res[:] = 1 res.mask = mask return res
python
def mdaOnes(shap, dtype=numpy.float, mask=None): """ One constructor for masked distributed array @param shap the shape of the array @param dtype the numpy data type @param mask mask array (or None if all data elements are valid) """ res = MaskedDistArray(shap, dtype) res[:] = 1 res.mask = mask return res
[ "def", "mdaOnes", "(", "shap", ",", "dtype", "=", "numpy", ".", "float", ",", "mask", "=", "None", ")", ":", "res", "=", "MaskedDistArray", "(", "shap", ",", "dtype", ")", "res", "[", ":", "]", "=", "1", "res", ".", "mask", "=", "mask", "return",...
One constructor for masked distributed array @param shap the shape of the array @param dtype the numpy data type @param mask mask array (or None if all data elements are valid)
[ "One", "constructor", "for", "masked", "distributed", "array" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnDistArray.py#L280-L290
THLO/map
map/MapArgumentParser.py
MapArgumentParser.format_help
def format_help(self): """ The help statement is slightly changed in that: 1) MapConstants.placeholderCounterHelpVersion is replaced MapConstants.placeholderCounter 2) 'COUNT_FROM', 'NUMBER_LENGTH', and 'EXTENSIONS' are shortened to 'VALUE', 'LENGTH', and 'EXT', respectively. """ return super(MapArgumentParser,self).format_help().replace(MapConstants.placeholderCounterHelpVersion,MapConstants.placeholderCounter).replace('COUNT_FROM','VALUE').replace('NUMBER_LENGTH','LENGTH').replace('EXTENSIONS','EXT')
python
def format_help(self): """ The help statement is slightly changed in that: 1) MapConstants.placeholderCounterHelpVersion is replaced MapConstants.placeholderCounter 2) 'COUNT_FROM', 'NUMBER_LENGTH', and 'EXTENSIONS' are shortened to 'VALUE', 'LENGTH', and 'EXT', respectively. """ return super(MapArgumentParser,self).format_help().replace(MapConstants.placeholderCounterHelpVersion,MapConstants.placeholderCounter).replace('COUNT_FROM','VALUE').replace('NUMBER_LENGTH','LENGTH').replace('EXTENSIONS','EXT')
[ "def", "format_help", "(", "self", ")", ":", "return", "super", "(", "MapArgumentParser", ",", "self", ")", ".", "format_help", "(", ")", ".", "replace", "(", "MapConstants", ".", "placeholderCounterHelpVersion", ",", "MapConstants", ".", "placeholderCounter", "...
The help statement is slightly changed in that: 1) MapConstants.placeholderCounterHelpVersion is replaced MapConstants.placeholderCounter 2) 'COUNT_FROM', 'NUMBER_LENGTH', and 'EXTENSIONS' are shortened to 'VALUE', 'LENGTH', and 'EXT', respectively.
[ "The", "help", "statement", "is", "slightly", "changed", "in", "that", ":", "1", ")", "MapConstants", ".", "placeholderCounterHelpVersion", "is", "replaced", "MapConstants", ".", "placeholderCounter", "2", ")", "COUNT_FROM", "NUMBER_LENGTH", "and", "EXTENSIONS", "ar...
train
https://github.com/THLO/map/blob/6c1571187662bbf2e66faaf96b11a3e151ed4c87/map/MapArgumentParser.py#L54-L59
caktus/django-sticky-uploads
stickyuploads/forms.py
UploadForm.stash
def stash(self, storage, url): """Stores the uploaded file in a temporary storage location.""" result = {} if self.is_valid(): upload = self.cleaned_data['upload'] name = storage.save(upload.name, upload) result['filename'] = os.path.basename(name) try: result['url'] = storage.url(name) except NotImplementedError: result['url'] = None result['stored'] = serialize_upload(name, storage, url) return result
python
def stash(self, storage, url): """Stores the uploaded file in a temporary storage location.""" result = {} if self.is_valid(): upload = self.cleaned_data['upload'] name = storage.save(upload.name, upload) result['filename'] = os.path.basename(name) try: result['url'] = storage.url(name) except NotImplementedError: result['url'] = None result['stored'] = serialize_upload(name, storage, url) return result
[ "def", "stash", "(", "self", ",", "storage", ",", "url", ")", ":", "result", "=", "{", "}", "if", "self", ".", "is_valid", "(", ")", ":", "upload", "=", "self", ".", "cleaned_data", "[", "'upload'", "]", "name", "=", "storage", ".", "save", "(", ...
Stores the uploaded file in a temporary storage location.
[ "Stores", "the", "uploaded", "file", "in", "a", "temporary", "storage", "location", "." ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/forms.py#L14-L26
trisk/pysesame
pysesame/candyhouse.py
CandyHouseAccount.login
def login(self, email=None, password=None, timeout=5): """Log in to CANDY HOUSE account. Return True on success.""" if email is not None: self.email = email if password is not None: self.password = password url = self.api_url + API_LOGIN_ENDPOINT data = json.dumps({'email': self.email, 'password': self.password}) headers = {'Content-Type': 'application/json'} response = None try: response = self.session.post(url, data=data, headers=headers, timeout=timeout) except requests.exceptions.ConnectionError: _LOGGER.warning("Unable to connect to %s", url) except requests.exceptions.Timeout: _LOGGER.warning("No response from %s", url) if response is not None: if response.status_code == 200: self.auth_token = json.loads(response.text)['authorization'] return True else: _LOGGER.warning("Login failed for %s: %s", self.email, response.text) else: _LOGGER.warning("Login failed for %s", self.email) return False
python
def login(self, email=None, password=None, timeout=5): """Log in to CANDY HOUSE account. Return True on success.""" if email is not None: self.email = email if password is not None: self.password = password url = self.api_url + API_LOGIN_ENDPOINT data = json.dumps({'email': self.email, 'password': self.password}) headers = {'Content-Type': 'application/json'} response = None try: response = self.session.post(url, data=data, headers=headers, timeout=timeout) except requests.exceptions.ConnectionError: _LOGGER.warning("Unable to connect to %s", url) except requests.exceptions.Timeout: _LOGGER.warning("No response from %s", url) if response is not None: if response.status_code == 200: self.auth_token = json.loads(response.text)['authorization'] return True else: _LOGGER.warning("Login failed for %s: %s", self.email, response.text) else: _LOGGER.warning("Login failed for %s", self.email) return False
[ "def", "login", "(", "self", ",", "email", "=", "None", ",", "password", "=", "None", ",", "timeout", "=", "5", ")", ":", "if", "email", "is", "not", "None", ":", "self", ".", "email", "=", "email", "if", "password", "is", "not", "None", ":", "se...
Log in to CANDY HOUSE account. Return True on success.
[ "Log", "in", "to", "CANDY", "HOUSE", "account", ".", "Return", "True", "on", "success", "." ]
train
https://github.com/trisk/pysesame/blob/8f9df4a478cf8f328ec8185bcac7c8704cbd9c01/pysesame/candyhouse.py#L32-L62
trisk/pysesame
pysesame/candyhouse.py
CandyHouseAccount.request
def request(self, method, endpoint, payload=None, timeout=5): """Send request to API.""" url = self.api_url + endpoint data = None headers = {} if payload is not None: data = json.dumps(payload) headers['Content-Type'] = 'application/json' try: if self.auth_token is not None: headers[API_AUTH_HEADER] = self.auth_token response = self.session.request(method, url, data=data, headers=headers, timeout=timeout) if response.status_code != 401: return response _LOGGER.debug("Renewing auth token") if not self.login(timeout=timeout): return None # Retry request headers[API_AUTH_HEADER] = self.auth_token return self.session.request(method, url, data=data, headers=headers, timeout=timeout) except requests.exceptions.ConnectionError: _LOGGER.warning("Unable to connect to %s", url) except requests.exceptions.Timeout: _LOGGER.warning("No response from %s", url) return None
python
def request(self, method, endpoint, payload=None, timeout=5): """Send request to API.""" url = self.api_url + endpoint data = None headers = {} if payload is not None: data = json.dumps(payload) headers['Content-Type'] = 'application/json' try: if self.auth_token is not None: headers[API_AUTH_HEADER] = self.auth_token response = self.session.request(method, url, data=data, headers=headers, timeout=timeout) if response.status_code != 401: return response _LOGGER.debug("Renewing auth token") if not self.login(timeout=timeout): return None # Retry request headers[API_AUTH_HEADER] = self.auth_token return self.session.request(method, url, data=data, headers=headers, timeout=timeout) except requests.exceptions.ConnectionError: _LOGGER.warning("Unable to connect to %s", url) except requests.exceptions.Timeout: _LOGGER.warning("No response from %s", url) return None
[ "def", "request", "(", "self", ",", "method", ",", "endpoint", ",", "payload", "=", "None", ",", "timeout", "=", "5", ")", ":", "url", "=", "self", ".", "api_url", "+", "endpoint", "data", "=", "None", "headers", "=", "{", "}", "if", "payload", "is...
Send request to API.
[ "Send", "request", "to", "API", "." ]
train
https://github.com/trisk/pysesame/blob/8f9df4a478cf8f328ec8185bcac7c8704cbd9c01/pysesame/candyhouse.py#L64-L97
trisk/pysesame
pysesame/candyhouse.py
CandyHouseAccount.sesames
def sesames(self): """Return list of Sesames.""" response = self.request('GET', API_SESAME_LIST_ENDPOINT) if response is not None and response.status_code == 200: return json.loads(response.text)['sesames'] _LOGGER.warning("Unable to list Sesames") return []
python
def sesames(self): """Return list of Sesames.""" response = self.request('GET', API_SESAME_LIST_ENDPOINT) if response is not None and response.status_code == 200: return json.loads(response.text)['sesames'] _LOGGER.warning("Unable to list Sesames") return []
[ "def", "sesames", "(", "self", ")", ":", "response", "=", "self", ".", "request", "(", "'GET'", ",", "API_SESAME_LIST_ENDPOINT", ")", "if", "response", "is", "not", "None", "and", "response", ".", "status_code", "==", "200", ":", "return", "json", ".", "...
Return list of Sesames.
[ "Return", "list", "of", "Sesames", "." ]
train
https://github.com/trisk/pysesame/blob/8f9df4a478cf8f328ec8185bcac7c8704cbd9c01/pysesame/candyhouse.py#L100-L107
ClimateImpactLab/DataFS
datafs/config/helpers.py
get_api
def get_api( profile=None, config_file=None, requirements=None): ''' Generate a datafs.DataAPI object from a config profile ``get_api`` generates a DataAPI object based on a pre-configured datafs profile specified in your datafs config file. To create a datafs config file, use the command line tool ``datafs configure --helper`` or export an existing DataAPI object with :py:meth:`datafs.ConfigFile.write_config_from_api` Parameters ---------- profile : str (optional) name of a profile in your datafs config file. If profile is not provided, the default profile specified in the file will be used. config_file : str or file (optional) path to your datafs configuration file. By default, get_api uses your OS's default datafs application directory. Examples -------- The following specifies a simple API with a MongoDB manager and a temporary storage service: .. code-block:: python >>> try: ... from StringIO import StringIO ... except ImportError: ... from io import StringIO ... >>> import tempfile >>> tempdir = tempfile.mkdtemp() >>> >>> config_file = StringIO(""" ... default-profile: my-data ... profiles: ... my-data: ... manager: ... class: MongoDBManager ... kwargs: ... database_name: 'MyDatabase' ... table_name: 'DataFiles' ... ... authorities: ... local: ... service: OSFS ... args: ['{}'] ... """.format(tempdir)) >>> >>> # This file can be read in using the datafs.get_api helper function ... >>> >>> api = get_api(profile='my-data', config_file=config_file) >>> api.manager.create_archive_table( ... 'DataFiles', ... raise_on_err=False) >>> >>> archive = api.create( ... 'my_first_archive', ... metadata = dict(description = 'My test data archive'), ... raise_on_err=False) >>> >>> with archive.open('w+') as f: ... res = f.write(u'hello!') ... >>> with archive.open('r') as f: ... print(f.read()) ... hello! >>> >>> # clean up ... 
>>> archive.delete() >>> import shutil >>> shutil.rmtree(tempdir) ''' config = ConfigFile(config_file=config_file) config.read_config() if profile is None: profile = config.config['default-profile'] profile_config = config.get_profile_config(profile) default_versions = {} if requirements is None: requirements = config.config.get('requirements', None) if requirements is not None and not os.path.isfile(requirements): for reqline in re.split(r'[\r\n;]+', requirements): if re.search(r'^\s*$', reqline): continue archive, version = _parse_requirement(reqline) default_versions[archive] = version else: if requirements is None: requirements = 'requirements_data.txt' if os.path.isfile(requirements): with open_filelike(requirements, 'r') as reqfile: for reqline in reqfile.readlines(): if re.search(r'^\s*$', reqline): continue archive, version = _parse_requirement(reqline) default_versions[archive] = version api = APIConstructor.generate_api_from_config(profile_config) api.default_versions = default_versions APIConstructor.attach_manager_from_config(api, profile_config) APIConstructor.attach_services_from_config(api, profile_config) APIConstructor.attach_cache_from_config(api, profile_config) return api
python
def get_api( profile=None, config_file=None, requirements=None): ''' Generate a datafs.DataAPI object from a config profile ``get_api`` generates a DataAPI object based on a pre-configured datafs profile specified in your datafs config file. To create a datafs config file, use the command line tool ``datafs configure --helper`` or export an existing DataAPI object with :py:meth:`datafs.ConfigFile.write_config_from_api` Parameters ---------- profile : str (optional) name of a profile in your datafs config file. If profile is not provided, the default profile specified in the file will be used. config_file : str or file (optional) path to your datafs configuration file. By default, get_api uses your OS's default datafs application directory. Examples -------- The following specifies a simple API with a MongoDB manager and a temporary storage service: .. code-block:: python >>> try: ... from StringIO import StringIO ... except ImportError: ... from io import StringIO ... >>> import tempfile >>> tempdir = tempfile.mkdtemp() >>> >>> config_file = StringIO(""" ... default-profile: my-data ... profiles: ... my-data: ... manager: ... class: MongoDBManager ... kwargs: ... database_name: 'MyDatabase' ... table_name: 'DataFiles' ... ... authorities: ... local: ... service: OSFS ... args: ['{}'] ... """.format(tempdir)) >>> >>> # This file can be read in using the datafs.get_api helper function ... >>> >>> api = get_api(profile='my-data', config_file=config_file) >>> api.manager.create_archive_table( ... 'DataFiles', ... raise_on_err=False) >>> >>> archive = api.create( ... 'my_first_archive', ... metadata = dict(description = 'My test data archive'), ... raise_on_err=False) >>> >>> with archive.open('w+') as f: ... res = f.write(u'hello!') ... >>> with archive.open('r') as f: ... print(f.read()) ... hello! >>> >>> # clean up ... 
>>> archive.delete() >>> import shutil >>> shutil.rmtree(tempdir) ''' config = ConfigFile(config_file=config_file) config.read_config() if profile is None: profile = config.config['default-profile'] profile_config = config.get_profile_config(profile) default_versions = {} if requirements is None: requirements = config.config.get('requirements', None) if requirements is not None and not os.path.isfile(requirements): for reqline in re.split(r'[\r\n;]+', requirements): if re.search(r'^\s*$', reqline): continue archive, version = _parse_requirement(reqline) default_versions[archive] = version else: if requirements is None: requirements = 'requirements_data.txt' if os.path.isfile(requirements): with open_filelike(requirements, 'r') as reqfile: for reqline in reqfile.readlines(): if re.search(r'^\s*$', reqline): continue archive, version = _parse_requirement(reqline) default_versions[archive] = version api = APIConstructor.generate_api_from_config(profile_config) api.default_versions = default_versions APIConstructor.attach_manager_from_config(api, profile_config) APIConstructor.attach_services_from_config(api, profile_config) APIConstructor.attach_cache_from_config(api, profile_config) return api
[ "def", "get_api", "(", "profile", "=", "None", ",", "config_file", "=", "None", ",", "requirements", "=", "None", ")", ":", "config", "=", "ConfigFile", "(", "config_file", "=", "config_file", ")", "config", ".", "read_config", "(", ")", "if", "profile", ...
Generate a datafs.DataAPI object from a config profile ``get_api`` generates a DataAPI object based on a pre-configured datafs profile specified in your datafs config file. To create a datafs config file, use the command line tool ``datafs configure --helper`` or export an existing DataAPI object with :py:meth:`datafs.ConfigFile.write_config_from_api` Parameters ---------- profile : str (optional) name of a profile in your datafs config file. If profile is not provided, the default profile specified in the file will be used. config_file : str or file (optional) path to your datafs configuration file. By default, get_api uses your OS's default datafs application directory. Examples -------- The following specifies a simple API with a MongoDB manager and a temporary storage service: .. code-block:: python >>> try: ... from StringIO import StringIO ... except ImportError: ... from io import StringIO ... >>> import tempfile >>> tempdir = tempfile.mkdtemp() >>> >>> config_file = StringIO(""" ... default-profile: my-data ... profiles: ... my-data: ... manager: ... class: MongoDBManager ... kwargs: ... database_name: 'MyDatabase' ... table_name: 'DataFiles' ... ... authorities: ... local: ... service: OSFS ... args: ['{}'] ... """.format(tempdir)) >>> >>> # This file can be read in using the datafs.get_api helper function ... >>> >>> api = get_api(profile='my-data', config_file=config_file) >>> api.manager.create_archive_table( ... 'DataFiles', ... raise_on_err=False) >>> >>> archive = api.create( ... 'my_first_archive', ... metadata = dict(description = 'My test data archive'), ... raise_on_err=False) >>> >>> with archive.open('w+') as f: ... res = f.write(u'hello!') ... >>> with archive.open('r') as f: ... print(f.read()) ... hello! >>> >>> # clean up ... >>> archive.delete() >>> import shutil >>> shutil.rmtree(tempdir)
[ "Generate", "a", "datafs", ".", "DataAPI", "object", "from", "a", "config", "profile" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/config/helpers.py#L25-L155
ClimateImpactLab/DataFS
datafs/config/helpers.py
check_requirements
def check_requirements(to_populate, prompts, helper=False): ''' Iterates through required values, checking to_populate for required values If a key in prompts is missing in to_populate and ``helper==True``, prompts the user using the values in to_populate. Otherwise, raises an error. Parameters ---------- to_populate : dict Data dictionary to fill. Prompts given to the user are taken from this dictionary. prompts : dict Keys and prompts to use when filling ``to_populate`` ''' for kw, prompt in prompts.items(): if helper: if kw not in to_populate: to_populate[kw] = click.prompt(prompt) else: msg = ( 'Required value "{}" not found. ' 'Use helper=True or the --helper ' 'flag for assistance.'.format(kw)) assert kw in to_populate, msg
python
def check_requirements(to_populate, prompts, helper=False): ''' Iterates through required values, checking to_populate for required values If a key in prompts is missing in to_populate and ``helper==True``, prompts the user using the values in to_populate. Otherwise, raises an error. Parameters ---------- to_populate : dict Data dictionary to fill. Prompts given to the user are taken from this dictionary. prompts : dict Keys and prompts to use when filling ``to_populate`` ''' for kw, prompt in prompts.items(): if helper: if kw not in to_populate: to_populate[kw] = click.prompt(prompt) else: msg = ( 'Required value "{}" not found. ' 'Use helper=True or the --helper ' 'flag for assistance.'.format(kw)) assert kw in to_populate, msg
[ "def", "check_requirements", "(", "to_populate", ",", "prompts", ",", "helper", "=", "False", ")", ":", "for", "kw", ",", "prompt", "in", "prompts", ".", "items", "(", ")", ":", "if", "helper", ":", "if", "kw", "not", "in", "to_populate", ":", "to_popu...
Iterates through required values, checking to_populate for required values If a key in prompts is missing in to_populate and ``helper==True``, prompts the user using the values in to_populate. Otherwise, raises an error. Parameters ---------- to_populate : dict Data dictionary to fill. Prompts given to the user are taken from this dictionary. prompts : dict Keys and prompts to use when filling ``to_populate``
[ "Iterates", "through", "required", "values", "checking", "to_populate", "for", "required", "values" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/config/helpers.py#L158-L187
PedalPi/Raspberry-Physical
component/pcd8544/pcd8544_bitbang.py
PCD8544.write_all
def write_all(self, data): """ :param list[int] data: """ self._set_cursor_x(0) self._set_cursor_y(0) self.DC.on() self._write(data)
python
def write_all(self, data): """ :param list[int] data: """ self._set_cursor_x(0) self._set_cursor_y(0) self.DC.on() self._write(data)
[ "def", "write_all", "(", "self", ",", "data", ")", ":", "self", ".", "_set_cursor_x", "(", "0", ")", "self", ".", "_set_cursor_y", "(", "0", ")", "self", ".", "DC", ".", "on", "(", ")", "self", ".", "_write", "(", "data", ")" ]
:param list[int] data:
[ ":", "param", "list", "[", "int", "]", "data", ":" ]
train
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/component/pcd8544/pcd8544_bitbang.py#L99-L108
chbrown/pi
pi/commands/upgrade.py
cli
def cli(parser): ''' Simply calls out to easy_install --upgrade ''' parser.add_argument('packages', nargs='*', default=local_packages(), help='Packages to upgrade (defaults to all installed packages)') parser.add_argument('-n', '--dry-run', action='store_true', help='Print upgrade actions without running') opts = parser.parse_args() for package in opts.packages: upgrade(package, execute=not opts.dry_run)
python
def cli(parser): ''' Simply calls out to easy_install --upgrade ''' parser.add_argument('packages', nargs='*', default=local_packages(), help='Packages to upgrade (defaults to all installed packages)') parser.add_argument('-n', '--dry-run', action='store_true', help='Print upgrade actions without running') opts = parser.parse_args() for package in opts.packages: upgrade(package, execute=not opts.dry_run)
[ "def", "cli", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'packages'", ",", "nargs", "=", "'*'", ",", "default", "=", "local_packages", "(", ")", ",", "help", "=", "'Packages to upgrade (defaults to all installed packages)'", ")", "parser", "."...
Simply calls out to easy_install --upgrade
[ "Simply", "calls", "out", "to", "easy_install", "--", "upgrade" ]
train
https://github.com/chbrown/pi/blob/a3661eccf1c6f0105e34a0ee24328022bf4e6b92/pi/commands/upgrade.py#L23-L34
caktus/django-sticky-uploads
stickyuploads/views.py
UploadView.post
def post(self, *args, **kwargs): """Save file and return saved info or report errors.""" if self.upload_allowed(): form = self.get_upload_form() result = {} if form.is_valid(): storage = self.get_storage() result['is_valid'] = True info = form.stash(storage, self.request.path) result.update(info) else: result.update({ 'is_valid': False, 'errors': form.errors, }) return HttpResponse(json.dumps(result), content_type='application/json') else: return HttpResponseForbidden()
python
def post(self, *args, **kwargs): """Save file and return saved info or report errors.""" if self.upload_allowed(): form = self.get_upload_form() result = {} if form.is_valid(): storage = self.get_storage() result['is_valid'] = True info = form.stash(storage, self.request.path) result.update(info) else: result.update({ 'is_valid': False, 'errors': form.errors, }) return HttpResponse(json.dumps(result), content_type='application/json') else: return HttpResponseForbidden()
[ "def", "post", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "upload_allowed", "(", ")", ":", "form", "=", "self", ".", "get_upload_form", "(", ")", "result", "=", "{", "}", "if", "form", ".", "is_valid", "(...
Save file and return saved info or report errors.
[ "Save", "file", "and", "return", "saved", "info", "or", "report", "errors", "." ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/views.py#L18-L35
caktus/django-sticky-uploads
stickyuploads/views.py
UploadView.get_upload_form
def get_upload_form(self): """Construct form for accepting file upload.""" return self.form_class(self.request.POST, self.request.FILES)
python
def get_upload_form(self): """Construct form for accepting file upload.""" return self.form_class(self.request.POST, self.request.FILES)
[ "def", "get_upload_form", "(", "self", ")", ":", "return", "self", ".", "form_class", "(", "self", ".", "request", ".", "POST", ",", "self", ".", "request", ".", "FILES", ")" ]
Construct form for accepting file upload.
[ "Construct", "form", "for", "accepting", "file", "upload", "." ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/views.py#L45-L47
Overboard/discoverhue
discoverhue/discoverhue.py
from_url
def from_url(location): """ HTTP request for page at location returned as string malformed url returns ValueError nonexistant IP returns URLError wrong subnet IP return URLError reachable IP, no HTTP server returns URLError reachable IP, HTTP, wrong page returns HTTPError """ req = urllib.request.Request(location) with urllib.request.urlopen(req) as response: the_page = response.read().decode() return the_page
python
def from_url(location): """ HTTP request for page at location returned as string malformed url returns ValueError nonexistant IP returns URLError wrong subnet IP return URLError reachable IP, no HTTP server returns URLError reachable IP, HTTP, wrong page returns HTTPError """ req = urllib.request.Request(location) with urllib.request.urlopen(req) as response: the_page = response.read().decode() return the_page
[ "def", "from_url", "(", "location", ")", ":", "req", "=", "urllib", ".", "request", ".", "Request", "(", "location", ")", "with", "urllib", ".", "request", ".", "urlopen", "(", "req", ")", "as", "response", ":", "the_page", "=", "response", ".", "read"...
HTTP request for page at location returned as string malformed url returns ValueError nonexistant IP returns URLError wrong subnet IP return URLError reachable IP, no HTTP server returns URLError reachable IP, HTTP, wrong page returns HTTPError
[ "HTTP", "request", "for", "page", "at", "location", "returned", "as", "string" ]
train
https://github.com/Overboard/discoverhue/blob/81948ac8a37ec3d69b5608b3822b42a9bd4e0a7e/discoverhue/discoverhue.py#L45-L57
Overboard/discoverhue
discoverhue/discoverhue.py
parse_description_xml
def parse_description_xml(location): """ Extract serial number, base ip, and img url from description.xml missing data from XML returns AttributeError malformed XML returns ParseError Refer to included example for URLBase and serialNumber elements """ class _URLBase(str): """ Convenient access to hostname (ip) portion of the URL """ @property def hostname(self): return urlsplit(self).hostname # """TODO: review error handling on xml""" # may want to suppress ParseError in the event that it was caused # by a none bridge device although this seems unlikely try: xml_str = from_url(location) except urllib.request.HTTPError as error: logger.info("No description for %s: %s", location, error) return None, error except urllib.request.URLError as error: logger.info("No HTTP server for %s: %s", location, error) return None, error else: root = ET.fromstring(xml_str) rootname = {'root': root.tag[root.tag.find('{')+1:root.tag.find('}')]} baseip = root.find('root:URLBase', rootname).text device = root.find('root:device', rootname) serial = device.find('root:serialNumber', rootname).text # anicon = device.find('root:iconList', rootname).find('root:icon', rootname) # imgurl = anicon.find('root:url', rootname).text # Alternatively, could look directly in the modelDescription field if all(x in xml_str.lower() for x in ['philips', 'hue']): return serial, _URLBase(baseip) else: return None, None
python
def parse_description_xml(location): """ Extract serial number, base ip, and img url from description.xml missing data from XML returns AttributeError malformed XML returns ParseError Refer to included example for URLBase and serialNumber elements """ class _URLBase(str): """ Convenient access to hostname (ip) portion of the URL """ @property def hostname(self): return urlsplit(self).hostname # """TODO: review error handling on xml""" # may want to suppress ParseError in the event that it was caused # by a none bridge device although this seems unlikely try: xml_str = from_url(location) except urllib.request.HTTPError as error: logger.info("No description for %s: %s", location, error) return None, error except urllib.request.URLError as error: logger.info("No HTTP server for %s: %s", location, error) return None, error else: root = ET.fromstring(xml_str) rootname = {'root': root.tag[root.tag.find('{')+1:root.tag.find('}')]} baseip = root.find('root:URLBase', rootname).text device = root.find('root:device', rootname) serial = device.find('root:serialNumber', rootname).text # anicon = device.find('root:iconList', rootname).find('root:icon', rootname) # imgurl = anicon.find('root:url', rootname).text # Alternatively, could look directly in the modelDescription field if all(x in xml_str.lower() for x in ['philips', 'hue']): return serial, _URLBase(baseip) else: return None, None
[ "def", "parse_description_xml", "(", "location", ")", ":", "class", "_URLBase", "(", "str", ")", ":", "\"\"\" Convenient access to hostname (ip) portion of the URL \"\"\"", "@", "property", "def", "hostname", "(", "self", ")", ":", "return", "urlsplit", "(", "self", ...
Extract serial number, base ip, and img url from description.xml missing data from XML returns AttributeError malformed XML returns ParseError Refer to included example for URLBase and serialNumber elements
[ "Extract", "serial", "number", "base", "ip", "and", "img", "url", "from", "description", ".", "xml" ]
train
https://github.com/Overboard/discoverhue/blob/81948ac8a37ec3d69b5608b3822b42a9bd4e0a7e/discoverhue/discoverhue.py#L59-L97
Overboard/discoverhue
discoverhue/discoverhue.py
_build_from
def _build_from(baseip): """ Build URL for description.xml from ip """ from ipaddress import ip_address try: ip_address(baseip) except ValueError: # """attempt to construct url but the ip format has changed""" # logger.warning("Format of internalipaddress changed: %s", baseip) if 'http' not in baseip[0:4].lower(): baseip = urlunsplit(['http', baseip, '', '', '']) spl = urlsplit(baseip) if '.xml' not in spl.path: sep = '' if spl.path.endswith('/') else '/' spl = spl._replace(path=spl.path+sep+'description.xml') return spl.geturl() else: # construct url knowing baseip is a pure ip return urlunsplit(('http', baseip, '/description.xml', '', ''))
python
def _build_from(baseip): """ Build URL for description.xml from ip """ from ipaddress import ip_address try: ip_address(baseip) except ValueError: # """attempt to construct url but the ip format has changed""" # logger.warning("Format of internalipaddress changed: %s", baseip) if 'http' not in baseip[0:4].lower(): baseip = urlunsplit(['http', baseip, '', '', '']) spl = urlsplit(baseip) if '.xml' not in spl.path: sep = '' if spl.path.endswith('/') else '/' spl = spl._replace(path=spl.path+sep+'description.xml') return spl.geturl() else: # construct url knowing baseip is a pure ip return urlunsplit(('http', baseip, '/description.xml', '', ''))
[ "def", "_build_from", "(", "baseip", ")", ":", "from", "ipaddress", "import", "ip_address", "try", ":", "ip_address", "(", "baseip", ")", "except", "ValueError", ":", "# \"\"\"attempt to construct url but the ip format has changed\"\"\"", "# logger.warning(\"Format of interna...
Build URL for description.xml from ip
[ "Build", "URL", "for", "description", ".", "xml", "from", "ip" ]
train
https://github.com/Overboard/discoverhue/blob/81948ac8a37ec3d69b5608b3822b42a9bd4e0a7e/discoverhue/discoverhue.py#L99-L116
Overboard/discoverhue
discoverhue/discoverhue.py
parse_portal_json
def parse_portal_json(): """ Extract id, ip from https://www.meethue.com/api/nupnp Note: the ip is only the base and needs xml file appended, and the id is not exactly the same as the serial number in the xml """ try: json_str = from_url('https://www.meethue.com/api/nupnp') except urllib.request.HTTPError as error: logger.error("Problem at portal: %s", error) raise except urllib.request.URLError as error: logger.warning("Problem reaching portal: %s", error) return [] else: portal_list = [] json_list = json.loads(json_str) for bridge in json_list: serial = bridge['id'] baseip = bridge['internalipaddress'] # baseip should look like "192.168.0.1" xmlurl = _build_from(baseip) # xmlurl should look like "http://192.168.0.1/description.xml" portal_list.append((serial, xmlurl)) return portal_list
python
def parse_portal_json(): """ Extract id, ip from https://www.meethue.com/api/nupnp Note: the ip is only the base and needs xml file appended, and the id is not exactly the same as the serial number in the xml """ try: json_str = from_url('https://www.meethue.com/api/nupnp') except urllib.request.HTTPError as error: logger.error("Problem at portal: %s", error) raise except urllib.request.URLError as error: logger.warning("Problem reaching portal: %s", error) return [] else: portal_list = [] json_list = json.loads(json_str) for bridge in json_list: serial = bridge['id'] baseip = bridge['internalipaddress'] # baseip should look like "192.168.0.1" xmlurl = _build_from(baseip) # xmlurl should look like "http://192.168.0.1/description.xml" portal_list.append((serial, xmlurl)) return portal_list
[ "def", "parse_portal_json", "(", ")", ":", "try", ":", "json_str", "=", "from_url", "(", "'https://www.meethue.com/api/nupnp'", ")", "except", "urllib", ".", "request", ".", "HTTPError", "as", "error", ":", "logger", ".", "error", "(", "\"Problem at portal: %s\"",...
Extract id, ip from https://www.meethue.com/api/nupnp Note: the ip is only the base and needs xml file appended, and the id is not exactly the same as the serial number in the xml
[ "Extract", "id", "ip", "from", "https", ":", "//", "www", ".", "meethue", ".", "com", "/", "api", "/", "nupnp" ]
train
https://github.com/Overboard/discoverhue/blob/81948ac8a37ec3d69b5608b3822b42a9bd4e0a7e/discoverhue/discoverhue.py#L122-L146
Overboard/discoverhue
discoverhue/discoverhue.py
via_upnp
def via_upnp(): """ Use SSDP as described by the Philips guide """ ssdp_list = ssdp_discover("ssdp:all", timeout=5) #import pickle #with open("ssdp.pickle", "wb") as f: #pickle.dump(ssdp_list,f) bridges_from_ssdp = [u for u in ssdp_list if 'IpBridge' in u.server] logger.info('SSDP returned %d items with %d Hue bridges(s).', len(ssdp_list), len(bridges_from_ssdp)) # Confirm SSDP gave an accessible bridge device by reading from the returned # location. Should look like: http://192.168.0.1:80/description.xml found_bridges = {} for bridge in bridges_from_ssdp: serial, bridge_info = parse_description_xml(bridge.location) if serial: found_bridges[serial] = bridge_info logger.debug('%s', found_bridges) if found_bridges: return found_bridges else: raise DiscoveryError('SSDP returned nothing')
python
def via_upnp(): """ Use SSDP as described by the Philips guide """ ssdp_list = ssdp_discover("ssdp:all", timeout=5) #import pickle #with open("ssdp.pickle", "wb") as f: #pickle.dump(ssdp_list,f) bridges_from_ssdp = [u for u in ssdp_list if 'IpBridge' in u.server] logger.info('SSDP returned %d items with %d Hue bridges(s).', len(ssdp_list), len(bridges_from_ssdp)) # Confirm SSDP gave an accessible bridge device by reading from the returned # location. Should look like: http://192.168.0.1:80/description.xml found_bridges = {} for bridge in bridges_from_ssdp: serial, bridge_info = parse_description_xml(bridge.location) if serial: found_bridges[serial] = bridge_info logger.debug('%s', found_bridges) if found_bridges: return found_bridges else: raise DiscoveryError('SSDP returned nothing')
[ "def", "via_upnp", "(", ")", ":", "ssdp_list", "=", "ssdp_discover", "(", "\"ssdp:all\"", ",", "timeout", "=", "5", ")", "#import pickle", "#with open(\"ssdp.pickle\", \"wb\") as f:", "#pickle.dump(ssdp_list,f)", "bridges_from_ssdp", "=", "[", "u", "for", "u", "in", ...
Use SSDP as described by the Philips guide
[ "Use", "SSDP", "as", "described", "by", "the", "Philips", "guide" ]
train
https://github.com/Overboard/discoverhue/blob/81948ac8a37ec3d69b5608b3822b42a9bd4e0a7e/discoverhue/discoverhue.py#L148-L169
Overboard/discoverhue
discoverhue/discoverhue.py
via_nupnp
def via_nupnp(): """ Use method 2 as described by the Philips guide """ bridges_from_portal = parse_portal_json() logger.info('Portal returned %d Hue bridges(s).', len(bridges_from_portal)) # Confirm Portal gave an accessible bridge device by reading from the returned # location. Should look like: http://192.168.0.1/description.xml found_bridges = {} for bridge in bridges_from_portal: serial, bridge_info = parse_description_xml(bridge[1]) if serial: found_bridges[serial] = bridge_info logger.debug('%s', found_bridges) if found_bridges: return found_bridges else: raise DiscoveryError('Portal returned nothing')
python
def via_nupnp(): """ Use method 2 as described by the Philips guide """ bridges_from_portal = parse_portal_json() logger.info('Portal returned %d Hue bridges(s).', len(bridges_from_portal)) # Confirm Portal gave an accessible bridge device by reading from the returned # location. Should look like: http://192.168.0.1/description.xml found_bridges = {} for bridge in bridges_from_portal: serial, bridge_info = parse_description_xml(bridge[1]) if serial: found_bridges[serial] = bridge_info logger.debug('%s', found_bridges) if found_bridges: return found_bridges else: raise DiscoveryError('Portal returned nothing')
[ "def", "via_nupnp", "(", ")", ":", "bridges_from_portal", "=", "parse_portal_json", "(", ")", "logger", ".", "info", "(", "'Portal returned %d Hue bridges(s).'", ",", "len", "(", "bridges_from_portal", ")", ")", "# Confirm Portal gave an accessible bridge device by reading ...
Use method 2 as described by the Philips guide
[ "Use", "method", "2", "as", "described", "by", "the", "Philips", "guide" ]
train
https://github.com/Overboard/discoverhue/blob/81948ac8a37ec3d69b5608b3822b42a9bd4e0a7e/discoverhue/discoverhue.py#L171-L188
Overboard/discoverhue
discoverhue/discoverhue.py
via_scan
def via_scan(): """ IP scan - now implemented """ import socket import ipaddress import httpfind bridges_from_scan = [] hosts = socket.gethostbyname_ex(socket.gethostname())[2] for host in hosts: bridges_from_scan += httpfind.survey( # TODO: how do we determine subnet configuration? ipaddress.ip_interface(host+'/24').network, path='description.xml', pattern='(P|p)hilips') logger.info('Scan on %s', host) logger.info('Scan returned %d Hue bridges(s).', len(bridges_from_scan)) # Confirm Scan gave an accessible bridge device by reading from the returned # location. Should look like: http://192.168.0.1/description.xml found_bridges = {} for bridge in bridges_from_scan: serial, bridge_info = parse_description_xml(bridge) if serial: found_bridges[serial] = bridge_info logger.debug('%s', found_bridges) if found_bridges: return found_bridges else: raise DiscoveryError('Scan returned nothing')
python
def via_scan(): """ IP scan - now implemented """ import socket import ipaddress import httpfind bridges_from_scan = [] hosts = socket.gethostbyname_ex(socket.gethostname())[2] for host in hosts: bridges_from_scan += httpfind.survey( # TODO: how do we determine subnet configuration? ipaddress.ip_interface(host+'/24').network, path='description.xml', pattern='(P|p)hilips') logger.info('Scan on %s', host) logger.info('Scan returned %d Hue bridges(s).', len(bridges_from_scan)) # Confirm Scan gave an accessible bridge device by reading from the returned # location. Should look like: http://192.168.0.1/description.xml found_bridges = {} for bridge in bridges_from_scan: serial, bridge_info = parse_description_xml(bridge) if serial: found_bridges[serial] = bridge_info logger.debug('%s', found_bridges) if found_bridges: return found_bridges else: raise DiscoveryError('Scan returned nothing')
[ "def", "via_scan", "(", ")", ":", "import", "socket", "import", "ipaddress", "import", "httpfind", "bridges_from_scan", "=", "[", "]", "hosts", "=", "socket", ".", "gethostbyname_ex", "(", "socket", ".", "gethostname", "(", ")", ")", "[", "2", "]", "for", ...
IP scan - now implemented
[ "IP", "scan", "-", "now", "implemented" ]
train
https://github.com/Overboard/discoverhue/blob/81948ac8a37ec3d69b5608b3822b42a9bd4e0a7e/discoverhue/discoverhue.py#L190-L217
Overboard/discoverhue
discoverhue/discoverhue.py
find_bridges
def find_bridges(prior_bridges=None): """ Confirm or locate IP addresses of Philips Hue bridges. `prior_bridges` -- optional list of bridge serial numbers * omitted - all discovered bridges returned as dictionary * single string - returns IP as string or None * dictionary - validate provided ip's before attempting discovery * collection or sequence - return dictionary of filtered sn:ip pairs * if mutable then found bridges are removed from argument """ found_bridges = {} # Validate caller's provided list try: prior_bridges_list = prior_bridges.items() except AttributeError: # if caller didnt provide dict then assume single SN or None # in either case, the discovery must be executed run_discovery = True else: for prior_sn, prior_ip in prior_bridges_list: if prior_ip: serial, baseip = parse_description_xml(_build_from(prior_ip)) if serial: # there is a bridge at provided IP, add to found found_bridges[serial] = baseip else: # nothing usable at that ip logger.info('%s not found at %s', prior_sn, prior_ip) run_discovery = found_bridges.keys() != prior_bridges.keys() # prior_bridges is None, unknown, dict of unfound SNs, or empty dict # found_bridges is dict of found SNs from prior, or empty dict if run_discovery: # do the discovery, not all IPs were confirmed try: found_bridges.update(via_upnp()) except DiscoveryError: try: found_bridges.update(via_nupnp()) except DiscoveryError: try: found_bridges.update(via_scan()) except DiscoveryError: logger.warning("All discovery methods returned nothing") if prior_bridges: # prior_bridges is either single SN or dict of unfound SNs # first assume single Serial SN string try: ip_address = found_bridges[prior_bridges] except TypeError: # user passed an invalid type for key # presumably it's a dict meant for alternate mode logger.debug('Assuming alternate mode, prior_bridges is type %s.', type(prior_bridges)) except KeyError: # user provided Serial Number was not found # TODO: dropping tuples here if return none executed # return 
None pass # let it turn the string into a set, eww else: # user provided Serial Number found return ip_address # Filter the found list to subset of prior prior_bridges_keys = set(prior_bridges) keys_to_remove = prior_bridges_keys ^ found_bridges.keys() logger.debug('Removing %s from found_bridges', keys_to_remove) for key in keys_to_remove: found_bridges.pop(key, None) # Filter the prior dict to unfound only keys_to_remove = prior_bridges_keys & found_bridges.keys() logger.debug('Removing %s from prior_bridges', keys_to_remove) for key in keys_to_remove: try: prior_bridges.pop(key, None) except TypeError: # not a dict, try as set or list prior_bridges.remove(key) except AttributeError: # likely not mutable break keys_to_report = prior_bridges_keys - found_bridges.keys() for serial in keys_to_report: logger.warning('Could not locate bridge with Serial ID %s', serial) else: # prior_bridges is None or empty dict, return all found pass return found_bridges
python
def find_bridges(prior_bridges=None): """ Confirm or locate IP addresses of Philips Hue bridges. `prior_bridges` -- optional list of bridge serial numbers * omitted - all discovered bridges returned as dictionary * single string - returns IP as string or None * dictionary - validate provided ip's before attempting discovery * collection or sequence - return dictionary of filtered sn:ip pairs * if mutable then found bridges are removed from argument """ found_bridges = {} # Validate caller's provided list try: prior_bridges_list = prior_bridges.items() except AttributeError: # if caller didnt provide dict then assume single SN or None # in either case, the discovery must be executed run_discovery = True else: for prior_sn, prior_ip in prior_bridges_list: if prior_ip: serial, baseip = parse_description_xml(_build_from(prior_ip)) if serial: # there is a bridge at provided IP, add to found found_bridges[serial] = baseip else: # nothing usable at that ip logger.info('%s not found at %s', prior_sn, prior_ip) run_discovery = found_bridges.keys() != prior_bridges.keys() # prior_bridges is None, unknown, dict of unfound SNs, or empty dict # found_bridges is dict of found SNs from prior, or empty dict if run_discovery: # do the discovery, not all IPs were confirmed try: found_bridges.update(via_upnp()) except DiscoveryError: try: found_bridges.update(via_nupnp()) except DiscoveryError: try: found_bridges.update(via_scan()) except DiscoveryError: logger.warning("All discovery methods returned nothing") if prior_bridges: # prior_bridges is either single SN or dict of unfound SNs # first assume single Serial SN string try: ip_address = found_bridges[prior_bridges] except TypeError: # user passed an invalid type for key # presumably it's a dict meant for alternate mode logger.debug('Assuming alternate mode, prior_bridges is type %s.', type(prior_bridges)) except KeyError: # user provided Serial Number was not found # TODO: dropping tuples here if return none executed # return 
None pass # let it turn the string into a set, eww else: # user provided Serial Number found return ip_address # Filter the found list to subset of prior prior_bridges_keys = set(prior_bridges) keys_to_remove = prior_bridges_keys ^ found_bridges.keys() logger.debug('Removing %s from found_bridges', keys_to_remove) for key in keys_to_remove: found_bridges.pop(key, None) # Filter the prior dict to unfound only keys_to_remove = prior_bridges_keys & found_bridges.keys() logger.debug('Removing %s from prior_bridges', keys_to_remove) for key in keys_to_remove: try: prior_bridges.pop(key, None) except TypeError: # not a dict, try as set or list prior_bridges.remove(key) except AttributeError: # likely not mutable break keys_to_report = prior_bridges_keys - found_bridges.keys() for serial in keys_to_report: logger.warning('Could not locate bridge with Serial ID %s', serial) else: # prior_bridges is None or empty dict, return all found pass return found_bridges
[ "def", "find_bridges", "(", "prior_bridges", "=", "None", ")", ":", "found_bridges", "=", "{", "}", "# Validate caller's provided list", "try", ":", "prior_bridges_list", "=", "prior_bridges", ".", "items", "(", ")", "except", "AttributeError", ":", "# if caller did...
Confirm or locate IP addresses of Philips Hue bridges. `prior_bridges` -- optional list of bridge serial numbers * omitted - all discovered bridges returned as dictionary * single string - returns IP as string or None * dictionary - validate provided ip's before attempting discovery * collection or sequence - return dictionary of filtered sn:ip pairs * if mutable then found bridges are removed from argument
[ "Confirm", "or", "locate", "IP", "addresses", "of", "Philips", "Hue", "bridges", "." ]
train
https://github.com/Overboard/discoverhue/blob/81948ac8a37ec3d69b5608b3822b42a9bd4e0a7e/discoverhue/discoverhue.py#L221-L314
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/conglomerators/bodhi/requests.py
ByPackage.matches
def matches(self, a, b, **config): """ The message must match by package """ package_a = self.processor._u2p(a['msg']['update']['title'])[0] package_b = self.processor._u2p(b['msg']['update']['title'])[0] if package_a != package_b: return False return True
python
def matches(self, a, b, **config): """ The message must match by package """ package_a = self.processor._u2p(a['msg']['update']['title'])[0] package_b = self.processor._u2p(b['msg']['update']['title'])[0] if package_a != package_b: return False return True
[ "def", "matches", "(", "self", ",", "a", ",", "b", ",", "*", "*", "config", ")", ":", "package_a", "=", "self", ".", "processor", ".", "_u2p", "(", "a", "[", "'msg'", "]", "[", "'update'", "]", "[", "'title'", "]", ")", "[", "0", "]", "package_...
The message must match by package
[ "The", "message", "must", "match", "by", "package" ]
train
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/conglomerators/bodhi/requests.py#L82-L88
chbrown/pi
pi/commands/install.py
cli
def cli(parser): ''' Currently a cop-out -- just calls easy_install ''' parser.add_argument('-n', '--dry-run', action='store_true', help='Print uninstall actions without running') parser.add_argument('packages', nargs='+', help='Packages to install') opts = parser.parse_args() for package in opts.packages: install(package, execute=not opts.dry_run)
python
def cli(parser): ''' Currently a cop-out -- just calls easy_install ''' parser.add_argument('-n', '--dry-run', action='store_true', help='Print uninstall actions without running') parser.add_argument('packages', nargs='+', help='Packages to install') opts = parser.parse_args() for package in opts.packages: install(package, execute=not opts.dry_run)
[ "def", "cli", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'-n'", ",", "'--dry-run'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Print uninstall actions without running'", ")", "parser", ".", "add_argument", "(", "'packages'", ",", ...
Currently a cop-out -- just calls easy_install
[ "Currently", "a", "cop", "-", "out", "--", "just", "calls", "easy_install" ]
train
https://github.com/chbrown/pi/blob/a3661eccf1c6f0105e34a0ee24328022bf4e6b92/pi/commands/install.py#L14-L23
rikonor/mdict
mdict.py
mget
def mget(m_dict, keys, default=None, delimiter=':'): """ :param m_dict: A dictionary to search inside of :type m_dict: dict :param keys: A list of keys :type keys: str :param default: A default value to return if none found :param delimiter: The delimiter used in the keys list :type delimiter: str :return: The value according to the keys list """ val = m_dict keys = keys.split(delimiter) for key in keys: try: val = val[key] except KeyError: return default except TypeError: return default return val
python
def mget(m_dict, keys, default=None, delimiter=':'): """ :param m_dict: A dictionary to search inside of :type m_dict: dict :param keys: A list of keys :type keys: str :param default: A default value to return if none found :param delimiter: The delimiter used in the keys list :type delimiter: str :return: The value according to the keys list """ val = m_dict keys = keys.split(delimiter) for key in keys: try: val = val[key] except KeyError: return default except TypeError: return default return val
[ "def", "mget", "(", "m_dict", ",", "keys", ",", "default", "=", "None", ",", "delimiter", "=", "':'", ")", ":", "val", "=", "m_dict", "keys", "=", "keys", ".", "split", "(", "delimiter", ")", "for", "key", "in", "keys", ":", "try", ":", "val", "=...
:param m_dict: A dictionary to search inside of :type m_dict: dict :param keys: A list of keys :type keys: str :param default: A default value to return if none found :param delimiter: The delimiter used in the keys list :type delimiter: str :return: The value according to the keys list
[ ":", "param", "m_dict", ":", "A", "dictionary", "to", "search", "inside", "of", ":", "type", "m_dict", ":", "dict", ":", "param", "keys", ":", "A", "list", "of", "keys", ":", "type", "keys", ":", "str", ":", "param", "default", ":", "A", "default", ...
train
https://github.com/rikonor/mdict/blob/8671a31dd1429fdb1522d3b96509da789173b904/mdict.py#L1-L21
rikonor/mdict
mdict.py
mset
def mset(m_dict, keys, value, delimiter=':'): """ :param m_dict: A dictionary to set the value inside of :type m_dict: dict :param keys: A list of keys :type keys: str :param value: The value to set inside of the dictionary :param delimiter: The delimiter used in the keys list :type delimiter: str """ val = m_dict keys = keys.split(delimiter) for i, key in enumerate(keys): try: if i == len(keys) - 1: val[key] = value return else: val = val[key] except KeyError: if i == len(keys) - 1: val[key] = value return else: val[key] = {} val = val[key]
python
def mset(m_dict, keys, value, delimiter=':'): """ :param m_dict: A dictionary to set the value inside of :type m_dict: dict :param keys: A list of keys :type keys: str :param value: The value to set inside of the dictionary :param delimiter: The delimiter used in the keys list :type delimiter: str """ val = m_dict keys = keys.split(delimiter) for i, key in enumerate(keys): try: if i == len(keys) - 1: val[key] = value return else: val = val[key] except KeyError: if i == len(keys) - 1: val[key] = value return else: val[key] = {} val = val[key]
[ "def", "mset", "(", "m_dict", ",", "keys", ",", "value", ",", "delimiter", "=", "':'", ")", ":", "val", "=", "m_dict", "keys", "=", "keys", ".", "split", "(", "delimiter", ")", "for", "i", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "tr...
:param m_dict: A dictionary to set the value inside of :type m_dict: dict :param keys: A list of keys :type keys: str :param value: The value to set inside of the dictionary :param delimiter: The delimiter used in the keys list :type delimiter: str
[ ":", "param", "m_dict", ":", "A", "dictionary", "to", "set", "the", "value", "inside", "of", ":", "type", "m_dict", ":", "dict", ":", "param", "keys", ":", "A", "list", "of", "keys", ":", "type", "keys", ":", "str", ":", "param", "value", ":", "The...
train
https://github.com/rikonor/mdict/blob/8671a31dd1429fdb1522d3b96509da789173b904/mdict.py#L24-L49
snipsco/snipsmanagercore
snipsmanagercore/instant_time.py
InstantTime.parse_grain
def parse_grain(grain): """ Parse a string to a granularity, e.g. "Day" to InstantTime.day. :param grain: a string representing a granularity. """ if not grain: return InstantTime.day if grain.lower() == 'week': return InstantTime.week return InstantTime.day
python
def parse_grain(grain): """ Parse a string to a granularity, e.g. "Day" to InstantTime.day. :param grain: a string representing a granularity. """ if not grain: return InstantTime.day if grain.lower() == 'week': return InstantTime.week return InstantTime.day
[ "def", "parse_grain", "(", "grain", ")", ":", "if", "not", "grain", ":", "return", "InstantTime", ".", "day", "if", "grain", ".", "lower", "(", ")", "==", "'week'", ":", "return", "InstantTime", ".", "week", "return", "InstantTime", ".", "day" ]
Parse a string to a granularity, e.g. "Day" to InstantTime.day. :param grain: a string representing a granularity.
[ "Parse", "a", "string", "to", "a", "granularity", "e", ".", "g", ".", "Day", "to", "InstantTime", ".", "day", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/instant_time.py#L23-L32
yunojuno-archive/django-package-monitor
package_monitor/models.py
PackageVersion.update_from_pypi
def update_from_pypi(self): """Call get_latest_version and then save the object.""" package = pypi.Package(self.package_name) self.licence = package.licence() if self.is_parseable: self.latest_version = package.latest_version() self.next_version = package.next_version(self.current_version) self.diff_status = pypi.version_diff(self.current_version, self.latest_version) self.python_support = package.python_support() self.django_support = package.django_support() self.supports_py3 = package.supports_py3() self.checked_pypi_at = tz_now() self.save() return self
python
def update_from_pypi(self): """Call get_latest_version and then save the object.""" package = pypi.Package(self.package_name) self.licence = package.licence() if self.is_parseable: self.latest_version = package.latest_version() self.next_version = package.next_version(self.current_version) self.diff_status = pypi.version_diff(self.current_version, self.latest_version) self.python_support = package.python_support() self.django_support = package.django_support() self.supports_py3 = package.supports_py3() self.checked_pypi_at = tz_now() self.save() return self
[ "def", "update_from_pypi", "(", "self", ")", ":", "package", "=", "pypi", ".", "Package", "(", "self", ".", "package_name", ")", "self", ".", "licence", "=", "package", ".", "licence", "(", ")", "if", "self", ".", "is_parseable", ":", "self", ".", "lat...
Call get_latest_version and then save the object.
[ "Call", "get_latest_version", "and", "then", "save", "the", "object", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/models.py#L130-L143
akissa/spamc
spamc/conn.py
Connector.close
def close(self): """close conn""" if not self._s or not hasattr(self._s, "close"): return try: self._s.close() except BaseException: pass
python
def close(self): """close conn""" if not self._s or not hasattr(self._s, "close"): return try: self._s.close() except BaseException: pass
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_s", "or", "not", "hasattr", "(", "self", ".", "_s", ",", "\"close\"", ")", ":", "return", "try", ":", "self", ".", "_s", ".", "close", "(", ")", "except", "BaseException", ":", "p...
close conn
[ "close", "conn" ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/conn.py#L59-L66
akissa/spamc
spamc/conn.py
Connector.sendfile
def sendfile(self, data, zlib_compress=None, compress_level=6): """Send data from a file object""" if hasattr(data, 'seek'): data.seek(0) chunk_size = CHUNK_SIZE if zlib_compress: chunk_size = BLOCK_SIZE compressor = compressobj(compress_level) while 1: binarydata = data.read(chunk_size) if binarydata == '': break if zlib_compress: binarydata = compressor.compress(binarydata) if not binarydata: continue self.send(binarydata) if zlib_compress: remaining = compressor.flush() while remaining: binarydata = remaining[:BLOCK_SIZE] remaining = remaining[BLOCK_SIZE:] self.send(binarydata)
python
def sendfile(self, data, zlib_compress=None, compress_level=6): """Send data from a file object""" if hasattr(data, 'seek'): data.seek(0) chunk_size = CHUNK_SIZE if zlib_compress: chunk_size = BLOCK_SIZE compressor = compressobj(compress_level) while 1: binarydata = data.read(chunk_size) if binarydata == '': break if zlib_compress: binarydata = compressor.compress(binarydata) if not binarydata: continue self.send(binarydata) if zlib_compress: remaining = compressor.flush() while remaining: binarydata = remaining[:BLOCK_SIZE] remaining = remaining[BLOCK_SIZE:] self.send(binarydata)
[ "def", "sendfile", "(", "self", ",", "data", ",", "zlib_compress", "=", "None", ",", "compress_level", "=", "6", ")", ":", "if", "hasattr", "(", "data", ",", "'seek'", ")", ":", "data", ".", "seek", "(", "0", ")", "chunk_size", "=", "CHUNK_SIZE", "if...
Send data from a file object
[ "Send", "data", "from", "a", "file", "object" ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/conn.py#L81-L107
pletzer/pnumpy
src/pnGhostedDistArray.py
ghostedDistArrayFactory
def ghostedDistArrayFactory(BaseClass): """ Returns a ghosted distributed array class that derives from BaseClass @param BaseClass base class, e.g. DistArray or MaskedDistArray @return ghosted dist array class """ class GhostedDistArrayAny(BaseClass): """ Ghosted distributed array. Each process owns data and exposes the halo region to other processes. These are accessed with tuples such (1, 0) for north, (-1, 0) for south, etc. """ def __init__(self, shape, dtype): """ Constructor @param shape shape of the array @param dtype numpy data type @param numGhosts the width of the halo """ # call the parent Ctor BaseClass.__init__(self, shape, dtype) def setNumberOfGhosts(self, numGhosts): """ Set the width of the ghost halo @param numGhosts halo thickness """ # expose each window to other PE domains ndim = len(self.shape) for dim in range(ndim): for drect in (-1, 1): # the window id uniquely specifies the # location of the window. we use 0's to indicate # a slab extending over the entire length for a # given direction, a 1 represents a layer of # thickness numGhosts on the high index side, # -1 on the low index side. winId = tuple([0 for i in range(dim)] + [drect] + [0 for i in range(dim+1, ndim)]) slce = slice(0, numGhosts) if drect == 1: slce = slice(self.shape[dim] - numGhosts, self.shape[dim]) slab = self.getSlab(dim, slce) # expose MPI window self.expose(slab, winId) def getSlab(self, dim, slce): """ Get slab. A slab is a multi-dimensional slice extending in all directions except along dim where slce applies @param dim dimension (0=first index, 1=2nd index...) @param slce python slice object along dimension dim @return slab """ shape = self.shape ndim = len(shape) slab = [slice(0, shape[i]) for i in range(dim)] + \ [slce] + [slice(0, shape[i]) for i in range(dim+1, ndim)] return slab def getEllipsis(self, winID): """ Get the ellipsis for a given halo side @param winID a tuple of zeros and one +1 or -1. 
To access the "north" side for instance, set side=(1, 0), (-1, 0) to access the south side, (0, 1) the east side, etc. This does not involve any communication. @return None if halo was not exposed (bad winID) """ if winID in self.windows: return self.windows[winID]['slice'] else: return None return GhostedDistArrayAny
python
def ghostedDistArrayFactory(BaseClass): """ Returns a ghosted distributed array class that derives from BaseClass @param BaseClass base class, e.g. DistArray or MaskedDistArray @return ghosted dist array class """ class GhostedDistArrayAny(BaseClass): """ Ghosted distributed array. Each process owns data and exposes the halo region to other processes. These are accessed with tuples such (1, 0) for north, (-1, 0) for south, etc. """ def __init__(self, shape, dtype): """ Constructor @param shape shape of the array @param dtype numpy data type @param numGhosts the width of the halo """ # call the parent Ctor BaseClass.__init__(self, shape, dtype) def setNumberOfGhosts(self, numGhosts): """ Set the width of the ghost halo @param numGhosts halo thickness """ # expose each window to other PE domains ndim = len(self.shape) for dim in range(ndim): for drect in (-1, 1): # the window id uniquely specifies the # location of the window. we use 0's to indicate # a slab extending over the entire length for a # given direction, a 1 represents a layer of # thickness numGhosts on the high index side, # -1 on the low index side. winId = tuple([0 for i in range(dim)] + [drect] + [0 for i in range(dim+1, ndim)]) slce = slice(0, numGhosts) if drect == 1: slce = slice(self.shape[dim] - numGhosts, self.shape[dim]) slab = self.getSlab(dim, slce) # expose MPI window self.expose(slab, winId) def getSlab(self, dim, slce): """ Get slab. A slab is a multi-dimensional slice extending in all directions except along dim where slce applies @param dim dimension (0=first index, 1=2nd index...) @param slce python slice object along dimension dim @return slab """ shape = self.shape ndim = len(shape) slab = [slice(0, shape[i]) for i in range(dim)] + \ [slce] + [slice(0, shape[i]) for i in range(dim+1, ndim)] return slab def getEllipsis(self, winID): """ Get the ellipsis for a given halo side @param winID a tuple of zeros and one +1 or -1. 
To access the "north" side for instance, set side=(1, 0), (-1, 0) to access the south side, (0, 1) the east side, etc. This does not involve any communication. @return None if halo was not exposed (bad winID) """ if winID in self.windows: return self.windows[winID]['slice'] else: return None return GhostedDistArrayAny
[ "def", "ghostedDistArrayFactory", "(", "BaseClass", ")", ":", "class", "GhostedDistArrayAny", "(", "BaseClass", ")", ":", "\"\"\"\n Ghosted distributed array. Each process owns data and exposes the\n halo region to other processes. These are accessed with tuples\n such (...
Returns a ghosted distributed array class that derives from BaseClass @param BaseClass base class, e.g. DistArray or MaskedDistArray @return ghosted dist array class
[ "Returns", "a", "ghosted", "distributed", "array", "class", "that", "derives", "from", "BaseClass" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L11-L95
pletzer/pnumpy
src/pnGhostedDistArray.py
gdaArray
def gdaArray(arry, dtype, numGhosts=1): """ ghosted distributed array constructor @param arry numpy-like array @param numGhosts the number of ghosts (>= 0) """ a = numpy.array(arry, dtype) res = GhostedDistArray(a.shape, a.dtype) res.setNumberOfGhosts(numGhosts) res[:] = a return res
python
def gdaArray(arry, dtype, numGhosts=1): """ ghosted distributed array constructor @param arry numpy-like array @param numGhosts the number of ghosts (>= 0) """ a = numpy.array(arry, dtype) res = GhostedDistArray(a.shape, a.dtype) res.setNumberOfGhosts(numGhosts) res[:] = a return res
[ "def", "gdaArray", "(", "arry", ",", "dtype", ",", "numGhosts", "=", "1", ")", ":", "a", "=", "numpy", ".", "array", "(", "arry", ",", "dtype", ")", "res", "=", "GhostedDistArray", "(", "a", ".", "shape", ",", "a", ".", "dtype", ")", "res", ".", ...
ghosted distributed array constructor @param arry numpy-like array @param numGhosts the number of ghosts (>= 0)
[ "ghosted", "distributed", "array", "constructor" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L105-L115
pletzer/pnumpy
src/pnGhostedDistArray.py
gdaZeros
def gdaZeros(shape, dtype, numGhosts=1): """ ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedDistArray(shape, dtype) res.setNumberOfGhosts(numGhosts) res[:] = 0 return res
python
def gdaZeros(shape, dtype, numGhosts=1): """ ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedDistArray(shape, dtype) res.setNumberOfGhosts(numGhosts) res[:] = 0 return res
[ "def", "gdaZeros", "(", "shape", ",", "dtype", ",", "numGhosts", "=", "1", ")", ":", "res", "=", "GhostedDistArray", "(", "shape", ",", "dtype", ")", "res", ".", "setNumberOfGhosts", "(", "numGhosts", ")", "res", "[", ":", "]", "=", "0", "return", "r...
ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
[ "ghosted", "distributed", "array", "zero", "constructor" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L118-L128
pletzer/pnumpy
src/pnGhostedDistArray.py
gdaOnes
def gdaOnes(shape, dtype, numGhosts=1): """ ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedDistArray(shape, dtype) res.setNumberOfGhosts(numGhosts) res[:] = 1 return res
python
def gdaOnes(shape, dtype, numGhosts=1): """ ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedDistArray(shape, dtype) res.setNumberOfGhosts(numGhosts) res[:] = 1 return res
[ "def", "gdaOnes", "(", "shape", ",", "dtype", ",", "numGhosts", "=", "1", ")", ":", "res", "=", "GhostedDistArray", "(", "shape", ",", "dtype", ")", "res", ".", "setNumberOfGhosts", "(", "numGhosts", ")", "res", "[", ":", "]", "=", "1", "return", "re...
ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
[ "ghosted", "distributed", "array", "one", "constructor" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L131-L141
pletzer/pnumpy
src/pnGhostedDistArray.py
gmdaArray
def gmdaArray(arry, dtype, mask=None, numGhosts=1): """ ghosted distributed array constructor @param arry numpy-like array @param numGhosts the number of ghosts (>= 0) """ a = numpy.array(arry, dtype) res = GhostedMaskedDistArray(a.shape, a.dtype) res.mask = mask res.setNumberOfGhosts(numGhosts) res[:] = a return res
python
def gmdaArray(arry, dtype, mask=None, numGhosts=1): """ ghosted distributed array constructor @param arry numpy-like array @param numGhosts the number of ghosts (>= 0) """ a = numpy.array(arry, dtype) res = GhostedMaskedDistArray(a.shape, a.dtype) res.mask = mask res.setNumberOfGhosts(numGhosts) res[:] = a return res
[ "def", "gmdaArray", "(", "arry", ",", "dtype", ",", "mask", "=", "None", ",", "numGhosts", "=", "1", ")", ":", "a", "=", "numpy", ".", "array", "(", "arry", ",", "dtype", ")", "res", "=", "GhostedMaskedDistArray", "(", "a", ".", "shape", ",", "a", ...
ghosted distributed array constructor @param arry numpy-like array @param numGhosts the number of ghosts (>= 0)
[ "ghosted", "distributed", "array", "constructor" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L147-L158
pletzer/pnumpy
src/pnGhostedDistArray.py
gmdaZeros
def gmdaZeros(shape, dtype, mask=None, numGhosts=1): """ ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedMaskedDistArray(shape, dtype) res.mas = mask res.setNumberOfGhosts(numGhosts) res[:] = 0 return res
python
def gmdaZeros(shape, dtype, mask=None, numGhosts=1): """ ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedMaskedDistArray(shape, dtype) res.mas = mask res.setNumberOfGhosts(numGhosts) res[:] = 0 return res
[ "def", "gmdaZeros", "(", "shape", ",", "dtype", ",", "mask", "=", "None", ",", "numGhosts", "=", "1", ")", ":", "res", "=", "GhostedMaskedDistArray", "(", "shape", ",", "dtype", ")", "res", ".", "mas", "=", "mask", "res", ".", "setNumberOfGhosts", "(",...
ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
[ "ghosted", "distributed", "array", "zero", "constructor" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L161-L172
pletzer/pnumpy
src/pnGhostedDistArray.py
gmdaOnes
def gmdaOnes(shape, dtype, mask=None, numGhosts=1): """ ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedMaskedDistArray(shape, dtype) res.mask = mask res.setNumberOfGhosts(numGhosts) res[:] = 1 return res
python
def gmdaOnes(shape, dtype, mask=None, numGhosts=1): """ ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedMaskedDistArray(shape, dtype) res.mask = mask res.setNumberOfGhosts(numGhosts) res[:] = 1 return res
[ "def", "gmdaOnes", "(", "shape", ",", "dtype", ",", "mask", "=", "None", ",", "numGhosts", "=", "1", ")", ":", "res", "=", "GhostedMaskedDistArray", "(", "shape", ",", "dtype", ")", "res", ".", "mask", "=", "mask", "res", ".", "setNumberOfGhosts", "(",...
ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
[ "ghosted", "distributed", "array", "one", "constructor" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L175-L186
caktus/django-sticky-uploads
stickyuploads/utils.py
serialize_upload
def serialize_upload(name, storage, url): """ Serialize uploaded file by name and storage. Namespaced by the upload url. """ if isinstance(storage, LazyObject): # Unwrap lazy storage class storage._setup() cls = storage._wrapped.__class__ else: cls = storage.__class__ return signing.dumps({ 'name': name, 'storage': '%s.%s' % (cls.__module__, cls.__name__) }, salt=url)
python
def serialize_upload(name, storage, url): """ Serialize uploaded file by name and storage. Namespaced by the upload url. """ if isinstance(storage, LazyObject): # Unwrap lazy storage class storage._setup() cls = storage._wrapped.__class__ else: cls = storage.__class__ return signing.dumps({ 'name': name, 'storage': '%s.%s' % (cls.__module__, cls.__name__) }, salt=url)
[ "def", "serialize_upload", "(", "name", ",", "storage", ",", "url", ")", ":", "if", "isinstance", "(", "storage", ",", "LazyObject", ")", ":", "# Unwrap lazy storage class", "storage", ".", "_setup", "(", ")", "cls", "=", "storage", ".", "_wrapped", ".", "...
Serialize uploaded file by name and storage. Namespaced by the upload url.
[ "Serialize", "uploaded", "file", "by", "name", "and", "storage", ".", "Namespaced", "by", "the", "upload", "url", "." ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/utils.py#L11-L24
caktus/django-sticky-uploads
stickyuploads/utils.py
deserialize_upload
def deserialize_upload(value, url): """ Restore file and name and storage from serialized value and the upload url. """ result = {'name': None, 'storage': None} try: result = signing.loads(value, salt=url) except signing.BadSignature: # TODO: Log invalid signature pass else: try: result['storage'] = get_storage_class(result['storage']) except (ImproperlyConfigured, ImportError): # TODO: Log invalid class result = {'name': None, 'storage': None} return result
python
def deserialize_upload(value, url): """ Restore file and name and storage from serialized value and the upload url. """ result = {'name': None, 'storage': None} try: result = signing.loads(value, salt=url) except signing.BadSignature: # TODO: Log invalid signature pass else: try: result['storage'] = get_storage_class(result['storage']) except (ImproperlyConfigured, ImportError): # TODO: Log invalid class result = {'name': None, 'storage': None} return result
[ "def", "deserialize_upload", "(", "value", ",", "url", ")", ":", "result", "=", "{", "'name'", ":", "None", ",", "'storage'", ":", "None", "}", "try", ":", "result", "=", "signing", ".", "loads", "(", "value", ",", "salt", "=", "url", ")", "except", ...
Restore file and name and storage from serialized value and the upload url.
[ "Restore", "file", "and", "name", "and", "storage", "from", "serialized", "value", "and", "the", "upload", "url", "." ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/utils.py#L27-L43
caktus/django-sticky-uploads
stickyuploads/utils.py
open_stored_file
def open_stored_file(value, url): """ Deserialize value for a given upload url and return open file. Returns None if deserialization fails. """ upload = None result = deserialize_upload(value, url) filename = result['name'] storage_class = result['storage'] if storage_class and filename: storage = storage_class() if storage.exists(filename): upload = storage.open(filename) upload.name = os.path.basename(filename) return upload
python
def open_stored_file(value, url): """ Deserialize value for a given upload url and return open file. Returns None if deserialization fails. """ upload = None result = deserialize_upload(value, url) filename = result['name'] storage_class = result['storage'] if storage_class and filename: storage = storage_class() if storage.exists(filename): upload = storage.open(filename) upload.name = os.path.basename(filename) return upload
[ "def", "open_stored_file", "(", "value", ",", "url", ")", ":", "upload", "=", "None", "result", "=", "deserialize_upload", "(", "value", ",", "url", ")", "filename", "=", "result", "[", "'name'", "]", "storage_class", "=", "result", "[", "'storage'", "]", ...
Deserialize value for a given upload url and return open file. Returns None if deserialization fails.
[ "Deserialize", "value", "for", "a", "given", "upload", "url", "and", "return", "open", "file", ".", "Returns", "None", "if", "deserialization", "fails", "." ]
train
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/utils.py#L46-L60
akissa/spamc
spamc/client.py
_check_action
def _check_action(action): """check for invalid actions""" if isinstance(action, types.StringTypes): action = action.lower() if action not in ['learn', 'forget', 'report', 'revoke']: raise SpamCError('The action option is invalid') return action
python
def _check_action(action): """check for invalid actions""" if isinstance(action, types.StringTypes): action = action.lower() if action not in ['learn', 'forget', 'report', 'revoke']: raise SpamCError('The action option is invalid') return action
[ "def", "_check_action", "(", "action", ")", ":", "if", "isinstance", "(", "action", ",", "types", ".", "StringTypes", ")", ":", "action", "=", "action", ".", "lower", "(", ")", "if", "action", "not", "in", "[", "'learn'", ",", "'forget'", ",", "'report...
check for invalid actions
[ "check", "for", "invalid", "actions" ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/client.py#L40-L47
akissa/spamc
spamc/client.py
get_response
def get_response(cmd, conn): """Return a response""" resp = conn.socket().makefile('rb', -1) resp_dict = dict( code=0, message='', isspam=False, score=0.0, basescore=0.0, report=[], symbols=[], headers={}, ) if cmd == 'TELL': resp_dict['didset'] = False resp_dict['didremove'] = False data = resp.read() lines = data.split('\r\n') for index, line in enumerate(lines): if index == 0: match = RESPONSE_RE.match(line) if not match: raise SpamCResponseError( 'spamd unrecognized response: %s' % data) resp_dict.update(match.groupdict()) resp_dict['code'] = int(resp_dict['code']) else: if not line.strip(): continue match = SPAM_RE.match(line) if match: tmp = match.groupdict() resp_dict['score'] = float(tmp['score']) resp_dict['basescore'] = float(tmp['basescore']) resp_dict['isspam'] = tmp['isspam'] in ['True', 'Yes'] if not match: if cmd == 'SYMBOLS': match = PART_RE.findall(line) for part in match: resp_dict['symbols'].append(part) if not match and cmd != 'PROCESS': match = RULE_RE.findall(line) if match: resp_dict['report'] = [] for part in match: score = part[0] + part[1] score = score.strip() resp_dict['report'].append( dict(score=score, name=part[2], description=SPACE_RE.sub(" ", part[3]))) if line.startswith('DidSet:'): resp_dict['didset'] = True if line.startswith('DidRemove:'): resp_dict['didremove'] = True if cmd == 'PROCESS': resp_dict['message'] = ''.join(lines[4:]) + '\r\n' if cmd == 'HEADERS': parser = Parser() headers = parser.parsestr('\r\n'.join(lines[4:]), headersonly=True) for key in headers.keys(): resp_dict['headers'][key] = headers[key] return resp_dict
python
def get_response(cmd, conn): """Return a response""" resp = conn.socket().makefile('rb', -1) resp_dict = dict( code=0, message='', isspam=False, score=0.0, basescore=0.0, report=[], symbols=[], headers={}, ) if cmd == 'TELL': resp_dict['didset'] = False resp_dict['didremove'] = False data = resp.read() lines = data.split('\r\n') for index, line in enumerate(lines): if index == 0: match = RESPONSE_RE.match(line) if not match: raise SpamCResponseError( 'spamd unrecognized response: %s' % data) resp_dict.update(match.groupdict()) resp_dict['code'] = int(resp_dict['code']) else: if not line.strip(): continue match = SPAM_RE.match(line) if match: tmp = match.groupdict() resp_dict['score'] = float(tmp['score']) resp_dict['basescore'] = float(tmp['basescore']) resp_dict['isspam'] = tmp['isspam'] in ['True', 'Yes'] if not match: if cmd == 'SYMBOLS': match = PART_RE.findall(line) for part in match: resp_dict['symbols'].append(part) if not match and cmd != 'PROCESS': match = RULE_RE.findall(line) if match: resp_dict['report'] = [] for part in match: score = part[0] + part[1] score = score.strip() resp_dict['report'].append( dict(score=score, name=part[2], description=SPACE_RE.sub(" ", part[3]))) if line.startswith('DidSet:'): resp_dict['didset'] = True if line.startswith('DidRemove:'): resp_dict['didremove'] = True if cmd == 'PROCESS': resp_dict['message'] = ''.join(lines[4:]) + '\r\n' if cmd == 'HEADERS': parser = Parser() headers = parser.parsestr('\r\n'.join(lines[4:]), headersonly=True) for key in headers.keys(): resp_dict['headers'][key] = headers[key] return resp_dict
[ "def", "get_response", "(", "cmd", ",", "conn", ")", ":", "resp", "=", "conn", ".", "socket", "(", ")", ".", "makefile", "(", "'rb'", ",", "-", "1", ")", "resp_dict", "=", "dict", "(", "code", "=", "0", ",", "message", "=", "''", ",", "isspam", ...
Return a response
[ "Return", "a", "response" ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/client.py#L51-L115
akissa/spamc
spamc/client.py
SpamC.get_connection
def get_connection(self): """Creates a new connection""" if self.host is None: connector = SpamCUnixConnector conn = connector(self.socket_file, self.backend_mod) else: connector = SpamCTcpConnector conn = connector( self.host, self.port, self.backend_mod, is_ssl=self.is_ssl, **self.ssl_args) return conn
python
def get_connection(self): """Creates a new connection""" if self.host is None: connector = SpamCUnixConnector conn = connector(self.socket_file, self.backend_mod) else: connector = SpamCTcpConnector conn = connector( self.host, self.port, self.backend_mod, is_ssl=self.is_ssl, **self.ssl_args) return conn
[ "def", "get_connection", "(", "self", ")", ":", "if", "self", ".", "host", "is", "None", ":", "connector", "=", "SpamCUnixConnector", "conn", "=", "connector", "(", "self", ".", "socket_file", ",", "self", ".", "backend_mod", ")", "else", ":", "connector",...
Creates a new connection
[ "Creates", "a", "new", "connection" ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/client.py#L153-L166
akissa/spamc
spamc/client.py
SpamC.get_headers
def get_headers(self, cmd, msg_length, extra_headers): """Returns the headers string based on command to execute""" cmd_header = "%s %s" % (cmd, PROTOCOL_VERSION) len_header = "Content-length: %s" % msg_length headers = [cmd_header, len_header] if self.user: user_header = "User: %s" % self.user headers.append(user_header) if self.gzip: headers.append("Compress: zlib") if extra_headers is not None: for key in extra_headers: if key.lower() != 'content-length': headers.append("%s: %s" % (key, extra_headers[key])) headers.append('') headers.append('') return '\r\n'.join(headers)
python
def get_headers(self, cmd, msg_length, extra_headers): """Returns the headers string based on command to execute""" cmd_header = "%s %s" % (cmd, PROTOCOL_VERSION) len_header = "Content-length: %s" % msg_length headers = [cmd_header, len_header] if self.user: user_header = "User: %s" % self.user headers.append(user_header) if self.gzip: headers.append("Compress: zlib") if extra_headers is not None: for key in extra_headers: if key.lower() != 'content-length': headers.append("%s: %s" % (key, extra_headers[key])) headers.append('') headers.append('') return '\r\n'.join(headers)
[ "def", "get_headers", "(", "self", ",", "cmd", ",", "msg_length", ",", "extra_headers", ")", ":", "cmd_header", "=", "\"%s %s\"", "%", "(", "cmd", ",", "PROTOCOL_VERSION", ")", "len_header", "=", "\"Content-length: %s\"", "%", "msg_length", "headers", "=", "["...
Returns the headers string based on command to execute
[ "Returns", "the", "headers", "string", "based", "on", "command", "to", "execute" ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/client.py#L168-L184
akissa/spamc
spamc/client.py
SpamC.perform
def perform(self, cmd, msg='', extra_headers=None): """Perform the call""" tries = 0 while 1: conn = None try: conn = self.get_connection() if hasattr(msg, 'read') and hasattr(msg, 'fileno'): msg_length = str(os.fstat(msg.fileno()).st_size) elif hasattr(msg, 'read'): msg.seek(0, 2) msg_length = str(msg.tell() + 2) else: if msg: try: msg_length = str(len(msg) + 2) except TypeError: conn.close() raise ValueError( 'msg param should be a string or file handle') else: msg_length = '2' headers = self.get_headers(cmd, msg_length, extra_headers) if isinstance(msg, types.StringTypes): if self.gzip and msg: msg = compress(msg + '\r\n', self.compress_level) else: msg = msg + '\r\n' conn.send(headers + msg) else: conn.send(headers) if hasattr(msg, 'read'): if hasattr(msg, 'seek'): msg.seek(0) conn.sendfile(msg, self.gzip, self.compress_level) conn.send('\r\n') try: conn.socket().shutdown(socket.SHUT_WR) except socket.error: pass return get_response(cmd, conn) except socket.gaierror as err: if conn is not None: conn.release() raise SpamCError(str(err)) except socket.timeout as err: if conn is not None: conn.release() raise SpamCTimeOutError(str(err)) except socket.error as err: if conn is not None: conn.close() errors = (errno.EAGAIN, errno.EPIPE, errno.EBADF, errno.ECONNRESET) if err[0] not in errors or tries >= self.max_tries: raise SpamCError("socket.error: %s" % str(err)) except BaseException: if conn is not None: conn.release() raise tries += 1 self.backend_mod.sleep(self.wait_tries)
python
def perform(self, cmd, msg='', extra_headers=None): """Perform the call""" tries = 0 while 1: conn = None try: conn = self.get_connection() if hasattr(msg, 'read') and hasattr(msg, 'fileno'): msg_length = str(os.fstat(msg.fileno()).st_size) elif hasattr(msg, 'read'): msg.seek(0, 2) msg_length = str(msg.tell() + 2) else: if msg: try: msg_length = str(len(msg) + 2) except TypeError: conn.close() raise ValueError( 'msg param should be a string or file handle') else: msg_length = '2' headers = self.get_headers(cmd, msg_length, extra_headers) if isinstance(msg, types.StringTypes): if self.gzip and msg: msg = compress(msg + '\r\n', self.compress_level) else: msg = msg + '\r\n' conn.send(headers + msg) else: conn.send(headers) if hasattr(msg, 'read'): if hasattr(msg, 'seek'): msg.seek(0) conn.sendfile(msg, self.gzip, self.compress_level) conn.send('\r\n') try: conn.socket().shutdown(socket.SHUT_WR) except socket.error: pass return get_response(cmd, conn) except socket.gaierror as err: if conn is not None: conn.release() raise SpamCError(str(err)) except socket.timeout as err: if conn is not None: conn.release() raise SpamCTimeOutError(str(err)) except socket.error as err: if conn is not None: conn.close() errors = (errno.EAGAIN, errno.EPIPE, errno.EBADF, errno.ECONNRESET) if err[0] not in errors or tries >= self.max_tries: raise SpamCError("socket.error: %s" % str(err)) except BaseException: if conn is not None: conn.release() raise tries += 1 self.backend_mod.sleep(self.wait_tries)
[ "def", "perform", "(", "self", ",", "cmd", ",", "msg", "=", "''", ",", "extra_headers", "=", "None", ")", ":", "tries", "=", "0", "while", "1", ":", "conn", "=", "None", "try", ":", "conn", "=", "self", ".", "get_connection", "(", ")", "if", "has...
Perform the call
[ "Perform", "the", "call" ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/client.py#L187-L250
akissa/spamc
spamc/client.py
SpamC.tell
def tell(self, msg, action, learnas=''): """Tell what type of we are to process and what should be done with that message. This includes setting or removing a local or a remote database (learning, reporting, forgetting, revoking).""" action = _check_action(action) mode = learnas.upper() headers = { 'Message-class': '', 'Set': 'local', } if action == 'learn': if mode == 'SPAM': headers['Message-class'] = 'spam' elif mode in ['HAM', 'NOTSPAM', 'NOT_SPAM']: headers['Message-class'] = 'ham' else: raise SpamCError('The learnas option is invalid') elif action == 'forget': del headers['Message-class'] del headers['Set'] headers['Remove'] = 'local' elif action == 'report': headers['Message-class'] = 'spam' headers['Set'] = 'local, remote' elif action == 'revoke': headers['Message-class'] = 'ham' headers['Remove'] = 'remote' return self.perform('TELL', msg, headers)
python
def tell(self, msg, action, learnas=''): """Tell what type of we are to process and what should be done with that message. This includes setting or removing a local or a remote database (learning, reporting, forgetting, revoking).""" action = _check_action(action) mode = learnas.upper() headers = { 'Message-class': '', 'Set': 'local', } if action == 'learn': if mode == 'SPAM': headers['Message-class'] = 'spam' elif mode in ['HAM', 'NOTSPAM', 'NOT_SPAM']: headers['Message-class'] = 'ham' else: raise SpamCError('The learnas option is invalid') elif action == 'forget': del headers['Message-class'] del headers['Set'] headers['Remove'] = 'local' elif action == 'report': headers['Message-class'] = 'spam' headers['Set'] = 'local, remote' elif action == 'revoke': headers['Message-class'] = 'ham' headers['Remove'] = 'remote' return self.perform('TELL', msg, headers)
[ "def", "tell", "(", "self", ",", "msg", ",", "action", ",", "learnas", "=", "''", ")", ":", "action", "=", "_check_action", "(", "action", ")", "mode", "=", "learnas", ".", "upper", "(", ")", "headers", "=", "{", "'Message-class'", ":", "''", ",", ...
Tell what type of we are to process and what should be done with that message. This includes setting or removing a local or a remote database (learning, reporting, forgetting, revoking).
[ "Tell", "what", "type", "of", "we", "are", "to", "process", "and", "what", "should", "be", "done", "with", "that", "message", ".", "This", "includes", "setting", "or", "removing", "a", "local", "or", "a", "remote", "database", "(", "learning", "reporting",...
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/client.py#L283-L312
akissa/spamc
spamc/client.py
SpamC.learn
def learn(self, msg, learnas): """Learn message as spam/ham or forget""" if not isinstance(learnas, types.StringTypes): raise SpamCError('The learnas option is invalid') if learnas.lower() == 'forget': resp = self.tell(msg, 'forget') else: resp = self.tell(msg, 'learn', learnas) return resp
python
def learn(self, msg, learnas): """Learn message as spam/ham or forget""" if not isinstance(learnas, types.StringTypes): raise SpamCError('The learnas option is invalid') if learnas.lower() == 'forget': resp = self.tell(msg, 'forget') else: resp = self.tell(msg, 'learn', learnas) return resp
[ "def", "learn", "(", "self", ",", "msg", ",", "learnas", ")", ":", "if", "not", "isinstance", "(", "learnas", ",", "types", ".", "StringTypes", ")", ":", "raise", "SpamCError", "(", "'The learnas option is invalid'", ")", "if", "learnas", ".", "lower", "("...
Learn message as spam/ham or forget
[ "Learn", "message", "as", "spam", "/", "ham", "or", "forget" ]
train
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/spamc/client.py#L314-L322
getsenic/senic.cryptoyaml
cryptoyaml/api.py
generate_key
def generate_key(filepath): ''' generates a new, random secret key at the given location on the filesystem and returns its path ''' fs = path.abspath(path.expanduser(filepath)) with open(fs, 'wb') as outfile: outfile.write(Fernet.generate_key()) chmod(fs, 0o400) return fs
python
def generate_key(filepath): ''' generates a new, random secret key at the given location on the filesystem and returns its path ''' fs = path.abspath(path.expanduser(filepath)) with open(fs, 'wb') as outfile: outfile.write(Fernet.generate_key()) chmod(fs, 0o400) return fs
[ "def", "generate_key", "(", "filepath", ")", ":", "fs", "=", "path", ".", "abspath", "(", "path", ".", "expanduser", "(", "filepath", ")", ")", "with", "open", "(", "fs", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "Fernet",...
generates a new, random secret key at the given location on the filesystem and returns its path
[ "generates", "a", "new", "random", "secret", "key", "at", "the", "given", "location", "on", "the", "filesystem", "and", "returns", "its", "path" ]
train
https://github.com/getsenic/senic.cryptoyaml/blob/d15199b93bc8cf83c7241e6437f5a00076a71013/cryptoyaml/api.py#L6-L14
getsenic/senic.cryptoyaml
cryptoyaml/api.py
get_key
def get_key(key=None, keyfile=None): """ returns a key given either its value, a path to it on the filesystem or as last resort it checks the environment variable CRYPTOYAML_SECRET """ if key is None: if keyfile is None: key = environ.get('CRYPTOYAML_SECRET') if key is None: raise MissingKeyException( '''You must either provide a key value,''' ''' a path to a key or its value via the environment variable ''' ''' CRYPTOYAML_SECRET''' ) else: key = key.encode('utf-8') else: key = open(keyfile, 'rb').read() return key
python
def get_key(key=None, keyfile=None): """ returns a key given either its value, a path to it on the filesystem or as last resort it checks the environment variable CRYPTOYAML_SECRET """ if key is None: if keyfile is None: key = environ.get('CRYPTOYAML_SECRET') if key is None: raise MissingKeyException( '''You must either provide a key value,''' ''' a path to a key or its value via the environment variable ''' ''' CRYPTOYAML_SECRET''' ) else: key = key.encode('utf-8') else: key = open(keyfile, 'rb').read() return key
[ "def", "get_key", "(", "key", "=", "None", ",", "keyfile", "=", "None", ")", ":", "if", "key", "is", "None", ":", "if", "keyfile", "is", "None", ":", "key", "=", "environ", ".", "get", "(", "'CRYPTOYAML_SECRET'", ")", "if", "key", "is", "None", ":"...
returns a key given either its value, a path to it on the filesystem or as last resort it checks the environment variable CRYPTOYAML_SECRET
[ "returns", "a", "key", "given", "either", "its", "value", "a", "path", "to", "it", "on", "the", "filesystem", "or", "as", "last", "resort", "it", "checks", "the", "environment", "variable", "CRYPTOYAML_SECRET" ]
train
https://github.com/getsenic/senic.cryptoyaml/blob/d15199b93bc8cf83c7241e6437f5a00076a71013/cryptoyaml/api.py#L22-L39
getsenic/senic.cryptoyaml
cryptoyaml/api.py
CryptoYAML.read
def read(self): """ Reads and decrypts data from the filesystem """ if path.exists(self.filepath): with open(self.filepath, 'rb') as infile: self.data = yaml.load( self.fernet.decrypt(infile.read())) else: self.data = dict()
python
def read(self): """ Reads and decrypts data from the filesystem """ if path.exists(self.filepath): with open(self.filepath, 'rb') as infile: self.data = yaml.load( self.fernet.decrypt(infile.read())) else: self.data = dict()
[ "def", "read", "(", "self", ")", ":", "if", "path", ".", "exists", "(", "self", ".", "filepath", ")", ":", "with", "open", "(", "self", ".", "filepath", ",", "'rb'", ")", "as", "infile", ":", "self", ".", "data", "=", "yaml", ".", "load", "(", ...
Reads and decrypts data from the filesystem
[ "Reads", "and", "decrypts", "data", "from", "the", "filesystem" ]
train
https://github.com/getsenic/senic.cryptoyaml/blob/d15199b93bc8cf83c7241e6437f5a00076a71013/cryptoyaml/api.py#L58-L65
getsenic/senic.cryptoyaml
cryptoyaml/api.py
CryptoYAML.write
def write(self): """ Encrypts and writes the current state back onto the filesystem """ with open(self.filepath, 'wb') as outfile: outfile.write( self.fernet.encrypt( yaml.dump(self.data, encoding='utf-8')))
python
def write(self): """ Encrypts and writes the current state back onto the filesystem """ with open(self.filepath, 'wb') as outfile: outfile.write( self.fernet.encrypt( yaml.dump(self.data, encoding='utf-8')))
[ "def", "write", "(", "self", ")", ":", "with", "open", "(", "self", ".", "filepath", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "self", ".", "fernet", ".", "encrypt", "(", "yaml", ".", "dump", "(", "self", ".", "data", ...
Encrypts and writes the current state back onto the filesystem
[ "Encrypts", "and", "writes", "the", "current", "state", "back", "onto", "the", "filesystem" ]
train
https://github.com/getsenic/senic.cryptoyaml/blob/d15199b93bc8cf83c7241e6437f5a00076a71013/cryptoyaml/api.py#L67-L72
switchboardpy/switchboard
switchboard/conditions.py
ConditionSet.has_active_condition
def has_active_condition(self, condition, instances): """ Given a list of instances, and the condition active for this switch, returns a boolean representing if the conditional is met, including a non-instance default. """ return_value = None for instance in instances + [None]: if not self.can_execute(instance): continue result = self.is_active(instance, condition) if result is False: return False elif result is True: return_value = True return return_value
python
def has_active_condition(self, condition, instances): """ Given a list of instances, and the condition active for this switch, returns a boolean representing if the conditional is met, including a non-instance default. """ return_value = None for instance in instances + [None]: if not self.can_execute(instance): continue result = self.is_active(instance, condition) if result is False: return False elif result is True: return_value = True return return_value
[ "def", "has_active_condition", "(", "self", ",", "condition", ",", "instances", ")", ":", "return_value", "=", "None", "for", "instance", "in", "instances", "+", "[", "None", "]", ":", "if", "not", "self", ".", "can_execute", "(", "instance", ")", ":", "...
Given a list of instances, and the condition active for this switch, returns a boolean representing if the conditional is met, including a non-instance default.
[ "Given", "a", "list", "of", "instances", "and", "the", "condition", "active", "for", "this", "switch", "returns", "a", "boolean", "representing", "if", "the", "conditional", "is", "met", "including", "a", "non", "-", "instance", "default", "." ]
train
https://github.com/switchboardpy/switchboard/blob/074b4838dbe140cb8f89d3c25ae25e70a29f9553/switchboard/conditions.py#L330-L345
ludeeus/pyhaversion
pyhaversion/__init__.py
Version.get_local_version
async def get_local_version(self): """Get the local installed version.""" self._version_data["source"] = "Local" try: from homeassistant.const import __version__ as localversion self._version = localversion _LOGGER.debug("Version: %s", self.version) _LOGGER.debug("Version data: %s", self.version_data) except ImportError as error: _LOGGER.critical("Home Assistant not found - %s", error) except Exception as error: # pylint: disable=broad-except _LOGGER.critical("Something really wrong happend! - %s", error)
python
async def get_local_version(self): """Get the local installed version.""" self._version_data["source"] = "Local" try: from homeassistant.const import __version__ as localversion self._version = localversion _LOGGER.debug("Version: %s", self.version) _LOGGER.debug("Version data: %s", self.version_data) except ImportError as error: _LOGGER.critical("Home Assistant not found - %s", error) except Exception as error: # pylint: disable=broad-except _LOGGER.critical("Something really wrong happend! - %s", error)
[ "async", "def", "get_local_version", "(", "self", ")", ":", "self", ".", "_version_data", "[", "\"source\"", "]", "=", "\"Local\"", "try", ":", "from", "homeassistant", ".", "const", "import", "__version__", "as", "localversion", "self", ".", "_version", "=", ...
Get the local installed version.
[ "Get", "the", "local", "installed", "version", "." ]
train
https://github.com/ludeeus/pyhaversion/blob/a49d714fce0343657d94faae360a77edf22305dc/pyhaversion/__init__.py#L32-L45
ludeeus/pyhaversion
pyhaversion/__init__.py
Version.get_pypi_version
async def get_pypi_version(self): """Get version published to PyPi.""" self._version_data["beta"] = self.beta self._version_data["source"] = "PyPi" info_version = None last_release = None try: async with async_timeout.timeout(5, loop=self.loop): response = await self.session.get(URL["pypi"]) data = await response.json() info_version = data["info"]["version"] releases = data["releases"] for version in sorted(releases, reverse=True): if re.search(r"^(\\d+\\.)?(\\d\\.)?(\\*|\\d+)$", version): continue else: last_release = version break self._version = info_version if self.beta: if info_version in last_release: self._version = info_version else: self._version = last_release _LOGGER.debug("Version: %s", self.version) _LOGGER.debug("Version data: %s", self.version_data) except asyncio.TimeoutError as error: _LOGGER.error("Timeouterror fetching version information from PyPi") except KeyError as error: _LOGGER.error("Error parsing version information from PyPi, %s", error) except TypeError as error: _LOGGER.error("Error parsing version information from PyPi, %s", error) except aiohttp.ClientError as error: _LOGGER.error("Error fetching version information from PyPi, %s", error) except socket.gaierror as error: _LOGGER.error("Error fetching version information from PyPi, %s", error) except Exception as error: # pylint: disable=broad-except _LOGGER.critical("Something really wrong happend! - %s", error)
python
async def get_pypi_version(self): """Get version published to PyPi.""" self._version_data["beta"] = self.beta self._version_data["source"] = "PyPi" info_version = None last_release = None try: async with async_timeout.timeout(5, loop=self.loop): response = await self.session.get(URL["pypi"]) data = await response.json() info_version = data["info"]["version"] releases = data["releases"] for version in sorted(releases, reverse=True): if re.search(r"^(\\d+\\.)?(\\d\\.)?(\\*|\\d+)$", version): continue else: last_release = version break self._version = info_version if self.beta: if info_version in last_release: self._version = info_version else: self._version = last_release _LOGGER.debug("Version: %s", self.version) _LOGGER.debug("Version data: %s", self.version_data) except asyncio.TimeoutError as error: _LOGGER.error("Timeouterror fetching version information from PyPi") except KeyError as error: _LOGGER.error("Error parsing version information from PyPi, %s", error) except TypeError as error: _LOGGER.error("Error parsing version information from PyPi, %s", error) except aiohttp.ClientError as error: _LOGGER.error("Error fetching version information from PyPi, %s", error) except socket.gaierror as error: _LOGGER.error("Error fetching version information from PyPi, %s", error) except Exception as error: # pylint: disable=broad-except _LOGGER.critical("Something really wrong happend! - %s", error)
[ "async", "def", "get_pypi_version", "(", "self", ")", ":", "self", ".", "_version_data", "[", "\"beta\"", "]", "=", "self", ".", "beta", "self", ".", "_version_data", "[", "\"source\"", "]", "=", "\"PyPi\"", "info_version", "=", "None", "last_release", "=", ...
Get version published to PyPi.
[ "Get", "version", "published", "to", "PyPi", "." ]
train
https://github.com/ludeeus/pyhaversion/blob/a49d714fce0343657d94faae360a77edf22305dc/pyhaversion/__init__.py#L47-L91
ludeeus/pyhaversion
pyhaversion/__init__.py
Version.get_hassio_version
async def get_hassio_version(self): """Get version published for hassio.""" if self.image not in IMAGES: _LOGGER.warning("%s is not a valid image using default", self.image) self.image = "default" board = BOARDS.get(self.image, BOARDS["default"]) self._version_data["source"] = "Hassio" self._version_data["beta"] = self.beta self._version_data["board"] = board self._version_data["image"] = IMAGES[self.image]["hassio"] try: async with async_timeout.timeout(5, loop=self.loop): response = await self.session.get( URL["hassio"]["beta" if self.beta else "stable"] ) data = await response.json() self._version = data["homeassistant"][IMAGES[self.image]["hassio"]] self._version_data["hassos"] = data["hassos"][board] self._version_data["supervisor"] = data["supervisor"] self._version_data["hassos-cli"] = data["hassos-cli"] _LOGGER.debug("Version: %s", self.version) _LOGGER.debug("Version data: %s", self.version_data) except asyncio.TimeoutError as error: _LOGGER.error("Timeouterror fetching version information for hassio") except KeyError as error: _LOGGER.error("Error parsing version information for hassio, %s", error) except TypeError as error: _LOGGER.error("Error parsing version information for hassio, %s", error) except aiohttp.ClientError as error: _LOGGER.error("Error fetching version information for hassio, %s", error) except socket.gaierror as error: _LOGGER.error("Error fetching version information for hassio, %s", error) except Exception as error: # pylint: disable=broad-except _LOGGER.critical("Something really wrong happend! - %s", error)
python
async def get_hassio_version(self): """Get version published for hassio.""" if self.image not in IMAGES: _LOGGER.warning("%s is not a valid image using default", self.image) self.image = "default" board = BOARDS.get(self.image, BOARDS["default"]) self._version_data["source"] = "Hassio" self._version_data["beta"] = self.beta self._version_data["board"] = board self._version_data["image"] = IMAGES[self.image]["hassio"] try: async with async_timeout.timeout(5, loop=self.loop): response = await self.session.get( URL["hassio"]["beta" if self.beta else "stable"] ) data = await response.json() self._version = data["homeassistant"][IMAGES[self.image]["hassio"]] self._version_data["hassos"] = data["hassos"][board] self._version_data["supervisor"] = data["supervisor"] self._version_data["hassos-cli"] = data["hassos-cli"] _LOGGER.debug("Version: %s", self.version) _LOGGER.debug("Version data: %s", self.version_data) except asyncio.TimeoutError as error: _LOGGER.error("Timeouterror fetching version information for hassio") except KeyError as error: _LOGGER.error("Error parsing version information for hassio, %s", error) except TypeError as error: _LOGGER.error("Error parsing version information for hassio, %s", error) except aiohttp.ClientError as error: _LOGGER.error("Error fetching version information for hassio, %s", error) except socket.gaierror as error: _LOGGER.error("Error fetching version information for hassio, %s", error) except Exception as error: # pylint: disable=broad-except _LOGGER.critical("Something really wrong happend! - %s", error)
[ "async", "def", "get_hassio_version", "(", "self", ")", ":", "if", "self", ".", "image", "not", "in", "IMAGES", ":", "_LOGGER", ".", "warning", "(", "\"%s is not a valid image using default\"", ",", "self", ".", "image", ")", "self", ".", "image", "=", "\"de...
Get version published for hassio.
[ "Get", "version", "published", "for", "hassio", "." ]
train
https://github.com/ludeeus/pyhaversion/blob/a49d714fce0343657d94faae360a77edf22305dc/pyhaversion/__init__.py#L93-L132
ludeeus/pyhaversion
pyhaversion/__init__.py
Version.get_docker_version
async def get_docker_version(self): """Get version published for docker.""" if self.image not in IMAGES: _LOGGER.warning("%s is not a valid image using default", self.image) self.image = "default" self._version_data["beta"] = self.beta self._version_data["source"] = "Docker" self._version_data["image"] = IMAGES[self.image]["docker"] try: async with async_timeout.timeout(5, loop=self.loop): response = await self.session.get( URL["docker"].format(IMAGES[self.image]["docker"]) ) data = await response.json() for version in data["results"]: if version["name"] in ["latest", "landingpage", "rc", "dev"]: continue elif re.search(r"\b.+b\d", version["name"]): if self.beta: self._version = version["name"] break else: continue else: self._version = version["name"] if self._version is not None: break else: continue _LOGGER.debug("Version: %s", self.version) _LOGGER.debug("Version data: %s", self.version_data) except asyncio.TimeoutError as error: _LOGGER.error("Timeouterror fetching version information for docker") except KeyError as error: _LOGGER.error("Error parsing version information for docker, %s", error) except TypeError as error: _LOGGER.error("Error parsing version information for docker, %s", error) except aiohttp.ClientError as error: _LOGGER.error("Error fetching version information for docker, %s", error) except socket.gaierror as error: _LOGGER.error("Error fetching version information for docker, %s", error) except Exception as error: # pylint: disable=broad-except _LOGGER.critical("Something really wrong happend! - %s", error)
python
async def get_docker_version(self): """Get version published for docker.""" if self.image not in IMAGES: _LOGGER.warning("%s is not a valid image using default", self.image) self.image = "default" self._version_data["beta"] = self.beta self._version_data["source"] = "Docker" self._version_data["image"] = IMAGES[self.image]["docker"] try: async with async_timeout.timeout(5, loop=self.loop): response = await self.session.get( URL["docker"].format(IMAGES[self.image]["docker"]) ) data = await response.json() for version in data["results"]: if version["name"] in ["latest", "landingpage", "rc", "dev"]: continue elif re.search(r"\b.+b\d", version["name"]): if self.beta: self._version = version["name"] break else: continue else: self._version = version["name"] if self._version is not None: break else: continue _LOGGER.debug("Version: %s", self.version) _LOGGER.debug("Version data: %s", self.version_data) except asyncio.TimeoutError as error: _LOGGER.error("Timeouterror fetching version information for docker") except KeyError as error: _LOGGER.error("Error parsing version information for docker, %s", error) except TypeError as error: _LOGGER.error("Error parsing version information for docker, %s", error) except aiohttp.ClientError as error: _LOGGER.error("Error fetching version information for docker, %s", error) except socket.gaierror as error: _LOGGER.error("Error fetching version information for docker, %s", error) except Exception as error: # pylint: disable=broad-except _LOGGER.critical("Something really wrong happend! - %s", error)
[ "async", "def", "get_docker_version", "(", "self", ")", ":", "if", "self", ".", "image", "not", "in", "IMAGES", ":", "_LOGGER", ".", "warning", "(", "\"%s is not a valid image using default\"", ",", "self", ".", "image", ")", "self", ".", "image", "=", "\"de...
Get version published for docker.
[ "Get", "version", "published", "for", "docker", "." ]
train
https://github.com/ludeeus/pyhaversion/blob/a49d714fce0343657d94faae360a77edf22305dc/pyhaversion/__init__.py#L134-L179
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/ansible.py
relative_playbook
def relative_playbook(playbook): """ Returns a tuple (controlled, playbook). - controlled is a boolean indicating whether or not we think that the playbook being run was checked in to our ansible git repo. - playbook is the relative file path of the playbook. """ if playbook.startswith(fs_prefix): return True, playbook[len(fs_prefix):] else: return False, playbook.split('/')[-1]
python
def relative_playbook(playbook): """ Returns a tuple (controlled, playbook). - controlled is a boolean indicating whether or not we think that the playbook being run was checked in to our ansible git repo. - playbook is the relative file path of the playbook. """ if playbook.startswith(fs_prefix): return True, playbook[len(fs_prefix):] else: return False, playbook.split('/')[-1]
[ "def", "relative_playbook", "(", "playbook", ")", ":", "if", "playbook", ".", "startswith", "(", "fs_prefix", ")", ":", "return", "True", ",", "playbook", "[", "len", "(", "fs_prefix", ")", ":", "]", "else", ":", "return", "False", ",", "playbook", ".", ...
Returns a tuple (controlled, playbook). - controlled is a boolean indicating whether or not we think that the playbook being run was checked in to our ansible git repo. - playbook is the relative file path of the playbook.
[ "Returns", "a", "tuple", "(", "controlled", "playbook", ")", "." ]
train
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/ansible.py#L30-L40
fedora-infra/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/conglomerators/bodhi/overrides.py
ByUserTag.matches
def matches(self, a, b, **config): """ The message must match by username """ submitter_a = a['msg']['override']['submitter']['name'] submitter_b = b['msg']['override']['submitter']['name'] if submitter_a != submitter_b: return False return True
python
def matches(self, a, b, **config): """ The message must match by username """ submitter_a = a['msg']['override']['submitter']['name'] submitter_b = b['msg']['override']['submitter']['name'] if submitter_a != submitter_b: return False return True
[ "def", "matches", "(", "self", ",", "a", ",", "b", ",", "*", "*", "config", ")", ":", "submitter_a", "=", "a", "[", "'msg'", "]", "[", "'override'", "]", "[", "'submitter'", "]", "[", "'name'", "]", "submitter_b", "=", "b", "[", "'msg'", "]", "["...
The message must match by username
[ "The", "message", "must", "match", "by", "username" ]
train
https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/blob/85bf4162692e3042c7dbcc12dfafaca4764b4ae6/fedmsg_meta_fedora_infrastructure/conglomerators/bodhi/overrides.py#L11-L17
datasift/datasift-python
datasift/client.py
Client.start_stream_subscriber
def start_stream_subscriber(self): """ Starts the stream consumer's main loop. Called when the stream consumer has been set up with the correct callbacks. """ if not self._stream_process_started: # pragma: no cover if sys.platform.startswith("win"): # if we're on windows we can't expect multiprocessing to work self._stream_process_started = True self._stream() self._stream_process_started = True self._stream_process.start()
python
def start_stream_subscriber(self): """ Starts the stream consumer's main loop. Called when the stream consumer has been set up with the correct callbacks. """ if not self._stream_process_started: # pragma: no cover if sys.platform.startswith("win"): # if we're on windows we can't expect multiprocessing to work self._stream_process_started = True self._stream() self._stream_process_started = True self._stream_process.start()
[ "def", "start_stream_subscriber", "(", "self", ")", ":", "if", "not", "self", ".", "_stream_process_started", ":", "# pragma: no cover", "if", "sys", ".", "platform", ".", "startswith", "(", "\"win\"", ")", ":", "# if we're on windows we can't expect multiprocessing to ...
Starts the stream consumer's main loop. Called when the stream consumer has been set up with the correct callbacks.
[ "Starts", "the", "stream", "consumer", "s", "main", "loop", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L149-L159
datasift/datasift-python
datasift/client.py
Client.subscribe
def subscribe(self, stream): """ Subscribe to a stream. :param stream: stream to subscribe to :type stream: str :raises: :class:`~datasift.exceptions.StreamSubscriberNotStarted`, :class:`~datasift.exceptions.DeleteRequired`, :class:`~datasift.exceptions.StreamNotConnected` Used as a decorator, eg.:: @client.subscribe(stream) def subscribe_to_hash(msg): print(msg) """ if not self._stream_process_started: raise StreamSubscriberNotStarted() def real_decorator(func): if not self._on_delete: raise DeleteRequired("""An on_delete function is required. You must process delete messages and remove them from your system (if stored) in order to remain compliant with the ToS""") if hasattr(self.factory, 'datasift') and 'send_message' in self.factory.datasift: # pragma: no cover self.subscriptions[stream] = func self.factory.datasift['send_message'](json.dumps({"action": "subscribe", "hash": stream}).encode("utf8")) else: # pragma: no cover raise StreamNotConnected('The client is not connected to DataSift, unable to subscribe to stream') return real_decorator
python
def subscribe(self, stream): """ Subscribe to a stream. :param stream: stream to subscribe to :type stream: str :raises: :class:`~datasift.exceptions.StreamSubscriberNotStarted`, :class:`~datasift.exceptions.DeleteRequired`, :class:`~datasift.exceptions.StreamNotConnected` Used as a decorator, eg.:: @client.subscribe(stream) def subscribe_to_hash(msg): print(msg) """ if not self._stream_process_started: raise StreamSubscriberNotStarted() def real_decorator(func): if not self._on_delete: raise DeleteRequired("""An on_delete function is required. You must process delete messages and remove them from your system (if stored) in order to remain compliant with the ToS""") if hasattr(self.factory, 'datasift') and 'send_message' in self.factory.datasift: # pragma: no cover self.subscriptions[stream] = func self.factory.datasift['send_message'](json.dumps({"action": "subscribe", "hash": stream}).encode("utf8")) else: # pragma: no cover raise StreamNotConnected('The client is not connected to DataSift, unable to subscribe to stream') return real_decorator
[ "def", "subscribe", "(", "self", ",", "stream", ")", ":", "if", "not", "self", ".", "_stream_process_started", ":", "raise", "StreamSubscriberNotStarted", "(", ")", "def", "real_decorator", "(", "func", ")", ":", "if", "not", "self", ".", "_on_delete", ":", ...
Subscribe to a stream. :param stream: stream to subscribe to :type stream: str :raises: :class:`~datasift.exceptions.StreamSubscriberNotStarted`, :class:`~datasift.exceptions.DeleteRequired`, :class:`~datasift.exceptions.StreamNotConnected` Used as a decorator, eg.:: @client.subscribe(stream) def subscribe_to_hash(msg): print(msg)
[ "Subscribe", "to", "a", "stream", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L161-L187
datasift/datasift-python
datasift/client.py
Client.on_open
def on_open(self, func): """ Function to set the callback for the opening of a stream. Can be called manually:: def open_callback(data): setup_stream() client.on_open(open_callback) or as a decorator:: @client.on_open def open_callback(): setup_stream() """ self._on_open = func if self.opened: # pragma: no cover self._on_open(self) return func
python
def on_open(self, func): """ Function to set the callback for the opening of a stream. Can be called manually:: def open_callback(data): setup_stream() client.on_open(open_callback) or as a decorator:: @client.on_open def open_callback(): setup_stream() """ self._on_open = func if self.opened: # pragma: no cover self._on_open(self) return func
[ "def", "on_open", "(", "self", ",", "func", ")", ":", "self", ".", "_on_open", "=", "func", "if", "self", ".", "opened", ":", "# pragma: no cover", "self", ".", "_on_open", "(", "self", ")", "return", "func" ]
Function to set the callback for the opening of a stream. Can be called manually:: def open_callback(data): setup_stream() client.on_open(open_callback) or as a decorator:: @client.on_open def open_callback(): setup_stream()
[ "Function", "to", "set", "the", "callback", "for", "the", "opening", "of", "a", "stream", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L189-L207
datasift/datasift-python
datasift/client.py
Client._stream
def _stream(self): # pragma: no cover """Runs in a sub-process to perform stream consumption""" self.factory.protocol = LiveStream self.factory.datasift = { 'on_open': self._on_open, 'on_close': self._on_close, 'on_message': self._on_message, 'send_message': None } if self.config.ssl: from twisted.internet import ssl options = ssl.optionsForClientTLS(hostname=WEBSOCKET_HOST) connectWS(self.factory, options) else: connectWS(self.factory) reactor.run()
python
def _stream(self): # pragma: no cover """Runs in a sub-process to perform stream consumption""" self.factory.protocol = LiveStream self.factory.datasift = { 'on_open': self._on_open, 'on_close': self._on_close, 'on_message': self._on_message, 'send_message': None } if self.config.ssl: from twisted.internet import ssl options = ssl.optionsForClientTLS(hostname=WEBSOCKET_HOST) connectWS(self.factory, options) else: connectWS(self.factory) reactor.run()
[ "def", "_stream", "(", "self", ")", ":", "# pragma: no cover", "self", ".", "factory", ".", "protocol", "=", "LiveStream", "self", ".", "factory", ".", "datasift", "=", "{", "'on_open'", ":", "self", ".", "_on_open", ",", "'on_close'", ":", "self", ".", ...
Runs in a sub-process to perform stream consumption
[ "Runs", "in", "a", "sub", "-", "process", "to", "perform", "stream", "consumption" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L287-L302
datasift/datasift-python
datasift/client.py
Client.compile
def compile(self, csdl): """ Compile the given CSDL. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/compile Raises a DataSiftApiException for any error given by the REST API, including CSDL compilation. :param csdl: CSDL to compile :type csdl: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('compile', data=dict(csdl=csdl))
python
def compile(self, csdl): """ Compile the given CSDL. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/compile Raises a DataSiftApiException for any error given by the REST API, including CSDL compilation. :param csdl: CSDL to compile :type csdl: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('compile', data=dict(csdl=csdl))
[ "def", "compile", "(", "self", ",", "csdl", ")", ":", "return", "self", ".", "request", ".", "post", "(", "'compile'", ",", "data", "=", "dict", "(", "csdl", "=", "csdl", ")", ")" ]
Compile the given CSDL. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/compile Raises a DataSiftApiException for any error given by the REST API, including CSDL compilation. :param csdl: CSDL to compile :type csdl: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Compile", "the", "given", "CSDL", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L304-L317
datasift/datasift-python
datasift/client.py
Client.is_valid
def is_valid(self, csdl): """ Checks if the given CSDL is valid. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/validate :param csdl: CSDL to validate :type csdl: str :returns: Boolean indicating the validity of the CSDL :rtype: bool :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ try: self.validate(csdl) except DataSiftApiException as e: if e.response.status_code == 400: return False else: raise e return True
python
def is_valid(self, csdl): """ Checks if the given CSDL is valid. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/validate :param csdl: CSDL to validate :type csdl: str :returns: Boolean indicating the validity of the CSDL :rtype: bool :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ try: self.validate(csdl) except DataSiftApiException as e: if e.response.status_code == 400: return False else: raise e return True
[ "def", "is_valid", "(", "self", ",", "csdl", ")", ":", "try", ":", "self", ".", "validate", "(", "csdl", ")", "except", "DataSiftApiException", "as", "e", ":", "if", "e", ".", "response", ".", "status_code", "==", "400", ":", "return", "False", "else",...
Checks if the given CSDL is valid. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/validate :param csdl: CSDL to validate :type csdl: str :returns: Boolean indicating the validity of the CSDL :rtype: bool :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Checks", "if", "the", "given", "CSDL", "is", "valid", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L332-L350
datasift/datasift-python
datasift/client.py
Client.usage
def usage(self, period='hour'): """ Check the number of objects processed and delivered for a given time period Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/usage :param period: (optional) time period to measure usage for, can be one of "day", "hour" or "current" (5 minutes), default is hour :type period: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get('usage', params=dict(period=period))
python
def usage(self, period='hour'): """ Check the number of objects processed and delivered for a given time period Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/usage :param period: (optional) time period to measure usage for, can be one of "day", "hour" or "current" (5 minutes), default is hour :type period: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get('usage', params=dict(period=period))
[ "def", "usage", "(", "self", ",", "period", "=", "'hour'", ")", ":", "return", "self", ".", "request", ".", "get", "(", "'usage'", ",", "params", "=", "dict", "(", "period", "=", "period", ")", ")" ]
Check the number of objects processed and delivered for a given time period Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/usage :param period: (optional) time period to measure usage for, can be one of "day", "hour" or "current" (5 minutes), default is hour :type period: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Check", "the", "number", "of", "objects", "processed", "and", "delivered", "for", "a", "given", "time", "period" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L352-L363
datasift/datasift-python
datasift/client.py
Client.dpu
def dpu(self, hash=None, historics_id=None): """ Calculate the DPU cost of consuming a stream. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/dpu :param hash: target CSDL filter hash :type hash: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ if hash: return self.request.get('dpu', params=dict(hash=hash)) if historics_id: return self.request.get('dpu', params=dict(historics_id=historics_id))
python
def dpu(self, hash=None, historics_id=None): """ Calculate the DPU cost of consuming a stream. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/dpu :param hash: target CSDL filter hash :type hash: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ if hash: return self.request.get('dpu', params=dict(hash=hash)) if historics_id: return self.request.get('dpu', params=dict(historics_id=historics_id))
[ "def", "dpu", "(", "self", ",", "hash", "=", "None", ",", "historics_id", "=", "None", ")", ":", "if", "hash", ":", "return", "self", ".", "request", ".", "get", "(", "'dpu'", ",", "params", "=", "dict", "(", "hash", "=", "hash", ")", ")", "if", ...
Calculate the DPU cost of consuming a stream. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/dpu :param hash: target CSDL filter hash :type hash: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Calculate", "the", "DPU", "cost", "of", "consuming", "a", "stream", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L365-L379
datasift/datasift-python
datasift/client.py
Client.pull
def pull(self, subscription_id, size=None, cursor=None): """ Pulls a series of interactions from the queue for the given subscription ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pull :param subscription_id: The ID of the subscription to pull interactions for :type subscription_id: str :param size: the max amount of data to pull in bytes :type size: int :param cursor: an ID to use as the point in the queue from which to start fetching data :type cursor: str :returns: dict with extra response data :rtype: :class:`~datasift.request.ResponseList` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': subscription_id} if size: params['size'] = size if cursor: params['cursor'] = cursor raw = self.request('get', 'pull', params=params) def pull_parser(headers, data): pull_type = headers.get("X-DataSift-Format") if pull_type in ("json_meta", "json_array"): return json.loads(data) else: lines = data.strip().split("\n").__iter__() return list(map(json.loads, lines)) return self.request.build_response(raw, parser=pull_parser)
python
def pull(self, subscription_id, size=None, cursor=None): """ Pulls a series of interactions from the queue for the given subscription ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pull :param subscription_id: The ID of the subscription to pull interactions for :type subscription_id: str :param size: the max amount of data to pull in bytes :type size: int :param cursor: an ID to use as the point in the queue from which to start fetching data :type cursor: str :returns: dict with extra response data :rtype: :class:`~datasift.request.ResponseList` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': subscription_id} if size: params['size'] = size if cursor: params['cursor'] = cursor raw = self.request('get', 'pull', params=params) def pull_parser(headers, data): pull_type = headers.get("X-DataSift-Format") if pull_type in ("json_meta", "json_array"): return json.loads(data) else: lines = data.strip().split("\n").__iter__() return list(map(json.loads, lines)) return self.request.build_response(raw, parser=pull_parser)
[ "def", "pull", "(", "self", ",", "subscription_id", ",", "size", "=", "None", ",", "cursor", "=", "None", ")", ":", "params", "=", "{", "'id'", ":", "subscription_id", "}", "if", "size", ":", "params", "[", "'size'", "]", "=", "size", "if", "cursor",...
Pulls a series of interactions from the queue for the given subscription ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pull :param subscription_id: The ID of the subscription to pull interactions for :type subscription_id: str :param size: the max amount of data to pull in bytes :type size: int :param cursor: an ID to use as the point in the queue from which to start fetching data :type cursor: str :returns: dict with extra response data :rtype: :class:`~datasift.request.ResponseList` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Pulls", "a", "series", "of", "interactions", "from", "the", "queue", "for", "the", "given", "subscription", "ID", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L392-L422
lbrictson/pypubg
pypubg/core.py
PUBGAPI._get_player_profile
def _get_player_profile(self, player_handle): """Returns pubg player profile from PUBG api, no filtering :param player_handle: player PUBG profile name :type player_handle: str :return: return json from PUBG API :rtype: dict """ url = self.pubg_url + player_handle response = requests.request("GET", url, headers=self.headers) data = json.loads(response.text) return data
python
def _get_player_profile(self, player_handle): """Returns pubg player profile from PUBG api, no filtering :param player_handle: player PUBG profile name :type player_handle: str :return: return json from PUBG API :rtype: dict """ url = self.pubg_url + player_handle response = requests.request("GET", url, headers=self.headers) data = json.loads(response.text) return data
[ "def", "_get_player_profile", "(", "self", ",", "player_handle", ")", ":", "url", "=", "self", ".", "pubg_url", "+", "player_handle", "response", "=", "requests", ".", "request", "(", "\"GET\"", ",", "url", ",", "headers", "=", "self", ".", "headers", ")",...
Returns pubg player profile from PUBG api, no filtering :param player_handle: player PUBG profile name :type player_handle: str :return: return json from PUBG API :rtype: dict
[ "Returns", "pubg", "player", "profile", "from", "PUBG", "api", "no", "filtering", ":", "param", "player_handle", ":", "player", "PUBG", "profile", "name", ":", "type", "player_handle", ":", "str", ":", "return", ":", "return", "json", "from", "PUBG", "API", ...
train
https://github.com/lbrictson/pypubg/blob/4421fc0446e124eeab795d759f75696d470a38b3/pypubg/core.py#L25-L36
lbrictson/pypubg
pypubg/core.py
PUBGAPI.player
def player(self, player_handle): """Returns the full set of data on a player, no filtering""" try: url = self.pubg_url + player_handle response = requests.request("GET", url, headers=self.headers) return json.loads(response.text) except BaseException as error: print('Unhandled exception: ' + str(error)) raise
python
def player(self, player_handle): """Returns the full set of data on a player, no filtering""" try: url = self.pubg_url + player_handle response = requests.request("GET", url, headers=self.headers) return json.loads(response.text) except BaseException as error: print('Unhandled exception: ' + str(error)) raise
[ "def", "player", "(", "self", ",", "player_handle", ")", ":", "try", ":", "url", "=", "self", ".", "pubg_url", "+", "player_handle", "response", "=", "requests", ".", "request", "(", "\"GET\"", ",", "url", ",", "headers", "=", "self", ".", "headers", "...
Returns the full set of data on a player, no filtering
[ "Returns", "the", "full", "set", "of", "data", "on", "a", "player", "no", "filtering" ]
train
https://github.com/lbrictson/pypubg/blob/4421fc0446e124eeab795d759f75696d470a38b3/pypubg/core.py#L38-L46
lbrictson/pypubg
pypubg/core.py
PUBGAPI.player_s
def player_s(self, sid) : """Returns the full set of data on a player, no filtering""" try: url = self.pubg_url_steam.format(str(sid)) response = requests.request("GET", url, headers=self.headers) return json.loads(response.text) except BaseException as error: print('Unhandled exception: ' + str(error)) raise
python
def player_s(self, sid) : """Returns the full set of data on a player, no filtering""" try: url = self.pubg_url_steam.format(str(sid)) response = requests.request("GET", url, headers=self.headers) return json.loads(response.text) except BaseException as error: print('Unhandled exception: ' + str(error)) raise
[ "def", "player_s", "(", "self", ",", "sid", ")", ":", "try", ":", "url", "=", "self", ".", "pubg_url_steam", ".", "format", "(", "str", "(", "sid", ")", ")", "response", "=", "requests", ".", "request", "(", "\"GET\"", ",", "url", ",", "headers", "...
Returns the full set of data on a player, no filtering
[ "Returns", "the", "full", "set", "of", "data", "on", "a", "player", "no", "filtering" ]
train
https://github.com/lbrictson/pypubg/blob/4421fc0446e124eeab795d759f75696d470a38b3/pypubg/core.py#L48-L56
lbrictson/pypubg
pypubg/core.py
PUBGAPI.player_mode_stats
def player_mode_stats(self, player_handle, game_mode=constants.GAME_MODE_WILDCARD, game_region=constants.GAME_REGION_WILDCARD): """Returns the stats for a particular mode of play, accepts solo, duo and squad. Will return both regional and global stats. Default gamemode is solo by Zac: Add parameter game_region to extract player stats by region directly """ if game_mode not in constants.GAME_MODES: raise APIException("game_mode must be one of: solo, duo, squad, all") if game_region not in constants.GAME_REGIONS: raise APIException("game_region must be one of: as, na, agg, sea, eu, oc, sa, all") try: data = self._get_player_profile(player_handle) data = self._filter_gameplay_stats(data, game_mode, game_region) return data except BaseException as error: print('Unhandled exception: ' + str(error)) raise
python
def player_mode_stats(self, player_handle, game_mode=constants.GAME_MODE_WILDCARD, game_region=constants.GAME_REGION_WILDCARD): """Returns the stats for a particular mode of play, accepts solo, duo and squad. Will return both regional and global stats. Default gamemode is solo by Zac: Add parameter game_region to extract player stats by region directly """ if game_mode not in constants.GAME_MODES: raise APIException("game_mode must be one of: solo, duo, squad, all") if game_region not in constants.GAME_REGIONS: raise APIException("game_region must be one of: as, na, agg, sea, eu, oc, sa, all") try: data = self._get_player_profile(player_handle) data = self._filter_gameplay_stats(data, game_mode, game_region) return data except BaseException as error: print('Unhandled exception: ' + str(error)) raise
[ "def", "player_mode_stats", "(", "self", ",", "player_handle", ",", "game_mode", "=", "constants", ".", "GAME_MODE_WILDCARD", ",", "game_region", "=", "constants", ".", "GAME_REGION_WILDCARD", ")", ":", "if", "game_mode", "not", "in", "constants", ".", "GAME_MODES...
Returns the stats for a particular mode of play, accepts solo, duo and squad. Will return both regional and global stats. Default gamemode is solo by Zac: Add parameter game_region to extract player stats by region directly
[ "Returns", "the", "stats", "for", "a", "particular", "mode", "of", "play", "accepts", "solo", "duo", "and", "squad", ".", "Will", "return", "both", "regional", "and", "global", "stats", ".", "Default", "gamemode", "is", "solo", "by", "Zac", ":", "Add", "...
train
https://github.com/lbrictson/pypubg/blob/4421fc0446e124eeab795d759f75696d470a38b3/pypubg/core.py#L58-L74
lbrictson/pypubg
pypubg/core.py
PUBGAPI._filter_gameplay_stats
def _filter_gameplay_stats(self, data, game_mode, game_region): """Returns gameplay stats that are filtered by game_mode and game_region. :param data: Json of gameplay stats. :type data: dict :param game_mode: Target game mode. :type game_mode: str :param game_region: Target game region. :type game_region: str :return: return list of gameplay stats with target game mode and region. :rtype: list """ return_data = [] for stat in data['Stats']: if self._is_target_game_mode(stat, game_mode) and self._is_target_region(stat, game_region): return_data.append(stat) return return_data
python
def _filter_gameplay_stats(self, data, game_mode, game_region): """Returns gameplay stats that are filtered by game_mode and game_region. :param data: Json of gameplay stats. :type data: dict :param game_mode: Target game mode. :type game_mode: str :param game_region: Target game region. :type game_region: str :return: return list of gameplay stats with target game mode and region. :rtype: list """ return_data = [] for stat in data['Stats']: if self._is_target_game_mode(stat, game_mode) and self._is_target_region(stat, game_region): return_data.append(stat) return return_data
[ "def", "_filter_gameplay_stats", "(", "self", ",", "data", ",", "game_mode", ",", "game_region", ")", ":", "return_data", "=", "[", "]", "for", "stat", "in", "data", "[", "'Stats'", "]", ":", "if", "self", ".", "_is_target_game_mode", "(", "stat", ",", "...
Returns gameplay stats that are filtered by game_mode and game_region. :param data: Json of gameplay stats. :type data: dict :param game_mode: Target game mode. :type game_mode: str :param game_region: Target game region. :type game_region: str :return: return list of gameplay stats with target game mode and region. :rtype: list
[ "Returns", "gameplay", "stats", "that", "are", "filtered", "by", "game_mode", "and", "game_region", ".", ":", "param", "data", ":", "Json", "of", "gameplay", "stats", ".", ":", "type", "data", ":", "dict", ":", "param", "game_mode", ":", "Target", "game", ...
train
https://github.com/lbrictson/pypubg/blob/4421fc0446e124eeab795d759f75696d470a38b3/pypubg/core.py#L76-L92
lbrictson/pypubg
pypubg/core.py
PUBGAPI._is_target_game_mode
def _is_target_game_mode(self, stat, game_mode): """Returns if the stat matches target game mode. :param stat: Json of gameplay stat. :type stat: dict :param game_mode: Target game mode. :type game_mode: str :return: return does the stat matches target game mode. :rtype: bool """ if game_mode == constants.GAME_MODE_WILDCARD: return True return stat['Match'] == game_mode
python
def _is_target_game_mode(self, stat, game_mode): """Returns if the stat matches target game mode. :param stat: Json of gameplay stat. :type stat: dict :param game_mode: Target game mode. :type game_mode: str :return: return does the stat matches target game mode. :rtype: bool """ if game_mode == constants.GAME_MODE_WILDCARD: return True return stat['Match'] == game_mode
[ "def", "_is_target_game_mode", "(", "self", ",", "stat", ",", "game_mode", ")", ":", "if", "game_mode", "==", "constants", ".", "GAME_MODE_WILDCARD", ":", "return", "True", "return", "stat", "[", "'Match'", "]", "==", "game_mode" ]
Returns if the stat matches target game mode. :param stat: Json of gameplay stat. :type stat: dict :param game_mode: Target game mode. :type game_mode: str :return: return does the stat matches target game mode. :rtype: bool
[ "Returns", "if", "the", "stat", "matches", "target", "game", "mode", ".", ":", "param", "stat", ":", "Json", "of", "gameplay", "stat", ".", ":", "type", "stat", ":", "dict", ":", "param", "game_mode", ":", "Target", "game", "mode", ".", ":", "type", ...
train
https://github.com/lbrictson/pypubg/blob/4421fc0446e124eeab795d759f75696d470a38b3/pypubg/core.py#L94-L106
lbrictson/pypubg
pypubg/core.py
PUBGAPI._is_target_region
def _is_target_region(self, stat, game_region): """Returns if the stat matches target game region. :param stat: Json of gameplay stat. :type stat: dict :param game_region: Target game region. :type game_region: str :return: return does the stat matches target game region. :rtype: bool """ if game_region == constants.GAME_REGION_WILDCARD: return True return stat['Region'] == game_region
python
def _is_target_region(self, stat, game_region): """Returns if the stat matches target game region. :param stat: Json of gameplay stat. :type stat: dict :param game_region: Target game region. :type game_region: str :return: return does the stat matches target game region. :rtype: bool """ if game_region == constants.GAME_REGION_WILDCARD: return True return stat['Region'] == game_region
[ "def", "_is_target_region", "(", "self", ",", "stat", ",", "game_region", ")", ":", "if", "game_region", "==", "constants", ".", "GAME_REGION_WILDCARD", ":", "return", "True", "return", "stat", "[", "'Region'", "]", "==", "game_region" ]
Returns if the stat matches target game region. :param stat: Json of gameplay stat. :type stat: dict :param game_region: Target game region. :type game_region: str :return: return does the stat matches target game region. :rtype: bool
[ "Returns", "if", "the", "stat", "matches", "target", "game", "region", ".", ":", "param", "stat", ":", "Json", "of", "gameplay", "stat", ".", ":", "type", "stat", ":", "dict", ":", "param", "game_region", ":", "Target", "game", "region", ".", ":", "typ...
train
https://github.com/lbrictson/pypubg/blob/4421fc0446e124eeab795d759f75696d470a38b3/pypubg/core.py#L108-L120
lbrictson/pypubg
pypubg/core.py
PUBGAPI.player_skill
def player_skill(self, player_handle, game_mode='solo'): """Returns the current skill rating of the player for a specified gamemode, default gamemode is solo""" if game_mode not in constants.GAME_MODES: raise APIException("game_mode must be one of: solo, duo, squad, all") try: data = self._get_player_profile(player_handle) player_stats = {} return_data = [] for stat in data['Stats']: if stat['Match'] == game_mode: for datas in stat['Stats']: if datas['label'] == 'Rating': player_stats[stat['Region']] = datas['value'] return player_stats except BaseException as error: print('Unhandled exception: ' + str(error)) raise
python
def player_skill(self, player_handle, game_mode='solo'): """Returns the current skill rating of the player for a specified gamemode, default gamemode is solo""" if game_mode not in constants.GAME_MODES: raise APIException("game_mode must be one of: solo, duo, squad, all") try: data = self._get_player_profile(player_handle) player_stats = {} return_data = [] for stat in data['Stats']: if stat['Match'] == game_mode: for datas in stat['Stats']: if datas['label'] == 'Rating': player_stats[stat['Region']] = datas['value'] return player_stats except BaseException as error: print('Unhandled exception: ' + str(error)) raise
[ "def", "player_skill", "(", "self", ",", "player_handle", ",", "game_mode", "=", "'solo'", ")", ":", "if", "game_mode", "not", "in", "constants", ".", "GAME_MODES", ":", "raise", "APIException", "(", "\"game_mode must be one of: solo, duo, squad, all\"", ")", "try",...
Returns the current skill rating of the player for a specified gamemode, default gamemode is solo
[ "Returns", "the", "current", "skill", "rating", "of", "the", "player", "for", "a", "specified", "gamemode", "default", "gamemode", "is", "solo" ]
train
https://github.com/lbrictson/pypubg/blob/4421fc0446e124eeab795d759f75696d470a38b3/pypubg/core.py#L122-L139
pletzer/pnumpy
examples/exAverage2d.py
plot
def plot(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data, title=''): """ Plot distributed array @param nxG number of global cells in x @param nyG number of global cells in y @param iBeg global starting index in x @param iEnd global ending index in x @param jBeg global starting index in y @param jEnd global ending index in y @param data local array @param title plot title """ sz = MPI.COMM_WORLD.Get_size() rk = MPI.COMM_WORLD.Get_rank() iBegs = MPI.COMM_WORLD.gather(iBeg, root=0) iEnds = MPI.COMM_WORLD.gather(iEnd, root=0) jBegs = MPI.COMM_WORLD.gather(jBeg, root=0) jEnds = MPI.COMM_WORLD.gather(jEnd, root=0) arrays = MPI.COMM_WORLD.gather(numpy.array(data), root=0) if rk == 0: bigArray = numpy.zeros((nxG, nyG), data.dtype) for pe in range(sz): bigArray[iBegs[pe]:iEnds[pe], jBegs[pe]:jEnds[pe]] = arrays[pe] from matplotlib import pylab pylab.pcolor(bigArray.transpose()) # add the decomp domains for pe in range(sz): pylab.plot([iBegs[pe], iBegs[pe]], [0, nyG - 1], 'w--') pylab.plot([0, nxG - 1], [jBegs[pe], jBegs[pe]], 'w--') pylab.title(title) pylab.show()
python
def plot(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data, title=''): """ Plot distributed array @param nxG number of global cells in x @param nyG number of global cells in y @param iBeg global starting index in x @param iEnd global ending index in x @param jBeg global starting index in y @param jEnd global ending index in y @param data local array @param title plot title """ sz = MPI.COMM_WORLD.Get_size() rk = MPI.COMM_WORLD.Get_rank() iBegs = MPI.COMM_WORLD.gather(iBeg, root=0) iEnds = MPI.COMM_WORLD.gather(iEnd, root=0) jBegs = MPI.COMM_WORLD.gather(jBeg, root=0) jEnds = MPI.COMM_WORLD.gather(jEnd, root=0) arrays = MPI.COMM_WORLD.gather(numpy.array(data), root=0) if rk == 0: bigArray = numpy.zeros((nxG, nyG), data.dtype) for pe in range(sz): bigArray[iBegs[pe]:iEnds[pe], jBegs[pe]:jEnds[pe]] = arrays[pe] from matplotlib import pylab pylab.pcolor(bigArray.transpose()) # add the decomp domains for pe in range(sz): pylab.plot([iBegs[pe], iBegs[pe]], [0, nyG - 1], 'w--') pylab.plot([0, nxG - 1], [jBegs[pe], jBegs[pe]], 'w--') pylab.title(title) pylab.show()
[ "def", "plot", "(", "nxG", ",", "nyG", ",", "iBeg", ",", "iEnd", ",", "jBeg", ",", "jEnd", ",", "data", ",", "title", "=", "''", ")", ":", "sz", "=", "MPI", ".", "COMM_WORLD", ".", "Get_size", "(", ")", "rk", "=", "MPI", ".", "COMM_WORLD", ".",...
Plot distributed array @param nxG number of global cells in x @param nyG number of global cells in y @param iBeg global starting index in x @param iEnd global ending index in x @param jBeg global starting index in y @param jEnd global ending index in y @param data local array @param title plot title
[ "Plot", "distributed", "array" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/examples/exAverage2d.py#L12-L42