id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
227,500
SmileyChris/django-countries
django_countries/__init__.py
Countries.numeric
def numeric(self, code, padded=False): """ Return the ISO 3166-1 numeric country code matching the provided country code. If no match is found, returns ``None``. :param padded: Pass ``True`` to return a 0-padded three character string, otherwise an integer will be returned. """ code = self.alpha2(code) try: num = self.alt_codes[code][1] except KeyError: return None if padded: return "%03d" % num return num
python
def numeric(self, code, padded=False): code = self.alpha2(code) try: num = self.alt_codes[code][1] except KeyError: return None if padded: return "%03d" % num return num
[ "def", "numeric", "(", "self", ",", "code", ",", "padded", "=", "False", ")", ":", "code", "=", "self", ".", "alpha2", "(", "code", ")", "try", ":", "num", "=", "self", ".", "alt_codes", "[", "code", "]", "[", "1", "]", "except", "KeyError", ":",...
Return the ISO 3166-1 numeric country code matching the provided country code. If no match is found, returns ``None``. :param padded: Pass ``True`` to return a 0-padded three character string, otherwise an integer will be returned.
[ "Return", "the", "ISO", "3166", "-", "1", "numeric", "country", "code", "matching", "the", "provided", "country", "code", "." ]
68b0934e8180d47bc15eff2887b6887aaa6e0228
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/__init__.py#L287-L304
227,501
SmileyChris/django-countries
django_countries/widgets.py
LazyChoicesMixin.choices
def choices(self): """ When it's time to get the choices, if it was a lazy then figure it out now and memoize the result. """ if isinstance(self._choices, Promise): self._choices = list(self._choices) return self._choices
python
def choices(self): if isinstance(self._choices, Promise): self._choices = list(self._choices) return self._choices
[ "def", "choices", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "_choices", ",", "Promise", ")", ":", "self", ".", "_choices", "=", "list", "(", "self", ".", "_choices", ")", "return", "self", ".", "_choices" ]
When it's time to get the choices, if it was a lazy then figure it out now and memoize the result.
[ "When", "it", "s", "time", "to", "get", "the", "choices", "if", "it", "was", "a", "lazy", "then", "figure", "it", "out", "now", "and", "memoize", "the", "result", "." ]
68b0934e8180d47bc15eff2887b6887aaa6e0228
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/widgets.py#L26-L33
227,502
SmileyChris/django-countries
django_countries/ioc_data.py
check_ioc_countries
def check_ioc_countries(verbosity=1): """ Check if all IOC codes map to ISO codes correctly """ from django_countries.data import COUNTRIES if verbosity: # pragma: no cover print("Checking if all IOC codes map correctly") for key in ISO_TO_IOC: assert COUNTRIES.get(key), "No ISO code for %s" % key if verbosity: # pragma: no cover print("Finished checking IOC codes")
python
def check_ioc_countries(verbosity=1): from django_countries.data import COUNTRIES if verbosity: # pragma: no cover print("Checking if all IOC codes map correctly") for key in ISO_TO_IOC: assert COUNTRIES.get(key), "No ISO code for %s" % key if verbosity: # pragma: no cover print("Finished checking IOC codes")
[ "def", "check_ioc_countries", "(", "verbosity", "=", "1", ")", ":", "from", "django_countries", ".", "data", "import", "COUNTRIES", "if", "verbosity", ":", "# pragma: no cover", "print", "(", "\"Checking if all IOC codes map correctly\"", ")", "for", "key", "in", "I...
Check if all IOC codes map to ISO codes correctly
[ "Check", "if", "all", "IOC", "codes", "map", "to", "ISO", "codes", "correctly" ]
68b0934e8180d47bc15eff2887b6887aaa6e0228
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/ioc_data.py#L318-L329
227,503
SmileyChris/django-countries
django_countries/data.py
self_generate
def self_generate(output_filename, filename="iso3166-1.csv"): # pragma: no cover """ The following code can be used for self-generation of this file. It requires a UTF-8 CSV file containing the short ISO name and two letter country code as the first two columns. """ import csv import re countries = [] alt_codes = [] with open(filename, "r") as csv_file: for row in csv.reader(csv_file): name = row[0].rstrip("*") name = re.sub(r"\(the\)", "", name) if name: countries.append((name, row[1])) alt_codes.append((row[1], row[2], int(row[3]))) with open(__file__, "r") as source_file: contents = source_file.read() # Write countries. bits = re.match("(.*\nCOUNTRIES = \{\n)(.*?)(\n\}.*)", contents, re.DOTALL).groups() country_list = [] for name, code in countries: name = name.replace('"', r"\"").strip() country_list.append(' "{code}": _("{name}"),'.format(name=name, code=code)) content = bits[0] content += "\n".join(country_list) # Write alt codes. alt_bits = re.match( "(.*\nALT_CODES = \{\n)(.*)(\n\}.*)", bits[2], re.DOTALL ).groups() alt_list = [] for code, code3, codenum in alt_codes: name = name.replace('"', r"\"").strip() alt_list.append( ' "{code}": ("{code3}", {codenum}),'.format( code=code, code3=code3, codenum=codenum ) ) content += alt_bits[0] content += "\n".join(alt_list) content += alt_bits[2] # Generate file. with open(output_filename, "w") as output_file: output_file.write(content) return countries
python
def self_generate(output_filename, filename="iso3166-1.csv"): # pragma: no cover import csv import re countries = [] alt_codes = [] with open(filename, "r") as csv_file: for row in csv.reader(csv_file): name = row[0].rstrip("*") name = re.sub(r"\(the\)", "", name) if name: countries.append((name, row[1])) alt_codes.append((row[1], row[2], int(row[3]))) with open(__file__, "r") as source_file: contents = source_file.read() # Write countries. bits = re.match("(.*\nCOUNTRIES = \{\n)(.*?)(\n\}.*)", contents, re.DOTALL).groups() country_list = [] for name, code in countries: name = name.replace('"', r"\"").strip() country_list.append(' "{code}": _("{name}"),'.format(name=name, code=code)) content = bits[0] content += "\n".join(country_list) # Write alt codes. alt_bits = re.match( "(.*\nALT_CODES = \{\n)(.*)(\n\}.*)", bits[2], re.DOTALL ).groups() alt_list = [] for code, code3, codenum in alt_codes: name = name.replace('"', r"\"").strip() alt_list.append( ' "{code}": ("{code3}", {codenum}),'.format( code=code, code3=code3, codenum=codenum ) ) content += alt_bits[0] content += "\n".join(alt_list) content += alt_bits[2] # Generate file. with open(output_filename, "w") as output_file: output_file.write(content) return countries
[ "def", "self_generate", "(", "output_filename", ",", "filename", "=", "\"iso3166-1.csv\"", ")", ":", "# pragma: no cover", "import", "csv", "import", "re", "countries", "=", "[", "]", "alt_codes", "=", "[", "]", "with", "open", "(", "filename", ",", "\"r\"", ...
The following code can be used for self-generation of this file. It requires a UTF-8 CSV file containing the short ISO name and two letter country code as the first two columns.
[ "The", "following", "code", "can", "be", "used", "for", "self", "-", "generation", "of", "this", "file", "." ]
68b0934e8180d47bc15eff2887b6887aaa6e0228
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/data.py#L538-L585
227,504
SmileyChris/django-countries
django_countries/fields.py
LazyChoicesMixin._set_choices
def _set_choices(self, value): """ Also update the widget's choices. """ super(LazyChoicesMixin, self)._set_choices(value) self.widget.choices = value
python
def _set_choices(self, value): super(LazyChoicesMixin, self)._set_choices(value) self.widget.choices = value
[ "def", "_set_choices", "(", "self", ",", "value", ")", ":", "super", "(", "LazyChoicesMixin", ",", "self", ")", ".", "_set_choices", "(", "value", ")", "self", ".", "widget", ".", "choices", "=", "value" ]
Also update the widget's choices.
[ "Also", "update", "the", "widget", "s", "choices", "." ]
68b0934e8180d47bc15eff2887b6887aaa6e0228
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/fields.py#L235-L240
227,505
SmileyChris/django-countries
django_countries/fields.py
CountryField.deconstruct
def deconstruct(self): """ Remove choices from deconstructed field, as this is the country list and not user editable. Not including the ``blank_label`` property, as this isn't database related. """ name, path, args, kwargs = super(CountryField, self).deconstruct() kwargs.pop("choices") if self.multiple: # multiple determines the length of the field kwargs["multiple"] = self.multiple if self.countries is not countries: # Include the countries class if it's not the default countries # instance. kwargs["countries"] = self.countries.__class__ return name, path, args, kwargs
python
def deconstruct(self): name, path, args, kwargs = super(CountryField, self).deconstruct() kwargs.pop("choices") if self.multiple: # multiple determines the length of the field kwargs["multiple"] = self.multiple if self.countries is not countries: # Include the countries class if it's not the default countries # instance. kwargs["countries"] = self.countries.__class__ return name, path, args, kwargs
[ "def", "deconstruct", "(", "self", ")", ":", "name", ",", "path", ",", "args", ",", "kwargs", "=", "super", "(", "CountryField", ",", "self", ")", ".", "deconstruct", "(", ")", "kwargs", ".", "pop", "(", "\"choices\"", ")", "if", "self", ".", "multip...
Remove choices from deconstructed field, as this is the country list and not user editable. Not including the ``blank_label`` property, as this isn't database related.
[ "Remove", "choices", "from", "deconstructed", "field", "as", "this", "is", "the", "country", "list", "and", "not", "user", "editable", "." ]
68b0934e8180d47bc15eff2887b6887aaa6e0228
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/fields.py#L337-L353
227,506
SmileyChris/django-countries
django_countries/fields.py
CountryField.validate
def validate(self, value, model_instance): """ Use custom validation for when using a multiple countries field. """ if not self.multiple: return super(CountryField, self).validate(value, model_instance) if not self.editable: # Skip validation for non-editable fields. return if value: choices = [option_key for option_key, option_value in self.choices] for single_value in value: if single_value not in choices: raise exceptions.ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": single_value}, ) if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages["blank"], code="blank")
python
def validate(self, value, model_instance): if not self.multiple: return super(CountryField, self).validate(value, model_instance) if not self.editable: # Skip validation for non-editable fields. return if value: choices = [option_key for option_key, option_value in self.choices] for single_value in value: if single_value not in choices: raise exceptions.ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": single_value}, ) if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages["blank"], code="blank")
[ "def", "validate", "(", "self", ",", "value", ",", "model_instance", ")", ":", "if", "not", "self", ".", "multiple", ":", "return", "super", "(", "CountryField", ",", "self", ")", ".", "validate", "(", "value", ",", "model_instance", ")", "if", "not", ...
Use custom validation for when using a multiple countries field.
[ "Use", "custom", "validation", "for", "when", "using", "a", "multiple", "countries", "field", "." ]
68b0934e8180d47bc15eff2887b6887aaa6e0228
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/fields.py#L391-L413
227,507
SmileyChris/django-countries
django_countries/fields.py
CountryField.value_to_string
def value_to_string(self, obj): """ Ensure data is serialized correctly. """ value = self.value_from_object(obj) return self.get_prep_value(value)
python
def value_to_string(self, obj): value = self.value_from_object(obj) return self.get_prep_value(value)
[ "def", "value_to_string", "(", "self", ",", "obj", ")", ":", "value", "=", "self", ".", "value_from_object", "(", "obj", ")", "return", "self", ".", "get_prep_value", "(", "value", ")" ]
Ensure data is serialized correctly.
[ "Ensure", "data", "is", "serialized", "correctly", "." ]
68b0934e8180d47bc15eff2887b6887aaa6e0228
https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/fields.py#L415-L420
227,508
thampiman/reverse-geocoder
reverse_geocoder/cKDTree_MP.py
_pquery
def _pquery(scheduler, data, ndata, ndim, leafsize, x, nx, d, i, k, eps, p, dub, ierr): """ Function that parallelly queries the K-D tree based on chunks of data returned by the scheduler """ try: _data = shmem_as_nparray(data).reshape((ndata, ndim)) _x = shmem_as_nparray(x).reshape((nx, ndim)) _d = shmem_as_nparray(d).reshape((nx, k)) _i = shmem_as_nparray(i).reshape((nx, k)) kdtree = cKDTree(_data, leafsize=leafsize) for s in scheduler: d_out, i_out = kdtree.query(_x[s, :], k=k, eps=eps, p=p, distance_upper_bound=dub) m_d = d_out.shape[0] m_i = i_out.shape[0] _d[s, :], _i[s, :] = d_out.reshape(m_d, 1), i_out.reshape(m_i, 1) except: ierr.value += 1
python
def _pquery(scheduler, data, ndata, ndim, leafsize, x, nx, d, i, k, eps, p, dub, ierr): try: _data = shmem_as_nparray(data).reshape((ndata, ndim)) _x = shmem_as_nparray(x).reshape((nx, ndim)) _d = shmem_as_nparray(d).reshape((nx, k)) _i = shmem_as_nparray(i).reshape((nx, k)) kdtree = cKDTree(_data, leafsize=leafsize) for s in scheduler: d_out, i_out = kdtree.query(_x[s, :], k=k, eps=eps, p=p, distance_upper_bound=dub) m_d = d_out.shape[0] m_i = i_out.shape[0] _d[s, :], _i[s, :] = d_out.reshape(m_d, 1), i_out.reshape(m_i, 1) except: ierr.value += 1
[ "def", "_pquery", "(", "scheduler", ",", "data", ",", "ndata", ",", "ndim", ",", "leafsize", ",", "x", ",", "nx", ",", "d", ",", "i", ",", "k", ",", "eps", ",", "p", ",", "dub", ",", "ierr", ")", ":", "try", ":", "_data", "=", "shmem_as_nparray...
Function that parallelly queries the K-D tree based on chunks of data returned by the scheduler
[ "Function", "that", "parallelly", "queries", "the", "K", "-", "D", "tree", "based", "on", "chunks", "of", "data", "returned", "by", "the", "scheduler" ]
a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0
https://github.com/thampiman/reverse-geocoder/blob/a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0/reverse_geocoder/cKDTree_MP.py#L17-L36
227,509
thampiman/reverse-geocoder
reverse_geocoder/cKDTree_MP.py
cKDTree_MP.pquery
def pquery(self, x_list, k=1, eps=0, p=2, distance_upper_bound=np.inf): """ Function to parallelly query the K-D Tree """ x = np.array(x_list) nx, mx = x.shape shmem_x = mp.Array(ctypes.c_double, nx*mx) shmem_d = mp.Array(ctypes.c_double, nx*k) shmem_i = mp.Array(ctypes.c_double, nx*k) _x = shmem_as_nparray(shmem_x).reshape((nx, mx)) _d = shmem_as_nparray(shmem_d).reshape((nx, k)) _i = shmem_as_nparray(shmem_i) if k != 1: _i = _i.reshape((nx, k)) _x[:, :] = x nprocs = num_cpus() scheduler = Scheduler(nx, nprocs) ierr = mp.Value(ctypes.c_int, 0) query_args = (scheduler, self.shmem_data, self.n, self.m, self.leafsize, shmem_x, nx, shmem_d, shmem_i, k, eps, p, distance_upper_bound, ierr) pool = [mp.Process(target=_pquery, args=query_args) for _ in range(nprocs)] for p in pool: p.start() for p in pool: p.join() if ierr.value != 0: raise RuntimeError('%d errors in worker processes' % (ierr.value)) return _d.copy(), _i.astype(int).copy()
python
def pquery(self, x_list, k=1, eps=0, p=2, distance_upper_bound=np.inf): x = np.array(x_list) nx, mx = x.shape shmem_x = mp.Array(ctypes.c_double, nx*mx) shmem_d = mp.Array(ctypes.c_double, nx*k) shmem_i = mp.Array(ctypes.c_double, nx*k) _x = shmem_as_nparray(shmem_x).reshape((nx, mx)) _d = shmem_as_nparray(shmem_d).reshape((nx, k)) _i = shmem_as_nparray(shmem_i) if k != 1: _i = _i.reshape((nx, k)) _x[:, :] = x nprocs = num_cpus() scheduler = Scheduler(nx, nprocs) ierr = mp.Value(ctypes.c_int, 0) query_args = (scheduler, self.shmem_data, self.n, self.m, self.leafsize, shmem_x, nx, shmem_d, shmem_i, k, eps, p, distance_upper_bound, ierr) pool = [mp.Process(target=_pquery, args=query_args) for _ in range(nprocs)] for p in pool: p.start() for p in pool: p.join() if ierr.value != 0: raise RuntimeError('%d errors in worker processes' % (ierr.value)) return _d.copy(), _i.astype(int).copy()
[ "def", "pquery", "(", "self", ",", "x_list", ",", "k", "=", "1", ",", "eps", "=", "0", ",", "p", "=", "2", ",", "distance_upper_bound", "=", "np", ".", "inf", ")", ":", "x", "=", "np", ".", "array", "(", "x_list", ")", "nx", ",", "mx", "=", ...
Function to parallelly query the K-D Tree
[ "Function", "to", "parallelly", "query", "the", "K", "-", "D", "Tree" ]
a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0
https://github.com/thampiman/reverse-geocoder/blob/a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0/reverse_geocoder/cKDTree_MP.py#L66-L102
227,510
thampiman/reverse-geocoder
reverse_geocoder/__init__.py
singleton
def singleton(cls): """ Function to get single instance of the RGeocoder class """ instances = {} def getinstance(**kwargs): """ Creates a new RGeocoder instance if not created already """ if cls not in instances: instances[cls] = cls(**kwargs) return instances[cls] return getinstance
python
def singleton(cls): instances = {} def getinstance(**kwargs): """ Creates a new RGeocoder instance if not created already """ if cls not in instances: instances[cls] = cls(**kwargs) return instances[cls] return getinstance
[ "def", "singleton", "(", "cls", ")", ":", "instances", "=", "{", "}", "def", "getinstance", "(", "*", "*", "kwargs", ")", ":", "\"\"\"\n Creates a new RGeocoder instance if not created already\n \"\"\"", "if", "cls", "not", "in", "instances", ":", "ins...
Function to get single instance of the RGeocoder class
[ "Function", "to", "get", "single", "instance", "of", "the", "RGeocoder", "class" ]
a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0
https://github.com/thampiman/reverse-geocoder/blob/a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0/reverse_geocoder/__init__.py#L78-L90
227,511
thampiman/reverse-geocoder
reverse_geocoder/__init__.py
rel_path
def rel_path(filename): """ Function that gets relative path to the filename """ return os.path.join(os.getcwd(), os.path.dirname(__file__), filename)
python
def rel_path(filename): return os.path.join(os.getcwd(), os.path.dirname(__file__), filename)
[ "def", "rel_path", "(", "filename", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "filename", ")" ]
Function that gets relative path to the filename
[ "Function", "that", "gets", "relative", "path", "to", "the", "filename" ]
a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0
https://github.com/thampiman/reverse-geocoder/blob/a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0/reverse_geocoder/__init__.py#L267-L271
227,512
thampiman/reverse-geocoder
reverse_geocoder/__init__.py
get
def get(geo_coord, mode=2, verbose=True): """ Function to query for a single coordinate """ if not isinstance(geo_coord, tuple) or not isinstance(geo_coord[0], float): raise TypeError('Expecting a tuple') _rg = RGeocoder(mode=mode, verbose=verbose) return _rg.query([geo_coord])[0]
python
def get(geo_coord, mode=2, verbose=True): if not isinstance(geo_coord, tuple) or not isinstance(geo_coord[0], float): raise TypeError('Expecting a tuple') _rg = RGeocoder(mode=mode, verbose=verbose) return _rg.query([geo_coord])[0]
[ "def", "get", "(", "geo_coord", ",", "mode", "=", "2", ",", "verbose", "=", "True", ")", ":", "if", "not", "isinstance", "(", "geo_coord", ",", "tuple", ")", "or", "not", "isinstance", "(", "geo_coord", "[", "0", "]", ",", "float", ")", ":", "raise...
Function to query for a single coordinate
[ "Function", "to", "query", "for", "a", "single", "coordinate" ]
a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0
https://github.com/thampiman/reverse-geocoder/blob/a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0/reverse_geocoder/__init__.py#L273-L281
227,513
thampiman/reverse-geocoder
reverse_geocoder/__init__.py
search
def search(geo_coords, mode=2, verbose=True): """ Function to query for a list of coordinates """ if not isinstance(geo_coords, tuple) and not isinstance(geo_coords, list): raise TypeError('Expecting a tuple or a tuple/list of tuples') elif not isinstance(geo_coords[0], tuple): geo_coords = [geo_coords] _rg = RGeocoder(mode=mode, verbose=verbose) return _rg.query(geo_coords)
python
def search(geo_coords, mode=2, verbose=True): if not isinstance(geo_coords, tuple) and not isinstance(geo_coords, list): raise TypeError('Expecting a tuple or a tuple/list of tuples') elif not isinstance(geo_coords[0], tuple): geo_coords = [geo_coords] _rg = RGeocoder(mode=mode, verbose=verbose) return _rg.query(geo_coords)
[ "def", "search", "(", "geo_coords", ",", "mode", "=", "2", ",", "verbose", "=", "True", ")", ":", "if", "not", "isinstance", "(", "geo_coords", ",", "tuple", ")", "and", "not", "isinstance", "(", "geo_coords", ",", "list", ")", ":", "raise", "TypeError...
Function to query for a list of coordinates
[ "Function", "to", "query", "for", "a", "list", "of", "coordinates" ]
a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0
https://github.com/thampiman/reverse-geocoder/blob/a81b4095bf2cb7ef84d2187fcbc8945d5d8922d0/reverse_geocoder/__init__.py#L283-L293
227,514
insanum/gcalcli
gcalcli/printer.py
Printer.art_msg
def art_msg(self, arttag, colorname, file=sys.stdout): """Wrapper for easy emission of the calendar borders""" self.msg(self.art[arttag], colorname, file=file)
python
def art_msg(self, arttag, colorname, file=sys.stdout): self.msg(self.art[arttag], colorname, file=file)
[ "def", "art_msg", "(", "self", ",", "arttag", ",", "colorname", ",", "file", "=", "sys", ".", "stdout", ")", ":", "self", ".", "msg", "(", "self", ".", "art", "[", "arttag", "]", ",", "colorname", ",", "file", "=", "file", ")" ]
Wrapper for easy emission of the calendar borders
[ "Wrapper", "for", "easy", "emission", "of", "the", "calendar", "borders" ]
428378a88f89d154c8d4046deb9bdb5eb4e81019
https://github.com/insanum/gcalcli/blob/428378a88f89d154c8d4046deb9bdb5eb4e81019/gcalcli/printer.py#L99-L101
227,515
insanum/gcalcli
gcalcli/gcal.py
GoogleCalendarInterface._cal_monday
def _cal_monday(self, day_num): """Shift the day number if we're doing cal monday, or cal_weekend is false, since that also means we're starting on day 1 """ if self.options['cal_monday'] or not self.options['cal_weekend']: day_num -= 1 if day_num < 0: day_num = 6 return day_num
python
def _cal_monday(self, day_num): if self.options['cal_monday'] or not self.options['cal_weekend']: day_num -= 1 if day_num < 0: day_num = 6 return day_num
[ "def", "_cal_monday", "(", "self", ",", "day_num", ")", ":", "if", "self", ".", "options", "[", "'cal_monday'", "]", "or", "not", "self", ".", "options", "[", "'cal_weekend'", "]", ":", "day_num", "-=", "1", "if", "day_num", "<", "0", ":", "day_num", ...
Shift the day number if we're doing cal monday, or cal_weekend is false, since that also means we're starting on day 1
[ "Shift", "the", "day", "number", "if", "we", "re", "doing", "cal", "monday", "or", "cal_weekend", "is", "false", "since", "that", "also", "means", "we", "re", "starting", "on", "day", "1" ]
428378a88f89d154c8d4046deb9bdb5eb4e81019
https://github.com/insanum/gcalcli/blob/428378a88f89d154c8d4046deb9bdb5eb4e81019/gcalcli/gcal.py#L259-L267
227,516
insanum/gcalcli
gcalcli/gcal.py
GoogleCalendarInterface.QuickAddEvent
def QuickAddEvent(self, event_text, reminders=None): """Wrapper around Google Calendar API's quickAdd""" if not event_text: raise GcalcliError('event_text is required for a quickAdd') if len(self.cals) != 1: # TODO: get a better name for this exception class # and use it elsewhere raise GcalcliError('You must only specify a single calendar\n') new_event = self._retry_with_backoff( self.get_cal_service() .events() .quickAdd( calendarId=self.cals[0]['id'], text=event_text ) ) if reminders or not self.options['default_reminders']: rem = {} rem['reminders'] = {'useDefault': False, 'overrides': []} for r in reminders: n, m = utils.parse_reminder(r) rem['reminders']['overrides'].append({'minutes': n, 'method': m}) new_event = self._retry_with_backoff( self.get_cal_service() .events() .patch( calendarId=self.cals[0]['id'], eventId=new_event['id'], body=rem ) ) if self.details.get('url'): hlink = new_event['htmlLink'] self.printer.msg('New event added: %s\n' % hlink, 'green') return new_event
python
def QuickAddEvent(self, event_text, reminders=None): if not event_text: raise GcalcliError('event_text is required for a quickAdd') if len(self.cals) != 1: # TODO: get a better name for this exception class # and use it elsewhere raise GcalcliError('You must only specify a single calendar\n') new_event = self._retry_with_backoff( self.get_cal_service() .events() .quickAdd( calendarId=self.cals[0]['id'], text=event_text ) ) if reminders or not self.options['default_reminders']: rem = {} rem['reminders'] = {'useDefault': False, 'overrides': []} for r in reminders: n, m = utils.parse_reminder(r) rem['reminders']['overrides'].append({'minutes': n, 'method': m}) new_event = self._retry_with_backoff( self.get_cal_service() .events() .patch( calendarId=self.cals[0]['id'], eventId=new_event['id'], body=rem ) ) if self.details.get('url'): hlink = new_event['htmlLink'] self.printer.msg('New event added: %s\n' % hlink, 'green') return new_event
[ "def", "QuickAddEvent", "(", "self", ",", "event_text", ",", "reminders", "=", "None", ")", ":", "if", "not", "event_text", ":", "raise", "GcalcliError", "(", "'event_text is required for a quickAdd'", ")", "if", "len", "(", "self", ".", "cals", ")", "!=", "...
Wrapper around Google Calendar API's quickAdd
[ "Wrapper", "around", "Google", "Calendar", "API", "s", "quickAdd" ]
428378a88f89d154c8d4046deb9bdb5eb4e81019
https://github.com/insanum/gcalcli/blob/428378a88f89d154c8d4046deb9bdb5eb4e81019/gcalcli/gcal.py#L1230-L1272
227,517
insanum/gcalcli
gcalcli/gcal.py
GoogleCalendarInterface.Remind
def Remind(self, minutes, command, use_reminders=False): """ Check for events between now and now+minutes. If use_reminders then only remind if now >= event['start'] - reminder """ # perform a date query for now + minutes + slip start = self.now end = (start + timedelta(minutes=(minutes + 5))) event_list = self._search_for_events(start, end, None) message = '' for event in event_list: # skip this event if it already started # XXX maybe add a 2+ minute grace period here... if event['s'] < self.now: continue # not sure if 'reminders' always in event if use_reminders and 'reminders' in event \ and 'overrides' in event['reminders']: if all(event['s'] - timedelta(minutes=r['minutes']) > self.now for r in event['reminders']['overrides']): # don't remind if all reminders haven't arrived yet continue if self.options.get('military'): tmp_time_str = event['s'].strftime('%H:%M') else: tmp_time_str = \ event['s'].strftime('%I:%M').lstrip('0') + \ event['s'].strftime('%p').lower() message += '%s %s\n' % \ (tmp_time_str, self._valid_title(event).strip()) if not message: return cmd = shlex.split(command) for i, a in zip(range(len(cmd)), cmd): if a == '%s': cmd[i] = message pid = os.fork() if not pid: os.execvp(cmd[0], cmd)
python
def Remind(self, minutes, command, use_reminders=False): # perform a date query for now + minutes + slip start = self.now end = (start + timedelta(minutes=(minutes + 5))) event_list = self._search_for_events(start, end, None) message = '' for event in event_list: # skip this event if it already started # XXX maybe add a 2+ minute grace period here... if event['s'] < self.now: continue # not sure if 'reminders' always in event if use_reminders and 'reminders' in event \ and 'overrides' in event['reminders']: if all(event['s'] - timedelta(minutes=r['minutes']) > self.now for r in event['reminders']['overrides']): # don't remind if all reminders haven't arrived yet continue if self.options.get('military'): tmp_time_str = event['s'].strftime('%H:%M') else: tmp_time_str = \ event['s'].strftime('%I:%M').lstrip('0') + \ event['s'].strftime('%p').lower() message += '%s %s\n' % \ (tmp_time_str, self._valid_title(event).strip()) if not message: return cmd = shlex.split(command) for i, a in zip(range(len(cmd)), cmd): if a == '%s': cmd[i] = message pid = os.fork() if not pid: os.execvp(cmd[0], cmd)
[ "def", "Remind", "(", "self", ",", "minutes", ",", "command", ",", "use_reminders", "=", "False", ")", ":", "# perform a date query for now + minutes + slip", "start", "=", "self", ".", "now", "end", "=", "(", "start", "+", "timedelta", "(", "minutes", "=", ...
Check for events between now and now+minutes. If use_reminders then only remind if now >= event['start'] - reminder
[ "Check", "for", "events", "between", "now", "and", "now", "+", "minutes", "." ]
428378a88f89d154c8d4046deb9bdb5eb4e81019
https://github.com/insanum/gcalcli/blob/428378a88f89d154c8d4046deb9bdb5eb4e81019/gcalcli/gcal.py#L1325-L1376
227,518
insanum/gcalcli
gcalcli/validators.py
color_validator
def color_validator(input_str): """ A filter allowing only the particular colors used by the Google Calendar API Raises ValidationError otherwise. """ try: assert input_str in VALID_OVERRIDE_COLORS + [''] return input_str except AssertionError: raise ValidationError( 'Expected colors are: ' + ', '.join(color for color in VALID_OVERRIDE_COLORS) + '. (Ctrl-C to exit)\n')
python
def color_validator(input_str): try: assert input_str in VALID_OVERRIDE_COLORS + [''] return input_str except AssertionError: raise ValidationError( 'Expected colors are: ' + ', '.join(color for color in VALID_OVERRIDE_COLORS) + '. (Ctrl-C to exit)\n')
[ "def", "color_validator", "(", "input_str", ")", ":", "try", ":", "assert", "input_str", "in", "VALID_OVERRIDE_COLORS", "+", "[", "''", "]", "return", "input_str", "except", "AssertionError", ":", "raise", "ValidationError", "(", "'Expected colors are: '", "+", "'...
A filter allowing only the particular colors used by the Google Calendar API Raises ValidationError otherwise.
[ "A", "filter", "allowing", "only", "the", "particular", "colors", "used", "by", "the", "Google", "Calendar", "API" ]
428378a88f89d154c8d4046deb9bdb5eb4e81019
https://github.com/insanum/gcalcli/blob/428378a88f89d154c8d4046deb9bdb5eb4e81019/gcalcli/validators.py#L31-L45
227,519
insanum/gcalcli
gcalcli/validators.py
reminder_validator
def reminder_validator(input_str): """ Allows a string that matches utils.REMINDER_REGEX. Raises ValidationError otherwise. """ match = re.match(REMINDER_REGEX, input_str) if match or input_str == '.': return input_str else: raise ValidationError('Expected format: <number><w|d|h|m> ' '<popup|email|sms>. (Ctrl-C to exit)\n')
python
def reminder_validator(input_str): match = re.match(REMINDER_REGEX, input_str) if match or input_str == '.': return input_str else: raise ValidationError('Expected format: <number><w|d|h|m> ' '<popup|email|sms>. (Ctrl-C to exit)\n')
[ "def", "reminder_validator", "(", "input_str", ")", ":", "match", "=", "re", ".", "match", "(", "REMINDER_REGEX", ",", "input_str", ")", "if", "match", "or", "input_str", "==", "'.'", ":", "return", "input_str", "else", ":", "raise", "ValidationError", "(", ...
Allows a string that matches utils.REMINDER_REGEX. Raises ValidationError otherwise.
[ "Allows", "a", "string", "that", "matches", "utils", ".", "REMINDER_REGEX", ".", "Raises", "ValidationError", "otherwise", "." ]
428378a88f89d154c8d4046deb9bdb5eb4e81019
https://github.com/insanum/gcalcli/blob/428378a88f89d154c8d4046deb9bdb5eb4e81019/gcalcli/validators.py#L101-L111
227,520
olucurious/PyFCM
pyfcm/baseapi.py
BaseAPI.registration_id_chunks
def registration_id_chunks(self, registration_ids): """ Splits registration ids in several lists of max 1000 registration ids per list Args: registration_ids (list): FCM device registration ID Yields: generator: list including lists with registration ids """ try: xrange except NameError: xrange = range # Yield successive 1000-sized (max fcm recipients per request) chunks from registration_ids for i in xrange(0, len(registration_ids), self.FCM_MAX_RECIPIENTS): yield registration_ids[i:i + self.FCM_MAX_RECIPIENTS]
python
def registration_id_chunks(self, registration_ids): try: xrange except NameError: xrange = range # Yield successive 1000-sized (max fcm recipients per request) chunks from registration_ids for i in xrange(0, len(registration_ids), self.FCM_MAX_RECIPIENTS): yield registration_ids[i:i + self.FCM_MAX_RECIPIENTS]
[ "def", "registration_id_chunks", "(", "self", ",", "registration_ids", ")", ":", "try", ":", "xrange", "except", "NameError", ":", "xrange", "=", "range", "# Yield successive 1000-sized (max fcm recipients per request) chunks from registration_ids", "for", "i", "in", "xrang...
Splits registration ids in several lists of max 1000 registration ids per list Args: registration_ids (list): FCM device registration ID Yields: generator: list including lists with registration ids
[ "Splits", "registration", "ids", "in", "several", "lists", "of", "max", "1000", "registration", "ids", "per", "list" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/baseapi.py#L86-L103
227,521
olucurious/PyFCM
pyfcm/baseapi.py
BaseAPI.json_dumps
def json_dumps(self, data): """ Standardized json.dumps function with separators and sorted keys set Args: data (dict or list): data to be dumped Returns: string: json """ return json.dumps( data, separators=(',', ':'), sort_keys=True, cls=self.json_encoder, ensure_ascii=False ).encode('utf8')
python
def json_dumps(self, data): return json.dumps( data, separators=(',', ':'), sort_keys=True, cls=self.json_encoder, ensure_ascii=False ).encode('utf8')
[ "def", "json_dumps", "(", "self", ",", "data", ")", ":", "return", "json", ".", "dumps", "(", "data", ",", "separators", "=", "(", "','", ",", "':'", ")", ",", "sort_keys", "=", "True", ",", "cls", "=", "self", ".", "json_encoder", ",", "ensure_ascii...
Standardized json.dumps function with separators and sorted keys set Args: data (dict or list): data to be dumped Returns: string: json
[ "Standardized", "json", ".", "dumps", "function", "with", "separators", "and", "sorted", "keys", "set" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/baseapi.py#L105-L121
227,522
olucurious/PyFCM
pyfcm/baseapi.py
BaseAPI.registration_info_request
def registration_info_request(self, registration_id): """ Makes a request for registration info and returns the response object Args: registration_id: id to be checked Returns: response of registration info request """ return self.requests_session.get( self.INFO_END_POINT + registration_id, params={'details': 'true'} )
python
def registration_info_request(self, registration_id): return self.requests_session.get( self.INFO_END_POINT + registration_id, params={'details': 'true'} )
[ "def", "registration_info_request", "(", "self", ",", "registration_id", ")", ":", "return", "self", ".", "requests_session", ".", "get", "(", "self", ".", "INFO_END_POINT", "+", "registration_id", ",", "params", "=", "{", "'details'", ":", "'true'", "}", ")" ...
Makes a request for registration info and returns the response object Args: registration_id: id to be checked Returns: response of registration info request
[ "Makes", "a", "request", "for", "registration", "info", "and", "returns", "the", "response", "object" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/baseapi.py#L315-L328
227,523
olucurious/PyFCM
pyfcm/baseapi.py
BaseAPI.clean_registration_ids
def clean_registration_ids(self, registration_ids=[]): """ Checks registration ids and excludes inactive ids Args: registration_ids (list, optional): list of ids to be cleaned Returns: list: cleaned registration ids """ valid_registration_ids = [] for registration_id in registration_ids: details = self.registration_info_request(registration_id) if details.status_code == 200: valid_registration_ids.append(registration_id) return valid_registration_ids
python
def clean_registration_ids(self, registration_ids=[]): valid_registration_ids = [] for registration_id in registration_ids: details = self.registration_info_request(registration_id) if details.status_code == 200: valid_registration_ids.append(registration_id) return valid_registration_ids
[ "def", "clean_registration_ids", "(", "self", ",", "registration_ids", "=", "[", "]", ")", ":", "valid_registration_ids", "=", "[", "]", "for", "registration_id", "in", "registration_ids", ":", "details", "=", "self", ".", "registration_info_request", "(", "regist...
Checks registration ids and excludes inactive ids Args: registration_ids (list, optional): list of ids to be cleaned Returns: list: cleaned registration ids
[ "Checks", "registration", "ids", "and", "excludes", "inactive", "ids" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/baseapi.py#L330-L345
227,524
olucurious/PyFCM
pyfcm/baseapi.py
BaseAPI.get_registration_id_info
def get_registration_id_info(self, registration_id): """ Returns details related to a registration id if it exists otherwise return None Args: registration_id: id to be checked Returns: dict: info about registration id None: if id doesn't exist """ response = self.registration_info_request(registration_id) if response.status_code == 200: return response.json() return None
python
def get_registration_id_info(self, registration_id): response = self.registration_info_request(registration_id) if response.status_code == 200: return response.json() return None
[ "def", "get_registration_id_info", "(", "self", ",", "registration_id", ")", ":", "response", "=", "self", ".", "registration_info_request", "(", "registration_id", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "response", ".", "json", "...
Returns details related to a registration id if it exists otherwise return None Args: registration_id: id to be checked Returns: dict: info about registration id None: if id doesn't exist
[ "Returns", "details", "related", "to", "a", "registration", "id", "if", "it", "exists", "otherwise", "return", "None" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/baseapi.py#L347-L361
227,525
olucurious/PyFCM
pyfcm/baseapi.py
BaseAPI.subscribe_registration_ids_to_topic
def subscribe_registration_ids_to_topic(self, registration_ids, topic_name): """ Subscribes a list of registration ids to a topic Args: registration_ids (list): ids to be subscribed topic_name (str): name of topic Returns: True: if operation succeeded Raises: InvalidDataError: data sent to server was incorrectly formatted FCMError: an error occured on the server """ url = 'https://iid.googleapis.com/iid/v1:batchAdd' payload = { 'to': '/topics/' + topic_name, 'registration_tokens': registration_ids, } response = self.requests_session.post(url, json=payload) if response.status_code == 200: return True elif response.status_code == 400: error = response.json() raise InvalidDataError(error['error']) else: raise FCMError()
python
def subscribe_registration_ids_to_topic(self, registration_ids, topic_name): url = 'https://iid.googleapis.com/iid/v1:batchAdd' payload = { 'to': '/topics/' + topic_name, 'registration_tokens': registration_ids, } response = self.requests_session.post(url, json=payload) if response.status_code == 200: return True elif response.status_code == 400: error = response.json() raise InvalidDataError(error['error']) else: raise FCMError()
[ "def", "subscribe_registration_ids_to_topic", "(", "self", ",", "registration_ids", ",", "topic_name", ")", ":", "url", "=", "'https://iid.googleapis.com/iid/v1:batchAdd'", "payload", "=", "{", "'to'", ":", "'/topics/'", "+", "topic_name", ",", "'registration_tokens'", ...
Subscribes a list of registration ids to a topic Args: registration_ids (list): ids to be subscribed topic_name (str): name of topic Returns: True: if operation succeeded Raises: InvalidDataError: data sent to server was incorrectly formatted FCMError: an error occured on the server
[ "Subscribes", "a", "list", "of", "registration", "ids", "to", "a", "topic" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/baseapi.py#L363-L390
227,526
olucurious/PyFCM
pyfcm/baseapi.py
BaseAPI.parse_responses
def parse_responses(self): """ Parses the json response sent back by the server and tries to get out the important return variables Returns: dict: multicast_ids (list), success (int), failure (int), canonical_ids (int), results (list) and optional topic_message_id (str but None by default) Raises: FCMServerError: FCM is temporary not available AuthenticationError: error authenticating the sender account InvalidDataError: data passed to FCM was incorrecly structured """ response_dict = { 'multicast_ids': [], 'success': 0, 'failure': 0, 'canonical_ids': 0, 'results': [], 'topic_message_id': None } for response in self.send_request_responses: if response.status_code == 200: if 'content-length' in response.headers and int(response.headers['content-length']) <= 0: raise FCMServerError("FCM server connection error, the response is empty") else: parsed_response = response.json() multicast_id = parsed_response.get('multicast_id', None) success = parsed_response.get('success', 0) failure = parsed_response.get('failure', 0) canonical_ids = parsed_response.get('canonical_ids', 0) results = parsed_response.get('results', []) message_id = parsed_response.get('message_id', None) # for topic messages if message_id: success = 1 if multicast_id: response_dict['multicast_ids'].append(multicast_id) response_dict['success'] += success response_dict['failure'] += failure response_dict['canonical_ids'] += canonical_ids response_dict['results'].extend(results) response_dict['topic_message_id'] = message_id elif response.status_code == 401: raise AuthenticationError("There was an error authenticating the sender account") elif response.status_code == 400: raise InvalidDataError(response.text) else: raise FCMServerError("FCM server is temporarily unavailable") return response_dict
python
def parse_responses(self): response_dict = { 'multicast_ids': [], 'success': 0, 'failure': 0, 'canonical_ids': 0, 'results': [], 'topic_message_id': None } for response in self.send_request_responses: if response.status_code == 200: if 'content-length' in response.headers and int(response.headers['content-length']) <= 0: raise FCMServerError("FCM server connection error, the response is empty") else: parsed_response = response.json() multicast_id = parsed_response.get('multicast_id', None) success = parsed_response.get('success', 0) failure = parsed_response.get('failure', 0) canonical_ids = parsed_response.get('canonical_ids', 0) results = parsed_response.get('results', []) message_id = parsed_response.get('message_id', None) # for topic messages if message_id: success = 1 if multicast_id: response_dict['multicast_ids'].append(multicast_id) response_dict['success'] += success response_dict['failure'] += failure response_dict['canonical_ids'] += canonical_ids response_dict['results'].extend(results) response_dict['topic_message_id'] = message_id elif response.status_code == 401: raise AuthenticationError("There was an error authenticating the sender account") elif response.status_code == 400: raise InvalidDataError(response.text) else: raise FCMServerError("FCM server is temporarily unavailable") return response_dict
[ "def", "parse_responses", "(", "self", ")", ":", "response_dict", "=", "{", "'multicast_ids'", ":", "[", "]", ",", "'success'", ":", "0", ",", "'failure'", ":", "0", ",", "'canonical_ids'", ":", "0", ",", "'results'", ":", "[", "]", ",", "'topic_message_...
Parses the json response sent back by the server and tries to get out the important return variables Returns: dict: multicast_ids (list), success (int), failure (int), canonical_ids (int), results (list) and optional topic_message_id (str but None by default) Raises: FCMServerError: FCM is temporary not available AuthenticationError: error authenticating the sender account InvalidDataError: data passed to FCM was incorrecly structured
[ "Parses", "the", "json", "response", "sent", "back", "by", "the", "server", "and", "tries", "to", "get", "out", "the", "important", "return", "variables" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/baseapi.py#L421-L472
227,527
olucurious/PyFCM
pyfcm/fcm.py
FCMNotification.notify_single_device
def notify_single_device(self, registration_id=None, message_body=None, message_title=None, message_icon=None, sound=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, click_action=None, badge=None, color=None, tag=None, body_loc_key=None, body_loc_args=None, title_loc_key=None, title_loc_args=None, content_available=None, android_channel_id=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): """ Send push notification to a single device Args: registration_id (list, optional): FCM device registration ID message_body (str, optional): Message string to display in the notification tray message_title (str, optional): Message title to display in the notification tray message_icon (str, optional): Icon that apperas next to the notification sound (str, optional): The sound file name to play. Specify "Default" for device default sound. condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. 
data_message (dict, optional): Custom key-value pairs click_action (str, optional): Action associated with a user click on the notification badge (str, optional): Badge of notification color (str, optional): Color of the icon tag (str, optional): Group notification by tag body_loc_key (str, optional): Indicates the key to the body string for localization body_loc_args (list, optional): Indicates the string value to replace format specifiers in body string for localization title_loc_key (str, optional): Indicates the key to the title string for localization title_loc_args (list, optional): Indicates the string value to replace format specifiers in title string for localization content_available (bool, optional): Inactive client app is awoken android_channel_id (str, optional): Starting in Android 8.0 (API level 26), all notifications must be assigned to a channel. For each channel, you can set the visual and auditory behavior that is applied to all notifications in that channel. Then, users can change these settings and decide which notification channels from your app should be intrusive or visible at all. timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. 
FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: Mostly from changes in the response of FCM, contact the project owner to resolve the issue """ if registration_id is None: raise InvalidDataError('Invalid registration ID') # [registration_id] cos we're sending to a single device payload = self.parse_payload( registration_ids=[registration_id], message_body=message_body, message_title=message_title, message_icon=message_icon, sound=sound, condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, click_action=click_action, badge=badge, color=color, tag=tag, body_loc_key=body_loc_key, body_loc_args=body_loc_args, title_loc_key=title_loc_key, title_loc_args=title_loc_args, android_channel_id=android_channel_id, content_available=content_available, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs ) self.send_request([payload], timeout) return self.parse_responses()
python
def notify_single_device(self, registration_id=None, message_body=None, message_title=None, message_icon=None, sound=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, click_action=None, badge=None, color=None, tag=None, body_loc_key=None, body_loc_args=None, title_loc_key=None, title_loc_args=None, content_available=None, android_channel_id=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): if registration_id is None: raise InvalidDataError('Invalid registration ID') # [registration_id] cos we're sending to a single device payload = self.parse_payload( registration_ids=[registration_id], message_body=message_body, message_title=message_title, message_icon=message_icon, sound=sound, condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, click_action=click_action, badge=badge, color=color, tag=tag, body_loc_key=body_loc_key, body_loc_args=body_loc_args, title_loc_key=title_loc_key, title_loc_args=title_loc_args, android_channel_id=android_channel_id, content_available=content_available, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs ) self.send_request([payload], timeout) return self.parse_responses()
[ "def", "notify_single_device", "(", "self", ",", "registration_id", "=", "None", ",", "message_body", "=", "None", ",", "message_title", "=", "None", ",", "message_icon", "=", "None", ",", "sound", "=", "None", ",", "condition", "=", "None", ",", "collapse_k...
Send push notification to a single device Args: registration_id (list, optional): FCM device registration ID message_body (str, optional): Message string to display in the notification tray message_title (str, optional): Message title to display in the notification tray message_icon (str, optional): Icon that apperas next to the notification sound (str, optional): The sound file name to play. Specify "Default" for device default sound. condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. 
data_message (dict, optional): Custom key-value pairs click_action (str, optional): Action associated with a user click on the notification badge (str, optional): Badge of notification color (str, optional): Color of the icon tag (str, optional): Group notification by tag body_loc_key (str, optional): Indicates the key to the body string for localization body_loc_args (list, optional): Indicates the string value to replace format specifiers in body string for localization title_loc_key (str, optional): Indicates the key to the title string for localization title_loc_args (list, optional): Indicates the string value to replace format specifiers in title string for localization content_available (bool, optional): Inactive client app is awoken android_channel_id (str, optional): Starting in Android 8.0 (API level 26), all notifications must be assigned to a channel. For each channel, you can set the visual and auditory behavior that is applied to all notifications in that channel. Then, users can change these settings and decide which notification channels from your app should be intrusive or visible at all. timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: Mostly from changes in the response of FCM, contact the project owner to resolve the issue
[ "Send", "push", "notification", "to", "a", "single", "device" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/fcm.py#L6-L117
227,528
olucurious/PyFCM
pyfcm/fcm.py
FCMNotification.single_device_data_message
def single_device_data_message(self, registration_id=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, content_available=None, android_channel_id=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): """ Send push message to a single device Args: registration_id (list, optional): FCM device registration ID condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. data_message (dict, optional): Custom key-value pairs content_available (bool, optional): Inactive client app is awoken android_channel_id (str, optional): Starting in Android 8.0 (API level 26), all notifications must be assigned to a channel. For each channel, you can set the visual and auditory behavior that is applied to all notifications in that channel. Then, users can change these settings and decide which notification channels from your app should be intrusive or visible at all. 
timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: Mostly from changes in the response of FCM, contact the project owner to resolve the issue """ if registration_id is None: raise InvalidDataError('Invalid registration ID') # [registration_id] cos we're sending to a single device payload = self.parse_payload( registration_ids=[registration_id], condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, content_available=content_available, remove_notification=True, android_channel_id=android_channel_id, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs ) self.send_request([payload], timeout) return self.parse_responses()
python
def single_device_data_message(self, registration_id=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, content_available=None, android_channel_id=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): if registration_id is None: raise InvalidDataError('Invalid registration ID') # [registration_id] cos we're sending to a single device payload = self.parse_payload( registration_ids=[registration_id], condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, content_available=content_available, remove_notification=True, android_channel_id=android_channel_id, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs ) self.send_request([payload], timeout) return self.parse_responses()
[ "def", "single_device_data_message", "(", "self", ",", "registration_id", "=", "None", ",", "condition", "=", "None", ",", "collapse_key", "=", "None", ",", "delay_while_idle", "=", "False", ",", "time_to_live", "=", "None", ",", "restricted_package_name", "=", ...
Send push message to a single device Args: registration_id (list, optional): FCM device registration ID condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. data_message (dict, optional): Custom key-value pairs content_available (bool, optional): Inactive client app is awoken android_channel_id (str, optional): Starting in Android 8.0 (API level 26), all notifications must be assigned to a channel. For each channel, you can set the visual and auditory behavior that is applied to all notifications in that channel. Then, users can change these settings and decide which notification channels from your app should be intrusive or visible at all. timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. 
FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: Mostly from changes in the response of FCM, contact the project owner to resolve the issue
[ "Send", "push", "message", "to", "a", "single", "device" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/fcm.py#L119-L195
227,529
olucurious/PyFCM
pyfcm/fcm.py
FCMNotification.notify_multiple_devices
def notify_multiple_devices(self, registration_ids=None, message_body=None, message_title=None, message_icon=None, sound=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, click_action=None, badge=None, color=None, tag=None, body_loc_key=None, body_loc_args=None, title_loc_key=None, title_loc_args=None, content_available=None, android_channel_id=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): """ Sends push notification to multiple devices, can send to over 1000 devices Args: registration_ids (list, optional): FCM device registration IDs message_body (str, optional): Message string to display in the notification tray message_title (str, optional): Message title to display in the notification tray message_icon (str, optional): Icon that apperas next to the notification sound (str, optional): The sound file name to play. Specify "Default" for device default sound. condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. 
data_message (dict, optional): Custom key-value pairs click_action (str, optional): Action associated with a user click on the notification badge (str, optional): Badge of notification color (str, optional): Color of the icon tag (str, optional): Group notification by tag body_loc_key (str, optional): Indicates the key to the body string for localization body_loc_args (list, optional): Indicates the string value to replace format specifiers in body string for localization title_loc_key (str, optional): Indicates the key to the title string for localization title_loc_args (list, optional): Indicates the string value to replace format specifiers in title string for localization content_available (bool, optional): Inactive client app is awoken android_channel_id (str, optional): Starting in Android 8.0 (API level 26), all notifications must be assigned to a channel. For each channel, you can set the visual and auditory behavior that is applied to all notifications in that channel. Then, users can change these settings and decide which notification channels from your app should be intrusive or visible at all. timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it. 
""" if not isinstance(registration_ids, list): raise InvalidDataError('Invalid registration IDs (should be list)') payloads = [] registration_id_chunks = self.registration_id_chunks(registration_ids) for registration_ids in registration_id_chunks: # appends a payload with a chunk of registration ids here payloads.append(self.parse_payload( registration_ids=registration_ids, message_body=message_body, message_title=message_title, message_icon=message_icon, sound=sound, condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, click_action=click_action, badge=badge, color=color, tag=tag, body_loc_key=body_loc_key, body_loc_args=body_loc_args, title_loc_key=title_loc_key, title_loc_args=title_loc_args, content_available=content_available, android_channel_id=android_channel_id, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs )) self.send_request(payloads, timeout) return self.parse_responses()
python
def notify_multiple_devices(self, registration_ids=None, message_body=None, message_title=None, message_icon=None, sound=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, click_action=None, badge=None, color=None, tag=None, body_loc_key=None, body_loc_args=None, title_loc_key=None, title_loc_args=None, content_available=None, android_channel_id=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): if not isinstance(registration_ids, list): raise InvalidDataError('Invalid registration IDs (should be list)') payloads = [] registration_id_chunks = self.registration_id_chunks(registration_ids) for registration_ids in registration_id_chunks: # appends a payload with a chunk of registration ids here payloads.append(self.parse_payload( registration_ids=registration_ids, message_body=message_body, message_title=message_title, message_icon=message_icon, sound=sound, condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, click_action=click_action, badge=badge, color=color, tag=tag, body_loc_key=body_loc_key, body_loc_args=body_loc_args, title_loc_key=title_loc_key, title_loc_args=title_loc_args, content_available=content_available, android_channel_id=android_channel_id, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs )) self.send_request(payloads, timeout) return self.parse_responses()
[ "def", "notify_multiple_devices", "(", "self", ",", "registration_ids", "=", "None", ",", "message_body", "=", "None", ",", "message_title", "=", "None", ",", "message_icon", "=", "None", ",", "sound", "=", "None", ",", "condition", "=", "None", ",", "collap...
Sends push notification to multiple devices, can send to over 1000 devices Args: registration_ids (list, optional): FCM device registration IDs message_body (str, optional): Message string to display in the notification tray message_title (str, optional): Message title to display in the notification tray message_icon (str, optional): Icon that apperas next to the notification sound (str, optional): The sound file name to play. Specify "Default" for device default sound. condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. 
data_message (dict, optional): Custom key-value pairs click_action (str, optional): Action associated with a user click on the notification badge (str, optional): Badge of notification color (str, optional): Color of the icon tag (str, optional): Group notification by tag body_loc_key (str, optional): Indicates the key to the body string for localization body_loc_args (list, optional): Indicates the string value to replace format specifiers in body string for localization title_loc_key (str, optional): Indicates the key to the title string for localization title_loc_args (list, optional): Indicates the string value to replace format specifiers in title string for localization content_available (bool, optional): Inactive client app is awoken android_channel_id (str, optional): Starting in Android 8.0 (API level 26), all notifications must be assigned to a channel. For each channel, you can set the visual and auditory behavior that is applied to all notifications in that channel. Then, users can change these settings and decide which notification channels from your app should be intrusive or visible at all. timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it.
[ "Sends", "push", "notification", "to", "multiple", "devices", "can", "send", "to", "over", "1000", "devices" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/fcm.py#L197-L313
227,530
olucurious/PyFCM
pyfcm/fcm.py
FCMNotification.multiple_devices_data_message
def multiple_devices_data_message(self, registration_ids=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, content_available=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): """ Sends push message to multiple devices, can send to over 1000 devices Args: registration_ids (list, optional): FCM device registration IDs condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. data_message (dict, optional): Custom key-value pairs content_available (bool, optional): Inactive client app is awoken timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. 
FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it. """ if not isinstance(registration_ids, list): raise InvalidDataError('Invalid registration IDs (should be list)') payloads = [] registration_id_chunks = self.registration_id_chunks(registration_ids) for registration_ids in registration_id_chunks: # appends a payload with a chunk of registration ids here payloads.append(self.parse_payload( registration_ids=registration_ids, condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, content_available=content_available, remove_notification=True, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs) ) self.send_request(payloads, timeout) return self.parse_responses()
python
def multiple_devices_data_message(self, registration_ids=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, content_available=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): if not isinstance(registration_ids, list): raise InvalidDataError('Invalid registration IDs (should be list)') payloads = [] registration_id_chunks = self.registration_id_chunks(registration_ids) for registration_ids in registration_id_chunks: # appends a payload with a chunk of registration ids here payloads.append(self.parse_payload( registration_ids=registration_ids, condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, content_available=content_available, remove_notification=True, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs) ) self.send_request(payloads, timeout) return self.parse_responses()
[ "def", "multiple_devices_data_message", "(", "self", ",", "registration_ids", "=", "None", ",", "condition", "=", "None", ",", "collapse_key", "=", "None", ",", "delay_while_idle", "=", "False", ",", "time_to_live", "=", "None", ",", "restricted_package_name", "="...
Sends push message to multiple devices, can send to over 1000 devices Args: registration_ids (list, optional): FCM device registration IDs condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. data_message (dict, optional): Custom key-value pairs content_available (bool, optional): Inactive client app is awoken timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it.
[ "Sends", "push", "message", "to", "multiple", "devices", "can", "send", "to", "over", "1000", "devices" ]
28096cd5f6ef515bb6034e63327723d12304249a
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/fcm.py#L315-L387
227,531
nccgroup/Scout2
AWSScout2/utils.py
get_keys
def get_keys(src, dst, keys): """ Copies the value of keys from source object to dest object :param src: :param dst: :param keys: :return: """ for key in keys: #dst[no_camel(key)] = src[key] if key in src else None dst[key] = src[key] if key in src else None
python
def get_keys(src, dst, keys): for key in keys: #dst[no_camel(key)] = src[key] if key in src else None dst[key] = src[key] if key in src else None
[ "def", "get_keys", "(", "src", ",", "dst", ",", "keys", ")", ":", "for", "key", "in", "keys", ":", "#dst[no_camel(key)] = src[key] if key in src else None", "dst", "[", "key", "]", "=", "src", "[", "key", "]", "if", "key", "in", "src", "else", "None" ]
Copies the value of keys from source object to dest object :param src: :param dst: :param keys: :return:
[ "Copies", "the", "value", "of", "keys", "from", "source", "object", "to", "dest", "object" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/utils.py#L40-L51
227,532
nccgroup/Scout2
AWSScout2/utils.py
is_throttled
def is_throttled(e): """ Determines whether the exception is due to API throttling. :param e: Exception raised :return: True if it's a throttling exception else False """ return True if (hasattr(e, 'response') and e.response is not None and 'Error' in e.response and e.response['Error']['Code'] in ['Throttling', 'RequestLimitExceeded', 'ThrottlingException']) else \ False
python
def is_throttled(e): return True if (hasattr(e, 'response') and e.response is not None and 'Error' in e.response and e.response['Error']['Code'] in ['Throttling', 'RequestLimitExceeded', 'ThrottlingException']) else \ False
[ "def", "is_throttled", "(", "e", ")", ":", "return", "True", "if", "(", "hasattr", "(", "e", ",", "'response'", ")", "and", "e", ".", "response", "is", "not", "None", "and", "'Error'", "in", "e", ".", "response", "and", "e", ".", "response", "[", "...
Determines whether the exception is due to API throttling. :param e: Exception raised :return: True if it's a throttling exception else False
[ "Determines", "whether", "the", "exception", "is", "due", "to", "API", "throttling", "." ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/utils.py#L65-L76
227,533
nccgroup/Scout2
AWSScout2/services/ec2.py
EC2RegionConfig.parse_instance
def parse_instance(self, global_params, region, reservation): """ Parse a single EC2 instance :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param instance: Cluster """ for i in reservation['Instances']: instance = {} vpc_id = i['VpcId'] if 'VpcId' in i and i['VpcId'] else ec2_classic manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) instance['reservation_id'] = reservation['ReservationId'] instance['id'] = i['InstanceId'] get_name(i, instance, 'InstanceId') get_keys(i, instance, ['KeyName', 'LaunchTime', 'InstanceType', 'State', 'IamInstanceProfile', 'SubnetId']) # Network interfaces & security groups manage_dictionary(instance, 'network_interfaces', {}) for eni in i['NetworkInterfaces']: nic = {} get_keys(eni, nic, ['Association', 'Groups', 'PrivateIpAddresses', 'SubnetId', 'Ipv6Addresses']) instance['network_interfaces'][eni['NetworkInterfaceId']] = nic self.vpcs[vpc_id].instances[i['InstanceId']] = instance
python
def parse_instance(self, global_params, region, reservation): for i in reservation['Instances']: instance = {} vpc_id = i['VpcId'] if 'VpcId' in i and i['VpcId'] else ec2_classic manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) instance['reservation_id'] = reservation['ReservationId'] instance['id'] = i['InstanceId'] get_name(i, instance, 'InstanceId') get_keys(i, instance, ['KeyName', 'LaunchTime', 'InstanceType', 'State', 'IamInstanceProfile', 'SubnetId']) # Network interfaces & security groups manage_dictionary(instance, 'network_interfaces', {}) for eni in i['NetworkInterfaces']: nic = {} get_keys(eni, nic, ['Association', 'Groups', 'PrivateIpAddresses', 'SubnetId', 'Ipv6Addresses']) instance['network_interfaces'][eni['NetworkInterfaceId']] = nic self.vpcs[vpc_id].instances[i['InstanceId']] = instance
[ "def", "parse_instance", "(", "self", ",", "global_params", ",", "region", ",", "reservation", ")", ":", "for", "i", "in", "reservation", "[", "'Instances'", "]", ":", "instance", "=", "{", "}", "vpc_id", "=", "i", "[", "'VpcId'", "]", "if", "'VpcId'", ...
Parse a single EC2 instance :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param instance: Cluster
[ "Parse", "a", "single", "EC2", "instance" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/ec2.py#L47-L69
227,534
nccgroup/Scout2
AWSScout2/services/elasticache.py
ElastiCacheRegionConfig.parse_cluster
def parse_cluster(self, global_params, region, cluster): """ Parse a single ElastiCache cluster :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param cluster: ElastiCache cluster """ cluster_name = cluster.pop('CacheClusterId') cluster['name'] = cluster_name # Must fetch info about the subnet group to retrieve the VPC ID... if 'CacheSubnetGroupName' in cluster: subnet_group = api_clients[region].describe_cache_subnet_groups(CacheSubnetGroupName = cluster['CacheSubnetGroupName'])['CacheSubnetGroups'][0] vpc_id = subnet_group['VpcId'] else: vpc_id = ec2_classic subnet_group = None manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) self.vpcs[vpc_id].clusters[cluster_name] = cluster if subnet_group: self.vpcs[vpc_id].subnet_groups[subnet_group['CacheSubnetGroupName']] = subnet_group
python
def parse_cluster(self, global_params, region, cluster): cluster_name = cluster.pop('CacheClusterId') cluster['name'] = cluster_name # Must fetch info about the subnet group to retrieve the VPC ID... if 'CacheSubnetGroupName' in cluster: subnet_group = api_clients[region].describe_cache_subnet_groups(CacheSubnetGroupName = cluster['CacheSubnetGroupName'])['CacheSubnetGroups'][0] vpc_id = subnet_group['VpcId'] else: vpc_id = ec2_classic subnet_group = None manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) self.vpcs[vpc_id].clusters[cluster_name] = cluster if subnet_group: self.vpcs[vpc_id].subnet_groups[subnet_group['CacheSubnetGroupName']] = subnet_group
[ "def", "parse_cluster", "(", "self", ",", "global_params", ",", "region", ",", "cluster", ")", ":", "cluster_name", "=", "cluster", ".", "pop", "(", "'CacheClusterId'", ")", "cluster", "[", "'name'", "]", "=", "cluster_name", "# Must fetch info about the subnet gr...
Parse a single ElastiCache cluster :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param cluster: ElastiCache cluster
[ "Parse", "a", "single", "ElastiCache", "cluster" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/elasticache.py#L20-L40
227,535
nccgroup/Scout2
AWSScout2/services/redshift.py
RedshiftRegionConfig.parse_cluster
def parse_cluster(self, global_params, region, cluster): """ Parse a single Redshift cluster :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param cluster: Cluster """ vpc_id = cluster.pop('VpcId') if 'VpcId' in cluster else ec2_classic manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) name = cluster.pop('ClusterIdentifier') cluster['name'] = name self.vpcs[vpc_id].clusters[name] = cluster
python
def parse_cluster(self, global_params, region, cluster): vpc_id = cluster.pop('VpcId') if 'VpcId' in cluster else ec2_classic manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) name = cluster.pop('ClusterIdentifier') cluster['name'] = name self.vpcs[vpc_id].clusters[name] = cluster
[ "def", "parse_cluster", "(", "self", ",", "global_params", ",", "region", ",", "cluster", ")", ":", "vpc_id", "=", "cluster", ".", "pop", "(", "'VpcId'", ")", "if", "'VpcId'", "in", "cluster", "else", "ec2_classic", "manage_dictionary", "(", "self", ".", "...
Parse a single Redshift cluster :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param cluster: Cluster
[ "Parse", "a", "single", "Redshift", "cluster" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/redshift.py#L23-L35
227,536
nccgroup/Scout2
AWSScout2/services/redshift.py
RedshiftRegionConfig.parse_parameter_group
def parse_parameter_group(self, global_params, region, parameter_group): """ Parse a single Redshift parameter group and fetch all of its parameters :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param parameter_group: Parameter group """ pg_name = parameter_group.pop('ParameterGroupName') pg_id = self.get_non_aws_id(pg_name) # Name could be used as only letters digits or hyphens parameter_group['name'] = pg_name parameter_group['parameters'] = {} api_client = api_clients[region] parameters = handle_truncated_response(api_client.describe_cluster_parameters, {'ParameterGroupName': pg_name}, ['Parameters'])['Parameters'] for parameter in parameters: param = {} param['value'] = parameter['ParameterValue'] param['source'] = parameter['Source'] parameter_group['parameters'][parameter['ParameterName']] = param (self).parameter_groups[pg_id] = parameter_group
python
def parse_parameter_group(self, global_params, region, parameter_group): pg_name = parameter_group.pop('ParameterGroupName') pg_id = self.get_non_aws_id(pg_name) # Name could be used as only letters digits or hyphens parameter_group['name'] = pg_name parameter_group['parameters'] = {} api_client = api_clients[region] parameters = handle_truncated_response(api_client.describe_cluster_parameters, {'ParameterGroupName': pg_name}, ['Parameters'])['Parameters'] for parameter in parameters: param = {} param['value'] = parameter['ParameterValue'] param['source'] = parameter['Source'] parameter_group['parameters'][parameter['ParameterName']] = param (self).parameter_groups[pg_id] = parameter_group
[ "def", "parse_parameter_group", "(", "self", ",", "global_params", ",", "region", ",", "parameter_group", ")", ":", "pg_name", "=", "parameter_group", ".", "pop", "(", "'ParameterGroupName'", ")", "pg_id", "=", "self", ".", "get_non_aws_id", "(", "pg_name", ")",...
Parse a single Redshift parameter group and fetch all of its parameters :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param parameter_group: Parameter group
[ "Parse", "a", "single", "Redshift", "parameter", "group", "and", "fetch", "all", "of", "its", "parameters" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/redshift.py#L38-L57
227,537
nccgroup/Scout2
AWSScout2/services/route53.py
Route53DomainsConfig.parse_domains
def parse_domains(self, domain, params): """ Parse a single Route53Domains domain """ domain_id = self.get_non_aws_id(domain['DomainName']) domain['name'] = domain.pop('DomainName') #TODO: Get Dnssec info when available #api_client = params['api_client'] #details = api_client.get_domain_detail(DomainName = domain['name']) #get_keys(details, domain, ['Dnssec']) self.domains[domain_id] = domain
python
def parse_domains(self, domain, params): domain_id = self.get_non_aws_id(domain['DomainName']) domain['name'] = domain.pop('DomainName') #TODO: Get Dnssec info when available #api_client = params['api_client'] #details = api_client.get_domain_detail(DomainName = domain['name']) #get_keys(details, domain, ['Dnssec']) self.domains[domain_id] = domain
[ "def", "parse_domains", "(", "self", ",", "domain", ",", "params", ")", ":", "domain_id", "=", "self", ".", "get_non_aws_id", "(", "domain", "[", "'DomainName'", "]", ")", "domain", "[", "'name'", "]", "=", "domain", ".", "pop", "(", "'DomainName'", ")",...
Parse a single Route53Domains domain
[ "Parse", "a", "single", "Route53Domains", "domain" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/route53.py#L30-L40
227,538
nccgroup/Scout2
AWSScout2/services/route53.py
Route53Config.parse_hosted_zones
def parse_hosted_zones(self, hosted_zone, params): """ Parse a single Route53hosted_zoness hosted_zones """ # When resuming upon throttling error, skip if already fetched hosted_zone_id = hosted_zone.pop('Id') hosted_zone['name'] = hosted_zone.pop('Name') api_client = params['api_client'] record_sets = handle_truncated_response(api_client.list_resource_record_sets, {'HostedZoneId': hosted_zone_id}, ['ResourceRecordSets']) hosted_zone.update(record_sets) #print(str(record_sets)) #record_sets = api_client.list_resource_record_sets() #hosted_zone['RecordSets'] = record_sets['Resourc'] self.hosted_zones[hosted_zone_id] = hosted_zone
python
def parse_hosted_zones(self, hosted_zone, params): # When resuming upon throttling error, skip if already fetched hosted_zone_id = hosted_zone.pop('Id') hosted_zone['name'] = hosted_zone.pop('Name') api_client = params['api_client'] record_sets = handle_truncated_response(api_client.list_resource_record_sets, {'HostedZoneId': hosted_zone_id}, ['ResourceRecordSets']) hosted_zone.update(record_sets) #print(str(record_sets)) #record_sets = api_client.list_resource_record_sets() #hosted_zone['RecordSets'] = record_sets['Resourc'] self.hosted_zones[hosted_zone_id] = hosted_zone
[ "def", "parse_hosted_zones", "(", "self", ",", "hosted_zone", ",", "params", ")", ":", "# When resuming upon throttling error, skip if already fetched", "hosted_zone_id", "=", "hosted_zone", ".", "pop", "(", "'Id'", ")", "hosted_zone", "[", "'name'", "]", "=", "hosted...
Parse a single Route53hosted_zoness hosted_zones
[ "Parse", "a", "single", "Route53hosted_zoness", "hosted_zones" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/route53.py#L63-L76
227,539
nccgroup/Scout2
AWSScout2/services/cloudformation.py
CloudFormationRegionConfig.parse_stack
def parse_stack(self, global_params, region, stack): """ Parse a single stack and fetch additional attributes :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param stack_url: URL of the AWS stack """ stack['id'] = stack.pop('StackId') stack['name'] = stack.pop('StackName') stack_policy = api_clients[region].get_stack_policy(StackName = stack['name']) if 'StackPolicyBody' in stack_policy: stack['policy'] = json.loads(stack_policy['StackPolicyBody']) self.stacks[stack['name']] = stack
python
def parse_stack(self, global_params, region, stack): stack['id'] = stack.pop('StackId') stack['name'] = stack.pop('StackName') stack_policy = api_clients[region].get_stack_policy(StackName = stack['name']) if 'StackPolicyBody' in stack_policy: stack['policy'] = json.loads(stack_policy['StackPolicyBody']) self.stacks[stack['name']] = stack
[ "def", "parse_stack", "(", "self", ",", "global_params", ",", "region", ",", "stack", ")", ":", "stack", "[", "'id'", "]", "=", "stack", ".", "pop", "(", "'StackId'", ")", "stack", "[", "'name'", "]", "=", "stack", ".", "pop", "(", "'StackName'", ")"...
Parse a single stack and fetch additional attributes :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param stack_url: URL of the AWS stack
[ "Parse", "a", "single", "stack", "and", "fetch", "additional", "attributes" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/cloudformation.py#L18-L31
227,540
nccgroup/Scout2
AWSScout2/services/sns.py
SNSRegionConfig.parse_subscription
def parse_subscription(self, params, region, subscription): """ Parse a single subscription and reference it in its corresponding topic :param params: Global parameters (defaults to {}) :param subscription: SNS Subscription """ topic_arn = subscription.pop('TopicArn') topic_name = topic_arn.split(':')[-1] if topic_name in self.topics: topic = self.topics[topic_name] manage_dictionary(topic['subscriptions'], 'protocol', {}) protocol = subscription.pop('Protocol') manage_dictionary(topic['subscriptions']['protocol'], protocol, []) topic['subscriptions']['protocol'][protocol].append(subscription) topic['subscriptions_count'] += 1
python
def parse_subscription(self, params, region, subscription): topic_arn = subscription.pop('TopicArn') topic_name = topic_arn.split(':')[-1] if topic_name in self.topics: topic = self.topics[topic_name] manage_dictionary(topic['subscriptions'], 'protocol', {}) protocol = subscription.pop('Protocol') manage_dictionary(topic['subscriptions']['protocol'], protocol, []) topic['subscriptions']['protocol'][protocol].append(subscription) topic['subscriptions_count'] += 1
[ "def", "parse_subscription", "(", "self", ",", "params", ",", "region", ",", "subscription", ")", ":", "topic_arn", "=", "subscription", ".", "pop", "(", "'TopicArn'", ")", "topic_name", "=", "topic_arn", ".", "split", "(", "':'", ")", "[", "-", "1", "]"...
Parse a single subscription and reference it in its corresponding topic :param params: Global parameters (defaults to {}) :param subscription: SNS Subscription
[ "Parse", "a", "single", "subscription", "and", "reference", "it", "in", "its", "corresponding", "topic" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/sns.py#L22-L37
227,541
nccgroup/Scout2
AWSScout2/services/sns.py
SNSRegionConfig.parse_topic
def parse_topic(self, params, region, topic): """ Parse a single topic and fetch additional attributes :param params: Global parameters (defaults to {}) :param topic: SNS Topic """ topic['arn'] = topic.pop('TopicArn') topic['name'] = topic['arn'].split(':')[-1] (prefix, partition, service, region, account, name) = topic['arn'].split(':') api_client = api_clients[region] attributes = api_client.get_topic_attributes(TopicArn=topic['arn'])['Attributes'] for k in ['Owner', 'DisplayName']: topic[k] = attributes[k] if k in attributes else None for k in ['Policy', 'DeliveryPolicy', 'EffectiveDeliveryPolicy']: topic[k] = json.loads(attributes[k]) if k in attributes else None topic['name'] = topic['arn'].split(':')[-1] manage_dictionary(topic, 'subscriptions', {}) manage_dictionary(topic, 'subscriptions_count', 0) self.topics[topic['name']] = topic
python
def parse_topic(self, params, region, topic): topic['arn'] = topic.pop('TopicArn') topic['name'] = topic['arn'].split(':')[-1] (prefix, partition, service, region, account, name) = topic['arn'].split(':') api_client = api_clients[region] attributes = api_client.get_topic_attributes(TopicArn=topic['arn'])['Attributes'] for k in ['Owner', 'DisplayName']: topic[k] = attributes[k] if k in attributes else None for k in ['Policy', 'DeliveryPolicy', 'EffectiveDeliveryPolicy']: topic[k] = json.loads(attributes[k]) if k in attributes else None topic['name'] = topic['arn'].split(':')[-1] manage_dictionary(topic, 'subscriptions', {}) manage_dictionary(topic, 'subscriptions_count', 0) self.topics[topic['name']] = topic
[ "def", "parse_topic", "(", "self", ",", "params", ",", "region", ",", "topic", ")", ":", "topic", "[", "'arn'", "]", "=", "topic", ".", "pop", "(", "'TopicArn'", ")", "topic", "[", "'name'", "]", "=", "topic", "[", "'arn'", "]", ".", "split", "(", ...
Parse a single topic and fetch additional attributes :param params: Global parameters (defaults to {}) :param topic: SNS Topic
[ "Parse", "a", "single", "topic", "and", "fetch", "additional", "attributes" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/sns.py#L40-L59
227,542
nccgroup/Scout2
AWSScout2/services/emr.py
EMRRegionConfig.parse_cluster
def parse_cluster(self, global_params, region, cluster): """ Parse a single EMR cluster :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param cluster: EMR cluster """ cluster_id = cluster['Id'] cluster = api_clients[region].describe_cluster(ClusterId = cluster_id)['Cluster'] cluster['id'] = cluster.pop('Id') cluster['name'] = cluster.pop('Name') vpc_id = 'TODO' # The EMR API won't disclose the VPC ID, so wait until all configs have been fetch and look up the VPC based on the subnet ID manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) self.vpcs[vpc_id].clusters[cluster_id] = cluster
python
def parse_cluster(self, global_params, region, cluster): cluster_id = cluster['Id'] cluster = api_clients[region].describe_cluster(ClusterId = cluster_id)['Cluster'] cluster['id'] = cluster.pop('Id') cluster['name'] = cluster.pop('Name') vpc_id = 'TODO' # The EMR API won't disclose the VPC ID, so wait until all configs have been fetch and look up the VPC based on the subnet ID manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) self.vpcs[vpc_id].clusters[cluster_id] = cluster
[ "def", "parse_cluster", "(", "self", ",", "global_params", ",", "region", ",", "cluster", ")", ":", "cluster_id", "=", "cluster", "[", "'Id'", "]", "cluster", "=", "api_clients", "[", "region", "]", ".", "describe_cluster", "(", "ClusterId", "=", "cluster_id...
Parse a single EMR cluster :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param cluster: EMR cluster
[ "Parse", "a", "single", "EMR", "cluster" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/emr.py#L19-L33
227,543
nccgroup/Scout2
AWSScout2/configs/base.py
BaseConfig.fetch_all
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None): """ Generic fetching function that iterates through all of the service's targets :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all. """ global status, formatted_string # Initialize targets if not targets: targets = type(self).targets printInfo('Fetching %s config...' % format_service_name(self.service)) formatted_string = None api_service = self.service.lower() # Connect to the service if self.service in [ 's3' ]: # S3 namespace is global but APIs aren't.... api_clients = {} for region in build_region_list(self.service, regions, partition_name): api_clients[region] = connect_service('s3', credentials, region, silent = True) api_client = api_clients[list(api_clients.keys())[0]] elif self.service == 'route53domains': api_client = connect_service(self.service, credentials, 'us-east-1', silent = True) # TODO: use partition's default region else: api_client = connect_service(self.service, credentials, silent = True) # Threading to fetch & parse resources (queue consumer) params = {'api_client': api_client} if self.service in ['s3']: params['api_clients'] = api_clients q = self._init_threading(self.__fetch_target, params, self.thread_config['parse']) # Threading to list resources (queue feeder) params = {'api_client': api_client, 'q': q} if self.service in ['s3']: params['api_clients'] = api_clients qt = self._init_threading(self.__fetch_service, params, self.thread_config['list']) # Init display self.fetchstatuslogger = FetchStatusLogger(targets) # Go for target in targets: qt.put(target) # Join qt.join() q.join() # Show completion and force newline if self.service != 'iam': self.fetchstatuslogger.show(True)
python
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None): global status, formatted_string # Initialize targets if not targets: targets = type(self).targets printInfo('Fetching %s config...' % format_service_name(self.service)) formatted_string = None api_service = self.service.lower() # Connect to the service if self.service in [ 's3' ]: # S3 namespace is global but APIs aren't.... api_clients = {} for region in build_region_list(self.service, regions, partition_name): api_clients[region] = connect_service('s3', credentials, region, silent = True) api_client = api_clients[list(api_clients.keys())[0]] elif self.service == 'route53domains': api_client = connect_service(self.service, credentials, 'us-east-1', silent = True) # TODO: use partition's default region else: api_client = connect_service(self.service, credentials, silent = True) # Threading to fetch & parse resources (queue consumer) params = {'api_client': api_client} if self.service in ['s3']: params['api_clients'] = api_clients q = self._init_threading(self.__fetch_target, params, self.thread_config['parse']) # Threading to list resources (queue feeder) params = {'api_client': api_client, 'q': q} if self.service in ['s3']: params['api_clients'] = api_clients qt = self._init_threading(self.__fetch_service, params, self.thread_config['list']) # Init display self.fetchstatuslogger = FetchStatusLogger(targets) # Go for target in targets: qt.put(target) # Join qt.join() q.join() # Show completion and force newline if self.service != 'iam': self.fetchstatuslogger.show(True)
[ "def", "fetch_all", "(", "self", ",", "credentials", ",", "regions", "=", "[", "]", ",", "partition_name", "=", "'aws'", ",", "targets", "=", "None", ")", ":", "global", "status", ",", "formatted_string", "# Initialize targets", "if", "not", "targets", ":", ...
Generic fetching function that iterates through all of the service's targets :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all.
[ "Generic", "fetching", "function", "that", "iterates", "through", "all", "of", "the", "service", "s", "targets" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/base.py#L53-L101
227,544
nccgroup/Scout2
AWSScout2/services/ses.py
SESRegionConfig.parse_identitie
def parse_identitie(self, global_params, region, identity_name): """ Parse a single identity and fetch additional attributes :param global_params: Parameters shared for all regions :param region: Name of the AWS region """ identity = {'name': identity_name, 'policies': {}} policy_names = api_clients[region].list_identity_policies(Identity = identity_name)['PolicyNames'] if len(policy_names): policies = api_clients[region].get_identity_policies(Identity = identity_name, PolicyNames = policy_names)['Policies'] for policy_name in policies: identity['policies'][policy_name] = json.loads(policies[policy_name]) dkim = api_clients[region].get_identity_dkim_attributes(Identities = [ identity_name ])['DkimAttributes'][identity_name] identity['DkimEnabled'] = dkim['DkimEnabled'] identity['DkimVerificationStatus'] = dkim['DkimVerificationStatus'] self.identities[self.get_non_aws_id(identity_name)] = identity
python
def parse_identitie(self, global_params, region, identity_name): identity = {'name': identity_name, 'policies': {}} policy_names = api_clients[region].list_identity_policies(Identity = identity_name)['PolicyNames'] if len(policy_names): policies = api_clients[region].get_identity_policies(Identity = identity_name, PolicyNames = policy_names)['Policies'] for policy_name in policies: identity['policies'][policy_name] = json.loads(policies[policy_name]) dkim = api_clients[region].get_identity_dkim_attributes(Identities = [ identity_name ])['DkimAttributes'][identity_name] identity['DkimEnabled'] = dkim['DkimEnabled'] identity['DkimVerificationStatus'] = dkim['DkimVerificationStatus'] self.identities[self.get_non_aws_id(identity_name)] = identity
[ "def", "parse_identitie", "(", "self", ",", "global_params", ",", "region", ",", "identity_name", ")", ":", "identity", "=", "{", "'name'", ":", "identity_name", ",", "'policies'", ":", "{", "}", "}", "policy_names", "=", "api_clients", "[", "region", "]", ...
Parse a single identity and fetch additional attributes :param global_params: Parameters shared for all regions :param region: Name of the AWS region
[ "Parse", "a", "single", "identity", "and", "fetch", "additional", "attributes" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/ses.py#L18-L34
227,545
nccgroup/Scout2
AWSScout2/rules/utils.py
pass_conditions
def pass_conditions(all_info, current_path, conditions, unknown_as_pass_condition = False): """ Pass all conditions? :param all_info: :param current_path: :param conditions: :param unknown_as_pass_condition: Consider an undetermined condition as passed :return: """ result = False if len(conditions) == 0: return True condition_operator = conditions.pop(0) for condition in conditions: if condition[0] in condition_operators: res = pass_conditions(all_info, current_path, condition, unknown_as_pass_condition) else: # Conditions are formed as "path to value", "type of test", "value(s) for test" path_to_value, test_name, test_values = condition path_to_value = fix_path_string(all_info, current_path, path_to_value) target_obj = get_value_at(all_info, current_path, path_to_value) if type(test_values) != list: dynamic_value = re_get_value_at.match(test_values) if dynamic_value: test_values = get_value_at(all_info, current_path, dynamic_value.groups()[0], True) try: res = pass_condition(target_obj, test_name, test_values) except Exception as e: res = True if unknown_as_pass_condition else False printError('Unable to process testcase \'%s\' on value \'%s\', interpreted as %s.' % (test_name, str(target_obj), res)) printException(e, True) # Quick exit and + false if condition_operator == 'and' and not res: return False # Quick exit or + true if condition_operator == 'or' and res: return True # Still here ? # or -> false # and -> true if condition_operator == 'or': return False else: return True
python
def pass_conditions(all_info, current_path, conditions, unknown_as_pass_condition = False): result = False if len(conditions) == 0: return True condition_operator = conditions.pop(0) for condition in conditions: if condition[0] in condition_operators: res = pass_conditions(all_info, current_path, condition, unknown_as_pass_condition) else: # Conditions are formed as "path to value", "type of test", "value(s) for test" path_to_value, test_name, test_values = condition path_to_value = fix_path_string(all_info, current_path, path_to_value) target_obj = get_value_at(all_info, current_path, path_to_value) if type(test_values) != list: dynamic_value = re_get_value_at.match(test_values) if dynamic_value: test_values = get_value_at(all_info, current_path, dynamic_value.groups()[0], True) try: res = pass_condition(target_obj, test_name, test_values) except Exception as e: res = True if unknown_as_pass_condition else False printError('Unable to process testcase \'%s\' on value \'%s\', interpreted as %s.' % (test_name, str(target_obj), res)) printException(e, True) # Quick exit and + false if condition_operator == 'and' and not res: return False # Quick exit or + true if condition_operator == 'or' and res: return True # Still here ? # or -> false # and -> true if condition_operator == 'or': return False else: return True
[ "def", "pass_conditions", "(", "all_info", ",", "current_path", ",", "conditions", ",", "unknown_as_pass_condition", "=", "False", ")", ":", "result", "=", "False", "if", "len", "(", "conditions", ")", "==", "0", ":", "return", "True", "condition_operator", "=...
Pass all conditions? :param all_info: :param current_path: :param conditions: :param unknown_as_pass_condition: Consider an undetermined condition as passed :return:
[ "Pass", "all", "conditions?" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/utils.py#L94-L138
227,546
nccgroup/Scout2
AWSScout2/services/directconnect.py
DirectConnectRegionConfig.parse_connection
def parse_connection(self, global_params, region, connection): """ Parse a single connection and fetch additional attributes :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param connection_url: URL of the AWS connection """ connection['id'] = connection.pop('connectionId') connection['name'] = connection.pop('connectionName') self.connections[connection['id']] = connection
python
def parse_connection(self, global_params, region, connection): connection['id'] = connection.pop('connectionId') connection['name'] = connection.pop('connectionName') self.connections[connection['id']] = connection
[ "def", "parse_connection", "(", "self", ",", "global_params", ",", "region", ",", "connection", ")", ":", "connection", "[", "'id'", "]", "=", "connection", ".", "pop", "(", "'connectionId'", ")", "connection", "[", "'name'", "]", "=", "connection", ".", "...
Parse a single connection and fetch additional attributes :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param connection_url: URL of the AWS connection
[ "Parse", "a", "single", "connection", "and", "fetch", "additional", "attributes" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/directconnect.py#L16-L26
227,547
nccgroup/Scout2
AWSScout2/output/console.py
format_listall_output
def format_listall_output(format_file, format_item_dir, format, rule, option_prefix = None, template = None, skip_options = False): """ Prepare listall output template :param format_file: :param format_item_dir: :param format: :param config: :param option_prefix: :param template: :param skip_options: :return: """ # Set the list of keys if printing from a file spec # _LINE_(whatever)_EOL_ # _ITEM_(resource)_METI_ # _KEY_(path_to_value) if format_file and os.path.isfile(format_file): if not template: with open(format_file, 'rt') as f: template = f.read() # Optional files if not skip_options: re_option = re.compile(r'(%_OPTION_\((.*?)\)_NOITPO_)') optional_files = re_option.findall(template) for optional_file in optional_files: if optional_file[1].startswith(option_prefix + '-'): with open(os.path.join(format_item_dir, optional_file[1].strip()), 'rt') as f: template = template.replace(optional_file[0].strip(), f.read()) # Include files if needed re_file = re.compile(r'(_FILE_\((.*?)\)_ELIF_)') while True: requested_files = re_file.findall(template) available_files = os.listdir(format_item_dir) if format_item_dir else [] for requested_file in requested_files: if requested_file[1].strip() in available_files: with open(os.path.join(format_item_dir, requested_file[1].strip()), 'rt') as f: template = template.replace(requested_file[0].strip(), f.read()) # Find items and keys to be printed re_line = re.compile(r'(_ITEM_\((.*?)\)_METI_)') re_key = re.compile(r'_KEY_\(*(.*?)\)', re.DOTALL|re.MULTILINE) # Remove the multiline ? lines = re_line.findall(template) for (i, line) in enumerate(lines): lines[i] = line + (re_key.findall(line[1]),) requested_files = re_file.findall(template) if len(requested_files) == 0: break elif format and format[0] == 'csv': keys = rule.keys line = ', '.join('_KEY_(%s)' % k for k in keys) lines = [ (line, line, keys) ] template = line return (lines, template)
python
def format_listall_output(format_file, format_item_dir, format, rule, option_prefix = None, template = None, skip_options = False): # Set the list of keys if printing from a file spec # _LINE_(whatever)_EOL_ # _ITEM_(resource)_METI_ # _KEY_(path_to_value) if format_file and os.path.isfile(format_file): if not template: with open(format_file, 'rt') as f: template = f.read() # Optional files if not skip_options: re_option = re.compile(r'(%_OPTION_\((.*?)\)_NOITPO_)') optional_files = re_option.findall(template) for optional_file in optional_files: if optional_file[1].startswith(option_prefix + '-'): with open(os.path.join(format_item_dir, optional_file[1].strip()), 'rt') as f: template = template.replace(optional_file[0].strip(), f.read()) # Include files if needed re_file = re.compile(r'(_FILE_\((.*?)\)_ELIF_)') while True: requested_files = re_file.findall(template) available_files = os.listdir(format_item_dir) if format_item_dir else [] for requested_file in requested_files: if requested_file[1].strip() in available_files: with open(os.path.join(format_item_dir, requested_file[1].strip()), 'rt') as f: template = template.replace(requested_file[0].strip(), f.read()) # Find items and keys to be printed re_line = re.compile(r'(_ITEM_\((.*?)\)_METI_)') re_key = re.compile(r'_KEY_\(*(.*?)\)', re.DOTALL|re.MULTILINE) # Remove the multiline ? lines = re_line.findall(template) for (i, line) in enumerate(lines): lines[i] = line + (re_key.findall(line[1]),) requested_files = re_file.findall(template) if len(requested_files) == 0: break elif format and format[0] == 'csv': keys = rule.keys line = ', '.join('_KEY_(%s)' % k for k in keys) lines = [ (line, line, keys) ] template = line return (lines, template)
[ "def", "format_listall_output", "(", "format_file", ",", "format_item_dir", ",", "format", ",", "rule", ",", "option_prefix", "=", "None", ",", "template", "=", "None", ",", "skip_options", "=", "False", ")", ":", "# Set the list of keys if printing from a file spec",...
Prepare listall output template :param format_file: :param format_item_dir: :param format: :param config: :param option_prefix: :param template: :param skip_options: :return:
[ "Prepare", "listall", "output", "template" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/output/console.py#L16-L68
227,548
nccgroup/Scout2
AWSScout2/output/console.py
generate_listall_output
def generate_listall_output(lines, resources, aws_config, template, arguments, nodup = False): """ Format and print the output of ListAll :param lines: :param resources: :param aws_config: :param template: :param arguments: :param nodup: :return: """ for line in lines: output = [] for resource in resources: current_path = resource.split('.') outline = line[1] for key in line[2]: outline = outline.replace('_KEY_('+key+')', get_value_at(aws_config['services'], current_path, key, True)) output.append(outline) output = '\n'.join(line for line in sorted(set(output))) template = template.replace(line[0], output) for (i, argument) in enumerate(arguments): template = template.replace('_ARG_%d_' % i, argument) return template
python
def generate_listall_output(lines, resources, aws_config, template, arguments, nodup = False): for line in lines: output = [] for resource in resources: current_path = resource.split('.') outline = line[1] for key in line[2]: outline = outline.replace('_KEY_('+key+')', get_value_at(aws_config['services'], current_path, key, True)) output.append(outline) output = '\n'.join(line for line in sorted(set(output))) template = template.replace(line[0], output) for (i, argument) in enumerate(arguments): template = template.replace('_ARG_%d_' % i, argument) return template
[ "def", "generate_listall_output", "(", "lines", ",", "resources", ",", "aws_config", ",", "template", ",", "arguments", ",", "nodup", "=", "False", ")", ":", "for", "line", "in", "lines", ":", "output", "=", "[", "]", "for", "resource", "in", "resources", ...
Format and print the output of ListAll :param lines: :param resources: :param aws_config: :param template: :param arguments: :param nodup: :return:
[ "Format", "and", "print", "the", "output", "of", "ListAll" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/output/console.py#L71-L95
227,549
nccgroup/Scout2
AWSScout2/services/cloudtrail.py
CloudTrailRegionConfig.parse_trail
def parse_trail(self, global_params, region, trail): """ Parse a single CloudTrail trail :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param cluster: Trail """ trail_config = {} trail_config['name'] = trail.pop('Name') trail_id = self.get_non_aws_id(trail_config['name']) trail_details = None api_client = api_clients[region] # Do not duplicate entries for multiregion trails if 'IsMultiRegionTrail' in trail and trail['IsMultiRegionTrail'] and trail['HomeRegion'] != region: for key in ['HomeRegion', 'TrailARN']: trail_config[key] = trail[key] trail_config['scout2_link'] = 'services.cloudtrail.regions.%s.trails.%s' % (trail['HomeRegion'], trail_id) else: for key in trail: trail_config[key] = trail[key] trail_config['bucket_id'] = self.get_non_aws_id(trail_config.pop('S3BucketName')) for key in ['IsMultiRegionTrail', 'LogFileValidationEnabled']: if key not in trail_config: trail_config[key] = False trail_details = api_client.get_trail_status(Name=trail['TrailARN']) for key in ['IsLogging', 'LatestDeliveryTime', 'LatestDeliveryError', 'StartLoggingTime', 'StopLoggingTime', 'LatestNotificationTime', 'LatestNotificationError', 'LatestCloudWatchLogsDeliveryError', 'LatestCloudWatchLogsDeliveryTime']: trail_config[key] = trail_details[key] if key in trail_details else None if trail_details: trail_config['wildcard_data_logging'] = self.data_logging_status(trail_config['name'], trail_details, api_client) self.trails[trail_id] = trail_config
python
def parse_trail(self, global_params, region, trail): trail_config = {} trail_config['name'] = trail.pop('Name') trail_id = self.get_non_aws_id(trail_config['name']) trail_details = None api_client = api_clients[region] # Do not duplicate entries for multiregion trails if 'IsMultiRegionTrail' in trail and trail['IsMultiRegionTrail'] and trail['HomeRegion'] != region: for key in ['HomeRegion', 'TrailARN']: trail_config[key] = trail[key] trail_config['scout2_link'] = 'services.cloudtrail.regions.%s.trails.%s' % (trail['HomeRegion'], trail_id) else: for key in trail: trail_config[key] = trail[key] trail_config['bucket_id'] = self.get_non_aws_id(trail_config.pop('S3BucketName')) for key in ['IsMultiRegionTrail', 'LogFileValidationEnabled']: if key not in trail_config: trail_config[key] = False trail_details = api_client.get_trail_status(Name=trail['TrailARN']) for key in ['IsLogging', 'LatestDeliveryTime', 'LatestDeliveryError', 'StartLoggingTime', 'StopLoggingTime', 'LatestNotificationTime', 'LatestNotificationError', 'LatestCloudWatchLogsDeliveryError', 'LatestCloudWatchLogsDeliveryTime']: trail_config[key] = trail_details[key] if key in trail_details else None if trail_details: trail_config['wildcard_data_logging'] = self.data_logging_status(trail_config['name'], trail_details, api_client) self.trails[trail_id] = trail_config
[ "def", "parse_trail", "(", "self", ",", "global_params", ",", "region", ",", "trail", ")", ":", "trail_config", "=", "{", "}", "trail_config", "[", "'name'", "]", "=", "trail", ".", "pop", "(", "'Name'", ")", "trail_id", "=", "self", ".", "get_non_aws_id...
Parse a single CloudTrail trail :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param cluster: Trail
[ "Parse", "a", "single", "CloudTrail", "trail" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/cloudtrail.py#L20-L54
227,550
nccgroup/Scout2
AWSScout2/services/iam.py
IAMConfig.fetch_credential_report
def fetch_credential_report(self, credentials, ignore_exception = False): """ Fetch the credential report :param: api_client :type: FOO :param: ignore_exception : initiate credential report creation as not always ready :type: Boolean """ iam_report = {} try: api_client = connect_service('iam', credentials, silent = True) response = api_client.generate_credential_report() if response['State'] != 'COMPLETE': if not ignore_exception: printError('Failed to generate a credential report.') return report = api_client.get_credential_report()['Content'] lines = report.splitlines() keys = lines[0].decode('utf-8').split(',') for line in lines[1:]: values = line.decode('utf-8').split(',') manage_dictionary(iam_report, values[0], {}) for key, value in zip(keys, values): iam_report[values[0]][key] = value self.credential_report = iam_report self.fetchstatuslogger.counts['credential_report']['fetched'] = 1 except Exception as e: if ignore_exception: return printError('Failed to download a credential report.') printException(e)
python
def fetch_credential_report(self, credentials, ignore_exception = False): iam_report = {} try: api_client = connect_service('iam', credentials, silent = True) response = api_client.generate_credential_report() if response['State'] != 'COMPLETE': if not ignore_exception: printError('Failed to generate a credential report.') return report = api_client.get_credential_report()['Content'] lines = report.splitlines() keys = lines[0].decode('utf-8').split(',') for line in lines[1:]: values = line.decode('utf-8').split(',') manage_dictionary(iam_report, values[0], {}) for key, value in zip(keys, values): iam_report[values[0]][key] = value self.credential_report = iam_report self.fetchstatuslogger.counts['credential_report']['fetched'] = 1 except Exception as e: if ignore_exception: return printError('Failed to download a credential report.') printException(e)
[ "def", "fetch_credential_report", "(", "self", ",", "credentials", ",", "ignore_exception", "=", "False", ")", ":", "iam_report", "=", "{", "}", "try", ":", "api_client", "=", "connect_service", "(", "'iam'", ",", "credentials", ",", "silent", "=", "True", "...
Fetch the credential report :param: api_client :type: FOO :param: ignore_exception : initiate credential report creation as not always ready :type: Boolean
[ "Fetch", "the", "credential", "report" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L69-L100
227,551
nccgroup/Scout2
AWSScout2/services/iam.py
IAMConfig.parse_groups
def parse_groups(self, group, params): """ Parse a single IAM group and fetch additional information """ # When resuming upon throttling error, skip if already fetched if group['GroupName'] in self.groups: return api_client = params['api_client'] # Ensure consistent attribute names across resource types group['id'] = group.pop('GroupId') group['name'] = group.pop('GroupName') group['arn'] = group.pop('Arn') # Get group's members group['users'] = self.__fetch_group_users(api_client, group['name']); # Get inline policies policies = self.__get_inline_policies(api_client, 'group', group['id'], group['name']) if len(policies): group['inline_policies'] = policies group['inline_policies_count'] = len(policies) self.groups[group['id']] = group
python
def parse_groups(self, group, params): # When resuming upon throttling error, skip if already fetched if group['GroupName'] in self.groups: return api_client = params['api_client'] # Ensure consistent attribute names across resource types group['id'] = group.pop('GroupId') group['name'] = group.pop('GroupName') group['arn'] = group.pop('Arn') # Get group's members group['users'] = self.__fetch_group_users(api_client, group['name']); # Get inline policies policies = self.__get_inline_policies(api_client, 'group', group['id'], group['name']) if len(policies): group['inline_policies'] = policies group['inline_policies_count'] = len(policies) self.groups[group['id']] = group
[ "def", "parse_groups", "(", "self", ",", "group", ",", "params", ")", ":", "# When resuming upon throttling error, skip if already fetched", "if", "group", "[", "'GroupName'", "]", "in", "self", ".", "groups", ":", "return", "api_client", "=", "params", "[", "'api...
Parse a single IAM group and fetch additional information
[ "Parse", "a", "single", "IAM", "group", "and", "fetch", "additional", "information" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L107-L126
227,552
nccgroup/Scout2
AWSScout2/services/iam.py
IAMConfig.parse_policies
def parse_policies(self, fetched_policy, params): """ Parse a single IAM policy and fetch additional information """ api_client = params['api_client'] policy = {} policy['name'] = fetched_policy.pop('PolicyName') policy['id'] = fetched_policy.pop('PolicyId') policy['arn'] = fetched_policy.pop('Arn') # Download version and document policy_version = api_client.get_policy_version(PolicyArn = policy['arn'], VersionId = fetched_policy['DefaultVersionId']) policy_version = policy_version['PolicyVersion'] policy['PolicyDocument'] = policy_version['Document'] # Get attached IAM entities policy['attached_to'] = {} attached_entities = handle_truncated_response(api_client.list_entities_for_policy, {'PolicyArn': policy['arn']}, ['PolicyGroups', 'PolicyRoles', 'PolicyUsers']) for entity_type in attached_entities: resource_type = entity_type.replace('Policy', '').lower() if len(attached_entities[entity_type]): policy['attached_to'][resource_type] = [] for entity in attached_entities[entity_type]: name_field = entity_type.replace('Policy', '')[:-1] + 'Name' resource_name = entity[name_field] policy['attached_to'][resource_type].append({'name': resource_name}) # Save policy self.policies[policy['id']] = policy
python
def parse_policies(self, fetched_policy, params): api_client = params['api_client'] policy = {} policy['name'] = fetched_policy.pop('PolicyName') policy['id'] = fetched_policy.pop('PolicyId') policy['arn'] = fetched_policy.pop('Arn') # Download version and document policy_version = api_client.get_policy_version(PolicyArn = policy['arn'], VersionId = fetched_policy['DefaultVersionId']) policy_version = policy_version['PolicyVersion'] policy['PolicyDocument'] = policy_version['Document'] # Get attached IAM entities policy['attached_to'] = {} attached_entities = handle_truncated_response(api_client.list_entities_for_policy, {'PolicyArn': policy['arn']}, ['PolicyGroups', 'PolicyRoles', 'PolicyUsers']) for entity_type in attached_entities: resource_type = entity_type.replace('Policy', '').lower() if len(attached_entities[entity_type]): policy['attached_to'][resource_type] = [] for entity in attached_entities[entity_type]: name_field = entity_type.replace('Policy', '')[:-1] + 'Name' resource_name = entity[name_field] policy['attached_to'][resource_type].append({'name': resource_name}) # Save policy self.policies[policy['id']] = policy
[ "def", "parse_policies", "(", "self", ",", "fetched_policy", ",", "params", ")", ":", "api_client", "=", "params", "[", "'api_client'", "]", "policy", "=", "{", "}", "policy", "[", "'name'", "]", "=", "fetched_policy", ".", "pop", "(", "'PolicyName'", ")",...
Parse a single IAM policy and fetch additional information
[ "Parse", "a", "single", "IAM", "policy", "and", "fetch", "additional", "information" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L133-L158
227,553
nccgroup/Scout2
AWSScout2/services/iam.py
IAMConfig.fetch_password_policy
def fetch_password_policy(self, credentials): """ Fetch the password policy that applies to all IAM users within the AWS account """ self.fetchstatuslogger.counts['password_policy']['discovered'] = 0 self.fetchstatuslogger.counts['password_policy']['fetched'] = 0 try: api_client = connect_service('iam', credentials, silent = True) self.password_policy = api_client.get_account_password_policy()['PasswordPolicy'] if 'PasswordReusePrevention' not in self.password_policy: self.password_policy['PasswordReusePrevention'] = False else: self.password_policy['PreviousPasswordPrevented'] = self.password_policy['PasswordReusePrevention'] self.password_policy['PasswordReusePrevention'] = True # There is a bug in the API: ExpirePasswords always returns false if 'MaxPasswordAge' in self.password_policy: self.password_policy['ExpirePasswords'] = True self.fetchstatuslogger.counts['password_policy']['discovered'] = 1 self.fetchstatuslogger.counts['password_policy']['fetched'] = 1 except ClientError as e: if e.response['Error']['Code'] == 'NoSuchEntity': self.password_policy = {} self.password_policy['MinimumPasswordLength'] = '1' # As of 10/10/2016, 1-character passwords were authorized when no policy exists, even though the console displays 6 self.password_policy['RequireUppercaseCharacters'] = False self.password_policy['RequireLowercaseCharacters'] = False self.password_policy['RequireNumbers'] = False self.password_policy['RequireSymbols'] = False self.password_policy['PasswordReusePrevention'] = False self.password_policy['ExpirePasswords'] = False else: raise e except Exception as e: printError(str(e))
python
def fetch_password_policy(self, credentials): self.fetchstatuslogger.counts['password_policy']['discovered'] = 0 self.fetchstatuslogger.counts['password_policy']['fetched'] = 0 try: api_client = connect_service('iam', credentials, silent = True) self.password_policy = api_client.get_account_password_policy()['PasswordPolicy'] if 'PasswordReusePrevention' not in self.password_policy: self.password_policy['PasswordReusePrevention'] = False else: self.password_policy['PreviousPasswordPrevented'] = self.password_policy['PasswordReusePrevention'] self.password_policy['PasswordReusePrevention'] = True # There is a bug in the API: ExpirePasswords always returns false if 'MaxPasswordAge' in self.password_policy: self.password_policy['ExpirePasswords'] = True self.fetchstatuslogger.counts['password_policy']['discovered'] = 1 self.fetchstatuslogger.counts['password_policy']['fetched'] = 1 except ClientError as e: if e.response['Error']['Code'] == 'NoSuchEntity': self.password_policy = {} self.password_policy['MinimumPasswordLength'] = '1' # As of 10/10/2016, 1-character passwords were authorized when no policy exists, even though the console displays 6 self.password_policy['RequireUppercaseCharacters'] = False self.password_policy['RequireLowercaseCharacters'] = False self.password_policy['RequireNumbers'] = False self.password_policy['RequireSymbols'] = False self.password_policy['PasswordReusePrevention'] = False self.password_policy['ExpirePasswords'] = False else: raise e except Exception as e: printError(str(e))
[ "def", "fetch_password_policy", "(", "self", ",", "credentials", ")", ":", "self", ".", "fetchstatuslogger", ".", "counts", "[", "'password_policy'", "]", "[", "'discovered'", "]", "=", "0", "self", ".", "fetchstatuslogger", ".", "counts", "[", "'password_policy...
Fetch the password policy that applies to all IAM users within the AWS account
[ "Fetch", "the", "password", "policy", "that", "applies", "to", "all", "IAM", "users", "within", "the", "AWS", "account" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L165-L198
227,554
nccgroup/Scout2
AWSScout2/services/iam.py
IAMConfig.parse_roles
def parse_roles(self, fetched_role, params): """ Parse a single IAM role and fetch additional data """ role = {} role['instances_count'] = 'N/A' # When resuming upon throttling error, skip if already fetched if fetched_role['RoleName'] in self.roles: return api_client = params['api_client'] # Ensure consistent attribute names across resource types role['id'] = fetched_role.pop('RoleId') role['name'] = fetched_role.pop('RoleName') role['arn'] = fetched_role.pop('Arn') # Get other attributes get_keys(fetched_role, role, [ 'CreateDate', 'Path']) # Get role policies policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name']) if len(policies): role['inline_policies'] = policies role['inline_policies_count'] = len(policies) # Get instance profiles profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles']) manage_dictionary(role, 'instance_profiles', {}) for profile in profiles['InstanceProfiles']: manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {}) role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn'] role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName'] # Get trust relationship role['assume_role_policy'] = {} role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument') # Save role self.roles[role['id']] = role
python
def parse_roles(self, fetched_role, params): role = {} role['instances_count'] = 'N/A' # When resuming upon throttling error, skip if already fetched if fetched_role['RoleName'] in self.roles: return api_client = params['api_client'] # Ensure consistent attribute names across resource types role['id'] = fetched_role.pop('RoleId') role['name'] = fetched_role.pop('RoleName') role['arn'] = fetched_role.pop('Arn') # Get other attributes get_keys(fetched_role, role, [ 'CreateDate', 'Path']) # Get role policies policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name']) if len(policies): role['inline_policies'] = policies role['inline_policies_count'] = len(policies) # Get instance profiles profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles']) manage_dictionary(role, 'instance_profiles', {}) for profile in profiles['InstanceProfiles']: manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {}) role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn'] role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName'] # Get trust relationship role['assume_role_policy'] = {} role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument') # Save role self.roles[role['id']] = role
[ "def", "parse_roles", "(", "self", ",", "fetched_role", ",", "params", ")", ":", "role", "=", "{", "}", "role", "[", "'instances_count'", "]", "=", "'N/A'", "# When resuming upon throttling error, skip if already fetched", "if", "fetched_role", "[", "'RoleName'", "]...
Parse a single IAM role and fetch additional data
[ "Parse", "a", "single", "IAM", "role", "and", "fetch", "additional", "data" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L205-L237
227,555
nccgroup/Scout2
AWSScout2/services/iam.py
IAMConfig.parse_users
def parse_users(self, user, params): """ Parse a single IAM user and fetch additional data """ if user['UserName'] in self.users: return api_client = params['api_client'] # Ensure consistent attribute names across resource types user['id'] = user.pop('UserId') user['name'] = user.pop('UserName') user['arn'] = user.pop('Arn') policies = self.__get_inline_policies(api_client, 'user', user['id'], user['name']) if len(policies): user['inline_policies'] = policies user['inline_policies_count'] = len(policies) user['groups'] = [] groups = handle_truncated_response(api_client.list_groups_for_user, {'UserName': user['name']}, ['Groups'])['Groups'] for group in groups: user['groups'].append(group['GroupName']) try: user['LoginProfile'] = api_client.get_login_profile(UserName = user['name'])['LoginProfile'] except Exception as e: pass user['AccessKeys'] = api_client.list_access_keys(UserName = user['name'])['AccessKeyMetadata'] user['MFADevices'] = api_client.list_mfa_devices(UserName = user['name'])['MFADevices'] # TODO: Users signing certss self.users[user['id']] = user
python
def parse_users(self, user, params): if user['UserName'] in self.users: return api_client = params['api_client'] # Ensure consistent attribute names across resource types user['id'] = user.pop('UserId') user['name'] = user.pop('UserName') user['arn'] = user.pop('Arn') policies = self.__get_inline_policies(api_client, 'user', user['id'], user['name']) if len(policies): user['inline_policies'] = policies user['inline_policies_count'] = len(policies) user['groups'] = [] groups = handle_truncated_response(api_client.list_groups_for_user, {'UserName': user['name']}, ['Groups'])['Groups'] for group in groups: user['groups'].append(group['GroupName']) try: user['LoginProfile'] = api_client.get_login_profile(UserName = user['name'])['LoginProfile'] except Exception as e: pass user['AccessKeys'] = api_client.list_access_keys(UserName = user['name'])['AccessKeyMetadata'] user['MFADevices'] = api_client.list_mfa_devices(UserName = user['name'])['MFADevices'] # TODO: Users signing certss self.users[user['id']] = user
[ "def", "parse_users", "(", "self", ",", "user", ",", "params", ")", ":", "if", "user", "[", "'UserName'", "]", "in", "self", ".", "users", ":", "return", "api_client", "=", "params", "[", "'api_client'", "]", "# Ensure consistent attribute names across resource ...
Parse a single IAM user and fetch additional data
[ "Parse", "a", "single", "IAM", "user", "and", "fetch", "additional", "data" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L244-L270
227,556
nccgroup/Scout2
AWSScout2/services/vpc.py
get_subnet_flow_logs_list
def get_subnet_flow_logs_list(current_config, subnet): """ Return the flow logs that cover a given subnet :param current_config: :param subnet: the subnet that the flow logs should cover :return: """ flow_logs_list = [] for flow_log in current_config.flow_logs: if current_config.flow_logs[flow_log]['ResourceId'] == subnet['SubnetId'] or \ current_config.flow_logs[flow_log]['ResourceId'] == subnet['VpcId']: flow_logs_list.append(flow_log) return flow_logs_list
python
def get_subnet_flow_logs_list(current_config, subnet): flow_logs_list = [] for flow_log in current_config.flow_logs: if current_config.flow_logs[flow_log]['ResourceId'] == subnet['SubnetId'] or \ current_config.flow_logs[flow_log]['ResourceId'] == subnet['VpcId']: flow_logs_list.append(flow_log) return flow_logs_list
[ "def", "get_subnet_flow_logs_list", "(", "current_config", ",", "subnet", ")", ":", "flow_logs_list", "=", "[", "]", "for", "flow_log", "in", "current_config", ".", "flow_logs", ":", "if", "current_config", ".", "flow_logs", "[", "flow_log", "]", "[", "'Resource...
Return the flow logs that cover a given subnet :param current_config: :param subnet: the subnet that the flow logs should cover :return:
[ "Return", "the", "flow", "logs", "that", "cover", "a", "given", "subnet" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/vpc.py#L231-L244
227,557
nccgroup/Scout2
AWSScout2/services/vpc.py
VPCRegionConfig.parse_subnet
def parse_subnet(self, global_params, region, subnet): """ Parse subnet object. :param global_params: :param region: :param subnet: :return: """ vpc_id = subnet['VpcId'] manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types)) subnet_id = subnet['SubnetId'] get_name(subnet, subnet, 'SubnetId') # set flow logs that cover this subnet subnet['flow_logs'] = get_subnet_flow_logs_list(self, subnet) # Save manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types)) self.vpcs[vpc_id].subnets[subnet_id] = subnet
python
def parse_subnet(self, global_params, region, subnet): vpc_id = subnet['VpcId'] manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types)) subnet_id = subnet['SubnetId'] get_name(subnet, subnet, 'SubnetId') # set flow logs that cover this subnet subnet['flow_logs'] = get_subnet_flow_logs_list(self, subnet) # Save manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types)) self.vpcs[vpc_id].subnets[subnet_id] = subnet
[ "def", "parse_subnet", "(", "self", ",", "global_params", ",", "region", ",", "subnet", ")", ":", "vpc_id", "=", "subnet", "[", "'VpcId'", "]", "manage_dictionary", "(", "self", ".", "vpcs", ",", "vpc_id", ",", "SingleVPCConfig", "(", "self", ".", "vpc_res...
Parse subnet object. :param global_params: :param region: :param subnet: :return:
[ "Parse", "subnet", "object", "." ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/vpc.py#L98-L115
227,558
nccgroup/Scout2
AWSScout2/services/cloudwatch.py
CloudWatchRegionConfig.parse_alarm
def parse_alarm(self, global_params, region, alarm): """ Parse a single CloudWatch trail :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param alarm: Alarm """ alarm['arn'] = alarm.pop('AlarmArn') alarm['name'] = alarm.pop('AlarmName') # Drop some data for k in ['AlarmConfigurationUpdatedTimestamp', 'StateReason', 'StateReasonData', 'StateUpdatedTimestamp']: foo = alarm.pop(k) if k in alarm else None alarm_id = self.get_non_aws_id(alarm['arn']) self.alarms[alarm_id] = alarm
python
def parse_alarm(self, global_params, region, alarm): alarm['arn'] = alarm.pop('AlarmArn') alarm['name'] = alarm.pop('AlarmName') # Drop some data for k in ['AlarmConfigurationUpdatedTimestamp', 'StateReason', 'StateReasonData', 'StateUpdatedTimestamp']: foo = alarm.pop(k) if k in alarm else None alarm_id = self.get_non_aws_id(alarm['arn']) self.alarms[alarm_id] = alarm
[ "def", "parse_alarm", "(", "self", ",", "global_params", ",", "region", ",", "alarm", ")", ":", "alarm", "[", "'arn'", "]", "=", "alarm", ".", "pop", "(", "'AlarmArn'", ")", "alarm", "[", "'name'", "]", "=", "alarm", ".", "pop", "(", "'AlarmName'", "...
Parse a single CloudWatch trail :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param alarm: Alarm
[ "Parse", "a", "single", "CloudWatch", "trail" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/cloudwatch.py#L21-L35
227,559
nccgroup/Scout2
AWSScout2/configs/browser.py
get_attribute_at
def get_attribute_at(config, target_path, key, default_value=None): """ Return attribute value at a given path :param config: :param target_path: :param key: :param default_value: :return: """ for target in target_path: config = config[target] return config[key] if key in config else default_value
python
def get_attribute_at(config, target_path, key, default_value=None): for target in target_path: config = config[target] return config[key] if key in config else default_value
[ "def", "get_attribute_at", "(", "config", ",", "target_path", ",", "key", ",", "default_value", "=", "None", ")", ":", "for", "target", "in", "target_path", ":", "config", "=", "config", "[", "target", "]", "return", "config", "[", "key", "]", "if", "key...
Return attribute value at a given path :param config: :param target_path: :param key: :param default_value: :return:
[ "Return", "attribute", "value", "at", "a", "given", "path" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/browser.py#L18-L30
227,560
nccgroup/Scout2
AWSScout2/configs/browser.py
get_value_at
def get_value_at(all_info, current_path, key, to_string=False): """ Get value located at a given path :param all_info: :param current_path: :param key: :param to_string: :return: """ keys = key.split('.') if keys[-1] == 'id': target_obj = current_path[len(keys)-1] else: if key == 'this': target_path = current_path elif '.' in key: target_path = [] for i, key in enumerate(keys): if key == 'id': target_path.append(current_path[i]) else: target_path.append(key) if len(keys) > len(current_path): target_path = target_path + keys[len(target_path):] else: target_path = copy.deepcopy(current_path) target_path.append(key) target_obj = all_info for p in target_path: try: if type(target_obj) == list and type(target_obj[0]) == dict: target_obj = target_obj[int(p)] elif type(target_obj) == list: target_obj = p elif p == '': target_obj = target_obj else: try: target_obj = target_obj[p] except Exception as e: printInfo('Info: %s\n' 'Path: %s\n' 'Key: %s' % (str(all_info), str(current_path), str(key))) printException(e) raise Exception except Exception as e: printInfo('Info: %s\n' 'Path: %s\n' 'Key: %s' % (str(all_info), str(current_path), str(key))) printException(e) raise Exception if to_string: return str(target_obj) else: return target_obj
python
def get_value_at(all_info, current_path, key, to_string=False): keys = key.split('.') if keys[-1] == 'id': target_obj = current_path[len(keys)-1] else: if key == 'this': target_path = current_path elif '.' in key: target_path = [] for i, key in enumerate(keys): if key == 'id': target_path.append(current_path[i]) else: target_path.append(key) if len(keys) > len(current_path): target_path = target_path + keys[len(target_path):] else: target_path = copy.deepcopy(current_path) target_path.append(key) target_obj = all_info for p in target_path: try: if type(target_obj) == list and type(target_obj[0]) == dict: target_obj = target_obj[int(p)] elif type(target_obj) == list: target_obj = p elif p == '': target_obj = target_obj else: try: target_obj = target_obj[p] except Exception as e: printInfo('Info: %s\n' 'Path: %s\n' 'Key: %s' % (str(all_info), str(current_path), str(key))) printException(e) raise Exception except Exception as e: printInfo('Info: %s\n' 'Path: %s\n' 'Key: %s' % (str(all_info), str(current_path), str(key))) printException(e) raise Exception if to_string: return str(target_obj) else: return target_obj
[ "def", "get_value_at", "(", "all_info", ",", "current_path", ",", "key", ",", "to_string", "=", "False", ")", ":", "keys", "=", "key", ".", "split", "(", "'.'", ")", "if", "keys", "[", "-", "1", "]", "==", "'id'", ":", "target_obj", "=", "current_pat...
Get value located at a given path :param all_info: :param current_path: :param key: :param to_string: :return:
[ "Get", "value", "located", "at", "a", "given", "path" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/browser.py#L55-L114
227,561
nccgroup/Scout2
AWSScout2/rules/rule_definition.py
RuleDefinition.load
def load(self): """ Load the definition of the rule, searching in the specified rule dirs first, then in the built-in definitions :return: None """ file_name_valid = False rule_type_valid = False # Look for a locally-defined rule for rule_dir in self.rule_dirs: file_path = os.path.join(rule_dir, self.file_name) if rule_dir else self.file_name if os.path.isfile(file_path): self.file_path = file_path file_name_valid = True break # Look for a built-in rule if not file_name_valid: for rule_type in self.rule_types: if self.file_name.startswith(rule_type): self.file_path = os.path.join(self.rules_data_path, self.file_name) rule_type_valid = True file_name_valid = True break if not rule_type_valid: for rule_type in self.rule_types: self.file_path = os.path.join(self.rules_data_path, rule_type, self.file_name) if os.path.isfile(self.file_path): file_name_valid = True break else: if os.path.isfile(self.file_path): file_name_valid = True if not file_name_valid: printError('Error: could not find %s' % self.file_name) else: try: with open(self.file_path, 'rt') as f: self.string_definition = f.read() self.load_from_string_definition() except Exception as e: printException(e) printError('Failed to load rule defined in %s' % file_path)
python
def load(self): file_name_valid = False rule_type_valid = False # Look for a locally-defined rule for rule_dir in self.rule_dirs: file_path = os.path.join(rule_dir, self.file_name) if rule_dir else self.file_name if os.path.isfile(file_path): self.file_path = file_path file_name_valid = True break # Look for a built-in rule if not file_name_valid: for rule_type in self.rule_types: if self.file_name.startswith(rule_type): self.file_path = os.path.join(self.rules_data_path, self.file_name) rule_type_valid = True file_name_valid = True break if not rule_type_valid: for rule_type in self.rule_types: self.file_path = os.path.join(self.rules_data_path, rule_type, self.file_name) if os.path.isfile(self.file_path): file_name_valid = True break else: if os.path.isfile(self.file_path): file_name_valid = True if not file_name_valid: printError('Error: could not find %s' % self.file_name) else: try: with open(self.file_path, 'rt') as f: self.string_definition = f.read() self.load_from_string_definition() except Exception as e: printException(e) printError('Failed to load rule defined in %s' % file_path)
[ "def", "load", "(", "self", ")", ":", "file_name_valid", "=", "False", "rule_type_valid", "=", "False", "# Look for a locally-defined rule", "for", "rule_dir", "in", "self", ".", "rule_dirs", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "rule_di...
Load the definition of the rule, searching in the specified rule dirs first, then in the built-in definitions :return: None
[ "Load", "the", "definition", "of", "the", "rule", "searching", "in", "the", "specified", "rule", "dirs", "first", "then", "in", "the", "built", "-", "in", "definitions" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/rule_definition.py#L36-L77
227,562
nccgroup/Scout2
AWSScout2/configs/regions.py
RegionalServiceConfig.init_region_config
def init_region_config(self, region): """ Initialize the region's configuration :param region: Name of the region """ self.regions[region] = self.region_config_class(region_name = region, resource_types = self.resource_types)
python
def init_region_config(self, region): self.regions[region] = self.region_config_class(region_name = region, resource_types = self.resource_types)
[ "def", "init_region_config", "(", "self", ",", "region", ")", ":", "self", ".", "regions", "[", "region", "]", "=", "self", ".", "region_config_class", "(", "region_name", "=", "region", ",", "resource_types", "=", "self", ".", "resource_types", ")" ]
Initialize the region's configuration :param region: Name of the region
[ "Initialize", "the", "region", "s", "configuration" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/regions.py#L83-L89
227,563
nccgroup/Scout2
AWSScout2/configs/regions.py
RegionalServiceConfig.fetch_all
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None): """ Fetch all the configuration supported by Scout2 for a given service :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all. """ # Initialize targets # Tweak params realtargets = () if not targets: targets = self.targets for i, target in enumerate(targets['first_region']): params = self.tweak_params(target[3], credentials) realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),) targets['first_region'] = realtargets realtargets = () for i, target in enumerate(targets['other_regions']): params = self.tweak_params(target[3], credentials) realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),) targets['other_regions'] = realtargets printInfo('Fetching %s config...' % format_service_name(self.service)) self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True) api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower() # Init regions regions = build_region_list(api_service, regions, partition_name) # TODO: move this code within this class self.fetchstatuslogger.counts['regions']['discovered'] = len(regions) # Threading to fetch & parse resources (queue consumer) q = self._init_threading(self._fetch_target, {}, self.thread_config['parse']) # Threading to list resources (queue feeder) qr = self._init_threading(self._fetch_region, {'api_service': api_service, 'credentials': credentials, 'q': q, 'targets': ()}, self.thread_config['list']) # Go for i, region in enumerate(regions): qr.put((region, targets['first_region'] if i == 0 else targets['other_regions'])) # Join qr.join() q.join() # Show completion and force newline self.fetchstatuslogger.show(True)
python
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None): # Initialize targets # Tweak params realtargets = () if not targets: targets = self.targets for i, target in enumerate(targets['first_region']): params = self.tweak_params(target[3], credentials) realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),) targets['first_region'] = realtargets realtargets = () for i, target in enumerate(targets['other_regions']): params = self.tweak_params(target[3], credentials) realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),) targets['other_regions'] = realtargets printInfo('Fetching %s config...' % format_service_name(self.service)) self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True) api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower() # Init regions regions = build_region_list(api_service, regions, partition_name) # TODO: move this code within this class self.fetchstatuslogger.counts['regions']['discovered'] = len(regions) # Threading to fetch & parse resources (queue consumer) q = self._init_threading(self._fetch_target, {}, self.thread_config['parse']) # Threading to list resources (queue feeder) qr = self._init_threading(self._fetch_region, {'api_service': api_service, 'credentials': credentials, 'q': q, 'targets': ()}, self.thread_config['list']) # Go for i, region in enumerate(regions): qr.put((region, targets['first_region'] if i == 0 else targets['other_regions'])) # Join qr.join() q.join() # Show completion and force newline self.fetchstatuslogger.show(True)
[ "def", "fetch_all", "(", "self", ",", "credentials", ",", "regions", "=", "[", "]", ",", "partition_name", "=", "'aws'", ",", "targets", "=", "None", ")", ":", "# Initialize targets", "# Tweak params", "realtargets", "=", "(", ")", "if", "not", "targets", ...
Fetch all the configuration supported by Scout2 for a given service :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all.
[ "Fetch", "all", "the", "configuration", "supported", "by", "Scout2", "for", "a", "given", "service" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/regions.py#L91-L136
227,564
nccgroup/Scout2
AWSScout2/configs/regions.py
RegionalServiceConfig._init_threading
def _init_threading(self, function, params={}, num_threads=10): """ Initialize queue and threads :param function: :param params: :param num_threads: :return: """ q = Queue(maxsize=0) # TODO: find something appropriate for i in range(num_threads): worker = Thread(target=function, args=(q, params)) worker.setDaemon(True) worker.start() return q
python
def _init_threading(self, function, params={}, num_threads=10): q = Queue(maxsize=0) # TODO: find something appropriate for i in range(num_threads): worker = Thread(target=function, args=(q, params)) worker.setDaemon(True) worker.start() return q
[ "def", "_init_threading", "(", "self", ",", "function", ",", "params", "=", "{", "}", ",", "num_threads", "=", "10", ")", ":", "q", "=", "Queue", "(", "maxsize", "=", "0", ")", "# TODO: find something appropriate", "for", "i", "in", "range", "(", "num_th...
Initialize queue and threads :param function: :param params: :param num_threads: :return:
[ "Initialize", "queue", "and", "threads" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/regions.py#L138-L152
227,565
nccgroup/Scout2
AWSScout2/configs/regions.py
RegionConfig.fetch_all
def fetch_all(self, api_client, fetchstatuslogger, q, targets): ''' Make all API calls as defined in metadata.json :param api_client: :param fetchstatuslogger: :param q: :param targets: :return: ''' self.fetchstatuslogger = fetchstatuslogger if targets != None: # Ensure targets is a tuple if type(targets) != list and type(targets) != tuple: targets = tuple(targets,) elif type(targets) != tuple: targets = tuple(targets) for target in targets: self._fetch_targets(api_client, q, target)
python
def fetch_all(self, api_client, fetchstatuslogger, q, targets): ''' Make all API calls as defined in metadata.json :param api_client: :param fetchstatuslogger: :param q: :param targets: :return: ''' self.fetchstatuslogger = fetchstatuslogger if targets != None: # Ensure targets is a tuple if type(targets) != list and type(targets) != tuple: targets = tuple(targets,) elif type(targets) != tuple: targets = tuple(targets) for target in targets: self._fetch_targets(api_client, q, target)
[ "def", "fetch_all", "(", "self", ",", "api_client", ",", "fetchstatuslogger", ",", "q", ",", "targets", ")", ":", "self", ".", "fetchstatuslogger", "=", "fetchstatuslogger", "if", "targets", "!=", "None", ":", "# Ensure targets is a tuple", "if", "type", "(", ...
Make all API calls as defined in metadata.json :param api_client: :param fetchstatuslogger: :param q: :param targets: :return:
[ "Make", "all", "API", "calls", "as", "defined", "in", "metadata", ".", "json" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/regions.py#L241-L259
227,566
nccgroup/Scout2
AWSScout2/services/sqs.py
SQSRegionConfig.parse_queue
def parse_queue(self, global_params, region, queue_url): """ Parse a single queue and fetch additional attributes :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param queue_url: URL of the AWS queue """ queue = {'QueueUrl': queue_url} attributes = api_clients[region].get_queue_attributes(QueueUrl = queue_url, AttributeNames = ['CreatedTimestamp', 'Policy', 'QueueArn'])['Attributes'] queue['arn'] = attributes.pop('QueueArn') for k in ['CreatedTimestamp']: queue[k] = attributes[k] if k in attributes else None if 'Policy' in attributes: queue['Policy'] = json.loads(attributes['Policy']) else: queue['Policy'] = {'Statement': []} queue['name'] = queue['arn'].split(':')[-1] self.queues[queue['name']] = queue
python
def parse_queue(self, global_params, region, queue_url): queue = {'QueueUrl': queue_url} attributes = api_clients[region].get_queue_attributes(QueueUrl = queue_url, AttributeNames = ['CreatedTimestamp', 'Policy', 'QueueArn'])['Attributes'] queue['arn'] = attributes.pop('QueueArn') for k in ['CreatedTimestamp']: queue[k] = attributes[k] if k in attributes else None if 'Policy' in attributes: queue['Policy'] = json.loads(attributes['Policy']) else: queue['Policy'] = {'Statement': []} queue['name'] = queue['arn'].split(':')[-1] self.queues[queue['name']] = queue
[ "def", "parse_queue", "(", "self", ",", "global_params", ",", "region", ",", "queue_url", ")", ":", "queue", "=", "{", "'QueueUrl'", ":", "queue_url", "}", "attributes", "=", "api_clients", "[", "region", "]", ".", "get_queue_attributes", "(", "QueueUrl", "=...
Parse a single queue and fetch additional attributes :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param queue_url: URL of the AWS queue
[ "Parse", "a", "single", "queue", "and", "fetch", "additional", "attributes" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/sqs.py#L18-L37
227,567
nccgroup/Scout2
AWSScout2/services/s3.py
get_s3_buckets
def get_s3_buckets(api_client, s3_info, s3_params): """ List all available buckets :param api_client: :param s3_info: :param s3_params: :return: """ manage_dictionary(s3_info, 'buckets', {}) buckets = api_client[get_s3_list_region(s3_params['selected_regions'])].list_buckets()['Buckets'] targets = [] for b in buckets: # Abort if bucket is not of interest if (b['Name'] in s3_params['skipped_buckets']) or (len(s3_params['checked_buckets']) and b['Name'] not in s3_params['checked_buckets']): continue targets.append(b) s3_info['buckets_count'] = len(targets) s3_params['api_clients'] = api_client s3_params['s3_info'] = s3_info thread_work(targets, get_s3_bucket, params = s3_params, num_threads = 30) show_status(s3_info) s3_info['buckets_count'] = len(s3_info['buckets']) return s3_info
python
def get_s3_buckets(api_client, s3_info, s3_params): manage_dictionary(s3_info, 'buckets', {}) buckets = api_client[get_s3_list_region(s3_params['selected_regions'])].list_buckets()['Buckets'] targets = [] for b in buckets: # Abort if bucket is not of interest if (b['Name'] in s3_params['skipped_buckets']) or (len(s3_params['checked_buckets']) and b['Name'] not in s3_params['checked_buckets']): continue targets.append(b) s3_info['buckets_count'] = len(targets) s3_params['api_clients'] = api_client s3_params['s3_info'] = s3_info thread_work(targets, get_s3_bucket, params = s3_params, num_threads = 30) show_status(s3_info) s3_info['buckets_count'] = len(s3_info['buckets']) return s3_info
[ "def", "get_s3_buckets", "(", "api_client", ",", "s3_info", ",", "s3_params", ")", ":", "manage_dictionary", "(", "s3_info", ",", "'buckets'", ",", "{", "}", ")", "buckets", "=", "api_client", "[", "get_s3_list_region", "(", "s3_params", "[", "'selected_regions'...
List all available buckets :param api_client: :param s3_info: :param s3_params: :return:
[ "List", "all", "available", "buckets" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/s3.py#L291-L314
227,568
nccgroup/Scout2
AWSScout2/services/s3.py
S3Config.parse_buckets
def parse_buckets(self, bucket, params): """ Parse a single S3 bucket TODO: - CORS - Lifecycle - Notification ? - Get bucket's policy :param bucket: :param params: :return: """ bucket['name'] = bucket.pop('Name') api_client = params['api_clients'][get_s3_list_region(list(params['api_clients'].keys())[0])] bucket['CreationDate'] = str(bucket['CreationDate']) bucket['region'] = get_s3_bucket_location(api_client, bucket['name']) # h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland... if bucket['region'] == 'EU': bucket['region'] = 'eu-west-1' # h4ck :: S3 is global but region-aware... if bucket['region'] not in params['api_clients']: printInfo('Skipping bucket %s (region %s outside of scope)' % (bucket['name'], bucket['region'])) self.buckets_count -= 1 return api_client = params['api_clients'][bucket['region']] get_s3_bucket_logging(api_client, bucket['name'], bucket) get_s3_bucket_versioning(api_client, bucket['name'], bucket) get_s3_bucket_webhosting(api_client, bucket['name'], bucket) get_s3_bucket_default_encryption(api_client, bucket['name'], bucket) bucket['grantees'] = get_s3_acls(api_client, bucket['name'], bucket) get_s3_bucket_policy(api_client, bucket['name'], bucket) get_s3_bucket_secure_transport(api_client, bucket['name'], bucket) # If requested, get key properties #if params['check_encryption'] or params['check_acls']: # get_s3_bucket_keys(api_client, bucket['name'], bucket, params['check_encryption'], # params['check_acls']) bucket['id'] = self.get_non_aws_id(bucket['name']) self.buckets[bucket['id']] = bucket
python
def parse_buckets(self, bucket, params): bucket['name'] = bucket.pop('Name') api_client = params['api_clients'][get_s3_list_region(list(params['api_clients'].keys())[0])] bucket['CreationDate'] = str(bucket['CreationDate']) bucket['region'] = get_s3_bucket_location(api_client, bucket['name']) # h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland... if bucket['region'] == 'EU': bucket['region'] = 'eu-west-1' # h4ck :: S3 is global but region-aware... if bucket['region'] not in params['api_clients']: printInfo('Skipping bucket %s (region %s outside of scope)' % (bucket['name'], bucket['region'])) self.buckets_count -= 1 return api_client = params['api_clients'][bucket['region']] get_s3_bucket_logging(api_client, bucket['name'], bucket) get_s3_bucket_versioning(api_client, bucket['name'], bucket) get_s3_bucket_webhosting(api_client, bucket['name'], bucket) get_s3_bucket_default_encryption(api_client, bucket['name'], bucket) bucket['grantees'] = get_s3_acls(api_client, bucket['name'], bucket) get_s3_bucket_policy(api_client, bucket['name'], bucket) get_s3_bucket_secure_transport(api_client, bucket['name'], bucket) # If requested, get key properties #if params['check_encryption'] or params['check_acls']: # get_s3_bucket_keys(api_client, bucket['name'], bucket, params['check_encryption'], # params['check_acls']) bucket['id'] = self.get_non_aws_id(bucket['name']) self.buckets[bucket['id']] = bucket
[ "def", "parse_buckets", "(", "self", ",", "bucket", ",", "params", ")", ":", "bucket", "[", "'name'", "]", "=", "bucket", ".", "pop", "(", "'Name'", ")", "api_client", "=", "params", "[", "'api_clients'", "]", "[", "get_s3_list_region", "(", "list", "(",...
Parse a single S3 bucket TODO: - CORS - Lifecycle - Notification ? - Get bucket's policy :param bucket: :param params: :return:
[ "Parse", "a", "single", "S3", "bucket" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/s3.py#L38-L79
227,569
nccgroup/Scout2
AWSScout2/rules/ruleset.py
Ruleset.load
def load(self, rule_type, quiet = False): """ Open a JSON file definiting a ruleset and load it into a Ruleset object :param quiet: :return: """ if self.filename and os.path.exists(self.filename): try: with open(self.filename, 'rt') as f: ruleset = json.load(f) self.about = ruleset['about'] if 'about' in ruleset else '' self.rules = {} for filename in ruleset['rules']: self.rules[filename] = [] for rule in ruleset['rules'][filename]: self.handle_rule_versions(filename, rule_type, rule) except Exception as e: printException(e) printError('Error: ruleset file %s contains malformed JSON.' % self.filename) self.rules = [] self.about = '' else: self.rules = [] if not quiet: printError('Error: the file %s does not exist.' % self.filename)
python
def load(self, rule_type, quiet = False): if self.filename and os.path.exists(self.filename): try: with open(self.filename, 'rt') as f: ruleset = json.load(f) self.about = ruleset['about'] if 'about' in ruleset else '' self.rules = {} for filename in ruleset['rules']: self.rules[filename] = [] for rule in ruleset['rules'][filename]: self.handle_rule_versions(filename, rule_type, rule) except Exception as e: printException(e) printError('Error: ruleset file %s contains malformed JSON.' % self.filename) self.rules = [] self.about = '' else: self.rules = [] if not quiet: printError('Error: the file %s does not exist.' % self.filename)
[ "def", "load", "(", "self", ",", "rule_type", ",", "quiet", "=", "False", ")", ":", "if", "self", ".", "filename", "and", "os", ".", "path", ".", "exists", "(", "self", ".", "filename", ")", ":", "try", ":", "with", "open", "(", "self", ".", "fil...
Open a JSON file definiting a ruleset and load it into a Ruleset object :param quiet: :return:
[ "Open", "a", "JSON", "file", "definiting", "a", "ruleset", "and", "load", "it", "into", "a", "Ruleset", "object" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/ruleset.py#L61-L86
227,570
nccgroup/Scout2
AWSScout2/rules/ruleset.py
Ruleset.handle_rule_versions
def handle_rule_versions(self, filename, rule_type, rule): """ For each version of a rule found in the ruleset, append a new Rule object """ if 'versions' in rule: versions = rule.pop('versions') for version_key_suffix in versions: version = versions[version_key_suffix] version['key_suffix'] = version_key_suffix tmp_rule = dict(rule, **version) self.rules[filename].append(Rule(filename, rule_type, tmp_rule)) else: self.rules[filename].append(Rule(filename, rule_type, rule))
python
def handle_rule_versions(self, filename, rule_type, rule): if 'versions' in rule: versions = rule.pop('versions') for version_key_suffix in versions: version = versions[version_key_suffix] version['key_suffix'] = version_key_suffix tmp_rule = dict(rule, **version) self.rules[filename].append(Rule(filename, rule_type, tmp_rule)) else: self.rules[filename].append(Rule(filename, rule_type, rule))
[ "def", "handle_rule_versions", "(", "self", ",", "filename", ",", "rule_type", ",", "rule", ")", ":", "if", "'versions'", "in", "rule", ":", "versions", "=", "rule", ".", "pop", "(", "'versions'", ")", "for", "version_key_suffix", "in", "versions", ":", "v...
For each version of a rule found in the ruleset, append a new Rule object
[ "For", "each", "version", "of", "a", "rule", "found", "in", "the", "ruleset", "append", "a", "new", "Rule", "object" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/ruleset.py#L100-L112
227,571
nccgroup/Scout2
AWSScout2/rules/ruleset.py
Ruleset.prepare_rules
def prepare_rules(self, attributes = [], ip_ranges = [], params = {}): """ Update the ruleset's rules by duplicating fields as required by the HTML ruleset generator :return: """ for filename in self.rule_definitions: if filename in self.rules: for rule in self.rules[filename]: rule.set_definition(self.rule_definitions, attributes, ip_ranges, params) else: self.rules[filename] = [] new_rule = Rule(filename, self.rule_type, {'enabled': False, 'level': 'danger'}) new_rule.set_definition(self.rule_definitions, attributes, ip_ranges, params) self.rules[filename].append(new_rule)
python
def prepare_rules(self, attributes = [], ip_ranges = [], params = {}): for filename in self.rule_definitions: if filename in self.rules: for rule in self.rules[filename]: rule.set_definition(self.rule_definitions, attributes, ip_ranges, params) else: self.rules[filename] = [] new_rule = Rule(filename, self.rule_type, {'enabled': False, 'level': 'danger'}) new_rule.set_definition(self.rule_definitions, attributes, ip_ranges, params) self.rules[filename].append(new_rule)
[ "def", "prepare_rules", "(", "self", ",", "attributes", "=", "[", "]", ",", "ip_ranges", "=", "[", "]", ",", "params", "=", "{", "}", ")", ":", "for", "filename", "in", "self", ".", "rule_definitions", ":", "if", "filename", "in", "self", ".", "rules...
Update the ruleset's rules by duplicating fields as required by the HTML ruleset generator :return:
[ "Update", "the", "ruleset", "s", "rules", "by", "duplicating", "fields", "as", "required", "by", "the", "HTML", "ruleset", "generator" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/ruleset.py#L115-L129
227,572
nccgroup/Scout2
AWSScout2/rules/ruleset.py
Ruleset.load_rule_definitions
def load_rule_definitions(self, ruleset_generator = False, rule_dirs = []): """ Load definition of rules declared in the ruleset :param services: :param ip_ranges: :param aws_account_id: :param generator: :return: """ # Load rules from JSON files self.rule_definitions = {} for rule_filename in self.rules: for rule in self.rules[rule_filename]: if not rule.enabled and not ruleset_generator: continue self.rule_definitions[os.path.basename(rule_filename)] = RuleDefinition(rule_filename, rule_dirs = rule_dirs) # In case of the ruleset generator, list all available built-in rules if ruleset_generator: rule_dirs.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/findings')) rule_filenames = [] for rule_dir in rule_dirs: rule_filenames += [f for f in os.listdir(rule_dir) if os.path.isfile(os.path.join(rule_dir, f))] for rule_filename in rule_filenames: if rule_filename not in self.rule_definitions: self.rule_definitions[os.path.basename(rule_filename)] = RuleDefinition(rule_filename)
python
def load_rule_definitions(self, ruleset_generator = False, rule_dirs = []): # Load rules from JSON files self.rule_definitions = {} for rule_filename in self.rules: for rule in self.rules[rule_filename]: if not rule.enabled and not ruleset_generator: continue self.rule_definitions[os.path.basename(rule_filename)] = RuleDefinition(rule_filename, rule_dirs = rule_dirs) # In case of the ruleset generator, list all available built-in rules if ruleset_generator: rule_dirs.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/findings')) rule_filenames = [] for rule_dir in rule_dirs: rule_filenames += [f for f in os.listdir(rule_dir) if os.path.isfile(os.path.join(rule_dir, f))] for rule_filename in rule_filenames: if rule_filename not in self.rule_definitions: self.rule_definitions[os.path.basename(rule_filename)] = RuleDefinition(rule_filename)
[ "def", "load_rule_definitions", "(", "self", ",", "ruleset_generator", "=", "False", ",", "rule_dirs", "=", "[", "]", ")", ":", "# Load rules from JSON files", "self", ".", "rule_definitions", "=", "{", "}", "for", "rule_filename", "in", "self", ".", "rules", ...
Load definition of rules declared in the ruleset :param services: :param ip_ranges: :param aws_account_id: :param generator: :return:
[ "Load", "definition", "of", "rules", "declared", "in", "the", "ruleset" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/ruleset.py#L132-L158
227,573
nccgroup/Scout2
AWSScout2/output/utils.py
prompt_4_yes_no
def prompt_4_yes_no(question): """ Ask a question and prompt for yes or no :param question: Question to ask; answer is yes/no :return: :boolean """ while True: sys.stdout.write(question + ' (y/n)? ') try: choice = raw_input().lower() except: choice = input().lower() if choice == 'yes' or choice == 'y': return True elif choice == 'no' or choice == 'n': return False else: printError('\'%s\' is not a valid answer. Enter \'yes\'(y) or \'no\'(n).' % choice)
python
def prompt_4_yes_no(question): while True: sys.stdout.write(question + ' (y/n)? ') try: choice = raw_input().lower() except: choice = input().lower() if choice == 'yes' or choice == 'y': return True elif choice == 'no' or choice == 'n': return False else: printError('\'%s\' is not a valid answer. Enter \'yes\'(y) or \'no\'(n).' % choice)
[ "def", "prompt_4_yes_no", "(", "question", ")", ":", "while", "True", ":", "sys", ".", "stdout", ".", "write", "(", "question", "+", "' (y/n)? '", ")", "try", ":", "choice", "=", "raw_input", "(", ")", ".", "lower", "(", ")", "except", ":", "choice", ...
Ask a question and prompt for yes or no :param question: Question to ask; answer is yes/no :return: :boolean
[ "Ask", "a", "question", "and", "prompt", "for", "yes", "or", "no" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/output/utils.py#L12-L30
227,574
nccgroup/Scout2
AWSScout2/services/rds.py
RDSRegionConfig.parse_instance
def parse_instance(self, global_params, region, dbi): """ Parse a single RDS instance :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param instance: Instance """ vpc_id = dbi['DBSubnetGroup']['VpcId'] if 'DBSubnetGroup' in dbi and 'VpcId' in dbi['DBSubnetGroup'] and dbi['DBSubnetGroup']['VpcId'] else ec2_classic instance = {} instance['name'] = dbi.pop('DBInstanceIdentifier') for key in ['InstanceCreateTime', 'Engine', 'DBInstanceStatus', 'AutoMinorVersionUpgrade', 'DBInstanceClass', 'MultiAZ', 'Endpoint', 'BackupRetentionPeriod', 'PubliclyAccessible', 'StorageEncrypted', 'VpcSecurityGroups', 'DBSecurityGroups', 'DBParameterGroups', 'EnhancedMonitoringResourceArn', 'StorageEncrypted']: # parameter_groups , security_groups, vpc_security_groups instance[key] = dbi[key] if key in dbi else None # If part of a cluster, multi AZ information is only available via cluster information if 'DBClusterIdentifier' in dbi: api_client = api_clients[region] cluster = api_client.describe_db_clusters(DBClusterIdentifier = dbi['DBClusterIdentifier'])['DBClusters'][0] instance['MultiAZ'] = cluster['MultiAZ'] # Save manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) self.vpcs[vpc_id].instances[instance['name']] = instance
python
def parse_instance(self, global_params, region, dbi): vpc_id = dbi['DBSubnetGroup']['VpcId'] if 'DBSubnetGroup' in dbi and 'VpcId' in dbi['DBSubnetGroup'] and dbi['DBSubnetGroup']['VpcId'] else ec2_classic instance = {} instance['name'] = dbi.pop('DBInstanceIdentifier') for key in ['InstanceCreateTime', 'Engine', 'DBInstanceStatus', 'AutoMinorVersionUpgrade', 'DBInstanceClass', 'MultiAZ', 'Endpoint', 'BackupRetentionPeriod', 'PubliclyAccessible', 'StorageEncrypted', 'VpcSecurityGroups', 'DBSecurityGroups', 'DBParameterGroups', 'EnhancedMonitoringResourceArn', 'StorageEncrypted']: # parameter_groups , security_groups, vpc_security_groups instance[key] = dbi[key] if key in dbi else None # If part of a cluster, multi AZ information is only available via cluster information if 'DBClusterIdentifier' in dbi: api_client = api_clients[region] cluster = api_client.describe_db_clusters(DBClusterIdentifier = dbi['DBClusterIdentifier'])['DBClusters'][0] instance['MultiAZ'] = cluster['MultiAZ'] # Save manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types)) self.vpcs[vpc_id].instances[instance['name']] = instance
[ "def", "parse_instance", "(", "self", ",", "global_params", ",", "region", ",", "dbi", ")", ":", "vpc_id", "=", "dbi", "[", "'DBSubnetGroup'", "]", "[", "'VpcId'", "]", "if", "'DBSubnetGroup'", "in", "dbi", "and", "'VpcId'", "in", "dbi", "[", "'DBSubnetGro...
Parse a single RDS instance :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param instance: Instance
[ "Parse", "a", "single", "RDS", "instance" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/rds.py#L22-L46
227,575
nccgroup/Scout2
AWSScout2/rules/preprocessing.py
preprocessing
def preprocessing(aws_config, ip_ranges = [], ip_ranges_name_key = None): """ Tweak the AWS config to match cross-service resources and clean any fetching artifacts :param aws_config: :return: """ map_all_sgs(aws_config) map_all_subnets(aws_config) set_emr_vpc_ids(aws_config) #parse_elb_policies(aws_config) # Various data processing calls add_security_group_name_to_ec2_grants(aws_config['services']['ec2'], aws_config['aws_account_id']) process_cloudtrail_trails(aws_config['services']['cloudtrail']) add_cidr_display_name(aws_config, ip_ranges, ip_ranges_name_key) merge_route53_and_route53domains(aws_config) match_instances_and_roles(aws_config) match_iam_policies_and_buckets(aws_config) # Preprocessing dictated by metadata process_metadata_callbacks(aws_config)
python
def preprocessing(aws_config, ip_ranges = [], ip_ranges_name_key = None): map_all_sgs(aws_config) map_all_subnets(aws_config) set_emr_vpc_ids(aws_config) #parse_elb_policies(aws_config) # Various data processing calls add_security_group_name_to_ec2_grants(aws_config['services']['ec2'], aws_config['aws_account_id']) process_cloudtrail_trails(aws_config['services']['cloudtrail']) add_cidr_display_name(aws_config, ip_ranges, ip_ranges_name_key) merge_route53_and_route53domains(aws_config) match_instances_and_roles(aws_config) match_iam_policies_and_buckets(aws_config) # Preprocessing dictated by metadata process_metadata_callbacks(aws_config)
[ "def", "preprocessing", "(", "aws_config", ",", "ip_ranges", "=", "[", "]", ",", "ip_ranges_name_key", "=", "None", ")", ":", "map_all_sgs", "(", "aws_config", ")", "map_all_subnets", "(", "aws_config", ")", "set_emr_vpc_ids", "(", "aws_config", ")", "#parse_elb...
Tweak the AWS config to match cross-service resources and clean any fetching artifacts :param aws_config: :return:
[ "Tweak", "the", "AWS", "config", "to", "match", "cross", "-", "service", "resources", "and", "clean", "any", "fetching", "artifacts" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/preprocessing.py#L13-L35
227,576
nccgroup/Scout2
AWSScout2/rules/preprocessing.py
process_vpc_peering_connections_callback
def process_vpc_peering_connections_callback(aws_config, current_config, path, current_path, pc_id, callback_args): """ Create a list of peering connection IDs in each VPC :param aws_config: :param current_config: :param path: :param current_path: :param pc_id: :param callback_args: :return: """ info = 'AccepterVpcInfo' if current_config['AccepterVpcInfo']['OwnerId'] == aws_config['aws_account_id'] else 'RequesterVpcInfo' region = current_path[current_path.index('regions')+1] vpc_id = current_config[info]['VpcId'] target = aws_config['services']['vpc']['regions'][region]['vpcs'][vpc_id] manage_dictionary(target, 'peering_connections', []) if pc_id not in target['peering_connections']: target['peering_connections'].append(pc_id) # VPC information for the peer'd VPC current_config['peer_info'] = copy.deepcopy(current_config['AccepterVpcInfo' if info == 'RequesterVpcInfo' else 'RequesterVpcInfo']) if 'PeeringOptions' in current_config['peer_info']: current_config['peer_info'].pop('PeeringOptions') if 'organization' in aws_config and current_config['peer_info']['OwnerId'] in aws_config['organization']: current_config['peer_info']['name'] = aws_config['organization'][current_config['peer_info']['OwnerId']]['Name'] else: current_config['peer_info']['name'] = current_config['peer_info']['OwnerId']
python
def process_vpc_peering_connections_callback(aws_config, current_config, path, current_path, pc_id, callback_args): info = 'AccepterVpcInfo' if current_config['AccepterVpcInfo']['OwnerId'] == aws_config['aws_account_id'] else 'RequesterVpcInfo' region = current_path[current_path.index('regions')+1] vpc_id = current_config[info]['VpcId'] target = aws_config['services']['vpc']['regions'][region]['vpcs'][vpc_id] manage_dictionary(target, 'peering_connections', []) if pc_id not in target['peering_connections']: target['peering_connections'].append(pc_id) # VPC information for the peer'd VPC current_config['peer_info'] = copy.deepcopy(current_config['AccepterVpcInfo' if info == 'RequesterVpcInfo' else 'RequesterVpcInfo']) if 'PeeringOptions' in current_config['peer_info']: current_config['peer_info'].pop('PeeringOptions') if 'organization' in aws_config and current_config['peer_info']['OwnerId'] in aws_config['organization']: current_config['peer_info']['name'] = aws_config['organization'][current_config['peer_info']['OwnerId']]['Name'] else: current_config['peer_info']['name'] = current_config['peer_info']['OwnerId']
[ "def", "process_vpc_peering_connections_callback", "(", "aws_config", ",", "current_config", ",", "path", ",", "current_path", ",", "pc_id", ",", "callback_args", ")", ":", "info", "=", "'AccepterVpcInfo'", "if", "current_config", "[", "'AccepterVpcInfo'", "]", "[", ...
Create a list of peering connection IDs in each VPC :param aws_config: :param current_config: :param path: :param current_path: :param pc_id: :param callback_args: :return:
[ "Create", "a", "list", "of", "peering", "connection", "IDs", "in", "each", "VPC" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/preprocessing.py#L339-L366
227,577
nccgroup/Scout2
AWSScout2/rules/preprocessing.py
go_to_and_do
def go_to_and_do(aws_config, current_config, path, current_path, callback, callback_args = None): """ Recursively go to a target and execute a callback :param aws_config: :param current_config: :param path: :param current_path: :param callback: :param callback_args: :return: """ try: key = path.pop(0) if not current_config: current_config = aws_config if not current_path: current_path = [] keys = key.split('.') if len(keys) > 1: while True: key = keys.pop(0) if not len(keys): break current_path.append(key) current_config = current_config[key] if key in current_config: current_path.append(key) for (i, value) in enumerate(list(current_config[key])): if len(path) == 0: if type(current_config[key] == dict) and type(value) != dict and type(value) != list: callback(aws_config, current_config[key][value], path, current_path, value, callback_args) else: callback(aws_config, current_config, path, current_path, value, callback_args) else: tmp = copy.deepcopy(current_path) try: tmp.append(value) go_to_and_do(aws_config, current_config[key][value], copy.deepcopy(path), tmp, callback, callback_args) except: tmp.pop() tmp.append(i) go_to_and_do(aws_config, current_config[key][i], copy.deepcopy(path), tmp, callback, callback_args) except Exception as e: printException(e) if i: printInfo('Index: %s' % str(i)) printInfo('Path: %s' % str(current_path)) printInfo('Key = %s' % str(key)) printInfo('Value = %s' % str(value)) printInfo('Path = %s' % path)
python
def go_to_and_do(aws_config, current_config, path, current_path, callback, callback_args = None): try: key = path.pop(0) if not current_config: current_config = aws_config if not current_path: current_path = [] keys = key.split('.') if len(keys) > 1: while True: key = keys.pop(0) if not len(keys): break current_path.append(key) current_config = current_config[key] if key in current_config: current_path.append(key) for (i, value) in enumerate(list(current_config[key])): if len(path) == 0: if type(current_config[key] == dict) and type(value) != dict and type(value) != list: callback(aws_config, current_config[key][value], path, current_path, value, callback_args) else: callback(aws_config, current_config, path, current_path, value, callback_args) else: tmp = copy.deepcopy(current_path) try: tmp.append(value) go_to_and_do(aws_config, current_config[key][value], copy.deepcopy(path), tmp, callback, callback_args) except: tmp.pop() tmp.append(i) go_to_and_do(aws_config, current_config[key][i], copy.deepcopy(path), tmp, callback, callback_args) except Exception as e: printException(e) if i: printInfo('Index: %s' % str(i)) printInfo('Path: %s' % str(current_path)) printInfo('Key = %s' % str(key)) printInfo('Value = %s' % str(value)) printInfo('Path = %s' % path)
[ "def", "go_to_and_do", "(", "aws_config", ",", "current_config", ",", "path", ",", "current_path", ",", "callback", ",", "callback_args", "=", "None", ")", ":", "try", ":", "key", "=", "path", ".", "pop", "(", "0", ")", "if", "not", "current_config", ":"...
Recursively go to a target and execute a callback :param aws_config: :param current_config: :param path: :param current_path: :param callback: :param callback_args: :return:
[ "Recursively", "go", "to", "a", "target", "and", "execute", "a", "callback" ]
5d86d46d7ed91a92000496189e9cfa6b98243937
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/preprocessing.py#L547-L598
227,578
wesm/feather
cpp/build-support/cpplint.py
IsErrorSuppressedByNolint
def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment. """ return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
python
def IsErrorSuppressedByNolint(category, linenum): return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
[ "def", "IsErrorSuppressedByNolint", "(", "category", ",", "linenum", ")", ":", "return", "(", "linenum", "in", "_error_suppressions", ".", "get", "(", "category", ",", "set", "(", ")", ")", "or", "linenum", "in", "_error_suppressions", ".", "get", "(", "None...
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment.
[ "Returns", "true", "if", "the", "specified", "error", "category", "is", "suppressed", "on", "this", "line", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L541-L554
227,579
wesm/feather
cpp/build-support/cpplint.py
Error
def Error(filename, linenum, category, confidence, message): """Logs the fact we've found a lint error. We log where the error was found, and also our confidence in the error, that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. False positives can be suppressed by the use of "cpplint(category)" comments on the offending line. These are parsed into _error_suppressions. Args: filename: The name of the file containing the error. linenum: The number of the line containing the error. category: A string used to describe the "category" this bug falls under: "whitespace", say, or "runtime". Categories may have a hierarchy separated by slashes: "whitespace/indent". confidence: A number from 1-5 representing a confidence score for the error, with 5 meaning that we are certain of the problem, and 1 meaning that it could be a legitimate construct. message: The error message. """ if _ShouldPrintError(category, confidence, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'eclipse': sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) else: sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence))
python
def Error(filename, linenum, category, confidence, message): if _ShouldPrintError(category, confidence, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'eclipse': sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) else: sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence))
[ "def", "Error", "(", "filename", ",", "linenum", ",", "category", ",", "confidence", ",", "message", ")", ":", "if", "_ShouldPrintError", "(", "category", ",", "confidence", ",", "linenum", ")", ":", "_cpplint_state", ".", "IncrementErrorCount", "(", "category...
Logs the fact we've found a lint error. We log where the error was found, and also our confidence in the error, that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. False positives can be suppressed by the use of "cpplint(category)" comments on the offending line. These are parsed into _error_suppressions. Args: filename: The name of the file containing the error. linenum: The number of the line containing the error. category: A string used to describe the "category" this bug falls under: "whitespace", say, or "runtime". Categories may have a hierarchy separated by slashes: "whitespace/indent". confidence: A number from 1-5 representing a confidence score for the error, with 5 meaning that we are certain of the problem, and 1 meaning that it could be a legitimate construct. message: The error message.
[ "Logs", "the", "fact", "we", "ve", "found", "a", "lint", "error", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L1092-L1124
227,580
wesm/feather
cpp/build-support/cpplint.py
CheckHeaderFileIncluded
def CheckHeaderFileIncluded(filename, include_state, error): """Logs an error if a .cc file does not include its header.""" # Do not check test files if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'): return fileinfo = FileInfo(filename) headerfile = filename[0:len(filename) - 2] + 'h' if not os.path.exists(headerfile): return headername = FileInfo(headerfile).RepositoryName() first_include = 0 for section_list in include_state.include_list: for f in section_list: if headername in f[0] or f[0] in headername: return if not first_include: first_include = f[1] error(filename, first_include, 'build/include', 5, '%s should include its header file %s' % (fileinfo.RepositoryName(), headername))
python
def CheckHeaderFileIncluded(filename, include_state, error): # Do not check test files if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'): return fileinfo = FileInfo(filename) headerfile = filename[0:len(filename) - 2] + 'h' if not os.path.exists(headerfile): return headername = FileInfo(headerfile).RepositoryName() first_include = 0 for section_list in include_state.include_list: for f in section_list: if headername in f[0] or f[0] in headername: return if not first_include: first_include = f[1] error(filename, first_include, 'build/include', 5, '%s should include its header file %s' % (fileinfo.RepositoryName(), headername))
[ "def", "CheckHeaderFileIncluded", "(", "filename", ",", "include_state", ",", "error", ")", ":", "# Do not check test files", "if", "filename", ".", "endswith", "(", "'_test.cc'", ")", "or", "filename", ".", "endswith", "(", "'_unittest.cc'", ")", ":", "return", ...
Logs an error if a .cc file does not include its header.
[ "Logs", "an", "error", "if", "a", ".", "cc", "file", "does", "not", "include", "its", "header", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L1775-L1797
227,581
wesm/feather
cpp/build-support/cpplint.py
CheckForBadCharacters
def CheckForBadCharacters(filename, lines, error): """Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ for linenum, line in enumerate(lines): if u'\ufffd' in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
python
def CheckForBadCharacters(filename, lines, error): for linenum, line in enumerate(lines): if u'\ufffd' in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
[ "def", "CheckForBadCharacters", "(", "filename", ",", "lines", ",", "error", ")", ":", "for", "linenum", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "if", "u'\\ufffd'", "in", "line", ":", "error", "(", "filename", ",", "linenum", ",", "'reada...
Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found.
[ "Logs", "an", "error", "for", "each", "line", "containing", "bad", "characters", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L1800-L1822
227,582
wesm/feather
cpp/build-support/cpplint.py
CheckOperatorSpacing
def CheckOperatorSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Don't try to do spacing checks for operator methods. Do this by # replacing the troublesome characters with something else, # preserving column position for all other characters. # # The replacement is done repeatedly to avoid false positives from # operators that call operators. while True: match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) if match: line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) else: break # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) if ((Search(r'[\w.]=', line) or Search(r'=[\w.]', line)) and not Search(r'\b(if|while|for) ', line) # Operators taken from [lex.operators] in C++11 standard. and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) and not Search(r'operator=', line)): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') # It's ok not to have spaces around binary operators like + - * /, but if # there's too little whitespace, we get concerned. It's hard to tell, # though, so we punt on this one for now. TODO. # You should always have whitespace around binary operators. # # Check <= and >= first to avoid false positives with < and >, then # check non-include lines for spacing around < and >. # # If the operator is followed by a comma, assume it's be used in a # macro context and don't do any checks. This avoids false # positives. # # Note that && is not included here. 
Those are checked separately # in CheckRValueReference match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around %s' % match.group(1)) elif not Match(r'#.*include', line): # Look for < that is not surrounded by spaces. This is only # triggered if both sides are missing spaces, even though # technically should should flag if at least one side is missing a # space. This is done to avoid some false positives with shifts. match = Match(r'^(.*[^\s<])<[^\s=<,]', line) if match: (_, _, end_pos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if end_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <') # Look for > that is not surrounded by spaces. Similar to the # above, we only trigger if both sides are missing spaces to avoid # false positives with shifts. match = Match(r'^(.*[^-\s>])>[^\s=>,]', line) if match: (_, _, start_pos) = ReverseCloseExpression( clean_lines, linenum, len(match.group(1))) if start_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >') # We allow no-spaces around << when used like this: 10<<20, but # not otherwise (particularly, not when used as streams) # # We also allow operators following an opening parenthesis, since # those tend to be macros that deal with operators. match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line) if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and not (match.group(1) == 'operator' and match.group(2) == ';')): error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<') # We allow no-spaces around >> for almost anything. This is because # C++11 allows ">>" to close nested templates, which accounts for # most cases when ">>" is not followed by a space. 
# # We still warn on ">>" followed by alpha character, because that is # likely due to ">>" being used for right shifts, e.g.: # value >> alpha # # When ">>" is used to close templates, the alphanumeric letter that # follows would be part of an identifier, and there should still be # a space separating the template type and the identifier. # type<type<type>> alpha match = Search(r'>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') # There shouldn't be space around unary operators match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1))
python
def CheckOperatorSpacing(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Don't try to do spacing checks for operator methods. Do this by # replacing the troublesome characters with something else, # preserving column position for all other characters. # # The replacement is done repeatedly to avoid false positives from # operators that call operators. while True: match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) if match: line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) else: break # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) if ((Search(r'[\w.]=', line) or Search(r'=[\w.]', line)) and not Search(r'\b(if|while|for) ', line) # Operators taken from [lex.operators] in C++11 standard. and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) and not Search(r'operator=', line)): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') # It's ok not to have spaces around binary operators like + - * /, but if # there's too little whitespace, we get concerned. It's hard to tell, # though, so we punt on this one for now. TODO. # You should always have whitespace around binary operators. # # Check <= and >= first to avoid false positives with < and >, then # check non-include lines for spacing around < and >. # # If the operator is followed by a comma, assume it's be used in a # macro context and don't do any checks. This avoids false # positives. # # Note that && is not included here. 
Those are checked separately # in CheckRValueReference match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around %s' % match.group(1)) elif not Match(r'#.*include', line): # Look for < that is not surrounded by spaces. This is only # triggered if both sides are missing spaces, even though # technically should should flag if at least one side is missing a # space. This is done to avoid some false positives with shifts. match = Match(r'^(.*[^\s<])<[^\s=<,]', line) if match: (_, _, end_pos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if end_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <') # Look for > that is not surrounded by spaces. Similar to the # above, we only trigger if both sides are missing spaces to avoid # false positives with shifts. match = Match(r'^(.*[^-\s>])>[^\s=>,]', line) if match: (_, _, start_pos) = ReverseCloseExpression( clean_lines, linenum, len(match.group(1))) if start_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >') # We allow no-spaces around << when used like this: 10<<20, but # not otherwise (particularly, not when used as streams) # # We also allow operators following an opening parenthesis, since # those tend to be macros that deal with operators. match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line) if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and not (match.group(1) == 'operator' and match.group(2) == ';')): error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<') # We allow no-spaces around >> for almost anything. This is because # C++11 allows ">>" to close nested templates, which accounts for # most cases when ">>" is not followed by a space. 
# # We still warn on ">>" followed by alpha character, because that is # likely due to ">>" being used for right shifts, e.g.: # value >> alpha # # When ">>" is used to close templates, the alphanumeric letter that # follows would be part of an identifier, and there should still be # a space separating the template type and the identifier. # type<type<type>> alpha match = Search(r'>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') # There shouldn't be space around unary operators match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1))
[ "def", "CheckOperatorSpacing", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Don't try to do spacing checks for operator methods. Do this by", "# replacing the troublesome cha...
Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
[ "Checks", "for", "horizontal", "spacing", "around", "operators", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L3127-L3239
227,583
wesm/feather
cpp/build-support/cpplint.py
IsDeletedOrDefault
def IsDeletedOrDefault(clean_lines, linenum): """Check if current constructor or operator is deleted or default. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if this is a deleted or default constructor. """ open_paren = clean_lines.elided[linenum].find('(') if open_paren < 0: return False (close_line, _, close_paren) = CloseExpression( clean_lines, linenum, open_paren) if close_paren < 0: return False return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
python
def IsDeletedOrDefault(clean_lines, linenum): open_paren = clean_lines.elided[linenum].find('(') if open_paren < 0: return False (close_line, _, close_paren) = CloseExpression( clean_lines, linenum, open_paren) if close_paren < 0: return False return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
[ "def", "IsDeletedOrDefault", "(", "clean_lines", ",", "linenum", ")", ":", "open_paren", "=", "clean_lines", ".", "elided", "[", "linenum", "]", ".", "find", "(", "'('", ")", "if", "open_paren", "<", "0", ":", "return", "False", "(", "close_line", ",", "...
Check if current constructor or operator is deleted or default. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if this is a deleted or default constructor.
[ "Check", "if", "current", "constructor", "or", "operator", "is", "deleted", "or", "default", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L3635-L3651
227,584
wesm/feather
cpp/build-support/cpplint.py
IsRValueAllowed
def IsRValueAllowed(clean_lines, linenum, typenames): """Check if RValue reference is allowed on a particular line. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. typenames: set of type names from template-argument-list. Returns: True if line is within the region where RValue references are allowed. """ # Allow region marked by PUSH/POP macros for i in xrange(linenum, 0, -1): line = clean_lines.elided[i] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): if not line.endswith('PUSH'): return False for j in xrange(linenum, clean_lines.NumLines(), 1): line = clean_lines.elided[j] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): return line.endswith('POP') # Allow operator= line = clean_lines.elided[linenum] if Search(r'\boperator\s*=\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) # Allow constructors match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line) if match and match.group(1) == match.group(2): return IsDeletedOrDefault(clean_lines, linenum) if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) if Match(r'\s*[\w<>]+\s*\(', line): previous_line = 'ReturnType' if linenum > 0: previous_line = clean_lines.elided[linenum - 1] if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line): return IsDeletedOrDefault(clean_lines, linenum) # Reject types not mentioned in template-argument-list while line: match = Match(r'^.*?(\w+)\s*&&(.*)$', line) if not match: break if match.group(1) not in typenames: return False line = match.group(2) # All RValue types that were in template-argument-list should have # been removed by now. Those were allowed, assuming that they will # be forwarded. # # If there are no remaining RValue types left (i.e. types that were # not found in template-argument-list), flag those as not allowed. return line.find('&&') < 0
python
def IsRValueAllowed(clean_lines, linenum, typenames): # Allow region marked by PUSH/POP macros for i in xrange(linenum, 0, -1): line = clean_lines.elided[i] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): if not line.endswith('PUSH'): return False for j in xrange(linenum, clean_lines.NumLines(), 1): line = clean_lines.elided[j] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): return line.endswith('POP') # Allow operator= line = clean_lines.elided[linenum] if Search(r'\boperator\s*=\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) # Allow constructors match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line) if match and match.group(1) == match.group(2): return IsDeletedOrDefault(clean_lines, linenum) if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) if Match(r'\s*[\w<>]+\s*\(', line): previous_line = 'ReturnType' if linenum > 0: previous_line = clean_lines.elided[linenum - 1] if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line): return IsDeletedOrDefault(clean_lines, linenum) # Reject types not mentioned in template-argument-list while line: match = Match(r'^.*?(\w+)\s*&&(.*)$', line) if not match: break if match.group(1) not in typenames: return False line = match.group(2) # All RValue types that were in template-argument-list should have # been removed by now. Those were allowed, assuming that they will # be forwarded. # # If there are no remaining RValue types left (i.e. types that were # not found in template-argument-list), flag those as not allowed. return line.find('&&') < 0
[ "def", "IsRValueAllowed", "(", "clean_lines", ",", "linenum", ",", "typenames", ")", ":", "# Allow region marked by PUSH/POP macros", "for", "i", "in", "xrange", "(", "linenum", ",", "0", ",", "-", "1", ")", ":", "line", "=", "clean_lines", ".", "elided", "[...
Check if RValue reference is allowed on a particular line. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. typenames: set of type names from template-argument-list. Returns: True if line is within the region where RValue references are allowed.
[ "Check", "if", "RValue", "reference", "is", "allowed", "on", "a", "particular", "line", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L3654-L3709
227,585
wesm/feather
cpp/build-support/cpplint.py
GetTemplateArgs
def GetTemplateArgs(clean_lines, linenum): """Find list of template arguments associated with this function declaration. Args: clean_lines: A CleansedLines instance containing the file. linenum: Line number containing the start of the function declaration, usually one line after the end of the template-argument-list. Returns: Set of type names, or empty set if this does not appear to have any template parameters. """ # Find start of function func_line = linenum while func_line > 0: line = clean_lines.elided[func_line] if Match(r'^\s*$', line): return set() if line.find('(') >= 0: break func_line -= 1 if func_line == 0: return set() # Collapse template-argument-list into a single string argument_list = '' match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line]) if match: # template-argument-list on the same line as function name start_col = len(match.group(1)) _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col) if end_col > -1 and end_line == func_line: start_col += 1 # Skip the opening bracket argument_list = clean_lines.elided[func_line][start_col:end_col] elif func_line > 1: # template-argument-list one line before function name match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1]) if match: end_col = len(match.group(1)) _, start_line, start_col = ReverseCloseExpression( clean_lines, func_line - 1, end_col) if start_col > -1: start_col += 1 # Skip the opening bracket while start_line < func_line - 1: argument_list += clean_lines.elided[start_line][start_col:] start_col = 0 start_line += 1 argument_list += clean_lines.elided[func_line - 1][start_col:end_col] if not argument_list: return set() # Extract type names typenames = set() while True: match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$', argument_list) if not match: break typenames.add(match.group(1)) argument_list = match.group(2) return typenames
python
def GetTemplateArgs(clean_lines, linenum): # Find start of function func_line = linenum while func_line > 0: line = clean_lines.elided[func_line] if Match(r'^\s*$', line): return set() if line.find('(') >= 0: break func_line -= 1 if func_line == 0: return set() # Collapse template-argument-list into a single string argument_list = '' match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line]) if match: # template-argument-list on the same line as function name start_col = len(match.group(1)) _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col) if end_col > -1 and end_line == func_line: start_col += 1 # Skip the opening bracket argument_list = clean_lines.elided[func_line][start_col:end_col] elif func_line > 1: # template-argument-list one line before function name match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1]) if match: end_col = len(match.group(1)) _, start_line, start_col = ReverseCloseExpression( clean_lines, func_line - 1, end_col) if start_col > -1: start_col += 1 # Skip the opening bracket while start_line < func_line - 1: argument_list += clean_lines.elided[start_line][start_col:] start_col = 0 start_line += 1 argument_list += clean_lines.elided[func_line - 1][start_col:end_col] if not argument_list: return set() # Extract type names typenames = set() while True: match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$', argument_list) if not match: break typenames.add(match.group(1)) argument_list = match.group(2) return typenames
[ "def", "GetTemplateArgs", "(", "clean_lines", ",", "linenum", ")", ":", "# Find start of function", "func_line", "=", "linenum", "while", "func_line", ">", "0", ":", "line", "=", "clean_lines", ".", "elided", "[", "func_line", "]", "if", "Match", "(", "r'^\\s*...
Find list of template arguments associated with this function declaration. Args: clean_lines: A CleansedLines instance containing the file. linenum: Line number containing the start of the function declaration, usually one line after the end of the template-argument-list. Returns: Set of type names, or empty set if this does not appear to have any template parameters.
[ "Find", "list", "of", "template", "arguments", "associated", "with", "this", "function", "declaration", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L3712-L3773
227,586
wesm/feather
cpp/build-support/cpplint.py
CheckRValueReference
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): """Check for rvalue references. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Find lines missing spaces around &&. # TODO(unknown): currently we don't check for rvalue references # with spaces surrounding the && to avoid false positives with # boolean expressions. line = clean_lines.elided[linenum] match = Match(r'^(.*\S)&&', line) if not match: match = Match(r'(.*)&&\S', line) if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)): return # Either poorly formed && or an rvalue reference, check the context # to get a more accurate error message. Mostly we want to determine # if what's to the left of "&&" is a type or not. typenames = GetTemplateArgs(clean_lines, linenum) and_pos = len(match.group(1)) if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos): if not IsRValueAllowed(clean_lines, linenum, typenames): error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.') else: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&')
python
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): # Find lines missing spaces around &&. # TODO(unknown): currently we don't check for rvalue references # with spaces surrounding the && to avoid false positives with # boolean expressions. line = clean_lines.elided[linenum] match = Match(r'^(.*\S)&&', line) if not match: match = Match(r'(.*)&&\S', line) if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)): return # Either poorly formed && or an rvalue reference, check the context # to get a more accurate error message. Mostly we want to determine # if what's to the left of "&&" is a type or not. typenames = GetTemplateArgs(clean_lines, linenum) and_pos = len(match.group(1)) if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos): if not IsRValueAllowed(clean_lines, linenum, typenames): error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.') else: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&')
[ "def", "CheckRValueReference", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", ":", "# Find lines missing spaces around &&.", "# TODO(unknown): currently we don't check for rvalue references", "# with spaces surrounding the && to avoid fa...
Check for rvalue references. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
[ "Check", "for", "rvalue", "references", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L3776-L3809
227,587
wesm/feather
cpp/build-support/cpplint.py
_DropCommonSuffixes
def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0]
python
def _DropCommonSuffixes(filename): for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0]
[ "def", "_DropCommonSuffixes", "(", "filename", ")", ":", "for", "suffix", "in", "(", "'test.cc'", ",", "'regtest.cc'", ",", "'unittest.cc'", ",", "'inl.h'", ",", "'impl.h'", ",", "'internal.h'", ")", ":", "if", "(", "filename", ".", "endswith", "(", "suffix"...
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed.
[ "Drops", "common", "suffixes", "like", "_test", ".", "cc", "or", "-", "inl", ".", "h", "from", "filename", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L4501-L4525
227,588
wesm/feather
cpp/build-support/cpplint.py
CheckGlobalStatic
def CheckGlobalStatic(filename, clean_lines, linenum, error): """Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Match two lines at a time to support multiline declarations if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): line += clean_lines.elided[linenum + 1].strip() # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access. match = Match( r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', line) # Remove false positives: # - String pointers (as opposed to values). # string *pointer # const string *pointer # string const *pointer # string *const pointer # # - Functions and template specializations. # string Function<Type>(... # string Class<Type>::Method(... # # - Operators. These are matched separately because operator names # cross non-word boundaries, and trying to match both operators # and functions at the same time would decrease accuracy of # matching identifiers. # string Class::operator*() if (match and not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string instead: ' '"%schar %s[]".' % (match.group(1), match.group(2))) if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.')
python
def CheckGlobalStatic(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Match two lines at a time to support multiline declarations if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): line += clean_lines.elided[linenum + 1].strip() # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access. match = Match( r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', line) # Remove false positives: # - String pointers (as opposed to values). # string *pointer # const string *pointer # string const *pointer # string *const pointer # # - Functions and template specializations. # string Function<Type>(... # string Class<Type>::Method(... # # - Operators. These are matched separately because operator names # cross non-word boundaries, and trying to match both operators # and functions at the same time would decrease accuracy of # matching identifiers. # string Class::operator*() if (match and not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string instead: ' '"%schar %s[]".' % (match.group(1), match.group(2))) if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.')
[ "def", "CheckGlobalStatic", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Match two lines at a time to support multiline declarations", "if", "linenum", "+", "1", "<", ...
Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
[ "Check", "for", "unsafe", "global", "or", "static", "objects", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L4919-L4968
227,589
wesm/feather
cpp/build-support/cpplint.py
CheckCStyleCast
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): """Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise. """ line = clean_lines.elided[linenum] match = Search(pattern, line) if not match: return False # Exclude lines with keywords that tend to look like casts context = line[0:match.start(1) - 1] if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): return False # Try expanding current context to see if we one level of # parentheses inside a macro. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 5), -1): context = clean_lines.elided[i] + context if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): return False # operator++(int) and operator--(int) if context.endswith(' operator++') or context.endswith(' operator--'): return False # A single unnamed argument for a function tends to look like old # style cast. If we see those, don't issue warnings for deprecated # casts, instead issue warnings for unnamed arguments where # appropriate. # # These are things that we want warnings for, since the style guide # explicitly require all parameters to be named: # Function(int); # Function(int) { # ConstMember(int) const; # ConstMember(int) const { # ExceptionMember(int) throw (...); # ExceptionMember(int) throw (...) 
{ # PureVirtual(int) = 0; # [](int) -> bool { # # These are functions of some sort, where the compiler would be fine # if they had named parameters, but people often omit those # identifiers to reduce clutter: # (FunctionPointer)(int); # (FunctionPointer)(int) = value; # Function((function_pointer_arg)(int)) # Function((function_pointer_arg)(int), int param) # <TemplateArgument(int)>; # <(FunctionPointerTemplateArgument)(int)>; remainder = line[match.end(0):] if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', remainder): # Looks like an unnamed parameter. # Don't warn on any kind of template arguments. if Match(r'^\s*>', remainder): return False # Don't warn on assignments to function pointers, but keep warnings for # unnamed parameters to pure virtual functions. Note that this pattern # will also pass on assignments of "0" to function pointers, but the # preferred values for those would be "nullptr" or "NULL". matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder) if matched_zero and matched_zero.group(1) != '0': return False # Don't warn on function pointer declarations. For this we need # to check what came before the "(type)" string. if Match(r'.*\)\s*$', line[0:match.start(0)]): return False # Don't warn if the parameter is named with block comments, e.g.: # Function(int /*unused_param*/); raw_line = clean_lines.raw_lines[linenum] if '/*' in raw_line: return False # Passed all filters, issue warning here. error(filename, linenum, 'readability/function', 3, 'All parameters should be named in a function') return True # At this point, all that should be left is actual casts. error(filename, linenum, 'readability/casting', 4, 'Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))) return True
python
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): line = clean_lines.elided[linenum] match = Search(pattern, line) if not match: return False # Exclude lines with keywords that tend to look like casts context = line[0:match.start(1) - 1] if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): return False # Try expanding current context to see if we one level of # parentheses inside a macro. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 5), -1): context = clean_lines.elided[i] + context if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): return False # operator++(int) and operator--(int) if context.endswith(' operator++') or context.endswith(' operator--'): return False # A single unnamed argument for a function tends to look like old # style cast. If we see those, don't issue warnings for deprecated # casts, instead issue warnings for unnamed arguments where # appropriate. # # These are things that we want warnings for, since the style guide # explicitly require all parameters to be named: # Function(int); # Function(int) { # ConstMember(int) const; # ConstMember(int) const { # ExceptionMember(int) throw (...); # ExceptionMember(int) throw (...) { # PureVirtual(int) = 0; # [](int) -> bool { # # These are functions of some sort, where the compiler would be fine # if they had named parameters, but people often omit those # identifiers to reduce clutter: # (FunctionPointer)(int); # (FunctionPointer)(int) = value; # Function((function_pointer_arg)(int)) # Function((function_pointer_arg)(int), int param) # <TemplateArgument(int)>; # <(FunctionPointerTemplateArgument)(int)>; remainder = line[match.end(0):] if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', remainder): # Looks like an unnamed parameter. # Don't warn on any kind of template arguments. 
if Match(r'^\s*>', remainder): return False # Don't warn on assignments to function pointers, but keep warnings for # unnamed parameters to pure virtual functions. Note that this pattern # will also pass on assignments of "0" to function pointers, but the # preferred values for those would be "nullptr" or "NULL". matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder) if matched_zero and matched_zero.group(1) != '0': return False # Don't warn on function pointer declarations. For this we need # to check what came before the "(type)" string. if Match(r'.*\)\s*$', line[0:match.start(0)]): return False # Don't warn if the parameter is named with block comments, e.g.: # Function(int /*unused_param*/); raw_line = clean_lines.raw_lines[linenum] if '/*' in raw_line: return False # Passed all filters, issue warning here. error(filename, linenum, 'readability/function', 3, 'All parameters should be named in a function') return True # At this point, all that should be left is actual casts. error(filename, linenum, 'readability/casting', 4, 'Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))) return True
[ "def", "CheckCStyleCast", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "cast_type", ",", "pattern", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "match", "=", "Search", "(", "pattern", ",", "line", ...
Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise.
[ "Checks", "for", "a", "C", "-", "style", "cast", "by", "looking", "for", "the", "pattern", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L5337-L5438
227,590
wesm/feather
cpp/build-support/cpplint.py
FilesBelongToSameModule
def FilesBelongToSameModule(filename_cc, filename_h): """Check if these two filenames belong to the same module. The concept of a 'module' here is a as follows: foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the same 'module' if they are in the same directory. some/path/public/xyzzy and some/path/internal/xyzzy are also considered to belong to the same module here. If the filename_cc contains a longer path than the filename_h, for example, '/absolute/path/to/base/sysinfo.cc', and this file would include 'base/sysinfo.h', this function also produces the prefix needed to open the header. This is used by the caller of this function to more robustly open the header file. We don't have access to the real include paths in this context, so we need this guesswork here. Known bugs: tools/base/bar.cc and base/bar.h belong to the same module according to this implementation. Because of this, this function gives some false positives. This should be sufficiently rare in practice. Args: filename_cc: is the path for the .cc file filename_h: is the path for the header path Returns: Tuple with a bool and a string: bool: True if filename_cc and filename_h belong to the same module. string: the additional prefix needed to open the header file. 
""" if not filename_cc.endswith('.cc'): return (False, '') filename_cc = filename_cc[:-len('.cc')] if filename_cc.endswith('_unittest'): filename_cc = filename_cc[:-len('_unittest')] elif filename_cc.endswith('_test'): filename_cc = filename_cc[:-len('_test')] filename_cc = filename_cc.replace('/public/', '/') filename_cc = filename_cc.replace('/internal/', '/') if not filename_h.endswith('.h'): return (False, '') filename_h = filename_h[:-len('.h')] if filename_h.endswith('-inl'): filename_h = filename_h[:-len('-inl')] filename_h = filename_h.replace('/public/', '/') filename_h = filename_h.replace('/internal/', '/') files_belong_to_same_module = filename_cc.endswith(filename_h) common_path = '' if files_belong_to_same_module: common_path = filename_cc[:-len(filename_h)] return files_belong_to_same_module, common_path
python
def FilesBelongToSameModule(filename_cc, filename_h): if not filename_cc.endswith('.cc'): return (False, '') filename_cc = filename_cc[:-len('.cc')] if filename_cc.endswith('_unittest'): filename_cc = filename_cc[:-len('_unittest')] elif filename_cc.endswith('_test'): filename_cc = filename_cc[:-len('_test')] filename_cc = filename_cc.replace('/public/', '/') filename_cc = filename_cc.replace('/internal/', '/') if not filename_h.endswith('.h'): return (False, '') filename_h = filename_h[:-len('.h')] if filename_h.endswith('-inl'): filename_h = filename_h[:-len('-inl')] filename_h = filename_h.replace('/public/', '/') filename_h = filename_h.replace('/internal/', '/') files_belong_to_same_module = filename_cc.endswith(filename_h) common_path = '' if files_belong_to_same_module: common_path = filename_cc[:-len(filename_h)] return files_belong_to_same_module, common_path
[ "def", "FilesBelongToSameModule", "(", "filename_cc", ",", "filename_h", ")", ":", "if", "not", "filename_cc", ".", "endswith", "(", "'.cc'", ")", ":", "return", "(", "False", ",", "''", ")", "filename_cc", "=", "filename_cc", "[", ":", "-", "len", "(", ...
Check if these two filenames belong to the same module. The concept of a 'module' here is a as follows: foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the same 'module' if they are in the same directory. some/path/public/xyzzy and some/path/internal/xyzzy are also considered to belong to the same module here. If the filename_cc contains a longer path than the filename_h, for example, '/absolute/path/to/base/sysinfo.cc', and this file would include 'base/sysinfo.h', this function also produces the prefix needed to open the header. This is used by the caller of this function to more robustly open the header file. We don't have access to the real include paths in this context, so we need this guesswork here. Known bugs: tools/base/bar.cc and base/bar.h belong to the same module according to this implementation. Because of this, this function gives some false positives. This should be sufficiently rare in practice. Args: filename_cc: is the path for the .cc file filename_h: is the path for the header path Returns: Tuple with a bool and a string: bool: True if filename_cc and filename_h belong to the same module. string: the additional prefix needed to open the header file.
[ "Check", "if", "these", "two", "filenames", "belong", "to", "the", "same", "module", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L5522-L5574
227,591
wesm/feather
cpp/build-support/cpplint.py
CheckForIncludeWhatYouUse
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, io=codecs): """Reports for missing stl includes. This function will output warnings to make sure you are including the headers necessary for the stl containers and functions that you use. We only give one reason to include a header. For example, if you use both equal_to<> and less<> in a .h file, only one (the latter in the file) of these will be reported as a reason to include the <functional>. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. include_state: An _IncludeState instance. error: The function to call with any errors found. io: The IO factory to use to read the header file. Provided for unittest injection. """ required = {} # A map of header name to linenumber and the template entity. # Example of required: { '<functional>': (1219, 'less<>') } for linenum in xrange(clean_lines.NumLines()): line = clean_lines.elided[linenum] if not line or line[0] == '#': continue # String is special -- it is a non-templatized type in STL. matched = _RE_PATTERN_STRING.search(line) if matched: # Don't warn about strings in non-STL namespaces: # (We check only the first match per line; good enough.) prefix = line[:matched.start()] if prefix.endswith('std::') or not prefix.endswith('::'): required['<string>'] = (linenum, 'string') for pattern, template, header in _re_pattern_algorithm_header: if pattern.search(line): required[header] = (linenum, template) # The following function is just a speed up, no semantics are changed. if not '<' in line: # Reduces the cpu time usage by skipping lines. continue for pattern, template, header in _re_pattern_templates: if pattern.search(line): required[header] = (linenum, template) # The policy is that if you #include something in foo.h you don't need to # include it again in foo.cc. Here, we will look at possible includes. # Let's flatten the include_state include_list and copy it into a dictionary. 
include_dict = dict([item for sublist in include_state.include_list for item in sublist]) # Did we find the header for this file (if any) and successfully load it? header_found = False # Use the absolute path so that matching works properly. abs_filename = FileInfo(filename).FullName() # For Emacs's flymake. # If cpplint is invoked from Emacs's flymake, a temporary file is generated # by flymake and that file name might end with '_flymake.cc'. In that case, # restore original file name here so that the corresponding header file can be # found. # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' # instead of 'foo_flymake.h' abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) # include_dict is modified during iteration, so we iterate over a copy of # the keys. header_keys = include_dict.keys() for header in header_keys: (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) fullpath = common_path + header if same_module and UpdateIncludeState(fullpath, include_dict, io): header_found = True # If we can't find the header file for a .cc, assume it's because we don't # know where to look. In that case we'll give up as we're not sure they # didn't include it in the .h file. # TODO(unknown): Do a better job of finding .h files so we are confident that # not having the .h file means there isn't one. if filename.endswith('.cc') and not header_found: return # All the lines have been processed, report the errors found. for required_header_unstripped in required: template = required[required_header_unstripped][1] if required_header_unstripped.strip('<>"') not in include_dict: error(filename, required[required_header_unstripped][0], 'build/include_what_you_use', 4, 'Add #include ' + required_header_unstripped + ' for ' + template)
python
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, io=codecs): required = {} # A map of header name to linenumber and the template entity. # Example of required: { '<functional>': (1219, 'less<>') } for linenum in xrange(clean_lines.NumLines()): line = clean_lines.elided[linenum] if not line or line[0] == '#': continue # String is special -- it is a non-templatized type in STL. matched = _RE_PATTERN_STRING.search(line) if matched: # Don't warn about strings in non-STL namespaces: # (We check only the first match per line; good enough.) prefix = line[:matched.start()] if prefix.endswith('std::') or not prefix.endswith('::'): required['<string>'] = (linenum, 'string') for pattern, template, header in _re_pattern_algorithm_header: if pattern.search(line): required[header] = (linenum, template) # The following function is just a speed up, no semantics are changed. if not '<' in line: # Reduces the cpu time usage by skipping lines. continue for pattern, template, header in _re_pattern_templates: if pattern.search(line): required[header] = (linenum, template) # The policy is that if you #include something in foo.h you don't need to # include it again in foo.cc. Here, we will look at possible includes. # Let's flatten the include_state include_list and copy it into a dictionary. include_dict = dict([item for sublist in include_state.include_list for item in sublist]) # Did we find the header for this file (if any) and successfully load it? header_found = False # Use the absolute path so that matching works properly. abs_filename = FileInfo(filename).FullName() # For Emacs's flymake. # If cpplint is invoked from Emacs's flymake, a temporary file is generated # by flymake and that file name might end with '_flymake.cc'. In that case, # restore original file name here so that the corresponding header file can be # found. # e.g. 
If the file name is 'foo_flymake.cc', we should search for 'foo.h' # instead of 'foo_flymake.h' abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) # include_dict is modified during iteration, so we iterate over a copy of # the keys. header_keys = include_dict.keys() for header in header_keys: (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) fullpath = common_path + header if same_module and UpdateIncludeState(fullpath, include_dict, io): header_found = True # If we can't find the header file for a .cc, assume it's because we don't # know where to look. In that case we'll give up as we're not sure they # didn't include it in the .h file. # TODO(unknown): Do a better job of finding .h files so we are confident that # not having the .h file means there isn't one. if filename.endswith('.cc') and not header_found: return # All the lines have been processed, report the errors found. for required_header_unstripped in required: template = required[required_header_unstripped][1] if required_header_unstripped.strip('<>"') not in include_dict: error(filename, required[required_header_unstripped][0], 'build/include_what_you_use', 4, 'Add #include ' + required_header_unstripped + ' for ' + template)
[ "def", "CheckForIncludeWhatYouUse", "(", "filename", ",", "clean_lines", ",", "include_state", ",", "error", ",", "io", "=", "codecs", ")", ":", "required", "=", "{", "}", "# A map of header name to linenumber and the template entity.", "# Example of required: { '<functiona...
Reports for missing stl includes. This function will output warnings to make sure you are including the headers necessary for the stl containers and functions that you use. We only give one reason to include a header. For example, if you use both equal_to<> and less<> in a .h file, only one (the latter in the file) of these will be reported as a reason to include the <functional>. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. include_state: An _IncludeState instance. error: The function to call with any errors found. io: The IO factory to use to read the header file. Provided for unittest injection.
[ "Reports", "for", "missing", "stl", "includes", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L5604-L5695
227,592
wesm/feather
cpp/build-support/cpplint.py
CheckDefaultLambdaCaptures
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error): """Check that default lambda captures are not used. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # A lambda introducer specifies a default capture if it starts with "[=" # or if it starts with "[&" _not_ followed by an identifier. match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line) if match: # Found a potential error, check what comes after the lambda-introducer. # If it's not open parenthesis (for lambda-declarator) or open brace # (for compound-statement), it's not a lambda. line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1))) if pos >= 0 and Match(r'^\s*[{(]', line[pos:]): error(filename, linenum, 'build/c++11', 4, # 4 = high confidence 'Default lambda captures are an unapproved C++ feature.')
python
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # A lambda introducer specifies a default capture if it starts with "[=" # or if it starts with "[&" _not_ followed by an identifier. match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line) if match: # Found a potential error, check what comes after the lambda-introducer. # If it's not open parenthesis (for lambda-declarator) or open brace # (for compound-statement), it's not a lambda. line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1))) if pos >= 0 and Match(r'^\s*[{(]', line[pos:]): error(filename, linenum, 'build/c++11', 4, # 4 = high confidence 'Default lambda captures are an unapproved C++ feature.')
[ "def", "CheckDefaultLambdaCaptures", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# A lambda introducer specifies a default capture if it starts with \"[=\"", "# or if it starts...
Check that default lambda captures are not used. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
[ "Check", "that", "default", "lambda", "captures", "are", "not", "used", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L5722-L5744
227,593
wesm/feather
cpp/build-support/cpplint.py
ProcessLine
def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]): """Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[line], line, error) nesting_state.Update(filename, clean_lines, line, error) CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error) if nesting_state.InAsmBlock(): return CheckForFunctionLengths(filename, clean_lines, line, function_state, error) CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error) CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) CheckVlogArguments(filename, clean_lines, line, error) CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) 
CheckDefaultLambdaCaptures(filename, clean_lines, line, error) CheckRedundantVirtual(filename, clean_lines, line, error) CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) for check_fn in extra_check_functions: check_fn(filename, clean_lines, line, error)
python
def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]): raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[line], line, error) nesting_state.Update(filename, clean_lines, line, error) CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error) if nesting_state.InAsmBlock(): return CheckForFunctionLengths(filename, clean_lines, line, function_state, error) CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error) CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) CheckVlogArguments(filename, clean_lines, line, error) CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) CheckDefaultLambdaCaptures(filename, clean_lines, line, error) CheckRedundantVirtual(filename, clean_lines, line, error) CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) for check_fn in extra_check_functions: check_fn(filename, clean_lines, line, error)
[ "def", "ProcessLine", "(", "filename", ",", "file_extension", ",", "clean_lines", ",", "line", ",", "include_state", ",", "function_state", ",", "nesting_state", ",", "error", ",", "extra_check_functions", "=", "[", "]", ")", ":", "raw_lines", "=", "clean_lines"...
Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error
[ "Processes", "a", "single", "line", "in", "the", "file", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L5906-L5949
227,594
wesm/feather
cpp/build-support/cpplint.py
FlagCxx11Features
def FlagCxx11Features(filename, clean_lines, linenum, error): """Flag those c++11 features that we only allow in certain places. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Flag unapproved C++11 headers. include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) if include and include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error', ): error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.') % include.group(1)) # The only place where we need to worry about C++11 keywords and library # features in preprocessor directives is in macro definitions. if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return # These are classes and free functions. The classes are always # mentioned as std::*, but we only catch the free functions if # they're not found by ADL. They're alphabetical by header. for top_name in ( # type_traits 'alignment_of', 'aligned_union', ): if Search(r'\bstd::%s\b' % top_name, line): error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function. Send c-style ' 'an example of where it would make your code more readable, and ' 'they may let you use it.') % top_name)
python
def FlagCxx11Features(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Flag unapproved C++11 headers. include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) if include and include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error', ): error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.') % include.group(1)) # The only place where we need to worry about C++11 keywords and library # features in preprocessor directives is in macro definitions. if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return # These are classes and free functions. The classes are always # mentioned as std::*, but we only catch the free functions if # they're not found by ADL. They're alphabetical by header. for top_name in ( # type_traits 'alignment_of', 'aligned_union', ): if Search(r'\bstd::%s\b' % top_name, line): error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function. Send c-style ' 'an example of where it would make your code more readable, and ' 'they may let you use it.') % top_name)
[ "def", "FlagCxx11Features", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Flag unapproved C++11 headers.", "include", "=", "Match", "(", "r'\\s*#\\s*include\\s+[<\"]([^...
Flag those c++11 features that we only allow in certain places. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
[ "Flag", "those", "c", "++", "11", "features", "that", "we", "only", "allow", "in", "certain", "places", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L5951-L5994
227,595
wesm/feather
cpp/build-support/cpplint.py
ProcessConfigOverrides
def ProcessConfigOverrides(filename): """ Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further. """ abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: abs_path, base_name = os.path.split(abs_filename) if not base_name: break # Reached the root directory. cfg_file = os.path.join(abs_path, "CPPLINT.cfg") abs_filename = abs_path if not os.path.isfile(cfg_file): continue try: with open(cfg_file) as file_handle: for line in file_handle: line, _, _ = line.partition('#') # Remove comments. if not line.strip(): continue name, _, val = line.partition('=') name = name.strip() val = val.strip() if name == 'set noparent': keep_looking = False elif name == 'filter': cfg_filters.append(val) elif name == 'exclude_files': # When matching exclude_files pattern, use the base_name of # the current file name or the directory name we are processing. # For example, if we are checking for lint errors in /foo/bar/baz.cc # and we found the .cfg file at /foo/CPPLINT.cfg, then the config # file's "exclude_files" filter is meant to be checked against "bar" # and not "baz" nor "bar/baz.cc". if base_name: pattern = re.compile(val) if pattern.match(base_name): sys.stderr.write('Ignoring "%s": file excluded by "%s". ' 'File path component "%s" matches ' 'pattern "%s"\n' % (filename, cfg_file, base_name, val)) return False elif name == 'linelength': global _line_length try: _line_length = int(val) except ValueError: sys.stderr.write('Line length must be numeric.') else: sys.stderr.write( 'Invalid configuration option (%s) in file %s\n' % (name, cfg_file)) except IOError: sys.stderr.write( "Skipping config file '%s': Can't open for reading\n" % cfg_file) keep_looking = False # Apply all the accumulated filters in reverse order (top-level directory # config options having the least priority). 
for filter in reversed(cfg_filters): _AddFilters(filter) return True
python
def ProcessConfigOverrides(filename): abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: abs_path, base_name = os.path.split(abs_filename) if not base_name: break # Reached the root directory. cfg_file = os.path.join(abs_path, "CPPLINT.cfg") abs_filename = abs_path if not os.path.isfile(cfg_file): continue try: with open(cfg_file) as file_handle: for line in file_handle: line, _, _ = line.partition('#') # Remove comments. if not line.strip(): continue name, _, val = line.partition('=') name = name.strip() val = val.strip() if name == 'set noparent': keep_looking = False elif name == 'filter': cfg_filters.append(val) elif name == 'exclude_files': # When matching exclude_files pattern, use the base_name of # the current file name or the directory name we are processing. # For example, if we are checking for lint errors in /foo/bar/baz.cc # and we found the .cfg file at /foo/CPPLINT.cfg, then the config # file's "exclude_files" filter is meant to be checked against "bar" # and not "baz" nor "bar/baz.cc". if base_name: pattern = re.compile(val) if pattern.match(base_name): sys.stderr.write('Ignoring "%s": file excluded by "%s". ' 'File path component "%s" matches ' 'pattern "%s"\n' % (filename, cfg_file, base_name, val)) return False elif name == 'linelength': global _line_length try: _line_length = int(val) except ValueError: sys.stderr.write('Line length must be numeric.') else: sys.stderr.write( 'Invalid configuration option (%s) in file %s\n' % (name, cfg_file)) except IOError: sys.stderr.write( "Skipping config file '%s': Can't open for reading\n" % cfg_file) keep_looking = False # Apply all the accumulated filters in reverse order (top-level directory # config options having the least priority). for filter in reversed(cfg_filters): _AddFilters(filter) return True
[ "def", "ProcessConfigOverrides", "(", "filename", ")", ":", "abs_filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "cfg_filters", "=", "[", "]", "keep_looking", "=", "True", "while", "keep_looking", ":", "abs_path", ",", "base_name", "...
Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further.
[ "Loads", "the", "configuration", "files", "and", "processes", "the", "config", "overrides", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L6048-L6121
227,596
wesm/feather
cpp/build-support/cpplint.py
_CppLintState.PrintErrorCounts
def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count)
python
def PrintErrorCounts(self): for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count)
[ "def", "PrintErrorCounts", "(", "self", ")", ":", "for", "category", ",", "count", "in", "self", ".", "errors_by_category", ".", "iteritems", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Category \\'%s\\' errors found: %d\\n'", "%", "(", "category...
Print a summary of errors by category, and the total.
[ "Print", "a", "summary", "of", "errors", "by", "category", "and", "the", "total", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L841-L846
227,597
wesm/feather
cpp/build-support/cpplint.py
_FunctionState.Check
def Check(self, error, filename, linenum): """Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check. """ if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger))
python
def Check(self, error, filename, linenum): if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger))
[ "def", "Check", "(", "self", ",", "error", ",", "filename", ",", "linenum", ")", ":", "if", "Match", "(", "r'T(EST|est)'", ",", "self", ".", "current_function", ")", ":", "base_trigger", "=", "self", ".", "_TEST_TRIGGER", "else", ":", "base_trigger", "=", ...
Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check.
[ "Report", "if", "too", "many", "lines", "in", "function", "body", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L939-L962
227,598
wesm/feather
cpp/build-support/cpplint.py
FileInfo.RepositoryName
def RepositoryName(self): """FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like "C:\Documents and Settings\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors. """ fullname = self.FullName() if os.path.exists(fullname): project_dir = os.path.dirname(fullname) if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout root_dir = project_dir one_up_dir = os.path.dirname(root_dir) while os.path.exists(os.path.join(one_up_dir, ".svn")): root_dir = os.path.dirname(root_dir) one_up_dir = os.path.dirname(one_up_dir) prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. root_dir = os.path.dirname(fullname) while (root_dir != os.path.dirname(root_dir) and not os.path.exists(os.path.join(root_dir, ".git")) and not os.path.exists(os.path.join(root_dir, ".hg")) and not os.path.exists(os.path.join(root_dir, ".svn"))): root_dir = os.path.dirname(root_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or os.path.exists(os.path.join(root_dir, ".svn"))): prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Don't know what to do; header guard warnings may be wrong... return fullname
python
def RepositoryName(self): fullname = self.FullName() if os.path.exists(fullname): project_dir = os.path.dirname(fullname) if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout root_dir = project_dir one_up_dir = os.path.dirname(root_dir) while os.path.exists(os.path.join(one_up_dir, ".svn")): root_dir = os.path.dirname(root_dir) one_up_dir = os.path.dirname(one_up_dir) prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. root_dir = os.path.dirname(fullname) while (root_dir != os.path.dirname(root_dir) and not os.path.exists(os.path.join(root_dir, ".git")) and not os.path.exists(os.path.join(root_dir, ".hg")) and not os.path.exists(os.path.join(root_dir, ".svn"))): root_dir = os.path.dirname(root_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or os.path.exists(os.path.join(root_dir, ".svn"))): prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Don't know what to do; header guard warnings may be wrong... return fullname
[ "def", "RepositoryName", "(", "self", ")", ":", "fullname", "=", "self", ".", "FullName", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "fullname", ")", ":", "project_dir", "=", "os", ".", "path", ".", "dirname", "(", "fullname", ")", "if", ...
FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like "C:\Documents and Settings\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors.
[ "FullName", "after", "removing", "the", "local", "path", "to", "the", "repository", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L988-L1031
227,599
wesm/feather
cpp/build-support/cpplint.py
_NamespaceInfo.CheckEnd
def CheckEnd(self, filename, clean_lines, linenum, error): """Check end of namespace comments.""" line = clean_lines.raw_lines[linenum] # Check how many lines is enclosed in this namespace. Don't issue # warning for missing namespace comments if there aren't enough # lines. However, do apply checks if there is already an end of # namespace comment and it's incorrect. # # TODO(unknown): We always want to check end of namespace comments # if a namespace is large, but sometimes we also want to apply the # check if a short namespace contained nontrivial things (something # other than forward declarations). There is currently no logic on # deciding what these nontrivial things are, so this check is # triggered by namespace size only, which works most of the time. if (linenum - self.starting_linenum < 10 and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): return # Look for matching comment at end of namespace. # # Note that we accept C style "/* */" comments for terminating # namespaces, so that code that terminate namespaces inside # preprocessor macros can be cpplint clean. # # We also accept stuff like "// end of namespace <name>." with the # period at the end. # # Besides these, we don't accept anything else, otherwise we might # get false negatives when existing comment is a substring of the # expected namespace. 
if self.name: # Named namespace if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + r'[\*/\.\\\s]*$'), line): error(filename, linenum, 'readability/namespace', 5, 'Namespace should be terminated with "// namespace %s"' % self.name) else: # Anonymous namespace if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): # If "// namespace anonymous" or "// anonymous namespace (more text)", # mention "// anonymous namespace" as an acceptable form if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line): error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"' ' or "// anonymous namespace"') else: error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"')
python
def CheckEnd(self, filename, clean_lines, linenum, error):
    """Verify the end-of-namespace comment on the closing-brace line.

    Reports a 'readability/namespace' issue (severity 5) via `error` when
    the terminating comment is missing or names the wrong namespace.
    """
    end_line = clean_lines.raw_lines[linenum]

    # Don't demand a comment for namespaces under 10 lines, unless one is
    # already present — an existing comment must still be correct.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations). There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
            and not Match(r'};*\s*(//|/\*).*\bnamespace\b', end_line)):
        return

    # Accept both // and /* */ style terminators (macros that close
    # namespaces need the latter) and trailing text like
    # "// end of namespace <name>.".  Decide on a single message below and
    # emit it through one error() call at the end.
    if self.name:
        named_pattern = (r'};*\s*(//|/\*).*\bnamespace\s+'
                         + re.escape(self.name) + r'[\*/\.\\\s]*$')
        if Match(named_pattern, end_line):
            return
        message = ('Namespace should be terminated with "// namespace %s"'
                   % self.name)
    else:
        if Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', end_line):
            return
        # When the comment mentions "anonymous namespace", offer that
        # wording as an acceptable alternative in the diagnostic.
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b',
                 end_line):
            message = ('Anonymous namespace should be terminated with '
                       '"// namespace" or "// anonymous namespace"')
        else:
            message = ('Anonymous namespace should be terminated with '
                       '"// namespace"')
    error(filename, linenum, 'readability/namespace', 5, message)
[ "def", "CheckEnd", "(", "self", ",", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "raw_lines", "[", "linenum", "]", "# Check how many lines is enclosed in this namespace. Don't issue", "# warning for missing n...
Check end of namespace comments.
[ "Check", "end", "of", "namespace", "comments", "." ]
99267b30461c46b9e437f95e1d9338a92a854270
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L2132-L2182