Dataset column header — column `code`, containing code strings whose lengths range from 17 characters to 6.64M characters.
def lenet(batch_size):
    """Build a LeNet-style net on dummy data and return its NetParameter."""
    n = caffe.NetSpec()
    # DummyData supplies both images and labels so the net is self-contained.
    n.data, n.label = L.DummyData(
        shape=[dict(dim=[batch_size, 1, 28, 28]),
               dict(dim=[batch_size, 1, 1, 1])],
        transform_param=dict(scale=(1.0 / 255)), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
                            weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
                            weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=10,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
def anon_lenet(batch_size):
    """Build the same LeNet as lenet() but without a NetSpec.

    Layers are held in plain locals; to_proto() on the final top
    reconstructs the whole graph by following bottoms.
    """
    data, label = L.DummyData(
        shape=[dict(dim=[batch_size, 1, 28, 28]),
               dict(dim=[batch_size, 1, 1, 1])],
        transform_param=dict(scale=(1.0 / 255)), ntop=2)
    conv1 = L.Convolution(data, kernel_size=5, num_output=20,
                          weight_filler=dict(type='xavier'))
    pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    conv2 = L.Convolution(pool1, kernel_size=5, num_output=50,
                          weight_filler=dict(type='xavier'))
    pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    ip1 = L.InnerProduct(pool2, num_output=500,
                         weight_filler=dict(type='xavier'))
    relu1 = L.ReLU(ip1, in_place=True)
    ip2 = L.InnerProduct(relu1, num_output=10,
                         weight_filler=dict(type='xavier'))
    loss = L.SoftmaxWithLoss(ip2, label)
    return loss.to_proto()
def silent_net():
    """Build a net whose layers produce no tops (Silence layers)."""
    n = caffe.NetSpec()
    n.data, n.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])],
                                  ntop=2)
    # Silence consumes its bottom and emits nothing (ntop=0).
    n.silence_data = L.Silence(n.data, ntop=0)
    n.silence_data2 = L.Silence(n.data2, ntop=0)
    return n.to_proto()
class TestNetSpec(unittest.TestCase):
    """Tests for net construction via caffe.NetSpec."""

    def load_net(self, net_proto):
        # Serialize the proto to a temp file so caffe.Net can parse it.
        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        f.write(str(net_proto))
        f.close()
        return caffe.Net(f.name, caffe.TEST)

    def test_lenet(self):
        """Construct and build the Caffe version of LeNet."""
        net_proto = lenet(50)
        # Layer 6 is the in-place ReLU: bottom and top must coincide.
        self.assertEqual(net_proto.layer[6].bottom, net_proto.layer[6].top)
        net = self.load_net(net_proto)
        self.assertEqual(len(net.layers), 9)

        # The anonymous variant must produce an equivalent 9-layer net.
        net_proto = anon_lenet(50)
        self.assertEqual(net_proto.layer[6].bottom, net_proto.layer[6].top)
        net = self.load_net(net_proto)
        self.assertEqual(len(net.layers), 9)

    def test_zero_tops(self):
        """Test net construction for top-less layers."""
        net_proto = silent_net()
        net = self.load_net(net_proto)
        # forward() returns only tops; a silenced net yields none.
        self.assertEqual(len(net.forward()), 0)
class SimpleLayer(caffe.Layer):
    """A layer that just multiplies by ten"""

    def setup(self, bottom, top):
        pass

    def reshape(self, bottom, top):
        # Output mirrors the input's shape.
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        top[0].data[...] = 10 * bottom[0].data

    def backward(self, top, propagate_down, bottom):
        # Gradient of a scale-by-ten is a scale-by-ten of the top diff.
        bottom[0].diff[...] = 10 * top[0].diff
class ExceptionLayer(caffe.Layer):
    """A layer for checking exceptions from Python"""

    def setup(self, bottom, top):
        # Raised deliberately so tests can verify exception propagation
        # from Python layers into the caller.
        raise RuntimeError
class ParameterLayer(caffe.Layer):
    """A layer that just multiplies by ten"""

    def setup(self, bottom, top):
        # One learnable blob of a single element, initialized to zero.
        self.blobs.add_blob(1)
        self.blobs[0].data[0] = 0

    def reshape(self, bottom, top):
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        pass

    def backward(self, top, propagate_down, bottom):
        # Write a known gradient so tests can check param diffs.
        self.blobs[0].diff[0] = 1
def python_net_file():
    """Write a prototxt chaining three SimpleLayer Python layers; return its path."""
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write("name: 'pythonnet' force_backward: true\n"
                "    input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }\n"
                "    layer { type: 'Python' name: 'one' bottom: 'data' top: 'one'\n"
                "      python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }\n"
                "    layer { type: 'Python' name: 'two' bottom: 'one' top: 'two'\n"
                "      python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }\n"
                "    layer { type: 'Python' name: 'three' bottom: 'two' top: 'three'\n"
                "      python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }")
    return f.name
def exception_net_file():
    """Write a prototxt using ExceptionLayer (raises in setup); return its path."""
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write("name: 'pythonnet' force_backward: true\n"
                "    input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }\n"
                "    layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'\n"
                "      python_param { module: 'test_python_layer' layer: 'ExceptionLayer' } }\n"
                "    ")
    return f.name
def parameter_net_file():
    """Write a prototxt using ParameterLayer (one learnable blob); return its path."""
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write("name: 'pythonnet' force_backward: true\n"
                "    input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }\n"
                "    layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'\n"
                "      python_param { module: 'test_python_layer' layer: 'ParameterLayer' } }\n"
                "    ")
    return f.name
class TestPythonLayer(unittest.TestCase):
    """Tests for Python layers: forward/backward, reshape, exceptions, params."""

    def setUp(self):
        net_file = python_net_file()
        self.net = caffe.Net(net_file, caffe.TRAIN)
        os.remove(net_file)

    def test_forward(self):
        x = 8
        self.net.blobs['data'].data[...] = x
        self.net.forward()
        # Three stacked x10 layers: output is 1000 * input.
        for y in self.net.blobs['three'].data.flat:
            self.assertEqual(y, 10 ** 3 * x)

    def test_backward(self):
        x = 7
        self.net.blobs['three'].diff[...] = x
        self.net.backward()
        for y in self.net.blobs['data'].diff.flat:
            self.assertEqual(y, 10 ** 3 * x)

    def test_reshape(self):
        s = 4
        self.net.blobs['data'].reshape(s, s, s, s)
        self.net.forward()
        # Every blob should follow the reshaped input through the net.
        for blob in six.itervalues(self.net.blobs):
            for d in blob.data.shape:
                self.assertEqual(s, d)

    def test_exception(self):
        net_file = exception_net_file()
        self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST)
        os.remove(net_file)

    def test_parameter(self):
        net_file = parameter_net_file()
        net = caffe.Net(net_file, caffe.TRAIN)
        net.forward()
        net.backward()
        layer = net.layers[list(net._layer_names).index('layer')]
        self.assertEqual(layer.blobs[0].data[0], 0)
        self.assertEqual(layer.blobs[0].diff[0], 1)
        layer.blobs[0].data[0] += layer.blobs[0].diff[0]
        self.assertEqual(layer.blobs[0].data[0], 1)

        h, caffemodel_file = tempfile.mkstemp()
        # BUG FIX: mkstemp returns an open OS-level fd that the original
        # never closed, leaking a file descriptor per test run.
        os.close(h)
        net.save(caffemodel_file)
        layer.blobs[0].data[0] = -1
        self.assertEqual(layer.blobs[0].data[0], -1)
        net.copy_from(caffemodel_file)
        self.assertEqual(layer.blobs[0].data[0], 1)
        os.remove(caffemodel_file)

        net2 = caffe.Net(net_file, caffe.TRAIN)
        net2.share_with(net)
        # BUG FIX: the original indexed `net.layers` here, re-checking the
        # first net; the point of share_with is to verify that *net2* sees
        # the shared parameter blob.
        layer = net2.layers[list(net2._layer_names).index('layer')]
        self.assertEqual(layer.blobs[0].data[0], 1)
        os.remove(net_file)
class SimpleParamLayer(caffe.Layer):
    """A layer that just multiplies by the numeric value of its param string"""

    def setup(self, bottom, top):
        try:
            self.value = float(self.param_str)
        except ValueError:
            raise ValueError('Parameter string must be a legible float')

    def reshape(self, bottom, top):
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        top[0].data[...] = self.value * bottom[0].data

    def backward(self, top, propagate_down, bottom):
        bottom[0].diff[...] = self.value * top[0].diff
def python_param_net_file():
    """Write a prototxt with two SimpleParamLayers (x10 then x2); return its path."""
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write("name: 'pythonnet' force_backward: true\n"
                "    input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }\n"
                "    layer { type: 'Python' name: 'mul10' bottom: 'data' top: 'mul10'\n"
                "      python_param { module: 'test_python_layer_with_param_str'\n"
                "        layer: 'SimpleParamLayer' param_str: '10' } }\n"
                "    layer { type: 'Python' name: 'mul2' bottom: 'mul10' top: 'mul2'\n"
                "      python_param { module: 'test_python_layer_with_param_str'\n"
                "        layer: 'SimpleParamLayer' param_str: '2' } }")
    return f.name
class TestLayerWithParam(unittest.TestCase):
    """Tests for Python layers configured via param_str."""

    def setUp(self):
        net_file = python_param_net_file()
        self.net = caffe.Net(net_file, caffe.TRAIN)
        os.remove(net_file)

    def test_forward(self):
        x = 8
        self.net.blobs['data'].data[...] = x
        self.net.forward()
        # mul10 then mul2: output = 20 * input.
        for y in self.net.blobs['mul2'].data.flat:
            self.assertEqual(y, 2 * 10 * x)

    def test_backward(self):
        x = 7
        self.net.blobs['mul2'].diff[...] = x
        self.net.backward()
        for y in self.net.blobs['data'].diff.flat:
            self.assertEqual(y, 2 * 10 * x)
class TestSolver(unittest.TestCase):
    """Tests for the SGD solver: solving and net lifetime."""

    def setUp(self):
        self.num_output = 13
        net_f = simple_net_file(self.num_output)
        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        f.write("net: '" + net_f + "'\n"
                "    test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9\n"
                "    weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75\n"
                "    display: 100 max_iter: 100 snapshot_after_train: false")
        f.close()
        self.solver = caffe.SGDSolver(f.name)
        # Also make sure get_solver runs.
        caffe.get_solver(f.name)
        caffe.set_mode_cpu()
        # Fill in valid labels so the softmax loss is well-defined.
        self.solver.net.blobs['label'].data[...] = np.random.randint(
            self.num_output, size=self.solver.net.blobs['label'].data.shape)
        self.solver.test_nets[0].blobs['label'].data[...] = np.random.randint(
            self.num_output,
            size=self.solver.test_nets[0].blobs['label'].data.shape)
        os.remove(f.name)
        os.remove(net_f)

    def test_solve(self):
        self.assertEqual(self.solver.iter, 0)
        self.solver.solve()
        self.assertEqual(self.solver.iter, 100)

    def test_net_memory(self):
        """Check that nets survive after the solver is destroyed."""
        nets = [self.solver.net] + list(self.solver.test_nets)
        self.assertEqual(len(nets), 2)
        del self.solver
        # Touch every blob; a dangling net would crash here.
        total = 0
        for net in nets:
            for ps in six.itervalues(net.params):
                for p in ps:
                    total += p.data.sum() + p.diff.sum()
            for bl in six.itervalues(net.blobs):
                total += bl.data.sum() + bl.diff.sum()
def main(argv):
    """Classify inputs with a pretrained Caffe model and save predictions as npy."""
    pycaffe_dir = os.path.dirname(__file__)

    parser = argparse.ArgumentParser()
    # Required arguments: input and output files.
    parser.add_argument('input_file', help='Input image, directory, or npy.')
    parser.add_argument('output_file', help='Output npy filename.')
    # Optional arguments.
    parser.add_argument(
        '--model_def',
        default=os.path.join(
            pycaffe_dir, '../models/bvlc_reference_caffenet/deploy.prototxt'),
        help='Model definition file.')
    parser.add_argument(
        '--pretrained_model',
        default=os.path.join(
            pycaffe_dir,
            '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'),
        help='Trained model weights file.')
    parser.add_argument('--gpu', action='store_true',
                        help='Switch for gpu computation.')
    parser.add_argument(
        '--center_only', action='store_true',
        help='Switch for prediction from center crop alone instead of '
             'averaging predictions across crops (default).')
    parser.add_argument(
        '--images_dim', default='256,256',
        help="Canonical 'height,width' dimensions of input images.")
    parser.add_argument(
        '--mean_file',
        default=os.path.join(pycaffe_dir,
                             'caffe/imagenet/ilsvrc_2012_mean.npy'),
        help='Data set image mean of [Channels x Height x Width] dimensions '
             "(numpy array). Set to '' for no mean subtraction.")
    parser.add_argument(
        '--input_scale', type=float,
        help='Multiply input features by this scale to finish preprocessing.')
    parser.add_argument(
        '--raw_scale', type=float, default=255.0,
        help='Multiply raw input by this scale before preprocessing.')
    parser.add_argument(
        '--channel_swap', default='2,1,0',
        help='Order to permute input channels. The default converts '
             'RGB -> BGR since BGR is the Caffe default by way of OpenCV.')
    parser.add_argument(
        '--ext', default='jpg',
        help='Image file extension to take as input when a directory '
             'is given as the input file.')
    args = parser.parse_args()

    image_dims = [int(s) for s in args.images_dim.split(',')]

    mean, channel_swap = None, None
    if args.mean_file:
        mean = np.load(args.mean_file)
    if args.channel_swap:
        channel_swap = [int(s) for s in args.channel_swap.split(',')]

    if args.gpu:
        caffe.set_mode_gpu()
        print('GPU mode')
    else:
        caffe.set_mode_cpu()
        print('CPU mode')

    # Make classifier.
    classifier = caffe.Classifier(
        args.model_def, args.pretrained_model, image_dims=image_dims,
        mean=mean, input_scale=args.input_scale, raw_scale=args.raw_scale,
        channel_swap=channel_swap)

    # Load inputs: a saved npy array, a directory of images, or a single image.
    args.input_file = os.path.expanduser(args.input_file)
    if args.input_file.endswith('npy'):
        print('Loading file: %s' % args.input_file)
        inputs = np.load(args.input_file)
    elif os.path.isdir(args.input_file):
        print('Loading folder: %s' % args.input_file)
        inputs = [caffe.io.load_image(im_f)
                  for im_f in glob.glob(args.input_file + '/*.' + args.ext)]
    else:
        print('Loading file: %s' % args.input_file)
        inputs = [caffe.io.load_image(args.input_file)]

    print('Classifying %d inputs.' % len(inputs))

    # Classify.
    start = time.time()
    predictions = classifier.predict(inputs, not args.center_only)
    print('Done in %.2f s.' % (time.time() - start))

    # Save.
    print('Saving results into %s' % args.output_file)
    np.save(args.output_file, predictions)
def main(argv):
    """Run windowed detection on images and save detections to h5 or csv."""
    pycaffe_dir = os.path.dirname(__file__)

    parser = argparse.ArgumentParser()
    # Required arguments: input and output files.
    parser.add_argument(
        'input_file',
        help="Input txt/csv filename. If .txt, must be list of filenames. "
             "If .csv, must be comma-separated file with header "
             "'filename, xmin, ymin, xmax, ymax'")
    parser.add_argument(
        'output_file',
        help='Output h5/csv filename. Format depends on extension.')
    # Optional arguments.
    parser.add_argument(
        '--model_def',
        # BUG FIX: default had a doubled extension ('deploy.prototxt.prototxt').
        default=os.path.join(
            pycaffe_dir, '../models/bvlc_reference_caffenet/deploy.prototxt'),
        help='Model definition file.')
    parser.add_argument(
        '--pretrained_model',
        default=os.path.join(
            pycaffe_dir,
            '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'),
        help='Trained model weights file.')
    parser.add_argument(
        '--crop_mode', default='selective_search', choices=CROP_MODES,
        help='How to generate windows for detection.')
    parser.add_argument('--gpu', action='store_true',
                        help='Switch for gpu computation.')
    parser.add_argument(
        '--mean_file',
        default=os.path.join(pycaffe_dir,
                             'caffe/imagenet/ilsvrc_2012_mean.npy'),
        help='Data set image mean of H x W x K dimensions (numpy array). '
             "Set to '' for no mean subtraction.")
    parser.add_argument(
        '--input_scale', type=float,
        help='Multiply input features by this scale to finish preprocessing.')
    parser.add_argument(
        '--raw_scale', type=float, default=255.0,
        help='Multiply raw input by this scale before preprocessing.')
    parser.add_argument(
        '--channel_swap', default='2,1,0',
        help='Order to permute input channels. The default converts '
             'RGB -> BGR since BGR is the Caffe default by way of OpenCV.')
    parser.add_argument(
        # FIX: default was the string '16' (relied on argparse re-parsing
        # string defaults through type=); use a plain int.
        '--context_pad', type=int, default=16,
        help='Amount of surrounding context to collect in input window.')
    args = parser.parse_args()

    mean, channel_swap = None, None
    if args.mean_file:
        mean = np.load(args.mean_file)
        # Collapse a full H x W mean image to per-channel means.
        if mean.shape[1:] != (1, 1):
            mean = mean.mean(1).mean(1)
    if args.channel_swap:
        channel_swap = [int(s) for s in args.channel_swap.split(',')]

    if args.gpu:
        caffe.set_mode_gpu()
        print('GPU mode')
    else:
        caffe.set_mode_cpu()
        print('CPU mode')

    # Make detector.
    detector = caffe.Detector(
        args.model_def, args.pretrained_model, mean=mean,
        input_scale=args.input_scale, raw_scale=args.raw_scale,
        channel_swap=channel_swap, context_pad=args.context_pad)

    # Load input: .txt is a list of image filenames; .csv additionally
    # carries window coordinates per filename.
    t = time.time()
    print('Loading input...')
    if args.input_file.lower().endswith('txt'):
        with open(args.input_file) as f:
            inputs = [_.strip() for _ in f.readlines()]
    elif args.input_file.lower().endswith('csv'):
        inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
        inputs.set_index('filename', inplace=True)
    else:
        raise Exception('Unknown input file type: not in txt or csv.')

    # Detect.
    if args.crop_mode == 'list':
        # Unpack sequence of (image filename, windows).
        images_windows = [
            (ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
            for ix in inputs.index.unique()
        ]
        detections = detector.detect_windows(images_windows)
    else:
        detections = detector.detect_selective_search(inputs)
    print('Processed {} windows in {:.3f} s.'.format(len(detections),
                                                     time.time() - t))

    # Collect into dataframe with labeled fields.
    df = pd.DataFrame(detections)
    df.set_index('filename', inplace=True)
    df[COORD_COLS] = pd.DataFrame(data=np.vstack(df['window']),
                                  index=df.index, columns=COORD_COLS)
    del df['window']

    # Save results.
    t = time.time()
    if args.output_file.lower().endswith('csv'):
        # csv: enumerate the class scores as columns.
        class_cols = ['class{}'.format(x) for x in range(NUM_OUTPUT)]
        df[class_cols] = pd.DataFrame(data=np.vstack(df['feat']),
                                      index=df.index, columns=class_cols)
        # BUG FIX: `cols=` was removed from pandas long ago; the keyword
        # for column selection in DataFrame.to_csv is `columns=`.
        df.to_csv(args.output_file, columns=COORD_COLS + class_cols)
    else:
        # h5: preserve the ndarray-valued 'feat' column as-is.
        df.to_hdf(args.output_file, 'df', mode='w')
    print('Saved to {} in {:.3f} s.'.format(args.output_file,
                                            time.time() - t))
def parse_args():
    """Parse input arguments
    """
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_net_proto_file',
                        help='Input network prototxt file')
    parser.add_argument('output_image_file',
                        help='Output image file')
    parser.add_argument(
        '--rankdir',
        help='One of TB (top-bottom, i.e., vertical), RL (right-left, i.e., '
             'horizontal), or another valid dot option; see '
             'http://www.graphviz.org/doc/info/attrs.html#k:rankdir',
        default='LR')
    return parser.parse_args()
def main():
    """Render a network prototxt to an image file via caffe.draw."""
    args = parse_args()
    net = caffe_pb2.NetParameter()
    # Parse the text-format prototxt into the NetParameter message.
    text_format.Merge(open(args.input_net_proto_file).read(), net)
    print('Drawing net to %s' % args.output_image_file)
    caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir)
def ParseNolintSuppressions(filename, raw_line, linenum, error):
    """Updates the global list of error-suppressions.

    Parses any NOLINT comments on the current line, updating the global
    error_suppressions store.  Reports an error if the NOLINT comment
    was malformed.

    Args:
      filename: str, the name of the input file.
      raw_line: str, the line of input text, with comments.
      linenum: int, the number of the current line.
      error: function, an error handler.
    """
    matched = _RE_SUPPRESSION.search(raw_line)
    if not matched:
        return
    # NOLINT_NEXT_LINE suppresses the line after the comment.
    if matched.group(1) == '_NEXT_LINE':
        linenum += 1
    category = matched.group(2)
    if category in (None, '(*)'):
        # Bare NOLINT or NOLINT(*): suppress all categories on this line.
        _error_suppressions.setdefault(None, set()).add(linenum)
    elif category.startswith('(') and category.endswith(')'):
        category = category[1:-1]
        if category in _ERROR_CATEGORIES:
            _error_suppressions.setdefault(category, set()).add(linenum)
        else:
            error(filename, linenum, 'readability/nolint', 5,
                  'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
    """Resets the set of NOLINT suppressions to empty."""
    _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
    """Returns true if the specified error category is suppressed on this line.

    Consults the global error_suppressions map populated by
    ParseNolintSuppressions/ResetNolintSuppressions.

    Args:
      category: str, the category of the error.
      linenum: int, the current line number.
    Returns:
      bool, True iff the error should be suppressed due to a NOLINT comment.
    """
    # A line is suppressed either for this specific category or for all
    # categories (stored under the None key).
    if linenum in _error_suppressions.get(category, set()):
        return True
    return linenum in _error_suppressions.get(None, set())
def Match(pattern, s):
    """Matches the string with the pattern, caching the compiled regexp."""
    # Compile on first use; later calls hit the shared cache.
    compiled = _regexp_compile_cache.get(pattern)
    if compiled is None:
        compiled = sre_compile.compile(pattern)
        _regexp_compile_cache[pattern] = compiled
    return compiled.match(s)
def ReplaceAll(pattern, rep, s):
    """Replaces instances of pattern in a string with a replacement.

    The compiled regex is kept in a cache shared by Match and Search.

    Args:
      pattern: regex pattern
      rep: replacement text
      s: search string

    Returns:
      string with replacements made (or original string if no replacements)
    """
    compiled = _regexp_compile_cache.get(pattern)
    if compiled is None:
        compiled = sre_compile.compile(pattern)
        _regexp_compile_cache[pattern] = compiled
    return compiled.sub(rep, s)
def Search(pattern, s):
    """Searches the string for the pattern, caching the compiled regexp."""
    compiled = _regexp_compile_cache.get(pattern)
    if compiled is None:
        compiled = sre_compile.compile(pattern)
        _regexp_compile_cache[pattern] = compiled
    return compiled.search(s)
class _IncludeState(dict):
    """Tracks line numbers for includes, and the order in which includes appear.

    As a dict, an _IncludeState object serves as a mapping between include
    filename and line number on which that file was included.

    Call CheckNextIncludeOrder() once for each header in the file, passing
    in the type constants defined above. Calls in an illegal order will
    raise an _IncludeError with an appropriate error message.
    """
    # Section numbers track the required ordering of header groups.
    _INITIAL_SECTION = 0
    _MY_H_SECTION = 1
    _C_SECTION = 2
    _CPP_SECTION = 3
    _OTHER_H_SECTION = 4

    _TYPE_NAMES = {
        _C_SYS_HEADER: 'C system header',
        _CPP_SYS_HEADER: 'C++ system header',
        _LIKELY_MY_HEADER: 'header this file implements',
        _POSSIBLE_MY_HEADER: 'header this file may implement',
        _OTHER_HEADER: 'other header',
    }
    _SECTION_NAMES = {
        _INITIAL_SECTION: "... nothing. (This can't be an error.)",
        _MY_H_SECTION: 'a header this file implements',
        _C_SECTION: 'C system header',
        _CPP_SECTION: 'C++ system header',
        _OTHER_H_SECTION: 'other header',
    }

    def __init__(self):
        dict.__init__(self)
        self.ResetSection()

    def ResetSection(self):
        self._section = self._INITIAL_SECTION
        self._last_header = ''

    def SetLastHeader(self, header_path):
        self._last_header = header_path

    def CanonicalizeAlphabeticalOrder(self, header_path):
        """Returns a path canonicalized for alphabetical comparison.

        - replaces "-" with "_" so they both cmp the same.
        - removes '-inl' since we don't require them to be after the main header.
        - lowercase everything, just in case.

        Args:
          header_path: Path to be canonicalized.

        Returns:
          Canonicalized path.
        """
        return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

    def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
        """Check if a header is in alphabetical order with the previous header.

        Args:
          clean_lines: A CleansedLines instance containing the file.
          linenum: The number of the line to check.
          header_path: Canonicalized header to be checked.

        Returns:
          Returns true if the header is in alphabetical order.
        """
        # A blank line before this header resets the ordering requirement.
        if (self._last_header > header_path and
                not Match('^\\s*$', clean_lines.elided[linenum - 1])):
            return False
        return True

    def CheckNextIncludeOrder(self, header_type):
        """Returns a non-empty error message if the next header is out of order.

        This function also updates the internal state to be ready to check
        the next include.

        Args:
          header_type: One of the _XXX_HEADER constants defined above.

        Returns:
          The empty string if the header is in the right order, or an
          error message describing what's wrong.
        """
        error_message = ('Found %s after %s' %
                         (self._TYPE_NAMES[header_type],
                          self._SECTION_NAMES[self._section]))

        last_section = self._section
        if header_type == _C_SYS_HEADER:
            if self._section <= self._C_SECTION:
                self._section = self._C_SECTION
            else:
                self._last_header = ''
                return error_message
        elif header_type == _CPP_SYS_HEADER:
            if self._section <= self._CPP_SECTION:
                self._section = self._CPP_SECTION
            else:
                self._last_header = ''
                return error_message
        elif header_type == _LIKELY_MY_HEADER:
            if self._section <= self._MY_H_SECTION:
                self._section = self._MY_H_SECTION
            else:
                self._section = self._OTHER_H_SECTION
        elif header_type == _POSSIBLE_MY_HEADER:
            if self._section <= self._MY_H_SECTION:
                self._section = self._MY_H_SECTION
            else:
                # This will always be the fallback because we're not sure
                # enough that the header is associated with this file.
                self._section = self._OTHER_H_SECTION
        else:
            assert header_type == _OTHER_HEADER
            self._section = self._OTHER_H_SECTION

        if last_section != self._section:
            self._last_header = ''

        return ''
class _CppLintState(object):
    """Maintains module-wide state.."""

    def __init__(self):
        self.verbose_level = 1   # global setting
        self.error_count = 0     # global count of reported errors
        # filters to apply when emitting error messages
        self.filters = _DEFAULT_FILTERS[:]
        self.counting = 'total'  # In what way are we counting errors?
        self.errors_by_category = {}  # string -> int error counts
        # output format: 'emacs' (default), 'vs7', or 'eclipse'
        self.output_format = 'emacs'

    def SetOutputFormat(self, output_format):
        """Sets the output format for errors."""
        self.output_format = output_format

    def SetVerboseLevel(self, level):
        """Sets the module's verbosity, and returns the previous setting."""
        last_verbose_level = self.verbose_level
        self.verbose_level = level
        return last_verbose_level

    def SetCountingStyle(self, counting_style):
        """Sets the module's counting options."""
        self.counting = counting_style

    def SetFilters(self, filters):
        """Sets the error-message filters.

        These filters are applied when deciding whether to emit a given
        error message.

        Args:
          filters: A string of comma-separated filters (eg "+whitespace/indent").
                   Each filter should start with + or -; else we die.

        Raises:
          ValueError: The comma-separated filters did not all start with '+' or '-'.
                      E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
        """
        # Default filters always apply; user filters are appended after.
        self.filters = _DEFAULT_FILTERS[:]
        for filt in filters.split(','):
            clean_filt = filt.strip()
            if clean_filt:
                self.filters.append(clean_filt)
        for filt in self.filters:
            if not (filt.startswith('+') or filt.startswith('-')):
                raise ValueError(
                    'Every filter in --filters must start with + or - '
                    '(%s does not)' % filt)

    def ResetErrorCounts(self):
        """Sets the module's error statistic back to zero."""
        self.error_count = 0
        self.errors_by_category = {}

    def IncrementErrorCount(self, category):
        """Bumps the module's error statistic."""
        self.error_count += 1
        if self.counting in ('toplevel', 'detailed'):
            if self.counting != 'detailed':
                # 'toplevel' aggregates by the part before the first slash.
                category = category.split('/')[0]
            if category not in self.errors_by_category:
                self.errors_by_category[category] = 0
            self.errors_by_category[category] += 1

    def PrintErrorCounts(self):
        """Print a summary of errors by category, and the total."""
        # PY3 FIX: dict.iteritems() was removed in Python 3; items() works
        # on both Python 2 and 3.
        for category, count in self.errors_by_category.items():
            sys.stderr.write("Category '%s' errors found: %d\n"
                             % (category, count))
        sys.stderr.write('Total errors found: %d\n' % self.error_count)
def _OutputFormat():
    """Gets the module's output format."""
    return _cpplint_state.output_format
def _SetOutputFormat(output_format):
    """Sets the module's output format."""
    _cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
    """Returns the module's verbosity setting."""
    return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
    """Sets the module's verbosity, and returns the previous setting."""
    return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
    """Sets the module's counting options."""
    _cpplint_state.SetCountingStyle(level)
def _Filters():
    """Returns the module's list of output filters, as a list."""
    return _cpplint_state.filters
def _SetFilters(filters):
    """Sets the module's error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "whitespace/indent").
               Each filter should start with + or -; else we die.
    """
    _cpplint_state.SetFilters(filters)
class _FunctionState(object):
    """Tracks current function name and the number of lines in its body."""

    _NORMAL_TRIGGER = 250  # line-count threshold at verbosity 0
    _TEST_TRIGGER = 400    # more lenient limit for test functions

    def __init__(self):
        self.in_a_function = False
        self.lines_in_function = 0
        self.current_function = ''

    def Begin(self, function_name):
        """Start analyzing function body.

        Args:
          function_name: The name of the function being tracked.
        """
        self.in_a_function = True
        self.lines_in_function = 0
        self.current_function = function_name

    def Count(self):
        """Count line in current function body."""
        if self.in_a_function:
            self.lines_in_function += 1

    def Check(self, error, filename, linenum):
        """Report if too many lines in function body.

        Args:
          error: The function to call with any errors found.
          filename: The name of the current file.
          linenum: The number of the line to check.
        """
        # Test functions (TEST/Test prefixes) get a higher threshold.
        if Match('T(EST|est)', self.current_function):
            base_trigger = self._TEST_TRIGGER
        else:
            base_trigger = self._NORMAL_TRIGGER
        # Each verbosity level doubles the allowed size.
        trigger = base_trigger * (2 ** _VerboseLevel())

        if self.lines_in_function > trigger:
            error_level = int(math.log(self.lines_in_function / base_trigger, 2))
            if error_level > 5:
                error_level = 5
            error(filename, linenum, 'readability/fn_size', error_level,
                  'Small and focused functions are preferred: %s has %d non-comment lines (error triggered by exceeding %d lines).'
                  % (self.current_function, self.lines_in_function, trigger))

    def End(self):
        """Stop analyzing function body."""
        self.in_a_function = False
class _IncludeError(Exception):
    """Indicates a problem with the include order in a file."""
    pass
class FileInfo():
    """Provides utility functions for filenames.

    FileInfo provides easy access to the components of a file's path
    relative to the project root.
    """

    def __init__(self, filename):
        self._filename = filename

    def FullName(self):
        """Make Windows paths like Unix."""
        return os.path.abspath(self._filename).replace('\\', '/')

    def RepositoryName(self):
        """FullName after removing the local path to the repository.

        If we have a real absolute path name here we can try to do something smart:
        detecting the root of the checkout and truncating /path/to/checkout from
        the name so that we get header guards that don't include things like
        "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
        people on different computers who have checked the source out to different
        locations won't see bogus errors.
        """
        fullname = self.FullName()

        if os.path.exists(fullname):
            project_dir = os.path.dirname(fullname)

            # SVN checkout: walk up through nested .svn directories.
            if os.path.exists(os.path.join(project_dir, '.svn')):
                root_dir = project_dir
                one_up_dir = os.path.dirname(root_dir)
                while os.path.exists(os.path.join(one_up_dir, '.svn')):
                    root_dir = os.path.dirname(root_dir)
                    one_up_dir = os.path.dirname(one_up_dir)
                prefix = os.path.commonprefix([root_dir, project_dir])
                return fullname[len(prefix) + 1:]

            # Otherwise search upward for a .git, .hg, or .svn marker.
            root_dir = os.path.dirname(fullname)
            while (root_dir != os.path.dirname(root_dir) and
                   not os.path.exists(os.path.join(root_dir, '.git')) and
                   not os.path.exists(os.path.join(root_dir, '.hg')) and
                   not os.path.exists(os.path.join(root_dir, '.svn'))):
                root_dir = os.path.dirname(root_dir)
            if (os.path.exists(os.path.join(root_dir, '.git')) or
                    os.path.exists(os.path.join(root_dir, '.hg')) or
                    os.path.exists(os.path.join(root_dir, '.svn'))):
                prefix = os.path.commonprefix([root_dir, project_dir])
                return fullname[len(prefix) + 1:]

        # Don't know what to do; header guard warnings may be wrong...
        return fullname

    def Split(self):
        """Splits the file into the directory, basename, and extension.

        For 'chrome/browser/browser.cc', Split() would
        return ('chrome/browser', 'browser', '.cc')

        Returns:
          A tuple of (directory, basename, extension).
        """
        googlename = self.RepositoryName()
        project, rest = os.path.split(googlename)
        return (project,) + os.path.splitext(rest)

    def BaseName(self):
        """File base name - text after the final slash, before the final period."""
        return self.Split()[1]

    def Extension(self):
        """File extension - text following the final period."""
        return self.Split()[2]

    def NoExtension(self):
        """File has no source file extension."""
        return '/'.join(self.Split()[0:2])

    def IsSource(self):
        """File has a source file extension."""
        return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
    """If confidence >= verbose, category passes filter and is not suppressed."""
    # NOLINT suppressions and low-confidence errors are dropped outright.
    if IsErrorSuppressedByNolint(category, linenum):
        return False
    if confidence < _cpplint_state.verbose_level:
        return False

    # Filters are applied in order; later filters can re-enable a category.
    is_filtered = False
    for one_filter in _Filters():
        if one_filter.startswith('-'):
            if category.startswith(one_filter[1:]):
                is_filtered = True
        elif one_filter.startswith('+'):
            if category.startswith(one_filter[1:]):
                is_filtered = False
        else:
            # SetFilters guarantees every filter starts with + or -.
            assert False
    if is_filtered:
        return False

    return True
def Error(filename, linenum, category, confidence, message):
    """Logs the fact we've found a lint error.

    We log where the error was found, and also our confidence in the error,
    that is, how certain we are this is a legitimate style regression, and
    not a misidentification or a use that's sometimes justified.

    False positives can be suppressed by the use of
    "cpplint(category)" comments on the offending line.  These are
    parsed into _error_suppressions.

    Args:
      filename: The name of the file containing the error.
      linenum: The number of the line containing the error.
      category: A string used to describe the "category" this bug
        falls under: "whitespace", say, or "runtime".  Categories
        may have a hierarchy separated by slashes: "whitespace/indent".
      confidence: A number from 1-5 representing a confidence score for
        the error, with 5 meaning that we are certain of the problem,
        and 1 meaning that it could be a legitimate construct.
      message: The error message.
    """
    if not _ShouldPrintError(category, confidence, linenum):
        return
    _cpplint_state.IncrementErrorCount(category)
    fmt_args = (filename, linenum, message, category, confidence)
    if _cpplint_state.output_format == 'vs7':
        sys.stderr.write('%s(%s): %s  [%s] [%d]\n' % fmt_args)
    elif _cpplint_state.output_format == 'eclipse':
        sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\n' % fmt_args)
    else:
        sys.stderr.write('%s:%s: %s  [%s] [%d]\n' % fmt_args)
def IsCppString(line):
    """Does line terminate so, that the next symbol is in string constant.

    This function does not consider single-line nor multi-line comments.

    Args:
      line: is a partial line of code starting from the 0..n.

    Returns:
      True, if next character appended to 'line' is inside a
      string constant.
    """
    # Neutralize escaped backslashes so '\\"' counting below is reliable.
    line = line.replace('\\\\', 'XX')
    # Count unescaped double quotes that are not char literals ('"');
    # an odd count means we are still inside a string.
    unescaped = line.count('"') - line.count('\\"') - line.count('\'"\'')
    return unescaped % 2 == 1
def CleanseRawStrings(raw_lines):
    """Removes C++11 raw strings from lines.

    Before:
      static const char kData[] = R"(
          multi-line string
          )";

    After:
      static const char kData[] = ""
          (replaced by blank line)
          "";

    Args:
      raw_lines: list of raw lines.

    Returns:
      list of lines with C++11 raw strings replaced by empty strings.
    """
    # 'delimiter' carries the closing delimiter (')tag"') of a raw string
    # that is still open from a previous line; None means no open string.
    delimiter = None
    lines_without_raw_strings = []
    for line in raw_lines:
        if delimiter:
            # We are inside a multi-line raw string: look for its end.
            end = line.find(delimiter)
            if (end >= 0):
                # Found the end: keep leading whitespace, collapse the raw
                # string to '""', and keep whatever follows the delimiter.
                # NOTE(review): assumes the line has a non-space character
                # after the leading whitespace -- confirm Match can't be None.
                leading_space = Match('^(\\s*)\\S', line)
                line = ((leading_space.group(1) + '""') + line[(end + len(delimiter)):])
                delimiter = None
            else:
                # Still inside the raw string: blank out the whole line.
                line = ''
        else:
            # Look for the start of a raw string: R"tag( ... (also u8R/uR/UR/LR).
            matched = Match('^(.*)\\b(?:R|u8R|uR|UR|LR)"([^\\s\\\\()]*)\\((.*)$', line)
            if matched:
                delimiter = ((')' + matched.group(2)) + '"')
                end = matched.group(3).find(delimiter)
                if (end >= 0):
                    # Raw string opens and closes on the same line.
                    line = ((matched.group(1) + '""') + matched.group(3)[(end + len(delimiter)):])
                    delimiter = None
                else:
                    # Raw string continues onto following lines.
                    line = (matched.group(1) + '""')
        lines_without_raw_strings.append(line)
    # TODO(unknown): if delimiter is not None here, we might want to
    # emit a warning for unterminated string.
    return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
    """Find the beginning marker for a multiline comment.

    Returns the index of the first line at or after 'lineix' that opens a
    /* comment which is NOT closed on the same line, or len(lines) if none.
    """
    total = len(lines)
    while lineix < total:
        stripped = lines[lineix].strip()
        # A '/*' closed by '*/' on the same line is not a multiline start.
        if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
            return lineix
        lineix += 1
    return total
def FindNextMultiLineCommentEnd(lines, lineix):
    """We are inside a comment, find the end marker.

    Returns the index of the first line at or after 'lineix' that ends with
    '*/', or len(lines) if no such line exists.
    """
    total = len(lines)
    while lineix < total:
        if lines[lineix].strip().endswith('*/'):
            return lineix
        lineix += 1
    return total
def RemoveMultiLineCommentsFromRange(lines, begin, end):
    """Clears a range of lines for multi-line comments.

    Replaces lines[begin:end] in place with a dummy comment so that line
    numbering stays intact while the comment content is hidden.
    """
    # '// dummy' keeps checks like ShouldCheckNamespaceIndentation happy.
    lines[begin:end] = ['// dummy'] * (end - begin)
def RemoveMultiLineComments(filename, lines, error):
    """Removes multiline (c-style) comments from lines.

    Blanks out every /* ... */ block that spans lines, reporting an error
    for a comment that never terminates.
    """
    lineix = 0
    while lineix < len(lines):
        begin = FindNextMultiLineCommentStart(lines, lineix)
        if begin >= len(lines):
            # No further multiline comment: done.
            return
        end = FindNextMultiLineCommentEnd(lines, begin)
        if end >= len(lines):
            error(filename, begin + 1, 'readability/multiline_comment', 5,
                  'Could not find end of multi-line comment')
            return
        # Blank out the comment including its closing line.
        RemoveMultiLineCommentsFromRange(lines, begin, end + 1)
        lineix = end + 1
def CleanseComments(line):
    """Removes //-comments and single-line C-style /* */ comments.

    Args:
      line: A line of C++ source.

    Returns:
      The line with single-line comments removed.
    """
    commentpos = line.find('//')
    # Strip '//' only when it is not inside a string constant.
    if commentpos != -1 and not IsCppString(line[:commentpos]):
        line = line[:commentpos].rstrip()
    # Get rid of single-line /* ... */ comments.
    return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
    """Holds 3 copies of all lines with different preprocessing applied to them.

    1) elided member contains lines without strings and comments,
    2) lines member contains lines without comments, and
    3) raw_lines member contains all the lines without processing.
    All these three members are of <type 'list'>, and of the same length.
    """

    def __init__(self, lines):
        # Lines with strings/chars collapsed and comments removed.
        self.elided = []
        # Lines with comments removed only.
        self.lines = []
        # Original input, untouched.
        self.raw_lines = lines
        self.num_lines = len(lines)
        # Intermediate copy with C++11 raw strings replaced by '""'.
        self.lines_without_raw_strings = CleanseRawStrings(lines)
        for linenum in range(len(self.lines_without_raw_strings)):
            self.lines.append(CleanseComments(self.lines_without_raw_strings[linenum]))
            elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
            self.elided.append(CleanseComments(elided))

    def NumLines(self):
        """Returns the number of lines represented."""
        return self.num_lines

    @staticmethod
    def _CollapseStrings(elided):
        """Collapses strings and chars on a line to simple "" or '' blocks.

        We nix strings first so we're not fooled by text like '"http://"'

        Args:
          elided: The line being processed.

        Returns:
          The line with collapsed strings.
        """
        # #include lines are left alone so their paths survive intact.
        if (not _RE_PATTERN_INCLUDE.match(elided)):
            # Remove escaped characters first to make quote/char matching
            # simpler below.
            elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
            elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
            elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
        return elided
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
    """Find the position just after the matching endchar.

    Args:
      line: a CleansedLines line.
      startpos: start searching at this position.
      depth: nesting level at startpos.
      startchar: expression opening character.
      endchar: expression closing character.

    Returns:
      On finding matching endchar: (index just after matching endchar, 0)
      Otherwise: (-1, new depth at end of this line)
    """
    # range() instead of the Python-2-only xrange(); iteration behavior
    # is identical and this keeps the function Python-3 compatible.
    for i in range(startpos, len(line)):
        if line[i] == startchar:
            depth += 1
        elif line[i] == endchar:
            depth -= 1
            if depth == 0:
                return (i + 1, 0)
    # Ran off the end of the line with brackets still open.
    return (-1, depth)
def CloseExpression(clean_lines, linenum, pos):
    """If input points to ( or { or [ or <, finds the position that closes it.

    If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
    linenum/pos that correspond to the closing of the expression.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: A position on the line.

    Returns:
      A tuple (line, linenum, pos) pointer *past* the closing brace, or
      (line, len(lines), -1) if we never find a close.  Note we ignore
      strings and comments when matching; and the line we return is the
      'cleansed' line at linenum.
    """
    line = clean_lines.elided[linenum]
    startchar = line[pos]
    if startchar not in '({[<':
        return (line, clean_lines.NumLines(), -1)
    # Map each opener to its matching closer.
    endchar = {'(': ')', '[': ']', '{': '}', '<': '>'}[startchar]

    # First look on the remainder of the current line.
    (end_pos, num_open) = FindEndOfExpressionInLine(
        line, pos, 0, startchar, endchar)
    if end_pos > -1:
        return (line, linenum, end_pos)

    # Scan forward line by line, carrying the open-bracket count.
    while linenum < clean_lines.NumLines() - 1:
        linenum += 1
        line = clean_lines.elided[linenum]
        (end_pos, num_open) = FindEndOfExpressionInLine(
            line, 0, num_open, startchar, endchar)
        if end_pos > -1:
            return (line, linenum, end_pos)

    # Did not find endchar before the end of the file.
    return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
    """Find position at the matching startchar.

    This is almost the reverse of FindEndOfExpressionInLine, but note
    that the input position and returned position differs by 1.

    Args:
      line: a CleansedLines line.
      endpos: start searching at this position.
      depth: nesting level at endpos.
      startchar: expression opening character.
      endchar: expression closing character.

    Returns:
      On finding matching startchar: (index at matching startchar, 0)
      Otherwise: (-1, new depth at beginning of this line)
    """
    # range() instead of the Python-2-only xrange(); scans backwards from
    # endpos down to index 0 inclusive, exactly as before.
    for i in range(endpos, -1, -1):
        if line[i] == endchar:
            depth += 1
        elif line[i] == startchar:
            depth -= 1
            if depth == 0:
                return (i, 0)
    # Reached the beginning of the line with brackets still unmatched.
    return (-1, depth)
def ReverseCloseExpression(clean_lines, linenum, pos):
    """If input points to ) or } or ] or >, finds the position that opens it.

    If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
    linenum/pos that correspond to the opening of the expression.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: A position on the line.

    Returns:
      A tuple (line, linenum, pos) pointer *at* the opening brace, or
      (line, 0, -1) if we never find the matching opening brace.  Note
      we ignore strings and comments when matching; and the line we
      return is the 'cleansed' line at linenum.
    """
    line = clean_lines.elided[linenum]
    endchar = line[pos]
    if endchar not in ')}]>':
        return (line, 0, -1)
    # Map each closer to its matching opener.
    startchar = {')': '(', ']': '[', '}': '{', '>': '<'}[endchar]

    # Try the current line first, scanning backwards from pos.
    (start_pos, num_open) = FindStartOfExpressionInLine(
        line, pos, 0, startchar, endchar)
    if start_pos > -1:
        return (line, linenum, start_pos)

    # Walk backwards through earlier lines, carrying the bracket count.
    while linenum > 0:
        linenum -= 1
        line = clean_lines.elided[linenum]
        (start_pos, num_open) = FindStartOfExpressionInLine(
            line, len(line) - 1, num_open, startchar, endchar)
        if start_pos > -1:
            return (line, linenum, start_pos)

    # Did not find the opener before the beginning of the file.
    return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
    """Logs an error if a Copyright message appears at the top of the file."""
    # Only examine the first 10 lines (lines[0] is a cpplint sentinel).
    for line in xrange(1, min(len(lines), 11)):
        # BUGFIX: re.I was previously passed as the second positional
        # argument of Pattern.search(), which is 'pos', not 'flags' --
        # so the flag was ignored and matching silently skipped the
        # first two characters of each line.  Flags for a compiled
        # pattern must be baked into _RE_COPYRIGHT itself.
        if _RE_COPYRIGHT.search(lines[line]):
            error(filename, 0, 'legal/copyright', 5,
                  'Copyright message found. You should not include a copyright line.')
def GetHeaderGuardCPPVariable(filename):
    """Returns the CPP variable that should be used as a header guard.

    Args:
      filename: The name of a C++ header file.

    Returns:
      The CPP variable that should be used as a header guard in the
      named file.
    """
    # Restore file names munged by Emacs's flymake tool.
    filename = re.sub('_flymake\\.h$', '.h', filename)
    filename = re.sub('/\\.flymake/([^/]*)$', '/\\1', filename)

    fileinfo = FileInfo(filename)
    file_path_from_root = fileinfo.RepositoryName()
    if _root:
        # BUGFIX: escape the root prefix before using it in a regex.
        # Previously _root and os.sep were interpolated raw, so a root
        # containing regex metacharacters -- or os.sep == '\\' on
        # Windows -- produced a wrong or invalid pattern.
        file_path_from_root = re.sub('^' + re.escape(_root + os.sep), '',
                                     file_path_from_root)
    return re.sub('[-./\\s]', '_', file_path_from_root).upper() + '_'
def CheckForHeaderGuard(filename, lines, error):
    """Checks that the file contains a header guard.

    Logs an error if no #ifndef header guard is present.  For other
    headers, checks that the full pathname is used.

    Args:
      filename: The name of the C++ header file.
      lines: An array of strings, each representing a line of the file.
      error: The function to call with any errors found.
    """
    cppvar = GetHeaderGuardCPPVariable(filename)

    ifndef = None
    ifndef_linenum = 0
    define = None
    endif = None
    endif_linenum = 0
    for (linenum, line) in enumerate(lines):
        linesplit = line.split()
        if (len(linesplit) >= 2):
            # Remember the first #ifndef and #define encountered.
            if ((not ifndef) and (linesplit[0] == '#ifndef')):
                # set ifndef to the header guard presented on the #ifndef line.
                ifndef = linesplit[1]
                ifndef_linenum = linenum
            if ((not define) and (linesplit[0] == '#define')):
                define = linesplit[1]
        # find the last occurrence of #endif, save entire line
        if line.startswith('#endif'):
            endif = line
            endif_linenum = linenum
    if (not ifndef):
        error(filename, 0, 'build/header_guard', 5, ('No #ifndef header guard found, suggested CPP variable is: %s' % cppvar))
        return
    if (not define):
        error(filename, 0, 'build/header_guard', 5, ('No #define header guard found, suggested CPP variable is: %s' % cppvar))
        return
    # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
    # for backward compatibility (reported at a lower confidence).
    if (ifndef != cppvar):
        error_level = 0
        if (ifndef != (cppvar + '_')):
            error_level = 5
        # NOLINT suppressions on the #ifndef line must be parsed here
        # because this check runs outside the normal per-line loop.
        ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, error)
        error(filename, ifndef_linenum, 'build/header_guard', error_level, ('#ifndef header guard has wrong style, please use: %s' % cppvar))
    if (define != ifndef):
        error(filename, 0, 'build/header_guard', 5, ("#ifndef and #define don't match, suggested CPP variable is: %s" % cppvar))
        return
    # Same two-tier tolerance for the trailing-underscore variant on #endif.
    if (endif != ('#endif // %s' % cppvar)):
        error_level = 0
        if (endif != ('#endif // %s' % (cppvar + '_'))):
            error_level = 5
        ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, error)
        error(filename, endif_linenum, 'build/header_guard', error_level, ('#endif line should be "#endif // %s"' % cppvar))
def CheckForBadCharacters(filename, lines, error):
    """Logs an error for each line containing bad characters.

    Two kinds of bad characters:

    1. Unicode replacement characters: These indicate that either the file
    contained invalid UTF-8 (likely) or Unicode replacement characters (which
    it shouldn't).  Note that it's possible for this to throw off line
    numbering if the invalid UTF-8 occurred adjacent to a newline.

    2. NUL bytes.  These are problematic for some tools.

    Args:
      filename: The name of the current file.
      lines: An array of strings, each representing a line of the file.
      error: The function to call with any errors found.
    """
    for linenum, line in enumerate(lines):
        # U+FFFD appears when the file decoder hit invalid UTF-8.
        if u'\ufffd' in line:
            error(filename, linenum, 'readability/utf8', 5,
                  'Line contains invalid UTF-8 (or Unicode replacement character).')
        if '\x00' in line:
            error(filename, linenum, 'readability/nul', 5,
                  'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
    """Logs an error if there is no newline char at the end of the file.

    Args:
      filename: The name of the current file.
      lines: An array of strings, each representing a line of the file.
      error: The function to call with any errors found.
    """
    # The lines list is padded with sentinel entries at both ends by the
    # caller, so a file ending in '\n' leaves an empty string at lines[-2].
    # A too-short list or a non-empty lines[-2] means the newline is missing.
    if len(lines) < 3 or lines[-2]:
        error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
              'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
    """Logs an error if we see /* ... */ or "..." that extend past one line.

    /* ... */ comments are legit inside macros, for one line.
    Otherwise, we prefer // comments, so it's ok to warn about the
    other.  Likewise, it's ok for strings to extend across multiple
    lines, as long as a line continuation character (backslash)
    terminates each line.  Although not currently prohibited by the C++
    style guide, it's ugly and unnecessary.  We don't do well with either
    in this lint program, so we warn about both.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Remove escaped backslashes so the counting below is reliable.
    line = line.replace('\\\\', '')

    if line.count('/*') > line.count('*/'):
        error(filename, linenum, 'readability/multiline_comment', 5,
              'Complex multi-line /*...*/-style comment found. Lint may give bogus warnings. Consider replacing these with //-style comments, with #if 0...#endif, or with more clearly structured multi-line comments.')

    # An odd number of unescaped quotes means a string is left open.
    if (line.count('"') - line.count('\\"')) % 2:
        error(filename, linenum, 'readability/multiline_string', 5,
              'Multi-line string ("...") found. This lint script doesn\'t do well with such strings, and may give bogus warnings. Use C++11 raw strings or concatenation instead.')
def CheckCaffeAlternatives(filename, clean_lines, linenum, error):
    """Checks for C(++) functions for which a Caffe substitute should be used.

    For certain native C functions (memset, memcpy), there is a Caffe alternative
    which should be used instead.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    for function, alts in caffe_alt_function_list:
        ix = line.find(function + '(')
        if ix < 0:
            continue
        # Require the call to start a token: not preceded by an identifier
        # character nor by '_', '.', or '->' (member access).
        if ix == 0 or (not line[ix - 1].isalnum() and
                       line[ix - 1] not in ('_', '.', '>')):
            disp_alts = ['%s(...)' % alt for alt in alts]
            error(filename, linenum, 'caffe/alt_fn', 2,
                  'Use Caffe function %s instead of %s(...).' %
                  (' or '.join(disp_alts), function))
def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
    """Except the base classes, Caffe DataLayer should define DataLayerSetUp
    instead of LayerSetUp.

    The base DataLayers define common SetUp steps, the subclasses should
    not override them.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # The four base DataLayer classes that legitimately define LayerSetUp.
    base_layers = ('DataLayer', 'ImageDataLayer', 'MemoryDataLayer',
                   'WindowDataLayer')
    message = ('Except the base classes, Caffe DataLayer should define' +
               ' DataLayerSetUp instead of LayerSetUp. The base DataLayers' +
               ' define common SetUp steps, the subclasses should' +
               ' not override them.')

    # A base class defining plain LayerSetUp is flagged.
    if line.find('DataLayer<Dtype>::LayerSetUp') >= 0:
        if any(line.find('void %s<Dtype>::LayerSetUp' % cls) != -1
               for cls in base_layers):
            error(filename, linenum, 'caffe/data_layer_setup', 2, message)

    # A non-base subclass defining DataLayerSetUp is flagged.
    if line.find('DataLayer<Dtype>::DataLayerSetUp') >= 0:
        if (line.find('void Base') == -1 and
            all(line.find('void %s<Dtype>::DataLayerSetUp' % cls) == -1
                for cls in base_layers)):
            error(filename, linenum, 'caffe/data_layer_setup', 2, message)
def CheckCaffeRandom(filename, clean_lines, linenum, error):
    """Checks for calls to C random functions (rand, rand_r, random, ...).

    Caffe code should (almost) always use the caffe_rng_* functions rather
    than these, as the internal state of these C functions is independent of the
    native Caffe RNG system which should produce deterministic results for a
    fixed Caffe seed set using Caffe::set_random_seed(...).

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    for function in c_random_function_list:
        ix = line.find(function)
        # Match only when the name starts a token: not preceded by an
        # identifier character nor by '_', '.', or '->'.
        # NOTE(review): the error message below closes with ')' after the
        # function name -- presumably c_random_function_list entries end
        # with '(' so the message reads e.g. 'rand()'; verify the list.
        if ((ix >= 0) and ((ix == 0) or ((not line[(ix - 1)].isalnum()) and (line[(ix - 1)] not in ('_', '.', '>'))))):
            error(filename, linenum, 'caffe/random_fn', 2, (('Use caffe_rng_rand() (or other caffe_rng_* function) instead of ' + function) + ') to ensure results are deterministic for a fixed Caffe seed.'))
def CheckPosixThreading(filename, clean_lines, linenum, error):
    """Checks for calls to thread-unsafe functions.

    Much code has been originally written without consideration of
    multi-threading.  Also, engineers are relying on their old experience;
    they have learned posix before threading extensions were added.  These
    tests guide the engineers to use thread-safe functions (when using
    posix directly).

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    for single_thread_function, multithread_safe_function in threading_list:
        ix = line.find(single_thread_function)
        if ix < 0:
            continue
        # Only flag a real token start: not preceded by an identifier
        # character nor by '_', '.', or '->'.
        if ix == 0 or (not line[ix - 1].isalnum() and
                       line[ix - 1] not in ('_', '.', '>')):
            error(filename, linenum, 'runtime/threadsafe_fn', 2,
                  'Consider using ' + multithread_safe_function +
                  '...) instead of ' + single_thread_function +
                  '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
    """Checks that VLOG() is only used for defining a logging level.

    For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
    VLOG(FATAL) are not.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # VLOG takes a numeric verbosity; symbolic severities belong to LOG().
    if Search('\\bVLOG\\((INFO|ERROR|WARNING|DFATAL|FATAL)\\)', line):
        error(filename, linenum, 'runtime/vlog', 5, 'VLOG() should be used with numeric verbosity level. Use LOG() if you want symbolic severity levels.')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
    """Checks for invalid increment *count++.

    For example following function:
    void increment_counter(int* count) {
      *count++;
    }
    is invalid, because it effectively does count++, moving pointer, and should
    be replaced with ++*count, (*count)++ or *count += 1.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    if _RE_PATTERN_INVALID_INCREMENT.match(line):
        error(filename, linenum, 'runtime/invalid_increment', 5, 'Changing pointer instead of value (or unused value of operator*).')
class _BlockInfo(object):
    """Stores information about a generic block of code."""

    def __init__(self, seen_open_brace):
        # True once the block's opening '{' has been consumed.
        self.seen_open_brace = seen_open_brace
        # Running count of '(' minus ')' seen inside this block.
        self.open_parentheses = 0
        # Inline-assembly parsing state (starts outside any asm block).
        self.inline_asm = _NO_ASM

    def CheckBegin(self, filename, clean_lines, linenum, error):
        """Run checks that applies to text up to the opening brace.

        This is mostly for checking the text after the class identifier
        and the "{", usually where the base class is specified.  For other
        blocks, there isn't much to check, so we always pass.

        Args:
          filename: The name of the current file.
          clean_lines: A CleansedLines instance containing the file.
          linenum: The number of the line to check.
          error: The function to call with any errors found.
        """
        pass

    def CheckEnd(self, filename, clean_lines, linenum, error):
        """Run checks that applies to text after the closing brace.

        This is mostly used for checking end of namespace comments.

        Args:
          filename: The name of the current file.
          clean_lines: A CleansedLines instance containing the file.
          linenum: The number of the line to check.
          error: The function to call with any errors found.
        """
        pass
class _ClassInfo(_BlockInfo):
    """Stores information about a class."""

    def __init__(self, name, class_or_struct, clean_lines, linenum):
        _BlockInfo.__init__(self, False)
        self.name = name
        self.starting_linenum = linenum
        self.is_derived = False
        # structs default to public access, classes to private.
        if (class_or_struct == 'struct'):
            self.access = 'public'
            self.is_struct = True
        else:
            self.access = 'private'
            self.is_struct = False

        # Remember the indentation of the 'class'/'struct' keyword so the
        # closing brace alignment can be checked in CheckEnd.
        initial_indent = Match('^( *)\\S', clean_lines.raw_lines[linenum])
        if initial_indent:
            self.class_indent = len(initial_indent.group(1))
        else:
            self.class_indent = 0

        # Try to find the end of the class by tracking brace depth from the
        # declaration line forward; last_line stays 0 if never balanced.
        self.last_line = 0
        depth = 0
        for i in range(linenum, clean_lines.NumLines()):
            line = clean_lines.elided[i]
            depth += (line.count('{') - line.count('}'))
            if (not depth):
                self.last_line = i
                break

    def CheckBegin(self, filename, clean_lines, linenum, error):
        # A single ':' (not '::') after the class name indicates inheritance.
        if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
            self.is_derived = True

    def CheckEnd(self, filename, clean_lines, linenum, error):
        # The closing brace should align with the class keyword's indent.
        indent = Match('^( *)\\}', clean_lines.elided[linenum])
        if (indent and (len(indent.group(1)) != self.class_indent)):
            if self.is_struct:
                parent = ('struct ' + self.name)
            else:
                parent = ('class ' + self.name)
            error(filename, linenum, 'whitespace/indent', 3, ('Closing brace should be aligned with beginning of %s' % parent))
class _NamespaceInfo(_BlockInfo):
    """Stores information about a namespace."""

    def __init__(self, name, linenum):
        _BlockInfo.__init__(self, False)
        # Anonymous namespaces are stored with an empty name.
        self.name = (name or '')
        self.starting_linenum = linenum

    def CheckEnd(self, filename, clean_lines, linenum, error):
        """Check end of namespace comments."""
        line = clean_lines.raw_lines[linenum]

        # Short namespaces (fewer than 10 lines) are exempt unless the
        # closing line already carries a namespace comment to validate.
        if (((linenum - self.starting_linenum) < 10) and (not Match('};*\\s*(//|/\\*).*\\bnamespace\\b', line))):
            return

        # The closing comment must name the namespace (or be a bare
        # '// namespace' for anonymous namespaces).
        if self.name:
            if (not Match((('};*\\s*(//|/\\*).*\\bnamespace\\s+' + re.escape(self.name)) + '[\\*/\\.\\\\\\s]*$'), line)):
                error(filename, linenum, 'readability/namespace', 5, ('Namespace should be terminated with "// namespace %s"' % self.name))
        elif (not Match('};*\\s*(//|/\\*).*\\bnamespace[\\*/\\.\\\\\\s]*$', line)):
            error(filename, linenum, 'readability/namespace', 5, 'Namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
    """Stores checkpoints of nesting stacks when #if/#else is seen."""

    def __init__(self, stack_before_if):
        # The entire nesting stack, as it was just before the #if.
        self.stack_before_if = stack_before_if
        # The entire nesting stack up to the matching #else; filled in
        # by _NestingState when the #else is encountered.
        self.stack_before_else = []
        # Whether we have already seen an #else for this #if block.
        self.seen_else = False
class _NestingState(object):
    """Holds states related to parsing braces."""

    def __init__(self):
        # Stack for tracking all braces.  An object is pushed for each
        # open block; only _ClassInfo, _NamespaceInfo or _BlockInfo
        # instances appear here.
        self.stack = []
        # Stack of _PreprocessorInfo checkpoints for nested #if blocks.
        self.pp_stack = []

    def SeenOpenBrace(self):
        """Check if we have seen the opening brace for the innermost block.

        Returns:
          True if we have seen the opening brace, False if the innermost
          block is still expecting an opening brace.
        """
        return ((not self.stack) or self.stack[(- 1)].seen_open_brace)

    def InNamespaceBody(self):
        """Check if we are currently one level inside a namespace body.

        Returns:
          True if top of the stack is a namespace block, False otherwise.
        """
        return (self.stack and isinstance(self.stack[(- 1)], _NamespaceInfo))

    def UpdatePreprocessor(self, line):
        """Update preprocessor stack.

        We need to handle preprocessors due to classes like this:
          #ifdef SWIG
          struct ResultDetailsPageElementExtensionPoint {
          #else
          struct ResultDetailsPageElementExtensionPoint : public Extension {
          #endif

        We make the following assumptions (good enough for most files):
        - Preprocessor condition evaluates to true from #if up to first
          #else/#elif/#endif.
        - Preprocessor condition evaluates to false from #else/#elif up
          to #endif.  We still perform lint checks on these lines, but
          these do not affect nesting stack.

        Args:
          line: current line to check.
        """
        if Match('^\\s*#\\s*(if|ifdef|ifndef)\\b', line):
            # Beginning of #if block, save the nesting stack here.
            self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
        elif Match('^\\s*#\\s*(else|elif)\\b', line):
            # Beginning of #else block.
            if self.pp_stack:
                if (not self.pp_stack[(- 1)].seen_else):
                    # This is the first #else or #elif block.  Remember the
                    # whole nesting stack up to this point, then restore the
                    # stack to its state before the #if branch ran.
                    self.pp_stack[(- 1)].seen_else = True
                    self.pp_stack[(- 1)].stack_before_else = copy.deepcopy(self.stack)
                # Restore the stack to how it was before the #if.
                self.stack = copy.deepcopy(self.pp_stack[(- 1)].stack_before_if)
            else:
                # TODO(unknown): unexpected #else, issue warning?
                pass
        elif Match('^\\s*#\\s*endif\\b', line):
            # End of #if or #else blocks.
            if self.pp_stack:
                # If we saw an #else, we will need to restore the nesting
                # stack to its former state before the #else, assuming the
                # #if branch is the one that defines the canonical nesting.
                if self.pp_stack[(- 1)].seen_else:
                    self.stack = self.pp_stack[(- 1)].stack_before_else
                # Drop the corresponding #if from the stack.
                self.pp_stack.pop()
            else:
                # TODO(unknown): unexpected #endif, issue warning?
                pass

    def Update(self, filename, clean_lines, linenum, error):
        """Update nesting state with current line.

        Args:
          filename: The name of the current file.
          clean_lines: A CleansedLines instance containing the file.
          linenum: The number of the line to check.
          error: The function to call with any errors found.
        """
        line = clean_lines.elided[linenum]

        # Update pp_stack first.
        self.UpdatePreprocessor(line)

        # Count parentheses and track inline-assembly state transitions.
        if self.stack:
            inner_block = self.stack[(- 1)]
            depth_change = (line.count('(') - line.count(')'))
            inner_block.open_parentheses += depth_change

            # Detect asm("...") style inline assembly via _MATCH_ASM.
            if (inner_block.inline_asm in (_NO_ASM, _END_ASM)):
                if ((depth_change != 0) and (inner_block.open_parentheses == 1) and _MATCH_ASM.match(line)):
                    # Enter assembly block.
                    inner_block.inline_asm = _INSIDE_ASM
                else:
                    # Not entering assembly block.  If previous line was
                    # _END_ASM, this is a fresh start.
                    inner_block.inline_asm = _NO_ASM
            elif ((inner_block.inline_asm == _INSIDE_ASM) and (inner_block.open_parentheses == 0)):
                # Exit assembly block.
                inner_block.inline_asm = _END_ASM

        # Consume namespace declaration at the beginning of the line.  Do
        # this in a loop so that we catch same-line declarations like:
        #   namespace proto2 { namespace bridge { class MessageSet; } }
        while True:
            namespace_decl_match = Match('^\\s*namespace\\b\\s*([:\\w]+)?(.*)$', line)
            if (not namespace_decl_match):
                break
            new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
            self.stack.append(new_namespace)
            line = namespace_decl_match.group(2)
            if (line.find('{') != (- 1)):
                new_namespace.seen_open_brace = True
                line = line[(line.find('{') + 1):]

        # Look for a class declaration in whatever is left of the line.
        class_decl_match = Match('\\s*(template\\s*<[\\w\\s<>,:]*>\\s*)?(class|struct)\\s+([A-Z_]+\\s+)*(\\w+(?:::\\w+)*)(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\\s*>)*)$', line)
        if (class_decl_match and ((not self.stack) or (self.stack[(- 1)].open_parentheses == 0))):
            self.stack.append(_ClassInfo(class_decl_match.group(4), class_decl_match.group(2), clean_lines, linenum))
            line = class_decl_match.group(5)

        # If we have not yet seen the opening brace for the innermost block,
        # run checks on the declaration head (e.g. base-class list).
        if (not self.SeenOpenBrace()):
            self.stack[(- 1)].CheckBegin(filename, clean_lines, linenum, error)

        # Update access control if we are inside a class/struct.
        if (self.stack and isinstance(self.stack[(- 1)], _ClassInfo)):
            classinfo = self.stack[(- 1)]
            access_match = Match('^(.*)\\b(public|private|protected|signals)(\\s+(?:slots\\s*)?)?:(?:[^:]|$)', line)
            if access_match:
                classinfo.access = access_match.group(2)

                # Check access-specifier indentation: +1 space relative to
                # the class keyword, and nothing else before it on the line.
                indent = access_match.group(1)
                if ((len(indent) != (classinfo.class_indent + 1)) and Match('^\\s*$', indent)):
                    if classinfo.is_struct:
                        parent = ('struct ' + classinfo.name)
                    else:
                        parent = ('class ' + classinfo.name)
                    slots = ''
                    if access_match.group(3):
                        slots = access_match.group(3)
                    error(filename, linenum, 'whitespace/indent', 3, ('%s%s: should be indented +1 space inside %s' % (access_match.group(2), slots, parent)))

        # Consume braces or semicolons from what's left of the line.
        while True:
            # Match first brace, semicolon, or closing parenthesis.
            matched = Match('^[^{;)}]*([{;)}])(.*)$', line)
            if (not matched):
                break

            token = matched.group(1)
            if (token == '{'):
                # If namespace or class hasn't seen an opening brace yet,
                # mark its head as complete.  Otherwise push a new block.
                if (not self.SeenOpenBrace()):
                    self.stack[(- 1)].seen_open_brace = True
                else:
                    self.stack.append(_BlockInfo(True))
                    if _MATCH_ASM.match(line):
                        self.stack[(- 1)].inline_asm = _BLOCK_ASM
            elif ((token == ';') or (token == ')')):
                # A semicolon (or ')') before the opening brace means this
                # was probably a forward declaration: pop it.
                if (not self.SeenOpenBrace()):
                    self.stack.pop()
            elif self.stack:
                # token == '}': run end-of-block checks and pop the stack.
                self.stack[(- 1)].CheckEnd(filename, clean_lines, linenum, error)
                self.stack.pop()
            line = matched.group(2)

    def InnermostClass(self):
        """Get class info on the top of the stack.

        Returns:
          A _ClassInfo object if we are inside a class, or None otherwise.
        """
        for i in range(len(self.stack), 0, (- 1)):
            classinfo = self.stack[(i - 1)]
            if isinstance(classinfo, _ClassInfo):
                return classinfo
        return None

    def CheckCompletedBlocks(self, filename, error):
        """Checks that all classes and namespaces have been completely parsed.

        Call this when all lines in a file have been processed.
        Args:
          filename: The name of the current file.
          error: The function to call with any errors found.
        """
        # Note: This test can result in false positives if #ifdef constructs
        # leave the stack unbalanced; anything still open at EOF is reported.
        for obj in self.stack:
            if isinstance(obj, _ClassInfo):
                error(filename, obj.starting_linenum, 'build/class', 5, ('Failed to find complete declaration of class %s' % obj.name))
            elif isinstance(obj, _NamespaceInfo):
                error(filename, obj.starting_linenum, 'build/namespaces', 5, ('Failed to find complete declaration of namespace %s' % obj.name))
def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error):
    """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

    Complain about several constructs which gcc-2 accepts, but which are
    not standard C++.  Warning about these in lint is one way to ease the
    transition to new compilers.
    - put storage class first (e.g. "static const" instead of "const static").
    - "%lld" instead of %qd" in printf-type functions.
    - "%1$d" is non-standard in printf-type functions.
    - "\\%" is an undefined character escape sequence.
    - text after #endif is not allowed.
    - invalid inner-style forward declaration.
    - >? and <? operators, and their >?= and <?= cousins.

    Additionally, check for constructor/destructor style violations and reference
    members, as it is very convenient to do so while checking for
    gcc-2 compliance.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      nesting_state: A _NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
      error: A callable to which errors are reported, which takes 4 arguments:
        filename, line number, error level, and message
    """
    # Remove comments from the line, but leave in strings for now.
    line = clean_lines.lines[linenum]

    if Search('printf\\s*\\(.*".*%[-+ ]?\\d*q', line):
        error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.')

    if Search('printf\\s*\\(.*".*%\\d+\\$', line):
        error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.')

    # Remove escaped backslashes before looking for undefined escapes.
    line = line.replace('\\\\', '')

    if Search('("|\\\').*\\\\(%|\\[|\\(|{)', line):
        error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.')

    # For the rest, work with both comments and strings removed.
    line = clean_lines.elided[linenum]

    if Search('\\b(const|volatile|void|char|short|int|long|float|double|signed|unsigned|schar|u?int8|u?int16|u?int32|u?int64)\\s+(register|static|extern|typedef)\\b', line):
        error(filename, linenum, 'build/storage_class', 5, 'Storage class (static, extern, typedef, etc) should be first.')

    if Match('\\s*#\\s*endif\\s*[^/\\s]+', line):
        error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.')

    if Match('\\s*class\\s+(\\w+\\s*::\\s*)+\\w+\\s*;', line):
        error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.')

    if Search('(\\w+|[+-]?\\d+(\\.\\d*)?)\\s*(<|>)\\?=?\\s*(\\w+|[+-]?\\d+)(\\.\\d*)?', line):
        error(filename, linenum, 'build/deprecated', 3, '>? and <? (max and min) operators are non-standard and deprecated.')

    if Search('^\\s*const\\s*string\\s*&\\s*\\w+\\s*;', line):
        # TODO(unknown): Could it be expanded safely to arbitrary references,
        # without triggering too many false positives?
        error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use alternatives, such as pointers or simple constants.')

    # Everything else in this function operates on class declarations.
    # Return early if the top of the nesting stack is not a class, or if
    # the class head is not completed yet.
    classinfo = nesting_state.InnermostClass()
    if ((not classinfo) or (not classinfo.seen_open_brace)):
        return

    # The class may have been declared with namespace or classname qualifiers.
    # The constructor and destructor will not have those qualifiers.
    base_classname = classinfo.name.split('::')[(- 1)]

    # Look for single-argument constructors that aren't marked explicit.
    # Technically a valid construct, but against style.
    args = Match(('\\s+(?:inline\\s+)?%s\\s*\\(([^,()]+)\\)' % re.escape(base_classname)), line)
    if (args and (args.group(1) != 'void') and (not Match(('(const\\s+)?%s(\\s+const)?\\s*(?:<\\w+>\\s*)?&' % re.escape(base_classname)), args.group(1).strip()))):
        error(filename, linenum, 'runtime/explicit', 5, 'Single-argument constructors should be marked explicit.')
def CheckSpacingForFunctionCall(filename, line, linenum, error): 'Checks for the correctness of various spacing around function calls.\n\n Args:\n filename: The name of the current file.\n line: The text of the line to check.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n ' fncall = line for pattern in ('\\bif\\s*\\((.*)\\)\\s*{', '\\bfor\\s*\\((.*)\\)\\s*{', '\\bwhile\\s*\\((.*)\\)\\s*[{;]', '\\bswitch\\s*\\((.*)\\)\\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) break if ((not Search('\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b', fncall)) and (not Search(' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall)) and (not Search(' \\([^)]+\\)\\[[^\\]]+\\]', fncall))): if Search('\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall): error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search('\\(\\s+(?!(\\s*\\\\)|\\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') if (Search('\\w\\s+\\(', fncall) and (not Search('#\\s*define|typedef', fncall)) and (not Search('\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall))): error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call') if Search('[^)]\\s+\\)\\s*[^{\\s]', fncall): if Search('^\\s+\\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )')
def IsBlankLine(line): 'Returns true if the given line is blank.\n\n We consider a line to be blank if the line is empty or consists of\n only white spaces.\n\n Args:\n line: A line of a string.\n\n Returns:\n True, if the given line is blank.\n ' return ((not line) or line.isspace())
def CheckForFunctionLengths(filename, clean_lines, linenum, function_state, error):
    """Reports for long function bodies.

    For an overview why this is done, see:
    http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

    Uses a simplistic algorithm assuming other style guidelines
    (especially spacing) are followed.
    Only checks unindented functions, so class members are unchecked.
    Trivial bodies are unchecked, so constructors with huge initializer lists
    may be missed.
    Blank/comment lines are not counted so as to avoid encouraging the removal
    of vertical space and comments just to get through a lint check.
    NOLINT *on the last line of a function* disables this check.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      function_state: Current function name and lines in body so far.
      error: The function to call with any errors found.
    """
    lines = clean_lines.lines
    line = lines[linenum]
    raw = clean_lines.raw_lines
    raw_line = raw[linenum]  # NOTE(review): not used below -- looks vestigial.
    joined_line = ''

    starting_func = False
    # A function signature starts with an identifier (possibly qualified,
    # possibly returning a pointer/reference) followed by '('.  Only
    # TEST/TEST_F or non-ALL-CAPS names count, to skip macro invocations.
    regexp = '(\\w(\\w|::|\\*|\\&|\\s)*)\\('
    match_result = Match(regexp, line)
    if match_result:
        function_name = match_result.group(1).split()[(- 1)]
        if ((function_name == 'TEST') or (function_name == 'TEST_F') or (not Match('[A-Z_]+$', function_name))):
            starting_func = True

    if starting_func:
        body_found = False
        # Scan forward to find where the body opens; a ';' or '}' first means
        # this was only a declaration (or a trivial body).
        for start_linenum in xrange(linenum, clean_lines.NumLines()):
            start_line = lines[start_linenum]
            joined_line += (' ' + start_line.lstrip())
            if Search('(;|})', start_line):
                body_found = True
                break
            elif Search('{', start_line):
                body_found = True
                function = Search('((\\w|:)*)\\(', line).group(1)
                if Match('TEST', function):
                    # For TEST macros, include the parameter list in the name
                    # reported by function_state.
                    parameter_regexp = Search('(\\(.*\\))', joined_line)
                    if parameter_regexp:
                        function += parameter_regexp.group(1)
                else:
                    function += '()'
                function_state.Begin(function)
                break
        if (not body_found):
            error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.')
    elif Match('^\\}\\s*$', line):
        # A lone '}' at column 0 closes the function: run the length check.
        function_state.Check(error, filename, linenum)
        function_state.End()
    elif (not Match('^\\s*$', line)):
        # Any other non-blank line counts toward the body length.
        function_state.Count()
def CheckComment(comment, filename, linenum, error): 'Checks for common mistakes in TODO comments.\n\n Args:\n comment: The text of the comment from the line in question.\n filename: The name of the current file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n ' match = _RE_PATTERN_TODO.match(comment) if match: leading_whitespace = match.group(1) if (len(leading_whitespace) > 1): error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO') username = match.group(2) if (not username): error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like "// TODO(my_username): Stuff."') middle_whitespace = match.group(3) if ((middle_whitespace != ' ') and (middle_whitespace != '')): error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error): 'Checks for improper use of DISALLOW* macros.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n nesting_state: A _NestingState instance which maintains information about\n the current stack of nested blocks being parsed.\n error: The function to call with any errors found.\n ' line = clean_lines.elided[linenum] matched = Match('\\s*(DISALLOW_COPY_AND_ASSIGN|DISALLOW_EVIL_CONSTRUCTORS|DISALLOW_IMPLICIT_CONSTRUCTORS)', line) if (not matched): return if (nesting_state.stack and isinstance(nesting_state.stack[(- 1)], _ClassInfo)): if (nesting_state.stack[(- 1)].access != 'private'): error(filename, linenum, 'readability/constructors', 3, ('%s must be in the private: section' % matched.group(1))) else: pass
def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix): 'Find the corresponding > to close a template.\n\n Args:\n clean_lines: A CleansedLines instance containing the file.\n linenum: Current line number.\n init_suffix: Remainder of the current line after the initial <.\n\n Returns:\n True if a matching bracket exists.\n ' line = init_suffix nesting_stack = ['<'] while True: match = Search('^[^<>(),;\\[\\]]*([<>(),;\\[\\]])(.*)$', line) if match: operator = match.group(1) line = match.group(2) if (nesting_stack[(- 1)] == '<'): if (operator in ('<', '(', '[')): nesting_stack.append(operator) elif (operator == '>'): nesting_stack.pop() if (not nesting_stack): return True elif (operator == ','): return True else: return False elif (operator in ('<', '(', '[')): nesting_stack.append(operator) elif (operator in (')', ']')): nesting_stack.pop() else: linenum += 1 if (linenum >= len(clean_lines.elided)): break line = clean_lines.elided[linenum] return True
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix): 'Find the corresponding < that started a template.\n\n Args:\n clean_lines: A CleansedLines instance containing the file.\n linenum: Current line number.\n init_prefix: Part of the current line before the initial >.\n\n Returns:\n True if a matching bracket exists.\n ' line = init_prefix nesting_stack = ['>'] while True: match = Search('^(.*)([<>(),;\\[\\]])[^<>(),;\\[\\]]*$', line) if match: operator = match.group(2) line = match.group(1) if (nesting_stack[(- 1)] == '>'): if (operator in ('>', ')', ']')): nesting_stack.append(operator) elif (operator == '<'): nesting_stack.pop() if (not nesting_stack): return True elif (operator == ','): return True else: return False elif (operator in ('>', ')', ']')): nesting_stack.append(operator) elif (operator in ('(', '[')): nesting_stack.pop() else: linenum -= 1 if (linenum < 0): break line = clean_lines.elided[linenum] return False
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
    """Checks for the correctness of various spacing issues in the code.

    Things we check for: spaces around operators, spaces after
    if/for/while/switch, no spaces around parens in function calls, two
    spaces between code and comment, don't start a block with a blank
    line, don't end a function with a blank line, don't add a blank line
    after public/protected/private, don't have too many blank lines in a row.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      nesting_state: A _NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
      error: The function to call with any errors found.
    """
    # Raw lines (comments kept) are needed for the comment-spacing checks.
    raw = clean_lines.lines_without_raw_strings
    line = raw[linenum]

    # --- Blank-line checks (skipped inside namespace bodies) ---
    if (IsBlankLine(line) and (not nesting_state.InNamespaceBody())):
        elided = clean_lines.elided
        prev_line = elided[(linenum - 1)]
        prevbrace = prev_line.rfind('{')
        # Blank line right after an unclosed '{' => blank line at block start.
        if ((prevbrace != (- 1)) and (prev_line[prevbrace:].find('}') == (- 1))):
            exception = False
            if Match(' {6}\\w', prev_line):
                # Previous line looks like an initializer-list continuation
                # (6-space indent); walk back to see if the run starts at ':'.
                search_position = (linenum - 2)
                while ((search_position >= 0) and Match(' {6}\\w', elided[search_position])):
                    search_position -= 1
                exception = ((search_position >= 0) and (elided[search_position][:5] == ' :'))
            else:
                # Function-header or initializer-colon lines are also exempt.
                exception = (Match(' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)', prev_line) or Match(' {4}:', prev_line))
            if (not exception):
                error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block should be deleted.')
        # Blank line directly before a closing '}' (except "} else").
        if ((linenum + 1) < clean_lines.NumLines()):
            next_line = raw[(linenum + 1)]
            if (next_line and Match('\\s*}', next_line) and (next_line.find('} else ') == (- 1))):
                error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block should be deleted.')
        # Blank line right after an access specifier.
        matched = Match('\\s*(public|protected|private):', prev_line)
        if matched:
            error(filename, linenum, 'whitespace/blank_line', 3, ('Do not leave a blank line after "%s:"' % matched.group(1)))

    # --- Comment spacing ---
    commentpos = line.find('//')
    if (commentpos != (- 1)):
        # Only treat it as a comment if the // is not inside a string literal
        # (even number of unescaped quotes before it).
        if (((line.count('"', 0, commentpos) - line.count('\\"', 0, commentpos)) % 2) == 0):
            # Require at least two spaces between code and the comment.
            if ((not Match('^\\s*{ //', line)) and (((commentpos >= 1) and (line[(commentpos - 1)] not in string.whitespace)) or ((commentpos >= 2) and (line[(commentpos - 2)] not in string.whitespace)))):
                error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments')
            # Require a space after //, except for separator rows and
            # doxygen-style markers (//!, ///, //<, ...).
            commentend = (commentpos + 2)
            if ((commentend < len(line)) and (not (line[commentend] == ' '))):
                match = (Search('[=/-]{4,}\\s*$', line[commentend:]) or Search('^/$', line[commentend:]) or Search('^!< ', line[commentend:]) or Search('^/< ', line[commentend:]) or Search('^/+ ', line[commentend:]))
                if (not match):
                    error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment')
            CheckComment(line[commentpos:], filename, linenum, error)

    # --- Operator spacing (on elided line) ---
    line = clean_lines.elided[linenum]
    # Neutralize "operator<<(" etc. so they do not trip the operator checks.
    line = re.sub('operator(==|!=|<|<<|<=|>=|>>|>)\\(', 'operator\\(', line)

    # Assignment without surrounding spaces (if/while exempt: likely ==).
    if (Search('[\\w.]=[\\w.]', line) and (not Search('\\b(if|while) ', line))):
        error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =')

    # Comparison operators without surrounding spaces.
    match = Search('[^<>=!\\s](==|!=|<=|>=)[^<>=!\\s]', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 3, ('Missing spaces around %s' % match.group(1)))

    # "<<" without spaces -- but "1<<20" style shifts are allowed.
    match = Search('(operator|\\S)(?:L|UL|ULL|l|ul|ull)?<<(\\S)', line)
    if (match and (not (match.group(1).isdigit() and match.group(2).isdigit())) and (not ((match.group(1) == 'operator') and (match.group(2) == ';')))):
        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<')
    elif (not Match('#.*include', line)):
        # Lone '<' / '>' without spaces: only an error if they are genuine
        # comparisons, i.e. no matching bracket turns them into a template.
        reduced_line = line.replace('->', '')
        match = Search('[^\\s<]<([^\\s=<].*)', reduced_line)
        if (match and (not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1)))):
            error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <')
        match = Search('^(.*[^\\s>])>[^\\s=>]', reduced_line)
        if (match and (not FindPreviousMatchingAngleBracket(clean_lines, linenum, match.group(1)))):
            error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >')

    # ">>" followed by an identifier (stream extraction without spaces).
    match = Search('>>[a-zA-Z_]', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>')

    # Space between a unary operator and its operand.
    match = Search('(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 4, ('Extra space for operator %s' % match.group(1)))

    # --- Parenthesis spacing around keywords ---
    match = Search(' (if\\(|for\\(|while\\(|switch\\()', line)
    if match:
        error(filename, linenum, 'whitespace/parens', 5, ('Missing space before ( in %s' % match.group(1)))

    # Inside if/for/while/switch parens: spaces must be symmetric and 0 or 1.
    match = Search('\\b(if|for|while|switch)\\s*\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$', line)
    if match:
        if (len(match.group(2)) != len(match.group(4))):
            if (not (((match.group(3) == ';') and (len(match.group(2)) == (1 + len(match.group(4))))) or ((not match.group(2)) and Search('\\bfor\\s*\\(.*; \\)', line)))):
                error(filename, linenum, 'whitespace/parens', 5, ('Mismatching spaces inside () in %s' % match.group(1)))
        if (len(match.group(2)) not in [0, 1]):
            error(filename, linenum, 'whitespace/parens', 5, ('Should have zero or one spaces inside ( and ) in %s' % match.group(1)))

    # Comma/semicolon spacing; the raw-line check avoids commas in strings.
    if (Search(',[^,\\s]', line) and Search(',[^,\\s]', raw[linenum])):
        error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,')
    if Search(';[^\\s};\\\\)/]', line):
        error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;')

    CheckSpacingForFunctionCall(filename, line, linenum, error)

    # --- Brace spacing ---
    # '{' glued to preceding text: allowed only when the brace plausibly opens
    # a braced initializer (followed by '{', '.', ';', ',', ')', '<' or ']').
    match = Match('^(.*[^ ({]){', line)
    if match:
        (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, len(match.group(1)))
        trailing_text = ''
        if (endpos > (- 1)):
            trailing_text = endline[endpos:]
        for offset in xrange((endlinenum + 1), min((endlinenum + 3), (clean_lines.NumLines() - 1))):
            trailing_text += clean_lines.elided[offset]
        if (not Match('^[\\s}]*[{.;,)<\\]]', trailing_text)):
            error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {')

    if Search('}else', line):
        error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else')

    # Space before '[' (except after delete).
    if (Search('\\w\\s+\\[', line) and (not Search('delete\\s+\\[', line))):
        error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [')

    # --- Empty-statement semicolons ---
    if Search(':\\s*;\\s*$', line):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.')
    elif Search('^\\s*;\\s*$', line):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, use {} instead.')
    elif (Search('\\s+;\\s*$', line) and (not Search('\\bfor\\b', line))):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty statement, use {} instead.')

    # Range-based for: colon must have a space on both sides.
    if (Search('for *\\(.*[^:]:[^: ]', line) or Search('for *\\(.*[^: ]:[^:]', line)):
        error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): 'Checks for additional blank line issues related to sections.\n\n Currently the only thing checked here is blank line before protected/private.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n class_info: A _ClassInfo objects.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n ' if (((class_info.last_line - class_info.starting_linenum) <= 24) or (linenum <= class_info.starting_linenum)): return matched = Match('\\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: prev_line = clean_lines.lines[(linenum - 1)] if ((not IsBlankLine(prev_line)) and (not Search('\\b(class|struct)\\b', prev_line)) and (not Search('\\\\$', prev_line))): end_class_head = class_info.starting_linenum for i in range(class_info.starting_linenum, linenum): if Search('\\{\\s*$', clean_lines.lines[i]): end_class_head = i break if (end_class_head < (linenum - 1)): error(filename, linenum, 'whitespace/blank_line', 3, ('"%s:" should be preceded by a blank line' % matched.group(1)))
def GetPreviousNonBlankLine(clean_lines, linenum): 'Return the most recent non-blank line and its line number.\n\n Args:\n clean_lines: A CleansedLines instance containing the file contents.\n linenum: The number of the line to check.\n\n Returns:\n A tuple with two elements. The first element is the contents of the last\n non-blank line before the current line, or the empty string if this is the\n first non-blank line. The second is the line number of that line, or -1\n if this is the first non-blank line.\n ' prevlinenum = (linenum - 1) while (prevlinenum >= 0): prevline = clean_lines.elided[prevlinenum] if (not IsBlankLine(prevline)): return (prevline, prevlinenum) prevlinenum -= 1 return ('', (- 1))
def CheckBraces(filename, clean_lines, linenum, error):
    """Looks for misplaced braces (e.g. at the end of line).

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # A lone '{' should normally be attached to the previous line, unless
    # that line already ends in a brace/paren/punctuation or is a directive.
    if Match('\\s*{\\s*$', line):
        prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
        if ((not Search('[,;:}{(]\\s*$', prevline)) and (not Match('\\s*#', prevline))):
            error(filename, linenum, 'whitespace/braces', 4, '{ should almost always be at the end of the previous line')

    # "else" on its own line after a closing '}'.
    if Match('\\s*else\\s*', line):
        prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
        if Match('\\s*}\\s*$', prevline):
            error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }')

    # If/else brace symmetry: both arms braced or neither.
    if (Search('}\\s*else[^{]*$', line) or Match('[^}]*else\\s*{', line)):
        if Search('}\\s*else if([^{]*)$', line):
            # "} else if (...)" -- only an error when the condition's closing
            # paren is not followed by '{' anywhere.
            pos = line.find('else if')
            pos = line.find('(', pos)
            if (pos > 0):
                (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
                if (endline[endpos:].find('{') == (- 1)):
                    error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both')
        else:
            error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both')

    # Statement glued onto "else" / "do" on the same line.
    if (Search('\\belse [^\\s{]', line) and (not Search('\\belse if\\b', line))):
        error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)')
    if Match('\\s*do [^\\s{]', line):
        error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line')

    # --- "};" after a block that does not need the semicolon ---
    # Case 1: "){" -- decide whether it is a control clause/function (no ';'
    # needed) or a macro/initializer (where ';' is fine -> match cleared).
    match = Match('^(.*\\)\\s*)\\{', line)
    if match:
        closing_brace_pos = match.group(1).rfind(')')
        opening_parenthesis = ReverseCloseExpression(clean_lines, linenum, closing_brace_pos)
        if (opening_parenthesis[2] > (- 1)):
            line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
            macro = Search('\\b([A-Z_]+)\\s*$', line_prefix)
            if ((macro and (macro.group(1) not in ('TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF'))) or Search('\\s+=\\s*$', line_prefix)):
                # Unknown ALL_CAPS macro or an assignment: skip the check.
                match = None
    else:
        # Case 2-3: "else {", ") const {", or a '{' starting a new scope
        # after a complete previous statement.
        match = Match('^(.*(?:else|\\)\\s*const)\\s*)\\{', line)
        if (not match):
            prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
            if (prevline and Search('[;{}]\\s*$', prevline)):
                match = Match('^(\\s*)\\{', line)
    if match:
        # Find the matching '}' and complain if a ';' follows it.
        (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, len(match.group(1)))
        if ((endpos > (- 1)) and Match('^\\s*;', endline[endpos:])):
            error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error): 'Look for empty loop/conditional body with only a single semicolon.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n ' line = clean_lines.elided[linenum] matched = Match('\\s*(for|while|if)\\s*\\(', line) if matched: (end_line, end_linenum, end_pos) = CloseExpression(clean_lines, linenum, line.find('(')) if ((end_pos >= 0) and Match(';', end_line[end_pos:])): if (matched.group(1) == 'if'): error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue')
def CheckCheck(filename, clean_lines, linenum, error):
    """Checks the use of CHECK and EXPECT macros.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    # Locate which CHECK-style macro (if any) this line invokes, and where
    # its argument list opens.
    lines = clean_lines.elided
    check_macro = None
    start_pos = (- 1)
    for macro in _CHECK_MACROS:
        i = lines[linenum].find(macro)
        if (i >= 0):
            check_macro = macro
            matched = Match((('^(.*\\b' + check_macro) + '\\s*)\\('), lines[linenum])
            if (not matched):
                continue
            start_pos = len(matched.group(1))
            break
    if ((not check_macro) or (start_pos < 0)):
        return

    # Collect the full (possibly multi-line) macro argument text.
    (last_line, end_line, end_pos) = CloseExpression(clean_lines, linenum, start_pos)
    if (end_pos < 0):
        return
    if (linenum == end_line):
        expression = lines[linenum][(start_pos + 1):(end_pos - 1)]
    else:
        expression = lines[linenum][(start_pos + 1):]
        for i in xrange((linenum + 1), end_line):
            expression += lines[i]
        expression += last_line[0:(end_pos - 1)]

    # Split the expression into lhs <operator> rhs, tracking parenthesized
    # sub-expressions and stream/member operators as part of the lhs.
    lhs = ''
    rhs = ''
    operator = None
    while expression:
        matched = Match('^\\s*(<<|<<=|>>|>>=|->\\*|->|&&|\\|\\||==|!=|>=|>|<=|<|\\()(.*)$', expression)
        if matched:
            token = matched.group(1)
            if (token == '('):
                # Swallow a balanced parenthesized group into the lhs.
                expression = matched.group(2)
                (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
                if (end < 0):
                    return
                lhs += ('(' + expression[0:end])
                expression = expression[end:]
            elif (token in ('&&', '||')):
                # Logical operators make the rewrite suggestion unsafe.
                return
            elif (token in ('<<', '<<=', '>>', '>>=', '->*', '->')):
                # Stream/member operators bind tighter; keep them in the lhs.
                lhs += token
                expression = matched.group(2)
            else:
                # A comparison operator: everything after it is the rhs.
                operator = token
                rhs = matched.group(2)
                break
        else:
            # Consume a run of non-operator characters into the lhs.
            matched = Match('^([^-=!<>()&|]+)(.*)$', expression)
            if (not matched):
                matched = Match('^(\\s*\\S)(.*)$', expression)
                if (not matched):
                    break
            lhs += matched.group(1)
            expression = matched.group(2)

    # Only warn for a clean "a <op> b" comparison...
    if (not (lhs and operator and rhs)):
        return
    # ...without logical operators hiding in the rhs.
    if ((rhs.find('&&') > (- 1)) or (rhs.find('||') > (- 1))):
        return

    # Suggest CHECK_EQ(a, b) etc. only when one side is a literal constant.
    lhs = lhs.strip()
    rhs = rhs.strip()
    match_constant = '^([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\\\'.*\\\')$'
    if (Match(match_constant, lhs) or Match(match_constant, rhs)):
        error(filename, linenum, 'readability/check', 2, ('Consider using %s instead of %s(a %s b)' % (_CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)))
def CheckAltTokens(filename, clean_lines, linenum, error): 'Check alternative keywords being used in boolean expressions.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n ' line = clean_lines.elided[linenum] if Match('^\\s*#', line): return if ((line.find('/*') >= 0) or (line.find('*/') >= 0)): return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, ('Use operator %s instead of %s' % (_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))))
def GetLineWidth(line): 'Determines the width of the line in column positions.\n\n Args:\n line: A string, which may be a Unicode string.\n\n Returns:\n The width of the line in column positions, accounting for Unicode\n combining characters and wide characters.\n ' if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if (unicodedata.east_asian_width(uc) in ('W', 'F')): width += 2 elif (not unicodedata.combining(uc)): width += 1 return width else: return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error):
    """Checks rules from the 'C++ style rules' section of cppguide.html.

    Most of these rules are hard to test (naming, comment style), but we
    do what we can.  In particular we check for 2-space indents, line lengths,
    tab usage, spaces inside code, etc.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      file_extension: The extension (without the dot) of the filename.
      nesting_state: A _NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
      error: The function to call with any errors found.
    """
    # Raw line is needed for tab/indent/width checks; elided for the rest.
    raw_lines = clean_lines.lines_without_raw_strings
    line = raw_lines[linenum]

    if (line.find('\t') != (- 1)):
        error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces')

    # Count leading spaces to catch odd indents (labels like "public:" or
    # goto targets may legitimately sit at odd columns, hence the exemption).
    initial_spaces = 0
    cleansed_line = clean_lines.elided[linenum]
    while ((initial_spaces < len(line)) and (line[initial_spaces] == ' ')):
        initial_spaces += 1
    if (line and line[(- 1)].isspace()):
        error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.')
    elif (((initial_spaces == 1) or (initial_spaces == 3)) and (not Match('\\s*\\w+\\s*:\\s*$', cleansed_line))):
        error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. Are you using a 2-space indent?')

    # Header-guard lines are exempt from the line-length check below.
    is_header_guard = False
    if (file_extension == 'h'):
        cppvar = GetHeaderGuardCPPVariable(filename)
        if (line.startswith(('#ifndef %s' % cppvar)) or line.startswith(('#define %s' % cppvar)) or line.startswith(('#endif // %s' % cppvar))):
            is_header_guard = True

    # Line length: also exempt #include lines, URL-only comments and $Id$
    # tags, which cannot be split.
    if ((not line.startswith('#include')) and (not is_header_guard) and (not Match('^\\s*//.*http(s?)://\\S*$', line)) and (not Match('^// \\$Id:.*#[0-9]+ \\$$', line))):
        line_width = GetLineWidth(line)
        extended_length = int((_line_length * 1.25))
        if (line_width > extended_length):
            error(filename, linenum, 'whitespace/line_length', 4, ('Lines should very rarely be longer than %i characters' % extended_length))
        elif (line_width > _line_length):
            error(filename, linenum, 'whitespace/line_length', 2, ('Lines should be <= %i characters long' % _line_length))

    # Multiple statements on one line: allowed for multi-line for-loop
    # headers and for "case ...: ... break;" patterns.
    if ((cleansed_line.count(';') > 1) and (cleansed_line.find('for') == (- 1)) and ((GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == (- 1)) or (GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != (- 1))) and (not (((cleansed_line.find('case ') != (- 1)) or (cleansed_line.find('default:') != (- 1))) and (cleansed_line.find('break;') != (- 1))))):
        error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line')

    # Delegate the remaining per-line style checks.
    CheckBraces(filename, clean_lines, linenum, error)
    CheckEmptyBlockBody(filename, clean_lines, linenum, error)
    CheckAccess(filename, clean_lines, linenum, nesting_state, error)
    CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
    CheckCheck(filename, clean_lines, linenum, error)
    CheckAltTokens(filename, clean_lines, linenum, error)
    classinfo = nesting_state.InnermostClass()
    if classinfo:
        CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
def _DropCommonSuffixes(filename): "Drops common suffixes like _test.cc or -inl.h from filename.\n\n For example:\n >>> _DropCommonSuffixes('foo/foo-inl.h')\n 'foo/foo'\n >>> _DropCommonSuffixes('foo/bar/foo.cc')\n 'foo/bar/foo'\n >>> _DropCommonSuffixes('foo/foo_internal.h')\n 'foo/foo'\n >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')\n 'foo/foo_unusualinternal'\n\n Args:\n filename: The input filename.\n\n Returns:\n The filename with the common suffix removed.\n " for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and (len(filename) > len(suffix)) and (filename[((- len(suffix)) - 1)] in ('-', '_'))): return filename[:((- len(suffix)) - 1)] return os.path.splitext(filename)[0]
def _IsTestFilename(filename): "Determines if the given filename has a suffix that identifies it as a test.\n\n Args:\n filename: The input filename.\n\n Returns:\n True if 'filename' looks like a test, False otherwise.\n " if (filename.endswith('_test.cc') or filename.endswith('_unittest.cc') or filename.endswith('_regtest.cc')): return True else: return False
def _ClassifyInclude(fileinfo, include, is_system): 'Figures out what kind of header \'include\' is.\n\n Args:\n fileinfo: The current file cpplint is running over. A FileInfo instance.\n include: The path to a #included file.\n is_system: True if the #include used <> rather than "".\n\n Returns:\n One of the _XXX_HEADER constants.\n\n For example:\n >>> _ClassifyInclude(FileInfo(\'foo/foo.cc\'), \'stdio.h\', True)\n _C_SYS_HEADER\n >>> _ClassifyInclude(FileInfo(\'foo/foo.cc\'), \'string\', True)\n _CPP_SYS_HEADER\n >>> _ClassifyInclude(FileInfo(\'foo/foo.cc\'), \'foo/foo.h\', False)\n _LIKELY_MY_HEADER\n >>> _ClassifyInclude(FileInfo(\'foo/foo_unknown_extension.cc\'),\n ... \'bar/foo_other_ext.h\', False)\n _POSSIBLE_MY_HEADER\n >>> _ClassifyInclude(FileInfo(\'foo/foo.cc\'), \'foo/bar.h\', False)\n _OTHER_HEADER\n ' is_cpp_h = (include in _CPP_HEADERS) if is_system: if is_cpp_h: return _CPP_SYS_HEADER else: return _C_SYS_HEADER (target_dir, target_base) = os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())) (include_dir, include_base) = os.path.split(_DropCommonSuffixes(include)) if ((target_base == include_base) and ((include_dir == target_dir) or (include_dir == os.path.normpath((target_dir + '/../public'))))): return _LIKELY_MY_HEADER target_first_component = _RE_FIRST_COMPONENT.match(target_base) include_first_component = _RE_FIRST_COMPONENT.match(include_base) if (target_first_component and include_first_component and (target_first_component.group(0) == include_first_component.group(0))): return _POSSIBLE_MY_HEADER return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): 'Check rules that are applicable to #include lines.\n\n Strings on #include lines are NOT removed from elided line, to make\n certain tasks easier. However, to prevent false positives, checks\n applicable to #include lines in CheckLanguage must be put here.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n include_state: An _IncludeState instance in which the headers are inserted.\n error: The function to call with any errors found.\n ' fileinfo = FileInfo(filename) line = clean_lines.lines[linenum] if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line): error(filename, linenum, 'build/include_dir', 4, 'Include the directory when naming .h files') match = _RE_PATTERN_INCLUDE.search(line) if match: include = match.group(2) is_system = (match.group(1) == '<') if (include in include_state): error(filename, linenum, 'build/include', 4, ('"%s" already included at %s:%s' % (include, filename, include_state[include]))) else: include_state[include] = linenum error_message = include_state.CheckNextIncludeOrder(_ClassifyInclude(fileinfo, include, is_system)) if error_message: error(filename, linenum, 'build/include_order', 4, ('%s. Should be: %s.h, c system, c++ system, other.' % (error_message, fileinfo.BaseName()))) canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) if (not include_state.IsInAlphabeticalOrder(clean_lines, linenum, canonical_include)): error(filename, linenum, 'build/include_alpha', 4, ('Include "%s" not in alphabetical order' % include)) include_state.SetLastHeader(canonical_include) match = _RE_PATTERN_INCLUDE.match(line) if match: include = match.group(2) if Match('(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include): if (not _IsTestFilename(filename)): error(filename, linenum, 'readability/streams', 3, 'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern): "Retrieves all the text between matching open and close parentheses.\n\n Given a string of lines and a regular expression string, retrieve all the text\n following the expression and between opening punctuation symbols like\n (, [, or {, and the matching close-punctuation symbol. This properly nested\n occurrences of the punctuations, so for the text like\n printf(a(), b(c()));\n a call to _GetTextInside(text, r'printf\\(') will return 'a(), b(c())'.\n start_pattern must match string having an open punctuation symbol at the end.\n\n Args:\n text: The lines to extract text. Its comments and strings must be elided.\n It can be single line and can span multiple lines.\n start_pattern: The regexp string indicating where to start extracting\n the text.\n Returns:\n The extracted text.\n None if either the opening string or ending punctuation could not be found.\n " matching_punctuation = {'(': ')', '{': '}', '[': ']'} closing_punctuation = set(matching_punctuation.itervalues()) match = re.search(start_pattern, text, re.M) if (not match): return None start_position = match.end(0) assert (start_position > 0), 'start_pattern must ends with an opening punctuation.' assert (text[(start_position - 1)] in matching_punctuation), 'start_pattern must ends with an opening punctuation.' punctuation_stack = [matching_punctuation[text[(start_position - 1)]]] position = start_position while (punctuation_stack and (position < len(text))): if (text[position] == punctuation_stack[(- 1)]): punctuation_stack.pop() elif (text[position] in closing_punctuation): return None elif (text[position] in matching_punctuation): punctuation_stack.append(matching_punctuation[text[position]]) position += 1 if punctuation_stack: return None return text[start_position:(position - 1)]
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A _NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists entirely of a comment, nothing to check.
  line = clean_lines.elided[linenum]
  if (not line):
    return
  # #include lines get their own dedicated checks; stop here.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return
  # Reset include ordering state across preprocessor conditionals so headers
  # wrapped in #ifdef blocks don't trigger ordering warnings.
  if Match('^\\s*#\\s*(?:ifdef|elif|else|endif)\\b', line):
    include_state.ResetSection()
  # Make Windows paths like Unix.
  # NOTE(review): computed but never used below -- presumably kept for
  # parity with upstream cpplint; confirm before removing.
  fullname = os.path.abspath(filename).replace('\\', '/')
  # Detect deprecated function-style casts on primitive types: "int(x)" etc.
  match = Search('(\\bnew\\s+)?\\b(int|float|double|bool|char|int32|uint32|int64|uint64)(\\([^)].*)', line)
  if match:
    matched_new = match.group(1)
    matched_type = match.group(2)
    matched_funcptr = match.group(3)
    # Skip legitimate look-alikes: "new int(...)", gMock MOCK_METHOD macros,
    # MockCallback/std::function template args, and function pointer
    # parameter declarations like "int (*f)(...)".
    if ((matched_new is None) and (not (Match('^\\s*MOCK_(CONST_)?METHOD\\d+(_T)?\\(', line) or Search('\\bMockCallback<.*>', line) or Search('\\bstd::function<.*>', line))) and (not (matched_funcptr and Match('\\((?:[^() ]+::\\s*\\*\\s*)?[^() ]+\\)\\s*\\(', matched_funcptr)))):
      # Also allow MOCK_METHOD macros whose opening spans the previous
      # one or two lines.
      if ((linenum < 2) or (not (Match('^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\((?:\\S+,)?\\s*$', clean_lines.elided[(linenum - 1)]) or Match('^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\(\\s*$', clean_lines.elided[(linenum - 2)])))):
        error(filename, linenum, 'readability/casting', 4,
              ('Using deprecated casting style. Use static_cast<%s>(...) instead' % matched_type))
  # C-style casts on numeric types should be static_cast.
  CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'static_cast', '\\((int|float|double|bool|char|u?int(16|32|64))\\)', error)
  # (char*)"literal" needs const_cast; only if that didn't fire, treat other
  # pointer casts as candidates for reinterpret_cast.
  if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'const_cast', '\\((char\\s?\\*+\\s?)\\)\\s*"', error):
    pass
  else:
    CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'reinterpret_cast', '\\((\\w+\\s?\\*+\\s?)\\)', error)
  # Taking the address of a cast yields the address of a temporary.
  match = Search('(?:&\\(([^)]+)\\)[\\w(])|(?:&(static|dynamic|down|reinterpret)_cast\\b)', line)
  if (match and (match.group(1) != '*')):
    error(filename, linenum, 'runtime/casting', 4,
          'Are you taking an address of a cast? This is dangerous: could be a temp var. Take the address before doing the cast, rather than after')
  # Build a two-line window for checks that may span lines.
  # NOTE(review): extended_line is computed but not referenced afterwards --
  # presumably leftover from upstream; confirm before removing.
  if ((linenum + 1) < clean_lines.NumLines()):
    extended_line = (line + clean_lines.elided[(linenum + 1)])
  else:
    extended_line = line
  # Static/global string constants should be C strings, not string objects
  # (avoids static-initialization-order issues).
  match = Match('((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\\b(.*)', line)
  if (match and (not Search('\\boperator\\W', line)) and (not Match('\\s*(<.*>)?(::[a-zA-Z0-9_]+)?\\s*\\(([^"]|$)', match.group(3)))):
    error(filename, linenum, 'runtime/string', 4,
          ('For a static/global string constant, use a C style string instead: "%schar %s[]".' % (match.group(1), match.group(2))))
  # Member initializer like "x_(x_)" initializes a member with itself.
  if Search('\\b([A-Za-z0-9_]*_)\\(\\1\\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
  if (file_extension == 'h'):
    # Placeholder: header-specific checks were removed/disabled here.
    pass
  # Ports must be "unsigned short"; otherwise flag non-sized integer types.
  if Search('\\bshort port\\b', line):
    if (not Search('\\bunsigned short port\\b', line)):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search('\\b(short|long(?! +double)|long long)\\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            ('Use int16/int64/etc, rather than the C type %s' % match.group(1)))
  # snprintf with a literal size is fragile; prefer sizeof(buffer).
  match = Search('snprintf\\s*\\(([^,]*),\\s*([0-9]*)\\s*,', line)
  if (match and (match.group(2) != '0')):
    error(filename, linenum, 'runtime/printf', 3,
          ('If you can, use sizeof(%s) instead of %s as the 2nd arg to snprintf.' % (match.group(1), match.group(2))))
  if Search('\\bsprintf\\b', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  match = Search('\\b(strcpy|strcat)\\b', line)
  if match:
    error(filename, linenum, 'runtime/printf', 4,
          ('Almost always, snprintf is better than %s' % match.group(1)))
  # Overloading unary operator& breaks the meaning of address-of.
  if Search('\\boperator\\s*&\\s*\\(\\s*\\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')
  # "} if (" on one line usually indicates a missing "else".
  if Search('\\}\\s*if\\s*\\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')
  # printf(single_token) is a likely format-string bug; it should be
  # printf("%s", single_token).
  printf_args = _GetTextInside(line, '(?i)\\b(string)?printf\\s*\\(')
  if printf_args:
    match = Match('([\\w.\\->()]+)$', printf_args)
    if (match and (match.group(1) != '__VA_ARGS__')):
      function_name = re.search('\\b((?:string)?printf)\\s*\\(', line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            ('Potential format string bug. Do %s("%%s", %s) instead.' % (function_name, match.group(1))))
  # memset(p, n, 0) almost certainly has the last two arguments swapped.
  # NOTE(review): in this regex the '^' binds only to the '' alternative and
  # '$' only to the hex alternative -- looks like upstream's anchoring quirk;
  # kept byte-identical, verify against upstream cpplint before changing.
  match = Search('memset\\s*\\(([^,]*),\\s*([^,]*),\\s*0\\s*\\)', line)
  if (match and (not Match("^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)))):
    error(filename, linenum, 'runtime/memset', 4,
          ('Did you mean "memset(%s, 0, %s)"?' % (match.group(1), match.group(2))))
  if Search('\\busing namespace\\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives. Use using-declarations instead.')
  # Detect variable-length arrays: the declaration is OK only if every token
  # of the size expression is provably compile-time constant.
  match = Match('\\s*(.+::)?(\\w+) [a-z]\\w*\\[(.+)];', line)
  if (match and (match.group(2) != 'return') and (match.group(2) != 'delete') and (match.group(3).find(']') == (- 1))):
    # NOTE(review): the trailing ']' in this split pattern looks like an
    # upstream typo (stray bracket); kept byte-identical.
    tokens = re.split('\\s|\\+|\\-|\\*|\\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False  # set when a token (e.g. 'sizeof') consumes the next one
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue
      if Search('sizeof\\(.+\\)', tok):
        continue
      if Search('arraysize\\(\\w+\\)', tok):
        continue
      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if (not tok):
        continue
      # Decimal, hex, kConstant-style, and ALL_CAPS tokens are constant.
      if Match('\\d+', tok):
        continue
      if Match('0[xX][0-9a-fA-F]+', tok):
        continue
      if Match('k[A-Z0-9]\\w*', tok):
        continue
      if Match('(.+::)?k[A-Z0-9]\\w*', tok):
        continue
      if Match('(.+::)?[A-Z][A-Z0-9_]*', tok):
        continue
      # A catch-all for remaining sizeof expressions split across tokens.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if (not is_const):
      error(filename, linenum, 'runtime/arrays', 1,
            "Do not use variable-length arrays. Use an appropriately named ('k' followed by CamelCase) compile-time constant for the size.")
  # DISALLOW_* macros should be the last thing inside a class body.
  match = Match('\\s*(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))\\(.*\\);$', line)
  if (match and ((linenum + 1) < clean_lines.NumLines())):
    next_line = clean_lines.elided[(linenum + 1)]
    if (not Search('^\\s*}[\\w\\*,\\s]*;', next_line)):
      error(filename, linenum, 'readability/constructors', 3,
            (match.group(1) + ' should be the last thing in the class'))
  # Unnamed namespaces in headers create distinct symbols per translation
  # unit; skip lines ending in '\' (macro continuations).
  if ((file_extension == 'h') and Search('\\bnamespace\\s*{', line) and (line[(- 1)] != '\\')):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces for more information.')
def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A _NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on the current line.
  line = clean_lines.elided[linenum]
  if ('&' not in line):
    return
  # If a reference parameter declaration wrapped onto this line, glue the
  # tail of the previous line back on so the whole declaration is visible.
  if (linenum > 1):
    previous = None
    if Match('\\s*::(?:[\\w<>]|::)+\\s*&\\s*\\S', line):
      # Current line begins with "::..."; previous line should end with the
      # leading part of the type name.
      previous = Search('\\b((?:const\\s*)?(?:[\\w<>]|::)+[\\w<>])\\s*$', clean_lines.elided[(linenum - 1)])
    elif Match('\\s*[a-zA-Z_]([\\w<>]|::)+\\s*&\\s*\\S', line):
      # Current line begins with a type name; previous line may end with a
      # namespace qualifier like "Foo::".
      previous = Search('\\b((?:const\\s*)?(?:[\\w<>]|::)+::)\\s*$', clean_lines.elided[(linenum - 1)])
    if previous:
      line = (previous.group(1) + line.lstrip())
    else:
      # Template parameter list may span multiple lines: walk back from the
      # rightmost '>' to its matching opener and merge the spanned lines.
      endpos = line.rfind('>')
      if (endpos > (- 1)):
        (_, startline, startpos) = ReverseCloseExpression(clean_lines, linenum, endpos)
        if ((startpos > (- 1)) and (startline < linenum)):
          line = ''
          for i in xrange(startline, (linenum + 1)):
            line += clean_lines.elided[i].strip()
  # Only check parameters in declarations, i.e. at file scope or directly
  # inside a class/namespace -- references inside function bodies are fine.
  check_params = False
  if (not nesting_state.stack):
    check_params = True
  elif (isinstance(nesting_state.stack[(- 1)], _ClassInfo) or isinstance(nesting_state.stack[(- 1)], _NamespaceInfo)):
    check_params = True
  elif Match('.*{\\s*$', line):
    # A function-definition opening brace: check that the enclosing scope
    # is file, class, or namespace level.
    if ((len(nesting_state.stack) == 1) or isinstance(nesting_state.stack[(- 2)], _ClassInfo) or isinstance(nesting_state.stack[(- 2)], _NamespaceInfo)):
      check_params = True
  # Functions conventionally allowed to take non-const references.
  whitelisted_functions = '(?:[sS]wap(?:<\\w:+>)?|operator\\s*[<>][<>]|static_assert|COMPILE_ASSERT)\\s*\\('
  if Search(whitelisted_functions, line):
    check_params = False
  elif (not Search('\\S+\\([^)]*$', line)):
    # No function name visible on this line; it may be a multi-line
    # parameter list -- look for a whitelisted function on the previous
    # couple of lines.
    for i in xrange(2):
      if ((linenum > i) and Search(whitelisted_functions, clean_lines.elided[((linenum - i) - 1)])):
        check_params = False
        break
  if check_params:
    # Strip brace-delimited chunks (bodies/initializers) before scanning for
    # reference parameters.
    decls = ReplaceAll('{[^}]*}', ' ', line)
    for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
      if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter)):
        error(filename, linenum, 'runtime/references', 2,
              ('Is this a non-const reference? If so, make const or use a pointer: ' + ReplaceAll(' *<', '<', parameter)))
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    linenum: The number of the line to check.
    line: The line of code to check.
    raw_line: The raw line of code to check, with comments.
    cast_type: The string for the C++ cast to recommend. This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  match = Search(pattern, line)
  if (not match):
    return False
  # Exclude "sizeof(type)" -- looks like a cast but isn't one.
  sizeof_match = Match('.*sizeof\\s*$', line[0:(match.start(1) - 1)])
  if sizeof_match:
    return False
  # Exclude operator++(int) and operator--(int) declarations.
  if (line[0:(match.start(1) - 1)].endswith(' operator++') or line[0:(match.start(1) - 1)].endswith(' operator--')):
    return False
  # What follows the "(type)" decides whether this is really a declaration
  # with a single unnamed parameter rather than a cast.
  remainder = line[match.end(0):]
  if Match('^\\s*(?:;|const\\b|throw\\b|=|>|\\{|\\))', remainder):
    # Looks like a function declaration / function type, not a cast.
    if Match('^\\s*>', remainder):
      # Inside a template argument list, e.g. "Foo<int(bool)>>"; not a cast.
      return False
    # Exclude variable assignment such as "(int)x = 0;" look-alikes.
    # NOTE(review): '^\\s=' (no '*' quantifier) matches exactly one leading
    # whitespace character before '=' -- possibly intended as '^\\s*=';
    # kept byte-identical, verify against upstream cpplint.
    matched_zero = Match('^\\s=\\s*(\\S+)\\s*;', remainder)
    if (matched_zero and (matched_zero.group(1) != '0')):
      return False
    # Something ending in ')' precedes the match -- presumably a function
    # pointer cast/call rather than an unnamed parameter; don't flag.
    if Match('.*\\)\\s*$', line[0:match.start(0)]):
      return False
    # A comment on the raw line suggests the parameter name was deliberately
    # documented there; don't flag.
    if ('/*' in raw_line):
      return False
    error(filename, linenum, 'readability/function', 3,
          'All parameters should be named in a function')
    return True
  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        ('Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))))
  return True
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is as follows: foo.h, foo-inl.h, foo.cc,
  foo_test.cc and foo_unittest.cc belong to the same 'module' if they are
  in the same directory.  some/path/public/xyzzy and some/path/internal/xyzzy
  are also considered to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open
  the header file. We don't have access to the real include paths in this
  context, so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  if not filename_cc.endswith('.cc'):
    return (False, '')
  # Reduce the .cc path to its module stem: drop the extension, any test
  # suffix, and the public/internal path components.
  stem_cc = filename_cc[:-len('.cc')]
  for test_suffix in ('_unittest', '_test'):
    if stem_cc.endswith(test_suffix):
      stem_cc = stem_cc[:-len(test_suffix)]
      break
  stem_cc = stem_cc.replace('/public/', '/').replace('/internal/', '/')

  if not filename_h.endswith('.h'):
    return (False, '')
  # Reduce the header path the same way, also dropping an '-inl' suffix.
  stem_h = filename_h[:-len('.h')]
  if stem_h.endswith('-inl'):
    stem_h = stem_h[:-len('-inl')]
  stem_h = stem_h.replace('/public/', '/').replace('/internal/', '/')

  if not stem_cc.endswith(stem_h):
    return (False, '')
  # The leftover prefix is what a caller must prepend to open the header.
  return (True, stem_cc[:-len(stem_h)])
def UpdateIncludeState(filename, include_state, io=codecs):
  """Fill up the include_state with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_state: an _IncludeState instance in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was succesfully added. False otherwise.
  """
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    # Unreadable header: report failure so the caller can skip it.
    return False
  for linenum, raw_line in enumerate(headerfile, 1):
    # Strip comments before scanning for #include directives.
    include_match = _RE_PATTERN_INCLUDE.search(CleanseComments(raw_line))
    if include_match:
      # First sighting wins: keep the earliest recorded location.
      include_state.setdefault(include_match.group(2), ('%s:%d' % (filename, linenum)))
  return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
      injection.
  """
  # Maps required header name -> (line number where needed, template entity).
  required = {}
  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    # Skip blank lines and preprocessor directives.
    if ((not line) or (line[0] == '#')):
      continue
    # 'string' is special: it's a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces: the prefix must be
      # 'std::' or no namespace qualifier at all.
      prefix = line[:matched.start()]
      if (prefix.endswith('std::') or (not prefix.endswith('::'))):
        required['<string>'] = (linenum, 'string')
    for (pattern, template, header) in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)
    # Template usage requires a '<' somewhere; this test is just a speed-up,
    # no semantics are changed.
    if (not ('<' in line)):
      continue
    for (pattern, template, header) in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)
  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc.  Work on a copy so the caller's state isn't
  # polluted by headers pulled in below.
  include_state = include_state.copy()
  # Did we find the matching header for this file and load it successfully?
  header_found = False
  # Use the absolute path so module matching works properly.
  abs_filename = FileInfo(filename).FullName()
  # For Emacs's flymake: map foo_flymake.cc back to foo.cc.
  abs_filename = re.sub('_flymake\\.cc$', '.cc', abs_filename)
  # Iterate over a snapshot of the keys since UpdateIncludeState mutates
  # include_state during the loop.
  header_keys = include_state.keys()
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = (common_path + header)
    if (same_module and UpdateIncludeState(fullpath, include_state, io)):
      header_found = True
  # If we can't find the header for a .cc file, assume it's because we don't
  # know where to look; give up rather than risk false positives, since the
  # include may live in the .h file.
  if (filename.endswith('.cc') and (not header_found)):
    return
  # All lines processed; report each required header that was never included.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if (required_header_unstripped.strip('<>"') not in include_state):
      error(filename, required[required_header_unstripped][0], 'build/include_what_you_use', 4,
            ((('Add #include ' + required_header_unstripped) + ' for ') + template))
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Flag any explicit "make_pair<...>" on the comment-stripped line.
  if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(clean_lines.elided[linenum]):
    error(filename, linenum, 'build/explicit_make_pair', 4,
          'For C++11-compatibility, omit template arguments from make_pair OR use pair directly OR if appropriate, construct a pair directly')
def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=None):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
      with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A _NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
      filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
      run on each source line. Each function takes 4 arguments:
      filename, clean_lines, line, error
  """
  # Fix: default was a shared mutable list ([]); use a None sentinel to avoid
  # the mutable-default-argument pitfall.  Behavior is unchanged for callers.
  if extra_check_functions is None:
    extra_check_functions = []
  raw_lines = clean_lines.raw_lines
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  # Inside inline assembly blocks the normal C++ checks do not apply.
  if (nesting_state.stack and (nesting_state.stack[(- 1)].inline_asm != _NO_ASM)):
    return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  # Caffe-specific checks.
  CheckCaffeAlternatives(filename, clean_lines, line, error)
  CheckCaffeDataLayerSetUp(filename, clean_lines, line, error)
  CheckCaffeRandom(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  # Caller-supplied checks run last.
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
      last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
      filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
      run on each source line. Each function takes 4 arguments:
      filename, clean_lines, line, error
  """
  # Fix: default was a shared mutable list ([]); use a None sentinel to avoid
  # the mutable-default-argument pitfall.  Behavior is unchanged for callers.
  if extra_check_functions is None:
    extra_check_functions = []
  # Pad with marker lines so line numbers and list indices both start at 1
  # and EOF handling is uniform.
  lines = ((['// marker so line numbers and indices both start at 1'] + lines) + ['// marker so line numbers end in a known way'])
  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = _NestingState()
  ResetNolintSuppressions()
  CheckForCopyright(filename, lines, error)
  if (file_extension == 'h'):
    CheckForHeaderGuard(filename, lines, error)
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions)
  nesting_state.CheckCompletedBlocks(filename, error)
  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
  # These checks need the original (uncleansed) lines.
  CheckForBadCharacters(filename, lines, error)
  CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report. Every error of confidence
      >= verbose_level will be reported. 0 is a good default.

    extra_check_functions: An array of additional check functions that will be
      run on each source line. Each function takes 4 arguments:
      filename, clean_lines, line, error
  """
  # Fix: default was a shared mutable list ([]); use a None sentinel to avoid
  # the mutable-default-argument pitfall.  Behavior is unchanged for callers.
  if extra_check_functions is None:
    extra_check_functions = []
  _SetVerboseLevel(vlevel)
  try:
    # Support reading from stdin via '-'.  Decode as UTF-8 with replacement
    # so linting can proceed even on files with encoding problems.
    if (filename == '-'):
      lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
    # Strip trailing '\r' so CRLF files lint like LF files, but remember we
    # saw one so a single warning can be issued at the end.
    carriage_return_found = False
    for linenum in range(len(lines)):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        carriage_return_found = True
  except IOError:
    sys.stderr.write(("Skipping input '%s': Can't open for reading\n" % filename))
    return
  # Note: if no dot is found, rfind returns -1 and this yields the whole
  # filename as the "extension", which then fails the validity check below.
  file_extension = filename[(filename.rfind('.') + 1):]
  if ((filename != '-') and (file_extension not in _valid_extensions)):
    sys.stderr.write(('Ignoring %s; not a valid file name (%s)\n' % (filename, ', '.join(_valid_extensions))))
  else:
    ProcessFileData(filename, file_extension, lines, Error, extra_check_functions)
    # Emit only one warning no matter how many lines had a trailing '\r',
    # and only on platforms where CRLF is not the native line ending.
    if (carriage_return_found and (os.linesep != '\r\n')):
      Error(filename, 0, 'whitespace/newline', 1,
            'One or more unexpected \\r (^M) found;better to use only a \\n')
  sys.stderr.write(('Done processing %s\n' % filename))