id         int64   0 – 190k
prompt     string  lengths 21 – 13.4M
docstring  string  lengths 1 – 12k
185,797
from __future__ import division
from __future__ import print_function

import time
import os
import sys
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform

bandwidth_tasks = [task_pidigits]

def run_bandwidth_test(func, args, nthreads):
    # Create a listening socket to receive the packets. We use UDP which should
    # be painlessly cross-platform.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.bind(("127.0.0.1", 0))
        addr = sock.getsockname()

        duration = BANDWIDTH_DURATION
        packet_size = BANDWIDTH_PACKET_SIZE

        results = []
        threads = []
        end_event = []
        start_cond = threading.Condition()
        started = False
        if nthreads > 0:
            # Warm up
            func(*args)

            results = []
            loop = TimedLoop(func, args)
            ready = []
            ready_cond = threading.Condition()

            def run():
                with ready_cond:
                    ready.append(None)
                    ready_cond.notify()
                with start_cond:
                    while not started:
                        start_cond.wait()
                loop(start_time, duration * 1.5, end_event, do_yield=False)

            for i in range(nthreads):
                threads.append(threading.Thread(target=run))
            for t in threads:
                t.daemon = True
                t.start()
            # Wait for threads to be ready
            with ready_cond:
                while len(ready) < nthreads:
                    ready_cond.wait()

        # Run the client and wait for the first packet to arrive before
        # unblocking the background threads.
        process = run_bandwidth_client(addr=addr,
                                       packet_size=packet_size,
                                       duration=duration)
        _time = time.time
        # This will also wait for the parent to be ready
        s = _recv(sock, packet_size)
        remote_addr = eval(s.partition('#')[0])

        with start_cond:
            start_time = _time()
            started = True
            start_cond.notify(nthreads)

        n = 0
        first_time = None
        while not end_event and BW_END not in s:
            _sendto(sock, s, remote_addr)
            s = _recv(sock, packet_size)
            if first_time is None:
                first_time = _time()
            n += 1
        end_time = _time()

    end_event.append(None)
    for t in threads:
        t.join()
    process.kill()

    return (n - 1) / (end_time - first_time)

def run_bandwidth_tests(max_threads):
    for task in bandwidth_tasks:
        print("Background CPU task:", task.__doc__)
        print()
        func, args = task()
        nthreads = 0
        baseline_speed = None
        while nthreads <= max_threads:
            results = run_bandwidth_test(func, args, nthreads)
            speed = results
            #speed = len(results) * 1.0 / results[-1][0]
            print("CPU threads=%d: %.1f" % (nthreads, speed), end="")
            if baseline_speed is None:
                print(" packets/s.")
                baseline_speed = speed
            else:
                print(" ( %d %%)" % (speed / baseline_speed * 100))
            nthreads += 1
        print()
null
185,799
from pegen import grammar
from pegen.grammar import (
    Alt,
    Cut,
    Gather,
    GrammarVisitor,
    Group,
    Lookahead,
    NamedItem,
    NameLeaf,
    NegativeLookahead,
    Opt,
    PositiveLookahead,
    Repeat0,
    Repeat1,
    Rhs,
    Rule,
    StringLeaf,
)

class GrammarValidator(GrammarVisitor):
    def __init__(self, grammar: grammar.Grammar):
        self.grammar = grammar
        self.rulename = None

    def validate_rule(self, rulename: str, node: Rule):
        self.rulename = rulename
        self.visit(node)
        self.rulename = None

def validate_grammar(the_grammar: grammar.Grammar):
    for validator_cls in GrammarValidator.__subclasses__():
        validator = validator_cls(the_grammar)
        for rule_name, rule in the_grammar.rules.items():
            validator.validate_rule(rule_name, rule)
null
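A hedged sketch of how a concrete check could plug into the validator above: `validate_grammar` discovers subclasses of `GrammarValidator` via `__subclasses__()`, and `GrammarVisitor` dispatches `visit(node)` to `visit_<ClassName>` methods in the usual visitor style. The validator below is hypothetical, not part of pegen:

class NoCutValidator(GrammarValidator):
    """Hypothetical check: reject any rule that uses a cut (~)."""

    def visit_Cut(self, node: Cut) -> None:
        # self.rulename was set by validate_rule() before visiting started.
        raise ValueError(f"rule {self.rulename!r} uses a cut")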
185,804
import argparse
import sys
import time
import token
import traceback
from typing import Tuple

from pegen.build import Grammar, Parser, Tokenizer, ParserGenerator
from pegen.validator import validate_grammar

Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
    """Tuple type; Tuple[X, Y] is the cross-product type of X and Y.

    Example: Tuple[T1, T2] is a tuple of two elements corresponding
    to type variables T1 and T2.  Tuple[int, float, str] is a tuple
    of an int, a float and a string.

    To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
    """

def build_c_parser_and_generator(
    grammar_file: str,
    tokens_file: str,
    output_file: str,
    compile_extension: bool = False,
    verbose_tokenizer: bool = False,
    verbose_parser: bool = False,
    verbose_c_extension: bool = False,
    keep_asserts_in_extension: bool = True,
    skip_actions: bool = False,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
    """Generate rules, C parser, tokenizer, parser generator for a given grammar

    Args:
        grammar_file (string): Path for the grammar file
        tokens_file (string): Path for the tokens file
        output_file (string): Path for the output file
        compile_extension (bool, optional): Whether to compile the C extension.
            Defaults to False.
        verbose_tokenizer (bool, optional): Whether to display additional output
            when generating the tokenizer. Defaults to False.
        verbose_parser (bool, optional): Whether to display additional output
            when generating the parser. Defaults to False.
        verbose_c_extension (bool, optional): Whether to display additional
            output when compiling the C extension. Defaults to False.
        keep_asserts_in_extension (bool, optional): Whether to keep the assert
            statements when compiling the extension module. Defaults to True.
        skip_actions (bool, optional): Whether to pretend no rule has any actions.
    """
    grammar, parser, tokenizer = build_parser(grammar_file, verbose_tokenizer, verbose_parser)
    gen = build_c_generator(
        grammar,
        grammar_file,
        tokens_file,
        output_file,
        compile_extension,
        verbose_c_extension,
        keep_asserts_in_extension,
        skip_actions=skip_actions,
    )
    return grammar, parser, tokenizer, gen

def generate_c_code(
    args: argparse.Namespace,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
    from pegen.build import build_c_parser_and_generator

    verbose = args.verbose
    verbose_tokenizer = verbose >= 3
    verbose_parser = verbose == 2 or verbose >= 4
    try:
        grammar, parser, tokenizer, gen = build_c_parser_and_generator(
            args.grammar_filename,
            args.tokens_filename,
            args.output,
            args.compile_extension,
            verbose_tokenizer,
            verbose_parser,
            args.verbose,
            keep_asserts_in_extension=False if args.optimized else True,
            skip_actions=args.skip_actions,
        )
        return grammar, parser, tokenizer, gen
    except Exception as err:
        if args.verbose:
            raise  # Show traceback
        traceback.print_exception(err.__class__, err, None)
        sys.stderr.write("For full traceback, use -v\n")
        sys.exit(1)
null
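A minimal invocation sketch for the builder above. The file paths are illustrative placeholders, and `compile_extension` is left at its default so no C toolchain is needed:

# Hypothetical paths; point these at a real pegen checkout.
grammar, parser, tokenizer, gen = build_c_parser_and_generator(
    grammar_file="data/python.gram",
    tokens_file="data/Tokens",
    output_file="parse.c",
)
print(f"generated {len(grammar.rules)} rules")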
185,805
import argparse
import sys
import time
import token
import traceback
from typing import Tuple

from pegen.build import Grammar, Parser, Tokenizer, ParserGenerator
from pegen.validator import validate_grammar

Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
    """Tuple type; Tuple[X, Y] is the cross-product type of X and Y.

    Example: Tuple[T1, T2] is a tuple of two elements corresponding
    to type variables T1 and T2.  Tuple[int, float, str] is a tuple
    of an int, a float and a string.

    To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
    """

def build_python_parser_and_generator(
    grammar_file: str,
    output_file: str,
    verbose_tokenizer: bool = False,
    verbose_parser: bool = False,
    skip_actions: bool = False,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:

def generate_python_code(
    args: argparse.Namespace,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
    from pegen.build import build_python_parser_and_generator

    verbose = args.verbose
    verbose_tokenizer = verbose >= 3
    verbose_parser = verbose == 2 or verbose >= 4
    try:
        grammar, parser, tokenizer, gen = build_python_parser_and_generator(
            args.grammar_filename,
            args.output,
            verbose_tokenizer,
            verbose_parser,
            skip_actions=args.skip_actions,
        )
        return grammar, parser, tokenizer, gen
    except Exception as err:
        if args.verbose:
            raise  # Show traceback
        traceback.print_exception(err.__class__, err, None)
        sys.stderr.write("For full traceback, use -v\n")
        sys.exit(1)
null
185,808
def ast_dump(node, annotate_fields=True, include_attributes=False, *, indent=None):
    def _format(node, level=0):
        if indent is not None:
            level += 1
            prefix = "\n" + indent * level
            sep = ",\n" + indent * level
        else:
            prefix = ""
            sep = ", "
        if any(cls.__name__ == "AST" for cls in node.__class__.__mro__):
            cls = type(node)
            args = []
            allsimple = True
            keywords = annotate_fields
            for name in node._fields:
                try:
                    value = getattr(node, name)
                except AttributeError:
                    keywords = True
                    continue
                if value is None and getattr(cls, name, ...) is None:
                    keywords = True
                    continue
                value, simple = _format(value, level)
                allsimple = allsimple and simple
                if keywords:
                    args.append("%s=%s" % (name, value))
                else:
                    args.append(value)
            if include_attributes and node._attributes:
                for name in node._attributes:
                    try:
                        value = getattr(node, name)
                    except AttributeError:
                        continue
                    if value is None and getattr(cls, name, ...) is None:
                        continue
                    value, simple = _format(value, level)
                    allsimple = allsimple and simple
                    args.append("%s=%s" % (name, value))
            if allsimple and len(args) <= 3:
                return "%s(%s)" % (node.__class__.__name__, ", ".join(args)), not args
            return "%s(%s%s)" % (node.__class__.__name__, prefix, sep.join(args)), False
        elif isinstance(node, list):
            if not node:
                return "[]", True
            return "[%s%s]" % (prefix, sep.join(_format(x, level)[0] for x in node)), False
        return repr(node), True

    if all(cls.__name__ != "AST" for cls in node.__class__.__mro__):
        raise TypeError("expected AST, got %r" % node.__class__.__name__)
    if indent is not None and not isinstance(indent, str):
        indent = " " * indent
    return _format(node)[0]
null
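Since `ast_dump` mirrors the stdlib `ast.dump`, a quick usage sketch (output shape follows that API):

import ast

tree = ast.parse("x = 1")
print(ast_dump(tree))            # compact: Module(body=[Assign(...)], type_ignores=[])
print(ast_dump(tree, indent=4))  # same structure, one field per line, 4-space indent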
185,810
import argparse
import sys
from typing import Any, List

from pegen.build import build_parser
from pegen.grammar import (
    Alt,
    Cut,
    Forced,
    Grammar,
    Group,
    Leaf,
    Lookahead,
    Rule,
    NameLeaf,
    NamedItem,
    Opt,
    Repeat,
    Rhs,
)

def Any(self, parameters):

List = _alias(list, 1, inst=False, name='List')

class Rule:
    def __init__(self, name: str, type: Optional[str], rhs: Rhs, memo: Optional[object] = None):
    def is_loop(self) -> bool:
    def is_gather(self) -> bool:
    def __str__(self) -> str:
    def __repr__(self) -> str:
    def __iter__(self) -> Iterator[Rhs]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:
    def flatten(self) -> Rhs:
    def collect_todo(self, gen: ParserGenerator) -> None:

class Leaf:
    def __init__(self, value: str):
    def __str__(self) -> str:
    def __iter__(self) -> Iterable[str]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:

class NameLeaf(Leaf):
    def __str__(self) -> str:
    def __repr__(self) -> str:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:

class Rhs:
    def __init__(self, alts: List[Alt]):
    def __str__(self) -> str:
    def __repr__(self) -> str:
    def __iter__(self) -> Iterator[List[Alt]]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:
    def collect_todo(self, gen: ParserGenerator) -> None:

class Alt:
    def __init__(self, items: List[NamedItem], *, icut: int = -1, action: Optional[str] = None):
    def __str__(self) -> str:
    def __repr__(self) -> str:
    def __iter__(self) -> Iterator[List[NamedItem]]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:
    def collect_todo(self, gen: ParserGenerator) -> None:

class NamedItem:
    def __init__(self, name: Optional[str], item: Item, type: Optional[str] = None):
    def __str__(self) -> str:
    def __repr__(self) -> str:
    def __iter__(self) -> Iterator[Item]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:
    def collect_todo(self, gen: ParserGenerator) -> None:

class Forced:
    def __init__(self, node: Plain):
    def __str__(self) -> str:
    def __iter__(self) -> Iterator[Plain]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:

class Lookahead:
    def __init__(self, node: Plain, sign: str):
    def __str__(self) -> str:
    def __iter__(self) -> Iterator[Plain]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:

class Opt:
    def __init__(self, node: Item):
    def __str__(self) -> str:
    def __repr__(self) -> str:
    def __iter__(self) -> Iterator[Item]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:

class Repeat:
    def __init__(self, node: Plain):
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def __iter__(self) -> Iterator[Plain]:
    def initial_names(self) -> AbstractSet[str]:

class Group:
    def __init__(self, rhs: Rhs):
    def __str__(self) -> str:
    def __repr__(self) -> str:
    def __iter__(self) -> Iterator[Rhs]:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:

class Cut:
    def __init__(self) -> None:
    def __repr__(self) -> str:
    def __str__(self) -> str:
    def __iter__(self) -> Iterator[Tuple[str, str]]:
    def __eq__(self, other: object) -> bool:
    def nullable_visit(self, rules: Dict[str, Rule]) -> bool:
    def initial_names(self) -> AbstractSet[str]:

def references_for_item(item: Any) -> List[Any]:
    if isinstance(item, Alt):
        return [_ref for _item in item.items for _ref in references_for_item(_item)]
    elif isinstance(item, Cut):
        return []
    elif isinstance(item, Forced):
        return references_for_item(item.node)
    elif isinstance(item, Group):
        return references_for_item(item.rhs)
    elif isinstance(item, Lookahead):
        return references_for_item(item.node)
    elif isinstance(item, NamedItem):
        return references_for_item(item.item)
    # NOTE NameLeaf must be before Leaf
    elif isinstance(item, NameLeaf):
        if item.value == "ENDMARKER":
            return []
        return [item.value]
    elif isinstance(item, Leaf):
        return []
    elif isinstance(item, Opt):
        return references_for_item(item.node)
    elif isinstance(item, Repeat):
        return references_for_item(item.node)
    elif isinstance(item, Rhs):
        return [_ref for alt in item.alts for _ref in references_for_item(alt)]
    elif isinstance(item, Rule):
        return references_for_item(item.rhs)
    else:
        raise RuntimeError(f"Unknown item: {type(item)}")
null
185,811
import sys
import ast

FAIL = "\033[91m"
ENDC = "\033[0m"

def check_nested_expr(nesting_depth: int) -> bool:
    expr = f"{'(' * nesting_depth}0{')' * nesting_depth}"
    try:
        ast.parse(expr)
        print(f"Nesting depth of {nesting_depth} is successful")
        return True
    except Exception as err:
        print(f"{FAIL}Failed with nesting depth of {nesting_depth}{ENDC}")
        print(f"{FAIL}\t{err}{ENDC}")
        return False
null
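One way to drive `check_nested_expr`, probing until the parser gives out (the depth at which parsing fails varies by platform and recursion settings, so treat the numbers as illustrative):

# Probe increasing nesting depths; stop at the first parser failure.
for depth in (10, 100, 1000, 10000):
    if not check_nested_expr(depth):
        break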
185,812
import argparse
import ast
import sys
import os
from time import time

try:
    import memory_profiler
except ModuleNotFoundError:
    print(
        "Please run `make venv` to create a virtual environment and install"
        " all the dependencies, before running this script."
    )
    sys.exit(1)

from scripts.test_parse_directory import parse_directory

def benchmark(func):
    def wrapper(*args):
        times = list()
        for _ in range(3):
            start = time()
            result = func(*args)
            end = time()
            times.append(end - start)
        memory = memory_profiler.memory_usage((func, args))
        print(f"{func.__name__}")
        print(f"\tTime: {sum(times)/3:.3f} seconds on an average of 3 runs")
        print(f"\tMemory: {max(memory)} MiB on an average of 3 runs")
        return result
    return wrapper
null
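A usage sketch for the `benchmark` decorator above. The workload and path are hypothetical; note that `memory_profiler.memory_usage` runs the callable once more to sample memory:

@benchmark
def parse_source(path):
    # Hypothetical workload: parse one file into an AST.
    with open(path) as f:
        return ast.parse(f.read())

parse_source("../../Lib/ast.py")  # prints the averaged time and peak memory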
185,813
import argparse
import ast
import sys
import os
from time import time

from scripts.test_parse_directory import parse_directory

def time_compile(source):
    return compile(source, "<string>", "exec")

def time_parse(source):
    return ast.parse(source)

def run_benchmark_xxl(subcommand, source):
    if subcommand == "compile":
        time_compile(source)
    elif subcommand == "parse":
        time_parse(source)
null
185,814
import argparse
import ast
import sys
import os
from time import time

from scripts.test_parse_directory import parse_directory

def parse_directory(directory: str, verbose: bool, excluded_files: List[str], short: bool) -> int:

def run_benchmark_stdlib(subcommand):
    modes = {"compile": 2, "parse": 1}
    for _ in range(3):
        parse_directory(
            "../../Lib",
            verbose=False,
            excluded_files=["*/bad*", "*/lib2to3/tests/data/*"],
            short=True,
            mode=modes[subcommand],
        )
null
185,815
import argparse
import os
import json
from typing import Dict, Any
from urllib.request import urlretrieve

def Any(self, parameters):
    """Special type indicating an unconstrained type.

    - Any is compatible with every type.
    - Any assumed to have all methods.
    - All values assumed to be instances of Any.

    Note that all the above statements are true from the point of view of
    static type checkers. At runtime, Any should not be used with instance
    or class checks.
    """
    raise TypeError(f"{self} is not subscriptable")

Dict = _alias(dict, 2, inst=False, name='Dict')

def load_json(filename: str) -> Dict[Any, Any]:
    with open(os.path.join("data", f"{filename}.json"), "r") as f:
        j = json.loads(f.read())
    return j
null
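A usage sketch for `load_json`, assuming a `data/contributors.json` file exists relative to the working directory (the filename is illustrative):

# Loads data/contributors.json and returns the decoded object.
contributors = load_json("contributors")
print(type(contributors), len(contributors))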
185,834
import abc
import ast
import collections
import contextlib
import copy
import cpp
import functools
import hashlib
import inspect
import io
import itertools
import os
import pprint
import re
import shlex
import string
import sys
import tempfile
import textwrap
import traceback
import types
from types import *

def fail(*args, filename=None, line_number=None):

class BlockParser:
    def __init__(self, input, language, *, verify=True):
    def __iter__(self):
    def __next__(self):
    def is_start_line(self, line):
    def _line(self, lookahead=False):
    def parse_verbatim_block(self):
    def parse_clinic_block(self, dsl_name):
        def is_stop_line(line):

extensions = { name: CLanguage for name in "c cc cpp cxx h hh hpp hxx".split() }
extensions['py'] = PythonLanguage

def write_file(filename, new_contents):

clinic = None

class Clinic:
    def __init__(self, language, printer=None, *, verify=True, filename=None):
    def add_destination(self, name, type, *args):
    def get_destination(self, name):
    def get_destination_buffer(self, name, item=0):
    def parse(self, input):
    def _module_and_class(self, fields):

clinic = None

def parse_file(filename, *, verify=True, output=None):
    if not output:
        output = filename

    extension = os.path.splitext(filename)[1][1:]
    if not extension:
        fail("Can't extract file type for file " + repr(filename))

    try:
        language = extensions[extension](filename)
    except KeyError:
        fail("Can't identify file type for file " + repr(filename))

    with open(filename, 'r', encoding="utf-8") as f:
        raw = f.read()

    # exit quickly if there are no clinic markers in the file
    find_start_re = BlockParser("", language).find_start_re
    if not find_start_re.search(raw):
        return

    clinic = Clinic(language, verify=verify, filename=filename)
    cooked = clinic.parse(raw)
    write_file(output, cooked)
null
185,843
from collections import namedtuple
import logging
import os
import os.path
import re
import textwrap

from c_common.tables import build_table, resolve_columns
from c_parser.parser._regexes import _ind
from ._files import iter_header_files, resolve_filename
from . import REPO_ROOT

CAPI_RE = re.compile(textwrap.dedent(rf'''
    (?:
        {_ind(CAPI_FUNC, 2)}
        |
        {_ind(CAPI_DATA, 2)}
        |
        {_ind(CAPI_INLINE, 2)}
        |
        {_ind(CAPI_DEFINE, 2)}
    )
'''), re.VERBOSE)

KINDS = [
    'func',
    'data',
    'inline',
    'macro',
    'constant',
]

def _parse_line(line, prev=None):
    last = line
    if prev:
        if not prev.endswith(os.linesep):
            prev += os.linesep
        line = prev + line
    m = CAPI_RE.match(line)
    if not m:
        if not prev and line.startswith('static inline '):
            return line  # the new "prev"
        #if 'PyAPI_' in line or '#define ' in line or ' define ' in line:
        #    print(line)
        return None
    results = zip(KINDS, m.groups())
    for kind, name in results:
        if name:
            clean = last.split('//')[0].rstrip()
            if clean.endswith('*/'):
                clean = clean.split('/*')[0].rstrip()

            if kind == 'macro' or kind == 'constant':
                if not clean.endswith('\\'):
                    return name, kind
            elif kind == 'inline':
                if clean.endswith('}'):
                    if not prev or clean == '}':
                        return name, kind
            elif kind == 'func' or kind == 'data':
                if clean.endswith(';'):
                    return name, kind
            else:
                # This should not be reached.
                raise NotImplementedError
            return line  # the new "prev"
    # It was a plain #define.
    return None
null
185,844
from collections import namedtuple
import logging
import os
import os.path
import re
import textwrap

from c_common.tables import build_table, resolve_columns
from c_parser.parser._regexes import _ind
from ._files import iter_header_files, resolve_filename
from . import REPO_ROOT

INCLUDE_ROOT = os.path.join(REPO_ROOT, 'Include')
INCLUDE_CPYTHON = os.path.join(INCLUDE_ROOT, 'cpython')
INCLUDE_INTERNAL = os.path.join(INCLUDE_ROOT, 'internal')

def _get_level(filename, name, *,
               _cpython=INCLUDE_CPYTHON + os.path.sep,
               _internal=INCLUDE_INTERNAL + os.path.sep,
               ):
    if filename.startswith(_internal):
        return 'internal'
    elif name.startswith('_'):
        return 'private'
    elif os.path.dirname(filename) == INCLUDE_ROOT:
        return 'stable'
    elif filename.startswith(_cpython):
        return 'cpython'
    else:
        raise NotImplementedError
        #return '???'
null
185,845
from collections import namedtuple
import logging
import os
import os.path
import re
import textwrap

from c_common.tables import build_table, resolve_columns
from c_parser.parser._regexes import _ind
from ._files import iter_header_files, resolve_filename
from . import REPO_ROOT

def _collate(items, groupby, includeempty):
    groupby = _parse_groupby(groupby)[0]
    maxfilename = maxname = maxkind = maxlevel = 0

    collated = {}
    groups = GROUPINGS[groupby]
    for group in groups:
        collated[group] = []
    for item in items:
        key = getattr(item, groupby)
        collated[key].append(item)
        maxfilename = max(len(item.relfile), maxfilename)
        maxname = max(len(item.name), maxname)
        maxkind = max(len(item.kind), maxkind)
        maxlevel = max(len(item.level), maxlevel)
    if not includeempty:
        for group in groups:
            if not collated[group]:
                del collated[group]
    maxextra = {
        'kind': maxkind,
        'level': maxlevel,
    }
    return collated, groupby, maxfilename, maxname, maxextra

def _get_sortkey(sort, _groupby, _columns):
    if sort is True or sort is None:
        # For now:
        def sortkey(item):
            return (
                item.level == 'private',
                LEVELS.index(item.level),
                KINDS.index(item.kind),
                os.path.dirname(item.file),
                os.path.basename(item.file),
                item.name,
            )
        return sortkey

        sortfields = 'not-private level kind dirname basename name'.split()
    elif isinstance(sort, str):
        sortfields = sort.replace(',', ' ').strip().split()
    elif callable(sort):
        return sort
    else:
        raise NotImplementedError

    # XXX Build a sortkey func from sortfields.
    raise NotImplementedError

def _render_item_full(item, groupby, verbose):
    yield item.name
    yield f'   {"filename:":10} {item.relfile}'
    for extra in ('kind', 'level'):
        #if groupby != extra:
        yield f'   {extra+":":10} {getattr(item, extra)}'
    if verbose:
        print('  ---------------------------------------')
        for lno, line in enumerate(item.text, item.lno):
            print(f'  | {lno:3} {line}')
        print('  ---------------------------------------')

def render_full(items, *,
                groupby='kind',
                sort=None,
                showempty=None,
                verbose=False,
                ):
    if groupby is None:
        groupby = 'kind'
    if showempty is None:
        showempty = False
    if sort:
        sortkey = _get_sortkey(sort, groupby, None)
    if groupby:
        collated, groupby, _, _, _ = _collate(items, groupby, showempty)
        for group, grouped in collated.items():
            yield '#' * 25
            yield f'# {group} ({len(grouped)})'
            yield '#' * 25
            yield ''
            if not grouped:
                continue
            if sort:
                grouped = sorted(grouped, key=sortkey)
            for item in grouped:
                yield from _render_item_full(item, groupby, verbose)
                yield ''
    else:
        if sort:
            items = sorted(items, key=sortkey)
        for item in items:
            yield from _render_item_full(item, None, verbose)
            yield ''
null
185,846
from collections import namedtuple
import logging
import os
import os.path
import re
import textwrap

from c_common.tables import build_table, resolve_columns
from c_parser.parser._regexes import _ind
from ._files import iter_header_files, resolve_filename
from . import REPO_ROOT

def summarize(items, *, groupby='kind', includeempty=True, minimize=None):
    if minimize is None:
        if includeempty is None:
            minimize = True
            includeempty = False
        else:
            minimize = includeempty
    elif includeempty is None:
        includeempty = minimize
    elif minimize and includeempty:
        raise ValueError(f'cannot minimize and includeempty at the same time')

    groupby = _parse_groupby(groupby)[0]
    _outer, _inner = _resolve_full_groupby(groupby)
    outers = GROUPINGS[_outer]
    inners = GROUPINGS[_inner]

    summary = {
        'totals': {
            'all': 0,
            'subs': {o: 0 for o in outers},
            'bygroup': {o: {i: 0 for i in inners} for o in outers},
        },
    }

    for item in items:
        outer = getattr(item, _outer)
        inner = getattr(item, _inner)
        # Update totals.
        summary['totals']['all'] += 1
        summary['totals']['subs'][outer] += 1
        summary['totals']['bygroup'][outer][inner] += 1

    if not includeempty:
        subtotals = summary['totals']['subs']
        bygroup = summary['totals']['bygroup']
        for outer in outers:
            if subtotals[outer] == 0:
                del subtotals[outer]
                del bygroup[outer]
                continue
            for inner in inners:
                if bygroup[outer][inner] == 0:
                    del bygroup[outer][inner]
            if minimize:
                if len(bygroup[outer]) == 1:
                    del bygroup[outer]

    return summary

def render_summary(items, *,
                   groupby='kind',
                   sort=None,
                   showempty=None,
                   verbose=False,
                   ):
    if groupby is None:
        groupby = 'kind'
    summary = summarize(
        items,
        groupby=groupby,
        includeempty=showempty,
        minimize=None if showempty else not verbose,
    )

    subtotals = summary['totals']['subs']
    bygroup = summary['totals']['bygroup']

    lastempty = False
    for outer, subtotal in subtotals.items():
        if bygroup:
            subtotal = f'({subtotal})'
            yield f'{outer + ":":20} {subtotal:>8}'
        else:
            yield f'{outer + ":":10} {subtotal:>8}'
        if outer in bygroup:
            for inner, count in bygroup[outer].items():
                yield f'   {inner + ":":9} {count}'
            lastempty = False
        else:
            lastempty = True

    total = f'*{summary["totals"]["all"]}*'
    label = '*total*:'
    if bygroup:
        yield f'{label:20} {total:>8}'
    else:
        yield f'{label:10} {total:>9}'
null
185,847
import os.path
import re

from c_common.clsutil import classonly
from c_parser.info import (
    KIND, DeclID, Declaration, TypeDeclaration,
    TypeDef, Struct, Member, FIXED_TYPE,
)
from c_parser.match import (
    is_type_decl, is_pots, is_funcptr,
)
from c_analyzer.match import (
    is_system_type, is_process_global, is_fixed_type, is_immutable,
)
import c_analyzer as _c_analyzer
import c_analyzer.info as _info
import c_analyzer.datafiles as _datafiles
from . import _parser, REPO_ROOT

IGNORED_FILE = os.path.join(_DATA_DIR, 'ignored.tsv')

def write_known():
    raise NotImplementedError
    datafiles.write_known(decls, IGNORED_FILE, ['unsupported'],
                          relroot=REPO_ROOT)
null
185,848
import os.path
import re

from c_common.clsutil import classonly
from c_parser.info import (
    KIND, DeclID, Declaration, TypeDeclaration,
    TypeDef, Struct, Member, FIXED_TYPE,
)
from c_parser.match import (
    is_type_decl, is_pots, is_funcptr,
)
from c_analyzer.match import (
    is_system_type, is_process_global, is_fixed_type, is_immutable,
)
import c_analyzer as _c_analyzer
import c_analyzer.info as _info
import c_analyzer.datafiles as _datafiles
from . import _parser, REPO_ROOT

IGNORED_FILE = os.path.join(_DATA_DIR, 'ignored.tsv')

def write_ignored():
    raise NotImplementedError
    _datafiles.write_ignored(variables, IGNORED_FILE, relroot=REPO_ROOT)
null
185,849
import os.path
import re

from c_common.clsutil import classonly
from c_parser.info import (
    KIND, DeclID, Declaration, TypeDeclaration,
    TypeDef, Struct, Member, FIXED_TYPE,
)
from c_parser.match import (
    is_type_decl, is_pots, is_funcptr,
)
from c_analyzer.match import (
    is_system_type, is_process_global, is_fixed_type, is_immutable,
)
import c_analyzer as _c_analyzer
import c_analyzer.info as _info
import c_analyzer.datafiles as _datafiles
from . import _parser, REPO_ROOT

def read_ignored():
    if not _IGNORED:
        _IGNORED.update(_datafiles.read_ignored(IGNORED_FILE, relroot=REPO_ROOT))
    return dict(_IGNORED)

class KIND(enum.Enum):
    # XXX Use these in the raw parser code.
    TYPEDEF = 'typedef'
    STRUCT = 'struct'
    UNION = 'union'
    ENUM = 'enum'
    FUNCTION = 'function'
    VARIABLE = 'variable'
    STATEMENT = 'statement'

    def _from_raw(cls, raw):
        if raw is None:
            return None
        elif isinstance(raw, cls):
            return raw
        elif type(raw) is str:
            # We could use cls[raw] for the upper-case form,
            # but there's no need to go to the trouble.
            return cls(raw.lower())
        else:
            raise NotImplementedError(raw)

    def by_priority(cls, group=None):
        if group is None:
            return cls._ALL_BY_PRIORITY.copy()
        elif group == 'type':
            return cls._TYPE_DECLS_BY_PRIORITY.copy()
        elif group == 'decl':
            return cls._ALL_DECLS_BY_PRIORITY.copy()
        elif isinstance(group, str):
            raise NotImplementedError(group)
        else:
            # XXX Treat group as a set of kinds & return in priority order?
            raise NotImplementedError(group)

    def is_type_decl(cls, kind):
        if kind in cls.TYPES:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    def is_decl(cls, kind):
        if kind in cls.DECLS:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    def get_group(cls, kind, *, groups=None):
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        if groups is None:
            groups = ['type']
        elif not groups:
            groups = ()
        elif isinstance(groups, str):
            group = groups
            if group not in cls._GROUPS:
                raise ValueError(f'unsupported group {group!r}')
            groups = [group]
        else:
            unsupported = [g for g in groups if g not in cls._GROUPS]
            if unsupported:
                raise ValueError(f'unsupported groups {", ".join(repr(unsupported))}')
        for group in groups:
            if kind in cls._GROUPS[group]:
                return group
        else:
            return kind.value

    def resolve_group(cls, group):
        if isinstance(group, cls):
            return {group}
        elif isinstance(group, str):
            try:
                return cls._GROUPS[group].copy()
            except KeyError:
                raise ValueError(f'unsupported group {group!r}')
        else:
            resolved = set()
            for gr in group:
                # The original read "resolve.update(...)", an undefined name;
                # "resolved" is clearly the intended accumulator.
                resolved.update(cls.resolve_group(gr))
            return resolved
            #return {*cls.resolve_group(g) for g in group}

KIND._TYPE_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    KIND.TYPEDEF,
    KIND.STRUCT,
    KIND.UNION,
    KIND.ENUM,
]
KIND._ALL_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._TYPE_DECLS_BY_PRIORITY,
    KIND.FUNCTION,
    KIND.VARIABLE,
]
KIND._ALL_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._ALL_DECLS_BY_PRIORITY,
    KIND.STATEMENT,
]

KIND.TYPES = frozenset(KIND._TYPE_DECLS_BY_PRIORITY)
KIND.DECLS = frozenset(KIND._ALL_DECLS_BY_PRIORITY)
KIND._GROUPS = {
    'type': KIND.TYPES,
    'decl': KIND.DECLS,
}
KIND._GROUPS.update((k.value, {k}) for k in KIND)

def check_globals(analysis):
    # yield (data, failure)
    ignored = read_ignored()
    for item in analysis:
        if item.kind != KIND.VARIABLE:
            continue
        if item.supported:
            continue
        if item.id in ignored:
            continue
        reason = item.unsupported
        if not reason:
            reason = '???'
        elif not isinstance(reason, str):
            if len(reason) == 1:
                reason, = reason
            reason = f'({reason})'
        yield item, f'not supported {reason:20}\t{item.storage or ""} {item.vartype}'
null
185,850
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

FILES_KWARGS = dict(excluded=_parser.EXCLUDED, nargs='*')

def add_files_cli(parser, *, excluded=None, nargs=None):
    process_files = add_file_filtering_cli(parser, excluded=excluded)
    parser.add_argument('filenames', nargs=nargs or '+', metavar='FILENAME')
    return [
        process_files,
    ]

def add_kind_filtering_cli(parser, *, default=None):
    parser.add_argument('--kinds', action='append')

    def process_args(args, *, argv=None):
        ns = vars(args)

        kinds = []
        for kind in ns.pop('kinds') or default or ():
            kinds.extend(kind.strip().replace(',', ' ').split())

        if not kinds:
            match_kind = (lambda k: True)
        else:
            included = set()
            excluded = set()
            for kind in kinds:
                if kind.startswith('-'):
                    kind = kind[1:]
                    excluded.add(kind)
                    if kind in included:
                        included.remove(kind)
                else:
                    included.add(kind)
                    if kind in excluded:
                        excluded.remove(kind)
            if excluded:
                if included:
                    ...  # XXX fail?
                def match_kind(kind, *, _excluded=excluded):
                    return kind not in _excluded
            else:
                def match_kind(kind, *, _included=included):
                    return kind in _included
        args.match_kind = match_kind
    return process_args

def _cli_parse(parser):
    process_output = c_parser.add_output_cli(parser)
    process_kind = add_kind_filtering_cli(parser)
    process_preprocessor = c_parser.add_preprocessor_cli(
        parser,
        get_preprocessor=_parser.get_preprocessor,
    )
    process_files = add_files_cli(parser, **FILES_KWARGS)
    return [
        process_output,
        process_kind,
        process_preprocessor,
        process_files,
    ]
null
185,851
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

def _resolve_filenames(filenames):

def cmd_parse(filenames=None, **kwargs):
    filenames = _resolve_filenames(filenames)
    if 'get_file_preprocessor' not in kwargs:
        kwargs['get_file_preprocessor'] = _parser.get_preprocessor()
    c_parser.cmd_parse(
        filenames,
        relroot=REPO_ROOT,
        **kwargs
    )
null
185,852
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

def _resolve_filenames(filenames):
    if filenames:
        resolved = (_files.resolve_filename(f) for f in filenames)
    else:
        resolved = _files.iter_filenames()
    return resolved

CHECKS = dict(c_analyzer.CHECKS, **{
    'globals': _analyzer.check_globals,
})

def cmd_check(filenames=None, **kwargs):
    filenames = _resolve_filenames(filenames)
    kwargs['get_file_preprocessor'] = _parser.get_preprocessor(log_err=print)
    c_analyzer.cmd_check(
        filenames,
        relroot=REPO_ROOT,
        _analyze=_analyzer.analyze,
        _CHECKS=CHECKS,
        **kwargs
    )
null
185,853
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

def _resolve_filenames(filenames):
    if filenames:
        resolved = (_files.resolve_filename(f) for f in filenames)
    else:
        resolved = _files.iter_filenames()
    return resolved

def fmt_summary(analysis):
    # XXX Support sorting and grouping.
    supported = []
    unsupported = []
    for item in analysis:
        if item.supported:
            supported.append(item)
        else:
            unsupported.append(item)
    total = 0

    def section(name, groupitems):
        nonlocal total
        items, render = c_analyzer.build_section(name, groupitems,
                                                 relroot=REPO_ROOT)
        yield from render()
        total += len(items)

    yield ''
    yield '===================='
    yield 'supported'
    yield '===================='
    yield from section('types', supported)
    yield from section('variables', supported)

    yield ''
    yield '===================='
    yield 'unsupported'
    yield '===================='
    yield from section('types', unsupported)
    yield from section('variables', unsupported)

    yield ''
    yield f'grand total: {total}'

def cmd_analyze(filenames=None, **kwargs):
    formats = dict(c_analyzer.FORMATS)
    formats['summary'] = fmt_summary
    filenames = _resolve_filenames(filenames)
    kwargs['get_file_preprocessor'] = _parser.get_preprocessor(log_err=print)
    c_analyzer.cmd_analyze(
        filenames,
        relroot=REPO_ROOT,
        _analyze=_analyzer.analyze,
        formats=formats,
        **kwargs
    )
null
185,854
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

def _cli_data(parser):
    filenames = False
    known = True
    return c_analyzer._cli_data(parser, filenames, known)
null
185,855
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

def _resolve_filenames(filenames):
    if filenames:
        resolved = (_files.resolve_filename(f) for f in filenames)
    else:
        resolved = _files.iter_filenames()
    return resolved

def fmt_summary(analysis):
    # XXX Support sorting and grouping.
    supported = []
    unsupported = []
    for item in analysis:
        if item.supported:
            supported.append(item)
        else:
            unsupported.append(item)
    total = 0

    def section(name, groupitems):
        nonlocal total
        items, render = c_analyzer.build_section(name, groupitems,
                                                 relroot=REPO_ROOT)
        yield from render()
        total += len(items)

    yield ''
    yield '===================='
    yield 'supported'
    yield '===================='
    yield from section('types', supported)
    yield from section('variables', supported)

    yield ''
    yield '===================='
    yield 'unsupported'
    yield '===================='
    yield from section('types', unsupported)
    yield from section('variables', unsupported)

    yield ''
    yield f'grand total: {total}'

class KIND(enum.Enum):
    # XXX Use these in the raw parser code.
    TYPEDEF = 'typedef'
    STRUCT = 'struct'
    UNION = 'union'
    ENUM = 'enum'
    FUNCTION = 'function'
    VARIABLE = 'variable'
    STATEMENT = 'statement'

    def _from_raw(cls, raw):
        if raw is None:
            return None
        elif isinstance(raw, cls):
            return raw
        elif type(raw) is str:
            # We could use cls[raw] for the upper-case form,
            # but there's no need to go to the trouble.
            return cls(raw.lower())
        else:
            raise NotImplementedError(raw)

    def by_priority(cls, group=None):
        if group is None:
            return cls._ALL_BY_PRIORITY.copy()
        elif group == 'type':
            return cls._TYPE_DECLS_BY_PRIORITY.copy()
        elif group == 'decl':
            return cls._ALL_DECLS_BY_PRIORITY.copy()
        elif isinstance(group, str):
            raise NotImplementedError(group)
        else:
            # XXX Treat group as a set of kinds & return in priority order?
            raise NotImplementedError(group)

    def is_type_decl(cls, kind):
        if kind in cls.TYPES:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    def is_decl(cls, kind):
        if kind in cls.DECLS:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    def get_group(cls, kind, *, groups=None):
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        if groups is None:
            groups = ['type']
        elif not groups:
            groups = ()
        elif isinstance(groups, str):
            group = groups
            if group not in cls._GROUPS:
                raise ValueError(f'unsupported group {group!r}')
            groups = [group]
        else:
            unsupported = [g for g in groups if g not in cls._GROUPS]
            if unsupported:
                raise ValueError(f'unsupported groups {", ".join(repr(unsupported))}')
        for group in groups:
            if kind in cls._GROUPS[group]:
                return group
        else:
            return kind.value

    def resolve_group(cls, group):
        if isinstance(group, cls):
            return {group}
        elif isinstance(group, str):
            try:
                return cls._GROUPS[group].copy()
            except KeyError:
                raise ValueError(f'unsupported group {group!r}')
        else:
            resolved = set()
            for gr in group:
                # The original read "resolve.update(...)", an undefined name;
                # "resolved" is clearly the intended accumulator.
                resolved.update(cls.resolve_group(gr))
            return resolved
            #return {*cls.resolve_group(g) for g in group}

KIND._TYPE_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    KIND.TYPEDEF,
    KIND.STRUCT,
    KIND.UNION,
    KIND.ENUM,
]
KIND._ALL_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._TYPE_DECLS_BY_PRIORITY,
    KIND.FUNCTION,
    KIND.VARIABLE,
]
KIND._ALL_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._ALL_DECLS_BY_PRIORITY,
    KIND.STATEMENT,
]

KIND.TYPES = frozenset(KIND._TYPE_DECLS_BY_PRIORITY)
KIND.DECLS = frozenset(KIND._ALL_DECLS_BY_PRIORITY)
KIND._GROUPS = {
    'type': KIND.TYPES,
    'decl': KIND.DECLS,
}
KIND._GROUPS.update((k.value, {k}) for k in KIND)

UNKNOWN = _misc.Labeled('UNKNOWN')

def cmd_data(datacmd, **kwargs):
    formats = dict(c_analyzer.FORMATS)
    formats['summary'] = fmt_summary
    filenames = (file
                 for file in _resolve_filenames(None)
                 if file not in _parser.EXCLUDED)
    kwargs['get_file_preprocessor'] = _parser.get_preprocessor(log_err=print)
    if datacmd == 'show':
        types = _analyzer.read_known()
        results = []
        for decl, info in types.items():
            if info is UNKNOWN:
                if decl.kind in (KIND.STRUCT, KIND.UNION):
                    extra = {'unsupported': ['type unknown'] * len(decl.members)}
                else:
                    extra = {'unsupported': ['type unknown']}
                info = (info, extra)
            results.append((decl, info))
            if decl.shortkey == 'struct _object':
                tempinfo = info
        known = _analyzer.Analysis.from_results(results)
        analyze = None
    elif datacmd == 'dump':
        known = _analyzer.KNOWN_FILE
        def analyze(files, **kwargs):
            decls = []
            for decl in _analyzer.iter_decls(files, **kwargs):
                if not KIND.is_type_decl(decl.kind):
                    continue
                if not decl.filename.endswith('.h'):
                    if decl.shortkey not in _analyzer.KNOWN_IN_DOT_C:
                        continue
                decls.append(decl)
            results = _c_analyzer.analyze_decls(
                decls,
                known={},
                analyze_resolved=_analyzer.analyze_resolved,
            )
            return _analyzer.Analysis.from_results(results)
    else:  # check
        known = _analyzer.read_known()
        def analyze(files, **kwargs):
            return _analyzer.iter_decls(files, **kwargs)
    extracolumns = None
    c_analyzer.cmd_data(
        datacmd,
        filenames,
        known,
        _analyze=analyze,
        formats=formats,
        extracolumns=extracolumns,
        relroot=REPO_ROOT,
        **kwargs
    )
null
185,856
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

def add_progress_cli(parser, *, threshold=VERBOSITY, **kwargs):
    parser.add_argument('--progress', dest='track_progress', action='store_const', const=True)
    parser.add_argument('--no-progress', dest='track_progress', action='store_false')
    parser.set_defaults(track_progress=True)

    def process_args(args, *, argv=None):
        if args.track_progress:
            ns = vars(args)
            verbosity = ns.get('verbosity', VERBOSITY)
            if verbosity <= threshold:
                args.track_progress = track_progress_compact
            else:
                args.track_progress = track_progress_flat
    return process_args

def _cli_capi(parser):
    parser.add_argument('--levels', action='append', metavar='LEVEL[,...]')
    parser.add_argument(f'--public', dest='levels', action='append_const', const='public')
    parser.add_argument(f'--no-public', dest='levels', action='append_const', const='no-public')
    for level in _capi.LEVELS:
        parser.add_argument(f'--{level}', dest='levels', action='append_const', const=level)

    def process_levels(args, *, argv=None):
        levels = []
        for raw in args.levels or ():
            for level in raw.replace(',', ' ').strip().split():
                if level == 'public':
                    levels.append('stable')
                    levels.append('cpython')
                elif level == 'no-public':
                    levels.append('private')
                    levels.append('internal')
                elif level in _capi.LEVELS:
                    levels.append(level)
                else:
                    parser.error(f'expected LEVEL to be one of {sorted(_capi.LEVELS)}, got {level!r}')
        args.levels = set(levels)

    parser.add_argument('--kinds', action='append', metavar='KIND[,...]')
    for kind in _capi.KINDS:
        parser.add_argument(f'--{kind}', dest='kinds', action='append_const', const=kind)

    def process_kinds(args, *, argv=None):
        kinds = []
        for raw in args.kinds or ():
            for kind in raw.replace(',', ' ').strip().split():
                if kind in _capi.KINDS:
                    kinds.append(kind)
                else:
                    parser.error(f'expected KIND to be one of {sorted(_capi.KINDS)}, got {kind!r}')
        args.kinds = set(kinds)

    parser.add_argument('--group-by', dest='groupby', choices=['level', 'kind'])

    parser.add_argument('--format', default='table')
    parser.add_argument('--summary', dest='format', action='store_const', const='summary')

    def process_format(args, *, argv=None):
        orig = args.format
        args.format = _capi.resolve_format(args.format)
        if isinstance(args.format, str):
            if args.format not in _capi._FORMATS:
                parser.error(f'unsupported format {orig!r}')

    parser.add_argument('--show-empty', dest='showempty', action='store_true')
    parser.add_argument('--no-show-empty', dest='showempty', action='store_false')
    parser.set_defaults(showempty=None)

    # XXX Add --sort-by, --sort and --no-sort.

    parser.add_argument('--ignore', dest='ignored', action='append')

    def process_ignored(args, *, argv=None):
        ignored = []
        for raw in args.ignored or ():
            ignored.extend(raw.replace(',', ' ').strip().split())
        args.ignored = ignored or None

    parser.add_argument('filenames', nargs='*', metavar='FILENAME')
    process_progress = add_progress_cli(parser)

    return [
        process_levels,
        process_kinds,
        process_format,
        process_ignored,
        process_progress,
    ]
null
185,857
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

logger = logging.getLogger(__name__)

VERBOSITY = 3

def cmd_capi(filenames=None, *,
             levels=None,
             kinds=None,
             groupby='kind',
             format='table',
             showempty=None,
             ignored=None,
             track_progress=None,
             verbosity=VERBOSITY,
             **kwargs
             ):
    render = _capi.get_renderer(format)

    filenames = _files.iter_header_files(filenames, levels=levels)
    #filenames = (file for file, _ in main_for_filenames(filenames))
    if track_progress:
        filenames = track_progress(filenames)
    items = _capi.iter_capi(filenames)
    if levels:
        items = (item for item in items if item.level in levels)
    if kinds:
        items = (item for item in items if item.kind in kinds)
    filter = _capi.resolve_filter(ignored)
    if filter:
        items = (item for item in items if filter(item, log=lambda msg: logger.log(1, msg)))

    lines = render(
        items,
        groupby=groupby,
        showempty=showempty,
        verbose=verbosity > VERBOSITY,
    )
    print()
    for line in lines:
        print(line)
null
185,858
import logging
import sys

from c_common.fsutil import expand_filenames, iter_files_by_suffix
from c_common.scriptutil import (
    VERBOSITY, add_verbosity_cli, add_traceback_cli, add_commands_cli,
    add_kind_filtering_cli, add_files_cli, add_progress_cli,
    main_for_filenames, process_args_by_key, configure_logger, get_prog,
)
from c_parser.info import KIND
import c_parser.__main__ as c_parser
import c_analyzer.__main__ as c_analyzer
import c_analyzer as _c_analyzer
from c_analyzer.info import UNKNOWN
from . import _analyzer, _capi, _files, _parser, REPO_ROOT

COMMANDS = {
    'check': (
        'analyze and fail if the CPython source code has any problems',
        [_cli_check],
        cmd_check,
    ),
    'analyze': (
        'report on the state of the CPython source code',
        [(lambda p: c_analyzer._cli_analyze(p, **FILES_KWARGS))],
        cmd_analyze,
    ),
    'parse': (
        'parse the CPython source files',
        [_cli_parse],
        cmd_parse,
    ),
    'data': (
        'check/manage local data (e.g. known types, ignored vars, caches)',
        [_cli_data],
        cmd_data,
    ),
    'capi': (
        'inspect the C-API',
        [_cli_capi],
        cmd_capi,
    ),
}

def get_prog(spec=None, *, absolute=False, allowsuffix=True):
    if spec is None:
        _, spec = _find_script()
        # This is more natural for prog than __file__ would be.
        filename = sys.argv[0]
    elif isinstance(spec, str):
        filename = os.path.normpath(spec)
        spec = None
    else:
        filename = spec.origin
    if _is_standalone(filename):
        # Check if "installed".
        if allowsuffix or not filename.endswith('.py'):
            basename = os.path.basename(filename)
            found = shutil.which(basename)
            if found:
                script = os.path.abspath(filename)
                found = os.path.abspath(found)
                if os.path.normcase(script) == os.path.normcase(found):
                    return basename
        # It is only "standalone".
        if absolute:
            filename = os.path.abspath(filename)
        return filename
    elif spec is not None:
        module = spec.name
        if module.endswith('.__main__'):
            module = module[:-9]
        return f'{sys.executable} -m {module}'
    else:
        if absolute:
            filename = os.path.abspath(filename)
        return f'{sys.executable} {filename}'

def add_verbosity_cli(parser):
    parser.add_argument('-q', '--quiet', action='count', default=0)
    parser.add_argument('-v', '--verbose', action='count', default=0)

    def process_args(args, *, argv=None):
        ns = vars(args)
        key = 'verbosity'
        if key in ns:
            parser.error(f'duplicate arg {key!r}')
        ns[key] = max(0, VERBOSITY + ns.pop('verbose') - ns.pop('quiet'))
        return key
    return process_args

def add_traceback_cli(parser):
    parser.add_argument('--traceback', '--tb', action='store_true',
                        default=TRACEBACK)
    parser.add_argument('--no-traceback', '--no-tb', dest='traceback',
                        action='store_const', const=False)

    def process_args(args, *, argv=None):
        ns = vars(args)
        key = 'traceback_cm'
        if key in ns:
            parser.error(f'duplicate arg {key!r}')
        showtb = ns.pop('traceback')

        def traceback_cm():
            restore = loggingutil.hide_emit_errors()
            try:
                yield
            except BrokenPipeError:
                # It was piped to "head" or something similar.
                pass
            except NotImplementedError:
                raise  # re-raise
            except Exception as exc:
                if not showtb:
                    sys.exit(f'ERROR: {exc}')
                raise  # re-raise
            except KeyboardInterrupt:
                if not showtb:
                    sys.exit('\nINTERRUPTED')
                raise  # re-raise
            except BaseException as exc:
                if not showtb:
                    sys.exit(f'{type(exc).__name__}: {exc}')
                raise  # re-raise
            finally:
                restore()
        ns[key] = traceback_cm()
        return key
    return process_args

def add_commands_cli(parser, commands, *, commonspecs=COMMON_CLI, subset=None):
    arg_processors = {}
    if isinstance(subset, str):
        cmdname = subset
        try:
            _, argspecs, _ = commands[cmdname]
        except KeyError:
            raise ValueError(f'unsupported subset {subset!r}')
        parser.set_defaults(cmd=cmdname)
        arg_processors[cmdname] = _add_cmd_cli(parser, commonspecs, argspecs)
    else:
        if subset is None:
            cmdnames = subset = list(commands)
        elif not subset:
            raise NotImplementedError
        elif isinstance(subset, set):
            cmdnames = [k for k in commands if k in subset]
            subset = sorted(subset)
        else:
            cmdnames = [n for n in subset if n in commands]
        if len(cmdnames) < len(subset):
            bad = tuple(n for n in subset if n not in commands)
            raise ValueError(f'unsupported subset {bad}')

        common = argparse.ArgumentParser(add_help=False)
        common_processors = apply_cli_argspecs(common, commonspecs)
        subs = parser.add_subparsers(dest='cmd')
        for cmdname in cmdnames:
            description, argspecs, _ = commands[cmdname]
            sub = subs.add_parser(
                cmdname,
                description=description,
                parents=[common],
            )
            cmd_processors = _add_cmd_cli(sub, (), argspecs)
            arg_processors[cmdname] = common_processors + cmd_processors
    return arg_processors

def process_args_by_key(args, argv, processors, keys):
    extracted = process_args(args, argv, processors, keys=keys)
    return [extracted[key] for key in keys]

def parse_args(argv=sys.argv[1:], prog=None, *, subset=None):
    import argparse
    parser = argparse.ArgumentParser(
        prog=prog or get_prog(),
    )

#    if subset == 'check' or subset == ['check']:
#        if checks is not None:
#            commands = dict(COMMANDS)
#            commands['check'] = list(commands['check'])
#            cli = commands['check'][1][0]
#            commands['check'][1][0] = (lambda p: cli(p, checks=checks))

    processors = add_commands_cli(
        parser,
        commands=COMMANDS,
        commonspecs=[
            add_verbosity_cli,
            add_traceback_cli,
        ],
        subset=subset,
    )

    args = parser.parse_args(argv)
    ns = vars(args)

    cmd = ns.pop('cmd')

    verbosity, traceback_cm = process_args_by_key(
        args,
        argv,
        processors[cmd],
        ['verbosity', 'traceback_cm'],
    )
    if cmd != 'parse':
        # "verbosity" is sent to the commands, so we put it back.
        args.verbosity = verbosity

    return cmd, ns, verbosity, traceback_cm
null
185,859
import os.path
import re

from c_parser.preprocessor import (
    get_preprocessor as _get_preprocessor,
)
from c_parser import (
    parse_file as _parse_file,
    parse_files as _parse_files,
)
from . import REPO_ROOT

GLOB_ALL = '**/*'

The provided code snippet includes the necessary dependencies for implementing the `clean_lines` function. Write a Python function `def clean_lines(text)` to solve the following problem:

Clear out comments, blank lines, and leading/trailing whitespace.

Here is the function:

def clean_lines(text):
    """Clear out comments, blank lines, and leading/trailing whitespace."""
    lines = (line.strip() for line in text.splitlines())
    lines = (line.partition('#')[0].rstrip()
             for line in lines
             if line and not line.startswith('#'))
    glob_all = f'{GLOB_ALL} '
    lines = (re.sub(r'^[*] ', glob_all, line) for line in lines)
    lines = (os.path.join(REPO_ROOT, line) for line in lines)
    return list(lines)
Clear out comments, blank lines, and leading/trailing whitespace.
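A quick check of `clean_lines` behavior (`REPO_ROOT` comes from the package; the sample text is illustrative):

sample = """
# comment-only and blank lines are dropped
Include/*.h        # trailing comments are stripped
"""
print(clean_lines(sample))
# -> [os.path.join(REPO_ROOT, 'Include/*.h')]
# A line starting with "* " would have that prefix expanded to the
# GLOB_ALL pattern ('**/* ') before being joined with REPO_ROOT.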
185,860
import os.path
import re

from c_parser.preprocessor import (
    get_preprocessor as _get_preprocessor,
)
from c_parser import (
    parse_file as _parse_file,
    parse_files as _parse_files,
)
from . import REPO_ROOT

def get_preprocessor(*,
                     file_macros=None,
                     file_incldirs=None,
                     file_same=None,
                     **kwargs
                     ):
    macros = tuple(MACROS)
    if file_macros:
        macros += tuple(file_macros)
    incldirs = tuple(INCL_DIRS)
    if file_incldirs:
        incldirs += tuple(file_incldirs)
    return _get_preprocessor(
        file_macros=macros,
        file_incldirs=incldirs,
        file_same=file_same,
        **kwargs
    )

def get_preprocessor(*,
                     file_macros=None,
                     file_incldirs=None,
                     file_same=None,
                     ignore_exc=False,
                     log_err=None,
                     ):
    _preprocess = preprocess
    if file_macros:
        file_macros = tuple(_parse_macros(file_macros))
    if file_incldirs:
        file_incldirs = tuple(_parse_incldirs(file_incldirs))
    if file_same:
        file_same = tuple(file_same)
    if not callable(ignore_exc):
        ignore_exc = (lambda exc, _ig=ignore_exc: _ig)

    def get_file_preprocessor(filename):
        filename = filename.strip()
        if file_macros:
            macros = list(_resolve_file_values(filename, file_macros))
        if file_incldirs:
            incldirs = [v for v, in _resolve_file_values(filename, file_incldirs)]

        def preprocess(**kwargs):
            if file_macros and 'macros' not in kwargs:
                kwargs['macros'] = macros
            if file_incldirs and 'incldirs' not in kwargs:
                kwargs['incldirs'] = [v for v, in _resolve_file_values(filename, file_incldirs)]
            if file_same and 'file_same' not in kwargs:
                kwargs['samefiles'] = file_same
            kwargs.setdefault('filename', filename)
            with handling_errors(ignore_exc, log_err=log_err):
                return _preprocess(filename, **kwargs)
        return preprocess
    return get_file_preprocessor

def parse_file(filename, *,
               match_kind=None,
               ignore_exc=None,
               log_err=None,
               ):
    get_file_preprocessor = get_preprocessor(
        ignore_exc=ignore_exc,
        log_err=log_err,
    )
    yield from _parse_file(
        filename,
        match_kind=match_kind,
        get_file_preprocessor=get_file_preprocessor,
    )
null
185,861
import logging
import os.path
import sys

from c_common import fsutil
from c_common.scriptutil import (
    CLIArgSpec as Arg,
    add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli,
    add_files_cli, add_commands_cli, process_args_by_key,
    configure_logger, get_prog, main_for_filenames,
)
from .preprocessor import get_preprocessor
from .preprocessor.__main__ import (
    add_common_cli as add_preprocessor_cli,
)
from .info import KIND
from . import parse_file as _iter_parsed

def fmt_raw(filename, item, *, showfwd=None):
    yield str(tuple(item))
null
185,862
import logging
import os.path
import sys

from c_common import fsutil
from c_common.scriptutil import (
    CLIArgSpec as Arg,
    add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli,
    add_files_cli, add_commands_cli, process_args_by_key,
    configure_logger, get_prog, main_for_filenames,
)
from .preprocessor import get_preprocessor
from .preprocessor.__main__ import (
    add_common_cli as add_preprocessor_cli,
)
from .info import KIND
from . import parse_file as _iter_parsed

def fmt_full(filename, item, *, showfwd=None):
    raise NotImplementedError
null
185,863
import logging
import os.path
import sys

from c_common import fsutil
from c_common.scriptutil import (
    CLIArgSpec as Arg,
    add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli,
    add_files_cli, add_commands_cli, process_args_by_key,
    configure_logger, get_prog, main_for_filenames,
)
from .preprocessor import get_preprocessor
from .preprocessor.__main__ import (
    add_common_cli as add_preprocessor_cli,
)
from .info import KIND
from . import parse_file as _iter_parsed

def add_output_cli(parser):
    parser.add_argument('--format', dest='fmt', default='summary', choices=tuple(FORMATS))
    parser.add_argument('--showfwd', action='store_true', default=None)
    parser.add_argument('--no-showfwd', dest='showfwd', action='store_false', default=None)

    def process_args(args, *, argv=None):
        pass
    return process_args

def _cli_parse(parser, excluded=None, **prepr_kwargs):
    process_output = add_output_cli(parser)
    process_kinds = add_kind_filtering_cli(parser)
    process_preprocessor = add_preprocessor_cli(parser, **prepr_kwargs)
    process_files = add_files_cli(parser, excluded=excluded)
    return [
        process_output,
        process_kinds,
        process_preprocessor,
        process_files,
    ]
null
185,864
import logging
import os.path
import sys

from c_common import fsutil
from c_common.scriptutil import (
    CLIArgSpec as Arg,
    add_verbosity_cli,
    add_traceback_cli,
    add_kind_filtering_cli,
    add_files_cli,
    add_commands_cli,
    process_args_by_key,
    configure_logger,
    get_prog,
    main_for_filenames,
)
from .preprocessor import get_preprocessor
from .preprocessor.__main__ import (
    add_common_cli as add_preprocessor_cli,
)
from .info import KIND
from . import parse_file as _iter_parsed


def _get_preprocessor(**kwargs):
    # get_preprocessor() returns a per-file factory, so no filename is
    # needed here; cmd_parse() hands the factory on as-is.
    return get_preprocessor(log_err=print,
                            **kwargs
                            )


FORMATS = {
    'raw': fmt_raw,
    'summary': fmt_summary,
    'full': fmt_full,
}


def cmd_parse(filenames, *,
              fmt='summary',
              showfwd=None,
              iter_filenames=None,
              relroot=None,
              **kwargs
              ):
    if 'get_file_preprocessor' not in kwargs:
        kwargs['get_file_preprocessor'] = _get_preprocessor()
    try:
        do_fmt = FORMATS[fmt]
    except KeyError:
        raise ValueError(f'unsupported fmt {fmt!r}')
    for filename, relfile in main_for_filenames(filenames, iter_filenames, relroot):
        for item in _iter_parsed(filename, **kwargs):
            item = item.fix_filename(relroot, fixroot=False, normalize=False)
            for line in do_fmt(relfile, item, showfwd=showfwd):
                print(line)
null
185,865
import logging import os.path import sys from c_common import fsutil from c_common.scriptutil import ( CLIArgSpec as Arg, add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli, add_files_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, main_for_filenames, ) from .preprocessor import get_preprocessor from .preprocessor.__main__ import ( add_common_cli as add_preprocessor_cli, ) from .info import KIND from . import parse_file as _iter_parsed def _cli_data(parser): ... return []
null
185,866
import logging import os.path import sys from c_common import fsutil from c_common.scriptutil import ( CLIArgSpec as Arg, add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli, add_files_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, main_for_filenames, ) from .preprocessor import get_preprocessor from .preprocessor.__main__ import ( add_common_cli as add_preprocessor_cli, ) from .info import KIND from . import parse_file as _iter_parsed def cmd_data(filenames, **kwargs ): # XXX raise NotImplementedError
null
185,867
import logging
import os.path
import sys

from c_common import fsutil
from c_common.scriptutil import (
    CLIArgSpec as Arg,
    add_verbosity_cli,
    add_traceback_cli,
    add_kind_filtering_cli,
    add_files_cli,
    add_commands_cli,
    process_args_by_key,
    configure_logger,
    get_prog,
    main_for_filenames,
)
from .preprocessor import get_preprocessor
from .preprocessor.__main__ import (
    add_common_cli as add_preprocessor_cli,
)
from .info import KIND
from . import parse_file as _iter_parsed


COMMANDS = {
    'parse': (
        'parse the given C source & header files',
        [_cli_parse],
        cmd_parse,
    ),
    'data': (
        'check/manage local data (e.g. excludes, macros)',
        [_cli_data],
        cmd_data,
    ),
}


def parse_args(argv=sys.argv[1:], prog=sys.argv[0], *, subset='parse'):
    import argparse
    parser = argparse.ArgumentParser(
        prog=prog or get_prog(),
    )

    processors = add_commands_cli(
        parser,
        commands={k: v[1] for k, v in COMMANDS.items()},
        commonspecs=[
            add_verbosity_cli,
            add_traceback_cli,
        ],
        subset=subset,
    )

    args = parser.parse_args(argv)
    ns = vars(args)

    cmd = ns.pop('cmd')

    verbosity, traceback_cm = process_args_by_key(
        args,
        argv,
        processors[cmd],
        ['verbosity', 'traceback_cm'],
    )

    return cmd, ns, verbosity, traceback_cm
null
185,868
import os.path
import re

from . import common as _common


TOOL = 'gcc'

POST_ARGS = (
    '-pthread',
    '-std=c99',
    #'-g',
    #'-Og',
    #'-Wno-unused-result',
    #'-Wsign-compare',
    #'-Wall',
    #'-Wextra',
    '-E',
)


def _iter_lines(text, filename, samefiles, *, raw=False):
    ...


def preprocess(filename,
               incldirs=None,
               macros=None,
               samefiles=None,
               ):
    text = _common.preprocess(
        TOOL, filename,
        incldirs=incldirs,
        macros=macros,
        #preargs=PRE_ARGS,
        postargs=POST_ARGS,
        executable=['gcc'],
        compiler='unix',
    )
    return _iter_lines(text, filename, samefiles)
null
185,869
import sys def _as_tuple(items): if isinstance(items, str): return tuple(items.strip().replace(',', ' ').split()) elif items: return tuple(items) else: return ()
null
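A quick sanity check of the normalization above (assuming _as_tuple is in scope): strings are split on commas and whitespace, other iterables pass through, and falsy values become the empty tuple.

assert _as_tuple('a, b c') == ('a', 'b', 'c')
assert _as_tuple(['a', 'b']) == ('a', 'b')
assert _as_tuple(None) == ()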
185,870
import logging import sys from c_common.scriptutil import ( CLIArgSpec as Arg, add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli, add_files_cli, add_failure_filtering_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, main_for_filenames, ) from . import ( errors as _errors, get_preprocessor as _get_preprocessor, ) def add_common_cli(parser, *, get_preprocessor=_get_preprocessor): parser.add_argument('--macros', action='append') parser.add_argument('--incldirs', action='append') parser.add_argument('--same', action='append') process_fail_arg = add_failure_filtering_cli(parser, FAIL) def process_args(args, *, argv): ns = vars(args) process_fail_arg(args, argv) ignore_exc = ns.pop('ignore_exc') # We later pass ignore_exc to _get_preprocessor(). args.get_file_preprocessor = get_preprocessor( file_macros=ns.pop('macros'), file_incldirs=ns.pop('incldirs'), file_same=ns.pop('same'), ignore_exc=ignore_exc, log_err=print, ) return process_args def _cli_preprocess(parser, excluded=None, **prepr_kwargs): parser.add_argument('--pure', action='store_true') parser.add_argument('--no-pure', dest='pure', action='store_const', const=False) process_kinds = add_kind_filtering_cli(parser) process_common = add_common_cli(parser, **prepr_kwargs) parser.add_argument('--raw', action='store_true') process_files = add_files_cli(parser, excluded=excluded) return [ process_kinds, process_common, process_files, ]
null
185,871
import logging import sys from c_common.scriptutil import ( CLIArgSpec as Arg, add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli, add_files_cli, add_failure_filtering_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, main_for_filenames, ) from . import ( errors as _errors, get_preprocessor as _get_preprocessor, ) def _iter_preprocessed(filename, *, get_preprocessor, match_kind=None, pure=False, ): preprocess = get_preprocessor(filename) for line in preprocess(tool=not pure) or (): if match_kind is not None and not match_kind(line.kind): continue yield line def cmd_preprocess(filenames, *, raw=False, iter_filenames=None, **kwargs ): if 'get_file_preprocessor' not in kwargs: kwargs['get_file_preprocessor'] = _get_preprocessor() if raw: def show_file(filename, lines): for line in lines: print(line) #print(line.raw) else: def show_file(filename, lines): for line in lines: linefile = '' if line.filename != filename: linefile = f' ({line.filename})' text = line.data if line.kind == 'comment': text = '/* ' + line.data.splitlines()[0] text += ' */' if '\n' in line.data else r'\n... */' print(f' {line.lno:>4} {line.kind:10} | {text}') filenames = main_for_filenames(filenames, iter_filenames) for filename in filenames: lines = _iter_preprocessed(filename, **kwargs) show_file(filename, lines)
null
185,872
import logging import sys from c_common.scriptutil import ( CLIArgSpec as Arg, add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli, add_files_cli, add_failure_filtering_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, main_for_filenames, ) from . import ( errors as _errors, get_preprocessor as _get_preprocessor, ) def _cli_data(parser): ... return None
null
185,873
import logging import sys from c_common.scriptutil import ( CLIArgSpec as Arg, add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli, add_files_cli, add_failure_filtering_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, main_for_filenames, ) from . import ( errors as _errors, get_preprocessor as _get_preprocessor, ) def cmd_data(filenames, **kwargs ): # XXX raise NotImplementedError
null
185,874
import logging import sys from c_common.scriptutil import ( CLIArgSpec as Arg, add_verbosity_cli, add_traceback_cli, add_kind_filtering_cli, add_files_cli, add_failure_filtering_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, main_for_filenames, ) from . import ( errors as _errors, get_preprocessor as _get_preprocessor, ) COMMANDS = { 'preprocess': ( 'preprocess the given C source & header files', [_cli_preprocess], cmd_preprocess, ), 'data': ( 'check/manage local data (e.g. excludes, macros)', [_cli_data], cmd_data, ), } def parse_args(argv=sys.argv[1:], prog=sys.argv[0], *, subset='preprocess', excluded=None, **prepr_kwargs ): import argparse parser = argparse.ArgumentParser( prog=prog or get_prog(), ) processors = add_commands_cli( parser, commands={k: v[1] for k, v in COMMANDS.items()}, commonspecs=[ add_verbosity_cli, add_traceback_cli, ], subset=subset, ) args = parser.parse_args(argv) ns = vars(args) cmd = ns.pop('cmd') verbosity, traceback_cm = process_args_by_key( args, argv, processors[cmd], ['verbosity', 'traceback_cm'], ) return cmd, ns, verbosity, traceback_cm
null
185,875
from ..source import ( opened as _open_source, ) from . import common as _common def preprocess(lines, filename=None): if isinstance(lines, str): with _open_source(lines, filename) as (lines, filename): yield from preprocess(lines, filename) return # XXX actually preprocess... for lno, line in enumerate(lines, 1): kind = 'source' data = line conditions = None yield _common.SourceLine( _common.FileInfo(filename, lno), kind, data, conditions, )
null
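A small driving example for the pure-Python preprocessor above, assuming the module and its _common helpers (SourceLine, FileInfo) are importable; since nothing is actually expanded yet, every input line comes back as a plain 'source' line.

lines = ['#include <stdio.h>\n', 'int spam = 1;\n']
for srcline in preprocess(lines, filename='example.c'):   # example.c is made up
    print(srcline.kind, srcline.data, end='')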
185,876
import re from . import info as _info from .parser._regexes import SIMPLE_TYPE def match_storage(decl, expected): default = _info.get_default_storage(decl) #assert default if expected is None: expected = {default} elif isinstance(expected, str): expected = {expected or default} elif not expected: expected = _info.STORAGE else: expected = {v or default for v in expected} storage = _info.get_effective_storage(decl, default=default) return storage in expected
null
185,877
import re from . import info as _info from .parser._regexes import SIMPLE_TYPE _KIND = _info.KIND def is_decl(item): return _KIND.is_decl(item.kind)
null
185,878
import re

from . import info as _info
from .parser._regexes import SIMPLE_TYPE

_KIND = _info.KIND


def is_type_decl(item):
    return _KIND.is_type_decl(item.kind)


def is_forward_decl(decl):
    if decl.kind is _KIND.TYPEDEF:
        return False
    elif is_type_decl(decl):
        return not decl.data
    elif decl.kind is _KIND.FUNCTION:
        # XXX This doesn't work with ParsedItem.
        return decl.signature.isforward
    elif decl.kind is _KIND.VARIABLE:
        # No var decls are considered forward (or all are...).
        return False
    else:
        raise NotImplementedError(decl)
null
185,879
import re from . import info as _info from .parser._regexes import SIMPLE_TYPE _KIND = _info.KIND def can_have_symbol(decl): return decl.kind in (_KIND.VARIABLE, _KIND.FUNCTION) def has_external_symbol(decl): if not can_have_symbol(decl): return False if _info.get_effective_storage(decl) != 'extern': return False if decl.kind is _KIND.FUNCTION: return not decl.signature.isforward else: # It must be a variable, which can only be implicitly extern here. return decl.storage != 'extern'
null
185,880
import re

from . import info as _info
from .parser._regexes import SIMPLE_TYPE

_KIND = _info.KIND


def can_have_symbol(decl):
    return decl.kind in (_KIND.VARIABLE, _KIND.FUNCTION)


def has_internal_symbol(decl):
    if not can_have_symbol(decl):
        return False
    return _info.get_actual_storage(decl) == 'static'
null
185,881
import re

from . import info as _info
from .parser._regexes import SIMPLE_TYPE

_KIND = _info.KIND


def can_have_symbol(decl):
    return decl.kind in (_KIND.VARIABLE, _KIND.FUNCTION)


def is_external_reference(decl):
    if not can_have_symbol(decl):
        return False
    # We have to check the declared storage rather than the effective.
    if decl.storage != 'extern':
        return False
    if decl.kind is _KIND.FUNCTION:
        return decl.signature.isforward
    # Otherwise it's a variable.
    return True
null
185,882
import re

from . import info as _info
from .parser._regexes import SIMPLE_TYPE

_KIND = _info.KIND


def is_local_var(decl):
    if decl.kind is not _KIND.VARIABLE:
        return False
    return bool(decl.parent)
null
185,883
import re

from . import info as _info
from .parser._regexes import SIMPLE_TYPE

_KIND = _info.KIND


def is_global_var(decl):
    if decl.kind is not _KIND.VARIABLE:
        return False
    return not decl.parent
null
185,884
import re

from . import info as _info
from .parser._regexes import SIMPLE_TYPE

_KIND = _info.KIND


def filter_by_kind(items, kind):
    # 'type' and 'decl' are shortcuts for the corresponding KIND groups;
    # anything else may be a single kind or an iterable of kinds.
    if kind == 'type':
        kinds = _KIND.TYPES
    elif kind == 'decl':
        kinds = _KIND.DECLS
    else:
        try:
            okay = kind in _KIND
        except TypeError:
            kinds = set(kind)
        else:
            kinds = {kind} if okay else set(kind)
    for item in items:
        if item.kind in kinds:
            yield item
null
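An example run of filter_by_kind above, using SimpleNamespace stand-ins for parsed items (real ones come from c_parser); 'type' selects only typedef/struct/union/enum kinds.

from types import SimpleNamespace as NS

items = [NS(kind=_KIND.STRUCT), NS(kind=_KIND.FUNCTION)]
assert [i.kind for i in filter_by_kind(items, 'type')] == [_KIND.STRUCT]
assert len(list(filter_by_kind(items, 'decl'))) == 2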
185,885
import re from . import info as _info from .parser._regexes import SIMPLE_TYPE def group_by_category(decls, categories, *, ignore_non_match=True): collated = {} for decl in decls: # Matchers should be mutually exclusive. (First match wins.) for category, match in categories.items(): if match(decl): if category not in collated: collated[category] = [decl] else: collated[category].append(decl) break else: if not ignore_non_match: raise Exception(f'no match for {decl!r}') return collated
null
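group_by_category() above needs only plain objects plus predicate functions, so a fully standalone demo works; the first matching category wins for each item.

decls = ['static int x;', 'extern int y;', 'int z;']
categories = {
    'static': lambda d: d.startswith('static'),
    'extern': lambda d: d.startswith('extern'),
    'other': lambda d: True,
}
print(group_by_category(decls, categories))
# {'static': ['static int x;'], 'extern': ['extern int y;'], 'other': ['int z;']}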
185,886
import re from . import info as _info from .parser._regexes import SIMPLE_TYPE _KIND = _info.KIND def group_by_kind(items): collated = {kind: [] for kind in _KIND} for item in items: try: collated[item.kind].append(item) except KeyError: raise ValueError(f'unsupported kind in {item!r}') return collated
null
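A toy run of group_by_kind above, using SimpleNamespace stand-ins for parsed items (assuming c_parser's KIND is in scope as _KIND).

from types import SimpleNamespace as NS

items = [NS(kind=_KIND.FUNCTION, name='main'),
         NS(kind=_KIND.VARIABLE, name='counter')]
grouped = group_by_kind(items)
print([i.name for i in grouped[_KIND.FUNCTION]])   # ['main']
print([i.name for i in grouped[_KIND.VARIABLE]])   # ['counter']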
185,887
import contextlib
import os.path


def _looks_like_filename(value):
    if not isinstance(value, str):
        return False
    return value.endswith(('.c', '.h'))


def _resolve_filename(filename, alt=None):
    if os.path.isabs(filename):
        ...
#        raise NotImplementedError
    else:
        filename = os.path.join('.', filename)
    if not alt:
        alt = filename
    elif os.path.abspath(filename) == os.path.abspath(alt):
        alt = filename
    else:
        raise ValueError(f'mismatch: {filename} != {alt}')
    return filename, alt


@contextlib.contextmanager
def good_file(filename, alt=None):
    if not _looks_like_filename(filename):
        raise ValueError(f'expected a filename, got {filename}')
    filename, _ = _resolve_filename(filename, alt)
    try:
        yield filename
    except Exception:
        if not os.path.exists(filename):
            raise FileNotFoundError(f'file not found: {filename}')
        raise  # re-raise
null
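A usage sketch for good_file above; "example.c" is a hypothetical path, so a missing file surfaces as the wrapped FileNotFoundError.

try:
    with good_file('example.c') as filename:
        with open(filename) as srcfile:
            text = srcfile.read()
except FileNotFoundError as exc:
    print(exc)   # e.g. "file not found: ./example.c"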
185,888
import contextlib
import os.path


def resolve(source, filename):
    if _looks_like_filename(source):
        return _resolve_filename(source, filename)
    if isinstance(source, str):
        source = source.splitlines()
    # At this point "source" is not a str.
    if not filename:
        filename = None
    elif not isinstance(filename, str):
        raise TypeError(f'filename should be str (or None), got {filename!r}')
    else:
        filename, _ = _resolve_filename(filename)
    return source, filename


@contextlib.contextmanager
def opened(source, filename=None):
    source, filename = resolve(source, filename)
    if isinstance(source, str):
        with open(source) as srcfile:
            yield srcfile, filename
    else:
        yield source, filename
null
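opened() above accepts either a filename or an iterable of lines, which makes it easy to exercise without touching disk (assuming the module's elided _looks_like_filename/_resolve_filename helpers are in scope).

with opened(['int x;\n', 'int y;\n']) as (lines, filename):
    print(filename)      # None -- no real file involved
    print(list(lines))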
185,889
import re

from ._regexes import (
    LOCAL as _LOCAL,
    LOCAL_STATICS as _LOCAL_STATICS,
)
from ._common import (
    log_match,
    parse_var_decl,
    set_capture_groups,
    match_paren,
)
from ._compound_decl_body import DECL_BODY_PARSERS


LOCAL_STATICS_RE = re.compile(rf'^ \s* {_LOCAL_STATICS}', re.VERBOSE)


def _parse_next_local_static(m, srcinfo, anon_name, func, depth):
    ...


def parse_function_statics(source, func, anon_name):
    # For now we do not worry about locals declared in for loop "headers".
    depth = 1
    while depth > 0:
        for srcinfo in source:
            m = LOCAL_STATICS_RE.match(srcinfo.text)
            if m:
                break
        else:
            # We ran out of lines.
            if srcinfo is not None:
                srcinfo.done()
            return
        for item, depth in _parse_next_local_static(m, srcinfo,
                                                    anon_name, func, depth):
            if callable(item):
                parse_body = item
                yield from parse_body(source)
            elif item is not None:
                yield item
null
185,890
def parse(text, anon_name):
    context = None
    data = None
    for m in DELIMITER_RE.finditer(text):
        before, opened, closed = m.groups()
        delim = opened or closed

        handle_segment = HANDLERS[context][delim]
        result, context, data = handle_segment(before, delim, data)
        if result:
            yield result


def _parse(srclines, anon_name):
    text = ' '.join(l for _, l in srclines)

    from ._delim import parse
    yield from parse(text, anon_name)
null
185,891
import re

from ._regexes import (
    STRUCT_MEMBER_DECL as _STRUCT_MEMBER_DECL,
    ENUM_MEMBER_DECL as _ENUM_MEMBER_DECL,
)
from ._common import (
    log_match,
    parse_var_decl,
    set_capture_groups,
)


STRUCT_MEMBER_RE = re.compile(rf'^ \s* {_STRUCT_MEMBER_DECL}', re.VERBOSE)


def _parse_struct_next(m, srcinfo, anon_name, parent):
    (inline_kind, inline_name,
     qualspec, declarator,
     size,
     ending,
     close,
     ) = m.groups()
    remainder = srcinfo.text[m.end():]

    if close:
        log_match('compound close', m)
        srcinfo.advance(remainder)

    elif inline_kind:
        log_match('compound inline', m)
        kind = inline_kind
        name = inline_name or anon_name('inline-')
        # Immediately emit a forward declaration.
        yield srcinfo.resolve(kind, name=name, data=None)

        # un-inline the decl.  Note that it might not actually be inline.
        # We handle the case in the "maybe_inline_actual" branch.
        srcinfo.nest(
            remainder,
            f'{kind} {name}',
        )
        def parse_body(source):
            _parse_body = DECL_BODY_PARSERS[kind]

            data = []  # members
            ident = f'{kind} {name}'
            for item in _parse_body(source, anon_name, ident):
                if item.kind == 'field':
                    data.append(item)
                else:
                    yield item
            # XXX Should "parent" really be None for inline type decls?
            yield srcinfo.resolve(kind, data, name, parent=None)

            srcinfo.resume()
        yield parse_body

    else:
        # not inline (member)
        log_match('compound member', m)
        if qualspec:
            _, name, data = parse_var_decl(f'{qualspec} {declarator}')
            if not name:
                name = anon_name('struct-field-')
            if size:
#                data = (data, size)
                data['size'] = int(size)
        else:
            # This shouldn't happen (we expect each field to have a name).
            raise NotImplementedError
            name = sized_name or anon_name('struct-field-')
            data = int(size)
        yield srcinfo.resolve('field', data, name, parent)  # XXX Restart?

        if ending == ',':
            remainder = rf'{qualspec} {remainder}'
        srcinfo.advance(remainder)


def parse_struct_body(source, anon_name, parent):
    done = False
    while not done:
        done = True
        for srcinfo in source:
            m = STRUCT_MEMBER_RE.match(srcinfo.text)
            if m:
                break
        else:
            # We ran out of lines.
            if srcinfo is not None:
                srcinfo.done()
            return
        for item in _parse_struct_next(m, srcinfo, anon_name, parent):
            if callable(item):
                parse_body = item
                yield from parse_body(source)
            else:
                yield item
            done = False
null
185,892
import re

from ._regexes import (
    STRUCT_MEMBER_DECL as _STRUCT_MEMBER_DECL,
    ENUM_MEMBER_DECL as _ENUM_MEMBER_DECL,
)
from ._common import (
    log_match,
    parse_var_decl,
    set_capture_groups,
)


ENUM_MEMBER_RE = re.compile(rf'{_ENUM_MEMBER_DECL}', re.VERBOSE)


def parse_enum_body(source, _anon_name, _parent):
    ending = None
    while ending != '}':
        for srcinfo in source:
            m = ENUM_MEMBER_RE.match(srcinfo.text)
            if m:
                break
        else:
            # We ran out of lines.
            if srcinfo is not None:
                srcinfo.done()
            return
        remainder = srcinfo.text[m.end():]

        (close,
         name, init, ending,
         ) = m.groups()
        if close:
            ending = '}'
        else:
            data = init
            yield srcinfo.resolve('field', data, name, _parent)
        srcinfo.advance(remainder)
null
185,893
import textwrap def _ind(text, level=1, edges='both'): indent = ' ' * level text = textwrap.indent(text, indent) if edges == 'pre' or edges == 'both': text = '\n' + indent + text.lstrip() if edges == 'post' or edges == 'both': text = text.rstrip() + '\n' + ' ' * (level - 1) return text
null
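What the indenting helper above produces, assuming _ind is in scope; the result is shaped for embedding one verbose-regex chunk inside another.

block = _ind('int x;\nint y;', level=2)
print(repr(block))
# '\n        int x;\n        int y;\n    '  (8-space body, 4-space closing indent)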
185,894
import re

from ._regexes import (
    GLOBAL as _GLOBAL,
)
from ._common import (
    log_match,
    parse_var_decl,
    set_capture_groups,
)
from ._compound_decl_body import DECL_BODY_PARSERS
from ._func_body import parse_function_statics as parse_function_body


GLOBAL_RE = re.compile(rf'^ \s* {_GLOBAL}', re.VERBOSE)


def _parse_next(m, srcinfo, anon_name):
    (
     empty,
     # compound type decl (maybe inline)
     compound_leading, compound_kind, compound_name,
     forward_kind, forward_name, maybe_inline_actual,
     # typedef
     typedef_decl, typedef_func_params,
     # vars and funcs
     storage, func_inline, decl,
     func_params, func_delim, func_legacy_params,
     var_init, var_ending,
     ) = m.groups()
    remainder = srcinfo.text[m.end():]

    if empty:
        log_match('global empty', m)
        srcinfo.advance(remainder)

    elif maybe_inline_actual:
        log_match('maybe_inline_actual', m)
        # Ignore forward declarations.
        # XXX Maybe return them too (with an "isforward" flag)?
        if not maybe_inline_actual.strip().endswith(';'):
            remainder = maybe_inline_actual + remainder
        yield srcinfo.resolve(forward_kind, None, forward_name)
        if maybe_inline_actual.strip().endswith('='):
            # We use a dummy prefix for a fake typedef.
            # XXX Ideally this case would not be caught by MAYBE_INLINE_ACTUAL.
            _, name, data = parse_var_decl(f'{forward_kind} {forward_name} fake_typedef_{forward_name}')
            yield srcinfo.resolve('typedef', data, name, parent=None)
            remainder = f'{name} {remainder}'
        srcinfo.advance(remainder)

    elif compound_kind:
        kind = compound_kind
        name = compound_name or anon_name('inline-')
        # Immediately emit a forward declaration.
        yield srcinfo.resolve(kind, name=name, data=None)

        # un-inline the decl.  Note that it might not actually be inline.
        # We handle the case in the "maybe_inline_actual" branch.
        srcinfo.nest(
            remainder,
            f'{compound_leading or ""} {compound_kind} {name}',
        )
        def parse_body(source):
            _parse_body = DECL_BODY_PARSERS[compound_kind]

            data = []  # members
            ident = f'{kind} {name}'
            for item in _parse_body(source, anon_name, ident):
                if item.kind == 'field':
                    data.append(item)
                else:
                    yield item
            # XXX Should "parent" really be None for inline type decls?
            yield srcinfo.resolve(kind, data, name, parent=None)

            srcinfo.resume()
        yield parse_body

    elif typedef_decl:
        log_match('typedef', m)
        kind = 'typedef'
        _, name, data = parse_var_decl(typedef_decl)
        if typedef_func_params:
            return_type = data
            # This matches the data for func declarations.
            data = {
                'storage': None,
                'inline': None,
                'params': f'({typedef_func_params})',
                'returntype': return_type,
                'isforward': True,
            }
        yield srcinfo.resolve(kind, data, name, parent=None)
        srcinfo.advance(remainder)

    elif func_delim or func_legacy_params:
        log_match('function', m)
        kind = 'function'
        _, name, return_type = parse_var_decl(decl)
        func_params = func_params or func_legacy_params
        data = {
            'storage': storage,
            'inline': func_inline,
            'params': f'({func_params})',
            'returntype': return_type,
            'isforward': func_delim == ';',
        }

        yield srcinfo.resolve(kind, data, name, parent=None)
        srcinfo.advance(remainder)

        if func_delim == '{' or func_legacy_params:
            def parse_body(source):
                yield from parse_function_body(source, name, anon_name)
            yield parse_body

    elif var_ending:
        log_match('global variable', m)
        kind = 'variable'
        _, name, vartype = parse_var_decl(decl)
        data = {
            'storage': storage,
            'vartype': vartype,
        }
        yield srcinfo.resolve(kind, data, name, parent=None)

        if var_ending == ',':
            # It was a multi-declaration, so queue up the next one.
_, qual, typespec, _ = vartype.values() remainder = f'{storage or ""} {qual or ""} {typespec} {remainder}' srcinfo.advance(remainder) if var_init: _data = f'{name} = {var_init.strip()}' yield srcinfo.resolve('statement', _data, name=None) else: # This should be unreachable. raise NotImplementedError def parse_globals(source, anon_name): for srcinfo in source: m = GLOBAL_RE.match(srcinfo.text) if not m: # We need more text. continue for item in _parse_next(m, srcinfo, anon_name): if callable(item): parse_body = item yield from parse_body(source) else: yield item else: # We ran out of lines. if srcinfo is not None: srcinfo.done() return
null
185,895
import re

from ._regexes import (
    _ind,
    STRING_LITERAL,
    VAR_DECL as _VAR_DECL,
)


def set_capture_group(pattern, group, *, strict=True):
    old = f'(?: # <{group}>'
    if strict and old not in pattern:
        raise ValueError(f'{old!r} not found in pattern')
    return pattern.replace(old, f'( # <{group}>', 1)


def set_capture_groups(pattern, groups, *, strict=True):
    for group in groups:
        pattern = set_capture_group(pattern, group, strict=strict)
    return pattern
null
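A demo of the capture-group helpers above, assuming they are in scope: verbose patterns mark optional capture points as non-capturing groups tagged with a "# <name>" comment, and set_capture_groups() switches the requested ones on.

import re

PATTERN = r'''
    (?: # <kind>
        struct | union
    )
    \s+
    (?: # <name>
        \w+
    )
'''
pat = set_capture_groups(PATTERN, ('kind', 'name'))
m = re.match(pat, 'struct spam', re.VERBOSE)
print(m.groups())   # ('struct', 'spam')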
185,896
import re

from ._regexes import (
    _ind,
    STRING_LITERAL,
    VAR_DECL as _VAR_DECL,
)


def iter_results(results):
    if not results:
        return
    if callable(results):
        results = results()
    for result, text in results:
        if result:
            yield result, text
null
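iter_results() above accepts a sequence of (result, text) pairs or a callable producing one; falsy results are dropped.

pairs = [(1, 'a'), (None, 'b'), (2, 'c')]
print(list(iter_results(pairs)))                 # [(1, 'a'), (2, 'c')]
print(list(iter_results(lambda: iter(pairs))))   # same output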
185,897
from collections import namedtuple
import enum
import os.path
import re

from c_common import fsutil
from c_common.clsutil import classonly
import c_common.misc as _misc
import c_common.strutil as _strutil
import c_common.tables as _tables
from .parser._regexes import SIMPLE_TYPE, _STORAGE


class KIND(enum.Enum):

    # XXX Use these in the raw parser code.
    TYPEDEF = 'typedef'
    STRUCT = 'struct'
    UNION = 'union'
    ENUM = 'enum'
    FUNCTION = 'function'
    VARIABLE = 'variable'
    STATEMENT = 'statement'

    @classonly
    def _from_raw(cls, raw):
        if raw is None:
            return None
        elif isinstance(raw, cls):
            return raw
        elif type(raw) is str:
            # We could use cls[raw] for the upper-case form,
            # but there's no need to go to the trouble.
            return cls(raw.lower())
        else:
            raise NotImplementedError(raw)

    @classonly
    def by_priority(cls, group=None):
        if group is None:
            return cls._ALL_BY_PRIORITY.copy()
        elif group == 'type':
            return cls._TYPE_DECLS_BY_PRIORITY.copy()
        elif group == 'decl':
            return cls._ALL_DECLS_BY_PRIORITY.copy()
        elif isinstance(group, str):
            raise NotImplementedError(group)
        else:
            # XXX Treat group as a set of kinds & return in priority order?
            raise NotImplementedError(group)

    @classonly
    def is_type_decl(cls, kind):
        if kind in cls.TYPES:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    @classonly
    def is_decl(cls, kind):
        if kind in cls.DECLS:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    @classonly
    def get_group(cls, kind, *, groups=None):
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        if groups is None:
            groups = ['type']
        elif not groups:
            groups = ()
        elif isinstance(groups, str):
            group = groups
            if group not in cls._GROUPS:
                raise ValueError(f'unsupported group {group!r}')
            groups = [group]
        else:
            unsupported = [g for g in groups if g not in cls._GROUPS]
            if unsupported:
                raise ValueError(f'unsupported groups {", ".join(repr(g) for g in unsupported)}')
        for group in groups:
            if kind in cls._GROUPS[group]:
                return group
        else:
            return kind.value

    @classonly
    def resolve_group(cls, group):
        if isinstance(group, cls):
            return {group}
        elif isinstance(group, str):
            try:
                return cls._GROUPS[group].copy()
            except KeyError:
                raise ValueError(f'unsupported group {group!r}')
        else:
            resolved = set()
            for gr in group:
                resolved.update(cls.resolve_group(gr))
            return resolved
            #return {*cls.resolve_group(g) for g in group}


KIND._TYPE_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    KIND.TYPEDEF,
    KIND.STRUCT,
    KIND.UNION,
    KIND.ENUM,
]
KIND._ALL_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._TYPE_DECLS_BY_PRIORITY,
    KIND.FUNCTION,
    KIND.VARIABLE,
]
KIND._ALL_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._ALL_DECLS_BY_PRIORITY,
    KIND.STATEMENT,
]

KIND.TYPES = frozenset(KIND._TYPE_DECLS_BY_PRIORITY)
KIND.DECLS = frozenset(KIND._ALL_DECLS_BY_PRIORITY)
KIND._GROUPS = {
    'type': KIND.TYPES,
    'decl': KIND.DECLS,
}
KIND._GROUPS.update((k.value, {k}) for k in KIND)


def get_kind_group(item):
    return KIND.get_group(item.kind)
null
185,898
from collections import namedtuple import enum import os.path import re from c_common import fsutil from c_common.clsutil import classonly import c_common.misc as _misc import c_common.strutil as _strutil import c_common.tables as _tables from .parser._regexes import SIMPLE_TYPE, _STORAGE def _fix_filename(filename, relroot, *, formatted=True, **kwargs): if formatted: fix = fsutil.format_filename else: fix = fsutil.fix_filename return fix(filename, relroot=relroot, **kwargs)
null
185,899
from collections import namedtuple
import enum
import os.path
import re

from c_common import fsutil
from c_common.clsutil import classonly
import c_common.misc as _misc
import c_common.strutil as _strutil
import c_common.tables as _tables
from .parser._regexes import SIMPLE_TYPE, _STORAGE


class KIND(enum.Enum):

    # XXX Use these in the raw parser code.
    TYPEDEF = 'typedef'
    STRUCT = 'struct'
    UNION = 'union'
    ENUM = 'enum'
    FUNCTION = 'function'
    VARIABLE = 'variable'
    STATEMENT = 'statement'

    @classonly
    def _from_raw(cls, raw):
        if raw is None:
            return None
        elif isinstance(raw, cls):
            return raw
        elif type(raw) is str:
            # We could use cls[raw] for the upper-case form,
            # but there's no need to go to the trouble.
            return cls(raw.lower())
        else:
            raise NotImplementedError(raw)

    @classonly
    def by_priority(cls, group=None):
        if group is None:
            return cls._ALL_BY_PRIORITY.copy()
        elif group == 'type':
            return cls._TYPE_DECLS_BY_PRIORITY.copy()
        elif group == 'decl':
            return cls._ALL_DECLS_BY_PRIORITY.copy()
        elif isinstance(group, str):
            raise NotImplementedError(group)
        else:
            # XXX Treat group as a set of kinds & return in priority order?
            raise NotImplementedError(group)

    @classonly
    def is_type_decl(cls, kind):
        if kind in cls.TYPES:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    @classonly
    def is_decl(cls, kind):
        if kind in cls.DECLS:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    @classonly
    def get_group(cls, kind, *, groups=None):
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        if groups is None:
            groups = ['type']
        elif not groups:
            groups = ()
        elif isinstance(groups, str):
            group = groups
            if group not in cls._GROUPS:
                raise ValueError(f'unsupported group {group!r}')
            groups = [group]
        else:
            unsupported = [g for g in groups if g not in cls._GROUPS]
            if unsupported:
                raise ValueError(f'unsupported groups {", ".join(repr(g) for g in unsupported)}')
        for group in groups:
            if kind in cls._GROUPS[group]:
                return group
        else:
            return kind.value

    @classonly
    def resolve_group(cls, group):
        if isinstance(group, cls):
            return {group}
        elif isinstance(group, str):
            try:
                return cls._GROUPS[group].copy()
            except KeyError:
                raise ValueError(f'unsupported group {group!r}')
        else:
            resolved = set()
            for gr in group:
                resolved.update(cls.resolve_group(gr))
            return resolved
            #return {*cls.resolve_group(g) for g in group}


KIND._TYPE_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    KIND.TYPEDEF,
    KIND.STRUCT,
    KIND.UNION,
    KIND.ENUM,
]
KIND._ALL_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._TYPE_DECLS_BY_PRIORITY,
    KIND.FUNCTION,
    KIND.VARIABLE,
]
KIND._ALL_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._ALL_DECLS_BY_PRIORITY,
    KIND.STATEMENT,
]

KIND.TYPES = frozenset(KIND._TYPE_DECLS_BY_PRIORITY)
KIND.DECLS = frozenset(KIND._ALL_DECLS_BY_PRIORITY)
KIND._GROUPS = {
    'type': KIND.TYPES,
    'decl': KIND.DECLS,
}
KIND._GROUPS.update((k.value, {k}) for k in KIND)


def _fmt_line(parsed, data=None):
    parts = [
        f'<{parsed.kind.value}>',
    ]
    parent = ''
    if parsed.parent:
        parent = parsed.parent
        if not isinstance(parent, str):
            if parent.kind is KIND.FUNCTION:
                parent = f'{parent.name}()'
            else:
                parent = parent.name
        name = f'<{parent}>.{parsed.name}'
    else:
        name = parsed.name
    if data is None:
        data = parsed.data
    elif data is iter(data):
        data, = data
    parts.extend([
        name,
        f'<{data}>' if data else '-',
        f'({str(parsed.file or "<unknown file>")})',
    ])
    yield '\t'.join(parts)
null
185,900
from collections import namedtuple import enum import os.path import re from c_common import fsutil from c_common.clsutil import classonly import c_common.misc as _misc import c_common.strutil as _strutil import c_common.tables as _tables from .parser._regexes import SIMPLE_TYPE, _STORAGE class KIND(enum.Enum): def _from_raw(cls, raw): def by_priority(cls, group=None): def is_type_decl(cls, kind): def is_decl(cls, kind): def get_group(cls, kind, *, groups=None): def resolve_group(cls, group): KIND._TYPE_DECLS_BY_PRIORITY = [ # These are in preferred order. KIND.TYPEDEF, KIND.STRUCT, KIND.UNION, KIND.ENUM, ] KIND._ALL_DECLS_BY_PRIORITY = [ # These are in preferred order. *KIND._TYPE_DECLS_BY_PRIORITY, KIND.FUNCTION, KIND.VARIABLE, ] KIND._ALL_BY_PRIORITY = [ # These are in preferred order. *KIND._ALL_DECLS_BY_PRIORITY, KIND.STATEMENT, ] KIND.TYPES = frozenset(KIND._TYPE_DECLS_BY_PRIORITY) KIND.DECLS = frozenset(KIND._ALL_DECLS_BY_PRIORITY) KIND._GROUPS = { 'type': KIND.TYPES, 'decl': KIND.DECLS, } KIND._GROUPS.update((k.value, {k}) for k in KIND) def _fmt_full(parsed, data=None): if parsed.kind is KIND.VARIABLE and parsed.parent: prefix = 'local ' suffix = f' ({parsed.parent.name})' else: # XXX Show other prefixes (e.g. global, public) prefix = suffix = '' yield f'{prefix}{parsed.kind.value} {parsed.name!r}{suffix}' for column, info in parsed.render_rowdata().items(): if column == 'kind': continue if column == 'name': continue if column == 'parent' and parsed.kind is not KIND.VARIABLE: continue if column == 'data': if parsed.kind in (KIND.STRUCT, KIND.UNION): column = 'members' elif parsed.kind is KIND.ENUM: column = 'enumerators' elif parsed.kind is KIND.STATEMENT: column = 'text' data, = data else: column = 'signature' data, = data if not data: # yield f'\t{column}:\t-' continue elif isinstance(data, str): yield f'\t{column}:\t{data!r}' else: yield f'\t{column}:' for line in data: yield f'\t\t- {line}' else: yield f'\t{column}:\t{info}'
null
185,901
from collections import namedtuple import enum import os.path import re from c_common import fsutil from c_common.clsutil import classonly import c_common.misc as _misc import c_common.strutil as _strutil import c_common.tables as _tables from .parser._regexes import SIMPLE_TYPE, _STORAGE class HighlevelParsedItem: def from_parsed(cls, parsed): def _resolve_file(cls, parsed): def _resolve_data(cls, data): def _raw_data(cls, data, extra): def _data_as_row(cls, data, extra, colnames): def _render_data_row_item(cls, colname, data, extra): def _render_data_row(cls, fmt, data, extra, colnames): def _render_data(cls, fmt, data, extra): def _resolve_parent(cls, parsed, *, _kind=None): def _parse_columns(cls, columns): def __init__(self, file, name, data, parent=None, *, _extra=None, _shortkey=None, _key=None, ): def __repr__(self): def __str__(self): def __getattr__(self, name): def __hash__(self): def __eq__(self, other): def __gt__(self, other): def id(self): def shortkey(self): def key(self): def filename(self): def parsed(self): def fix_filename(self, relroot=fsutil.USE_CWD, **kwargs): def as_rowdata(self, columns=None): def render_rowdata(self, columns=None): def data_as_row(data, ext, cols): def _as_row(self, colnames, datacolumns, data_as_row): def render(self, fmt='line'): KIND_CLASSES = {cls.kind: cls for cls in [ Variable, Function, TypeDef, Struct, Union, Enum, Statement, ]} def resolve_parsed(parsed): if isinstance(parsed, HighlevelParsedItem): return parsed try: cls = KIND_CLASSES[parsed.kind] except KeyError: raise ValueError(f'unsupported kind in {parsed!r}') return cls.from_parsed(parsed)
null
185,902
def parse(srclines): if isinstance(srclines, str): # a filename raise NotImplementedError
null
185,903
import os.path from c_common import fsutil import c_common.tables as _tables import c_parser.info as _info def _get_columns(group, extra=None): return BASE_COLUMNS + list(extra or ()) + [END_COLUMNS[group]] #return [ # *BASE_COLUMNS, # *extra or (), # END_COLUMNS[group], #] def read_parsed(infile): # XXX Support other formats than TSV? columns = _get_columns('parsed') for row in _tables.read_table(infile, columns, sep='\t', fix='-'): yield _info.ParsedItem.from_row(row, columns)
null
185,904
import os.path from c_common import fsutil import c_common.tables as _tables import c_parser.info as _info def _get_columns(group, extra=None): return BASE_COLUMNS + list(extra or ()) + [END_COLUMNS[group]] #return [ # *BASE_COLUMNS, # *extra or (), # END_COLUMNS[group], #] def write_parsed(items, outfile): # XXX Support other formats than TSV? columns = _get_columns('parsed') rows = (item.as_row(columns) for item in items) _tables.write_table(outfile, columns, rows, sep='\t', fix='-')
null
185,905
import os.path

from c_common import fsutil
import c_common.tables as _tables
import c_parser.info as _info


def _get_format(file, default='tsv'):
    ...


def _get_format_handlers(group, fmt):
    ...


def read_decls(infile, fmt=None):
    if fmt is None:
        fmt = _get_format(infile)
    read_all, _ = _get_format_handlers('decls', fmt)
    for decl, _ in read_all(infile):
        yield decl
null
185,906
import os.path

from c_common import fsutil
import c_common.tables as _tables
import c_parser.info as _info


def _get_format(file, default='tsv'):
    if isinstance(file, str):
        filename = file
    else:
        filename = getattr(file, 'name', '')
    _, ext = os.path.splitext(filename)
    return ext[1:] if ext else default


def _get_format_handlers(group, fmt):
    # XXX Use a registry.
    if group != 'decls':
        raise NotImplementedError(group)
    if fmt == 'tsv':
        return (_iter_decls_tsv, _write_decls_tsv)
    else:
        raise NotImplementedError(fmt)


def write_decls(decls, outfile, fmt=None, *, backup=False):
    if fmt is None:
        fmt = _get_format(outfile)
    _, write_all = _get_format_handlers('decls', fmt)
    write_all(decls, outfile, backup=backup)
null
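The format is inferred from the file extension with "tsv" as the fallback; a quick check of _get_format above (assuming it is in scope).

import io

assert _get_format('known.tsv') == 'tsv'
assert _get_format('known.json') == 'json'
assert _get_format('known') == 'tsv'            # no extension -> default
assert _get_format(io.StringIO()) == 'tsv'      # no .name attribute -> default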
185,907
import contextlib
import sys

from cpython.__main__ import main, configure_logger


def add_verbosity_cli(parser):
    parser.add_argument('-q', '--quiet', action='count', default=0)
    parser.add_argument('-v', '--verbose', action='count', default=0)

    def process_args(args, *, argv=None):
        ns = vars(args)
        key = 'verbosity'
        if key in ns:
            parser.error(f'duplicate arg {key!r}')
        ns[key] = max(0, VERBOSITY + ns.pop('verbose') - ns.pop('quiet'))
        return key
    return process_args


def add_traceback_cli(parser):
    parser.add_argument('--traceback', '--tb', action='store_true',
                        default=TRACEBACK)
    parser.add_argument('--no-traceback', '--no-tb', dest='traceback',
                        action='store_const', const=False)

    def process_args(args, *, argv=None):
        ns = vars(args)
        key = 'traceback_cm'
        if key in ns:
            parser.error(f'duplicate arg {key!r}')
        showtb = ns.pop('traceback')

        @contextlib.contextmanager
        def traceback_cm():
            restore = loggingutil.hide_emit_errors()
            try:
                yield
            except BrokenPipeError:
                # It was piped to "head" or something similar.
                pass
            except NotImplementedError:
                raise  # re-raise
            except Exception as exc:
                if not showtb:
                    sys.exit(f'ERROR: {exc}')
                raise  # re-raise
            except KeyboardInterrupt:
                if not showtb:
                    sys.exit('\nINTERRUPTED')
                raise  # re-raise
            except BaseException as exc:
                if not showtb:
                    sys.exit(f'{type(exc).__name__}: {exc}')
                raise  # re-raise
            finally:
                restore()
        ns[key] = traceback_cm()
        return key
    return process_args


def process_args_by_key(args, argv, processors, keys):
    extracted = process_args(args, argv, processors, keys=keys)
    return [extracted[key] for key in keys]


def _cli_check(parser, **kwargs):
    return c_analyzer._cli_check(parser, CHECKS, **kwargs, **FILES_KWARGS)


def parse_args(argv=None):
    import argparse
    from c_common.scriptutil import (
        add_verbosity_cli,
        add_traceback_cli,
        process_args_by_key,
    )
    from cpython.__main__ import _cli_check
    parser = argparse.ArgumentParser()
    processors = [
        add_verbosity_cli(parser),
        add_traceback_cli(parser),
        _cli_check(parser, checks='<globals>'),
    ]

    args = parser.parse_args(argv)
    ns = vars(args)

    cmd = 'check'

    verbosity, traceback_cm = process_args_by_key(
        args,
        argv,
        processors,
        ['verbosity', 'traceback_cm'],
    )

    return cmd, ns, verbosity, traceback_cm


# The script body runs only once everything above is defined.
(cmd, cmd_kwargs, verbosity, traceback_cm) = parse_args(sys.argv[1:])
configure_logger(verbosity)
with traceback_cm:
    main(cmd, cmd_kwargs)
null
185,908
import io import logging import os import os.path import re import sys from c_common import fsutil from c_common.logging import VERBOSITY, Printer from c_common.scriptutil import ( add_verbosity_cli, add_traceback_cli, add_sepval_cli, add_progress_cli, add_files_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, filter_filenames, iter_marks, ) from c_parser.info import KIND from c_parser.match import is_type_decl from .match import filter_forward from . import ( analyze as _analyze, datafiles as _datafiles, check_all as _check_all, ) def fmt_raw(analysis): for item in analysis: yield from item.render('raw')
null
185,909
import enum
import io
import logging
import os
import os.path
import re
import sys

from c_common import fsutil
from c_common.logging import VERBOSITY, Printer
from c_common.scriptutil import (
    add_verbosity_cli,
    add_traceback_cli,
    add_sepval_cli,
    add_progress_cli,
    add_files_cli,
    add_commands_cli,
    process_args_by_key,
    configure_logger,
    get_prog,
    filter_filenames,
    iter_marks,
)
from c_parser.info import KIND
from c_parser.match import is_type_decl
from .match import filter_forward
from . import (
    analyze as _analyze,
    datafiles as _datafiles,
    check_all as _check_all,
)


KINDS = [
    KIND.TYPEDEF,
    KIND.STRUCT,
    KIND.UNION,
    KIND.ENUM,
    KIND.FUNCTION,
    KIND.VARIABLE,
    KIND.STATEMENT,
]


class KIND(enum.Enum):

    # XXX Use these in the raw parser code.
    TYPEDEF = 'typedef'
    STRUCT = 'struct'
    UNION = 'union'
    ENUM = 'enum'
    FUNCTION = 'function'
    VARIABLE = 'variable'
    STATEMENT = 'statement'

    def _from_raw(cls, raw):
        if raw is None:
            return None
        elif isinstance(raw, cls):
            return raw
        elif type(raw) is str:
            # We could use cls[raw] for the upper-case form,
            # but there's no need to go to the trouble.
            return cls(raw.lower())
        else:
            raise NotImplementedError(raw)

    def by_priority(cls, group=None):
        if group is None:
            return cls._ALL_BY_PRIORITY.copy()
        elif group == 'type':
            return cls._TYPE_DECLS_BY_PRIORITY.copy()
        elif group == 'decl':
            return cls._ALL_DECLS_BY_PRIORITY.copy()
        elif isinstance(group, str):
            raise NotImplementedError(group)
        else:
            # XXX Treat group as a set of kinds & return in priority order?
            raise NotImplementedError(group)

    def is_type_decl(cls, kind):
        if kind in cls.TYPES:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    def is_decl(cls, kind):
        if kind in cls.DECLS:
            return True
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        return False

    def get_group(cls, kind, *, groups=None):
        if not isinstance(kind, cls):
            raise TypeError(f'expected KIND, got {kind!r}')
        if groups is None:
            groups = ['type']
        elif not groups:
            groups = ()
        elif isinstance(groups, str):
            group = groups
            if group not in cls._GROUPS:
                raise ValueError(f'unsupported group {group!r}')
            groups = [group]
        else:
            unsupported = [g for g in groups if g not in cls._GROUPS]
            if unsupported:
                raise ValueError(f'unsupported groups {", ".join(repr(g) for g in unsupported)}')
        for group in groups:
            if kind in cls._GROUPS[group]:
                return group
        else:
            return kind.value

    def resolve_group(cls, group):
        if isinstance(group, cls):
            return {group}
        elif isinstance(group, str):
            try:
                return cls._GROUPS[group].copy()
            except KeyError:
                raise ValueError(f'unsupported group {group!r}')
        else:
            resolved = set()
            for gr in group:
                resolved.update(cls.resolve_group(gr))
            return resolved
            #return {*cls.resolve_group(g) for g in group}


KIND._TYPE_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    KIND.TYPEDEF,
    KIND.STRUCT,
    KIND.UNION,
    KIND.ENUM,
]
KIND._ALL_DECLS_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._TYPE_DECLS_BY_PRIORITY,
    KIND.FUNCTION,
    KIND.VARIABLE,
]
KIND._ALL_BY_PRIORITY = [
    # These are in preferred order.
    *KIND._ALL_DECLS_BY_PRIORITY,
    KIND.STATEMENT,
]

KIND.TYPES = frozenset(KIND._TYPE_DECLS_BY_PRIORITY)
KIND.DECLS = frozenset(KIND._ALL_DECLS_BY_PRIORITY)
KIND._GROUPS = {
    'type': KIND.TYPES,
    'decl': KIND.DECLS,
}
KIND._GROUPS.update((k.value, {k}) for k in KIND)


def fmt_brief(analysis):
    # XXX Support sorting.
    items = sorted(analysis)
    for kind in KINDS:
        if kind is KIND.STATEMENT:
            continue
        for item in items:
            if item.kind is not kind:
                continue
            yield from item.render('brief')
    yield f' total: {len(items)}'
null
185,910
import io import logging import os import os.path import re import sys from c_common import fsutil from c_common.logging import VERBOSITY, Printer from c_common.scriptutil import ( add_verbosity_cli, add_traceback_cli, add_sepval_cli, add_progress_cli, add_files_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, filter_filenames, iter_marks, ) from c_parser.info import KIND from c_parser.match import is_type_decl from .match import filter_forward from . import ( analyze as _analyze, datafiles as _datafiles, check_all as _check_all, ) def fmt_full(analysis): # XXX Support sorting. items = sorted(analysis, key=lambda v: v.key) yield '' for item in items: yield from item.render('full') yield '' yield f'total: {len(items)}'
null
185,911
import io import logging import os import os.path import re import sys from c_common import fsutil from c_common.logging import VERBOSITY, Printer from c_common.scriptutil import ( add_verbosity_cli, add_traceback_cli, add_sepval_cli, add_progress_cli, add_files_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, filter_filenames, iter_marks, ) from c_parser.info import KIND from c_parser.match import is_type_decl from .match import filter_forward from . import ( analyze as _analyze, datafiles as _datafiles, check_all as _check_all, ) def add_checks_cli(parser, checks=None, *, add_flags=None): default = False if not checks: checks = list(CHECKS) default = True elif isinstance(checks, str): checks = [checks] if (add_flags is None and len(checks) > 1) or default: add_flags = True process_checks = add_sepval_cli(parser, '--check', 'checks', checks) if add_flags: for check in checks: parser.add_argument(f'--{check}', dest='checks', action='append_const', const=check) return [ process_checks, ] def add_output_cli(parser, *, default='summary'): parser.add_argument('--format', dest='fmt', default=default, choices=tuple(FORMATS)) def process_args(args, *, argv=None): pass return process_args def add_files_cli(parser, *, excluded=None, nargs=None): process_files = add_file_filtering_cli(parser, excluded=excluded) parser.add_argument('filenames', nargs=nargs or '+', metavar='FILENAME') return [ process_files, ] def add_progress_cli(parser, *, threshold=VERBOSITY, **kwargs): parser.add_argument('--progress', dest='track_progress', action='store_const', const=True) parser.add_argument('--no-progress', dest='track_progress', action='store_false') parser.set_defaults(track_progress=True) def process_args(args, *, argv=None): if args.track_progress: ns = vars(args) verbosity = ns.get('verbosity', VERBOSITY) if verbosity <= threshold: args.track_progress = track_progress_compact else: args.track_progress = track_progress_flat return process_args def _cli_check(parser, checks=None, **kwargs): if isinstance(checks, str): checks = [checks] if checks is False: process_checks = None elif checks is None: process_checks = add_checks_cli(parser) elif len(checks) == 1 and type(checks) is not dict and re.match(r'^<.*>$', checks[0]): check = checks[0][1:-1] def process_checks(args, *, argv=None): args.checks = [check] else: process_checks = add_checks_cli(parser, checks=checks) process_progress = add_progress_cli(parser) process_output = add_output_cli(parser, default=None) process_files = add_files_cli(parser, **kwargs) return [ process_checks, process_progress, process_output, process_files, ]
null
185,912
import io import logging import os import os.path import re import sys from c_common import fsutil from c_common.logging import VERBOSITY, Printer from c_common.scriptutil import ( add_verbosity_cli, add_traceback_cli, add_sepval_cli, add_progress_cli, add_files_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, filter_filenames, iter_marks, ) from c_parser.info import KIND from c_parser.match import is_type_decl from .match import filter_forward from . import ( analyze as _analyze, datafiles as _datafiles, check_all as _check_all, ) logger = logging.getLogger(__name__) CHECKS = { #'globals': _check_globals, } def _get_check_handlers(fmt, printer, verbosity=VERBOSITY): div = None def handle_after(): pass if not fmt: div = '' def handle_failure(failure, data): data = repr(data) if verbosity >= 3: logger.info(f'failure: {failure}') logger.info(f'data: {data}') else: logger.warn(f'failure: {failure} (data: {data})') elif fmt == 'raw': def handle_failure(failure, data): print(f'{failure!r} {data!r}') elif fmt == 'brief': def handle_failure(failure, data): parent = data.parent or '' funcname = parent if isinstance(parent, str) else parent.name name = f'({funcname}).{data.name}' if funcname else data.name failure = failure.split('\t')[0] print(f'{data.filename}:{name} - {failure}') elif fmt == 'summary': def handle_failure(failure, data): print(_fmt_one_summary(data, failure)) elif fmt == 'full': div = '' def handle_failure(failure, data): name = data.shortkey if data.kind is KIND.VARIABLE else data.name parent = data.parent or '' funcname = parent if isinstance(parent, str) else parent.name known = 'yes' if data.is_known else '*** NO ***' print(f'{data.kind.value} {name!r} failed ({failure})') print(f' file: {data.filename}') print(f' func: {funcname or "-"}') print(f' name: {data.name}') print(f' data: ...') print(f' type unknown: {known}') else: if fmt in FORMATS: raise NotImplementedError(fmt) raise ValueError(f'unsupported fmt {fmt!r}') return handle_failure, handle_after, div def _fmt_one_summary(item, extra=None): parent = item.parent or '' funcname = parent if isinstance(parent, str) else parent.name if extra: return f'{item.filename:35}\t{funcname or "-":35}\t{item.name:40}\t{extra}' else: return f'{item.filename:35}\t{funcname or "-":35}\t{item.name}' VERBOSITY = 3 class Printer: def __init__(self, verbosity=VERBOSITY): self.verbosity = verbosity def info(self, *args, **kwargs): if self.verbosity < 3: return print(*args, **kwargs) def filter_filenames(filenames, process_filenames=None, relroot=fsutil.USE_CWD): # We expect each filename to be a normalized, absolute path. for filename, _, check, _ in _iter_filenames(filenames, process_filenames, relroot): if (reason := check()): logger.debug(f'{filename}: {reason}') continue yield filename def filter_forward(items, *, markpublic=False): if markpublic: public = set() actual = [] for item in items: if is_public_api(item): public.add(item.id) elif not _match.is_forward_decl(item): actual.append(item) else: # non-public duplicate! # XXX raise Exception(item) for item in actual: _info.set_flag(item, 'is_public', item.id in public) yield item else: for item in items: if _match.is_forward_decl(item): continue yield item def group_by_storage(decls, **kwargs): def is_module_global(decl): if not is_module_global_decl(decl): return False if decl.kind == _KIND.VARIABLE: if _info.get_effective_storage(decl) == 'static': # This is covered by is_static_module_global(). 
return False return True def is_static_module_global(decl): if not _match.is_global_var(decl): return False return _info.get_effective_storage(decl) == 'static' def is_static_local(decl): if not _match.is_local_var(decl): return False return _info.get_effective_storage(decl) == 'static' #def is_local(decl): # if not _match.is_local_var(decl): # return False # return _info.get_effective_storage(decl) != 'static' categories = { #'extern': is_extern, 'published': is_public_impl, 'module-global': is_module_global, 'static-module-global': is_static_module_global, 'static-local': is_static_local, } return _match.group_by_category(decls, categories, **kwargs) def cmd_check(filenames, *, checks=None, ignored=None, fmt=None, failfast=False, iter_filenames=None, relroot=fsutil.USE_CWD, track_progress=None, verbosity=VERBOSITY, _analyze=_analyze, _CHECKS=CHECKS, **kwargs ): if not checks: checks = _CHECKS elif isinstance(checks, str): checks = [checks] checks = [_CHECKS[c] if isinstance(c, str) else c for c in checks] printer = Printer(verbosity) (handle_failure, handle_after, div ) = _get_check_handlers(fmt, printer, verbosity) filenames, relroot = fsutil.fix_filenames(filenames, relroot=relroot) filenames = filter_filenames(filenames, iter_filenames, relroot) if track_progress: filenames = track_progress(filenames) logger.info('analyzing files...') analyzed = _analyze(filenames, **kwargs) analyzed.fix_filenames(relroot, normalize=False) decls = filter_forward(analyzed, markpublic=True) logger.info('checking analysis results...') failed = [] for data, failure in _check_all(decls, checks, failfast=failfast): if data is None: printer.info('stopping after one failure') break if div is not None and len(failed) > 0: printer.info(div) failed.append(data) handle_failure(failure, data) handle_after() printer.info('-------------------------') logger.info(f'total failures: {len(failed)}') logger.info('done checking') if fmt == 'summary': print('Categorized by storage:') print() from .match import group_by_storage grouped = group_by_storage(failed, ignore_non_match=False) for group, decls in grouped.items(): print() print(group) for decl in decls: print(' ', _fmt_one_summary(decl)) print(f'subtotal: {len(decls)}') if len(failed) > 0: sys.exit(len(failed))
null
185,913
import io import logging import os import os.path import re import sys from c_common import fsutil from c_common.logging import VERBOSITY, Printer from c_common.scriptutil import ( add_verbosity_cli, add_traceback_cli, add_sepval_cli, add_progress_cli, add_files_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, filter_filenames, iter_marks, ) from c_parser.info import KIND from c_parser.match import is_type_decl from .match import filter_forward from . import ( analyze as _analyze, datafiles as _datafiles, check_all as _check_all, ) def add_output_cli(parser, *, default='summary'): parser.add_argument('--format', dest='fmt', default=default, choices=tuple(FORMATS)) def process_args(args, *, argv=None): pass return process_args def add_files_cli(parser, *, excluded=None, nargs=None): process_files = add_file_filtering_cli(parser, excluded=excluded) parser.add_argument('filenames', nargs=nargs or '+', metavar='FILENAME') return [ process_files, ] def add_progress_cli(parser, *, threshold=VERBOSITY, **kwargs): parser.add_argument('--progress', dest='track_progress', action='store_const', const=True) parser.add_argument('--no-progress', dest='track_progress', action='store_false') parser.set_defaults(track_progress=True) def process_args(args, *, argv=None): if args.track_progress: ns = vars(args) verbosity = ns.get('verbosity', VERBOSITY) if verbosity <= threshold: args.track_progress = track_progress_compact else: args.track_progress = track_progress_flat return process_args def _cli_analyze(parser, **kwargs): process_progress = add_progress_cli(parser) process_output = add_output_cli(parser) process_files = add_files_cli(parser, **kwargs) return [ process_progress, process_output, process_files, ]
null
185,914
import io import logging import os import os.path import re import sys from c_common import fsutil from c_common.logging import VERBOSITY, Printer from c_common.scriptutil import ( add_verbosity_cli, add_traceback_cli, add_sepval_cli, add_progress_cli, add_files_cli, add_commands_cli, process_args_by_key, configure_logger, get_prog, filter_filenames, iter_marks, ) from c_parser.info import KIND from c_parser.match import is_type_decl from .match import filter_forward from . import ( analyze as _analyze, datafiles as _datafiles, check_all as _check_all, ) logger = logging.getLogger(__name__) FORMATS = { 'raw': fmt_raw, 'brief': fmt_brief, 'summary': fmt_summary, 'full': fmt_full, } def filter_filenames(filenames, process_filenames=None, relroot=fsutil.USE_CWD): # We expect each filename to be a normalized, absolute path. for filename, _, check, _ in _iter_filenames(filenames, process_filenames, relroot): if (reason := check()): logger.debug(f'{filename}: {reason}') continue yield filename def filter_forward(items, *, markpublic=False): if markpublic: public = set() actual = [] for item in items: if is_public_api(item): public.add(item.id) elif not _match.is_forward_decl(item): actual.append(item) else: # non-public duplicate! # XXX raise Exception(item) for item in actual: _info.set_flag(item, 'is_public', item.id in public) yield item else: for item in items: if _match.is_forward_decl(item): continue yield item def cmd_analyze(filenames, *, fmt=None, iter_filenames=None, relroot=fsutil.USE_CWD, track_progress=None, verbosity=None, _analyze=_analyze, formats=FORMATS, **kwargs ): verbosity = verbosity if verbosity is not None else 3 try: do_fmt = formats[fmt] except KeyError: raise ValueError(f'unsupported fmt {fmt!r}') filenames, relroot = fsutil.fix_filenames(filenames, relroot=relroot) filenames = filter_filenames(filenames, iter_filenames, relroot) if track_progress: filenames = track_progress(filenames) logger.info('analyzing files...') analyzed = _analyze(filenames, **kwargs) analyzed.fix_filenames(relroot, normalize=False) decls = filter_forward(analyzed, markpublic=True) for line in do_fmt(decls): print(line)
null
185,915
import contextlib
import io
import logging
import os
import os.path
import re
import sys

from c_common import fsutil
from c_common.logging import VERBOSITY, Printer
from c_common.scriptutil import (
    add_verbosity_cli,
    add_traceback_cli,
    add_sepval_cli,
    add_progress_cli,
    add_files_cli,
    add_commands_cli,
    process_args_by_key,
    configure_logger,
    get_prog,
    filter_filenames,
    iter_marks,
)
from c_parser.info import KIND
from c_parser.match import is_type_decl
from .match import filter_forward
from . import (
    analyze as _analyze,
    datafiles as _datafiles,
    check_all as _check_all,
)


def add_verbosity_cli(parser):
    parser.add_argument('-q', '--quiet', action='count', default=0)
    parser.add_argument('-v', '--verbose', action='count', default=0)

    def process_args(args, *, argv=None):
        ns = vars(args)
        key = 'verbosity'
        if key in ns:
            parser.error(f'duplicate arg {key!r}')
        ns[key] = max(0, VERBOSITY + ns.pop('verbose') - ns.pop('quiet'))
        return key
    return process_args


def add_traceback_cli(parser):
    parser.add_argument('--traceback', '--tb', action='store_true',
                        default=TRACEBACK)
    parser.add_argument('--no-traceback', '--no-tb', dest='traceback',
                        action='store_const', const=False)

    def process_args(args, *, argv=None):
        ns = vars(args)
        key = 'traceback_cm'
        if key in ns:
            parser.error(f'duplicate arg {key!r}')
        showtb = ns.pop('traceback')

        @contextlib.contextmanager
        def traceback_cm():
            restore = loggingutil.hide_emit_errors()
            try:
                yield
            except BrokenPipeError:
                # It was piped to "head" or something similar.
                pass
            except NotImplementedError:
                raise  # re-raise
            except Exception as exc:
                if not showtb:
                    sys.exit(f'ERROR: {exc}')
                raise  # re-raise
            except KeyboardInterrupt:
                if not showtb:
                    sys.exit('\nINTERRUPTED')
                raise  # re-raise
            except BaseException as exc:
                if not showtb:
                    sys.exit(f'{type(exc).__name__}: {exc}')
                raise  # re-raise
            finally:
                restore()
        ns[key] = traceback_cm()
        return key
    return process_args


def add_progress_cli(parser, *, threshold=VERBOSITY, **kwargs):
    parser.add_argument('--progress', dest='track_progress',
                        action='store_const', const=True)
    parser.add_argument('--no-progress', dest='track_progress',
                        action='store_false')
    parser.set_defaults(track_progress=True)

    def process_args(args, *, argv=None):
        if args.track_progress:
            ns = vars(args)
            verbosity = ns.get('verbosity', VERBOSITY)
            if verbosity <= threshold:
                args.track_progress = track_progress_compact
            else:
                args.track_progress = track_progress_flat
    return process_args


def _cli_data(parser, filenames=None, known=None):
    ArgumentParser = type(parser)
    common = ArgumentParser(add_help=False)
    # These flags will get processed by the top-level parse_args().
    add_verbosity_cli(common)
    add_traceback_cli(common)

    subs = parser.add_subparsers(dest='datacmd')

    sub = subs.add_parser('show', parents=[common])
    if known is None:
        sub.add_argument('--known', required=True)
    if filenames is None:
        sub.add_argument('filenames', metavar='FILE', nargs='+')

    sub = subs.add_parser('dump', parents=[common])
    if known is None:
        sub.add_argument('--known')
    sub.add_argument('--show', action='store_true')
    process_progress = add_progress_cli(sub)

    sub = subs.add_parser('check', parents=[common])
    if known is None:
        sub.add_argument('--known', required=True)

    def process_args(args, *, argv):
        if args.datacmd == 'dump':
            process_progress(args, argv=argv)
    return process_args
null
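The `add_*_cli` helpers above all follow the same protocol: register arguments, then return a `process_args` callback that folds the raw namespace values into one derived key and returns that key. A small sketch of wiring two of them together; the `TRACEBACK` constant stands in for a module-level default that is elided from the snippet:

import argparse

TRACEBACK = False  # stand-in for the module constant elided above

parser = argparse.ArgumentParser(prog='example')
process_verbosity = add_verbosity_cli(parser)
process_traceback = add_traceback_cli(parser)

args = parser.parse_args(['-vv'])
# Each processor mutates vars(args) and returns the key it added.
keys = [process_verbosity(args), process_traceback(args)]
print(keys)            # ['verbosity', 'traceback_cm']
print(args.verbosity)  # VERBOSITY + 2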
185,916
import io
import logging
import os
import os.path
import re
import sys

from c_common import fsutil
from c_common.logging import VERBOSITY, Printer
from c_common.scriptutil import (
    add_verbosity_cli,
    add_traceback_cli,
    add_sepval_cli,
    add_progress_cli,
    add_files_cli,
    add_commands_cli,
    process_args_by_key,
    configure_logger,
    get_prog,
    filter_filenames,
    iter_marks,
)
from c_parser.info import KIND
from c_parser.match import is_type_decl
from .match import filter_forward
from . import (
    analyze as _analyze,
    datafiles as _datafiles,
    check_all as _check_all,
)

FORMATS = {
    'raw': fmt_raw,
    'brief': fmt_brief,
    'summary': fmt_summary,
    'full': fmt_full,
}

def cmd_data(datacmd, filenames, known=None, *,
             _analyze=_analyze,
             formats=FORMATS,
             extracolumns=None,
             relroot=fsutil.USE_CWD,
             track_progress=None,
             **kwargs
             ):
    kwargs.pop('verbosity', None)
    usestdout = kwargs.pop('show', None)
    if datacmd == 'show':
        do_fmt = formats['summary']
        if isinstance(known, str):
            # get_known() takes relroot as a keyword-only argument.
            known, _ = _datafiles.get_known(known, extracolumns,
                                            relroot=relroot)
        for line in do_fmt(known):
            print(line)
    elif datacmd == 'dump':
        filenames, relroot = fsutil.fix_filenames(filenames, relroot=relroot)
        if track_progress:
            filenames = track_progress(filenames)
        analyzed = _analyze(filenames, **kwargs)
        analyzed.fix_filenames(relroot, normalize=False)
        if known is None or usestdout:
            outfile = io.StringIO()
            _datafiles.write_known(analyzed, outfile, extracolumns,
                                   relroot=relroot)
            print(outfile.getvalue())
        else:
            _datafiles.write_known(analyzed, known, extracolumns,
                                   relroot=relroot)
    elif datacmd == 'check':
        raise NotImplementedError(datacmd)
    else:
        raise ValueError(f'unsupported data command {datacmd!r}')
null
185,917
import io
import logging
import os
import os.path
import re
import sys

from c_common import fsutil
from c_common.logging import VERBOSITY, Printer
from c_common.scriptutil import (
    add_verbosity_cli,
    add_traceback_cli,
    add_sepval_cli,
    add_progress_cli,
    add_files_cli,
    add_commands_cli,
    process_args_by_key,
    configure_logger,
    get_prog,
    filter_filenames,
    iter_marks,
)
from c_parser.info import KIND
from c_parser.match import is_type_decl
from .match import filter_forward
from . import (
    analyze as _analyze,
    datafiles as _datafiles,
    check_all as _check_all,
)

COMMANDS = {
    'check': (
        'analyze and fail if the given C source/header files have any problems',
        [_cli_check],
        cmd_check,
    ),
    'analyze': (
        'report on the state of the given C source/header files',
        [_cli_analyze],
        cmd_analyze,
    ),
    'data': (
        'check/manage local data (e.g. known types, ignored vars, caches)',
        [_cli_data],
        cmd_data,
    ),
}

# Helpers imported above from c_common.scriptutil (bodies elided here):
#   get_prog(spec=None, *, absolute=False, allowsuffix=True)
#   add_verbosity_cli(parser)
#   add_traceback_cli(parser)
#   add_commands_cli(parser, commands, *, commonspecs=COMMON_CLI, subset=None)
#   process_args_by_key(args, argv, processors, keys)

def parse_args(argv=sys.argv[1:], prog=sys.argv[0], *, subset=None):
    import argparse
    parser = argparse.ArgumentParser(
        prog=prog or get_prog(),
    )

    processors = add_commands_cli(
        parser,
        commands={k: v[1] for k, v in COMMANDS.items()},
        commonspecs=[
            add_verbosity_cli,
            add_traceback_cli,
        ],
        subset=subset,
    )

    args = parser.parse_args(argv)
    ns = vars(args)

    cmd = ns.pop('cmd')

    verbosity, traceback_cm = process_args_by_key(
        args,
        argv,
        processors[cmd],
        ['verbosity', 'traceback_cm'],
    )
    # "verbosity" is sent to the commands, so we put it back.
    args.verbosity = verbosity

    return cmd, ns, verbosity, traceback_cm
null
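A sketch of the entry point this `parse_args` is built for, modeled on the usual driver for such a COMMANDS table; the `main` function itself is not part of the snippet:

def main(cmd, cmd_kwargs):
    try:
        run_cmd = COMMANDS[cmd][-1]   # the cmd_* callable
    except KeyError:
        raise ValueError(f'unsupported cmd {cmd!r}')
    run_cmd(**cmd_kwargs)


if __name__ == '__main__':
    cmd, cmd_kwargs, verbosity, traceback_cm = parse_args()
    configure_logger(verbosity)
    with traceback_cm:
        main(cmd, cmd_kwargs)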
185,918
import os.path from c_parser import ( info as _info, match as _match, ) _KIND = _info.KIND def is_public(decl): if not decl.filename.endswith('.h'): return False if 'Include' not in decl.filename.split(os.path.sep): return False return True def is_public_declaration(decl): if not is_public(decl): return False if decl.kind is _KIND.TYPEDEF: return True elif _match.is_type_decl(decl): return _match.is_forward_decl(decl) else: return _match.is_external_reference(decl)
null
185,919
import os.path from c_parser import ( info as _info, match as _match, ) _KIND = _info.KIND def is_public(decl): if not decl.filename.endswith('.h'): return False if 'Include' not in decl.filename.split(os.path.sep): return False return True def is_public_definition(decl): if not is_public(decl): return False if decl.kind is _KIND.TYPEDEF: return True elif _match.is_type_decl(decl): return not _match.is_forward_decl(decl) else: return not _match.is_external_reference(decl)
null
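Since `is_public` is purely a path check, the declaration/definition split above is easy to demonstrate with a stand-in object; the `Decl` namedtuple here is hypothetical, used only so the example is self-contained:

import os.path
from collections import namedtuple

Decl = namedtuple('Decl', ['filename'])  # hypothetical stand-in

print(is_public(Decl(os.path.join('Include', 'object.h'))))    # True
print(is_public(Decl(os.path.join('Python', 'ceval.c'))))      # False (not a header)
print(is_public(Decl('object.h')))                             # False (not under Include)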
185,920
import os.path from c_common import fsutil import c_common.tables as _tables import c_parser.info as _info import c_parser.match as _match import c_parser.datafiles as _parser from . import analyze as _analyze def read_known(infile, extracolumns=None, relroot=fsutil.USE_CWD): extracolumns = EXTRA_COLUMNS + ( list(extracolumns) if extracolumns else [] ) known = {} for decl, extra in _parser.iter_decls_tsv(infile, extracolumns, relroot): known[decl] = extra return known def analyze_known(known, *, analyze_resolved=None, handle_unresolved=True, ): knowntypes = knowntypespecs = {} collated = _match.group_by_kinds(known) types = {decl: None for decl in collated['type']} typespecs = _analyze.get_typespecs(types) def analyze_decl(decl): return _analyze.analyze_decl( decl, typespecs, knowntypespecs, types, knowntypes, analyze_resolved=analyze_resolved, ) _analyze.analyze_type_decls(types, analyze_decl, handle_unresolved) return types, typespecs def get_known(known, extracolumns=None, *, analyze_resolved=None, handle_unresolved=True, relroot=fsutil.USE_CWD, ): if isinstance(known, str): known = read_known(known, extracolumns, relroot) return analyze_known( known, handle_unresolved=handle_unresolved, analyze_resolved=analyze_resolved, )
null
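A short usage sketch: `get_known` accepts either a path or an already-read mapping, and per the signature above, `relroot` and the analysis options are keyword-only. The filename is a placeholder:

types, typespecs = get_known('known.tsv')  # placeholder path
for decl in types:
    print(decl)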
185,921
import os.path from c_common import fsutil import c_common.tables as _tables import c_parser.info as _info import c_parser.match as _match import c_parser.datafiles as _parser from . import analyze as _analyze EXTRA_COLUMNS = [ #'typedecl', ] def write_known(rows, outfile, extracolumns=None, *, relroot=fsutil.USE_CWD, backup=True, ): extracolumns = EXTRA_COLUMNS + ( list(extracolumns) if extracolumns else [] ) _parser.write_decls_tsv( rows, outfile, extracolumns, relroot=relroot, backup=backup, )
null
185,922
import os.path

from c_common import fsutil
import c_common.tables as _tables
import c_parser.info as _info
import c_parser.match as _match
import c_parser.datafiles as _parser
from . import analyze as _analyze

IGNORED_HEADER = '\t'.join(IGNORED_COLUMNS)

def write_ignored(variables, outfile, relroot=fsutil.USE_CWD):
    raise NotImplementedError
    # Unreachable draft code below, kept for reference until implemented:
    if relroot and relroot is not fsutil.USE_CWD:
        relroot = os.path.abspath(relroot)
    reason = '???'
    #if not isinstance(varid, DeclID):
    #    varid = getattr(varid, 'parsed', varid).id
    decls = (d.fix_filename(relroot, fixroot=False) for d in variables)
    _tables.write_table(
        outfile,
        IGNORED_HEADER,
        sep='\t',
        rows=(r.render_rowdata() + (reason,) for r in decls),
    )
null
185,923
import argparse
import contextlib
import fnmatch
import logging
import os
import os.path
import shutil
import sys

from . import fsutil, strutil, iterutil, logging as loggingutil

def _is_standalone(filename):
    return fsutil.is_executable(filename)

def is_installed(filename, *, allowsuffix=True):
    if not allowsuffix and filename.endswith('.py'):
        return False
    # os.path.normpath is the correct API (there is no os.path.normalize).
    filename = os.path.abspath(os.path.normpath(filename))
    found = shutil.which(os.path.basename(filename))
    if not found:
        return False
    if found != filename:
        return False
    return _is_standalone(filename)
null
185,924
import argparse
import contextlib
import fnmatch
import logging
import os
import os.path
import shutil
import sys

from . import fsutil, strutil, iterutil, logging as loggingutil

def _is_standalone(filename):
    return fsutil.is_executable(filename)

def is_standalone(filename):
    # os.path.normpath is the correct API (there is no os.path.normalize).
    filename = os.path.abspath(os.path.normpath(filename))
    return _is_standalone(filename)
null
185,925
import argparse
import contextlib
import fnmatch
import logging
import os
import os.path
import shutil
import sys

from . import fsutil, strutil, iterutil, logging as loggingutil

def configure_logger(verbosity, logger=None, **kwargs):
    if logger is None:
        # Configure the root logger.
        logger = logging.getLogger()
    loggingutil.configure_logger(logger, verbosity, **kwargs)
null
185,926
import argparse
import contextlib
import fnmatch
import logging
import os
import os.path
import shutil
import sys

from . import fsutil, strutil, iterutil, logging as loggingutil

The provided code snippet includes necessary dependencies for implementing the `set_command` function. Write a Python function `def set_command(name, add_cli)` to solve the following problem:
A decorator factory to set CLI info.
Here is the function:
def set_command(name, add_cli):
    """A decorator factory to set CLI info."""
    def decorator(func):
        if hasattr(func, '__cli__'):
            raise Exception(f'CLI info already set: {func.__cli__!r}')
        func.__cli__ = (name, add_cli)
        return func
    return decorator
A decorator factory to set CLI info.
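A short usage sketch for `set_command`; the command name and the `add_cli` callback are illustrative:

def _cli_frobnicate(parser):          # hypothetical CLI setup callback
    parser.add_argument('filenames', nargs='+')

@set_command('frobnicate', _cli_frobnicate)
def cmd_frobnicate(filenames, **kwargs):
    ...

# Consumers can later discover the CLI info from the attribute:
name, add_cli = cmd_frobnicate.__cli__
print(name)   # 'frobnicate'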
185,927
import logging
import sys

VERBOSITY = 3

_logger = logging.getLogger(__name__.rpartition('.')[0])

def configure_logger(logger, verbosity=VERBOSITY, *,
                     logfile=None,
                     maxlevel=logging.CRITICAL,
                     ):
    level = max(1,  # 0 disables it, so we use the next lowest.
                min(maxlevel,
                    maxlevel - verbosity * 10))
    logger.setLevel(level)
    #logger.propagate = False

    if not logger.handlers:
        if logfile:
            handler = logging.FileHandler(logfile)
        else:
            handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(level)
        #handler.setFormatter(logging.Formatter())
        logger.addHandler(handler)

    # In case the provided logger is in a sub-package...
    if logger is not _logger:
        configure_logger(
            _logger,
            verbosity,
            logfile=logfile,
            maxlevel=maxlevel,
        )
null
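The `level` arithmetic above maps each verbosity step to one 10-point logging level. A quick worked check, assuming the default `maxlevel` of `logging.CRITICAL` (50):

import logging

def _level(verbosity, maxlevel=logging.CRITICAL):
    return max(1, min(maxlevel, maxlevel - verbosity * 10))

print(_level(0))  # 50 (CRITICAL only)
print(_level(3))  # 20 (INFO and up) -- the VERBOSITY default
print(_level(5))  # 1  (50 - 50 = 0 would disable logging, hence max(1, ...))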
185,928
import csv
import re
import textwrap

from . import NOT_SET, strutil, fsutil

def _parse_row(line, sep, ncols, default):
    ...  # body elided in this snippet

def _normalize_table_file_props(header, sep):
    ...  # body elided in this snippet

def parse_table(entries, sep, header=None, rawsep=None, *,
                default=NOT_SET,
                strict=True,
                ):
    header, sep = _normalize_table_file_props(header, sep)
    if not sep:
        raise ValueError('missing "sep"')

    ncols = None
    if header:
        if strict:
            ncols = len(header.split(sep))

    cur_file = None
    for line, filename in strutil.parse_entries(entries, ignoresep=sep):
        _sep = sep
        if filename:
            if header and cur_file != filename:
                cur_file = filename
                # Skip the first line if it's the header.
                if line.strip() == header:
                    continue
                else:
                    # We expected the header.
                    raise NotImplementedError((header, line))
        elif rawsep and sep not in line:
            _sep = rawsep

        row = _parse_row(line, _sep, ncols, default)
        if strict and not ncols:
            ncols = len(row)
        yield row, filename
null
185,929
import csv
import re
import textwrap

from . import NOT_SET, strutil, fsutil

def _parse_row(line, sep, ncols, default):
    ...  # body elided in this snippet

def parse_row(line, sep, *, ncols=None, default=NOT_SET):
    if not sep:
        raise ValueError('missing "sep"')
    return _parse_row(line, sep, ncols, default)
null
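Both `parse_table` and `parse_row` delegate to `_parse_row`, whose body is elided in these snippets. A minimal sketch of what it plausibly does, inferred from the call sites (column-count enforcement plus a default for short rows); this is an assumption, not the real implementation:

def _parse_row(line, sep, ncols, default):
    # Inferred sketch -- the real implementation is not shown here.
    row = tuple(v.strip() for v in line.split(sep))
    if ncols is not None:
        if len(row) > ncols:
            raise ValueError(f'bad row (expected {ncols} columns): {line!r}')
        if len(row) < ncols:
            if default is NOT_SET:
                raise ValueError(f'bad row (expected {ncols} columns): {line!r}')
            row += (default,) * (ncols - len(row))
    return row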
185,930
import logging def unrepr(value): raise NotImplementedError
null