id: int64 (0–190k) · prompt: string (length 21–13.4M) · docstring: string (length 1–12k)
176,471
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope


def _get_code_for_stack(code_lines, leaf, position):
    # It might happen that we're on whitespace or on a comment. This means
    # that we would not get the right leaf.
    if leaf.start_pos >= position:
        # If we're not on a comment simply get the previous leaf and proceed.
        leaf = leaf.get_previous_leaf()
        if leaf is None:
            return ''  # At the beginning of the file.

    is_after_newline = leaf.type == 'newline'
    while leaf.type == 'newline':
        leaf = leaf.get_previous_leaf()
        if leaf is None:
            return ''

    if leaf.type == 'error_leaf' or leaf.type == 'string':
        if leaf.start_pos[0] < position[0]:
            # On a different line, we just begin anew.
            return ''

        # Error leaves cannot be parsed, completion in strings is also
        # impossible.
        raise OnErrorLeaf(leaf)
    else:
        user_stmt = leaf
        while True:
            if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
                break
            user_stmt = user_stmt.parent

        if is_after_newline:
            if user_stmt.start_pos[1] > position[1]:
                # This means that it's actually a dedent and that means that we
                # start without value (part of a suite).
                return ''

        # This is basically getting the relevant lines.
        return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)


def dedent(text: str) -> str: ...


class Parser(BaseParser):
    """
    This class is used to parse a Python file; it then divides it into a
    class structure of different scopes.

    :param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
    """

    node_map = {
        'expr_stmt': tree.ExprStmt,
        'classdef': tree.Class,
        'funcdef': tree.Function,
        'file_input': tree.Module,
        'import_name': tree.ImportName,
        'import_from': tree.ImportFrom,
        'break_stmt': tree.KeywordStatement,
        'continue_stmt': tree.KeywordStatement,
        'return_stmt': tree.ReturnStmt,
        'raise_stmt': tree.KeywordStatement,
        'yield_expr': tree.YieldExpr,
        'del_stmt': tree.KeywordStatement,
        'pass_stmt': tree.KeywordStatement,
        'global_stmt': tree.GlobalStmt,
        'nonlocal_stmt': tree.KeywordStatement,
        'print_stmt': tree.KeywordStatement,
        'assert_stmt': tree.AssertStmt,
        'if_stmt': tree.IfStmt,
        'with_stmt': tree.WithStmt,
        'for_stmt': tree.ForStmt,
        'while_stmt': tree.WhileStmt,
        'try_stmt': tree.TryStmt,
        'sync_comp_for': tree.SyncCompFor,
        # Not sure if this is the best idea, but IMO it's the easiest way to
        # avoid extreme amounts of work around the subtle difference of 2/3
        # grammar in list comprehensions.
        'decorator': tree.Decorator,
        'lambdef': tree.Lambda,
        'lambdef_nocond': tree.Lambda,
        'namedexpr_test': tree.NamedExpr,
    }
    default_node = tree.PythonNode

    # Names/Keywords are handled separately
    _leaf_map = {
        PythonTokenTypes.STRING: tree.String,
        PythonTokenTypes.NUMBER: tree.Number,
        PythonTokenTypes.NEWLINE: tree.Newline,
        PythonTokenTypes.ENDMARKER: tree.EndMarker,
        PythonTokenTypes.FSTRING_STRING: tree.FStringString,
        PythonTokenTypes.FSTRING_START: tree.FStringStart,
        PythonTokenTypes.FSTRING_END: tree.FStringEnd,
    }

    def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'):
        super().__init__(pgen_grammar, start_nonterminal, error_recovery=error_recovery)

        self.syntax_errors = []
        self._omit_dedent_list = []
        self._indent_counter = 0

    def parse(self, tokens):
        if self._error_recovery:
            if self._start_nonterminal != 'file_input':
                raise NotImplementedError

            tokens = self._recovery_tokenize(tokens)

        return super().parse(tokens)

    def convert_node(self, nonterminal, children):
        """
        Convert raw node information to a PythonBaseNode instance.

        This is passed to the parser driver which calls it whenever a
        reduction of a grammar rule produces a new complete node, so that the
        tree is built strictly bottom-up.
        """
        try:
            node = self.node_map[nonterminal](children)
        except KeyError:
            if nonterminal == 'suite':
                # We don't want the INDENT/DEDENT in our parser tree. Those
                # leaves are just cancer. They are virtual leaves and not real
                # ones and therefore have pseudo start/end positions and no
                # prefixes. Just ignore them.
                children = [children[0]] + children[2:-1]
            node = self.default_node(nonterminal, children)
        return node

    def convert_leaf(self, type, value, prefix, start_pos):
        # print('leaf', repr(value), token.tok_name[type])
        if type == NAME:
            if value in self._pgen_grammar.reserved_syntax_strings:
                return tree.Keyword(value, start_pos, prefix)
            else:
                return tree.Name(value, start_pos, prefix)

        return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix)

    def error_recovery(self, token):
        tos_nodes = self.stack[-1].nodes
        if tos_nodes:
            last_leaf = tos_nodes[-1].get_last_leaf()
        else:
            last_leaf = None

        if self._start_nonterminal == 'file_input' and \
                (token.type == PythonTokenTypes.ENDMARKER
                 or token.type == DEDENT and not last_leaf.value.endswith('\n')
                 and not last_leaf.value.endswith('\r')):
            # In Python statements need to end with a newline. But since it's
            # possible (and valid in Python) that there's no newline at the
            # end of a file, we have to recover even if the user doesn't want
            # error recovery.
            if self.stack[-1].dfa.from_rule == 'simple_stmt':
                try:
                    plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE]
                except KeyError:
                    pass
                else:
                    if plan.next_dfa.is_final and not plan.dfa_pushes:
                        # We are ignoring here that the newline would be
                        # required for a simple_stmt.
                        self.stack[-1].dfa = plan.next_dfa
                        self._add_token(token)
                        return

        if not self._error_recovery:
            return super().error_recovery(token)

        def current_suite(stack):
            # For now just discard everything that is not a suite or
            # file_input, if we detect an error.
            for until_index, stack_node in reversed(list(enumerate(stack))):
                # `suite` can sometimes be only simple_stmt, not stmt.
                if stack_node.nonterminal == 'file_input':
                    break
                elif stack_node.nonterminal == 'suite':
                    # In the case where we just have a newline we don't want to
                    # do error recovery here. In all other cases, we want to do
                    # error recovery.
                    if len(stack_node.nodes) != 1:
                        break
            return until_index

        until_index = current_suite(self.stack)

        if self._stack_removal(until_index + 1):
            self._add_token(token)
        else:
            typ, value, start_pos, prefix = token
            if typ == INDENT:
                # For every deleted INDENT we have to delete a DEDENT as well.
                # Otherwise the parser will get into trouble and DEDENT too early.
                self._omit_dedent_list.append(self._indent_counter)

            error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix)
            self.stack[-1].nodes.append(error_leaf)

        tos = self.stack[-1]
        if tos.nonterminal == 'suite':
            # Need at least one statement in the suite. This happened with the
            # error recovery above.
            try:
                tos.dfa = tos.dfa.arcs['stmt']
            except KeyError:
                # We're already in a final state.
                pass

    def _stack_removal(self, start_index):
        all_nodes = [node for stack_node in self.stack[start_index:]
                     for node in stack_node.nodes]

        if all_nodes:
            node = tree.PythonErrorNode(all_nodes)
            self.stack[start_index - 1].nodes.append(node)

        self.stack[start_index:] = []
        return bool(all_nodes)

    def _recovery_tokenize(self, tokens):
        for token in tokens:
            typ = token[0]
            if typ == DEDENT:
                # We need to count indents, because if we just omit any DEDENT,
                # we might omit them in the wrong place.
                o = self._omit_dedent_list
                if o and o[-1] == self._indent_counter:
                    o.pop()
                    self._indent_counter -= 1
                    continue

                self._indent_counter -= 1
            elif typ == INDENT:
                self._indent_counter += 1
            yield token

The provided code snippet includes necessary dependencies for implementing the `get_stack_at_position` function. Write a Python function `def get_stack_at_position(grammar, code_lines, leaf, pos)` to solve the following problem: Returns the possible node names (e.g. import_from, xor_test or yield_stmt). Here is the function:

def get_stack_at_position(grammar, code_lines, leaf, pos):
    """
    Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
    """
    class EndMarkerReached(Exception):
        pass

    def tokenize_without_endmarker(code):
        # TODO This is for now not an official parso API that exists purely
        #      for Jedi.
        tokens = grammar._tokenize(code)
        for token in tokens:
            if token.string == safeword:
                raise EndMarkerReached()
            elif token.prefix.endswith(safeword):
                # This happens with comments.
                raise EndMarkerReached()
            elif token.string.endswith(safeword):
                # Probably an f-string literal that was not finished.
                yield token
                raise EndMarkerReached()
            else:
                yield token

    # The code might be indented, just remove it.
    code = dedent(_get_code_for_stack(code_lines, leaf, pos))
    # We use a word to tell Jedi when we have reached the start of the
    # completion.
    # Use Z as a prefix because it's not part of a number suffix.
    safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
    code = code + ' ' + safeword

    p = Parser(grammar._pgen_grammar, error_recovery=True)
    try:
        p.parse(tokens=tokenize_without_endmarker(code))
    except EndMarkerReached:
        return p.stack
    raise SystemError(
        "This really shouldn't happen. There's a bug in Jedi:\n%s"
        % list(tokenize_without_endmarker(code))
    )
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
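The sentinel trick above is easy to see in isolation. A minimal sketch using only parso's public API (the safeword value is copied from the source; the exact tree shapes produced by error recovery are an assumption):

import parso

# Appending the sentinel turns an incomplete statement into one the grammar
# can finish, which is what lets the parser stack be inspected afterwards.
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
grammar = parso.load_grammar()
broken = grammar.parse('from os import ')
patched = grammar.parse('from os import ' + safeword)
print(broken.children[0].type)   # likely an error node from recovery
print(patched.children[0].type)  # a normal statement node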
176,472
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope


def filter_follow_imports(names, follow_builtin_imports=False):
    for name in names:
        if name.is_import():
            new_names = list(filter_follow_imports(
                name.goto(),
                follow_builtin_imports=follow_builtin_imports,
            ))
            found_builtin = False
            if follow_builtin_imports:
                for new_name in new_names:
                    if new_name.start_pos is None:
                        found_builtin = True

            if found_builtin:
                yield name
            else:
                yield from new_names
        else:
            yield name
null
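filter_follow_imports is internal, but its effect is visible through jedi's public goto API, which exposes the same follow_imports/follow_builtin_imports switches (that this helper sits on that code path is an assumption about jedi's internals):

import jedi

script = jedi.Script('from os import path\npath')
# Without following imports, goto stops at the import name itself; with
# follow_imports=True it resolves through to the real definition.
print([n.module_name for n in script.goto(2, 2)])
print([n.module_name for n in script.goto(2, 2, follow_imports=True)])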
176,473
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope


def _iter_arguments(nodes, position):
    def remove_after_pos(name):
        if name.type != 'name':
            return None
        return name.value[:position[1] - name.start_pos[1]]

    # Returns Generator[Tuple[star_count, Optional[key_start: str], had_equal]]
    nodes_before = [c for c in nodes if c.start_pos < position]
    if nodes_before[-1].type == 'arglist':
        yield from _iter_arguments(nodes_before[-1].children, position)
        return

    previous_node_yielded = False
    stars_seen = 0
    for i, node in enumerate(nodes_before):
        if node.type == 'argument':
            previous_node_yielded = True
            first = node.children[0]
            second = node.children[1]
            if second == '=':
                if second.start_pos < position and first.type == 'name':
                    yield 0, first.value, True
                else:
                    yield 0, remove_after_pos(first), False
            elif first in ('*', '**'):
                yield len(first.value), remove_after_pos(second), False
            else:
                # Must be a Comprehension
                first_leaf = node.get_first_leaf()
                if first_leaf.type == 'name' and first_leaf.start_pos >= position:
                    yield 0, remove_after_pos(first_leaf), False
                else:
                    yield 0, None, False
            stars_seen = 0
        elif node.type == 'testlist_star_expr':
            for n in node.children[::2]:
                if n.type == 'star_expr':
                    stars_seen = 1
                    n = n.children[1]
                yield stars_seen, remove_after_pos(n), False
                stars_seen = 0
            # The count of children is even if there's a comma at the end.
            previous_node_yielded = bool(len(node.children) % 2)
        elif isinstance(node, tree.PythonLeaf) and node.value == ',':
            if not previous_node_yielded:
                yield stars_seen, '', False
                stars_seen = 0
            previous_node_yielded = False
        elif isinstance(node, tree.PythonLeaf) and node.value in ('*', '**'):
            stars_seen = len(node.value)
        elif node == '=' and nodes_before[-1]:
            previous_node_yielded = True
            before = nodes_before[i - 1]
            if before.type == 'name':
                yield 0, before.value, True
            else:
                yield 0, None, False
            # Just ignore the star that is probably a syntax error.
            stars_seen = 0

    if not previous_node_yielded:
        if nodes_before[-1].type == 'name':
            yield stars_seen, remove_after_pos(nodes_before[-1]), False
        else:
            yield stars_seen, '', False
null
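A hedged sketch of the triples _iter_arguments yields, driven by real parso nodes (importing the helper from jedi.api.helpers is an assumption about where this internal lives):

import parso

module = parso.parse('f(a, b=1, *c)')
trailer = module.children[0].children[0].children[1]  # the '(...)' trailer
arglist = trailer.children[1]
# With the cursor just before the closing paren at (1, 12),
# _iter_arguments(arglist.children, (1, 12)) would yield
# (0, '', False) for the finished positional argument,
# (0, 'b', True) for the keyword argument, and
# (1, 'c', False) for the starred argument.
print(arglist.type, [c.type for c in arglist.children])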
176,474
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope

The provided code snippet includes necessary dependencies for implementing the `_get_index_and_key` function. Write a Python function `def _get_index_and_key(nodes, position)` to solve the following problem: Returns the amount of commas and the keyword argument string. Here is the function:

def _get_index_and_key(nodes, position):
    """
    Returns the amount of commas and the keyword argument string.
    """
    nodes_before = [c for c in nodes if c.start_pos < position]
    if nodes_before[-1].type == 'arglist':
        return _get_index_and_key(nodes_before[-1].children, position)

    key_str = None

    last = nodes_before[-1]
    if last.type == 'argument' and last.children[1] == '=' \
            and last.children[1].end_pos <= position:
        # Checked if the argument
        key_str = last.children[0].value
    elif last == '=':
        key_str = nodes_before[-2].value

    return nodes_before.count(','), key_str
Returns the amount of commas and the keyword argument string.
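A hedged, runnable illustration with real parso nodes (the import location of this private helper is an assumption):

import parso

module = parso.parse('f(a, b, key=value)')
trailer = module.children[0].children[0].children[1]
arglist = trailer.children[1]
# With the cursor just after 'key=' (line 1, column 12),
# _get_index_and_key(arglist.children, (1, 12)) returns (2, 'key'):
# two commas before the cursor, and the keyword currently being typed.
print([c.type for c in arglist.children])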
176,475
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope


class CallDetails:
    def __init__(self, bracket_leaf, children, position):
        self.bracket_leaf = bracket_leaf
        self._children = children
        self._position = position

    def index(self):
        return _get_index_and_key(self._children, self._position)[0]

    def keyword_name_str(self):
        return _get_index_and_key(self._children, self._position)[1]

    def _list_arguments(self):
        return list(_iter_arguments(self._children, self._position))

    def calculate_index(self, param_names):
        positional_count = 0
        used_names = set()
        star_count = -1
        args = self._list_arguments()
        if not args:
            if param_names:
                return 0
            else:
                return None

        is_kwarg = False
        for i, (star_count, key_start, had_equal) in enumerate(args):
            is_kwarg |= had_equal | (star_count == 2)
            if star_count:
                pass  # For now do nothing, we don't know what's in there here.
            else:
                if i + 1 != len(args):  # Not last
                    if had_equal:
                        used_names.add(key_start)
                    else:
                        positional_count += 1

        for i, param_name in enumerate(param_names):
            kind = param_name.get_kind()

            if not is_kwarg:
                if kind == Parameter.VAR_POSITIONAL:
                    return i
                if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY):
                    if i == positional_count:
                        return i

            if key_start is not None and not star_count == 1 or star_count == 2:
                if param_name.string_name not in used_names \
                        and (kind == Parameter.KEYWORD_ONLY
                             or kind == Parameter.POSITIONAL_OR_KEYWORD
                             and positional_count <= i):
                    if star_count:
                        return i
                    if had_equal:
                        if param_name.string_name == key_start:
                            return i
                    else:
                        if param_name.string_name.startswith(key_start):
                            return i

            if kind == Parameter.VAR_KEYWORD:
                return i
        return None

    def iter_used_keyword_arguments(self):
        for star_count, key_start, had_equal in list(self._list_arguments()):
            if had_equal and key_start:
                yield key_start

    def count_positional_arguments(self):
        count = 0
        for star_count, key_start, had_equal in self._list_arguments()[:-1]:
            if star_count or key_start:
                break
            count += 1
        return count


def _get_signature_details_from_error_node(node, additional_children, position):
    for index, element in reversed(list(enumerate(node.children))):
        # `index > 0` means that it's a trailer and not an atom.
        if element == '(' and element.end_pos <= position and index > 0:
            # It's an error node, we don't want to match too much, just
            # until the parentheses is enough.
            children = node.children[index:]
            name = element.get_previous_leaf()
            if name is None:
                continue
            if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
                return CallDetails(element, children + additional_children, position)


def get_signature_details(module, position):
    leaf = module.get_leaf_for_position(position, include_prefixes=True)
    # It's easier to deal with the previous token than the next one in this
    # case.
    if leaf.start_pos >= position:
        # Whitespace / comments after the leaf count towards the previous leaf.
        leaf = leaf.get_previous_leaf()
        if leaf is None:
            return None

    # Now that we know where we are in the syntax tree, we start to look at
    # parents for possible function definitions.
    node = leaf.parent
    while node is not None:
        if node.type in ('funcdef', 'classdef', 'decorated', 'async_stmt'):
            # Don't show signatures if there's stuff before it that just
            # makes it feel strange to have a signature.
            return None
        additional_children = []
        for n in reversed(node.children):
            if n.start_pos < position:
                if n.type == 'error_node':
                    result = _get_signature_details_from_error_node(
                        n, additional_children, position
                    )
                    if result is not None:
                        return result

                    additional_children[0:0] = n.children
                    continue
                additional_children.insert(0, n)

        # Find a valid trailer
        if node.type == 'trailer' and node.children[0] == '(' \
                or node.type == 'decorator' and node.children[2] == '(':
            # Additionally we have to check that an ending parenthesis isn't
            # interpreted wrong. There are two cases:
            # 1. Cursor before paren -> The current signature is good
            # 2. Cursor after paren -> We need to skip the current signature
            if not (leaf is node.children[-1] and position >= leaf.end_pos):
                leaf = node.get_previous_leaf()
                if leaf is None:
                    return None
                return CallDetails(
                    node.children[0] if node.type == 'trailer' else node.children[2],
                    node.children, position
                )

        node = node.parent

    return None
null
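All of the above ultimately backs jedi's public get_signatures call; a small demo of the observable behaviour (the mapping onto the CallDetails internals is an assumption):

import jedi

sigs = jedi.Script('abs(').get_signatures(1, 4)
# index tells the caller which parameter the cursor is currently on.
print(sigs[0].name, sigs[0].index)  # 'abs' 0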
176,476
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope


def match(string, like_name, fuzzy=False):
    if fuzzy:
        return _fuzzy_match(string, like_name)
    else:
        return _start_match(string, like_name)


def infer(inference_state, context, leaf):
    if leaf.type == 'name':
        return inference_state.infer(context, leaf)

    parent = leaf.parent
    definitions = NO_VALUES
    if parent.type == 'atom':
        # e.g. `(a + b)`
        definitions = context.infer_node(leaf.parent)
    elif parent.type == 'trailer':
        # e.g. `a()`
        definitions = infer_call_of_leaf(context, leaf)
    elif isinstance(leaf, tree.Literal):
        # e.g. `"foo"` or `1.0`
        return infer_atom(context, leaf)
    elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
        return get_string_value_set(inference_state)
    return definitions

The provided code snippet includes necessary dependencies for implementing the `cache_signatures` function. Write a Python function `def cache_signatures(inference_state, context, bracket_leaf, code_lines, user_pos)` to solve the following problem: This function calculates the cache key. Here is the function:

def cache_signatures(inference_state, context, bracket_leaf, code_lines, user_pos):
    """This function calculates the cache key."""
    line_index = user_pos[0] - 1

    before_cursor = code_lines[line_index][:user_pos[1]]
    other_lines = code_lines[bracket_leaf.start_pos[0]:line_index]
    whole = ''.join(other_lines + [before_cursor])
    before_bracket = re.match(r'.*\(', whole, re.DOTALL)

    module_path = context.get_root_context().py__file__()
    if module_path is None:
        yield None  # Don't cache!
    else:
        yield (module_path, before_bracket, bracket_leaf.start_pos)
    yield infer(
        inference_state,
        context,
        bracket_leaf.get_previous_leaf(),
    )
This function calculates the cache key.
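The interesting part of the key is the greedy regex: with re.DOTALL, `.*\(` swallows everything up to the last opening parenthesis before the cursor, so the key only changes when the call being typed changes. A safe standalone check:

import re

whole = 'result = compute(a, b,\n    c'
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
print(repr(before_bracket.group(0)))  # everything up to the last '('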
176,477
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope


def wraps(wrapped: _AnyCallable,
          assigned: Sequence[str] = ...,
          updated: Sequence[str] = ...) -> Callable[[_T], _T]: ...


def validate_line_column(func):
    @wraps(func)
    def wrapper(self, line=None, column=None, *args, **kwargs):
        line = max(len(self._code_lines), 1) if line is None else line
        if not (0 < line <= len(self._code_lines)):
            raise ValueError('`line` parameter is not in a valid range.')

        line_string = self._code_lines[line - 1]
        line_len = len(line_string)
        if line_string.endswith('\r\n'):
            line_len -= 2
        elif line_string.endswith('\n'):
            line_len -= 1

        column = line_len if column is None else column
        if not (0 <= column <= line_len):
            raise ValueError('`column` parameter (%d) is not in a valid range '
                             '(0-%d) for line %d (%r).' % (
                                 column, line_len, line, line_string))
        return func(self, line, column, *args, **kwargs)
    return wrapper
null
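Because jedi's public Script methods pass through this decorator, position arguments may simply be omitted and default to the end of the buffer, while out-of-range values fail fast with ValueError instead of deep inside inference. A quick demo:

import jedi

script = jedi.Script('import os\nos.path')
print(len(script.complete()))   # line/column default to the last position
try:
    script.complete(99, 0)
except ValueError as e:
    print(e)                    # '`line` parameter is not in a valid range.'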
176,478
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope


class chain(Iterator[_T], Generic[_T]):
    def __init__(self, *iterables: Iterable[_T]) -> None: ...
    def __next__(self) -> _T: ...
    def __iter__(self) -> Iterator[_T]: ...
    def from_iterable(iterable: Iterable[Iterable[_S]]) -> Iterator[_S]: ...


def get_parent_scope(node, include_flows=False):
    """
    Returns the underlying scope.
    """
    scope = node.parent
    if scope is None:
        return None  # It's a module already.

    while True:
        if is_scope(scope):
            if scope.type in ('classdef', 'funcdef', 'lambdef'):
                index = scope.children.index(':')
                if scope.children[index].start_pos >= node.start_pos:
                    if node.parent.type == 'param' and node.parent.name == node:
                        pass
                    elif node.parent.type == 'tfpdef' and node.parent.children[0] == node:
                        pass
                    else:
                        scope = scope.parent
                        continue
            return scope
        elif include_flows and isinstance(scope, tree.Flow):
            # The cursor might be on `if foo`, so the parent scope will not be
            # the if, but the parent of the if.
            if not (scope.type == 'if_stmt'
                    and any(n.start_pos <= node.start_pos < n.end_pos
                            for n in scope.get_test_nodes())):
                return scope

        scope = scope.parent

The provided code snippet includes necessary dependencies for implementing the `get_module_names` function. Write a Python function `def get_module_names(module, all_scopes, definitions=True, references=False)` to solve the following problem: Returns a dictionary with name parts as keys and their call paths as values. Here is the function:

def get_module_names(module, all_scopes, definitions=True, references=False):
    """
    Returns a dictionary with name parts as keys and their call paths as
    values.
    """
    def def_ref_filter(name):
        is_def = name.is_definition()
        return definitions and is_def or references and not is_def

    names = list(chain.from_iterable(module.get_used_names().values()))
    if not all_scopes:
        # We have to filter all the names that don't have the module as a
        # parent_scope. There's None as a parent, because nodes in the module
        # node have the parent module and not suite as all the others.
        # Therefore it's important to catch that case.

        def is_module_scope_name(name):
            parent_scope = get_parent_scope(name)
            # async functions have an extra wrapper. Strip it.
            if parent_scope and parent_scope.type == 'async_stmt':
                parent_scope = parent_scope.parent
            return parent_scope in (module, None)

        names = [n for n in names if is_module_scope_name(n)]
    return filter(def_ref_filter, names)
Returns a dictionary with name parts as keys and their call paths as values.
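get_module_names feeds jedi's public Script.get_names; the all_scopes flag decides whether nested names such as locals and parameters are included. A hedged demo (exact output depends on the jedi version):

import jedi

code = 'import os\n\ndef f(x):\n    y = x\n'
top = jedi.Script(code).get_names()                  # module level only
deep = jedi.Script(code).get_names(all_scopes=True)  # also x and y
print(sorted(n.name for n in top), sorted(n.name for n in deep))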
176,479
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter

from parso.python.parser import Parser
from parso.python import tree

from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope


def split_search_string(name):
    type, _, dotted_names = name.rpartition(' ')
    if type == 'def':
        type = 'function'
    return type, dotted_names.split('.')
null
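Given the definition above (assumed in scope), the split is plain string handling and can be exercised directly:

print(split_search_string('def foo.bar'))  # ('function', ['foo', 'bar'])
print(split_search_string('class A.B'))    # ('class', ['A', 'B'])
print(split_search_string('os.path'))      # ('', ['os', 'path'])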
176,480
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager


class ParamNameWithEquals(ParamNameWrapper):
    def get_public_name(self):
        return self.string_name + '='


class Parameter:
    def __init__(self, name: str, kind: _ParameterKind, *,
                 default: Any = ..., annotation: Any = ...) -> None: ...
    empty: Any = ...
    name: str
    default: Any
    annotation: Any
    kind: _ParameterKind
    POSITIONAL_ONLY: ClassVar[Literal[_ParameterKind.POSITIONAL_ONLY]]
    POSITIONAL_OR_KEYWORD: ClassVar[Literal[_ParameterKind.POSITIONAL_OR_KEYWORD]]
    VAR_POSITIONAL: ClassVar[Literal[_ParameterKind.VAR_POSITIONAL]]
    KEYWORD_ONLY: ClassVar[Literal[_ParameterKind.KEYWORD_ONLY]]
    VAR_KEYWORD: ClassVar[Literal[_ParameterKind.VAR_KEYWORD]]
    def replace(self, *, name: Optional[str] = ...,
                kind: Optional[_ParameterKind] = ...,
                default: Any = ..., annotation: Any = ...) -> Parameter: ...


def _get_signature_param_names(signatures, positional_count, used_kwargs):
    # Add named params
    for call_sig in signatures:
        for i, p in enumerate(call_sig.params):
            kind = p.kind
            if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD:
                continue
            if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \
                    and p.name not in used_kwargs:
                yield ParamNameWithEquals(p._name)
null
176,481
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager


class Parameter:
    def __init__(self, name: str, kind: _ParameterKind, *,
                 default: Any = ..., annotation: Any = ...) -> None: ...
    empty: Any = ...
    name: str
    default: Any
    annotation: Any
    kind: _ParameterKind
    POSITIONAL_ONLY: ClassVar[Literal[_ParameterKind.POSITIONAL_ONLY]]
    POSITIONAL_OR_KEYWORD: ClassVar[Literal[_ParameterKind.POSITIONAL_OR_KEYWORD]]
    VAR_POSITIONAL: ClassVar[Literal[_ParameterKind.VAR_POSITIONAL]]
    KEYWORD_ONLY: ClassVar[Literal[_ParameterKind.KEYWORD_ONLY]]
    VAR_KEYWORD: ClassVar[Literal[_ParameterKind.VAR_KEYWORD]]
    def replace(self, *, name: Optional[str] = ...,
                kind: Optional[_ParameterKind] = ...,
                default: Any = ..., annotation: Any = ...) -> Parameter: ...


def _must_be_kwarg(signatures, positional_count, used_kwargs):
    if used_kwargs:
        return True

    must_be_kwarg = True
    for signature in signatures:
        for i, p in enumerate(signature.params):
            kind = p.kind
            if kind is Parameter.VAR_POSITIONAL:
                # In case there were not already kwargs, the next param can
                # always be a normal argument.
                return False

            if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD,
                                                  Parameter.POSITIONAL_ONLY):
                must_be_kwarg = False
                break
        if not must_be_kwarg:
            break
    return must_be_kwarg
null
176,482
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager


class Completion:
    def __init__(self, inference_state, module_context, code_lines, position,
                 signatures_callback, fuzzy=False):
        self._inference_state = inference_state
        self._module_context = module_context
        self._module_node = module_context.tree_node
        self._code_lines = code_lines

        # The first step of completions is to get the name
        self._like_name = helpers.get_on_completion_name(
            self._module_node, code_lines, position
        )
        # The actual cursor position is not what we need to calculate
        # everything. We want the start of the name we're on.
        self._original_position = position
        self._signatures_callback = signatures_callback

        self._fuzzy = fuzzy

    def complete(self):
        leaf = self._module_node.get_leaf_for_position(
            self._original_position,
            include_prefixes=True
        )
        string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position)

        prefixed_completions = complete_dict(
            self._module_context,
            self._code_lines,
            start_leaf or leaf,
            self._original_position,
            None if string is None else quote + string,
            fuzzy=self._fuzzy,
        )

        if string is not None and not prefixed_completions:
            prefixed_completions = list(complete_file_name(
                self._inference_state, self._module_context, start_leaf, quote, string,
                self._like_name, self._signatures_callback,
                self._code_lines, self._original_position,
                self._fuzzy
            ))
        if string is not None:
            if not prefixed_completions and '\n' in string:
                # Complete only multi line strings
                prefixed_completions = self._complete_in_string(start_leaf, string)
            return prefixed_completions

        cached_name, completion_names = self._complete_python(leaf)

        completions = list(filter_names(self._inference_state, completion_names,
                                        self.stack, self._like_name,
                                        self._fuzzy, cached_name=cached_name))

        return (
            # Removing duplicates mostly to remove False/True/None duplicates.
            _remove_duplicates(prefixed_completions, completions)
            + sorted(completions, key=lambda x: (x.name.startswith('__'),
                                                 x.name.startswith('_'),
                                                 x.name.lower()))
        )

    def _complete_python(self, leaf):
        """
        Analyzes the current context of a completion and decides what to
        return.

        Technically this works by generating a parser stack and analysing the
        current stack for possible grammar nodes.

        Possible enhancements:
        - global/nonlocal search global
        - yield from / raise from <- could be only exceptions/generators
        - In args: */**: no completion
        - In params (also lambda): no completion before =
        """
        grammar = self._inference_state.grammar
        self.stack = stack = None
        self._position = (
            self._original_position[0],
            self._original_position[1] - len(self._like_name)
        )
        cached_name = None

        try:
            self.stack = stack = helpers.get_stack_at_position(
                grammar, self._code_lines, leaf, self._position
            )
        except helpers.OnErrorLeaf as e:
            value = e.error_leaf.value
            if value == '.':
                # After ErrorLeaf's that are dots, we will not do any
                # completions since this probably just confuses the user.
                return cached_name, []

            # If we don't have a value, just use global completion.
            return cached_name, self._complete_global_scope()

        allowed_transitions = \
            list(stack._allowed_transition_names_and_token_types())

        if 'if' in allowed_transitions:
            leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
            previous_leaf = leaf.get_previous_leaf()

            indent = self._position[1]
            if not (leaf.start_pos <= self._position <= leaf.end_pos):
                indent = leaf.start_pos[1]

            if previous_leaf is not None:
                stmt = previous_leaf
                while True:
                    stmt = search_ancestor(
                        stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
                        'error_node',
                    )
                    if stmt is None:
                        break

                    type_ = stmt.type
                    if type_ == 'error_node':
                        first = stmt.children[0]
                        if isinstance(first, Leaf):
                            type_ = first.value + '_stmt'
                    # Compare indents
                    if stmt.start_pos[1] == indent:
                        if type_ == 'if_stmt':
                            allowed_transitions += ['elif', 'else']
                        elif type_ == 'try_stmt':
                            allowed_transitions += ['except', 'finally', 'else']
                        elif type_ == 'for_stmt':
                            allowed_transitions.append('else')

        completion_names = []

        kwargs_only = False
        if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
                                                  PythonTokenTypes.INDENT)):
            # This means that we actually have to do type inference.

            nonterminals = [stack_node.nonterminal for stack_node in stack]

            nodes = _gather_nodes(stack)
            if nodes and nodes[-1] in ('as', 'def', 'class'):
                # No completions for ``with x as foo`` and ``import x as foo``.
                # Also true for defining names as a class or function.
                return cached_name, list(self._complete_inherited(is_function=True))
            elif "import_stmt" in nonterminals:
                level, names = parse_dotted_names(nodes, "import_from" in nonterminals)

                only_modules = not ("import_from" in nonterminals and 'import' in nodes)
                completion_names += self._get_importer_names(
                    names,
                    level,
                    only_modules=only_modules,
                )
            elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
                dot = self._module_node.get_leaf_for_position(self._position)
                if dot.type == "endmarker":
                    # This is a bit of a weird edge case, maybe we can somehow
                    # generalize this.
                    dot = leaf.get_previous_leaf()
                cached_name, n = self._complete_trailer(dot.get_previous_leaf())
                completion_names += n
            elif self._is_parameter_completion():
                completion_names += self._complete_params(leaf)
            else:
                # Apparently this looks like it's good enough to filter most cases
                # so that signature completions don't randomly appear.
                # To understand why this works, three things are important:
                # 1. trailer with a `,` in it is either a subscript or an arglist.
                # 2. If there's no `,`, it's at the start and only signatures start
                #    with `(`. Other trailers could start with `.` or `[`.
                # 3. Decorators are very primitive and have an optional `(` with
                #    optional arglist in them.
                if nodes[-1] in ['(', ','] \
                        and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
                    signatures = self._signatures_callback(*self._position)
                    if signatures:
                        call_details = signatures[0]._call_details
                        used_kwargs = list(call_details.iter_used_keyword_arguments())
                        positional_count = call_details.count_positional_arguments()

                        completion_names += _get_signature_param_names(
                            signatures,
                            positional_count,
                            used_kwargs,
                        )

                        kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs)

                if not kwargs_only:
                    completion_names += self._complete_global_scope()
                    completion_names += self._complete_inherited(is_function=False)

        if not kwargs_only:
            current_line = self._code_lines[self._position[0] - 1][:self._position[1]]
            completion_names += self._complete_keywords(
                allowed_transitions,
                only_values=not (not current_line or current_line[-1] in ' \t.;'
                                 and current_line[-3:] != '...')
            )

        return cached_name, completion_names

    def _is_parameter_completion(self):
        tos = self.stack[-1]
        if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1:
            # We are at the position `lambda `, where basically the next node
            # is a param.
            return True
        if tos.nonterminal in 'parameters':
            # Basically we are at the position `foo(`, there's nothing there
            # yet, so we have no `typedargslist`.
            return True
        # var args is for lambdas and typed args for normal functions
        return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ','

    def _complete_params(self, leaf):
        stack_node = self.stack[-2]
        if stack_node.nonterminal == 'parameters':
            stack_node = self.stack[-3]
        if stack_node.nonterminal == 'funcdef':
            context = get_user_context(self._module_context, self._position)
            node = search_ancestor(leaf, 'error_node', 'funcdef')
            if node is not None:
                if node.type == 'error_node':
                    n = node.children[0]
                    if n.type == 'decorators':
                        decorators = n.children
                    elif n.type == 'decorator':
                        decorators = [n]
                    else:
                        decorators = []
                else:
                    decorators = node.get_decorators()
                function_name = stack_node.nodes[1]

                return complete_param_names(context, function_name.value, decorators)
        return []

    def _complete_keywords(self, allowed_transitions, only_values):
        for k in allowed_transitions:
            if isinstance(k, str) and k.isalpha():
                if not only_values or k in ('True', 'False', 'None'):
                    yield keywords.KeywordName(self._inference_state, k)

    def _complete_global_scope(self):
        context = get_user_context(self._module_context, self._position)
        debug.dbg('global completion scope: %s', context)
        flow_scope_node = get_flow_scope_node(self._module_node, self._position)
        filters = get_global_filters(
            context,
            self._position,
            flow_scope_node
        )
        completion_names = []
        for filter in filters:
            completion_names += filter.values()
        return completion_names

    def _complete_trailer(self, previous_leaf):
        inferred_context = self._module_context.create_context(previous_leaf)
        values = infer_call_of_leaf(inferred_context, previous_leaf)
        debug.dbg('trailer completion values: %s', values, color='MAGENTA')

        # The cached name simply exists to make speed optimizations for
        # certain modules.
        cached_name = None
        if len(values) == 1:
            v, = values
            if v.is_module():
                if len(v.string_names) == 1:
                    module_name = v.string_names[0]
                    if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
                        cached_name = module_name

        return cached_name, self._complete_trailer_for_values(values)

    def _complete_trailer_for_values(self, values):
        user_context = get_user_context(self._module_context, self._position)

        return complete_trailer(user_context, values)

    def _get_importer_names(self, names, level=0, only_modules=True):
        names = [n.value for n in names]
        i = imports.Importer(self._inference_state, names, self._module_context, level)
        return i.completion_names(self._inference_state, only_modules=only_modules)

    def _complete_inherited(self, is_function=True):
        """
        Autocomplete inherited methods when overriding in child class.
        """
        leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
        cls = tree.search_ancestor(leaf, 'classdef')
        if cls is None:
            return

        # Complete the methods that are defined in the super classes.
        class_value = self._module_context.create_value(cls)

        if cls.start_pos[1] >= leaf.start_pos[1]:
            return

        filters = class_value.get_filters(is_instance=True)
        # The first dict is the dictionary of the class itself.
        next(filters)
        for filter in filters:
            for name in filter.values():
                # TODO we should probably check here for properties
                if (name.api_type == 'function') == is_function:
                    yield name

    def _complete_in_string(self, start_leaf, string):
        """
        To make it possible for people to have completions in doctests or
        generally in "Python" code in docstrings, we use the following
        heuristic:

        - Having an indented block of code
        - Having some doctest code that starts with `>>>`
        - Having backticks that don't have whitespace inside them
        """
        def iter_relevant_lines(lines):
            include_next_line = False
            for l in code_lines:
                if include_next_line or l.startswith('>>>') or l.startswith(' '):
                    yield re.sub(r'^( *>>> ?| +)', '', l)
                else:
                    yield None

                include_next_line = bool(re.match(' *>>>', l))

        string = dedent(string)
        code_lines = split_lines(string, keepends=True)
        relevant_code_lines = list(iter_relevant_lines(code_lines))
        if relevant_code_lines[-1] is not None:
            # Some code lines might be None, therefore get rid of those.
            relevant_code_lines = ['\n' if c is None else c
                                   for c in relevant_code_lines]
            return self._complete_code_lines(relevant_code_lines)
        match = re.search(r'`([^`\s]+)', code_lines[-1])
        if match:
            return self._complete_code_lines([match.group(1)])
        return []

    def _complete_code_lines(self, code_lines):
        module_node = self._inference_state.grammar.parse(''.join(code_lines))
        module_value = DocstringModule(
            in_module_context=self._module_context,
            inference_state=self._inference_state,
            module_node=module_node,
            code_lines=code_lines,
        )
        return Completion(
            self._inference_state,
            module_value.as_context(),
            code_lines=code_lines,
            position=module_node.end_pos,
            signatures_callback=lambda *args, **kwargs: [],
            fuzzy=self._fuzzy
        ).complete()


def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
    comp_dct = set()
    if settings.case_insensitive_completion:
        like_name = like_name.lower()
    for name in completion_names:
        string = name.string_name
        if settings.case_insensitive_completion:
            string = string.lower()
        if helpers.match(string, like_name, fuzzy=fuzzy):
            new = classes.Completion(
                inference_state,
                name,
                stack,
                len(like_name),
                is_fuzzy=fuzzy,
                cached_name=cached_name,
            )
            k = (new.name, new.complete)  # key
            if k not in comp_dct:
                comp_dct.add(k)
                tree_name = name.tree_name
                if tree_name is not None:
                    definition = tree_name.get_definition()
                    if definition is not None and definition.type == 'del_stmt':
                        continue
                yield new
null
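Everything in the Completion pipeline above is driven by jedi's public complete call; a small end-to-end demo (the expected output is indicative, since it depends on the installed Python):

import jedi

comps = jedi.Script('import collections\ncollections.Ord').complete(2, 15)
print([c.name for c in comps])      # e.g. ['OrderedDict']
fuzzy = jedi.Script('import collections\ncollections.odct').complete(2, 16, fuzzy=True)
print([c.name for c in fuzzy][:3])  # fuzzy matching goes through filter_names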
176,483
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager


def _remove_duplicates(completions, other_completions):
    names = {d.name for d in other_completions}
    return [c for c in completions if c.name not in names]
null
176,484
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager

The provided code snippet includes necessary dependencies for implementing the `get_user_context` function. Write a Python function `def get_user_context(module_context, position)` to solve the following problem: Returns the scope in which the user resides. This includes flows. Here is the function:

def get_user_context(module_context, position):
    """
    Returns the scope in which the user resides. This includes flows.
    """
    leaf = module_context.tree_node.get_leaf_for_position(position, include_prefixes=True)
    return module_context.create_context(leaf)
Returns the scope in which the user resides. This includes flows.
176,485
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager


def get_flow_scope_node(module_node, position):
    node = module_node.get_leaf_for_position(position, include_prefixes=True)
    while not isinstance(node, (tree.Scope, tree.Flow)):
        node = node.parent
    return node
null
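The ancestor walk get_flow_scope_node performs is easy to reproduce with parso's public tree classes, which is all it depends on:

import parso
from parso.python import tree

module = parso.parse('if x:\n    y = 1\n')
node = module.get_leaf_for_position((2, 4), include_prefixes=True)
while not isinstance(node, (tree.Scope, tree.Flow)):
    node = node.parent
print(node.type)  # 'if_stmt', the innermost flow around the position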
176,486
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager


def complete_param_names(context, function_name, decorator_nodes):
    # Basically there's no way to do param completion. The plugins are
    # responsible for this.
    return []
null
176,487
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager


def _gather_nodes(stack):
    nodes = []
    for stack_node in stack:
        if stack_node.dfa.from_rule == 'small_stmt':
            nodes = []
        else:
            nodes += stack_node.nodes
    return nodes
null
176,488
import re
from textwrap import dedent
from inspect import Parameter

from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines

from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager

_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')


def cut_value_at_position(leaf, position):
    """
    Cuts off the value of the leaf at position
    """
    lines = split_lines(leaf.value, keepends=True)[:position[0] - leaf.line + 1]
    column = position[1]
    if leaf.line == position[0]:
        column -= leaf.column
    if not lines:
        return ''
    lines[-1] = lines[-1][:column]
    return ''.join(lines)


def _extract_string_while_in_string(leaf, position):
    def return_part_of_leaf(leaf):
        kwargs = {}
        if leaf.line == position[0]:
            kwargs['endpos'] = position[1] - leaf.column
        match = _string_start.match(leaf.value, **kwargs)
        if not match:
            return None, None, None
        start = match.group(0)
        if leaf.line == position[0] and position[1] < leaf.column + match.end():
            return None, None, None
        return cut_value_at_position(leaf, position)[match.end():], leaf, start

    if position < leaf.start_pos:
        return None, None, None

    if leaf.type == 'string':
        return return_part_of_leaf(leaf)

    leaves = []
    while leaf is not None:
        if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
            if len(leaf.value) > 1:
                return return_part_of_leaf(leaf)
            prefix_leaf = None
            if not leaf.prefix:
                prefix_leaf = leaf.get_previous_leaf()
                if prefix_leaf is None or prefix_leaf.type != 'name' \
                        or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
                    prefix_leaf = None

            return (
                ''.join(cut_value_at_position(l, position) for l in leaves),
                prefix_leaf or leaf,
                ('' if prefix_leaf is None else prefix_leaf.value)
                + cut_value_at_position(leaf, position),
            )
        if leaf.line != position[0]:
            # Multi line strings are always simple error leaves and contain the
            # whole string; single line error leaves are therefore the
            # important case here, and since the line is different, it's not
            # really a single line string anymore.
            break
        leaves.insert(0, leaf)
        leaf = leaf.get_previous_leaf()
    return None, None, None
null
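_extract_string_while_in_string is what makes completion inside string literals possible at all; its visible effect is easiest to see through the public API (treating file-name completion as the observable result is an assumption, since the entries depend on the working directory):

import jedi

# Completing inside an unterminated string literal: the text before the
# cursor is recovered by _extract_string_while_in_string and handed on to
# the file-name completer.
comps = jedi.Script("open('./").complete(1, 8)
print([c.name for c in comps][:5])  # entries of the current directory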
relevant_code_lines = ['\n' if c is None else c for c in relevant_code_lines] return self._complete_code_lines(relevant_code_lines) match = re.search(r'`([^`\s]+)', code_lines[-1]) if match: return self._complete_code_lines([match.group(1)]) return [] def _complete_code_lines(self, code_lines): module_node = self._inference_state.grammar.parse(''.join(code_lines)) module_value = DocstringModule( in_module_context=self._module_context, inference_state=self._inference_state, module_node=module_node, code_lines=code_lines, ) return Completion( self._inference_state, module_value.as_context(), code_lines=code_lines, position=module_node.end_pos, signatures_callback=lambda *args, **kwargs: [], fuzzy=self._fuzzy ).complete() def complete_trailer(user_context, values): completion_names = [] for value in values: for filter in value.get_filters(origin_scope=user_context.tree_node): completion_names += filter.values() if not value.is_stub() and isinstance(value, TreeInstance): completion_names += _complete_getattr(user_context, value) python_values = convert_values(values) for c in python_values: if c not in values: for filter in c.get_filters(origin_scope=user_context.tree_node): completion_names += filter.values() return completion_names class SubModuleName(ImportName): _level = 1 def convert_names(names, only_stubs=False, prefer_stubs=False, prefer_stub_to_compiled=True): if only_stubs and prefer_stubs: raise ValueError("You cannot use both of only_stubs and prefer_stubs.") with debug.increase_indent_cm('convert names'): if only_stubs or prefer_stubs: return _python_to_stub_names(names, fallback_to_python=prefer_stubs) else: return _try_stub_to_python_names( names, prefer_stub_to_compiled=prefer_stub_to_compiled) def search_in_module(inference_state, module_context, names, wanted_names, wanted_type, complete=False, fuzzy=False, ignore_imports=False, convert=False): for s in wanted_names[:-1]: new_names = [] for n in names: if s == n.string_name: if n.tree_name is not None and n.api_type in ('module', 'namespace') \ and ignore_imports: continue new_names += complete_trailer( module_context, n.infer() ) debug.dbg('dot lookup on search %s from %s', new_names, names[:10]) names = new_names last_name = wanted_names[-1].lower() for n in names: string = n.string_name.lower() if complete and helpers.match(string, last_name, fuzzy=fuzzy) \ or not complete and string == last_name: if isinstance(n, SubModuleName): names = [v.name for v in n.infer()] else: names = [n] if convert: names = convert_names(names) for n2 in names: if complete: def_ = classes.Completion( inference_state, n2, stack=None, like_name_length=len(last_name), is_fuzzy=fuzzy, ) else: def_ = classes.Name(inference_state, n2) if not wanted_type or wanted_type == def_.type: yield def_
null
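The Completion class and complete_trailer helpers above are jedi internals; the quickest way to exercise them is through the public jedi.Script API. A minimal sketch, assuming jedi is installed (the completion names printed depend on the running interpreter):

import jedi

source = "import json\njson.lo"
script = jedi.Script(source)
# line is 1-based, column is 0-based; complete at the end of "json.lo".
for completion in script.complete(line=2, column=7):
    print(completion.name, completion.type)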
176,490
import __main__ from collections import namedtuple import logging import traceback import re import os import sys from jedi import Interpreter READLINE_DEBUG = False The provided code snippet includes necessary dependencies for implementing the `setup_readline` function. Write a Python function `def setup_readline(namespace_module=__main__, fuzzy=False)` to solve the following problem: This function sets up :mod:`readline` to use Jedi in a Python interactive shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically ``$HOME/.pythonrc.py``), you can add this piece of code:: try: from jedi.utils import setup_readline except ImportError: # Fallback to the stdlib readline completer if it is installed. # Taken from http://docs.python.org/2/library/rlcompleter.html print("Jedi is not installed, falling back to readline") try: import readline import rlcompleter readline.parse_and_bind("tab: complete") except ImportError: print("Readline is not installed either. No tab completion is enabled.") else: setup_readline() This will fallback to the readline completer if Jedi is not installed. The readline completer will only complete names in the global namespace, so for example:: ran<TAB> will complete to ``range``. With Jedi the following code:: range(10).cou<TAB> will complete to ``range(10).count``, this does not work with the default cPython :mod:`readline` completer. You will also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to your shell profile (usually ``.bash_profile`` or ``.profile`` if you use bash). Here is the function: def setup_readline(namespace_module=__main__, fuzzy=False): """ This function sets up :mod:`readline` to use Jedi in a Python interactive shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically ``$HOME/.pythonrc.py``), you can add this piece of code:: try: from jedi.utils import setup_readline except ImportError: # Fallback to the stdlib readline completer if it is installed. # Taken from http://docs.python.org/2/library/rlcompleter.html print("Jedi is not installed, falling back to readline") try: import readline import rlcompleter readline.parse_and_bind("tab: complete") except ImportError: print("Readline is not installed either. No tab completion is enabled.") else: setup_readline() This will fallback to the readline completer if Jedi is not installed. The readline completer will only complete names in the global namespace, so for example:: ran<TAB> will complete to ``range``. With Jedi the following code:: range(10).cou<TAB> will complete to ``range(10).count``, this does not work with the default cPython :mod:`readline` completer. You will also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to your shell profile (usually ``.bash_profile`` or ``.profile`` if you use bash). """ if READLINE_DEBUG: logging.basicConfig( filename='/tmp/jedi.log', filemode='a', level=logging.DEBUG ) class JediRL: def complete(self, text, state): """ This complete stuff is pretty weird, a generator would make a lot more sense, but probably due to backwards compatibility this is still the way how it works. The only important part is stuff in the ``state == 0`` flow, everything else has been copied from the ``rlcompleter`` std. library module. """ if state == 0: sys.path.insert(0, os.getcwd()) # Calling python doesn't have a path, so add to sys.path. 
try: logging.debug("Start REPL completion: " + repr(text)) interpreter = Interpreter(text, [namespace_module.__dict__]) completions = interpreter.complete(fuzzy=fuzzy) logging.debug("REPL completions: %s", completions) self.matches = [ text[:len(text) - c._like_name_length] + c.name_with_symbols for c in completions ] except: logging.error("REPL Completion error:\n" + traceback.format_exc()) raise finally: sys.path.pop(0) try: return self.matches[state] except IndexError: return None try: # Need to import this one as well to make sure it's executed before # this code. This didn't use to be an issue until 3.3. Starting with # 3.4 this is different, it always overwrites the completer if it's not # already imported here. import rlcompleter # noqa: F401 import readline except ImportError: print("Jedi: Module readline not available.") else: readline.set_completer(JediRL().complete) readline.parse_and_bind("tab: complete") # jedi itself does the case matching readline.parse_and_bind("set completion-ignore-case on") # because it's easier to hit the tab just once readline.parse_and_bind("set show-all-if-unmodified") readline.parse_and_bind("set show-all-if-ambiguous on") # don't repeat all the things written in the readline all the time readline.parse_and_bind("set completion-prefix-display-length 2") # No delimiters, Jedi handles that. readline.set_completer_delims('')
This function sets up :mod:`readline` to use Jedi in a Python interactive shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically ``$HOME/.pythonrc.py``), you can add this piece of code:: try: from jedi.utils import setup_readline except ImportError: # Fallback to the stdlib readline completer if it is installed. # Taken from http://docs.python.org/2/library/rlcompleter.html print("Jedi is not installed, falling back to readline") try: import readline import rlcompleter readline.parse_and_bind("tab: complete") except ImportError: print("Readline is not installed either. No tab completion is enabled.") else: setup_readline() This will fallback to the readline completer if Jedi is not installed. The readline completer will only complete names in the global namespace, so for example:: ran<TAB> will complete to ``range``. With Jedi the following code:: range(10).cou<TAB> will complete to ``range(10).count``, this does not work with the default cPython :mod:`readline` completer. You will also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to your shell profile (usually ``.bash_profile`` or ``.profile`` if you use bash).
176,491
import __main__ from collections import namedtuple import logging import traceback import re import os import sys from jedi import Interpreter def namedtuple( typename: Union[str, unicode], field_names: Union[str, unicode, Iterable[Union[str, unicode]]], verbose: bool = ..., rename: bool = ..., ) -> Type[Tuple[Any, ...]]: ... __version__ = '0.18.2' The provided code snippet includes necessary dependencies for implementing the `version_info` function. Write a Python function `def version_info()` to solve the following problem: Returns a namedtuple of Jedi's version, similar to Python's ``sys.version_info``. Here is the function: def version_info(): """ Returns a namedtuple of Jedi's version, similar to Python's ``sys.version_info``. """ Version = namedtuple('Version', 'major, minor, micro') from jedi import __version__ tupl = re.findall(r'[a-z]+|\d+', __version__) return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
Returns a namedtuple of Jedi's version, similar to Python's ``sys.version_info``.
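The interesting step in version_info() is the regular expression split; with the __version__ value shown above it behaves as follows:

import re
from collections import namedtuple

Version = namedtuple('Version', 'major, minor, micro')
parts = re.findall(r'[a-z]+|\d+', '0.18.2')           # ['0', '18', '2']
print(Version(*[x if i == 3 else int(x) for i, x in enumerate(parts)]))
# Version(major=0, minor=18, micro=2)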
176,492
from jedi import settings
from jedi import debug
from jedi.parser_utils import get_parent_scope
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.arguments import TreeArguments
from jedi.inference.param import get_executed_param_names
from jedi.inference.helpers import is_stdlib_path
from jedi.inference.utils import to_list
from jedi.inference.value import instance
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.inference.references import get_module_contexts_containing_name
from jedi.inference import recursion
NO_VALUES = ValueSet([])
def _avoid_recursions(func):
    def wrapper(function_value, param_index):
        inf = function_value.inference_state
        with recursion.execution_allowed(inf, function_value.tree_node) as allowed:
            # We need to catch recursions that may occur, because anonymous
            # functions can create an anonymous parameter that is more or
            # less self referencing.
            if allowed:
                inf.dynamic_params_depth += 1
                try:
                    return func(function_value, param_index)
                finally:
                    inf.dynamic_params_depth -= 1
            return NO_VALUES
    return wrapper
null
176,493
from jedi import settings from jedi import debug from jedi.parser_utils import get_parent_scope from jedi.inference.cache import inference_state_method_cache from jedi.inference.arguments import TreeArguments from jedi.inference.param import get_executed_param_names from jedi.inference.helpers import is_stdlib_path from jedi.inference.utils import to_list from jedi.inference.value import instance from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.references import get_module_contexts_containing_name from jedi.inference import recursion def _search_function_arguments(module_context, funcdef, string_name): """ Returns a list of param names. """ compare_node = funcdef if string_name == '__init__': cls = get_parent_scope(funcdef) if cls.type == 'classdef': string_name = cls.name.value compare_node = cls found_arguments = False i = 0 inference_state = module_context.inference_state if settings.dynamic_params_for_other_modules: module_contexts = get_module_contexts_containing_name( inference_state, [module_context], string_name, # Limit the amounts of files to be opened massively. limit_reduction=5, ) else: module_contexts = [module_context] for for_mod_context in module_contexts: for name, trailer in _get_potential_nodes(for_mod_context, string_name): i += 1 # This is a simple way to stop Jedi's dynamic param recursion # from going wild: The deeper Jedi's in the recursion, the less # code should be inferred. if i * inference_state.dynamic_params_depth > MAX_PARAM_SEARCHES: return random_context = for_mod_context.create_context(name) for arguments in _check_name_for_execution( inference_state, random_context, compare_node, name, trailer): found_arguments = True yield arguments # If there are results after processing a module, we're probably # good to process. This is a speed optimization. if found_arguments: return def _get_lambda_name(node): stmt = node.parent if stmt.type == 'expr_stmt': first_operator = next(stmt.yield_operators(), None) if first_operator == '=': first = stmt.children[0] if first.type == 'name': return first.value return None def get_executed_param_names(function_value, arguments): """ Return a list of `ExecutedParamName`s corresponding to the arguments of the function execution `function_value`, containing the inferred value of those arguments (whether explicit or default). Any issues building this list (for example required arguments which are missing in the invocation) are ignored. For example, given: ``` def foo(a, b, c=None, d='d'): ... foo(42, c='c') ``` Then for the execution of `foo`, this will return a list containing entries for each parameter a, b, c & d; the entries for a, c, & d will have their values (42, 'c' and 'd' respectively) included. """ return get_executed_param_names_and_issues(function_value, arguments)[0] def is_stdlib_path(path): # Python standard library paths look like this: # /usr/lib/python3.9/... # TODO The implementation below is probably incorrect and not complete. parts = path.parts if 'dist-packages' in parts or 'site-packages' in parts: return False base_path = os.path.join(sys.prefix, 'lib', 'python') return bool(re.match(re.escape(base_path) + r'\d.\d', str(path))) class ValueSet: def __init__(self, iterable): self._set = frozenset(iterable) for value in iterable: assert not isinstance(value, ValueSet) def _from_frozen_set(cls, frozenset_): self = cls.__new__(cls) self._set = frozenset_ return self def from_sets(cls, sets): """ Used to work with an iterable of set. 
""" aggregated = set() for set_ in sets: if isinstance(set_, ValueSet): aggregated |= set_._set else: aggregated |= frozenset(set_) return cls._from_frozen_set(frozenset(aggregated)) def __or__(self, other): return self._from_frozen_set(self._set | other._set) def __and__(self, other): return self._from_frozen_set(self._set & other._set) def __iter__(self): return iter(self._set) def __bool__(self): return bool(self._set) def __len__(self): return len(self._set) def __repr__(self): return 'S{%s}' % (', '.join(str(s) for s in self._set)) def filter(self, filter_func): return self.__class__(filter(filter_func, self._set)) def __getattr__(self, name): def mapper(*args, **kwargs): return self.from_sets( getattr(value, name)(*args, **kwargs) for value in self._set ) return mapper def __eq__(self, other): return self._set == other._set def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._set) def py__class__(self): return ValueSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.inference.lazy_value import get_merged_lazy_value type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_values in zip_longest(*type_iters): yield get_merged_lazy_value( [l for l in lazy_values if l is not None] ) def execute(self, arguments): return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) def execute_with_values(self, *args, **kwargs): return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) def goto(self, *args, **kwargs): return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) def py__getattribute__(self, *args, **kwargs): return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) def get_item(self, *args, **kwargs): return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) def try_merge(self, function_name): value_set = self.__class__([]) for c in self._set: try: method = getattr(c, function_name) except AttributeError: pass else: value_set |= method() return value_set def gather_annotation_classes(self): return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) def get_signatures(self): return [sig for c in self._set for sig in c.get_signatures()] def get_type_hint(self, add_class_info=True): t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] type_hints = sorted(filter(None, t)) if len(type_hints) == 1: return type_hints[0] optional = 'None' in type_hints if optional: type_hints.remove('None') if len(type_hints) == 0: return None elif len(type_hints) == 1: s = type_hints[0] else: s = 'Union[%s]' % ', '.join(type_hints) if optional: s = 'Optional[%s]' % s return s def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_type_var_dicts type_var_dict = {} for value in self._set: merge_type_var_dicts( type_var_dict, value.infer_type_vars(value_set), ) return type_var_dict NO_VALUES = ValueSet([]) The provided code snippet includes necessary dependencies for implementing the `dynamic_param_lookup` function. Write a Python function `def dynamic_param_lookup(function_value, param_index)` to solve the following problem: A dynamic search for param values. If you try to complete a type: >>> def func(foo): ... foo >>> func(1) >>> func("") It is not known what the type ``foo`` without analysing the whole code. You have to look for all calls to ``func`` to find out what ``foo`` possibly is. 
Here is the function: def dynamic_param_lookup(function_value, param_index): """ A dynamic search for param values. If you try to complete a type: >>> def func(foo): ... foo >>> func(1) >>> func("") It is not known what the type ``foo`` without analysing the whole code. You have to look for all calls to ``func`` to find out what ``foo`` possibly is. """ funcdef = function_value.tree_node if not settings.dynamic_params: return NO_VALUES path = function_value.get_root_context().py__file__() if path is not None and is_stdlib_path(path): # We don't want to search for references in the stdlib. Usually people # don't work with it (except if you are a core maintainer, sorry). # This makes everything slower. Just disable it and run the tests, # you will see the slowdown, especially in 3.6. return NO_VALUES if funcdef.type == 'lambdef': string_name = _get_lambda_name(funcdef) if string_name is None: return NO_VALUES else: string_name = funcdef.name.value debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA') module_context = function_value.get_root_context() arguments_list = _search_function_arguments(module_context, funcdef, string_name) values = ValueSet.from_sets( get_executed_param_names( function_value, arguments )[param_index].infer() for arguments in arguments_list ) debug.dbg('Dynamic param result finished', color='MAGENTA') return values
A dynamic search for param values. If you try to complete a type: >>> def func(foo): ... foo >>> func(1) >>> func("") It is not known what the type ``foo`` without analysing the whole code. You have to look for all calls to ``func`` to find out what ``foo`` possibly is.
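A hedged sketch of what this dynamic search enables, driven through the public jedi.Script API instead of the internal dynamic_param_lookup; it assumes settings.dynamic_params is left at its default (enabled):

import jedi

source = (
    "def func(foo):\n"
    "    foo\n"
    "func('')\n"
)
# Infer `foo` inside the function body (line 2, 0-based column 5).
# With the call site `func('')` visible, jedi can report a str here.
print(jedi.Script(source).infer(line=2, column=5))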
176,494
from jedi import debug from jedi import settings from jedi.inference import recursion from jedi.inference.base_value import ValueSet, NO_VALUES, HelperValueMixin, \ ValueWrapper from jedi.inference.lazy_value import LazyKnownValues from jedi.inference.helpers import infer_call_of_leaf from jedi.inference.cache import inference_state_method_cache def _internal_check_array_additions(context, sequence): """ Checks if a `Array` has "add" (append, insert, extend) statements: >>> a = [""] >>> a.append(1) """ from jedi.inference import arguments debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA') module_context = context.get_root_context() if not settings.dynamic_array_additions or module_context.is_compiled(): debug.dbg('Dynamic array search aborted.', color='MAGENTA') return NO_VALUES def find_additions(context, arglist, add_name): params = list(arguments.TreeArguments(context.inference_state, context, arglist).unpack()) result = set() if add_name in ['insert']: params = params[1:] if add_name in ['append', 'add', 'insert']: for key, lazy_value in params: result.add(lazy_value) elif add_name in ['extend', 'update']: for key, lazy_value in params: result |= set(lazy_value.infer().iterate()) return result temp_param_add, settings.dynamic_params_for_other_modules = \ settings.dynamic_params_for_other_modules, False is_list = sequence.name.string_name == 'list' search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update']) added_types = set() for add_name in search_names: try: possible_names = module_context.tree_node.get_used_names()[add_name] except KeyError: continue else: for name in possible_names: value_node = context.tree_node if not (value_node.start_pos < name.start_pos < value_node.end_pos): continue trailer = name.parent power = trailer.parent trailer_pos = power.children.index(trailer) try: execution_trailer = power.children[trailer_pos + 1] except IndexError: continue else: if execution_trailer.type != 'trailer' \ or execution_trailer.children[0] != '(' \ or execution_trailer.children[1] == ')': continue random_context = context.create_context(name) with recursion.execution_allowed(context.inference_state, power) as allowed: if allowed: found = infer_call_of_leaf( random_context, name, cut_own_trailer=True ) if sequence in found: # The arrays match. Now add the results added_types |= find_additions( random_context, execution_trailer.children[1], add_name ) # reset settings settings.dynamic_params_for_other_modules = temp_param_add debug.dbg('Dynamic array result %s', added_types, color='MAGENTA') return added_types NO_VALUES = ValueSet([]) The provided code snippet includes necessary dependencies for implementing the `check_array_additions` function. Write a Python function `def check_array_additions(context, sequence)` to solve the following problem: Just a mapper function for the internal _internal_check_array_additions Here is the function: def check_array_additions(context, sequence): """ Just a mapper function for the internal _internal_check_array_additions """ if sequence.array_type not in ('list', 'set'): # TODO also check for dict updates return NO_VALUES return _internal_check_array_additions(context, sequence)
Just a mapper function for the internal _internal_check_array_additions
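A hedged end-to-end illustration of the behaviour this search provides, using the public API; it relies on settings.dynamic_array_additions being enabled, which is the default:

import jedi

source = (
    "a = ['']\n"
    "a.append(1)\n"
    "for x in a:\n"
    "    x\n"
)
# Inferring `x` should reflect the appended int as well as the original str.
print(jedi.Script(source).infer(line=4, column=5))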
176,495
from jedi import debug from jedi import settings from jedi.inference import recursion from jedi.inference.base_value import ValueSet, NO_VALUES, HelperValueMixin, \ ValueWrapper from jedi.inference.lazy_value import LazyKnownValues from jedi.inference.helpers import infer_call_of_leaf from jedi.inference.cache import inference_state_method_cache class _DynamicArrayAdditions(HelperValueMixin): """ Used for the usage of set() and list(). This is definitely a hack, but a good one :-) It makes it possible to use set/list conversions. This is not a proper context, because it doesn't have to be. It's not used in the wild, it's just used within typeshed as an argument to `__init__` for set/list and never used in any other place. """ def __init__(self, instance, arguments): self._instance = instance self._arguments = arguments def py__class__(self): tuple_, = self._instance.inference_state.builtins_module.py__getattribute__('tuple') return tuple_ def py__iter__(self, contextualized_node=None): arguments = self._arguments try: _, lazy_value = next(arguments.unpack()) except StopIteration: pass else: yield from lazy_value.infer().iterate() from jedi.inference.arguments import TreeArguments if isinstance(arguments, TreeArguments): additions = _internal_check_array_additions(arguments.context, self._instance) yield from additions def iterate(self, contextualized_node=None, is_async=False): return self.py__iter__(contextualized_node) class ValueSet: def __init__(self, iterable): self._set = frozenset(iterable) for value in iterable: assert not isinstance(value, ValueSet) def _from_frozen_set(cls, frozenset_): self = cls.__new__(cls) self._set = frozenset_ return self def from_sets(cls, sets): """ Used to work with an iterable of set. """ aggregated = set() for set_ in sets: if isinstance(set_, ValueSet): aggregated |= set_._set else: aggregated |= frozenset(set_) return cls._from_frozen_set(frozenset(aggregated)) def __or__(self, other): return self._from_frozen_set(self._set | other._set) def __and__(self, other): return self._from_frozen_set(self._set & other._set) def __iter__(self): return iter(self._set) def __bool__(self): return bool(self._set) def __len__(self): return len(self._set) def __repr__(self): return 'S{%s}' % (', '.join(str(s) for s in self._set)) def filter(self, filter_func): return self.__class__(filter(filter_func, self._set)) def __getattr__(self, name): def mapper(*args, **kwargs): return self.from_sets( getattr(value, name)(*args, **kwargs) for value in self._set ) return mapper def __eq__(self, other): return self._set == other._set def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._set) def py__class__(self): return ValueSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.inference.lazy_value import get_merged_lazy_value type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_values in zip_longest(*type_iters): yield get_merged_lazy_value( [l for l in lazy_values if l is not None] ) def execute(self, arguments): return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) def execute_with_values(self, *args, **kwargs): return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) def goto(self, *args, **kwargs): return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) def py__getattribute__(self, *args, **kwargs): return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for 
c in self._set) def get_item(self, *args, **kwargs): return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) def try_merge(self, function_name): value_set = self.__class__([]) for c in self._set: try: method = getattr(c, function_name) except AttributeError: pass else: value_set |= method() return value_set def gather_annotation_classes(self): return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) def get_signatures(self): return [sig for c in self._set for sig in c.get_signatures()] def get_type_hint(self, add_class_info=True): t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] type_hints = sorted(filter(None, t)) if len(type_hints) == 1: return type_hints[0] optional = 'None' in type_hints if optional: type_hints.remove('None') if len(type_hints) == 0: return None elif len(type_hints) == 1: s = type_hints[0] else: s = 'Union[%s]' % ', '.join(type_hints) if optional: s = 'Optional[%s]' % s return s def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_type_var_dicts type_var_dict = {} for value in self._set: merge_type_var_dicts( type_var_dict, value.infer_type_vars(value_set), ) return type_var_dict The provided code snippet includes necessary dependencies for implementing the `get_dynamic_array_instance` function. Write a Python function `def get_dynamic_array_instance(instance, arguments)` to solve the following problem: Used for set() and list() instances. Here is the function: def get_dynamic_array_instance(instance, arguments): """Used for set() and list() instances.""" ai = _DynamicArrayAdditions(instance, arguments) from jedi.inference import arguments return arguments.ValuesArguments([ValueSet([ai])])
Used for set() and list() instances.
176,496
from parso.python import tree from jedi import debug from jedi.inference.cache import inference_state_method_cache, CachedMetaClass from jedi.inference import compiled from jedi.inference import recursion from jedi.inference import docstrings from jedi.inference import flow_analysis from jedi.inference.signature import TreeSignature from jedi.inference.filters import ParserTreeFilter, FunctionExecutionFilter, \ AnonymousFunctionExecutionFilter from jedi.inference.names import ValueName, AbstractNameDefinition, \ AnonymousParamName, ParamName, NameWrapper from jedi.inference.base_value import ContextualizedNode, NO_VALUES, \ ValueSet, TreeValue, ValueWrapper from jedi.inference.lazy_value import LazyKnownValues, LazyKnownValue, \ LazyTreeValue from jedi.inference.context import ValueContext, TreeContextMixin from jedi.inference.value import iterable from jedi import parser_utils from jedi.inference.parser_cache import get_yield_exprs from jedi.inference.helpers import values_from_qualified_names from jedi.inference.gradual.generics import TupleGenericManager class ParserTreeFilter(_AbstractUsedNamesFilter): def __init__(self, parent_context, node_context=None, until_position=None, origin_scope=None): def _filter(self, names): def _is_name_reachable(self, name): def _check_flows(self, names): def _find_overload_functions(context, tree_node): def _is_overload_decorated(funcdef): if funcdef.parent.type == 'decorated': decorators = funcdef.parent.children[0] if decorators.type == 'decorator': decorators = [decorators] else: decorators = decorators.children for decorator in decorators: dotted_name = decorator.children[1] if dotted_name.type == 'name' and dotted_name.value == 'overload': # TODO check with values if it's the right overload return True return False if tree_node.type == 'lambdef': return if _is_overload_decorated(tree_node): yield tree_node while True: filter = ParserTreeFilter( context, until_position=tree_node.start_pos ) names = filter.get(tree_node.name.value) assert isinstance(names, list) if not names: break found = False for name in names: funcdef = name.tree_name.parent if funcdef.type == 'funcdef' and _is_overload_decorated(funcdef): tree_node = funcdef found = True yield funcdef if not found: break
null
176,497
from jedi.inference import compiled from jedi.inference import analysis from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ LazyTreeValue from jedi.inference.helpers import get_int_or_none, is_string, \ reraise_getitem_errors, SimpleGetItemNotFound from jedi.inference.utils import safe_property, to_list from jedi.inference.cache import inference_state_method_cache from jedi.inference.filters import LazyAttributeOverwrite, publish_method from jedi.inference.base_value import ValueSet, Value, NO_VALUES, \ ContextualizedNode, iterate_values, sentinel, \ LazyValueWrapper from jedi.parser_utils import get_sync_comp_fors from jedi.inference.context import CompForContext from jedi.inference.value.dynamic_arrays import check_array_additions class ContextualizedNode: def __init__(self, context, node): self.context = context self.node = node def get_root_context(self): return self.context.get_root_context() def infer(self): return self.context.infer_node(self.node) def __repr__(self): return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context) The provided code snippet includes necessary dependencies for implementing the `unpack_tuple_to_dict` function. Write a Python function `def unpack_tuple_to_dict(context, types, exprlist)` to solve the following problem: Unpacking tuple assignments in for statements and expr_stmts. Here is the function: def unpack_tuple_to_dict(context, types, exprlist): """ Unpacking tuple assignments in for statements and expr_stmts. """ if exprlist.type == 'name': return {exprlist.value: types} elif exprlist.type == 'atom' and exprlist.children[0] in ('(', '['): return unpack_tuple_to_dict(context, types, exprlist.children[1]) elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist', 'testlist_star_expr'): dct = {} parts = iter(exprlist.children[::2]) n = 0 for lazy_value in types.iterate(ContextualizedNode(context, exprlist)): n += 1 try: part = next(parts) except StopIteration: analysis.add(context, 'value-error-too-many-values', part, message="ValueError: too many values to unpack (expected %s)" % n) else: dct.update(unpack_tuple_to_dict(context, lazy_value.infer(), part)) has_parts = next(parts, None) if types and has_parts is not None: analysis.add(context, 'value-error-too-few-values', has_parts, message="ValueError: need more than %s values to unpack" % n) return dct elif exprlist.type == 'power' or exprlist.type == 'atom_expr': # Something like ``arr[x], var = ...``. # This is something that is not yet supported, would also be difficult # to write into a dict. return {} elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings # Currently we're not supporting them. return {} raise NotImplementedError
Unpacking tuple assignments in for statements and expr_stmts.
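The recursion above mirrors ordinary tuple unpacking. A standalone sketch of the same idea with plain Python values instead of parso nodes and ValueSets (the function name here is made up for the example):

def unpack_to_dict(target, value):
    if isinstance(target, str):        # a single name such as 'a'
        return {target: value}
    result = {}                        # a nested target such as ('a', ('b', 'c'))
    for sub_target, sub_value in zip(target, value):
        result.update(unpack_to_dict(sub_target, sub_value))
    return result

print(unpack_to_dict(('a', ('b', 'c')), (1, (2, 3))))
# {'a': 1, 'b': 2, 'c': 3}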
176,498
from abc import abstractmethod from inspect import Parameter from typing import Optional, Tuple from parso.tree import search_ancestor from jedi.parser_utils import find_statement_documentation, clean_scope_docstring from jedi.inference.utils import unite from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.cache import inference_state_method_cache from jedi.inference import docstrings from jedi.cache import memoize_method from jedi.inference.helpers import deep_ast_copy, infer_call_of_leaf from jedi.plugins import plugin_manager def _merge_name_docs(names): doc = '' for name in names: if doc: # In case we have multiple values, just return all of them # separated by a few dashes. doc += '\n' + '-' * 30 + '\n' doc += name.py__doc__() return doc
null
176,499
import functools import re import os def to_list(func): def wrapper(*args, **kwargs): return list(func(*args, **kwargs)) return wrapper
null
176,500
import functools import re import os def to_tuple(func): def wrapper(*args, **kwargs): return tuple(func(*args, **kwargs)) return wrapper
null
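Usage of the to_tuple decorator defined just above (to_list from the previous snippet behaves the same way, returning a list):

@to_tuple
def squares(n):
    for i in range(n):
        yield i * i

print(squares(4))   # (0, 1, 4, 9)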
176,501
import functools
import re
import os
def reraise_uncaught(func):
    """
    Re-throw uncaught `AttributeError`.

    Usage: Put ``@reraise_uncaught`` in front of the function which is
    **not** supposed to raise `AttributeError`.

    AttributeError easily gets caught by `hasattr` and another
    ``except AttributeError`` clause. This becomes a problem when you use a
    lot of "dynamic" attributes (e.g., using ``@property``) because you can't
    distinguish whether the property really does not exist or whether some
    code inside of the "dynamic" attribute raised that error. In well written
    code such errors should not exist, but getting there is very difficult.
    This decorator helps us get there by changing `AttributeError` to
    `UncaughtAttributeError` to avoid the unexpected catch. This helps us
    notice bugs earlier and facilitates debugging.
    """
    def wrapper(*args, **kwds):
        try:
            return func(*args, **kwds)
        except AttributeError as e:
            raise UncaughtAttributeError(e) from e
    return wrapper
def safe_property(func):
    return property(reraise_uncaught(func))
null
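A small demonstration of the problem the docstring describes: a bug inside a property raises AttributeError, and hasattr() silently swallows it, making the attribute look absent:

class Broken:
    @property
    def name(self):
        return self.missing_attribute    # bug: raises AttributeError

print(hasattr(Broken(), 'name'))         # False -- the real bug is hidden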
176,502
import re from itertools import zip_longest from parso.python import tree from jedi import debug from jedi.inference.utils import PushBackIterator from jedi.inference import analysis from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ LazyTreeValue, get_merged_lazy_value from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode from jedi.inference.value import iterable from jedi.inference.cache import inference_state_as_method_param_cache The provided code snippet includes necessary dependencies for implementing the `try_iter_content` function. Write a Python function `def try_iter_content(types, depth=0)` to solve the following problem: Helper method for static analysis. Here is the function: def try_iter_content(types, depth=0): """Helper method for static analysis.""" if depth > 10: # It's possible that a loop has references on itself (especially with # CompiledValue). Therefore don't loop infinitely. return for typ in types: try: f = typ.py__iter__ except AttributeError: pass else: for lazy_value in f(): try_iter_content(lazy_value.infer(), depth + 1)
Helper method for static analysis.
176,503
import re from itertools import zip_longest from parso.python import tree from jedi import debug from jedi.inference.utils import PushBackIterator from jedi.inference import analysis from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ LazyTreeValue, get_merged_lazy_value from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode from jedi.inference.value import iterable from jedi.inference.cache import inference_state_as_method_param_cache class ParamIssue(Exception): pass def iterate_argument_clinic(inference_state, arguments, clinic_string): """Uses a list with argument clinic information (see PEP 436).""" clinic_args = list(_parse_argument_clinic(clinic_string)) iterator = PushBackIterator(arguments.unpack()) for i, (name, optional, allow_kwargs, stars) in enumerate(clinic_args): if stars == 1: lazy_values = [] for key, argument in iterator: if key is not None: iterator.push_back((key, argument)) break lazy_values.append(argument) yield ValueSet([iterable.FakeTuple(inference_state, lazy_values)]) lazy_values continue elif stars == 2: raise NotImplementedError() key, argument = next(iterator, (None, None)) if key is not None: debug.warning('Keyword arguments in argument clinic are currently not supported.') raise ParamIssue if argument is None and not optional: debug.warning('TypeError: %s expected at least %s arguments, got %s', name, len(clinic_args), i) raise ParamIssue value_set = NO_VALUES if argument is None else argument.infer() if not value_set and not optional: # For the stdlib we always want values. If we don't get them, # that's ok, maybe something is too hard to resolve, however, # we will not proceed with the type inference of that function. debug.warning('argument_clinic "%s" not resolvable.', name) raise ParamIssue yield value_set NO_VALUES = ValueSet([]) The provided code snippet includes necessary dependencies for implementing the `repack_with_argument_clinic` function. Write a Python function `def repack_with_argument_clinic(clinic_string)` to solve the following problem: Transforms a function or method with arguments to the signature that is given as an argument clinic notation. Argument clinic is part of CPython and used for all the functions that are implemented in C (Python 3.7): str.split.__text_signature__ # Results in: '($self, /, sep=None, maxsplit=-1)' Here is the function: def repack_with_argument_clinic(clinic_string): """ Transforms a function or method with arguments to the signature that is given as an argument clinic notation. Argument clinic is part of CPython and used for all the functions that are implemented in C (Python 3.7): str.split.__text_signature__ # Results in: '($self, /, sep=None, maxsplit=-1)' """ def decorator(func): def wrapper(value, arguments): try: args = tuple(iterate_argument_clinic( value.inference_state, arguments, clinic_string, )) except ParamIssue: return NO_VALUES else: return func(value, *args) return wrapper return decorator
Transforms a function or method with arguments to the signature that is given as an argument clinic notation. Argument clinic is part of CPython and used for all the functions that are implemented in C (Python 3.7): str.split.__text_signature__ # Results in: '($self, /, sep=None, maxsplit=-1)'
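The clinic notation comes straight from CPython's C-implemented functions; on CPython 3.7+ the example from the docstring can be reproduced directly (exact output may vary slightly between versions):

import inspect

print(str.split.__text_signature__)    # ($self, /, sep=None, maxsplit=-1)
print(inspect.signature(str.split))    # (self, /, sep=None, maxsplit=-1)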
176,504
import re from itertools import zip_longest from parso.python import tree from jedi import debug from jedi.inference.utils import PushBackIterator from jedi.inference import analysis from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ LazyTreeValue, get_merged_lazy_value from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode from jedi.inference.value import iterable from jedi.inference.cache import inference_state_as_method_param_cache def unpack_arglist(arglist): if arglist is None: return if arglist.type != 'arglist' and not ( arglist.type == 'argument' and arglist.children[0] in ('*', '**')): yield 0, arglist return iterator = iter(arglist.children) for child in iterator: if child == ',': continue elif child in ('*', '**'): c = next(iterator, None) assert c is not None yield len(child.value), c elif child.type == 'argument' and \ child.children[0] in ('*', '**'): assert len(child.children) == 2 yield len(child.children[0].value), child.children[1] else: yield 0, child
null
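A hedged sketch of feeding unpack_arglist() a real parso node; the find() helper exists only for this example, and the layout assumed here is parso's usual 'argument' node for starred arguments:

import parso

def find(node, type_):
    if node.type == type_:
        return node
    for child in getattr(node, 'children', []):
        result = find(child, type_)
        if result is not None:
            return result
    return None

tree = parso.parse("f(a, *b, **c)\n")
arglist = find(tree, 'arglist')
for star_count, node in unpack_arglist(arglist):
    print(star_count, node)    # 0, 1 and 2 stars with their argument nodes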
176,505
import re from itertools import zip_longest from parso.python import tree from jedi import debug from jedi.inference.utils import PushBackIterator from jedi.inference import analysis from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ LazyTreeValue, get_merged_lazy_value from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode from jedi.inference.value import iterable from jedi.inference.cache import inference_state_as_method_param_cache def _iterate_star_args(context, array, input_node, funcdef=None): if not array.py__getattribute__('__iter__'): if funcdef is not None: # TODO this funcdef should not be needed. m = "TypeError: %s() argument after * must be a sequence, not %s" \ % (funcdef.name.value, array) analysis.add(context, 'type-error-star', input_node, message=m) try: iter_ = array.py__iter__ except AttributeError: pass else: yield from iter_()
null
176,506
import re from itertools import zip_longest from parso.python import tree from jedi import debug from jedi.inference.utils import PushBackIterator from jedi.inference import analysis from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ LazyTreeValue, get_merged_lazy_value from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode from jedi.inference.value import iterable from jedi.inference.cache import inference_state_as_method_param_cache class CompiledInstance(AbstractInstanceValue): # This is not really a compiled class, it's just an instance from a # compiled class. def __init__(self, inference_state, parent_context, class_value, arguments): super().__init__(inference_state, parent_context, class_value) self._arguments = arguments def get_filters(self, origin_scope=None, include_self_names=True): class_value = self.get_annotated_class_object() class_filters = class_value.get_filters( origin_scope=origin_scope, is_instance=True, ) for f in class_filters: yield CompiledInstanceClassFilter(self, f) def name(self): return compiled.CompiledValueName(self, self.class_value.name.string_name) def is_stub(self): return False def _star_star_dict(context, array, input_node, funcdef): from jedi.inference.value.instance import CompiledInstance if isinstance(array, CompiledInstance) and array.name.string_name == 'dict': # For now ignore this case. In the future add proper iterators and just # make one call without crazy isinstance checks. return {} elif isinstance(array, iterable.Sequence) and array.array_type == 'dict': return array.exact_key_items() else: if funcdef is not None: m = "TypeError: %s argument after ** must be a mapping, not %s" \ % (funcdef.name.value, array) analysis.add(context, 'type-error-star-star', input_node, message=m) return {}
null
176,507
from contextlib import contextmanager from jedi import debug from jedi.inference.base_value import NO_VALUES NO_VALUES = ValueSet([]) def execution_recursion_decorator(default=NO_VALUES): def decorator(func): def wrapper(self, **kwargs): detector = self.inference_state.execution_recursion_detector limit_reached = detector.push_execution(self) try: if limit_reached: result = default else: result = func(self, **kwargs) finally: detector.pop_execution() return result return wrapper return decorator
null
176,508
import os import re from pathlib import Path from importlib.machinery import all_suffixes from jedi.inference.cache import inference_state_method_cache from jedi.inference.base_value import ContextualizedNode from jedi.inference.helpers import is_string, get_str_or_none from jedi.parser_utils import get_cached_code_lines from jedi.file_io import FileIO from jedi import settings from jedi import debug _BUILDOUT_PATH_INSERTION_LIMIT = 10 def _get_paths_from_buildout_script(inference_state, buildout_script_path): file_io = FileIO(str(buildout_script_path)) try: module_node = inference_state.parse( file_io=file_io, cache=True, cache_path=settings.cache_directory ) except IOError: debug.warning('Error trying to read buildout_script: %s', buildout_script_path) return from jedi.inference.value import ModuleValue module_context = ModuleValue( inference_state, module_node, file_io=file_io, string_names=None, code_lines=get_cached_code_lines(inference_state.grammar, buildout_script_path), ).as_context() yield from check_sys_path_modifications(module_context) def _get_buildout_script_paths(search_path: Path): """ if there is a 'buildout.cfg' file in one of the parent directories of the given module it will return a list of all files in the buildout bin directory that look like python files. :param search_path: absolute path to the module. """ project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg') if not project_root: return bin_path = project_root.joinpath('bin') if not bin_path.exists(): return for filename in os.listdir(bin_path): try: filepath = bin_path.joinpath(filename) with open(filepath, 'r') as f: firstline = f.readline() if firstline.startswith('#!') and 'python' in firstline: yield filepath except (UnicodeDecodeError, IOError) as e: # Probably a binary file; permission error or race cond. because # file got deleted. Ignore it. debug.warning(str(e)) continue def discover_buildout_paths(inference_state, script_path): buildout_script_paths = set() for buildout_script_path in _get_buildout_script_paths(script_path): for path in _get_paths_from_buildout_script(inference_state, buildout_script_path): buildout_script_paths.add(path) if len(buildout_script_paths) >= _BUILDOUT_PATH_INSERTION_LIMIT: break return buildout_script_paths
null
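The _get_parent_dir_with_file helper used by _get_buildout_script_paths is not part of this snippet; a plausible standalone sketch of what it has to do (walk up the directory tree until the file is found) could look like this:

from pathlib import Path

def _get_parent_dir_with_file(path: Path, filename: str):
    # Hypothetical reconstruction for illustration only.
    for parent in path.parents:
        if (parent / filename).is_file():
            return parent
    return None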
176,509
from typing import Dict, Optional from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope from jedi.inference.recursion import execution_allowed from jedi.inference.helpers import is_big_annoying_library REACHABLE = Status(True, 'reachable') UNREACHABLE = Status(False, 'unreachable') UNSURE = Status(None, 'unsure') def _get_flow_scopes(node): while True: node = get_parent_scope(node, include_flows=True) if node is None or is_scope(node): return yield node def _break_check(context, value_scope, flow_scope, node): reachable = REACHABLE if flow_scope.type == 'if_stmt': if flow_scope.is_node_after_else(node): for check_node in flow_scope.get_test_nodes(): reachable = _check_if(context, check_node) if reachable in (REACHABLE, UNSURE): break reachable = reachable.invert() else: flow_node = flow_scope.get_corresponding_test_node(node) if flow_node is not None: reachable = _check_if(context, flow_node) elif flow_scope.type in ('try_stmt', 'while_stmt'): return UNSURE # Only reachable branches need to be examined further. if reachable in (UNREACHABLE, UNSURE): return reachable if value_scope != flow_scope and value_scope != flow_scope.parent: flow_scope = get_parent_scope(flow_scope, include_flows=True) return reachable & _break_check(context, value_scope, flow_scope, node) else: return reachable def get_flow_branch_keyword(flow_node, node): start_pos = node.start_pos if not (flow_node.start_pos < start_pos <= flow_node.end_pos): raise ValueError('The node is not part of the flow.') keyword = None for i, child in enumerate(flow_node.children): if start_pos < child.start_pos: return keyword first_leaf = child.get_first_leaf() if first_leaf in _FLOW_KEYWORDS: keyword = first_leaf return None def get_parent_scope(node, include_flows=False): """ Returns the underlying scope. """ scope = node.parent if scope is None: return None # It's a module already. while True: if is_scope(scope): if scope.type in ('classdef', 'funcdef', 'lambdef'): index = scope.children.index(':') if scope.children[index].start_pos >= node.start_pos: if node.parent.type == 'param' and node.parent.name == node: pass elif node.parent.type == 'tfpdef' and node.parent.children[0] == node: pass else: scope = scope.parent continue return scope elif include_flows and isinstance(scope, tree.Flow): # The cursor might be on `if foo`, so the parent scope will not be # the if, but the parent of the if. if not (scope.type == 'if_stmt' and any(n.start_pos <= node.start_pos < n.end_pos for n in scope.get_test_nodes())): return scope scope = scope.parent def is_big_annoying_library(context): string_names = context.get_root_context().string_names if string_names is None: return False # Especially pandas and tensorflow are huge complicated Python libraries # that get even slower than they already are when Jedi tries to undrstand # dynamic features like decorators, ifs and other stuff. 
return string_names[0] in ('pandas', 'numpy', 'tensorflow', 'matplotlib') def reachability_check(context, value_scope, node, origin_scope=None): if is_big_annoying_library(context) \ or not context.inference_state.flow_analysis_enabled: return UNSURE first_flow_scope = get_parent_scope(node, include_flows=True) if origin_scope is not None: origin_flow_scopes = list(_get_flow_scopes(origin_scope)) node_flow_scopes = list(_get_flow_scopes(node)) branch_matches = True for flow_scope in origin_flow_scopes: if flow_scope in node_flow_scopes: node_keyword = get_flow_branch_keyword(flow_scope, node) origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope) branch_matches = node_keyword == origin_keyword if flow_scope.type == 'if_stmt': if not branch_matches: return UNREACHABLE elif flow_scope.type == 'try_stmt': if not branch_matches and origin_keyword == 'else' \ and node_keyword == 'except': return UNREACHABLE if branch_matches: break # Direct parents get resolved, we filter scopes that are separate # branches. This makes sense for autocompletion and static analysis. # For actual Python it doesn't matter, because we're talking about # potentially unreachable code. # e.g. `if 0:` would cause all name lookup within the flow make # unaccessible. This is not a "problem" in Python, because the code is # never called. In Jedi though, we still want to infer types. while origin_scope is not None: if first_flow_scope == origin_scope and branch_matches: return REACHABLE origin_scope = origin_scope.parent return _break_check(context, value_scope, first_flow_scope, node)
null
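A hedged illustration of the UNSURE case through the public API: the test below cannot be evaluated statically, so definitions from both branches are kept (assuming default jedi settings):

import jedi

source = (
    "if unknown_condition:\n"
    "    x = 1\n"
    "else:\n"
    "    x = ''\n"
    "x\n"
)
print(sorted(d.name for d in jedi.Script(source).infer(line=5, column=1)))
# expected to include both 'int' and 'str'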
176,510
import re import warnings from parso import parse, ParserSyntaxError from jedi import debug from jedi.inference.cache import inference_state_method_cache from jedi.inference.base_value import iterator_to_value_set, ValueSet, \ NO_VALUES from jedi.inference.lazy_value import LazyKnownValues def _search_param_in_docstr(docstr, param_str): """ Search `docstr` for type(-s) of `param_str`. >>> _search_param_in_docstr(':type param: int', 'param') ['int'] >>> _search_param_in_docstr('@type param: int', 'param') ['int'] >>> _search_param_in_docstr( ... ':type param: :class:`threading.Thread`', 'param') ['threading.Thread'] >>> bool(_search_param_in_docstr('no document', 'param')) False >>> _search_param_in_docstr(':param int param: some description', 'param') ['int'] """ # look at #40 to see definitions of those params patterns = [re.compile(p % re.escape(param_str)) for p in DOCSTRING_PARAM_PATTERNS] for pattern in patterns: match = pattern.search(docstr) if match: return [_strip_rst_role(match.group(1))] return _search_param_in_numpydocstr(docstr, param_str) def _infer_for_statement_string(module_context, string): if string is None: return [] potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string) # Try to import module part in dotted name. # (e.g., 'threading' in 'threading.Thread'). imports = "\n".join(f"import {p}" for p in potential_imports) string = f'{imports}\n{string}' debug.dbg('Parse docstring code %s', string, color='BLUE') grammar = module_context.inference_state.grammar try: module = grammar.parse(string, error_recovery=False) except ParserSyntaxError: return [] try: # It's not the last item, because that's an end marker. stmt = module.children[-2] except (AttributeError, IndexError): return [] if stmt.type not in ('name', 'atom', 'atom_expr'): return [] # Here we basically use a fake module that also uses the filters in # the actual module. from jedi.inference.docstring_utils import DocstringModule m = DocstringModule( in_module_context=module_context, inference_state=module_context.inference_state, module_node=module, code_lines=[], ) return list(_execute_types_in_stmt(m.as_context(), stmt)) class ValueSet: def __init__(self, iterable): self._set = frozenset(iterable) for value in iterable: assert not isinstance(value, ValueSet) def _from_frozen_set(cls, frozenset_): self = cls.__new__(cls) self._set = frozenset_ return self def from_sets(cls, sets): """ Used to work with an iterable of set. 
""" aggregated = set() for set_ in sets: if isinstance(set_, ValueSet): aggregated |= set_._set else: aggregated |= frozenset(set_) return cls._from_frozen_set(frozenset(aggregated)) def __or__(self, other): return self._from_frozen_set(self._set | other._set) def __and__(self, other): return self._from_frozen_set(self._set & other._set) def __iter__(self): return iter(self._set) def __bool__(self): return bool(self._set) def __len__(self): return len(self._set) def __repr__(self): return 'S{%s}' % (', '.join(str(s) for s in self._set)) def filter(self, filter_func): return self.__class__(filter(filter_func, self._set)) def __getattr__(self, name): def mapper(*args, **kwargs): return self.from_sets( getattr(value, name)(*args, **kwargs) for value in self._set ) return mapper def __eq__(self, other): return self._set == other._set def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._set) def py__class__(self): return ValueSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.inference.lazy_value import get_merged_lazy_value type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_values in zip_longest(*type_iters): yield get_merged_lazy_value( [l for l in lazy_values if l is not None] ) def execute(self, arguments): return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) def execute_with_values(self, *args, **kwargs): return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) def goto(self, *args, **kwargs): return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) def py__getattribute__(self, *args, **kwargs): return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) def get_item(self, *args, **kwargs): return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) def try_merge(self, function_name): value_set = self.__class__([]) for c in self._set: try: method = getattr(c, function_name) except AttributeError: pass else: value_set |= method() return value_set def gather_annotation_classes(self): return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) def get_signatures(self): return [sig for c in self._set for sig in c.get_signatures()] def get_type_hint(self, add_class_info=True): t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] type_hints = sorted(filter(None, t)) if len(type_hints) == 1: return type_hints[0] optional = 'None' in type_hints if optional: type_hints.remove('None') if len(type_hints) == 0: return None elif len(type_hints) == 1: s = type_hints[0] else: s = 'Union[%s]' % ', '.join(type_hints) if optional: s = 'Optional[%s]' % s return s def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_type_var_dicts type_var_dict = {} for value in self._set: merge_type_var_dicts( type_var_dict, value.infer_type_vars(value_set), ) return type_var_dict NO_VALUES = ValueSet([]) def infer_param(function_value, param): def infer_docstring(docstring): return ValueSet( p for param_str in _search_param_in_docstr(docstring, param.name.value) for p in _infer_for_statement_string(module_context, param_str) ) module_context = function_value.get_root_context() func = param.get_parent_function() if func.type == 'lambdef': return NO_VALUES types = infer_docstring(function_value.py__doc__()) if function_value.is_bound_method() \ and function_value.py__name__() == '__init__': 
types |= infer_docstring(function_value.class_context.py__doc__()) debug.dbg('Found param types for docstring: %s', types, color='BLUE') return types
null
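A minimal, self-contained sketch (not jedi's actual implementation) of the idea behind _search_param_in_docstr and _strip_rst_role: match Sphinx/Epydoc-style field lines for a named parameter and strip an optional ReST role from the result. The pattern list and helper name below are illustrative assumptions, not jedi's exact constants:

import re

# Hypothetical stand-ins for jedi's DOCSTRING_PARAM_PATTERNS / REST_ROLE_PATTERN.
PARAM_PATTERNS = [
    r'\s*:type\s+%s:\s*([^\n]+)',       # Sphinx field
    r'\s*:param\s+(\w+)\s+%s:[^\n]+',   # Sphinx param with inline type
    r'\s*@type\s+%s:\s*([^\n]+)',       # Epydoc field
]
REST_ROLE = re.compile(r':[^`]+:`([^`]+)`')

def find_param_type(docstring, param_name):
    for pattern in PARAM_PATTERNS:
        match = re.search(pattern % re.escape(param_name), docstring)
        if match:
            type_str = match.group(1).strip()
            role = REST_ROLE.match(type_str)
            return role.group(1) if role else type_str
    return None

print(find_param_type(':type param: int', 'param'))                  # int
print(find_param_type(':type p: :class:`threading.Thread`', 'p'))    # threading.Thread
print(find_param_type(':param int count: some description', 'count'))  # int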
176,511
import re import warnings from parso import parse, ParserSyntaxError from jedi import debug from jedi.inference.cache import inference_state_method_cache from jedi.inference.base_value import iterator_to_value_set, ValueSet, \ NO_VALUES from jedi.inference.lazy_value import LazyKnownValues DOCSTRING_RETURN_PATTERNS = [ re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc ] def _search_return_in_numpydocstr(docstr): """ Search `docstr` (in numpydoc format) for type(-s) of function returns. """ with warnings.catch_warnings(): warnings.simplefilter("ignore") try: doc = _get_numpy_doc_string_cls()(docstr) except Exception: return try: # This is a non-public API. If it ever changes we should be # prepared and return gracefully. returns = doc._parsed_data['Returns'] returns += doc._parsed_data['Yields'] except Exception: return for r_name, r_type, r_descr in returns: # Return names are optional and if so the type is in the name if not r_type: r_type = r_name yield from _expand_typestr(r_type) def _strip_rst_role(type_str): """ Strip off the part that looks like a ReST role in `type_str`. >>> _strip_rst_role(':class:`ClassName`') # strip off :class: 'ClassName' >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain 'module.Object' >>> _strip_rst_role('ClassName') # do nothing when not ReST role 'ClassName' See also: http://sphinx-doc.org/domains.html#cross-referencing-python-objects """ match = REST_ROLE_PATTERN.match(type_str) if match: return match.group(1) else: return type_str def _infer_for_statement_string(module_context, string): if string is None: return [] potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string) # Try to import module part in dotted name. # (e.g., 'threading' in 'threading.Thread'). imports = "\n".join(f"import {p}" for p in potential_imports) string = f'{imports}\n{string}' debug.dbg('Parse docstring code %s', string, color='BLUE') grammar = module_context.inference_state.grammar try: module = grammar.parse(string, error_recovery=False) except ParserSyntaxError: return [] try: # It's not the last item, because that's an end marker. stmt = module.children[-2] except (AttributeError, IndexError): return [] if stmt.type not in ('name', 'atom', 'atom_expr'): return [] # Here we basically use a fake module that also uses the filters in # the actual module. from jedi.inference.docstring_utils import DocstringModule m = DocstringModule( in_module_context=module_context, inference_state=module_context.inference_state, module_node=module, code_lines=[], ) return list(_execute_types_in_stmt(m.as_context(), stmt)) def infer_return_types(function_value): def search_return_in_docstr(code): for p in DOCSTRING_RETURN_PATTERNS: match = p.search(code) if match: yield _strip_rst_role(match.group(1)) # Check for numpy style return hint yield from _search_return_in_numpydocstr(code) for type_str in search_return_in_docstr(function_value.py__doc__()): yield from _infer_for_statement_string(function_value.get_root_context(), type_str)
null
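In the same spirit, a small hedged sketch of what infer_return_types scans for: :rtype:/@rtype: lines with any ReST role stripped. The pattern list mirrors DOCSTRING_RETURN_PATTERNS shown above; the numpydoc branch is omitted here and the helper names are illustrative:

import re

RETURN_PATTERNS = [
    re.compile(r'\s*:rtype:\s*([^\n]+)', re.M),   # Sphinx
    re.compile(r'\s*@rtype:\s*([^\n]+)', re.M),   # Epydoc
]
REST_ROLE = re.compile(r':[^`]+:`([^`]+)`')

def return_type_strings(docstring):
    for pattern in RETURN_PATTERNS:
        match = pattern.search(docstring)
        if match:
            hint = match.group(1).strip()
            role = REST_ROLE.match(hint)
            yield role.group(1) if role else hint

doc = """Fetch a worker.

:rtype: :class:`threading.Thread`
"""
print(list(return_type_strings(doc)))   # ['threading.Thread']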
176,512
from inspect import Parameter from parso import tree from jedi.inference.utils import to_list from jedi.inference.names import ParamNameWrapper from jedi.inference.helpers import is_big_annoying_library def _iter_nodes_for_param(param_name): from parso.python.tree import search_ancestor from jedi.inference.arguments import TreeArguments execution_context = param_name.parent_context # Walk up the parso tree to get the FunctionNode we want. We use the parso # tree rather than going via the execution context so that we're agnostic of # the specific scope we're evaluating within (i.e.: module or function, # etc.). function_node = tree.search_ancestor(param_name.tree_name, 'funcdef', 'lambdef') module_node = function_node.get_root_node() start = function_node.children[-1].start_pos end = function_node.children[-1].end_pos for name in module_node.get_used_names().get(param_name.string_name): if start <= name.start_pos < end: # Is used in the function argument = name.parent if argument.type == 'argument' \ and argument.children[0] == '*' * param_name.star_count: trailer = search_ancestor(argument, 'trailer') if trailer is not None: # Make sure we're in a function context = execution_context.create_context(trailer) if _goes_to_param_name(param_name, context, name): values = _to_callables(context, trailer) args = TreeArguments.create_cached( execution_context.inference_state, context=context, argument_node=trailer.children[1], trailer=trailer, ) for c in values: yield c, args def _remove_given_params(arguments, param_names): count = 0 used_keys = set() for key, _ in arguments.unpack(): if key is None: count += 1 else: used_keys.add(key) for p in param_names: if count and p.maybe_positional_argument(): count -= 1 continue if p.string_name in used_keys and p.maybe_keyword_argument(): continue yield p class ParamNameFixedKind(ParamNameWrapper): def __init__(self, param_name, new_kind): super().__init__(param_name) self._new_kind = new_kind def get_kind(self): return self._new_kind class Parameter: def __init__(self, name: str, kind: _ParameterKind, *, default: Any = ..., annotation: Any = ...) -> None: ... empty: Any = ... name: str default: Any annotation: Any kind: _ParameterKind POSITIONAL_ONLY: ClassVar[Literal[_ParameterKind.POSITIONAL_ONLY]] POSITIONAL_OR_KEYWORD: ClassVar[Literal[_ParameterKind.POSITIONAL_OR_KEYWORD]] VAR_POSITIONAL: ClassVar[Literal[_ParameterKind.VAR_POSITIONAL]] KEYWORD_ONLY: ClassVar[Literal[_ParameterKind.KEYWORD_ONLY]] VAR_KEYWORD: ClassVar[Literal[_ParameterKind.VAR_KEYWORD]] def replace( self, *, name: Optional[str] = ..., kind: Optional[_ParameterKind] = ..., default: Any = ..., annotation: Any = ... ) -> Parameter: ... def is_big_annoying_library(context): string_names = context.get_root_context().string_names if string_names is None: return False # Especially pandas and tensorflow are huge complicated Python libraries # that get even slower than they already are when Jedi tries to understand # dynamic features like decorators, ifs and other stuff. return string_names[0] in ('pandas', 'numpy', 'tensorflow', 'matplotlib') def process_params(param_names, star_count=3): # default means both * and ** if param_names: if is_big_annoying_library(param_names[0].parent_context): # At first this feature can look innocent, but it does a lot of # type inference in some cases, so we just ditch it.
yield from param_names return used_names = set() arg_callables = [] kwarg_callables = [] kw_only_names = [] kwarg_names = [] arg_names = [] original_arg_name = None original_kwarg_name = None for p in param_names: kind = p.get_kind() if kind == Parameter.VAR_POSITIONAL: if star_count & 1: arg_callables = _iter_nodes_for_param(p) original_arg_name = p elif p.get_kind() == Parameter.VAR_KEYWORD: if star_count & 2: kwarg_callables = list(_iter_nodes_for_param(p)) original_kwarg_name = p elif kind == Parameter.KEYWORD_ONLY: if star_count & 2: kw_only_names.append(p) elif kind == Parameter.POSITIONAL_ONLY: if star_count & 1: yield p else: if star_count == 1: yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY) elif star_count == 2: kw_only_names.append(ParamNameFixedKind(p, Parameter.KEYWORD_ONLY)) else: used_names.add(p.string_name) yield p # First process *args longest_param_names = () found_arg_signature = False found_kwarg_signature = False for func_and_argument in arg_callables: func, arguments = func_and_argument new_star_count = star_count if func_and_argument in kwarg_callables: kwarg_callables.remove(func_and_argument) else: new_star_count = 1 for signature in func.get_signatures(): found_arg_signature = True if new_star_count == 3: found_kwarg_signature = True args_for_this_func = [] for p in process_params( list(_remove_given_params( arguments, signature.get_param_names(resolve_stars=False) )), new_star_count): if p.get_kind() == Parameter.VAR_KEYWORD: kwarg_names.append(p) elif p.get_kind() == Parameter.VAR_POSITIONAL: arg_names.append(p) elif p.get_kind() == Parameter.KEYWORD_ONLY: kw_only_names.append(p) else: args_for_this_func.append(p) if len(args_for_this_func) > len(longest_param_names): longest_param_names = args_for_this_func for p in longest_param_names: if star_count == 1 and p.get_kind() != Parameter.VAR_POSITIONAL: yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY) else: if p.get_kind() == Parameter.POSITIONAL_OR_KEYWORD: used_names.add(p.string_name) yield p if not found_arg_signature and original_arg_name is not None: yield original_arg_name elif arg_names: yield arg_names[0] # Then process **kwargs for func, arguments in kwarg_callables: for signature in func.get_signatures(): found_kwarg_signature = True for p in process_params( list(_remove_given_params( arguments, signature.get_param_names(resolve_stars=False) )), star_count=2): if p.get_kind() == Parameter.VAR_KEYWORD: kwarg_names.append(p) elif p.get_kind() == Parameter.KEYWORD_ONLY: kw_only_names.append(p) for p in kw_only_names: if p.string_name in used_names: continue yield p used_names.add(p.string_name) if not found_kwarg_signature and original_kwarg_name is not None: yield original_kwarg_name elif kwarg_names: yield kwarg_names[0]
null
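process_params exists because a bare *args, **kwargs signature hides the parameters of the function the wrapper forwards to. As a rough standalone illustration of the problem being solved (using only inspect, not jedi's tree-walking shown above; the helper name is hypothetical), this shows how the callee's parameters can be substituted for the star parameters of a forwarding wrapper:

import inspect
from inspect import Parameter

def target(a, b, *, timeout=10):
    return a + b

def wrapper(*args, **kwargs):
    # Forwards everything to `target`; its own signature says nothing useful.
    return target(*args, **kwargs)

def resolved_params(wrapper_func, forwarded_to):
    # Replace VAR_POSITIONAL / VAR_KEYWORD with the callee's real parameters,
    # roughly what a completion engine wants to present for `wrapper(...)`.
    params = []
    for p in inspect.signature(wrapper_func).parameters.values():
        if p.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
            params.extend(inspect.signature(forwarded_to).parameters.values())
            break
        params.append(p)
    return params

print([p.name for p in resolved_params(wrapper, target)])   # ['a', 'b', 'timeout']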
176,513
from abc import abstractmethod from typing import List, MutableMapping, Type import weakref from parso.tree import search_ancestor from parso.python.tree import Name, UsedNamesMapping from jedi.inference import flow_analysis from jedi.inference.base_value import ValueSet, ValueWrapper, \ LazyValueWrapper from jedi.parser_utils import get_cached_parent_scope, get_parso_cache_node from jedi.inference.utils import to_list from jedi.inference.names import TreeNameDefinition, ParamName, \ AnonymousParamName, AbstractNameDefinition, NameWrapper _definition_name_cache: MutableMapping[UsedNamesMapping, List[Name]] _definition_name_cache = weakref.WeakKeyDictionary() def _get_definition_names(parso_cache_node, used_names, name_key): if parso_cache_node is None: names = used_names.get(name_key, ()) return tuple(name for name in names if name.is_definition(include_setitem=True)) try: for_module = _definition_name_cache[parso_cache_node] except KeyError: for_module = _definition_name_cache[parso_cache_node] = {} try: return for_module[name_key] except KeyError: names = used_names.get(name_key, ()) result = for_module[name_key] = tuple( name for name in names if name.is_definition(include_setitem=True) ) return result
null
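The caching in _get_definition_names is a two-level dictionary keyed by a weakly referenced cache node, so entries disappear once the parsed module is collected. A small standalone sketch of the same pattern; the node and used-names objects here are hypothetical stand-ins:

import weakref

class ModuleNode:
    """Hypothetical stand-in for a parso cache node."""

_cache = weakref.WeakKeyDictionary()   # node -> {name_key: tuple_of_results}

def cached_lookup(node, used_names, name_key):
    per_node = _cache.setdefault(node, {})
    try:
        return per_node[name_key]
    except KeyError:
        # The expensive filtering happens only once per (node, name_key).
        result = per_node[name_key] = tuple(
            n for n in used_names.get(name_key, ()) if n.endswith('_def')
        )
        return result

node = ModuleNode()
names = {'x': ('x_def', 'x_use'), 'y': ('y_use',)}
print(cached_lookup(node, names, 'x'))   # ('x_def',)
del node                                 # the cache entry can now be collected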
176,514
from abc import abstractmethod from typing import List, MutableMapping, Type import weakref from parso.tree import search_ancestor from parso.python.tree import Name, UsedNamesMapping from jedi.inference import flow_analysis from jedi.inference.base_value import ValueSet, ValueWrapper, \ LazyValueWrapper from jedi.parser_utils import get_cached_parent_scope, get_parso_cache_node from jedi.inference.utils import to_list from jedi.inference.names import TreeNameDefinition, ParamName, \ AnonymousParamName, AbstractNameDefinition, NameWrapper def publish_method(method_name): def decorator(func): dct = func.__dict__.setdefault('registered_overwritten_methods', {}) dct[method_name] = func return func return decorator
null
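publish_method simply records the decorated function in a dict hanging off the function object, so a later lookup can find every overwritten method name. A minimal usage sketch of that registration pattern, reusing the decorator above with a hypothetical class:

def publish_method(method_name):
    def decorator(func):
        registry = func.__dict__.setdefault('registered_overwritten_methods', {})
        registry[method_name] = func
        return func
    return decorator

class FakeList:
    @publish_method('append')
    @publish_method('extend')
    def _grow(self, *args):
        return 'grew'

# Both published names point at the same underlying implementation.
print(FakeList._grow.registered_overwritten_methods.keys())   # dict_keys(['extend', 'append'])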
176,515
from functools import wraps from jedi import debug _NO_DEFAULT = object() def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False, second_arg_is_inference_state=False): """ This is a typical memoization decorator, BUT there is one difference: To prevent recursion it sets defaults. Preventing recursion is in this case a much bigger benefit than speed. I don't think that there is a big speed difference, but there are many cases where recursion could happen (think about a = b; b = a). """ def func(function): def wrapper(obj, *args, **kwargs): # TODO These checks are kind of ugly and slow. if inference_state_is_first_arg: cache = obj.memoize_cache elif second_arg_is_inference_state: cache = args[0].memoize_cache # needed for meta classes else: cache = obj.inference_state.memoize_cache try: memo = cache[function] except KeyError: cache[function] = memo = {} key = (obj, args, frozenset(kwargs.items())) if key in memo: return memo[key] else: if default is not _NO_DEFAULT: memo[key] = default rv = function(obj, *args, **kwargs) memo[key] = rv return rv return wrapper return func def inference_state_function_cache(default=_NO_DEFAULT): def decorator(func): return _memoize_default(default=default, inference_state_is_first_arg=True)(func) return decorator
null
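The important twist in _memoize_default is that it writes the default into the cache before calling the function, so a recursive re-entry returns the default instead of looping forever. A condensed standalone sketch of that idea, without the inference_state plumbing and with illustrative names:

_NO_DEFAULT = object()

def memoize_default(default=_NO_DEFAULT):
    def decorator(function):
        memo = {}
        def wrapper(*args):
            if args in memo:
                return memo[args]
            if default is not _NO_DEFAULT:
                memo[args] = default        # placeholder breaks recursion
            memo[args] = result = function(*args)
            return result
        return wrapper
    return decorator

ASSIGNMENTS = {'a': 'b', 'b': 'a'}   # an `a = b; b = a` style cycle

@memoize_default(default=frozenset())
def infer(name):
    other = ASSIGNMENTS.get(name)
    if other is None:
        return frozenset({name})
    return infer(other)

print(infer('a'))   # frozenset() -- the cycle hits the placeholder default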
176,516
from functools import wraps from jedi import debug _NO_DEFAULT = object() def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False, second_arg_is_inference_state=False): """ This is a typical memoization decorator, BUT there is one difference: To prevent recursion it sets defaults. Preventing recursion is in this case a much bigger benefit than speed. I don't think that there is a big speed difference, but there are many cases where recursion could happen (think about a = b; b = a). """ def func(function): def wrapper(obj, *args, **kwargs): # TODO These checks are kind of ugly and slow. if inference_state_is_first_arg: cache = obj.memoize_cache elif second_arg_is_inference_state: cache = args[0].memoize_cache # needed for meta classes else: cache = obj.inference_state.memoize_cache try: memo = cache[function] except KeyError: cache[function] = memo = {} key = (obj, args, frozenset(kwargs.items())) if key in memo: return memo[key] else: if default is not _NO_DEFAULT: memo[key] = default rv = function(obj, *args, **kwargs) memo[key] = rv return rv return wrapper return func def inference_state_method_cache(default=_NO_DEFAULT): def decorator(func): return _memoize_default(default=default)(func) return decorator
null
176,517
from functools import wraps from jedi import debug def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False, second_arg_is_inference_state=False): """ This is a typical memoization decorator, BUT there is one difference: To prevent recursion it sets defaults. Preventing recursion is in this case a much bigger benefit than speed. I don't think that there is a big speed difference, but there are many cases where recursion could happen (think about a = b; b = a). """ def func(function): def wrapper(obj, *args, **kwargs): # TODO These checks are kind of ugly and slow. if inference_state_is_first_arg: cache = obj.memoize_cache elif second_arg_is_inference_state: cache = args[0].memoize_cache # needed for meta classes else: cache = obj.inference_state.memoize_cache try: memo = cache[function] except KeyError: cache[function] = memo = {} key = (obj, args, frozenset(kwargs.items())) if key in memo: return memo[key] else: if default is not _NO_DEFAULT: memo[key] = default rv = function(obj, *args, **kwargs) memo[key] = rv return rv return wrapper return func def inference_state_as_method_param_cache(): def decorator(call): return _memoize_default(second_arg_is_inference_state=True)(call) return decorator
null
176,518
from functools import wraps from jedi import debug _RECURSION_SENTINEL = object() def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ... The provided code snippet includes necessary dependencies for implementing the `inference_state_method_generator_cache` function. Write a Python function `def inference_state_method_generator_cache()` to solve the following problem: This is a special memoizer. It memoizes generators and also checks for recursion errors and returns no further iterator elements in that case. Here is the function: def inference_state_method_generator_cache(): """ This is a special memoizer. It memoizes generators and also checks for recursion errors and returns no further iterator elements in that case. """ def func(function): @wraps(function) def wrapper(obj, *args, **kwargs): cache = obj.inference_state.memoize_cache try: memo = cache[function] except KeyError: cache[function] = memo = {} key = (obj, args, frozenset(kwargs.items())) if key in memo: actual_generator, cached_lst = memo[key] else: actual_generator = function(obj, *args, **kwargs) cached_lst = [] memo[key] = actual_generator, cached_lst i = 0 while True: try: next_element = cached_lst[i] if next_element is _RECURSION_SENTINEL: debug.warning('Found a generator recursion for %s' % obj) # This means we have hit a recursion. return except IndexError: cached_lst.append(_RECURSION_SENTINEL) next_element = next(actual_generator, None) if next_element is None: cached_lst.pop() return cached_lst[-1] = next_element yield next_element i += 1 return wrapper return func
This is a special memoizer. It memoizes generators and also checks for recursion errors and returns no further iterator elements in that case.
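A reduced standalone sketch of the generator-memoizing idea: yielded items are stored in a shared list, and a sentinel is parked in the next slot while the underlying generator is advanced, so a recursive (or concurrent) consumer stops at the sentinel instead of advancing the same generator twice. Names here are illustrative, not jedi's, and the inference_state cache is replaced by a plain dict:

_SENTINEL = object()

def cached_generator(function):
    memo = {}
    def wrapper(*args):
        try:
            gen, seen = memo[args]
        except KeyError:
            gen, seen = memo[args] = function(*args), []
        i = 0
        while True:
            try:
                item = seen[i]
                if item is _SENTINEL:
                    return          # re-entered while advancing: stop here
            except IndexError:
                seen.append(_SENTINEL)              # reserve the slot
                item = next(gen, None)
                if item is None:
                    seen.pop()
                    return
                seen[-1] = item
            yield item
            i += 1
    return wrapper

@cached_generator
def countdown(n):
    print('advancing from', n)      # proves the generator body runs only once
    yield from range(n, 0, -1)

print(list(countdown(3)))   # advancing from 3, then [3, 2, 1]
print(list(countdown(3)))   # [3, 2, 1]  (served entirely from the cache)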
176,519
import inspect import types import traceback import sys import operator as op from collections import namedtuple import warnings import re import builtins import typing from pathlib import Path from typing import Optional from jedi.inference.compiled.getattr_static import getattr_static _sentinel = object() ALLOWED_DESCRIPTOR_ACCESS = ( types.FunctionType, types.GetSetDescriptorType, types.MemberDescriptorType, MethodDescriptorType, WrapperDescriptorType, ClassMethodDescriptorType, staticmethod, classmethod, ) def getattr_static(obj, attr, default=_sentinel): def safe_getattr(obj, name, default=_sentinel): try: attr, is_get_descriptor = getattr_static(obj, name) except AttributeError: if default is _sentinel: raise return default else: if isinstance(attr, ALLOWED_DESCRIPTOR_ACCESS): # In case of descriptors that have get methods we cannot return # its value, because that would mean code execution. # Since it's an isinstance call, code execution is still possible, # but this is not really a security feature, but much more of a # safety feature. Code execution is basically always possible when # a module is imported. This is here so people don't shoot # themselves in the foot. return getattr(obj, name) return attr
null
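The point of safe_getattr is to look at an attribute without running arbitrary property or descriptor code. The standard library's inspect.getattr_static does the static lookup; below is a hedged sketch of the same guard with a deliberately simplified allow-list (not jedi's ALLOWED_DESCRIPTOR_ACCESS) and a hypothetical Config class:

import inspect
import types

class Config:
    @property
    def password(self):
        print('side effect! (e.g. a network call)')
        return 'secret'

    def reload(self):
        return 'reloaded'

def peek(obj, name):
    attr = inspect.getattr_static(obj, name)      # never triggers __get__
    if isinstance(attr, types.FunctionType):
        # Plain functions are harmless to resolve normally.
        return getattr(obj, name)
    return attr

cfg = Config()
print(peek(cfg, 'password'))    # the property object itself; nothing is printed
print(peek(cfg, 'reload'))      # a bound method, safe to resolve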
176,520
import inspect import types import traceback import sys import operator as op from collections import namedtuple import warnings import re import builtins import typing from pathlib import Path from typing import Optional from jedi.inference.compiled.getattr_static import getattr_static def shorten_repr(func): def wrapper(self): r = func(self) if len(r) > 50: r = r[:50] + '..' return r return wrapper
null
176,521
import inspect import types import traceback import sys import operator as op from collections import namedtuple import warnings import re import builtins import typing from pathlib import Path from typing import Optional from jedi.inference.compiled.getattr_static import getattr_static def create_access_path(inference_state, obj): access = create_access(inference_state, obj) return AccessPath(access.get_access_path_tuples()) def load_module(inference_state, dotted_name, sys_path): temp, sys.path = sys.path, sys_path try: __import__(dotted_name) except ImportError: # If a module is "corrupt" or not really a Python module or whatever. warnings.warn( "Module %s not importable in path %s." % (dotted_name, sys_path), UserWarning, stacklevel=2, ) return None except Exception: # Since __import__ pretty much makes code execution possible, just # catch any error here and print it. warnings.warn( "Cannot import:\n%s" % traceback.format_exc(), UserWarning, stacklevel=2 ) return None finally: sys.path = temp # Just access the cache after import, because of #59 as well as the very # complicated import structure of Python. module = sys.modules[dotted_name] return create_access_path(inference_state, module)
null
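load_module above temporarily replaces sys.path, imports by dotted name, and always restores the original path. A small standalone sketch of that pattern using a context manager and importlib.import_module instead of __import__; the helper names are illustrative:

import sys
import importlib
from contextlib import contextmanager

@contextmanager
def temporary_sys_path(paths):
    original, sys.path = sys.path, list(paths)
    try:
        yield
    finally:
        sys.path = original          # restored even if the import blows up

def import_with_paths(dotted_name, paths):
    with temporary_sys_path(paths):
        try:
            return importlib.import_module(dotted_name)
        except ImportError:
            return None

# Importing a stdlib module works because the original path is passed in here.
print(import_with_paths('json.decoder', sys.path).__name__)   # json.decoder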
176,522
import inspect import types import traceback import sys import operator as op from collections import namedtuple import warnings import re import builtins import typing from pathlib import Path from typing import Optional from jedi.inference.compiled.getattr_static import getattr_static NOT_CLASS_TYPES = ( types.BuiltinFunctionType, types.CodeType, types.FrameType, types.FunctionType, types.GeneratorType, types.GetSetDescriptorType, types.LambdaType, types.MemberDescriptorType, types.MethodType, types.ModuleType, types.TracebackType, MethodDescriptorType, types.MappingProxyType, types.SimpleNamespace, types.DynamicClassAttribute, ) The provided code snippet includes necessary dependencies for implementing the `_is_class_instance` function. Write a Python function `def _is_class_instance(obj)` to solve the following problem: Like inspect.* methods. Here is the function: def _is_class_instance(obj): """Like inspect.* methods.""" try: cls = obj.__class__ except AttributeError: return False else: # The isinstance check for cls is just there so issubclass doesn't # raise an exception. return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)
Like inspect.* methods.
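A quick usage sketch showing what the NOT_CLASS_TYPES filter buys: functions, modules and classes themselves are rejected, while ordinary instances pass. This reuses the function defined above with a trimmed, illustrative type tuple:

import types

NOT_CLASS_TYPES = (types.FunctionType, types.ModuleType, types.BuiltinFunctionType)

def _is_class_instance(obj):
    """Like inspect.* methods."""
    try:
        cls = obj.__class__
    except AttributeError:
        return False
    return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)

class Point:
    pass

print(_is_class_instance(Point()))    # True  -- a plain instance
print(_is_class_instance(Point))      # False -- a class, not an instance
print(_is_class_instance(len))        # False -- a builtin function
print(_is_class_instance(types))      # False -- a module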
176,523
import sys import os import inspect import importlib import warnings from pathlib import Path from zipfile import ZipFile from zipimport import zipimporter, ZipImportError from importlib.machinery import all_suffixes from jedi.inference.compiled import access from jedi import debug from jedi import parser_utils from jedi.file_io import KnownContentFileIO, ZipFileIO def get_compiled_method_return(inference_state, id, attribute, *args, **kwargs): handle = inference_state.compiled_subprocess.get_access_handle(id) return getattr(handle.access, attribute)(*args, **kwargs)
null
176,524
import sys import os import inspect import importlib import warnings from pathlib import Path from zipfile import ZipFile from zipimport import zipimporter, ZipImportError from importlib.machinery import all_suffixes from jedi.inference.compiled import access from jedi import debug from jedi import parser_utils from jedi.file_io import KnownContentFileIO, ZipFileIO def create_simple_object(inference_state, obj): return access.create_access_path(inference_state, obj)
null
176,525
import sys import os import inspect import importlib import warnings from pathlib import Path from zipfile import ZipFile from zipimport import zipimporter, ZipImportError from importlib.machinery import all_suffixes from jedi.inference.compiled import access from jedi import debug from jedi import parser_utils from jedi.file_io import KnownContentFileIO, ZipFileIO The provided code snippet includes necessary dependencies for implementing the `_test_raise_error` function. Write a Python function `def _test_raise_error(inference_state, exception_type)` to solve the following problem: Raise an error to simulate certain problems for unit tests. Here is the function: def _test_raise_error(inference_state, exception_type): """ Raise an error to simulate certain problems for unit tests. """ raise exception_type
Raise an error to simulate certain problems for unit tests.
176,526
import sys import os import inspect import importlib import warnings from pathlib import Path from zipfile import ZipFile from zipimport import zipimporter, ZipImportError from importlib.machinery import all_suffixes from jedi.inference.compiled import access from jedi import debug from jedi import parser_utils from jedi.file_io import KnownContentFileIO, ZipFileIO import sys if sys.version_info >= (3, 9): from types import GenericAlias if sys.version_info >= (3, 7): # Nearly the same args as for 3.6, except for capture_output and text def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[str] = ..., text: Literal[True], timeout: Optional[float] = ..., ) -> CompletedProcess[str]: ... def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: str, errors: Optional[str] = ..., input: Optional[str] = ..., text: Optional[bool] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: ... def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: Optional[str] = ..., errors: str, input: Optional[str] = ..., text: Optional[bool] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: ... def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., *, universal_newlines: Literal[True], startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., # where the *real* keyword only args start capture_output: bool = ..., check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[str] = ..., text: Optional[bool] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: ... 
def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: Literal[False] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: None = ..., errors: None = ..., input: Optional[bytes] = ..., text: Literal[None, False] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[bytes]: ... def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[_TXT] = ..., text: Optional[bool] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[Any]: ... else: # Nearly same args as Popen.__init__ except for timeout, input, and check def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, check: bool = ..., encoding: str, errors: Optional[str] = ..., input: Optional[str] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: ... def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, check: bool = ..., encoding: Optional[str] = ..., errors: str, input: Optional[str] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: ... def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., *, universal_newlines: Literal[True], startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., # where the *real* keyword only args start check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[str] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: ... 
def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: Literal[False] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, check: bool = ..., encoding: None = ..., errors: None = ..., input: Optional[bytes] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[bytes]: ... def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[_TXT] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[Any]: ... if sys.version_info >= (3, 7): # 3.7 added text def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., text: Literal[True], ) -> str: ... def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: str, errors: Optional[str] = ..., text: Optional[bool] = ..., ) -> str: ... def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: str, text: Optional[bool] = ..., ) -> str: ... 
def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., *, universal_newlines: Literal[True], startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., # where the real keyword only ones start timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., text: Optional[bool] = ..., ) -> str: ... def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: Literal[False] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: None = ..., errors: None = ..., text: Literal[None, False] = ..., ) -> bytes: ... def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., text: Optional[bool] = ..., ) -> Any: ... # morally: -> _TXT else: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: str, errors: Optional[str] = ..., ) -> str: ... def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: str, ) -> str: ... def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, universal_newlines: Literal[True], timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., ) -> str: ... 
def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: Literal[False] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: None = ..., errors: None = ..., ) -> bytes: ... def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., ) -> Any: ... # morally: -> _TXT if sys.platform == "win32": class STARTUPINFO: if sys.version_info >= (3, 7): def __init__( self, *, dwFlags: int = ..., hStdInput: Optional[Any] = ..., hStdOutput: Optional[Any] = ..., hStdError: Optional[Any] = ..., wShowWindow: int = ..., lpAttributeList: Optional[Mapping[str, Any]] = ..., ) -> None: ... dwFlags: int hStdInput: Optional[Any] hStdOutput: Optional[Any] hStdError: Optional[Any] wShowWindow: int if sys.version_info >= (3, 7): lpAttributeList: Mapping[str, Any] STD_INPUT_HANDLE: Any STD_OUTPUT_HANDLE: Any STD_ERROR_HANDLE: Any SW_HIDE: int STARTF_USESTDHANDLES: int STARTF_USESHOWWINDOW: int CREATE_NEW_CONSOLE: int CREATE_NEW_PROCESS_GROUP: int if sys.version_info >= (3, 7): ABOVE_NORMAL_PRIORITY_CLASS: int BELOW_NORMAL_PRIORITY_CLASS: int HIGH_PRIORITY_CLASS: int IDLE_PRIORITY_CLASS: int NORMAL_PRIORITY_CLASS: int REALTIME_PRIORITY_CLASS: int CREATE_NO_WINDOW: int DETACHED_PROCESS: int CREATE_DEFAULT_ERROR_MODE: int CREATE_BREAKAWAY_FROM_JOB: int The provided code snippet includes necessary dependencies for implementing the `_test_print` function. Write a Python function `def _test_print(inference_state, stderr=None, stdout=None)` to solve the following problem: Force some prints in the subprocesses. This exists for unit tests. Here is the function: def _test_print(inference_state, stderr=None, stdout=None): """ Force some prints in the subprocesses. This exists for unit tests. """ if stderr is not None: print(stderr, file=sys.stderr) sys.stderr.flush() if stdout is not None: print(stdout) sys.stdout.flush()
Force some prints in the subprocesses. This exists for unit tests.
176,527
import sys import os import inspect import importlib import warnings from pathlib import Path from zipfile import ZipFile from zipimport import zipimporter, ZipImportError from importlib.machinery import all_suffixes from jedi.inference.compiled import access from jedi import debug from jedi import parser_utils from jedi.file_io import KnownContentFileIO, ZipFileIO The provided code snippet includes necessary dependencies for implementing the `_get_init_path` function. Write a Python function `def _get_init_path(directory_path)` to solve the following problem: The __init__ file can be searched in a directory. If found return it, else None. Here is the function: def _get_init_path(directory_path): """ The __init__ file can be searched in a directory. If found return it, else None. """ for suffix in all_suffixes(): path = os.path.join(directory_path, '__init__' + suffix) if os.path.exists(path): return path return None
The __init__ file can be searched in a directory. If found return it, else None.
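all_suffixes() covers source, bytecode and extension-module suffixes, which is why the lookup above tries each of them. A tiny demo of the same check against a throwaway package directory, using only the standard library:

import os
import tempfile
from importlib.machinery import all_suffixes

def get_init_path(directory_path):
    for suffix in all_suffixes():            # e.g. '.py', '.pyc', '.so' / '.pyd'
        path = os.path.join(directory_path, '__init__' + suffix)
        if os.path.exists(path):
            return path
    return None

with tempfile.TemporaryDirectory() as tmp:
    pkg = os.path.join(tmp, 'mypkg')
    os.mkdir(pkg)
    print(get_init_path(pkg))                # None -- just a namespace directory
    open(os.path.join(pkg, '__init__.py'), 'w').close()
    print(get_init_path(pkg))                # .../mypkg/__init__.py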
176,528
import sys import os import inspect import importlib import warnings from pathlib import Path from zipfile import ZipFile from zipimport import zipimporter, ZipImportError from importlib.machinery import all_suffixes from jedi.inference.compiled import access from jedi import debug from jedi import parser_utils from jedi.file_io import KnownContentFileIO, ZipFileIO def safe_literal_eval(inference_state, value): return parser_utils.safe_literal_eval(value)
null
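safe_literal_eval above defers to parser_utils; a helper of this kind typically boils down to ast.literal_eval guarded against syntax and value errors, so constants are recovered without ever executing code. A hedged sketch, not jedi's exact implementation:

import ast

def safe_literal_eval(value):
    # Evaluate constant expressions only; never executes arbitrary code.
    try:
        return ast.literal_eval(value)
    except (SyntaxError, ValueError):
        return None

print(safe_literal_eval("'hello'"))           # hello
print(safe_literal_eval("[1, 2, 3]"))         # [1, 2, 3]
print(safe_literal_eval("__import__('os')"))  # None -- rejected, not executed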
176,529
import sys import os import inspect import importlib import warnings from pathlib import Path from zipfile import ZipFile from zipimport import zipimporter, ZipImportError from importlib.machinery import all_suffixes from jedi.inference.compiled import access from jedi import debug from jedi import parser_utils from jedi.file_io import KnownContentFileIO, ZipFileIO def _iter_module_names(inference_state, paths): # Python modules/packages for path in paths: try: dir_entries = ((entry.name, entry.is_dir()) for entry in os.scandir(path)) except OSError: try: zip_import_info = zipimporter(path) # Unfortunately, there is no public way to access zipimporter's # private _files member. We therefore have to use a # custom function to iterate over the files. dir_entries = _zip_list_subdirectory( zip_import_info.archive, zip_import_info.prefix) except ZipImportError: # The file might not exist or reading it might lead to an error. debug.warning("Not possible to list directory: %s", path) continue for name, is_dir in dir_entries: # First Namespaces then modules/stubs if is_dir: # pycache is obviously not an interesting namespace. Also the # name must be a valid identifier. if name != '__pycache__' and name.isidentifier(): yield name else: if name.endswith('.pyi'): # Stub files modname = name[:-4] else: modname = inspect.getmodulename(name) if modname and '.' not in modname: if modname != '__init__': yield modname def iter_module_names(*args, **kwargs): return list(_iter_module_names(*args, **kwargs))
null
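A compact standalone version of the directory branch of _iter_module_names: scan a path for importable names, treating directories as packages and resolving files via inspect.getmodulename. The zipimport branch is left out, and the final lookup against sys.path is only a demo:

import os
import sys
import inspect

def iter_module_names(path):
    try:
        entries = os.scandir(path)
    except OSError:
        return
    for entry in entries:
        if entry.is_dir():
            # Packages / namespace packages; skip caches and invalid identifiers.
            if entry.name != '__pycache__' and entry.name.isidentifier():
                yield entry.name
        else:
            modname = (entry.name[:-4] if entry.name.endswith('.pyi')
                       else inspect.getmodulename(entry.name))
            if modname and '.' not in modname and modname != '__init__':
                yield modname

# Peek at what's importable from the first real directory on sys.path.
first = next((p for p in sys.path if os.path.isdir(p)), '.')
print(sorted(set(iter_module_names(first)))[:5])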
176,530
import os import sys from importlib.abc import MetaPathFinder from importlib.machinery import PathFinder del sys.path[0] sys.meta_path.insert(0, _ExactImporter(_get_paths())) from jedi.inference.compiled import subprocess sys.meta_path.pop(0) import sys if sys.version_info >= (3, 9): from types import GenericAlias if sys.version_info >= (3, 7): # Nearly the same args as for 3.6, except for capture_output and text def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[str] = ..., text: Literal[True], timeout: Optional[float] = ..., ) -> CompletedProcess[str]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: str, errors: Optional[str] = ..., input: Optional[str] = ..., text: Optional[bool] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: Optional[str] = ..., errors: str, input: Optional[str] = ..., text: Optional[bool] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., *, universal_newlines: Literal[True], startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., # where the *real* keyword only args start capture_output: bool = ..., check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[str] = ..., text: Optional[bool] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: Literal[False] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: 
bool = ..., check: bool = ..., encoding: None = ..., errors: None = ..., input: Optional[bytes] = ..., text: Literal[None, False] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[bytes]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, capture_output: bool = ..., check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[_TXT] = ..., text: Optional[bool] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[Any]: else: # Nearly same args as Popen.__init__ except for timeout, input, and check def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, check: bool = ..., encoding: str, errors: Optional[str] = ..., input: Optional[str] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, check: bool = ..., encoding: Optional[str] = ..., errors: str, input: Optional[str] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., *, universal_newlines: Literal[True], startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., # where the *real* keyword only args start check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[str] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[str]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: Literal[False] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, check: bool = ..., encoding: None = ..., errors: None = ..., input: Optional[bytes] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[bytes]: def run( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stdout: _FILE = ..., stderr: _FILE = ..., preexec_fn: 
Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, check: bool = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., input: Optional[_TXT] = ..., timeout: Optional[float] = ..., ) -> CompletedProcess[Any]: if sys.version_info >= (3, 7): # 3.7 added text def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., text: Literal[True], ) -> str: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: str, errors: Optional[str] = ..., text: Optional[bool] = ..., ) -> str: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: str, text: Optional[bool] = ..., ) -> str: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., *, universal_newlines: Literal[True], startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., # where the real keyword only ones start timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., text: Optional[bool] = ..., ) -> str: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: Literal[False] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: None = ..., errors: None = ..., text: Literal[None, False] = ..., ) -> bytes: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: 
Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., text: Optional[bool] = ..., ) -> Any: # morally: -> _TXT else: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: str, errors: Optional[str] = ..., ) -> str: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: str, ) -> str: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, universal_newlines: Literal[True], timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., ) -> str: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: Literal[False] = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: None = ..., errors: None = ..., ) -> bytes: def check_output( args: _CMD, bufsize: int = ..., executable: Optional[AnyPath] = ..., stdin: _FILE = ..., stderr: _FILE = ..., preexec_fn: Callable[[], Any] = ..., close_fds: bool = ..., shell: bool = ..., cwd: Optional[AnyPath] = ..., env: Optional[_ENV] = ..., universal_newlines: bool = ..., startupinfo: Any = ..., creationflags: int = ..., restore_signals: bool = ..., start_new_session: bool = ..., pass_fds: Any = ..., *, timeout: Optional[float] = ..., input: _TXT = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., ) -> Any: # morally: -> _TXT if sys.platform == "win32": class STARTUPINFO: def __init__( self, *, dwFlags: int = ..., hStdInput: Optional[Any] = ..., hStdOutput: Optional[Any] = ..., hStdError: Optional[Any] = ..., wShowWindow: int = ..., lpAttributeList: Optional[Mapping[str, Any]] = ..., ) -> None: STD_INPUT_HANDLE: Any STD_OUTPUT_HANDLE: Any STD_ERROR_HANDLE: Any SW_HIDE: 
int STARTF_USESTDHANDLES: int STARTF_USESHOWWINDOW: int CREATE_NEW_CONSOLE: int CREATE_NEW_PROCESS_GROUP: int if sys.version_info >= (3, 7): ABOVE_NORMAL_PRIORITY_CLASS: int BELOW_NORMAL_PRIORITY_CLASS: int HIGH_PRIORITY_CLASS: int IDLE_PRIORITY_CLASS: int NORMAL_PRIORITY_CLASS: int REALTIME_PRIORITY_CLASS: int CREATE_NO_WINDOW: int DETACHED_PROCESS: int CREATE_DEFAULT_ERROR_MODE: int CREATE_BREAKAWAY_FROM_JOB: int def _get_paths(): # Get the path to jedi. _d = os.path.dirname _jedi_path = _d(_d(_d(_d(_d(__file__))))) _parso_path = sys.argv[1] # The paths are the directory that jedi and parso lie in. return {'jedi': _jedi_path, 'parso': _parso_path}
null
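To make the overload matrix above concrete, a small usage sketch (hedged: it assumes a POSIX `echo` binary on PATH and Python 3.7+ for the `text=` parameter):

import subprocess

out_bytes = subprocess.check_output(['echo', 'hi'])            # bytes overload
out_text = subprocess.check_output(['echo', 'hi'], text=True)  # str overload (3.7+)
assert isinstance(out_bytes, bytes) and isinstance(out_text, str)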
176,531
import inspect from pathlib import Path from jedi.parser_utils import get_cached_code_lines from jedi import settings from jedi.cache import memoize_method from jedi.inference import compiled from jedi.file_io import FileIO from jedi.inference.names import NameWrapper from jedi.inference.base_value import ValueSet, ValueWrapper, NO_VALUES from jedi.inference.value import ModuleValue from jedi.inference.cache import inference_state_function_cache, \ inference_state_method_cache from jedi.inference.compiled.access import ALLOWED_GETITEM_TYPES, get_api_type from jedi.inference.gradual.conversion import to_stub from jedi.inference.context import CompiledContext, CompiledModuleContext, \ TreeContextMixin class MixedObject(ValueWrapper): """ A ``MixedObject`` is used in two ways: 1. It uses the default logic of ``parso.python.tree`` objects, 2. except for getattr calls and signatures. The names dicts are generated in a fashion like ``CompiledValue``. This combined logic makes it possible to provide more powerful REPL completion. It allows side effects that are not noticeable with the default parser structure to still be completable. The biggest difference between CompiledValue and MixedObject is that we are generally dealing with Python code and not with C code. This will generate fewer special cases, because in Python you don't have the same freedom to modify the runtime. """ def __init__(self, compiled_value, tree_value): super().__init__(tree_value) self.compiled_value = compiled_value self.access_handle = compiled_value.access_handle def get_filters(self, *args, **kwargs): yield MixedObjectFilter( self.inference_state, self.compiled_value, self._wrapped_value) def get_signatures(self): # Prefer `inspect.signature` over somehow analyzing Python code. It # should be very precise, especially for stuff like `partial`. return self.compiled_value.get_signatures() def py__call__(self, arguments): # Fall back to the wrapped value if to_stub returns no values. values = to_stub(self._wrapped_value) if not values: values = self._wrapped_value return values.py__call__(arguments) def get_safe_value(self, default=_sentinel): if default is _sentinel: return self.compiled_value.get_safe_value() else: return self.compiled_value.get_safe_value(default) def array_type(self): return self.compiled_value.array_type def get_key_values(self): return self.compiled_value.get_key_values() def py__simple_getitem__(self, index): python_object = self.compiled_value.access_handle.access._obj if type(python_object) in ALLOWED_GETITEM_TYPES: return self.compiled_value.py__simple_getitem__(index) return self._wrapped_value.py__simple_getitem__(index) def negate(self): return self.compiled_value.negate() def _as_context(self): if self.parent_context is None: return MixedModuleContext(self) return MixedContext(self) def __repr__(self): return '<%s: %s; %s>' % ( type(self).__name__, self.access_handle.get_repr(), self._wrapped_value, ) def _find_syntax_node_name(inference_state, python_object): original_object = python_object try: python_object = _get_object_to_check(python_object) path = inspect.getsourcefile(python_object) except (OSError, TypeError): # The type might not be known (e.g. class_with_dict.__weakref__) return None path = None if path is None else Path(path) try: if path is None or not path.exists(): # The path might not exist or be e.g. <stdin>.
return None except OSError: # Might raise an OSError on Windows: # # [WinError 123] The filename, directory name, or volume label # syntax is incorrect: '<string>' return None file_io = FileIO(path) module_node = _load_module(inference_state, path) if inspect.ismodule(python_object): # We don't need to check names for modules, because there's not really # a way to write a module in a module in Python (and also __name__ can # be something like ``email.utils``). code_lines = get_cached_code_lines(inference_state.grammar, path) return module_node, module_node, file_io, code_lines try: name_str = python_object.__name__ except AttributeError: # Stuff like python_function.__code__. return None if name_str == '<lambda>': return None # It's too hard to find lambdas. # Doesn't always work (e.g. os.stat_result) names = module_node.get_used_names().get(name_str, []) # Only functions and classes are relevant. If a name e.g. points to an # import, it's probably a builtin (like collections.deque) and needs to be # ignored. names = [ n for n in names if n.parent.type in ('funcdef', 'classdef') and n.parent.name == n ] if not names: return None try: code = python_object.__code__ # By using the line number of a code object we make the lookup in a # file pretty easy. There's still a possibility of people defining # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people # do so we just don't care. line_nr = code.co_firstlineno except AttributeError: pass else: line_names = [name for name in names if name.start_pos[0] == line_nr] # There's a chance that the object is not available anymore, because # the code has changed in the background. if line_names: names = line_names code_lines = get_cached_code_lines(inference_state.grammar, path) # It's really hard to actually get the right definition, here as a last # resort we just return the last one. This chance might lead to odd # completions at some points but will lead to mostly correct type # inference, because people tend to define a public name in a module only # once. tree_node = names[-1].parent if tree_node.type == 'funcdef' and get_api_type(original_object) == 'instance': # If an instance is given and we're landing on a function (e.g. # partial in 3.5), something is completely wrong and we should not # return that. return None return module_node, tree_node, file_io, code_lines class ValueSet: def __init__(self, iterable): self._set = frozenset(iterable) for value in iterable: assert not isinstance(value, ValueSet) def _from_frozen_set(cls, frozenset_): self = cls.__new__(cls) self._set = frozenset_ return self def from_sets(cls, sets): """ Used to work with an iterable of set. 
""" aggregated = set() for set_ in sets: if isinstance(set_, ValueSet): aggregated |= set_._set else: aggregated |= frozenset(set_) return cls._from_frozen_set(frozenset(aggregated)) def __or__(self, other): return self._from_frozen_set(self._set | other._set) def __and__(self, other): return self._from_frozen_set(self._set & other._set) def __iter__(self): return iter(self._set) def __bool__(self): return bool(self._set) def __len__(self): return len(self._set) def __repr__(self): return 'S{%s}' % (', '.join(str(s) for s in self._set)) def filter(self, filter_func): return self.__class__(filter(filter_func, self._set)) def __getattr__(self, name): def mapper(*args, **kwargs): return self.from_sets( getattr(value, name)(*args, **kwargs) for value in self._set ) return mapper def __eq__(self, other): return self._set == other._set def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._set) def py__class__(self): return ValueSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.inference.lazy_value import get_merged_lazy_value type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_values in zip_longest(*type_iters): yield get_merged_lazy_value( [l for l in lazy_values if l is not None] ) def execute(self, arguments): return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) def execute_with_values(self, *args, **kwargs): return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) def goto(self, *args, **kwargs): return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) def py__getattribute__(self, *args, **kwargs): return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) def get_item(self, *args, **kwargs): return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) def try_merge(self, function_name): value_set = self.__class__([]) for c in self._set: try: method = getattr(c, function_name) except AttributeError: pass else: value_set |= method() return value_set def gather_annotation_classes(self): return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) def get_signatures(self): return [sig for c in self._set for sig in c.get_signatures()] def get_type_hint(self, add_class_info=True): t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] type_hints = sorted(filter(None, t)) if len(type_hints) == 1: return type_hints[0] optional = 'None' in type_hints if optional: type_hints.remove('None') if len(type_hints) == 0: return None elif len(type_hints) == 1: s = type_hints[0] else: s = 'Union[%s]' % ', '.join(type_hints) if optional: s = 'Optional[%s]' % s return s def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_type_var_dicts type_var_dict = {} for value in self._set: merge_type_var_dicts( type_var_dict, value.infer_type_vars(value_set), ) return type_var_dict def to_stub(value): if value.is_stub(): return ValueSet([value]) was_instance = value.is_instance() if was_instance: value = value.py__class__() qualified_names = value.get_qualified_names() stub_module = _load_stub_module(value.get_root_context().get_value()) if stub_module is None or qualified_names is None: return NO_VALUES was_bound_method = value.is_bound_method() if was_bound_method: # Infer the object first. We can infer the method later. 
method_name = qualified_names[-1] qualified_names = qualified_names[:-1] was_instance = True stub_values = ValueSet([stub_module]) for name in qualified_names: stub_values = stub_values.py__getattribute__(name) if was_instance: stub_values = ValueSet.from_sets( c.execute_with_values() for c in stub_values if c.is_class() ) if was_bound_method: # Now that the instance has been properly created, we can simply get # the method. stub_values = stub_values.py__getattribute__(method_name) return stub_values def _create(inference_state, compiled_value, module_context): # TODO accessing this is bad, but it probably doesn't matter that much, # because we're working with interpreters only here. python_object = compiled_value.access_handle.access._obj result = _find_syntax_node_name(inference_state, python_object) if result is None: # TODO Care about generics from stuff like `[1]` and don't return like this. if type(python_object) in (dict, list, tuple): return ValueSet({compiled_value}) tree_values = to_stub(compiled_value) if not tree_values: return ValueSet({compiled_value}) else: module_node, tree_node, file_io, code_lines = result if module_context is None or module_context.tree_node != module_node: root_compiled_value = compiled_value.get_root_context().get_value() # TODO this __name__ might be wrong. name = root_compiled_value.py__name__() string_names = tuple(name.split('.')) module_value = ModuleValue( inference_state, module_node, file_io=file_io, string_names=string_names, code_lines=code_lines, is_package=root_compiled_value.is_package(), ) if name is not None: inference_state.module_cache.add(string_names, ValueSet([module_value])) module_context = module_value.as_context() tree_values = ValueSet({module_context.create_value(tree_node)}) if tree_node.type == 'classdef': if not compiled_value.is_class(): # Is an instance, not a class. tree_values = tree_values.execute_with_values() return ValueSet( MixedObject(compiled_value, tree_value=tree_value) for tree_value in tree_values )
null
176,532
import re
from functools import partial
from inspect import Parameter
from pathlib import Path
from typing import Optional

from jedi import debug
from jedi.inference.utils import to_list
from jedi.cache import memoize_method
from jedi.inference.filters import AbstractFilter
from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \
    ParamNameInterface
from jedi.inference.base_value import Value, ValueSet, NO_VALUES
from jedi.inference.lazy_value import LazyKnownValue
from jedi.inference.compiled.access import _sentinel
from jedi.inference.cache import inference_state_function_cache
from jedi.inference.helpers import reraise_getitem_errors
from jedi.inference.signature import BuiltinSignature
from jedi.inference.context import CompiledContext, CompiledModuleContext

docstr_defaults = {
    'floating point number': 'float',
    'character': 'str',
    'integer': 'int',
    'dictionary': 'dict',
    'string': 'str',
}

The provided code snippet includes necessary dependencies for implementing the `_parse_function_doc` function. Write a Python function `def _parse_function_doc(doc)` to solve the following problem:
Takes a function docstring and returns the params and return value as a tuple. This is nothing more than a docstring parser.
TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
TODO docstrings like 'tuple of integers'
Here is the function:

def _parse_function_doc(doc):
    """
    Takes a function docstring and returns the params and return value as a
    tuple. This is nothing more than a docstring parser.

    TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
    TODO docstrings like 'tuple of integers'
    """
    # parse round parentheses: def func(a, (b,c))
    try:
        count = 0
        start = doc.index('(')
        for i, s in enumerate(doc[start:]):
            if s == '(':
                count += 1
            elif s == ')':
                count -= 1
                if count == 0:
                    end = start + i
                    break
        param_str = doc[start + 1:end]
    except (ValueError, UnboundLocalError):
        # ValueError for doc.index
        # UnboundLocalError for undefined end in last line
        debug.dbg('no brackets found - no param')
        end = 0
        param_str = ''
    else:
        # remove square brackets, which mark an optional param ( = None)
        def change_options(m):
            args = m.group(1).split(',')
            for i, a in enumerate(args):
                if a and '=' not in a:
                    args[i] += '=None'
            return ','.join(args)

        while True:
            param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
                                         change_options, param_str)
            if changes == 0:
                break
    param_str = param_str.replace('-', '_')  # see: isinstance.__doc__

    # parse return value
    r = re.search('-[>-]* ', doc[end:end + 7])
    if r is None:
        ret = ''
    else:
        index = end + r.end()
        # get result type, which can contain newlines
        pattern = re.compile(r'(,\n|[^\n-])+')
        ret_str = pattern.match(doc, index).group(0).strip()
        # New object -> object()
        ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
        ret = docstr_defaults.get(ret_str, ret_str)

    return param_str, ret
Takes a function docstring and returns the params and return value as a tuple. This is nothing more than a docstring parser.
TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
TODO docstrings like 'tuple of integers'
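For example, on a typical C-style signature line the parser behaves as follows (a small sketch; the input string is just an illustrative docstring, with `_parse_function_doc` and `docstr_defaults` in scope as defined above):

params, ret = _parse_function_doc('compress(data [, level]) -> bytes')
# Square-bracketed params are optional, so a default gets appended:
assert params == 'data, level=None'
# 'bytes' has no entry in docstr_defaults, so it is returned verbatim:
assert ret == 'bytes'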
176,533
import re from functools import partial from inspect import Parameter from pathlib import Path from typing import Optional from jedi import debug from jedi.inference.utils import to_list from jedi.cache import memoize_method from jedi.inference.filters import AbstractFilter from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \ ParamNameInterface from jedi.inference.base_value import Value, ValueSet, NO_VALUES from jedi.inference.lazy_value import LazyKnownValue from jedi.inference.compiled.access import _sentinel from jedi.inference.cache import inference_state_function_cache from jedi.inference.helpers import reraise_getitem_errors from jedi.inference.signature import BuiltinSignature from jedi.inference.context import CompiledContext, CompiledModuleContext def create_cached_compiled_value(inference_state, access_handle, parent_context): assert not isinstance(parent_context, CompiledValue) if parent_context is None: cls = CompiledModule else: cls = CompiledValue return cls(inference_state, access_handle, parent_context) def create_from_name(inference_state, compiled_value, name): access_paths = compiled_value.access_handle.getattr_paths(name, default=None) value = None for access_path in access_paths: value = create_cached_compiled_value( inference_state, access_path, parent_context=None if value is None else value.as_context(), ) return value
null
176,534
import re
from functools import partial
from inspect import Parameter
from pathlib import Path
from typing import Optional

from jedi import debug
from jedi.inference.utils import to_list
from jedi.cache import memoize_method
from jedi.inference.filters import AbstractFilter
from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \
    ParamNameInterface
from jedi.inference.base_value import Value, ValueSet, NO_VALUES
from jedi.inference.lazy_value import LazyKnownValue
from jedi.inference.compiled.access import _sentinel
from jedi.inference.cache import inference_state_function_cache
from jedi.inference.helpers import reraise_getitem_errors
from jedi.inference.signature import BuiltinSignature
from jedi.inference.context import CompiledContext, CompiledModuleContext

The provided code snippet includes necessary dependencies for implementing the `_normalize_create_args` function. Write a Python function `def _normalize_create_args(func)` to solve the following problem:
The cache doesn't care about keyword vs. normal args.
Here is the function:

def _normalize_create_args(func):
    """The cache doesn't care about keyword vs. normal args."""
    def wrapper(inference_state, obj, parent_context=None):
        return func(inference_state, obj, parent_context)
    return wrapper
The cache doesn't care about keyword vs. normal args.
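A sketch of why the normalization matters when the decorated function sits on top of a cache (hedged: `lru_cache` stands in here for jedi's real cache decorator, and `_cached_create` is a hypothetical factory):

from functools import lru_cache

@_normalize_create_args
@lru_cache(maxsize=None)
def _cached_create(inference_state, obj, parent_context):
    return object()  # stand-in for the real compiled-value factory

a = _cached_create('state', 'obj', parent_context=None)
b = _cached_create('state', 'obj')  # keyword vs. positional: same cache key
assert a is b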
176,535
import re from functools import partial from inspect import Parameter from pathlib import Path from typing import Optional from jedi import debug from jedi.inference.utils import to_list from jedi.cache import memoize_method from jedi.inference.filters import AbstractFilter from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \ ParamNameInterface from jedi.inference.base_value import Value, ValueSet, NO_VALUES from jedi.inference.lazy_value import LazyKnownValue from jedi.inference.compiled.access import _sentinel from jedi.inference.cache import inference_state_function_cache from jedi.inference.helpers import reraise_getitem_errors from jedi.inference.signature import BuiltinSignature from jedi.inference.context import CompiledContext, CompiledModuleContext def create_cached_compiled_value(inference_state, access_handle, parent_context): assert not isinstance(parent_context, CompiledValue) if parent_context is None: cls = CompiledModule else: cls = CompiledValue return cls(inference_state, access_handle, parent_context) def create_from_access_path(inference_state, access_path): value = None for name, access in access_path.accesses: value = create_cached_compiled_value( inference_state, access, parent_context=None if value is None else value.as_context() ) return value
null
176,536
import os import re from parso import python_bytes_to_unicode from jedi.debug import dbg from jedi.file_io import KnownContentFileIO, FolderIO from jedi.inference.names import SubModuleName from jedi.inference.imports import load_module_from_path from jedi.inference.filters import ParserTreeFilter from jedi.inference.gradual.conversion import convert_names def _dictionarize(names): return dict( (n if n.tree_name is None else n.tree_name, n) for n in names ) def _find_defining_names(module_context, tree_name): found_names = _find_names(module_context, tree_name) for name in list(found_names): # Convert from/to stubs, because those might also be usages. found_names |= set(convert_names( [name], only_stubs=not name.get_root_context().is_stub(), prefer_stub_to_compiled=False )) found_names |= set(_find_global_variables(found_names, tree_name.value)) for name in list(found_names): if name.api_type == 'param' or name.tree_name is None \ or name.tree_name.parent.type == 'trailer': continue found_names |= set(_add_names_in_same_context(name.parent_context, name.string_name)) return set(_resolve_names(found_names)) def _find_names(module_context, tree_name): name = module_context.create_name(tree_name) found_names = set(name.goto()) found_names.add(name) return set(_resolve_names(found_names)) def get_module_contexts_containing_name(inference_state, module_contexts, name, limit_reduction=1): """ Search a name in the directories of modules. :param limit_reduction: Divides the limits on opening/parsing files by this factor. """ # Skip non python modules for module_context in module_contexts: if module_context.is_compiled(): continue yield module_context # Very short names are not searched in other modules for now to avoid lots # of file lookups. if len(name) <= 2: return # Currently not used, because there's only `scope=project` and `scope=file` # At the moment there is no such thing as `scope=sys.path`. # file_io_iterator = _find_python_files_in_sys_path(inference_state, module_contexts) file_io_iterator = _find_project_modules(inference_state, module_contexts) yield from search_in_file_ios(inference_state, file_io_iterator, name, limit_reduction=limit_reduction) def find_references(module_context, tree_name, only_in_module=False): inf = module_context.inference_state search_name = tree_name.value # We disable flow analysis, because if we have ifs that are only true in # certain cases, we want both sides. try: inf.flow_analysis_enabled = False found_names = _find_defining_names(module_context, tree_name) finally: inf.flow_analysis_enabled = True found_names_dct = _dictionarize(found_names) module_contexts = [module_context] if not only_in_module: for m in set(d.get_root_context() for d in found_names): if m != module_context and m.tree_node is not None \ and inf.project.path in m.py__file__().parents: module_contexts.append(m) # For param no search for other modules is necessary. 
if only_in_module or any(n.api_type == 'param' for n in found_names): potential_modules = module_contexts else: potential_modules = get_module_contexts_containing_name( inf, module_contexts, search_name, ) non_matching_reference_maps = {} for module_context in potential_modules: for name_leaf in module_context.tree_node.get_used_names().get(search_name, []): new = _dictionarize(_find_names(module_context, name_leaf)) if any(tree_name in found_names_dct for tree_name in new): found_names_dct.update(new) for tree_name in new: for dct in non_matching_reference_maps.get(tree_name, []): # A reference that was previously searched for matches # with a now found name. Merge. found_names_dct.update(dct) try: del non_matching_reference_maps[tree_name] except KeyError: pass else: for name in new: non_matching_reference_maps.setdefault(name, []).append(new) result = found_names_dct.values() if only_in_module: return [n for n in result if n.get_root_context() == module_context] return result
null
176,537
import os import re from parso import python_bytes_to_unicode from jedi.debug import dbg from jedi.file_io import KnownContentFileIO, FolderIO from jedi.inference.names import SubModuleName from jedi.inference.imports import load_module_from_path from jedi.inference.filters import ParserTreeFilter from jedi.inference.gradual.conversion import convert_names def recurse_find_python_files(folder_io, except_paths=()): for folder_io, file_io in recurse_find_python_folders_and_files(folder_io, except_paths): if file_io is not None: yield file_io def _find_python_files_in_sys_path(inference_state, module_contexts): sys_path = inference_state.get_sys_path() except_paths = set() yielded_paths = [m.py__file__() for m in module_contexts] for module_context in module_contexts: file_io = module_context.get_value().file_io if file_io is None: continue folder_io = file_io.get_parent_folder() while True: path = folder_io.path if not any(path.startswith(p) for p in sys_path) or path in except_paths: break for file_io in recurse_find_python_files(folder_io, except_paths): if file_io.path not in yielded_paths: yield file_io except_paths.add(path) folder_io = folder_io.get_parent_folder()
null
176,538
import copy
import sys
import re
import os
from itertools import chain
from contextlib import contextmanager

from parso.python import tree

The provided code snippet includes necessary dependencies for implementing the `deep_ast_copy` function. Write a Python function `def deep_ast_copy(obj)` to solve the following problem:
Much, much faster than copy.deepcopy, but just for parser tree nodes.
Here is the function:

def deep_ast_copy(obj):
    """
    Much, much faster than copy.deepcopy, but just for parser tree nodes.
    """
    # Shallow-copy the node itself, then rebuild its children.
    new_obj = copy.copy(obj)

    # Copy children
    new_children = []
    for child in obj.children:
        if isinstance(child, tree.Leaf):
            new_child = copy.copy(child)
            new_child.parent = new_obj
        else:
            new_child = deep_ast_copy(child)
            new_child.parent = new_obj
        new_children.append(new_child)
    new_obj.children = new_children

    return new_obj
Much, much faster than copy.deepcopy, but just for parser tree nodes.
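An illustrative round-trip, assuming parso is installed:

import parso

module = parso.parse('x = 1\n')
copied = deep_ast_copy(module)
assert copied is not module
assert copied.get_code() == module.get_code()
assert copied.children[0].parent is copied  # parent links are rewired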
176,539
import copy
import sys
import re
import os
from itertools import chain
from contextlib import contextmanager

from parso.python import tree


def _get_safe_value_or_none(value, accept):
    value = value.get_safe_value(default=None)
    if isinstance(value, accept):
        return value


def get_int_or_none(value):
    return _get_safe_value_or_none(value, int)
null
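A toy illustration of the filtering behaviour (hedged: `FakeValue` is a hypothetical stand-in implementing only the `get_safe_value(default=...)` interface the helper relies on):

class FakeValue:
    def __init__(self, obj):
        self._obj = obj

    def get_safe_value(self, default=None):
        return self._obj

assert get_int_or_none(FakeValue(3)) == 3
assert get_int_or_none(FakeValue('3')) is None  # non-ints are filtered out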
176,540
import copy
import sys
import re
import os
from itertools import chain
from contextlib import contextmanager

from parso.python import tree


class SimpleGetItemNotFound(Exception):
    pass


@contextmanager
def reraise_getitem_errors(*exception_classes):
    try:
        yield
    except exception_classes as e:
        raise SimpleGetItemNotFound(e)
null
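Usage sketch (assuming the `@contextmanager` decoration shown above): any of the listed exception types raised inside the block is converted into jedi's internal `SimpleGetItemNotFound`:

try:
    with reraise_getitem_errors(KeyError, IndexError):
        [][0]  # raises IndexError
except SimpleGetItemNotFound as e:
    print('lookup failed:', e)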
176,541
import copy
import sys
import re
import os
from itertools import chain
from contextlib import contextmanager

from parso.python import tree


def values_from_qualified_names(inference_state, *names):
    return inference_state.import_module(names[:-1]).py__getattribute__(names[-1])
null
176,542
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.common import monkeypatch


class MergedLazyValues(AbstractLazyValue):
    """data is a list of lazy values."""
    def infer(self):
        return ValueSet.from_sets(l.infer() for l in self.data)


def get_merged_lazy_value(lazy_values):
    if len(lazy_values) > 1:
        return MergedLazyValues(lazy_values)
    else:
        return lazy_values[0]
null
176,543
from jedi.inference.cache import inference_state_function_cache


def get_yield_exprs(inference_state, funcdef):
    return list(funcdef.iter_yield_exprs())
null
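An illustrative call, assuming parso is installed; `inference_state` is unused by the body shown here, so `None` suffices for the sketch:

import parso

module = parso.parse('def gen():\n    yield 1\n    yield 2\n')
funcdef = next(module.iter_funcdefs())
assert len(get_yield_exprs(None, funcdef)) == 2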
176,544
from parso.python import tree
from jedi import debug
from jedi.inference.helpers import is_string

class Error:
    def __init__(self, name, module_path, start_pos, message=None):
    def line(self):
    def column(self):
    def code(self):
    def __str__(self):
    def __eq__(self, other):
    def __ne__(self, other):
    def __hash__(self):
    def __repr__(self):

class Warning(Error):

def add(node_context, error_name, node, message=None, typ=Error, payload=None):

def _check_for_setattr(instance):

def add_attribute_error(name_context, lookup_value, name):
    message = ('AttributeError: %s has no attribute %s.' % (lookup_value, name))
    # Check for __getattr__/__getattribute__ existence and issue a warning
    # instead of an error, if that happens.
    typ = Error
    if lookup_value.is_instance() and not lookup_value.is_compiled():
        # TODO maybe make a warning for __getattr__/__getattribute__
        if _check_for_setattr(lookup_value):
            typ = Warning
    payload = lookup_value, name
    add(name_context, 'attribute-error', name, message, typ, payload)
null
176,545
from functools import reduce from operator import add from itertools import zip_longest from parso.python.tree import Name from jedi import debug from jedi.parser_utils import clean_scope_docstring from jedi.inference.helpers import SimpleGetItemNotFound from jedi.inference.utils import safe_property from jedi.inference.cache import inference_state_as_method_param_cache from jedi.cache import memoize_method class ValueSet: def __init__(self, iterable): def _from_frozen_set(cls, frozenset_): def from_sets(cls, sets): def __or__(self, other): def __and__(self, other): def __iter__(self): def __bool__(self): def __len__(self): def __repr__(self): def filter(self, filter_func): def __getattr__(self, name): def mapper(*args, **kwargs): def __eq__(self, other): def __ne__(self, other): def __hash__(self): def py__class__(self): def iterate(self, contextualized_node=None, is_async=False): def execute(self, arguments): def execute_with_values(self, *args, **kwargs): def goto(self, *args, **kwargs): def py__getattribute__(self, *args, **kwargs): def get_item(self, *args, **kwargs): def try_merge(self, function_name): def gather_annotation_classes(self): def get_signatures(self): def get_type_hint(self, add_class_info=True): def infer_type_vars(self, value_set): NO_VALUES = ValueSet([]) def add(__a: Any, __b: Any) -> Any: class SimpleGetItemNotFound(Exception): def _getitem(value, index_values, contextualized_node): # The actual getitem call. result = NO_VALUES unused_values = set() for index_value in index_values: index = index_value.get_safe_value(default=None) if type(index) in (float, int, str, slice, bytes): try: result |= value.py__simple_getitem__(index) continue except SimpleGetItemNotFound: pass unused_values.add(index_value) # The index was somehow not good enough or simply a wrong type. # Therefore we now iterate through all the values and just take # all results. if unused_values or not index_values: result |= value.py__getitem__( ValueSet(unused_values), contextualized_node ) debug.dbg('py__getitem__ result: %s', result) return result
null
176,546
from functools import reduce from operator import add from itertools import zip_longest from parso.python.tree import Name from jedi import debug from jedi.parser_utils import clean_scope_docstring from jedi.inference.helpers import SimpleGetItemNotFound from jedi.inference.utils import safe_property from jedi.inference.cache import inference_state_as_method_param_cache from jedi.cache import memoize_method class ValueSet: def __init__(self, iterable): self._set = frozenset(iterable) for value in iterable: assert not isinstance(value, ValueSet) def _from_frozen_set(cls, frozenset_): self = cls.__new__(cls) self._set = frozenset_ return self def from_sets(cls, sets): """ Used to work with an iterable of set. """ aggregated = set() for set_ in sets: if isinstance(set_, ValueSet): aggregated |= set_._set else: aggregated |= frozenset(set_) return cls._from_frozen_set(frozenset(aggregated)) def __or__(self, other): return self._from_frozen_set(self._set | other._set) def __and__(self, other): return self._from_frozen_set(self._set & other._set) def __iter__(self): return iter(self._set) def __bool__(self): return bool(self._set) def __len__(self): return len(self._set) def __repr__(self): return 'S{%s}' % (', '.join(str(s) for s in self._set)) def filter(self, filter_func): return self.__class__(filter(filter_func, self._set)) def __getattr__(self, name): def mapper(*args, **kwargs): return self.from_sets( getattr(value, name)(*args, **kwargs) for value in self._set ) return mapper def __eq__(self, other): return self._set == other._set def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._set) def py__class__(self): return ValueSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.inference.lazy_value import get_merged_lazy_value type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_values in zip_longest(*type_iters): yield get_merged_lazy_value( [l for l in lazy_values if l is not None] ) def execute(self, arguments): return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) def execute_with_values(self, *args, **kwargs): return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) def goto(self, *args, **kwargs): return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) def py__getattribute__(self, *args, **kwargs): return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) def get_item(self, *args, **kwargs): return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) def try_merge(self, function_name): value_set = self.__class__([]) for c in self._set: try: method = getattr(c, function_name) except AttributeError: pass else: value_set |= method() return value_set def gather_annotation_classes(self): return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) def get_signatures(self): return [sig for c in self._set for sig in c.get_signatures()] def get_type_hint(self, add_class_info=True): t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] type_hints = sorted(filter(None, t)) if len(type_hints) == 1: return type_hints[0] optional = 'None' in type_hints if optional: type_hints.remove('None') if len(type_hints) == 0: return None elif len(type_hints) == 1: s = type_hints[0] else: s = 'Union[%s]' % ', '.join(type_hints) if optional: s = 'Optional[%s]' % s return s def infer_type_vars(self, value_set): # Circular from 
jedi.inference.gradual.annotation import merge_type_var_dicts type_var_dict = {} for value in self._set: merge_type_var_dicts( type_var_dict, value.infer_type_vars(value_set), ) return type_var_dict def iterator_to_value_set(func): def wrapper(*args, **kwargs): return ValueSet(func(*args, **kwargs)) return wrapper
null
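A minimal sketch of the decorator in action: the wrapped generator's items are collected into a `ValueSet` (plain strings stand in for real value objects here):

@iterator_to_value_set
def _toy_candidates():
    yield 'a'
    yield 'b'

result = _toy_candidates()
assert sorted(result) == ['a', 'b']  # __iter__/__len__ come from ValueSet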
176,547
from abc import abstractmethod from contextlib import contextmanager from pathlib import Path from typing import Optional from parso.tree import search_ancestor from parso.python.tree import Name from jedi.inference.filters import ParserTreeFilter, MergedFilter, \ GlobalNameFilter from jedi.inference.names import AnonymousParamName, TreeNameDefinition from jedi.inference.base_value import NO_VALUES, ValueSet from jedi.parser_utils import get_parent_scope from jedi import debug from jedi import parser_utils def get_global_filters(context, until_position, origin_scope): """ Returns all filters in order of priority for name resolution. For global name lookups. The filters will handle name resolution themselves, but here we gather possible filters downwards. >>> from jedi import Script >>> script = Script(''' ... x = ['a', 'b', 'c'] ... def func(): ... y = None ... ''') >>> module_node = script._module_node >>> scope = next(module_node.iter_funcdefs()) >>> scope <Function: func@3-5> >>> context = script._get_module_context().create_context(scope) >>> filters = list(get_global_filters(context, (4, 0), None)) First we get the names from the function scope. >>> print(filters[0]) # doctest: +ELLIPSIS MergedFilter(<ParserTreeFilter: ...>, <GlobalNameFilter: ...>) >>> sorted(str(n) for n in filters[0].values()) # doctest: +NORMALIZE_WHITESPACE ['<TreeNameDefinition: string_name=func start_pos=(3, 4)>', '<TreeNameDefinition: string_name=x start_pos=(2, 0)>'] >>> filters[0]._filters[0]._until_position (4, 0) >>> filters[0]._filters[1]._until_position Then it yields the names from one level "lower". In this example, this is the module scope (including globals). As a side note, you can see, that the position in the filter is None on the globals filter, because there the whole module is searched. >>> list(filters[1].values()) # package modules -> Also empty. [] >>> sorted(name.string_name for name in filters[2].values()) # Module attributes ['__doc__', '__name__', '__package__'] Finally, it yields the builtin filter, if `include_builtin` is true (default). >>> list(filters[3].values()) # doctest: +ELLIPSIS [...] """ base_context = context from jedi.inference.value.function import BaseFunctionExecutionContext while context is not None: # Names in methods cannot be resolved within the class. yield from context.get_filters( until_position=until_position, origin_scope=origin_scope ) if isinstance(context, (BaseFunctionExecutionContext, ModuleContext)): # The position should be reset if the current scope is a function. until_position = None context = context.parent_context b = next(base_context.inference_state.builtins_module.get_filters(), None) assert b is not None # Add builtins to the global scope. yield b def search_ancestor(node: 'NodeOrLeaf', *node_types: str) -> 'Optional[BaseNode]': """ Recursively looks at the parents of a node and returns the first found node that matches ``node_types``. Returns ``None`` if no matching node is found. This function is deprecated, use :meth:`NodeOrLeaf.search_ancestor` instead. :param node: The ancestors of this node will be checked. :param node_types: type names that are searched for. """ n = node.parent while n is not None: if n.type in node_types: return n n = n.parent return None def _get_global_filters_for_name(context, name_or_none, position): # For functions and classes the defaults don't belong to the # function and get inferred in the value before the function. So # make sure to exclude the function/class name. 
if name_or_none is not None: ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef') lambdef = None if ancestor == 'lambdef': # For lambdas it's even more complicated since parts will # be inferred later. lambdef = ancestor ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef') if ancestor is not None: colon = ancestor.children[-2] if position is not None and position < colon.start_pos: if lambdef is None or position < lambdef.children[-2].start_pos: position = ancestor.start_pos return get_global_filters(context, position, name_or_none)
null
176,548
from parso.tree import search_ancestor from parso.python.tree import Name from jedi import settings from jedi.inference.arguments import TreeArguments from jedi.inference.value import iterable from jedi.inference.base_value import NO_VALUES from jedi.parser_utils import is_scope def _remove_del_stmt(names): # Catch del statements and remove them from results. for name in names: if name.tree_name is not None: definition = name.tree_name.get_definition() if definition is not None and definition.type == 'del_stmt': continue yield name class Name(_LeafWithoutNewlines): """ A string. Sometimes it is important to know if the string belongs to a name or not. """ type = 'name' __slots__ = () def __repr__(self): return "<%s: %s@%s,%s>" % (type(self).__name__, self.value, self.line, self.column) def is_definition(self, include_setitem=False): """ Returns True if the name is being defined. """ return self.get_definition(include_setitem=include_setitem) is not None def get_definition(self, import_name_always=False, include_setitem=False): """ Returns None if there's no definition for a name. :param import_name_always: Specifies if an import name is always a definition. Normally foo in `from foo import bar` is not a definition. """ node = self.parent type_ = node.type if type_ in ('funcdef', 'classdef'): if self == node.name: return node return None if type_ == 'except_clause': if self.get_previous_sibling() == 'as': return node.parent # The try_stmt. return None while node is not None: if node.type == 'suite': return None if node.type in _GET_DEFINITION_TYPES: if self in node.get_defined_names(include_setitem): return node if import_name_always and node.type in _IMPORTS: return node return None node = node.parent return None The provided code snippet includes necessary dependencies for implementing the `filter_name` function. Write a Python function `def filter_name(filters, name_or_str)` to solve the following problem: Searches names that are defined in a scope (the different ``filters``), until a name fits. Here is the function: def filter_name(filters, name_or_str): """ Searches names that are defined in a scope (the different ``filters``), until a name fits. """ string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str names = [] for filter in filters: names = filter.get(string_name) if names: break return list(_remove_del_stmt(names))
Searches names that are defined in a scope (the different ``filters``), until a name fits.
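A toy illustration of the innermost-scope-wins behaviour (hedged: `_ToyFilter` and `_ToyName` are hypothetical stand-ins exposing only the `.get()` and `.tree_name` interfaces that `filter_name` touches):

class _ToyName(str):
    tree_name = None

class _ToyFilter:
    def __init__(self, names):
        self._names = names
    def get(self, string_name):
        return [n for n in self._names if n == string_name]

inner = _ToyFilter([_ToyName('x')])                 # innermost scope
outer = _ToyFilter([_ToyName('x'), _ToyName('y')])  # enclosing scope
assert filter_name([inner, outer], 'x') == ['x']    # found in inner, stop there
assert filter_name([inner, outer], 'y') == ['y']    # falls through to outer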
176,549
from parso.tree import search_ancestor from parso.python.tree import Name from jedi import settings from jedi.inference.arguments import TreeArguments from jedi.inference.value import iterable from jedi.inference.base_value import NO_VALUES from jedi.parser_utils import is_scope def _check_isinstance_type(value, node, search_name): lazy_cls = None trailer = _get_isinstance_trailer_arglist(node) if trailer is not None and len(trailer.children) == 3: arglist = trailer.children[1] args = TreeArguments(value.inference_state, value, arglist, trailer) param_list = list(args.unpack()) # Disallow keyword arguments if len(param_list) == 2 and len(arglist.children) == 3: (key1, _), (key2, lazy_value_cls) = param_list if key1 is None and key2 is None: call = _get_call_string(search_name) is_instance_call = _get_call_string(arglist.children[0]) # Do a simple get_code comparison of the strings . They should # just have the same code, and everything will be all right. # There are ways that this is not correct, if some stuff is # redefined in between. However here we don't care, because # it's a heuristic that works pretty well. if call == is_instance_call: lazy_cls = lazy_value_cls if lazy_cls is None: return None value_set = NO_VALUES for cls_or_tup in lazy_cls.infer(): if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple': for lazy_value in cls_or_tup.py__iter__(): value_set |= lazy_value.infer().execute_with_values() else: value_set |= cls_or_tup.execute_with_values() return value_set def search_ancestor(node: 'NodeOrLeaf', *node_types: str) -> 'Optional[BaseNode]': """ Recursively looks at the parents of a node and returns the first found node that matches ``node_types``. Returns ``None`` if no matching node is found. This function is deprecated, use :meth:`NodeOrLeaf.search_ancestor` instead. :param node: The ancestors of this node will be checked. :param node_types: type names that are searched for. """ n = node.parent while n is not None: if n.type in node_types: return n n = n.parent return None def is_scope(node): t = node.type if t == 'comp_for': # Starting with Python 3.8, async is outside of the statement. return node.children[1].type != 'sync_comp_for' return t in ('file_input', 'classdef', 'funcdef', 'lambdef', 'sync_comp_for') The provided code snippet includes necessary dependencies for implementing the `check_flow_information` function. Write a Python function `def check_flow_information(value, flow, search_name, pos)` to solve the following problem: Try to find out the type of a variable just with the information that is given by the flows: e.g. It is also responsible for assert checks.:: if isinstance(k, str): k. # <- completion here ensures that `k` is a string. Here is the function: def check_flow_information(value, flow, search_name, pos): """ Try to find out the type of a variable just with the information that is given by the flows: e.g. It is also responsible for assert checks.:: if isinstance(k, str): k. # <- completion here ensures that `k` is a string. """ if not settings.dynamic_flow_information: return None result = None if is_scope(flow): # Check for asserts. 
module_node = flow.get_root_node() try: names = module_node.get_used_names()[search_name.value] except KeyError: return None names = reversed([ n for n in names if flow.start_pos <= n.start_pos < (pos or flow.end_pos) ]) for name in names: ass = search_ancestor(name, 'assert_stmt') if ass is not None: result = _check_isinstance_type(value, ass.assertion, search_name) if result is not None: return result if flow.type in ('if_stmt', 'while_stmt'): potential_ifs = [c for c in flow.children[1::4] if c != ':'] for if_test in reversed(potential_ifs): if search_name.start_pos > if_test.end_pos: return _check_isinstance_type(value, if_test, search_name) return result
Try to find out the type of a variable just with the information that is given by the flows: e.g. It is also responsible for assert checks.:: if isinstance(k, str): k. # <- completion here ensures that `k` is a string.
176,550
import os from pathlib import Path from parso.python import tree from parso.tree import search_ancestor from jedi import debug from jedi import settings from jedi.file_io import FolderIO from jedi.parser_utils import get_cached_code_lines from jedi.inference import sys_path from jedi.inference import helpers from jedi.inference import compiled from jedi.inference import analysis from jedi.inference.utils import unite from jedi.inference.cache import inference_state_method_cache from jedi.inference.names import ImportName, SubModuleName from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.typeshed import import_module_decorator, \ create_stub_module, parse_stub_module from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo from jedi.plugins import plugin_manager def _prepare_infer_import(module_context, tree_name): import_node = search_ancestor(tree_name, 'import_name', 'import_from') import_path = import_node.get_path_for_name(tree_name) from_import_name = None try: from_names = import_node.get_from_names() except AttributeError: # Is an import_name pass else: if len(from_names) + 1 == len(import_path): # We have to fetch the from_names part first and then check # if from_names exists in the modules. from_import_name = import_path[-1] import_path = from_names importer = Importer(module_context.inference_state, tuple(import_path), module_context, import_node.level) return from_import_name, tuple(import_path), import_node.level, importer.follow() class Importer: def __init__(self, inference_state, import_path, module_context, level=0): """ An implementation similar to ``__import__``. Use `follow` to actually follow the imports. *level* specifies whether to use absolute or relative imports. 0 (the default) means only perform absolute imports. Positive values for level indicate the number of parent directories to search relative to the directory of the module calling ``__import__()`` (see PEP 328 for the details). :param import_path: List of namespaces (strings or Names). """ debug.speed('import %s %s' % (import_path, module_context)) self._inference_state = inference_state self.level = level self._module_context = module_context self._fixed_sys_path = None self._infer_possible = True if level: base = module_context.get_value().py__package__() # We need to care for two cases, the first one is if it's a valid # Python import. This import has a properly defined module name # chain like `foo.bar.baz` and an import in baz is made for # `..lala.` It can then resolve to `foo.bar.lala`. # The else here is a heuristic for all other cases, if for example # in `foo` you search for `...bar`, it's obviously out of scope. # However since Jedi tries to just do it's best, we help the user # here, because he might have specified something wrong in his # project. if level <= len(base): # Here we basically rewrite the level to 0. base = tuple(base) if level > 1: base = base[:-level + 1] import_path = base + tuple(import_path) else: path = module_context.py__file__() project_path = self._inference_state.project.path import_path = list(import_path) if path is None: # If no path is defined, our best guess is that the current # file is edited by a user on the current working # directory. We need to add an initial path, because it # will get removed as the name of the current file. 
directory = project_path else: directory = os.path.dirname(path) base_import_path, base_directory = _level_to_base_import_path( project_path, directory, level, ) if base_directory is None: # Everything is lost, the relative import does point # somewhere out of the filesystem. self._infer_possible = False else: self._fixed_sys_path = [base_directory] if base_import_path is None: if import_path: _add_error( module_context, import_path[0], message='Attempted relative import beyond top-level package.' ) else: import_path = base_import_path + import_path self.import_path = import_path def _str_import_path(self): """Returns the import path as pure strings instead of `Name`.""" return tuple( name.value if isinstance(name, tree.Name) else name for name in self.import_path ) def _sys_path_with_modifications(self, is_completion): if self._fixed_sys_path is not None: return self._fixed_sys_path return ( # For import completions we don't want to see init paths, but for # inference we want to show the user as much as possible. # See GH #1446. self._inference_state.get_sys_path(add_init_paths=not is_completion) + [ str(p) for p in sys_path.check_sys_path_modifications(self._module_context) ] ) def follow(self): if not self.import_path: if self._fixed_sys_path: # This is a bit of a special case, that maybe should be # revisited. If the project path is wrong or the user uses # relative imports the wrong way, we might end up here, where # the `fixed_sys_path == project.path` in that case we kind of # use the project.path.parent directory as our path. This is # usually not a problem, except if imports in other places are # using the same names. Example: # # foo/ < #1 # - setup.py # - foo/ < #2 # - __init__.py # - foo.py < #3 # # If the top foo is our project folder and somebody uses # `from . import foo` in `setup.py`, it will resolve to foo #2, # which means that the import for foo.foo is cached as # `__init__.py` (#2) and not as `foo.py` (#3). This is usually # not an issue, because this case is probably pretty rare, but # might be an issue for some people. # # However for most normal cases where we work with different # file names, this code path hits where we basically change the # project path to an ancestor of project path. from jedi.inference.value.namespace import ImplicitNamespaceValue import_path = (os.path.basename(self._fixed_sys_path[0]),) ns = ImplicitNamespaceValue( self._inference_state, string_names=import_path, paths=self._fixed_sys_path, ) return ValueSet({ns}) return NO_VALUES if not self._infer_possible: return NO_VALUES # Check caches first from_cache = self._inference_state.stub_module_cache.get(self._str_import_path) if from_cache is not None: return ValueSet({from_cache}) from_cache = self._inference_state.module_cache.get(self._str_import_path) if from_cache is not None: return from_cache sys_path = self._sys_path_with_modifications(is_completion=False) return import_module_by_names( self._inference_state, self.import_path, sys_path, self._module_context ) def _get_module_names(self, search_path=None, in_module=None): """ Get the names of all modules in the search_path. This means file names and not names defined in the files. 
""" if search_path is None: sys_path = self._sys_path_with_modifications(is_completion=True) else: sys_path = search_path return list(iter_module_names( self._inference_state, self._module_context, sys_path, module_cls=ImportName if in_module is None else SubModuleName, add_builtin_modules=search_path is None and in_module is None, )) def completion_names(self, inference_state, only_modules=False): """ :param only_modules: Indicates wheter it's possible to import a definition that is not defined in a module. """ if not self._infer_possible: return [] names = [] if self.import_path: # flask if self._str_import_path == ('flask', 'ext'): # List Flask extensions like ``flask_foo`` for mod in self._get_module_names(): modname = mod.string_name if modname.startswith('flask_'): extname = modname[len('flask_'):] names.append(ImportName(self._module_context, extname)) # Now the old style: ``flaskext.foo`` for dir in self._sys_path_with_modifications(is_completion=True): flaskext = os.path.join(dir, 'flaskext') if os.path.isdir(flaskext): names += self._get_module_names([flaskext]) values = self.follow() for value in values: # Non-modules are not completable. if value.api_type not in ('module', 'namespace'): # not a module continue if not value.is_compiled(): # sub_modules_dict is not implemented for compiled modules. names += value.sub_modules_dict().values() if not only_modules: from jedi.inference.gradual.conversion import convert_values both_values = values | convert_values(values) for c in both_values: for filter in c.get_filters(): names += filter.values() else: if self.level: # We only get here if the level cannot be properly calculated. names += self._get_module_names(self._fixed_sys_path) else: # This is just the list of global imports. names += self._get_module_names() return names def unite(iterable): """Turns a two dimensional array into a one dimensional.""" return set(typ for types in iterable for typ in types) def goto_import(context, tree_name): module_context = context.get_root_context() from_import_name, import_path, level, values = \ _prepare_infer_import(module_context, tree_name) if not values: return [] if from_import_name is not None: names = unite([ c.goto( from_import_name, name_context=context, analysis_errors=False ) for c in values ]) # Avoid recursion on the same names. if names and not any(n.tree_name is tree_name for n in names): return names path = import_path + (from_import_name,) importer = Importer(context.inference_state, path, module_context, level) values = importer.follow() return set(s.name for s in values)
null
176,551
import os from pathlib import Path from parso.python import tree from parso.tree import search_ancestor from jedi import debug from jedi import settings from jedi.file_io import FolderIO from jedi.parser_utils import get_cached_code_lines from jedi.inference import sys_path from jedi.inference import helpers from jedi.inference import compiled from jedi.inference import analysis from jedi.inference.utils import unite from jedi.inference.cache import inference_state_method_cache from jedi.inference.names import ImportName, SubModuleName from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.typeshed import import_module_decorator, \ create_stub_module, parse_stub_module from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo from jedi.plugins import plugin_manager The provided code snippet includes necessary dependencies for implementing the `_level_to_base_import_path` function. Write a Python function `def _level_to_base_import_path(project_path, directory, level)` to solve the following problem: In case the level is outside of the currently known package (something like import .....foo), we can still try our best to help the user for completions. Here is the function: def _level_to_base_import_path(project_path, directory, level): """ In case the level is outside of the currently known package (something like import .....foo), we can still try our best to help the user for completions. """ for i in range(level - 1): old = directory directory = os.path.dirname(directory) if old == directory: return None, None d = directory level_import_paths = [] # Now that we are on the level that the user wants to be, calculate the # import path for it. while True: if d == project_path: return level_import_paths, d dir_name = os.path.basename(d) if dir_name: level_import_paths.insert(0, dir_name) d = os.path.dirname(d) else: return None, directory
In case the level is outside of the currently known package (something like import .....foo), we can still try our best to help the user for completions.
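A worked example with hypothetical POSIX paths: a level-2 import inside `/proj/pkg/sub` maps back onto the package path, while a level that climbs past the filesystem root gives up:

assert _level_to_base_import_path('/proj', '/proj/pkg/sub', 2) == (['pkg'], '/proj')
assert _level_to_base_import_path('/proj', '/proj', 3) == (None, None)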
176,552
import os from pathlib import Path from parso.python import tree from parso.tree import search_ancestor from jedi import debug from jedi import settings from jedi.file_io import FolderIO from jedi.parser_utils import get_cached_code_lines from jedi.inference import sys_path from jedi.inference import helpers from jedi.inference import compiled from jedi.inference import analysis from jedi.inference.utils import unite from jedi.inference.cache import inference_state_method_cache from jedi.inference.names import ImportName, SubModuleName from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.typeshed import import_module_decorator, \ create_stub_module, parse_stub_module from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo from jedi.plugins import plugin_manager def _add_error(value, name, message): def import_module(inference_state, import_names, parent_module_value, sys_path): class ValueSet: def __init__(self, iterable): def _from_frozen_set(cls, frozenset_): def from_sets(cls, sets): def __or__(self, other): def __and__(self, other): def __iter__(self): def __bool__(self): def __len__(self): def __repr__(self): def filter(self, filter_func): def __getattr__(self, name): def mapper(*args, **kwargs): def __eq__(self, other): def __ne__(self, other): def __hash__(self): def py__class__(self): def iterate(self, contextualized_node=None, is_async=False): def execute(self, arguments): def execute_with_values(self, *args, **kwargs): def goto(self, *args, **kwargs): def py__getattribute__(self, *args, **kwargs): def get_item(self, *args, **kwargs): def try_merge(self, function_name): def gather_annotation_classes(self): def get_signatures(self): def get_type_hint(self, add_class_info=True): def infer_type_vars(self, value_set): NO_VALUES = ValueSet([]) def import_module_by_names(inference_state, import_names, sys_path=None, module_context=None, prefer_stubs=True): if sys_path is None: sys_path = inference_state.get_sys_path() str_import_names = tuple( i.value if isinstance(i, tree.Name) else i for i in import_names ) value_set = [None] for i, name in enumerate(import_names): value_set = ValueSet.from_sets([ import_module( inference_state, str_import_names[:i+1], parent_module_value, sys_path, prefer_stubs=prefer_stubs, ) for parent_module_value in value_set ]) if not value_set: message = 'No module named ' + '.'.join(str_import_names) if module_context is not None: _add_error(module_context, name, message) else: debug.warning(message) return NO_VALUES return value_set
null
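A minimal sketch of the same step-wise resolution using only importlib: each prefix of the dotted name is imported relative to its already-imported parent, and a missing step produces a "No module named ..." message, mirroring the error path above. This resolves real modules rather than jedi value sets.

import importlib

def import_by_names(names):
    # Resolve e.g. ('os', 'path') one component at a time.
    module = None
    for i in range(len(names)):
        dotted = '.'.join(names[:i + 1])
        try:
            module = importlib.import_module(dotted)
        except ImportError:
            print('No module named ' + dotted)
            return None
    return module

print(import_by_names(('os', 'path')))          # the platform's path module
print(import_by_names(('os', 'doesnotexist')))  # prints the error, returns None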
176,553
import os from pathlib import Path from parso.python import tree from parso.tree import search_ancestor from jedi import debug from jedi import settings from jedi.file_io import FolderIO from jedi.parser_utils import get_cached_code_lines from jedi.inference import sys_path from jedi.inference import helpers from jedi.inference import compiled from jedi.inference import analysis from jedi.inference.utils import unite from jedi.inference.cache import inference_state_method_cache from jedi.inference.names import ImportName, SubModuleName from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.typeshed import import_module_decorator, \ create_stub_module, parse_stub_module from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo from jedi.plugins import plugin_manager class Path(PurePath): def __new__(cls: Type[_P], *args: Union[str, _PathLike], **kwargs: Any) -> _P: ... def __enter__(self: _P) -> _P: ... def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType] ) -> Optional[bool]: ... def cwd(cls: Type[_P]) -> _P: ... def stat(self) -> os.stat_result: ... def chmod(self, mode: int) -> None: ... def exists(self) -> bool: ... def glob(self: _P, pattern: str) -> Generator[_P, None, None]: ... def group(self) -> str: ... def is_dir(self) -> bool: ... def is_file(self) -> bool: ... if sys.version_info >= (3, 7): def is_mount(self) -> bool: ... def is_symlink(self) -> bool: ... def is_socket(self) -> bool: ... def is_fifo(self) -> bool: ... def is_block_device(self) -> bool: ... def is_char_device(self) -> bool: ... def iterdir(self: _P) -> Generator[_P, None, None]: ... def lchmod(self, mode: int) -> None: ... def lstat(self) -> os.stat_result: ... def mkdir(self, mode: int = ..., parents: bool = ..., exist_ok: bool = ...) -> None: ... # Adapted from builtins.open # Text mode: always returns a TextIOWrapper def open( self, mode: OpenTextMode = ..., buffering: int = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ..., ) -> TextIOWrapper: ... # Unbuffered binary mode: returns a FileIO def open( self, mode: OpenBinaryMode, buffering: Literal[0], encoding: None = ..., errors: None = ..., newline: None = ... ) -> FileIO: ... # Buffering is on: return BufferedRandom, BufferedReader, or BufferedWriter def open( self, mode: OpenBinaryModeUpdating, buffering: Literal[-1, 1] = ..., encoding: None = ..., errors: None = ..., newline: None = ..., ) -> BufferedRandom: ... def open( self, mode: OpenBinaryModeWriting, buffering: Literal[-1, 1] = ..., encoding: None = ..., errors: None = ..., newline: None = ..., ) -> BufferedWriter: ... def open( self, mode: OpenBinaryModeReading, buffering: Literal[-1, 1] = ..., encoding: None = ..., errors: None = ..., newline: None = ..., ) -> BufferedReader: ... # Buffering cannot be determined: fall back to BinaryIO def open( self, mode: OpenBinaryMode, buffering: int, encoding: None = ..., errors: None = ..., newline: None = ... ) -> BinaryIO: ... # Fallback if mode is not specified def open( self, mode: str, buffering: int = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ..., ) -> IO[Any]: ... def owner(self) -> str: ... if sys.version_info >= (3, 9): def readlink(self: _P) -> _P: ... if sys.version_info >= (3, 8): def rename(self: _P, target: Union[str, PurePath]) -> _P: ... def replace(self: _P, target: Union[str, PurePath]) -> _P: ... 
else: def rename(self, target: Union[str, PurePath]) -> None: ... def replace(self, target: Union[str, PurePath]) -> None: ... def resolve(self: _P, strict: bool = ...) -> _P: ... def rglob(self: _P, pattern: str) -> Generator[_P, None, None]: ... def rmdir(self) -> None: ... def symlink_to(self, target: Union[str, Path], target_is_directory: bool = ...) -> None: ... def touch(self, mode: int = ..., exist_ok: bool = ...) -> None: ... if sys.version_info >= (3, 8): def unlink(self, missing_ok: bool = ...) -> None: ... else: def unlink(self) -> None: ... def home(cls: Type[_P]) -> _P: ... def absolute(self: _P) -> _P: ... def expanduser(self: _P) -> _P: ... def read_bytes(self) -> bytes: ... def read_text(self, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> str: ... def samefile(self, other_path: Union[str, bytes, int, Path]) -> bool: ... def write_bytes(self, data: bytes) -> int: ... def write_text(self, data: str, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> int: ... if sys.version_info >= (3, 8): def link_to(self, target: Union[str, bytes, os.PathLike[str]]) -> None: ... class ImplicitNamespaceValue(Value, SubModuleDictMixin): """ Provides support for implicit namespace packages """ api_type = 'namespace' parent_context = None def __init__(self, inference_state, string_names, paths): super().__init__(inference_state, parent_context=None) self.inference_state = inference_state self.string_names = string_names self._paths = paths def get_filters(self, origin_scope=None): yield DictFilter(self.sub_modules_dict()) def get_qualified_names(self): return () def name(self): string_name = self.py__package__()[-1] return ImplicitNSName(self, string_name) def py__file__(self) -> Optional[Path]: return None def py__package__(self): """Return the fullname """ return self.string_names def py__path__(self): return self._paths def py__name__(self): return '.'.join(self.string_names) def is_namespace(self): return True def is_stub(self): return False def is_package(self): return True def as_context(self): return NamespaceContext(self) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.py__name__()) def load_namespace_from_path(inference_state, folder_io): import_names, is_package = sys_path.transform_path_to_dotted( inference_state.get_sys_path(), Path(folder_io.path) ) from jedi.inference.value.namespace import ImplicitNamespaceValue return ImplicitNamespaceValue(inference_state, import_names, [folder_io.path])
null
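A small self-contained sketch of what ImplicitNamespaceValue models: a PEP 420 implicit namespace package has no file, carries a __path__ listing every contributing directory, and its dotted name is its package name. The temporary directories and the name "nspkg" are purely illustrative.

import importlib, pathlib, sys, tempfile

# Two sys.path roots that both contain an "nspkg" directory without __init__.py.
roots = [pathlib.Path(tempfile.mkdtemp()) for _ in range(2)]
for root in roots:
    (root / 'nspkg').mkdir()
sys.path[:0] = [str(r) for r in roots]

nspkg = importlib.import_module('nspkg')
print(getattr(nspkg, '__file__', None))  # None: namespace packages have no file
print(list(nspkg.__path__))              # both contributing directories
print(nspkg.__package__)                 # 'nspkg'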
176,554
import os from pathlib import Path from parso.python import tree from parso.tree import search_ancestor from jedi import debug from jedi import settings from jedi.file_io import FolderIO from jedi.parser_utils import get_cached_code_lines from jedi.inference import sys_path from jedi.inference import helpers from jedi.inference import compiled from jedi.inference import analysis from jedi.inference.utils import unite from jedi.inference.cache import inference_state_method_cache from jedi.inference.names import ImportName, SubModuleName from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.typeshed import import_module_decorator, \ create_stub_module, parse_stub_module from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo from jedi.plugins import plugin_manager class Importer: def __init__(self, inference_state, import_path, module_context, level=0): def _str_import_path(self): def _sys_path_with_modifications(self, is_completion): def follow(self): def _get_module_names(self, search_path=None, in_module=None): def completion_names(self, inference_state, only_modules=False): def search_ancestor(node: 'NodeOrLeaf', *node_types: str) -> 'Optional[BaseNode]': def follow_error_node_imports_if_possible(context, name): error_node = tree.search_ancestor(name, 'error_node') if error_node is not None: # Get the first command start of a started simple_stmt. The error # node is sometimes a small_stmt and sometimes a simple_stmt. Check # for ; leaves that start a new statements. start_index = 0 for index, n in enumerate(error_node.children): if n.start_pos > name.start_pos: break if n == ';': start_index = index + 1 nodes = error_node.children[start_index:] first_name = nodes[0].get_first_leaf().value # Make it possible to infer stuff like `import foo.` or # `from foo.bar`. if first_name in ('from', 'import'): is_import_from = first_name == 'from' level, names = helpers.parse_dotted_names( nodes, is_import_from=is_import_from, until_node=name, ) return Importer( context.inference_state, names, context.get_root_context(), level).follow() return None
null
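A toy sketch of the ";"-scanning step above on a flat token list: find where the statement containing the cursor starts, then check whether it begins with `from` or `import`. The token list and cursor position are hypothetical stand-ins for parso error-node children.

tokens = ['x', '=', '1', ';', 'from', 'foo', '.', 'bar']
cursor_index = 7  # the user is completing right after "bar"

start = 0
for index, tok in enumerate(tokens[:cursor_index]):
    if tok == ';':
        start = index + 1  # a new statement begins after every ";"
statement = tokens[start:]
if statement and statement[0] in ('from', 'import'):
    print('partial import:', ' '.join(statement))  # partial import: from foo . bar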
176,555
import os from pathlib import Path from parso.python import tree from parso.tree import search_ancestor from jedi import debug from jedi import settings from jedi.file_io import FolderIO from jedi.parser_utils import get_cached_code_lines from jedi.inference import sys_path from jedi.inference import helpers from jedi.inference import compiled from jedi.inference import analysis from jedi.inference.utils import unite from jedi.inference.cache import inference_state_method_cache from jedi.inference.names import ImportName, SubModuleName from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.typeshed import import_module_decorator, \ create_stub_module, parse_stub_module from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo from jedi.plugins import plugin_manager class ImportName(AbstractNameDefinition): start_pos = (1, 0) _level = 0 def __init__(self, parent_context, string_name): self._from_module_context = parent_context self.string_name = string_name def get_qualified_names(self, include_module_names=False): if include_module_names: if self._level: assert self._level == 1, "Everything else is not supported for now" module_names = self._from_module_context.string_names if module_names is None: return module_names return module_names + (self.string_name,) return (self.string_name,) return () def parent_context(self): m = self._from_module_context import_values = self.infer() if not import_values: return m # It's almost always possible to find the import or to not find it. The # importing returns only one value, pretty much always. return next(iter(import_values)).as_context() def infer(self): from jedi.inference.imports import Importer m = self._from_module_context return Importer(m.inference_state, [self.string_name], m, level=self._level).follow() def goto(self): return [m.name for m in self.infer()] def api_type(self): return 'module' def py__doc__(self): return _merge_name_docs(self.goto()) The provided code snippet includes necessary dependencies for implementing the `iter_module_names` function. Write a Python function `def iter_module_names(inference_state, module_context, search_path, module_cls=ImportName, add_builtin_modules=True)` to solve the following problem: Get the names of all modules in the search_path. This means file names and not names defined in the files. Here is the function: def iter_module_names(inference_state, module_context, search_path, module_cls=ImportName, add_builtin_modules=True): """ Get the names of all modules in the search_path. This means file names and not names defined in the files. """ # add builtin module names if add_builtin_modules: for name in inference_state.compiled_subprocess.get_builtin_module_names(): yield module_cls(module_context, name) for name in inference_state.compiled_subprocess.iter_module_names(search_path): yield module_cls(module_context, name)
Get the names of all modules in the search_path. This means file names and not names defined in the files.
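A minimal standard-library sketch of the same idea, listing builtin module names followed by the module names found as files or packages on a search path. It uses sys and pkgutil directly instead of jedi's compiled subprocess.

import pkgutil
import sys

def iter_module_names(search_path=None, add_builtin_modules=True):
    # Builtin (compiled-in) modules first, then modules discovered on the
    # given search path; None means the current sys.path.
    if add_builtin_modules:
        yield from sys.builtin_module_names
    for info in pkgutil.iter_modules(search_path):
        yield info.name

print(sorted(iter_module_names())[:10])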
176,556
import copy import itertools from parso.python import tree from jedi import debug from jedi import parser_utils from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \ iterator_to_value_set, iterate_values from jedi.inference.lazy_value import LazyTreeValue from jedi.inference import compiled from jedi.inference import recursion from jedi.inference import analysis from jedi.inference import imports from jedi.inference import arguments from jedi.inference.value import ClassValue, FunctionValue from jedi.inference.value import iterable from jedi.inference.value.dynamic_arrays import ListModification, DictModification from jedi.inference.value import TreeInstance from jedi.inference.helpers import is_string, is_literal, is_number, \ get_names_of_node, is_big_annoying_library from jedi.inference.compiled.access import COMPARISON_OPERATORS from jedi.inference.cache import inference_state_method_cache from jedi.inference.gradual.stub_value import VersionInfo from jedi.inference.gradual import annotation from jedi.inference.names import TreeNameDefinition from jedi.inference.context import CompForContext from jedi.inference.value.decorator import Decoratee from jedi.plugins import plugin_manager NO_VALUES = ValueSet([]) The provided code snippet includes necessary dependencies for implementing the `_limit_value_infers` function. Write a Python function `def _limit_value_infers(func)` to solve the following problem: This is for now the way how we limit type inference going wild. There are other ways to ensure recursion limits as well. This is mostly necessary because of instance (self) access that can be quite tricky to limit. I'm still not sure this is the way to go, but it looks okay for now and we can still go anther way in the future. Tests are there. ~ dave Here is the function: def _limit_value_infers(func): """ This is for now the way how we limit type inference going wild. There are other ways to ensure recursion limits as well. This is mostly necessary because of instance (self) access that can be quite tricky to limit. I'm still not sure this is the way to go, but it looks okay for now and we can still go anther way in the future. Tests are there. ~ dave """ def wrapper(context, *args, **kwargs): n = context.tree_node inference_state = context.inference_state try: inference_state.inferred_element_counts[n] += 1 maximum = 300 if context.parent_context is None \ and context.get_value() is inference_state.builtins_module: # Builtins should have a more generous inference limit. # It is important that builtins can be executed, otherwise some # functions that depend on certain builtins features would be # broken, see e.g. GH #1432 maximum *= 100 if inference_state.inferred_element_counts[n] > maximum: debug.warning('In value %s there were too many inferences.', n) return NO_VALUES except KeyError: inference_state.inferred_element_counts[n] = 1 return func(context, *args, **kwargs) return wrapper
This is for now the way we limit type inference going wild. There are other ways to ensure recursion limits as well. This is mostly necessary because of instance (self) access that can be quite tricky to limit. I'm still not sure this is the way to go, but it looks okay for now and we can still go another way in the future. Tests are there. ~ dave
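A self-contained sketch of the counting technique the decorator uses: keep a per-key counter and bail out once a maximum is exceeded, returning an empty result instead of recursing further. The maximum of 3 and the string keys are illustrative only.

from collections import Counter
from functools import wraps

def limit_calls(maximum=300):
    counts = Counter()

    def decorator(func):
        @wraps(func)
        def wrapper(key, *args, **kwargs):
            counts[key] += 1
            if counts[key] > maximum:
                return None  # stand-in for NO_VALUES
            return func(key, *args, **kwargs)
        return wrapper
    return decorator

@limit_calls(maximum=3)
def infer(node):
    return 'inferred %s' % node

print([infer('n1') for _ in range(5)])
# ['inferred n1', 'inferred n1', 'inferred n1', None, None]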
176,557
import copy import itertools from parso.python import tree from jedi import debug from jedi import parser_utils from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \ iterator_to_value_set, iterate_values from jedi.inference.lazy_value import LazyTreeValue from jedi.inference import compiled from jedi.inference import recursion from jedi.inference import analysis from jedi.inference import imports from jedi.inference import arguments from jedi.inference.value import ClassValue, FunctionValue from jedi.inference.value import iterable from jedi.inference.value.dynamic_arrays import ListModification, DictModification from jedi.inference.value import TreeInstance from jedi.inference.helpers import is_string, is_literal, is_number, \ get_names_of_node, is_big_annoying_library from jedi.inference.compiled.access import COMPARISON_OPERATORS from jedi.inference.cache import inference_state_method_cache from jedi.inference.gradual.stub_value import VersionInfo from jedi.inference.gradual import annotation from jedi.inference.names import TreeNameDefinition from jedi.inference.context import CompForContext from jedi.inference.value.decorator import Decoratee from jedi.plugins import plugin_manager def infer_node(context, element): def infer_atom(context, atom): def infer_expr_stmt(context, stmt, seek_name=None): def _apply_decorators(context, node): def check_tuple_assignments(name, value_set): def iterate_values(values, contextualized_node=None, is_async=False): class ContextualizedNode: def __init__(self, context, node): def get_root_context(self): def infer(self): def __repr__(self): class ValueSet: def __init__(self, iterable): def _from_frozen_set(cls, frozenset_): def from_sets(cls, sets): def __or__(self, other): def __and__(self, other): def __iter__(self): def __bool__(self): def __len__(self): def __repr__(self): def filter(self, filter_func): def __getattr__(self, name): def mapper(*args, **kwargs): def __eq__(self, other): def __ne__(self, other): def __hash__(self): def py__class__(self): def iterate(self, contextualized_node=None, is_async=False): def execute(self, arguments): def execute_with_values(self, *args, **kwargs): def goto(self, *args, **kwargs): def py__getattribute__(self, *args, **kwargs): def get_item(self, *args, **kwargs): def try_merge(self, function_name): def gather_annotation_classes(self): def get_signatures(self): def get_type_hint(self, add_class_info=True): def infer_type_vars(self, value_set): NO_VALUES = ValueSet([]) class TreeNameDefinition(AbstractTreeName): def infer(self): def api_type(self): def assignment_indexes(self): def inference_state(self): def py__doc__(self): def tree_name_to_values(inference_state, context, tree_name): value_set = NO_VALUES module_node = context.get_root_context().tree_node # First check for annotations, like: `foo: int = 3` if module_node is not None: names = module_node.get_used_names().get(tree_name.value, []) found_annotation = False for name in names: expr_stmt = name.parent if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign": correct_scope = parser_utils.get_parent_scope(name) == context.tree_node if correct_scope: found_annotation = True value_set |= annotation.infer_annotation( context, expr_stmt.children[1].children[1] ).execute_annotation() if found_annotation: return value_set types = [] node = tree_name.get_definition(import_name_always=True, include_setitem=True) if node is None: node = tree_name.parent if node.type == 'global_stmt': c = 
context.create_context(tree_name) if c.is_module(): # In case we are already part of the module, there is no point # in looking up the global statement anymore, because it's not # valid at that point anyway. return NO_VALUES # For global_stmt lookups, we only need the first possible scope, # which means the function itself. filter = next(c.get_filters()) names = filter.get(tree_name.value) return ValueSet.from_sets(name.infer() for name in names) elif node.type not in ('import_from', 'import_name'): c = context.create_context(tree_name) return infer_atom(c, tree_name) typ = node.type if typ == 'for_stmt': types = annotation.find_type_from_comment_hint_for(context, node, tree_name) if types: return types if typ == 'with_stmt': types = annotation.find_type_from_comment_hint_with(context, node, tree_name) if types: return types if typ in ('for_stmt', 'comp_for', 'sync_comp_for'): try: types = context.predefined_names[node][tree_name.value] except KeyError: cn = ContextualizedNode(context, node.children[3]) for_types = iterate_values( cn.infer(), contextualized_node=cn, is_async=node.parent.type == 'async_stmt', ) n = TreeNameDefinition(context, tree_name) types = check_tuple_assignments(n, for_types) elif typ == 'expr_stmt': types = infer_expr_stmt(context, node, tree_name) elif typ == 'with_stmt': value_managers = context.infer_node(node.get_test_node_from_name(tree_name)) if node.parent.type == 'async_stmt': # In the case of `async with` statements, we need to # first get the coroutine from the `__aenter__` method, # then "unwrap" via the `__await__` method enter_methods = value_managers.py__getattribute__('__aenter__') coro = enter_methods.execute_with_values() return coro.py__await__().py__stop_iteration_returns() enter_methods = value_managers.py__getattribute__('__enter__') return enter_methods.execute_with_values() elif typ in ('import_from', 'import_name'): types = imports.infer_import(context, tree_name) elif typ in ('funcdef', 'classdef'): types = _apply_decorators(context, node) elif typ == 'try_stmt': # TODO an exception can also be a tuple. Check for those. # TODO check for types that are not classes and add it to # the static analysis report. exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling()) types = exceptions.execute_with_values() elif typ == 'param': types = NO_VALUES elif typ == 'del_stmt': types = NO_VALUES elif typ == 'namedexpr_test': types = infer_node(context, node) else: raise ValueError("Should not happen. type: %s" % typ) return types
null
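A toy model of the dispatch-on-definition-type structure used above: the type of the node a name is defined in selects how its values are inferred, and an unknown type is a hard error. The node objects and handler results are hypothetical stand-ins for parso nodes and ValueSets.

def infer_for_stmt(node, name):   return {'from iterable'}
def infer_expr_stmt(node, name):  return {'from assignment'}
def infer_import(node, name):     return {'from import'}

HANDLERS = {
    'for_stmt': infer_for_stmt,
    'expr_stmt': infer_expr_stmt,
    'import_from': infer_import,
    'import_name': infer_import,
    'param': lambda node, name: set(),
    'del_stmt': lambda node, name: set(),
}

def dispatch(definition_type, node, name):
    try:
        return HANDLERS[definition_type](node, name)
    except KeyError:
        raise ValueError("Should not happen. type: %s" % definition_type)

print(dispatch('expr_stmt', None, 'x'))  # {'from assignment'}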
176,558
from pathlib import Path from jedi.inference.gradual.typeshed import TYPESHED_PATH, create_stub_module class Path(PurePath): def __new__(cls: Type[_P], *args: Union[str, _PathLike], **kwargs: Any) -> _P: ... def __enter__(self: _P) -> _P: ... def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType] ) -> Optional[bool]: ... def cwd(cls: Type[_P]) -> _P: ... def stat(self) -> os.stat_result: ... def chmod(self, mode: int) -> None: ... def exists(self) -> bool: ... def glob(self: _P, pattern: str) -> Generator[_P, None, None]: ... def group(self) -> str: ... def is_dir(self) -> bool: ... def is_file(self) -> bool: ... if sys.version_info >= (3, 7): def is_mount(self) -> bool: ... def is_symlink(self) -> bool: ... def is_socket(self) -> bool: ... def is_fifo(self) -> bool: ... def is_block_device(self) -> bool: ... def is_char_device(self) -> bool: ... def iterdir(self: _P) -> Generator[_P, None, None]: ... def lchmod(self, mode: int) -> None: ... def lstat(self) -> os.stat_result: ... def mkdir(self, mode: int = ..., parents: bool = ..., exist_ok: bool = ...) -> None: ... # Adapted from builtins.open # Text mode: always returns a TextIOWrapper def open( self, mode: OpenTextMode = ..., buffering: int = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ..., ) -> TextIOWrapper: ... # Unbuffered binary mode: returns a FileIO def open( self, mode: OpenBinaryMode, buffering: Literal[0], encoding: None = ..., errors: None = ..., newline: None = ... ) -> FileIO: ... # Buffering is on: return BufferedRandom, BufferedReader, or BufferedWriter def open( self, mode: OpenBinaryModeUpdating, buffering: Literal[-1, 1] = ..., encoding: None = ..., errors: None = ..., newline: None = ..., ) -> BufferedRandom: ... def open( self, mode: OpenBinaryModeWriting, buffering: Literal[-1, 1] = ..., encoding: None = ..., errors: None = ..., newline: None = ..., ) -> BufferedWriter: ... def open( self, mode: OpenBinaryModeReading, buffering: Literal[-1, 1] = ..., encoding: None = ..., errors: None = ..., newline: None = ..., ) -> BufferedReader: ... # Buffering cannot be determined: fall back to BinaryIO def open( self, mode: OpenBinaryMode, buffering: int, encoding: None = ..., errors: None = ..., newline: None = ... ) -> BinaryIO: ... # Fallback if mode is not specified def open( self, mode: str, buffering: int = ..., encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ..., ) -> IO[Any]: ... def owner(self) -> str: ... if sys.version_info >= (3, 9): def readlink(self: _P) -> _P: ... if sys.version_info >= (3, 8): def rename(self: _P, target: Union[str, PurePath]) -> _P: ... def replace(self: _P, target: Union[str, PurePath]) -> _P: ... else: def rename(self, target: Union[str, PurePath]) -> None: ... def replace(self, target: Union[str, PurePath]) -> None: ... def resolve(self: _P, strict: bool = ...) -> _P: ... def rglob(self: _P, pattern: str) -> Generator[_P, None, None]: ... def rmdir(self) -> None: ... def symlink_to(self, target: Union[str, Path], target_is_directory: bool = ...) -> None: ... def touch(self, mode: int = ..., exist_ok: bool = ...) -> None: ... if sys.version_info >= (3, 8): def unlink(self, missing_ok: bool = ...) -> None: ... else: def unlink(self) -> None: ... def home(cls: Type[_P]) -> _P: ... def absolute(self: _P) -> _P: ... def expanduser(self: _P) -> _P: ... def read_bytes(self) -> bytes: ... 
def read_text(self, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> str: ... def samefile(self, other_path: Union[str, bytes, int, Path]) -> bool: ... def write_bytes(self, data: bytes) -> int: ... def write_text(self, data: str, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> int: ... if sys.version_info >= (3, 8): def link_to(self, target: Union[str, bytes, os.PathLike[str]]) -> None: ... TYPESHED_PATH = _jedi_path.joinpath('third_party', 'typeshed') def create_stub_module(inference_state, grammar, python_value_set, stub_module_node, file_io, import_names): if import_names == ('typing',): module_cls = TypingModuleWrapper else: module_cls = StubModuleValue file_name = os.path.basename(file_io.path) stub_module_value = module_cls( python_value_set, inference_state, stub_module_node, file_io=file_io, string_names=import_names, # The code was loaded with latest_grammar, so use # that. code_lines=get_cached_code_lines(grammar, file_io.path), is_package=file_name == '__init__.pyi', ) return stub_module_value The provided code snippet includes necessary dependencies for implementing the `load_proper_stub_module` function. Write a Python function `def load_proper_stub_module(inference_state, grammar, file_io, import_names, module_node)` to solve the following problem: This function is given a random .pyi file and should return the proper module. Here is the function: def load_proper_stub_module(inference_state, grammar, file_io, import_names, module_node): """ This function is given a random .pyi file and should return the proper module. """ path = file_io.path path = Path(path) assert path.suffix == '.pyi' try: relative_path = path.relative_to(TYPESHED_PATH) except ValueError: pass else: # /[...]/stdlib/3/os/__init__.pyi -> stdlib/3/os/__init__ rest = relative_path.with_suffix('') # Remove the stdlib/3 or third_party/3.6 part import_names = rest.parts[2:] if rest.name == '__init__': import_names = import_names[:-1] if import_names is not None: actual_value_set = inference_state.import_module(import_names, prefer_stubs=False) stub = create_stub_module( inference_state, grammar, actual_value_set, module_node, file_io, import_names ) inference_state.stub_module_cache[import_names] = stub return stub return None
This function is given a random .pyi file and should return the proper module.
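A minimal pathlib sketch of the path-to-import-names mapping performed above; the /typeshed location is a hypothetical placeholder, and only the relative-path arithmetic is shown.

from pathlib import Path

def stub_path_to_import_names(typeshed_path, stub_path):
    # stdlib/3/os/__init__.pyi -> ('os',); stdlib/3/os/path.pyi -> ('os', 'path')
    rest = Path(stub_path).relative_to(typeshed_path).with_suffix('')
    names = rest.parts[2:]        # drop the "stdlib/3" or "third_party/3.6" prefix
    if rest.name == '__init__':
        names = names[:-1]
    return names

TYPESHED = Path('/typeshed')      # illustrative location only
print(stub_path_to_import_names(TYPESHED, '/typeshed/stdlib/3/os/__init__.pyi'))  # ('os',)
print(stub_path_to_import_names(TYPESHED, '/typeshed/stdlib/3/os/path.pyi'))      # ('os', 'path')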
176,559
import re from inspect import Parameter from parso import ParserSyntaxError, parse from jedi.inference.cache import inference_state_method_cache from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.base import DefineGenericBaseClass, GenericClass from jedi.inference.gradual.generics import TupleGenericManager from jedi.inference.gradual.type_var import TypeVar from jedi.inference.helpers import is_string from jedi.inference.compiled import builtin_from_name from jedi.inference.param import get_executed_param_names from jedi import debug from jedi import parser_utils def _infer_param(function_value, param): class ValueSet: def __init__(self, iterable): def _from_frozen_set(cls, frozenset_): def from_sets(cls, sets): def __or__(self, other): def __and__(self, other): def __iter__(self): def __bool__(self): def __len__(self): def __repr__(self): def filter(self, filter_func): def __getattr__(self, name): def mapper(*args, **kwargs): def __eq__(self, other): def __ne__(self, other): def __hash__(self): def py__class__(self): def iterate(self, contextualized_node=None, is_async=False): def execute(self, arguments): def execute_with_values(self, *args, **kwargs): def goto(self, *args, **kwargs): def py__getattribute__(self, *args, **kwargs): def get_item(self, *args, **kwargs): def try_merge(self, function_name): def gather_annotation_classes(self): def get_signatures(self): def get_type_hint(self, add_class_info=True): def infer_type_vars(self, value_set): class GenericClass(DefineGenericBaseClass, ClassMixin): def __init__(self, class_value, generics_manager): def _get_wrapped_value(self): def get_type_hint(self, add_class_info=True): def get_type_var_filter(self): def py__call__(self, arguments): def _as_context(self): def py__bases__(self): def _create_instance_with_generics(self, generics_manager): def is_sub_class_of(self, class_value): def with_generics(self, generics_tuple): def infer_type_vars(self, value_set): class TupleGenericManager(_AbstractGenericManager): def __init__(self, tup): def __getitem__(self, index): def __len__(self): def to_tuple(self): def is_homogenous_tuple(self): def __repr__(self): def builtin_from_name(inference_state, string): def infer_param(function_value, param, ignore_stars=False): values = _infer_param(function_value, param) if ignore_stars or not values: return values inference_state = function_value.inference_state if param.star_count == 1: tuple_ = builtin_from_name(inference_state, 'tuple') return ValueSet([GenericClass( tuple_, TupleGenericManager((values,)), )]) elif param.star_count == 2: dct = builtin_from_name(inference_state, 'dict') generics = ( ValueSet([builtin_from_name(inference_state, 'str')]), values ) return ValueSet([GenericClass( dct, TupleGenericManager(generics), )]) return values
null
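A typing-level sketch of the star-count wrapping that infer_param performs: a plain parameter keeps its inferred type, *args is seen as a tuple of it, and **kwargs as a dict from str to it. The function below is a hypothetical illustration, not jedi's API.

from typing import Dict, Tuple

def wrap_param_type(value_type, star_count):
    if star_count == 1:
        return Tuple[value_type, ...]
    if star_count == 2:
        return Dict[str, value_type]
    return value_type

print(wrap_param_type(int, 0))  # <class 'int'>
print(wrap_param_type(int, 1))  # typing.Tuple[int, ...]
print(wrap_param_type(int, 2))  # typing.Dict[str, int]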
176,560
import re from inspect import Parameter from parso import ParserSyntaxError, parse from jedi.inference.cache import inference_state_method_cache from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.base import DefineGenericBaseClass, GenericClass from jedi.inference.gradual.generics import TupleGenericManager from jedi.inference.gradual.type_var import TypeVar from jedi.inference.helpers import is_string from jedi.inference.compiled import builtin_from_name from jedi.inference.param import get_executed_param_names from jedi import debug from jedi import parser_utils def infer_annotation(context, annotation): """ Inferes an annotation node. This means that it inferes the part of `int` here: foo: int = 3 Also checks for forward references (strings) """ value_set = context.infer_node(annotation) if len(value_set) != 1: debug.warning("Inferred typing index %s should lead to 1 object, " " not %s" % (annotation, value_set)) return value_set inferred_value = list(value_set)[0] if is_string(inferred_value): result = _get_forward_reference_node(context, inferred_value.get_safe_value()) if result is not None: return context.infer_node(result) return value_set def _infer_annotation_string(context, string, index=None): node = _get_forward_reference_node(context, string) if node is None: return NO_VALUES value_set = context.infer_node(node) if index is not None: value_set = value_set.filter( lambda value: ( value.array_type == 'tuple' and len(list(value.py__iter__())) >= index ) ).py__simple_getitem__(index) return value_set def py__annotations__(funcdef): dct = {} for function_param in funcdef.get_params(): param_annotation = function_param.annotation if param_annotation is not None: dct[function_param.name.value] = param_annotation return_annotation = funcdef.annotation if return_annotation: dct['return'] = return_annotation return dct def resolve_forward_references(context, all_annotations): def resolve(node): if node is None or node.type != 'string': return node node = _get_forward_reference_node( context, context.inference_state.compiled_subprocess.safe_literal_eval( node.value, ), ) if node is None: # There was a string, but it's not a valid annotation return None # The forward reference tree has an additional root node ('eval_input') # that we don't want. Extract the node we do want, that is equivalent to # the nodes returned by `py__annotations__` for a non-quoted node. node = node.children[0] return node return {name: resolve(node) for name, node in all_annotations.items()} def infer_type_vars_for_execution(function, arguments, annotation_dict): """ Some functions use type vars that are not defined by the class, but rather only defined in the function. See for example `iter`. In those cases we want to: 1. Search for undefined type vars. 2. Infer type vars with the execution state we have. 3. Return the union of all type vars that have been found. 
""" context = function.get_default_param_context() annotation_variable_results = {} executed_param_names = get_executed_param_names(function, arguments) for executed_param_name in executed_param_names: try: annotation_node = annotation_dict[executed_param_name.string_name] except KeyError: continue annotation_variables = find_unknown_type_vars(context, annotation_node) if annotation_variables: # Infer unknown type var annotation_value_set = context.infer_node(annotation_node) kind = executed_param_name.get_kind() actual_value_set = executed_param_name.infer() if kind is Parameter.VAR_POSITIONAL: actual_value_set = actual_value_set.merge_types_of_iterate() elif kind is Parameter.VAR_KEYWORD: # TODO _dict_values is not public. actual_value_set = actual_value_set.try_merge('_dict_values') merge_type_var_dicts( annotation_variable_results, annotation_value_set.infer_type_vars(actual_value_set), ) return annotation_variable_results def find_unknown_type_vars(context, node): def check_node(node): if node.type in ('atom_expr', 'power'): trailer = node.children[-1] if trailer.type == 'trailer' and trailer.children[0] == '[': for subscript_node in _unpack_subscriptlist(trailer.children[1]): check_node(subscript_node) else: found[:] = _filter_type_vars(context.infer_node(node), found) found = [] # We're not using a set, because the order matters. check_node(node) return found class ValueSet: def __init__(self, iterable): self._set = frozenset(iterable) for value in iterable: assert not isinstance(value, ValueSet) def _from_frozen_set(cls, frozenset_): self = cls.__new__(cls) self._set = frozenset_ return self def from_sets(cls, sets): """ Used to work with an iterable of set. """ aggregated = set() for set_ in sets: if isinstance(set_, ValueSet): aggregated |= set_._set else: aggregated |= frozenset(set_) return cls._from_frozen_set(frozenset(aggregated)) def __or__(self, other): return self._from_frozen_set(self._set | other._set) def __and__(self, other): return self._from_frozen_set(self._set & other._set) def __iter__(self): return iter(self._set) def __bool__(self): return bool(self._set) def __len__(self): return len(self._set) def __repr__(self): return 'S{%s}' % (', '.join(str(s) for s in self._set)) def filter(self, filter_func): return self.__class__(filter(filter_func, self._set)) def __getattr__(self, name): def mapper(*args, **kwargs): return self.from_sets( getattr(value, name)(*args, **kwargs) for value in self._set ) return mapper def __eq__(self, other): return self._set == other._set def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._set) def py__class__(self): return ValueSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.inference.lazy_value import get_merged_lazy_value type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_values in zip_longest(*type_iters): yield get_merged_lazy_value( [l for l in lazy_values if l is not None] ) def execute(self, arguments): return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) def execute_with_values(self, *args, **kwargs): return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) def goto(self, *args, **kwargs): return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) def py__getattribute__(self, *args, **kwargs): return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) def get_item(self, *args, **kwargs): 
return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) def try_merge(self, function_name): value_set = self.__class__([]) for c in self._set: try: method = getattr(c, function_name) except AttributeError: pass else: value_set |= method() return value_set def gather_annotation_classes(self): return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) def get_signatures(self): return [sig for c in self._set for sig in c.get_signatures()] def get_type_hint(self, add_class_info=True): t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] type_hints = sorted(filter(None, t)) if len(type_hints) == 1: return type_hints[0] optional = 'None' in type_hints if optional: type_hints.remove('None') if len(type_hints) == 0: return None elif len(type_hints) == 1: s = type_hints[0] else: s = 'Union[%s]' % ', '.join(type_hints) if optional: s = 'Optional[%s]' % s return s def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_type_var_dicts type_var_dict = {} for value in self._set: merge_type_var_dicts( type_var_dict, value.infer_type_vars(value_set), ) return type_var_dict NO_VALUES = ValueSet([]) class DefineGenericBaseClass(LazyValueWrapper): def __init__(self, generics_manager): self._generics_manager = generics_manager def _create_instance_with_generics(self, generics_manager): raise NotImplementedError def get_generics(self): return self._generics_manager.to_tuple() def define_generics(self, type_var_dict): from jedi.inference.gradual.type_var import TypeVar changed = False new_generics = [] for generic_set in self.get_generics(): values = NO_VALUES for generic in generic_set: if isinstance(generic, (DefineGenericBaseClass, TypeVar)): result = generic.define_generics(type_var_dict) values |= result if result != ValueSet({generic}): changed = True else: values |= ValueSet([generic]) new_generics.append(values) if not changed: # There might not be any type vars that change. In that case just # return itself, because it does not make sense to potentially lose # cached results. return ValueSet([self]) return ValueSet([self._create_instance_with_generics( TupleGenericManager(tuple(new_generics)) )]) def is_same_class(self, other): if not isinstance(other, DefineGenericBaseClass): return False if self.tree_node != other.tree_node: # TODO not sure if this is nice. return False given_params1 = self.get_generics() given_params2 = other.get_generics() if len(given_params1) != len(given_params2): # If the amount of type vars doesn't match, the class doesn't # match. return False # Now compare generics return all( any( # TODO why is this ordering the correct one? cls2.is_same_class(cls1) # TODO I'm still not sure gather_annotation_classes is a good # idea. They are essentially here to avoid comparing Tuple <=> # tuple and instead compare tuple <=> tuple, but at the moment # the whole `is_same_class` and `is_sub_class` matching is just # not in the best shape. 
for cls1 in class_set1.gather_annotation_classes() for cls2 in class_set2.gather_annotation_classes() ) for class_set1, class_set2 in zip(given_params1, given_params2) ) def get_signatures(self): return [] def __repr__(self): return '<%s: %s%s>' % ( self.__class__.__name__, self._wrapped_value, list(self.get_generics()), ) class TypeVar(BaseTypingValue): def __init__(self, parent_context, tree_name, var_name, unpacked_args): super().__init__(parent_context, tree_name) self._var_name = var_name self._constraints_lazy_values = [] self._bound_lazy_value = None self._covariant_lazy_value = None self._contravariant_lazy_value = None for key, lazy_value in unpacked_args: if key is None: self._constraints_lazy_values.append(lazy_value) else: if key == 'bound': self._bound_lazy_value = lazy_value elif key == 'covariant': self._covariant_lazy_value = lazy_value elif key == 'contravariant': self._contra_variant_lazy_value = lazy_value else: debug.warning('Invalid TypeVar param name %s', key) def py__name__(self): return self._var_name def get_filters(self, *args, **kwargs): return iter([]) def _get_classes(self): if self._bound_lazy_value is not None: return self._bound_lazy_value.infer() if self._constraints_lazy_values: return self.constraints debug.warning('Tried to infer the TypeVar %s without a given type', self._var_name) return NO_VALUES def is_same_class(self, other): # Everything can match an undefined type var. return True def constraints(self): return ValueSet.from_sets( lazy.infer() for lazy in self._constraints_lazy_values ) def define_generics(self, type_var_dict): try: found = type_var_dict[self.py__name__()] except KeyError: pass else: if found: return found return ValueSet({self}) def execute_annotation(self): return self._get_classes().execute_annotation() def infer_type_vars(self, value_set): def iterate(): for v in value_set: cls = v.py__class__() if v.is_function() or v.is_class(): cls = TypeWrapper(cls, v) yield cls annotation_name = self.py__name__() return {annotation_name: ValueSet(iterate())} def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.py__name__()) The provided code snippet includes necessary dependencies for implementing the `infer_return_types` function. Write a Python function `def infer_return_types(function, arguments)` to solve the following problem: Infers the type of a function's return value, according to type annotations. Here is the function: def infer_return_types(function, arguments): """ Infers the type of a function's return value, according to type annotations. """ context = function.get_default_param_context() all_annotations = resolve_forward_references( context, py__annotations__(function.tree_node), ) annotation = all_annotations.get("return", None) if annotation is None: # If there is no Python 3-type annotation, look for an annotation # comment. 
node = function.tree_node comment = parser_utils.get_following_comment_same_line(node) if comment is None: return NO_VALUES match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment) if not match: return NO_VALUES return _infer_annotation_string( context, match.group(1).strip() ).execute_annotation() unknown_type_vars = find_unknown_type_vars(context, annotation) annotation_values = infer_annotation(context, annotation) if not unknown_type_vars: return annotation_values.execute_annotation() type_var_dict = infer_type_vars_for_execution(function, arguments, all_annotations) return ValueSet.from_sets( ann.define_generics(type_var_dict) if isinstance(ann, (DefineGenericBaseClass, TypeVar)) else ValueSet({ann}) for ann in annotation_values ).execute_annotation()
Infers the type of a function's return value, according to type annotations.
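A small runnable sketch of the type-comment fallback used above: when a function carries no "-> ..." annotation, the same regex pulls the return part out of a "# type: (...) -> ..." comment. Only the regex behaviour is demonstrated here.

import re

TYPE_COMMENT = re.compile(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)")

def return_hint_from_comment(comment):
    match = TYPE_COMMENT.match(comment)
    return match.group(1).strip() if match else None

print(return_hint_from_comment('# type: (int, str) -> bool'))         # bool
print(return_hint_from_comment('#type:(List[int])->Optional[str]'))   # Optional[str]
print(return_hint_from_comment('# just a comment'))                   # None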
176,561
import re from inspect import Parameter from parso import ParserSyntaxError, parse from jedi.inference.cache import inference_state_method_cache from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.base import DefineGenericBaseClass, GenericClass from jedi.inference.gradual.generics import TupleGenericManager from jedi.inference.gradual.type_var import TypeVar from jedi.inference.helpers import is_string from jedi.inference.compiled import builtin_from_name from jedi.inference.param import get_executed_param_names from jedi import debug from jedi import parser_utils def _infer_type_vars_for_callable(arguments, lazy_params): """ Infers type vars for the Calllable class: def x() -> Callable[[Callable[..., _T]], _T]: ... """ annotation_variable_results = {} for (_, lazy_value), lazy_callable_param in zip(arguments.unpack(), lazy_params): callable_param_values = lazy_callable_param.infer() # Infer unknown type var actual_value_set = lazy_value.infer() merge_type_var_dicts( annotation_variable_results, callable_param_values.infer_type_vars(actual_value_set), ) return annotation_variable_results class ValueSet: def __init__(self, iterable): self._set = frozenset(iterable) for value in iterable: assert not isinstance(value, ValueSet) def _from_frozen_set(cls, frozenset_): self = cls.__new__(cls) self._set = frozenset_ return self def from_sets(cls, sets): """ Used to work with an iterable of set. """ aggregated = set() for set_ in sets: if isinstance(set_, ValueSet): aggregated |= set_._set else: aggregated |= frozenset(set_) return cls._from_frozen_set(frozenset(aggregated)) def __or__(self, other): return self._from_frozen_set(self._set | other._set) def __and__(self, other): return self._from_frozen_set(self._set & other._set) def __iter__(self): return iter(self._set) def __bool__(self): return bool(self._set) def __len__(self): return len(self._set) def __repr__(self): return 'S{%s}' % (', '.join(str(s) for s in self._set)) def filter(self, filter_func): return self.__class__(filter(filter_func, self._set)) def __getattr__(self, name): def mapper(*args, **kwargs): return self.from_sets( getattr(value, name)(*args, **kwargs) for value in self._set ) return mapper def __eq__(self, other): return self._set == other._set def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._set) def py__class__(self): return ValueSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.inference.lazy_value import get_merged_lazy_value type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_values in zip_longest(*type_iters): yield get_merged_lazy_value( [l for l in lazy_values if l is not None] ) def execute(self, arguments): return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) def execute_with_values(self, *args, **kwargs): return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) def goto(self, *args, **kwargs): return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) def py__getattribute__(self, *args, **kwargs): return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) def get_item(self, *args, **kwargs): return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) def try_merge(self, function_name): value_set = self.__class__([]) for c in self._set: try: method = getattr(c, function_name) except AttributeError: pass else: value_set |= 
method() return value_set def gather_annotation_classes(self): return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) def get_signatures(self): return [sig for c in self._set for sig in c.get_signatures()] def get_type_hint(self, add_class_info=True): t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] type_hints = sorted(filter(None, t)) if len(type_hints) == 1: return type_hints[0] optional = 'None' in type_hints if optional: type_hints.remove('None') if len(type_hints) == 0: return None elif len(type_hints) == 1: s = type_hints[0] else: s = 'Union[%s]' % ', '.join(type_hints) if optional: s = 'Optional[%s]' % s return s def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_type_var_dicts type_var_dict = {} for value in self._set: merge_type_var_dicts( type_var_dict, value.infer_type_vars(value_set), ) return type_var_dict class DefineGenericBaseClass(LazyValueWrapper): def __init__(self, generics_manager): self._generics_manager = generics_manager def _create_instance_with_generics(self, generics_manager): raise NotImplementedError def get_generics(self): return self._generics_manager.to_tuple() def define_generics(self, type_var_dict): from jedi.inference.gradual.type_var import TypeVar changed = False new_generics = [] for generic_set in self.get_generics(): values = NO_VALUES for generic in generic_set: if isinstance(generic, (DefineGenericBaseClass, TypeVar)): result = generic.define_generics(type_var_dict) values |= result if result != ValueSet({generic}): changed = True else: values |= ValueSet([generic]) new_generics.append(values) if not changed: # There might not be any type vars that change. In that case just # return itself, because it does not make sense to potentially lose # cached results. return ValueSet([self]) return ValueSet([self._create_instance_with_generics( TupleGenericManager(tuple(new_generics)) )]) def is_same_class(self, other): if not isinstance(other, DefineGenericBaseClass): return False if self.tree_node != other.tree_node: # TODO not sure if this is nice. return False given_params1 = self.get_generics() given_params2 = other.get_generics() if len(given_params1) != len(given_params2): # If the amount of type vars doesn't match, the class doesn't # match. return False # Now compare generics return all( any( # TODO why is this ordering the correct one? cls2.is_same_class(cls1) # TODO I'm still not sure gather_annotation_classes is a good # idea. They are essentially here to avoid comparing Tuple <=> # tuple and instead compare tuple <=> tuple, but at the moment # the whole `is_same_class` and `is_sub_class` matching is just # not in the best shape. 
for cls1 in class_set1.gather_annotation_classes() for cls2 in class_set2.gather_annotation_classes() ) for class_set1, class_set2 in zip(given_params1, given_params2) ) def get_signatures(self): return [] def __repr__(self): return '<%s: %s%s>' % ( self.__class__.__name__, self._wrapped_value, list(self.get_generics()), ) class TypeVar(BaseTypingValue): def __init__(self, parent_context, tree_name, var_name, unpacked_args): super().__init__(parent_context, tree_name) self._var_name = var_name self._constraints_lazy_values = [] self._bound_lazy_value = None self._covariant_lazy_value = None self._contravariant_lazy_value = None for key, lazy_value in unpacked_args: if key is None: self._constraints_lazy_values.append(lazy_value) else: if key == 'bound': self._bound_lazy_value = lazy_value elif key == 'covariant': self._covariant_lazy_value = lazy_value elif key == 'contravariant': self._contra_variant_lazy_value = lazy_value else: debug.warning('Invalid TypeVar param name %s', key) def py__name__(self): return self._var_name def get_filters(self, *args, **kwargs): return iter([]) def _get_classes(self): if self._bound_lazy_value is not None: return self._bound_lazy_value.infer() if self._constraints_lazy_values: return self.constraints debug.warning('Tried to infer the TypeVar %s without a given type', self._var_name) return NO_VALUES def is_same_class(self, other): # Everything can match an undefined type var. return True def constraints(self): return ValueSet.from_sets( lazy.infer() for lazy in self._constraints_lazy_values ) def define_generics(self, type_var_dict): try: found = type_var_dict[self.py__name__()] except KeyError: pass else: if found: return found return ValueSet({self}) def execute_annotation(self): return self._get_classes().execute_annotation() def infer_type_vars(self, value_set): def iterate(): for v in value_set: cls = v.py__class__() if v.is_function() or v.is_class(): cls = TypeWrapper(cls, v) yield cls annotation_name = self.py__name__() return {annotation_name: ValueSet(iterate())} def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.py__name__()) def infer_return_for_callable(arguments, param_values, result_values): all_type_vars = {} for pv in param_values: if pv.array_type == 'list': type_var_dict = _infer_type_vars_for_callable(arguments, pv.py__iter__()) all_type_vars.update(type_var_dict) return ValueSet.from_sets( v.define_generics(all_type_vars) if isinstance(v, (DefineGenericBaseClass, TypeVar)) else ValueSet({v}) for v in result_values ).execute_annotation()
null
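A toy model of the Callable inference above: pair each actual argument with the declared parameter of Callable[[...], _T], collect the type-var bindings, and substitute them into the declared result. Type vars are modelled as plain strings starting with "_"; everything here is illustrative rather than jedi's real value machinery.

def merge_type_var_dicts(base, new):
    for name, values in new.items():
        if values:
            base.setdefault(name, set()).update(values)

def infer_callable(declared_params, declared_result, actual_args):
    bindings = {}
    for declared, actual in zip(declared_params, actual_args):
        if isinstance(declared, str) and declared.startswith('_'):  # a type var
            merge_type_var_dicts(bindings, {declared: {actual}})
    return bindings.get(declared_result, {declared_result})

# Callable[[_T], _T] called with an int argument infers _T = int.
print(infer_callable(['_T'], '_T', ['int']))  # {'int'}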
176,562
import re from inspect import Parameter from parso import ParserSyntaxError, parse from jedi.inference.cache import inference_state_method_cache from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.gradual.base import DefineGenericBaseClass, GenericClass from jedi.inference.gradual.generics import TupleGenericManager from jedi.inference.gradual.type_var import TypeVar from jedi.inference.helpers import is_string from jedi.inference.compiled import builtin_from_name from jedi.inference.param import get_executed_param_names from jedi import debug from jedi import parser_utils def merge_type_var_dicts(base_dict, new_dict): for type_var_name, values in new_dict.items(): if values: try: base_dict[type_var_name] |= values except KeyError: base_dict[type_var_name] = values class DefineGenericBaseClass(LazyValueWrapper): def __init__(self, generics_manager): self._generics_manager = generics_manager def _create_instance_with_generics(self, generics_manager): raise NotImplementedError def get_generics(self): return self._generics_manager.to_tuple() def define_generics(self, type_var_dict): from jedi.inference.gradual.type_var import TypeVar changed = False new_generics = [] for generic_set in self.get_generics(): values = NO_VALUES for generic in generic_set: if isinstance(generic, (DefineGenericBaseClass, TypeVar)): result = generic.define_generics(type_var_dict) values |= result if result != ValueSet({generic}): changed = True else: values |= ValueSet([generic]) new_generics.append(values) if not changed: # There might not be any type vars that change. In that case just # return itself, because it does not make sense to potentially lose # cached results. return ValueSet([self]) return ValueSet([self._create_instance_with_generics( TupleGenericManager(tuple(new_generics)) )]) def is_same_class(self, other): if not isinstance(other, DefineGenericBaseClass): return False if self.tree_node != other.tree_node: # TODO not sure if this is nice. return False given_params1 = self.get_generics() given_params2 = other.get_generics() if len(given_params1) != len(given_params2): # If the amount of type vars doesn't match, the class doesn't # match. return False # Now compare generics return all( any( # TODO why is this ordering the correct one? cls2.is_same_class(cls1) # TODO I'm still not sure gather_annotation_classes is a good # idea. They are essentially here to avoid comparing Tuple <=> # tuple and instead compare tuple <=> tuple, but at the moment # the whole `is_same_class` and `is_sub_class` matching is just # not in the best shape. for cls1 in class_set1.gather_annotation_classes() for cls2 in class_set2.gather_annotation_classes() ) for class_set1, class_set2 in zip(given_params1, given_params2) ) def get_signatures(self): return [] def __repr__(self): return '<%s: %s%s>' % ( self.__class__.__name__, self._wrapped_value, list(self.get_generics()), ) The provided code snippet includes necessary dependencies for implementing the `merge_pairwise_generics` function. Write a Python function `def merge_pairwise_generics(annotation_value, annotated_argument_class)` to solve the following problem: Match up the generic parameters from the given argument class to the target annotation. This walks the generic parameters immediately within the annotation and argument's type, in order to determine the concrete values of the annotation's parameters for the current case. For example, given the following code: def values(mapping: Mapping[K, V]) -> List[V]: ... 
for val in values({1: 'a'}): val Then this function should be given representations of `Mapping[K, V]` and `Mapping[int, str]`, so that it can determine that `K` is `int and `V` is `str`. Note that it is responsibility of the caller to traverse the MRO of the argument type as needed in order to find the type matching the annotation (in this case finding `Mapping[int, str]` as a parent of `Dict[int, str]`). Parameters ---------- `annotation_value`: represents the annotation to infer the concrete parameter types of. `annotated_argument_class`: represents the annotated class of the argument being passed to the object annotated by `annotation_value`. Here is the function: def merge_pairwise_generics(annotation_value, annotated_argument_class): """ Match up the generic parameters from the given argument class to the target annotation. This walks the generic parameters immediately within the annotation and argument's type, in order to determine the concrete values of the annotation's parameters for the current case. For example, given the following code: def values(mapping: Mapping[K, V]) -> List[V]: ... for val in values({1: 'a'}): val Then this function should be given representations of `Mapping[K, V]` and `Mapping[int, str]`, so that it can determine that `K` is `int and `V` is `str`. Note that it is responsibility of the caller to traverse the MRO of the argument type as needed in order to find the type matching the annotation (in this case finding `Mapping[int, str]` as a parent of `Dict[int, str]`). Parameters ---------- `annotation_value`: represents the annotation to infer the concrete parameter types of. `annotated_argument_class`: represents the annotated class of the argument being passed to the object annotated by `annotation_value`. """ type_var_dict = {} if not isinstance(annotated_argument_class, DefineGenericBaseClass): return type_var_dict annotation_generics = annotation_value.get_generics() actual_generics = annotated_argument_class.get_generics() for annotation_generics_set, actual_generic_set in zip(annotation_generics, actual_generics): merge_type_var_dicts( type_var_dict, annotation_generics_set.infer_type_vars(actual_generic_set.execute_annotation()), ) return type_var_dict
Match up the generic parameters from the given argument class to the target annotation.

This walks the generic parameters immediately within the annotation and the argument's type, in order to determine the concrete values of the annotation's parameters for the current case.

For example, given the following code:

    def values(mapping: Mapping[K, V]) -> List[V]: ...

    for val in values({1: 'a'}):
        val

this function should be given representations of `Mapping[K, V]` and `Mapping[int, str]`, so that it can determine that `K` is `int` and `V` is `str`.

Note that it is the responsibility of the caller to traverse the MRO of the argument type as needed in order to find the type matching the annotation (in this case finding `Mapping[int, str]` as a parent of `Dict[int, str]`).

Parameters
----------
`annotation_value`: represents the annotation to infer the concrete parameter types of.
`annotated_argument_class`: represents the annotated class of the argument being passed to the object annotated by `annotation_value`.
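For illustration, here is a minimal standalone sketch of the dict-merging step that `merge_pairwise_generics` delegates to `merge_type_var_dicts`. Plain Python sets stand in for jedi's `ValueSet` here; the type-var names and values are made up.

# Hypothetical stand-ins: plain sets instead of jedi ValueSets.
base = {'K': {'int'}}
new = {'K': {'float'}, 'V': {'str'}, 'T': set()}

for name, values in new.items():
    if values:  # empty value sets are skipped, exactly as merge_type_var_dicts does
        try:
            base[name] |= values
        except KeyError:
            base[name] = values

print(base)  # {'K': {'int', 'float'}, 'V': {'str'}}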
176,563
from parso import ParserSyntaxError

from jedi import debug
from jedi import parser_utils
from jedi.cache import memoize_method
from jedi.inference.utils import to_tuple
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.inference.value.iterable import SequenceLiteralValue
from jedi.inference.helpers import is_string


def is_string(value):
    return value.is_compiled() and isinstance(value.get_safe_value(default=None), str)


def _get_forward_reference_node(context, string):
    try:
        new_node = context.inference_state.grammar.parse(
            string,
            start_symbol='eval_input',
            error_recovery=False
        )
    except ParserSyntaxError:
        debug.warning('Annotation not parsed: %s' % string)
        return None
    else:
        module = context.tree_node.get_root_node()
        parser_utils.move(new_node, module.end_pos[0])
        new_node.parent = context.tree_node
        return new_node


def _resolve_forward_references(context, value_set):
    for value in value_set:
        if is_string(value):
            from jedi.inference.gradual.annotation import _get_forward_reference_node
            node = _get_forward_reference_node(context, value.get_safe_value())
            if node is not None:
                for c in context.infer_node(node):
                    yield c
        else:
            yield value
null
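As a hedged sketch of the parse call at the heart of `_get_forward_reference_node`: a string annotation is parsed as a bare expression (`start_symbol='eval_input'`) with error recovery off, so a malformed annotation raises `ParserSyntaxError` instead of yielding a partial tree. This uses plain `parso` directly in place of jedi's `inference_state.grammar`.

import parso
from parso import ParserSyntaxError

grammar = parso.load_grammar()
try:
    node = grammar.parse('List[int]', start_symbol='eval_input',
                         error_recovery=False)
    print(node.type)  # eval_input
except ParserSyntaxError:
    print('annotation not parsed')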
176,564
import os
import re
from functools import reduce, wraps
from collections import namedtuple
from itertools import zip_longest
from operator import add
from typing import Dict, Mapping, Tuple
from pathlib import Path

from jedi import settings
from jedi.file_io import FileIO
from jedi.parser_utils import get_cached_code_lines
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.inference.gradual.stub_value import TypingModuleWrapper, StubModuleValue
from jedi.inference.value import ModuleValue


def try_to_load_stub_cached(inference_state, import_names, *args, **kwargs):
    if import_names is None:
        return None

    try:
        return inference_state.stub_module_cache[import_names]
    except KeyError:
        pass

    # TODO is this needed? where are the exceptions coming from that make this
    # necessary? Just remove this line.
    inference_state.stub_module_cache[import_names] = None
    inference_state.stub_module_cache[import_names] = result = \
        _try_to_load_stub(inference_state, import_names, *args, **kwargs)
    return result


def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ...


class ValueSet:
    def __init__(self, iterable):
        self._set = frozenset(iterable)
        for value in iterable:
            assert not isinstance(value, ValueSet)

    @classmethod
    def _from_frozen_set(cls, frozenset_):
        self = cls.__new__(cls)
        self._set = frozenset_
        return self

    @classmethod
    def from_sets(cls, sets):
        """
        Used to work with an iterable of set.
        """
        aggregated = set()
        for set_ in sets:
            if isinstance(set_, ValueSet):
                aggregated |= set_._set
            else:
                aggregated |= frozenset(set_)
        return cls._from_frozen_set(frozenset(aggregated))

    def __or__(self, other):
        return self._from_frozen_set(self._set | other._set)

    def __and__(self, other):
        return self._from_frozen_set(self._set & other._set)

    def __iter__(self):
        return iter(self._set)

    def __bool__(self):
        return bool(self._set)

    def __len__(self):
        return len(self._set)

    def __repr__(self):
        return 'S{%s}' % (', '.join(str(s) for s in self._set))

    def filter(self, filter_func):
        return self.__class__(filter(filter_func, self._set))

    def __getattr__(self, name):
        def mapper(*args, **kwargs):
            return self.from_sets(
                getattr(value, name)(*args, **kwargs)
                for value in self._set
            )
        return mapper

    def __eq__(self, other):
        return self._set == other._set

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._set)

    def py__class__(self):
        return ValueSet(c.py__class__() for c in self._set)

    def iterate(self, contextualized_node=None, is_async=False):
        from jedi.inference.lazy_value import get_merged_lazy_value
        type_iters = [c.iterate(contextualized_node, is_async=is_async)
                      for c in self._set]
        for lazy_values in zip_longest(*type_iters):
            yield get_merged_lazy_value(
                [l for l in lazy_values if l is not None]
            )

    def execute(self, arguments):
        return ValueSet.from_sets(c.inference_state.execute(c, arguments)
                                  for c in self._set)

    def execute_with_values(self, *args, **kwargs):
        return ValueSet.from_sets(c.execute_with_values(*args, **kwargs)
                                  for c in self._set)

    def goto(self, *args, **kwargs):
        return reduce(add, [c.goto(*args, **kwargs) for c in self._set], [])

    def py__getattribute__(self, *args, **kwargs):
        return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs)
                                  for c in self._set)

    def get_item(self, *args, **kwargs):
        return ValueSet.from_sets(_getitem(c, *args, **kwargs)
                                  for c in self._set)

    def try_merge(self, function_name):
        value_set = self.__class__([])
        for c in self._set:
            try:
                method = getattr(c, function_name)
            except AttributeError:
                pass
            else:
                value_set |= method()
        return value_set

    def gather_annotation_classes(self):
        return ValueSet.from_sets(
            [c.gather_annotation_classes() for c in self._set]
        )

    def get_signatures(self):
        return [sig for c in self._set for sig in c.get_signatures()]

    def get_type_hint(self, add_class_info=True):
        t = [v.get_type_hint(add_class_info=add_class_info)
             for v in self._set]
        type_hints = sorted(filter(None, t))
        if len(type_hints) == 1:
            return type_hints[0]

        optional = 'None' in type_hints
        if optional:
            type_hints.remove('None')

        if len(type_hints) == 0:
            return None
        elif len(type_hints) == 1:
            s = type_hints[0]
        else:
            s = 'Union[%s]' % ', '.join(type_hints)
        if optional:
            s = 'Optional[%s]' % s
        return s

    def infer_type_vars(self, value_set):
        # Circular
        from jedi.inference.gradual.annotation import merge_type_var_dicts

        type_var_dict = {}
        for value in self._set:
            merge_type_var_dicts(
                type_var_dict,
                value.infer_type_vars(value_set),
            )
        return type_var_dict


def import_module_decorator(func):
    @wraps(func)
    def wrapper(inference_state, import_names, parent_module_value,
                sys_path, prefer_stubs):
        python_value_set = inference_state.module_cache.get(import_names)
        if python_value_set is None:
            if parent_module_value is not None and parent_module_value.is_stub():
                parent_module_values = parent_module_value.non_stub_value_set
            else:
                parent_module_values = [parent_module_value]
            if import_names == ('os', 'path'):
                # This is a huge exception, we follow a nested import
                # ``os.path``, because it's a very important one in Python
                # that is being achieved by messing with ``sys.modules`` in
                # ``os``.
                python_value_set = ValueSet.from_sets(
                    func(inference_state, (n,), None, sys_path,)
                    for n in ['posixpath', 'ntpath', 'macpath', 'os2emxpath']
                )
            else:
                python_value_set = ValueSet.from_sets(
                    func(inference_state, import_names, p, sys_path,)
                    for p in parent_module_values
                )
            inference_state.module_cache.add(import_names, python_value_set)

        if not prefer_stubs or import_names[0] in settings.auto_import_modules:
            return python_value_set

        stub = try_to_load_stub_cached(inference_state, import_names,
                                       python_value_set, parent_module_value,
                                       sys_path)
        if stub is not None:
            return ValueSet([stub])
        return python_value_set

    return wrapper
null
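A quick sketch of the `ValueSet` semantics that `import_module_decorator` leans on (`from_sets`, `|`, truthiness), assuming the class as reproduced above with its `@classmethod` decorators restored; plain ints stand in for inferred values.

s1 = ValueSet([1, 2])
s2 = ValueSet([2, 3])
print(s1 | s2)                       # S{...} containing 1, 2, 3 (order may vary)
print(ValueSet.from_sets([s1, s2]))  # the same union, built from an iterable of sets
print(len(s1 & s2), bool(ValueSet([])))  # 1 False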
176,565
import re
import textwrap
from ast import literal_eval
from inspect import cleandoc
from weakref import WeakKeyDictionary

from parso.python import tree
from parso.cache import parser_cache
from parso import split_lines

_EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name', 'test',
                  'or_test', 'and_test', 'not_test', 'comparison', 'expr',
                  'xor_expr', 'and_expr', 'shift_expr', 'arith_expr',
                  'atom_expr', 'term', 'factor', 'power', 'atom'}

The provided code snippet includes necessary dependencies for implementing the `get_executable_nodes` function. Write a Python function `def get_executable_nodes(node, last_added=False)` to solve the following problem:

For static analysis.

Here is the function:

def get_executable_nodes(node, last_added=False):
    """
    For static analysis.
    """
    result = []
    typ = node.type
    if typ == 'name':
        next_leaf = node.get_next_leaf()
        if last_added is False and node.parent.type != 'param' \
                and next_leaf != '=':
            result.append(node)
    elif typ == 'expr_stmt':
        # I think inferring the statement (and possibly returned arrays)
        # should be enough for static analysis.
        result.append(node)
        for child in node.children:
            result += get_executable_nodes(child, last_added=True)
    elif typ == 'decorator':
        # decorator
        if node.children[-2] == ')':
            node = node.children[-3]
            if node != '(':
                result += get_executable_nodes(node)
    else:
        try:
            children = node.children
        except AttributeError:
            pass
        else:
            if node.type in _EXECUTE_NODES and not last_added:
                result.append(node)

            for child in children:
                result += get_executable_nodes(child, last_added)

    return result
For static analysis.
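A hedged example of driving `get_executable_nodes` with a `parso` tree; the node types it collects (`'name'`, `'expr_stmt'`, `'atom_expr'`, ...) are parso's.

import parso

module = parso.parse('x = foo(1)\nx\n')
for node in get_executable_nodes(module):
    print(node.type, repr(node.get_code().strip()))
# expr_stmt 'x = foo(1)'
# name 'x'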
176,566
import re
import textwrap
from ast import literal_eval
from inspect import cleandoc
from weakref import WeakKeyDictionary

from parso.python import tree
from parso.cache import parser_cache
from parso import split_lines


def get_sync_comp_fors(comp_for):
    yield comp_for
    last = comp_for.children[-1]
    while True:
        if last.type == 'comp_for':
            yield last.children[1]  # Ignore the async.
        elif last.type == 'sync_comp_for':
            yield last
        elif not last.type == 'comp_if':
            break
        last = last.children[-1]
null
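Sketch of calling `get_sync_comp_fors` on a comprehension's first `sync_comp_for` node; `_find` is a hypothetical helper used only to locate that node in the parso tree.

import parso

def _find(node, type_):
    if node.type == type_:
        yield node
    for child in getattr(node, 'children', []):
        yield from _find(child, type_)

module = parso.parse('[a for b in c if b for d in e]')
comp_for = next(_find(module, 'sync_comp_for'))
for n in get_sync_comp_fors(comp_for):
    print(n.type)  # prints 'sync_comp_for' twice: one per for-clause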
176,567
import re
import textwrap
from ast import literal_eval
from inspect import cleandoc
from weakref import WeakKeyDictionary

from parso.python import tree
from parso.cache import parser_cache
from parso import split_lines


def safe_literal_eval(value):
    first_two = value[:2].lower()
    if first_two[0] == 'f' or first_two in ('fr', 'rf'):
        # literal_eval is not able to resolve f-string literals. We would have
        # to do that manually, but that is not implemented right now.
        return ''
    return literal_eval(value)


def cleandoc(doc: str) -> str: ...

The provided code snippet includes necessary dependencies for implementing the `clean_scope_docstring` function. Write a Python function `def clean_scope_docstring(scope_node)` to solve the following problem:

Returns a cleaned version of the docstring token.

Here is the function:

def clean_scope_docstring(scope_node):
    """ Returns a cleaned version of the docstring token. """
    node = scope_node.get_doc_node()
    if node is not None:
        # TODO We have to check next leaves until there are no new
        # leaves anymore that might be part of the docstring. A
        # docstring can also look like this: ``'foo' 'bar'``.
        # Returns a literal cleaned version of the ``Token``.
        return cleandoc(safe_literal_eval(node.value))
    return ''
Returns a cleaned version of the docstring token.
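Example usage with a parso tree; `get_doc_node` is parso's accessor for a scope's docstring leaf.

import parso

module = parso.parse('def f():\n    """  Doc\n    line two."""\n')
funcdef = module.children[0]
print(repr(clean_scope_docstring(funcdef)))  # 'Doc\nline two.' after cleandoc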
176,568
import re
import textwrap
from ast import literal_eval
from inspect import cleandoc
from weakref import WeakKeyDictionary

from parso.python import tree
from parso.cache import parser_cache
from parso import split_lines


def safe_literal_eval(value):
    first_two = value[:2].lower()
    if first_two[0] == 'f' or first_two in ('fr', 'rf'):
        # literal_eval is not able to resolve f-string literals. We would have
        # to do that manually, but that is not implemented right now.
        return ''
    return literal_eval(value)


def cleandoc(doc: str) -> str: ...


def find_statement_documentation(tree_node):
    if tree_node.type == 'expr_stmt':
        tree_node = tree_node.parent  # simple_stmt
        maybe_string = tree_node.get_next_sibling()
        if maybe_string is not None:
            if maybe_string.type == 'simple_stmt':
                maybe_string = maybe_string.children[0]
                if maybe_string.type == 'string':
                    return cleandoc(safe_literal_eval(maybe_string.value))
    return ''
null
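Example: a statement documented by the string literal on the following line, which is exactly the pattern this helper targets.

import parso

module = parso.parse('x = 1\n"""Docs for x."""\n')
expr_stmt = module.children[0].children[0]
print(find_statement_documentation(expr_stmt))  # Docs for x.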
176,569
import re
import textwrap
from ast import literal_eval
from inspect import cleandoc
from weakref import WeakKeyDictionary

from parso.python import tree
from parso.cache import parser_cache
from parso import split_lines

The provided code snippet includes necessary dependencies for implementing the `get_signature` function. Write a Python function `def get_signature(funcdef, width=72, call_string=None, omit_first_param=False, omit_return_annotation=False)` to solve the following problem:

Generate a string signature of a function.

:param width: Fold lines if a line is longer than this value.
:type width: int
:param call_string: Override the function name when given.
:type call_string: str
:rtype: str

Here is the function:

def get_signature(funcdef, width=72, call_string=None,
                  omit_first_param=False, omit_return_annotation=False):
    """
    Generate a string signature of a function.

    :param width: Fold lines if a line is longer than this value.
    :type width: int
    :param call_string: Override the function name when given.
    :type call_string: str
    :rtype: str
    """
    # Lambdas have no name.
    if call_string is None:
        if funcdef.type == 'lambdef':
            call_string = '<lambda>'
        else:
            call_string = funcdef.name.value
    params = funcdef.get_params()
    if omit_first_param:
        params = params[1:]
    p = '(' + ''.join(param.get_code() for param in params).strip() + ')'
    # TODO this is pretty bad, we should probably just normalize.
    p = re.sub(r'\s+', ' ', p)
    if funcdef.annotation and not omit_return_annotation:
        rtype = " ->" + funcdef.annotation.get_code()
    else:
        rtype = ""
    code = call_string + p + rtype

    return '\n'.join(textwrap.wrap(code, width))
Generate a string signature of a function.

:param width: Fold lines if a line is longer than this value.
:type width: int
:param call_string: Override the function name when given.
:type call_string: str
:rtype: str
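Example output, using parso to obtain the funcdef node (note that the source's `" ->"` spacing relies on the annotation's own leading-space prefix):

import parso

module = parso.parse('def foo(a, b=3) -> int:\n    pass\n')
funcdef = module.children[0]
print(get_signature(funcdef))                         # foo(a, b=3) -> int
print(get_signature(funcdef, omit_first_param=True))  # foo(b=3) -> int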
176,570
import re
import textwrap
from ast import literal_eval
from inspect import cleandoc
from weakref import WeakKeyDictionary

from parso.python import tree
from parso.cache import parser_cache
from parso import split_lines


class WeakKeyDictionary(MutableMapping[_KT, _VT]):
    @overload
    def __init__(self, dict: None = ...) -> None: ...
    @overload
    def __init__(self, dict: Union[Mapping[_KT, _VT], Iterable[Tuple[_KT, _VT]]]) -> None: ...
    def __len__(self) -> int: ...
    def __getitem__(self, k: _KT) -> _VT: ...
    def __setitem__(self, k: _KT, v: _VT) -> None: ...
    def __delitem__(self, v: _KT) -> None: ...
    if sys.version_info < (3, 0):
        def has_key(self, key: object) -> bool: ...
    def __contains__(self, o: object) -> bool: ...
    def __iter__(self) -> Iterator[_KT]: ...
    def __str__(self) -> str: ...
    def copy(self) -> WeakKeyDictionary[_KT, _VT]: ...
    if sys.version_info < (3, 0):
        def keys(self) -> List[_KT]: ...
        def values(self) -> List[_VT]: ...
        def items(self) -> List[Tuple[_KT, _VT]]: ...
        def iterkeys(self) -> Iterator[_KT]: ...
        def itervalues(self) -> Iterator[_VT]: ...
        def iteritems(self) -> Iterator[Tuple[_KT, _VT]]: ...
        def iterkeyrefs(self) -> Iterator[ref[_KT]]: ...
    else:
        # These are incompatible with Mapping
        def keys(self) -> Iterator[_KT]: ...  # type: ignore
        def values(self) -> Iterator[_VT]: ...  # type: ignore
        def items(self) -> Iterator[Tuple[_KT, _VT]]: ...  # type: ignore
        def keyrefs(self) -> List[ref[_KT]]: ...


def _get_parent_scope_cache(func):
    cache = WeakKeyDictionary()

    def wrapper(parso_cache_node, node, include_flows=False):
        if parso_cache_node is None:
            return func(node, include_flows)

        try:
            for_module = cache[parso_cache_node]
        except KeyError:
            for_module = cache[parso_cache_node] = {}

        try:
            return for_module[node]
        except KeyError:
            result = for_module[node] = func(node, include_flows)
            return result

    return wrapper
null
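A hedged sketch of applying the caching decorator; `demo_parent_scope` and `FakeCacheNode` are made-up names standing in for jedi's wrapped function and parso's parser-cache entry. This assumes the real `weakref.WeakKeyDictionary` imported at the top of this record (not the typeshed stub reproduced alongside it), so the cache key must be weak-referenceable.

import parso

calls = []

@_get_parent_scope_cache
def demo_parent_scope(node, include_flows=False):
    calls.append(node)  # count how often the real computation runs
    return node.parent

class FakeCacheNode:  # stand-in for a parso parser-cache entry
    pass

module = parso.parse('x = 1\n')
name = module.children[0].children[0].children[0]  # the Name 'x'

cache_node = FakeCacheNode()
demo_parent_scope(cache_node, name)
demo_parent_scope(cache_node, name)
print(len(calls))  # 1 -- the second call is served from the per-module cache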