sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
oraios/serena:test/solidlsp/kotlin/test_kotlin_basic.py | import os
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_utils import SymbolUtils
from test.conftest import is_ci
# Kotlin LSP (IntelliJ-based, pre-alpha v261) crashes on JVM restart under CI resource constraints
# (2 CPUs, 7GB RAM). First start succeeds but subsequent starts fail with cancelled (-32800).
# Tests pass reliably on developer machines. See PR #1061 for investigation details.
@pytest.mark.skipif(is_ci, reason="Kotlin LSP JVM restart is unstable on CI runners")
@pytest.mark.kotlin
class TestKotlinLanguageServer:
@pytest.mark.parametrize("language_server", [Language.KOTLIN], indirect=True)
def test_find_symbol(self, language_server: SolidLanguageServer) -> None:
symbols = language_server.request_full_symbol_tree()
assert SymbolUtils.symbol_tree_contains_name(symbols, "Main"), "Main class not found in symbol tree"
assert SymbolUtils.symbol_tree_contains_name(symbols, "Utils"), "Utils class not found in symbol tree"
assert SymbolUtils.symbol_tree_contains_name(symbols, "Model"), "Model class not found in symbol tree"
@pytest.mark.parametrize("language_server", [Language.KOTLIN], indirect=True)
def test_find_referencing_symbols(self, language_server: SolidLanguageServer) -> None:
# Use correct Kotlin file paths
file_path = os.path.join("src", "main", "kotlin", "test_repo", "Utils.kt")
refs = language_server.request_references(file_path, 3, 12)
assert any("Main.kt" in ref.get("relativePath", "") for ref in refs), "Main should reference Utils.printHello"
# Dynamically determine the correct line/column for the 'Model' class name
file_path = os.path.join("src", "main", "kotlin", "test_repo", "Model.kt")
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
model_symbol = None
for sym in symbols[0]:
print(sym)
print("\n")
if sym.get("name") == "Model" and sym.get("kind") == 23: # 23 = Class
model_symbol = sym
break
assert model_symbol is not None, "Could not find 'Model' class symbol in Model.kt"
# Use selectionRange if present, otherwise fall back to range
if "selectionRange" in model_symbol:
sel_start = model_symbol["selectionRange"]["start"]
else:
sel_start = model_symbol["range"]["start"]
refs = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
assert any(
"Main.kt" in ref.get("relativePath", "") for ref in refs
), "Main should reference Model (tried all positions in selectionRange)"
@pytest.mark.parametrize("language_server", [Language.KOTLIN], indirect=True)
def test_overview_methods(self, language_server: SolidLanguageServer) -> None:
symbols = language_server.request_full_symbol_tree()
assert SymbolUtils.symbol_tree_contains_name(symbols, "Main"), "Main missing from overview"
assert SymbolUtils.symbol_tree_contains_name(symbols, "Utils"), "Utils missing from overview"
assert SymbolUtils.symbol_tree_contains_name(symbols, "Model"), "Model missing from overview"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/kotlin/test_kotlin_basic.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/lua/test_lua_basic.py | """
Tests for the Lua language server implementation.
These tests validate symbol finding and cross-file reference capabilities
for Lua modules and functions.
"""
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_types import SymbolKind
@pytest.mark.lua
class TestLuaLanguageServer:
"""Test Lua language server symbol finding and cross-file references."""
@pytest.mark.parametrize("language_server", [Language.LUA], indirect=True)
def test_find_symbols_in_calculator(self, language_server: SolidLanguageServer) -> None:
"""Test finding specific functions in calculator.lua."""
symbols = language_server.request_document_symbols("src/calculator.lua").get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
# Extract function names from the returned structure
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
function_names = set()
for symbol in symbol_list:
if isinstance(symbol, dict):
name = symbol.get("name", "")
# Handle both plain names and module-prefixed names
if "." in name:
name = name.split(".")[-1]
if symbol.get("kind") == SymbolKind.Function:
function_names.add(name)
# Verify exact calculator functions exist
expected_functions = {"add", "subtract", "multiply", "divide", "factorial"}
found_functions = function_names & expected_functions
assert found_functions == expected_functions, f"Expected exactly {expected_functions}, found {found_functions}"
# Verify specific functions
assert "add" in function_names, "add function not found"
assert "multiply" in function_names, "multiply function not found"
assert "factorial" in function_names, "factorial function not found"
@pytest.mark.parametrize("language_server", [Language.LUA], indirect=True)
def test_find_symbols_in_utils(self, language_server: SolidLanguageServer) -> None:
"""Test finding specific functions in utils.lua."""
symbols = language_server.request_document_symbols("src/utils.lua").get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
function_names = set()
all_symbols = set()
for symbol in symbol_list:
if isinstance(symbol, dict):
name = symbol.get("name", "")
all_symbols.add(name)
# Handle both plain names and module-prefixed names
if "." in name:
name = name.split(".")[-1]
if symbol.get("kind") == SymbolKind.Function:
function_names.add(name)
# Verify exact string utility functions
expected_utils = {"trim", "split", "starts_with", "ends_with"}
found_utils = function_names & expected_utils
assert found_utils == expected_utils, f"Expected exactly {expected_utils}, found {found_utils}"
# Verify exact table utility functions
table_utils = {"deep_copy", "table_contains", "table_merge"}
found_table_utils = function_names & table_utils
assert found_table_utils == table_utils, f"Expected exactly {table_utils}, found {found_table_utils}"
# Check for Logger class/table
assert "Logger" in all_symbols or any("Logger" in s for s in all_symbols), "Logger not found in symbols"
@pytest.mark.parametrize("language_server", [Language.LUA], indirect=True)
def test_find_symbols_in_main(self, language_server: SolidLanguageServer) -> None:
"""Test finding functions in main.lua."""
symbols = language_server.request_document_symbols("main.lua").get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
function_names = set()
for symbol in symbol_list:
if isinstance(symbol, dict) and symbol.get("kind") == SymbolKind.Function:
function_names.add(symbol.get("name", ""))
# Verify exact main functions exist
expected_funcs = {"print_banner", "test_calculator", "test_utils"}
found_funcs = function_names & expected_funcs
assert found_funcs == expected_funcs, f"Expected exactly {expected_funcs}, found {found_funcs}"
assert "test_calculator" in function_names, "test_calculator function not found"
assert "test_utils" in function_names, "test_utils function not found"
@pytest.mark.parametrize("language_server", [Language.LUA], indirect=True)
def test_cross_file_references_calculator_add(self, language_server: SolidLanguageServer) -> None:
"""Test finding cross-file references to calculator.add function."""
symbols = language_server.request_document_symbols("src/calculator.lua").get_all_symbols_and_roots()
assert symbols is not None
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
# Find the add function
add_symbol = None
for sym in symbol_list:
if isinstance(sym, dict):
name = sym.get("name", "")
if "add" in name or name == "add":
add_symbol = sym
break
assert add_symbol is not None, "add function not found in calculator.lua"
# Get references to the add function
range_info = add_symbol.get("selectionRange", add_symbol.get("range"))
assert range_info is not None, "add function has no range information"
range_start = range_info["start"]
refs = language_server.request_references("src/calculator.lua", range_start["line"], range_start["character"])
assert refs is not None
assert isinstance(refs, list)
# add function appears in: main.lua (lines 16, 71), test_calculator.lua (lines 22, 23, 24)
# Note: The declaration itself may or may not be included as a reference
assert len(refs) >= 5, f"Should find at least 5 references to calculator.add, found {len(refs)}"
# Verify exact reference locations
ref_files: dict[str, list[int]] = {}
for ref in refs:
filename = ref.get("uri", "").split("/")[-1]
if filename not in ref_files:
ref_files[filename] = []
ref_files[filename].append(ref["range"]["start"]["line"])
# The declaration may or may not be included
if "calculator.lua" in ref_files:
assert (
5 in ref_files["calculator.lua"]
), f"If declaration is included, it should be at line 6 (0-indexed: 5), found at {ref_files['calculator.lua']}"
# Check main.lua has usages
assert "main.lua" in ref_files, "Should find add usages in main.lua"
assert (
15 in ref_files["main.lua"] or 70 in ref_files["main.lua"]
), f"Should find add usage in main.lua, found at lines {ref_files.get('main.lua', [])}"
# Check for cross-file references from main.lua
main_refs = [ref for ref in refs if "main.lua" in ref.get("uri", "")]
assert len(main_refs) > 0, "calculator.add should be called in main.lua"
@pytest.mark.parametrize("language_server", [Language.LUA], indirect=True)
def test_cross_file_references_utils_trim(self, language_server: SolidLanguageServer) -> None:
"""Test finding cross-file references to utils.trim function."""
symbols = language_server.request_document_symbols("src/utils.lua").get_all_symbols_and_roots()
assert symbols is not None
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
# Find the trim function
trim_symbol = None
for sym in symbol_list:
if isinstance(sym, dict):
name = sym.get("name", "")
if "trim" in name or name == "trim":
trim_symbol = sym
break
assert trim_symbol is not None, "trim function not found in utils.lua"
# Get references to the trim function
range_info = trim_symbol.get("selectionRange", trim_symbol.get("range"))
assert range_info is not None, "trim function has no range information"
range_start = range_info["start"]
refs = language_server.request_references("src/utils.lua", range_start["line"], range_start["character"])
assert refs is not None
assert isinstance(refs, list)
# trim function appears in: usage (line 32 in main.lua)
# Note: The declaration itself may or may not be included as a reference
assert len(refs) >= 1, f"Should find at least 1 reference to utils.trim, found {len(refs)}"
# Verify exact reference locations
ref_files: dict[str, list[int]] = {}
for ref in refs:
filename = ref.get("uri", "").split("/")[-1]
if filename not in ref_files:
ref_files[filename] = []
ref_files[filename].append(ref["range"]["start"]["line"])
# The declaration may or may not be included
if "utils.lua" in ref_files:
assert (
5 in ref_files["utils.lua"]
), f"If declaration is included, it should be at line 6 (0-indexed: 5), found at {ref_files['utils.lua']}"
# Check main.lua has usage
assert "main.lua" in ref_files, "Should find trim usage in main.lua"
assert (
31 in ref_files["main.lua"]
), f"Should find trim usage at line 32 (0-indexed: 31) in main.lua, found at lines {ref_files.get('main.lua', [])}"
# Check for cross-file references from main.lua
main_refs = [ref for ref in refs if "main.lua" in ref.get("uri", "")]
assert len(main_refs) > 0, "utils.trim should be called in main.lua"
@pytest.mark.parametrize("language_server", [Language.LUA], indirect=True)
def test_hover_information(self, language_server: SolidLanguageServer) -> None:
"""Test hover information for symbols."""
# Get hover info for a function
hover_info = language_server.request_hover("src/calculator.lua", 5, 10) # Position near add function
assert hover_info is not None, "Should provide hover information"
# Hover info could be a dict with 'contents' or a string
if isinstance(hover_info, dict):
assert "contents" in hover_info or "value" in hover_info, "Hover should have contents"
@pytest.mark.parametrize("language_server", [Language.LUA], indirect=True)
def test_full_symbol_tree(self, language_server: SolidLanguageServer) -> None:
"""Test that full symbol tree is not empty."""
symbols = language_server.request_full_symbol_tree()
assert symbols is not None
assert len(symbols) > 0, "Symbol tree should not be empty"
# The tree should have at least one root node
root = symbols[0]
assert isinstance(root, dict), "Root should be a dict"
assert "name" in root, "Root should have a name"
@pytest.mark.parametrize("language_server", [Language.LUA], indirect=True)
def test_references_between_test_and_source(self, language_server: SolidLanguageServer) -> None:
"""Test finding references from test files to source files."""
# Check if test_calculator.lua references calculator module
test_symbols = language_server.request_document_symbols("tests/test_calculator.lua").get_all_symbols_and_roots()
assert test_symbols is not None
assert len(test_symbols) > 0
# The test file should have some content that references calculator
symbol_list = test_symbols[0] if isinstance(test_symbols, tuple) else test_symbols
assert len(symbol_list) > 0, "test_calculator.lua should have symbols"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/lua/test_lua_basic.py",
"license": "MIT License",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/markdown/test_markdown_basic.py | """
Basic integration tests for the markdown language server functionality.
These tests validate the functionality of the language server APIs
like request_document_symbols using the markdown test repository.
"""
import pytest
from serena.symbol import LanguageServerSymbol
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_types import SymbolKind
@pytest.mark.markdown
class TestMarkdownLanguageServerBasics:
"""Test basic functionality of the markdown language server."""
@pytest.mark.parametrize("language_server", [Language.MARKDOWN], indirect=True)
def test_markdown_language_server_initialization(self, language_server: SolidLanguageServer) -> None:
"""Test that markdown language server can be initialized successfully."""
assert language_server is not None
assert language_server.language == Language.MARKDOWN
@pytest.mark.parametrize("language_server", [Language.MARKDOWN], indirect=True)
def test_markdown_request_document_symbols(self, language_server: SolidLanguageServer) -> None:
"""Test request_document_symbols for markdown files."""
all_symbols, _root_symbols = language_server.request_document_symbols("README.md").get_all_symbols_and_roots()
heading_names = [symbol["name"] for symbol in all_symbols]
# Should detect headings from README.md
assert "Test Repository" in heading_names or len(all_symbols) > 0, "Should find at least one heading"
# Verify that markdown headings are remapped from String to Namespace
for symbol in all_symbols:
assert (
symbol["kind"] == SymbolKind.Namespace
), f"Heading '{symbol['name']}' should have kind Namespace, got {SymbolKind(symbol['kind']).name}"
@pytest.mark.parametrize("language_server", [Language.MARKDOWN], indirect=True)
def test_markdown_request_symbols_from_guide(self, language_server: SolidLanguageServer) -> None:
"""Test symbol detection in guide.md file."""
all_symbols, _root_symbols = language_server.request_document_symbols("guide.md").get_all_symbols_and_roots()
# At least some headings should be found
assert len(all_symbols) > 0, f"Should find headings in guide.md, found {len(all_symbols)}"
@pytest.mark.parametrize("language_server", [Language.MARKDOWN], indirect=True)
def test_markdown_request_symbols_from_api(self, language_server: SolidLanguageServer) -> None:
"""Test symbol detection in api.md file."""
all_symbols, _root_symbols = language_server.request_document_symbols("api.md").get_all_symbols_and_roots()
# Should detect headings from api.md
assert len(all_symbols) > 0, f"Should find headings in api.md, found {len(all_symbols)}"
@pytest.mark.parametrize("language_server", [Language.MARKDOWN], indirect=True)
def test_markdown_request_document_symbols_with_body(self, language_server: SolidLanguageServer) -> None:
"""Test request_document_symbols with body extraction."""
all_symbols, _root_symbols = language_server.request_document_symbols("README.md").get_all_symbols_and_roots()
# Should have found some symbols
assert len(all_symbols) > 0, "Should find symbols in README.md"
# Note: Not all markdown LSPs provide body information for symbols
# This test is more lenient and just verifies the API works
assert all_symbols is not None, "Should return symbols even if body extraction is limited"
@pytest.mark.parametrize("language_server", [Language.MARKDOWN], indirect=True)
def test_markdown_headings_not_low_level(self, language_server: SolidLanguageServer) -> None:
"""Test that markdown headings are not classified as low-level symbols.
Verifies the fix for the issue where Marksman's SymbolKind.String (15)
caused all headings to be filtered out of get_symbols_overview.
"""
all_symbols, _root_symbols = language_server.request_document_symbols("README.md").get_all_symbols_and_roots()
assert len(all_symbols) > 0, "Should find headings in README.md"
for symbol in all_symbols:
ls_symbol = LanguageServerSymbol(symbol)
assert (
not ls_symbol.is_low_level()
), f"Heading '{symbol['name']}' should not be low-level (kind={SymbolKind(symbol['kind']).name})"
@pytest.mark.parametrize("language_server", [Language.MARKDOWN], indirect=True)
def test_markdown_nested_headings_remapped(self, language_server: SolidLanguageServer) -> None:
"""Test that nested headings (h1-h5) are all remapped from String to Namespace."""
all_symbols, _root_symbols = language_server.request_document_symbols("api.md").get_all_symbols_and_roots()
# api.md has deeply nested headings (h1 through h5)
assert len(all_symbols) > 5, "api.md should have many headings"
for symbol in all_symbols:
assert symbol["kind"] == SymbolKind.Namespace, f"Nested heading '{symbol['name']}' should be remapped to Namespace"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/markdown/test_markdown_basic.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/nix/test_nix_basic.py | """
Tests for the Nix language server implementation using nixd.
These tests validate symbol finding and cross-file reference capabilities for Nix expressions.
"""
import platform
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from test.conftest import is_ci
# Skip all Nix tests on Windows as Nix doesn't support Windows
pytestmark = pytest.mark.skipif(platform.system() == "Windows", reason="Nix and nil are not available on Windows")
@pytest.mark.nix
class TestNixLanguageServer:
"""Test Nix language server symbol finding capabilities."""
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_find_symbols_in_default_nix(self, language_server: SolidLanguageServer) -> None:
"""Test finding specific symbols in default.nix."""
symbols = language_server.request_document_symbols("default.nix").get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
# Extract symbol names from the returned structure
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
symbol_names = {sym.get("name") for sym in symbol_list if isinstance(sym, dict)}
# Verify specific function exists
assert "makeGreeting" in symbol_names, "makeGreeting function not found"
# Verify exact attribute sets are found
expected_attrs = {"listUtils", "stringUtils"}
found_attrs = symbol_names & expected_attrs
assert found_attrs == expected_attrs, f"Expected exactly {expected_attrs}, found {found_attrs}"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_find_symbols_in_utils(self, language_server: SolidLanguageServer) -> None:
"""Test finding symbols in lib/utils.nix."""
symbols = language_server.request_document_symbols("lib/utils.nix").get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
symbol_names = {sym.get("name") for sym in symbol_list if isinstance(sym, dict)}
# Verify exact utility modules are found
expected_modules = {"math", "strings", "lists", "attrs"}
found_modules = symbol_names & expected_modules
assert found_modules == expected_modules, f"Expected exactly {expected_modules}, found {found_modules}"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_find_symbols_in_flake(self, language_server: SolidLanguageServer) -> None:
"""Test finding symbols in flake.nix."""
symbols = language_server.request_document_symbols("flake.nix").get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
symbol_names = {sym.get("name") for sym in symbol_list if isinstance(sym, dict)}
# Flakes must have either inputs or outputs
assert "inputs" in symbol_names or "outputs" in symbol_names, "Flake must have inputs or outputs"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_find_symbols_in_module(self, language_server: SolidLanguageServer) -> None:
"""Test finding symbols in a NixOS module."""
symbols = language_server.request_document_symbols("modules/example.nix").get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
symbol_names = {sym.get("name") for sym in symbol_list if isinstance(sym, dict)}
# NixOS modules must have either options or config
assert "options" in symbol_names or "config" in symbol_names, "Module must have options or config"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_find_references_within_file(self, language_server: SolidLanguageServer) -> None:
"""Test finding references within the same file."""
symbols = language_server.request_document_symbols("default.nix").get_all_symbols_and_roots()
assert symbols is not None
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
# Find makeGreeting function
greeting_symbol = None
for sym in symbol_list:
if sym.get("name") == "makeGreeting":
greeting_symbol = sym
break
assert greeting_symbol is not None, "makeGreeting function not found"
assert "range" in greeting_symbol, "Symbol must have range information"
range_start = greeting_symbol["range"]["start"]
refs = language_server.request_references("default.nix", range_start["line"], range_start["character"])
assert refs is not None
assert isinstance(refs, list)
# nixd finds at least the inherit statement (line 67)
assert len(refs) >= 1, f"Should find at least 1 reference to makeGreeting, found {len(refs)}"
# Verify makeGreeting is referenced at expected locations
if refs:
ref_lines = sorted([ref["range"]["start"]["line"] for ref in refs])
# Check if we found the inherit (line 67, 0-indexed: 66)
assert 66 in ref_lines, f"Should find makeGreeting inherit at line 67, found at lines {[l+1 for l in ref_lines]}"
@pytest.mark.xfail(is_ci, reason="Test is flaky") # TODO: Re-enable if the hover test becomes more stable (#1040)
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_hover_information(self, language_server: SolidLanguageServer) -> None:
"""Test hover information for symbols."""
# Get hover info for makeGreeting function
hover_info = language_server.request_hover("default.nix", 12, 5) # Position at makeGreeting
assert hover_info is not None, "Should provide hover information"
if isinstance(hover_info, dict) and len(hover_info) > 0:
# If hover info is provided, it should have proper structure
assert "contents" in hover_info or "value" in hover_info, "Hover should have contents or value"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_cross_file_references_utils_import(self, language_server: SolidLanguageServer) -> None:
"""Test finding cross-file references for imported utils."""
# Find references to 'utils' which is imported in default.nix from lib/utils.nix
# Line 10 in default.nix: utils = import ./lib/utils.nix { inherit lib; };
refs = language_server.request_references("default.nix", 9, 2) # Position of 'utils'
assert refs is not None
assert isinstance(refs, list)
# Should find references within default.nix where utils is used
default_refs = [ref for ref in refs if "default.nix" in ref.get("uri", "")]
# utils is: imported (line 10), used in listUtils.unique (line 24), inherited in exports (line 69)
assert len(default_refs) >= 2, f"Should find at least 2 references to utils in default.nix, found {len(default_refs)}"
# Verify utils is referenced at expected locations (0-indexed)
if default_refs:
ref_lines = sorted([ref["range"]["start"]["line"] for ref in default_refs])
# Check for key references - at least the import (line 10) or usage (line 24)
assert (
9 in ref_lines or 23 in ref_lines
), f"Should find utils import or usage, found references at lines {[l+1 for l in ref_lines]}"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_verify_imports_exist(self, language_server: SolidLanguageServer) -> None:
"""Verify that our test files have proper imports set up."""
# Verify that default.nix imports utils from lib/utils.nix
symbols = language_server.request_document_symbols("default.nix").get_all_symbols_and_roots()
assert symbols is not None
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
# Check that makeGreeting exists (defined in default.nix)
symbol_names = {sym.get("name") for sym in symbol_list if isinstance(sym, dict)}
assert "makeGreeting" in symbol_names, "makeGreeting should be found in default.nix"
# Verify lib/utils.nix has the expected structure
utils_symbols = language_server.request_document_symbols("lib/utils.nix").get_all_symbols_and_roots()
assert utils_symbols is not None
utils_list = utils_symbols[0] if isinstance(utils_symbols, tuple) else utils_symbols
utils_names = {sym.get("name") for sym in utils_list if isinstance(sym, dict)}
# Verify key functions exist in utils
assert "math" in utils_names, "math should be found in lib/utils.nix"
assert "strings" in utils_names, "strings should be found in lib/utils.nix"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_go_to_definition_cross_file(self, language_server: SolidLanguageServer) -> None:
"""Test go-to-definition from default.nix to lib/utils.nix."""
# Line 24 in default.nix: unique = utils.lists.unique;
# Test go-to-definition for 'utils'
definitions = language_server.request_definition("default.nix", 23, 14) # Position of 'utils'
assert definitions is not None
assert isinstance(definitions, list)
if len(definitions) > 0:
# Should point to the import statement or utils.nix
assert any(
"utils" in def_item.get("uri", "") or "default.nix" in def_item.get("uri", "") for def_item in definitions
), "Definition should relate to utils import or utils.nix file"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_definition_navigation_in_flake(self, language_server: SolidLanguageServer) -> None:
"""Test definition navigation in flake.nix."""
# Test that we can navigate to definitions within flake.nix
# Line 69: default = hello-custom;
definitions = language_server.request_definition("flake.nix", 68, 20) # Position of 'hello-custom'
assert definitions is not None
assert isinstance(definitions, list)
# nixd should find the definition of hello-custom in the same file
if len(definitions) > 0:
assert any(
"flake.nix" in def_item.get("uri", "") for def_item in definitions
), "Should find hello-custom definition in flake.nix"
@pytest.mark.parametrize("language_server", [Language.NIX], indirect=True)
def test_full_symbol_tree(self, language_server: SolidLanguageServer) -> None:
"""Test that full symbol tree is not empty."""
symbols = language_server.request_full_symbol_tree()
assert symbols is not None
assert len(symbols) > 0, "Symbol tree should not be empty"
# The tree should have at least one root node
root = symbols[0]
assert isinstance(root, dict), "Root should be a dict"
assert "name" in root, "Root should have a name"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/nix/test_nix_basic.py",
"license": "MIT License",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/perl/test_perl_basic.py | import platform
from pathlib import Path
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
@pytest.mark.perl
@pytest.mark.skipif(platform.system() == "Windows", reason="Perl::LanguageServer does not support native Windows operation")
class TestPerlLanguageServer:
"""
Tests for Perl::LanguageServer integration.
Perl::LanguageServer provides comprehensive LSP support for Perl including:
- Document symbols (functions, variables)
- Go to definition (including cross-file)
- Find references (including cross-file) - this was not available in PLS
"""
@pytest.mark.parametrize("language_server", [Language.PERL], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.PERL], indirect=True)
def test_ls_is_running(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test that the language server starts and stops successfully."""
# The fixture already handles start and stop
assert language_server.is_running()
assert Path(language_server.language_server.repository_root_path).resolve() == repo_path.resolve()
@pytest.mark.parametrize("language_server", [Language.PERL], indirect=True)
def test_document_symbols(self, language_server: SolidLanguageServer) -> None:
"""Test that document symbols are correctly identified."""
# Request document symbols
all_symbols, _ = language_server.request_document_symbols("main.pl").get_all_symbols_and_roots()
assert all_symbols, "Expected to find symbols in main.pl"
assert len(all_symbols) > 0, "Expected at least one symbol"
# DEBUG: Print all symbols
print("\n=== All symbols in main.pl ===")
for s in all_symbols:
line = s.get("range", {}).get("start", {}).get("line", "?")
print(f"Line {line}: {s.get('name')} (kind={s.get('kind')})")
# Check that we can find function symbols
function_symbols = [s for s in all_symbols if s.get("kind") == 12] # 12 = Function/Method
assert len(function_symbols) >= 2, f"Expected at least 2 functions (greet, use_helper_function), found {len(function_symbols)}"
function_names = [s.get("name") for s in function_symbols]
assert "greet" in function_names, f"Expected 'greet' function in symbols, found: {function_names}"
assert "use_helper_function" in function_names, f"Expected 'use_helper_function' in symbols, found: {function_names}"
# @pytest.mark.skip(reason="Perl::LanguageServer cross-file definition tracking needs configuration")
@pytest.mark.parametrize("language_server", [Language.PERL], indirect=True)
def test_find_definition_across_files(self, language_server: SolidLanguageServer) -> None:
    """Go-to-definition from a call site in main.pl should resolve into helper.pl."""
    # Cursor on line 18 of main.pl (0-indexed 17), column 0.
    definition_location_list = language_server.request_definition("main.pl", 17, 0)
    assert len(definition_location_list) == 1
    definition_location = definition_location_list[0]
    print(f"Found definition: {definition_location}")
    assert definition_location["uri"].endswith("helper.pl")
    assert definition_location["range"]["start"]["line"] == 4  # definition on line 5 of helper.pl (0-indexed 4)
@pytest.mark.parametrize("language_server", [Language.PERL], indirect=True)
def test_find_references_across_files(self, language_server: SolidLanguageServer) -> None:
    """Test finding references to a function across multiple files."""
    # Cursor on helper_function's definition in helper.pl (0-indexed line 4, col 5).
    reference_locations = language_server.request_references("helper.pl", 4, 5)
    assert len(reference_locations) >= 2, f"Expected at least 2 references to helper_function, found {len(reference_locations)}"
    # Narrow to the call sites that live in main.pl.
    main_pl_refs = [ref for ref in reference_locations if ref["uri"].endswith("main.pl")]
    assert len(main_pl_refs) >= 2, f"Expected at least 2 references in main.pl, found {len(main_pl_refs)}"
    main_pl_lines = sorted(ref["range"]["start"]["line"] for ref in main_pl_refs)
    # The two known call sites sit on lines 18 and 21 (0-indexed 17 and 20).
    assert 17 in main_pl_lines, f"Expected reference at line 18 (0-indexed 17), found: {main_pl_lines}"
    assert 20 in main_pl_lines, f"Expected reference at line 21 (0-indexed 20), found: {main_pl_lines}"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/perl/test_perl_basic.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/r/test_r_basic.py | """
Basic tests for R Language Server integration
"""
import os
from pathlib import Path
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
@pytest.mark.r
class TestRLanguageServer:
    """Test basic functionality of the R language server."""

    @pytest.mark.parametrize("language_server", [Language.R], indirect=True)
    @pytest.mark.parametrize("repo_path", [Language.R], indirect=True)
    def test_server_initialization(self, language_server: SolidLanguageServer, repo_path: Path):
        """Test that the R language server initializes properly."""
        assert language_server is not None
        assert language_server.language_id == "r"
        assert language_server.is_running()
        # The server must be rooted at the test repository checkout.
        assert Path(language_server.language_server.repository_root_path).resolve() == repo_path.resolve()

    @pytest.mark.parametrize("language_server", [Language.R], indirect=True)
    def test_symbol_retrieval(self, language_server: SolidLanguageServer):
        """Test R document symbol extraction."""
        all_symbols, _root_symbols = language_server.request_document_symbols(os.path.join("R", "utils.R")).get_all_symbols_and_roots()
        # Should find the three exported functions (LSP SymbolKind 12 = Function).
        function_symbols = [s for s in all_symbols if s.get("kind") == 12]
        assert len(function_symbols) >= 3
        # Check that we found the expected functions
        function_names = {s.get("name") for s in function_symbols}
        expected_functions = {"calculate_mean", "process_data", "create_data_frame"}
        assert expected_functions.issubset(function_names), f"Expected functions {expected_functions} but found {function_names}"

    @pytest.mark.parametrize("language_server", [Language.R], indirect=True)
    def test_find_definition_across_files(self, language_server: SolidLanguageServer):
        """Test finding function definitions across files."""
        analysis_file = os.path.join("examples", "analysis.R")
        # analysis.R line 7 calls create_data_frame(n = 50); the function is
        # defined in R/utils.R. Cursor on the call site (0-indexed line 6).
        definition_location_list = language_server.request_definition(analysis_file, 6, 17)
        assert definition_location_list, f"Expected non-empty definition_location_list but got {definition_location_list=}"
        assert len(definition_location_list) >= 1
        definition_location = definition_location_list[0]
        assert definition_location["uri"].endswith("utils.R")
        # Definition should be around line 37 (0-indexed: 36) where create_data_frame is defined
        assert definition_location["range"]["start"]["line"] >= 35

    @pytest.mark.parametrize("language_server", [Language.R], indirect=True)
    def test_find_references_across_files(self, language_server: SolidLanguageServer):
        """Test finding function references across files."""
        analysis_file = os.path.join("examples", "analysis.R")
        # Test from the usage side: analysis.R line 13 calls
        # calculate_mean(clean_data$value) (0-indexed line 12).
        references = language_server.request_references(analysis_file, 12, 15)
        assert references, f"Expected non-empty references for calculate_mean but got {references=}"
        # Must find the definition in utils.R (cross-file reference).
        reference_files = [ref["uri"] for ref in references]
        assert any(uri.endswith("utils.R") for uri in reference_files), "Cross-file reference to definition in utils.R not found"
        # Verify we actually found the right location in utils.R.
        utils_refs = [ref for ref in references if ref["uri"].endswith("utils.R")]
        assert len(utils_refs) >= 1, "Should find at least one reference in utils.R"
        utils_ref = utils_refs[0]
        # calculate_mean is defined on line 6 of utils.R (0-indexed: line 5).
        assert (
            utils_ref["range"]["start"]["line"] == 5
        ), f"Expected reference at line 5 in utils.R, got line {utils_ref['range']['start']['line']}"

    def test_file_matching(self):
        """Test that R files are properly matched."""
        # Language is already imported at module level; the previous
        # function-local re-import was redundant and has been removed.
        matcher = Language.R.get_source_fn_matcher()
        assert matcher.is_relevant_filename("script.R")
        assert matcher.is_relevant_filename("analysis.r")
        assert not matcher.is_relevant_filename("script.py")
        assert not matcher.is_relevant_filename("README.md")

    def test_r_language_enum(self):
        """Test R language enum value."""
        assert Language.R == "r"
        assert str(Language.R) == "r"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/r/test_r_basic.py",
"license": "MIT License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/rego/test_rego_basic.py | """Tests for Rego language server (Regal) functionality."""
import os
import pytest
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_utils import SymbolUtils
@pytest.mark.rego
class TestRegoLanguageServer:
    """Test Regal language server functionality for Rego."""

    @staticmethod
    def _symbol_list(symbols):
        """Unwrap get_all_symbols_and_roots() output to a flat symbol list.

        The call may return a (symbols, roots) tuple or a plain list; this
        unwrapping was previously duplicated inline in every test.
        """
        return symbols[0] if isinstance(symbols, tuple) else symbols

    @classmethod
    def _symbol_names(cls, symbols):
        """Collect the names of all dict-shaped symbols in the unwrapped list."""
        return {sym.get("name") for sym in cls._symbol_list(symbols) if isinstance(sym, dict)}

    @pytest.mark.parametrize("language_server", [Language.REGO], indirect=True)
    def test_request_document_symbols_authz(self, language_server: SolidLanguageServer) -> None:
        """Test that document symbols can be retrieved from authz.rego."""
        file_path = os.path.join("policies", "authz.rego")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        assert symbols is not None
        assert len(symbols) > 0
        symbol_names = self._symbol_names(symbols)
        # Verify specific Rego rules/functions are found
        assert "allow" in symbol_names, "allow rule not found"
        assert "allow_read" in symbol_names, "allow_read rule not found"
        assert "is_admin" in symbol_names, "is_admin function not found"
        assert "admin_roles" in symbol_names, "admin_roles constant not found"

    @pytest.mark.parametrize("language_server", [Language.REGO], indirect=True)
    def test_request_document_symbols_helpers(self, language_server: SolidLanguageServer) -> None:
        """Test that document symbols can be retrieved from helpers.rego."""
        file_path = os.path.join("utils", "helpers.rego")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        assert symbols is not None
        assert len(symbols) > 0
        symbol_names = self._symbol_names(symbols)
        # Verify specific helper functions are found
        assert "is_valid_user" in symbol_names, "is_valid_user function not found"
        assert "is_valid_email" in symbol_names, "is_valid_email function not found"
        assert "is_valid_username" in symbol_names, "is_valid_username function not found"

    @pytest.mark.parametrize("language_server", [Language.REGO], indirect=True)
    def test_find_symbol_full_tree(self, language_server: SolidLanguageServer) -> None:
        """Test finding symbols across entire workspace using symbol tree."""
        symbols = language_server.request_full_symbol_tree()
        # Use SymbolUtils to check for expected symbols from both policy files.
        assert SymbolUtils.symbol_tree_contains_name(symbols, "allow"), "allow rule not found in symbol tree"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "is_valid_user"), "is_valid_user function not found in symbol tree"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "is_admin"), "is_admin function not found in symbol tree"

    @pytest.mark.parametrize("language_server", [Language.REGO], indirect=True)
    def test_request_definition_within_file(self, language_server: SolidLanguageServer) -> None:
        """Test go-to-definition for symbols within the same file."""
        # In authz.rego, is_admin references admin_roles (defined earlier in the file).
        file_path = os.path.join("policies", "authz.rego")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        symbol_list = self._symbol_list(symbols)
        is_admin_symbol = next((s for s in symbol_list if s.get("name") == "is_admin"), None)
        assert is_admin_symbol is not None, "is_admin symbol should always be found in authz.rego"
        assert "range" in is_admin_symbol, "is_admin symbol should have a range"
        # Query one line into is_admin's body, where admin_roles is referenced:
        #   admin_roles[_] == user.role
        line = is_admin_symbol["range"]["start"]["line"] + 1
        char = 4  # Position at "admin_roles"
        definitions = language_server.request_definition(file_path, line, char)
        assert definitions is not None and len(definitions) > 0, "Should find definition for admin_roles"
        # Verify the definition points to admin_roles in the same file
        assert any("authz.rego" in defn.get("relativePath", "") for defn in definitions), "Definition should be in authz.rego"

    @pytest.mark.parametrize("language_server", [Language.REGO], indirect=True)
    def test_request_definition_across_files(self, language_server: SolidLanguageServer) -> None:
        """Test go-to-definition for symbols across files (cross-file references)."""
        # authz.rego line 11 calls utils.is_valid_user, defined in utils/helpers.rego.
        file_path = os.path.join("policies", "authz.rego")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        symbol_list = self._symbol_list(symbols)
        allow_symbol = next((s for s in symbol_list if s.get("name") == "allow"), None)
        assert allow_symbol is not None, "allow symbol should always be found in authz.rego"
        assert "range" in allow_symbol, "allow symbol should have a range"
        # Line 11 (0-indexed 10): utils.is_valid_user(input.user)
        line = 10
        char = 7  # Position at "is_valid_user" in "utils.is_valid_user"
        definitions = language_server.request_definition(file_path, line, char)
        assert definitions is not None and len(definitions) > 0, "Should find cross-file definition for is_valid_user"
        # Verify the definition points to helpers.rego (cross-file)
        assert any(
            "helpers.rego" in defn.get("relativePath", "") for defn in definitions
        ), "Definition should be in utils/helpers.rego (cross-file reference)"

    @pytest.mark.parametrize("language_server", [Language.REGO], indirect=True)
    def test_find_symbols_validation(self, language_server: SolidLanguageServer) -> None:
        """Test finding symbols in validation.rego which has imports."""
        file_path = os.path.join("policies", "validation.rego")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        assert symbols is not None
        assert len(symbols) > 0
        symbol_names = self._symbol_names(symbols)
        # Verify expected symbols
        assert "validate_user_input" in symbol_names, "validate_user_input rule not found"
        assert "has_valid_credentials" in symbol_names, "has_valid_credentials function not found"
        assert "validate_request" in symbol_names, "validate_request rule not found"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/rego/test_rego_basic.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/ruby/test_ruby_basic.py | import os
from pathlib import Path
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_utils import SymbolUtils
@pytest.mark.ruby
class TestRubyLanguageServer:
    """Basic symbol and navigation checks against the Ruby test repository."""

    @pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
    def test_find_symbol(self, language_server: SolidLanguageServer) -> None:
        """The workspace symbol tree must contain the known top-level names."""
        tree = language_server.request_full_symbol_tree()
        for expected in ("DemoClass", "helper_function", "print_value"):
            assert SymbolUtils.symbol_tree_contains_name(tree, expected), f"{expected} not found in symbol tree"

    @pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
    def test_find_referencing_symbols(self, language_server: SolidLanguageServer) -> None:
        """main.rb's document symbols must include helper_function."""
        file_path = os.path.join("main.rb")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        helper_symbol = next((sym for sym in symbols[0] if sym.get("name") == "helper_function"), None)
        print(helper_symbol)
        assert helper_symbol is not None, "Could not find 'helper_function' symbol in main.rb"

    @pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
    @pytest.mark.parametrize("repo_path", [Language.RUBY], indirect=True)
    def test_find_definition_across_files(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
        """Calculator#add, called from main.rb line 17, is defined in lib.rb."""
        # Cursor on the `add` call at line 17 (0-indexed 16), column 17:
        #   Calculator.new.add(demo.value, 10)
        locations = language_server.request_definition(str(repo_path / "main.rb"), 16, 17)
        assert len(locations) == 1
        definition_location = locations[0]
        print(f"Found definition: {definition_location}")
        assert definition_location["uri"].endswith("lib.rb")
        # `add` is declared on line 2 of lib.rb (0-indexed 1).
        assert definition_location["range"]["start"]["line"] == 1
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/ruby/test_ruby_basic.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/ruby/test_ruby_symbol_retrieval.py | """
Tests for the Ruby language server symbol-related functionality.
These tests focus on the following methods:
- request_containing_symbol
- request_referencing_symbols
- request_defining_symbol
- request_document_symbols integration
"""
import os
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_types import SymbolKind
pytestmark = pytest.mark.ruby
class TestRubyLanguageServerSymbols:
"""Test the Ruby language server's symbol-related functionality."""
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_containing_symbol_method(self, language_server: SolidLanguageServer) -> None:
    """Test request_containing_symbol for a method."""
    # Position (11, 10) sits inside the body of create_user in services.rb.
    file_path = os.path.join("services.rb")
    containing_symbol = language_server.request_containing_symbol(file_path, 11, 10, include_body=True)
    assert containing_symbol is not None, "Should find containing symbol for method position"
    assert containing_symbol["name"] == "create_user", f"Expected 'create_user', got '{containing_symbol['name']}'"
    assert (
        containing_symbol["kind"] == SymbolKind.Method.value
    ), f"Expected Method kind ({SymbolKind.Method.value}), got {containing_symbol['kind']}"
    # The symbol must carry a full location with both range endpoints.
    assert "location" in containing_symbol, "Containing symbol should have location information"
    loc = containing_symbol["location"]
    assert "range" in loc, "Location should contain range information"
    assert "start" in loc["range"], "Range should have start position"
    assert "end" in loc["range"], "Range should have end position"
    # containerName is optional (ruby-lsp omits it); when present it must name UserService.
    if "containerName" in containing_symbol:
        accepted_containers = [
            "Services::UserService",
            "UserService",
        ]
        assert containing_symbol["containerName"] in accepted_containers, f"Expected UserService container, got '{containing_symbol['containerName']}'"
    # Body text is optional too; when present it must hold the method definition.
    if "body" in containing_symbol:
        body_text = containing_symbol["body"].get_text()
        assert "def create_user" in body_text, "Method body should contain method definition"
        assert len(body_text.strip()) > 0, "Method body should not be empty"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_containing_symbol_class(self, language_server: SolidLanguageServer) -> None:
    """Test request_containing_symbol for a class."""
    # Position (5, 5) is inside UserService but outside any of its methods.
    file_path = os.path.join("services.rb")
    containing_symbol = language_server.request_containing_symbol(file_path, 5, 5)
    assert containing_symbol is not None, "Should find containing symbol for class position"
    assert containing_symbol["name"] == "UserService", f"Expected 'UserService', got '{containing_symbol['name']}'"
    assert (
        containing_symbol["kind"] == SymbolKind.Class.value
    ), f"Expected Class kind ({SymbolKind.Class.value}), got {containing_symbol['kind']}"
    # The class symbol must carry a complete location.
    assert "location" in containing_symbol, "Class symbol should have location information"
    loc = containing_symbol["location"]
    assert "range" in loc, "Location should contain range"
    assert "start" in loc["range"] and "end" in loc["range"], "Range should have start and end positions"
    # containerName is optional; when present the class must sit in the Services module.
    if "containerName" in containing_symbol:
        assert (
            containing_symbol["containerName"] == "Services"
        ), f"Expected 'Services' as container, got '{containing_symbol['containerName']}'"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_containing_symbol_module(self, language_server: SolidLanguageServer) -> None:
    """Test request_containing_symbol for a module context."""
    # Test that we can find the Services module in document symbols.
    file_path = os.path.join("services.rb")
    symbols, _roots = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
    # NOTE: sibling tests in this class compare kinds via SymbolKind.X.value;
    # use the same convention here so the comparison also holds when "kind"
    # is reported as a plain int rather than an enum member.
    services_module = None
    for symbol in symbols:
        if symbol.get("name") == "Services" and symbol.get("kind") == SymbolKind.Module.value:
            services_module = symbol
            break
    assert services_module is not None, "Services module not found in document symbols"
    # Test that UserService class has Services as container:
    # position (4, 8) is inside the UserService class.
    containing_symbol = language_server.request_containing_symbol(file_path, 4, 8)
    assert containing_symbol is not None
    assert containing_symbol["name"] == "UserService"
    assert containing_symbol["kind"] == SymbolKind.Class.value
    # ruby-lsp doesn't provide containerName, but Solargraph does
    if "containerName" in containing_symbol:
        assert containing_symbol.get("containerName") == "Services"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_containing_symbol_nested_class(self, language_server: SolidLanguageServer) -> None:
    """Test request_containing_symbol with nested classes."""
    file_path = os.path.join("nested.rb")
    # Position (20, 10) is inside NestedClass#find_me; the innermost
    # containing symbol (the method, not the class) must win.
    containing_symbol = language_server.request_containing_symbol(file_path, 20, 10)
    assert containing_symbol is not None
    assert containing_symbol["name"] == "find_me"
    # Compare via .value for consistency with the other kind checks in this class.
    assert containing_symbol["kind"] == SymbolKind.Method.value
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_containing_symbol_none(self, language_server: SolidLanguageServer) -> None:
    """Test request_containing_symbol for a position with no containing symbol."""
    # Line 1 of services.rb is a require statement, outside any class or method.
    file_path = os.path.join("services.rb")
    result = language_server.request_containing_symbol(file_path, 1, 5)
    # Either None or an empty dict is acceptable for "no containing symbol".
    assert result is None or result == {}
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_referencing_symbols_method(self, language_server: SolidLanguageServer) -> None:
    """Test request_referencing_symbols for a method."""
    file_path = os.path.join("services.rb")
    symbols, _roots = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
    # Ruby returns a flat symbol list; locate create_user in it.
    create_user_symbol = next((sym for sym in symbols if sym.get("name") == "create_user"), None)
    if not create_user_symbol or "selectionRange" not in create_user_symbol:
        pytest.skip("create_user symbol or its selectionRange not found")
    sel_start = create_user_symbol["selectionRange"]["start"]
    refs = language_server.request_referencing_symbols(file_path, sel_start["line"], sel_start["character"])
    ref_symbols = [ref.symbol for ref in refs]
    # The simple fixture repo may contain no references; only validate shape.
    for symbol in ref_symbols:
        assert "name" in symbol
        assert "kind" in symbol
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_referencing_symbols_class(self, language_server: SolidLanguageServer) -> None:
    """Test request_referencing_symbols for a class."""
    file_path = os.path.join("models.rb")
    symbols, _roots = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
    # Locate the User class in the flat document-symbol list.
    user_symbol = next((sym for sym in symbols if sym.get("name") == "User"), None)
    if not user_symbol or "selectionRange" not in user_symbol:
        pytest.skip("User symbol or its selectionRange not found")
    sel_start = user_symbol["selectionRange"]["start"]
    refs = language_server.request_referencing_symbols(file_path, sel_start["line"], sel_start["character"])
    # Validate the shape of every referencing symbol that comes back.
    for symbol in (ref.symbol for ref in refs):
        assert "name" in symbol
        assert "kind" in symbol
        # When a location is attached it must be a well-formed range.
        if "location" in symbol and "range" in symbol["location"]:
            assert "start" in symbol["location"]["range"]
            assert "end" in symbol["location"]["range"]
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_defining_symbol_variable(self, language_server: SolidLanguageServer) -> None:
    """Test request_defining_symbol for a variable usage."""
    # Position (12, 10) in services.rb is on a @users variable usage.
    file_path = os.path.join("services.rb")
    defining_symbol = language_server.request_defining_symbol(file_path, 12, 10)
    # Some Ruby language servers don't resolve ivar definitions well;
    # only check the result's shape when something is returned.
    if defining_symbol is not None:
        assert "name" in defining_symbol
        assert "kind" in defining_symbol
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_defining_symbol_class(self, language_server: SolidLanguageServer) -> None:
    """Test request_defining_symbol for a class reference."""
    # Position (11, 15) in services.rb references the User class.
    file_path = os.path.join("services.rb")
    defining_symbol = language_server.request_defining_symbol(file_path, 11, 15)
    # Resolution quality varies between Ruby language servers; accept any
    # named symbol — it might be "User" or the enclosing method.
    if defining_symbol is not None:
        assert "name" in defining_symbol
        assert defining_symbol.get("name") is not None
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_defining_symbol_none(self, language_server: SolidLanguageServer) -> None:
    """Test request_defining_symbol for a position with no symbol."""
    # Line 3 of services.rb is likely a blank line or comment.
    file_path = os.path.join("services.rb")
    result = language_server.request_defining_symbol(file_path, 3, 0)
    # No symbol at this position: None or an empty dict are both acceptable.
    assert result is None or result == {}
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_defining_symbol_nested_class(self, language_server: SolidLanguageServer) -> None:
    """Test request_defining_symbol for nested class access."""
    # Position (44, 25) in nested.rb references NestedClass.
    file_path = os.path.join("nested.rb")
    defining_symbol = language_server.request_defining_symbol(file_path, 44, 25)
    # Nested constant resolution is challenging for many language servers;
    # tolerate resolution to either the nested or the outer class.
    if defining_symbol is not None:
        assert "name" in defining_symbol
        assert defining_symbol.get("name") in ["NestedClass", "OuterClass"]
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_symbol_methods_integration(self, language_server: SolidLanguageServer) -> None:
    """Containing- and defining-symbol lookups should agree on the same position."""
    file_path = os.path.join("models.rb")
    # Position (8, 5) is inside the initialize method of models.rb.
    containing_symbol = language_server.request_containing_symbol(file_path, 8, 5)
    if containing_symbol is not None:
        assert containing_symbol["name"] == "initialize"
        # The defining symbol at the same position should be the same method.
        defining_symbol = language_server.request_defining_symbol(file_path, 8, 5)
        if defining_symbol is not None:
            assert defining_symbol["name"] == "initialize"
            # Both lookups must report the same symbol kind.
            assert defining_symbol["kind"] == containing_symbol["kind"]
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_symbol_tree_structure_basic(self, language_server: SolidLanguageServer) -> None:
    """Test that the symbol tree structure includes Ruby symbols."""
    repo_structure = language_server.request_full_symbol_tree()
    assert len(repo_structure) >= 1
    # At least one of our Ruby fixture files must appear as a child node.
    ruby_file_names = {"models", "services", "nested"}
    found_ruby_files = any(
        child.get("name") in ruby_file_names
        for root in repo_structure
        for child in root.get("children", [])
    )
    assert found_ruby_files, "Ruby files not found in symbol tree"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_document_symbols_detailed(self, language_server: SolidLanguageServer) -> None:
    """Test document symbols for detailed Ruby file structure."""
    file_path = os.path.join("models.rb")
    symbols, roots = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
    assert len(symbols) > 0 or len(roots) > 0
    # Collect names from the flat list (falling back to roots) plus one
    # level of children.
    source_symbols = symbols if symbols else roots
    symbol_names = set()
    for symbol in source_symbols:
        symbol_names.add(symbol.get("name"))
        for child in symbol.get("children", []):
            symbol_names.add(child.get("name"))
    # We should find at least some of our defined classes/methods.
    expected_symbols = {"User", "Item", "Order", "ItemHelpers"}
    found_symbols = symbol_names.intersection(expected_symbols)
    assert len(found_symbols) > 0, f"Expected symbols not found. Found: {symbol_names}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_module_and_class_hierarchy(self, language_server: SolidLanguageServer) -> None:
    """Test symbol detection for modules and nested class hierarchies."""
    file_path = os.path.join("nested.rb")
    symbols, roots = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
    assert len(symbols) > 0 or len(roots) > 0
    # Gather names down to grandchildren to cover the nested structure.
    symbol_names = set()
    for symbol in (symbols if symbols else roots):
        symbol_names.add(symbol.get("name"))
        for child in symbol.get("children", []):
            symbol_names.add(child.get("name"))
            for grandchild in child.get("children", []):
                symbol_names.add(grandchild.get("name"))
    # Should find the outer class at minimum.
    assert "OuterClass" in symbol_names, f"OuterClass not found in symbols: {symbol_names}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_references_to_variables(self, language_server: SolidLanguageServer) -> None:
    """Test request_referencing_symbols for a variable with detailed verification.

    Looks up references to the @status variable in DataContainer (around
    line 9 of variables.rb) and validates their locations when any are
    reported. Some servers report no ivar references for this fixture, in
    which case the test passes vacuously.
    """
    file_path = os.path.join("variables.rb")
    ref_symbols = [ref.symbol for ref in language_server.request_referencing_symbols(file_path, 8, 4)]
    if not ref_symbols:
        # Nothing reported: nothing to verify. (The previous re-assertion of
        # len(ref_symbols) > 0 inside this branch was tautological; dropped.)
        return
    # References must carry location information.
    ref_with_locations = [ref for ref in ref_symbols if "location" in ref and "range" in ref["location"]]
    assert len(ref_with_locations) > 0, "References should include location information"
    ref_lines = [ref["location"]["range"]["start"]["line"] for ref in ref_with_locations]
    assert all(line >= 0 for line in ref_lines), "Reference lines should be valid"
    # @status is read/written within these approximate 0-indexed line ranges.
    expected_line_ranges = [(20, 40), (45, 70)]
    found_in_expected_range = any(any(start <= line <= end for start, end in expected_line_ranges) for line in ref_lines)
    assert found_in_expected_range, f"Expected references in ranges {expected_line_ranges}, found lines: {ref_lines}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_referencing_symbols_parameter(self, language_server: SolidLanguageServer) -> None:
    """Test request_referencing_symbols for a method parameter."""
    file_path = os.path.join("services.rb")
    symbols, _roots = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
    # Locate get_user in the flat document-symbol list.
    get_user_symbol = next((sym for sym in symbols if sym.get("name") == "get_user"), None)
    if not get_user_symbol or "selectionRange" not in get_user_symbol:
        pytest.skip("get_user symbol or its selectionRange not found")
    # Query one line below the method's selection start, inside its body.
    method_start_line = get_user_symbol["selectionRange"]["start"]["line"]
    refs = language_server.request_referencing_symbols(file_path, method_start_line + 1, 10)
    # Validate the shape of every referencing symbol that comes back.
    for symbol in (ref.symbol for ref in refs):
        assert "name" in symbol, "Symbol should have name"
        assert "kind" in symbol, "Symbol should have kind"
        if "location" in symbol and "range" in symbol["location"]:
            range_info = symbol["location"]["range"]
            assert "start" in range_info, "Range should have start"
            assert "end" in range_info, "Range should have end"
            # References can legitimately appear before the definition line.
            assert range_info["start"]["line"] >= 0, "Reference line should be valid"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_referencing_symbols_none(self, language_server: SolidLanguageServer) -> None:
    """Test request_referencing_symbols for a position with no symbol.

    Positions on comment/require lines should produce no (or very few)
    referencing symbols; language servers that raise on invalid positions are
    tolerated as long as the error is on-topic.
    """
    file_path = os.path.join("services.rb")
    # Try multiple positions that should have no symbols
    test_positions = [(1, 0), (2, 0)]  # Comment/require lines
    for line, char in test_positions:
        try:
            ref_symbols = [ref.symbol for ref in language_server.request_referencing_symbols(file_path, line, char)]
        except Exception as e:
            # Some language servers throw exceptions for invalid positions, which is acceptable
            assert (
                "symbol" in str(e).lower() or "position" in str(e).lower() or "reference" in str(e).lower()
            ), f"Exception should be related to symbol/position/reference issues, got: {e}"
        else:
            # BUG FIX: this assertion used to live inside the try-block, so its own
            # AssertionError (whose message contains "references") was caught and
            # re-validated by the except branch above, silently masking failures.
            if ref_symbols:
                # Some language servers might return minimal info, verify it's reasonable
                assert len(ref_symbols) <= 3, f"Expected few/no references at line {line}, got {len(ref_symbols)}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_dir_overview(self, language_server: SolidLanguageServer) -> None:
    """Directory overview of the repo root should include the main Ruby files and their symbols."""
    overview = language_server.request_dir_overview(".")
    # Each expected file should appear somewhere among the overview keys.
    expected_files = ["services.rb", "models.rb", "variables.rb", "nested.rb"]
    found_files = []
    for key in overview.keys():
        match = next((name for name in expected_files if name in key), None)
        if match is not None:
            found_files.append(match)
    assert len(found_files) >= 2, f"Should find at least 2 expected files, found: {found_files}"
    # Drill into services.rb (when present) and check its symbols.
    services_file_key = next((key for key in overview.keys() if "services.rb" in key), None)
    if services_file_key is None:
        return
    services_symbols = overview[services_file_key]
    assert len(services_symbols) > 0, "services.rb should have symbols"
    # Entries may be (name, ...) tuples or dict-like objects depending on the server.
    symbol_names = [entry[0] for entry in services_symbols if isinstance(entry, tuple) and len(entry) > 0]
    if not symbol_names:
        symbol_names = [entry.get("name") for entry in services_symbols if hasattr(entry, "get")]
    expected_symbols = ["Services", "UserService", "ItemService"]
    found_expected = [name for name in expected_symbols if name in symbol_names]
    assert len(found_expected) >= 1, f"Should find at least one expected symbol, found: {found_expected} in {symbol_names}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_document_overview(self, language_server: SolidLanguageServer) -> None:
    """Document overview of examples/user_management.rb should expose its classes and functions."""
    file_path = os.path.join("examples", "user_management.rb")
    overview = language_server.request_document_overview(file_path)
    assert len(overview) > 0, "Document overview should contain symbols"
    # Overview entries may be tuples, dict-like objects, or bare strings.
    symbol_names = set()
    for entry in overview:
        if isinstance(entry, tuple) and len(entry) > 0:
            symbol_names.add(entry[0])
        elif hasattr(entry, "get"):
            symbol_names.add(entry.get("name"))
        elif isinstance(entry, str):
            symbol_names.add(entry)
    # At least some of the classes/methods defined in the fixture must be present.
    expected_symbols = {"UserStats", "UserManager", "process_user_data", "main"}
    found_symbols = symbol_names & expected_symbols
    assert len(found_symbols) > 0, f"Expected to find some symbols from {expected_symbols}, found: {symbol_names}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_containing_symbol_variable(self, language_server: SolidLanguageServer) -> None:
    """The symbol containing a variable assignment should be a method/class-like construct."""
    file_path = os.path.join("variables.rb")
    # Position inside an assignment such as `@status = "pending"`.
    containing_symbol = language_server.request_containing_symbol(file_path, 10, 5)
    if containing_symbol is None:
        return  # No container reported; nothing to validate.
    assert "name" in containing_symbol, "Containing symbol should have a name"
    assert "kind" in containing_symbol, "Containing symbol should have a kind"
    # The container should be a method, class, or similar construct.
    allowed = [k.value for k in (SymbolKind.Method, SymbolKind.Class, SymbolKind.Function, SymbolKind.Constructor)]
    assert (
        containing_symbol["kind"] in allowed
    ), f"Expected containing symbol to be method/class/function, got kind: {containing_symbol['kind']}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_containing_symbol_function(self, language_server: SolidLanguageServer) -> None:
    """The container of a position inside a standalone function should be that function."""
    file_path = os.path.join("variables.rb")
    # Somewhere inside demonstrate_variable_usage.
    containing_symbol = language_server.request_containing_symbol(file_path, 100, 10)
    if containing_symbol is None:
        return
    name = containing_symbol["name"]
    assert name in ("demonstrate_variable_usage", "main"), f"Expected function name, got: {name}"
    kind = containing_symbol["kind"]
    assert kind in (
        SymbolKind.Function.value,
        SymbolKind.Method.value,
    ), f"Expected function or method kind, got: {kind}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_containing_symbol_nested(self, language_server: SolidLanguageServer) -> None:
    """Test request_containing_symbol with nested scopes.

    The innermost container of a position inside UserService#create_user must be
    the method itself, not the enclosing class.
    """
    file_path = os.path.join("services.rb")
    # Position inside create_user method within UserService class
    containing_symbol = language_server.request_containing_symbol(file_path, 12, 15)
    # Verify that we found the innermost containing symbol (the method)
    assert containing_symbol is not None
    assert containing_symbol["name"] == "create_user"
    # Compare against .value for consistency with the other tests in this class:
    # LSP responses carry symbol kinds as plain integers, not enum members.
    assert containing_symbol["kind"] == SymbolKind.Method.value
    # Verify the container context is preserved when the server reports it
    if "containerName" in containing_symbol:
        assert "UserService" in containing_symbol["containerName"]
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_symbol_tree_structure_subdir(self, language_server: SolidLanguageServer) -> None:
    """Test that the symbol tree structure correctly handles subdirectories.

    Requests the tree rooted at the examples/ subdirectory and checks that the
    user_management file node carries its expected child symbols.
    """
    # Get symbols within the examples subdirectory
    examples_structure = language_server.request_full_symbol_tree(within_relative_path="examples")
    if len(examples_structure) > 0:
        # Should find the examples directory structure
        assert len(examples_structure) >= 1, "Should find examples directory structure"
        # Look for the user_management file in the structure
        found_user_management = False
        for root in examples_structure:
            if "children" in root:
                for child in root["children"]:
                    if "user_management" in child.get("name", ""):
                        found_user_management = True
                        # Verify the structure includes symbol information
                        if "children" in child:
                            child_names = [c.get("name") for c in child["children"]]
                            expected_names = ["UserStats", "UserManager", "process_user_data"]
                            found_expected = [name for name in expected_names if name in child_names]
                            assert (
                                len(found_expected) > 0
                            ), f"Should find symbols in user_management, expected {expected_names}, found {child_names}"
                        # NOTE: this only breaks the inner loop; the outer loop keeps
                        # going, which is harmless since the flag is already set.
                        break
        if not found_user_management:
            pytest.skip("user_management file not found in examples subdirectory structure")
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_defining_symbol_imported_class(self, language_server: SolidLanguageServer) -> None:
    """Resolving a cross-file class usage should land on one of the plausible definitions."""
    file_path = os.path.join("examples", "user_management.rb")
    # Position where Services::UserService is referenced. require-based resolution
    # varies between Ruby language servers, so a None result is tolerated.
    defining_symbol = language_server.request_defining_symbol(file_path, 25, 20)
    if defining_symbol is None:
        return
    assert "name" in defining_symbol
    # Different servers may resolve to the class, its namespace, the constructor,
    # or the containing class.
    expected_names = ["UserService", "Services", "new", "UserManager"]
    resolved = defining_symbol.get("name")
    assert resolved in expected_names, f"Expected one of {expected_names}, got: {resolved}"
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_defining_symbol_method_call(self, language_server: SolidLanguageServer) -> None:
    """Resolving a method call should yield a method/constructor/function definition."""
    file_path = os.path.join("examples", "user_management.rb")
    # Position at a call site such as create_user.
    defining_symbol = language_server.request_defining_symbol(file_path, 30, 15)
    if defining_symbol is None:
        return
    assert "name" in defining_symbol
    assert "kind" in defining_symbol
    # The definition should be some kind of callable.
    callable_kinds = (SymbolKind.Method.value, SymbolKind.Constructor.value, SymbolKind.Function.value)
    assert defining_symbol.get("kind") in callable_kinds
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_request_defining_symbol_nested_function(self, language_server: SolidLanguageServer) -> None:
    """Resolving a symbol in nested contexts should yield a structurally sane definition."""
    file_path = os.path.join("nested.rb")
    # Nested definitions are hard for many servers, so a None result is tolerated.
    defining_symbol = language_server.request_defining_symbol(file_path, 15, 10)
    if defining_symbol is None:
        return
    assert "name" in defining_symbol
    assert "kind" in defining_symbol
    # Implementations differ on whether this resolves to a method, function,
    # variable, or class.
    valid_kinds = (SymbolKind.Method.value, SymbolKind.Function.value, SymbolKind.Variable.value, SymbolKind.Class.value)
    assert defining_symbol.get("kind") in valid_kinds
@pytest.mark.parametrize("language_server", [Language.RUBY], indirect=True)
def test_containing_symbol_of_var_is_file(self, language_server: SolidLanguageServer) -> None:
    """A file-level variable may have no container, or a well-formed file-level one."""
    file_path = os.path.join("variables.rb")
    # Position at a top-level variable/constant; servers differ on whether any
    # container is reported for file-level symbols.
    containing_symbol = language_server.request_containing_symbol(file_path, 5, 5)
    if containing_symbol is None:
        return
    # When a container is reported, it must at least be structurally valid.
    assert "name" in containing_symbol
    assert "kind" in containing_symbol
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/ruby/test_ruby_symbol_retrieval.py",
"license": "MIT License",
"lines": 521,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/rust/test_rust_2024_edition.py | import os
from collections.abc import Iterator
from pathlib import Path
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_utils import SymbolUtils
from test.conftest import start_ls_context
@pytest.fixture(scope="class")
def rust_language_server() -> Iterator[SolidLanguageServer]:
    """Yield a language server running against the Rust 2024 edition test repository."""
    repo_path = TestRust2024EditionLanguageServer.test_repo_2024_path
    if not repo_path.exists():
        pytest.skip("Rust 2024 edition test repository not found")
    # One server instance is shared across the whole test class (scope="class").
    with start_ls_context(Language.RUST, str(repo_path)) as ls:
        yield ls
@pytest.mark.rust
class TestRust2024EditionLanguageServer:
    """Integration tests against the dedicated Rust 2024 edition fixture repository."""

    # Location of the 2024-edition fixture repo, relative to this test file.
    test_repo_2024_path = Path(__file__).parent.parent.parent / "resources" / "repos" / "rust" / "test_repo_2024"

    def test_find_references_raw(self, rust_language_server) -> None:
        """References to the 'add' function should include its call site in main.rs."""
        # Test finding references to the 'add' function defined in main.rs
        file_path = os.path.join("src", "main.rs")
        symbols = rust_language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        add_symbol = None
        for sym in symbols[0]:
            if sym.get("name") == "add":
                add_symbol = sym
                break
        assert add_symbol is not None, "Could not find 'add' function symbol in main.rs"
        # Query references at the symbol's selectionRange (the identifier itself).
        sel_start = add_symbol["selectionRange"]["start"]
        refs = rust_language_server.request_references(file_path, sel_start["line"], sel_start["character"])
        # The add function should be referenced within main.rs itself (in the main function)
        assert any("main.rs" in ref.get("relativePath", "") for ref in refs), "main.rs should reference add function"

    def test_find_symbol(self, rust_language_server) -> None:
        """The full symbol tree should contain the repo's functions and the Calculator struct."""
        symbols = rust_language_server.request_full_symbol_tree()
        assert SymbolUtils.symbol_tree_contains_name(symbols, "main"), "main function not found in symbol tree"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "add"), "add function not found in symbol tree"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "multiply"), "multiply function not found in symbol tree"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "Calculator"), "Calculator struct not found in symbol tree"

    def test_find_referencing_symbols_multiply(self, rust_language_server) -> None:
        """Requesting references for 'multiply' should succeed even when none exist."""
        # Find references to 'multiply' function defined in lib.rs
        file_path = os.path.join("src", "lib.rs")
        symbols = rust_language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        multiply_symbol = None
        for sym in symbols[0]:
            if sym.get("name") == "multiply":
                multiply_symbol = sym
                break
        assert multiply_symbol is not None, "Could not find 'multiply' function symbol in lib.rs"
        sel_start = multiply_symbol["selectionRange"]["start"]
        refs = rust_language_server.request_references(file_path, sel_start["line"], sel_start["character"])
        # The multiply function exists but may not be referenced anywhere, which is fine
        # This test just verifies we can find the symbol and request references without error
        assert isinstance(refs, list), "Should return a list of references (even if empty)"

    def test_find_calculator_struct_and_impl(self, rust_language_server) -> None:
        """The Calculator struct should expose its field, and its impl block its methods."""
        # Test finding the Calculator struct and its impl block
        file_path = os.path.join("src", "lib.rs")
        symbols = rust_language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Find the Calculator struct
        calculator_struct = None
        calculator_impl = None
        # LSP SymbolKind numeric codes: 23 = Struct, 11 = Interface (used here for
        # the impl block — presumably how rust-analyzer reports it; see comments).
        for sym in symbols[0]:
            if sym.get("name") == "Calculator" and sym.get("kind") == 23:  # Struct kind
                calculator_struct = sym
            elif sym.get("name") == "Calculator" and sym.get("kind") == 11:  # Interface/Impl kind
                calculator_impl = sym
        assert calculator_struct is not None, "Could not find 'Calculator' struct symbol in lib.rs"
        # The struct should have the 'result' field
        struct_children = calculator_struct.get("children", [])
        field_names = [child.get("name") for child in struct_children]
        assert "result" in field_names, "Calculator struct should have 'result' field"
        # Find the impl block and check its methods
        if calculator_impl is not None:
            impl_children = calculator_impl.get("children", [])
            method_names = [child.get("name") for child in impl_children]
            assert "new" in method_names, "Calculator impl should have 'new' method"
            assert "add" in method_names, "Calculator impl should have 'add' method"
            assert "get_result" in method_names, "Calculator impl should have 'get_result' method"

    def test_overview_methods(self, rust_language_server) -> None:
        """The overview (full symbol tree) should include all top-level symbols."""
        symbols = rust_language_server.request_full_symbol_tree()
        assert SymbolUtils.symbol_tree_contains_name(symbols, "main"), "main missing from overview"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "add"), "add missing from overview"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "multiply"), "multiply missing from overview"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "Calculator"), "Calculator missing from overview"

    def test_rust_2024_edition_specific(self) -> None:
        """Sanity-check that the fixture repository really declares edition 2024."""
        # Verify we're actually working with the 2024 edition repository
        cargo_toml_path = self.test_repo_2024_path / "Cargo.toml"
        assert cargo_toml_path.exists(), "Cargo.toml should exist in test repository"
        with open(cargo_toml_path) as f:
            content = f.read()
        assert 'edition = "2024"' in content, "Should be using Rust 2024 edition"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/rust/test_rust_2024_edition.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/swift/test_swift_basic.py | """
Basic integration tests for the Swift language server functionality.
These tests validate the functionality of the language server APIs
like request_references using the Swift test repository.
"""
import os
import platform
import pytest
from serena.project import Project
from serena.util.text_utils import LineType
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from test.conftest import is_ci
# Skip Swift tests on Windows due to complex GitHub Actions configuration
# True when the current platform is Windows; drives the module-wide skip below.
WINDOWS_SKIP = platform.system() == "Windows"
WINDOWS_SKIP_REASON = "GitHub Actions configuration for Swift on Windows is complex, skipping for now."
# Module-level marks: tag every test in this file as Swift and skip all of them on Windows.
pytestmark = [pytest.mark.swift, pytest.mark.skipif(WINDOWS_SKIP, reason=WINDOWS_SKIP_REASON)]
class TestSwiftLanguageServerBasics:
    """Test basic functionality of the Swift language server.

    Positions passed to LSP requests are 0-based; comments reference the
    1-based line numbers visible in the fixture sources under src/.
    """

    @pytest.mark.parametrize("language_server", [Language.SWIFT], indirect=True)
    def test_goto_definition_calculator_class(self, language_server: SolidLanguageServer) -> None:
        """Test goto_definition on Calculator class usage."""
        file_path = os.path.join("src", "main.swift")
        # Find the Calculator usage at line 5: let calculator = Calculator()
        # Position should be at the "Calculator()" call
        definitions = language_server.request_definition(file_path, 4, 23)  # Position at Calculator() call
        assert isinstance(definitions, list), "Definitions should be a list"
        assert len(definitions) > 0, "Should find definition for Calculator class"
        # Verify the definition points to the Calculator class definition
        calculator_def = definitions[0]
        assert calculator_def.get("uri", "").endswith("main.swift"), "Definition should be in main.swift"
        # The Calculator class is defined starting at line 16
        start_line = calculator_def.get("range", {}).get("start", {}).get("line")
        assert start_line == 15, f"Calculator class definition should be at line 16, got {start_line + 1}"

    @pytest.mark.parametrize("language_server", [Language.SWIFT], indirect=True)
    def test_goto_definition_user_struct(self, language_server: SolidLanguageServer) -> None:
        """Test goto_definition on User struct usage."""
        file_path = os.path.join("src", "main.swift")
        # Find the User usage at line 9: let user = User(name: "Alice", age: 30)
        # Position should be at the "User(...)" call
        definitions = language_server.request_definition(file_path, 8, 18)  # Position at User(...) call
        assert isinstance(definitions, list), "Definitions should be a list"
        assert len(definitions) > 0, "Should find definition for User struct"
        # Verify the definition points to the User struct definition
        user_def = definitions[0]
        assert user_def.get("uri", "").endswith("main.swift"), "Definition should be in main.swift"
        # The User struct is defined starting at line 26
        start_line = user_def.get("range", {}).get("start", {}).get("line")
        assert start_line == 25, f"User struct definition should be at line 26, got {start_line + 1}"

    @pytest.mark.parametrize("language_server", [Language.SWIFT], indirect=True)
    def test_goto_definition_calculator_method(self, language_server: SolidLanguageServer) -> None:
        """Test goto_definition on Calculator method usage."""
        file_path = os.path.join("src", "main.swift")
        # Find the add method usage at line 6: let result = calculator.add(5, 3)
        # Position should be at the "add" method call
        definitions = language_server.request_definition(file_path, 5, 28)  # Position at add method call
        assert isinstance(definitions, list), "Definitions should be a list"
        # Guard against an empty result before indexing, consistent with the other
        # goto_definition tests (previously an empty list raised IndexError instead
        # of a clear assertion failure).
        assert len(definitions) > 0, "Should find definition for add method"
        # Verify the definition points to the add method definition
        add_def = definitions[0]
        assert add_def.get("uri", "").endswith("main.swift"), "Definition should be in main.swift"
        # The add method is defined starting at line 17
        start_line = add_def.get("range", {}).get("start", {}).get("line")
        assert start_line == 16, f"add method definition should be at line 17, got {start_line + 1}"

    @pytest.mark.parametrize("language_server", [Language.SWIFT], indirect=True)
    def test_goto_definition_cross_file(self, language_server: SolidLanguageServer) -> None:
        """Test goto_definition across files - Utils struct."""
        utils_file = os.path.join("src", "utils.swift")
        # First, let's check if Utils is used anywhere (it might not be in this simple test)
        # We'll test goto_definition on Utils struct itself
        symbols = language_server.request_document_symbols(utils_file).get_all_symbols_and_roots()
        utils_symbol = next(s for s in symbols[0] if s.get("name") == "Utils")
        sel_start = utils_symbol["selectionRange"]["start"]
        definitions = language_server.request_definition(utils_file, sel_start["line"], sel_start["character"])
        assert isinstance(definitions, list), "Definitions should be a list"
        # Guard against an empty result before indexing (previously IndexError).
        assert len(definitions) > 0, "Should find definition for Utils struct"
        # Should find the Utils struct definition itself
        utils_def = definitions[0]
        assert utils_def.get("uri", "").endswith("utils.swift"), "Definition should be in utils.swift"

    @pytest.mark.xfail(is_ci, reason="Test is flaky in CI")  # See #1040
    @pytest.mark.parametrize("language_server", [Language.SWIFT], indirect=True)
    def test_request_references_calculator_class(self, language_server: SolidLanguageServer) -> None:
        """Test request_references on the Calculator class."""
        # Get references to the Calculator class in main.swift
        file_path = os.path.join("src", "main.swift")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        calculator_symbol = next(s for s in symbols[0] if s.get("name") == "Calculator")
        sel_start = calculator_symbol["selectionRange"]["start"]
        references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
        assert isinstance(references, list), "References should be a list"
        assert len(references) > 0, "Calculator class should be referenced"
        # Validate that Calculator is referenced in the main function
        calculator_refs = [ref for ref in references if ref.get("uri", "").endswith("main.swift")]
        assert len(calculator_refs) > 0, "Calculator class should be referenced in main.swift"
        # Check that one reference is at line 5 (let calculator = Calculator())
        line_5_refs = [ref for ref in calculator_refs if ref.get("range", {}).get("start", {}).get("line") == 4]
        assert len(line_5_refs) > 0, "Calculator should be referenced at line 5"

    @pytest.mark.xfail(is_ci, reason="Test is flaky in CI")  # See #1040
    @pytest.mark.parametrize("language_server", [Language.SWIFT], indirect=True)
    def test_request_references_user_struct(self, language_server: SolidLanguageServer) -> None:
        """Test request_references on the User struct."""
        # Get references to the User struct in main.swift
        file_path = os.path.join("src", "main.swift")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        user_symbol = next(s for s in symbols[0] if s.get("name") == "User")
        sel_start = user_symbol["selectionRange"]["start"]
        references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
        assert isinstance(references, list), "References should be a list"
        # Validate that User is referenced in the main function
        user_refs = [ref for ref in references if ref.get("uri", "").endswith("main.swift")]
        assert len(user_refs) > 0, "User struct should be referenced in main.swift"
        # Check that one reference is at line 9 (let user = User(...))
        line_9_refs = [ref for ref in user_refs if ref.get("range", {}).get("start", {}).get("line") == 8]
        assert len(line_9_refs) > 0, "User should be referenced at line 9"

    @pytest.mark.xfail(is_ci, reason="Test is flaky in CI")  # See #1040
    @pytest.mark.parametrize("language_server", [Language.SWIFT], indirect=True)
    def test_request_references_utils_struct(self, language_server: SolidLanguageServer) -> None:
        """Test request_references on the Utils struct."""
        # Get references to the Utils struct in utils.swift
        file_path = os.path.join("src", "utils.swift")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        utils_symbol = next((s for s in symbols[0] if s.get("name") == "Utils"), None)
        if not utils_symbol or "selectionRange" not in utils_symbol:
            raise AssertionError("Utils symbol or its selectionRange not found")
        sel_start = utils_symbol["selectionRange"]["start"]
        references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
        assert isinstance(references, list), "References should be a list"
        assert len(references) > 0, "Utils struct should be referenced"
        # Validate that Utils is referenced in main.swift
        utils_refs = [ref for ref in references if ref.get("uri", "").endswith("main.swift")]
        assert len(utils_refs) > 0, "Utils struct should be referenced in main.swift"
        # Check that one reference is at line 12 (Utils.calculateArea call)
        line_12_refs = [ref for ref in utils_refs if ref.get("range", {}).get("start", {}).get("line") == 11]
        assert len(line_12_refs) > 0, "Utils should be referenced at line 12"
class TestSwiftProjectBasics:
    """Project-level (non-LSP) content retrieval and pattern-search tests for the Swift repo."""

    @pytest.mark.parametrize("project", [Language.SWIFT], indirect=True)
    def test_retrieve_content_around_line(self, project: Project) -> None:
        """Test retrieve_content_around_line functionality with various scenarios."""
        file_path = os.path.join("src", "main.swift")
        # Scenario 1: Find Calculator class definition
        calculator_line = None
        for line_num in range(1, 50):  # Search first 50 lines
            try:
                line_content = project.retrieve_content_around_line(file_path, line_num)
                if line_content.lines and "class Calculator" in line_content.lines[0].line_content:
                    calculator_line = line_num
                    break
            except Exception:
                # Out-of-range lines may raise; keep scanning. Narrowed from a bare
                # `except:` (E722), which would also swallow KeyboardInterrupt/SystemExit.
                continue
        assert calculator_line is not None, "Calculator class not found"
        line_calc = project.retrieve_content_around_line(file_path, calculator_line)
        assert len(line_calc.lines) == 1
        assert "class Calculator" in line_calc.lines[0].line_content
        assert line_calc.lines[0].line_number == calculator_line
        assert line_calc.lines[0].match_type == LineType.MATCH
        # Scenario 2: Context above and below Calculator class
        with_context_around_calculator = project.retrieve_content_around_line(file_path, calculator_line, 2, 2)
        assert len(with_context_around_calculator.lines) == 5
        assert "class Calculator" in with_context_around_calculator.matched_lines[0].line_content
        assert with_context_around_calculator.num_matched_lines == 1
        # Scenario 3: Search for struct definitions
        struct_pattern = r"struct\s+\w+"
        matches = project.search_source_files_for_pattern(struct_pattern)
        assert len(matches) > 0, "Should find struct definitions"
        # Should find User struct
        user_matches = [m for m in matches if "User" in str(m)]
        assert len(user_matches) > 0, "Should find User struct"
        # Scenario 4: Search for class definitions
        class_pattern = r"class\s+\w+"
        matches = project.search_source_files_for_pattern(class_pattern)
        assert len(matches) > 0, "Should find class definitions"
        # Should find Calculator and Circle classes
        calculator_matches = [m for m in matches if "Calculator" in str(m)]
        circle_matches = [m for m in matches if "Circle" in str(m)]
        assert len(calculator_matches) > 0, "Should find Calculator class"
        assert len(circle_matches) > 0, "Should find Circle class"
        # Scenario 5: Search for enum definitions
        enum_pattern = r"enum\s+\w+"
        matches = project.search_source_files_for_pattern(enum_pattern)
        assert len(matches) > 0, "Should find enum definitions"
        # Should find Status enum
        status_matches = [m for m in matches if "Status" in str(m)]
        assert len(status_matches) > 0, "Should find Status enum"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/swift/test_swift_basic.py",
"license": "MIT License",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/util/test_zip.py | import sys
import zipfile
from pathlib import Path
import pytest
from solidlsp.util.zip import SafeZipExtractor
@pytest.fixture
def temp_zip_file(tmp_path: Path) -> Path:
    """Build a small ZIP archive with two top-level files and one nested file."""
    zip_path = tmp_path / "test.zip"
    members = {
        "file1.txt": "Hello World 1",
        "file2.txt": "Hello World 2",
        "folder/file3.txt": "Hello World 3",
    }
    with zipfile.ZipFile(zip_path, "w") as zipf:
        for arcname, payload in members.items():
            zipf.writestr(arcname, payload)
    return zip_path
def test_extract_all_success(temp_zip_file: Path, tmp_path: Path) -> None:
    """Every archive member should be extracted with its content intact."""
    dest_dir = tmp_path / "extracted"
    SafeZipExtractor(temp_zip_file, dest_dir, verbose=False).extract_all()
    # Each member of the fixture archive must land at its relative path.
    expected = {
        dest_dir / "file1.txt": "Hello World 1",
        dest_dir / "file2.txt": "Hello World 2",
        dest_dir / "folder" / "file3.txt": "Hello World 3",
    }
    for path, content in expected.items():
        assert path.read_text() == content
def test_include_patterns(temp_zip_file: Path, tmp_path: Path) -> None:
    """Only files matching include_patterns should be extracted.

    The shared fixture contains only ``*.txt`` members, which cannot actually
    demonstrate include filtering, so a dedicated archive with one non-matching
    member is built here.
    """
    zip_path = tmp_path / "include_test.zip"
    with zipfile.ZipFile(zip_path, "w") as zipf:
        zipf.writestr("file1.txt", "Hello World 1")
        zipf.writestr("file2.txt", "Hello World 2")
        zipf.writestr("folder/file3.txt", "Hello World 3")
        zipf.writestr("skipped.log", "should not be extracted")
    dest_dir = tmp_path / "extracted"
    extractor = SafeZipExtractor(zip_path, dest_dir, verbose=False, include_patterns=["*.txt"])
    extractor.extract_all()
    # All *.txt members are extracted...
    assert (dest_dir / "file1.txt").exists()
    assert (dest_dir / "file2.txt").exists()
    assert (dest_dir / "folder" / "file3.txt").exists()
    # ...and the member that does not match the include pattern is skipped.
    assert not (dest_dir / "skipped.log").exists()
def test_exclude_patterns(temp_zip_file: Path, tmp_path: Path) -> None:
    """Members matching exclude_patterns must be skipped; everything else extracts."""
    dest_dir = tmp_path / "extracted"
    SafeZipExtractor(temp_zip_file, dest_dir, verbose=False, exclude_patterns=["file2.txt"]).extract_all()
    # Collect what actually landed on disk, as archive-relative POSIX paths.
    extracted = {p.relative_to(dest_dir).as_posix() for p in dest_dir.rglob("*") if p.is_file()}
    assert "file1.txt" in extracted
    assert "file2.txt" not in extracted
    assert "folder/file3.txt" in extracted
def test_include_and_exclude_patterns(temp_zip_file: Path, tmp_path: Path) -> None:
    """When an entry matches both lists, the exclusion wins."""
    target = tmp_path / "extracted"
    extractor = SafeZipExtractor(
        temp_zip_file,
        target,
        verbose=False,
        include_patterns=["*.txt"],
        exclude_patterns=["file1.txt"],
    )
    extractor.extract_all()
    # file1.txt matches the include pattern too, yet must be skipped
    assert not (target / "file1.txt").exists()
    assert (target / "file2.txt").exists()
    assert (target / "folder" / "file3.txt").exists()
def test_skip_on_error(monkeypatch, temp_zip_file: Path, tmp_path: Path) -> None:
    """A failure on one member must not abort extraction of the remaining members."""
    target = tmp_path / "extracted"
    real_open = zipfile.ZipFile.open

    def broken_open(self, member, *args, **kwargs):
        # Fail only for file2.txt; every other member passes through untouched.
        if member.filename == "file2.txt":
            raise OSError("Simulated failure")
        return real_open(self, member, *args, **kwargs)

    # Patch at class level so every ZipFile instance sees the simulated failure.
    monkeypatch.setattr(zipfile.ZipFile, "open", broken_open)
    SafeZipExtractor(temp_zip_file, target, verbose=False).extract_all()
    assert (target / "file1.txt").exists()
    assert not (target / "file2.txt").exists()
    assert (target / "folder" / "file3.txt").exists()
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="Windows-only test")
def test_long_path_normalization(temp_zip_file: Path, tmp_path: Path) -> None:
    r"""On Windows, _normalize_path must prepend the \\?\ long-path prefix."""
    deep_dir = tmp_path / ("a" * 250)  # long enough to exceed legacy MAX_PATH limits
    extractor = SafeZipExtractor(temp_zip_file, deep_dir, verbose=False)
    normalized = extractor._normalize_path(deep_dir / "file.txt")
    assert str(normalized).startswith("\\\\?\\")
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/util/test_zip.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/zig/test_zig_basic.py | """
Basic integration tests for Zig language server functionality.
These tests validate symbol finding and navigation capabilities using the Zig Language Server (ZLS).
Note: ZLS requires files to be open in the editor to find cross-file references (performance optimization).
"""
import os
import sys
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_types import SymbolKind
@pytest.mark.zig
@pytest.mark.skipif(
    sys.platform == "win32", reason="ZLS is disabled on Windows - cross-file references don't work reliably. Reason unknown."
)
class TestZigLanguageServer:
    """Test Zig language server symbol finding and navigation capabilities.

    ZLS only finds cross-file references for files that are currently open
    (see the module docstring); tests that need them open the files explicitly.

    NOTE: All tests are skipped on Windows as ZLS is disabled on that platform
    due to unreliable cross-file reference functionality. Reason unknown.
    """
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
def test_find_symbols_in_main(self, language_server: SolidLanguageServer) -> None:
"""Test finding specific symbols in main.zig."""
file_path = os.path.join("src", "main.zig")
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
# Extract symbol names from the returned structure
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
symbol_names = {sym.get("name") for sym in symbol_list if isinstance(sym, dict)}
# Verify specific symbols exist
assert "main" in symbol_names, "main function not found"
assert "greeting" in symbol_names, "greeting function not found"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
def test_find_symbols_in_calculator(self, language_server: SolidLanguageServer) -> None:
"""Test finding Calculator struct and its methods."""
file_path = os.path.join("src", "calculator.zig")
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
# Find Calculator struct
calculator_symbol = None
for sym in symbol_list:
if sym.get("name") == "Calculator":
calculator_symbol = sym
break
assert calculator_symbol is not None, "Calculator struct not found"
# ZLS may use different symbol kinds for structs (14 = Namespace, 5 = Class, 23 = Struct)
assert calculator_symbol.get("kind") in [
SymbolKind.Class,
SymbolKind.Struct,
SymbolKind.Namespace,
5,
14,
23,
], "Calculator should be a struct/class/namespace"
# Check for Calculator methods (init, add, subtract, etc.)
# Methods might be in children or at the same level
all_symbols = []
for sym in symbol_list:
all_symbols.append(sym.get("name"))
if "children" in sym:
for child in sym["children"]:
all_symbols.append(child.get("name"))
# Verify exact calculator methods exist
expected_methods = {"init", "add", "subtract", "multiply", "divide"}
found_methods = set(all_symbols) & expected_methods
assert found_methods == expected_methods, f"Expected exactly {expected_methods}, found: {found_methods}"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
def test_find_symbols_in_math_utils(self, language_server: SolidLanguageServer) -> None:
"""Test finding functions in math_utils.zig."""
file_path = os.path.join("src", "math_utils.zig")
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
assert symbols is not None
assert len(symbols) > 0
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
symbol_names = {sym.get("name") for sym in symbol_list if isinstance(sym, dict)}
# Verify math utility functions exist
assert "factorial" in symbol_names, "factorial function not found"
assert "isPrime" in symbol_names, "isPrime function not found"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
def test_find_references_within_file(self, language_server: SolidLanguageServer) -> None:
"""Test finding references within the same file."""
file_path = os.path.join("src", "calculator.zig")
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
# Find Calculator struct
calculator_symbol = None
for sym in symbol_list:
if sym.get("name") == "Calculator":
calculator_symbol = sym
break
assert calculator_symbol is not None, "Calculator struct not found"
# Find references to Calculator within the same file
sel_range = calculator_symbol.get("selectionRange", calculator_symbol.get("range"))
assert sel_range is not None, "Calculator symbol has no range information"
sel_start = sel_range["start"]
refs = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
assert refs is not None
assert isinstance(refs, list)
# ZLS finds references within the same file
# Calculator is used in 4 test usages (lines 45, 51, 57, 63)
# Note: ZLS may not include the declaration itself as a reference
assert len(refs) >= 4, f"Should find at least 4 Calculator references within calculator.zig, found {len(refs)}"
# Verify we found the test usages
ref_lines = sorted([ref["range"]["start"]["line"] for ref in refs])
test_lines = [44, 50, 56, 62] # 0-indexed: tests at lines 45, 51, 57, 63
for line in test_lines:
assert line in ref_lines, f"Should find Calculator reference at line {line + 1}, found at lines {[l + 1 for l in ref_lines]}"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
@pytest.mark.skipif(
sys.platform == "win32", reason="ZLS cross-file references don't work reliably on Windows - URI path handling issues"
)
def test_cross_file_references_with_open_files(self, language_server: SolidLanguageServer) -> None:
"""
Test finding cross-file references with files open.
ZLS limitation: Cross-file references (textDocument/references) only work when
target files are open. This is a performance optimization in ZLS.
NOTE: Disabled on Windows as cross-file references cannot be made to work reliably
due to URI path handling differences between Windows and Unix systems.
"""
import time
# Open the files that contain references to enable cross-file search
with language_server.open_file("build.zig"):
with language_server.open_file(os.path.join("src", "main.zig")):
with language_server.open_file(os.path.join("src", "calculator.zig")):
# Give ZLS a moment to analyze the open files
time.sleep(1)
# Find Calculator struct
symbols = language_server.request_document_symbols(os.path.join("src", "calculator.zig")).get_all_symbols_and_roots()
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
calculator_symbol = None
for sym in symbol_list:
if sym.get("name") == "Calculator":
calculator_symbol = sym
break
assert calculator_symbol is not None, "Calculator struct not found"
sel_range = calculator_symbol.get("selectionRange", calculator_symbol.get("range"))
assert sel_range is not None, "Calculator symbol has no range information"
# Find references to Calculator
sel_start = sel_range["start"]
refs = language_server.request_references(
os.path.join("src", "calculator.zig"), sel_start["line"], sel_start["character"]
)
assert refs is not None
assert isinstance(refs, list)
# With files open, ZLS should find cross-file references
main_refs = [ref for ref in refs if "main.zig" in ref.get("uri", "")]
assert len(main_refs) >= 1, f"Should find at least 1 Calculator reference in main.zig, found {len(main_refs)}"
# Verify exact location in main.zig (line 8, 0-indexed: 7)
main_ref_line = main_refs[0]["range"]["start"]["line"]
assert (
main_ref_line == 7
), f"Calculator reference in main.zig should be at line 8 (0-indexed: 7), found at line {main_ref_line + 1}"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
def test_cross_file_references_within_file(self, language_server: SolidLanguageServer) -> None:
"""
Test that ZLS finds references within the same file.
Note: ZLS is designed to be lightweight and only analyzes files that are explicitly opened.
Cross-file references require manually opening the relevant files first.
"""
# Find references to Calculator from calculator.zig
file_path = os.path.join("src", "calculator.zig")
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
symbol_list = symbols[0] if isinstance(symbols, tuple) else symbols
calculator_symbol = None
for sym in symbol_list:
if sym.get("name") == "Calculator":
calculator_symbol = sym
break
assert calculator_symbol is not None, "Calculator struct not found"
sel_range = calculator_symbol.get("selectionRange", calculator_symbol.get("range"))
assert sel_range is not None, "Calculator symbol has no range information"
sel_start = sel_range["start"]
refs = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
assert refs is not None
assert isinstance(refs, list)
# ZLS finds references within the same file
# Calculator is used in 4 test usages (lines 45, 51, 57, 63)
# Note: ZLS may not include the declaration itself as a reference
assert len(refs) >= 4, f"Should find at least 4 Calculator references within calculator.zig, found {len(refs)}"
# Verify we found the test usages
ref_lines = sorted([ref["range"]["start"]["line"] for ref in refs])
test_lines = [44, 50, 56, 62] # 0-indexed: tests at lines 45, 51, 57, 63
for line in test_lines:
assert line in ref_lines, f"Should find Calculator reference at line {line + 1}, found at lines {[l + 1 for l in ref_lines]}"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
@pytest.mark.skipif(
sys.platform == "win32", reason="ZLS cross-file references don't work reliably on Windows - URI path handling issues"
)
def test_go_to_definition_cross_file(self, language_server: SolidLanguageServer) -> None:
"""
Test go-to-definition from main.zig to calculator.zig.
ZLS capability: Go-to-definition (textDocument/definition) works cross-file
WITHOUT requiring files to be open.
NOTE: Disabled on Windows as cross-file references cannot be made to work reliably
due to URI path handling differences between Windows and Unix systems.
"""
file_path = os.path.join("src", "main.zig")
# Line 8: const calc = calculator.Calculator.init();
# Test go-to-definition for Calculator
definitions = language_server.request_definition(file_path, 7, 25) # Position of "Calculator"
assert definitions is not None
assert isinstance(definitions, list)
assert len(definitions) > 0, "Should find definition of Calculator"
# Should point to calculator.zig
calc_def = definitions[0]
assert "calculator.zig" in calc_def.get("uri", ""), "Definition should be in calculator.zig"
    @pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
    @pytest.mark.skipif(
        sys.platform == "win32", reason="ZLS cross-file references don't work reliably on Windows - URI path handling issues"
    )
    def test_cross_file_function_usage(self, language_server: SolidLanguageServer) -> None:
        """Test finding usage of functions from math_utils in main.zig.

        NOTE: Disabled on Windows as cross-file references cannot be made to work reliably
        due to URI path handling differences between Windows and Unix systems.
        """
        # Line 23 in main.zig: const factorial_result = math_utils.factorial(5);
        definitions = language_server.request_definition(os.path.join("src", "main.zig"), 22, 40)  # Position of "factorial"
        assert definitions is not None
        assert isinstance(definitions, list)
        # NOTE(review): the guard below makes the cross-file assertion a no-op when
        # ZLS returns an empty list, so this test cannot fail on "no definitions
        # found". Presumably deliberate tolerance of ZLS flakiness — confirm intent.
        if len(definitions) > 0:
            # Should find factorial definition in math_utils.zig
            math_def = [d for d in definitions if "math_utils.zig" in d.get("uri", "")]
            assert len(math_def) > 0, "Should find factorial definition in math_utils.zig"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
def test_verify_cross_file_imports(self, language_server: SolidLanguageServer) -> None:
"""Verify that our test files have proper cross-file imports."""
# Verify main.zig imports
main_symbols = language_server.request_document_symbols(os.path.join("src", "main.zig")).get_all_symbols_and_roots()
assert main_symbols is not None
main_list = main_symbols[0] if isinstance(main_symbols, tuple) else main_symbols
main_names = {sym.get("name") for sym in main_list if isinstance(sym, dict)}
# main.zig should have main and greeting functions
assert "main" in main_names, "main function should be in main.zig"
assert "greeting" in main_names, "greeting function should be in main.zig"
# Verify calculator.zig exports Calculator
calc_symbols = language_server.request_document_symbols(os.path.join("src", "calculator.zig")).get_all_symbols_and_roots()
assert calc_symbols is not None
calc_list = calc_symbols[0] if isinstance(calc_symbols, tuple) else calc_symbols
calc_names = {sym.get("name") for sym in calc_list if isinstance(sym, dict)}
assert "Calculator" in calc_names, "Calculator struct should be in calculator.zig"
# Verify math_utils.zig exports functions
math_symbols = language_server.request_document_symbols(os.path.join("src", "math_utils.zig")).get_all_symbols_and_roots()
assert math_symbols is not None
math_list = math_symbols[0] if isinstance(math_symbols, tuple) else math_symbols
math_names = {sym.get("name") for sym in math_list if isinstance(sym, dict)}
assert "factorial" in math_names, "factorial function should be in math_utils.zig"
assert "isPrime" in math_names, "isPrime function should be in math_utils.zig"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
def test_hover_information(self, language_server: SolidLanguageServer) -> None:
"""Test hover information for symbols."""
file_path = os.path.join("src", "main.zig")
# Get hover info for the main function
hover_info = language_server.request_hover(file_path, 4, 8) # Position of "main" function
assert hover_info is not None, "Should provide hover information for main function"
# Hover info could be a dict with 'contents' or a string
if isinstance(hover_info, dict):
assert "contents" in hover_info or "value" in hover_info, "Hover should have contents"
@pytest.mark.parametrize("language_server", [Language.ZIG], indirect=True)
def test_full_symbol_tree(self, language_server: SolidLanguageServer) -> None:
"""Test that full symbol tree is not empty."""
symbols = language_server.request_full_symbol_tree()
assert symbols is not None
assert len(symbols) > 0, "Symbol tree should not be empty"
# The tree should have at least one root node
root = symbols[0]
assert isinstance(root, dict), "Root should be a dict"
assert "name" in root, "Root should have a name"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/zig/test_zig_basic.py",
"license": "MIT License",
"lines": 270,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:src/serena/util/logging.py | import queue
import threading
from collections.abc import Callable
from dataclasses import dataclass
from typing import Optional
from sensai.util import logging
from serena.constants import LOG_MESSAGES_BUFFER_SIZE, SERENA_LOG_FORMAT
lg = logging
@dataclass
class LogMessages:
    """A window of buffered log messages.

    :ivar messages: the log messages, ordered from oldest to newest
    :ivar max_idx: the 0-based index (in the full log history) of the last message in `messages`
    """

    messages: list[str]
    max_idx: int
class MemoryLogHandler(logging.Handler):
    """Log handler that buffers formatted messages in memory.

    Records are formatted in `emit` and pushed onto an internal queue; a daemon
    worker thread drains the queue into a (bounded) `LogBuffer` and notifies any
    registered callbacks, keeping `emit` itself non-blocking.
    """

    def __init__(self, level: int = logging.NOTSET, max_messages: int | None = LOG_MESSAGES_BUFFER_SIZE) -> None:
        """
        :param level: the minimum log level handled by this handler
        :param max_messages: the maximum number of messages to retain (None for unlimited)
        """
        super().__init__(level=level)
        self.setFormatter(logging.Formatter(SERENA_LOG_FORMAT))
        self._log_buffer = LogBuffer(max_messages=max_messages)
        self._log_queue: queue.Queue[str] = queue.Queue()
        self._stop_event = threading.Event()
        self._emit_callbacks: list[Callable[[str], None]] = []
        # start background thread to process logs
        self.worker_thread = threading.Thread(target=self._process_queue, daemon=True)
        self.worker_thread.start()

    def add_emit_callback(self, callback: Callable[[str], None]) -> None:
        """
        Adds a callback that will be called with each log message.
        The callback should accept a single string argument (the log message).
        """
        self._emit_callbacks.append(callback)

    def emit(self, record: logging.LogRecord) -> None:
        """Formats the record and enqueues it for asynchronous processing."""
        msg = self.format(record)
        self._log_queue.put_nowait(msg)

    def _process_queue(self) -> None:
        """Worker loop: drains the queue into the buffer and notifies callbacks."""
        while not self._stop_event.is_set():
            try:
                msg = self._log_queue.get(timeout=1)
                self._log_buffer.append(msg)
                # iterate over a snapshot so that callbacks registered concurrently
                # (via add_emit_callback) cannot invalidate the iteration
                for callback in list(self._emit_callbacks):
                    try:
                        # a faulty callback must not break log processing; but unlike a
                        # bare `except:`, this no longer swallows SystemExit/KeyboardInterrupt
                        callback(msg)
                    except Exception:
                        pass
                self._log_queue.task_done()
            except queue.Empty:
                continue

    def get_log_messages(self, from_idx: int = 0) -> LogMessages:
        """
        :param from_idx: the 0-based index of the first message to return
            (see `LogBuffer.get_log_messages` for boundary semantics)
        :return: the buffered log messages from `from_idx` onward
        """
        return self._log_buffer.get_log_messages(from_idx=from_idx)

    def clear_log_messages(self) -> None:
        """Discards all messages currently held in the buffer."""
        self._log_buffer.clear()
class LogBuffer:
    """
    Thread-safe storage for log messages, optionally bounded in size.
    """

    def __init__(self, max_messages: int | None = None) -> None:
        self._max_messages = max_messages
        self._log_messages: list[str] = []
        self._lock = threading.Lock()
        # 0-based index of the most recently added message (-1 when empty/cleared)
        self._max_idx = -1

    def append(self, msg: str) -> None:
        """Adds a message, evicting the oldest entries when the size limit is exceeded."""
        with self._lock:
            self._log_messages.append(msg)
            self._max_idx += 1
            limit = self._max_messages
            if limit is not None and len(self._log_messages) > limit:
                overflow = len(self._log_messages) - limit
                self._log_messages = self._log_messages[overflow:]

    def clear(self) -> None:
        """Removes all stored messages and resets the history index."""
        with self._lock:
            self._log_messages = []
            self._max_idx = -1

    def get_log_messages(self, from_idx: int = 0) -> LogMessages:
        """
        :param from_idx: the 0-based index of the first log message to return.
            If it lies at or before the oldest retained message, all retained
            messages are returned.
        :return: the retained messages from `from_idx` onward
        """
        from_idx = max(from_idx, 0)
        with self._lock:
            oldest_idx = self._max_idx - len(self._log_messages) + 1
            if from_idx <= oldest_idx:
                selected = list(self._log_messages)
            else:
                selected = self._log_messages[from_idx - oldest_idx:]
            return LogMessages(messages=selected, max_idx=self._max_idx)
class SuspendedLoggersContext:
    """A context manager that provides an isolated logging environment.

    Temporarily removes all root log handlers upon entry, providing a clean slate
    for defining new log handlers within the context. Upon exit, restores the original
    logging configuration (handlers and root log level). This is useful when you need
    to temporarily configure an isolated logging setup with well-defined log handlers.

    Example:
        >>> with SuspendedLoggersContext():
        ...     # No handlers are active here (configure your own and set desired log level)
        ...     pass
        >>> # Original log handlers are restored here
    """

    def __init__(self) -> None:
        # annotated with the modern union syntax for consistency with the
        # rest of this module (see MemoryLogHandler/LogBuffer)
        self.saved_root_handlers: list[logging.Handler] = []
        self.saved_root_level: int | None = None

    def __enter__(self) -> "SuspendedLoggersContext":
        root_logger = lg.getLogger()
        # copy the handler list so that clearing the live list does not affect our backup
        self.saved_root_handlers = root_logger.handlers.copy()
        self.saved_root_level = root_logger.level
        root_logger.handlers.clear()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:  # type: ignore
        root_logger = lg.getLogger()
        root_logger.handlers = self.saved_root_handlers
        if self.saved_root_level is not None:
            root_logger.setLevel(self.saved_root_level)
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/util/logging.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/serena/cli.py | import collections
import glob
import json
import os
import shutil
import subprocess
import sys
from collections.abc import Iterator, Sequence
from logging import Logger
from pathlib import Path
from typing import Any, Literal
import click
from sensai.util import logging
from sensai.util.logging import FileLoggerContext, datetime_tag
from sensai.util.string import dict_string
from tqdm import tqdm
from serena.agent import SerenaAgent
from serena.config.context_mode import SerenaAgentContext, SerenaAgentMode
from serena.config.serena_config import (
LanguageBackend,
ModeSelectionDefinition,
ProjectConfig,
RegisteredProject,
SerenaConfig,
SerenaPaths,
)
from serena.constants import (
DEFAULT_CONTEXT,
PROMPT_TEMPLATES_DIR_INTERNAL,
SERENA_LOG_FORMAT,
SERENAS_OWN_CONTEXT_YAMLS_DIR,
SERENAS_OWN_MODE_YAMLS_DIR,
)
from serena.mcp import SerenaMCPFactory
from serena.project import Project
from serena.tools import FindReferencingSymbolsTool, FindSymbolTool, GetSymbolsOverviewTool, SearchForPatternTool, ToolRegistry
from serena.util.dataclass import get_dataclass_default
from serena.util.logging import MemoryLogHandler
from solidlsp.ls_config import Language
from solidlsp.ls_types import SymbolKind
from solidlsp.util.subprocess_util import subprocess_kwargs
# Module-level logger for the CLI entry points.
log = logging.getLogger(__name__)

# Width passed to click's max_content_width context setting when rendering help texts.
_MAX_CONTENT_WIDTH = 100

# Shared help text for the repeated --mode option used by several commands below.
_MODES_EXPLANATION = f"""\b\nBuilt-in mode names or paths to custom mode YAMLs with which to
override the default modes defined in the global Serena configuration or
the active project.
For details on mode configuration, see
https://oraios.github.io/serena/02-usage/050_configuration.html#modes.
If no configuration changes were made, the base defaults are:
{get_dataclass_default(SerenaConfig, "default_modes")}.
Overriding them means that they no longer apply, so you will need to
re-specify them in addition to further modes if you want to keep them."""
def find_project_root(root: str | Path | None = None) -> str | None:
"""Find project root by walking up from CWD.
Checks for .serena/project.yml first (explicit Serena project), then .git (git root).
:param root: If provided, constrains the search to this directory and below
(acts as a virtual filesystem root). Search stops at this boundary.
:return: absolute path to project root or None if not suitable root is found
"""
current = Path.cwd().resolve()
boundary = Path(root).resolve() if root is not None else None
def ancestors() -> Iterator[Path]:
"""Yield current directory and ancestors up to boundary."""
yield current
for parent in current.parents:
yield parent
if boundary is not None and parent == boundary:
return
# First pass: look for .serena
for directory in ancestors():
if (directory / ".serena" / "project.yml").is_file():
return str(directory)
# Second pass: look for .git
for directory in ancestors():
if (directory / ".git").exists(): # .git can be file (worktree) or dir
return str(directory)
return None
# --------------------- Utilities -------------------------------------
def _open_in_editor(path: str) -> None:
    """Open the given file in the system's default editor or viewer."""
    run_kwargs = subprocess_kwargs()
    editor = os.environ.get("EDITOR")
    try:
        if editor:
            # An explicitly configured $EDITOR always takes precedence.
            subprocess.run([editor, path], check=False, **run_kwargs)
            return
        if sys.platform.startswith("win"):
            try:
                os.startfile(path)
            except OSError:
                subprocess.run(["notepad.exe", path], check=False, **run_kwargs)
            return
        # macOS uses `open`; other POSIX systems use `xdg-open`.
        opener = "open" if sys.platform == "darwin" else "xdg-open"
        subprocess.run([opener, path], check=False, **run_kwargs)
    except Exception as e:
        print(f"Failed to open {path}: {e}")
class ProjectType(click.ParamType):
    """ParamType allowing either a project name or a path to a project directory."""

    name = "[PROJECT_NAME|PROJECT_PATH]"

    def convert(self, value: str, param: Any, ctx: Any) -> str:
        """Resolve existing directory paths to absolute form; pass names through unchanged."""
        candidate = Path(value).resolve()
        if candidate.exists() and candidate.is_dir():
            return str(candidate)
        return value


PROJECT_TYPE = ProjectType()
class AutoRegisteringGroup(click.Group):
    """
    A click.Group subclass that automatically registers any click.Command
    attributes defined on the class into the group.

    After initialization, it inspects its own class for attributes that are
    instances of click.Command (typically created via @click.command) and
    calls self.add_command(cmd) on each. This lets you define your commands
    as static methods on the subclass for IDE-friendly organization without
    manual registration.
    """

    def __init__(self, name: str, help: str):
        super().__init__(name=name, help=help)
        # Register every click.Command that was defined as a class attribute.
        class_attrs = (getattr(self.__class__, attr) for attr in dir(self.__class__))
        for command in class_attrs:
            if isinstance(command, click.Command):
                self.add_command(command)
class TopLevelCommands(AutoRegisteringGroup):
    """Root CLI group containing the core Serena commands.

    Commands are defined as static methods on this class and picked up
    automatically by AutoRegisteringGroup.__init__.
    """

    def __init__(self) -> None:
        super().__init__(name="serena", help="Serena CLI commands. You can run `<command> --help` for more info on each command.")
    @staticmethod
    @click.command("start-mcp-server", help="Starts the Serena MCP server.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    @click.option("--project", "project", type=PROJECT_TYPE, default=None, help="Path or name of project to activate at startup.")
    @click.option("--project-file", "project", type=PROJECT_TYPE, default=None, help="[DEPRECATED] Use --project instead.")
    @click.argument("project_file_arg", type=PROJECT_TYPE, required=False, default=None, metavar="")
    @click.option(
        "--context", type=str, default=DEFAULT_CONTEXT, show_default=True, help="Built-in context name or path to custom context YAML."
    )
    @click.option(
        "--mode",
        "modes",
        type=str,
        multiple=True,
        default=(),
        show_default=False,
        help=_MODES_EXPLANATION,
    )
    @click.option(
        "--language-backend",
        type=click.Choice([lb.value for lb in LanguageBackend]),
        default=None,
        help="Override the configured language backend.",
    )
    @click.option(
        "--transport",
        type=click.Choice(["stdio", "sse", "streamable-http"]),
        default="stdio",
        show_default=True,
        help="Transport protocol.",
    )
    @click.option(
        "--host",
        type=str,
        default="0.0.0.0",
        show_default=True,
        help="Listen address for the MCP server (when using corresponding transport).",
    )
    @click.option(
        "--port", type=int, default=8000, show_default=True, help="Listen port for the MCP server (when using corresponding transport)."
    )
    @click.option(
        "--enable-web-dashboard",
        type=bool,
        is_flag=False,
        default=None,
        help="Enable the web dashboard (overriding the setting in Serena's config). "
        "It is recommended to always enable the dashboard. If you don't want the browser to open on startup, set open-web-dashboard to False. "
        "For more information, see\nhttps://oraios.github.io/serena/02-usage/060_dashboard.html",
    )
    @click.option(
        "--enable-gui-log-window",
        type=bool,
        is_flag=False,
        default=None,
        help="Enable the gui log window (currently only displays logs; overriding the setting in Serena's config).",
    )
    @click.option(
        "--open-web-dashboard",
        type=bool,
        is_flag=False,
        default=None,
        help="Open Serena's dashboard in your browser after MCP server startup (overriding the setting in Serena's config).",
    )
    @click.option(
        "--log-level",
        type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
        default=None,
        help="Override log level in config.",
    )
    @click.option("--trace-lsp-communication", type=bool, is_flag=False, default=None, help="Whether to trace LSP communication.")
    @click.option("--tool-timeout", type=float, default=None, help="Override tool execution timeout in config.")
    @click.option(
        "--project-from-cwd",
        is_flag=True,
        default=False,
        help="Auto-detect project from current working directory (searches for .serena/project.yml or .git, falls back to CWD). Intended for CLI-based agents like Claude Code, Gemini and Codex.",
    )
    def start_mcp_server(
        project: str | None,
        project_file_arg: str | None,
        project_from_cwd: bool | None,
        context: str,
        modes: Sequence[str],
        language_backend: str | None,
        transport: Literal["stdio", "sse", "streamable-http"],
        host: str,
        port: int,
        enable_web_dashboard: bool | None,
        open_web_dashboard: bool | None,
        enable_gui_log_window: bool | None,
        log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] | None,
        trace_lsp_communication: bool | None,
        tool_timeout: float | None,
    ) -> None:
        """Entry point for `serena start-mcp-server`: configures logging, resolves the
        project to activate and runs the MCP server with the selected transport."""
        # initialize logging, using INFO level initially (will later be adjusted by SerenaAgent according to the config)
        # * memory log handler (for use by GUI/Dashboard)
        # * stream handler for stderr (for direct console output, which will also be captured by clients like Claude Desktop)
        # * file handler
        # (Note that stdout must never be used for logging, as it is used by the MCP server to communicate with the client.)
        Logger.root.setLevel(logging.INFO)
        formatter = logging.Formatter(SERENA_LOG_FORMAT)
        memory_log_handler = MemoryLogHandler()
        Logger.root.addHandler(memory_log_handler)
        stderr_handler = logging.StreamHandler(stream=sys.stderr)
        stderr_handler.formatter = formatter
        Logger.root.addHandler(stderr_handler)
        log_path = SerenaPaths().get_next_log_file_path("mcp")
        file_handler = logging.FileHandler(log_path, mode="w")
        file_handler.formatter = formatter
        Logger.root.addHandler(file_handler)
        log.info("Initializing Serena MCP server")
        log.info("Storing logs in %s", log_path)
        # Handle --project-from-cwd flag
        if project_from_cwd:
            # mutually exclusive with any explicit project specification
            if project is not None or project_file_arg is not None:
                raise click.UsageError("--project-from-cwd cannot be used with --project or positional project argument")
            project = find_project_root()
            if project is not None:
                log.info("Auto-detected project root: %s", project)
            else:
                log.warning("No project root found from %s; not activating any project", os.getcwd())
        # the deprecated positional argument, if given, takes precedence over --project/--project-file
        project_file = project_file_arg or project
        factory = SerenaMCPFactory(context=context, project=project_file, memory_log_handler=memory_log_handler)
        server = factory.create_mcp_server(
            host=host,
            port=port,
            modes=modes,
            language_backend=LanguageBackend.from_str(language_backend) if language_backend else None,
            enable_web_dashboard=enable_web_dashboard,
            open_web_dashboard=open_web_dashboard,
            enable_gui_log_window=enable_gui_log_window,
            log_level=log_level,
            trace_lsp_communication=trace_lsp_communication,
            tool_timeout=tool_timeout,
        )
        if project_file_arg:
            log.warning(
                "Positional project arg is deprecated; use --project instead. Used: %s",
                project_file,
            )
        log.info("Starting MCP server …")
        server.run(transport=transport)
    @staticmethod
    @click.command(
        "print-system-prompt", help="Print the system prompt for a project.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH}
    )
    @click.argument("project", type=click.Path(exists=True), default=os.getcwd(), required=False)
    @click.option(
        "--log-level",
        type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
        default="WARNING",
        help="Log level for prompt generation.",
    )
    @click.option("--only-instructions", is_flag=True, help="Print only the initial instructions, without prefix/postfix.")
    @click.option(
        "--context", type=str, default=DEFAULT_CONTEXT, show_default=True, help="Built-in context name or path to custom context YAML."
    )
    @click.option(
        "--mode",
        "modes",
        type=str,
        multiple=True,
        default=(),
        show_default=False,
        help=_MODES_EXPLANATION,
    )
    def print_system_prompt(
        project: str, log_level: str, only_instructions: bool, context: str, modes: Sequence[str] | None = None
    ) -> None:
        """Instantiates a SerenaAgent for the given project and prints its initial
        instructions, optionally wrapped in a fixed prefix/postfix."""
        prefix = "You will receive access to Serena's symbolic tools. Below are instructions for using them, take them into account."
        postfix = "You begin by acknowledging that you understood the above instructions and are ready to receive tasks."
        # imported here rather than at module level — presumably to avoid an import
        # cycle; confirm before moving this to the top of the file
        from serena.tools.workflow_tools import InitialInstructionsTool

        lvl = logging.getLevelNamesMapping()[log_level.upper()]
        logging.configure(level=lvl)
        context_instance = SerenaAgentContext.load(context)
        modes_selection_def: ModeSelectionDefinition | None = None
        if modes:
            modes_selection_def = ModeSelectionDefinition(default_modes=modes)
        # the web dashboard is explicitly disabled for this one-shot command
        agent = SerenaAgent(
            project=os.path.abspath(project),
            serena_config=SerenaConfig(web_dashboard=False, log_level=lvl),
            context=context_instance,
            modes=modes_selection_def,
        )
        tool = agent.get_tool(InitialInstructionsTool)
        instr = tool.apply()
        if only_instructions:
            print(instr)
        else:
            print(f"{prefix}\n{instr}\n{postfix}")
@staticmethod
@click.command(
"start-project-server",
help="Starts the Serena project server, which exposes project querying capabilities via HTTP.",
context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
)
@click.option(
"--host",
type=str,
default="127.0.0.1",
show_default=True,
help="Listen address for the project server.",
)
@click.option(
"--port",
type=int,
default=None,
help="Listen port for the project server (default: ProjectServer.PORT).",
)
@click.option(
"--log-level",
type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
default=None,
help="Override log level in config.",
)
def start_project_server(
host: str,
port: int | None,
log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] | None,
) -> None:
from serena.project_server import ProjectServer
# initialize logging
Logger.root.setLevel(logging.INFO)
formatter = logging.Formatter(SERENA_LOG_FORMAT)
stderr_handler = logging.StreamHandler(stream=sys.stderr)
stderr_handler.formatter = formatter
Logger.root.addHandler(stderr_handler)
log_path = SerenaPaths().get_next_log_file_path("project-server")
file_handler = logging.FileHandler(log_path, mode="w")
file_handler.formatter = formatter
Logger.root.addHandler(file_handler)
if log_level is not None:
Logger.root.setLevel(logging.getLevelNamesMapping()[log_level])
log.info("Starting Serena project server")
log.info("Storing logs in %s", log_path)
server = ProjectServer()
run_kwargs: dict[str, Any] = {"host": host}
if port is not None:
run_kwargs["port"] = port
server.run(**run_kwargs)
class ModeCommands(AutoRegisteringGroup):
    """Group for 'mode' subcommands."""

    def __init__(self) -> None:
        super().__init__(name="mode", help="Manage Serena modes. You can run `mode <command> --help` for more info on each command.")

    @staticmethod
    @click.command("list", help="List available modes.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    def list() -> None:
        """Print each registered mode together with its origin (internal or path of the custom file)."""
        mode_names = SerenaAgentMode.list_registered_mode_names()
        # column width: longest name plus padding; fall back to 20 for an empty registry
        name_column_width = (max(len(n) for n in mode_names) if mode_names else 20) + 4
        for mode_name in mode_names:
            mode_yml_path = SerenaAgentMode.get_path(mode_name)
            if Path(mode_yml_path).is_relative_to(SERENAS_OWN_MODE_YAMLS_DIR):
                descriptor = "(internal)"
            else:
                descriptor = f"(at {mode_yml_path})"
            click.echo(f"{mode_name:<{name_column_width}}{descriptor}")

    @staticmethod
    @click.command("create", help="Create a new mode or copy an internal one.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    @click.option(
        "--name",
        "-n",
        type=str,
        default=None,
        help="Name for the new mode. If --from-internal is passed may be left empty to create a mode of the same name, which will then override the internal mode.",
    )
    @click.option("--from-internal", "from_internal", type=str, default=None, help="Copy from an internal mode.")
    def create(name: str, from_internal: str) -> None:
        """Create a custom mode YAML, either from the template or by copying an internal mode, and open it for editing."""
        if not name and not from_internal:
            raise click.UsageError("Provide at least one of --name or --from-internal.")
        mode_name = name or from_internal
        dest = os.path.join(SerenaPaths().user_modes_dir, f"{mode_name}.yml")
        if from_internal:
            src = os.path.join(SERENAS_OWN_MODE_YAMLS_DIR, f"{from_internal}.yml")
        else:
            src = os.path.join(SERENAS_OWN_MODE_YAMLS_DIR, "mode.template.yml")
        if not os.path.exists(src):
            raise FileNotFoundError(
                f"Internal mode '{from_internal}' not found in {SERENAS_OWN_MODE_YAMLS_DIR}. Available modes: {SerenaAgentMode.list_registered_mode_names()}"
            )
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        shutil.copyfile(src, dest)
        click.echo(f"Created mode '{mode_name}' at {dest}")
        _open_in_editor(dest)

    @staticmethod
    @click.command("edit", help="Edit a custom mode YAML file.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    @click.argument("mode_name")
    def edit(mode_name: str) -> None:
        """Open the custom mode file in the user's editor; guide the user when no such file exists."""
        path = os.path.join(SerenaPaths().user_modes_dir, f"{mode_name}.yml")
        if os.path.exists(path):
            _open_in_editor(path)
            return
        if mode_name in SerenaAgentMode.list_registered_mode_names(include_user_modes=False):
            click.echo(
                f"Mode '{mode_name}' is an internal mode and cannot be edited directly. "
                f"Use 'mode create --from-internal {mode_name}' to create a custom mode that overrides it before editing."
            )
        else:
            click.echo(f"Custom mode '{mode_name}' not found. Create it with: mode create --name {mode_name}.")

    @staticmethod
    @click.command("delete", help="Delete a custom mode file.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    @click.argument("mode_name")
    def delete(mode_name: str) -> None:
        """Remove the custom mode file, if it exists."""
        path = os.path.join(SerenaPaths().user_modes_dir, f"{mode_name}.yml")
        if not os.path.exists(path):
            click.echo(f"Custom mode '{mode_name}' not found.")
            return
        os.remove(path)
        click.echo(f"Deleted custom mode '{mode_name}'.")
class ContextCommands(AutoRegisteringGroup):
    """Group for 'context' subcommands."""

    def __init__(self) -> None:
        super().__init__(
            name="context", help="Manage Serena contexts. You can run `context <command> --help` for more info on each command."
        )

    @staticmethod
    @click.command("list", help="List available contexts.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    def list() -> None:
        """Print each registered context together with its origin (internal or path of the custom file)."""
        context_names = SerenaAgentContext.list_registered_context_names()
        # column width: longest name plus padding; fall back to 20 for an empty registry
        name_column_width = (max(len(n) for n in context_names) if context_names else 20) + 4
        for context_name in context_names:
            context_yml_path = SerenaAgentContext.get_path(context_name)
            if Path(context_yml_path).is_relative_to(SERENAS_OWN_CONTEXT_YAMLS_DIR):
                descriptor = "(internal)"
            else:
                descriptor = f"(at {context_yml_path})"
            click.echo(f"{context_name:<{name_column_width}}{descriptor}")

    @staticmethod
    @click.command(
        "create", help="Create a new context or copy an internal one.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH}
    )
    @click.option(
        "--name",
        "-n",
        type=str,
        default=None,
        help="Name for the new context. If --from-internal is passed may be left empty to create a context of the same name, which will then override the internal context",
    )
    @click.option("--from-internal", "from_internal", type=str, default=None, help="Copy from an internal context.")
    def create(name: str, from_internal: str) -> None:
        """Create a custom context YAML, either from the template or by copying an internal context, and open it for editing."""
        if not name and not from_internal:
            raise click.UsageError("Provide at least one of --name or --from-internal.")
        ctx_name = name or from_internal
        dest = os.path.join(SerenaPaths().user_contexts_dir, f"{ctx_name}.yml")
        if from_internal:
            src = os.path.join(SERENAS_OWN_CONTEXT_YAMLS_DIR, f"{from_internal}.yml")
        else:
            src = os.path.join(SERENAS_OWN_CONTEXT_YAMLS_DIR, "context.template.yml")
        if not os.path.exists(src):
            raise FileNotFoundError(
                f"Internal context '{from_internal}' not found in {SERENAS_OWN_CONTEXT_YAMLS_DIR}. Available contexts: {SerenaAgentContext.list_registered_context_names()}"
            )
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        shutil.copyfile(src, dest)
        click.echo(f"Created context '{ctx_name}' at {dest}")
        _open_in_editor(dest)

    @staticmethod
    @click.command("edit", help="Edit a custom context YAML file.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    @click.argument("context_name")
    def edit(context_name: str) -> None:
        """Open the custom context file in the user's editor; guide the user when no such file exists."""
        path = os.path.join(SerenaPaths().user_contexts_dir, f"{context_name}.yml")
        if os.path.exists(path):
            _open_in_editor(path)
            return
        if context_name in SerenaAgentContext.list_registered_context_names(include_user_contexts=False):
            click.echo(
                f"Context '{context_name}' is an internal context and cannot be edited directly. "
                f"Use 'context create --from-internal {context_name}' to create a custom context that overrides it before editing."
            )
        else:
            click.echo(f"Custom context '{context_name}' not found. Create it with: context create --name {context_name}.")

    @staticmethod
    @click.command("delete", help="Delete a custom context file.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    @click.argument("context_name")
    def delete(context_name: str) -> None:
        """Remove the custom context file, if it exists."""
        path = os.path.join(SerenaPaths().user_contexts_dir, f"{context_name}.yml")
        if not os.path.exists(path):
            click.echo(f"Custom context '{context_name}' not found.")
            return
        os.remove(path)
        click.echo(f"Deleted custom context '{context_name}'.")
class SerenaConfigCommands(AutoRegisteringGroup):
    """Group for 'config' subcommands."""

    def __init__(self) -> None:
        super().__init__(name="config", help="Manage Serena configuration.")

    @staticmethod
    @click.command(
        "edit",
        help="Edit serena_config.yml in your default editor. Will create a config file from the template if no config is found.",
        context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
    )
    def edit() -> None:
        """Open the Serena configuration file (created from the template if absent) in the user's editor."""
        config_path = SerenaConfig.from_config_file().config_file_path
        # from_config_file is expected to materialize the file, so the path must be known here
        assert config_path is not None
        _open_in_editor(config_path)
class ProjectCommands(AutoRegisteringGroup):
    """Group for 'project' subcommands."""

    def __init__(self) -> None:
        super().__init__(
            name="project", help="Manage Serena projects. You can run `project <command> --help` for more info on each command."
        )

    @staticmethod
    def _create_project(project_path: str, name: str | None, language: tuple[str, ...]) -> RegisteredProject:
        """
        Helper method to create a project configuration file.

        :param project_path: Path to the project directory
        :param name: Optional project name (defaults to directory name if not specified)
        :param language: Tuple of language names
        :raises FileExistsError: If project.yml already exists
        :raises ValueError: If an unsupported language is specified
        :return: the RegisteredProject instance
        """
        project_root = Path(project_path).resolve()
        serena_config = SerenaConfig.from_config_file()
        yml_path = serena_config.get_project_yml_location(str(project_root))
        if os.path.exists(yml_path):
            raise FileExistsError(f"Project file {yml_path} already exists.")
        languages: list[Language] = []
        if language:
            for lang in language:
                try:
                    languages.append(Language(lang.lower()))
                except ValueError as e:
                    all_langs = [supported_language.value for supported_language in Language]
                    # chain the original error so the root cause remains visible in tracebacks
                    raise ValueError(f"Unknown language '{lang}'. Supported: {all_langs}") from e
        generated_conf = ProjectConfig.autogenerate(
            project_root=project_path,
            serena_config=serena_config,
            project_name=name,
            languages=languages if languages else None,
            interactive=True,
        )
        languages_str = ", ".join([lang.value for lang in generated_conf.languages]) if generated_conf.languages else "N/A"
        click.echo(f"Generated project with languages {{{languages_str}}} at {yml_path}.")
        # register the project if it is not registered yet
        registered_project = serena_config.get_registered_project(str(project_root))
        if registered_project is None:
            registered_project = RegisteredProject(str(project_root), generated_conf)
            serena_config.add_registered_project(registered_project)
        return registered_project

    @staticmethod
    @click.command("create", help="Create a new Serena project configuration.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    @click.argument("project_path", type=click.Path(exists=True, file_okay=False), default=os.getcwd())
    @click.option("--name", type=str, default=None, help="Project name; defaults to directory name if not specified.")
    @click.option(
        "--language", type=str, multiple=True, help="Programming language(s); inferred if not specified. Can be passed multiple times."
    )
    @click.option("--index", is_flag=True, help="Index the project after creation.")
    @click.option(
        "--log-level",
        type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
        default="WARNING",
        help="Log level for indexing (only used if --index is set).",
    )
    @click.option("--timeout", type=float, default=10, help="Timeout for indexing a single file (only used if --index is set).")
    def create(project_path: str, name: str | None, language: tuple[str, ...], index: bool, log_level: str, timeout: float) -> None:
        """Create a new project configuration and optionally index the project afterwards."""
        try:
            registered_project = ProjectCommands._create_project(project_path, name, language)
            if index:
                click.echo("Indexing project...")
                ProjectCommands._index_project(registered_project, log_level, timeout=timeout)
        except FileExistsError as e:
            raise click.ClickException(f"Project already exists: {e}\nUse 'serena project index' to index an existing project.") from e
        except ValueError as e:
            raise click.ClickException(str(e)) from e

    @staticmethod
    @click.command(
        "index",
        help="Index a project by saving symbols to the LSP cache. Auto-creates project.yml if it doesn't exist.",
        context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
    )
    @click.argument("project", type=PROJECT_TYPE, default=os.getcwd(), required=False)
    @click.option("--name", type=str, default=None, help="Project name (only used if auto-creating project.yml).")
    @click.option(
        "--language",
        type=str,
        multiple=True,
        help="Programming language(s) (only used if auto-creating project.yml). Inferred if not specified.",
    )
    @click.option(
        "--log-level",
        type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
        default="WARNING",
        help="Log level for indexing.",
    )
    @click.option("--timeout", type=float, default=10, help="Timeout for indexing a single file.")
    def index(project: str, name: str | None, language: tuple[str, ...], log_level: str, timeout: float) -> None:
        """Index the given project, auto-creating its configuration first if it is not yet registered."""
        serena_config = SerenaConfig.from_config_file()
        registered_project = serena_config.get_registered_project(project, autoregister=True)
        if registered_project is None:
            # Project not found; auto-create it
            click.echo(f"No existing project found for '{project}'. Attempting auto-creation ...")
            try:
                registered_project = ProjectCommands._create_project(project, name, language)
            except Exception as e:
                raise click.ClickException(str(e)) from e
        ProjectCommands._index_project(registered_project, log_level, timeout=timeout)

    @staticmethod
    def _index_project(registered_project: RegisteredProject, log_level: str, timeout: float) -> None:
        """
        Index all source files of the given project, persisting symbols to the language server caches.
        Failures for individual files are collected and written to a log file instead of aborting the run.

        :param registered_project: the project to index
        :param log_level: name of the log level to apply while indexing
        :param timeout: timeout for indexing a single file — NOTE(review): currently unused in this implementation; confirm intent
        """
        lvl = logging.getLevelNamesMapping()[log_level.upper()]
        logging.configure(level=lvl)
        serena_config = SerenaConfig.from_config_file()
        proj = registered_project.get_project_instance(serena_config=serena_config)
        click.echo(f"Indexing symbols in {proj} …")
        ls_mgr = proj.create_language_server_manager()
        try:
            log_file = os.path.join(proj.project_root, ".serena", "logs", "indexing.txt")
            files = proj.gather_source_files()
            collected_exceptions: list[Exception] = []
            files_failed = []
            language_file_counts: dict[Language, int] = collections.defaultdict(lambda: 0)
            for i, f in enumerate(tqdm(files, desc="Indexing")):
                try:
                    ls = ls_mgr.get_language_server(f)
                    # requesting the document symbols populates the LSP cache as a side effect
                    ls.request_document_symbols(f)
                    language_file_counts[ls.language] += 1
                except Exception as e:
                    log.error(f"Failed to index {f}, continuing.")
                    collected_exceptions.append(e)
                    files_failed.append(f)
                # periodically persist caches so progress survives an interruption
                if (i + 1) % 10 == 0:
                    ls_mgr.save_all_caches()
            reported_language_file_counts = {k.value: v for k, v in language_file_counts.items()}
            click.echo(f"Indexed files per language: {dict_string(reported_language_file_counts, brackets=None)}")
            ls_mgr.save_all_caches()
            if len(files_failed) > 0:
                # write failed files and their exceptions pairwise to the indexing log
                os.makedirs(os.path.dirname(log_file), exist_ok=True)
                with open(log_file, "w") as f:
                    for file, exception in zip(files_failed, collected_exceptions, strict=True):
                        f.write(f"{file}\n")
                        f.write(f"{exception}\n")
                click.echo(f"Failed to index {len(files_failed)} files, see:\n{log_file}")
        finally:
            ls_mgr.stop_all()

    @staticmethod
    @click.command(
        "is_ignored_path",
        help="Check if a path is ignored by the project configuration.",
        context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
    )
    @click.argument("path", type=click.Path(exists=False, file_okay=True, dir_okay=True))
    @click.argument("project", type=click.Path(exists=True, file_okay=False, dir_okay=True), default=os.getcwd())
    def is_ignored_path(path: str, project: str) -> None:
        """
        Check if a given path is ignored by the project configuration.

        :param path: The path to check.
        :param project: The path to the project directory, defaults to the current working directory.
        """
        serena_config = SerenaConfig.from_config_file()
        proj = Project.load(os.path.abspath(project), serena_config=serena_config)
        if os.path.isabs(path):
            path = os.path.relpath(path, start=proj.project_root)
        is_ignored = proj.is_ignored_path(path)
        # bug fix: previously rendered as "... IS IS NOT ignored ..." for non-ignored paths
        click.echo(f"Path '{path}' {'IS' if is_ignored else 'IS NOT'} ignored by the project configuration.")

    @staticmethod
    @click.command(
        "index-file",
        help="Index a single file by saving its symbols to the LSP cache.",
        context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
    )
    @click.argument("file", type=click.Path(exists=True, file_okay=True, dir_okay=False))
    @click.argument("project", type=click.Path(exists=True, file_okay=False, dir_okay=True), default=os.getcwd())
    @click.option("--verbose", "-v", is_flag=True, help="Print detailed information about the indexed symbols.")
    def index_file(file: str, project: str, verbose: bool) -> None:
        """
        Index a single file by saving its symbols to the LSP cache, useful for debugging.

        :param file: path to the file to index, must be inside the project directory.
        :param project: path to the project directory, defaults to the current working directory.
        :param verbose: if set, prints detailed information about the indexed symbols.
        """
        serena_config = SerenaConfig.from_config_file()
        proj = Project.load(os.path.abspath(project), serena_config=serena_config)
        if os.path.isabs(file):
            file = os.path.relpath(file, start=proj.project_root)
        if proj.is_ignored_path(file, ignore_non_source_files=True):
            click.echo(f"'{file}' is ignored or declared as non-code file by the project configuration, won't index.")
            sys.exit(1)  # sys.exit instead of the site-provided exit() builtin
        ls_mgr = proj.create_language_server_manager()
        try:
            for ls in ls_mgr.iter_language_servers():
                click.echo(f"Indexing for language {ls.language.value} …")
                document_symbols = ls.request_document_symbols(file)
                symbols, _ = document_symbols.get_all_symbols_and_roots()
                if verbose:
                    click.echo(f"Symbols in file '{file}':")
                    for symbol in symbols:
                        click.echo(f"  - {symbol['name']} at line {symbol['selectionRange']['start']['line']} of kind {symbol['kind']}")
                ls.save_cache()
                click.echo(f"Successfully indexed file '{file}', {len(symbols)} symbols saved to cache in {ls.cache_dir}.")
        finally:
            ls_mgr.stop_all()

    @staticmethod
    @click.command(
        "health-check",
        help="Perform a comprehensive health check of the project's tools and language server.",
        context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
    )
    @click.argument("project", type=click.Path(exists=True, file_okay=False, dir_okay=True), default=os.getcwd())
    def health_check(project: str) -> None:
        """
        Perform a comprehensive health check of the project's tools and language server:
        creates an agent, picks a non-empty source file, and exercises the symbol/search tools,
        writing a detailed log to .serena/logs/health-checks.

        :param project: path to the project directory, defaults to the current working directory.
        """
        # NOTE: completely written by Claude Code, only functionality was reviewed, not implementation
        logging.configure(level=logging.INFO)
        project_path = os.path.abspath(project)
        serena_config = SerenaConfig.from_config_file()
        serena_config.language_backend = LanguageBackend.LSP
        serena_config.gui_log_window = False
        serena_config.web_dashboard = False
        proj = Project.load(project_path, serena_config=serena_config)

        # Create log file with timestamp
        timestamp = datetime_tag()
        log_dir = os.path.join(project_path, ".serena", "logs", "health-checks")
        os.makedirs(log_dir, exist_ok=True)
        log_file = os.path.join(log_dir, f"health_check_{timestamp}.log")

        with FileLoggerContext(log_file, append=False, enabled=True):
            log.info("Starting health check for project: %s", project_path)
            try:
                # Create SerenaAgent with dashboard disabled
                log.info("Creating SerenaAgent with disabled dashboard...")
                agent = SerenaAgent(project=project_path, serena_config=serena_config)
                log.info("SerenaAgent created successfully")

                # Find first non-empty file that can be analyzed
                log.info("Searching for analyzable files...")
                files = proj.gather_source_files()
                target_file = None
                for file_path in files:
                    try:
                        full_path = os.path.join(project_path, file_path)
                        if os.path.getsize(full_path) > 0:
                            target_file = file_path
                            log.info("Found analyzable file: %s", target_file)
                            break
                    except (OSError, FileNotFoundError):
                        continue
                if not target_file:
                    log.error("No analyzable files found in project")
                    click.echo("❌ Health check failed: No analyzable files found")
                    click.echo(f"Log saved to: {log_file}")
                    return

                # Get tools from agent
                overview_tool = agent.get_tool(GetSymbolsOverviewTool)
                find_symbol_tool = agent.get_tool(FindSymbolTool)
                find_refs_tool = agent.get_tool(FindReferencingSymbolsTool)
                search_pattern_tool = agent.get_tool(SearchForPatternTool)

                # Test 1: Get symbols overview
                log.info("Testing GetSymbolsOverviewTool on file: %s", target_file)
                overview_data = agent.execute_task(lambda: overview_tool.get_symbol_overview(target_file))
                # lazy %-formatting for consistency with the other log calls
                log.info("GetSymbolsOverviewTool returned: %s", overview_data)
                if not overview_data:
                    log.error("No symbols found in file %s", target_file)
                    click.echo("❌ Health check failed: No symbols found in target file")
                    click.echo(f"Log saved to: {log_file}")
                    return

                # Extract suitable symbol (prefer class or function over variables)
                preferred_kinds = {SymbolKind.Class.name, SymbolKind.Function.name, SymbolKind.Method.name, SymbolKind.Constructor.name}
                selected_symbol = None
                for symbol in overview_data:
                    if symbol.get("kind") in preferred_kinds:
                        selected_symbol = symbol
                        break
                # If no preferred symbol found, use first available
                if not selected_symbol:
                    selected_symbol = overview_data[0]
                    log.info("No class or function found, using first available symbol")
                symbol_name = selected_symbol["name"]
                symbol_kind = selected_symbol["kind"]
                log.info("Using symbol for testing: %s (kind: %s)", symbol_name, symbol_kind)

                # Test 2: FindSymbolTool
                log.info("Testing FindSymbolTool for symbol: %s", symbol_name)
                find_symbol_result = agent.execute_task(
                    lambda: find_symbol_tool.apply(symbol_name, relative_path=target_file, include_body=True)
                )
                find_symbol_data = json.loads(find_symbol_result)
                log.info("FindSymbolTool found %d matches for symbol %s", len(find_symbol_data), symbol_name)

                # Test 3: FindReferencingSymbolsTool
                log.info("Testing FindReferencingSymbolsTool for symbol: %s", symbol_name)
                try:
                    find_refs_result = agent.execute_task(lambda: find_refs_tool.apply(symbol_name, relative_path=target_file))
                    find_refs_data = json.loads(find_refs_result)
                    log.info("FindReferencingSymbolsTool found %d references for symbol %s", len(find_refs_data), symbol_name)
                except Exception as e:
                    log.warning("FindReferencingSymbolsTool failed for symbol %s: %s", symbol_name, str(e))
                    find_refs_data = []

                # Test 4: SearchForPatternTool to verify references
                log.info("Testing SearchForPatternTool for pattern: %s", symbol_name)
                try:
                    search_result = agent.execute_task(
                        lambda: search_pattern_tool.apply(substring_pattern=symbol_name, restrict_search_to_code_files=True)
                    )
                    search_data = json.loads(search_result)
                    pattern_matches = sum(len(matches) for matches in search_data.values())
                    log.info("SearchForPatternTool found %d pattern matches for %s", pattern_matches, symbol_name)
                except Exception as e:
                    log.warning("SearchForPatternTool failed for pattern %s: %s", symbol_name, str(e))
                    pattern_matches = 0

                # Verify tools worked as expected
                tools_working = True
                if not find_symbol_data:
                    log.error("FindSymbolTool returned no results")
                    tools_working = False
                if len(find_refs_data) == 0 and pattern_matches == 0:
                    log.warning("Both FindReferencingSymbolsTool and SearchForPatternTool found no matches - this might indicate an issue")

                log.info("Health check completed successfully")
                if tools_working:
                    click.echo("✅ Health check passed - All tools working correctly")
                else:
                    click.echo("⚠️ Health check completed with warnings - Check log for details")
            except Exception as e:
                log.exception("Health check failed with exception: %s", str(e))
                click.echo(f"❌ Health check failed: {e!s}")
            finally:
                click.echo(f"Log saved to: {log_file}")
class ToolCommands(AutoRegisteringGroup):
    """Group for 'tool' subcommands."""

    def __init__(self) -> None:
        super().__init__(
            name="tools",
            help="Commands related to Serena's tools. You can run `serena tools <command> --help` for more info on each command.",
        )

    @staticmethod
    @click.command(
        "list",
        help="Prints an overview of the tools that are active by default (not just the active ones for your project). For viewing all tools, pass `--all / -a`",
        context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
    )
    @click.option("--quiet", "-q", is_flag=True)
    @click.option("--all", "-a", "include_optional", is_flag=True, help="List all tools, including those not enabled by default.")
    @click.option("--only-optional", is_flag=True, help="List only optional tools (those not enabled by default).")
    def list(quiet: bool = False, include_optional: bool = False, only_optional: bool = False) -> None:
        """Print tool names only (quiet mode) or the full tool overview."""
        registry = ToolRegistry()
        if not quiet:
            ToolRegistry().print_tool_overview(include_optional=include_optional, only_optional=only_optional)
            return
        if only_optional:
            names = registry.get_tool_names_optional()
        elif include_optional:
            names = registry.get_tool_names()
        else:
            names = registry.get_tool_names_default_enabled()
        for name in names:
            click.echo(name)

    @staticmethod
    @click.command(
        "description",
        help="Print the description of a tool, optionally with a specific context (the latter may modify the default description).",
        context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
    )
    @click.argument("tool_name", type=str)
    @click.option("--context", type=str, default=None, help="Context name or path to context file.")
    def description(tool_name: str, context: str | None = None) -> None:
        """Print the MCP-facing description of a single tool, as adjusted by the given context (if any)."""
        serena_context = SerenaAgentContext.load(context) if context else None
        agent = SerenaAgent(
            project=None,
            serena_config=SerenaConfig(web_dashboard=False, log_level=logging.INFO),
            context=serena_context,
        )
        mcp_tool = SerenaMCPFactory.make_mcp_tool(agent.get_tool_by_name(tool_name))
        click.echo(mcp_tool.description)
class PromptCommands(AutoRegisteringGroup):
    """Group for 'prompts' subcommands (prompt templates outside of contexts and modes)."""

    def __init__(self) -> None:
        super().__init__(name="prompts", help="Commands related to Serena's prompts that are outside of contexts and modes.")

    @staticmethod
    def _get_user_prompt_yaml_path(prompt_yaml_name: str) -> str:
        """Return the path of the user's override file for the given prompt yaml, creating the directory if needed."""
        templates_dir = SerenaPaths().user_prompt_templates_dir
        os.makedirs(templates_dir, exist_ok=True)
        return os.path.join(templates_dir, prompt_yaml_name)

    @staticmethod
    def _normalized_yaml_name(prompt_yaml_name: str) -> str:
        """Allow users to pass names without the '.yml' extension by appending it when missing."""
        if prompt_yaml_name.endswith(".yml"):
            return prompt_yaml_name
        return prompt_yaml_name + ".yml"

    @staticmethod
    @click.command(
        "list", help="Lists yamls that are used for defining prompts.", context_settings={"max_content_width": _MAX_CONTENT_WIDTH}
    )
    def list() -> None:
        """List internal prompt yamls, marking those that are merged with a user override."""
        internal_yaml_names = [os.path.basename(f) for f in glob.glob(PROMPT_TEMPLATES_DIR_INTERNAL + "/*.yml")]
        for prompt_yaml_name in internal_yaml_names:
            user_prompt_yaml_path = PromptCommands._get_user_prompt_yaml_path(prompt_yaml_name)
            if os.path.exists(user_prompt_yaml_path):
                click.echo(f"{user_prompt_yaml_path} merged with default prompts in {prompt_yaml_name}")
            else:
                click.echo(prompt_yaml_name)

    @staticmethod
    @click.command(
        "create-override",
        help="Create an override of an internal prompts yaml for customizing Serena's prompts",
        context_settings={"max_content_width": _MAX_CONTENT_WIDTH},
    )
    @click.argument("prompt_yaml_name")
    def create_override(prompt_yaml_name: str) -> None:
        """
        :param prompt_yaml_name: The yaml name of the prompt you want to override. Call the `list` command for discovering valid prompt yaml names.
        """
        prompt_yaml_name = PromptCommands._normalized_yaml_name(prompt_yaml_name)
        user_prompt_yaml_path = PromptCommands._get_user_prompt_yaml_path(prompt_yaml_name)
        if os.path.exists(user_prompt_yaml_path):
            raise FileExistsError(f"{user_prompt_yaml_path} already exists.")
        serena_prompt_yaml_path = os.path.join(PROMPT_TEMPLATES_DIR_INTERNAL, prompt_yaml_name)
        shutil.copyfile(serena_prompt_yaml_path, user_prompt_yaml_path)
        _open_in_editor(user_prompt_yaml_path)

    @staticmethod
    @click.command(
        "edit-override", help="Edit an existing prompt override file", context_settings={"max_content_width": _MAX_CONTENT_WIDTH}
    )
    @click.argument("prompt_yaml_name")
    def edit_override(prompt_yaml_name: str) -> None:
        """
        :param prompt_yaml_name: The yaml name of the prompt override to edit.
        """
        prompt_yaml_name = PromptCommands._normalized_yaml_name(prompt_yaml_name)
        user_prompt_yaml_path = PromptCommands._get_user_prompt_yaml_path(prompt_yaml_name)
        if not os.path.exists(user_prompt_yaml_path):
            click.echo(f"Override file '{prompt_yaml_name}' not found. Create it with: prompts create-override {prompt_yaml_name}")
            return
        _open_in_editor(user_prompt_yaml_path)

    @staticmethod
    @click.command("list-overrides", help="List existing prompt override files", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    def list_overrides() -> None:
        """List user override files that correspond to an internal prompt yaml."""
        user_templates_dir = SerenaPaths().user_prompt_templates_dir
        os.makedirs(user_templates_dir, exist_ok=True)
        internal_yaml_names = {os.path.basename(f) for f in glob.glob(PROMPT_TEMPLATES_DIR_INTERNAL + "/*.yml")}
        for file_path in glob.glob(os.path.join(user_templates_dir, "*.yml")):
            if os.path.basename(file_path) in internal_yaml_names:
                click.echo(file_path)

    @staticmethod
    @click.command("delete-override", help="Delete a prompt override file", context_settings={"max_content_width": _MAX_CONTENT_WIDTH})
    @click.argument("prompt_yaml_name")
    def delete_override(prompt_yaml_name: str) -> None:
        """
        :param prompt_yaml_name: The yaml name of the prompt override to delete.
        """
        prompt_yaml_name = PromptCommands._normalized_yaml_name(prompt_yaml_name)
        user_prompt_yaml_path = PromptCommands._get_user_prompt_yaml_path(prompt_yaml_name)
        if not os.path.exists(user_prompt_yaml_path):
            click.echo(f"Override file '{prompt_yaml_name}' not found.")
            return
        os.remove(user_prompt_yaml_path)
        click.echo(f"Deleted override file '{prompt_yaml_name}'.")
# Expose groups so we can reference them in pyproject.toml
mode = ModeCommands()
context = ContextCommands()
project = ProjectCommands()
config = SerenaConfigCommands()
tools = ToolCommands()
prompts = PromptCommands()
# Expose toplevel commands for the same reason
top_level = TopLevelCommands()
# module-level alias so the MCP server entry point can be referenced directly (e.g. as a console script)
start_mcp_server = top_level.start_mcp_server
# needed for the help script to work - register all subcommands to the top-level group
for subgroup in (mode, context, project, config, tools, prompts):
    top_level.add_command(subgroup)
def get_help() -> str:
    """Return the rendered help text of the top-level Serena CLI group."""
    ctx = click.Context(top_level, info_name="serena")
    return top_level.get_help(ctx)
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/cli.py",
"license": "MIT License",
"lines": 990,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/serena/analytics.py | from __future__ import annotations
import logging
import threading
from abc import ABC, abstractmethod
from collections import defaultdict
from copy import copy
from dataclasses import asdict, dataclass
from enum import Enum
from anthropic.types import MessageParam, MessageTokensCount
from dotenv import load_dotenv
log = logging.getLogger(__name__)
class TokenCountEstimator(ABC):
    """Interface for components that estimate the number of tokens contained in a text."""

    @abstractmethod
    def estimate_token_count(self, text: str) -> int:
        """
        :param text: the text whose token count is to be estimated
        :return: the estimated number of tokens in the text
        """
class TiktokenCountEstimator(TokenCountEstimator):
    """
    Approximate token count using tiktoken.
    """

    def __init__(self, model_name: str = "gpt-4o"):
        """
        The tokenizer will be downloaded on the first initialization, which may take some time.

        :param model_name: see `tiktoken.model` to see available models.
        """
        import tiktoken

        log.info(f"Loading tiktoken encoding for model {model_name}, this may take a while on the first run.")
        self._encoding = tiktoken.encoding_for_model(model_name)

    def estimate_token_count(self, text: str) -> int:
        """Tokenize the text with the loaded encoding and return the token count."""
        token_ids = self._encoding.encode(text)
        return len(token_ids)
class AnthropicTokenCount(TokenCountEstimator):
    """
    The exact count using the Anthropic API.
    Counting is free but rate-limited and requires an API key
    (typically set through an env variable).
    See https://docs.anthropic.com/en/docs/build-with-claude/token-counting
    """

    def __init__(self, model_name: str = "claude-sonnet-4-20250514", api_key: str | None = None):
        """
        :param model_name: the Anthropic model to use for token counting
        :param api_key: the Anthropic API key; if None, variables from a .env file are loaded so the
            client can pick up the key from the environment
        """
        import anthropic

        self._model_name = model_name
        if api_key is None:
            # make variables defined in a .env file available before the client reads the environment
            load_dotenv()
        self._anthropic_client = anthropic.Anthropic(api_key=api_key)

    def _send_count_tokens_request(self, text: str) -> MessageTokensCount:
        # issue a count_tokens request with the text as a single user message
        return self._anthropic_client.messages.count_tokens(
            model=self._model_name,
            messages=[MessageParam(role="user", content=text)],
        )

    def estimate_token_count(self, text: str) -> int:
        return self._send_count_tokens_request(text).input_tokens
class CharCountEstimator(TokenCountEstimator):
    """
    A naive estimator which derives the token count from the character count,
    assuming a fixed average number of characters per token.
    """

    def __init__(self, avg_chars_per_token: int = 4):
        """
        :param avg_chars_per_token: the assumed average number of characters per token; must be positive
        :raises ValueError: if avg_chars_per_token is not positive
        """
        if avg_chars_per_token <= 0:
            raise ValueError(f"avg_chars_per_token must be positive, got {avg_chars_per_token}")
        self._avg_chars_per_token = avg_chars_per_token

    def estimate_token_count(self, text: str) -> int:
        # integer division by the configured average (rounds down)
        return len(text) // self._avg_chars_per_token
# module-level cache ensuring that each registered estimator is instantiated at most once
_registered_token_estimator_instances_cache: dict[RegisteredTokenCountEstimator, TokenCountEstimator] = {}
class RegisteredTokenCountEstimator(Enum):
    """Enumeration of the token count estimators that can be referenced by name."""

    TIKTOKEN_GPT4O = "TIKTOKEN_GPT4O"
    ANTHROPIC_CLAUDE_SONNET_4 = "ANTHROPIC_CLAUDE_SONNET_4"
    CHAR_COUNT = "CHAR_COUNT"

    @classmethod
    def get_valid_names(cls) -> list[str]:
        """
        :return: the names of all registered token count estimators
        """
        return [member.name for member in cls]

    def _create_estimator(self) -> TokenCountEstimator:
        # instantiate the estimator implementation corresponding to this enum member
        if self is RegisteredTokenCountEstimator.TIKTOKEN_GPT4O:
            return TiktokenCountEstimator(model_name="gpt-4o")
        if self is RegisteredTokenCountEstimator.ANTHROPIC_CLAUDE_SONNET_4:
            return AnthropicTokenCount(model_name="claude-sonnet-4-20250514")
        if self is RegisteredTokenCountEstimator.CHAR_COUNT:
            return CharCountEstimator(avg_chars_per_token=4)
        raise ValueError(f"Unknown token count estimator: {self}")

    def load_estimator(self) -> TokenCountEstimator:
        """
        :return: the estimator instance for this member, creating and caching it on first access
        """
        if self not in _registered_token_estimator_instances_cache:
            _registered_token_estimator_instances_cache[self] = self._create_estimator()
        return _registered_token_estimator_instances_cache[self]
class ToolUsageStats:
    """
    Records and aggregates per-tool usage statistics (call counts and estimated token counts)
    in a thread-safe manner.
    """

    @dataclass(kw_only=True)
    class Entry:
        # aggregated usage data for a single tool
        num_times_called: int = 0
        input_tokens: int = 0
        output_tokens: int = 0

        def update_on_call(self, input_tokens: int, output_tokens: int) -> None:
            """
            Update the entry with the number of tokens used for a single call.
            """
            self.num_times_called += 1
            self.input_tokens += input_tokens
            self.output_tokens += output_tokens

    def __init__(self, token_count_estimator: RegisteredTokenCountEstimator = RegisteredTokenCountEstimator.TIKTOKEN_GPT4O):
        """
        :param token_count_estimator: the registered estimator to use for token counting
        """
        self._token_count_estimator = token_count_estimator.load_estimator()
        self._token_estimator_name = token_count_estimator.value
        self._tool_stats: dict[str, ToolUsageStats.Entry] = defaultdict(ToolUsageStats.Entry)
        self._tool_stats_lock = threading.Lock()

    @property
    def token_estimator_name(self) -> str:
        """
        Get the name of the registered token count estimator used.
        """
        return self._token_estimator_name

    def _estimate_token_count(self, text: str) -> int:
        return self._token_count_estimator.estimate_token_count(text)

    def get_stats(self, tool_name: str) -> ToolUsageStats.Entry:
        """
        Get (a copy of) the current usage statistics for a specific tool.
        """
        with self._tool_stats_lock:
            return copy(self._tool_stats[tool_name])

    def record_tool_usage(self, tool_name: str, input_str: str, output_str: str) -> None:
        """Estimate token counts for the given input/output strings and record one call of the tool."""
        num_input_tokens = self._estimate_token_count(input_str)
        num_output_tokens = self._estimate_token_count(output_str)
        with self._tool_stats_lock:
            self._tool_stats[tool_name].update_on_call(num_input_tokens, num_output_tokens)

    def get_tool_stats_dict(self) -> dict[str, dict[str, int]]:
        """
        :return: a mapping from tool name to a dict representation of its usage entry
        """
        with self._tool_stats_lock:
            return {name: asdict(entry) for name, entry in self._tool_stats.items()}

    def clear(self) -> None:
        """Reset all recorded statistics."""
        with self._tool_stats_lock:
            self._tool_stats.clear()
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/analytics.py",
"license": "MIT License",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/solidlsp/language_servers/vts_language_server.py | """
Language Server implementation for TypeScript/JavaScript using https://github.com/yioneko/vtsls,
which provides TypeScript language server functionality via VSCode's TypeScript extension
(contrary to typescript-language-server, which uses the TypeScript compiler directly).
"""
import logging
import os
import pathlib
import shutil
import threading
from typing import cast
from overrides import override
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.ls_utils import PlatformId, PlatformUtils
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
from .common import RuntimeDependency, RuntimeDependencyCollection
log = logging.getLogger(__name__)
class VtsLanguageServer(SolidLanguageServer):
    """
    Provides TypeScript specific instantiation of the LanguageServer class using vtsls.
    Contains various configurations and settings specific to TypeScript via vtsls wrapper.
    """

    def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
        """
        Creates a VtsLanguageServer instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead.

        :param config: the language server configuration
        :param repository_root_path: the root path of the repository to be served
        :param solidlsp_settings: the SolidLSP settings (determine where runtime dependencies are installed)
        """
        vts_lsp_executable_path = self._setup_runtime_dependencies(config, solidlsp_settings)
        super().__init__(
            config,
            repository_root_path,
            ProcessLaunchInfo(cmd=vts_lsp_executable_path, cwd=repository_root_path),
            "typescript",
            solidlsp_settings,
        )
        # set once the server signals readiness (or via the timeout fallback in _start_server)
        self.server_ready = threading.Event()
        # set once the server dynamically registers the workspace/executeCommand capability
        self.initialize_searcher_command_available = threading.Event()

    @override
    def is_ignored_dirname(self, dirname: str) -> bool:
        # in addition to the defaults, ignore typical generated/vendored JS/TS directories
        return super().is_ignored_dirname(dirname) or dirname in [
            "node_modules",
            "dist",
            "build",
            "coverage",
        ]

    @classmethod
    def _setup_runtime_dependencies(cls, config: LanguageServerConfig, solidlsp_settings: SolidLSPSettings) -> str:
        """
        Setup runtime dependencies for VTS Language Server and return the command to start the server.

        Requires node and npm on the PATH; installs @vtsls/language-server into the SolidLSP
        resources directory if it is not already present.

        :return: the command line which starts vtsls in stdio mode
        """
        platform_id = PlatformUtils.get_platform_id()

        valid_platforms = [
            PlatformId.LINUX_x64,
            PlatformId.LINUX_arm64,
            PlatformId.OSX,
            PlatformId.OSX_x64,
            PlatformId.OSX_arm64,
            PlatformId.WIN_x64,
            PlatformId.WIN_arm64,
        ]
        assert platform_id in valid_platforms, f"Platform {platform_id} is not supported for vtsls at the moment"

        deps = RuntimeDependencyCollection(
            [
                RuntimeDependency(
                    id="vtsls",
                    description="vtsls language server package",
                    command="npm install --prefix ./ @vtsls/language-server@0.2.9",
                    platform_id="any",
                ),
            ]
        )

        vts_ls_dir = os.path.join(cls.ls_resources_dir(solidlsp_settings), "vts-lsp")

        # Verify both node and npm are installed
        is_node_installed = shutil.which("node") is not None
        assert is_node_installed, "node is not installed or isn't in PATH. Please install NodeJS and try again."
        is_npm_installed = shutil.which("npm") is not None
        assert is_npm_installed, "npm is not installed or isn't in PATH. Please install npm and try again."

        # Install vtsls if not already installed
        if not os.path.exists(vts_ls_dir):
            os.makedirs(vts_ls_dir, exist_ok=True)
            deps.install(vts_ls_dir)

        # the executable is provided by the npm installation in node_modules/.bin
        # (the previous assignment of a bare "vtsls" path was dead code and has been removed)
        vts_executable_path = os.path.join(vts_ls_dir, "node_modules", ".bin", "vtsls")
        assert os.path.exists(vts_executable_path), "vtsls executable not found. Please install @vtsls/language-server and try again."

        return f"{vts_executable_path} --stdio"

    @staticmethod
    def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
        """
        Returns the initialize params for the VTS Language Server.

        :param repository_absolute_path: the absolute path of the repository to use as the workspace root
        """
        root_uri = pathlib.Path(repository_absolute_path).as_uri()
        initialize_params = {
            "locale": "en",
            "capabilities": {
                "textDocument": {
                    "synchronization": {"didSave": True, "dynamicRegistration": True},
                    "definition": {"dynamicRegistration": True},
                    "references": {"dynamicRegistration": True},
                    "documentSymbol": {
                        "dynamicRegistration": True,
                        "hierarchicalDocumentSymbolSupport": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                    },
                    "hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
                    "signatureHelp": {"dynamicRegistration": True},
                    "codeAction": {"dynamicRegistration": True},
                },
                "workspace": {
                    "workspaceFolders": True,
                    "didChangeConfiguration": {"dynamicRegistration": True},
                    "symbol": {"dynamicRegistration": True},
                    "configuration": True,  # This might be needed for vtsls
                },
            },
            "processId": os.getpid(),
            "rootPath": repository_absolute_path,
            "rootUri": root_uri,
            "workspaceFolders": [
                {
                    "uri": root_uri,
                    "name": os.path.basename(repository_absolute_path),
                }
            ],
        }
        return cast(InitializeParams, initialize_params)

    def _start_server(self) -> None:
        """
        Starts the VTS Language Server and waits for it to become ready.

        Registers handlers for the requests/notifications vtsls is known to send, performs the
        LSP initialize handshake, and finally waits (with a timeout fallback) for the server to
        report readiness.
        """

        def register_capability_handler(params: dict) -> None:
            assert "registrations" in params
            for registration in params["registrations"]:
                if registration["method"] == "workspace/executeCommand":
                    self.initialize_searcher_command_available.set()
            return

        def execute_client_command_handler(params: dict) -> list:
            return []

        def workspace_configuration_handler(params: dict) -> list[dict] | dict:
            # VTS may request workspace configuration
            # Return empty configuration for each requested item
            if "items" in params:
                return [{}] * len(params["items"])
            return {}

        def do_nothing(params: dict) -> None:
            return

        def window_log_message(msg: dict) -> None:
            log.info(f"LSP: window/logMessage: {msg}")

        def check_experimental_status(params: dict) -> None:
            # also listen for experimental/serverStatus as a backup readiness signal
            if params.get("quiescent") is True:
                self.server_ready.set()

        self.server.on_request("client/registerCapability", register_capability_handler)
        self.server.on_notification("window/logMessage", window_log_message)
        self.server.on_request("workspace/executeClientCommand", execute_client_command_handler)
        self.server.on_request("workspace/configuration", workspace_configuration_handler)
        self.server.on_notification("$/progress", do_nothing)
        self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
        self.server.on_notification("experimental/serverStatus", check_experimental_status)

        log.info("Starting VTS server process")
        self.server.start()
        initialize_params = self._get_initialize_params(self.repository_root_path)

        log.info("Sending initialize request from LSP client to LSP server and awaiting response")
        init_response = self.server.send.initialize(initialize_params)

        # VTS-specific capability checks
        # Be more flexible with capabilities since vtsls might have different structure
        log.debug(f"VTS init response capabilities: {init_response['capabilities']}")

        # Basic checks to ensure essential capabilities are present
        assert "textDocumentSync" in init_response["capabilities"]
        assert "completionProvider" in init_response["capabilities"]

        # Log the actual values for debugging
        log.debug(f"textDocumentSync: {init_response['capabilities']['textDocumentSync']}")
        log.debug(f"completionProvider: {init_response['capabilities']['completionProvider']}")

        self.server.notify.initialized({})

        if self.server_ready.wait(timeout=1.0):
            log.info("VTS server is ready")
        else:
            log.info("Timeout waiting for VTS server to become ready, proceeding anyway")
            # Fallback: assume server is ready after timeout
            self.server_ready.set()

    @override
    def _get_wait_time_for_cross_file_referencing(self) -> float:
        return 1
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/vts_language_server.py",
"license": "MIT License",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/serena/code_editor.py | import json
import logging
import os
from abc import ABC, abstractmethod
from collections.abc import Iterable, Iterator, Reversible
from contextlib import contextmanager
from typing import Generic, TypeVar, cast
from serena.config.serena_config import ProjectConfig
from serena.jetbrains.jetbrains_plugin_client import JetBrainsPluginClient
from serena.symbol import JetBrainsSymbol, LanguageServerSymbol, LanguageServerSymbolRetriever, PositionInFile, Symbol
from solidlsp import SolidLanguageServer, ls_types
from solidlsp.ls import LSPFileBuffer
from solidlsp.ls_utils import PathUtils, TextUtils
from .constants import DEFAULT_SOURCE_FILE_ENCODING
from .project import Project
log = logging.getLogger(__name__)
TSymbol = TypeVar("TSymbol", bound=Symbol)
class CodeEditor(Generic[TSymbol], ABC):
    """
    Abstract base class for symbol-level code editing operations (replacing, inserting, deleting
    and renaming), parametrized with the backend-specific symbol type.
    """

    def __init__(self, project_root: str, project_config: ProjectConfig | None = None) -> None:
        """
        :param project_root: the absolute path of the project root directory
        :param project_config: the project configuration; if None, the default source file encoding is used
        """
        self.project_root = project_root
        # set encoding based on project configuration, if available
        self.encoding = project_config.encoding if project_config is not None else DEFAULT_SOURCE_FILE_ENCODING

    class EditedFile(ABC):
        """Represents an open file whose contents can be modified prior to saving."""

        def __init__(self, relative_path: str) -> None:
            self.relative_path = relative_path

        @abstractmethod
        def get_contents(self) -> str:
            """
            :return: the contents of the file.
            """

        @abstractmethod
        def set_contents(self, contents: str) -> None:
            """
            Fully resets the contents of the file.

            :param contents: the new contents
            """

        @abstractmethod
        def delete_text_between_positions(self, start_pos: PositionInFile, end_pos: PositionInFile) -> None:
            """Deletes the text between the given positions."""
            pass

        @abstractmethod
        def insert_text_at_position(self, pos: PositionInFile, text: str) -> None:
            """Inserts the given text at the given position."""
            pass

    @contextmanager
    def _open_file_context(self, relative_path: str) -> Iterator["CodeEditor.EditedFile"]:
        """
        Context manager for opening a file; to be overridden by each subclass.
        """
        raise NotImplementedError("This method must be overridden for each subclass")

    @contextmanager
    def edited_file_context(self, relative_path: str) -> Iterator["CodeEditor.EditedFile"]:
        """
        Context manager for editing a file; the file is saved when the context is exited.
        """
        with self._open_file_context(relative_path) as edited_file:
            yield edited_file
            # save the file
            self._save_edited_file(edited_file)

    def _save_edited_file(self, edited_file: "CodeEditor.EditedFile") -> None:
        # write the (possibly modified) contents back to disk using the configured encoding
        abs_path = os.path.join(self.project_root, edited_file.relative_path)
        new_contents = edited_file.get_contents()
        with open(abs_path, "w", encoding=self.encoding) as f:
            f.write(new_contents)

    @abstractmethod
    def _find_unique_symbol(self, name_path: str, relative_file_path: str) -> TSymbol:
        """
        Finds the unique symbol with the given name in the given file.
        If no such symbol exists, raises a ValueError.

        :param name_path: the name path
        :param relative_file_path: the relative path of the file in which to search for the symbol.
        :return: the unique symbol
        """

    def replace_body(self, name_path: str, relative_file_path: str, body: str) -> None:
        """
        Replaces the body of the symbol with the given name_path in the given file.

        :param name_path: the name path of the symbol to replace.
        :param relative_file_path: the relative path of the file in which the symbol is defined.
        :param body: the new body
        """
        symbol = self._find_unique_symbol(name_path, relative_file_path)
        start_pos = symbol.get_body_start_position_or_raise()
        end_pos = symbol.get_body_end_position_or_raise()
        with self.edited_file_context(relative_file_path) as edited_file:
            # make sure the replacement adds no additional newlines (before or after) - all newlines
            # and whitespace before/after should remain the same, so we strip it entirely
            body = body.strip()
            edited_file.delete_text_between_positions(start_pos, end_pos)
            edited_file.insert_text_at_position(start_pos, body)

    @staticmethod
    def _count_leading_newlines(text: Iterable) -> int:
        """
        :param text: a sequence of characters
        :return: the number of newline ("\\n") characters at the beginning of the sequence,
            with interspersed carriage returns ignored (so "\\r\\n" counts as one newline)
        """
        cnt = 0
        for c in text:
            if c == "\n":
                cnt += 1
            elif c == "\r":
                # skip carriage returns so Windows-style line endings are counted correctly
                continue
            else:
                break
        return cnt

    @classmethod
    def _count_trailing_newlines(cls, text: Reversible) -> int:
        """
        :return: the number of newline characters at the end of the given character sequence
        """
        return cls._count_leading_newlines(reversed(text))

    def insert_after_symbol(self, name_path: str, relative_file_path: str, body: str) -> None:
        """
        Inserts content after the symbol with the given name in the given file.

        :param name_path: the name path of the symbol after which to insert
        :param relative_file_path: the relative path of the file in which the symbol is defined
        :param body: the content to insert
        """
        symbol = self._find_unique_symbol(name_path, relative_file_path)
        # make sure body always ends with at least one newline
        if not body.endswith("\n"):
            body += "\n"
        pos = symbol.get_body_end_position_or_raise()
        # start at the beginning of the next line
        col = 0
        line = pos.line + 1
        # make sure a suitable number of leading empty lines is used (at least 0/1 depending on the symbol type,
        # otherwise as many as the caller wanted to insert)
        original_leading_newlines = self._count_leading_newlines(body)
        body = body.lstrip("\r\n")
        min_empty_lines = 0
        if symbol.is_neighbouring_definition_separated_by_empty_line():
            min_empty_lines = 1
        num_leading_empty_lines = max(min_empty_lines, original_leading_newlines)
        if num_leading_empty_lines:
            body = ("\n" * num_leading_empty_lines) + body
        # make sure the one line break succeeding the original symbol, which we repurposed as prefix via
        # `line += 1`, is replaced
        body = body.rstrip("\r\n") + "\n"
        with self.edited_file_context(relative_file_path) as edited_file:
            edited_file.insert_text_at_position(PositionInFile(line, col), body)

    def insert_before_symbol(self, name_path: str, relative_file_path: str, body: str) -> None:
        """
        Inserts content before the symbol with the given name in the given file.

        :param name_path: the name path of the symbol before which to insert
        :param relative_file_path: the relative path of the file in which the symbol is defined
        :param body: the content to insert
        """
        symbol = self._find_unique_symbol(name_path, relative_file_path)
        symbol_start_pos = symbol.get_body_start_position_or_raise()
        # insert position is the start of line where the symbol is defined
        line = symbol_start_pos.line
        col = 0
        # NOTE: this can be -1 if the body has no trailing newline at all; the max() below clamps it
        original_trailing_empty_lines = self._count_trailing_newlines(body) - 1
        # ensure eol is present at end
        body = body.rstrip() + "\n"
        # add suitable number of trailing empty lines after the body (at least 0/1 depending on the symbol type,
        # otherwise as many as the caller wanted to insert)
        min_trailing_empty_lines = 0
        if symbol.is_neighbouring_definition_separated_by_empty_line():
            min_trailing_empty_lines = 1
        num_trailing_newlines = max(min_trailing_empty_lines, original_trailing_empty_lines)
        body += "\n" * num_trailing_newlines
        # apply edit
        with self.edited_file_context(relative_file_path) as edited_file:
            edited_file.insert_text_at_position(PositionInFile(line=line, col=col), body)

    def insert_at_line(self, relative_path: str, line: int, content: str) -> None:
        """
        Inserts content at the given line in the given file.

        :param relative_path: the relative path of the file in which to insert content
        :param line: the 0-based index of the line to insert content at
        :param content: the content to insert
        """
        with self.edited_file_context(relative_path) as edited_file:
            edited_file.insert_text_at_position(PositionInFile(line, 0), content)

    def delete_lines(self, relative_path: str, start_line: int, end_line: int) -> None:
        """
        Deletes lines in the given file.

        :param relative_path: the relative path of the file in which to delete lines
        :param start_line: the 0-based index of the first line to delete (inclusive)
        :param end_line: the 0-based index of the last line to delete (inclusive)
        """
        start_col = 0
        # delete up to the beginning of the line following the (inclusive) end line
        end_line_for_delete = end_line + 1
        end_col = 0
        with self.edited_file_context(relative_path) as edited_file:
            start_pos = PositionInFile(line=start_line, col=start_col)
            end_pos = PositionInFile(line=end_line_for_delete, col=end_col)
            edited_file.delete_text_between_positions(start_pos, end_pos)

    def delete_symbol(self, name_path: str, relative_file_path: str) -> None:
        """
        Deletes the symbol with the given name in the given file.

        :param name_path: the name path of the symbol to delete
        :param relative_file_path: the relative path of the file in which the symbol is defined
        """
        symbol = self._find_unique_symbol(name_path, relative_file_path)
        start_pos = symbol.get_body_start_position_or_raise()
        end_pos = symbol.get_body_end_position_or_raise()
        with self.edited_file_context(relative_file_path) as edited_file:
            edited_file.delete_text_between_positions(start_pos, end_pos)

    @abstractmethod
    def rename_symbol(self, name_path: str, relative_file_path: str, new_name: str) -> str:
        """
        Renames the symbol with the given name throughout the codebase.

        :param name_path: the name path of the symbol to rename
        :param relative_file_path: the relative path of the file containing the symbol
        :param new_name: the new name for the symbol
        :return: a status message
        """
class LanguageServerCodeEditor(CodeEditor[LanguageServerSymbol]):
    """Code editor backed by SolidLSP language servers (symbol lookup, edits and renames via LSP)."""

    def __init__(self, symbol_retriever: LanguageServerSymbolRetriever, project_config: ProjectConfig | None = None):
        """
        :param symbol_retriever: the retriever used for symbol lookup and language server access
        :param project_config: the project configuration, if available
        """
        super().__init__(project_root=symbol_retriever.get_root_path(), project_config=project_config)
        self._symbol_retriever = symbol_retriever

    def _get_language_server(self, relative_path: str) -> SolidLanguageServer:
        # obtain the language server responsible for the given file
        return self._symbol_retriever.get_language_server(relative_path)

    class EditedFile(CodeEditor.EditedFile):
        """An edited file whose contents are held in an LSP file buffer."""

        def __init__(self, lang_server: SolidLanguageServer, relative_path: str, file_buffer: LSPFileBuffer):
            super().__init__(relative_path)
            self._lang_server = lang_server
            self._file_buffer = file_buffer

        def get_contents(self) -> str:
            return self._file_buffer.contents

        def set_contents(self, contents: str) -> None:
            self._file_buffer.contents = contents

        def delete_text_between_positions(self, start_pos: PositionInFile, end_pos: PositionInFile) -> None:
            self._lang_server.delete_text_between_positions(self.relative_path, start_pos.to_lsp_position(), end_pos.to_lsp_position())

        def insert_text_at_position(self, pos: PositionInFile, text: str) -> None:
            self._lang_server.insert_text_at_position(self.relative_path, pos.line, pos.col, text)

        def apply_text_edits(self, text_edits: list[ls_types.TextEdit]) -> None:
            """Applies the given LSP text edits to the file."""
            return self._lang_server.apply_text_edits_to_file(self.relative_path, text_edits)

    @contextmanager
    def _open_file_context(self, relative_path: str) -> Iterator["CodeEditor.EditedFile"]:
        lang_server = self._get_language_server(relative_path)
        with lang_server.open_file(relative_path) as file_buffer:
            yield self.EditedFile(lang_server, relative_path, file_buffer)

    def _get_code_file_content(self, relative_path: str) -> str:
        """Get the content of a file using the language server."""
        lang_server = self._get_language_server(relative_path)
        return lang_server.language_server.retrieve_full_file_content(relative_path)

    def _find_unique_symbol(self, name_path: str, relative_file_path: str) -> LanguageServerSymbol:
        return self._symbol_retriever.find_unique(name_path, within_relative_path=relative_file_path)

    def _relative_path_from_uri(self, uri: str) -> str:
        # convert a file URI (as used in LSP workspace edits) into a path relative to the project root
        return os.path.relpath(PathUtils.uri_to_path(uri), self.project_root)

    class EditOperation(ABC):
        """A single edit operation resulting from an LSP WorkspaceEdit."""

        @abstractmethod
        def apply(self) -> None:
            pass

    class EditOperationFileTextEdits(EditOperation):
        """Edit operation which applies a list of text edits to a single file."""

        def __init__(self, code_editor: "LanguageServerCodeEditor", file_uri: str, text_edits: list[ls_types.TextEdit]):
            self._code_editor = code_editor
            self._relative_path = code_editor._relative_path_from_uri(file_uri)
            self._text_edits = text_edits

        def apply(self) -> None:
            with self._code_editor.edited_file_context(self._relative_path) as edited_file:
                edited_file = cast(LanguageServerCodeEditor.EditedFile, edited_file)
                edited_file.apply_text_edits(self._text_edits)

    class EditOperationRenameFile(EditOperation):
        """Edit operation which renames a file on disk (LSP 'rename' document change)."""

        def __init__(self, code_editor: "LanguageServerCodeEditor", old_uri: str, new_uri: str):
            self._code_editor = code_editor
            self._old_relative_path = code_editor._relative_path_from_uri(old_uri)
            self._new_relative_path = code_editor._relative_path_from_uri(new_uri)

        def apply(self) -> None:
            old_abs_path = os.path.join(self._code_editor.project_root, self._old_relative_path)
            new_abs_path = os.path.join(self._code_editor.project_root, self._new_relative_path)
            os.rename(old_abs_path, new_abs_path)

    def _workspace_edit_to_edit_operations(self, workspace_edit: ls_types.WorkspaceEdit) -> list["LanguageServerCodeEditor.EditOperation"]:
        """
        Converts an LSP WorkspaceEdit into a list of edit operations, handling both the
        "changes" mapping and the "documentChanges" list (text edits and file renames).

        :raises ValueError: if an unsupported document change kind or format is encountered
        """
        operations: list[LanguageServerCodeEditor.EditOperation] = []
        if "changes" in workspace_edit:
            for uri, edits in workspace_edit["changes"].items():
                operations.append(self.EditOperationFileTextEdits(self, uri, edits))
        if "documentChanges" in workspace_edit:
            for change in workspace_edit["documentChanges"]:
                if "textDocument" in change and "edits" in change:
                    operations.append(self.EditOperationFileTextEdits(self, change["textDocument"]["uri"], change["edits"]))
                elif "kind" in change:
                    if change["kind"] == "rename":
                        operations.append(self.EditOperationRenameFile(self, change["oldUri"], change["newUri"]))
                    else:
                        raise ValueError(f"Unhandled document change kind: {change}; Please report to Serena developers.")
                else:
                    raise ValueError(f"Unhandled document change format: {change}; Please report to Serena developers.")
        return operations

    def _apply_workspace_edit(self, workspace_edit: ls_types.WorkspaceEdit) -> int:
        """
        Applies a WorkspaceEdit

        :param workspace_edit: the edit to apply
        :return: number of edit operations applied
        """
        operations = self._workspace_edit_to_edit_operations(workspace_edit)
        for operation in operations:
            operation.apply()
        return len(operations)

    def rename_symbol(self, name_path: str, relative_file_path: str, new_name: str) -> str:
        """
        Renames the symbol with the given name path throughout the codebase via the language server.

        :raises ValueError: if the symbol has no position in the file, the server returns no edits,
            or no changes end up being applied
        :return: a status message
        """
        symbol = self._find_unique_symbol(name_path, relative_file_path)
        if not symbol.location.has_position_in_file():
            raise ValueError(f"Symbol '{name_path}' does not have a valid position in file for renaming")
        # After has_position_in_file check, line and column are guaranteed to be non-None
        assert symbol.location.line is not None
        assert symbol.location.column is not None
        lang_server = self._get_language_server(relative_file_path)
        rename_result = lang_server.request_rename_symbol_edit(
            relative_file_path=relative_file_path, line=symbol.location.line, column=symbol.location.column, new_name=new_name
        )
        if rename_result is None:
            raise ValueError(
                f"Language server for {lang_server.language_id} returned no rename edits for symbol '{name_path}'. "
                f"The symbol might not support renaming."
            )
        num_changes = self._apply_workspace_edit(rename_result)
        if num_changes == 0:
            raise ValueError(
                f"Renaming symbol '{name_path}' to '{new_name}' resulted in no changes being applied; renaming may not be supported."
            )
        msg = f"Successfully renamed '{name_path}' to '{new_name}' ({num_changes} changes applied)"
        return msg
class JetBrainsCodeEditor(CodeEditor[JetBrainsSymbol]):
    """Code editor backed by the JetBrains plugin client (symbol lookup and renames delegated to the IDE)."""

    def __init__(self, project: Project) -> None:
        """
        :param project: the project within which to edit code
        """
        self._project = project
        super().__init__(project_root=project.project_root, project_config=project.project_config)

    class EditedFile(CodeEditor.EditedFile):
        """An edited file whose contents are held in memory as a plain string."""

        def __init__(self, relative_path: str, project: Project):
            super().__init__(relative_path)
            path = os.path.join(project.project_root, relative_path)
            log.info("Editing file: %s", path)
            # read the current file contents using the project's configured encoding
            with open(path, encoding=project.project_config.encoding) as f:
                self._content = f.read()

        def get_contents(self) -> str:
            return self._content

        def set_contents(self, contents: str) -> None:
            self._content = contents

        def delete_text_between_positions(self, start_pos: PositionInFile, end_pos: PositionInFile) -> None:
            self._content, _ = TextUtils.delete_text_between_positions(
                self._content, start_pos.line, start_pos.col, end_pos.line, end_pos.col
            )

        def insert_text_at_position(self, pos: PositionInFile, text: str) -> None:
            self._content, _, _ = TextUtils.insert_text_at_position(self._content, pos.line, pos.col, text)

    @contextmanager
    def _open_file_context(self, relative_path: str) -> Iterator["CodeEditor.EditedFile"]:
        yield self.EditedFile(relative_path, self._project)

    def _save_edited_file(self, edited_file: "CodeEditor.EditedFile") -> None:
        # write the contents to disk, then have the IDE refresh its view of the file
        super()._save_edited_file(edited_file)
        with JetBrainsPluginClient.from_project(self._project) as client:
            client.refresh_file(edited_file.relative_path)

    def _find_unique_symbol(self, name_path: str, relative_file_path: str) -> JetBrainsSymbol:
        """
        Finds the unique symbol with the given name path in the given file via the plugin client.

        :raises ValueError: if no symbol or more than one symbol is found
        """
        with JetBrainsPluginClient.from_project(self._project) as client:
            result = client.find_symbol(name_path, relative_path=relative_file_path, include_body=False, depth=0, include_location=True)
            symbols = result["symbols"]
            if not symbols:
                raise ValueError(f"No symbol with name {name_path} found in file {relative_file_path}")
            if len(symbols) > 1:
                raise ValueError(
                    f"Found multiple {len(symbols)} symbols with name {name_path} in file {relative_file_path}: "
                    + json.dumps(symbols, indent=2)
                )
            return JetBrainsSymbol(symbols[0], self._project)

    def rename_symbol(self, name_path: str, relative_file_path: str, new_name: str) -> str:
        """
        Renames the symbol via the JetBrains plugin; comment and text occurrences are not renamed.

        :return: a status message
        """
        with JetBrainsPluginClient.from_project(self._project) as client:
            client.rename_symbol(
                name_path=name_path,
                relative_path=relative_file_path,
                new_name=new_name,
                rename_in_comments=False,
                rename_in_text_occurrences=False,
            )
            return "Success"
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/code_editor.py",
"license": "MIT License",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/serena/project.py | import json
import logging
import os
import re
import shutil
import threading
from collections.abc import Sequence
from pathlib import Path
from typing import Any, Literal
import pathspec
from sensai.util.logging import LogTime
from sensai.util.string import ToStringMixin
from serena.config.serena_config import (
ProjectConfig,
SerenaConfig,
SerenaPaths,
)
from serena.constants import SERENA_FILE_ENCODING
from serena.ls_manager import LanguageServerFactory, LanguageServerManager
from serena.util.file_system import GitignoreParser, match_path
from serena.util.text_utils import ContentReplacer, MatchedConsecutiveLines, search_files
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_utils import FileUtils
log = logging.getLogger(__name__)
class MemoriesManager:
    """
    Manages Serena memories, which are stored as markdown files either in the project's
    .serena data folder or — for names prefixed with "global/" — in the global memories directory.
    Memory names may be hierarchical (containing "/"), mapping to subdirectories.
    """

    # topic prefix that routes a memory name to the global (cross-project) scope
    GLOBAL_TOPIC = "global"
    # shared directory in which global memories are stored
    _global_memory_dir = SerenaPaths().global_memories_path

    def __init__(self, serena_data_folder: str | Path | None, read_only_memory_patterns: Sequence[str] = ()):
        """
        :param serena_data_folder: the absolute path to the project's .serena data folder;
            if None, only global memories can be addressed
        :param read_only_memory_patterns: regular expressions (matched against the full memory name)
            identifying memories that must not be modified in tool execution contexts
        """
        self._project_memory_dir: Path | None = None
        if serena_data_folder is not None:
            self._project_memory_dir = Path(serena_data_folder) / "memories"
            self._project_memory_dir.mkdir(parents=True, exist_ok=True)
        self._encoding = SERENA_FILE_ENCODING
        # deduplicate before compiling so each distinct pattern is checked at most once
        self._read_only_memory_patterns = [re.compile(pattern) for pattern in set(read_only_memory_patterns)]

    def _is_read_only_memory(self, name: str) -> bool:
        """Checks whether the given memory name fully matches one of the read-only patterns."""
        return any(pattern.fullmatch(name) for pattern in self._read_only_memory_patterns)

    def _is_global(self, name: str) -> bool:
        """Checks whether the given memory name addresses the global scope."""
        return name == self.GLOBAL_TOPIC or name.startswith(self.GLOBAL_TOPIC + "/")

    @staticmethod
    def _resolve_memory_path(base_dir: Path, name: str) -> Path:
        """
        Resolves a (possibly hierarchical) memory name against the given base directory,
        creating intermediate subdirectories as needed.
        """
        parts = name.split("/")
        filename = f"{parts[-1]}.md"
        if len(parts) > 1:
            subdir = base_dir / "/".join(parts[:-1])
            subdir.mkdir(parents=True, exist_ok=True)
            return subdir / filename
        return base_dir / filename

    def get_memory_file_path(self, name: str) -> Path:
        """
        Resolves a memory name to the path of its markdown file.

        :param name: the memory name, optionally with a trailing ".md" and/or a "global/" prefix
        :return: the path of the memory file
        :raises ValueError: if the bare global topic is passed as the name
        """
        # Strip the .md extension only if it is a suffix; a ".md" elsewhere in the
        # name is part of the name itself (str.replace would also strip those).
        name = name.removesuffix(".md")
        if self._is_global(name):
            if name == self.GLOBAL_TOPIC:
                raise ValueError(
                    f'Bare "{self.GLOBAL_TOPIC}" is not a valid memory name. '
                    f'Use "{self.GLOBAL_TOPIC}/<name>" to address a global memory.'
                )
            # Strip "global/" prefix and resolve against global dir
            sub_name = name[len(self.GLOBAL_TOPIC) + 1 :]
            return self._resolve_memory_path(self._global_memory_dir, sub_name)
        # Project-local memory
        assert self._project_memory_dir is not None, "Project dir was not passed at initialization"
        return self._resolve_memory_path(self._project_memory_dir, name)

    def _check_write_access(self, name: str, is_tool_context: bool) -> None:
        """Raises a PermissionError if the memory is read-only and we are in a tool execution context."""
        # in tool context, memories can be read-only
        if is_tool_context and self._is_read_only_memory(name):
            raise PermissionError(f"Attempted to write to read_only memory: '{name}'")

    def load_memory(self, name: str) -> str:
        """
        Reads the content of the given memory, returning an informative message if it does not exist.

        :param name: the memory name
        :return: the memory content or a not-found message
        """
        memory_file_path = self.get_memory_file_path(name)
        if not memory_file_path.exists():
            return f"Memory file {name} not found, consider creating it with the `write_memory` tool if you need it."
        with open(memory_file_path, encoding=self._encoding) as f:
            return f.read()

    def save_memory(self, name: str, content: str, is_tool_context: bool) -> str:
        """
        Writes the given content to the memory, overwriting any existing content.

        :raises PermissionError: if the memory is read-only and `is_tool_context` is True
        """
        self._check_write_access(name, is_tool_context)
        memory_file_path = self.get_memory_file_path(name)
        with open(memory_file_path, "w", encoding=self._encoding) as f:
            f.write(content)
        return f"Memory {name} written."

    class MemoriesList:
        """Container separating regular from read-only memory names."""

        def __init__(self) -> None:
            self.memories: list[str] = []
            self.read_only_memories: list[str] = []

        def __len__(self) -> int:
            return len(self.memories) + len(self.read_only_memories)

        def add(self, memory_name: str, is_read_only: bool) -> None:
            if is_read_only:
                self.read_only_memories.append(memory_name)
            else:
                self.memories.append(memory_name)

        def extend(self, other: "MemoriesManager.MemoriesList") -> None:
            self.memories.extend(other.memories)
            self.read_only_memories.extend(other.read_only_memories)

        def to_dict(self) -> dict[str, list[str]]:
            """:return: a dictionary with sorted name lists; keys are omitted when the respective list is empty"""
            result = {}
            if self.memories:
                result["memories"] = sorted(self.memories)
            if self.read_only_memories:
                result["read_only_memories"] = sorted(self.read_only_memories)
            return result

        def get_full_list(self) -> list[str]:
            """:return: the sorted list of all memory names, regardless of read-only status"""
            return sorted(self.memories + self.read_only_memories)

    def _list_memories(self, search_dir: Path, base_dir: Path, prefix: str = "") -> MemoriesList:
        """
        Recursively collects memory names under `search_dir`: paths of .md files relative
        to `base_dir`, without the extension and with `prefix` prepended.
        """
        result = self.MemoriesList()
        if not search_dir.exists():
            return result
        for md_file in search_dir.rglob("*.md"):
            rel = str(md_file.relative_to(base_dir).with_suffix("")).replace(os.sep, "/")
            memory_name = prefix + rel
            result.add(memory_name, is_read_only=self._is_read_only_memory(memory_name))
        return result

    def list_global_memories(self, subtopic: str = "") -> MemoriesList:
        """Lists global memories, optionally restricted to the given subtopic directory."""
        dir_path = self._global_memory_dir
        if subtopic:
            dir_path = dir_path / subtopic.replace("/", os.sep)
        return self._list_memories(dir_path, self._global_memory_dir, self.GLOBAL_TOPIC + "/")

    def list_project_memories(self, topic: str = "") -> MemoriesList:
        """Lists project-local memories, optionally restricted to the given topic directory."""
        assert self._project_memory_dir is not None, "Project dir was not passed at initialization"
        dir_path = self._project_memory_dir
        if topic:
            dir_path = dir_path / topic.replace("/", os.sep)
        return self._list_memories(dir_path, self._project_memory_dir)

    def list_memories(self, topic: str = "") -> MemoriesList:
        """
        Lists all memories, optionally filtered by topic.
        If the topic is omitted, both global and project-specific memories are returned.
        """
        memories: MemoriesManager.MemoriesList
        if topic:
            if self._is_global(topic):
                topic_parts = topic.split("/")
                subtopic = "/".join(topic_parts[1:])
                memories = self.list_global_memories(subtopic=subtopic)
            else:
                memories = self.list_project_memories(topic=topic)
        else:
            memories = self.list_project_memories()
            memories.extend(self.list_global_memories())
        return memories

    def delete_memory(self, name: str, is_tool_context: bool) -> str:
        """
        Deletes the given memory file if it exists.

        :raises PermissionError: if the memory is read-only and `is_tool_context` is True
        """
        self._check_write_access(name, is_tool_context)
        memory_file_path = self.get_memory_file_path(name)
        if not memory_file_path.exists():
            return f"Memory {name} not found."
        memory_file_path.unlink()
        return f"Memory {name} deleted."

    def move_memory(self, old_name: str, new_name: str, is_tool_context: bool) -> str:
        """
        Rename or move a memory file.
        Moving between global and project scope (e.g. "global/foo" -> "bar") is supported.
        """
        # NOTE(review): only the destination name is access-checked; moving a read-only
        # memory *away* is currently permitted — confirm this is intended.
        self._check_write_access(new_name, is_tool_context)
        old_path = self.get_memory_file_path(old_name)
        new_path = self.get_memory_file_path(new_name)
        if not old_path.exists():
            raise FileNotFoundError(f"Memory {old_name} not found.")
        if new_path.exists():
            raise FileExistsError(f"Memory {new_name} already exists.")
        new_path.parent.mkdir(parents=True, exist_ok=True)
        shutil.move(old_path, new_path)
        return f"Memory renamed from {old_name} to {new_name}."

    def edit_memory(
        self, name: str, needle: str, repl: str, mode: Literal["literal", "regex"], allow_multiple_occurrences: bool, is_tool_context: bool
    ) -> str:
        """
        Edit a memory by replacing content matching a pattern.

        :param name: the memory name
        :param needle: the string or regex to search for
        :param repl: the replacement string
        :param mode: "literal" or "regex"
        :param allow_multiple_occurrences: whether more than one match may be replaced
        :param is_tool_context: whether we are in a tool execution context (where read-only memories may not be edited)
        :raises FileNotFoundError: if the memory does not exist
        :raises PermissionError: if the memory is read-only and `is_tool_context` is True
        """
        self._check_write_access(name, is_tool_context)
        memory_file_path = self.get_memory_file_path(name)
        if not memory_file_path.exists():
            raise FileNotFoundError(f"Memory {name} not found.")
        with open(memory_file_path, encoding=self._encoding) as f:
            original_content = f.read()
        replacer = ContentReplacer(mode=mode, allow_multiple_occurrences=allow_multiple_occurrences)
        updated_content = replacer.replace(original_content, needle, repl)
        with open(memory_file_path, "w", encoding=self._encoding) as f:
            f.write(updated_content)
        return f"Memory {name} edited successfully."
class Project(ToStringMixin):
    """
    Represents an activated Serena project: its root directory, configuration, memories
    and (optionally) the language server manager handling its programming languages.
    """

    def __init__(
        self,
        *,
        project_root: str,
        project_config: ProjectConfig,
        serena_config: SerenaConfig,
        is_newly_created: bool = False,
    ):
        """
        :param project_root: the absolute path to the project root directory
        :param project_config: the project's configuration
        :param serena_config: the global Serena configuration
        :param is_newly_created: whether the project was newly created (rather than merely loaded)
        """
        assert serena_config is not None
        self.project_root = project_root
        self.project_config = project_config
        self._serena_config = serena_config
        self._serena_data_folder = serena_config.get_project_serena_folder(self.project_root)
        log.info("Serena project data folder: %s", self._serena_data_folder)
        read_only_memory_patterns = serena_config.read_only_memory_patterns + project_config.read_only_memory_patterns
        self.memories_manager = MemoriesManager(self._serena_data_folder, read_only_memory_patterns=read_only_memory_patterns)
        self.language_server_manager: LanguageServerManager | None = None
        self._is_newly_created = is_newly_created
        # create .gitignore file in the project's Serena data folder if not yet present
        serena_data_gitignore_path = os.path.join(self._serena_data_folder, ".gitignore")
        if not os.path.exists(serena_data_gitignore_path):
            os.makedirs(os.path.dirname(serena_data_gitignore_path), exist_ok=True)
            log.info(f"Creating .gitignore file in {serena_data_gitignore_path}")
            with open(serena_data_gitignore_path, "w", encoding="utf-8") as f:
                f.write(f"/{SolidLanguageServer.CACHE_FOLDER_NAME}\n")
                f.write(f"/{ProjectConfig.SERENA_LOCAL_PROJECT_FILE}\n")
        # prepare ignore spec asynchronously, ensuring immediate project activation.
        self.__ignored_patterns: list[str]
        self.__ignore_spec: pathspec.PathSpec
        self._ignore_spec_available = threading.Event()
        threading.Thread(name=f"gather-ignorespec[{self.project_config.project_name}]", target=self._gather_ignorespec, daemon=True).start()

    def _gather_ignorespec(self) -> None:
        """
        Collects ignore patterns from the global config, the project config and (if enabled)
        gitignore files, builds the pathspec matcher and signals availability via the event.
        Runs in a background thread started by `__init__`.
        """
        with LogTime(f"Gathering ignore spec for project {self.project_config.project_name}", logger=log):
            # gather ignored paths from the global configuration, project configuration, and gitignore files
            global_ignored_paths = self._serena_config.ignored_paths
            ignored_patterns = list(global_ignored_paths) + list(self.project_config.ignored_paths)
            if len(global_ignored_paths) > 0:
                log.info(f"Using {len(global_ignored_paths)} ignored paths from the global configuration.")
                log.debug(f"Global ignored paths: {list(global_ignored_paths)}")
            if len(self.project_config.ignored_paths) > 0:
                log.info(f"Using {len(self.project_config.ignored_paths)} ignored paths from the project configuration.")
                log.debug(f"Project ignored paths: {self.project_config.ignored_paths}")
            log.debug(f"Combined ignored patterns: {ignored_patterns}")
            if self.project_config.ignore_all_files_in_gitignore:
                gitignore_parser = GitignoreParser(self.project_root)
                for spec in gitignore_parser.get_ignore_specs():
                    log.debug(f"Adding {len(spec.patterns)} patterns from {spec.file_path} to the ignored paths.")
                    ignored_patterns.extend(spec.patterns)
            self.__ignored_patterns = ignored_patterns
            # Set up the pathspec matcher for the ignored paths
            # for all absolute paths in ignored_paths, convert them to relative paths
            processed_patterns = []
            for pattern in ignored_patterns:
                # Normalize separators (pathspec expects forward slashes)
                pattern = pattern.replace(os.path.sep, "/")
                processed_patterns.append(pattern)
            log.debug(f"Processing {len(processed_patterns)} ignored paths")
            self.__ignore_spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, processed_patterns)
            self._ignore_spec_available.set()

    def _tostring_includes(self) -> list[str]:
        """Excludes all attributes from the default string representation (see `_tostring_additional_entries`)."""
        return []

    def _tostring_additional_entries(self) -> dict[str, Any]:
        """:return: the entries to show in the string representation (root and name only)"""
        return {"root": self.project_root, "name": self.project_name}

    @property
    def project_name(self) -> str:
        """:return: the project name as defined in the project configuration"""
        return self.project_config.project_name

    @classmethod
    def load(
        cls,
        project_root: str | Path,
        serena_config: "SerenaConfig",
        autogenerate: bool = True,
    ) -> "Project":
        """
        Loads the project located at the given root directory.

        :param project_root: the path to the project root directory
        :param serena_config: the global Serena configuration
        :param autogenerate: whether to auto-generate the project configuration if it does not yet exist
        :return: the loaded project
        :raises FileNotFoundError: if the project root does not exist
        """
        assert serena_config is not None
        project_root = Path(project_root).resolve()
        if not project_root.exists():
            raise FileNotFoundError(f"Project root not found: {project_root}")
        project_config = ProjectConfig.load(project_root, serena_config=serena_config, autogenerate=autogenerate)
        # instantiate via cls (not the class name) so subclasses of Project load correctly
        return cls(project_root=str(project_root), project_config=project_config, serena_config=serena_config)

    def save_config(self) -> None:
        """
        Saves the current project configuration to disk.
        """
        self.project_config.save(self.path_to_project_yml())

    def path_to_serena_data_folder(self) -> str:
        """:return: the path to the project's .serena data folder"""
        return self._serena_data_folder

    def path_to_project_yml(self) -> str:
        """:return: the path to the project's configuration file within the Serena data folder"""
        return os.path.join(self._serena_data_folder, ProjectConfig.SERENA_DEFAULT_PROJECT_FILE)

    def get_activation_message(self) -> str:
        """
        :return: a message providing information about the project upon activation (e.g. programming language, memories, initial prompt)
        """
        if self._is_newly_created:
            msg = f"Created and activated a new project with name '{self.project_name}' at {self.project_root}. "
        else:
            msg = f"The project with name '{self.project_name}' at {self.project_root} is activated."
        languages_str = ", ".join([lang.value for lang in self.project_config.languages])
        msg += f"\nProgramming languages: {languages_str}; file encoding: {self.project_config.encoding}"
        project_memories = self.memories_manager.list_project_memories()
        if project_memories:
            msg += (
                f"\nAvailable project memories: {json.dumps(project_memories.to_dict())}\n"
                + "Use the `read_memory` tool to read these memories later if they are relevant to the task."
            )
        if self.project_config.initial_prompt:
            msg += f"\nAdditional project-specific instructions:\n {self.project_config.initial_prompt}"
        return msg

    def read_file(self, relative_path: str) -> str:
        """
        Reads a file relative to the project root.

        :param relative_path: the path to the file relative to the project root
        :return: the content of the file
        """
        abs_path = Path(self.project_root) / relative_path
        return FileUtils.read_file(str(abs_path), self.project_config.encoding)

    @property
    def _ignore_spec(self) -> pathspec.PathSpec:
        """
        :return: the pathspec matcher for the paths that were configured to be ignored,
            either explicitly or implicitly through .gitignore files.
            Blocks until the background ignore-spec gathering (started in __init__) has completed.
        """
        if not self._ignore_spec_available.is_set():
            log.info("Waiting for ignore spec to become available ...")
            self._ignore_spec_available.wait()
            log.info("Ignore spec is now available for project; proceeding")
        return self.__ignore_spec

    @property
    def _ignored_patterns(self) -> list[str]:
        """
        :return: the list of ignored path patterns.
            Blocks until the background ignore-spec gathering (started in __init__) has completed.
        """
        if not self._ignore_spec_available.is_set():
            log.info("Waiting for ignored patterns to become available ...")
            self._ignore_spec_available.wait()
            log.info("Ignore patterns are now available for project; proceeding")
        return self.__ignored_patterns

    def _is_ignored_relative_path(self, relative_path: str | Path, ignore_non_source_files: bool = True) -> bool:
        """
        Determine whether an existing path should be ignored based on file type and ignore patterns.
        Raises `FileNotFoundError` if the path does not exist.

        :param relative_path: Relative path to check
        :param ignore_non_source_files: whether files that are not source files (according to the file masks
            determined by the project's programming language) shall be ignored
        :return: whether the path should be ignored
        """
        # special case, never ignore the project root itself
        # If the user ignores hidden files, "." might match against the corresponding PathSpec pattern.
        # The empty string also points to the project root and should never be ignored.
        if str(relative_path) in [".", ""]:
            return False
        abs_path = os.path.join(self.project_root, relative_path)
        if not os.path.exists(abs_path):
            raise FileNotFoundError(f"File {abs_path} not found, the ignore check cannot be performed")
        # Check file extension if it's a file
        is_file = os.path.isfile(abs_path)
        if is_file and ignore_non_source_files:
            is_file_in_supported_language = False
            for language in self.project_config.languages:
                fn_matcher = language.get_source_fn_matcher()
                if fn_matcher.is_relevant_filename(abs_path):
                    is_file_in_supported_language = True
                    break
            if not is_file_in_supported_language:
                return True
        # Create normalized path for consistent handling
        rel_path = Path(relative_path)
        # always ignore paths inside .git
        if len(rel_path.parts) > 0 and rel_path.parts[0] == ".git":
            return True
        return match_path(str(relative_path), self._ignore_spec, root_path=self.project_root)

    def is_ignored_path(self, path: str | Path, ignore_non_source_files: bool = False) -> bool:
        """
        Checks whether the given path is ignored

        :param path: the path to check, can be absolute or relative
        :param ignore_non_source_files: whether to ignore files that are not source files
            (according to the file masks determined by the project's programming language)
        """
        path = Path(path)
        if path.is_absolute():
            try:
                relative_path = path.relative_to(self.project_root)
            except ValueError:
                # If the path is not relative to the project root, we consider it as an absolute path outside the project
                # (which we ignore)
                log.warning(f"Path {path} is not relative to the project root {self.project_root} and was therefore ignored")
                return True
        else:
            relative_path = path
        return self._is_ignored_relative_path(str(relative_path), ignore_non_source_files=ignore_non_source_files)

    def is_path_in_project(self, path: str | Path) -> bool:
        """
        Checks if the given (absolute or relative) path is inside the project directory.
        Note: This is intended to catch cases where ".." segments would lead outside of the project directory,
        but we intentionally allow symlinks, as the assumption is that they point to relevant project files.
        """
        if not os.path.isabs(path):
            path = os.path.join(self.project_root, path)
        # collapse any ".." or "." segments (purely lexically)
        path = os.path.normpath(path)
        try:
            return os.path.commonpath([self.project_root, path]) == self.project_root
        except ValueError:
            # occurs, in particular, if paths are on different drives on Windows
            return False

    def relative_path_exists(self, relative_path: str) -> bool:
        """
        Checks if the given relative path exists in the project directory.

        :param relative_path: the path to check, relative to the project root
        :return: True if the path exists, False otherwise
        """
        abs_path = Path(self.project_root) / relative_path
        return abs_path.exists()

    def validate_relative_path(self, relative_path: str, require_not_ignored: bool = False) -> None:
        """
        Validates that the given relative path to an existing file/dir is safe to read or edit,
        meaning it's inside the project directory.
        Passing a path to a non-existing file will lead to a `FileNotFoundError`.

        :param relative_path: the path to validate, relative to the project root
        :param require_not_ignored: if True, the path must not be ignored according to the project's ignore settings
        """
        if not self.is_path_in_project(relative_path):
            raise ValueError(f"{relative_path=} points to path outside of the repository root; cannot access for safety reasons")
        if require_not_ignored:
            if self.is_ignored_path(relative_path):
                raise ValueError(f"Path {relative_path} is ignored; cannot access for safety reasons")

    def gather_source_files(self, relative_path: str = "") -> list[str]:
        """Retrieves relative paths of all source files, optionally limited to the given path

        :param relative_path: if provided, restrict search to this path
        :return: the list of source file paths, relative to the project root
        :raises FileNotFoundError: if the given relative path does not exist
        """
        rel_file_paths = []
        start_path = os.path.join(self.project_root, relative_path)
        if not os.path.exists(start_path):
            raise FileNotFoundError(f"Relative path {start_path} not found.")
        if os.path.isfile(start_path):
            return [relative_path]
        else:
            for root, dirs, files in os.walk(start_path, followlinks=True):
                # prevent recursion into ignored directories
                dirs[:] = [d for d in dirs if not self.is_ignored_path(os.path.join(root, d))]
                # collect non-ignored files
                for file in files:
                    abs_file_path = os.path.join(root, file)
                    try:
                        if not self.is_ignored_path(abs_file_path, ignore_non_source_files=True):
                            try:
                                rel_file_path = os.path.relpath(abs_file_path, start=self.project_root)
                            except Exception:
                                log.warning(
                                    "Ignoring path '%s' because it appears to be outside of the project root (%s)",
                                    abs_file_path,
                                    self.project_root,
                                )
                                continue
                            rel_file_paths.append(rel_file_path)
                    except FileNotFoundError:
                        log.warning(
                            f"File {abs_file_path} not found (possibly due it being a symlink), skipping it in request_parsed_files",
                        )
            return rel_file_paths

    def search_source_files_for_pattern(
        self,
        pattern: str,
        relative_path: str = "",
        context_lines_before: int = 0,
        context_lines_after: int = 0,
        paths_include_glob: str | None = None,
        paths_exclude_glob: str | None = None,
    ) -> list[MatchedConsecutiveLines]:
        """
        Search for a pattern across all (non-ignored) source files

        :param pattern: Regular expression pattern to search for, either as a compiled Pattern or string
        :param relative_path: if provided, restrict the search to this path
        :param context_lines_before: Number of lines of context to include before each match
        :param context_lines_after: Number of lines of context to include after each match
        :param paths_include_glob: Glob pattern to filter which files to include in the search
        :param paths_exclude_glob: Glob pattern to filter which files to exclude from the search. Takes precedence over paths_include_glob.
        :return: List of matched consecutive lines with context
        """
        relative_file_paths = self.gather_source_files(relative_path=relative_path)
        return search_files(
            relative_file_paths,
            pattern,
            root_path=self.project_root,
            file_reader=self.read_file,
            context_lines_before=context_lines_before,
            context_lines_after=context_lines_after,
            paths_include_glob=paths_include_glob,
            paths_exclude_glob=paths_exclude_glob,
        )

    def retrieve_content_around_line(
        self, relative_file_path: str, line: int, context_lines_before: int = 0, context_lines_after: int = 0
    ) -> MatchedConsecutiveLines:
        """
        Retrieve the content of the given file around the given line.

        :param relative_file_path: The relative path of the file to retrieve the content from
        :param line: The line number to retrieve the content around
        :param context_lines_before: The number of lines to retrieve before the given line
        :param context_lines_after: The number of lines to retrieve after the given line
        :return MatchedConsecutiveLines: A container with the desired lines.
        """
        file_contents = self.read_file(relative_file_path)
        return MatchedConsecutiveLines.from_file_contents(
            file_contents,
            line=line,
            context_lines_before=context_lines_before,
            context_lines_after=context_lines_after,
            source_file_path=relative_file_path,
        )

    def create_language_server_manager(self) -> LanguageServerManager:
        """
        Creates the language server manager for the project, starting one language server per configured programming language.

        :return: the language server manager, which is also stored in the project instance
        """
        # determine timeout to use for LS calls
        tool_timeout = self._serena_config.tool_timeout
        if tool_timeout is None or tool_timeout < 0:
            ls_timeout = None
        else:
            if tool_timeout < 10:
                raise ValueError(f"Tool timeout must be at least 10 seconds, but is {tool_timeout} seconds")
            ls_timeout = tool_timeout - 5  # the LS timeout is for a single call, it should be smaller than the tool timeout
        # if there is an existing instance, stop its language servers first
        if self.language_server_manager is not None:
            log.info("Stopping existing language server manager ...")
            self.language_server_manager.stop_all()
            self.language_server_manager = None
        log.info(f"Creating language server manager for {self.project_root}")
        factory = LanguageServerFactory(
            project_root=self.project_root,
            project_data_path=self._serena_data_folder,
            encoding=self.project_config.encoding,
            ignored_patterns=self._ignored_patterns,
            ls_timeout=ls_timeout,
            ls_specific_settings=self._serena_config.ls_specific_settings,
            trace_lsp_communication=self._serena_config.trace_lsp_communication,
        )
        self.language_server_manager = LanguageServerManager.from_languages(self.project_config.languages, factory)
        return self.language_server_manager

    def add_language(self, language: Language) -> None:
        """
        Adds a new programming language to the project configuration, starting the corresponding
        language server instance if the LS manager is active.
        The project configuration is saved to disk after adding the language.

        :param language: the programming language to add
        """
        if language in self.project_config.languages:
            log.info(f"Language {language.value} is already present in the project configuration.")
            return
        # start the language server (if the LS manager is active)
        if self.language_server_manager is None:
            log.info("Language server manager is not active; skipping language server startup for the new language.")
        else:
            log.info("Adding and starting the language server for new language %s ...", language.value)
            self.language_server_manager.add_language_server(language)
        # update the project configuration
        self.project_config.languages.append(language)
        self.save_config()

    def remove_language(self, language: Language) -> None:
        """
        Removes a programming language from the project configuration, stopping the corresponding
        language server instance if the LS manager is active.
        The project configuration is saved to disk after removing the language.

        :param language: the programming language to remove
        """
        if language not in self.project_config.languages:
            log.info(f"Language {language.value} is not present in the project configuration.")
            return
        # update the project configuration
        self.project_config.languages.remove(language)
        self.save_config()
        # stop the language server (if the LS manager is active)
        if self.language_server_manager is None:
            log.info("Language server manager is not active; skipping language server shutdown for the removed language.")
        else:
            log.info("Removing and stopping the language server for language %s ...", language.value)
            self.language_server_manager.remove_language_server(language)

    def shutdown(self, timeout: float = 2.0) -> None:
        """
        Stops all of the project's language servers (saving their caches), if any are running.

        :param timeout: the timeout, in seconds, passed to the language server manager's stop operation
        """
        if self.language_server_manager is not None:
            self.language_server_manager.stop_all(save_cache=True, timeout=timeout)
            self.language_server_manager = None
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/project.py",
"license": "MIT License",
"lines": 575,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/serena/tools/jetbrains_tools.py | import logging
from typing import Any, Literal
import serena.jetbrains.jetbrains_types as jb
from serena.jetbrains.jetbrains_plugin_client import JetBrainsPluginClient
from serena.symbol import JetBrainsSymbolDictGrouper
from serena.tools import Tool, ToolMarkerOptional, ToolMarkerSymbolicRead
log = logging.getLogger(__name__)
class JetBrainsFindSymbolTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional):
    """
    Performs a global (or local) search for symbols using the JetBrains backend
    """

    def apply(
        self,
        name_path_pattern: str,
        depth: int = 0,
        relative_path: str | None = None,
        include_body: bool = False,
        include_info: bool = False,
        search_deps: bool = False,
        max_answer_chars: int = -1,
    ) -> str:
        """
        Retrieves information on all symbols/code entities (classes, methods, etc.) based on the given name path pattern.
        The returned symbol information can be used for edits or further queries.
        Specify `depth > 0` to retrieve children (e.g., methods of a class).
        Important: through `search_deps=True` dependencies can be searched, which
        should be preferred to web search or other less sophisticated approaches to analyzing dependencies.
        A name path is a path in the symbol tree *within a source file*.
        For example, the method `my_method` defined in class `MyClass` would have the name path `MyClass/my_method`.
        If a symbol is overloaded (e.g., in Java), a 0-based index is appended (e.g. "MyClass/my_method[0]") to
        uniquely identify it.
        To search for a symbol, you provide a name path pattern that is used to match against name paths.
        It can be
        * a simple name (e.g. "method"), which will match any symbol with that name
        * a relative path like "class/method", which will match any symbol with that name path suffix
        * an absolute name path "/class/method" (absolute name path), which requires an exact match of the full name path within the source file.
        Append an index `[i]` to match a specific overload only, e.g. "MyClass/my_method[1]".
        :param name_path_pattern: the name path matching pattern (see above)
        :param depth: depth up to which descendants shall be retrieved (e.g. use 1 to also retrieve immediate children;
            for the case where the symbol is a class, this will return its methods).
            Default 0.
        :param relative_path: Optional. Restrict search to this file or directory. If None, searches entire codebase.
            If a directory is passed, the search will be restricted to the files in that directory.
            If a file is passed, the search will be restricted to that file.
            If you have some knowledge about the codebase, you should use this parameter, as it will significantly
            speed up the search as well as reduce the number of results.
        :param include_body: If True, include the symbol's source code. Use judiciously.
        :param include_info: whether to include additional info (hover-like, typically including docstring and signature),
            about the symbol (ignored if include_body is True).
            Default False; info is never included for child symbols and is not included when body is requested.
        :param search_deps: If True, also search in project dependencies (e.g., libraries).
        :param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned.
            -1 means the default value from the config will be used.
        :return: JSON string: a list of symbols (with locations) matching the name.
        """
        # "." is treated as "no restriction", i.e. search the entire codebase
        if relative_path == ".":
            relative_path = None
        # Documentation is attached only when explicitly requested and no body is included;
        # the quick info (type signature) is the fallback when neither body nor docs are requested.
        include_documentation = not include_body and include_info
        include_quick_info = not include_body and not include_info
        with JetBrainsPluginClient.from_project(self.project) as client:
            response_dict = client.find_symbol(
                name_path=name_path_pattern,
                relative_path=relative_path,
                depth=depth,
                include_body=include_body,
                include_documentation=include_documentation,
                include_quick_info=include_quick_info,
                search_deps=search_deps,
            )
        return self._limit_length(self._to_json(response_dict), max_answer_chars)
class JetBrainsFindReferencingSymbolsTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional):
    """
    Finds symbols that reference the given symbol using the JetBrains backend
    """

    # groups the raw symbol dicts by file and symbol type for a compact result
    symbol_dict_grouper = JetBrainsSymbolDictGrouper(["relative_path", "type"], ["type"], collapse_singleton=True)

    # TODO: (maybe) - add content snippets showing the references like in LS based version?
    def apply(
        self,
        name_path: str,
        relative_path: str,
        max_answer_chars: int = -1,
    ) -> str:
        """
        Finds symbols that reference the symbol at the given `name_path`.
        The result will contain metadata about the referencing symbols.
        :param name_path: name path of the symbol for which to find references; matching logic as described in find symbol tool.
        :param relative_path: the relative path to the file containing the symbol for which to find references.
            Note that here you can't pass a directory but must pass a file.
        :param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned. -1 means the
            default value from the config will be used.
        :return: a list of JSON objects with the symbols referencing the requested symbol
        """
        with JetBrainsPluginClient.from_project(self.project) as client:
            response = client.find_references(
                name_path=name_path,
                relative_path=relative_path,
                include_quick_info=False,
            )
        # group the raw symbol dicts after the client connection has been released
        grouped = self.symbol_dict_grouper.group(response["symbols"])
        return self._limit_length(self._to_json(grouped), max_answer_chars)
class JetBrainsGetSymbolsOverviewTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional):
    """
    Retrieves an overview of the top-level symbols within a specified file using the JetBrains backend
    """

    # when enabled, symbols are grouped by type and the file docstring is lifted to a top-level key
    USE_COMPACT_FORMAT = True
    symbol_dict_grouper = JetBrainsSymbolDictGrouper(["type"], ["type"], collapse_singleton=True, map_name_path_to_name=True)

    def apply(
        self,
        relative_path: str,
        depth: int = 0,
        max_answer_chars: int = -1,
        include_file_documentation: bool = False,
    ) -> str:
        """
        Gets an overview of the top-level symbols in the given file.
        Calling this is often a good idea before more targeted reading, searching or editing operations on the code symbols.
        Before requesting a symbol overview, it is usually a good idea to narrow down the scope of the overview
        by first understanding the basic directory structure of the repository that you can get from memories
        or by using the `list_dir` and `find_file` tools (or similar).
        :param relative_path: the relative path to the file to get the overview of
        :param depth: depth up to which descendants shall be retrieved (e.g., use 1 to also retrieve immediate children).
        :param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned.
            -1 means the default value from the config will be used.
        :param include_file_documentation: whether to include the file's docstring. Default False.
        :return: a JSON object containing the symbols grouped by kind in a compact format.
        """
        with JetBrainsPluginClient.from_project(self.project) as client:
            overview = client.get_symbols_overview(
                relative_path=relative_path, depth=depth, include_file_documentation=include_file_documentation
            )
        if not self.USE_COMPACT_FORMAT:
            payload: dict[str, Any] = overview
        else:
            payload = {"symbols": self.symbol_dict_grouper.group(overview["symbols"])}
            file_doc = overview.pop("documentation", None)
            if file_doc:
                payload["docstring"] = file_doc
        return self._limit_length(self._to_json(payload), max_answer_chars)
class JetBrainsTypeHierarchyTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional):
    """
    Retrieves the type hierarchy (supertypes and/or subtypes) of a symbol using the JetBrains backend
    """

    @staticmethod
    def _transform_hierarchy_nodes(nodes: list[jb.TypeHierarchyNodeDTO] | None) -> dict[str, list]:
        """
        Transform a list of TypeHierarchyNode into a file-grouped compact format.

        Returns a dict where keys are relative_paths and values are lists of either:
        - "SymbolNamePath" (leaf node)
        - {"SymbolNamePath": {nested_file_grouped_children}} (node with children)
        """
        if not nodes:
            return {}
        result: dict[str, list] = {}
        for node in nodes:
            symbol = node["symbol"]
            name_path = symbol["name_path"]
            rel_path = symbol["relative_path"]
            children = node.get("children", [])
            if rel_path not in result:
                result[rel_path] = []
            if children:
                # Node with children - recurse
                nested = JetBrainsTypeHierarchyTool._transform_hierarchy_nodes(children)
                result[rel_path].append({name_path: nested})
            else:
                # Leaf node
                result[rel_path].append(name_path)
        return result

    def apply(
        self,
        name_path: str,
        relative_path: str,
        hierarchy_type: Literal["super", "sub", "both"] = "both",
        depth: int | None = 1,
        max_answer_chars: int = -1,
    ) -> str:
        """
        Gets the type hierarchy of a symbol (supertypes, subtypes, or both).

        :param name_path: name path of the symbol for which to get the type hierarchy.
        :param relative_path: the relative path to the file containing the symbol.
        :param hierarchy_type: which hierarchy to retrieve: "super" for parent classes/interfaces,
            "sub" for subclasses/implementations, or "both" for both directions. Default is "both".
        :param depth: depth limit for hierarchy traversal (None or 0 for unlimited). Default is 1.
        :param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned.
            -1 means the default value from the config will be used.
        :return: Compact JSON with file-grouped hierarchy. Error string if not applicable.
        """
        subtypes = None
        supertypes = None
        # Maps direction ("supertypes"/"subtypes") to the number of levels truncated due to `depth`.
        levels_not_included: dict[str, int] = {}
        with JetBrainsPluginClient.from_project(self.project) as client:
            if hierarchy_type in ("super", "both"):
                supertypes_response = client.get_supertypes(
                    name_path=name_path,
                    relative_path=relative_path,
                    depth=depth,
                )
                if "num_levels_not_included" in supertypes_response:
                    levels_not_included["supertypes"] = supertypes_response["num_levels_not_included"]
                supertypes = self._transform_hierarchy_nodes(supertypes_response.get("hierarchy"))
            if hierarchy_type in ("sub", "both"):
                subtypes_response = client.get_subtypes(
                    name_path=name_path,
                    relative_path=relative_path,
                    depth=depth,
                )
                if "num_levels_not_included" in subtypes_response:
                    levels_not_included["subtypes"] = subtypes_response["num_levels_not_included"]
                subtypes = self._transform_hierarchy_nodes(subtypes_response.get("hierarchy"))
        # Assemble the compact result; directions that were not requested are omitted entirely.
        result_dict: dict[str, dict | list] = {}
        if supertypes is not None:
            result_dict["supertypes"] = supertypes
        if subtypes is not None:
            result_dict["subtypes"] = subtypes
        if levels_not_included:
            result_dict["levels_not_included"] = levels_not_included
        result = self._to_json(result_dict)
        return self._limit_length(result, max_answer_chars)
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/tools/jetbrains_tools.py",
"license": "MIT License",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/solidlsp/language_servers/common.py | from __future__ import annotations
import logging
import os
import platform
import subprocess
from collections.abc import Iterable, Mapping, Sequence
from dataclasses import dataclass, replace
from typing import Any, cast
from solidlsp.ls_utils import FileUtils, PlatformUtils
from solidlsp.util.subprocess_util import subprocess_kwargs
log = logging.getLogger(__name__)
@dataclass(kw_only=True)
class RuntimeDependency:
    """Represents a runtime dependency for a language server."""

    # Identifier of the dependency; (id, platform_id) pairs must be unique within a collection.
    id: str
    # Platform this entry applies to (e.g. "linux-x64"); None, "any" or "platform-agnostic" match all platforms.
    platform_id: str | None = None
    # Download URL for the dependency archive or single-file binary, if installed via download.
    url: str | None = None
    # Archive type of the download (e.g. "zip"; "gz"/"binary" denote single-file downloads).
    archive_type: str | None = None
    # Relative path of the resulting binary inside the installation target directory.
    binary_name: str | None = None
    # Shell command (string or argv list) executed in the target directory to install the dependency.
    command: str | list[str] | None = None
    # Package name for package-manager-based installs — not referenced in this module; presumably used by specific servers.
    package_name: str | None = None
    # Package version for package-manager-based installs — not referenced in this module; presumably used by specific servers.
    package_version: str | None = None
    # Sub-path within the archive to extract — not referenced in this module; presumably used by specific servers.
    extract_path: str | None = None
    # Human-readable description of the dependency.
    description: str | None = None
class RuntimeDependencyCollection:
    """Utility to handle installation of runtime dependencies."""

    def __init__(self, dependencies: Sequence[RuntimeDependency], overrides: Iterable[Mapping[str, Any]] = ()) -> None:
        """Initialize the collection with a list of dependencies and optional overrides.

        :param dependencies: List of base RuntimeDependency instances. The combination of 'id' and 'platform_id' must be unique.
        :param overrides: List of dictionaries which represent overrides or additions to the base dependencies.
            Each entry must contain at least the 'id' key, and optionally 'platform_id' to uniquely identify the dependency to override.
        """
        self._id_and_platform_id_to_dep: dict[tuple[str, str | None], RuntimeDependency] = {}
        for dep in dependencies:
            dep_key = (dep.id, dep.platform_id)
            if dep_key in self._id_and_platform_id_to_dep:
                raise ValueError(f"Duplicate runtime dependency with id '{dep.id}' and platform_id '{dep.platform_id}':\n{dep}")
            self._id_and_platform_id_to_dep[dep_key] = dep
        for dep_values_override in overrides:
            override_key = cast(tuple[str, str | None], (dep_values_override["id"], dep_values_override.get("platform_id")))
            base_dep = self._id_and_platform_id_to_dep.get(override_key)
            if base_dep is None:
                # No matching base dependency: the override defines an entirely new dependency.
                self._id_and_platform_id_to_dep[override_key] = RuntimeDependency(**dep_values_override)
            else:
                # Merge: fields provided in the override replace the corresponding base fields.
                self._id_and_platform_id_to_dep[override_key] = replace(base_dep, **dep_values_override)

    def get_dependencies_for_platform(self, platform_id: str) -> list[RuntimeDependency]:
        """Return all dependencies applicable to *platform_id*, including platform-agnostic ones."""
        return [d for d in self._id_and_platform_id_to_dep.values() if d.platform_id in (platform_id, "any", "platform-agnostic", None)]

    def get_dependencies_for_current_platform(self) -> list[RuntimeDependency]:
        """Return all dependencies applicable to the platform this process is running on."""
        return self.get_dependencies_for_platform(PlatformUtils.get_platform_id().value)

    def get_single_dep_for_current_platform(self, dependency_id: str | None = None) -> RuntimeDependency:
        """Return the unique dependency for the current platform, optionally filtered by *dependency_id*.

        :raises RuntimeError: if zero or more than one dependency matches.
        """
        deps = self.get_dependencies_for_current_platform()
        if dependency_id is not None:
            deps = [d for d in deps if d.id == dependency_id]
        if len(deps) != 1:
            raise RuntimeError(
                f"Expected exactly one runtime dependency for platform-{PlatformUtils.get_platform_id().value} and {dependency_id=}, found {len(deps)}"
            )
        return deps[0]

    def binary_path(self, target_dir: str) -> str:
        """Return the resolved binary path inside *target_dir*, or *target_dir* itself if the dependency defines no binary."""
        dep = self.get_single_dep_for_current_platform()
        if not dep.binary_name:
            return target_dir
        return os.path.join(target_dir, dep.binary_name)

    def install(self, target_dir: str) -> dict[str, str]:
        """Install all dependencies for the current platform into *target_dir*.

        Returns a mapping from dependency id to the resolved binary path.
        """
        os.makedirs(target_dir, exist_ok=True)
        results: dict[str, str] = {}
        for dep in self.get_dependencies_for_current_platform():
            if dep.url:
                self._install_from_url(dep, target_dir)
            if dep.command:
                self._run_command(dep.command, target_dir)
            # NOTE: if multiple deps share an id on this platform, the last entry wins in the mapping.
            if dep.binary_name:
                results[dep.id] = os.path.join(target_dir, dep.binary_name)
            else:
                results[dep.id] = target_dir
        return results

    @staticmethod
    def _run_command(command: str | list[str], cwd: str) -> None:
        """Run an installation command in *cwd* via the shell.

        Raises subprocess.CalledProcessError on non-zero exit; the command's combined
        stdout/stderr is logged before the exception propagates.
        """
        kwargs = subprocess_kwargs()
        if not PlatformUtils.get_platform_id().is_windows():
            import pwd

            kwargs["user"] = pwd.getpwuid(os.getuid()).pw_name  # type: ignore
        is_windows = platform.system() == "Windows"
        if not isinstance(command, str) and not is_windows:
            # Since we are using the shell, we need to convert the command list to a single string
            # on Linux/macOS
            command = " ".join(command)
        log.info("Running command %s in '%s'", f"'{command}'" if isinstance(command, str) else command, cwd)
        try:
            subprocess.run(
                command,
                shell=True,
                check=True,
                cwd=cwd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                **kwargs,
            )  # type: ignore
        except subprocess.CalledProcessError as e:
            # With check=True a non-zero exit raises, so a post-hoc returncode check would be dead code
            # (as it was previously); log the captured output here before re-raising so failures stay diagnosable.
            log.warning("Command '%s' failed with return code %d", command, e.returncode)
            log.warning("Command output:\n%s", e.output)
            raise
        log.info("Command completed successfully")

    @staticmethod
    def _install_from_url(dep: RuntimeDependency, target_dir: str) -> None:
        """Download *dep* from its URL into *target_dir*.

        Single-file downloads ("gz"/"binary" with a binary_name) are placed at the binary path;
        everything else is treated as an archive and extracted into *target_dir* (defaulting to zip).
        """
        if not dep.url:
            raise ValueError(f"Dependency {dep.id} has no URL")
        if dep.archive_type in ("gz", "binary") and dep.binary_name:
            dest = os.path.join(target_dir, dep.binary_name)
            FileUtils.download_and_extract_archive(dep.url, dest, dep.archive_type)
        else:
            FileUtils.download_and_extract_archive(dep.url, target_dir, dep.archive_type or "zip")
def quote_windows_path(path: str) -> str:
    """
    Quote a path for Windows command execution if needed.

    On Windows, paths need to be quoted for proper command execution; already-quoted
    paths are returned unchanged to avoid double-quoting. On all other platforms the
    path is returned as-is.

    Args:
        path: The file path to potentially quote

    Returns:
        The quoted path on Windows (if not already quoted), unchanged path on other platforms
    """
    if platform.system() != "Windows":
        return path
    is_already_quoted = path.startswith('"') and path.endswith('"')
    return path if is_already_quoted else f'"{path}"'
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/common.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/solidlsp/language_servers/dart_language_server.py | import logging
import os
import pathlib
from typing import cast
from solidlsp.ls import SolidLanguageServer
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
from ..ls_config import LanguageServerConfig
from ..lsp_protocol_handler.lsp_types import InitializeParams
from .common import RuntimeDependency, RuntimeDependencyCollection
log = logging.getLogger(__name__)
class DartLanguageServer(SolidLanguageServer):
    """
    Provides Dart specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Dart.
    """

    def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings) -> None:
        """
        Creates a DartServer instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead.
        """
        # Ensures the Dart SDK is downloaded and yields the full launch command for LSP mode.
        executable_path = self._setup_runtime_dependencies(solidlsp_settings)
        super().__init__(
            config, repository_root_path, ProcessLaunchInfo(cmd=executable_path, cwd=repository_root_path), "dart", solidlsp_settings
        )

    @classmethod
    def _setup_runtime_dependencies(cls, solidlsp_settings: SolidLSPSettings) -> str:
        """
        Download the platform-appropriate Dart SDK (pinned to 3.7.1) if not already present
        and return the command line that launches the Dart analysis server in LSP mode.
        """
        deps = RuntimeDependencyCollection(
            [
                RuntimeDependency(
                    id="DartLanguageServer",
                    description="Dart Language Server for Linux (x64)",
                    url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-linux-x64-release.zip",
                    platform_id="linux-x64",
                    archive_type="zip",
                    binary_name="dart-sdk/bin/dart",
                ),
                RuntimeDependency(
                    id="DartLanguageServer",
                    description="Dart Language Server for Linux (arm64)",
                    url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-linux-arm64-release.zip",
                    platform_id="linux-arm64",
                    archive_type="zip",
                    binary_name="dart-sdk/bin/dart",
                ),
                RuntimeDependency(
                    id="DartLanguageServer",
                    description="Dart Language Server for Windows (x64)",
                    url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-windows-x64-release.zip",
                    platform_id="win-x64",
                    archive_type="zip",
                    binary_name="dart-sdk/bin/dart.exe",
                ),
                RuntimeDependency(
                    id="DartLanguageServer",
                    description="Dart Language Server for Windows (arm64)",
                    url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-windows-arm64-release.zip",
                    platform_id="win-arm64",
                    archive_type="zip",
                    binary_name="dart-sdk/bin/dart.exe",
                ),
                RuntimeDependency(
                    id="DartLanguageServer",
                    description="Dart Language Server for macOS (x64)",
                    url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-macos-x64-release.zip",
                    platform_id="osx-x64",
                    archive_type="zip",
                    binary_name="dart-sdk/bin/dart",
                ),
                RuntimeDependency(
                    id="DartLanguageServer",
                    description="Dart Language Server for macOS (arm64)",
                    url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-macos-arm64-release.zip",
                    platform_id="osx-arm64",
                    archive_type="zip",
                    binary_name="dart-sdk/bin/dart",
                ),
            ]
        )
        dart_ls_dir = cls.ls_resources_dir(solidlsp_settings)
        dart_executable_path = deps.binary_path(dart_ls_dir)
        if not os.path.exists(dart_executable_path):
            deps.install(dart_ls_dir)
        assert os.path.exists(dart_executable_path)
        # Ensure the SDK binary is executable (archives do not always preserve the execute bit).
        os.chmod(dart_executable_path, 0o755)
        return f"{dart_executable_path} language-server --client-id multilspy.dart --client-version 1.2"

    @staticmethod
    def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
        """
        Returns the initialize params for the Dart Language Server.
        """
        root_uri = pathlib.Path(repository_absolute_path).as_uri()
        initialize_params = {
            "capabilities": {},
            # Disable IDE-oriented extras; only core LSP features are needed.
            "initializationOptions": {
                "onlyAnalyzeProjectsWithOpenFiles": False,
                "closingLabels": False,
                "outline": False,
                "flutterOutline": False,
                "allowOpenUri": False,
            },
            "trace": "verbose",
            "processId": os.getpid(),
            "rootPath": repository_absolute_path,
            # Reuse the URI computed above instead of recomputing it.
            "rootUri": root_uri,
            "workspaceFolders": [
                {
                    "uri": root_uri,
                    "name": os.path.basename(repository_absolute_path),
                }
            ],
        }
        return cast(InitializeParams, initialize_params)

    def _start_server(self) -> None:
        """
        Start the language server and yield when the server is ready.
        """

        def execute_client_command_handler(params: dict) -> list:
            # The server may ask the client to execute commands; none are supported here.
            return []

        def do_nothing(params: dict) -> None:
            return

        def check_experimental_status(params: dict) -> None:
            pass

        def window_log_message(msg: dict) -> None:
            log.info(f"LSP: window/logMessage: {msg}")

        # Register handlers for server-initiated requests/notifications we either log or ignore.
        self.server.on_request("client/registerCapability", do_nothing)
        self.server.on_notification("language/status", do_nothing)
        self.server.on_notification("window/logMessage", window_log_message)
        self.server.on_request("workspace/executeClientCommand", execute_client_command_handler)
        self.server.on_notification("$/progress", do_nothing)
        self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
        self.server.on_notification("language/actionableNotification", do_nothing)
        self.server.on_notification("experimental/serverStatus", check_experimental_status)

        log.info("Starting dart-language-server server process")
        self.server.start()
        initialize_params = self._get_initialize_params(self.repository_root_path)

        log.debug("Sending initialize request to dart-language-server")
        init_response = self.server.send_request("initialize", initialize_params)  # type: ignore
        log.info(f"Received initialize response from dart-language-server: {init_response}")
        self.server.notify.initialized({})
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/dart_language_server.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:src/solidlsp/language_servers/eclipse_jdtls.py | """
Provides Java specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Java.
"""
import dataclasses
import logging
import os
import pathlib
import shutil
import threading
import uuid
from pathlib import PurePath
from time import sleep
from typing import cast
from overrides import override
from solidlsp import ls_types
from solidlsp.ls import LanguageServerDependencyProvider, LSPFileBuffer, SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.ls_types import UnifiedSymbolInformation
from solidlsp.ls_utils import FileUtils, PlatformUtils
from solidlsp.lsp_protocol_handler.lsp_types import DocumentSymbol, InitializeParams, SymbolInformation
from solidlsp.settings import SolidLSPSettings
log = logging.getLogger(__name__)
@dataclasses.dataclass
class RuntimeDependencyPaths:
    """
    Stores the paths to the runtime dependencies of EclipseJDTLS
    """

    # Root directory of the downloaded Gradle distribution.
    gradle_path: str
    # Lombok agent jar, passed to the JDTLS JVM via -javaagent.
    lombok_jar_path: str
    # java executable of the bundled JRE used to launch JDTLS.
    jre_path: str
    # Home directory of the bundled JRE (used as JAVA_HOME unless use_system_java_home is set).
    jre_home_path: str
    # Equinox launcher jar that boots JDTLS (passed via -jar).
    jdtls_launcher_jar_path: str
    # Read-only platform-specific JDTLS configuration directory; copied into each workspace before use.
    jdtls_readonly_config_path: str
    # IntelliCode core jar — presumably forwarded to JDTLS at initialization; not visible in this chunk.
    intellicode_jar_path: str
    # Bundled IntelliCode member model — presumably used when enabling IntelliCode; not visible in this chunk.
    intellisense_members_path: str
class EclipseJDTLS(SolidLanguageServer):
r"""
The EclipseJDTLS class provides a Java specific implementation of the LanguageServer class
You can configure the following options in ls_specific_settings (in serena_config.yml):
- maven_user_settings: Path to Maven settings.xml file (default: ~/.m2/settings.xml)
- gradle_user_home: Path to Gradle user home directory (default: ~/.gradle)
- gradle_wrapper_enabled: Whether to use the project's Gradle wrapper (default: false)
- gradle_java_home: Path to JDK for Gradle (default: null, uses bundled JRE)
- use_system_java_home: Whether to use the system's JAVA_HOME for JDTLS itself (default: false)
Example configuration in ~/.serena/serena_config.yml:
```yaml
ls_specific_settings:
java:
maven_user_settings: "/home/user/.m2/settings.xml" # Unix/Linux/Mac
# maven_user_settings: 'C:\\Users\\YourName\\.m2\\settings.xml' # Windows (use single quotes!)
gradle_user_home: "/home/user/.gradle" # Unix/Linux/Mac
# gradle_user_home: 'C:\\Users\\YourName\\.gradle' # Windows (use single quotes!)
gradle_wrapper_enabled: true # set to true for projects with custom plugins/repositories
gradle_java_home: "/path/to/jdk" # set to override Gradle's JDK
use_system_java_home: true # set to true to use system JAVA_HOME for JDTLS
```
"""
    def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
        """
        Creates a new EclipseJDTLS instance initializing the language server settings appropriately.
        This class is not meant to be instantiated directly. Use LanguageServer.create() instead.

        :param config: the language server configuration.
        :param repository_root_path: absolute path of the repository the server operates on.
        :param solidlsp_settings: global solidlsp settings (including ls_specific_settings for Java).
        """
        super().__init__(config, repository_root_path, None, "java", solidlsp_settings)
        # Extract runtime_dependency_paths from the dependency provider
        assert isinstance(self._dependency_provider, self.DependencyProvider)
        self.runtime_dependency_paths = self._dependency_provider.runtime_dependency_paths
        # Startup synchronization events — presumably set when the corresponding server status
        # notifications arrive (handler code not in this chunk; TODO confirm).
        self._service_ready_event = threading.Event()
        self._project_ready_event = threading.Event()
        self._intellicode_enable_command_available = threading.Event()
def _create_dependency_provider(self) -> LanguageServerDependencyProvider:
ls_resources_dir = self.ls_resources_dir(self._solidlsp_settings)
return self.DependencyProvider(self._custom_settings, ls_resources_dir, self._solidlsp_settings, self.repository_root_path)
@override
def is_ignored_dirname(self, dirname: str) -> bool:
# Ignore common Java build directories from different build tools:
# - Maven: target
# - Gradle: build, .gradle
# - Eclipse: bin, .settings
# - IntelliJ IDEA: out, .idea
# - General: classes, dist, lib
return super().is_ignored_dirname(dirname) or dirname in [
"target", # Maven
"build", # Gradle
"bin", # Eclipse
"out", # IntelliJ IDEA
"classes", # General
"dist", # General
"lib", # General
]
class DependencyProvider(LanguageServerDependencyProvider):
        def __init__(
            self,
            custom_settings: SolidLSPSettings.CustomLSSettings,
            ls_resources_dir: str,
            solidlsp_settings: SolidLSPSettings,
            repository_root_path: str,
        ):
            """
            :param custom_settings: Java-specific user settings (see EclipseJDTLS class docstring).
            :param ls_resources_dir: directory where the language server's runtime dependencies are stored.
            :param solidlsp_settings: global solidlsp settings.
            :param repository_root_path: absolute path of the repository the server operates on.
            """
            super().__init__(custom_settings, ls_resources_dir)
            self._solidlsp_settings = solidlsp_settings
            self._repository_root_path = repository_root_path
            # Download/verify all runtime dependencies eagerly so their paths are available immediately.
            self.runtime_dependency_paths = self._setup_runtime_dependencies(ls_resources_dir)
        @classmethod
        def _setup_runtime_dependencies(cls, ls_resources_dir: str) -> RuntimeDependencyPaths:
            """
            Setup runtime dependencies for EclipseJDTLS and return the paths.

            Downloads (if not already cached in ls_resources_dir) three artifact groups:
            Gradle, the platform-specific vscode-java VSIX (which bundles JDTLS, a JRE and
            Lombok), and the IntelliCode extension.
            """
            platformId = PlatformUtils.get_platform_id()

            # Pinned versions/URLs for all artifacts; relative paths are resolved against the
            # respective extraction directory below.
            runtime_dependencies = {
                "gradle": {
                    "platform-agnostic": {
                        "url": "https://services.gradle.org/distributions/gradle-8.14.2-bin.zip",
                        "archiveType": "zip",
                        "relative_extraction_path": ".",
                    }
                },
                "vscode-java": {
                    "darwin-arm64": {
                        "url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-darwin-arm64-1.42.0-561.vsix",
                        "archiveType": "zip",
                        "relative_extraction_path": "vscode-java",
                    },
                    "osx-arm64": {
                        "url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-darwin-arm64-1.42.0-561.vsix",
                        "archiveType": "zip",
                        "relative_extraction_path": "vscode-java",
                        "jre_home_path": "extension/jre/21.0.7-macosx-aarch64",
                        "jre_path": "extension/jre/21.0.7-macosx-aarch64/bin/java",
                        "lombok_jar_path": "extension/lombok/lombok-1.18.36.jar",
                        "jdtls_launcher_jar_path": "extension/server/plugins/org.eclipse.equinox.launcher_1.7.0.v20250424-1814.jar",
                        "jdtls_readonly_config_path": "extension/server/config_mac_arm",
                    },
                    "osx-x64": {
                        "url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-darwin-x64-1.42.0-561.vsix",
                        "archiveType": "zip",
                        "relative_extraction_path": "vscode-java",
                        "jre_home_path": "extension/jre/21.0.7-macosx-x86_64",
                        "jre_path": "extension/jre/21.0.7-macosx-x86_64/bin/java",
                        "lombok_jar_path": "extension/lombok/lombok-1.18.36.jar",
                        "jdtls_launcher_jar_path": "extension/server/plugins/org.eclipse.equinox.launcher_1.7.0.v20250424-1814.jar",
                        "jdtls_readonly_config_path": "extension/server/config_mac",
                    },
                    "linux-arm64": {
                        "url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-linux-arm64-1.42.0-561.vsix",
                        "archiveType": "zip",
                        "relative_extraction_path": "vscode-java",
                        "jre_home_path": "extension/jre/21.0.7-linux-aarch64",
                        "jre_path": "extension/jre/21.0.7-linux-aarch64/bin/java",
                        "lombok_jar_path": "extension/lombok/lombok-1.18.36.jar",
                        "jdtls_launcher_jar_path": "extension/server/plugins/org.eclipse.equinox.launcher_1.7.0.v20250424-1814.jar",
                        "jdtls_readonly_config_path": "extension/server/config_linux_arm",
                    },
                    "linux-x64": {
                        "url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-linux-x64-1.42.0-561.vsix",
                        "archiveType": "zip",
                        "relative_extraction_path": "vscode-java",
                        "jre_home_path": "extension/jre/21.0.7-linux-x86_64",
                        "jre_path": "extension/jre/21.0.7-linux-x86_64/bin/java",
                        "lombok_jar_path": "extension/lombok/lombok-1.18.36.jar",
                        "jdtls_launcher_jar_path": "extension/server/plugins/org.eclipse.equinox.launcher_1.7.0.v20250424-1814.jar",
                        "jdtls_readonly_config_path": "extension/server/config_linux",
                    },
                    "win-x64": {
                        "url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-win32-x64-1.42.0-561.vsix",
                        "archiveType": "zip",
                        "relative_extraction_path": "vscode-java",
                        "jre_home_path": "extension/jre/21.0.7-win32-x86_64",
                        "jre_path": "extension/jre/21.0.7-win32-x86_64/bin/java.exe",
                        "lombok_jar_path": "extension/lombok/lombok-1.18.36.jar",
                        "jdtls_launcher_jar_path": "extension/server/plugins/org.eclipse.equinox.launcher_1.7.0.v20250424-1814.jar",
                        "jdtls_readonly_config_path": "extension/server/config_win",
                    },
                },
                "intellicode": {
                    "platform-agnostic": {
                        "url": "https://VisualStudioExptTeam.gallery.vsassets.io/_apis/public/gallery/publisher/VisualStudioExptTeam/extension/vscodeintellicode/1.2.30/assetbyname/Microsoft.VisualStudio.Services.VSIXPackage",
                        "alternate_url": "https://marketplace.visualstudio.com/_apis/public/gallery/publishers/VisualStudioExptTeam/vsextensions/vscodeintellicode/1.2.30/vspackage",
                        "archiveType": "zip",
                        "relative_extraction_path": "intellicode",
                        "intellicode_jar_path": "extension/dist/com.microsoft.jdtls.intellicode.core-0.7.0.jar",
                        "intellisense_members_path": "extension/dist/bundledModels/java_intellisense-members",
                    }
                },
            }

            # 1) Gradle distribution: download and extract once; presence of the versioned
            #    directory is used as the cache marker.
            gradle_path = str(
                PurePath(
                    ls_resources_dir,
                    "gradle-8.14.2",
                )
            )
            if not os.path.exists(gradle_path):
                FileUtils.download_and_extract_archive(
                    runtime_dependencies["gradle"]["platform-agnostic"]["url"],
                    str(PurePath(gradle_path).parent),
                    runtime_dependencies["gradle"]["platform-agnostic"]["archiveType"],
                )
            assert os.path.exists(gradle_path)

            # 2) vscode-java VSIX (JDTLS + bundled JRE + Lombok) for the current platform.
            dependency = runtime_dependencies["vscode-java"][platformId.value]
            vscode_java_path = str(PurePath(ls_resources_dir, dependency["relative_extraction_path"]))
            os.makedirs(vscode_java_path, exist_ok=True)
            jre_home_path = str(PurePath(vscode_java_path, dependency["jre_home_path"]))
            jre_path = str(PurePath(vscode_java_path, dependency["jre_path"]))
            lombok_jar_path = str(PurePath(vscode_java_path, dependency["lombok_jar_path"]))
            jdtls_launcher_jar_path = str(PurePath(vscode_java_path, dependency["jdtls_launcher_jar_path"]))
            jdtls_readonly_config_path = str(PurePath(vscode_java_path, dependency["jdtls_readonly_config_path"]))
            # Re-download if any expected artifact is missing.
            if not all(
                [
                    os.path.exists(vscode_java_path),
                    os.path.exists(jre_home_path),
                    os.path.exists(jre_path),
                    os.path.exists(lombok_jar_path),
                    os.path.exists(jdtls_launcher_jar_path),
                    os.path.exists(jdtls_readonly_config_path),
                ]
            ):
                FileUtils.download_and_extract_archive(dependency["url"], vscode_java_path, dependency["archiveType"])
                # Ensure the extracted java executable is runnable (archives may drop the execute bit).
                os.chmod(jre_path, 0o755)
            assert os.path.exists(vscode_java_path)
            assert os.path.exists(jre_home_path)
            assert os.path.exists(jre_path)
            assert os.path.exists(lombok_jar_path)
            assert os.path.exists(jdtls_launcher_jar_path)
            assert os.path.exists(jdtls_readonly_config_path)

            # 3) IntelliCode extension (platform-agnostic).
            dependency = runtime_dependencies["intellicode"]["platform-agnostic"]
            intellicode_directory_path = str(PurePath(ls_resources_dir, dependency["relative_extraction_path"]))
            os.makedirs(intellicode_directory_path, exist_ok=True)
            intellicode_jar_path = str(PurePath(intellicode_directory_path, dependency["intellicode_jar_path"]))
            intellisense_members_path = str(PurePath(intellicode_directory_path, dependency["intellisense_members_path"]))
            if not all(
                [
                    os.path.exists(intellicode_directory_path),
                    os.path.exists(intellicode_jar_path),
                    os.path.exists(intellisense_members_path),
                ]
            ):
                FileUtils.download_and_extract_archive(dependency["url"], intellicode_directory_path, dependency["archiveType"])
            assert os.path.exists(intellicode_directory_path)
            assert os.path.exists(intellicode_jar_path)
            assert os.path.exists(intellisense_members_path)

            return RuntimeDependencyPaths(
                gradle_path=gradle_path,
                lombok_jar_path=lombok_jar_path,
                jre_path=jre_path,
                jre_home_path=jre_home_path,
                jdtls_launcher_jar_path=jdtls_launcher_jar_path,
                jdtls_readonly_config_path=jdtls_readonly_config_path,
                intellicode_jar_path=intellicode_jar_path,
                intellisense_members_path=intellisense_members_path,
            )
        def create_launch_command(self) -> list[str]:
            """
            Build the argv used to launch the JDTLS process: the bundled JRE with JDTLS-specific
            JVM flags, the Equinox launcher jar, a per-instance configuration directory and a
            fresh per-instance data directory.
            """
            # ws_dir is the workspace directory for the EclipseJDTLS server
            # (a fresh uuid-named directory per instance, so parallel instances don't collide).
            ws_dir = str(
                PurePath(
                    self._solidlsp_settings.ls_resources_dir,
                    "EclipseJDTLS",
                    "workspaces",
                    uuid.uuid4().hex,
                )
            )

            # shared_cache_location is the global cache used by Eclipse JDTLS across all workspaces
            shared_cache_location = str(PurePath(self._solidlsp_settings.ls_resources_dir, "lsp", "EclipseJDTLS", "sharedIndex"))

            os.makedirs(shared_cache_location, exist_ok=True)
            os.makedirs(ws_dir, exist_ok=True)

            jre_path = self.runtime_dependency_paths.jre_path
            lombok_jar_path = self.runtime_dependency_paths.lombok_jar_path

            jdtls_launcher_jar = self.runtime_dependency_paths.jdtls_launcher_jar_path

            data_dir = str(PurePath(ws_dir, "data_dir"))
            jdtls_config_path = str(PurePath(ws_dir, "config_path"))

            # Each instance works on a private copy of the read-only config, since JDTLS mutates it.
            jdtls_readonly_config_path = self.runtime_dependency_paths.jdtls_readonly_config_path

            if not os.path.exists(jdtls_config_path):
                shutil.copytree(jdtls_readonly_config_path, jdtls_config_path)

            for static_path in [
                jre_path,
                lombok_jar_path,
                jdtls_launcher_jar,
                jdtls_config_path,
                jdtls_readonly_config_path,
            ]:
                assert os.path.exists(static_path), static_path

            # JVM flags largely mirror the vscode-java launch configuration; Lombok is attached
            # as a java agent, and the shared index location is passed so indexes are reused.
            cmd = [
                jre_path,
                "--add-modules=ALL-SYSTEM",
                "--add-opens",
                "java.base/java.util=ALL-UNNAMED",
                "--add-opens",
                "java.base/java.lang=ALL-UNNAMED",
                "--add-opens",
                "java.base/sun.nio.fs=ALL-UNNAMED",
                "-Declipse.application=org.eclipse.jdt.ls.core.id1",
                "-Dosgi.bundles.defaultStartLevel=4",
                "-Declipse.product=org.eclipse.jdt.ls.core.product",
                "-Djava.import.generatesMetadataFilesAtProjectRoot=false",
                "-Dfile.encoding=utf8",
                "-noverify",
                "-XX:+UseParallelGC",
                "-XX:GCTimeRatio=4",
                "-XX:AdaptiveSizePolicyWeight=90",
                "-Dsun.zip.disableMemoryMapping=true",
                "-Djava.lsp.joinOnCompletion=true",
                "-Xmx3G",
                "-Xms100m",
                "-Xlog:disable",
                "-Dlog.level=ALL",
                f"-javaagent:{lombok_jar_path}",
                f"-Djdt.core.sharedIndexLocation={shared_cache_location}",
                "-jar",
                f"{jdtls_launcher_jar}",
                "-configuration",
                f"{jdtls_config_path}",
                "-data",
                f"{data_dir}",
            ]

            return cmd
def create_launch_command_env(self) -> dict[str, str]:
use_system_java_home = self._custom_settings.get("use_system_java_home", False)
if use_system_java_home:
system_java_home = os.environ.get("JAVA_HOME")
if system_java_home:
log.info(f"Using system JAVA_HOME for JDTLS: {system_java_home}")
return {"syntaxserver": "false", "JAVA_HOME": system_java_home}
else:
log.warning("use_system_java_home is set but JAVA_HOME is not set in environment, falling back to bundled JRE")
java_home = self.runtime_dependency_paths.jre_home_path
log.info(f"Using bundled JRE for JDTLS: {java_home}")
return {"syntaxserver": "false", "JAVA_HOME": java_home}
    def _get_initialize_params(self, repository_absolute_path: str) -> InitializeParams:
        """
        Returns the initialize parameters for the EclipseJDTLS server.

        Assembles the LSP ``initialize`` payload from three parts: the client
        capabilities we advertise, the JDTLS-specific ``initializationOptions``
        (Java/Maven/Gradle settings and extension bundles), and the workspace
        root derived from ``repository_absolute_path``. User overrides are read
        from ``self._custom_settings`` (ls_specific_settings -> java).

        :param repository_absolute_path: repository root; made absolute if relative
        :raises FileNotFoundError: if a user-provided Maven settings file,
            Gradle user home, or Gradle Java home path does not exist
        """
        # Look into https://github.com/eclipse/eclipse.jdt.ls/blob/master/org.eclipse.jdt.ls.core/src/org/eclipse/jdt/ls/core/internal/preferences/Preferences.java to understand all the options available
        if not os.path.isabs(repository_absolute_path):
            repository_absolute_path = os.path.abspath(repository_absolute_path)
        repo_uri = pathlib.Path(repository_absolute_path).as_uri()

        # Load user's Maven and Gradle configuration paths from ls_specific_settings["java"]

        # Maven settings: default to ~/.m2/settings.xml
        default_maven_settings_path = os.path.join(os.path.expanduser("~"), ".m2", "settings.xml")
        custom_maven_settings_path = self._custom_settings.get("maven_user_settings")
        if custom_maven_settings_path is not None:
            # User explicitly provided a path; a missing file is a hard error so
            # misconfiguration is surfaced instead of silently ignored.
            if not os.path.exists(custom_maven_settings_path):
                error_msg = (
                    f"Provided maven settings file not found: {custom_maven_settings_path}. "
                    f"Fix: create the file, update path in ~/.serena/serena_config.yml (ls_specific_settings -> java -> maven_user_settings), "
                    f"or remove the setting to use default ({default_maven_settings_path})"
                )
                log.error(error_msg)
                raise FileNotFoundError(error_msg)
            maven_settings_path = custom_maven_settings_path
            log.info(f"Using Maven settings from custom location: {maven_settings_path}")
        elif os.path.exists(default_maven_settings_path):
            maven_settings_path = default_maven_settings_path
            log.info(f"Using Maven settings from default location: {maven_settings_path}")
        else:
            # None lets JDTLS fall back to its own built-in defaults.
            maven_settings_path = None
            log.info(f"Maven settings not found at default location ({default_maven_settings_path}), will use JDTLS defaults")

        # Gradle user home: default to ~/.gradle
        default_gradle_home = os.path.join(os.path.expanduser("~"), ".gradle")
        custom_gradle_home = self._custom_settings.get("gradle_user_home")
        if custom_gradle_home is not None:
            # User explicitly provided a path
            if not os.path.exists(custom_gradle_home):
                error_msg = (
                    f"Gradle user home directory not found: {custom_gradle_home}. "
                    f"Fix: create the directory, update path in ~/.serena/serena_config.yml (ls_specific_settings -> java -> gradle_user_home), "
                    f"or remove the setting to use default (~/.gradle)"
                )
                log.error(error_msg)
                raise FileNotFoundError(error_msg)
            gradle_user_home = custom_gradle_home
            log.info(f"Using Gradle user home from custom location: {gradle_user_home}")
        elif os.path.exists(default_gradle_home):
            gradle_user_home = default_gradle_home
            log.info(f"Using Gradle user home from default location: {gradle_user_home}")
        else:
            gradle_user_home = None
            log.info(f"Gradle user home not found at default location ({default_gradle_home}), will use JDTLS defaults")

        # Gradle wrapper: default to False to preserve existing behaviour
        gradle_wrapper_enabled = self._custom_settings.get("gradle_wrapper_enabled", False)
        log.info(
            f"Gradle wrapper {'enabled' if gradle_wrapper_enabled else 'disabled'} (configurable via ls_specific_settings -> java -> gradle_wrapper_enabled)"
        )

        # Gradle Java home: default to None, which means the bundled JRE is used
        gradle_java_home = self._custom_settings.get("gradle_java_home")
        if gradle_java_home is not None:
            if not os.path.exists(gradle_java_home):
                error_msg = (
                    f"Gradle Java home not found: {gradle_java_home}. "
                    f"Fix: update path in ~/.serena/serena_config.yml (ls_specific_settings -> java -> gradle_java_home), "
                    f"or remove the setting to use the bundled JRE"
                )
                log.error(error_msg)
                raise FileNotFoundError(error_msg)
            log.info(f"Using Gradle Java home from custom location: {gradle_java_home}")
        else:
            log.info(f"Using bundled JRE for Gradle: {self.runtime_dependency_paths.jre_path}")

        # The capabilities below mirror what a full-featured LSP client (VS Code)
        # advertises; JDTLS keys several behaviors off them, so trim with care.
        initialize_params = {
            "locale": "en",
            "rootPath": repository_absolute_path,
            "rootUri": pathlib.Path(repository_absolute_path).as_uri(),
            "capabilities": {
                "workspace": {
                    "applyEdit": True,
                    "workspaceEdit": {
                        "documentChanges": True,
                        "resourceOperations": ["create", "rename", "delete"],
                        "failureHandling": "textOnlyTransactional",
                        "normalizesLineEndings": True,
                        "changeAnnotationSupport": {"groupsOnLabel": True},
                    },
                    "didChangeConfiguration": {"dynamicRegistration": True},
                    "didChangeWatchedFiles": {"dynamicRegistration": True, "relativePatternSupport": True},
                    "symbol": {
                        "dynamicRegistration": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                        "tagSupport": {"valueSet": [1]},
                        "resolveSupport": {"properties": ["location.range"]},
                    },
                    "codeLens": {"refreshSupport": True},
                    "executeCommand": {"dynamicRegistration": True},
                    "configuration": True,
                    "workspaceFolders": True,
                    "semanticTokens": {"refreshSupport": True},
                    "fileOperations": {
                        "dynamicRegistration": True,
                        "didCreate": True,
                        "didRename": True,
                        "didDelete": True,
                        "willCreate": True,
                        "willRename": True,
                        "willDelete": True,
                    },
                    "inlineValue": {"refreshSupport": True},
                    "inlayHint": {"refreshSupport": True},
                    "diagnostics": {"refreshSupport": True},
                },
                "textDocument": {
                    "publishDiagnostics": {
                        "relatedInformation": True,
                        "versionSupport": False,
                        "tagSupport": {"valueSet": [1, 2]},
                        "codeDescriptionSupport": True,
                        "dataSupport": True,
                    },
                    "synchronization": {"dynamicRegistration": True, "willSave": True, "willSaveWaitUntil": True, "didSave": True},
                    # TODO: we have an assert that completion provider is not included in the capabilities at server startup
                    # Removing this will cause the assert to fail. Investigate why this is the case, simplify config
                    "completion": {
                        "dynamicRegistration": True,
                        "contextSupport": True,
                        "completionItem": {
                            "snippetSupport": False,
                            "commitCharactersSupport": True,
                            "documentationFormat": ["markdown", "plaintext"],
                            "deprecatedSupport": True,
                            "preselectSupport": True,
                            "tagSupport": {"valueSet": [1]},
                            "insertReplaceSupport": False,
                            "resolveSupport": {"properties": ["documentation", "detail", "additionalTextEdits"]},
                            "insertTextModeSupport": {"valueSet": [1, 2]},
                            "labelDetailsSupport": True,
                        },
                        "insertTextMode": 2,
                        "completionItemKind": {
                            "valueSet": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
                        },
                        "completionList": {"itemDefaults": ["commitCharacters", "editRange", "insertTextFormat", "insertTextMode"]},
                    },
                    "hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
                    "signatureHelp": {
                        "dynamicRegistration": True,
                        "signatureInformation": {
                            "documentationFormat": ["markdown", "plaintext"],
                            "parameterInformation": {"labelOffsetSupport": True},
                            "activeParameterSupport": True,
                        },
                    },
                    "definition": {"dynamicRegistration": True, "linkSupport": True},
                    "references": {"dynamicRegistration": True},
                    "documentSymbol": {
                        "dynamicRegistration": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                        "hierarchicalDocumentSymbolSupport": True,
                        "tagSupport": {"valueSet": [1]},
                        "labelSupport": True,
                    },
                    "rename": {
                        "dynamicRegistration": True,
                        "prepareSupport": True,
                        "prepareSupportDefaultBehavior": 1,
                        "honorsChangeAnnotations": True,
                    },
                    "documentLink": {"dynamicRegistration": True, "tooltipSupport": True},
                    "typeDefinition": {"dynamicRegistration": True, "linkSupport": True},
                    "implementation": {"dynamicRegistration": True, "linkSupport": True},
                    "colorProvider": {"dynamicRegistration": True},
                    "declaration": {"dynamicRegistration": True, "linkSupport": True},
                    "selectionRange": {"dynamicRegistration": True},
                    "callHierarchy": {"dynamicRegistration": True},
                    "semanticTokens": {
                        "dynamicRegistration": True,
                        "tokenTypes": [
                            "namespace",
                            "type",
                            "class",
                            "enum",
                            "interface",
                            "struct",
                            "typeParameter",
                            "parameter",
                            "variable",
                            "property",
                            "enumMember",
                            "event",
                            "function",
                            "method",
                            "macro",
                            "keyword",
                            "modifier",
                            "comment",
                            "string",
                            "number",
                            "regexp",
                            "operator",
                            "decorator",
                        ],
                        "tokenModifiers": [
                            "declaration",
                            "definition",
                            "readonly",
                            "static",
                            "deprecated",
                            "abstract",
                            "async",
                            "modification",
                            "documentation",
                            "defaultLibrary",
                        ],
                        "formats": ["relative"],
                        "requests": {"range": True, "full": {"delta": True}},
                        "multilineTokenSupport": False,
                        "overlappingTokenSupport": False,
                        "serverCancelSupport": True,
                        "augmentsSyntaxTokens": True,
                    },
                    "typeHierarchy": {"dynamicRegistration": True},
                    "inlineValue": {"dynamicRegistration": True},
                    "diagnostic": {"dynamicRegistration": True, "relatedDocumentSupport": False},
                },
                "general": {
                    "staleRequestSupport": {
                        "cancel": True,
                        "retryOnContentModified": [
                            "textDocument/semanticTokens/full",
                            "textDocument/semanticTokens/range",
                            "textDocument/semanticTokens/full/delta",
                        ],
                    },
                    "regularExpressions": {"engine": "ECMAScript", "version": "ES2020"},
                    "positionEncodings": ["utf-16"],
                },
                "notebookDocument": {"synchronization": {"dynamicRegistration": True, "executionSummarySupport": True}},
            },
            # JDTLS-specific settings; placeholder entries ("intellicode-core.jar",
            # the runtimes path, the gradle home) are overwritten further below with
            # the resolved runtime-dependency paths.
            "initializationOptions": {
                "bundles": ["intellicode-core.jar"],
                "settings": {
                    "java": {
                        "home": None,
                        "jdt": {
                            "ls": {
                                "java": {"home": None},
                                "vmargs": "-XX:+UseParallelGC -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90 -Dsun.zip.disableMemoryMapping=true -Xmx1G -Xms100m -Xlog:disable",
                                "lombokSupport": {"enabled": True},
                                "protobufSupport": {"enabled": True},
                                "androidSupport": {"enabled": True},
                            }
                        },
                        "errors": {"incompleteClasspath": {"severity": "error"}},
                        "configuration": {
                            "checkProjectSettingsExclusions": False,
                            "updateBuildConfiguration": "interactive",
                            "maven": {
                                "userSettings": maven_settings_path,
                                "globalSettings": None,
                                "notCoveredPluginExecutionSeverity": "warning",
                                "defaultMojoExecutionAction": "ignore",
                            },
                            "workspaceCacheLimit": 90,
                            # Placeholder; replaced below with the bundled JRE path.
                            "runtimes": [
                                {"name": "JavaSE-21", "path": "static/vscode-java/extension/jre/21.0.7-linux-x86_64", "default": True}
                            ],
                        },
                        "trace": {"server": "verbose"},
                        "import": {
                            "maven": {
                                "enabled": True,
                                "offline": {"enabled": False},
                                "disableTestClasspathFlag": False,
                            },
                            "gradle": {
                                "enabled": True,
                                "wrapper": {"enabled": gradle_wrapper_enabled},
                                "version": None,
                                # Placeholder; replaced below with the resolved gradle path.
                                "home": "abs(static/gradle-7.3.3)",
                                "offline": {"enabled": False},
                                "arguments": None,
                                "jvmArguments": None,
                                "user": {"home": gradle_user_home},
                                "annotationProcessing": {"enabled": True},
                            },
                            "exclusions": [
                                "**/node_modules/**",
                                "**/.metadata/**",
                                "**/archetype-resources/**",
                                "**/META-INF/maven/**",
                            ],
                            "generatesMetadataFilesAtProjectRoot": False,
                        },
                        # Set updateSnapshots to False to improve performance and avoid unnecessary network calls
                        # Snapshots will only be updated when explicitly requested by the user
                        "maven": {"downloadSources": True, "updateSnapshots": False},
                        "eclipse": {"downloadSources": True},
                        "signatureHelp": {"enabled": True, "description": {"enabled": True}},
                        "hover": {"javadoc": {"enabled": True}},
                        "implementationsCodeLens": {"enabled": True},
                        "format": {
                            "enabled": True,
                            "settings": {"url": None, "profile": None},
                            "comments": {"enabled": True},
                            "onType": {"enabled": True},
                            "insertSpaces": True,
                            "tabSize": 4,
                        },
                        "saveActions": {"organizeImports": False},
                        "project": {
                            "referencedLibraries": ["lib/**/*.jar"],
                            "importOnFirstTimeStartup": "automatic",
                            "importHint": True,
                            "resourceFilters": ["node_modules", "\\.git"],
                            "encoding": "ignore",
                            "exportJar": {"targetPath": "${workspaceFolder}/${workspaceFolderBasename}.jar"},
                        },
                        "contentProvider": {"preferred": None},
                        "autobuild": {"enabled": True},
                        "maxConcurrentBuilds": 1,
                        "selectionRange": {"enabled": True},
                        "showBuildStatusOnStart": {"enabled": "notification"},
                        "server": {"launchMode": "Standard"},
                        "sources": {"organizeImports": {"starThreshold": 99, "staticStarThreshold": 99}},
                        "imports": {"gradle": {"wrapper": {"checksums": []}}},
                        "templates": {"fileHeader": [], "typeComment": []},
                        "references": {"includeAccessors": True, "includeDecompiledSources": True},
                        "typeHierarchy": {"lazyLoad": False},
                        "settings": {"url": None},
                        "symbols": {"includeSourceMethodDeclarations": False},
                        "inlayHints": {"parameterNames": {"enabled": "literals", "exclusions": []}},
                        "codeAction": {"sortMembers": {"avoidVolatileChanges": True}},
                        "compile": {
                            "nullAnalysis": {
                                "nonnull": [
                                    "javax.annotation.Nonnull",
                                    "org.eclipse.jdt.annotation.NonNull",
                                    "org.springframework.lang.NonNull",
                                ],
                                "nullable": [
                                    "javax.annotation.Nullable",
                                    "org.eclipse.jdt.annotation.Nullable",
                                    "org.springframework.lang.Nullable",
                                ],
                                "mode": "automatic",
                            }
                        },
                        "sharedIndexes": {"enabled": "auto", "location": ""},
                        "silentNotification": False,
                        "dependency": {
                            "showMembers": False,
                            "syncWithFolderExplorer": True,
                            "autoRefresh": True,
                            "refreshDelay": 2000,
                            "packagePresentation": "flat",
                        },
                        "help": {"firstView": "auto", "showReleaseNotes": True, "collectErrorLog": False},
                        "test": {"defaultConfig": "", "config": {}},
                    }
                },
            },
            "trace": "verbose",
            "processId": os.getpid(),
            "workspaceFolders": [
                {
                    "uri": repo_uri,
                    "name": os.path.basename(repository_absolute_path),
                }
            ],
        }
        # Patch the placeholders with the actual resolved runtime-dependency paths.
        initialize_params["initializationOptions"]["workspaceFolders"] = [repo_uri]  # type: ignore
        bundles = [self.runtime_dependency_paths.intellicode_jar_path]
        initialize_params["initializationOptions"]["bundles"] = bundles  # type: ignore
        initialize_params["initializationOptions"]["settings"]["java"]["configuration"]["runtimes"] = [  # type: ignore
            {"name": "JavaSE-21", "path": self.runtime_dependency_paths.jre_home_path, "default": True}
        ]
        for runtime in initialize_params["initializationOptions"]["settings"]["java"]["configuration"]["runtimes"]:  # type: ignore
            assert "name" in runtime
            assert "path" in runtime
            assert os.path.exists(runtime["path"]), f"Runtime required for eclipse_jdtls at path {runtime['path']} does not exist"
        gradle_settings = initialize_params["initializationOptions"]["settings"]["java"]["import"]["gradle"]  # type: ignore
        gradle_settings["home"] = self.runtime_dependency_paths.gradle_path
        gradle_settings["java"] = {"home": gradle_java_home if gradle_java_home is not None else self.runtime_dependency_paths.jre_path}
        return cast(InitializeParams, initialize_params)
    def _start_server(self) -> None:
        """
        Starts the Eclipse JDTLS Language Server.

        Registers the request/notification handlers JDTLS needs, launches the
        server process, performs the LSP initialize handshake, enables the
        IntelliCode extension, and then blocks until JDTLS signals that the
        language service and the project import are ready (the latter with a
        timeout workaround, since the event is sometimes never delivered).
        """

        def register_capability_handler(params: dict) -> None:
            # JDTLS registers completion and executeCommand dynamically after startup.
            # The asserts pin the expected registration options so that silent
            # upstream behavior changes are detected early.
            assert "registrations" in params
            for registration in params["registrations"]:
                if registration["method"] == "textDocument/completion":
                    assert registration["registerOptions"]["resolveProvider"] == True
                    assert registration["registerOptions"]["triggerCharacters"] == [
                        ".",
                        "@",
                        "#",
                        "*",
                        " ",
                    ]
                if registration["method"] == "workspace/executeCommand":
                    if "java.intellicode.enable" in registration["registerOptions"]["commands"]:
                        # This command is invoked further below; signal that it is now available.
                        self._intellicode_enable_command_available.set()
            return

        def lang_status_handler(params: dict) -> None:
            # JDTLS-specific "language/status" notifications drive the two readiness events
            # that the startup sequence below waits on.
            log.info("Language status update: %s", params)
            if params["type"] == "ServiceReady" and params["message"] == "ServiceReady":
                self._service_ready_event.set()
            if params["type"] == "ProjectStatus":
                if params["message"] == "OK":
                    self._project_ready_event.set()

        def execute_client_command_handler(params: dict) -> list:
            # The only client command JDTLS is expected to send; an empty result is sufficient.
            assert params["command"] == "_java.reloadBundles.command"
            assert params["arguments"] == []
            return []

        def window_log_message(msg: dict) -> None:
            # Forward server-side log messages into our own log for debugging.
            log.info(f"LSP: window/logMessage: {msg}")

        def do_nothing(params: dict) -> None:
            # Sink for notifications we deliberately ignore.
            return

        # Handlers must be registered before the process is started so no early
        # server message is missed.
        self.server.on_request("client/registerCapability", register_capability_handler)
        self.server.on_notification("language/status", lang_status_handler)
        self.server.on_notification("window/logMessage", window_log_message)
        self.server.on_request("workspace/executeClientCommand", execute_client_command_handler)
        self.server.on_notification("$/progress", do_nothing)
        self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
        self.server.on_notification("language/actionableNotification", do_nothing)
        log.info("Starting EclipseJDTLS server process")
        self.server.start()
        initialize_params = self._get_initialize_params(self.repository_root_path)
        log.info("Sending initialize request from LSP client to LSP server and awaiting response")
        init_response = self.server.send.initialize(initialize_params)
        # Sanity-check the advertised capabilities: incremental sync (2) is expected,
        # and completion/executeCommand must arrive via dynamic registration instead.
        assert init_response["capabilities"]["textDocumentSync"]["change"] == 2  # type: ignore
        assert "completionProvider" not in init_response["capabilities"]
        assert "executeCommandProvider" not in init_response["capabilities"]
        self.server.notify.initialized({})
        self.server.notify.workspace_did_change_configuration({"settings": initialize_params["initializationOptions"]["settings"]})  # type: ignore
        # IntelliCode can only be enabled once JDTLS has dynamically registered the command.
        self._intellicode_enable_command_available.wait()
        java_intellisense_members_path = self.runtime_dependency_paths.intellisense_members_path
        assert os.path.exists(java_intellisense_members_path)
        intellicode_enable_result = self.server.send.execute_command(
            {
                "command": "java.intellicode.enable",
                "arguments": [True, java_intellisense_members_path],
            }
        )
        assert intellicode_enable_result
        if not self._service_ready_event.is_set():
            log.info("Waiting for service to be ready ...")
            self._service_ready_event.wait()
        log.info("Service is ready")
        if not self._project_ready_event.is_set():
            log.info("Waiting for project to be ready ...")
            project_ready_timeout = 20  # Hotfix: Using timeout until we figure out why sometimes we don't get the project ready event
            if self._project_ready_event.wait(timeout=project_ready_timeout):
                log.info("Project is ready")
            else:
                log.warning("Did not receive project ready status within %d seconds; proceeding anyway", project_ready_timeout)
        else:
            log.info("Project is ready")
        log.info("Startup complete")
@override
def _request_hover(self, file_buffer: LSPFileBuffer, line: int, column: int) -> ls_types.Hover | None:
# Eclipse JDTLS lazily loads javadocs on first hover request, then caches them.
# This means the first request often returns incomplete info (just the signature),
# while subsequent requests return the full javadoc.
#
# The response format also differs based on javadoc presence:
# - contents: list[...] when javadoc IS present (preferred, richer format)
# - contents: {value: info} when javadoc is NOT present
#
# There's no LSP signal for "javadoc fully loaded" and no way to request
# hover with "wait for complete info". The retry approach is the only viable
# workaround - we keep requesting until we get the richer list format or
# the content stops growing.
#
# The file is kept open by the caller (request_hover), so retries are cheap
# and don't cause repeated didOpen/didClose cycles.
def content_score(result: ls_types.Hover | None) -> tuple[int, int]:
"""Return (format_priority, length) for comparison. Higher is better."""
if result is None:
return (0, 0)
contents = result["contents"]
if isinstance(contents, list):
return (2, len(contents)) # List format (has javadoc) is best
elif isinstance(contents, dict):
return (1, len(contents.get("value", "")))
else:
return (1, len(contents))
max_retries = 5
best_result = super()._request_hover(file_buffer, line, column)
best_score = content_score(best_result)
for _ in range(max_retries):
sleep(0.05)
new_result = super()._request_hover(file_buffer, line, column)
new_score = content_score(new_result)
if new_score > best_score:
best_result = new_result
best_score = new_score
return best_result
def _request_document_symbols(
self, relative_file_path: str, file_data: LSPFileBuffer | None
) -> list[SymbolInformation] | list[DocumentSymbol] | None:
result = super()._request_document_symbols(relative_file_path, file_data=file_data)
if result is None:
return None
# JDTLS sometimes returns symbol names with type information to handle overloads,
# e.g. "myMethod(int) <T>", but we want overloads to be handled via overload_idx,
# which requires the name to be just "myMethod".
def fix_name(symbol: SymbolInformation | DocumentSymbol | UnifiedSymbolInformation) -> None:
if "(" in symbol["name"]:
symbol["name"] = symbol["name"][: symbol["name"].index("(")]
children = symbol.get("children")
if children:
for child in children: # type: ignore
fix_name(child)
for root_symbol in result:
fix_name(root_symbol)
return result
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/eclipse_jdtls.py",
"license": "MIT License",
"lines": 841,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/solidlsp/language_servers/elixir_tools/elixir_tools.py | import logging
import os
import pathlib
import stat
import subprocess
import threading
from typing import Any, cast
from overrides import override
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.ls_utils import FileUtils, PlatformId, PlatformUtils
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
from ..common import RuntimeDependency
log = logging.getLogger(__name__)
class ElixirTools(SolidLanguageServer):
    """
    Provides Elixir specific instantiation of the LanguageServer class using Expert, the official Elixir language server.
    """

    @override
    def _get_wait_time_for_cross_file_referencing(self) -> float:
        return 10.0  # Elixir projects need time to compile and index before cross-file references work

    @override
    def is_ignored_dirname(self, dirname: str) -> bool:
        # For Elixir projects, we should ignore:
        # - _build: compiled artifacts
        # - deps: dependencies
        # - node_modules: if the project has JavaScript components
        # - .elixir_ls: ElixirLS artifacts (in case both are present)
        # - cover: coverage reports
        # - .expert: Expert artifacts
        return super().is_ignored_dirname(dirname) or dirname in ["_build", "deps", "node_modules", ".elixir_ls", ".expert", "cover"]

    @override
    def is_ignored_path(self, relative_path: str, ignore_unsupported_files: bool = True) -> bool:
        """Check if a path should be ignored for symbol indexing."""
        if relative_path.endswith("mix.exs"):
            # These are project configuration files, not source code with symbols to index
            return True
        return super().is_ignored_path(relative_path, ignore_unsupported_files)

    @classmethod
    def _get_elixir_version(cls) -> str | None:
        """Get the installed Elixir version or None if not found."""
        try:
            result = subprocess.run(["elixir", "--version"], capture_output=True, text=True, check=False)
            if result.returncode == 0:
                return result.stdout.strip()
        except FileNotFoundError:
            return None
        return None

    @classmethod
    def _setup_runtime_dependencies(cls, config: LanguageServerConfig, solidlsp_settings: SolidLSPSettings) -> str:
        """
        Setup runtime dependencies for Expert.

        Downloads the Expert binary for the current platform (unless an `expert`
        executable is already on PATH) and returns the path to the executable.

        :raises RuntimeError: if Elixir itself is not installed
        """
        # Check if Elixir is available first
        elixir_version = cls._get_elixir_version()
        if not elixir_version:
            raise RuntimeError(
                "Elixir is not installed. Please install Elixir from https://elixir-lang.org/install.html and make sure it is added to your PATH."
            )
        log.info(f"Found Elixir: {elixir_version}")

        # First, check if expert is already in PATH (user may have installed it manually)
        import shutil

        expert_in_path = shutil.which("expert")
        if expert_in_path:
            log.info(f"Found Expert in PATH: {expert_in_path}")
            return expert_in_path

        platform_id = PlatformUtils.get_platform_id()
        valid_platforms = [
            PlatformId.LINUX_x64,
            PlatformId.LINUX_arm64,
            PlatformId.OSX_x64,
            PlatformId.OSX_arm64,
            PlatformId.WIN_x64,
            PlatformId.WIN_arm64,
        ]
        assert platform_id in valid_platforms, f"Platform {platform_id} is not supported for Expert at the moment"

        expert_dir = os.path.join(cls.ls_resources_dir(solidlsp_settings), "expert")
        EXPERT_VERSION = "nightly"

        # Define runtime dependencies inline
        runtime_deps = {
            PlatformId.LINUX_x64: RuntimeDependency(
                id="expert_linux_amd64",
                platform_id="linux-x64",
                url=f"https://github.com/elixir-lang/expert/releases/download/{EXPERT_VERSION}/expert_linux_amd64",
                archive_type="binary",
                binary_name="expert_linux_amd64",
                extract_path="expert",
            ),
            PlatformId.LINUX_arm64: RuntimeDependency(
                id="expert_linux_arm64",
                platform_id="linux-arm64",
                url=f"https://github.com/elixir-lang/expert/releases/download/{EXPERT_VERSION}/expert_linux_arm64",
                archive_type="binary",
                binary_name="expert_linux_arm64",
                extract_path="expert",
            ),
            PlatformId.OSX_x64: RuntimeDependency(
                id="expert_darwin_amd64",
                platform_id="osx-x64",
                url=f"https://github.com/elixir-lang/expert/releases/download/{EXPERT_VERSION}/expert_darwin_amd64",
                archive_type="binary",
                binary_name="expert_darwin_amd64",
                extract_path="expert",
            ),
            PlatformId.OSX_arm64: RuntimeDependency(
                id="expert_darwin_arm64",
                platform_id="osx-arm64",
                url=f"https://github.com/elixir-lang/expert/releases/download/{EXPERT_VERSION}/expert_darwin_arm64",
                archive_type="binary",
                binary_name="expert_darwin_arm64",
                extract_path="expert",
            ),
            PlatformId.WIN_x64: RuntimeDependency(
                id="expert_windows_amd64",
                platform_id="win-x64",
                url=f"https://github.com/elixir-lang/expert/releases/download/{EXPERT_VERSION}/expert_windows_amd64.exe",
                archive_type="binary",
                binary_name="expert_windows_amd64.exe",
                extract_path="expert.exe",
            ),
            PlatformId.WIN_arm64: RuntimeDependency(
                id="expert_windows_arm64",
                platform_id="win-arm64",
                url=f"https://github.com/elixir-lang/expert/releases/download/{EXPERT_VERSION}/expert_windows_arm64.exe",
                archive_type="binary",
                binary_name="expert_windows_arm64.exe",
                extract_path="expert.exe",
            ),
        }

        dependency = runtime_deps[platform_id]

        # On Windows, use .exe extension
        executable_name = "expert.exe" if platform_id.value.startswith("win") else "expert"
        executable_path = os.path.join(expert_dir, executable_name)
        assert dependency.binary_name is not None
        binary_path = os.path.join(expert_dir, dependency.binary_name)

        if not os.path.exists(executable_path):
            log.info(f"Downloading Expert binary from {dependency.url}")
            assert dependency.url is not None
            FileUtils.download_file(dependency.url, binary_path)

            if platform_id.value.startswith("win"):
                # Bugfix: the release asset is named e.g. expert_windows_amd64.exe, but the
                # code expects the executable at expert.exe. Previously the binary was never
                # renamed on Windows, so the existence assert below always failed. Move the
                # downloaded binary into place under the expected name.
                if binary_path != executable_path:
                    os.replace(binary_path, executable_path)
            else:
                # Make the binary executable on Unix-like systems
                os.chmod(binary_path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
                # Create a symlink with the expected name on Unix-like systems
                if binary_path != executable_path:
                    # lexists (unlike exists) also detects a stale broken symlink from a
                    # previous run, which would otherwise make os.symlink raise.
                    if os.path.lexists(executable_path):
                        os.remove(executable_path)
                    os.symlink(os.path.basename(binary_path), executable_path)

        assert os.path.exists(executable_path), f"Expert executable not found at {executable_path}"
        log.info(f"Expert binary ready at: {executable_path}")
        return executable_path

    def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
        """
        Creates an ElixirTools instance, downloading the Expert binary if needed
        and launching it in stdio mode from the repository root.
        """
        expert_executable_path = self._setup_runtime_dependencies(config, solidlsp_settings)

        super().__init__(
            config,
            repository_root_path,
            ProcessLaunchInfo(cmd=f"{expert_executable_path} --stdio", cwd=repository_root_path),
            "elixir",
            solidlsp_settings,
        )
        self.server_ready = threading.Event()
        self.request_id = 0
        # Set generous timeout for Expert which can be slow to initialize and respond
        self.set_request_timeout(180.0)

    @staticmethod
    def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
        """
        Returns the initialize params for the Expert Language Server.
        """
        # Ensure the path is absolute
        abs_path = os.path.abspath(repository_absolute_path)
        root_uri = pathlib.Path(abs_path).as_uri()
        initialize_params = {
            "processId": os.getpid(),
            "locale": "en",
            "rootPath": abs_path,
            "rootUri": root_uri,
            "initializationOptions": {
                "mix_env": "dev",
                "mix_target": "host",
                "experimental": {"completions": {"enable": False}},
                "extensions": {"credo": {"enable": True, "cli_options": []}},
            },
            "capabilities": {
                "textDocument": {
                    "synchronization": {"didSave": True, "dynamicRegistration": True},
                    "completion": {
                        "dynamicRegistration": True,
                        "completionItem": {"snippetSupport": True, "documentationFormat": ["markdown", "plaintext"]},
                    },
                    "definition": {"dynamicRegistration": True},
                    "references": {"dynamicRegistration": True},
                    "documentSymbol": {
                        "dynamicRegistration": True,
                        "hierarchicalDocumentSymbolSupport": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                    },
                    "hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
                    "formatting": {"dynamicRegistration": True},
                    "codeAction": {
                        "dynamicRegistration": True,
                        "codeActionLiteralSupport": {
                            "codeActionKind": {
                                "valueSet": [
                                    "quickfix",
                                    "refactor",
                                    "refactor.extract",
                                    "refactor.inline",
                                    "refactor.rewrite",
                                    "source",
                                    "source.organizeImports",
                                ]
                            }
                        },
                    },
                },
                "workspace": {
                    "workspaceFolders": True,
                    "didChangeConfiguration": {"dynamicRegistration": True},
                    "executeCommand": {"dynamicRegistration": True},
                },
                "window": {
                    "showMessage": {"messageActionItem": {"additionalPropertiesSupport": True}},
                    "showDocument": {"support": True},
                    "workDoneProgress": True,
                },
            },
            "workspaceFolders": [{"uri": root_uri, "name": os.path.basename(repository_absolute_path)}],
        }
        return cast(InitializeParams, initialize_params)

    def _start_server(self) -> None:
        """
        Start the Expert server process, perform the LSP handshake, and wait
        (bounded) for Expert's initial compile/index pass to finish.
        """

        def register_capability_handler(params: Any) -> None:
            log.debug(f"LSP: client/registerCapability: {params}")
            return

        def window_log_message(msg: Any) -> None:
            """Handle window/logMessage notifications from Expert"""
            message_type = msg.get("type", 4)  # 1=Error, 2=Warning, 3=Info, 4=Log
            message_text = msg.get("message", "")
            # Log at appropriate level based on message type
            if message_type == 1:
                log.error(f"Expert: {message_text}")
            elif message_type == 2:
                log.warning(f"Expert: {message_text}")
            else:
                log.debug(f"Expert: {message_text}")

        def check_server_ready(params: Any) -> None:
            """
            Handle $/progress notifications from Expert.

            Expert sends progress updates during compilation and indexing.
            The server is considered ready when project build completes.
            """
            value = params.get("value", {})
            kind = value.get("kind", "")
            title = value.get("title", "")

            if kind == "begin":
                # Track when building the project starts (not "Building engine")
                if title.startswith("Building ") and not title.startswith("Building engine"):
                    self._building_project = True
            elif kind == "end":
                # Project build completion is the main readiness signal
                if getattr(self, "_building_project", False):
                    log.debug("Expert project build completed - server is ready")
                    self._building_project = False
                    self.server_ready.set()

        def work_done_progress_create(params: Any) -> None:
            """Handle window/workDoneProgress/create requests from Expert."""
            return

        def publish_diagnostics(params: Any) -> None:
            """Handle textDocument/publishDiagnostics notifications."""
            return

        # Handlers must be registered before starting the process so no early message is lost.
        self.server.on_request("client/registerCapability", register_capability_handler)
        self.server.on_notification("window/logMessage", window_log_message)
        self.server.on_notification("$/progress", check_server_ready)
        self.server.on_request("window/workDoneProgress/create", work_done_progress_create)
        self.server.on_notification("textDocument/publishDiagnostics", publish_diagnostics)

        log.debug("Starting Expert server process")
        self.server.start()
        initialize_params = self._get_initialize_params(self.repository_root_path)

        log.debug("Sending initialize request to Expert")
        init_response = self.server.send.initialize(initialize_params)

        # Verify basic server capabilities
        assert "textDocumentSync" in init_response["capabilities"], f"Missing textDocumentSync in {init_response['capabilities']}"

        self.server.notify.initialized({})

        # Expert needs time to compile the project and build indexes on first run.
        # This can take 2-3+ minutes for mid-sized codebases.
        # After the first run, subsequent startups are much faster.
        ready_timeout = 300.0  # 5 minutes
        log.debug(f"Waiting up to {ready_timeout}s for Expert to compile and index...")
        if self.server_ready.wait(timeout=ready_timeout):
            log.debug("Expert is ready for requests")
        else:
            log.warning(f"Expert did not signal readiness within {ready_timeout}s. Proceeding with requests anyway.")
            self.server_ready.set()  # Mark as ready anyway to allow requests
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/elixir_tools/elixir_tools.py",
"license": "MIT License",
"lines": 296,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/solidlsp/language_servers/jedi_server.py | """
Provides Python specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Python.
"""
import logging
import os
import pathlib
import threading
from typing import cast
from overrides import override
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
log = logging.getLogger(__name__)
class JediServer(SolidLanguageServer):
    """
    Provides Python specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Python.
    """

    def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
        """
        Creates a JediServer instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead.

        :param config: language-server configuration passed down from the factory
        :param repository_root_path: absolute path of the repository to serve
        :param solidlsp_settings: global SolidLSP settings
        """
        super().__init__(
            config,
            repository_root_path,
            # jedi-language-server is expected to be on PATH; it speaks LSP over stdio.
            ProcessLaunchInfo(cmd="jedi-language-server", cwd=repository_root_path),
            "python",
            solidlsp_settings,
        )

    @override
    def is_ignored_dirname(self, dirname: str) -> bool:
        # Skip Python-specific directories that never contain user source files.
        return super().is_ignored_dirname(dirname) or dirname in ["venv", "__pycache__"]

    @staticmethod
    def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
        """
        Returns the initialize params for the Jedi Language Server.

        :param repository_absolute_path: absolute path of the repository root
        :return: the LSP ``initialize`` request parameters
        """
        root_uri = pathlib.Path(repository_absolute_path).as_uri()
        initialize_params = {
            "processId": os.getpid(),
            "clientInfo": {"name": "Serena", "version": "0.1.0"},
            "locale": "en",
            "rootPath": repository_absolute_path,
            "rootUri": root_uri,
            # Note: this is not necessarily the minimal set of capabilities...
            "capabilities": {
                "workspace": {
                    "applyEdit": True,
                    "workspaceEdit": {
                        "documentChanges": True,
                        "resourceOperations": ["create", "rename", "delete"],
                        "failureHandling": "textOnlyTransactional",
                        "normalizesLineEndings": True,
                        "changeAnnotationSupport": {"groupsOnLabel": True},
                    },
                    "configuration": True,
                    "didChangeWatchedFiles": {"dynamicRegistration": True, "relativePatternSupport": True},
                    "symbol": {
                        "dynamicRegistration": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                        "tagSupport": {"valueSet": [1]},
                        "resolveSupport": {"properties": ["location.range"]},
                    },
                    "workspaceFolders": True,
                    "fileOperations": {
                        "dynamicRegistration": True,
                        "didCreate": True,
                        "didRename": True,
                        "didDelete": True,
                        "willCreate": True,
                        "willRename": True,
                        "willDelete": True,
                    },
                    "inlineValue": {"refreshSupport": True},
                    "inlayHint": {"refreshSupport": True},
                    "diagnostics": {"refreshSupport": True},
                },
                "textDocument": {
                    "publishDiagnostics": {
                        "relatedInformation": True,
                        "versionSupport": False,
                        "tagSupport": {"valueSet": [1, 2]},
                        "codeDescriptionSupport": True,
                        "dataSupport": True,
                    },
                    "synchronization": {"dynamicRegistration": True, "willSave": True, "willSaveWaitUntil": True, "didSave": True},
                    "hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
                    "signatureHelp": {
                        "dynamicRegistration": True,
                        "signatureInformation": {
                            "documentationFormat": ["markdown", "plaintext"],
                            "parameterInformation": {"labelOffsetSupport": True},
                            "activeParameterSupport": True,
                        },
                        "contextSupport": True,
                    },
                    "definition": {"dynamicRegistration": True, "linkSupport": True},
                    "references": {"dynamicRegistration": True},
                    "documentHighlight": {"dynamicRegistration": True},
                    "documentSymbol": {
                        "dynamicRegistration": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                        "hierarchicalDocumentSymbolSupport": True,
                        "tagSupport": {"valueSet": [1]},
                        "labelSupport": True,
                    },
                    "documentLink": {"dynamicRegistration": True, "tooltipSupport": True},
                    "typeDefinition": {"dynamicRegistration": True, "linkSupport": True},
                    "implementation": {"dynamicRegistration": True, "linkSupport": True},
                    "declaration": {"dynamicRegistration": True, "linkSupport": True},
                    "selectionRange": {"dynamicRegistration": True},
                    "callHierarchy": {"dynamicRegistration": True},
                    "linkedEditingRange": {"dynamicRegistration": True},
                    "typeHierarchy": {"dynamicRegistration": True},
                    "inlineValue": {"dynamicRegistration": True},
                    "inlayHint": {
                        "dynamicRegistration": True,
                        "resolveSupport": {"properties": ["tooltip", "textEdits", "label.tooltip", "label.location", "label.command"]},
                    },
                    "diagnostic": {"dynamicRegistration": True, "relatedDocumentSupport": False},
                },
                "notebookDocument": {"synchronization": {"dynamicRegistration": True, "executionSummarySupport": True}},
                "experimental": {
                    "serverStatusNotification": True,
                    "openServerLogs": True,
                },
            },
            # See https://github.com/pappasam/jedi-language-server?tab=readme-ov-file
            # We use the default options except for maxSymbols, where 0 means no limit
            "initializationOptions": {
                "workspace": {
                    "symbols": {"ignoreFolders": [".nox", ".tox", ".venv", "__pycache__", "venv"], "maxSymbols": 0},
                },
            },
            "trace": "verbose",
            "workspaceFolders": [
                {
                    "uri": root_uri,
                    "name": os.path.basename(repository_absolute_path),
                }
            ],
        }
        return cast(InitializeParams, initialize_params)

    def _start_server(self) -> None:
        """
        Starts the JEDI Language Server: registers the LSP notification/request
        handlers, launches the server process, performs the initialize handshake
        and verifies the capabilities this client relies on.
        """
        completions_available = threading.Event()

        def execute_client_command_handler(params: dict) -> list:
            # We do not execute any client-side commands; respond with an empty result.
            return []

        def do_nothing(params: dict) -> None:
            return

        def check_experimental_status(params: dict) -> None:
            # The server signals via experimental/serverStatus when it has become
            # quiescent. Use .get() so a status notification that lacks the
            # "quiescent" key cannot raise KeyError inside the dispatcher
            # (previously this was params["quiescent"] == True).
            if params.get("quiescent") is True:
                completions_available.set()

        def window_log_message(msg: dict) -> None:
            log.info(f"LSP: window/logMessage: {msg}")

        self.server.on_request("client/registerCapability", do_nothing)
        self.server.on_notification("language/status", do_nothing)
        self.server.on_notification("window/logMessage", window_log_message)
        self.server.on_request("workspace/executeClientCommand", execute_client_command_handler)
        self.server.on_notification("$/progress", do_nothing)
        self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
        self.server.on_notification("language/actionableNotification", do_nothing)
        self.server.on_notification("experimental/serverStatus", check_experimental_status)
        log.info("Starting jedi-language-server server process")
        self.server.start()
        initialize_params = self._get_initialize_params(self.repository_root_path)
        log.info("Sending initialize request from LSP client to LSP server and awaiting response")
        init_response = self.server.send.initialize(initialize_params)
        # Sanity-check the capabilities this client depends on (incremental sync + completion).
        assert init_response["capabilities"]["textDocumentSync"]["change"] == 2  # type: ignore
        assert "completionProvider" in init_response["capabilities"]
        assert init_response["capabilities"]["completionProvider"] == {
            "triggerCharacters": [".", "'", '"'],
            "resolveProvider": True,
        }
        self.server.notify.initialized({})
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/jedi_server.py",
"license": "MIT License",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:src/solidlsp/language_servers/kotlin_language_server.py | """
Provides Kotlin specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Kotlin.
You can configure the following options in ls_specific_settings (in serena_config.yml):
ls_specific_settings:
kotlin:
ls_path: '/path/to/kotlin-lsp.sh' # Custom path to Kotlin Language Server executable
kotlin_lsp_version: '261.13587.0' # Kotlin Language Server version (default: current bundled version)
jvm_options: '-Xmx2G' # JVM options for Kotlin Language Server (default: -Xmx2G)
Example configuration for large projects:
ls_specific_settings:
kotlin:
jvm_options: '-Xmx4G -XX:+UseG1GC'
"""
import logging
import os
import pathlib
import stat
import threading
from typing import cast
from overrides import override
from solidlsp.ls import (
LanguageServerDependencyProvider,
LanguageServerDependencyProviderSinglePath,
SolidLanguageServer,
)
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.ls_utils import FileUtils, PlatformUtils
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.settings import SolidLSPSettings
log = logging.getLogger(__name__)
# Default JVM options for Kotlin Language Server
# -Xmx2G: 2GB heap is sufficient for most projects; override via ls_specific_settings for large codebases
DEFAULT_KOTLIN_JVM_OPTIONS = "-Xmx2G"
# Default Kotlin Language Server version (can be overridden via ls_specific_settings)
DEFAULT_KOTLIN_LSP_VERSION = "261.13587.0"
# Platform-specific Kotlin LSP download suffixes
PLATFORM_KOTLIN_SUFFIX = {
"win-x64": "win-x64",
"linux-x64": "linux-x64",
"linux-arm64": "linux-aarch64",
"osx-x64": "mac-x64",
"osx-arm64": "mac-aarch64",
}
class KotlinLanguageServer(SolidLanguageServer):
    """
    Provides Kotlin specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Kotlin.
    """

    def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
        """
        Creates a Kotlin Language Server instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead.

        :param config: language-server configuration
        :param repository_root_path: absolute path of the repository to serve
        :param solidlsp_settings: global SolidLSP settings (may carry kotlin-specific overrides)
        """
        super().__init__(
            config,
            repository_root_path,
            None,
            "kotlin",
            solidlsp_settings,
        )
        # Indexing synchronisation: starts SET (= already done), cleared if the server
        # sends window/workDoneProgress/create (async-indexing servers like KLS v261+),
        # set again once all progress tokens have ended.
        self._indexing_complete = threading.Event()
        self._indexing_complete.set()
        # Tokens of progress operations currently in flight; accessed from the LSP
        # handler callbacks, hence guarded by _progress_lock.
        self._active_progress_tokens: set[str] = set()
        self._progress_lock = threading.Lock()

    def _create_dependency_provider(self) -> LanguageServerDependencyProvider:
        """Return the provider that downloads/locates the Kotlin LSP executable."""
        return self.DependencyProvider(self._custom_settings, self._ls_resources_dir)

    class DependencyProvider(LanguageServerDependencyProviderSinglePath):
        """Downloads the JetBrains Kotlin LSP distribution and builds its launch command/env."""

        def __init__(self, custom_settings: SolidLSPSettings.CustomLSSettings, ls_resources_dir: str):
            super().__init__(custom_settings, ls_resources_dir)
            # Optional JAVA_HOME override exported to the server process; stays None
            # unless set elsewhere (not populated in this class).
            self._java_home_path: str | None = None

        def _get_or_install_core_dependency(self) -> str:
            """
            Setup runtime dependencies for Kotlin Language Server and return the path to the executable script.

            Downloads the platform-specific Kotlin LSP zip on first use and marks the
            launcher script executable on non-Windows platforms.

            :raises FileNotFoundError: if the launcher script is missing after download
            """
            platform_id = PlatformUtils.get_platform_id()
            # Verify platform support
            assert (
                platform_id.value.startswith("win-") or platform_id.value.startswith("linux-") or platform_id.value.startswith("osx-")
            ), "Only Windows, Linux and macOS platforms are supported for Kotlin in multilspy at the moment"
            kotlin_suffix = PLATFORM_KOTLIN_SUFFIX.get(platform_id.value)
            assert kotlin_suffix, f"Unsupported platform for Kotlin LSP: {platform_id.value}"
            # Setup paths for dependencies
            static_dir = os.path.join(self._ls_resources_dir, "kotlin_language_server")
            os.makedirs(static_dir, exist_ok=True)
            # Setup Kotlin Language Server
            kotlin_script_name = "kotlin-lsp.cmd" if platform_id.value.startswith("win-") else "kotlin-lsp.sh"
            kotlin_script = os.path.join(static_dir, kotlin_script_name)
            if not os.path.exists(kotlin_script):
                # Version can be pinned via ls_specific_settings (see module docstring).
                kotlin_lsp_version = self._custom_settings.get("kotlin_lsp_version", DEFAULT_KOTLIN_LSP_VERSION)
                kotlin_url = f"https://download-cdn.jetbrains.com/kotlin-lsp/{kotlin_lsp_version}/kotlin-lsp-{kotlin_lsp_version}-{kotlin_suffix}.zip"
                log.info("Downloading Kotlin Language Server...")
                FileUtils.download_and_extract_archive(kotlin_url, static_dir, "zip")
                # The zip does not preserve the execute bit; restore rwxr-xr-x on Unix.
                if os.path.exists(kotlin_script) and not platform_id.value.startswith("win-"):
                    os.chmod(
                        kotlin_script,
                        stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                    )
            if not os.path.exists(kotlin_script):
                raise FileNotFoundError(f"Kotlin Language Server script not found at {kotlin_script}")
            log.info(f"Using Kotlin Language Server script at {kotlin_script}")
            return kotlin_script

        def _create_launch_command(self, core_path: str) -> list[str]:
            # Run the launcher script in stdio mode so we can speak LSP over pipes.
            return [core_path, "--stdio"]

        def create_launch_command_env(self) -> dict[str, str]:
            """Provides JAVA_HOME and JVM options for the Kotlin Language Server process."""
            env: dict[str, str] = {}
            if self._java_home_path is not None:
                env["JAVA_HOME"] = self._java_home_path
            # Get JVM options from settings or use default
            # Note: an explicit empty string means "no JVM options", which is distinct from not setting the key
            _sentinel = object()
            custom_jvm_options = self._custom_settings.get("jvm_options", _sentinel)
            if custom_jvm_options is not _sentinel:
                jvm_options = custom_jvm_options
            else:
                jvm_options = DEFAULT_KOTLIN_JVM_OPTIONS
            # JAVA_TOOL_OPTIONS is picked up by the JVM started from the launcher script.
            env["JAVA_TOOL_OPTIONS"] = jvm_options
            return env

    @staticmethod
    def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
        """
        Returns the initialize params for the Kotlin Language Server.

        :param repository_absolute_path: repository root; made absolute if a relative path is passed
        :return: the LSP ``initialize`` request parameters
        """
        if not os.path.isabs(repository_absolute_path):
            repository_absolute_path = os.path.abspath(repository_absolute_path)
        root_uri = pathlib.Path(repository_absolute_path).as_uri()
        initialize_params = {
            "clientInfo": {"name": "Multilspy Kotlin Client", "version": "1.0.0"},
            "locale": "en",
            "rootPath": repository_absolute_path,
            "rootUri": root_uri,
            "capabilities": {
                "workspace": {
                    "applyEdit": True,
                    "workspaceEdit": {
                        "documentChanges": True,
                        "resourceOperations": ["create", "rename", "delete"],
                        "failureHandling": "textOnlyTransactional",
                        "normalizesLineEndings": True,
                        "changeAnnotationSupport": {"groupsOnLabel": True},
                    },
                    "didChangeConfiguration": {"dynamicRegistration": True},
                    "didChangeWatchedFiles": {"dynamicRegistration": True, "relativePatternSupport": True},
                    "symbol": {
                        "dynamicRegistration": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                        "tagSupport": {"valueSet": [1]},
                        "resolveSupport": {"properties": ["location.range"]},
                    },
                    "codeLens": {"refreshSupport": True},
                    "executeCommand": {"dynamicRegistration": True},
                    "configuration": True,
                    "workspaceFolders": True,
                    "semanticTokens": {"refreshSupport": True},
                    "fileOperations": {
                        "dynamicRegistration": True,
                        "didCreate": True,
                        "didRename": True,
                        "didDelete": True,
                        "willCreate": True,
                        "willRename": True,
                        "willDelete": True,
                    },
                    "inlineValue": {"refreshSupport": True},
                    "inlayHint": {"refreshSupport": True},
                    "diagnostics": {"refreshSupport": True},
                },
                "textDocument": {
                    "publishDiagnostics": {
                        "relatedInformation": True,
                        "versionSupport": False,
                        "tagSupport": {"valueSet": [1, 2]},
                        "codeDescriptionSupport": True,
                        "dataSupport": True,
                    },
                    "synchronization": {"dynamicRegistration": True, "willSave": True, "willSaveWaitUntil": True, "didSave": True},
                    "completion": {
                        "dynamicRegistration": True,
                        "contextSupport": True,
                        "completionItem": {
                            "snippetSupport": False,
                            "commitCharactersSupport": True,
                            "documentationFormat": ["markdown", "plaintext"],
                            "deprecatedSupport": True,
                            "preselectSupport": True,
                            "tagSupport": {"valueSet": [1]},
                            "insertReplaceSupport": False,
                            "resolveSupport": {"properties": ["documentation", "detail", "additionalTextEdits"]},
                            "insertTextModeSupport": {"valueSet": [1, 2]},
                            "labelDetailsSupport": True,
                        },
                        "insertTextMode": 2,
                        "completionItemKind": {
                            "valueSet": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
                        },
                        "completionList": {"itemDefaults": ["commitCharacters", "editRange", "insertTextFormat", "insertTextMode"]},
                    },
                    "hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
                    "signatureHelp": {
                        "dynamicRegistration": True,
                        "signatureInformation": {
                            "documentationFormat": ["markdown", "plaintext"],
                            "parameterInformation": {"labelOffsetSupport": True},
                            "activeParameterSupport": True,
                        },
                        "contextSupport": True,
                    },
                    "definition": {"dynamicRegistration": True, "linkSupport": True},
                    "references": {"dynamicRegistration": True},
                    "documentHighlight": {"dynamicRegistration": True},
                    "documentSymbol": {
                        "dynamicRegistration": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                        "hierarchicalDocumentSymbolSupport": True,
                        "tagSupport": {"valueSet": [1]},
                        "labelSupport": True,
                    },
                    "codeAction": {
                        "dynamicRegistration": True,
                        "isPreferredSupport": True,
                        "disabledSupport": True,
                        "dataSupport": True,
                        "resolveSupport": {"properties": ["edit"]},
                        "codeActionLiteralSupport": {
                            "codeActionKind": {
                                "valueSet": [
                                    "",
                                    "quickfix",
                                    "refactor",
                                    "refactor.extract",
                                    "refactor.inline",
                                    "refactor.rewrite",
                                    "source",
                                    "source.organizeImports",
                                ]
                            }
                        },
                        "honorsChangeAnnotations": False,
                    },
                    "codeLens": {"dynamicRegistration": True},
                    "formatting": {"dynamicRegistration": True},
                    "rangeFormatting": {"dynamicRegistration": True},
                    "onTypeFormatting": {"dynamicRegistration": True},
                    "rename": {
                        "dynamicRegistration": True,
                        "prepareSupport": True,
                        "prepareSupportDefaultBehavior": 1,
                        "honorsChangeAnnotations": True,
                    },
                    "documentLink": {"dynamicRegistration": True, "tooltipSupport": True},
                    "typeDefinition": {"dynamicRegistration": True, "linkSupport": True},
                    "implementation": {"dynamicRegistration": True, "linkSupport": True},
                    "colorProvider": {"dynamicRegistration": True},
                    "foldingRange": {
                        "dynamicRegistration": True,
                        "rangeLimit": 5000,
                        "lineFoldingOnly": True,
                        "foldingRangeKind": {"valueSet": ["comment", "imports", "region"]},
                        "foldingRange": {"collapsedText": False},
                    },
                    "declaration": {"dynamicRegistration": True, "linkSupport": True},
                    "selectionRange": {"dynamicRegistration": True},
                    "callHierarchy": {"dynamicRegistration": True},
                    "semanticTokens": {
                        "dynamicRegistration": True,
                        "tokenTypes": [
                            "namespace",
                            "type",
                            "class",
                            "enum",
                            "interface",
                            "struct",
                            "typeParameter",
                            "parameter",
                            "variable",
                            "property",
                            "enumMember",
                            "event",
                            "function",
                            "method",
                            "macro",
                            "keyword",
                            "modifier",
                            "comment",
                            "string",
                            "number",
                            "regexp",
                            "operator",
                            "decorator",
                        ],
                        "tokenModifiers": [
                            "declaration",
                            "definition",
                            "readonly",
                            "static",
                            "deprecated",
                            "abstract",
                            "async",
                            "modification",
                            "documentation",
                            "defaultLibrary",
                        ],
                        "formats": ["relative"],
                        "requests": {"range": True, "full": {"delta": True}},
                        "multilineTokenSupport": False,
                        "overlappingTokenSupport": False,
                        "serverCancelSupport": True,
                        "augmentsSyntaxTokens": True,
                    },
                    "linkedEditingRange": {"dynamicRegistration": True},
                    "typeHierarchy": {"dynamicRegistration": True},
                    "inlineValue": {"dynamicRegistration": True},
                    "inlayHint": {
                        "dynamicRegistration": True,
                        "resolveSupport": {"properties": ["tooltip", "textEdits", "label.tooltip", "label.location", "label.command"]},
                    },
                    "diagnostic": {"dynamicRegistration": True, "relatedDocumentSupport": False},
                },
                "window": {
                    "showMessage": {"messageActionItem": {"additionalPropertiesSupport": True}},
                    "showDocument": {"support": True},
                    # workDoneProgress=True is what allows the server to send the
                    # window/workDoneProgress/create + $/progress sequence we rely on
                    # for indexing synchronisation in _start_server.
                    "workDoneProgress": True,
                },
                "general": {
                    "staleRequestSupport": {
                        "cancel": True,
                        "retryOnContentModified": [
                            "textDocument/semanticTokens/full",
                            "textDocument/semanticTokens/range",
                            "textDocument/semanticTokens/full/delta",
                        ],
                    },
                    "regularExpressions": {"engine": "ECMAScript", "version": "ES2020"},
                    "markdown": {"parser": "marked", "version": "1.1.0"},
                    "positionEncodings": ["utf-16"],
                },
                "notebookDocument": {"synchronization": {"dynamicRegistration": True, "executionSummarySupport": True}},
            },
            "initializationOptions": {
                "workspaceFolders": [root_uri],
                "storagePath": None,
                "codegen": {"enabled": False},
                "compiler": {"jvm": {"target": "default"}},
                "completion": {"snippets": {"enabled": True}},
                "diagnostics": {"enabled": True, "level": 4, "debounceTime": 250},
                "scripts": {"enabled": True, "buildScriptsEnabled": True},
                "indexing": {"enabled": True},
                "externalSources": {"useKlsScheme": False, "autoConvertToKotlin": False},
                "inlayHints": {"typeHints": False, "parameterHints": False, "chainedHints": False},
                "formatting": {
                    "formatter": "ktfmt",
                    "ktfmt": {
                        "style": "google",
                        "indent": 4,
                        "maxWidth": 100,
                        "continuationIndent": 8,
                        "removeUnusedImports": True,
                    },
                },
            },
            "trace": "off",
            "processId": os.getpid(),
            "workspaceFolders": [
                {
                    "uri": root_uri,
                    "name": os.path.basename(repository_absolute_path),
                }
            ],
        }
        return cast(InitializeParams, initialize_params)

    def _start_server(self) -> None:
        """
        Starts the Kotlin Language Server: registers handlers (including the
        progress tracking used to detect async indexing), runs the initialize
        handshake, verifies required capabilities, and waits for indexing.
        """

        def execute_client_command_handler(params: dict) -> list:
            # We execute no client-side commands; respond with an empty result.
            return []

        def do_nothing(params: dict) -> None:
            return

        def window_log_message(msg: dict) -> None:
            log.info(f"LSP: window/logMessage: {msg}")

        def work_done_progress_create(params: dict) -> dict:
            """Handle window/workDoneProgress/create: the server is about to report async progress.

            Clear the indexing-complete event so _start_server waits until all tokens finish.
            This is triggered by newer KLS versions (261+) that index asynchronously after initialized.
            Older versions (0.253.x) never send this, so _indexing_complete stays set and wait() returns instantly.
            """
            token = str(params.get("token", ""))
            log.debug(f"Kotlin LSP workDoneProgress/create: token={token!r}")
            with self._progress_lock:
                self._active_progress_tokens.add(token)
                self._indexing_complete.clear()
            return {}

        def progress_handler(params: dict) -> None:
            """Track $/progress begin/end to detect when all async indexing work finishes."""
            token = str(params.get("token", ""))
            value = params.get("value", {})
            kind = value.get("kind")
            if kind == "begin":
                title = value.get("title", "")
                log.info(f"Kotlin LSP progress [{token}]: started - {title}")
                with self._progress_lock:
                    self._active_progress_tokens.add(token)
                    self._indexing_complete.clear()
            elif kind == "report":
                pct = value.get("percentage")
                msg = value.get("message", "")
                pct_str = f" ({pct}%)" if pct is not None else ""
                log.debug(f"Kotlin LSP progress [{token}]: {msg}{pct_str}")
            elif kind == "end":
                msg = value.get("message", "")
                log.info(f"Kotlin LSP progress [{token}]: ended - {msg}")
                with self._progress_lock:
                    self._active_progress_tokens.discard(token)
                    # Only signal completion once every outstanding token has ended.
                    if not self._active_progress_tokens:
                        self._indexing_complete.set()

        self.server.on_request("client/registerCapability", do_nothing)
        self.server.on_notification("language/status", do_nothing)
        self.server.on_notification("window/logMessage", window_log_message)
        self.server.on_request("workspace/executeClientCommand", execute_client_command_handler)
        self.server.on_request("window/workDoneProgress/create", work_done_progress_create)
        self.server.on_notification("$/progress", progress_handler)
        self.server.on_notification("$/logTrace", do_nothing)
        self.server.on_notification("$/cancelRequest", do_nothing)
        self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
        self.server.on_notification("language/actionableNotification", do_nothing)
        log.info("Starting Kotlin server process")
        self.server.start()
        initialize_params = self._get_initialize_params(self.repository_root_path)
        log.info("Sending initialize request from LSP client to LSP server and awaiting response")
        init_response = self.server.send.initialize(initialize_params)
        # Sanity-check every capability this client depends on.
        capabilities = init_response["capabilities"]
        assert "textDocumentSync" in capabilities, "Server must support textDocumentSync"
        assert "hoverProvider" in capabilities, "Server must support hover"
        assert "completionProvider" in capabilities, "Server must support code completion"
        assert "signatureHelpProvider" in capabilities, "Server must support signature help"
        assert "definitionProvider" in capabilities, "Server must support go to definition"
        assert "referencesProvider" in capabilities, "Server must support find references"
        assert "documentSymbolProvider" in capabilities, "Server must support document symbols"
        assert "workspaceSymbolProvider" in capabilities, "Server must support workspace symbols"
        assert "semanticTokensProvider" in capabilities, "Server must support semantic tokens"
        self.server.notify.initialized({})
        # Wait for any async indexing to complete.
        # - Older KLS (0.253.x): indexing is synchronous inside `initialize`, no $/progress is sent,
        #   _indexing_complete stays SET -> wait() returns immediately.
        # - Newer KLS (261+): server sends window/workDoneProgress/create after initialized,
        #   which clears the event; wait() blocks until all progress tokens end.
        _INDEXING_TIMEOUT = 120.0
        log.info("Waiting for Kotlin LSP indexing to complete (if async)...")
        if self._indexing_complete.wait(timeout=_INDEXING_TIMEOUT):
            log.info("Kotlin LSP ready")
        else:
            log.warning("Kotlin LSP did not signal indexing completion within %.0fs; proceeding anyway", _INDEXING_TIMEOUT)

    @override
    def _get_wait_time_for_cross_file_referencing(self) -> float:
        """Small safety buffer since we already waited for indexing to complete in _start_server."""
        return 1.0
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/kotlin_language_server.py",
"license": "MIT License",
"lines": 457,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/solidlsp/language_servers/rust_analyzer.py | """
Provides Rust specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Rust.
"""
import logging
import os
import pathlib
import platform
import shutil
import subprocess
import threading
from typing import cast
from overrides import override
from solidlsp.ls import LanguageServerDependencyProvider, LanguageServerDependencyProviderSinglePath, SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.settings import SolidLSPSettings
log = logging.getLogger(__name__)
class RustAnalyzer(SolidLanguageServer):
"""
Provides Rust specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Rust.
"""
@staticmethod
def _determine_log_level(line: str) -> int:
    """Classify rust-analyzer stderr output to avoid false-positive errors."""
    # These substrings identify known informational/warning messages from
    # rust-analyzer that are not critical errors; demote them to DEBUG.
    benign_markers = (
        "failed to find any projects in",
        "fetchworkspaceerror",
    )
    lowered = line.lower()
    for marker in benign_markers:
        if marker in lowered:
            return logging.DEBUG
    # Anything else goes through the generic classification.
    return SolidLanguageServer._determine_log_level(line)
class DependencyProvider(LanguageServerDependencyProviderSinglePath):
    """Locates (or installs via rustup) the rust-analyzer binary used to launch the server."""

    @staticmethod
    def _get_rustup_version() -> str | None:
        """Get installed rustup version or None if not found."""
        try:
            result = subprocess.run(["rustup", "--version"], capture_output=True, text=True, check=False)
            if result.returncode == 0:
                return result.stdout.strip()
        except FileNotFoundError:
            return None
        return None

    @staticmethod
    def _get_rust_analyzer_via_rustup() -> str | None:
        """Get rust-analyzer path via rustup. Returns None if not found."""
        try:
            result = subprocess.run(["rustup", "which", "rust-analyzer"], capture_output=True, text=True, check=False)
            if result.returncode == 0:
                return result.stdout.strip()
        except FileNotFoundError:
            pass
        return None

    @staticmethod
    def _ensure_rust_analyzer_installed() -> str:
        """
        Ensure rust-analyzer is available.

        Priority order:
        1. Rustup existing installation (preferred - matches toolchain version)
        2. Rustup auto-install if rustup is available (ensures correct version)
        3. Common installation locations as fallback (only if rustup not available)
        4. System PATH last (can pick up incompatible versions)

        :return: path to rust-analyzer executable
        :raises RuntimeError: if no usable rust-analyzer binary could be found or installed
        """
        # Try rustup FIRST (preferred - avoids picking up incompatible versions from PATH)
        rustup_path = RustAnalyzer.DependencyProvider._get_rust_analyzer_via_rustup()
        if rustup_path:
            return rustup_path
        # If rustup is available but rust-analyzer not installed, auto-install it BEFORE
        # checking common paths. This ensures we get the correct version matching the toolchain.
        if RustAnalyzer.DependencyProvider._get_rustup_version():
            result = subprocess.run(["rustup", "component", "add", "rust-analyzer"], check=False, capture_output=True, text=True)
            if result.returncode == 0:
                # Verify installation worked
                rustup_path = RustAnalyzer.DependencyProvider._get_rust_analyzer_via_rustup()
                if rustup_path:
                    return rustup_path
            else:
                # Surface the failure instead of silently discarding the captured output;
                # we still fall through to the common-path / PATH lookups below.
                log.debug("'rustup component add rust-analyzer' failed: %s", result.stderr.strip())
        # Determine platform-specific binary name and paths
        is_windows = platform.system() == "Windows"
        binary_name = "rust-analyzer.exe" if is_windows else "rust-analyzer"
        # Fallback to common installation locations (only used if rustup not available)
        common_paths: list[str | None] = []
        if is_windows:
            # Windows-specific paths
            home = pathlib.Path.home()
            common_paths.extend(
                [
                    str(home / ".cargo" / "bin" / binary_name),  # cargo install / rustup
                    str(home / "scoop" / "shims" / binary_name),  # Scoop package manager
                    str(home / "scoop" / "apps" / "rust-analyzer" / "current" / binary_name),  # Scoop direct
                ]
            )
            # Only consider the standalone-install location when LOCALAPPDATA is actually set:
            # with an empty value the joined path would be RELATIVE ("Programs/rust-analyzer/...")
            # and could spuriously match a file under the current working directory.
            local_app_data = os.environ.get("LOCALAPPDATA")
            if local_app_data:
                common_paths.append(str(pathlib.Path(local_app_data) / "Programs" / "rust-analyzer" / binary_name))  # Standalone install
        else:
            # Unix-like paths (macOS, Linux)
            common_paths.extend(
                [
                    "/opt/homebrew/bin/rust-analyzer",  # macOS Homebrew (Apple Silicon)
                    "/usr/local/bin/rust-analyzer",  # macOS Homebrew (Intel) / Linux system
                    os.path.expanduser("~/.cargo/bin/rust-analyzer"),  # cargo install
                    os.path.expanduser("~/.local/bin/rust-analyzer"),  # User local bin
                ]
            )
        for path in common_paths:
            if path and os.path.isfile(path) and os.access(path, os.X_OK):
                return path
        # Last resort: check system PATH (can pick up incorrect aliases, hence checked last)
        path_result = shutil.which("rust-analyzer")
        if path_result and os.path.isfile(path_result) and os.access(path_result, os.X_OK):
            return path_result
        # Provide helpful error message with all searched locations
        searched = [p for p in common_paths if p]
        install_instructions = [
            "  - Rustup: rustup component add rust-analyzer",
            "  - Cargo: cargo install rust-analyzer",
        ]
        if is_windows:
            install_instructions.extend(
                [
                    "  - Scoop: scoop install rust-analyzer",
                    "  - Chocolatey: choco install rust-analyzer",
                    "  - Standalone: Download from https://github.com/rust-lang/rust-analyzer/releases",
                ]
            )
        else:
            install_instructions.extend(
                [
                    "  - Homebrew (macOS): brew install rust-analyzer",
                    "  - System package manager (Linux): apt/dnf/pacman install rust-analyzer",
                ]
            )
        raise RuntimeError(
            "rust-analyzer is not installed or not in PATH.\n"
            "Searched locations:\n" + "\n".join(f"  - {p}" for p in searched) + "\n"
            "Please install rust-analyzer via:\n" + "\n".join(install_instructions)
        )

    def _get_or_install_core_dependency(self) -> str:
        """Return the path to a usable rust-analyzer binary, installing it if needed."""
        return self._ensure_rust_analyzer_installed()

    def _create_launch_command(self, core_path: str) -> list[str]:
        # rust-analyzer speaks LSP over stdio by default; no extra flags needed.
        return [core_path]
def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
    """
    Creates a RustAnalyzer instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead.

    :param config: language-server configuration
    :param repository_root_path: absolute path of the repository to serve
    :param solidlsp_settings: global SolidLSP settings
    """
    super().__init__(config, repository_root_path, None, "rust", solidlsp_settings)
    # Synchronisation events used during server startup; each starts unset.
    for event_attr in (
        "server_ready",
        "service_ready_event",
        "initialize_searcher_command_available",
        "resolve_main_method_available",
    ):
        setattr(self, event_attr, threading.Event())
def _create_dependency_provider(self) -> LanguageServerDependencyProvider:
    """Build the Rust-specific dependency provider for this server instance."""
    provider = self.DependencyProvider(self._custom_settings, self._ls_resources_dir)
    return provider
@override
def is_ignored_dirname(self, dirname: str) -> bool:
    """Ignore Cargo's build output directory in addition to the defaults."""
    if super().is_ignored_dirname(dirname):
        return True
    return dirname == "target"
@staticmethod
def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
    """
    Returns the initialize params for the Rust Analyzer Language Server.

    The client capabilities mimic those of a VS Code Insiders client so that
    rust-analyzer enables the same feature set it would for VS Code.
    The large literal below is protocol data and is kept verbatim.

    :param repository_absolute_path: absolute path of the workspace root.
    :return: the params dict for the LSP ``initialize`` request.
    """
    # rust-analyzer expects the workspace root as a file:// URI.
    root_uri = pathlib.Path(repository_absolute_path).as_uri()
    initialize_params = {
        # Impersonate VS Code Insiders; servers may tailor behavior to the client name.
        "clientInfo": {"name": "Visual Studio Code - Insiders", "version": "1.82.0-insider"},
        "locale": "en",
        "capabilities": {
            # --- workspace-level client capabilities ---
            "workspace": {
                "applyEdit": True,
                "workspaceEdit": {
                    "documentChanges": True,
                    "resourceOperations": ["create", "rename", "delete"],
                    "failureHandling": "textOnlyTransactional",
                    "normalizesLineEndings": True,
                    "changeAnnotationSupport": {"groupsOnLabel": True},
                },
                "configuration": True,
                "didChangeWatchedFiles": {"dynamicRegistration": True, "relativePatternSupport": True},
                "symbol": {
                    "dynamicRegistration": True,
                    "symbolKind": {"valueSet": list(range(1, 27))},
                    "tagSupport": {"valueSet": [1]},
                    "resolveSupport": {"properties": ["location.range"]},
                },
                "codeLens": {"refreshSupport": True},
                "executeCommand": {"dynamicRegistration": True},
                "didChangeConfiguration": {"dynamicRegistration": True},
                "workspaceFolders": True,
                "semanticTokens": {"refreshSupport": True},
                "fileOperations": {
                    "dynamicRegistration": True,
                    "didCreate": True,
                    "didRename": True,
                    "didDelete": True,
                    "willCreate": True,
                    "willRename": True,
                    "willDelete": True,
                },
                "inlineValue": {"refreshSupport": True},
                "inlayHint": {"refreshSupport": True},
                "diagnostics": {"refreshSupport": True},
            },
            # --- per-document client capabilities ---
            "textDocument": {
                "publishDiagnostics": {
                    "relatedInformation": True,
                    "versionSupport": False,
                    "tagSupport": {"valueSet": [1, 2]},
                    "codeDescriptionSupport": True,
                    "dataSupport": True,
                },
                "synchronization": {"dynamicRegistration": True, "willSave": True, "willSaveWaitUntil": True, "didSave": True},
                "completion": {
                    "dynamicRegistration": True,
                    "contextSupport": True,
                    "completionItem": {
                        "snippetSupport": True,
                        "commitCharactersSupport": True,
                        "documentationFormat": ["markdown", "plaintext"],
                        "deprecatedSupport": True,
                        "preselectSupport": True,
                        "tagSupport": {"valueSet": [1]},
                        "insertReplaceSupport": True,
                        "resolveSupport": {"properties": ["documentation", "detail", "additionalTextEdits"]},
                        "insertTextModeSupport": {"valueSet": [1, 2]},
                        "labelDetailsSupport": True,
                    },
                    "insertTextMode": 2,
                    "completionItemKind": {
                        "valueSet": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
                    },
                    "completionList": {"itemDefaults": ["commitCharacters", "editRange", "insertTextFormat", "insertTextMode"]},
                },
                "hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
                "signatureHelp": {
                    "dynamicRegistration": True,
                    "signatureInformation": {
                        "documentationFormat": ["markdown", "plaintext"],
                        "parameterInformation": {"labelOffsetSupport": True},
                        "activeParameterSupport": True,
                    },
                    "contextSupport": True,
                },
                "definition": {"dynamicRegistration": True, "linkSupport": True},
                "references": {"dynamicRegistration": True},
                "documentHighlight": {"dynamicRegistration": True},
                "documentSymbol": {
                    "dynamicRegistration": True,
                    "symbolKind": {"valueSet": list(range(1, 27))},
                    "hierarchicalDocumentSymbolSupport": True,
                    "tagSupport": {"valueSet": [1]},
                    "labelSupport": True,
                },
                "codeAction": {
                    "dynamicRegistration": True,
                    "isPreferredSupport": True,
                    "disabledSupport": True,
                    "dataSupport": True,
                    "resolveSupport": {"properties": ["edit"]},
                    "codeActionLiteralSupport": {
                        "codeActionKind": {
                            "valueSet": [
                                "",
                                "quickfix",
                                "refactor",
                                "refactor.extract",
                                "refactor.inline",
                                "refactor.rewrite",
                                "source",
                                "source.organizeImports",
                            ]
                        }
                    },
                    "honorsChangeAnnotations": False,
                },
                "codeLens": {"dynamicRegistration": True},
                "formatting": {"dynamicRegistration": True},
                "rangeFormatting": {"dynamicRegistration": True},
                "onTypeFormatting": {"dynamicRegistration": True},
                "rename": {
                    "dynamicRegistration": True,
                    "prepareSupport": True,
                    "prepareSupportDefaultBehavior": 1,
                    "honorsChangeAnnotations": True,
                },
                "documentLink": {"dynamicRegistration": True, "tooltipSupport": True},
                "typeDefinition": {"dynamicRegistration": True, "linkSupport": True},
                "implementation": {"dynamicRegistration": True, "linkSupport": True},
                "colorProvider": {"dynamicRegistration": True},
                "foldingRange": {
                    "dynamicRegistration": True,
                    "rangeLimit": 5000,
                    "lineFoldingOnly": True,
                    "foldingRangeKind": {"valueSet": ["comment", "imports", "region"]},
                    "foldingRange": {"collapsedText": False},
                },
                "declaration": {"dynamicRegistration": True, "linkSupport": True},
                "selectionRange": {"dynamicRegistration": True},
                "callHierarchy": {"dynamicRegistration": True},
                "semanticTokens": {
                    "dynamicRegistration": True,
                    "tokenTypes": [
                        "namespace",
                        "type",
                        "class",
                        "enum",
                        "interface",
                        "struct",
                        "typeParameter",
                        "parameter",
                        "variable",
                        "property",
                        "enumMember",
                        "event",
                        "function",
                        "method",
                        "macro",
                        "keyword",
                        "modifier",
                        "comment",
                        "string",
                        "number",
                        "regexp",
                        "operator",
                        "decorator",
                    ],
                    "tokenModifiers": [
                        "declaration",
                        "definition",
                        "readonly",
                        "static",
                        "deprecated",
                        "abstract",
                        "async",
                        "modification",
                        "documentation",
                        "defaultLibrary",
                    ],
                    "formats": ["relative"],
                    "requests": {"range": True, "full": {"delta": True}},
                    "multilineTokenSupport": False,
                    "overlappingTokenSupport": False,
                    "serverCancelSupport": True,
                    "augmentsSyntaxTokens": False,
                },
                "linkedEditingRange": {"dynamicRegistration": True},
                "typeHierarchy": {"dynamicRegistration": True},
                "inlineValue": {"dynamicRegistration": True},
                "inlayHint": {
                    "dynamicRegistration": True,
                    "resolveSupport": {"properties": ["tooltip", "textEdits", "label.tooltip", "label.location", "label.command"]},
                },
                "diagnostic": {"dynamicRegistration": True, "relatedDocumentSupport": False},
            },
            # --- window (UI) capabilities ---
            "window": {
                "showMessage": {"messageActionItem": {"additionalPropertiesSupport": True}},
                "showDocument": {"support": True},
                "workDoneProgress": True,
            },
            # --- general client capabilities ---
            "general": {
                "staleRequestSupport": {
                    "cancel": True,
                    "retryOnContentModified": [
                        "textDocument/semanticTokens/full",
                        "textDocument/semanticTokens/range",
                        "textDocument/semanticTokens/full/delta",
                    ],
                },
                "regularExpressions": {"engine": "ECMAScript", "version": "ES2020"},
                "markdown": {
                    "parser": "marked",
                    "version": "1.1.0",
                    "allowedTags": [
                        "ul",
                        "li",
                        "p",
                        "code",
                        "blockquote",
                        "ol",
                        "h1",
                        "h2",
                        "h3",
                        "h4",
                        "h5",
                        "h6",
                        "hr",
                        "em",
                        "pre",
                        "table",
                        "thead",
                        "tbody",
                        "tr",
                        "th",
                        "td",
                        "div",
                        "del",
                        "a",
                        "strong",
                        "br",
                        "img",
                        "span",
                    ],
                },
                "positionEncodings": ["utf-16"],
            },
            "notebookDocument": {"synchronization": {"dynamicRegistration": True, "executionSummarySupport": True}},
            # --- rust-analyzer-specific experimental extensions ---
            "experimental": {
                "snippetTextEdit": True,
                "codeActionGroup": True,
                "hoverActions": True,
                # serverStatusNotification enables the experimental/serverStatus
                # notification that _start_server waits on (quiescent flag).
                "serverStatusNotification": True,
                "colorDiagnosticOutput": True,
                "openServerLogs": True,
                "localDocs": True,
                "commands": {
                    "commands": [
                        "rust-analyzer.runSingle",
                        "rust-analyzer.debugSingle",
                        "rust-analyzer.showReferences",
                        "rust-analyzer.gotoLocation",
                        "editor.action.triggerParameterHints",
                    ]
                },
            },
        },
        # --- rust-analyzer server configuration (mirrors the VS Code extension defaults) ---
        "initializationOptions": {
            "cargoRunner": None,
            "runnables": {"extraEnv": None, "problemMatcher": ["$rustc"], "command": None, "extraArgs": []},
            "statusBar": {"clickAction": "openLogs"},
            "server": {"path": None, "extraEnv": None},
            "trace": {"server": "verbose", "extension": False},
            "debug": {
                "engine": "auto",
                "sourceFileMap": {"/rustc/<id>": "${env:USERPROFILE}/.rustup/toolchains/<toolchain-id>/lib/rustlib/src/rust"},
                "openDebugPane": False,
                "engineSettings": {},
            },
            "restartServerOnConfigChange": False,
            "typing": {"continueCommentsOnNewline": True, "autoClosingAngleBrackets": {"enable": False}},
            "diagnostics": {
                "previewRustcOutput": False,
                "useRustcErrorCode": False,
                "disabled": [],
                "enable": True,
                "experimental": {"enable": False},
                "remapPrefix": {},
                "warningsAsHint": [],
                "warningsAsInfo": [],
            },
            "discoverProjectRunner": None,
            "showUnlinkedFileNotification": True,
            "showDependenciesExplorer": True,
            "assist": {"emitMustUse": False, "expressionFillDefault": "todo"},
            "cachePriming": {"enable": True, "numThreads": 0},
            "cargo": {
                "autoreload": True,
                "buildScripts": {
                    "enable": True,
                    "invocationLocation": "workspace",
                    "invocationStrategy": "per_workspace",
                    "overrideCommand": None,
                    "useRustcWrapper": True,
                },
                "cfgs": [],
                "extraArgs": [],
                "extraEnv": {},
                "features": [],
                "noDefaultFeatures": False,
                "sysroot": "discover",
                "sysrootSrc": None,
                "target": None,
                "unsetTest": ["core"],
            },
            "checkOnSave": True,
            "check": {
                "allTargets": True,
                "command": "check",
                "extraArgs": [],
                "extraEnv": {},
                "features": None,
                "ignore": [],
                "invocationLocation": "workspace",
                "invocationStrategy": "per_workspace",
                "noDefaultFeatures": None,
                "overrideCommand": None,
                "targets": None,
            },
            "completion": {
                "autoimport": {"enable": True},
                "autoself": {"enable": True},
                "callable": {"snippets": "fill_arguments"},
                "fullFunctionSignatures": {"enable": False},
                "limit": None,
                "postfix": {"enable": True},
                "privateEditable": {"enable": False},
                "snippets": {
                    "custom": {
                        "Arc::new": {
                            "postfix": "arc",
                            "body": "Arc::new(${receiver})",
                            "requires": "std::sync::Arc",
                            "description": "Put the expression into an `Arc`",
                            "scope": "expr",
                        },
                        "Rc::new": {
                            "postfix": "rc",
                            "body": "Rc::new(${receiver})",
                            "requires": "std::rc::Rc",
                            "description": "Put the expression into an `Rc`",
                            "scope": "expr",
                        },
                        "Box::pin": {
                            "postfix": "pinbox",
                            "body": "Box::pin(${receiver})",
                            "requires": "std::boxed::Box",
                            "description": "Put the expression into a pinned `Box`",
                            "scope": "expr",
                        },
                        "Ok": {
                            "postfix": "ok",
                            "body": "Ok(${receiver})",
                            "description": "Wrap the expression in a `Result::Ok`",
                            "scope": "expr",
                        },
                        "Err": {
                            "postfix": "err",
                            "body": "Err(${receiver})",
                            "description": "Wrap the expression in a `Result::Err`",
                            "scope": "expr",
                        },
                        "Some": {
                            "postfix": "some",
                            "body": "Some(${receiver})",
                            "description": "Wrap the expression in an `Option::Some`",
                            "scope": "expr",
                        },
                    }
                },
            },
            # "client" watcher: we (not the server) watch the filesystem.
            "files": {"excludeDirs": [], "watcher": "client"},
            "highlightRelated": {
                "breakPoints": {"enable": True},
                "closureCaptures": {"enable": True},
                "exitPoints": {"enable": True},
                "references": {"enable": True},
                "yieldPoints": {"enable": True},
            },
            "hover": {
                "actions": {
                    "debug": {"enable": True},
                    "enable": True,
                    "gotoTypeDef": {"enable": True},
                    "implementations": {"enable": True},
                    "references": {"enable": False},
                    "run": {"enable": True},
                },
                "documentation": {"enable": True, "keywords": {"enable": True}},
                "links": {"enable": True},
                "memoryLayout": {"alignment": "hexadecimal", "enable": True, "niches": False, "offset": "hexadecimal", "size": "both"},
            },
            "imports": {
                "granularity": {"enforce": False, "group": "crate"},
                "group": {"enable": True},
                "merge": {"glob": True},
                "preferNoStd": False,
                "preferPrelude": False,
                "prefix": "plain",
            },
            "inlayHints": {
                "bindingModeHints": {"enable": False},
                "chainingHints": {"enable": True},
                "closingBraceHints": {"enable": True, "minLines": 25},
                "closureCaptureHints": {"enable": False},
                "closureReturnTypeHints": {"enable": "never"},
                "closureStyle": "impl_fn",
                "discriminantHints": {"enable": "never"},
                "expressionAdjustmentHints": {"enable": "never", "hideOutsideUnsafe": False, "mode": "prefix"},
                "lifetimeElisionHints": {"enable": "never", "useParameterNames": False},
                "maxLength": 25,
                "parameterHints": {"enable": True},
                "reborrowHints": {"enable": "never"},
                "renderColons": True,
                "typeHints": {"enable": True, "hideClosureInitialization": False, "hideNamedConstructor": False},
            },
            "interpret": {"tests": False},
            "joinLines": {"joinAssignments": True, "joinElseIf": True, "removeTrailingComma": True, "unwrapTrivialBlock": True},
            "lens": {
                "debug": {"enable": True},
                "enable": True,
                "forceCustomCommands": True,
                "implementations": {"enable": True},
                "location": "above_name",
                "references": {
                    "adt": {"enable": False},
                    "enumVariant": {"enable": False},
                    "method": {"enable": False},
                    "trait": {"enable": False},
                },
                "run": {"enable": True},
            },
            "linkedProjects": [],
            "lru": {"capacity": None, "query": {"capacities": {}}},
            "notifications": {"cargoTomlNotFound": True},
            "numThreads": None,
            "procMacro": {"attributes": {"enable": True}, "enable": True, "ignored": {}, "server": None},
            "references": {"excludeImports": False},
            "rust": {"analyzerTargetDir": None},
            "rustc": {"source": None},
            "rustfmt": {"extraArgs": [], "overrideCommand": None, "rangeFormatting": {"enable": False}},
            "semanticHighlighting": {
                "doc": {"comment": {"inject": {"enable": True}}},
                "nonStandardTokens": True,
                "operator": {"enable": True, "specialization": {"enable": False}},
                "punctuation": {"enable": False, "separate": {"macro": {"bang": False}}, "specialization": {"enable": False}},
                "strings": {"enable": True},
            },
            "signatureInfo": {"detail": "full", "documentation": {"enable": True}},
            "workspace": {"symbol": {"search": {"kind": "only_types", "limit": 128, "scope": "workspace"}}},
        },
        "trace": "verbose",
        "processId": os.getpid(),
        "rootPath": repository_absolute_path,
        "rootUri": root_uri,
        "workspaceFolders": [
            {
                "uri": root_uri,
                "name": os.path.basename(repository_absolute_path),
            }
        ],
    }
    # The dict is structurally an InitializeParams; cast for the type checker.
    return cast(InitializeParams, initialize_params)
def _start_server(self) -> None:
    """
    Starts the Rust Analyzer Language Server.

    Registers the notification/request handlers needed during startup, launches
    the server process, performs the LSP initialize handshake, and then blocks
    until rust-analyzer reports a quiescent (fully indexed) state via the
    experimental/serverStatus notification.
    """

    def register_capability_handler(params: dict) -> None:
        # Once the server dynamically registers workspace/executeCommand, the
        # command-based features (searcher init, main-method resolution) are usable.
        assert "registrations" in params
        for registration in params["registrations"]:
            if registration["method"] == "workspace/executeCommand":
                self.initialize_searcher_command_available.set()
                self.resolve_main_method_available.set()
        return

    def lang_status_handler(params: dict) -> None:
        # TODO: Should we wait for
        # server -> client: {'jsonrpc': '2.0', 'method': 'language/status', 'params': {'type': 'ProjectStatus', 'message': 'OK'}}
        # Before proceeding?
        if params["type"] == "ServiceReady" and params["message"] == "ServiceReady":
            self.service_ready_event.set()

    def execute_client_command_handler(params: dict) -> list:
        # We do not execute client-side commands; respond with an empty result.
        return []

    def do_nothing(params: dict) -> None:
        return

    def check_experimental_status(params: dict) -> None:
        # quiescent=True means rust-analyzer has finished indexing and can
        # answer requests reliably (was `== True`, fixed to truthiness check).
        if params["quiescent"]:
            self.server_ready.set()

    def window_log_message(msg: dict) -> None:
        # Lazy %-style args avoid formatting the message when INFO is disabled.
        log.info("LSP: window/logMessage: %s", msg)

    self.server.on_request("client/registerCapability", register_capability_handler)
    self.server.on_notification("language/status", lang_status_handler)
    self.server.on_notification("window/logMessage", window_log_message)
    self.server.on_request("workspace/executeClientCommand", execute_client_command_handler)
    self.server.on_notification("$/progress", do_nothing)
    self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
    self.server.on_notification("language/actionableNotification", do_nothing)
    self.server.on_notification("experimental/serverStatus", check_experimental_status)
    log.info("Starting RustAnalyzer server process")
    self.server.start()
    initialize_params = self._get_initialize_params(self.repository_root_path)
    log.info("Sending initialize request from LSP client to LSP server and awaiting response")
    init_response = self.server.send.initialize(initialize_params)
    # Sanity-check that the server advertises the capabilities we depend on.
    assert init_response["capabilities"]["textDocumentSync"]["change"] == 2  # type: ignore
    assert "completionProvider" in init_response["capabilities"]
    assert init_response["capabilities"]["completionProvider"] == {
        "resolveProvider": True,
        "triggerCharacters": [":", ".", "'", "("],
        "completionItem": {"labelDetailsSupport": True},
    }
    self.server.notify.initialized({})
    # Block until check_experimental_status observes quiescent=True.
    self.server_ready.wait()
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/rust_analyzer.py",
"license": "MIT License",
"lines": 686,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/solidlsp/language_servers/terraform_ls.py | import logging
import os
import shutil
from typing import cast
from overrides import override
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.ls_utils import PathUtils, PlatformUtils
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
from .common import RuntimeDependency, RuntimeDependencyCollection
log = logging.getLogger(__name__)
class TerraformLS(SolidLanguageServer):
    """
    Provides Terraform specific instantiation of the LanguageServer class using terraform-ls.
    """

    @override
    def is_ignored_dirname(self, dirname: str) -> bool:
        # .terraform holds provider downloads; terraform.tfstate.d holds workspace state.
        return super().is_ignored_dirname(dirname) or dirname in [".terraform", "terraform.tfstate.d"]

    @staticmethod
    def _determine_log_level(line: str) -> int:
        """Classify terraform-ls stderr output to avoid false-positive errors."""
        line_lower = line.lower()
        # File discovery messages that are not actual errors
        if any(
            [
                "discover.go:" in line_lower,
                "walker.go:" in line_lower,
                "walking of {file://" in line_lower,
                "bus: -> discover" in line_lower,
            ]
        ):
            return logging.DEBUG
        # Known informational messages from terraform-ls that contain "error" but aren't errors
        # Note: pattern match is flexible to handle file paths between keywords
        if any(
            [
                "loading module metadata returned error:" in line_lower and "state not changed" in line_lower,
                "incoming notification for" in line_lower,
            ]
        ):
            return logging.DEBUG
        return SolidLanguageServer._determine_log_level(line)

    @staticmethod
    def _ensure_tf_command_available() -> None:
        """Verify that the `terraform` CLI is reachable; raise RuntimeError if it is not."""
        log.debug("Starting terraform version detection...")
        # 1. Try to find terraform using shutil.which
        terraform_cmd = shutil.which("terraform")
        if terraform_cmd is not None:
            log.debug(f"Found terraform via shutil.which: {terraform_cmd}")
            return
        # TODO: is this needed?
        # 2. Fallback to TERRAFORM_CLI_PATH (set by hashicorp/setup-terraform action)
        # (The previous `if not terraform_cmd:` guard here was always true after the
        # early return above and has been removed.)
        terraform_cli_path = os.environ.get("TERRAFORM_CLI_PATH")
        if terraform_cli_path:
            log.debug(f"Trying TERRAFORM_CLI_PATH: {terraform_cli_path}")
            # TODO: use binary name from runtime dependencies if we keep this code
            if os.name == "nt":
                terraform_binary = os.path.join(terraform_cli_path, "terraform.exe")
            else:
                terraform_binary = os.path.join(terraform_cli_path, "terraform")
            if os.path.exists(terraform_binary):
                log.debug(f"Found terraform via TERRAFORM_CLI_PATH: {terraform_binary}")
                return
        # Fixed message: the two sentences were previously concatenated without a separator.
        raise RuntimeError(
            "Terraform executable not found, please ensure Terraform is installed. "
            "See https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli for instructions."
        )

    @classmethod
    def _setup_runtime_dependencies(cls, solidlsp_settings: SolidLSPSettings) -> str:
        """
        Setup runtime dependencies for terraform-ls.
        Downloads and installs terraform-ls if not already present.

        :param solidlsp_settings: settings providing the resources directory.
        :return: path to the terraform-ls executable.
        """
        cls._ensure_tf_command_available()
        platform_id = PlatformUtils.get_platform_id()
        deps = RuntimeDependencyCollection(
            [
                RuntimeDependency(
                    id="TerraformLS",
                    description="terraform-ls for macOS (ARM64)",
                    url="https://releases.hashicorp.com/terraform-ls/0.36.5/terraform-ls_0.36.5_darwin_arm64.zip",
                    platform_id="osx-arm64",
                    archive_type="zip",
                    binary_name="terraform-ls",
                ),
                RuntimeDependency(
                    id="TerraformLS",
                    description="terraform-ls for macOS (x64)",
                    url="https://releases.hashicorp.com/terraform-ls/0.36.5/terraform-ls_0.36.5_darwin_amd64.zip",
                    platform_id="osx-x64",
                    archive_type="zip",
                    binary_name="terraform-ls",
                ),
                RuntimeDependency(
                    id="TerraformLS",
                    description="terraform-ls for Linux (ARM64)",
                    url="https://releases.hashicorp.com/terraform-ls/0.36.5/terraform-ls_0.36.5_linux_arm64.zip",
                    platform_id="linux-arm64",
                    archive_type="zip",
                    binary_name="terraform-ls",
                ),
                RuntimeDependency(
                    id="TerraformLS",
                    description="terraform-ls for Linux (x64)",
                    url="https://releases.hashicorp.com/terraform-ls/0.36.5/terraform-ls_0.36.5_linux_amd64.zip",
                    platform_id="linux-x64",
                    archive_type="zip",
                    binary_name="terraform-ls",
                ),
                RuntimeDependency(
                    id="TerraformLS",
                    description="terraform-ls for Windows (x64)",
                    url="https://releases.hashicorp.com/terraform-ls/0.36.5/terraform-ls_0.36.5_windows_amd64.zip",
                    platform_id="win-x64",
                    archive_type="zip",
                    binary_name="terraform-ls.exe",
                ),
            ]
        )
        dependency = deps.get_single_dep_for_current_platform()
        terraform_ls_executable_path = deps.binary_path(cls.ls_resources_dir(solidlsp_settings))
        if not os.path.exists(terraform_ls_executable_path):
            log.info(f"Downloading terraform-ls from {dependency.url}")
            deps.install(cls.ls_resources_dir(solidlsp_settings))
        assert os.path.exists(terraform_ls_executable_path), f"terraform-ls executable not found at {terraform_ls_executable_path}"
        # Make the executable file executable on Unix-like systems
        if platform_id.value != "win-x64":
            os.chmod(terraform_ls_executable_path, 0o755)
        return terraform_ls_executable_path

    def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
        """
        Creates a TerraformLS instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead.
        """
        terraform_ls_executable_path = self._setup_runtime_dependencies(solidlsp_settings)
        super().__init__(
            config,
            repository_root_path,
            ProcessLaunchInfo(cmd=f"{terraform_ls_executable_path} serve", cwd=repository_root_path),
            "terraform",
            solidlsp_settings,
        )
        self.request_id = 0

    @staticmethod
    def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
        """
        Returns the initialize params for the Terraform Language Server.

        :param repository_absolute_path: absolute path of the workspace root.
        :return: the params dict for the LSP ``initialize`` request.
        """
        root_uri = PathUtils.path_to_uri(repository_absolute_path)
        result = {
            "processId": os.getpid(),
            "locale": "en",
            "rootPath": repository_absolute_path,
            "rootUri": root_uri,
            "capabilities": {
                "textDocument": {
                    "synchronization": {"didSave": True, "dynamicRegistration": True},
                    "completion": {"dynamicRegistration": True, "completionItem": {"snippetSupport": True}},
                    "definition": {"dynamicRegistration": True},
                    "documentSymbol": {
                        "dynamicRegistration": True,
                        "hierarchicalDocumentSymbolSupport": True,
                        "symbolKind": {"valueSet": list(range(1, 27))},
                    },
                },
                "workspace": {"workspaceFolders": True, "didChangeConfiguration": {"dynamicRegistration": True}},
            },
            "workspaceFolders": [
                {
                    "name": os.path.basename(repository_absolute_path),
                    "uri": root_uri,
                }
            ],
        }
        return cast(InitializeParams, result)

    def _start_server(self) -> None:
        """Start terraform-ls server process"""

        def register_capability_handler(params: dict) -> None:
            return

        def window_log_message(msg: dict) -> None:
            log.info(f"LSP: window/logMessage: {msg}")

        def do_nothing(params: dict) -> None:
            return

        self.server.on_request("client/registerCapability", register_capability_handler)
        self.server.on_notification("window/logMessage", window_log_message)
        self.server.on_notification("$/progress", do_nothing)
        self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
        log.info("Starting terraform-ls server process")
        self.server.start()
        initialize_params = self._get_initialize_params(self.repository_root_path)
        log.info("Sending initialize request from LSP client to LSP server and awaiting response")
        init_response = self.server.send.initialize(initialize_params)
        # Verify server capabilities
        assert "textDocumentSync" in init_response["capabilities"]
        assert "completionProvider" in init_response["capabilities"]
        assert "definitionProvider" in init_response["capabilities"]
        self.server.notify.initialized({})
        # terraform-ls server is typically ready immediately after initialization
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/language_servers/terraform_ls.py",
"license": "MIT License",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:test/serena/config/test_serena_config.py | import logging
import os
import shutil
import tempfile
from pathlib import Path
import pytest
from serena.agent import SerenaAgent
from serena.config.serena_config import (
DEFAULT_PROJECT_SERENA_FOLDER_LOCATION,
LanguageBackend,
ProjectConfig,
RegisteredProject,
SerenaConfig,
SerenaConfigError,
)
from serena.constants import PROJECT_TEMPLATE_FILE, SERENA_MANAGED_DIR_NAME
from serena.project import MemoriesManager, Project
from solidlsp.ls_config import Language
from test.conftest import create_default_serena_config
class TestProjectConfigAutogenerate:
    """Test class for ProjectConfig autogeneration functionality."""

    def setup_method(self):
        """Set up test environment before each test method."""
        # Create a temporary directory for testing
        self.test_dir = tempfile.mkdtemp()
        self.serena_config = create_default_serena_config()
        self.project_path = Path(self.test_dir)

    def teardown_method(self):
        """Clean up test environment after each test method."""
        # Remove the temporary directory
        shutil.rmtree(self.test_dir)

    def test_autogenerate_empty_directory(self):
        """Test that autogenerate raises ValueError with helpful message for empty directory."""
        with pytest.raises(ValueError) as exc_info:
            ProjectConfig.autogenerate(self.project_path, self.serena_config, save_to_disk=False)
        error_message = str(exc_info.value)
        assert "No source files found" in error_message

    def test_autogenerate_with_python_files(self):
        """Test successful autogeneration with Python source files."""
        # Create a Python file
        python_file = self.project_path / "main.py"
        python_file.write_text("def hello():\n    print('Hello, world!')\n")
        # Run autogenerate
        config = ProjectConfig.autogenerate(self.project_path, self.serena_config, save_to_disk=False)
        # Verify the configuration
        assert config.project_name == self.project_path.name
        assert config.languages == [Language.PYTHON]

    def test_autogenerate_with_js_files(self):
        """Test successful autogeneration with JavaScript source files."""
        # Create a single JavaScript file
        (self.project_path / "small.js").write_text("console.log('JS');")
        # Run autogenerate - JavaScript is handled by the TypeScript language server
        config = ProjectConfig.autogenerate(self.project_path, self.serena_config, save_to_disk=False)
        assert config.languages == [Language.TYPESCRIPT]

    def test_autogenerate_with_multiple_languages(self):
        """Test autogeneration picks dominant language when multiple are present."""
        # Create files for multiple languages
        (self.project_path / "main.py").write_text("print('Python')")
        (self.project_path / "util.py").write_text("def util(): pass")
        (self.project_path / "small.js").write_text("console.log('JS');")
        # Run autogenerate - should pick Python as dominant
        config = ProjectConfig.autogenerate(self.project_path, self.serena_config, save_to_disk=False)
        assert config.languages == [Language.PYTHON]

    def test_autogenerate_saves_to_disk(self):
        """Test that autogenerate can save the configuration to disk."""
        # Create a Go file
        go_file = self.project_path / "main.go"
        go_file.write_text("package main\n\nfunc main() {}\n")
        # Run autogenerate with save_to_disk=True
        config = ProjectConfig.autogenerate(self.project_path, self.serena_config, save_to_disk=True)
        # Verify the configuration file was created
        config_path = self.project_path / ".serena" / "project.yml"
        assert config_path.exists()
        # Verify the content
        assert config.languages == [Language.GO]

    def test_autogenerate_nonexistent_path(self):
        """Test that autogenerate raises FileNotFoundError for non-existent path."""
        non_existent = self.project_path / "does_not_exist"
        with pytest.raises(FileNotFoundError) as exc_info:
            ProjectConfig.autogenerate(non_existent, self.serena_config, save_to_disk=False)
        assert "Project root not found" in str(exc_info.value)

    def test_autogenerate_with_gitignored_files_only(self):
        """Test autogenerate behavior when only gitignored files exist."""
        # Create a .gitignore that ignores all Python files
        gitignore = self.project_path / ".gitignore"
        gitignore.write_text("*.py\n")
        # Create Python files that will be ignored
        (self.project_path / "ignored.py").write_text("print('ignored')")
        # Should still raise ValueError as no source files are detected
        with pytest.raises(ValueError) as exc_info:
            ProjectConfig.autogenerate(self.project_path, self.serena_config, save_to_disk=False)
        assert "No source files found" in str(exc_info.value)

    def test_autogenerate_custom_project_name(self):
        """Test autogenerate with custom project name."""
        # Create a TypeScript file
        ts_file = self.project_path / "index.ts"
        ts_file.write_text("const greeting: string = 'Hello';\n")
        # Run autogenerate with custom name
        custom_name = "my-custom-project"
        config = ProjectConfig.autogenerate(self.project_path, self.serena_config, project_name=custom_name, save_to_disk=False)
        assert config.project_name == custom_name
        assert config.languages == [Language.TYPESCRIPT]
class TestProjectConfig:
    def test_template_is_complete(self):
        """The shipped project template must declare every configurable field."""
        load_result = ProjectConfig._load_yaml_dict(PROJECT_TEMPLATE_FILE)
        is_complete = load_result[1]
        assert is_complete, "Project template YAML is incomplete; all fields must be present (with descriptions)."
class TestProjectConfigLanguageBackend:
    """Tests for the per-project language_backend field."""

    def test_language_backend_defaults_to_none(self):
        """A config constructed without a backend override carries None."""
        cfg = ProjectConfig(project_name="test", languages=[Language.PYTHON])
        assert cfg.language_backend is None

    def test_language_backend_can_be_set(self):
        """The backend override can be supplied at construction time."""
        cfg = ProjectConfig(
            project_name="test",
            languages=[Language.PYTHON],
            language_backend=LanguageBackend.JETBRAINS,
        )
        assert cfg.language_backend == LanguageBackend.JETBRAINS

    def test_language_backend_roundtrips_through_yaml(self):
        """A set backend serializes to its string value in the YAML dict."""
        cfg = ProjectConfig(
            project_name="test",
            languages=[Language.PYTHON],
            language_backend=LanguageBackend.JETBRAINS,
        )
        serialized = cfg._to_yaml_dict()
        assert serialized["language_backend"] == "JetBrains"

    def test_language_backend_none_roundtrips_through_yaml(self):
        """An unset backend serializes to None in the YAML dict."""
        cfg = ProjectConfig(project_name="test", languages=[Language.PYTHON])
        serialized = cfg._to_yaml_dict()
        assert serialized["language_backend"] is None

    def test_language_backend_parsed_from_dict(self):
        """Test that _from_dict parses language_backend correctly."""
        data, _ = ProjectConfig._load_yaml_dict(PROJECT_TEMPLATE_FILE)
        data["project_name"] = "test"
        data["languages"] = ["python"]
        data["language_backend"] = "JetBrains"
        parsed = ProjectConfig._from_dict(data)
        assert parsed.language_backend == LanguageBackend.JETBRAINS

    def test_language_backend_none_when_missing_from_dict(self):
        """Test that _from_dict handles missing language_backend gracefully."""
        data, _ = ProjectConfig._load_yaml_dict(PROJECT_TEMPLATE_FILE)
        data["project_name"] = "test"
        data["languages"] = ["python"]
        data.pop("language_backend", None)
        parsed = ProjectConfig._from_dict(data)
        assert parsed.language_backend is None
def _make_config_with_project(
    project_name: str,
    language_backend: LanguageBackend | None = None,
    global_backend: LanguageBackend = LanguageBackend.LSP,
) -> tuple[SerenaConfig, str]:
    """Create a SerenaConfig with a single registered project and return (config, project_name)."""
    serena_config = SerenaConfig(
        gui_log_window=False,
        web_dashboard=False,
        log_level=logging.ERROR,
        language_backend=global_backend,
    )
    repo_root = Path(__file__).parent.parent / "resources" / "repos" / "python" / "test_repo"
    project_config = ProjectConfig(
        project_name=project_name,
        languages=[Language.PYTHON],
        language_backend=language_backend,
    )
    project = Project(
        project_root=str(repo_root),
        project_config=project_config,
        serena_config=serena_config,
    )
    serena_config.projects = [RegisteredProject.from_project_instance(project)]
    return serena_config, project_name
class TestEffectiveLanguageBackend:
    """Tests for per-project language_backend override logic in SerenaAgent."""

    def test_default_backend_is_global(self):
        """When no project override, effective backend matches global config."""
        config, name = _make_config_with_project("test_proj", language_backend=None, global_backend=LanguageBackend.LSP)
        agent = SerenaAgent(project=name, serena_config=config)
        try:
            assert agent.get_language_backend().is_lsp()
        finally:
            # Always tear the agent down so background resources don't leak between tests.
            agent.shutdown(timeout=5)

    def test_project_overrides_global_backend(self):
        """When startup project has language_backend set, it overrides the global."""
        config, name = _make_config_with_project(
            "test_jetbrains", language_backend=LanguageBackend.JETBRAINS, global_backend=LanguageBackend.LSP
        )
        agent = SerenaAgent(project=name, serena_config=config)
        try:
            # The project-level JETBRAINS override must win over the global LSP setting.
            assert agent.get_language_backend().is_jetbrains()
        finally:
            agent.shutdown(timeout=5)

    def test_no_project_uses_global_backend(self):
        """When no startup project is provided, effective backend is the global one."""
        config = SerenaConfig(
            gui_log_window=False,
            web_dashboard=False,
            log_level=logging.ERROR,
            language_backend=LanguageBackend.LSP,
        )
        agent = SerenaAgent(project=None, serena_config=config)
        try:
            assert agent.get_language_backend() == LanguageBackend.LSP
        finally:
            agent.shutdown(timeout=5)

    def test_activate_project_rejects_backend_mismatch(self):
        """Post-init activation of a project with mismatched backend raises ValueError."""
        # Start with LSP backend
        config, name = _make_config_with_project("lsp_proj", language_backend=None, global_backend=LanguageBackend.LSP)
        # Add a second project that requires JetBrains
        jb_project = Project(
            project_root=str(Path(__file__).parent.parent / "resources" / "repos" / "python" / "test_repo"),
            project_config=ProjectConfig(
                project_name="jb_proj",
                languages=[Language.PYTHON],
                language_backend=LanguageBackend.JETBRAINS,
            ),
            serena_config=config,
        )
        config.projects.append(RegisteredProject.from_project_instance(jb_project))
        agent = SerenaAgent(project=name, serena_config=config)
        try:
            # Switching backends mid-session is unsupported, so activation must fail loudly.
            with pytest.raises(ValueError, match="Cannot activate project"):
                agent.activate_project_from_path_or_name("jb_proj")
        finally:
            agent.shutdown(timeout=5)

    def test_activate_project_allows_matching_backend(self):
        """Post-init activation of a project with matching backend succeeds."""
        config, name = _make_config_with_project("lsp_proj", language_backend=None, global_backend=LanguageBackend.LSP)
        # Add a second project that also uses LSP
        lsp_project2 = Project(
            project_root=str(Path(__file__).parent.parent / "resources" / "repos" / "python" / "test_repo"),
            project_config=ProjectConfig(
                project_name="lsp_proj2",
                languages=[Language.PYTHON],
                language_backend=LanguageBackend.LSP,
            ),
            serena_config=config,
        )
        config.projects.append(RegisteredProject.from_project_instance(lsp_project2))
        agent = SerenaAgent(project=name, serena_config=config)
        try:
            # Should not raise
            agent.activate_project_from_path_or_name("lsp_proj2")
        finally:
            agent.shutdown(timeout=5)

    def test_activate_project_allows_none_backend(self):
        """Post-init activation of a project with no backend override succeeds."""
        config, name = _make_config_with_project("lsp_proj", language_backend=None, global_backend=LanguageBackend.LSP)
        # Add a second project with no backend override
        proj2 = Project(
            project_root=str(Path(__file__).parent.parent / "resources" / "repos" / "python" / "test_repo"),
            project_config=ProjectConfig(
                project_name="proj2",
                languages=[Language.PYTHON],
                language_backend=None,
            ),
            serena_config=config,
        )
        config.projects.append(RegisteredProject.from_project_instance(proj2))
        agent = SerenaAgent(project=name, serena_config=config)
        try:
            # Should not raise — None means "inherit session backend"
            agent.activate_project_from_path_or_name("proj2")
        finally:
            agent.shutdown(timeout=5)
class TestGetConfiguredProjectSerenaFolder:
    """Tests for SerenaConfig.get_configured_project_serena_folder (pure template resolution)."""

    @staticmethod
    def _config(location: "str | None" = None) -> SerenaConfig:
        """Build a minimal SerenaConfig, optionally with a custom folder-location template."""
        if location is None:
            return SerenaConfig(gui_log_window=False, web_dashboard=False)
        return SerenaConfig(gui_log_window=False, web_dashboard=False, project_serena_folder_location=location)

    def test_default_location(self):
        resolved = self._config().get_configured_project_serena_folder("/home/user/myproject")
        assert resolved == os.path.abspath("/home/user/myproject/.serena")

    def test_custom_location_with_project_folder_name(self):
        cfg = self._config("/projects-metadata/$projectFolderName/.serena")
        resolved = cfg.get_configured_project_serena_folder("/home/user/myproject")
        assert resolved == os.path.abspath("/projects-metadata/myproject/.serena")

    def test_custom_location_with_project_dir(self):
        cfg = self._config("$projectDir/.custom-serena")
        resolved = cfg.get_configured_project_serena_folder("/home/user/myproject")
        assert resolved == os.path.abspath("/home/user/myproject/.custom-serena")

    def test_custom_location_with_both_placeholders(self):
        cfg = self._config("/data/$projectFolderName/$projectDir/.serena")
        resolved = cfg.get_configured_project_serena_folder("/home/user/proj")
        assert resolved == os.path.abspath("/data/proj/home/user/proj/.serena")

    def test_default_field_value(self):
        assert self._config().project_serena_folder_location == DEFAULT_PROJECT_SERENA_FOLDER_LOCATION

    def test_rejects_unknown_placeholder(self):
        cfg = self._config("$projectDir/$unknownVar/.serena")
        with pytest.raises(SerenaConfigError, match=r"Unknown placeholder '\$unknownVar'"):
            cfg.get_configured_project_serena_folder("/home/user/myproject")

    def test_rejects_typo_projectDirs(self):
        """$projectDirs should not be silently treated as $projectDir + 's'."""
        cfg = self._config("$projectDirs/.serena")
        with pytest.raises(SerenaConfigError, match=r"Unknown placeholder '\$projectDirs'"):
            cfg.get_configured_project_serena_folder("/home/user/myproject")

    def test_rejects_typo_projectfoldername_lowercase(self):
        cfg = self._config("/data/$projectfoldername/.serena")
        with pytest.raises(SerenaConfigError, match=r"Unknown placeholder '\$projectfoldername'"):
            cfg.get_configured_project_serena_folder("/home/user/myproject")

    def test_no_placeholders_is_valid(self):
        cfg = self._config("/fixed/path/.serena")
        resolved = cfg.get_configured_project_serena_folder("/home/user/myproject")
        assert resolved == os.path.abspath("/fixed/path/.serena")

    def test_error_message_lists_supported_placeholders(self):
        cfg = self._config("$bogus/.serena")
        with pytest.raises(SerenaConfigError, match=r"\$projectDir.*\$projectFolderName|\$projectFolderName.*\$projectDir"):
            cfg.get_configured_project_serena_folder("/home/user/myproject")
class TestProjectSerenaDataFolder:
    """Tests for SerenaConfig.get_project_serena_folder fallback logic (via Project)."""

    def setup_method(self):
        # Fresh isolated temp project per test, with one source file so it looks like a real repo.
        self.test_dir = tempfile.mkdtemp()
        self.project_path = Path(self.test_dir) / "myproject"
        self.project_path.mkdir()
        (self.project_path / "main.py").write_text("print('hello')\n")

    def teardown_method(self):
        shutil.rmtree(self.test_dir)

    def _make_project(self, serena_config: "SerenaConfig | None" = None) -> Project:
        # Helper: wrap the temp repo in a Project under the given SerenaConfig.
        project_config = ProjectConfig(
            project_name="myproject",
            languages=[Language.PYTHON],
        )
        project = Project(
            project_root=str(self.project_path),
            project_config=project_config,
            serena_config=serena_config,
        )
        # Block until the background ignore-spec computation finishes, so tests
        # don't race against Project initialization.
        project._ignore_spec_available.wait()
        return project

    def test_default_config_creates_in_project_dir(self):
        # With a default config, the data folder lives inside the project root.
        config = SerenaConfig(gui_log_window=False, web_dashboard=False)
        project = self._make_project(config)
        expected = os.path.abspath(str(self.project_path / SERENA_MANAGED_DIR_NAME))
        assert project.path_to_serena_data_folder() == expected

    def test_custom_location_creates_outside_project(self):
        # A custom template may place the data folder entirely outside the project tree.
        custom_base = Path(self.test_dir) / "metadata"
        custom_base.mkdir()
        config = SerenaConfig(
            gui_log_window=False,
            web_dashboard=False,
            project_serena_folder_location=str(custom_base) + "/$projectFolderName/.serena",
        )
        project = self._make_project(config)
        expected = os.path.abspath(str(custom_base / "myproject" / ".serena"))
        assert project.path_to_serena_data_folder() == expected

    def test_fallback_to_existing_project_dir(self):
        """If config points to a non-existent path but .serena exists in the project root, use the existing one."""
        existing_serena = self.project_path / SERENA_MANAGED_DIR_NAME
        existing_serena.mkdir()
        config = SerenaConfig(
            gui_log_window=False,
            web_dashboard=False,
            project_serena_folder_location="/nonexistent/path/$projectFolderName/.serena",
        )
        project = self._make_project(config)
        assert project.path_to_serena_data_folder() == str(existing_serena)

    def test_configured_path_takes_precedence_when_exists(self):
        """If both config path and project root path exist, use the config path."""
        # Both candidate folders exist on disk; the configured one must win.
        existing_serena = self.project_path / SERENA_MANAGED_DIR_NAME
        existing_serena.mkdir()
        custom_base = Path(self.test_dir) / "metadata"
        custom_serena = custom_base / "myproject" / ".serena"
        custom_serena.mkdir(parents=True)
        config = SerenaConfig(
            gui_log_window=False,
            web_dashboard=False,
            project_serena_folder_location=str(custom_base) + "/$projectFolderName/.serena",
        )
        project = self._make_project(config)
        assert project.path_to_serena_data_folder() == str(custom_serena)
class TestMemoriesManagerCustomPath:
    """Tests for MemoriesManager pointed at a non-default serena data folder."""

    def setup_method(self):
        # Fresh temp dir per test; the data folder itself is deliberately NOT
        # created here, so tests can observe MemoriesManager creating it.
        self.test_dir = tempfile.mkdtemp()
        self.data_folder = Path(self.test_dir) / "custom_serena"

    def teardown_method(self):
        shutil.rmtree(self.test_dir)

    def test_memories_subdir_is_created(self):
        assert not self.data_folder.exists()
        MemoriesManager(str(self.data_folder))
        assert (self.data_folder / "memories").exists()

    def test_save_and_load_memory(self):
        mgr = MemoriesManager(str(self.data_folder))
        mgr.save_memory("test_topic", "test content", is_tool_context=False)
        assert mgr.load_memory("test_topic") == "test content"

    def test_list_memories(self):
        mgr = MemoriesManager(str(self.data_folder))
        for topic, body in (("topic_a", "content a"), ("topic_b", "content b")):
            mgr.save_memory(topic, body, is_tool_context=False)
        listed = mgr.list_project_memories()
        assert sorted(listed.get_full_list()) == ["topic_a", "topic_b"]
| {
"repo_id": "oraios/serena",
"file_path": "test/serena/config/test_serena_config.py",
"license": "MIT License",
"lines": 435,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/serena/util/test_exception.py | import os
from unittest.mock import MagicMock, Mock, patch
import pytest
from serena.util.exception import is_headless_environment, show_fatal_exception_safe
class TestHeadlessEnvironmentDetection:
    """Tests for is_headless_environment() across platforms and environment markers."""

    def test_is_headless_no_display(self):
        """Linux with an empty environment (no DISPLAY) counts as headless."""
        with patch("sys.platform", "linux"), patch.dict(os.environ, {}, clear=True):
            assert is_headless_environment() is True

    def test_is_headless_ssh_connection(self):
        """SSH sessions are headless even when DISPLAY is set."""
        ssh_variants = (
            {"SSH_CONNECTION": "192.168.1.1 22 192.168.1.2 22", "DISPLAY": ":0"},
            {"SSH_CLIENT": "192.168.1.1 22 22", "DISPLAY": ":0"},
        )
        with patch("sys.platform", "linux"):
            for env in ssh_variants:
                with patch.dict(os.environ, env):
                    assert is_headless_environment() is True

    def test_is_headless_wsl(self):
        """A WSL kernel release string marks the environment as headless."""
        # os.uname doesn't exist on Windows, so this test can't run there.
        if not hasattr(os, "uname"):
            pytest.skip("os.uname not available on this platform")
        with patch("sys.platform", "linux"), patch("os.uname") as mock_uname:
            mock_uname.return_value = Mock(release="5.15.153.1-microsoft-standard-WSL2")
            with patch.dict(os.environ, {"DISPLAY": ":0"}):
                assert is_headless_environment() is True

    def test_is_headless_docker(self):
        """Container/CI indicators are treated as headless."""
        with patch("sys.platform", "linux"):
            # CI and CONTAINER environment variables each suffice on their own.
            for env in ({"CI": "true", "DISPLAY": ":0"}, {"CONTAINER": "docker", "DISPLAY": ":0"}):
                with patch.dict(os.environ, env):
                    assert is_headless_environment() is True
            # Presence of the /.dockerenv marker file also suffices.
            with patch("os.path.exists") as mock_exists:
                mock_exists.return_value = True
                with patch.dict(os.environ, {"DISPLAY": ":0"}):
                    assert is_headless_environment() is True

    def test_is_not_headless_windows(self):
        """Windows is never considered headless, even without DISPLAY."""
        with patch("sys.platform", "win32"), patch.dict(os.environ, {}, clear=True):
            assert is_headless_environment() is False
class TestShowFatalExceptionSafe:
    """Test class for safe fatal exception display functionality."""

    # NOTE: stacked @patch decorators inject mocks bottom-up, so the decorator
    # closest to the function ("...log") binds to the first mock parameter.
    @patch("serena.util.exception.is_headless_environment", return_value=True)
    @patch("serena.util.exception.log")
    def test_show_fatal_exception_safe_headless(self, mock_log, mock_is_headless):
        """Test that GUI is not attempted in headless environment."""
        test_exception = ValueError("Test error")
        # The import should never happen in headless mode
        with patch("serena.gui_log_viewer.show_fatal_exception") as mock_show_gui:
            show_fatal_exception_safe(test_exception)
            mock_show_gui.assert_not_called()
        # Verify debug log about skipping GUI
        mock_log.debug.assert_called_once_with("Skipping GUI error display in headless environment")

    @patch("serena.util.exception.is_headless_environment", return_value=False)
    @patch("serena.util.exception.log")
    def test_show_fatal_exception_safe_with_gui(self, mock_log, mock_is_headless):
        """Test that GUI is attempted when not in headless environment."""
        test_exception = ValueError("Test error")
        # Mock the GUI function
        with patch("serena.gui_log_viewer.show_fatal_exception") as mock_show_gui:
            show_fatal_exception_safe(test_exception)
            mock_show_gui.assert_called_once_with(test_exception)

    @patch("serena.util.exception.is_headless_environment", return_value=False)
    @patch("serena.util.exception.log")
    def test_show_fatal_exception_safe_gui_failure(self, mock_log, mock_is_headless):
        """Test graceful handling when GUI display fails."""
        test_exception = ValueError("Test error")
        gui_error = ImportError("No module named 'tkinter'")
        # Mock the GUI function to raise an exception; the failure must be
        # swallowed (not propagated) by show_fatal_exception_safe.
        with patch("serena.gui_log_viewer.show_fatal_exception", side_effect=gui_error):
            show_fatal_exception_safe(test_exception)
        # Verify debug log about GUI failure
        mock_log.debug.assert_called_with(f"Failed to show GUI error dialog: {gui_error}")

    def test_show_fatal_exception_safe_prints_to_stderr(self):
        """Test that exceptions are always printed to stderr."""
        test_exception = ValueError("Test error message")
        with patch("sys.stderr", new_callable=MagicMock) as mock_stderr:
            with patch("serena.util.exception.is_headless_environment", return_value=True):
                with patch("serena.util.exception.log"):
                    show_fatal_exception_safe(test_exception)
            # Verify print was called with the correct arguments
            mock_stderr.write.assert_any_call("Fatal exception: Test error message")
| {
"repo_id": "oraios/serena",
"file_path": "test/serena/util/test_exception.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/elixir/test_elixir_basic.py | """
Basic integration tests for the Elixir language server functionality.
These tests validate the functionality of the language server APIs
like request_references using the test repository.
"""
import os
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from . import EXPERT_UNAVAILABLE, EXPERT_UNAVAILABLE_REASON
# These marks will be applied to all tests in this module
pytestmark = [pytest.mark.elixir, pytest.mark.skipif(EXPERT_UNAVAILABLE, reason=f"Next LS not available: {EXPERT_UNAVAILABLE_REASON}")]
class TestElixirBasic:
    """Basic Elixir language server functionality tests."""

    @staticmethod
    def _find_module_function(top_level_symbols, module_name, name_prefix):
        """Return the first function child of the named module, or None.

        Walks the plain-dict document symbols: a module has LSP SymbolKind 2
        and its function children have kind 12. Only the first module whose
        name equals *module_name* is inspected (same early-break behavior as
        the previously triplicated inline lookup this helper replaces).

        :param top_level_symbols: top-level symbol dicts for one document
        :param module_name: exact module name to look for (e.g. "User")
        :param name_prefix: prefix of the function symbol name (e.g. "def new(")
        """
        for symbol in top_level_symbols:
            if symbol.get("name") == module_name and symbol.get("kind") == 2:  # Module
                for child in symbol.get("children", []):
                    if child.get("name", "").startswith(name_prefix) and child.get("kind") == 12:  # Function
                        return child
                return None
        return None

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_references_function_definition(self, language_server: SolidLanguageServer):
        """Test finding references to a function definition."""
        file_path = os.path.join("lib", "models.ex")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Find the User module's 'new' function
        user_new_symbol = self._find_module_function(symbols[0], "User", "def new(")
        if not user_new_symbol or "selectionRange" not in user_new_symbol:
            pytest.skip("User.new function or its selectionRange not found")
        sel_start = user_new_symbol["selectionRange"]["start"]
        references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
        assert references is not None
        assert len(references) > 0
        # Should find at least one reference (the definition itself)
        found_definition = any(ref["uri"].endswith("models.ex") for ref in references)
        assert found_definition, "Should find the function definition"

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_references_create_user_function(self, language_server: SolidLanguageServer):
        """Test finding references to create_user function."""
        file_path = os.path.join("lib", "services.ex")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Find the UserService module's 'create_user' function
        create_user_symbol = self._find_module_function(symbols[0], "UserService", "def create_user(")
        if not create_user_symbol or "selectionRange" not in create_user_symbol:
            pytest.skip("UserService.create_user function or its selectionRange not found")
        sel_start = create_user_symbol["selectionRange"]["start"]
        references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
        assert references is not None
        assert len(references) > 0

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_referencing_symbols_function(self, language_server: SolidLanguageServer):
        """Test finding symbols that reference a specific function."""
        file_path = os.path.join("lib", "models.ex")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Find the User module's 'new' function
        user_new_symbol = self._find_module_function(symbols[0], "User", "def new(")
        if not user_new_symbol or "selectionRange" not in user_new_symbol:
            pytest.skip("User.new function or its selectionRange not found")
        sel_start = user_new_symbol["selectionRange"]["start"]
        referencing_symbols = language_server.request_referencing_symbols(file_path, sel_start["line"], sel_start["character"])
        assert referencing_symbols is not None

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_timeout_enumeration_bug(self, language_server: SolidLanguageServer):
        """Test that enumeration doesn't timeout (regression test)."""
        # This should complete without timing out
        symbols = language_server.request_document_symbols("lib/models.ex").get_all_symbols_and_roots()
        assert symbols is not None
        # Test multiple symbol requests in succession to catch cumulative slowdowns
        for _ in range(3):
            symbols = language_server.request_document_symbols("lib/services.ex").get_all_symbols_and_roots()
            assert symbols is not None
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/elixir/test_elixir_basic.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/elixir/test_elixir_ignored_dirs.py | import os
from collections.abc import Generator
from pathlib import Path
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from test.conftest import start_ls_context
from . import EXPERT_UNAVAILABLE, EXPERT_UNAVAILABLE_REASON
# These marks will be applied to all tests in this module
pytestmark = [pytest.mark.elixir, pytest.mark.skipif(EXPERT_UNAVAILABLE, reason=f"Expert not available: {EXPERT_UNAVAILABLE_REASON}")]
# Skip slow tests in CI - they require multiple Expert instances which is too slow
# True when running under a CI system (generic CI or GitHub Actions env vars).
IN_CI = bool(os.environ.get("CI") or os.environ.get("GITHUB_ACTIONS"))
# Reusable skip marker for tests that need their own Expert instance (~60-90s startup each).
SKIP_SLOW_IN_CI = pytest.mark.skipif(
    IN_CI,
    reason="Slow tests skipped in CI - require multiple Expert instances (~60-90s each)",
)
@pytest.fixture(scope="session")
def ls_with_ignored_dirs() -> Generator[SolidLanguageServer, None, None]:
    """Fixture to set up an LS for the elixir test repo with the 'scripts' directory ignored.

    Uses session scope to avoid restarting Expert for each test.
    """
    ignored_paths = ["scripts", "ignored_dir"]
    # Context manager guarantees the language server is shut down after the session.
    with start_ls_context(language=Language.ELIXIR, ignored_paths=ignored_paths) as ls:
        yield ls
@pytest.mark.slow
@SKIP_SLOW_IN_CI
def test_symbol_tree_ignores_dir(ls_with_ignored_dirs: SolidLanguageServer):
    """Verify request_full_symbol_tree honors the configured ignore list.

    Note: This test uses a separate Expert instance with custom ignored paths,
    which adds ~60-90s startup time.
    """
    tree_root = ls_with_ignored_dirs.request_full_symbol_tree()[0]
    children_names = {node["name"] for node in tree_root["children"]}
    # Real source folders must be present...
    expected_dirs = {"lib", "test"}
    assert expected_dirs.issubset(children_names), f"Expected {expected_dirs} to be in {children_names}"
    # ...while both configured ignore entries must be absent.
    assert "scripts" not in children_names, f"scripts should not be in {children_names}"
    assert "ignored_dir" not in children_names, f"ignored_dir should not be in {children_names}"
@pytest.mark.slow
@SKIP_SLOW_IN_CI
def test_find_references_ignores_dir(ls_with_ignored_dirs: SolidLanguageServer):
    """Verify find_references skips files under the configured ignored directories.

    Note: This test uses a separate Expert instance with custom ignored paths,
    which adds ~60-90s startup time.
    """
    definition_file = "lib/models.ex"
    # Locate the User struct definition; it is referenced from scripts and ignored_dir.
    symbols = ls_with_ignored_dirs.request_document_symbols(definition_file).get_all_symbols_and_roots()
    struct_symbol = None
    for group in symbols:
        struct_symbol = next((s for s in group if "User" in s.get("name", "")), None)
        if struct_symbol:
            break
    if not struct_symbol or "selectionRange" not in struct_symbol:
        pytest.skip("User symbol not found for reference testing")
    sel_start = struct_symbol["selectionRange"]["start"]
    references = ls_with_ignored_dirs.request_references(definition_file, sel_start["line"], sel_start["character"])
    # Neither ignored directory may show up in the reference paths.
    assert not any("scripts" in ref["relativePath"] for ref in references), "scripts should be ignored"
    assert not any("ignored_dir" in ref["relativePath"] for ref in references), "ignored_dir should be ignored"
@pytest.mark.slow
@SKIP_SLOW_IN_CI
@pytest.mark.parametrize("repo_path", [Language.ELIXIR], indirect=True)
def test_refs_and_symbols_with_glob_patterns(repo_path: Path) -> None:
    """Tests that refs and symbols with glob patterns are ignored.

    Note: This test uses a separate Expert instance with custom ignored paths,
    which adds ~60-90s startup time.
    """
    # Glob patterns intended to match 'scripts' and 'ignored_dir' respectively.
    ignored_paths = ["*cripts", "ignored_*"]  # codespell:ignore cripts
    with start_ls_context(language=Language.ELIXIR, repo_path=str(repo_path), ignored_paths=ignored_paths) as ls:
        # Same as in the above tests
        root = ls.request_full_symbol_tree()[0]
        root_children = root["children"]
        children_names = {child["name"] for child in root_children}
        # Should have lib and test directories, but not scripts or ignored_dir
        expected_dirs = {"lib", "test"}
        assert expected_dirs.issubset(children_names), f"Expected {expected_dirs} to be in {children_names}"
        assert "scripts" not in children_names, f"scripts should not be in {children_names} (glob pattern)"
        assert "ignored_dir" not in children_names, f"ignored_dir should not be in {children_names} (glob pattern)"
        # Test that the refs and symbols with glob patterns are ignored
        definition_file = "lib/models.ex"
        # Find the User struct definition
        symbols = ls.request_document_symbols(definition_file).get_all_symbols_and_roots()
        user_symbol = None
        for symbol_group in symbols:
            user_symbol = next((s for s in symbol_group if "User" in s.get("name", "")), None)
            if user_symbol:
                break
        # NOTE(review): unlike the non-glob test above, a missing User symbol is
        # silently tolerated here rather than skipping the test.
        if user_symbol and "selectionRange" in user_symbol:
            sel_start = user_symbol["selectionRange"]["start"]
            references = ls.request_references(definition_file, sel_start["line"], sel_start["character"])
            # Assert that scripts and ignored_dir do not appear in references
            assert not any("scripts" in ref["relativePath"] for ref in references), "scripts should be ignored (glob)"
            assert not any("ignored_dir" in ref["relativePath"] for ref in references), "ignored_dir should be ignored (glob)"
@pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
def test_default_ignored_directories(language_server: SolidLanguageServer):
    """Verify the Elixir defaults: build artifacts ignored, source dirs kept."""
    # Build/dependency directories must be ignored out of the box.
    for dirname in ("_build", "deps", ".elixir_ls", "cover", "node_modules"):
        assert language_server.is_ignored_dirname(dirname), f"{dirname} should be ignored"
    # Real source directories must never be ignored.
    for dirname in ("lib", "test", "config", "priv"):
        assert not language_server.is_ignored_dirname(dirname), f"{dirname} should not be ignored"
@pytest.mark.xfail(
    reason="Expert 0.1.0 bug: document_symbols may return nil for some files (flaky)",
    raises=Exception,
)
@pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
def test_symbol_tree_excludes_build_dirs(language_server: SolidLanguageServer):
    """Verify the symbol tree omits build/dependency directories but keeps sources."""
    symbol_tree = language_server.request_full_symbol_tree()
    if not symbol_tree:
        return
    children_names = {child["name"] for child in symbol_tree[0].get("children", [])}
    # None of the build/dependency folders may surface at the top level.
    found_ignored = {"_build", "deps", ".elixir_ls", "cover", "node_modules"} & children_names
    assert len(found_ignored) == 0, f"Found ignored directories in symbol tree: {found_ignored}"
    # At least one genuine source folder must be present.
    important_dirs = {"lib", "test"}
    found_important = important_dirs & children_names
    assert len(found_important) > 0, f"Expected to find important directories: {important_dirs}, got: {children_names}"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/elixir/test_elixir_ignored_dirs.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/elixir/test_elixir_integration.py | """
Integration tests for Elixir language server with test repository.
These tests verify that the language server works correctly with a real Elixir project
and can perform advanced operations like cross-file symbol resolution.
"""
import os
from pathlib import Path
import pytest
from serena.project import Project
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from . import EXPERT_UNAVAILABLE, EXPERT_UNAVAILABLE_REASON
# These marks will be applied to all tests in this module
pytestmark = [pytest.mark.elixir, pytest.mark.skipif(EXPERT_UNAVAILABLE, reason=f"Next LS not available: {EXPERT_UNAVAILABLE_REASON}")]
class TestElixirIntegration:
    """Integration tests for Elixir language server with test repository."""

    @pytest.fixture
    def elixir_test_repo_path(self):
        """Get the path to the Elixir test repository."""
        # Resolve test/resources/repos/elixir/test_repo relative to this test file.
        test_dir = Path(__file__).parent.parent.parent
        return str(test_dir / "resources" / "repos" / "elixir" / "test_repo")

    def test_elixir_repo_structure(self, elixir_test_repo_path):
        """Test that the Elixir test repository has the expected structure."""
        repo_path = Path(elixir_test_repo_path)
        # Check that key files exist
        assert (repo_path / "mix.exs").exists(), "mix.exs should exist"
        assert (repo_path / "lib" / "test_repo.ex").exists(), "main module should exist"
        assert (repo_path / "lib" / "utils.ex").exists(), "utils module should exist"
        assert (repo_path / "lib" / "models.ex").exists(), "models module should exist"
        assert (repo_path / "lib" / "services.ex").exists(), "services module should exist"
        assert (repo_path / "lib" / "examples.ex").exists(), "examples module should exist"
        assert (repo_path / "test" / "test_repo_test.exs").exists(), "test file should exist"
        assert (repo_path / "test" / "models_test.exs").exists(), "models test should exist"

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_cross_file_symbol_resolution(self, language_server: SolidLanguageServer):
        """Test that symbols can be resolved across different files."""
        # Test that User struct from models.ex can be found when referenced in services.ex
        services_file = os.path.join("lib", "services.ex")
        # Find where User is referenced in services.ex
        content = language_server.retrieve_full_file_content(services_file)
        lines = content.split("\n")
        user_reference_line = None
        for i, line in enumerate(lines):
            if "alias TestRepo.Models.{User" in line:
                user_reference_line = i
                break
        if user_reference_line is None:
            pytest.skip("Could not find User reference in services.ex")
        # Try to find the definition.
        # NOTE(review): column 30 is a hard-coded offset assumed to land inside the
        # 'User' token on the alias line — verify against the fixture file if it drifts.
        defining_symbol = language_server.request_defining_symbol(services_file, user_reference_line, 30)
        if defining_symbol and "location" in defining_symbol:
            # Should point to models.ex
            assert "models.ex" in defining_symbol["location"]["uri"]

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_module_hierarchy_understanding(self, language_server: SolidLanguageServer):
        """Test that the language server understands Elixir module hierarchy."""
        models_file = os.path.join("lib", "models.ex")
        symbols = language_server.request_document_symbols(models_file).get_all_symbols_and_roots()
        if symbols:
            # Flatten symbol structure (groups may be lists or single symbol dicts)
            all_symbols = []
            for symbol_group in symbols:
                if isinstance(symbol_group, list):
                    all_symbols.extend(symbol_group)
                else:
                    all_symbols.append(symbol_group)
            symbol_names = [s.get("name", "") for s in all_symbols]
            # Should understand nested module structure
            expected_modules = ["TestRepo.Models", "User", "Item", "Order"]
            found_modules = [name for name in expected_modules if any(name in symbol_name for symbol_name in symbol_names)]
            assert len(found_modules) > 0, f"Expected modules {expected_modules}, found symbols {symbol_names}"

    def test_file_extension_matching(self):
        """Test that the Elixir language recognizes the correct file extensions."""
        language = Language.ELIXIR
        matcher = language.get_source_fn_matcher()
        # Test Elixir file extensions (.ex source, .exs scripts/config)
        assert matcher.is_relevant_filename("lib/test_repo.ex")
        assert matcher.is_relevant_filename("test/test_repo_test.exs")
        assert matcher.is_relevant_filename("config/config.exs")
        assert matcher.is_relevant_filename("mix.exs")
        assert matcher.is_relevant_filename("lib/models.ex")
        assert matcher.is_relevant_filename("lib/services.ex")
        # Test non-Elixir files
        assert not matcher.is_relevant_filename("README.md")
        assert not matcher.is_relevant_filename("lib/test_repo.py")
        assert not matcher.is_relevant_filename("package.json")
        assert not matcher.is_relevant_filename("Cargo.toml")
class TestElixirProject:
    """Project-level (pattern-search) checks against the Elixir test repository."""

    @pytest.mark.parametrize("project", [Language.ELIXIR], indirect=True)
    def test_comprehensive_symbol_search(self, project: Project):
        """Test comprehensive symbol search across the entire project."""
        # Function definitions should be found across several source files.
        function_matches = project.search_source_files_for_pattern(r"def\s+\w+\s*[\(\s]")
        if function_matches:
            files_with_functions = {
                os.path.basename(match.source_file_path) for match in function_matches if match.source_file_path
            }
            expected_files = {"models.ex", "services.ex", "examples.ex", "utils.ex", "test_repo.ex"}
            found_files = expected_files.intersection(files_with_functions)
            assert len(found_files) > 0, f"Expected functions in {expected_files}, found in {files_with_functions}"
        # Struct definitions are expected primarily in models.ex.
        struct_matches = project.search_source_files_for_pattern(r"defstruct\s+\[")
        if struct_matches:
            models_structs = [m for m in struct_matches if m.source_file_path and "models.ex" in m.source_file_path]
            assert len(models_structs) > 0, "Should find struct definitions in models.ex"

    @pytest.mark.parametrize("project", [Language.ELIXIR], indirect=True)
    def test_protocol_and_implementation_understanding(self, project: Project):
        """Test that the language server understands Elixir protocols and implementations."""
        # The Serializable protocol is defined in models.ex.
        protocol_matches = project.search_source_files_for_pattern(r"defprotocol\s+\w+", paths_include_glob="**/models.ex")
        if protocol_matches:
            serializable_matches = [m for m in protocol_matches if "Serializable" in str(m)]
            assert len(serializable_matches) > 0, "Should find Serializable protocol definition"
        # Each model struct provides its own defimpl, so several should be present.
        impl_matches = project.search_source_files_for_pattern(r"defimpl\s+\w+", paths_include_glob="**/models.ex")
        if impl_matches:
            assert len(impl_matches) >= 3, f"Should find at least 3 protocol implementations, found {len(impl_matches)}"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/elixir/test_elixir_integration.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/elixir/test_elixir_symbol_retrieval.py | """
Tests for the Elixir language server symbol-related functionality.
These tests focus on the following methods:
- request_containing_symbol
- request_referencing_symbols
- request_defining_symbol
"""
import os
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_types import SymbolKind
from . import EXPERT_UNAVAILABLE, EXPERT_UNAVAILABLE_REASON
# These marks will be applied to all tests in this module:
# - "elixir" so the suite can be (de)selected by marker, and
# - a module-wide skip when the Expert ("Next LS") language server is unavailable.
pytestmark = [pytest.mark.elixir, pytest.mark.skipif(EXPERT_UNAVAILABLE, reason=f"Next LS not available: {EXPERT_UNAVAILABLE_REASON}")]
class TestElixirLanguageServerSymbols:
    """Test the Elixir language server's symbol-related functionality."""

    # NOTE(review): several tests below locate target positions by scanning the file
    # content for known code snippets instead of hard-coding line numbers, so they
    # survive edits to the test repository.

    @pytest.mark.xfail(
        reason="Expert 0.1.0 bug: document_symbols returns nil for some files (FunctionClauseError in XPExpert.EngineApi.document_symbols/2)"
    )
    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_containing_symbol_function(self, language_server: SolidLanguageServer) -> None:
        """Test request_containing_symbol for a function."""
        # Test for a position inside the create_user function
        file_path = os.path.join("lib", "services.ex")
        # Find the create_user function in the file
        content = language_server.retrieve_full_file_content(file_path)
        lines = content.split("\n")
        create_user_line = None
        for i, line in enumerate(lines):
            if "def create_user(" in line:
                create_user_line = i + 2  # Go inside the function body
                break
        if create_user_line is None:
            pytest.skip("Could not find create_user function")
        containing_symbol = language_server.request_containing_symbol(file_path, create_user_line, 10, include_body=True)
        # Verify that we found the containing symbol
        if containing_symbol:
            # Next LS returns the full function signature instead of just the function name
            assert containing_symbol["name"] == "def create_user(pid, id, name, email, roles \\\\ [])"
            assert containing_symbol["kind"] == SymbolKind.Method or containing_symbol["kind"] == SymbolKind.Function
            if "body" in containing_symbol:
                assert "def create_user" in containing_symbol["body"].get_text()

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_containing_symbol_module(self, language_server: SolidLanguageServer) -> None:
        """Test request_containing_symbol for a module."""
        # Test for a position inside the UserService module but outside any function
        file_path = os.path.join("lib", "services.ex")
        # Find the UserService module definition
        content = language_server.retrieve_full_file_content(file_path)
        lines = content.split("\n")
        user_service_line = None
        for i, line in enumerate(lines):
            if "defmodule UserService do" in line:
                user_service_line = i + 1  # Go inside the module
                break
        if user_service_line is None:
            pytest.skip("Could not find UserService module")
        containing_symbol = language_server.request_containing_symbol(file_path, user_service_line, 5)
        # Verify that we found the containing symbol
        if containing_symbol:
            assert "UserService" in containing_symbol["name"]
            assert containing_symbol["kind"] == SymbolKind.Module or containing_symbol["kind"] == SymbolKind.Class

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_containing_symbol_nested(self, language_server: SolidLanguageServer) -> None:
        """Test request_containing_symbol with nested scopes."""
        # Test for a position inside a function which is inside a module
        file_path = os.path.join("lib", "services.ex")
        # Find a function inside UserService
        content = language_server.retrieve_full_file_content(file_path)
        lines = content.split("\n")
        function_body_line = None
        for i, line in enumerate(lines):
            if "def create_user(" in line:
                function_body_line = i + 3  # Go deeper into the function body
                break
        if function_body_line is None:
            pytest.skip("Could not find function body")
        containing_symbol = language_server.request_containing_symbol(file_path, function_body_line, 15)
        # Verify that we found the innermost containing symbol (the function)
        if containing_symbol:
            # Either the function or its enclosing module is an acceptable answer here.
            expected_names = ["create_user", "UserService"]
            assert any(name in containing_symbol["name"] for name in expected_names)

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_containing_symbol_none(self, language_server: SolidLanguageServer) -> None:
        """Test request_containing_symbol for a position with no containing symbol."""
        # Test for a position outside any function/module (e.g., in module doc)
        file_path = os.path.join("lib", "services.ex")
        # Line 1-3 are likely in module documentation or imports
        containing_symbol = language_server.request_containing_symbol(file_path, 2, 10)
        # Should return None or an empty dictionary, or the top-level module
        # This is acceptable behavior for module-level positions
        assert containing_symbol is None or containing_symbol == {} or "TestRepo.Services" in str(containing_symbol)

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_referencing_symbols_struct(self, language_server: SolidLanguageServer) -> None:
        """Test request_referencing_symbols for a struct."""
        # Test referencing symbols for User struct
        file_path = os.path.join("lib", "models.ex")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        user_symbol = None
        for symbol_group in symbols:
            user_symbol = next((s for s in symbol_group if "User" in s.get("name", "")), None)
            if user_symbol:
                break
        if not user_symbol or "selectionRange" not in user_symbol:
            pytest.skip("User symbol or its selectionRange not found")
        sel_start = user_symbol["selectionRange"]["start"]
        ref_symbols = [
            ref.symbol for ref in language_server.request_referencing_symbols(file_path, sel_start["line"], sel_start["character"])
        ]
        if ref_symbols:
            services_references = [
                symbol
                for symbol in ref_symbols
                if "location" in symbol and "uri" in symbol["location"] and "services.ex" in symbol["location"]["uri"]
            ]
            # We expect some references from services.ex
            # NOTE(review): this assertion is vacuous (always true); it appears to be a
            # deliberate smoke check — consider tightening to >= 1 once Expert's
            # reference results are reliable.
            assert len(services_references) >= 0  # At least attempt to find references

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_referencing_symbols_none(self, language_server: SolidLanguageServer) -> None:
        """Test request_referencing_symbols for a position with no symbol."""
        file_path = os.path.join("lib", "services.ex")
        # Line 3 is likely a blank line or comment
        try:
            ref_symbols = [ref.symbol for ref in language_server.request_referencing_symbols(file_path, 3, 0)]
            # If we get here, make sure we got an empty result
            assert ref_symbols == [] or ref_symbols is None
        except Exception:
            # The method might raise an exception for invalid positions
            # which is acceptable behavior
            pass

    # Tests for request_defining_symbol
    @pytest.mark.xfail(
        reason="Expert 0.1.0 bug: definition request crashes (FunctionClauseError in XPExpert.Protocol.Conversions.to_elixir/2)"
    )
    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_defining_symbol_function_call(self, language_server: SolidLanguageServer) -> None:
        """Test request_defining_symbol for a function call."""
        # Find a place where User.new is called in services.ex
        file_path = os.path.join("lib", "services.ex")
        content = language_server.retrieve_full_file_content(file_path)
        lines = content.split("\n")
        user_new_call_line = None
        for i, line in enumerate(lines):
            if "User.new(" in line:
                user_new_call_line = i
                break
        if user_new_call_line is None:
            pytest.skip("Could not find User.new call")
        # Try to find the definition of User.new
        defining_symbol = language_server.request_defining_symbol(file_path, user_new_call_line, 15)
        if defining_symbol:
            assert defining_symbol.get("name") == "new" or "User" in defining_symbol.get("name", "")
            if "location" in defining_symbol and "uri" in defining_symbol["location"]:
                assert "models.ex" in defining_symbol["location"]["uri"]

    @pytest.mark.xfail(
        reason="Expert 0.1.0 bug: definition request crashes (FunctionClauseError in XPExpert.Protocol.Conversions.to_elixir/2)"
    )
    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_defining_symbol_struct_usage(self, language_server: SolidLanguageServer) -> None:
        """Test request_defining_symbol for a struct usage."""
        # Find a place where User struct is used in services.ex
        file_path = os.path.join("lib", "services.ex")
        content = language_server.retrieve_full_file_content(file_path)
        lines = content.split("\n")
        user_usage_line = None
        for i, line in enumerate(lines):
            if "alias TestRepo.Models.{User" in line:
                user_usage_line = i
                break
        if user_usage_line is None:
            pytest.skip("Could not find User struct usage")
        defining_symbol = language_server.request_defining_symbol(file_path, user_usage_line, 30)
        if defining_symbol:
            assert "User" in defining_symbol.get("name", "")

    @pytest.mark.xfail(
        reason="Expert 0.1.0 bug: definition request crashes (FunctionClauseError in XPExpert.Protocol.Conversions.to_elixir/2)"
    )
    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_defining_symbol_none(self, language_server: SolidLanguageServer) -> None:
        """Test request_defining_symbol for a position with no symbol."""
        # Test for a position with no symbol (e.g., whitespace or comment)
        file_path = os.path.join("lib", "services.ex")
        # Line 3 is likely a blank line
        defining_symbol = language_server.request_defining_symbol(file_path, 3, 0)
        # Should return None or empty
        assert defining_symbol is None or defining_symbol == {}

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_symbol_methods_integration(self, language_server: SolidLanguageServer) -> None:
        """Test integration between different symbol methods."""
        file_path = os.path.join("lib", "models.ex")
        # Find User struct definition
        content = language_server.retrieve_full_file_content(file_path)
        lines = content.split("\n")
        user_struct_line = None
        for i, line in enumerate(lines):
            if "defmodule User do" in line:
                user_struct_line = i
                break
        if user_struct_line is None:
            pytest.skip("Could not find User struct")
        # Test containing symbol
        containing = language_server.request_containing_symbol(file_path, user_struct_line + 5, 10)
        if containing:
            # Test that we can find references to this symbol
            if "location" in containing and "range" in containing["location"]:
                start_pos = containing["location"]["range"]["start"]
                refs = [
                    ref.symbol for ref in language_server.request_referencing_symbols(file_path, start_pos["line"], start_pos["character"])
                ]
                # We should find some references or none (both are valid outcomes)
                assert isinstance(refs, list)

    @pytest.mark.xfail(reason="Flaky test, sometimes fails with an Expert-internal error")
    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_symbol_tree_structure(self, language_server: SolidLanguageServer) -> None:
        """Test that symbol tree structure is correctly built."""
        symbol_tree = language_server.request_full_symbol_tree()
        # Should get a tree structure
        assert len(symbol_tree) > 0
        # Should have our test repository structure
        root = symbol_tree[0]
        assert "children" in root
        # Look for lib directory
        lib_dir = None
        for child in root["children"]:
            if child["name"] == "lib":
                lib_dir = child
                break
        if lib_dir:
            # Expert returns module names instead of file names (e.g., 'services' instead of 'services.ex')
            file_names = [child["name"] for child in lib_dir.get("children", [])]
            expected_modules = ["models", "services", "examples", "utils", "test_repo"]
            found_modules = [name for name in expected_modules if name in file_names]
            assert len(found_modules) > 0, f"Expected to find some modules from {expected_modules}, but got {file_names}"

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_request_dir_overview(self, language_server: SolidLanguageServer) -> None:
        """Test request_dir_overview functionality."""
        lib_overview = language_server.request_dir_overview("lib")
        # Should get an overview of the lib directory
        assert lib_overview is not None
        # Expert returns keys like 'lib/services.ex' instead of just 'lib'
        overview_keys = list(lib_overview.keys()) if hasattr(lib_overview, "keys") else []
        lib_files = [key for key in overview_keys if key.startswith("lib/")]
        assert len(lib_files) > 0, f"Expected to find lib/ files in overview keys: {overview_keys}"
        # Should contain information about our modules
        overview_text = str(lib_overview).lower()
        expected_terms = ["models", "services", "user", "item"]
        found_terms = [term for term in expected_terms if term in overview_text]
        assert len(found_terms) > 0, f"Expected to find some terms from {expected_terms} in overview"

    # @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    # def test_request_document_overview(self, language_server: SolidLanguageServer) -> None:
    #     """Test request_document_overview functionality."""
    #     # COMMENTED OUT: Expert document overview doesn't contain expected terms
    #     # Expert return value: [('TestRepo.Models', 2, 0, 0)] - only module info, no detailed content
    #     # Expected terms like 'user', 'item', 'order', 'struct', 'defmodule' are not present
    #     # This appears to be a limitation of Expert document overview functionality
    #     #
    #     file_path = os.path.join("lib", "models.ex")
    #     doc_overview = language_server.request_document_overview(file_path)
    #
    #     # Should get an overview of the models.ex file
    #     assert doc_overview is not None
    #
    #     # Should contain information about our structs and functions
    #     overview_text = str(doc_overview).lower()
    #     expected_terms = ["user", "item", "order", "struct", "defmodule"]
    #     found_terms = [term for term in expected_terms if term in overview_text]
    #     assert len(found_terms) > 0, f"Expected to find some terms from {expected_terms} in overview"

    @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True)
    def test_containing_symbol_of_module_attribute(self, language_server: SolidLanguageServer) -> None:
        """Test containing symbol for module attributes."""
        file_path = os.path.join("lib", "models.ex")
        # Find a module attribute like @type or @doc
        content = language_server.retrieve_full_file_content(file_path)
        lines = content.split("\n")
        attribute_line = None
        for i, line in enumerate(lines):
            if line.strip().startswith("@type") or line.strip().startswith("@doc"):
                attribute_line = i
                break
        if attribute_line is None:
            pytest.skip("Could not find module attribute")
        containing_symbol = language_server.request_containing_symbol(file_path, attribute_line, 5)
        if containing_symbol:
            # Should be contained within a module
            assert "name" in containing_symbol
            # The containing symbol should be a module
            expected_names = ["User", "Item", "Order", "TestRepo.Models"]
            assert any(name in containing_symbol["name"] for name in expected_names)
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/elixir/test_elixir_symbol_retrieval.py",
"license": "MIT License",
"lines": 289,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/terraform/test_terraform_basic.py | """
Basic integration tests for the Terraform language server functionality.
These tests validate the functionality of the language server APIs
like request_references using the test repository.
"""
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
@pytest.mark.terraform
class TestLanguageServerBasics:
    """Test basic functionality of the Terraform language server."""

    @pytest.mark.parametrize("language_server", [Language.TERRAFORM], indirect=True)
    def test_basic_definition(self, language_server: SolidLanguageServer) -> None:
        """Test basic definition lookup functionality."""
        # Document symbols serve as a cheap liveness probe: the request should
        # complete without hanging and return something for main.tf.
        symbols = language_server.request_document_symbols("main.tf").get_all_symbols_and_roots()
        assert len(symbols) > 0, "Should find at least some symbols in main.tf"

    @pytest.mark.parametrize("language_server", [Language.TERRAFORM], indirect=True)
    def test_request_references_aws_instance(self, language_server: SolidLanguageServer) -> None:
        """Test request_references on an aws_instance resource."""
        file_path = "main.tf"
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Locate the target resource among the top-level symbols.
        target = None
        for candidate in symbols[0]:
            if candidate.get("name") == 'resource "aws_instance" "web_server"':
                target = candidate
                break
        if not target or "selectionRange" not in target:
            raise AssertionError("aws_instance symbol or its selectionRange not found")
        start = target["selectionRange"]["start"]
        references = language_server.request_references(file_path, start["line"], start["character"])
        assert len(references) >= 1, "aws_instance should be referenced at least once"

    @pytest.mark.parametrize("language_server", [Language.TERRAFORM], indirect=True)
    def test_request_references_variable(self, language_server: SolidLanguageServer) -> None:
        """Test request_references on a variable."""
        file_path = "variables.tf"
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Locate the variable definition among the top-level symbols.
        target = None
        for candidate in symbols[0]:
            if candidate.get("name") == 'variable "instance_type"':
                target = candidate
                break
        if not target or "selectionRange" not in target:
            raise AssertionError("variable symbol or its selectionRange not found")
        start = target["selectionRange"]["start"]
        references = language_server.request_references(file_path, start["line"], start["character"])
        assert len(references) >= 1, "variable should be referenced at least once"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/terraform/test_terraform_basic.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:src/serena/config/serena_config.py | """
The Serena Model Context Protocol (MCP) Server
"""
import dataclasses
import os
import re
import shutil
from collections.abc import Iterator, Sequence
from copy import deepcopy
from dataclasses import dataclass, field
from datetime import UTC, datetime
from enum import Enum
from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, Self, TypeVar
import yaml
from ruamel.yaml.comments import CommentedMap
from sensai.util import logging
from sensai.util.logging import LogTime, datetime_tag
from sensai.util.string import ToStringMixin
from serena.constants import (
DEFAULT_SOURCE_FILE_ENCODING,
PROJECT_LOCAL_TEMPLATE_FILE,
PROJECT_TEMPLATE_FILE,
REPO_ROOT,
SERENA_CONFIG_TEMPLATE_FILE,
SERENA_FILE_ENCODING,
SERENA_MANAGED_DIR_NAME,
)
from serena.util.inspection import determine_programming_language_composition
from serena.util.yaml import YamlCommentNormalisation, load_yaml, normalise_yaml_comments, save_yaml, transfer_missing_yaml_comments
from solidlsp.ls_config import Language
from ..analytics import RegisteredTokenCountEstimator
from ..util.class_decorators import singleton
from ..util.cli_util import ask_yes_no
from ..util.dataclass import get_dataclass_default
if TYPE_CHECKING:
from ..project import Project
log = logging.getLogger(__name__)

T = TypeVar("T")

# Default tool timeout; unit is not stated here — presumably seconds (confirm at usage sites).
DEFAULT_TOOL_TIMEOUT: float = 240

# Configuration dictionaries may be plain dicts or ruamel CommentedMaps (comment-preserving).
DictType = dict | CommentedMap
TDict = TypeVar("TDict", bound=DictType)
@singleton
class SerenaPaths:
    """
    Provides paths to various Serena-related directories and files.
    """

    def __init__(self) -> None:
        # SERENA_HOME overrides the default home location (~/.serena);
        # unset or blank values fall back to the default.
        home_dir = os.getenv("SERENA_HOME")
        if home_dir is None or home_dir.strip() == "":
            home_dir = str(Path.home() / SERENA_MANAGED_DIR_NAME)
        else:
            home_dir = home_dir.strip()
        self.serena_user_home_dir: str = home_dir
        """
        the path to the Serena home directory, where the user's configuration/data is stored.
        This is ~/.serena by default, but it can be overridden via the SERENA_HOME environment variable.
        """
        self.user_prompt_templates_dir: str = os.path.join(self.serena_user_home_dir, "prompt_templates")
        """
        directory containing prompt templates defined by the user.
        Prompts defined by the user take precedence over Serena's built-in prompt templates.
        """
        self.user_contexts_dir: str = os.path.join(self.serena_user_home_dir, "contexts")
        """
        directory containing contexts defined by the user.
        If a name of a context matches a name of a context in SERENAS_OWN_CONTEXT_YAMLS_DIR,
        the user context will override the default context definition.
        """
        self.user_modes_dir: str = os.path.join(self.serena_user_home_dir, "modes")
        """
        directory containing modes defined by the user.
        If a name of a mode matches a name of a mode in SERENAS_OWN_MODES_YAML_DIR,
        the user mode will override the default mode definition.
        """
        self.news_snippet_id_file: str = os.path.join(self.serena_user_home_dir, "last_read_news_snippet_id.txt")
        """
        file containing the ID of the last read news snippet
        """
        # The global memories directory is created eagerly so later writes cannot fail
        # on a missing parent directory.
        global_memories_path = Path(os.path.join(self.serena_user_home_dir, "memories", "global"))
        global_memories_path.mkdir(parents=True, exist_ok=True)
        self.global_memories_path = global_memories_path
        """
        directory where global memories are stored, i.e. memories that are available across all projects
        """
        self.last_returned_log_file_path: str | None = None
        """
        the path to the last log file returned by `get_next_log_file_path`. If this is not None, the logs
        are currently being written to this file
        """

    def get_next_log_file_path(self, prefix: str) -> str:
        """
        :param prefix: the filename prefix indicating the type of the log file
        :return: the full path to the log file to use
        """
        # Log files are grouped into one directory per calendar day (local time).
        log_dir = os.path.join(self.serena_user_home_dir, "logs", datetime.now().strftime("%Y-%m-%d"))
        os.makedirs(log_dir, exist_ok=True)
        # Filename embeds a timestamp tag and the PID — presumably to avoid
        # collisions between concurrent Serena processes.
        self.last_returned_log_file_path = os.path.join(log_dir, prefix + "_" + datetime_tag() + f"_{os.getpid()}" + ".txt")
        return self.last_returned_log_file_path
# TODO: Paths from constants.py should be moved here
@dataclass
class ToolInclusionDefinition:
    """
    Defines which tools to include/exclude in Serena's operation.
    This can mean either
    * defining exclusions/inclusions to apply to an existing set of tools [incremental mode], or
    * defining a fixed set of tools to use [fixed mode].
    """

    excluded_tools: Sequence[str] = ()
    """
    the names of tools to exclude from use [incremental mode]
    """
    included_optional_tools: Sequence[str] = ()
    """
    the names of optional tools to include [incremental mode]
    """
    fixed_tools: Sequence[str] = ()
    """
    the names of tools to use as a fixed set of tools [fixed mode]
    """

    def is_fixed_tool_set(self) -> bool:
        """
        :return: True if this definition specifies a fixed tool set, False for incremental mode.
        :raises ValueError: if both fixed and incremental settings are populated,
            since the two modes are mutually exclusive.
        """
        has_fixed = bool(self.fixed_tools)
        has_incremental = bool(self.excluded_tools) or bool(self.included_optional_tools)
        if has_fixed and has_incremental:
            raise ValueError("Cannot use both fixed_tools and excluded_tools/included_optional_tools at the same time.")
        return has_fixed
@dataclass
class NamedToolInclusionDefinition(ToolInclusionDefinition):
    """A tool inclusion definition that additionally carries an optional name (used in its string representation)."""

    name: str | None = None

    def __str__(self) -> str:
        return "ToolInclusionDefinition[" + str(self.name) + "]"
@dataclass
class ModeSelectionDefinition:
    """Defines the selection of Serena modes (base modes and default modes)."""

    # None means "not overridden here" — per SharedConfig's docstring, unset values
    # are resolved to global defaults in SerenaConfig.
    base_modes: Sequence[str] | None = None
    default_modes: Sequence[str] | None = None
class LanguageBackend(Enum):
    """The backend through which Serena obtains language intelligence."""

    LSP = "LSP"
    """
    Use the language server protocol (LSP), spawning freely available language servers
    via the SolidLSP library that is part of Serena
    """
    JETBRAINS = "JetBrains"
    """
    Use the Serena plugin in your JetBrains IDE.
    (requires the plugin to be installed and the project being worked on to be open in your IDE)
    """

    @staticmethod
    def from_str(backend_str: str) -> "LanguageBackend":
        """Parse a backend name case-insensitively, raising ValueError for unknown names."""
        wanted = backend_str.lower()
        for candidate in LanguageBackend:
            if candidate.value.lower() == wanted:
                return candidate
        raise ValueError(f"Unknown language backend '{backend_str}': valid values are {[b.value for b in LanguageBackend]}")

    def is_lsp(self) -> bool:
        """:return: True if this is the LSP backend."""
        return self is LanguageBackend.LSP

    def is_jetbrains(self) -> bool:
        """:return: True if this is the JetBrains backend."""
        return self is LanguageBackend.JETBRAINS
@dataclass
class SharedConfig(ModeSelectionDefinition, ToolInclusionDefinition, ToStringMixin):
    """Shared between SerenaConfig and ProjectConfig, the latter used to override values in the form
    (same as in ModeSelectionDefinition).
    The defaults here shall be none and should be set to the global default values in SerenaConfig.
    """

    # NOTE(review): presumably a token/size budget for symbol information; unit not
    # visible here — confirm at usage sites. None means "use the global default".
    symbol_info_budget: float | None = None
    # backend for language intelligence; None means "use the global default"
    language_backend: LanguageBackend | None = None
    # NOTE(review): presumably patterns identifying memories that are read-only — confirm against usage.
    read_only_memory_patterns: list[str] = field(default_factory=list)
class SerenaConfigError(Exception):
    """Base exception for Serena configuration errors."""

    pass
# NOTE(review): substitution of the placeholders presumably happens where project
# folder locations are resolved (e.g. get_project_yml_location) — confirm.
DEFAULT_PROJECT_SERENA_FOLDER_LOCATION = "$projectDir/" + SERENA_MANAGED_DIR_NAME
"""
The default template for the project Serena folder location.
Uses $projectDir and $projectFolderName as placeholders.
"""
@dataclass(kw_only=True)
class ProjectConfig(SharedConfig):
project_name: str
languages: list[Language]
ignored_paths: list[str] = field(default_factory=list)
read_only: bool = False
ignore_all_files_in_gitignore: bool = True
initial_prompt: str = ""
encoding: str = DEFAULT_SOURCE_FILE_ENCODING
SERENA_PROJECT_FILE = "project.yml"
SERENA_LOCAL_PROJECT_FILE = "project.local.yml"
FIELDS_WITHOUT_DEFAULTS = {"project_name", "languages"}
YAML_COMMENT_NORMALISATION = YamlCommentNormalisation.LEADING
"""
the comment normalisation strategy to use when loading/saving project configuration files.
The template file must match this configuration (i.e. it must use leading comments if this is set to LEADING).
"""
    def _tostring_includes(self) -> list[str]:
        # Restrict the ToStringMixin representation to the project name only.
        return ["project_name"]
    @classmethod
    def autogenerate(
        cls,
        project_root: str | Path,
        serena_config: "SerenaConfig",
        project_name: str | None = None,
        languages: list[Language] | None = None,
        save_to_disk: bool = True,
        interactive: bool = False,
    ) -> Self:
        """
        Autogenerate a project configuration for a given project root.

        :param project_root: the path to the project root
        :param serena_config: the global Serena configuration
        :param project_name: the name of the project; if None, the name of the project will be the name of the directory
            containing the project
        :param languages: the languages of the project; if None, they will be determined automatically
        :param save_to_disk: whether to save the project configuration to disk
        :param interactive: whether to run in interactive CLI mode, asking the user for input where appropriate
        :return: the project configuration
        """
        project_root = Path(project_root).resolve()
        if not project_root.exists():
            raise FileNotFoundError(f"Project root not found: {project_root}")
        with LogTime("Project configuration auto-generation", logger=log):
            log.info("Project root: %s", project_root)
            project_folder_name = project_root.name
            project_name = project_name or project_folder_name
            if languages is None:
                # determine languages automatically
                log.info("Determining programming languages used in the project")
                language_composition = determine_programming_language_composition(str(project_root))
                log.info("Language composition: %s", language_composition)
                if len(language_composition) == 0:
                    language_values = ", ".join([lang.value for lang in Language])
                    raise ValueError(
                        f"No source files found in {project_root}\n\n"
                        f"To use Serena with this project, you need to either\n"
                        f" 1. specify a programming language by adding parameters --language <language>\n"
                        f" when creating the project via the Serena CLI command OR\n"
                        f" 2. add source files in one of the supported languages first.\n\n"
                        f"Supported languages are: {language_values}\n"
                        f"Read the documentation for more information."
                    )
                # sort languages by number of files found
                languages_and_percentages = sorted(
                    language_composition.items(), key=lambda item: (item[1], item[0].get_priority()), reverse=True
                )
                # find the language with the highest percentage and enable it
                top_language_pair = languages_and_percentages[0]
                other_language_pairs = languages_and_percentages[1:]
                languages_to_use: list[str] = [top_language_pair[0].value]
                # if in interactive mode, ask the user which other languages to enable
                if len(other_language_pairs) > 0 and interactive:
                    print(
                        "Detected and enabled main language '%s' (%.2f%% of source files)."
                        % (top_language_pair[0].value, top_language_pair[1])
                    )
                    print(f"Additionally detected {len(other_language_pairs)} other language(s).\n")
                    print("Note: Enable only languages you need symbolic retrieval/editing capabilities for.")
                    print(" Additional language servers use resources and some languages may require additional")
                    print(" system-level installations/configuration (see Serena documentation).")
                    print("\nWhich additional languages do you want to enable?")
                    for lang, perc in other_language_pairs:
                        enable = ask_yes_no("Enable %s (%.2f%% of source files)?" % (lang.value, perc), default=False)
                        if enable:
                            languages_to_use.append(lang.value)
                    print()
                log.info("Using languages: %s", languages_to_use)
            else:
                languages_to_use = [lang.value for lang in languages]
            # Start from the template (preserving its comments) and fill in the
            # project-specific values.
            config_with_comments, _ = cls._load_yaml_dict(PROJECT_TEMPLATE_FILE)
            config_with_comments["project_name"] = project_name
            config_with_comments["languages"] = languages_to_use
            if save_to_disk:
                project_yml_path = serena_config.get_project_yml_location(str(project_root))
                log.info("Saving project configuration to %s", project_yml_path)
                save_yaml(project_yml_path, config_with_comments)
                # Also place a local (per-machine) project config next to project.yml.
                project_local_yml_path = os.path.join(os.path.dirname(project_yml_path), cls.SERENA_LOCAL_PROJECT_FILE)
                shutil.copy(PROJECT_LOCAL_TEMPLATE_FILE, project_local_yml_path)
            return cls._from_dict(config_with_comments)
@classmethod
def default_project_yml_path(cls, project_root: str | Path) -> str:
"""
:return: the default path to the project.yml file (inside ``$projectDir/.serena/``).
This is suitable as a fallback when no ``SerenaConfig`` is available to resolve
a potentially customised location.
"""
return os.path.join(str(project_root), SERENA_MANAGED_DIR_NAME, cls.SERENA_PROJECT_FILE)
@classmethod
def _load_yaml_dict(
cls, yml_path: str, comment_normalisation: YamlCommentNormalisation = YamlCommentNormalisation.NONE
) -> tuple[CommentedMap, bool]:
"""
Load the project configuration as a CommentedMap, preserving comments and ensuring
completeness of the configuration by applying default values for missing fields
and backward compatibility adjustments.
:param yml_path: the path to the project.yml file
:return: a tuple `(dict, was_complete)` where dict is a CommentedMap representing a
full project configuration and `was_complete` indicates whether the loaded configuration
was complete (i.e., did not require any default values to be applied)
"""
data = load_yaml(yml_path, comment_normalisation=comment_normalisation)
# apply defaults
was_complete = True
for field_info in dataclasses.fields(cls):
key = field_info.name
if key in cls.FIELDS_WITHOUT_DEFAULTS:
continue
if key not in data:
was_complete = False
default_value = get_dataclass_default(cls, key)
data.setdefault(key, default_value)
# backward compatibility: handle single "language" field
if "languages" not in data and "language" in data:
data["languages"] = [data["language"]]
del data["language"]
return data, was_complete
@classmethod
def _from_dict(cls, data: dict[str, Any]) -> Self:
"""
Create a ProjectConfig instance from a (full) configuration dictionary
"""
lang_name_mapping = {"javascript": "typescript"}
languages: list[Language] = []
for language_str in data["languages"]:
orig_language_str = language_str
try:
language_str = language_str.lower()
if language_str in lang_name_mapping:
language_str = lang_name_mapping[language_str]
language = Language(language_str)
languages.append(language)
except ValueError as e:
raise ValueError(
f"Invalid language: {orig_language_str}.\nValid language_strings are: {[l.value for l in Language]}"
) from e
# Validate symbol_info_budget
symbol_info_budget_raw = data["symbol_info_budget"]
symbol_info_budget = symbol_info_budget_raw
if symbol_info_budget is not None:
try:
symbol_info_budget = float(symbol_info_budget_raw)
except (TypeError, ValueError) as e:
raise ValueError(f"symbol_info_budget must be a number or null, got: {symbol_info_budget_raw}") from e
if symbol_info_budget < 0:
raise ValueError(f"symbol_info_budget cannot be negative, got: {symbol_info_budget}")
language_backend_value = data.get("language_backend")
language_backend = LanguageBackend.from_str(language_backend_value) if language_backend_value else None
return cls(
project_name=data["project_name"],
languages=languages,
ignored_paths=data["ignored_paths"],
excluded_tools=data["excluded_tools"],
fixed_tools=data["fixed_tools"],
included_optional_tools=data["included_optional_tools"],
read_only=data["read_only"],
read_only_memory_patterns=data.get("read_only_memory_patterns", []),
ignore_all_files_in_gitignore=data["ignore_all_files_in_gitignore"],
initial_prompt=data["initial_prompt"],
encoding=data["encoding"],
language_backend=language_backend,
base_modes=data["base_modes"],
default_modes=data["default_modes"],
symbol_info_budget=symbol_info_budget,
)
def _to_yaml_dict(self) -> dict:
"""
:return: a yaml-serializable dictionary representation of this configuration
"""
d = dataclasses.asdict(self)
d["languages"] = [lang.value for lang in self.languages]
d["language_backend"] = self.language_backend.value if self.language_backend is not None else None
return d
@classmethod
def load(cls, project_root: Path | str, serena_config: "SerenaConfig", autogenerate: bool = False) -> Self:
"""
Load a ProjectConfig instance from the path to the project root.
:param project_root: the path to the project root
:param serena_config: the global Serena configuration
:param autogenerate: whether to auto-generate the configuration if it does not exist
"""
project_root = Path(project_root)
project_folder_name = project_root.name
yaml_path = serena_config.get_project_yml_location(project_root)
# auto-generate if necessary
if not os.path.exists(yaml_path):
if autogenerate:
return cls.autogenerate(project_root, serena_config)
else:
raise FileNotFoundError(f"Project configuration file not found: {yaml_path}")
# load the configuration dictionary
yaml_data, was_complete = cls._load_yaml_dict(str(yaml_path))
if "project_name" not in yaml_data:
yaml_data["project_name"] = project_folder_name
# instantiate the ProjectConfig
project_config = cls._from_dict(yaml_data)
# if the configuration was incomplete, re-save it to disk
if not was_complete:
log.info("Project configuration in %s was incomplete, re-saving with default values for missing fields", yaml_path)
project_config.save(str(yaml_path))
return project_config
def save(self, project_yml_path: str) -> None:
"""
Saves the project configuration to disk.
:param project_yml_path: the path to the project.yml file
"""
config_path = project_yml_path
log.info("Saving updated project configuration to %s", config_path)
# load original commented map and update it with current values
config_with_comments, _ = self._load_yaml_dict(config_path, self.YAML_COMMENT_NORMALISATION)
config_with_comments.update(self._to_yaml_dict())
# transfer missing comments from the template file
template_config, _ = self._load_yaml_dict(PROJECT_TEMPLATE_FILE, self.YAML_COMMENT_NORMALISATION)
transfer_missing_yaml_comments(template_config, config_with_comments, self.YAML_COMMENT_NORMALISATION)
save_yaml(config_path, config_with_comments)
class RegisteredProject(ToStringMixin):
    """
    Represents a project registered in the Serena configuration, lazily providing
    the corresponding ``Project`` instance on demand.
    """

    def __init__(
        self,
        project_root: str,
        project_config: "ProjectConfig",
        project_instance: Optional["Project"] = None,
    ) -> None:
        """
        :param project_root: the root directory of the project
        :param project_config: the configuration of the project
        :param project_instance: an existing project instance (if already loaded)
        """
        self.project_root = Path(project_root).resolve()
        self.project_config = project_config
        self._project_instance = project_instance

    def _tostring_exclude_private(self) -> bool:
        return True

    @property
    def project_name(self) -> str:
        """The name of the project, as defined in its configuration."""
        return self.project_config.project_name

    @classmethod
    def from_project_instance(cls, project_instance: "Project") -> "RegisteredProject":
        """
        Creates a registered project from an already loaded project instance.

        :param project_instance: the loaded project
        :return: the corresponding registered project
        """
        # use cls (rather than hard-coding the class name) so subclasses construct themselves
        return cls(
            project_root=project_instance.project_root,
            project_config=project_instance.project_config,
            project_instance=project_instance,
        )

    @classmethod
    def from_project_root(cls, project_root: str | Path, serena_config: "SerenaConfig") -> "RegisteredProject":
        """
        Creates a registered project by loading the project configuration from the given root directory.

        :param project_root: the root directory of the project
        :param serena_config: the global Serena configuration (used to locate the project's config file)
        :return: the corresponding registered project
        """
        project_config = ProjectConfig.load(project_root, serena_config=serena_config)
        # use cls (rather than hard-coding the class name) so subclasses construct themselves
        return cls(
            project_root=str(project_root),
            project_config=project_config,
        )

    def matches_root_path(self, path: str | Path) -> bool:
        """
        Check if the given path matches the project root path.

        :param path: the path to check
        :return: True if the path matches the project root, False otherwise
        """
        # NOTE(review): Path.samefile raises OSError if either path does not exist;
        # callers are expected to pass existing directories — confirm before relying on this elsewhere.
        return self.project_root.samefile(Path(path).resolve())

    def get_project_instance(self, serena_config: "SerenaConfig") -> "Project":
        """
        Returns the project instance for this registered project, loading it if necessary.

        :param serena_config: the global Serena configuration
        :return: the (possibly cached) project instance
        """
        if self._project_instance is None:
            # deferred import to avoid a circular dependency with the project module
            from ..project import Project

            with LogTime(f"Loading project instance for {self}", logger=log):
                self._project_instance = Project(
                    project_root=str(self.project_root),
                    project_config=self.project_config,
                    serena_config=serena_config,
                )
        return self._project_instance
@dataclass(kw_only=True)
class SerenaConfig(SharedConfig):
    """
    Holds the Serena agent configuration, which is typically loaded from a YAML configuration file
    (when instantiated via :method:`from_config_file`), which is updated when projects are added or removed.
    For testing purposes, it can also be instantiated directly with the desired parameters.
    """
    # *** fields that are mapped directly to/from the configuration file (DO NOT RENAME) ***
    projects: list[RegisteredProject] = field(default_factory=list)
    gui_log_window: bool = False
    log_level: int = logging.INFO
    trace_lsp_communication: bool = False
    web_dashboard: bool = True
    web_dashboard_open_on_launch: bool = True
    web_dashboard_listen_address: str = "127.0.0.1"
    jetbrains_plugin_server_address: str = "127.0.0.1"
    tool_timeout: float = DEFAULT_TOOL_TIMEOUT
    language_backend: LanguageBackend = LanguageBackend.LSP
    """
    the language backend to use for code understanding features
    """
    token_count_estimator: str = RegisteredTokenCountEstimator.CHAR_COUNT.name
    """Only relevant if `record_tool_usage` is True; the name of the token count estimator to use for tool usage statistics.
    See the `RegisteredTokenCountEstimator` enum for available options.
    Note: some token estimators (like tiktoken) may require downloading data files
    on the first run, which can take some time and require internet access. Others, like the Anthropic ones, may require an API key
    and rate limits may apply.
    """
    default_max_tool_answer_chars: int = 150_000
    """Used as default for tools where the apply method has a default maximal answer length.
    Even though the value of the max_answer_chars can be changed when calling the tool, it may make sense to adjust this default
    through the global configuration.
    """
    ls_specific_settings: dict = field(default_factory=dict)
    """Advanced configuration option allowing to configure language server implementation specific options, see SolidLSPSettings for more info."""
    ignored_paths: list[str] = field(default_factory=list)
    """List of paths to ignore across all projects. Same syntax as gitignore, so you can use * and **.
    These patterns are merged additively with each project's own ignored_paths."""
    project_serena_folder_location: str = DEFAULT_PROJECT_SERENA_FOLDER_LOCATION
    """
    Template for the location of the per-project .serena data folder (memories, caches, etc.).
    Supports the following placeholders:
      - $projectDir: the absolute path to the project root directory
      - $projectFolderName: the name of the project folder
    Examples:
      - "$projectDir/.serena" (default, stores data inside the project)
      - "/projects-metadata/$projectFolderName/.serena" (stores data in a central location)
    """
    # settings with overridden defaults
    default_modes: Sequence[str] | None = ("interactive", "editing")
    symbol_info_budget: float = 10.0
    """
    Time budget (seconds) for requests when tools request include_info (currently
    only supported for LSP-based tools).
    If the budget is exceeded, Serena stops issuing further requests and returns partial info results.
    0 disables the budget (no early stopping). Negative values are invalid.
    """
    # *** fields that are NOT mapped to/from the configuration file ***
    # the raw YAML (with comments) as loaded from disk; kept so that saving preserves comments
    _loaded_commented_yaml: CommentedMap | None = None
    _config_file_path: str | None = None
    """
    the path to the configuration file to which updates of the configuration shall be saved;
    if None, the configuration is not saved to disk
    """
    # *** static members ***
    CONFIG_FILE = "serena_config.yml"
    # fields that require custom (de)serialization logic rather than a direct attribute copy
    CONFIG_FIELDS_WITH_TYPE_CONVERSION = {"projects", "language_backend"}
    # *** methods ***
    @classmethod
    def get_config_file_creation_date(cls) -> datetime | None:
        """
        :return: the creation date of the configuration file, or None if the configuration file does not exist
        """
        config_file_path = cls._determine_config_file_path()
        if not os.path.exists(config_file_path):
            return None
        # for unix systems st_ctime is the inode change time (change of metadata),
        # which is good enough for our purposes
        creation_timestamp = os.stat(config_file_path).st_ctime
        return datetime.fromtimestamp(creation_timestamp, UTC)
    @property
    def config_file_path(self) -> str | None:
        """
        :return: the path of the file from which this configuration was loaded, or None if not loaded from a file
        """
        return self._config_file_path
    def _iter_config_file_mapped_fields_without_type_conversion(self) -> Iterator[str]:
        """
        Iterates over the names of all public fields that are mapped to/from the configuration
        file directly, i.e. without any type conversion.
        """
        for field_info in dataclasses.fields(self):
            field_name = field_info.name
            if field_name.startswith("_"):
                continue
            if field_name in self.CONFIG_FIELDS_WITH_TYPE_CONVERSION:
                continue
            yield field_name
    def _tostring_includes(self) -> list[str]:
        return ["config_file_path"]
    @classmethod
    def _generate_config_file(cls, config_file_path: str) -> None:
        """
        Generates a Serena configuration file at the specified path from the template file.
        :param config_file_path: the path where the configuration file should be generated
        """
        log.info(f"Auto-generating Serena configuration file in {config_file_path}")
        loaded_commented_yaml = load_yaml(SERENA_CONFIG_TEMPLATE_FILE)
        save_yaml(config_file_path, loaded_commented_yaml)
    @classmethod
    def _determine_config_file_path(cls) -> str:
        """
        :return: the location where the Serena configuration file is stored/should be stored
        """
        config_path = os.path.join(SerenaPaths().serena_user_home_dir, cls.CONFIG_FILE)
        # if the config file does not exist, check if we can migrate it from the old location
        if not os.path.exists(config_path):
            old_config_path = os.path.join(REPO_ROOT, cls.CONFIG_FILE)
            if os.path.exists(old_config_path):
                log.info(f"Moving Serena configuration file from {old_config_path} to {config_path}")
                os.makedirs(os.path.dirname(config_path), exist_ok=True)
                shutil.move(old_config_path, config_path)
        return config_path
    @classmethod
    def from_config_file(cls, generate_if_missing: bool = True) -> "SerenaConfig":
        """
        Static constructor to create SerenaConfig from the configuration file
        :param generate_if_missing: whether to auto-generate the configuration file from the template if it does not exist
        :return: the loaded configuration instance
        :raises FileNotFoundError: if the file is missing and generate_if_missing is False
        :raises ValueError: if the configuration file cannot be parsed
        :raises SerenaConfigError: if the configuration lacks the mandatory `projects` key
        """
        config_file_path = cls._determine_config_file_path()
        # create the configuration file from the template if necessary
        if not os.path.exists(config_file_path):
            if not generate_if_missing:
                raise FileNotFoundError(f"Serena configuration file not found: {config_file_path}")
            log.info(f"Serena configuration file not found at {config_file_path}, autogenerating...")
            cls._generate_config_file(config_file_path)
        # load the configuration
        log.info(f"Loading Serena configuration from {config_file_path}")
        try:
            loaded_commented_yaml = load_yaml(config_file_path)
        except Exception as e:
            raise ValueError(f"Error loading Serena configuration from {config_file_path}: {e}") from e
        # create the configuration instance
        instance = cls(_loaded_commented_yaml=loaded_commented_yaml, _config_file_path=config_file_path)
        # number of legacy/missing entries encountered; if > 0, the file is re-saved at the end
        num_migrations = 0
        def get_value_or_default(field_name: str) -> Any:
            # reads a field from the loaded YAML, falling back to the dataclass default;
            # a missing field counts as a migration (triggering a re-save)
            nonlocal num_migrations
            if field_name not in loaded_commented_yaml:
                num_migrations += 1
            return loaded_commented_yaml.get(field_name, get_dataclass_default(SerenaConfig, field_name))
        # transfer regular fields that do not require type conversion
        for field_name in instance._iter_config_file_mapped_fields_without_type_conversion():
            assert hasattr(instance, field_name)
            setattr(instance, field_name, get_value_or_default(field_name))
        # read projects
        if "projects" not in loaded_commented_yaml:
            raise SerenaConfigError("`projects` key not found in Serena configuration. Please update your `serena_config.yml` file.")
        instance.projects = []
        for path in loaded_commented_yaml["projects"]:
            path = Path(path).resolve()
            if not path.exists() or (path.is_dir() and not os.path.isfile(instance.get_project_yml_location(str(path)))):
                log.warning(f"Project path {path} does not exist or no associated project configuration file found, skipping.")
                continue
            # a file entry is a legacy out-of-project configuration file; migrate it in place
            if path.is_file():
                path = cls._migrate_out_of_project_config_file(path)
                if path is None:
                    continue
                num_migrations += 1
            project_config = ProjectConfig.load(path, serena_config=instance)  # instance is sufficiently populated
            project = RegisteredProject(
                project_root=str(path),
                project_config=project_config,
            )
            instance.projects.append(project)
        # determine language backend
        language_backend = get_dataclass_default(SerenaConfig, "language_backend")
        if "language_backend" in loaded_commented_yaml:
            backend_str = loaded_commented_yaml["language_backend"]
            language_backend = LanguageBackend.from_str(backend_str)
        else:
            # backward compatibility (migrate Boolean field "jetbrains")
            if "jetbrains" in loaded_commented_yaml:
                num_migrations += 1
                if loaded_commented_yaml["jetbrains"]:
                    language_backend = LanguageBackend.JETBRAINS
                del loaded_commented_yaml["jetbrains"]
        instance.language_backend = language_backend
        # migrate deprecated "gui_log_level" field if necessary
        if "gui_log_level" in loaded_commented_yaml:
            num_migrations += 1
            if "log_level" not in loaded_commented_yaml:
                instance.log_level = loaded_commented_yaml["gui_log_level"]
            del loaded_commented_yaml["gui_log_level"]
        # migrate "edit_global_memories"
        if "edit_global_memories" in loaded_commented_yaml:
            num_migrations += 1
            edit_global_memories = loaded_commented_yaml["edit_global_memories"]
            if not edit_global_memories:
                # NOTE(review): read_only_memory_patterns is presumably inherited from SharedConfig — confirm
                instance.read_only_memory_patterns.append("global/.*")
            del loaded_commented_yaml["edit_global_memories"]
        # re-save the configuration file if any migrations were performed
        if num_migrations > 0:
            log.info("Legacy configuration was migrated; re-saving configuration file")
            instance.save()
        return instance
    @classmethod
    def _migrate_out_of_project_config_file(cls, path: Path) -> Path | None:
        """
        Migrates a legacy project configuration file (which is a YAML file containing the project root) to the
        in-project configuration file (project.yml) inside the project root directory.
        :param path: the path to the legacy project configuration file
        :return: the project root path if the migration was successful, None otherwise.
        """
        log.info(f"Found legacy project configuration file {path}, migrating to in-project configuration.")
        try:
            with open(path, encoding=SERENA_FILE_ENCODING) as f:
                project_config_data = yaml.safe_load(f)
            # legacy files used the file name (without extension) as the project name; make it explicit
            if "project_name" not in project_config_data:
                project_name = path.stem
                with open(path, "a", encoding=SERENA_FILE_ENCODING) as f:
                    f.write(f"\nproject_name: {project_name}")
            project_root = project_config_data["project_root"]
            shutil.move(str(path), ProjectConfig.default_project_yml_path(project_root))
            return Path(project_root).resolve()
        except Exception as e:
            log.error(f"Error migrating configuration file: {e}")
            return None
    @cached_property
    def project_paths(self) -> list[str]:
        """The sorted list of root paths of all registered projects."""
        return sorted(str(project.project_root) for project in self.projects)
    @cached_property
    def project_names(self) -> list[str]:
        """The sorted list of names of all registered projects."""
        return sorted(project.project_config.project_name for project in self.projects)
    def get_registered_project(self, project_root_or_name: str, autoregister: bool = False) -> Optional[RegisteredProject]:
        """
        :param project_root_or_name: path to the project root or the name of the project
        :param autoregister: whether to register the project if it exists but is not registered yet
        :return: the registered project, or None if not found
        """
        # look for project by name
        project_candidates = []
        for project in self.projects:
            if project.project_config.project_name == project_root_or_name:
                project_candidates.append(project)
        if len(project_candidates) == 1:
            return project_candidates[0]
        elif len(project_candidates) > 1:
            raise ValueError(
                f"Multiple projects found with name '{project_root_or_name}'. Please reference it by location instead. "
                f"Locations: {[p.project_root for p in project_candidates]}"
            )
        # no project found by name; check if it's a path
        if os.path.isdir(project_root_or_name):
            for project in self.projects:
                if project.matches_root_path(project_root_or_name):
                    return project
        # no registered project found; auto-register if project configuration exists
        if autoregister:
            config_path = self.get_project_yml_location(project_root_or_name)
            if os.path.isfile(config_path):
                registered_project = RegisteredProject.from_project_root(project_root_or_name, serena_config=self)
                self.add_registered_project(registered_project)
                return registered_project
        # nothing found
        return None
    def get_project(self, project_root_or_name: str) -> Optional["Project"]:
        """
        :param project_root_or_name: path to the project root or the name of the project
        :return: the loaded project instance, or None if no matching registered project was found
        """
        registered_project = self.get_registered_project(project_root_or_name)
        if registered_project is None:
            return None
        else:
            return registered_project.get_project_instance(serena_config=self)
    def add_registered_project(self, registered_project: RegisteredProject) -> None:
        """
        Adds a registered project, saving the configuration file.
        """
        self.projects.append(registered_project)
        self.save()
    def add_project_from_path(self, project_root: Path | str) -> "Project":
        """
        Add a new project to the Serena configuration from a given path, auto-generating the project
        with defaults if it does not exist.
        Will raise a FileExistsError if a project already exists at the path.
        :param project_root: the path to the project to add
        :return: the project that was added
        """
        # deferred import to avoid a circular dependency with the project module
        from ..project import Project
        project_root = Path(project_root).resolve()
        if not project_root.exists():
            raise FileNotFoundError(f"Error: Path does not exist: {project_root}")
        if not project_root.is_dir():
            raise FileNotFoundError(f"Error: Path is not a directory: {project_root}")
        for already_registered_project in self.projects:
            if str(already_registered_project.project_root) == str(project_root):
                raise FileExistsError(
                    f"Project with path {project_root} was already added with name '{already_registered_project.project_name}'."
                )
        project_config = ProjectConfig.load(project_root, serena_config=self, autogenerate=True)
        new_project = Project(
            project_root=str(project_root),
            project_config=project_config,
            is_newly_created=True,
            serena_config=self,
        )
        self.add_registered_project(RegisteredProject.from_project_instance(new_project))
        return new_project
    def remove_project(self, project_name: str) -> None:
        """
        Removes the project with the given name, saving the configuration file.
        :param project_name: the name of the project to remove
        :raises ValueError: if no project with the given name is registered
        """
        # find the index of the project with the desired name and remove it
        for i, project in enumerate(list(self.projects)):
            if project.project_name == project_name:
                del self.projects[i]
                break
        else:
            raise ValueError(f"Project '{project_name}' not found in Serena configuration; valid project names: {self.project_names}")
        self.save()
    def save(self) -> None:
        """
        Saves the configuration to the file from which it was loaded (if any)
        """
        if self.config_file_path is None:
            return
        assert self._loaded_commented_yaml is not None, "Cannot save configuration without loaded YAML"
        commented_yaml = deepcopy(self._loaded_commented_yaml)
        # update fields with current values
        for field_name in self._iter_config_file_mapped_fields_without_type_conversion():
            commented_yaml[field_name] = getattr(self, field_name)
        # convert project objects into list of paths
        commented_yaml["projects"] = sorted({str(project.project_root) for project in self.projects})
        # convert language backend to string
        commented_yaml["language_backend"] = self.language_backend.value
        # transfer comments from the template file
        # NOTE: The template file now uses leading comments, but we previously used trailing comments,
        # so we apply a conversion, which detects the old style and transforms it.
        # For some keys, we force updates, because old comments are problematic/misleading.
        normalise_yaml_comments(commented_yaml, YamlCommentNormalisation.LEADING_WITH_CONVERSION_FROM_TRAILING)
        template_yaml = load_yaml(SERENA_CONFIG_TEMPLATE_FILE, comment_normalisation=YamlCommentNormalisation.LEADING)
        transfer_missing_yaml_comments(template_yaml, commented_yaml, YamlCommentNormalisation.LEADING, forced_update_keys=["projects"])
        save_yaml(self.config_file_path, commented_yaml)
    @staticmethod
    def _resolve_serena_folder_location(template: str, placeholders: dict[str, str]) -> str:
        """
        Resolves a folder location template by replacing known ``$placeholder`` tokens
        and raising on any unrecognised ones.
        :param template: the template string (e.g. ``"$projectDir/.serena"``)
        :param placeholders: mapping from placeholder name (without ``$``) to replacement value
        :return: the resolved absolute path
        :raises SerenaConfigError: if the template contains an unknown ``$placeholder``
        """
        def _replace(match: re.Match[str]) -> str:
            # substitution callback: reject any placeholder name we do not know
            name = match.group(1)
            if name not in placeholders:
                raise SerenaConfigError(
                    f"Unknown placeholder '${name}' in project_serena_folder_location. "
                    f"Supported placeholders: {', '.join('$' + k for k in placeholders)}"
                )
            return placeholders[name]
        result = re.sub(r"\$([A-Za-z_]\w*)", _replace, template)
        return os.path.abspath(result)
    def get_configured_project_serena_folder(self, project_root: str | Path) -> str:
        """
        Returns the resolved absolute path to the .serena data folder for a project,
        applying placeholder substitution to ``project_serena_folder_location``
        without any fallback logic.
        :param project_root: the absolute path to the project root directory
        :return: the resolved absolute path to the project's .serena folder
        :raises SerenaConfigError: if the template contains an unknown placeholder
        """
        project_folder_name = Path(project_root).name
        placeholders = {
            "projectDir": str(project_root),
            "projectFolderName": project_folder_name,
        }
        return self._resolve_serena_folder_location(self.project_serena_folder_location, placeholders)
    def get_project_serena_folder(self, project_root: str | Path) -> str:
        """
        Resolves the location of the project's .serena data folder using fallback logic:
        1. If the folder exists at the configured path (``project_serena_folder_location``), use it.
        2. Otherwise, if it exists at the default location inside the project root, use that.
        3. If neither exists, return the configured path (for creation).
        :param project_root: the absolute path to the project root directory
        :return: the resolved absolute path to the .serena data folder
        :raises SerenaConfigError: if the configured template contains an unknown placeholder
        """
        configured_path = self.get_configured_project_serena_folder(project_root)
        if os.path.isdir(configured_path):
            return configured_path
        default_path = os.path.join(str(project_root), SERENA_MANAGED_DIR_NAME)
        if configured_path != default_path and os.path.isdir(default_path):
            return default_path
        return configured_path
    def get_project_yml_location(self, project_root: str | Path) -> str:
        """
        Returns the resolved absolute path to the project.yml configuration file,
        based on the resolved .serena data folder (with fallback logic).
        :param project_root: the absolute path to the project root directory
        :return: the resolved absolute path to the project's project.yml file
        """
        serena_folder = self.get_project_serena_folder(project_root)
        return os.path.join(serena_folder, ProjectConfig.SERENA_PROJECT_FILE)
    def propagate_settings(self) -> None:
        """
        Propagate settings from this configuration to individual components that are statically configured
        """
        # deferred import to avoid a circular dependency with the tools package
        from serena.tools import JetBrainsPluginClient
        JetBrainsPluginClient.set_server_address(self.jetbrains_plugin_server_address)
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/config/serena_config.py",
"license": "MIT License",
"lines": 859,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/serena/tools/cmd_tools.py | """
Tools supporting the execution of (external) commands
"""
import os.path
from serena.tools import Tool, ToolMarkerCanEdit
from serena.util.shell import execute_shell_command
class ExecuteShellCommandTool(Tool, ToolMarkerCanEdit):
    """
    Executes a shell command.
    """

    def apply(
        self,
        command: str,
        cwd: str | None = None,
        capture_stderr: bool = True,
        max_answer_chars: int = -1,
    ) -> str:
        """
        Execute a shell command and return its output. If there is a memory about suggested commands, read that first.
        Never execute unsafe shell commands!

        IMPORTANT: Do not use this tool to start
          * long-running processes (e.g. servers) that are not intended to terminate quickly,
          * processes that require user interaction.

        :param command: the shell command to execute
        :param cwd: the working directory to execute the command in. If None, the project root will be used.
        :param capture_stderr: whether to capture and return stderr output
        :param max_answer_chars: if the output is longer than this number of characters,
            no content will be returned. -1 means using the default value, don't adjust unless there is no other way to get the content
            required for the task.
        :return: a JSON object containing the command's stdout and optionally stderr output
        :raises FileNotFoundError: if the given working directory does not resolve to an existing directory
        """
        if cwd is None:
            _cwd = self.get_project_root()
        else:
            # resolve relative working directories against the project root
            _cwd = cwd if os.path.isabs(cwd) else os.path.join(self.get_project_root(), cwd)
            # validate both absolute and relative working directories up front
            # (previously, absolute paths were passed through unchecked and failed later with a less clear error)
            if not os.path.isdir(_cwd):
                raise FileNotFoundError(f"Specified working directory ({cwd}) is not an existing directory: {_cwd}")
        result = execute_shell_command(command, cwd=_cwd, capture_stderr=capture_stderr)
        result = result.json()
        return self._limit_length(result, max_answer_chars)
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/tools/cmd_tools.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
oraios/serena:src/serena/tools/config_tools.py | from serena.tools import Tool, ToolMarkerDoesNotRequireActiveProject, ToolMarkerOptional
class OpenDashboardTool(Tool, ToolMarkerOptional, ToolMarkerDoesNotRequireActiveProject):
    """
    Opens the Serena web dashboard in the default web browser.
    The dashboard provides logs, session information, and tool usage statistics.
    """

    def apply(self) -> str:
        """
        Opens the Serena web dashboard in the default web browser.
        """
        opened = self.agent.open_dashboard()
        dashboard_url = self.agent.get_dashboard_url()
        if opened:
            return f"Serena web dashboard has been opened in the user's default web browser: {dashboard_url}"
        return f"Serena web dashboard could not be opened automatically; tell the user to open it via {dashboard_url}"
class ActivateProjectTool(Tool, ToolMarkerDoesNotRequireActiveProject):
    """
    Activates a project based on the project name or path.
    """

    def apply(self, project: str) -> str:
        """
        Activates the project with the given name or path.

        :param project: the name of a registered project to activate or a path to a project directory
        """
        activated_project = self.agent.activate_project_from_path_or_name(project)
        message_parts = [
            activated_project.get_activation_message(),
            "\nIMPORTANT: If you have not yet read the 'Serena Instructions Manual', do it now before continuing!",
        ]
        return "".join(message_parts)
class RemoveProjectTool(Tool, ToolMarkerDoesNotRequireActiveProject, ToolMarkerOptional):
    """
    Removes a project from the Serena configuration.
    """

    def apply(self, project_name: str) -> str:
        """
        Removes a project from the Serena configuration.

        :param project_name: Name of the project to remove
        """
        config = self.agent.serena_config
        config.remove_project(project_name)
        return f"Successfully removed project '{project_name}' from configuration."
class SwitchModesTool(Tool, ToolMarkerOptional):
    """
    Activates modes by providing a list of their names
    """

    def apply(self, modes: list[str]) -> str:
        """
        Activates the desired modes, like ["editing", "interactive"] or ["planning", "one-shot"]

        :param modes: the names of the modes to activate
        """
        self.agent.set_modes(modes)

        # report the activated modes, their prompts, and the currently active tools back to the agent
        active_modes = self.agent.get_active_modes()
        mode_names = ", ".join(mode.name for mode in active_modes)
        mode_prompts = "\n".join(mode.prompt for mode in active_modes)
        tool_names = ", ".join(self.agent.get_active_tool_names())
        return f"Active modes: {mode_names}\n{mode_prompts}\nCurrently active tools: {tool_names}"
class GetCurrentConfigTool(Tool):
    """
    Prints the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
    """

    def apply(self) -> str:
        """
        Print the current configuration of the agent, including the active and available projects, tools, contexts, and modes.
        """
        overview = self.agent.get_current_config_overview()
        return overview
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/tools/config_tools.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
oraios/serena:src/serena/tools/file_tools.py | """
File and file system-related tools, specifically for
* listing directory contents
* reading files
* creating files
* editing at the file level
"""
import os
from collections import defaultdict
from fnmatch import fnmatch
from pathlib import Path
from typing import Literal
from serena.tools import SUCCESS_RESULT, EditedFileContext, Tool, ToolMarkerCanEdit, ToolMarkerOptional
from serena.util.file_system import scan_directory
from serena.util.text_utils import ContentReplacer, search_files
class ReadFileTool(Tool):
    """
    Reads a file within the project directory.
    """

    def apply(self, relative_path: str, start_line: int = 0, end_line: int | None = None, max_answer_chars: int = -1) -> str:
        """
        Reads the given file or a chunk of it. Generally, symbolic operations
        like find_symbol or find_referencing_symbols should be preferred if you know which symbols you are looking for.

        :param relative_path: the relative path to the file to read
        :param start_line: the 0-based index of the first line to be retrieved.
        :param end_line: the 0-based index of the last line to be retrieved (inclusive). If None, read until the end of the file.
        :param max_answer_chars: if the file (chunk) is longer than this number of characters,
            no content will be returned. Don't adjust unless there is really no other way to get the content
            required for the task.
        :return: the full text of the file at the given relative path
        """
        self.project.validate_relative_path(relative_path, require_not_ignored=True)

        # read the whole file, then cut out the requested line range
        lines = self.project.read_file(relative_path).splitlines()
        stop = len(lines) if end_line is None else end_line + 1
        chunk = "\n".join(lines[start_line:stop])
        return self._limit_length(chunk, max_answer_chars)
class CreateTextFileTool(Tool, ToolMarkerCanEdit):
    """
    Creates/overwrites a file in the project directory.
    """

    def apply(self, relative_path: str, content: str) -> str:
        """
        Write a new file or overwrite an existing file.

        :param relative_path: the relative path to the file to create
        :param content: the (appropriately encoded) content to write to the file
        :return: a message indicating success or failure
        """
        project_root = self.get_project_root()
        abs_path = (Path(project_root) / relative_path).resolve()

        will_overwrite_existing = abs_path.exists()
        if will_overwrite_existing:
            # only existing files are subject to the ignore-list validation
            self.project.validate_relative_path(relative_path, require_not_ignored=True)
        else:
            # FIX: this used to be an `assert`, which is stripped under `python -O`
            # and would silently disable the path-escape protection; raise explicitly instead.
            if not abs_path.is_relative_to(project_root):
                raise ValueError(f"Cannot create file outside of the project directory, got {relative_path=}")

        abs_path.parent.mkdir(parents=True, exist_ok=True)
        abs_path.write_text(content, encoding=self.project.project_config.encoding)
        answer = f"File created: {relative_path}."
        if will_overwrite_existing:
            answer += " Overwrote existing file."
        return answer
class ListDirTool(Tool):
    """
    Lists files and directories in the given directory (optionally with recursion).
    """

    def apply(self, relative_path: str, recursive: bool, skip_ignored_files: bool = False, max_answer_chars: int = -1) -> str:
        """
        Lists files and directories in the given directory (optionally with recursion).

        :param relative_path: the relative path to the directory to list; pass "." to scan the project root
        :param recursive: whether to scan subdirectories recursively
        :param skip_ignored_files: whether to skip files and directories that are ignored
        :param max_answer_chars: if the output is longer than this number of characters,
            no content will be returned. -1 means the default value from the config will be used.
            Don't adjust unless there is really no other way to get the content required for the task.
        :return: a JSON object with the names of directories and files within the given directory
        """
        # Return a helpful error object instead of failing validation when the directory is missing.
        if not self.project.relative_path_exists(relative_path):
            return self._to_json(
                {
                    "error": f"Directory not found: {relative_path}",
                    "project_root": self.get_project_root(),
                    "hint": "Check if the path is correct relative to the project root",
                }
            )

        self.project.validate_relative_path(relative_path, require_not_ignored=skip_ignored_files)

        # apply the project's ignore rules to both files and directories only when requested
        ignore_fn = self.project.is_ignored_path if skip_ignored_files else None
        dirs, files = scan_directory(
            os.path.join(self.get_project_root(), relative_path),
            relative_to=self.get_project_root(),
            recursive=recursive,
            is_ignored_dir=ignore_fn,
            is_ignored_file=ignore_fn,
        )
        return self._limit_length(self._to_json({"dirs": dirs, "files": files}), max_answer_chars)
class FindFileTool(Tool):
    """
    Finds files in the given relative paths
    """

    def apply(self, file_mask: str, relative_path: str) -> str:
        """
        Finds non-gitignored files matching the given file mask within the given relative path

        :param file_mask: the filename or file mask (using the wildcards * or ?) to search for
        :param relative_path: the relative path to the directory to search in; pass "." to scan the project root
        :return: a JSON object with the list of matching files
        """
        self.project.validate_relative_path(relative_path, require_not_ignored=True)

        # Implemented on top of scan_directory: a file is treated as "ignored" if it is
        # actually ignored by the project OR if its basename does not match the mask.
        def is_ignored_file(abs_path: str) -> bool:
            return self.project.is_ignored_path(abs_path) or not fnmatch(os.path.basename(abs_path), file_mask)

        _dirs, matching_files = scan_directory(
            path=os.path.join(self.get_project_root(), relative_path),
            recursive=True,
            is_ignored_dir=self.project.is_ignored_path,
            is_ignored_file=is_ignored_file,
            relative_to=self.get_project_root(),
        )
        return self._to_json({"files": matching_files})
class ReplaceContentTool(Tool, ToolMarkerCanEdit):
    """
    Replaces content in a file (optionally using regular expressions).
    """

    def apply(
        self,
        relative_path: str,
        needle: str,
        repl: str,
        mode: Literal["literal", "regex"],
        allow_multiple_occurrences: bool = False,
    ) -> str:
        r"""
        Replaces one or more occurrences of a given pattern in a file with new content.
        This is the preferred way to replace content in a file whenever the symbol-level
        tools are not appropriate.

        VERY IMPORTANT: The "regex" mode allows very large sections of code to be replaced without fully quoting them!
        Use a regex of the form "beginning.*?end-of-text-to-be-replaced" to be faster and more economical!
        ALWAYS try to use wildcards to avoid specifying the exact content to be replaced,
        especially if it spans several lines. Note that you cannot make mistakes, because if the regex should match
        multiple occurrences while you disabled `allow_multiple_occurrences`, an error will be returned, and you can retry
        with a revised regex.
        Therefore, using regex mode with suitable wildcards is usually the best choice!

        :param relative_path: the relative path to the file
        :param needle: the string or regex pattern to search for.
            If `mode` is "literal", this string will be matched exactly.
            If `mode` is "regex", this string will be treated as a regular expression (syntax of Python's `re` module,
            with flags DOTALL and MULTILINE enabled).
        :param repl: the replacement string (verbatim).
            If mode is "regex", the string can contain backreferences to matched groups in the needle regex,
            specified using the syntax $!1, $!2, etc. for groups 1, 2, etc.
        :param mode: either "literal" or "regex", specifying how the `needle` parameter is to be interpreted.
        :param allow_multiple_occurrences: whether to allow matching and replacing multiple occurrences.
            If false and multiple occurrences are found, an error will be returned
        """
        # The public tool entry point always enforces the ignore list.
        return self.replace_content(
            relative_path, needle, repl, mode=mode, allow_multiple_occurrences=allow_multiple_occurrences, require_not_ignored=True
        )

    def replace_content(
        self,
        relative_path: str,
        needle: str,
        repl: str,
        mode: Literal["literal", "regex"],
        allow_multiple_occurrences: bool = False,
        require_not_ignored: bool = True,
    ) -> str:
        """
        Performs the replacement, with additional options not exposed in the tool.
        This function can be used internally by other tools.
        """
        self.project.validate_relative_path(relative_path, require_not_ignored=require_not_ignored)
        with EditedFileContext(relative_path, self.create_code_editor()) as context:
            # read, transform, and write back within the editing context
            replacer = ContentReplacer(mode=mode, allow_multiple_occurrences=allow_multiple_occurrences)
            new_content = replacer.replace(context.get_original_content(), needle, repl)
            context.set_updated_content(new_content)
        return SUCCESS_RESULT
class DeleteLinesTool(Tool, ToolMarkerCanEdit, ToolMarkerOptional):
    """
    Deletes a range of lines within a file.
    """

    def apply(
        self,
        relative_path: str,
        start_line: int,
        end_line: int,
    ) -> str:
        """
        Deletes the given lines in the file.
        Requires that the same range of lines was previously read using the `read_file` tool to verify correctness
        of the operation.

        :param relative_path: the relative path to the file
        :param start_line: the 0-based index of the first line to be deleted
        :param end_line: the 0-based index of the last line to be deleted
        """
        # The code editor performs the actual in-file deletion.
        editor = self.create_code_editor()
        editor.delete_lines(relative_path, start_line, end_line)
        return SUCCESS_RESULT
class ReplaceLinesTool(Tool, ToolMarkerCanEdit, ToolMarkerOptional):
    """
    Replaces a range of lines within a file with new content.
    """

    def apply(
        self,
        relative_path: str,
        start_line: int,
        end_line: int,
        content: str,
    ) -> str:
        """
        Replaces the given range of lines in the given file.
        Requires that the same range of lines was previously read using the `read_file` tool to verify correctness
        of the operation.

        :param relative_path: the relative path to the file
        :param start_line: the 0-based index of the first line to be replaced
        :param end_line: the 0-based index of the last line to be replaced (inclusive)
        :param content: the content to insert in place of the replaced lines
        """
        # ensure the inserted content ends with a newline so the following line is not merged into it
        if not content.endswith("\n"):
            content += "\n"
        # implemented as delete followed by insert; abort early if the deletion failed
        result = self.agent.get_tool(DeleteLinesTool).apply(relative_path, start_line, end_line)
        if result != SUCCESS_RESULT:
            return result
        self.agent.get_tool(InsertAtLineTool).apply(relative_path, start_line, content)
        return SUCCESS_RESULT
class InsertAtLineTool(Tool, ToolMarkerCanEdit, ToolMarkerOptional):
    """
    Inserts content at a given line in a file.
    """

    def apply(
        self,
        relative_path: str,
        line: int,
        content: str,
    ) -> str:
        """
        Inserts the given content at the given line in the file, pushing existing content of the line down.
        In general, symbolic insert operations like insert_after_symbol or insert_before_symbol should be preferred if you know which
        symbol you are looking for.
        However, this can also be useful for small targeted edits of the body of a longer symbol (without replacing the entire body).

        :param relative_path: the relative path to the file
        :param line: the 0-based index of the line to insert content at
        :param content: the content to be inserted
        """
        # normalize: inserted content must be newline-terminated so the pushed-down line stays separate
        if not content.endswith("\n"):
            content = content + "\n"
        editor = self.create_code_editor()
        editor.insert_at_line(relative_path, line, content)
        return SUCCESS_RESULT
class SearchForPatternTool(Tool):
    """
    Performs a search for a pattern in the project.
    """

    def apply(
        self,
        substring_pattern: str,
        context_lines_before: int = 0,
        context_lines_after: int = 0,
        paths_include_glob: str = "",
        paths_exclude_glob: str = "",
        relative_path: str = "",
        restrict_search_to_code_files: bool = False,
        max_answer_chars: int = -1,
    ) -> str:
        """
        Offers a flexible search for arbitrary patterns in the codebase, including the
        possibility to search in non-code files.
        Generally, symbolic operations like find_symbol or find_referencing_symbols
        should be preferred if you know which symbols you are looking for.

        Pattern Matching Logic:
            For each match, the returned result will contain the full lines where the
            substring pattern is found, as well as optionally some lines before and after it. The pattern will be compiled with
            DOTALL, meaning that the dot will match all characters including newlines.
            This also means that it never makes sense to have .* at the beginning or end of the pattern,
            but it may make sense to have it in the middle for complex patterns.
            If a pattern matches multiple lines, all those lines will be part of the match.
            Be careful to not use greedy quantifiers unnecessarily, it is usually better to use non-greedy quantifiers like .*? to avoid
            matching too much content.

        File Selection Logic:
            The files in which the search is performed can be restricted very flexibly.
            Using `restrict_search_to_code_files` is useful if you are only interested in code symbols (i.e., those
            symbols that can be manipulated with symbolic tools like find_symbol).
            You can also restrict the search to a specific file or directory,
            and provide glob patterns to include or exclude certain files on top of that.
            The globs are matched against relative file paths from the project root (not to the `relative_path` parameter that
            is used to further restrict the search).
            Smartly combining the various restrictions allows you to perform very targeted searches.

        :param substring_pattern: Regular expression for a substring pattern to search for
        :param context_lines_before: Number of lines of context to include before each match
        :param context_lines_after: Number of lines of context to include after each match
        :param paths_include_glob: optional glob pattern specifying files to include in the search.
            Matches against relative file paths from the project root (e.g., "*.py", "src/**/*.ts").
            Supports standard glob patterns (*, ?, [seq], **, etc.) and brace expansion {a,b,c}.
            Only matches files, not directories. If left empty, all non-ignored files will be included.
        :param paths_exclude_glob: optional glob pattern specifying files to exclude from the search.
            Matches against relative file paths from the project root (e.g., "*test*", "**/*_generated.py").
            Supports standard glob patterns (*, ?, [seq], **, etc.) and brace expansion {a,b,c}.
            Takes precedence over paths_include_glob. Only matches files, not directories. If left empty, no files are excluded.
        :param relative_path: only subpaths of this path (relative to the repo root) will be analyzed. If a path to a single
            file is passed, only that will be searched. The path must exist, otherwise a `FileNotFoundError` is raised.
        :param max_answer_chars: if the output is longer than this number of characters,
            no content will be returned.
            -1 means the default value from the config will be used.
            Don't adjust unless there is really no other way to get the content
            required for the task. Instead, if the output is too long, you should
            make a stricter query.
        :param restrict_search_to_code_files: whether to restrict the search to only those files where
            analyzed code symbols can be found. Otherwise, will search all non-ignored files.
            Set this to True if your search is only meant to discover code that can be manipulated with symbolic tools.
            For example, for finding classes or methods from a name pattern.
            Setting to False is a better choice if you also want to search in non-code files, like in html or yaml files,
            which is why it is the default.
        :return: A mapping of file paths to lists of matched consecutive lines.
        """
        # FIX: normalize the glob parameters once so both search branches behave identically
        # (previously only the code-files branch stripped surrounding whitespace).
        paths_include_glob = paths_include_glob.strip()
        paths_exclude_glob = paths_exclude_glob.strip()

        abs_path = os.path.join(self.get_project_root(), relative_path)
        if not os.path.exists(abs_path):
            raise FileNotFoundError(f"Relative path {relative_path} does not exist.")

        if restrict_search_to_code_files:
            matches = self.project.search_source_files_for_pattern(
                pattern=substring_pattern,
                relative_path=relative_path,
                context_lines_before=context_lines_before,
                context_lines_after=context_lines_after,
                paths_include_glob=paths_include_glob,
                paths_exclude_glob=paths_exclude_glob,
            )
        else:
            if os.path.isfile(abs_path):
                rel_paths_to_search = [relative_path]
            else:
                _dirs, rel_paths_to_search = scan_directory(
                    path=abs_path,
                    recursive=True,
                    is_ignored_dir=self.project.is_ignored_path,
                    is_ignored_file=self.project.is_ignored_path,
                    relative_to=self.get_project_root(),
                )
            # TODO (maybe): not super efficient to walk through the files again and filter if glob patterns are provided
            #   but it probably never matters and this version required no further refactoring
            matches = search_files(
                rel_paths_to_search,
                substring_pattern,
                file_reader=self.project.read_file,
                root_path=self.get_project_root(),
                paths_include_glob=paths_include_glob,
                paths_exclude_glob=paths_exclude_glob,
            )

        # group matches by file
        file_to_matches: dict[str, list[str]] = defaultdict(list)
        for match in matches:
            assert match.source_file_path is not None
            file_to_matches[match.source_file_path].append(match.to_display_string())
        result = self._to_json(file_to_matches)
        return self._limit_length(result, max_answer_chars)
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/tools/file_tools.py",
"license": "MIT License",
"lines": 362,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
oraios/serena:src/serena/tools/memory_tools.py | from typing import Literal
from serena.tools import Tool, ToolMarkerCanEdit
class WriteMemoryTool(Tool, ToolMarkerCanEdit):
    """
    Write some information (utf-8-encoded) about this project that can be useful for future tasks to a memory in md format.
    The memory name should be meaningful.
    """

    def apply(self, memory_name: str, content: str, max_chars: int = -1) -> str:
        """
        Write information (utf-8-encoded) about this project that can be useful for future tasks to a memory in md format.
        The memory name should be meaningful and can include "/" to organize into topics (e.g., "auth/login/logic").
        If explicitly instructed, use the "global/" prefix for writing a memory that is shared across projects
        (e.g., "global/java/style_guide")

        :param max_chars: the maximum number of characters to write. By default, determined by the config,
            change only if instructed to do so.
        """
        # NOTE: utf-8 encoding is configured in the MemoriesManager
        # -1 means "use the configured default limit"
        effective_max = self.agent.serena_config.default_max_tool_answer_chars if max_chars == -1 else max_chars
        if len(content) > effective_max:
            raise ValueError(
                f"Content for {memory_name} is too long. Max length is {effective_max} characters. Please make the content shorter."
            )
        return self.memories_manager.save_memory(memory_name, content, is_tool_context=True)
class ReadMemoryTool(Tool):
    """
    Read the content of a memory file. This tool should only be used if the information
    is relevant to the current task. You can infer whether the information
    is relevant from the memory file name.
    You should not read the same memory file multiple times in the same conversation.
    """

    def apply(self, memory_name: str) -> str:
        """
        Reads the contents of a memory. Should only be used if the information
        is likely to be relevant to the current task, inferring relevance from the memory name.
        """
        # Loading is fully delegated to the memories manager.
        memory_content = self.memories_manager.load_memory(memory_name)
        return memory_content
class ListMemoriesTool(Tool):
    """
    List available memories. Any memory can be read using the `read_memory` tool.
    """

    def apply(self, topic: str = "") -> str:
        """
        Lists available memories, optionally filtered by topic.
        """
        # Serialize the manager's listing as JSON for the caller.
        listing = self.memories_manager.list_memories(topic)
        return self._to_json(listing.to_dict())
class DeleteMemoryTool(Tool, ToolMarkerCanEdit):
    """
    Delete a memory file. Should only happen if a user asks for it explicitly,
    for example by saying that the information retrieved from a memory file is no longer correct
    or no longer relevant for the project.
    """

    def apply(self, memory_name: str) -> str:
        """
        Delete a memory, only call if instructed explicitly or permission was granted by the user.
        """
        # Deletion is delegated to the memories manager.
        deletion_result = self.memories_manager.delete_memory(memory_name, is_tool_context=True)
        return deletion_result
class RenameMemoryTool(Tool, ToolMarkerCanEdit):
    """
    Renames or moves a memory. Moving between project and global scope is supported
    (e.g., renaming "global/foo" to "bar" moves it from global to project scope).
    """

    def apply(self, old_name: str, new_name: str) -> str:
        """
        Rename or move a memory, use "/" in the name to organize into topics.
        The "global" topic should only be used if explicitly instructed.
        """
        # Moving/renaming is delegated to the memories manager.
        move_result = self.memories_manager.move_memory(old_name, new_name, is_tool_context=True)
        return move_result
class EditMemoryTool(Tool, ToolMarkerCanEdit):
    """
    Replaces content matching a regular expression in a memory.
    """

    def apply(
        self,
        memory_name: str,
        needle: str,
        repl: str,
        mode: Literal["literal", "regex"],
        allow_multiple_occurrences: bool = False,
    ) -> str:
        r"""
        Replaces content matching a regular expression in a memory.

        :param memory_name: the name of the memory
        :param needle: the string or regex pattern to search for.
            If `mode` is "literal", this string will be matched exactly.
            If `mode` is "regex", this string will be treated as a regular expression (syntax of Python's `re` module,
            with flags DOTALL and MULTILINE enabled).
        :param repl: the replacement string (verbatim).
        :param mode: either "literal" or "regex", specifying how the `needle` parameter is to be interpreted.
        :param allow_multiple_occurrences: whether to allow matching and replacing multiple occurrences.
            If false and multiple occurrences are found, an error will be returned.
        """
        # The replacement itself is implemented by the memories manager.
        return self.memories_manager.edit_memory(
            memory_name,
            needle,
            repl,
            mode,
            allow_multiple_occurrences,
            is_tool_context=True,
        )
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/tools/memory_tools.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
oraios/serena:src/serena/tools/tools_base.py | import inspect
import json
from abc import ABC
from collections.abc import Iterable
from dataclasses import dataclass
from types import TracebackType
from typing import TYPE_CHECKING, Any, Protocol, Self, TypeVar, cast
from mcp import Implementation
from mcp.server.fastmcp import Context
from mcp.server.fastmcp.utilities.func_metadata import FuncMetadata, func_metadata
from sensai.util import logging
from sensai.util.string import dict_string
from serena.config.serena_config import LanguageBackend
from serena.project import MemoriesManager, Project
from serena.prompt_factory import PromptFactory
from serena.util.class_decorators import singleton
from serena.util.inspection import iter_subclasses
from solidlsp.ls_exceptions import SolidLSPException
if TYPE_CHECKING:
from serena.agent import SerenaAgent
from serena.code_editor import CodeEditor
from serena.symbol import LanguageServerSymbolRetriever
log = logging.getLogger(__name__)

# Generic type variable for use by helpers in this module
T = TypeVar("T")

# Canonical return value used by tools to indicate successful execution
SUCCESS_RESULT = "OK"
class Component(ABC):
def __init__(self, agent: "SerenaAgent"):
self.agent = agent
def get_project_root(self) -> str:
"""
:return: the root directory of the active project, raises a ValueError if no active project configuration is set
"""
return self.project.project_root
@property
def prompt_factory(self) -> PromptFactory:
return self.agent.prompt_factory
@property
def memories_manager(self) -> "MemoriesManager":
return self.project.memories_manager
def create_language_server_symbol_retriever(self) -> "LanguageServerSymbolRetriever":
from serena.symbol import LanguageServerSymbolRetriever
assert self.agent.get_language_backend().is_lsp(), "Language server symbol retriever can only be created for LSP language backend"
language_server_manager = self.agent.get_language_server_manager_or_raise()
return LanguageServerSymbolRetriever(language_server_manager, agent=self.agent)
@property
def project(self) -> Project:
return self.agent.get_active_project_or_raise()
def create_code_editor(self) -> "CodeEditor":
from ..code_editor import JetBrainsCodeEditor, LanguageServerCodeEditor
match self.agent.get_language_backend():
case LanguageBackend.LSP:
return LanguageServerCodeEditor(self.create_language_server_symbol_retriever(), project_config=self.project.project_config)
case LanguageBackend.JETBRAINS:
return JetBrainsCodeEditor(project=self.project)
case _:
raise ValueError
class ToolMarker:
    """
    Base class for tool markers, which are mixin classes used to tag Tool subclasses
    with capabilities or requirements.
    """
class ToolMarkerCanEdit(ToolMarker):
    """
    Marker class for all tools that can perform editing operations on files
    (checked via `Tool.can_edit`).
    """
class ToolMarkerDoesNotRequireActiveProject(ToolMarker):
    """
    Marker class for tools that can be applied without an active project
    (tools without this marker return an error from `apply_ex` when no project is active).
    """

    pass
class ToolMarkerOptional(ToolMarker):
    """
    Marker class for optional tools that are disabled by default.
    """
class ToolMarkerSymbolicRead(ToolMarker):
    """
    Marker class for tools that perform symbol read operations
    (checked via `Tool.is_symbolic`).
    """
class ToolMarkerSymbolicEdit(ToolMarkerCanEdit):
    """
    Marker class for tools that perform symbolic edit operations;
    implies the ability to edit (inherits ToolMarkerCanEdit).
    """
class ApplyMethodProtocol(Protocol):
    """Callable protocol for the apply method of a tool."""

    def __call__(self, *args: Any, **kwargs: Any) -> str:
        # Tools return their result to the caller as a string.
        pass
class Tool(Component):
    # NOTE: each tool should implement the apply method, which is then used in
    # the central method of the Tool class `apply_ex`.
    # Failure to do so will result in a RuntimeError at tool execution time.
    # The apply method is not declared as part of the base Tool interface since we cannot
    # know the signature of the (input parameters of the) method in advance.
    #
    # The docstring and types of the apply method are used to generate the tool description
    # (which is used by the LLM, so a good description is important)
    # and to validate the tool call arguments.

    _last_tool_call_client_str: str | None = None
    """We can only get the client info from within a tool call. Each tool call will update this variable."""

    @classmethod
    def set_last_tool_call_client_str(cls, client_str: str | None) -> None:
        cls._last_tool_call_client_str = client_str

    @classmethod
    def get_last_tool_call_client_str(cls) -> str | None:
        return cls._last_tool_call_client_str

    @classmethod
    def get_name_from_cls(cls) -> str:
        """
        :return: the tool name derived from the class name: the "Tool" suffix is dropped
            and the remainder is converted to snake_case
        """
        name = cls.__name__
        if name.endswith("Tool"):
            name = name[:-4]
        # convert to snake_case
        name = "".join(["_" + c.lower() if c.isupper() else c for c in name]).lstrip("_")
        return name

    def get_name(self) -> str:
        return self.get_name_from_cls()

    def get_apply_fn(self) -> ApplyMethodProtocol:
        """
        :return: the tool's `apply` method; raises RuntimeError if it is not defined
        """
        # FIX: `getattr` without a default raises AttributeError instead of returning None,
        # which made the None check (and the intended RuntimeError) unreachable.
        apply_fn = getattr(self, "apply", None)
        if apply_fn is None:
            raise RuntimeError(f"apply not defined in {self}. Did you forget to implement it?")
        return apply_fn

    @classmethod
    def can_edit(cls) -> bool:
        """
        Returns whether this tool can perform editing operations on code.

        :return: True if the tool can edit code, False otherwise
        """
        return issubclass(cls, ToolMarkerCanEdit)

    @classmethod
    def get_tool_description(cls) -> str:
        """
        :return: the stripped class docstring, or an empty string if there is none
        """
        docstring = cls.__doc__
        if docstring is None:
            return ""
        return docstring.strip()

    @classmethod
    def get_apply_docstring_from_cls(cls) -> str:
        """Get the docstring for the apply method from the class (static metadata).
        Needed for creating MCP tools in a separate process without running into serialization issues.
        """
        # First try to get from __dict__ to handle dynamic docstring changes
        if "apply" in cls.__dict__:
            apply_fn = cls.__dict__["apply"]
        else:
            # Fall back to getattr for inherited methods
            apply_fn = getattr(cls, "apply", None)

        if apply_fn is None:
            raise AttributeError(f"apply method not defined in {cls}. Did you forget to implement it?")
        docstring = apply_fn.__doc__
        if not docstring:
            raise AttributeError(f"apply method has no (or empty) docstring in {cls}. Did you forget to implement it?")
        return docstring.strip()

    def get_apply_docstring(self) -> str:
        """Gets the docstring for the tool application, used by the MCP server."""
        return self.get_apply_docstring_from_cls()

    def get_apply_fn_metadata(self) -> FuncMetadata:
        """Gets the metadata for the tool application function, used by the MCP server."""
        return self.get_apply_fn_metadata_from_cls()

    @classmethod
    def get_apply_fn_metadata_from_cls(cls) -> FuncMetadata:
        """Get the metadata for the apply method from the class (static metadata).
        Needed for creating MCP tools in a separate process without running into serialization issues.
        """
        # First try to get from __dict__ to handle dynamic docstring changes
        if "apply" in cls.__dict__:
            apply_fn = cls.__dict__["apply"]
        else:
            # Fall back to getattr for inherited methods
            apply_fn = getattr(cls, "apply", None)

        if apply_fn is None:
            raise AttributeError(f"apply method not defined in {cls}. Did you forget to implement it?")
        return func_metadata(apply_fn, skip_names=["self", "cls"])

    def _log_tool_application(self, frame: Any) -> None:
        """
        Logs the tool call with its parameters, read from the given stack frame's locals.
        """
        params = {}
        ignored_params = {"self", "log_call", "catch_exceptions", "args", "apply_fn"}
        for param, value in frame.f_locals.items():
            if param in ignored_params:
                continue
            if param == "kwargs":
                params.update(value)
            else:
                params[param] = value
        log.info(f"{self.get_name_from_cls()}: {dict_string(params)}")

    def _limit_length(self, result: str, max_answer_chars: int) -> str:
        """
        Replaces the result with an explanatory message if it exceeds the length limit.

        :param result: the tool result
        :param max_answer_chars: the maximum number of characters; -1 uses the configured default
        :return: the (possibly replaced) result
        """
        if max_answer_chars == -1:
            max_answer_chars = self.agent.serena_config.default_max_tool_answer_chars
        if max_answer_chars <= 0:
            raise ValueError(f"Must be positive or the default (-1), got: {max_answer_chars=}")
        if (n_chars := len(result)) > max_answer_chars:
            result = (
                f"The answer is too long ({n_chars} characters). "
                + "Please try a more specific tool query or raise the max_answer_chars parameter."
            )
        return result

    def is_active(self) -> bool:
        return self.agent.tool_is_active(self.get_name())

    def is_readonly(self) -> bool:
        return not self.can_edit()

    def is_symbolic(self) -> bool:
        return issubclass(self.__class__, ToolMarkerSymbolicRead) or issubclass(self.__class__, ToolMarkerSymbolicEdit)

    def apply_ex(self, log_call: bool = True, catch_exceptions: bool = True, mcp_ctx: Context | None = None, **kwargs) -> str:  # type: ignore
        """
        Applies the tool with logging and exception handling, using the given keyword arguments
        """
        # opportunistically record the client info (only available from within a tool call)
        if mcp_ctx is not None:
            try:
                client_params = mcp_ctx.session.client_params
                if client_params is not None:
                    client_info = cast(Implementation, client_params.clientInfo)
                    client_str = client_info.title if client_info.title else client_info.name + " " + client_info.version
                    if client_str != self.get_last_tool_call_client_str():
                        log.debug(f"Updating client info: {client_info}")
                        self.set_last_tool_call_client_str(client_str)
            except BaseException as e:
                log.info(f"Failed to get client info: {e}.")

        def task() -> str:
            apply_fn = self.get_apply_fn()

            try:
                if not self.is_active():
                    return f"Error: Tool '{self.get_name_from_cls()}' is not active. Active tools: {self.agent.get_active_tool_names()}"
            except Exception as e:
                return f"RuntimeError while checking if tool {self.get_name_from_cls()} is active: {e}"

            if log_call:
                self._log_tool_application(inspect.currentframe())
            try:
                # check whether the tool requires an active project and language server
                if not isinstance(self, ToolMarkerDoesNotRequireActiveProject):
                    if self.agent.get_active_project() is None:
                        return (
                            "Error: No active project. Ask the user to provide the project path or to select a project from this list of known projects: "
                            + f"{self.agent.serena_config.project_names}"
                        )

                # apply the actual tool; if the language server died, restart it and retry once
                try:
                    result = apply_fn(**kwargs)
                except SolidLSPException as e:
                    if e.is_language_server_terminated():
                        affected_language = e.get_affected_language()
                        if affected_language is not None:
                            log.error(
                                f"Language server terminated while executing tool ({e}). Restarting the language server and retrying ..."
                            )
                            self.agent.get_language_server_manager_or_raise().restart_language_server(affected_language)
                            result = apply_fn(**kwargs)
                        else:
                            log.error(
                                f"Language server terminated while executing tool ({e}), but affected language is unknown. Not retrying."
                            )
                            raise
                    else:
                        raise

                # record tool usage
                self.agent.record_tool_usage(kwargs, result, self)
            except Exception as e:
                if not catch_exceptions:
                    raise
                msg = f"Error executing tool: {e.__class__.__name__} - {e}"
                log.error(f"Error executing tool: {e}", exc_info=e)
                result = msg

            if log_call:
                log.info(f"Result: {result}")

            # persist language server caches (best-effort)
            try:
                ls_manager = self.agent.get_language_server_manager()
                if ls_manager is not None:
                    ls_manager.save_all_caches()
            except Exception as e:
                log.error(f"Error saving language server cache: {e}")

            return result

        # execute the tool in the agent's task executor, with timeout
        try:
            task_exec = self.agent.issue_task(task, name=self.__class__.__name__)
            return task_exec.result(timeout=self.agent.serena_config.tool_timeout)
        except Exception as e:  # typically TimeoutError (other exceptions caught in task)
            msg = f"Error: {e.__class__.__name__} - {e}"
            log.error(msg)
            return msg

    @staticmethod
    def _to_json(x: Any) -> str:
        return json.dumps(x, ensure_ascii=False)
class EditedFileContext:
    """
    Context manager for editing a single file via a `CodeEditor`.

    On entry, the file is opened for editing and its current content can be read
    via `get_original_content`. Call `set_updated_content` to stage the new content;
    when the context is exited without an exception, the staged content is written
    back to the file.
    """

    def __init__(self, relative_path: str, code_editor: "CodeEditor"):
        self._relative_path = relative_path
        self._code_editor = code_editor
        self._edited_file: CodeEditor.EditedFile | None = None
        self._edited_file_context: Any = None

    def __enter__(self) -> Self:
        # delegate to the code editor's own context manager, keeping a handle to it
        # so that __exit__ can forward the exit call (and any exception info) to it
        ctx = self._code_editor.edited_file_context(self._relative_path)
        edited_file = ctx.__enter__()
        self._edited_file_context = ctx
        self._edited_file = edited_file
        return self

    def get_original_content(self) -> str:
        """
        :return: the content of the file before any modifications.
        """
        assert self._edited_file is not None
        return self._edited_file.get_contents()

    def set_updated_content(self, content: str) -> None:
        """
        Stages the updated content of the file; it will be written back to the file
        when the context is exited without an exception.

        :param content: the updated content of the file
        """
        assert self._edited_file is not None
        self._edited_file.set_contents(content)

    def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None:
        assert self._edited_file_context is not None
        # forward exception info so the underlying context can decide whether to persist changes
        self._edited_file_context.__exit__(exc_type, exc_value, traceback)
@dataclass(kw_only=True)
class RegisteredTool:
    """Registry entry pairing a tool class with its registration metadata."""

    # the concrete Tool subclass being registered
    tool_class: type[Tool]
    # whether the tool is disabled by default (i.e. marked via ToolMarkerOptional)
    is_optional: bool
    # the unique name under which the tool is registered
    tool_name: str
# packages scanned for tool implementations; only Tool subclasses whose module lies in one of these packages are registered
tool_packages = ["serena.tools"]
@singleton
class ToolRegistry:
    """Singleton registry of all concrete tool classes found in the known tool packages."""

    def __init__(self) -> None:
        self._tool_dict: dict[str, RegisteredTool] = {}

        def is_concrete_tool(c: type) -> bool:
            # a tool class is concrete iff it defines `apply` itself (not merely inherits it)
            return "apply" in c.__dict__

        for cls in iter_subclasses(Tool, inclusion_predicate=is_concrete_tool):
            if not any(cls.__module__.startswith(pkg) for pkg in tool_packages):
                continue
            name = cls.get_name_from_cls()
            if name in self._tool_dict:
                raise ValueError(f"Duplicate tool name found: {name}. Tool classes must have unique names.")
            self._tool_dict[name] = RegisteredTool(
                tool_class=cls, is_optional=issubclass(cls, ToolMarkerOptional), tool_name=name
            )

    def get_tool_class_by_name(self, tool_name: str) -> type[Tool]:
        try:
            return self._tool_dict[tool_name].tool_class
        except KeyError:
            raise ValueError(f"Tool named '{tool_name}' not found.") from None

    def get_all_tool_classes(self) -> list[type[Tool]]:
        return [entry.tool_class for entry in self._tool_dict.values()]

    def get_tool_classes_default_enabled(self) -> list[type[Tool]]:
        """
        :return: the list of tool classes that are enabled by default (i.e. non-optional tools).
        """
        return [entry.tool_class for entry in self._tool_dict.values() if not entry.is_optional]

    def get_tool_classes_optional(self) -> list[type[Tool]]:
        """
        :return: the list of tool classes that are optional (i.e. disabled by default).
        """
        return [entry.tool_class for entry in self._tool_dict.values() if entry.is_optional]

    def get_tool_names_default_enabled(self) -> list[str]:
        """
        :return: the list of tool names that are enabled by default (i.e. non-optional tools).
        """
        return [entry.tool_name for entry in self._tool_dict.values() if not entry.is_optional]

    def get_tool_names_optional(self) -> list[str]:
        """
        :return: the list of tool names that are optional (i.e. disabled by default).
        """
        return [entry.tool_name for entry in self._tool_dict.values() if entry.is_optional]

    def get_tool_names(self) -> list[str]:
        """
        :return: the list of all tool names.
        """
        return list(self._tool_dict)

    def print_tool_overview(
        self, tools: Iterable[type[Tool] | Tool] | None = None, include_optional: bool = False, only_optional: bool = False
    ) -> None:
        """
        Print a summary of the tools. If no tools are passed, a summary of the selection of tools (all, default or only optional) is printed.
        """
        if tools is None:
            if only_optional:
                tools = self.get_tool_classes_optional()
            elif include_optional:
                tools = self.get_all_tool_classes()
            else:
                tools = self.get_tool_classes_default_enabled()
        # de-duplicate by name (last wins), then print in alphabetical order
        tools_by_name: dict[str, type[Tool] | Tool] = {t.get_name_from_cls(): t for t in tools}
        for tool_name in sorted(tools_by_name):
            print(f" * `{tool_name}`: {tools_by_name[tool_name].get_tool_description().strip()}")

    def is_valid_tool_name(self, tool_name: str) -> bool:
        """
        :return: True if a tool with the given name is registered, False otherwise.
        """
        return tool_name in self._tool_dict
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/tools/tools_base.py",
"license": "MIT License",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/serena/tools/workflow_tools.py | """
Tools supporting the general workflow of the agent
"""
import platform
from serena.tools import Tool, ToolMarkerDoesNotRequireActiveProject, ToolMarkerOptional
class CheckOnboardingPerformedTool(Tool):
    """
    Checks whether project onboarding was already performed.
    """

    def apply(self) -> str:
        """
        Checks whether project onboarding was already performed.
        You should always call this tool before beginning to actually work on the project/after activating a project.
        """
        # onboarding is considered performed as soon as at least one project memory exists
        num_memories = len(self.memories_manager.list_project_memories())
        if num_memories == 0:
            msg = (
                "Onboarding not performed yet (no memories available). "
                "You should perform onboarding by calling the `onboarding` tool before proceeding with the task. "
            )
        else:
            # Not reporting the list of memories here, as they were already reported at project activation
            # (with the system prompt if the project was activated at startup)
            msg = (
                f"Onboarding was already performed: {num_memories} project memories are available. "
                "Consider reading memories if they appear relevant to the task at hand."
            )
        return msg + " If you have not read the 'Serena Instructions Manual', do so now."
class OnboardingTool(Tool):
    """
    Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building).
    """

    def apply(self) -> str:
        """
        Call this tool if onboarding was not performed yet.
        You will call this tool at most once per conversation.
        :return: instructions on how to create the onboarding information
        """
        # platform.system() yields e.g. "Linux", "Darwin" or "Windows"; passed through to the prompt factory
        return self.prompt_factory.create_onboarding_prompt(system=platform.system())
class ThinkAboutCollectedInformationTool(Tool, ToolMarkerOptional):
    """
    Thinking tool for pondering the completeness of collected information.
    """

    def apply(self) -> str:
        """
        Think about the collected information and whether it is sufficient and relevant.
        This tool should ALWAYS be called after you have completed a non-trivial sequence of searching steps like
        find_symbol, find_referencing_symbols, search_files_for_pattern, read_file, etc.
        """
        # purely delegates to the prompt factory; the returned canned prompt is the tool's entire output
        # NOTE(review): apply() docstrings presumably double as the tool descriptions surfaced to clients — treat them as user-facing text
        return self.prompt_factory.create_think_about_collected_information()
class ThinkAboutTaskAdherenceTool(Tool, ToolMarkerOptional):
    """
    Thinking tool for determining whether the agent is still on track with the current task.
    """

    def apply(self) -> str:
        """
        Think about the task at hand and whether you are still on track.
        Especially important if the conversation has been going on for a while and there
        has been a lot of back and forth.
        This tool should ALWAYS be called before you insert, replace, or delete code.
        """
        # stateless delegation: the prompt factory produces the full reflection prompt
        return self.prompt_factory.create_think_about_task_adherence()
class ThinkAboutWhetherYouAreDoneTool(Tool, ToolMarkerOptional):
    """
    Thinking tool for determining whether the task is truly completed.
    """

    def apply(self) -> str:
        """
        Whenever you feel that you are done with what the user has asked for, it is important to call this tool.
        """
        # stateless delegation to the prompt factory
        return self.prompt_factory.create_think_about_whether_you_are_done()
class SummarizeChangesTool(Tool, ToolMarkerOptional):
    """
    Provides instructions for summarizing the changes made to the codebase.
    """

    def apply(self) -> str:
        """
        Summarize the changes you have made to the codebase.
        This tool should always be called after you have fully completed any non-trivial coding task,
        but only after the think_about_whether_you_are_done call.
        """
        # returns instructions (a canned prompt), not an actual summary; summarizing is done by the calling agent
        return self.prompt_factory.create_summarize_changes()
class PrepareForNewConversationTool(Tool):
    """
    Provides instructions for preparing for a new conversation (in order to continue with the necessary context).
    """

    def apply(self) -> str:
        """
        Instructions for preparing for a new conversation. This tool should only be called on explicit user request.
        """
        # stateless delegation to the prompt factory
        return self.prompt_factory.create_prepare_for_new_conversation()
class InitialInstructionsTool(Tool, ToolMarkerDoesNotRequireActiveProject):
    """
    Provides instructions on how to use the Serena toolbox.
    Should only be used in settings where the system prompt is not read automatically by the client.
    NOTE: Some MCP clients (including Claude Desktop) do not read the system prompt automatically!
    """

    def apply(self) -> str:
        """
        Provides the 'Serena Instructions Manual', which contains essential information on how to use the Serena toolbox.
        IMPORTANT: If you have not yet read the manual, call this tool immediately after you are given your task by the user,
        as it will critically inform you!
        """
        # the manual is the agent's system prompt; usable without an active project (see marker base class)
        return self.agent.create_system_prompt()
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/tools/workflow_tools.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
oraios/serena:src/serena/util/exception.py | import os
import sys
from serena.agent import log
def is_headless_environment() -> bool:
    """
    Detect if we're running in a headless environment where GUI operations would fail.

    Returns True if:
    - No DISPLAY variable on Linux/Unix
    - Running in SSH session
    - Running in WSL without X server
    - Running in Docker container
    """
    # GUI generally works on Windows regardless of environment variables
    if sys.platform == "win32":
        return False

    env = os.environ

    # X11 requires DISPLAY; an empty or missing value means no GUI can be shown
    if not env.get("DISPLAY"):
        return True

    # SSH sessions are treated as headless
    if env.get("SSH_CONNECTION") or env.get("SSH_CLIENT"):
        return True

    # common CI systems and containers
    if env.get("CI") or env.get("CONTAINER") or os.path.exists("/.dockerenv"):
        return True

    # WSL: even with DISPLAY set, an X server might not be running
    # (simplified check — could be improved; os.uname only exists on Unix-like systems)
    if hasattr(os, "uname") and "microsoft" in os.uname().release.lower():
        return True

    return False
def show_fatal_exception_safe(e: Exception) -> None:
    """
    Shows the given exception in the GUI log viewer on the main thread and ensures that the exception is logged or at
    least printed to stderr.

    :param e: the fatal exception to report
    """
    # Log the error and print it to stderr; this happens unconditionally, before any GUI attempt
    log.error(f"Fatal exception: {e}", exc_info=e)
    print(f"Fatal exception: {e}", file=sys.stderr)
    # Don't attempt GUI in headless environments
    if is_headless_environment():
        log.debug("Skipping GUI error display in headless environment")
        return
    # attempt to show the error in the GUI
    try:
        # NOTE: The import can fail on macOS if Tk is not available (depends on Python interpreter installation, which uv
        # used as a base); while tkinter as such is always available, its dependencies can be unavailable on macOS.
        from serena.gui_log_viewer import show_fatal_exception

        show_fatal_exception(e)
    except Exception as gui_error:
        # best-effort: a failure to display the dialog must never mask the original fatal error
        log.debug(f"Failed to show GUI error dialog: {gui_error}")
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/util/exception.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:test/solidlsp/csharp/test_csharp_basic.py | import os
import tempfile
from pathlib import Path
from typing import cast
from unittest.mock import Mock, patch
import pytest
from sensai.util import logging
from serena.util.logging import SuspendedLoggersContext
from solidlsp import SolidLanguageServer
from solidlsp.language_servers.csharp_language_server import (
CSharpLanguageServer,
breadth_first_file_scan,
find_solution_or_project_file,
)
from solidlsp.ls_config import Language, LanguageServerConfig
from solidlsp.ls_utils import SymbolUtils
from solidlsp.settings import SolidLSPSettings
@pytest.mark.csharp
class TestCSharpLanguageServer:
    """LSP feature tests (symbols, references, hover) against the C# test repository."""

    @pytest.mark.parametrize("language_server", [Language.CSHARP], indirect=True)
    def test_find_symbol(self, language_server: SolidLanguageServer) -> None:
        """Test finding symbols in the full symbol tree."""
        symbols = language_server.request_full_symbol_tree()
        assert SymbolUtils.symbol_tree_contains_name(symbols, "Program"), "Program class not found in symbol tree"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "Calculator"), "Calculator class not found in symbol tree"
        assert SymbolUtils.symbol_tree_contains_name(symbols, "Add"), "Add method not found in symbol tree"

    @pytest.mark.parametrize("language_server", [Language.CSHARP], indirect=True)
    def test_get_document_symbols(self, language_server: SolidLanguageServer) -> None:
        """Test getting document symbols from a C# file."""
        file_path = os.path.join("Program.cs")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Check that we have symbols
        assert len(symbols) > 0
        # Flatten the symbols if they're nested
        if isinstance(symbols[0], list):
            symbols = symbols[0]
        # Look for expected classes (LSP SymbolKind 5 == Class)
        class_names = [s.get("name") for s in symbols if s.get("kind") == 5]  # 5 is class
        assert "Program" in class_names
        assert "Calculator" in class_names

    @pytest.mark.parametrize("language_server", [Language.CSHARP], indirect=True)
    def test_find_referencing_symbols(self, language_server: SolidLanguageServer) -> None:
        """Test finding references using symbol selection range."""
        file_path = os.path.join("Program.cs")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        add_symbol = None
        # Handle nested symbol structure
        symbol_list = symbols[0] if symbols and isinstance(symbols[0], list) else symbols
        for sym in symbol_list:
            # Symbol names are normalized to base form (e.g., "Add" not "Add(int, int) : int")
            if sym.get("name") == "Add":
                add_symbol = sym
                break
        assert add_symbol is not None, "Could not find 'Add' method symbol in Program.cs"
        sel_start = add_symbol["selectionRange"]["start"]
        # NOTE(review): the +1 presumably nudges the position onto the identifier — confirm against the server's column convention
        refs = language_server.request_references(file_path, sel_start["line"], sel_start["character"] + 1)
        assert any(
            "Program.cs" in ref.get("relativePath", "") for ref in refs
        ), "Program.cs should reference Add method (tried all positions in selectionRange)"

    @pytest.mark.parametrize("language_server", [Language.CSHARP], indirect=True)
    def test_nested_namespace_symbols(self, language_server: SolidLanguageServer) -> None:
        """Test getting symbols from nested namespace."""
        file_path = os.path.join("Models", "Person.cs")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Check that we have symbols
        assert len(symbols) > 0
        # Flatten the symbols if they're nested
        if isinstance(symbols[0], list):
            symbols = symbols[0]
        # Check that we have the Person class (LSP SymbolKind 5 == Class)
        assert any(s.get("name") == "Person" and s.get("kind") == 5 for s in symbols)
        # Check for properties and methods (names are normalized to base form)
        symbol_names = [s.get("name") for s in symbols]
        assert "Name" in symbol_names, "Name property not found"
        assert "Age" in symbol_names, "Age property not found"
        assert "Email" in symbol_names, "Email property not found"
        assert "ToString" in symbol_names, "ToString method not found"
        assert "IsAdult" in symbol_names, "IsAdult method not found"

    @pytest.mark.parametrize("language_server", [Language.CSHARP], indirect=True)
    def test_find_referencing_symbols_across_files(self, language_server: SolidLanguageServer) -> None:
        """Test finding references to Calculator.Subtract method across files."""
        # First, find the Subtract method in Program.cs
        file_path = os.path.join("Program.cs")
        symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
        # Flatten the symbols if they're nested
        symbol_list = symbols[0] if symbols and isinstance(symbols[0], list) else symbols
        subtract_symbol = None
        for sym in symbol_list:
            # Symbol names are normalized to base form (e.g., "Subtract" not "Subtract(int, int) : int")
            if sym.get("name") == "Subtract":
                subtract_symbol = sym
                break
        assert subtract_symbol is not None, "Could not find 'Subtract' method symbol in Program.cs"
        # Get references to the Subtract method
        sel_start = subtract_symbol["selectionRange"]["start"]
        refs = language_server.request_references(file_path, sel_start["line"], sel_start["character"] + 1)
        # Should find references where the method is called
        ref_files = cast(list[str], [ref.get("relativePath", "") for ref in refs])
        print(f"Found references: {refs}")
        print(f"Reference files: {ref_files}")
        # Check that we have reference in Models/Person.cs where Calculator.Subtract is called
        # Note: New Roslyn version doesn't include the definition itself as a reference (more correct behavior)
        assert any(
            os.path.join("Models", "Person.cs") in ref_file for ref_file in ref_files
        ), "Should find reference in Models/Person.cs where Calculator.Subtract is called"
        assert len(refs) > 0, "Should find at least one reference"
        # check for a second time, since the first call may trigger initialization and change the state of the LS
        refs_second_call = language_server.request_references(file_path, sel_start["line"], sel_start["character"] + 1)
        assert refs_second_call == refs, "Second call to request_references should return the same results"

    @pytest.mark.parametrize("language_server", [Language.CSHARP], indirect=True)
    def test_hover_includes_type_information(self, language_server: SolidLanguageServer) -> None:
        """Test that hover information is available and includes type information."""
        file_path = os.path.join("Models", "Person.cs")
        # Open the file first
        language_server.open_file(file_path)
        # Test 1: Hover over the Name property (line 6, column 23 - on "Name")
        # Source: public string Name { get; set; }
        hover_info = language_server.request_hover(file_path, 6, 23)
        # Verify hover returns content
        assert hover_info is not None, "Hover should return information for Name property"
        assert isinstance(hover_info, dict), "Hover should be a dict"
        assert "contents" in hover_info, "Hover should have contents"
        contents = hover_info["contents"]
        assert isinstance(contents, dict), "Hover contents should be a dict"
        assert "value" in contents, "Hover contents should have value"
        hover_text = contents["value"]
        # Verify the hover contains property signature with type
        assert "string" in hover_text, f"Hover should include 'string' type, got: {hover_text}"
        assert "Name" in hover_text, f"Hover should include 'Name' property name, got: {hover_text}"
        # Test 2: Hover over the IsAdult method (line 22, column 21 - on "IsAdult")
        # Source: public bool IsAdult()
        hover_method = language_server.request_hover(file_path, 22, 21)
        # Verify method hover returns content
        assert hover_method is not None, "Hover should return information for IsAdult method"
        assert isinstance(hover_method, dict), "Hover should be a dict"
        assert "contents" in hover_method, "Hover should have contents"
        contents = hover_method["contents"]
        assert isinstance(contents, dict), "Hover contents should be a dict"
        assert "value" in contents, "Hover contents should have value"
        method_hover_text = contents["value"]
        # Verify the hover contains method signature with return type
        assert "bool" in method_hover_text, f"Hover should include 'bool' return type, got: {method_hover_text}"
        assert "IsAdult" in method_hover_text, f"Hover should include 'IsAdult' method name, got: {method_hover_text}"
@pytest.mark.csharp
class TestCSharpSolutionProjectOpening:
    """Test C# language server solution and project opening functionality."""

    def test_breadth_first_file_scan(self):
        """Test that breadth_first_file_scan finds files in breadth-first order."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            # Create test directory structure
            (temp_path / "file1.txt").touch()
            (temp_path / "subdir1").mkdir()
            (temp_path / "subdir1" / "file2.txt").touch()
            (temp_path / "subdir2").mkdir()
            (temp_path / "subdir2" / "file3.txt").touch()
            (temp_path / "subdir1" / "subdir3").mkdir()
            (temp_path / "subdir1" / "subdir3" / "file4.txt").touch()
            # Scan files
            files = list(breadth_first_file_scan(str(temp_path)))
            filenames = [os.path.basename(f) for f in files]
            # Should find all files
            assert len(files) == 4
            assert "file1.txt" in filenames
            assert "file2.txt" in filenames
            assert "file3.txt" in filenames
            assert "file4.txt" in filenames
            # file1.txt should be found first (breadth-first)
            assert filenames[0] == "file1.txt"

    def test_find_solution_or_project_file_with_solution(self):
        """Test that find_solution_or_project_file prefers .sln files."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            # Create both .sln and .csproj files
            solution_file = temp_path / "MySolution.sln"
            project_file = temp_path / "MyProject.csproj"
            solution_file.touch()
            project_file.touch()
            result = find_solution_or_project_file(str(temp_path))
            # Should prefer .sln file
            assert result == str(solution_file)

    def test_find_solution_or_project_file_with_project_only(self):
        """Test that find_solution_or_project_file falls back to .csproj files."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            # Create only .csproj file
            project_file = temp_path / "MyProject.csproj"
            project_file.touch()
            result = find_solution_or_project_file(str(temp_path))
            # Should return .csproj file
            assert result == str(project_file)

    def test_find_solution_or_project_file_with_nested_files(self):
        """Test that find_solution_or_project_file finds files in subdirectories."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            # Create nested structure
            (temp_path / "src").mkdir()
            solution_file = temp_path / "src" / "MySolution.sln"
            solution_file.touch()
            result = find_solution_or_project_file(str(temp_path))
            # Should find nested .sln file
            assert result == str(solution_file)

    def test_find_solution_or_project_file_returns_none_when_no_files(self):
        """Test that find_solution_or_project_file returns None when no .sln or .csproj files exist."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            # Create some other files (wrong extensions must be ignored)
            (temp_path / "readme.txt").touch()
            (temp_path / "other.cs").touch()
            result = find_solution_or_project_file(str(temp_path))
            # Should return None
            assert result is None

    def test_find_solution_or_project_file_prefers_solution_breadth_first(self):
        """Test that solution files are preferred even when deeper in the tree."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            # Create .csproj at root and .sln in subdirectory
            project_file = temp_path / "MyProject.csproj"
            project_file.touch()
            (temp_path / "src").mkdir()
            solution_file = temp_path / "src" / "MySolution.sln"
            solution_file.touch()
            result = find_solution_or_project_file(str(temp_path))
            # Should still prefer .sln file even though it's deeper
            assert result == str(solution_file)

    @patch("solidlsp.language_servers.csharp_language_server.CSharpLanguageServer.DependencyProvider._ensure_server_installed")
    @patch("solidlsp.language_servers.csharp_language_server.CSharpLanguageServer._start_server")
    def test_csharp_language_server_logs_solution_discovery(self, mock_start_server, mock_ensure_server_installed):
        """Test that CSharpLanguageServer logs solution/project discovery during initialization."""
        # Mock the server installation so no real dotnet toolchain is required
        mock_ensure_server_installed.return_value = ("/usr/bin/dotnet", "/path/to/server.dll")
        # Create test directory with solution file
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            solution_file = temp_path / "TestSolution.sln"
            solution_file.touch()
            mock_config = Mock(spec=LanguageServerConfig)
            mock_config.ignored_paths = []
            # Create CSharpLanguageServer instance
            mock_settings = Mock(spec=SolidLSPSettings)
            mock_settings.ls_resources_dir = "/tmp/test_ls_resources"
            mock_settings.project_data_path = str(temp_path / "project_data")
            with SuspendedLoggersContext():
                logging.getLogger().setLevel(logging.DEBUG)
                with logging.MemoryLoggerContext() as mem_log:
                    CSharpLanguageServer(mock_config, str(temp_path), mock_settings)
            # Verify that logger was called with solution file discovery
            expected_log_msg = f"Found solution/project file: {solution_file}"
            assert expected_log_msg in mem_log.get_log()

    @patch("solidlsp.language_servers.csharp_language_server.CSharpLanguageServer.DependencyProvider._ensure_server_installed")
    @patch("solidlsp.language_servers.csharp_language_server.CSharpLanguageServer._start_server")
    def test_csharp_language_server_logs_no_solution_warning(self, mock_start_server, mock_ensure_server_installed):
        """Test that CSharpLanguageServer logs warning when no solution/project files are found."""
        # Mock the server installation
        mock_ensure_server_installed.return_value = ("/usr/bin/dotnet", "/path/to/server.dll")
        # Create empty test directory
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            # Mock logger to capture log messages
            mock_config = Mock(spec=LanguageServerConfig)
            mock_config.ignored_paths = []
            mock_settings = Mock(spec=SolidLSPSettings)
            mock_settings.ls_resources_dir = "/tmp/test_ls_resources"
            mock_settings.project_data_path = str(temp_path / "project_data")
            # Create CSharpLanguageServer instance
            with SuspendedLoggersContext():
                logging.getLogger().setLevel(logging.DEBUG)
                with logging.MemoryLoggerContext() as mem_log:
                    CSharpLanguageServer(mock_config, str(temp_path), mock_settings)
            # Verify that logger was called with warning about no solution/project files
            expected_log_msg = "No .sln/.slnx or .csproj file found, language server will attempt auto-discovery"
            assert expected_log_msg in mem_log.get_log()

    def test_solution_and_project_opening_with_real_test_repo(self):
        """Test solution and project opening with the actual C# test repository."""
        # Get the C# test repo path
        test_repo_path = Path(__file__).parent.parent.parent / "resources" / "repos" / "csharp" / "test_repo"
        if not test_repo_path.exists():
            pytest.skip("C# test repository not found")
        # Test solution/project discovery in the real test repo
        result = find_solution_or_project_file(str(test_repo_path))
        # Should find either .sln or .csproj file
        assert result is not None
        assert result.endswith((".sln", ".csproj"))
        # Verify the file actually exists
        assert os.path.exists(result)
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/csharp/test_csharp_basic.py",
"license": "MIT License",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/solidlsp/clojure/test_clojure_basic.py | import pytest
from serena.project import Project
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_types import UnifiedSymbolInformation
from test.conftest import language_tests_enabled
from . import CORE_PATH, UTILS_PATH
@pytest.mark.skipif(not language_tests_enabled(Language.CLOJURE), reason="Clojure tests are disabled")
@pytest.mark.clojure
class TestLanguageServerBasics:
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_basic_definition(self, language_server: SolidLanguageServer):
"""
Test finding definition of 'greet' function call in core.clj
"""
result = language_server.request_definition(CORE_PATH, 20, 12) # Position of 'greet' in (greet "World")
assert isinstance(result, list)
assert len(result) >= 1
definition = result[0]
assert definition["relativePath"] == CORE_PATH
assert definition["range"]["start"]["line"] == 2, "Should find the definition of greet function at line 2"
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_cross_file_references(self, language_server: SolidLanguageServer):
"""
Test finding references to 'multiply' function from core.clj
"""
result = language_server.request_references(CORE_PATH, 12, 6)
assert isinstance(result, list) and len(result) >= 2, "Should find definition + usage in utils.clj"
usage_found = any(
item["relativePath"] == UTILS_PATH and item["range"]["start"]["line"] == 6 # multiply usage in calculate-area
for item in result
)
assert usage_found, "Should find multiply usage in utils.clj"
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_completions(self, language_server: SolidLanguageServer):
with language_server.open_file(UTILS_PATH):
# After "core/" in calculate-area
result = language_server.request_completions(UTILS_PATH, 6, 8)
assert isinstance(result, list) and len(result) > 0
completion_texts = [item["completionText"] for item in result]
assert any("multiply" in text for text in completion_texts), "Should find 'multiply' function in completions after 'core/'"
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_document_symbols(self, language_server: SolidLanguageServer):
symbols, _ = language_server.request_document_symbols(CORE_PATH).get_all_symbols_and_roots()
assert isinstance(symbols, list) and len(symbols) >= 4, "greet, add, multiply, -main functions"
# Check that we find the expected function symbols
symbol_names = [symbol["name"] for symbol in symbols]
expected_functions = ["greet", "add", "multiply", "-main"]
for func_name in expected_functions:
assert func_name in symbol_names, f"Should find {func_name} function in symbols"
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_hover(self, language_server: SolidLanguageServer):
"""Test hover on greet function"""
result = language_server.request_hover(CORE_PATH, 2, 7)
assert result is not None, "Hover should return information for greet function"
assert "contents" in result
# Should contain function signature or documentation
contents = result["contents"]
if isinstance(contents, str):
assert "greet" in contents.lower()
elif isinstance(contents, dict) and "value" in contents:
assert "greet" in contents["value"].lower()
else:
assert False, f"Unexpected contents format: {type(contents)}"
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_workspace_symbols(self, language_server: SolidLanguageServer):
# Search for functions containing "add"
result = language_server.request_workspace_symbol("add")
assert isinstance(result, list) and len(result) > 0, "Should find at least one symbol containing 'add'"
# Should find the 'add' function
symbol_names = [symbol["name"] for symbol in result]
assert any("add" in name.lower() for name in symbol_names), f"Should find 'add' function in symbols: {symbol_names}"
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_namespace_functions(self, language_server: SolidLanguageServer):
"""Test definition lookup for core/greet usage in utils.clj"""
# Position of 'greet' in core/greet call
result = language_server.request_definition(UTILS_PATH, 11, 25)
assert isinstance(result, list)
assert len(result) >= 1
definition = result[0]
assert definition["relativePath"] == CORE_PATH, "Should find the definition of greet in core.clj"
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_request_references_with_content(self, language_server: SolidLanguageServer):
"""Test references to multiply function with content"""
references = language_server.request_references(CORE_PATH, 12, 6)
result = [
language_server.retrieve_content_around_line(ref1["relativePath"], ref1["range"]["start"]["line"], 3, 0) for ref1 in references
]
assert result is not None, "Should find references with content"
assert isinstance(result, list)
assert len(result) >= 2, "Should find definition + usage in utils.clj"
for ref in result:
assert ref.source_file_path is not None, "Each reference should have a source file path"
content_str = ref.to_display_string()
assert len(content_str) > 0, "Content should not be empty"
# Verify we find the reference in utils.clj with context
utils_refs = [ref for ref in result if ref.source_file_path and "utils.clj" in ref.source_file_path]
assert len(utils_refs) > 0, "Should find reference in utils.clj"
# The context should contain the calculate-area function
utils_content = utils_refs[0].to_display_string()
assert "calculate-area" in utils_content
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_request_full_symbol_tree(self, language_server: SolidLanguageServer):
"""Test retrieving the full symbol tree for project overview
We just check that we find some expected symbols.
"""
result = language_server.request_full_symbol_tree()
assert result is not None, "Should return symbol tree"
assert isinstance(result, list), "Symbol tree should be a list"
assert len(result) > 0, "Should find symbols in the project"
def traverse_symbols(symbols, indent=0):
"""Recursively traverse symbols to print their structure"""
info = []
for s in symbols:
name = getattr(s, "name", "NO_NAME")
kind = getattr(s, "kind", "NO_KIND")
info.append(f"{' ' * indent}Symbol: {name}, Kind: {kind}")
if hasattr(s, "children") and s.children:
info.append(" " * indent + "Children:")
info.extend(traverse_symbols(s.children, indent + 2))
return info
def list_all_symbols(symbols: list[UnifiedSymbolInformation]):
found = []
for symbol in symbols:
found.append(symbol["name"])
found.extend(list_all_symbols(symbol["children"]))
return found
all_symbol_names = list_all_symbols(result)
expected_symbols = ["greet", "add", "multiply", "-main", "calculate-area", "format-greeting", "sum-list"]
found_expected = [name for name in expected_symbols if any(name in symbol_name for symbol_name in all_symbol_names)]
if len(found_expected) < 7:
pytest.fail(
f"Expected to find at least 3 symbols from {expected_symbols}, but found: {found_expected}.\n"
f"All symbol names: {all_symbol_names}\n"
f"Symbol tree structure:\n{traverse_symbols(result)}"
)
@pytest.mark.parametrize("language_server", [Language.CLOJURE], indirect=True)
def test_request_referencing_symbols(self, language_server: SolidLanguageServer):
"""Test finding symbols that reference a given symbol
Finds references to the 'multiply' function.
"""
result = language_server.request_referencing_symbols(CORE_PATH, 12, 6)
assert isinstance(result, list) and len(result) > 0, "Should find at least one referencing symbol"
found_relevant_references = False
for ref in result:
if hasattr(ref, "symbol") and "calculate-area" in ref.symbol["name"]:
found_relevant_references = True
break
assert found_relevant_references, f"Should have found calculate-area referencing multiply, but got: {result}"
class TestProjectBasics:
    @pytest.mark.parametrize("project", [Language.CLOJURE], indirect=True)
    def test_retrieve_content_around_line(self, project: Project):
        """Content windows around known lines contain the expected definitions."""
        # Around line 2: the greet definition, including its defn form.
        around_greet = project.retrieve_content_around_line(CORE_PATH, 2, 2)
        assert around_greet is not None, "Should retrieve content around line 2"
        greet_text = around_greet.to_display_string()
        assert "greet" in greet_text, "Should contain the greet function definition"
        assert "defn" in greet_text, "Should contain defn keyword"
        # Around line 13: the multiply function.
        around_multiply = project.retrieve_content_around_line(CORE_PATH, 13, 1)
        assert around_multiply is not None, "Should retrieve content around line 13"
        assert "multiply" in around_multiply.to_display_string(), "Should contain multiply function"

    @pytest.mark.parametrize("project", [Language.CLOJURE], indirect=True)
    def test_search_files_for_pattern(self, project: Project) -> None:
        """Regex search over source files locates greet and the :require forms."""
        greet_matches = project.search_source_files_for_pattern("defn.*greet")
        assert greet_matches is not None, "Pattern search should return results"
        assert len(greet_matches) > 0, "Should find at least one match for 'defn.*greet'"
        core_matches = [m for m in greet_matches if m.source_file_path and "core.clj" in m.source_file_path]
        assert len(core_matches) > 0, "Should find greet function in core.clj"
        require_matches = project.search_source_files_for_pattern(":require")
        assert require_matches is not None, "Should find require statements"
        utils_matches = [m for m in require_matches if m.source_file_path and "utils.clj" in m.source_file_path]
        assert len(utils_matches) > 0, "Should find require statement in utils.clj"
| {
"repo_id": "oraios/serena",
"file_path": "test/solidlsp/clojure/test_clojure_basic.py",
"license": "MIT License",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:src/serena/util/git.py | import logging
from sensai.util.git import GitStatus
from .shell import subprocess_check_output
log = logging.getLogger(__name__)
def get_git_status() -> GitStatus | None:
    """Return the git status of the current working directory, or None if it cannot be determined.

    The status captures the HEAD commit hash and whether there are unstaged,
    staged-but-uncommitted, or untracked changes. Returns None when any git
    invocation fails (e.g. git is not installed or the cwd is not a repository).
    """
    try:
        commit_hash = subprocess_check_output(["git", "rev-parse", "HEAD"])
        unstaged = bool(subprocess_check_output(["git", "diff", "--name-only"]))
        staged = bool(subprocess_check_output(["git", "diff", "--staged", "--name-only"]))
        untracked = bool(subprocess_check_output(["git", "ls-files", "--others", "--exclude-standard"]))
        return GitStatus(
            commit=commit_hash, has_unstaged_changes=unstaged, has_staged_uncommitted_changes=staged, has_untracked_files=untracked
        )
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit and hide
        # the cause entirely; catch ordinary errors only and record why it failed.
        log.debug("Could not determine git status", exc_info=True)
        return None
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/util/git.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:src/solidlsp/ls_request.py | from typing import TYPE_CHECKING, Any, Union
from solidlsp.lsp_protocol_handler import lsp_types
if TYPE_CHECKING:
from .ls_process import LanguageServerProcess
class LanguageServerRequest:
    """Typed wrappers for the JSON-RPC requests of the Language Server Protocol.

    Every method serializes its LSP method name and params through the given
    handler's ``send_request`` and returns the raw response. No validation is
    performed here; the per-method docstrings mirror the LSP specification.
    """
    def __init__(self, handler: "LanguageServerProcess"):
        # The process wrapper through which all requests are sent.
        self.handler = handler
    def _send_request(self, method: str, params: Any | None = None) -> Any:
        # Single funnel used by every wrapper method below.
        return self.handler.send_request(method, params)
    def implementation(self, params: lsp_types.ImplementationParams) -> Union["lsp_types.Definition", list["lsp_types.LocationLink"], None]:
        """A request to resolve the implementation locations of a symbol at a given text
        document position. The request's parameter is of type [TextDocumentPositionParams]
        (#TextDocumentPositionParams) the response is of type {@link Definition} or a
        Thenable that resolves to such.
        """
        return self._send_request("textDocument/implementation", params)
    def type_definition(
        self, params: lsp_types.TypeDefinitionParams
    ) -> Union["lsp_types.Definition", list["lsp_types.LocationLink"], None]:
        """A request to resolve the type definition locations of a symbol at a given text
        document position. The request's parameter is of type [TextDocumentPositionParams]
        (#TextDocumentPositionParams) the response is of type {@link Definition} or a
        Thenable that resolves to such.
        """
        return self._send_request("textDocument/typeDefinition", params)
    def document_color(self, params: lsp_types.DocumentColorParams) -> list["lsp_types.ColorInformation"]:
        """A request to list all color symbols found in a given text document. The request's
        parameter is of type {@link DocumentColorParams} the
        response is of type {@link ColorInformation ColorInformation[]} or a Thenable
        that resolves to such.
        """
        return self._send_request("textDocument/documentColor", params)
    def color_presentation(self, params: lsp_types.ColorPresentationParams) -> list["lsp_types.ColorPresentation"]:
        """A request to list all presentation for a color. The request's
        parameter is of type {@link ColorPresentationParams} the
        response is of type {@link ColorInformation ColorInformation[]} or a Thenable
        that resolves to such.
        """
        return self._send_request("textDocument/colorPresentation", params)
    def folding_range(self, params: lsp_types.FoldingRangeParams) -> list["lsp_types.FoldingRange"] | None:
        """A request to provide folding ranges in a document. The request's
        parameter is of type {@link FoldingRangeParams}, the
        response is of type {@link FoldingRangeList} or a Thenable
        that resolves to such.
        """
        return self._send_request("textDocument/foldingRange", params)
    def declaration(self, params: lsp_types.DeclarationParams) -> Union["lsp_types.Declaration", list["lsp_types.LocationLink"], None]:
        """A request to resolve the type definition locations of a symbol at a given text
        document position. The request's parameter is of type [TextDocumentPositionParams]
        (#TextDocumentPositionParams) the response is of type {@link Declaration}
        or a typed array of {@link DeclarationLink} or a Thenable that resolves
        to such.
        """
        return self._send_request("textDocument/declaration", params)
    def selection_range(self, params: lsp_types.SelectionRangeParams) -> list["lsp_types.SelectionRange"] | None:
        """A request to provide selection ranges in a document. The request's
        parameter is of type {@link SelectionRangeParams}, the
        response is of type {@link SelectionRange SelectionRange[]} or a Thenable
        that resolves to such.
        """
        return self._send_request("textDocument/selectionRange", params)
    def prepare_call_hierarchy(self, params: lsp_types.CallHierarchyPrepareParams) -> list["lsp_types.CallHierarchyItem"] | None:
        """A request to result a `CallHierarchyItem` in a document at a given position.
        Can be used as an input to an incoming or outgoing call hierarchy.
        @since 3.16.0
        """
        return self._send_request("textDocument/prepareCallHierarchy", params)
    def incoming_calls(self, params: lsp_types.CallHierarchyIncomingCallsParams) -> list["lsp_types.CallHierarchyIncomingCall"] | None:
        """A request to resolve the incoming calls for a given `CallHierarchyItem`.
        @since 3.16.0
        """
        return self._send_request("callHierarchy/incomingCalls", params)
    def outgoing_calls(self, params: lsp_types.CallHierarchyOutgoingCallsParams) -> list["lsp_types.CallHierarchyOutgoingCall"] | None:
        """A request to resolve the outgoing calls for a given `CallHierarchyItem`.
        @since 3.16.0
        """
        return self._send_request("callHierarchy/outgoingCalls", params)
    def semantic_tokens_full(self, params: lsp_types.SemanticTokensParams) -> Union["lsp_types.SemanticTokens", None]:
        """@since 3.16.0"""
        return self._send_request("textDocument/semanticTokens/full", params)
    def semantic_tokens_delta(
        self, params: lsp_types.SemanticTokensDeltaParams
    ) -> Union["lsp_types.SemanticTokens", "lsp_types.SemanticTokensDelta", None]:
        """@since 3.16.0"""
        return self._send_request("textDocument/semanticTokens/full/delta", params)
    def semantic_tokens_range(self, params: lsp_types.SemanticTokensRangeParams) -> Union["lsp_types.SemanticTokens", None]:
        """@since 3.16.0"""
        return self._send_request("textDocument/semanticTokens/range", params)
    def linked_editing_range(self, params: lsp_types.LinkedEditingRangeParams) -> Union["lsp_types.LinkedEditingRanges", None]:
        """A request to provide ranges that can be edited together.
        @since 3.16.0
        """
        return self._send_request("textDocument/linkedEditingRange", params)
    def will_create_files(self, params: lsp_types.CreateFilesParams) -> Union["lsp_types.WorkspaceEdit", None]:
        """The will create files request is sent from the client to the server before files are actually
        created as long as the creation is triggered from within the client.
        @since 3.16.0
        """
        return self._send_request("workspace/willCreateFiles", params)
    def will_rename_files(self, params: lsp_types.RenameFilesParams) -> Union["lsp_types.WorkspaceEdit", None]:
        """The will rename files request is sent from the client to the server before files are actually
        renamed as long as the rename is triggered from within the client.
        @since 3.16.0
        """
        return self._send_request("workspace/willRenameFiles", params)
    def will_delete_files(self, params: lsp_types.DeleteFilesParams) -> Union["lsp_types.WorkspaceEdit", None]:
        """The did delete files notification is sent from the client to the server when
        files were deleted from within the client.
        @since 3.16.0
        """
        return self._send_request("workspace/willDeleteFiles", params)
    def moniker(self, params: lsp_types.MonikerParams) -> list["lsp_types.Moniker"] | None:
        """A request to get the moniker of a symbol at a given text document position.
        The request parameter is of type {@link TextDocumentPositionParams}.
        The response is of type {@link Moniker Moniker[]} or `null`.
        """
        return self._send_request("textDocument/moniker", params)
    def prepare_type_hierarchy(self, params: lsp_types.TypeHierarchyPrepareParams) -> list["lsp_types.TypeHierarchyItem"] | None:
        """A request to result a `TypeHierarchyItem` in a document at a given position.
        Can be used as an input to a subtypes or supertypes type hierarchy.
        @since 3.17.0
        """
        return self._send_request("textDocument/prepareTypeHierarchy", params)
    def type_hierarchy_supertypes(self, params: lsp_types.TypeHierarchySupertypesParams) -> list["lsp_types.TypeHierarchyItem"] | None:
        """A request to resolve the supertypes for a given `TypeHierarchyItem`.
        @since 3.17.0
        """
        return self._send_request("typeHierarchy/supertypes", params)
    def type_hierarchy_subtypes(self, params: lsp_types.TypeHierarchySubtypesParams) -> list["lsp_types.TypeHierarchyItem"] | None:
        """A request to resolve the subtypes for a given `TypeHierarchyItem`.
        @since 3.17.0
        """
        return self._send_request("typeHierarchy/subtypes", params)
    def inline_value(self, params: lsp_types.InlineValueParams) -> list["lsp_types.InlineValue"] | None:
        """A request to provide inline values in a document. The request's parameter is of
        type {@link InlineValueParams}, the response is of type
        {@link InlineValue InlineValue[]} or a Thenable that resolves to such.
        @since 3.17.0
        """
        return self._send_request("textDocument/inlineValue", params)
    def inlay_hint(self, params: lsp_types.InlayHintParams) -> list["lsp_types.InlayHint"] | None:
        """A request to provide inlay hints in a document. The request's parameter is of
        type {@link InlayHintsParams}, the response is of type
        {@link InlayHint InlayHint[]} or a Thenable that resolves to such.
        @since 3.17.0
        """
        return self._send_request("textDocument/inlayHint", params)
    def resolve_inlay_hint(self, params: lsp_types.InlayHint) -> "lsp_types.InlayHint":
        """A request to resolve additional properties for an inlay hint.
        The request's parameter is of type {@link InlayHint}, the response is
        of type {@link InlayHint} or a Thenable that resolves to such.
        @since 3.17.0
        """
        return self._send_request("inlayHint/resolve", params)
    def text_document_diagnostic(self, params: lsp_types.DocumentDiagnosticParams) -> "lsp_types.DocumentDiagnosticReport":
        """The document diagnostic request definition.
        @since 3.17.0
        """
        return self._send_request("textDocument/diagnostic", params)
    def workspace_diagnostic(self, params: lsp_types.WorkspaceDiagnosticParams) -> "lsp_types.WorkspaceDiagnosticReport":
        """The workspace diagnostic request definition.
        @since 3.17.0
        """
        return self._send_request("workspace/diagnostic", params)
    def initialize(self, params: lsp_types.InitializeParams) -> "lsp_types.InitializeResult":
        """The initialize request is sent from the client to the server.
        It is sent once as the request after starting up the server.
        The requests parameter is of type {@link InitializeParams}
        the response if of type {@link InitializeResult} of a Thenable that
        resolves to such.
        """
        return self._send_request("initialize", params)
    def shutdown(self) -> None:
        """A shutdown request is sent from the client to the server.
        It is sent once when the client decides to shutdown the
        server. The only notification that is sent after a shutdown request
        is the exit event.
        """
        return self._send_request("shutdown")
    def will_save_wait_until(self, params: lsp_types.WillSaveTextDocumentParams) -> list["lsp_types.TextEdit"] | None:
        """A document will save request is sent from the client to the server before
        the document is actually saved. The request can return an array of TextEdits
        which will be applied to the text document before it is saved. Please note that
        clients might drop results if computing the text edits took too long or if a
        server constantly fails on this request. This is done to keep the save fast and
        reliable.
        """
        return self._send_request("textDocument/willSaveWaitUntil", params)
    def completion(self, params: lsp_types.CompletionParams) -> Union[list["lsp_types.CompletionItem"], "lsp_types.CompletionList", None]:
        """Request to request completion at a given text document position. The request's
        parameter is of type {@link TextDocumentPosition} the response
        is of type {@link CompletionItem CompletionItem[]} or {@link CompletionList}
        or a Thenable that resolves to such.
        The request can delay the computation of the {@link CompletionItem.detail `detail`}
        and {@link CompletionItem.documentation `documentation`} properties to the `completionItem/resolve`
        request. However, properties that are needed for the initial sorting and filtering, like `sortText`,
        `filterText`, `insertText`, and `textEdit`, must not be changed during resolve.
        """
        return self._send_request("textDocument/completion", params)
    def resolve_completion_item(self, params: lsp_types.CompletionItem) -> "lsp_types.CompletionItem":
        """Request to resolve additional information for a given completion item.The request's
        parameter is of type {@link CompletionItem} the response
        is of type {@link CompletionItem} or a Thenable that resolves to such.
        """
        return self._send_request("completionItem/resolve", params)
    def hover(self, params: lsp_types.HoverParams) -> Union["lsp_types.Hover", None]:
        """Request to request hover information at a given text document position. The request's
        parameter is of type {@link TextDocumentPosition} the response is of
        type {@link Hover} or a Thenable that resolves to such.
        """
        return self._send_request("textDocument/hover", params)
    def signature_help(self, params: lsp_types.SignatureHelpParams) -> Union["lsp_types.SignatureHelp", None]:
        """Request to resolve signature help at a given text document position.
        The response is of type {@link SignatureHelp} or `null`.
        """
        return self._send_request("textDocument/signatureHelp", params)
    def definition(self, params: lsp_types.DefinitionParams) -> Union["lsp_types.Definition", list["lsp_types.LocationLink"], None]:
        """A request to resolve the definition location of a symbol at a given text
        document position. The request's parameter is of type [TextDocumentPosition]
        (#TextDocumentPosition) the response is of either type {@link Definition}
        or a typed array of {@link DefinitionLink} or a Thenable that resolves
        to such.
        """
        return self._send_request("textDocument/definition", params)
    def references(self, params: lsp_types.ReferenceParams) -> list["lsp_types.Location"] | None:
        """A request to resolve project-wide references for the symbol denoted
        by the given text document position. The request's parameter is of
        type {@link ReferenceParams} the response is of type
        {@link Location Location[]} or a Thenable that resolves to such.
        """
        return self._send_request("textDocument/references", params)
    def document_highlight(self, params: lsp_types.DocumentHighlightParams) -> list["lsp_types.DocumentHighlight"] | None:
        """Request to resolve a {@link DocumentHighlight} for a given
        text document position. The request's parameter is of type [TextDocumentPosition]
        (#TextDocumentPosition) the request response is of type [DocumentHighlight[]]
        (#DocumentHighlight) or a Thenable that resolves to such.
        """
        return self._send_request("textDocument/documentHighlight", params)
    def document_symbol(
        self, params: lsp_types.DocumentSymbolParams
    ) -> list["lsp_types.SymbolInformation"] | list["lsp_types.DocumentSymbol"] | None:
        """A request to list all symbols found in a given text document. The request's
        parameter is of type {@link TextDocumentIdentifier} the
        response is of type {@link SymbolInformation SymbolInformation[]} or a Thenable
        that resolves to such.
        """
        return self._send_request("textDocument/documentSymbol", params)
    def code_action(self, params: lsp_types.CodeActionParams) -> list[Union["lsp_types.Command", "lsp_types.CodeAction"]] | None:
        """A request to provide commands for the given text document and range."""
        return self._send_request("textDocument/codeAction", params)
    def resolve_code_action(self, params: lsp_types.CodeAction) -> "lsp_types.CodeAction":
        """Request to resolve additional information for a given code action.The request's
        parameter is of type {@link CodeAction} the response
        is of type {@link CodeAction} or a Thenable that resolves to such.
        """
        return self._send_request("codeAction/resolve", params)
    def workspace_symbol(
        self, params: lsp_types.WorkspaceSymbolParams
    ) -> list["lsp_types.SymbolInformation"] | list["lsp_types.WorkspaceSymbol"] | None:
        """A request to list project-wide symbols matching the query string given
        by the {@link WorkspaceSymbolParams}. The response is
        of type {@link SymbolInformation SymbolInformation[]} or a Thenable that
        resolves to such.
        @since 3.17.0 - support for WorkspaceSymbol in the returned data. Clients
        need to advertise support for WorkspaceSymbols via the client capability
        `workspace.symbol.resolveSupport`.
        """
        return self._send_request("workspace/symbol", params)
    def resolve_workspace_symbol(self, params: lsp_types.WorkspaceSymbol) -> "lsp_types.WorkspaceSymbol":
        """A request to resolve the range inside the workspace
        symbol's location.
        @since 3.17.0
        """
        return self._send_request("workspaceSymbol/resolve", params)
    def code_lens(self, params: lsp_types.CodeLensParams) -> list["lsp_types.CodeLens"] | None:
        """A request to provide code lens for the given text document."""
        return self._send_request("textDocument/codeLens", params)
    def resolve_code_lens(self, params: lsp_types.CodeLens) -> "lsp_types.CodeLens":
        """A request to resolve a command for a given code lens."""
        return self._send_request("codeLens/resolve", params)
    def document_link(self, params: lsp_types.DocumentLinkParams) -> list["lsp_types.DocumentLink"] | None:
        """A request to provide document links"""
        return self._send_request("textDocument/documentLink", params)
    def resolve_document_link(self, params: lsp_types.DocumentLink) -> "lsp_types.DocumentLink":
        """Request to resolve additional information for a given document link. The request's
        parameter is of type {@link DocumentLink} the response
        is of type {@link DocumentLink} or a Thenable that resolves to such.
        """
        return self._send_request("documentLink/resolve", params)
    def formatting(self, params: lsp_types.DocumentFormattingParams) -> list["lsp_types.TextEdit"] | None:
        """A request to to format a whole document."""
        return self._send_request("textDocument/formatting", params)
    def range_formatting(self, params: lsp_types.DocumentRangeFormattingParams) -> list["lsp_types.TextEdit"] | None:
        """A request to to format a range in a document."""
        return self._send_request("textDocument/rangeFormatting", params)
    def on_type_formatting(self, params: lsp_types.DocumentOnTypeFormattingParams) -> list["lsp_types.TextEdit"] | None:
        """A request to format a document on type."""
        return self._send_request("textDocument/onTypeFormatting", params)
    def rename(self, params: lsp_types.RenameParams) -> Union["lsp_types.WorkspaceEdit", None]:
        """A request to rename a symbol."""
        return self._send_request("textDocument/rename", params)
    def prepare_rename(self, params: lsp_types.PrepareRenameParams) -> Union["lsp_types.PrepareRenameResult", None]:
        """A request to test and perform the setup necessary for a rename.
        @since 3.16 - support for default behavior
        """
        return self._send_request("textDocument/prepareRename", params)
    def execute_command(self, params: lsp_types.ExecuteCommandParams) -> Union["lsp_types.LSPAny", None]:
        """A request send from the client to the server to execute a command. The request might return
        a workspace edit which the client will apply to the workspace.
        """
        return self._send_request("workspace/executeCommand", params)
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/ls_request.py",
"license": "MIT License",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
oraios/serena:src/solidlsp/lsp_protocol_handler/server.py | """
This file provides the implementation of the JSON-RPC client, that launches and
communicates with the language server.
The initial implementation of this file was obtained from
https://github.com/predragnikolic/OLSP under the MIT License with the following terms:
MIT License
Copyright (c) 2023 Предраг Николић
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import dataclasses
import json
import logging
import os
from typing import Any, Union
from .lsp_types import ErrorCodes
# A decoded JSON object (string keys, arbitrary JSON values).
StringDict = dict[str, Any]
# Any JSON-RPC payload: a JSON object, an array of objects, a bool, or absent.
PayloadLike = Union[list[StringDict], StringDict, None, bool]
# Header prefix used to frame LSP messages on the wire.
CONTENT_LENGTH = "Content-Length: "
# Wire encoding mandated by the LSP specification.
ENCODING = "utf-8"
log = logging.getLogger(__name__)
@dataclasses.dataclass
class ProcessLaunchInfo:
"""
This class is used to store the information required to launch a (language server) process.
"""
cmd: str | list[str]
"""
the command used to launch the process.
Specification as a list is preferred (as it is more robust and avoids incorrect quoting of arguments);
the string variant is supported for backward compatibility only
"""
env: dict[str, str] = dataclasses.field(default_factory=dict)
"""
the environment variables to set for the process
"""
cwd: str = os.getcwd()
"""
the working directory for the process
"""
class LSPError(Exception):
    """An error sent or received over JSON-RPC, pairing an error code with a message."""

    def __init__(self, code: ErrorCodes, message: str) -> None:
        super().__init__(message)
        self.code = code

    def to_lsp(self) -> StringDict:
        """Render this error as a JSON-RPC ``error`` payload."""
        return dict(code=self.code, message=super().__str__())

    @classmethod
    def from_lsp(cls, d: StringDict) -> "LSPError":
        """Reconstruct an error from a JSON-RPC ``error`` payload."""
        return LSPError(d["code"], d["message"])

    def __str__(self) -> str:
        base_message = super().__str__()
        return "{} ({})".format(base_message, self.code)
def make_response(request_id: Any, params: PayloadLike) -> StringDict:
    """Build a successful JSON-RPC 2.0 response message."""
    message: StringDict = {"jsonrpc": "2.0"}
    message["id"] = request_id
    message["result"] = params
    return message
def make_error_response(request_id: Any, err: LSPError) -> StringDict:
    """Build a JSON-RPC 2.0 error response from an LSPError."""
    error_payload = err.to_lsp()
    return {"jsonrpc": "2.0", "id": request_id, "error": error_payload}
# LSP methods whose protocol type is Void/unit: the params field must be OMITTED
# entirely, not sent as an empty object. HLS ("Cannot parse Void") and
# rust-analyzer reject `params: {}` on these. The JSON-RPC spec explicitly
# allows omitting params: https://www.jsonrpc.org/specification
_NO_PARAMS_METHODS = frozenset({"shutdown", "exit"})


def _build_params_field(method: str, params: PayloadLike) -> StringDict:
    """Return the ``params`` portion of a JSON-RPC message for *method*.

    Void-typed methods (shutdown/exit) get no params key at all. For all other
    methods a ``None`` value is replaced by ``{}`` to remain compatible with the
    Delphi/FPC language servers (PR #851). The result is meant to be merged
    into the message dict.
    """
    if method in _NO_PARAMS_METHODS:
        return {}
    if params is None:
        return {"params": {}}
    return {"params": params}


def make_notification(method: str, params: PayloadLike) -> StringDict:
    """Create a JSON-RPC 2.0 notification message."""
    message: StringDict = {"jsonrpc": "2.0", "method": method}
    message.update(_build_params_field(method, params))
    return message


def make_request(method: str, request_id: Any, params: PayloadLike) -> StringDict:
    """Create a JSON-RPC 2.0 request message."""
    message: StringDict = {"jsonrpc": "2.0", "method": method, "id": request_id}
    message.update(_build_params_field(method, params))
    return message
class StopLoopException(Exception):
    """Raised internally to signal that the message-processing loop should terminate."""
    pass
def create_message(payload: PayloadLike) -> tuple[bytes, bytes, bytes]:
    """Encode *payload* as the three wire fragments of an LSP message:
    the Content-Length header, the Content-Type header (including the blank
    line terminating the header section), and the compact UTF-8 JSON body.
    """
    serialized = json.dumps(payload, check_circular=False, ensure_ascii=False, separators=(",", ":"))
    body = serialized.encode("utf-8")
    length_header = f"Content-Length: {len(body)}\r\n".encode("utf-8")
    type_header = "Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n".encode("utf-8")
    return (length_header, type_header, body)
class MessageType:
    # Severity levels of window/showMessage and window/logMessage notifications,
    # matching the LSP `MessageType` enumeration values.
    error = 1
    warning = 2
    info = 3
    log = 4
def content_length(line: bytes) -> int | None:
    """Parse an LSP ``Content-Length`` header line.

    Returns the declared byte count, or None when *line* is some other header.
    Raises ValueError when the header is present but its value is not an integer.
    """
    prefix = b"Content-Length: "
    if not line.startswith(prefix):
        return None
    value = line[len(prefix):].strip()
    try:
        return int(value)
    except ValueError:
        raise ValueError(f"Invalid Content-Length header: {value!r}")
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/lsp_protocol_handler/server.py",
"license": "MIT License",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:test/resources/repos/python/test_repo/ignore_this_dir_with_postfix/ignored_module.py | """
Example demonstrating user management with the test_repo module.
This example showcases:
- Creating and managing users
- Using various object types and relationships
- Type annotations and complex Python patterns
"""
import logging
from dataclasses import dataclass
from typing import Any
from test_repo.models import User, create_user_object
from test_repo.services import UserService
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@dataclass
class UserStats:
    """Statistics about user activity."""

    user_id: str
    login_count: int = 0
    last_active_days: int = 0
    engagement_score: float = 0.0

    def is_active(self) -> bool:
        """A user counts as active when last seen within the past 30 days."""
        active_window_days = 30
        return self.last_active_days < active_window_days
class UserManager:
    """Example class demonstrating complex user management."""

    def __init__(self, service: UserService):
        self.service = service
        self.active_users: dict[str, User] = {}
        self.user_stats: dict[str, UserStats] = {}

    def register_user(self, name: str, email: str, roles: list[str] | None = None) -> User:
        """Create a user through the service and start tracking it locally."""
        logger.info(f"Registering new user: {name} ({email})")
        new_user = self.service.create_user(name=name, email=email, roles=roles)
        self.active_users[new_user.id] = new_user
        self.user_stats[new_user.id] = UserStats(user_id=new_user.id)
        return new_user

    def get_user(self, user_id: str) -> User | None:
        """Look a user up in the local cache first, falling back to the service."""
        if user_id in self.active_users:
            return self.active_users[user_id]
        fetched = self.service.get_user(user_id)
        if fetched:
            # Cache the service result for subsequent lookups.
            self.active_users[fetched.id] = fetched
        return fetched

    def update_user_stats(self, user_id: str, login_count: int, days_since_active: int) -> None:
        """Record login/activity figures and recompute the engagement score."""
        if user_id not in self.user_stats:
            self.user_stats[user_id] = UserStats(user_id=user_id)
        stats = self.user_stats[user_id]
        stats.login_count = login_count
        stats.last_active_days = days_since_active
        # Engagement: recency (capped at 100 days) weighted 0.8 plus logins (capped at 20) weighted 0.2.
        stats.engagement_score = (100 - min(days_since_active, 100)) * 0.8 + min(login_count, 20) * 0.2

    def get_active_users(self) -> list[User]:
        """Return the locally cached users whose stats mark them as active."""
        return [
            self.active_users[uid]
            for uid, stats in self.user_stats.items()
            if stats.is_active() and uid in self.active_users
        ]

    def get_user_by_email(self, email: str) -> User | None:
        """Find a cached user by exact email match, or None if absent."""
        return next((u for u in self.active_users.values() if u.email == email), None)
# Example function demonstrating type annotations
def process_user_data(users: list[User], include_inactive: bool = False, transform_func: callable | None = None) -> dict[str, Any]:
    """Summarize *users* into serialized entries plus total and admin counts.

    ``transform_func``, when provided, is applied to each user's dict form.
    ``include_inactive`` is accepted for interface compatibility but not used here.
    """
    summary: dict[str, Any] = {"users": [], "total": 0, "admin_count": 0}
    for current_user in users:
        entry = current_user.to_dict()
        if transform_func:
            entry = transform_func(entry)
        summary["users"].append(entry)
        summary["total"] += 1
        summary["admin_count"] += 1 if "admin" in current_user.roles else 0
    return summary
def main():
    """Main function demonstrating the usage of UserManager."""
    # Wire up the service layer and its manager facade
    service = UserService()
    manager = UserManager(service)

    # Seed a few accounts
    admin = manager.register_user("Admin User", "admin@example.com", ["admin"])
    user1 = manager.register_user("Regular User", "user@example.com", ["user"])
    user2 = manager.register_user("Another User", "another@example.com", ["user"])

    # Record activity; the last account is deliberately stale
    manager.update_user_stats(admin.id, 100, 5)
    manager.update_user_stats(user1.id, 50, 10)
    manager.update_user_stats(user2.id, 10, 45)  # Inactive user

    # Report on the currently active accounts
    active_users = manager.get_active_users()
    logger.info(f"Active users: {len(active_users)}")

    # Run the batch processing helper with a transformation
    user_data = process_user_data(active_users, transform_func=lambda u: {**u, "full_name": u.get("name", "")})
    logger.info(f"Processed {user_data['total']} users, {user_data['admin_count']} admins")

    # Example of calling create_user directly
    external_user = create_user_object(id="ext123", name="External User", email="external@example.org", roles=["external"])
    logger.info(f"Created external user: {external_user.name}")


if __name__ == "__main__":
    main()
| {
"repo_id": "oraios/serena",
"file_path": "test/resources/repos/python/test_repo/ignore_this_dir_with_postfix/ignored_module.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:src/solidlsp/ls.py | import dataclasses
import hashlib
import json
import logging
import os
import pathlib
import shutil
import subprocess
import threading
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Hashable, Iterator
from contextlib import contextmanager
from copy import copy
from pathlib import Path, PurePath
from time import perf_counter, sleep
from typing import Self, Union, cast
import pathspec
from sensai.util.pickle import getstate, load_pickle
from sensai.util.string import ToStringMixin
from serena.util.file_system import match_path
from serena.util.text_utils import MatchedConsecutiveLines
from solidlsp import ls_types
from solidlsp.ls_config import Language, LanguageServerConfig
from solidlsp.ls_exceptions import SolidLSPException
from solidlsp.ls_process import LanguageServerProcess
from solidlsp.ls_types import UnifiedSymbolInformation
from solidlsp.ls_utils import FileUtils, PathUtils, TextUtils
from solidlsp.lsp_protocol_handler import lsp_types
from solidlsp.lsp_protocol_handler import lsp_types as LSPTypes
from solidlsp.lsp_protocol_handler.lsp_constants import LSPConstants
from solidlsp.lsp_protocol_handler.lsp_types import (
Definition,
DefinitionParams,
DocumentSymbol,
LocationLink,
RenameParams,
SymbolInformation,
)
from solidlsp.lsp_protocol_handler.server import (
LSPError,
ProcessLaunchInfo,
StringDict,
)
from solidlsp.settings import SolidLSPSettings
from solidlsp.util.cache import load_cache, save_cache
# union of the two raw LSP symbol representations and the unified in-house representation
GenericDocumentSymbol = Union[LSPTypes.DocumentSymbol, LSPTypes.SymbolInformation, ls_types.UnifiedSymbolInformation]

log = logging.getLogger(__name__)

# NOTE: evaluated once at import time; log-level changes applied later are not reflected here
_debug_enabled = log.isEnabledFor(logging.DEBUG)
"""Serves as a flag that triggers additional computation when debug logging is enabled."""
@dataclasses.dataclass(kw_only=True)
class ReferenceInSymbol:
    """A symbol retrieved when requesting reference to a symbol, together with the location of the reference"""

    # the symbol that contains the reference
    symbol: ls_types.UnifiedSymbolInformation
    # line of the reference (presumably 0-based, following LSP conventions -- confirm)
    line: int
    # character/column of the reference (presumably 0-based, following LSP conventions -- confirm)
    character: int
class LSPFileBuffer:
    """
    This class is used to store the contents of an open LSP file in memory.
    """

    def __init__(
        self,
        abs_path: Path,
        uri: str,
        encoding: str,
        version: int,
        language_id: str,
        ref_count: int,
        language_server: "SolidLanguageServer",
        open_in_ls: bool = True,
    ) -> None:
        """
        :param abs_path: absolute path of the file on disk
        :param uri: the file's URI as used in LSP messages
        :param encoding: the text encoding used to read the file and to hash its contents
        :param version: the LSP document version counter
        :param language_id: the LSP language identifier for the file
        :param ref_count: the initial reference count (number of callers holding the buffer open)
        :param language_server: the owning language server, used to send didOpen/didClose notifications
        :param open_in_ls: whether to immediately notify the language server that the file is open
        """
        self.abs_path = abs_path
        self.language_server = language_server
        self.uri = uri
        # file modification time at the moment contents were last read; used to detect staleness
        self._read_file_modified_date: float | None = None
        # lazily read and cached file contents
        self._contents: str | None = None
        self.version = version
        self.language_id = language_id
        self.ref_count = ref_count
        self.encoding = encoding
        # lazily computed md5 of the contents; reset whenever contents change
        self._content_hash: str | None = None
        self._is_open_in_ls = False
        if open_in_ls:
            self._open_in_ls()

    def _open_in_ls(self) -> None:
        """
        Open the file in the language server if it is not already open.
        """
        if self._is_open_in_ls:
            return
        self._is_open_in_ls = True
        # NOTE(review): the didOpen notification always reports version 0 rather than self.version --
        # presumably fine because buffers start at version 0; confirm if non-zero initial versions occur
        self.language_server.server.notify.did_open_text_document(
            {
                LSPConstants.TEXT_DOCUMENT: { # type: ignore
                    LSPConstants.URI: self.uri,
                    LSPConstants.LANGUAGE_ID: self.language_id,
                    LSPConstants.VERSION: 0,
                    LSPConstants.TEXT: self.contents,
                }
            }
        )

    def close(self) -> None:
        # send didClose only if the file was actually opened in the language server
        if self._is_open_in_ls:
            self.language_server.server.notify.did_close_text_document(
                {
                    LSPConstants.TEXT_DOCUMENT: { # type: ignore
                        LSPConstants.URI: self.uri,
                    }
                }
            )

    def ensure_open_in_ls(self) -> None:
        """Ensure that the file is opened in the language server."""
        self._open_in_ls()

    @property
    def contents(self) -> str:
        """The file's contents, re-read from disk if the file was modified since the last read."""
        file_modified_date = self.abs_path.stat().st_mtime
        # if contents are cached, check if they are stale (file modification since last read) and invalidate if so
        if self._contents is not None:
            assert self._read_file_modified_date is not None
            if file_modified_date > self._read_file_modified_date:
                self._contents = None
        if self._contents is None:
            self._read_file_modified_date = file_modified_date
            self._contents = FileUtils.read_file(str(self.abs_path), self.encoding)
            self._content_hash = None
        return self._contents

    @contents.setter
    def contents(self, new_contents: str) -> None:
        """
        Sets new contents for the file buffer (in-memory change only).
        Persistence of the change to disk must be handled separately.

        :param new_contents: the new contents to set
        """
        self._contents = new_contents
        self._content_hash = None

    @property
    def content_hash(self) -> str:
        """md5 hex digest of the current contents (computed lazily and cached)."""
        if self._content_hash is None:
            self._content_hash = hashlib.md5(self.contents.encode(self.encoding)).hexdigest()
        return self._content_hash

    def split_lines(self) -> list[str]:
        """Splits the contents of the file into lines."""
        return self.contents.split("\n")
class SymbolBody(ToStringMixin):
    """
    Representation of the body of a symbol, which allows the extraction of the symbol's text
    from the lines of the file it is defined in.

    Instances that share the same lines buffer are memory-efficient,
    using only 4 integers and a reference to the lines buffer from which the text can be extracted,
    i.e. a core representation of only about 40 bytes per body.
    """

    def __init__(self, lines: list[str], start_line: int, start_col: int, end_line: int, end_col: int) -> None:
        """
        :param lines: the full list of lines of the containing file (shared, not copied)
        :param start_line: 0-based line on which the body starts
        :param start_col: 0-based column at which the body starts on the start line
        :param end_line: 0-based line on which the body ends (inclusive)
        :param end_col: 0-based column just past the body's last character on the end line
        """
        self._lines = lines
        self._start_line = start_line
        self._start_col = start_col
        self._end_line = end_line
        self._end_col = end_col

    def _tostring_excludes(self) -> list[str]:
        # the (potentially large) shared lines buffer must not appear in the string representation
        return ["_lines"]

    def get_text(self) -> str:
        """
        :return: the symbol's text, extracted from the shared lines buffer
        """
        # extract relevant lines
        symbol_body = "\n".join(self._lines[self._start_line : self._end_line + 1])
        # remove leading content from the first line
        symbol_body = symbol_body[self._start_col :]
        # remove trailing content from the last line
        # (fix: reuse the computed trailing_length instead of recomputing the same expression)
        trailing_length = len(self._lines[self._end_line]) - self._end_col
        if trailing_length > 0:
            symbol_body = symbol_body[:-trailing_length]
        return symbol_body
class SymbolBodyFactory:
    """
    A factory creating SymbolBody instances for the symbols of a single file.

    All bodies produced by one factory instance share the factory's line buffer,
    which keeps the per-body memory footprint small.
    """

    def __init__(self, file_buffer: LSPFileBuffer):
        self._lines = file_buffer.split_lines()

    def create_symbol_body(self, symbol: GenericDocumentSymbol) -> SymbolBody:
        """
        :param symbol: the symbol dictionary; must contain a "location" entry unless it
            already carries a previously created body
        :return: the symbol's body (reusing an already attached one if present)
        """
        previously_attached = symbol.get("body", None)
        if isinstance(previously_attached, SymbolBody):
            return previously_attached
        assert "location" in symbol
        span = symbol["location"]["range"]  # type: ignore
        start, end = span["start"], span["end"]
        return SymbolBody(self._lines, start["line"], start["character"], end["line"], end["character"])
class DocumentSymbols:
    # IMPORTANT: Instances of this class are persisted in the high-level document symbol cache
    def __init__(self, root_symbols: list[ls_types.UnifiedSymbolInformation]):
        self.root_symbols = root_symbols
        # flat list of all symbols, built lazily on first full traversal
        self._all_symbols: list[ls_types.UnifiedSymbolInformation] | None = None

    def __getstate__(self) -> dict:
        # the flat list is a derived cache and is therefore not persisted
        return getstate(DocumentSymbols, self, transient_properties=["_all_symbols"])

    def iter_symbols(self) -> Iterator[ls_types.UnifiedSymbolInformation]:
        """
        Iterate over all symbols in the document symbol tree.

        Yields symbols in a depth-first manner.
        """
        # serve from the flat cache if it was already built
        if self._all_symbols is not None:
            yield from self._all_symbols
            return
        # iterative pre-order traversal; children are pushed in reverse so the
        # left-most child is popped (and yielded) first, matching recursive order
        pending = list(reversed(self.root_symbols))
        while pending:
            node = pending.pop()
            yield node
            pending.extend(reversed(node.get("children", [])))

    def get_all_symbols_and_roots(self) -> tuple[list[ls_types.UnifiedSymbolInformation], list[ls_types.UnifiedSymbolInformation]]:
        """
        This function returns all symbols in the document as a flat list and the root symbols.
        It exists to facilitate migration from previous versions, where this was the return interface of
        the LS method that obtained document symbols.

        :return: A tuple containing a list of all symbols in the document and a list of root symbols.
        """
        if self._all_symbols is None:
            self._all_symbols = list(self.iter_symbols())
        return self._all_symbols, self.root_symbols
class LanguageServerDependencyProvider(ABC):
    """
    Prepares dependencies for a language server (if any), ultimately enabling the launch command to be constructed
    and optionally providing environment variables that are necessary for the execution.
    """

    def __init__(self, custom_settings: SolidLSPSettings.CustomLSSettings, ls_resources_dir: str):
        """
        :param custom_settings: the user-provided, LS-specific settings
        :param ls_resources_dir: the directory in which this language server stores its resources
        """
        self._custom_settings = custom_settings
        self._ls_resources_dir = ls_resources_dir

    @abstractmethod
    def create_launch_command(self) -> list[str]:
        """
        Creates the launch command for this language server, potentially downloading and installing dependencies
        beforehand.

        :return: the launch command as a list containing the executable and its arguments
        """

    def create_launch_command_env(self) -> dict[str, str]:
        """
        Provides environment variables to be set when executing the launch command.
        This method is intended to be overridden by subclasses that need to set variables.

        :return: a mapping for variable names to values
        """
        return {}
class LanguageServerDependencyProviderSinglePath(LanguageServerDependencyProvider, ABC):
    """
    Special case of a dependency provider, where there is a single core dependency which provides
    the basis for the launch command.

    The core dependency's path can be overridden by the user in LS-specific settings (SerenaConfig)
    via the key "ls_path". If the user provides the key, the specified path is used directly.
    Otherwise, the provider implementation is called to get or install the core dependency.
    """

    @abstractmethod
    def _get_or_install_core_dependency(self) -> str:
        """
        Gets the language server's core path, potentially installing dependencies beforehand.

        :return: the core dependency's path (e.g. executable, jar, etc.)
        """

    def create_launch_command(self) -> list[str]:
        # a user-configured path takes precedence over automatic resolution/installation
        core_path = self._custom_settings.get("ls_path", None)
        if core_path is None:
            core_path = self._get_or_install_core_dependency()
        return self._create_launch_command(core_path)

    @abstractmethod
    def _create_launch_command(self, core_path: str) -> list[str]:
        """
        :param core_path: path to the core dependency
        :return: the launch command as a list containing the executable and its arguments
        """
class SolidLanguageServer(ABC):
    """
    The LanguageServer class provides a language agnostic interface to the Language Server Protocol.
    It is used to communicate with Language Servers of different programming languages.
    """

    # name of the per-project subdirectory in which symbol caches are stored
    CACHE_FOLDER_NAME = "cache"

    RAW_DOCUMENT_SYMBOLS_CACHE_VERSION = 1
    """
    global version identifier for raw symbol caches; an LS-specific version is defined separately and combined with this.
    This should be incremented whenever there is a change in the way raw document symbols are stored.
    If the result of a language server changes in a way that affects the raw document symbols,
    the LS-specific version should be incremented instead.
    """
    RAW_DOCUMENT_SYMBOL_CACHE_FILENAME = "raw_document_symbols.pkl"
    # per its name, a fallback filename for caches written by older versions -- confirm read-fallback usage
    RAW_DOCUMENT_SYMBOL_CACHE_FILENAME_LEGACY_FALLBACK = "document_symbols_cache_v23-06-25.pkl"
    # version of the high-level (processed) document symbol cache
    DOCUMENT_SYMBOL_CACHE_VERSION = 4
    DOCUMENT_SYMBOL_CACHE_FILENAME = "document_symbols.pkl"
# To be overridden and extended by subclasses
def is_ignored_dirname(self, dirname: str) -> bool:
"""
A language-specific condition for directories that should always be ignored. For example, venv
in Python and node_modules in JS/TS should be ignored always.
"""
return dirname.startswith(".")
@staticmethod
def _determine_log_level(line: str) -> int:
"""
Classify a stderr line from the language server to determine appropriate logging level.
Language servers may emit informational messages to stderr that contain words like "error"
but are not actual errors. Subclasses can override this method to filter out known
false-positive patterns specific to their language server.
:param line: The stderr line to classify
:return: A logging level (logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR)
"""
line_lower = line.lower()
# Default classification: treat lines with "error" or "exception" as ERROR level
if "error" in line_lower or "exception" in line_lower or line.startswith("E["):
return logging.ERROR
else:
return logging.INFO
@classmethod
def get_language_enum_instance(cls) -> Language:
return Language.from_ls_class(cls)
@classmethod
def ls_resources_dir(cls, solidlsp_settings: SolidLSPSettings, mkdir: bool = True) -> str:
"""
Returns the directory where the language server resources are downloaded.
This is used to store language server binaries, configuration files, etc.
"""
result = os.path.join(solidlsp_settings.ls_resources_dir, cls.__name__)
# Migration of previously downloaded LS resources that were downloaded to a subdir of solidlsp instead of to the user's home
pre_migration_ls_resources_dir = os.path.join(os.path.dirname(__file__), "language_servers", "static", cls.__name__)
if os.path.exists(pre_migration_ls_resources_dir):
if os.path.exists(result):
# if the directory already exists, we just remove the old resources
shutil.rmtree(result, ignore_errors=True)
else:
# move old resources to the new location
shutil.move(pre_migration_ls_resources_dir, result)
if mkdir:
os.makedirs(result, exist_ok=True)
return result
    @classmethod
    def create(
        cls,
        config: LanguageServerConfig,
        repository_root_path: str,
        timeout: float | None = None,
        solidlsp_settings: SolidLSPSettings | None = None,
    ) -> "SolidLanguageServer":
        """
        Creates a language specific LanguageServer instance based on the given configuration, and appropriate settings for the programming language.

        If language is Java, then ensure that jdk-17.0.6 or higher is installed, `java` is in PATH, and JAVA_HOME is set to the installation directory.
        If language is JS/TS, then ensure that node (v18.16.0 or higher) is installed and in PATH.

        :param config: language server configuration.
        :param repository_root_path: The root path of the repository.
        :param timeout: the timeout for requests to the language server. If None, no timeout will be used.
        :param solidlsp_settings: additional settings; defaults are used if None is given.
        :return LanguageServer: A language specific LanguageServer instance.
        """
        ls: SolidLanguageServer
        if solidlsp_settings is None:
            solidlsp_settings = SolidLSPSettings()
        # Ensure repository_root_path is absolute to avoid issues with file URIs
        repository_root_path = os.path.abspath(repository_root_path)
        ls_class = config.code_language.get_ls_class()
        # For now, we assume that all language server implementations have the same signature of the constructor
        # (which, unfortunately, differs from the signature of the base class).
        # If this assumption is ever violated, we need branching logic here.
        ls = ls_class(config, repository_root_path, solidlsp_settings) # type: ignore
        ls.set_request_timeout(timeout)
        return ls
    def __init__(
        self,
        config: LanguageServerConfig,
        repository_root_path: str,
        process_launch_info: ProcessLaunchInfo | None,
        language_id: str,
        solidlsp_settings: SolidLSPSettings,
        cache_version_raw_document_symbols: Hashable = 1,
    ):
        """
        Initializes a LanguageServer instance.

        Do not instantiate this class directly. Use `LanguageServer.create` method instead.

        :param config: the global SolidLSP configuration.
        :param repository_root_path: the root path of the repository.
        :param process_launch_info: (DEPRECATED - implement _create_dependency_provider instead)
            the command used to start the actual language server.
            The command must pass appropriate flags to the binary, so that it runs in the stdio mode,
            as opposed to HTTP, TCP modes supported by some language servers.
        :param language_id: the LSP language identifier for this server's language
        :param solidlsp_settings: the global SolidLSP settings
        :param cache_version_raw_document_symbols: the version, for caching, of the raw document symbols coming
            from this specific language server. This should be incremented by subclasses calling this constructor
            whenever the format of the raw document symbols changes (typically because the language server
            improves/fixes its output).
        """
        self._solidlsp_settings = solidlsp_settings
        lang = self.get_language_enum_instance()
        self._custom_settings = solidlsp_settings.get_ls_specific_settings(lang)
        self._ls_resources_dir = self.ls_resources_dir(solidlsp_settings)
        log.debug(f"Custom config (LS-specific settings) for {lang}: {self._custom_settings}")
        self._encoding = config.encoding
        self.repository_root_path: str = repository_root_path
        log.debug(
            f"Creating language server instance for {repository_root_path=} with {language_id=} and process launch info: {process_launch_info}"
        )
        self.language_id = language_id
        self.open_file_buffers: dict[str, LSPFileBuffer] = {}
        self.language = Language(language_id)

        # initialise symbol caches
        self.cache_dir = Path(self._solidlsp_settings.project_data_path) / self.CACHE_FOLDER_NAME / self.language_id
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        # * raw document symbols cache
        self._ls_specific_raw_document_symbols_cache_version = cache_version_raw_document_symbols
        self._raw_document_symbols_cache: dict[str, tuple[str, list[DocumentSymbol] | list[SymbolInformation] | None]] = {}
        """maps relative file paths to a tuple of (file_content_hash, raw_root_symbols)"""
        self._raw_document_symbols_cache_is_modified: bool = False
        self._load_raw_document_symbols_cache()
        # * high-level document symbols cache
        self._document_symbols_cache: dict[str, tuple[str, DocumentSymbols]] = {}
        """maps relative file paths to a tuple of (file_content_hash, document_symbols)"""
        self._document_symbols_cache_is_modified: bool = False
        self._load_document_symbols_cache()

        self.server_started = False

        # LSP message tracing is only wired up when enabled in the config
        if config.trace_lsp_communication:

            def logging_fn(source: str, target: str, msg: StringDict | str) -> None:
                log.debug(f"LSP: {source} -> {target}: {msg!s}")

        else:
            logging_fn = None # type: ignore

        # create the LanguageServerHandler, which provides the functionality to start the language server and communicate with it,
        # preparing the launch command beforehand
        self._dependency_provider: LanguageServerDependencyProvider | None = None
        if process_launch_info is None:
            # the modern path: subclasses provide a dependency provider instead of a launch command
            self._dependency_provider = self._create_dependency_provider()
            process_launch_info = self._create_process_launch_info()
        # NOTE(review): largely duplicates the debug message logged above -- presumably intentional
        # to capture the launch info resolved by the dependency provider; confirm
        log.debug(f"Creating language server instance with {language_id=} and process launch info: {process_launch_info}")
        self.server = LanguageServerProcess(
            process_launch_info,
            language=self.language,
            determine_log_level=self._determine_log_level,
            logger=logging_fn,
            start_independent_lsp_process=config.start_independent_lsp_process,
        )

        # Set up the pathspec matcher for the ignored paths
        # for all absolute paths in ignored_paths, convert them to relative paths
        processed_patterns = []
        for pattern in set(config.ignored_paths):
            # Normalize separators (pathspec expects forward slashes)
            pattern = pattern.replace(os.path.sep, "/")
            processed_patterns.append(pattern)
        log.debug(f"Processing {len(processed_patterns)} ignored paths from the config")
        # Create a pathspec matcher from the processed patterns
        self._ignore_spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, processed_patterns)

        self._request_timeout: float | None = None
        self._has_waited_for_cross_file_references = False
def _create_dependency_provider(self) -> LanguageServerDependencyProvider:
"""
Creates the dependency provider for this language server.
Subclasses should override this method to provide their specific dependency provider.
This method is only called if process_launch_info is not passed to __init__.
"""
raise NotImplementedError(
f"{self.__class__.__name__} must implement _create_dependency_provider() or pass process_launch_info to __init__()"
)
def _create_process_launch_info(self) -> ProcessLaunchInfo:
assert self._dependency_provider is not None
cmd = self._dependency_provider.create_launch_command()
env = self._dependency_provider.create_launch_command_env()
return ProcessLaunchInfo(cmd=cmd, cwd=self.repository_root_path, env=env)
def _get_wait_time_for_cross_file_referencing(self) -> float:
"""Meant to be overridden by subclasses for LS that don't have a reliable "finished initializing" signal.
LS may return incomplete results on calls to `request_references` (only references found in the same file),
if the LS is not fully initialized yet.
"""
return 2
    def set_request_timeout(self, timeout: float | None) -> None:
        """
        :param timeout: the timeout, in seconds, for requests to the language server.
        """
        # delegates to the underlying server handler
        # NOTE(review): self._request_timeout (set in __init__) is not updated here -- confirm whether intended
        self.server.set_request_timeout(timeout)
    def get_ignore_spec(self) -> pathspec.PathSpec:
        """
        Returns the pathspec matcher for the paths that were configured to be ignored through
        the language server configuration.
        This is a subset of the full language-specific ignore spec that determines
        which files are relevant for the language server.

        This matcher is useful for operations outside of the language server,
        such as when searching for relevant non-language files in the project.

        :return: the matcher built from the configured ignore patterns in __init__
        """
        return self._ignore_spec
def is_ignored_path(self, relative_path: str, ignore_unsupported_files: bool = True) -> bool:
"""
Determine if a path should be ignored based on file type
and ignore patterns.
:param relative_path: Relative path to check
:param ignore_unsupported_files: whether files that are not supported source files should be ignored
:return: True if the path should be ignored, False otherwise
"""
abs_path = os.path.join(self.repository_root_path, relative_path)
if not os.path.exists(abs_path):
raise FileNotFoundError(f"File {abs_path} not found, the ignore check cannot be performed")
# Check file extension if it's a file
is_file = os.path.isfile(abs_path)
if is_file and ignore_unsupported_files:
fn_matcher = self.language.get_source_fn_matcher()
if not fn_matcher.is_relevant_filename(abs_path):
return True
# Create normalized path for consistent handling
rel_path = Path(relative_path)
# Check each part of the path against always fulfilled ignore conditions
dir_parts = rel_path.parts
if is_file:
dir_parts = dir_parts[:-1]
for part in dir_parts:
if not part: # Skip empty parts (e.g., from leading '/')
continue
if self.is_ignored_dirname(part):
return True
return match_path(relative_path, self.get_ignore_spec(), root_path=self.repository_root_path)
    def _shutdown(self, timeout: float = 5.0) -> None:
        """
        A robust shutdown process designed to terminate cleanly on all platforms, including Windows,
        by explicitly closing all I/O pipes.

        :param timeout: maximum time, in seconds, to wait for the process to terminate before killing it
        """
        if not self.server.is_running():
            log.debug("Server process not running, skipping shutdown.")
            return
        log.info(f"Initiating final robust shutdown with a {timeout}s timeout...")
        process = self.server.process
        if process is None:
            log.debug("Server process is None, cannot shutdown.")
            return

        # --- Main Shutdown Logic ---
        # Stage 1: Graceful Termination Request
        # Send LSP shutdown and close stdin to signal no more input.
        try:
            log.debug("Sending LSP shutdown request...")
            # Use a thread to timeout the LSP shutdown call since it can hang
            shutdown_thread = threading.Thread(target=self.server.shutdown)
            shutdown_thread.daemon = True
            shutdown_thread.start()
            shutdown_thread.join(timeout=2.0) # 2 second timeout for LSP shutdown
            if shutdown_thread.is_alive():
                log.debug("LSP shutdown request timed out, proceeding to terminate...")
            else:
                log.debug("LSP shutdown request completed.")
            if process.stdin and not process.stdin.closed:
                process.stdin.close()
            log.debug("Stage 1 shutdown complete.")
        except Exception as e:
            log.debug(f"Exception during graceful shutdown: {e}")
            # Ignore errors here, we are proceeding to terminate anyway.

        # Stage 2: Terminate and Wait for Process to Exit
        log.debug(f"Terminating process {process.pid}, current status: {process.poll()}")
        process.terminate()

        # Stage 3: Wait for process termination with timeout
        try:
            log.debug(f"Waiting for process {process.pid} to terminate...")
            exit_code = process.wait(timeout=timeout)
            log.info(f"Language server process terminated successfully with exit code {exit_code}.")
        except subprocess.TimeoutExpired:
            # If termination failed, forcefully kill the process
            log.warning(f"Process {process.pid} termination timed out, killing process forcefully...")
            process.kill()
            try:
                exit_code = process.wait(timeout=2.0)
                log.info(f"Language server process killed successfully with exit code {exit_code}.")
            except subprocess.TimeoutExpired:
                log.error(f"Process {process.pid} could not be killed within timeout.")
        except Exception as e:
            log.error(f"Error during process shutdown: {e}")
@contextmanager
def start_server(self) -> Iterator["SolidLanguageServer"]:
self.start()
yield self
self.stop()
    def _start_server_process(self) -> None:
        # NOTE(review): the flag is set before _start_server runs -- presumably so that requests
        # issued during startup pass the server_started guards; confirm
        self.server_started = True
        self._start_server()
    @abstractmethod
    def _start_server(self) -> None:
        """Performs the language-specific server startup; must be implemented by subclasses."""
        pass
    def _get_language_id_for_file(self, relative_file_path: str) -> str:
        """Return the language ID for a file.

        Override in subclasses to return file-specific language IDs.
        Default implementation returns self.language_id.

        :param relative_file_path: the file's path relative to the repository root
            (unused by the base implementation)
        """
        return self.language_id
@contextmanager
def open_file(self, relative_file_path: str, open_in_ls: bool = True) -> Iterator[LSPFileBuffer]:
"""
Open a file in the Language Server. This is required before making any requests to the Language Server.
:param relative_file_path: The relative path of the file to open.
:param open_in_ls: whether to open the file in the language server, sending the didOpen notification.
Set this to False to read the local file buffer without notifying the LS; the file can
be opened in the LS later by calling the `ensure_open_in_ls` method on the returned LSPFileBuffer.
"""
if not self.server_started:
log.error("open_file called before Language Server started")
raise SolidLSPException("Language Server not started")
absolute_file_path = Path(self.repository_root_path, relative_file_path)
uri = absolute_file_path.as_uri()
if uri in self.open_file_buffers:
fb = self.open_file_buffers[uri]
assert fb.uri == uri
assert fb.ref_count >= 1
fb.ref_count += 1
if open_in_ls:
fb.ensure_open_in_ls()
yield fb
fb.ref_count -= 1
else:
version = 0
language_id = self._get_language_id_for_file(relative_file_path)
fb = LSPFileBuffer(
abs_path=absolute_file_path,
uri=uri,
encoding=self._encoding,
version=version,
language_id=language_id,
ref_count=1,
language_server=self,
open_in_ls=open_in_ls,
)
self.open_file_buffers[uri] = fb
yield fb
fb.ref_count -= 1
if self.open_file_buffers[uri].ref_count == 0:
self.open_file_buffers[uri].close()
del self.open_file_buffers[uri]
@contextmanager
def _open_file_context(
self, relative_file_path: str, file_buffer: LSPFileBuffer | None = None, open_in_ls: bool = True
) -> Iterator[LSPFileBuffer]:
"""
Internal context manager to open a file, optionally reusing an existing file buffer.
:param relative_file_path: the relative path of the file to open.
:param file_buffer: an optional existing file buffer to reuse.
:param open_in_ls: whether to open the file in the language server, sending the didOpen notification.
Set this to False to read the local file buffer without notifying the LS; the file can
be opened in the LS later by calling the `ensure_open_in_ls` method on the returned LSPFileBuffer.
"""
if file_buffer is not None:
expected_uri = pathlib.Path(os.path.join(self.repository_root_path, relative_file_path)).as_uri()
assert file_buffer.uri == expected_uri, f"Inconsistency between provided {file_buffer.uri=} and {expected_uri=}"
if open_in_ls:
file_buffer.ensure_open_in_ls()
yield file_buffer
else:
with self.open_file(relative_file_path, open_in_ls=open_in_ls) as fb:
yield fb
    def insert_text_at_position(self, relative_file_path: str, line: int, column: int, text_to_be_inserted: str) -> ls_types.Position:
        """
        Insert text at the given line and column in the given file and return
        the updated cursor position after inserting the text.

        :param relative_file_path: The relative path of the file to open.
        :param line: The line number at which text should be inserted.
        :param column: The column number at which text should be inserted.
        :param text_to_be_inserted: The text to insert.
        :return: the cursor position immediately after the inserted text
        """
        if not self.server_started:
            log.error("insert_text_at_position called before Language Server started")
            raise SolidLSPException("Language Server not started")
        absolute_file_path = str(PurePath(self.repository_root_path, relative_file_path))
        uri = pathlib.Path(absolute_file_path).as_uri()

        # Ensure the file is open
        assert uri in self.open_file_buffers

        file_buffer = self.open_file_buffers[uri]
        file_buffer.version += 1
        # apply the edit to the in-memory buffer first ...
        new_contents, new_l, new_c = TextUtils.insert_text_at_position(file_buffer.contents, line, column, text_to_be_inserted)
        file_buffer.contents = new_contents
        # ... then inform the LS of the insertion (zero-length range) via didChange
        self.server.notify.did_change_text_document(
            {
                LSPConstants.TEXT_DOCUMENT: { # type: ignore
                    LSPConstants.VERSION: file_buffer.version,
                    LSPConstants.URI: file_buffer.uri,
                },
                LSPConstants.CONTENT_CHANGES: [
                    {
                        LSPConstants.RANGE: {
                            "start": {"line": line, "character": column},
                            "end": {"line": line, "character": column},
                        },
                        "text": text_to_be_inserted,
                    }
                ],
            }
        )
        return ls_types.Position(line=new_l, character=new_c)
def delete_text_between_positions(
self,
relative_file_path: str,
start: ls_types.Position,
end: ls_types.Position,
) -> str:
"""
Delete text between the given start and end positions in the given file and return the deleted text.
"""
if not self.server_started:
log.error("insert_text_at_position called before Language Server started")
raise SolidLSPException("Language Server not started")
absolute_file_path = str(PurePath(self.repository_root_path, relative_file_path))
uri = pathlib.Path(absolute_file_path).as_uri()
# Ensure the file is open
assert uri in self.open_file_buffers
file_buffer = self.open_file_buffers[uri]
file_buffer.version += 1
new_contents, deleted_text = TextUtils.delete_text_between_positions(
file_buffer.contents, start_line=start["line"], start_col=start["character"], end_line=end["line"], end_col=end["character"]
)
file_buffer.contents = new_contents
self.server.notify.did_change_text_document(
{
LSPConstants.TEXT_DOCUMENT: { # type: ignore
LSPConstants.VERSION: file_buffer.version,
LSPConstants.URI: file_buffer.uri,
},
LSPConstants.CONTENT_CHANGES: [{LSPConstants.RANGE: {"start": start, "end": end}, "text": ""}],
}
)
return deleted_text
def _send_definition_request(self, definition_params: DefinitionParams) -> Definition | list[LocationLink] | None:
    # Isolated into its own method so that subclasses can override how the raw
    # textDocument/definition request is sent (mirrors _send_references_request).
    return self.server.send.definition(definition_params)
def request_definition(self, relative_file_path: str, line: int, column: int) -> list[ls_types.Location]:
    """
    Raise a [textDocument/definition](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_definition) request to the Language Server
    for the symbol at the given line and column in the given file. Wait for the response and return the result.

    The raw response may be a single Location, a list of Locations, or a list of LocationLinks;
    all variants are normalized to a list of `ls_types.Location` with absolute and
    repository-relative paths added.

    :param relative_file_path: The relative path of the file that has the symbol for which definition should be looked up
    :param line: The line number of the symbol
    :param column: The column number of the symbol

    :return: the list of locations where the symbol is defined
    """
    if not self.server_started:
        log.error("request_definition called before language server started")
        raise SolidLSPException("Language Server not started")

    if not self._has_waited_for_cross_file_references:
        # Some LS require waiting for a while before they can return cross-file definitions.
        # This is a workaround for such LS that don't have a reliable "finished initializing" signal.
        sleep(self._get_wait_time_for_cross_file_referencing())
        self._has_waited_for_cross_file_references = True

    with self.open_file(relative_file_path):
        # sending request to the language server and waiting for response
        definition_params = cast(
            DefinitionParams,
            {
                LSPConstants.TEXT_DOCUMENT: {
                    LSPConstants.URI: pathlib.Path(str(PurePath(self.repository_root_path, relative_file_path))).as_uri()
                },
                LSPConstants.POSITION: {
                    LSPConstants.LINE: line,
                    LSPConstants.CHARACTER: column,
                },
            },
        )
        response = self._send_definition_request(definition_params)

        ret: list[ls_types.Location] = []
        if isinstance(response, list):
            # response is either of type Location[] or LocationLink[]
            for item in response:
                assert isinstance(item, dict)
                if LSPConstants.URI in item and LSPConstants.RANGE in item:
                    # Location variant: enrich with absolute and repository-relative paths.
                    new_item: dict = {}
                    new_item.update(item)
                    new_item["absolutePath"] = PathUtils.uri_to_path(new_item["uri"])
                    new_item["relativePath"] = PathUtils.get_relative_path(new_item["absolutePath"], self.repository_root_path)
                    ret.append(ls_types.Location(**new_item))  # type: ignore
                elif LSPConstants.TARGET_URI in item and LSPConstants.TARGET_RANGE in item and LSPConstants.TARGET_SELECTION_RANGE in item:
                    # LocationLink variant: map the target fields onto the Location shape,
                    # using the target selection range as the location's range.
                    new_item: dict = {}  # type: ignore
                    new_item["uri"] = item[LSPConstants.TARGET_URI]  # type: ignore
                    new_item["absolutePath"] = PathUtils.uri_to_path(new_item["uri"])
                    new_item["relativePath"] = PathUtils.get_relative_path(new_item["absolutePath"], self.repository_root_path)
                    new_item["range"] = item[LSPConstants.TARGET_SELECTION_RANGE]  # type: ignore
                    ret.append(ls_types.Location(**new_item))  # type: ignore
                else:
                    assert False, f"Unexpected response from Language Server: {item}"
        elif isinstance(response, dict):
            # response is of type Location
            assert LSPConstants.URI in response
            assert LSPConstants.RANGE in response

            new_item: dict = {}  # type: ignore
            new_item.update(response)
            new_item["absolutePath"] = PathUtils.uri_to_path(new_item["uri"])
            new_item["relativePath"] = PathUtils.get_relative_path(new_item["absolutePath"], self.repository_root_path)
            ret.append(ls_types.Location(**new_item))  # type: ignore
        elif response is None:
            # Some language servers return None when they cannot find a definition
            # This is expected for certain symbol types like generics or types with incomplete information
            log.warning(f"Language server returned None for definition request at {relative_file_path}:{line}:{column}")
        else:
            assert False, f"Unexpected response from Language Server: {response}"

        return ret
# Some LS cause problems with this, so the call is isolated from the rest to allow overriding in subclasses
def _send_references_request(self, relative_file_path: str, line: int, column: int) -> list[lsp_types.Location] | None:
    """
    Send the raw textDocument/references request for the symbol at the given position.

    :param relative_file_path: The relative path of the file containing the symbol
    :param line: The line number of the symbol
    :param column: The column number of the symbol
    :return: the raw list of locations from the language server, or None
    """
    return self.server.send.references(
        {
            "textDocument": {"uri": PathUtils.path_to_uri(os.path.join(self.repository_root_path, relative_file_path))},
            "position": {"line": line, "character": column},
            # The declaration itself is not counted as a reference.
            "context": {"includeDeclaration": False},
        }
    )
def request_references(self, relative_file_path: str, line: int, column: int) -> list[ls_types.Location]:
    """
    Raise a [textDocument/references](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_references) request to the Language Server
    to find references to the symbol at the given line and column in the given file. Wait for the response and return the result.
    Filters out references located in ignored directories.

    :param relative_file_path: The relative path of the file that has the symbol for which references should be looked up
    :param line: The line number of the symbol
    :param column: The column number of the symbol

    :return: A list of locations where the symbol is referenced (excluding ignored directories)
    """
    if not self.server_started:
        log.error("request_references called before Language Server started")
        raise SolidLSPException("Language Server not started")

    with self.open_file(relative_file_path):
        if not self._has_waited_for_cross_file_references:
            # Some LS require waiting for a while before they can return cross-file references.
            # This is a workaround for such LS that don't have a reliable "finished initializing" signal.
            # The waiting has to happen after at least one file was opened in the ls
            sleep(self._get_wait_time_for_cross_file_referencing())
            self._has_waited_for_cross_file_references = True

        # perf instrumentation: only pay for the timer when debug logging is enabled
        t0 = perf_counter() if _debug_enabled else 0.0
        try:
            response = self._send_references_request(relative_file_path, line=line, column=column)
        except Exception as e:
            # Catch LSP internal error (-32603) and raise a more informative exception
            if isinstance(e, LSPError) and getattr(e, "code", None) == -32603:
                raise RuntimeError(
                    f"LSP internal error (-32603) when requesting references for {relative_file_path}:{line}:{column}. "
                    "This often occurs when requesting references for a symbol not referenced in the expected way. "
                ) from e
            raise
        if response is None:
            if _debug_enabled:
                elapsed_ms = (perf_counter() - t0) * 1000
                log.debug("perf: request_references path=%s elapsed_ms=%.2f count=0", relative_file_path, elapsed_ms)
            return []

        ret: list[ls_types.Location] = []
        assert isinstance(response, list), f"Unexpected response from Language Server (expected list, got {type(response)}): {response}"
        for item in response:
            assert isinstance(item, dict), f"Unexpected response from Language Server (expected dict, got {type(item)}): {item}"
            assert LSPConstants.URI in item
            assert LSPConstants.RANGE in item

            abs_path = PathUtils.uri_to_path(item[LSPConstants.URI])  # type: ignore
            # Skip references that resolve outside the repository (e.g. installed packages / stdlib).
            if not Path(abs_path).is_relative_to(self.repository_root_path):
                log.warning(
                    "Found a reference in a path outside the repository, probably the LS is parsing things in installed packages or in the standardlib! "
                    f"Path: {abs_path}. This is a bug but we currently simply skip these references."
                )
                continue
            rel_path = Path(abs_path).relative_to(self.repository_root_path)
            # Also skip references inside user-/language-configured ignored directories.
            if self.is_ignored_path(str(rel_path)):
                log.debug("Ignoring reference in %s since it should be ignored", rel_path)
                continue

            new_item: dict = {}
            new_item.update(item)
            new_item["absolutePath"] = str(abs_path)
            new_item["relativePath"] = str(rel_path)
            ret.append(ls_types.Location(**new_item))  # type: ignore
        if _debug_enabled:
            elapsed_ms = (perf_counter() - t0) * 1000
            unique_files = len({r["relativePath"] for r in ret})
            log.debug(
                "perf: request_references path=%s elapsed_ms=%.2f count=%d unique_files=%d",
                relative_file_path,
                elapsed_ms,
                len(ret),
                unique_files,
            )
        return ret
def request_text_document_diagnostics(self, relative_file_path: str) -> list[ls_types.Diagnostic]:
    """
    Raise a [textDocument/diagnostic](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_diagnostic) request to the Language Server
    to find diagnostics for the given file. Wait for the response and return the result.

    :param relative_file_path: The relative path of the file to retrieve diagnostics for
    :return: A list of diagnostics for the file
    """
    if not self.server_started:
        log.error("request_text_document_diagnostics called before Language Server started")
        raise SolidLSPException("Language Server not started")

    with self.open_file(relative_file_path):
        response = self.server.send.text_document_diagnostic(
            {
                LSPConstants.TEXT_DOCUMENT: {  # type: ignore
                    LSPConstants.URI: pathlib.Path(str(PurePath(self.repository_root_path, relative_file_path))).as_uri()
                }
            }
        )
        if response is None:
            return []  # type: ignore
        # Fixed: the assertion message previously claimed a list was expected although a dict is asserted.
        assert isinstance(response, dict), f"Unexpected response from Language Server (expected dict, got {type(response)}): {response}"

        # The URI is identical for every diagnostic item, so compute it once outside the loop.
        uri = pathlib.Path(str(PurePath(self.repository_root_path, relative_file_path))).as_uri()
        ret: list[ls_types.Diagnostic] = []
        for item in response["items"]:  # type: ignore
            new_item: ls_types.Diagnostic = {
                "uri": uri,
                "severity": item["severity"],
                "message": item["message"],
                "range": item["range"],
                "code": item["code"],  # type: ignore
            }
            ret.append(ls_types.Diagnostic(**new_item))
        return ret
def retrieve_full_file_content(self, file_path: str) -> str:
    """
    Return the complete text content of the given file.

    :param file_path: path to the file; absolute paths are converted to
        repository-relative ones before the file is opened.
    """
    # Normalize absolute paths to repository-relative paths.
    if os.path.isabs(file_path):
        file_path = os.path.relpath(file_path, self.repository_root_path)
    with self.open_file(file_path) as buffer:
        return buffer.contents
def retrieve_content_around_line(
    self, relative_file_path: str, line: int, context_lines_before: int = 0, context_lines_after: int = 0
) -> MatchedConsecutiveLines:
    """
    Retrieve the content of the given file around the given line.

    :param relative_file_path: The relative path of the file to retrieve the content from
    :param line: The line number to retrieve the content around
    :param context_lines_before: The number of lines to retrieve before the given line
    :param context_lines_after: The number of lines to retrieve after the given line

    :return MatchedConsecutiveLines: A container with the desired lines.
    """
    # Read the full file content first, then delegate the line extraction.
    with self.open_file(relative_file_path) as buffer:
        full_contents = buffer.contents
    return MatchedConsecutiveLines.from_file_contents(
        full_contents,
        line=line,
        context_lines_before=context_lines_before,
        context_lines_after=context_lines_after,
        source_file_path=relative_file_path,
    )
def request_completions(
    self, relative_file_path: str, line: int, column: int, allow_incomplete: bool = False
) -> list[ls_types.CompletionItem]:
    """
    Raise a [textDocument/completion](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_completion) request to the Language Server
    to find completions at the given line and column in the given file. Wait for the response and return the result.

    :param relative_file_path: The relative path of the file that has the symbol for which completions should be looked up
    :param line: The line number of the symbol
    :param column: The column number of the symbol
    :param allow_incomplete: whether to return results even when the server reports the
        completion list as incomplete; if False, an incomplete result yields an empty list
    :return: A list of completions
    """
    with self.open_file(relative_file_path):
        open_file_buffer = self.open_file_buffers[pathlib.Path(os.path.join(self.repository_root_path, relative_file_path)).as_uri()]
        completion_params: LSPTypes.CompletionParams = {
            "position": {"line": line, "character": column},
            "textDocument": {"uri": open_file_buffer.uri},
            "context": {"triggerKind": LSPTypes.CompletionTriggerKind.Invoked},
        }
        response: list[LSPTypes.CompletionItem] | LSPTypes.CompletionList | None = None

        num_retries = 0
        # Retry while the server reports an incomplete list (bounded at 30 attempts).
        # A plain-list response is normalized to the CompletionList shape first.
        while response is None or (response["isIncomplete"] and num_retries < 30):  # type: ignore
            response = self.server.send.completion(completion_params)
            if isinstance(response, list):
                response = {"items": response, "isIncomplete": False}
            num_retries += 1

        # TODO: Understand how to appropriately handle `isIncomplete`
        if response is None or (response["isIncomplete"] and not allow_incomplete):  # type: ignore
            return []

        if "items" in response:
            response = response["items"]  # type: ignore

        response = cast(list[LSPTypes.CompletionItem], response)

        # TODO: Handle the case when the completion is a keyword
        items = [item for item in response if item["kind"] != LSPTypes.CompletionItemKind.Keyword]

        completions_list: list[ls_types.CompletionItem] = []

        for item in items:
            assert "insertText" in item or "textEdit" in item
            assert "kind" in item

            completion_item = {}
            if "detail" in item:
                completion_item["detail"] = item["detail"]

            # Determine the completion text, preferring label > insertText > textEdit.
            if "label" in item:
                completion_item["completionText"] = item["label"]
                completion_item["kind"] = item["kind"]  # type: ignore
            elif "insertText" in item:  # type: ignore
                completion_item["completionText"] = item["insertText"]
                completion_item["kind"] = item["kind"]
            elif "textEdit" in item and "newText" in item["textEdit"]:
                completion_item["completionText"] = item["textEdit"]["newText"]
                completion_item["kind"] = item["kind"]
            elif "textEdit" in item and "range" in item["textEdit"]:
                new_dot_lineno, new_dot_colno = (
                    completion_params["position"]["line"],
                    completion_params["position"]["character"],
                )
                # Only a pure insertion at exactly the requested position is supported here.
                assert all(
                    (
                        item["textEdit"]["range"]["start"]["line"] == new_dot_lineno,
                        item["textEdit"]["range"]["start"]["character"] == new_dot_colno,
                        item["textEdit"]["range"]["start"]["line"] == item["textEdit"]["range"]["end"]["line"],
                        item["textEdit"]["range"]["start"]["character"] == item["textEdit"]["range"]["end"]["character"],
                    )
                )
                completion_item["completionText"] = item["textEdit"]["newText"]
                completion_item["kind"] = item["kind"]
            elif "textEdit" in item and "insert" in item["textEdit"]:
                assert False
            else:
                assert False

            completion_item = ls_types.CompletionItem(**completion_item)  # type: ignore
            completions_list.append(completion_item)

        # Deduplicate via a canonical JSON representation (the item dicts are not hashable).
        return [json.loads(json_repr) for json_repr in set(json.dumps(item, sort_keys=True) for item in completions_list)]
def _request_document_symbols(
    self, relative_file_path: str, file_data: LSPFileBuffer | None
) -> list[SymbolInformation] | list[DocumentSymbol] | None:
    """
    Sends a [documentSymbol](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_documentSymbol)
    request to the language server to find symbols in the given file - or returns a cached result if available.

    Cached entries are keyed by the relative path and validated against the file's
    content hash, so stale entries are refreshed automatically after edits.

    :param relative_file_path: the relative path of the file that has the symbols.
    :param file_data: the file data buffer, if already opened. If None, the file will be opened in this method.
    :return: the list of root symbols in the file.
    """

    def get_cached_raw_document_symbols(cache_key: str, fd: LSPFileBuffer) -> list[SymbolInformation] | list[DocumentSymbol] | None:
        # Returns the cached raw symbols only if the cached content hash still matches.
        file_hash_and_result = self._raw_document_symbols_cache.get(cache_key)
        if file_hash_and_result is None:
            log.debug("No cache hit for raw document symbols in %s", relative_file_path)
            log.debug("perf: raw_document_symbols_cache MISS path=%s", relative_file_path)
            return None
        file_hash, result = file_hash_and_result
        if file_hash == fd.content_hash:
            log.debug("Returning cached raw document symbols for %s", relative_file_path)
            log.debug("perf: raw_document_symbols_cache HIT path=%s", relative_file_path)
            return result
        log.debug("Document content for %s has changed (raw symbol cache is not up-to-date)", relative_file_path)
        log.debug("perf: raw_document_symbols_cache STALE path=%s", relative_file_path)
        return None

    def get_raw_document_symbols(fd: LSPFileBuffer) -> list[SymbolInformation] | list[DocumentSymbol] | None:
        # check for cached result
        cache_key = relative_file_path
        response = get_cached_raw_document_symbols(cache_key, fd)
        if response is not None:
            return response

        # no cached result, query language server
        log.debug(f"Requesting document symbols for {relative_file_path} from the Language Server")
        response = self.server.send.document_symbol(
            {"textDocument": {"uri": pathlib.Path(os.path.join(self.repository_root_path, relative_file_path)).as_uri()}}
        )

        # update cache
        self._raw_document_symbols_cache[cache_key] = (fd.content_hash, response)
        self._raw_document_symbols_cache_is_modified = True

        return response

    with self._open_file_context(relative_file_path, file_buffer=file_data) as fd:
        return get_raw_document_symbols(fd)
def request_document_symbols(self, relative_file_path: str, file_buffer: LSPFileBuffer | None = None) -> DocumentSymbols:
    """
    Retrieves the collection of symbols in the given file

    :param relative_file_path: The relative path of the file that has the symbols
    :param file_buffer: an optional file buffer if the file is already opened.

    :return: the collection of symbols in the file.
        All contained symbols will have a location, children, and a parent attribute,
        where the parent attribute is None for root symbols.
        Note that this is slightly different from the call to request_full_symbol_tree,
        where the parent attribute will be the file symbol which in turn may have a package symbol as parent.
        If you need a symbol tree that contains file symbols as well, you should use `request_full_symbol_tree` instead.
    """
    with self._open_file_context(relative_file_path, file_buffer, open_in_ls=False) as file_data:
        # check if the desired result is cached (keyed by relative path, validated by content hash)
        cache_key = relative_file_path
        file_hash_and_result = self._document_symbols_cache.get(cache_key)
        if file_hash_and_result is None:
            log.debug("No cache hit for document symbols in %s", relative_file_path)
            log.debug("perf: document_symbols_cache MISS path=%s", relative_file_path)
        else:
            file_hash, document_symbols = file_hash_and_result
            if file_hash == file_data.content_hash:
                log.debug("Returning cached document symbols for %s", relative_file_path)
                log.debug("perf: document_symbols_cache HIT path=%s", relative_file_path)
                return document_symbols
            log.debug("Cached document symbol content for %s has changed", relative_file_path)
            log.debug("perf: document_symbols_cache STALE path=%s", relative_file_path)

        # no cached result: request the root symbols from the language server
        root_symbols = self._request_document_symbols(relative_file_path, file_data)
        if root_symbols is None:
            log.warning(
                f"Received None response from the Language Server for document symbols in {relative_file_path}. "
                f"This means the language server can't understand this file (possibly due to syntax errors). It may also be due to a bug or misconfiguration of the LS. "
                f"Returning empty list",
            )
            return DocumentSymbols([])
        assert isinstance(root_symbols, list), f"Unexpected response from Language Server: {root_symbols}"
        log.debug("Received %d root symbols for %s from the language server", len(root_symbols), relative_file_path)

        # Shared factory so all symbol bodies are extracted from the same file buffer.
        body_factory = SymbolBodyFactory(file_data)

        def convert_to_unified_symbol(original_symbol_dict: GenericDocumentSymbol) -> ls_types.UnifiedSymbolInformation:
            """
            Converts the given symbol dictionary to the unified representation, ensuring
            that all required fields are present (except 'children' which is handled separately).

            :param original_symbol_dict: the item to augment
            :return: the augmented item (new object)
            """
            # noinspection PyInvalidCast
            item = cast(ls_types.UnifiedSymbolInformation, dict(original_symbol_dict))
            absolute_path = os.path.join(self.repository_root_path, relative_file_path)

            # handle missing location and path entries
            if "location" not in item:
                uri = pathlib.Path(absolute_path).as_uri()
                assert "range" in item
                tree_location = ls_types.Location(
                    uri=uri,
                    range=item["range"],
                    absolutePath=absolute_path,
                    relativePath=relative_file_path,
                )
                item["location"] = tree_location
            location = item["location"]
            if "absolutePath" not in location:
                location["absolutePath"] = absolute_path  # type: ignore
            if "relativePath" not in location:
                location["relativePath"] = relative_file_path  # type: ignore
            item["body"] = self.create_symbol_body(item, factory=body_factory)

            # handle missing selectionRange
            if "selectionRange" not in item:
                if "range" in item:
                    item["selectionRange"] = item["range"]
                else:
                    item["selectionRange"] = item["location"]["range"]

            return item

        def convert_symbols_with_common_parent(
            symbols: list[DocumentSymbol] | list[SymbolInformation] | list[UnifiedSymbolInformation],
            parent: ls_types.UnifiedSymbolInformation | None,
        ) -> list[ls_types.UnifiedSymbolInformation]:
            """
            Converts the given symbols into UnifiedSymbolInformation with proper parent-child relationships,
            adding overload indices for symbols with the same name under the same parent.
            """
            # First pass: count name occurrences so overload indices are only assigned to duplicates.
            total_name_counts: dict[str, int] = defaultdict(lambda: 0)
            for symbol in symbols:
                total_name_counts[symbol["name"]] += 1

            name_counts: dict[str, int] = defaultdict(lambda: 0)
            unified_symbols = []
            for symbol in symbols:
                usymbol = convert_to_unified_symbol(symbol)
                if total_name_counts[usymbol["name"]] > 1:
                    usymbol["overload_idx"] = name_counts[usymbol["name"]]
                    name_counts[usymbol["name"]] += 1
                usymbol["parent"] = parent
                if "children" in usymbol:
                    # Recurse, making this symbol the parent of its children.
                    usymbol["children"] = convert_symbols_with_common_parent(usymbol["children"], usymbol)  # type: ignore
                else:
                    usymbol["children"] = []  # type: ignore
                unified_symbols.append(usymbol)
            return unified_symbols

        unified_root_symbols = convert_symbols_with_common_parent(root_symbols, None)
        document_symbols = DocumentSymbols(unified_root_symbols)

        # update cache
        log.debug("Updating cached document symbols for %s", relative_file_path)
        self._document_symbols_cache[cache_key] = (file_data.content_hash, document_symbols)
        self._document_symbols_cache_is_modified = True

        return document_symbols
def request_full_symbol_tree(self, within_relative_path: str | None = None) -> list[ls_types.UnifiedSymbolInformation]:
    """
    Will go through all files in the project or within a relative path and build a tree of symbols.
    Note: this may be slow the first time it is called, especially if `within_relative_path` is not used to restrict the search.

    For each file, a symbol of kind File (2) will be created. For directories, a symbol of kind Package (4) will be created.
    All symbols will have a children attribute, thereby representing the tree structure of all symbols in the project
    that are within the repository.
    All symbols except the root packages will have a parent attribute.

    Will ignore directories starting with '.', language-specific defaults
    and user-configured directories (e.g. from .gitignore).

    :param within_relative_path: pass a relative path to only consider symbols within this path.
        If a file is passed, only the symbols within this file will be considered.
        If a directory is passed, all files within this directory will be considered.

    :return: A list of root symbols representing the top-level packages/modules in the project.
    """
    if within_relative_path is not None:
        within_abs_path = os.path.join(self.repository_root_path, within_relative_path)
        if not os.path.exists(within_abs_path):
            raise FileNotFoundError(f"File or directory not found: {within_abs_path}")
        if os.path.isfile(within_abs_path):
            # Single-file case: no package/file wrapping, just the file's own root symbols.
            if self.is_ignored_path(within_relative_path):
                log.error("You passed a file explicitly, but it is ignored. This is probably an error. File: %s", within_relative_path)
                return []
            else:
                root_nodes = self.request_document_symbols(within_relative_path).root_symbols
                return root_nodes

    # Helper function to recursively process directories
    def process_directory(rel_dir_path: str) -> list[ls_types.UnifiedSymbolInformation]:
        abs_dir_path = self.repository_root_path if rel_dir_path == "." else os.path.join(self.repository_root_path, rel_dir_path)
        abs_dir_path = os.path.realpath(abs_dir_path)

        if self.is_ignored_path(str(Path(abs_dir_path).relative_to(self.repository_root_path))):
            log.debug("Skipping directory: %s (because it should be ignored)", rel_dir_path)
            return []

        result = []
        try:
            contained_dir_or_file_names = os.listdir(abs_dir_path)
        except OSError:
            # Unreadable directory (permissions, races): treat as empty rather than failing the walk.
            return []

        # Create package symbol for directory
        package_symbol = ls_types.UnifiedSymbolInformation(  # type: ignore
            name=os.path.basename(abs_dir_path),
            kind=ls_types.SymbolKind.Package,
            location=ls_types.Location(
                uri=str(pathlib.Path(abs_dir_path).as_uri()),
                range={"start": {"line": 0, "character": 0}, "end": {"line": 0, "character": 0}},
                absolutePath=str(abs_dir_path),
                relativePath=str(Path(abs_dir_path).resolve().relative_to(self.repository_root_path)),
            ),
            children=[],
        )
        result.append(package_symbol)

        for contained_dir_or_file_name in contained_dir_or_file_names:
            contained_dir_or_file_abs_path = os.path.join(abs_dir_path, contained_dir_or_file_name)

            # obtain relative path
            try:
                contained_dir_or_file_rel_path = str(
                    Path(contained_dir_or_file_abs_path).resolve().relative_to(self.repository_root_path)
                )
            except ValueError as e:
                # Typically happens when the path is not under the repository root (e.g., symlink pointing outside)
                log.warning(
                    "Skipping path %s; likely outside of the repository root %s [cause: %s]",
                    contained_dir_or_file_abs_path,
                    self.repository_root_path,
                    e,
                )
                continue

            if self.is_ignored_path(contained_dir_or_file_rel_path):
                log.debug("Skipping item: %s (because it should be ignored)", contained_dir_or_file_rel_path)
                continue

            if os.path.isdir(contained_dir_or_file_abs_path):
                # Recurse into subdirectory and attach its package symbols as children.
                child_symbols = process_directory(contained_dir_or_file_rel_path)
                package_symbol["children"].extend(child_symbols)
                for child in child_symbols:
                    child["parent"] = package_symbol
            elif os.path.isfile(contained_dir_or_file_abs_path):
                with self._open_file_context(contained_dir_or_file_rel_path, open_in_ls=False) as file_data:
                    document_symbols = self.request_document_symbols(contained_dir_or_file_rel_path, file_data)
                    file_root_nodes = document_symbols.root_symbols

                    # Create file symbol, link with children
                    file_range = self._get_range_from_file_content(file_data.contents)
                    file_symbol = ls_types.UnifiedSymbolInformation(  # type: ignore
                        name=os.path.splitext(contained_dir_or_file_name)[0],
                        kind=ls_types.SymbolKind.File,
                        range=file_range,
                        selectionRange=file_range,
                        location=ls_types.Location(
                            uri=str(pathlib.Path(contained_dir_or_file_abs_path).as_uri()),
                            range=file_range,
                            absolutePath=str(contained_dir_or_file_abs_path),
                            relativePath=str(Path(contained_dir_or_file_abs_path).resolve().relative_to(self.repository_root_path)),
                        ),
                        children=file_root_nodes,
                        parent=package_symbol,
                    )
                    for child in file_root_nodes:
                        child["parent"] = file_symbol

                    # Link file symbol with package
                    package_symbol["children"].append(file_symbol)

                    # TODO: Not sure if this is actually still needed given recent changes to relative path handling
                    # NOTE(review): this normalizes absolute relativePath entries to repository-relative
                    # ones for the current file's symbol subtree — presumably a legacy safeguard; confirm
                    # whether any language server still produces absolute paths here.
                    def fix_relative_path(nodes: list[ls_types.UnifiedSymbolInformation]) -> None:
                        for node in nodes:
                            if "location" in node and "relativePath" in node["location"]:
                                path = Path(node["location"]["relativePath"])  # type: ignore
                                if path.is_absolute():
                                    try:
                                        path = path.relative_to(self.repository_root_path)
                                        node["location"]["relativePath"] = str(path)
                                    except Exception:
                                        pass
                            if "children" in node:
                                fix_relative_path(node["children"])

                    fix_relative_path(file_root_nodes)

        return result

    # Start from the root or the specified directory
    start_rel_path = within_relative_path or "."
    return process_directory(start_rel_path)
@staticmethod
def _get_range_from_file_content(file_content: str) -> ls_types.Range:
    """
    Build a Range spanning the given file content, starting at the origin.
    """
    content_lines = file_content.split("\n")
    # End position: line index equals the line count, character equals the final line's length.
    range_start = ls_types.Position(line=0, character=0)
    range_end = ls_types.Position(line=len(content_lines), character=len(content_lines[-1]))
    return ls_types.Range(start=range_start, end=range_end)
def request_dir_overview(self, relative_dir_path: str) -> dict[str, list[UnifiedSymbolInformation]]:
    """
    :param relative_dir_path: the relative path of the directory to analyze
    :return: A mapping of all relative paths analyzed to lists of top-level symbols in the corresponding file.
    """
    symbol_tree = self.request_full_symbol_tree(relative_dir_path)
    # Initialize result dictionary
    result: dict[str, list[UnifiedSymbolInformation]] = defaultdict(list)

    # Helper function to process a symbol and its children
    def process_symbol(symbol: ls_types.UnifiedSymbolInformation) -> None:
        if symbol["kind"] == ls_types.SymbolKind.File:
            # For file symbols, process their children (top-level symbols)
            for child in symbol["children"]:
                # Handle cross-platform path resolution (fixes Docker/macOS path issues)
                absolute_path = Path(child["location"]["absolutePath"]).resolve()
                repository_root = Path(self.repository_root_path).resolve()

                # Try pathlib first, fallback to alternative approach if paths are incompatible
                try:
                    path = absolute_path.relative_to(repository_root)
                except ValueError:
                    # If paths are from different roots (e.g., /workspaces vs /Users),
                    # use the relativePath from location if available, or extract from absolutePath
                    if "relativePath" in child["location"] and child["location"]["relativePath"]:
                        path = Path(child["location"]["relativePath"])
                    else:
                        # Extract relative path by finding common structure
                        # Example: /workspaces/.../test_repo/file.py -> test_repo/file.py
                        path_parts = absolute_path.parts
                        # Find the last common part or use a fallback
                        if "test_repo" in path_parts:
                            test_repo_idx = path_parts.index("test_repo")
                            path = Path(*path_parts[test_repo_idx:])
                        else:
                            # Last resort: use filename only
                            path = Path(absolute_path.name)

                result[str(path)].append(child)
        else:
            # For package/directory symbols, process their children
            for child in symbol["children"]:
                process_symbol(child)

    # Process each root symbol
    for root in symbol_tree:
        process_symbol(root)

    return result
def request_document_overview(self, relative_file_path: str) -> list[UnifiedSymbolInformation]:
    """
    :param relative_file_path: the relative path of the file to analyze
    :return: the top-level symbols in the given file.
    """
    document_symbols = self.request_document_symbols(relative_file_path)
    return document_symbols.root_symbols
def request_overview(self, within_relative_path: str) -> dict[str, list[UnifiedSymbolInformation]]:
    """
    An overview of all symbols in the given file or directory.

    :param within_relative_path: the relative path to the file or directory to get the overview of.
    :return: A mapping of all relative paths analyzed to lists of top-level symbols in the corresponding file.
    """
    target = (Path(self.repository_root_path) / within_relative_path).resolve()
    if not target.exists():
        raise FileNotFoundError(f"File or directory not found: {target}")
    # A single file maps to its own overview; a directory is delegated to the dir walker.
    if target.is_file():
        return {within_relative_path: self.request_document_overview(within_relative_path)}
    return self.request_dir_overview(within_relative_path)
def request_hover(
    self, relative_file_path: str, line: int, column: int, file_buffer: LSPFileBuffer | None = None
) -> ls_types.Hover | None:
    """
    Raise a [textDocument/hover](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_hover) request to the Language Server
    for the given position in the given file, waiting for and returning the result.

    :param relative_file_path: The relative path of the file that has the hover information
    :param line: The line number of the symbol
    :param column: The column number of the symbol
    :param file_buffer: The file buffer to use for the request. If not provided, the file will be read from disk.
        Can be used for optimizing number of file reads in downstream code
    :return: the hover information, or None if the server provided none
    """
    with self._open_file_context(relative_file_path, file_buffer=file_buffer) as opened_buffer:
        return self._request_hover(opened_buffer, line, column)
def _request_hover(self, file_buffer: LSPFileBuffer, line: int, column: int) -> ls_types.Hover | None:
    """
    Perform the actual hover request, normalizing empty results to None.
    """
    hover_params = {
        "textDocument": {"uri": file_buffer.uri},
        "position": {
            "line": line,
            "character": column,
        },
    }
    raw_response = self.server.send.hover(hover_params)
    if raw_response is None:
        return None
    assert isinstance(raw_response, dict)

    hover_contents = raw_response.get("contents")
    if not hover_contents:
        return None
    # A contents dict whose "value" is empty carries no useful information either.
    if isinstance(hover_contents, dict) and not hover_contents.get("value"):
        return None
    return ls_types.Hover(**raw_response)  # type: ignore
def request_signature_help(self, relative_file_path: str, line: int, column: int) -> ls_types.SignatureHelp | None:
    """
    Send a [textDocument/signatureHelp](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_signatureHelp)
    request for the given position.

    Note: unlike `hover`, servers only answer this at the position of a *call*, not at a symbol
    definition, which makes it of limited use for Serena; the result is verbose but well structured.

    :param relative_file_path: path (relative to the repository root) of the file to query
    :param line: 0-indexed line of the position
    :param column: 0-indexed column of the position
    :return: the signature help, or None if the server returned nothing
    """
    document_uri = pathlib.Path(os.path.join(self.repository_root_path, relative_file_path)).as_uri()
    with self.open_file(relative_file_path):
        raw_response = self.server.send.signature_help(
            {
                "textDocument": {"uri": document_uri},
                "position": {"line": line, "character": column},
            }
        )
        if raw_response is None:
            return None
        assert isinstance(raw_response, dict)
        return ls_types.SignatureHelp(**raw_response)  # type: ignore
def create_symbol_body(
    self,
    symbol: ls_types.UnifiedSymbolInformation | LSPTypes.SymbolInformation,
    factory: SymbolBodyFactory | None = None,
) -> SymbolBody:
    """
    Build the source body object for the given symbol.

    :param symbol: the symbol whose body shall be created; must carry a location with a
        relativePath when no factory is supplied
    :param factory: optional pre-built factory (avoids re-opening the symbol's file);
        if None, the file is opened and a factory is created on the fly
    :return: the symbol body
    """
    if factory is not None:
        return factory.create_symbol_body(symbol)
    assert "relativePath" in symbol["location"]
    with self._open_file_context(symbol["location"]["relativePath"]) as opened_file:  # type: ignore
        return SymbolBodyFactory(opened_file).create_symbol_body(symbol)
def request_referencing_symbols(
    self,
    relative_file_path: str,
    line: int,
    column: int,
    include_imports: bool = True,
    include_self: bool = False,
    include_body: bool = False,
    include_file_symbols: bool = False,
) -> list[ReferenceInSymbol]:
    """
    Finds all symbols that reference the symbol at the given location.
    This is similar to request_references but filters to only include symbols
    (functions, methods, classes, etc.) that reference the target symbol.

    :param relative_file_path: The relative path to the file.
    :param line: The 0-indexed line number.
    :param column: The 0-indexed column number.
    :param include_imports: whether to also include imports as references.
        Unfortunately, the LSP does not have an import type, so the references corresponding to imports
        will not be easily distinguishable from definitions.
    :param include_self: whether to include the references that is the "input symbol" itself.
        Only has an effect if the relative_file_path, line and column point to a symbol, for example a definition.
    :param include_body: whether to include the body of the symbols in the result.
    :param include_file_symbols: whether to include references that are file symbols. This
        is often a fallback mechanism for when the reference cannot be resolved to a symbol.
    :return: List of objects containing the symbol and the location of the reference.
    """
    if not self.server_started:
        log.error("request_referencing_symbols called before Language Server started")
        raise SolidLSPException("Language Server not started")

    # First, get all references to the symbol
    references = self.request_references(relative_file_path, line, column)
    if not references:
        return []

    # Timing information is only collected when debug logging is enabled (perf log at the end)
    debug_enabled = log.isEnabledFor(logging.DEBUG)
    t0_loop = perf_counter() if debug_enabled else 0.0

    # For each reference, find the containing symbol
    result = []
    # The symbol at the queried location itself; detected while iterating (self-reference case)
    # and subsequently used to heuristically recognize imports of that symbol.
    incoming_symbol = None
    for ref in references:
        ref_path = ref["relativePath"]
        assert ref_path is not None
        ref_line = ref["range"]["start"]["line"]
        ref_col = ref["range"]["start"]["character"]

        with self.open_file(ref_path) as file_data:
            # Reuse one body factory per referenced file to avoid repeated reads
            body_factory = SymbolBodyFactory(file_data)
            # Get the containing symbol for this reference
            containing_symbol = self.request_containing_symbol(
                ref_path, ref_line, ref_col, include_body=include_body, body_factory=body_factory
            )
            if containing_symbol is None:
                # TODO: HORRIBLE HACK! I don't know how to do it better for now...
                # THIS IS BOUND TO BREAK IN MANY CASES! IT IS ALSO SPECIFIC TO PYTHON!
                # Background:
                # When a variable is used to change something, like
                #
                # instance = MyClass()
                # instance.status = "new status"
                #
                # we can't find the containing symbol for the reference to `status`
                # since there is no container on the line of the reference
                # The hack is to try to find a variable symbol in the containing module
                # by using the text of the reference to find the variable name (In a very heuristic way)
                # and then look for a symbol with that name and kind Variable
                ref_text = file_data.contents.split("\n")[ref_line]
                if "." in ref_text:
                    containing_symbol_name = ref_text.split(".")[0]
                    document_symbols = self.request_document_symbols(ref_path)
                    for symbol in document_symbols.iter_symbols():
                        if symbol["name"] == containing_symbol_name and symbol["kind"] == ls_types.SymbolKind.Variable:
                            # Shallow copy so we can point location/range at the reference site
                            # without mutating the cached document symbol
                            containing_symbol = copy(symbol)
                            containing_symbol["location"] = ref
                            containing_symbol["range"] = ref["range"]
                            break

            # We failed retrieving the symbol, falling back to creating a file symbol
            if containing_symbol is None and include_file_symbols:
                log.warning(f"Could not find containing symbol for {ref_path}:{ref_line}:{ref_col}. Returning file symbol instead")
                fileRange = self._get_range_from_file_content(file_data.contents)
                location = ls_types.Location(
                    uri=str(pathlib.Path(os.path.join(self.repository_root_path, ref_path)).as_uri()),
                    range=fileRange,
                    absolutePath=str(os.path.join(self.repository_root_path, ref_path)),
                    relativePath=ref_path,
                )
                # Use the file's base name (without extension) as the synthetic symbol name
                name = os.path.splitext(os.path.basename(ref_path))[0]
                containing_symbol = ls_types.UnifiedSymbolInformation(
                    kind=ls_types.SymbolKind.File,
                    range=fileRange,
                    selectionRange=fileRange,
                    location=location,
                    name=name,
                    children=[],
                )
                if include_body:
                    containing_symbol["body"] = self.create_symbol_body(containing_symbol, factory=body_factory)

        # Skip unresolved references and (unless requested) file-symbol fallbacks
        if containing_symbol is None or (not include_file_symbols and containing_symbol["kind"] == ls_types.SymbolKind.File):
            continue

        assert "location" in containing_symbol
        assert "selectionRange" in containing_symbol

        # Checking for self-reference
        if (
            containing_symbol["location"]["relativePath"] == relative_file_path
            and containing_symbol["selectionRange"]["start"]["line"] == ref_line
            and containing_symbol["selectionRange"]["start"]["character"] == ref_col
        ):
            incoming_symbol = containing_symbol
            if include_self:
                result.append(ReferenceInSymbol(symbol=containing_symbol, line=ref_line, character=ref_col))
                continue
            log.debug(f"Found self-reference for {incoming_symbol['name']}, skipping it since {include_self=}")
            continue

        # checking whether reference is an import
        # This is neither really safe nor elegant, but if we don't do it,
        # there is no way to distinguish between definitions and imports as import is not a symbol-type
        # and we get the type referenced symbol resulting from imports...
        if (
            not include_imports
            and incoming_symbol is not None
            and containing_symbol["name"] == incoming_symbol["name"]
            and containing_symbol["kind"] == incoming_symbol["kind"]
        ):
            # NOTE(review): this message is missing a space between the symbol name and "in"
            log.debug(
                f"Found import of referenced symbol {incoming_symbol['name']}"
                f"in {containing_symbol['location']['relativePath']}, skipping"
            )
            continue

        result.append(ReferenceInSymbol(symbol=containing_symbol, line=ref_line, character=ref_col))

    if debug_enabled:
        loop_elapsed_ms = (perf_counter() - t0_loop) * 1000
        unique_files = len({r.symbol["location"]["relativePath"] for r in result})
        log.debug(
            "perf: request_referencing_symbols path=%s loop_elapsed_ms=%.2f ref_count=%d result_count=%d unique_files=%d",
            relative_file_path,
            loop_elapsed_ms,
            len(references),
            len(result),
            unique_files,
        )
    return result
def request_containing_symbol(
    self,
    relative_file_path: str,
    line: int,
    column: int | None = None,
    strict: bool = False,
    include_body: bool = False,
    body_factory: SymbolBodyFactory | None = None,
) -> ls_types.UnifiedSymbolInformation | None:
    """
    Finds the first symbol containing the position for the given file.
    For Python, container symbols are considered to be those with kinds corresponding to
    functions, methods, or classes (typically: Function (12), Method (6), Class (5)).

    The method operates as follows:

      - Request the document symbols for the file.
      - Filter symbols to those that start at or before the given line.
      - From these, first look for symbols whose range contains the (line, column).
      - If one or more symbols contain the position, return the one with the greatest starting position
        (i.e. the innermost container).
      - If none (strictly) contain the position, return the symbol with the greatest starting position
        among those above the given line.
      - If no container candidates are found, return None.

    :param relative_file_path: The relative path to the Python file.
    :param line: The 0-indexed line number.
    :param column: The 0-indexed column (also called character). If not passed, the lookup will be based
        only on the line.
    :param strict: If True, the position must be strictly within the range of the symbol.
        Setting to True is useful for example for finding the parent of a symbol, as with strict=False,
        and the line pointing to a symbol itself, the containing symbol will be the symbol itself
        (and not the parent).
    :param include_body: Whether to include the body of the symbol in the result.
    :param body_factory: optional factory used to extract the body when include_body is True;
        if None, one is created on demand by create_symbol_body.
    :return: The container symbol (if found) or None.
    """
    # checking if the line is empty, unfortunately ugly and duplicating code, but I don't want to refactor
    with self.open_file(relative_file_path):
        absolute_file_path = str(PurePath(self.repository_root_path, relative_file_path))
        content = FileUtils.read_file(absolute_file_path, self._encoding)
        if content.split("\n")[line].strip() == "":
            log.error(f"Passing empty lines to request_container_symbol is currently not supported, {relative_file_path=}, {line=}")
            return None

    document_symbols = self.request_document_symbols(relative_file_path)

    # make jedi and pyright api compatible
    # the former has no location, the later has no range
    # we will just always add location of the desired format to all symbols
    for symbol in document_symbols.iter_symbols():
        if "location" not in symbol:
            range = symbol["range"]
            location = ls_types.Location(
                uri=f"file:/{absolute_file_path}",
                range=range,
                absolutePath=absolute_file_path,
                relativePath=relative_file_path,
            )
            symbol["location"] = location
        else:
            location = symbol["location"]
            assert "range" in location
            # normalize paths/URI of server-provided locations so downstream code can rely on them
            location["absolutePath"] = absolute_file_path
            location["relativePath"] = relative_file_path
            location["uri"] = Path(absolute_file_path).as_uri()

    # Allowed container kinds, currently only for Python
    container_symbol_kinds = {ls_types.SymbolKind.Method, ls_types.SymbolKind.Function, ls_types.SymbolKind.Class}

    def is_position_in_range(line: int, range_d: ls_types.Range) -> bool:
        # NOTE: the `line` parameter deliberately shadows the outer one (it is called with the same value);
        # `column` and `strict` are taken from the enclosing scope.
        start = range_d["start"]
        end = range_d["end"]

        column_condition = True
        if strict:
            # position must lie strictly after the symbol's start
            line_condition = end["line"] >= line > start["line"]
            if column is not None and line == start["line"]:
                column_condition = column > start["character"]
        else:
            line_condition = end["line"] >= line >= start["line"]
            if column is not None and line == start["line"]:
                column_condition = column >= start["character"]
        return line_condition and column_condition

    # Only consider containers that are not one-liners (otherwise we may get imports)
    candidate_containers = [
        s
        for s in document_symbols.iter_symbols()
        if s["kind"] in container_symbol_kinds and s["location"]["range"]["start"]["line"] != s["location"]["range"]["end"]["line"]
    ]
    # Variables are also admitted as containers (one-liner restriction does not apply to them)
    var_containers = [s for s in document_symbols.iter_symbols() if s["kind"] == ls_types.SymbolKind.Variable]
    candidate_containers.extend(var_containers)

    if not candidate_containers:
        return None

    # From the candidates, find those whose range contains the given position.
    containing_symbols = []
    for symbol in candidate_containers:
        s_range = symbol["location"]["range"]
        if not is_position_in_range(line, s_range):
            continue
        containing_symbols.append(symbol)

    if containing_symbols:
        # Return the one with the greatest starting position (i.e. the innermost container).
        containing_symbol = max(containing_symbols, key=lambda s: s["location"]["range"]["start"]["line"])
        if include_body:
            containing_symbol["body"] = self.create_symbol_body(containing_symbol, factory=body_factory)
        return containing_symbol
    else:
        return None
def request_container_of_symbol(
    self, symbol: ls_types.UnifiedSymbolInformation, include_body: bool = False
) -> ls_types.UnifiedSymbolInformation | None:
    """
    Determine the symbol that contains the given symbol, if any.

    If the symbol already carries a "parent" attribute, that parent is returned directly
    without querying the language server.

    :param symbol: the symbol whose container shall be determined
    :param include_body: whether to include the container's source body in the result
    :return: the containing symbol, or None if no container is found
    """
    if "parent" in symbol:
        return symbol["parent"]
    assert "location" in symbol, f"Symbol {symbol} has no location and no parent attribute"
    start_position = symbol["location"]["range"]["start"]
    # strict=True so that the symbol's own definition line yields its parent, not itself
    return self.request_containing_symbol(
        symbol["location"]["relativePath"],  # type: ignore
        start_position["line"],
        start_position["character"],
        strict=True,
        include_body=include_body,
    )
def _get_preferred_definition(self, definitions: list[ls_types.Location]) -> ls_types.Location:
    """
    Select the preferred definition from a list of definitions.

    When a server returns several definitions (e.g. both source and type definitions),
    this hook decides which one downstream code uses. The base implementation simply
    picks the first entry; subclasses may override it with language-specific
    preferences (e.g. TypeScript/Vue servers preferring source files over .d.ts files).

    :param definitions: A non-empty list of definition locations.
    :return: The preferred definition location.
    """
    preferred, *_ = definitions
    return preferred
def request_defining_symbol(
    self,
    relative_file_path: str,
    line: int,
    column: int,
    include_body: bool = False,
) -> ls_types.UnifiedSymbolInformation | None:
    """
    Finds the symbol that defines the symbol at the given location.

    The definition of the symbol at the given position is resolved first; the full symbol
    information at that definition site is then looked up and returned.

    :param relative_file_path: The relative path to the file.
    :param line: The 0-indexed line number.
    :param column: The 0-indexed column number.
    :param include_body: whether to include the body of the symbol in the result.
    :return: The symbol information for the definition, or None if not found.
    :raises SolidLSPException: if the language server has not been started
    """
    if not self.server_started:
        log.error("request_defining_symbol called before language server started")
        raise SolidLSPException("Language Server not started")

    definitions = self.request_definition(relative_file_path, line, column)
    if not definitions:
        return None

    # Among possibly multiple definitions, pick the preferred one (overridable per language)
    preferred = self._get_preferred_definition(definitions)
    definition_path = preferred["relativePath"]
    assert definition_path is not None
    definition_start = preferred["range"]["start"]

    # Resolve the symbol at (or containing) the definition location
    return self.request_containing_symbol(
        definition_path,
        definition_start["line"],
        definition_start["character"],
        strict=False,
        include_body=include_body,
    )
def _document_symbols_cache_fingerprint(self) -> Hashable | None:
"""
Returns a fingerprint of any language server-specific aspects that result in changes
to the high-level document symbol information.
Language servers must implement this method/change the return value
* whenever they change the `request_document_symbols` implementation to modify the returned content
* are reconfigured in a way that affects the returned contents (e.g. context-specific configuration
such as build flags or environment variables); configuration options can, in such cases, be
hashed together to produce a single fingerprint value.
Whenever the value changes, the document symbols cache will be invalidated and re-populated.
The value must be hashable and safe for inclusion in cache version tuples.
E.g. use an integer, a string or a tuple of integers/strings.
Returns None if no context-specific fingerprint is needed.
"""
return None
def _document_symbols_cache_version(self) -> Hashable:
"""
Return the version for the document symbols cache.
Incorporates cache context fingerprint if provided by the language server.
"""
fingerprint = self._document_symbols_cache_fingerprint()
if fingerprint is not None:
return (self.DOCUMENT_SYMBOL_CACHE_VERSION, fingerprint)
return self.DOCUMENT_SYMBOL_CACHE_VERSION
def _save_raw_document_symbols_cache(self) -> None:
    """
    Write the raw document symbols cache to disk, unless it is unchanged since the last save.
    Failures are logged, never raised (the cache file may be left corrupted in that case).
    """
    if not self._raw_document_symbols_cache_is_modified:
        log.debug("No changes to raw document symbols cache, skipping save")
        return
    cache_file = self.cache_dir / self.RAW_DOCUMENT_SYMBOL_CACHE_FILENAME
    log.info("Saving updated raw document symbols cache to %s", cache_file)
    try:
        save_cache(str(cache_file), self._raw_document_symbols_cache_version(), self._raw_document_symbols_cache)
    except Exception as e:
        log.error(
            "Failed to save raw document symbols cache to %s: %s. Note: this may have resulted in a corrupted cache file.",
            cache_file,
            e,
        )
    else:
        # only clear the dirty flag once the save actually succeeded
        self._raw_document_symbols_cache_is_modified = False
def _raw_document_symbols_cache_version(self) -> tuple[Hashable, ...]:
base_version: tuple[Hashable, ...] = (self.RAW_DOCUMENT_SYMBOLS_CACHE_VERSION, self._ls_specific_raw_document_symbols_cache_version)
fingerprint = self._document_symbols_cache_fingerprint()
if fingerprint is not None:
return (*base_version, fingerprint)
return base_version
def _load_raw_document_symbols_cache(self) -> None:
    """
    Populate the raw document symbols cache from disk.

    If the current-format cache file does not exist, a legacy-format cache (if present) is
    migrated, saved in the new format, and the legacy file is deleted. Corrupt or failing
    caches are ignored (logged, never raised).
    """
    cache_file = self.cache_dir / self.RAW_DOCUMENT_SYMBOL_CACHE_FILENAME
    if not cache_file.exists():
        # check for legacy cache to load to migrate
        legacy_cache_file = self.cache_dir / self.RAW_DOCUMENT_SYMBOL_CACHE_FILENAME_LEGACY_FALLBACK
        if legacy_cache_file.exists():
            try:
                # legacy format: cache key -> (file content hash, (all symbols, root symbols))
                legacy_cache: dict[
                    str, tuple[str, tuple[list[ls_types.UnifiedSymbolInformation], list[ls_types.UnifiedSymbolInformation]]]
                ] = load_pickle(legacy_cache_file)
                log.info("Migrating legacy document symbols cache with %d entries", len(legacy_cache))
                num_symbols_migrated = 0
                migrated_cache = {}
                for cache_key, (file_hash, (all_symbols, root_symbols)) in legacy_cache.items():
                    # only include_body=True entries are migrated; other entries are dropped
                    if cache_key.endswith("-True"):  # include_body=True
                        # strip the "-True" suffix; the new format no longer encodes include_body
                        new_cache_key = cache_key[:-5]
                        migrated_cache[new_cache_key] = (file_hash, root_symbols)
                        num_symbols_migrated += len(all_symbols)
                log.info("Migrated %d document symbols from legacy cache", num_symbols_migrated)
                self._raw_document_symbols_cache = migrated_cache  # type: ignore
                self._raw_document_symbols_cache_is_modified = True
                # persist in the new format, then remove the legacy file
                self._save_raw_document_symbols_cache()
                legacy_cache_file.unlink()
                return
            except Exception as e:
                log.error("Error during cache migration: %s", e)
                return

    # load existing cache (if any)
    if cache_file.exists():
        log.info("Loading document symbols cache from %s", cache_file)
        try:
            saved_cache = load_cache(str(cache_file), self._raw_document_symbols_cache_version())
            if saved_cache is not None:
                self._raw_document_symbols_cache = saved_cache
                log.info(f"Loaded {len(self._raw_document_symbols_cache)} entries from raw document symbols cache.")
        except Exception as e:
            # cache can become corrupt, so just skip loading it
            log.warning(
                "Failed to load raw document symbols cache from %s (%s); Ignoring cache.",
                cache_file,
                e,
            )
def _save_document_symbols_cache(self) -> None:
    """
    Write the processed document symbols cache to disk, unless it is unchanged since the
    last save. Failures are logged, never raised (the cache file may be left corrupted).
    """
    if not self._document_symbols_cache_is_modified:
        log.debug("No changes to document symbols cache, skipping save")
        return
    cache_file = self.cache_dir / self.DOCUMENT_SYMBOL_CACHE_FILENAME
    log.info("Saving updated document symbols cache to %s", cache_file)
    try:
        save_cache(str(cache_file), self._document_symbols_cache_version(), self._document_symbols_cache)
    except Exception as e:
        log.error(
            "Failed to save document symbols cache to %s: %s. Note: this may have resulted in a corrupted cache file.",
            cache_file,
            e,
        )
    else:
        # only clear the dirty flag once the save actually succeeded
        self._document_symbols_cache_is_modified = False
def _load_document_symbols_cache(self) -> None:
    """
    Populate the processed document symbols cache from disk.
    Missing files are ignored silently; corrupt/unreadable caches are logged and skipped.
    """
    cache_file = self.cache_dir / self.DOCUMENT_SYMBOL_CACHE_FILENAME
    if not cache_file.exists():
        return
    log.info("Loading document symbols cache from %s", cache_file)
    try:
        saved_cache = load_cache(str(cache_file), self._document_symbols_cache_version())
    except Exception as e:
        # cache can become corrupt, so just skip loading it
        log.warning(
            "Failed to load document symbols cache from %s (%s); Ignoring cache.",
            cache_file,
            e,
        )
        return
    if saved_cache is not None:
        self._document_symbols_cache = saved_cache
        log.info(f"Loaded {len(self._document_symbols_cache)} entries from document symbols cache.")
def save_cache(self) -> None:
    """
    Persist all symbol caches (raw and processed document symbols) to the cache directory.
    Each individual save is skipped internally when the respective cache is unmodified.
    """
    self._save_raw_document_symbols_cache()
    self._save_document_symbols_cache()
def request_workspace_symbol(self, query: str) -> list[ls_types.UnifiedSymbolInformation] | None:
    """
    Send a [workspace/symbol](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#workspace_symbol)
    request to find symbols across the whole workspace; wait for and convert the response.

    :param query: The query string to filter symbols by
    :return: A list of matching symbols, or None if the server returned no response
    """
    response = self.server.send.workspace_symbol({"query": query})
    if response is None:
        return None
    assert isinstance(response, list)
    symbols: list[ls_types.UnifiedSymbolInformation] = []
    for raw_symbol in response:
        assert isinstance(raw_symbol, dict)
        # every returned entry must at least carry a name, a kind and a location
        for required_key in (LSPConstants.NAME, LSPConstants.KIND, LSPConstants.LOCATION):
            assert required_key in raw_symbol
        symbols.append(ls_types.UnifiedSymbolInformation(**raw_symbol))  # type: ignore
    return symbols
def request_rename_symbol_edit(
    self,
    relative_file_path: str,
    line: int,
    column: int,
    new_name: str,
) -> ls_types.WorkspaceEdit | None:
    """
    Retrieve a WorkspaceEdit for renaming the symbol at the given location to the new name.

    Does not apply the edit, just retrieves it. In order to actually rename the symbol,
    call apply_workspace_edit.

    :param relative_file_path: The relative path to the file containing the symbol
    :param line: The 0-indexed line number of the symbol
    :param column: The 0-indexed column number of the symbol
    :param new_name: The new name for the symbol
    :return: A WorkspaceEdit containing the changes needed to rename the symbol, or None if rename is not supported
    """
    symbol_uri = pathlib.Path(os.path.join(self.repository_root_path, relative_file_path)).as_uri()
    rename_params = RenameParams(
        textDocument=ls_types.TextDocumentIdentifier(uri=symbol_uri),
        position=ls_types.Position(line=line, character=column),
        newName=new_name,
    )
    with self.open_file(relative_file_path):
        return self.server.send.rename(rename_params)
def apply_text_edits_to_file(self, relative_path: str, edits: list[ls_types.TextEdit]) -> None:
    """
    Apply a list of text edits to a file.

    :param relative_path: The relative path of the file to edit
    :param edits: List of TextEdit dictionaries to apply
    """

    def start_of(edit: ls_types.TextEdit) -> tuple[int, int]:
        # sort key: (line, character) of the edit's start position
        start = edit["range"]["start"]
        return start["line"], start["character"]

    with self.open_file(relative_path):
        # Apply edits from the end of the file towards the beginning, so that earlier
        # positions stay valid while later spans are being modified.
        for edit in sorted(edits, key=start_of, reverse=True):
            start = edit["range"]["start"]
            end = edit["range"]["end"]
            start_pos = ls_types.Position(line=start["line"], character=start["character"])
            end_pos = ls_types.Position(line=end["line"], character=end["character"])
            # Replace = delete the old span, then insert the new text at its start
            self.delete_text_between_positions(relative_path, start_pos, end_pos)
            self.insert_text_at_position(relative_path, start_pos["line"], start_pos["character"], edit["newText"])
def start(self) -> "SolidLanguageServer":
    """
    Starts the language server process and connects to it. Call shutdown when ready.

    :return: self for method chaining
    """
    log.info(f"Starting language server with language {self.language_server.language} for {self.language_server.repository_root_path}")
    self._start_server_process()
    return self
def stop(self, shutdown_timeout: float = 2.0) -> None:
    """
    Stops the language server process.
    This function never raises an exception (any exceptions during shutdown are logged).

    :param shutdown_timeout: time, in seconds, to wait for the server to shutdown gracefully before killing it
    """
    try:
        self._shutdown(timeout=shutdown_timeout)
    except Exception as e:
        # deliberately swallowed: shutdown is best-effort and must not propagate failures to callers
        log.warning(f"Exception while shutting down language server: {e}")
@property
def language_server(self) -> Self:
    # Returns self — presumably for interface compatibility, so callers can uniformly
    # access `.language_server` (start() relies on this); TODO confirm intent.
    return self
@property
def handler(self) -> LanguageServerProcess:
    """Access the underlying language server handler.

    Useful for advanced operations like sending custom commands
    or registering notification handlers.

    :return: the server process object stored in ``self.server``
    """
    return self.server
def is_running(self) -> bool:
    """Return True if the underlying language server process is currently running."""
    return self.server.is_running()
| {
"repo_id": "oraios/serena",
"file_path": "src/solidlsp/ls.py",
"license": "MIT License",
"lines": 1907,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:src/serena/util/thread.py | import threading
from collections.abc import Callable
from enum import Enum
from typing import Generic, TypeVar
from sensai.util.string import ToStringMixin
class TimeoutException(Exception):
    """Raised to signal that a function execution exceeded its allotted time budget."""

    def __init__(self, message: str, timeout: float) -> None:
        """
        :param message: the error message
        :param timeout: the timeout, in seconds, that was exceeded
        """
        super().__init__(message)
        # the timeout (seconds) that was exceeded, kept for programmatic access
        self.timeout = timeout
T = TypeVar("T")  # result type of the function executed via execute_with_timeout
class ExecutionResult(Generic[T], ToStringMixin):
    """
    Mutable holder for the outcome of a function execution (see `execute_with_timeout`).

    All fields start as None; exactly one `set_*` method is then called to record the outcome.
    The payload field (result value or exception) is assigned *before* the status, so a
    reader observing a non-None status will also see the corresponding payload — relevant
    because the result is filled in from a worker thread in `execute_with_timeout`.
    """

    class Status(Enum):
        SUCCESS = "success"
        TIMEOUT = "timeout"
        # NOTE(review): the value "error" does not match the member name EXCEPTION —
        # presumably historical; changing it would affect anything comparing/serializing the value.
        EXCEPTION = "error"

    def __init__(self) -> None:
        # the function's return value; only set on success
        self.result_value: T | None = None
        # the outcome; None while the execution is pending/unrecorded
        self.status: ExecutionResult.Status | None = None
        # the TimeoutException or the exception raised by the function, if any
        self.exception: Exception | None = None

    def set_result_value(self, value: T) -> None:
        """Record a successful execution with the given return value."""
        self.result_value = value
        self.status = ExecutionResult.Status.SUCCESS

    def set_timed_out(self, exception: TimeoutException) -> None:
        """Record that the execution timed out, storing the corresponding TimeoutException."""
        self.exception = exception
        self.status = ExecutionResult.Status.TIMEOUT

    def set_exception(self, exception: Exception) -> None:
        """Record that the executed function raised the given exception."""
        self.exception = exception
        self.status = ExecutionResult.Status.EXCEPTION
def execute_with_timeout(func: Callable[[], T], timeout: float, function_name: str) -> ExecutionResult[T]:
    """
    Executes the given zero-argument callable in a daemon thread, waiting at most `timeout` seconds.

    :param func: the function to execute
    :param timeout: the timeout in seconds
    :param function_name: the name of the function (used in the timeout error message)
    :returns: the execution result, carrying the return value on success, the raised exception
        on error, or a TimeoutException if the time budget was exceeded
    """
    outcome: ExecutionResult[T] = ExecutionResult()

    def run() -> None:
        # executed in the worker thread; exceptions are captured, never propagated
        try:
            outcome.set_result_value(func())
        except Exception as e:
            outcome.set_exception(e)

    worker = threading.Thread(target=run, daemon=True)
    worker.start()
    worker.join(timeout=timeout)
    if worker.is_alive():
        # still running after the timeout: record the timeout; the daemon thread is abandoned
        # and will not block interpreter shutdown
        outcome.set_timed_out(TimeoutException(f"Execution of '{function_name}' timed out after {timeout} seconds.", timeout))
    return outcome
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/util/thread.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:src/interprompt/prompt_factory.py | import logging
import os
from typing import Any
from .multilang_prompt import DEFAULT_LANG_CODE, LanguageFallbackMode, MultiLangPromptCollection, PromptList
log = logging.getLogger(__name__)
class PromptFactoryBase:
    """Base class for auto-generated prompt factory classes."""

    def __init__(self, prompts_dir: str | list[str], lang_code: str = DEFAULT_LANG_CODE, fallback_mode=LanguageFallbackMode.EXCEPTION):
        """
        :param prompts_dir: the directory containing the prompt templates and prompt lists.
            If a list is provided, will look for prompt templates in the dirs from left to right
            (first one containing the desired template wins).
        :param lang_code: the language code to use for retrieving the prompt templates and prompt lists.
            Leave as `default` for single-language use cases.
        :param fallback_mode: the fallback mode to use when a prompt template or prompt list is not found for the requested language.
            Irrelevant for single-language use cases.
        """
        self.lang_code = lang_code
        self._prompt_collection = MultiLangPromptCollection(prompts_dir, fallback_mode=fallback_mode)

    def _render_prompt(self, prompt_name: str, params: dict[str, Any]) -> str:
        """Render the named template with the given parameters in this factory's language."""
        # generated factory methods pass locals(), which includes the bound instance;
        # drop it so only actual template parameters remain (mutates the caller's dict)
        del params["self"]
        return self._prompt_collection.render_prompt_template(prompt_name, params, lang_code=self.lang_code)

    def _get_prompt_list(self, prompt_name: str) -> PromptList:
        """Retrieve the named prompt list in this factory's language."""
        return self._prompt_collection.get_prompt_list(prompt_name, self.lang_code)
def autogenerate_prompt_factory_module(prompts_dir: str, target_module_path: str) -> None:
    """
    Auto-generates a prompt factory module for the given prompt directory.

    The generated `PromptFactory` class is meant to be the central entry class for retrieving and rendering prompt templates and prompt
    lists in your application.
    It will contain one method per prompt template and prompt list, and is useful for both single- and multi-language use cases.

    :param prompts_dir: the directory containing the prompt templates and prompt lists
    :param target_module_path: the path to the target module file (.py). Important: The module will be overwritten!
    """
    generated_code = """
# ruff: noqa
# black: skip
# mypy: ignore-errors

# NOTE: This module is auto-generated from interprompt.autogenerate_prompt_factory_module, do not edit manually!

from interprompt.multilang_prompt import PromptList
from interprompt.prompt_factory import PromptFactoryBase
from typing import Any


class PromptFactory(PromptFactoryBase):
    \"""
    A class for retrieving and rendering prompt templates and prompt lists.
    \"""
"""
    # ---- add methods based on prompt template names and parameters and prompt list names ----
    prompt_collection = MultiLangPromptCollection(prompts_dir)
    for template_name in prompt_collection.get_prompt_template_names():
        template_parameters = prompt_collection.get_prompt_template_parameters(template_name)
        if len(template_parameters) == 0:
            method_params_str = ""
        else:
            # keyword-only parameters, one per template placeholder
            method_params_str = ", *, " + ", ".join([f"{param}: Any" for param in template_parameters])
        generated_code += f"""
    def create_{template_name}(self{method_params_str}) -> str:
        return self._render_prompt('{template_name}', locals())
"""
    for prompt_list_name in prompt_collection.get_prompt_list_names():
        generated_code += f"""
    def get_list_{prompt_list_name}(self) -> PromptList:
        return self._get_prompt_list('{prompt_list_name}')
"""
    # Fix: os.makedirs("") raises FileNotFoundError when the target path has no directory
    # component (e.g. "factory.py"), since os.path.dirname then returns ""; only create
    # directories when there actually is a directory part.
    target_dir = os.path.dirname(target_module_path)
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)
    with open(target_module_path, "w", encoding="utf-8") as f:
        f.write(generated_code)
    log.info(f"Prompt factory generated successfully in {target_module_path}")
| {
"repo_id": "oraios/serena",
"file_path": "src/interprompt/prompt_factory.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
oraios/serena:test/serena/test_symbol_editing.py | """
Snapshot tests using the (awesome) syrupy pytest plugin https://github.com/syrupy-project/syrupy.
Recreate the snapshots with `pytest --snapshot-update`.
"""
import logging
import os
import shutil
import sys
import tempfile
import time
from abc import ABC, abstractmethod
from collections.abc import Iterator
from contextlib import contextmanager
from dataclasses import dataclass, field
from difflib import SequenceMatcher
from pathlib import Path
from typing import Literal, NamedTuple
import pytest
from overrides import overrides
from syrupy import SnapshotAssertion
from serena.code_editor import CodeEditor, LanguageServerCodeEditor
from solidlsp.ls_config import Language
from src.serena.symbol import LanguageServerSymbolRetriever
from test.conftest import get_repo_path, start_ls_context
pytestmark = pytest.mark.snapshot
log = logging.getLogger(__name__)
class LineChange(NamedTuple):
    """Represents a change to a specific line or range of lines."""

    # the kind of change, mirroring difflib.SequenceMatcher opcode tags
    operation: Literal["insert", "delete", "replace"]
    # [start, end) line interval affected in the original file (empty interval for inserts)
    original_start: int
    original_end: int
    # [start, end) line interval affected in the modified file (empty interval for deletes)
    modified_start: int
    modified_end: int
    # affected line contents from the original file (empty list for pure inserts)
    original_lines: list[str]
    # affected line contents from the modified file (empty list for pure deletes)
    modified_lines: list[str]
@dataclass
class CodeDiff:
"""
Represents the difference between original and modified code.
Provides object-oriented access to diff information including line numbers.
"""
relative_path: str
original_content: str
modified_content: str
_line_changes: list[LineChange] = field(init=False)
def __post_init__(self) -> None:
"""Compute the diff using difflib's SequenceMatcher."""
original_lines = self.original_content.splitlines(keepends=True)
modified_lines = self.modified_content.splitlines(keepends=True)
matcher = SequenceMatcher(None, original_lines, modified_lines)
self._line_changes = []
for tag, orig_start, orig_end, mod_start, mod_end in matcher.get_opcodes():
if tag == "equal":
continue
if tag == "insert":
self._line_changes.append(
LineChange(
operation="insert",
original_start=orig_start,
original_end=orig_start,
modified_start=mod_start,
modified_end=mod_end,
original_lines=[],
modified_lines=modified_lines[mod_start:mod_end],
)
)
elif tag == "delete":
self._line_changes.append(
LineChange(
operation="delete",
original_start=orig_start,
original_end=orig_end,
modified_start=mod_start,
modified_end=mod_start,
original_lines=original_lines[orig_start:orig_end],
modified_lines=[],
)
)
elif tag == "replace":
self._line_changes.append(
LineChange(
operation="replace",
original_start=orig_start,
original_end=orig_end,
modified_start=mod_start,
modified_end=mod_end,
original_lines=original_lines[orig_start:orig_end],
modified_lines=modified_lines[mod_start:mod_end],
)
)
@property
def line_changes(self) -> list[LineChange]:
"""Get all line changes in the diff."""
return self._line_changes
@property
def has_changes(self) -> bool:
"""Check if there are any changes."""
return len(self._line_changes) > 0
@property
def added_lines(self) -> list[tuple[int, str]]:
"""Get all added lines with their line numbers (0-based) in the modified file."""
result = []
for change in self._line_changes:
if change.operation in ("insert", "replace"):
for i, line in enumerate(change.modified_lines):
result.append((change.modified_start + i, line))
return result
@property
def deleted_lines(self) -> list[tuple[int, str]]:
"""Get all deleted lines with their line numbers (0-based) in the original file."""
result = []
for change in self._line_changes:
if change.operation in ("delete", "replace"):
for i, line in enumerate(change.original_lines):
result.append((change.original_start + i, line))
return result
@property
def modified_line_numbers(self) -> list[int]:
"""Get all line numbers (0-based) that were modified in the modified file."""
line_nums: set[int] = set()
for change in self._line_changes:
if change.operation in ("insert", "replace"):
line_nums.update(range(change.modified_start, change.modified_end))
return sorted(line_nums)
@property
def affected_original_line_numbers(self) -> list[int]:
"""Get all line numbers (0-based) that were affected in the original file."""
line_nums: set[int] = set()
for change in self._line_changes:
if change.operation in ("delete", "replace"):
line_nums.update(range(change.original_start, change.original_end))
return sorted(line_nums)
def get_unified_diff(self, context_lines: int = 3) -> str:
"""Get the unified diff as a string."""
import difflib
original_lines = self.original_content.splitlines(keepends=True)
modified_lines = self.modified_content.splitlines(keepends=True)
diff = difflib.unified_diff(
original_lines, modified_lines, fromfile=f"a/{self.relative_path}", tofile=f"b/{self.relative_path}", n=context_lines
)
return "".join(diff)
def get_context_diff(self, context_lines: int = 3) -> str:
"""Get the context diff as a string."""
import difflib
original_lines = self.original_content.splitlines(keepends=True)
modified_lines = self.modified_content.splitlines(keepends=True)
diff = difflib.context_diff(
original_lines, modified_lines, fromfile=f"a/{self.relative_path}", tofile=f"b/{self.relative_path}", n=context_lines
)
return "".join(diff)
class EditingTest(ABC):
    """
    Base class for snapshot-based editing tests: copies the language's test repo
    into a temporary directory, applies an edit via a CodeEditor, and compares
    the resulting file content against a syrupy snapshot.
    """

    def __init__(self, language: Language, rel_path: str):
        """
        :param language: the language
        :param rel_path: the relative path of the edited file
        """
        self.rel_path = rel_path
        self.language = language
        self.original_repo_path = get_repo_path(language)
        # set by _setup once the repo has been copied into a temp directory
        self.repo_path: Path | None = None

    @contextmanager
    def _setup(self) -> Iterator[LanguageServerSymbolRetriever]:
        """Context manager for setup/teardown with a temporary directory, providing the symbol manager."""
        temp_dir = Path(tempfile.mkdtemp())
        self.repo_path = temp_dir / self.original_repo_path.name
        language_server = None  # predefine so the finally block is safe even if startup fails
        try:
            print(f"Copying repo from {self.original_repo_path} to {self.repo_path}")
            shutil.copytree(self.original_repo_path, self.repo_path)
            # prevent deadlock on Windows due to file locks caused by antivirus or some other external software
            # wait for a long time here
            if os.name == "nt":
                time.sleep(0.1)
            log.info(f"Creating language server for {self.language} {self.rel_path}")
            with start_ls_context(self.language, str(self.repo_path)) as language_server:
                yield LanguageServerSymbolRetriever(ls=language_server)
        finally:
            # prevent deadlock on Windows due to lingering file locks
            if os.name == "nt":
                time.sleep(0.1)
            log.info(f"Removing temp directory {temp_dir}")
            shutil.rmtree(temp_dir, ignore_errors=True)
            log.info(f"Temp directory {temp_dir} removed")

    def _read_file(self, rel_path: str) -> str:
        """Read the content of a file in the test repository."""
        assert self.repo_path is not None
        file_path = self.repo_path / rel_path
        with open(file_path, encoding="utf-8") as f:
            return f.read()

    def run_test(self, content_after_ground_truth: SnapshotAssertion) -> None:
        """Apply the edit in a fresh repo copy and compare the result to the snapshot."""
        with self._setup() as symbol_retriever:
            content_before = self._read_file(self.rel_path)
            code_editor = LanguageServerCodeEditor(symbol_retriever)
            self._apply_edit(code_editor)
            content_after = self._read_file(self.rel_path)
            code_diff = CodeDiff(self.rel_path, original_content=content_before, modified_content=content_after)
            self._test_diff(code_diff, content_after_ground_truth)

    @abstractmethod
    def _apply_edit(self, code_editor: CodeEditor) -> None:
        """Perform the concrete edit under test; implemented by subclasses."""

    def _test_diff(self, code_diff: CodeDiff, snapshot: SnapshotAssertion) -> None:
        """Verify that an actual change happened and that the result matches the snapshot."""
        assert code_diff.has_changes, f"Sanity check failed: No changes detected in {code_diff.relative_path}"
        assert code_diff.modified_content == snapshot
# Relative path (inside the copied test repo) of the Python file edited by these tests
PYTHON_TEST_REL_FILE_PATH = os.path.join("test_repo", "variables.py")
# Relative path of the TypeScript file edited by these tests
TYPESCRIPT_TEST_FILE = "index.ts"
class DeleteSymbolTest(EditingTest):
    """Editing test that deletes a named symbol from the target file.

    :param deleted_symbol: name (or name path) of the symbol to delete
    """

    def __init__(self, language: Language, rel_path: str, deleted_symbol: str):
        super().__init__(language, rel_path)
        self.deleted_symbol = deleted_symbol
        # NOTE: self.rel_path is already assigned by EditingTest.__init__;
        # the redundant re-assignment that was here has been removed.

    def _apply_edit(self, code_editor: CodeEditor) -> None:
        """Delete the configured symbol via the code editor."""
        code_editor.delete_symbol(self.deleted_symbol, self.rel_path)
@pytest.mark.parametrize(
    "test_case",
    [
        pytest.param(
            DeleteSymbolTest(
                Language.PYTHON,
                PYTHON_TEST_REL_FILE_PATH,
                "VariableContainer",
            ),
            marks=pytest.mark.python,
        ),
        pytest.param(
            DeleteSymbolTest(
                Language.TYPESCRIPT,
                TYPESCRIPT_TEST_FILE,
                "DemoClass",
            ),
            marks=pytest.mark.typescript,
        ),
    ],
)
def test_delete_symbol(test_case: DeleteSymbolTest, snapshot: SnapshotAssertion) -> None:
    """Delete a class symbol and compare the edited file against its snapshot."""
    test_case.run_test(content_after_ground_truth=snapshot)
NEW_PYTHON_FUNCTION = """def new_inserted_function():
print("This is a new function inserted before another.")"""
NEW_PYTHON_CLASS_WITH_LEADING_NEWLINES = """
class NewInsertedClass:
pass
"""
NEW_PYTHON_CLASS_WITH_TRAILING_NEWLINES = """class NewInsertedClass:
pass
"""
NEW_TYPESCRIPT_FUNCTION = """function newInsertedFunction(): void {
console.log("This is a new function inserted before another.");
}"""
NEW_PYTHON_VARIABLE = 'new_module_var = "Inserted after typed_module_var"'
NEW_TYPESCRIPT_FUNCTION_AFTER = """function newFunctionAfterClass(): void {
console.log("This function is after DemoClass.");
}"""
class InsertInRelToSymbolTest(EditingTest):
    """Editing test that inserts new content before or after a named symbol.

    The insertion mode may be given at construction time or later via
    :meth:`set_mode` (used by parametrized tests that reuse one instance).
    """

    def __init__(
        self, language: Language, rel_path: str, symbol_name: str, new_content: str, mode: Literal["before", "after"] | None = None
    ):
        super().__init__(language, rel_path)
        self.mode: Literal["before", "after"] | None = mode
        self.new_content = new_content
        self.symbol_name = symbol_name

    def set_mode(self, mode: Literal["before", "after"]):
        """Select whether the content is inserted before or after the symbol."""
        self.mode = mode

    def _apply_edit(self, code_editor: CodeEditor) -> None:
        assert self.mode is not None
        # Dispatch on the insertion mode; both editor methods share a signature.
        inserter = {
            "before": code_editor.insert_before_symbol,
            "after": code_editor.insert_after_symbol,
        }[self.mode]
        inserter(self.symbol_name, self.rel_path, self.new_content)
@pytest.mark.parametrize("mode", ["before", "after"])
@pytest.mark.parametrize(
    "test_case",
    [
        pytest.param(
            InsertInRelToSymbolTest(
                Language.PYTHON,
                PYTHON_TEST_REL_FILE_PATH,
                "typed_module_var",
                NEW_PYTHON_VARIABLE,
            ),
            marks=pytest.mark.python,
        ),
        pytest.param(
            InsertInRelToSymbolTest(
                Language.PYTHON,
                PYTHON_TEST_REL_FILE_PATH,
                "use_module_variables",
                NEW_PYTHON_FUNCTION,
            ),
            marks=pytest.mark.python,
        ),
        pytest.param(
            InsertInRelToSymbolTest(
                Language.TYPESCRIPT,
                TYPESCRIPT_TEST_FILE,
                "DemoClass",
                NEW_TYPESCRIPT_FUNCTION_AFTER,
            ),
            marks=pytest.mark.typescript,
        ),
        pytest.param(
            InsertInRelToSymbolTest(
                Language.TYPESCRIPT,
                TYPESCRIPT_TEST_FILE,
                "helperFunction",
                NEW_TYPESCRIPT_FUNCTION,
            ),
            marks=pytest.mark.typescript,
        ),
    ],
)
def test_insert_in_rel_to_symbol(test_case: InsertInRelToSymbolTest, mode: Literal["before", "after"], snapshot: SnapshotAssertion) -> None:
    """Insert content before/after a symbol and compare the file to its snapshot (one snapshot per mode)."""
    test_case.set_mode(mode)
    test_case.run_test(content_after_ground_truth=snapshot)
@pytest.mark.python
def test_insert_python_class_before(snapshot: SnapshotAssertion):
    """Insert a class snippet (with trailing newline) before VariableDataclass."""
    test_case = InsertInRelToSymbolTest(
        Language.PYTHON,
        PYTHON_TEST_REL_FILE_PATH,
        "VariableDataclass",
        NEW_PYTHON_CLASS_WITH_TRAILING_NEWLINES,
        mode="before",
    )
    test_case.run_test(content_after_ground_truth=snapshot)
@pytest.mark.python
def test_insert_python_class_after(snapshot: SnapshotAssertion):
    """Insert a class snippet (with leading blank line) after VariableDataclass."""
    test_case = InsertInRelToSymbolTest(
        Language.PYTHON,
        PYTHON_TEST_REL_FILE_PATH,
        "VariableDataclass",
        NEW_PYTHON_CLASS_WITH_LEADING_NEWLINES,
        mode="after",
    )
    test_case.run_test(content_after_ground_truth=snapshot)
PYTHON_REPLACED_BODY = """def modify_instance_var(self):
# This body has been replaced
self.instance_var = "Replaced!"
self.reassignable_instance_var = 999
"""
TYPESCRIPT_REPLACED_BODY = """function printValue() {
// This body has been replaced
console.warn("New value: " + this.value);
}
"""
class ReplaceBodyTest(EditingTest):
    """Editing test that replaces the body of a named symbol.

    :param symbol_name: name (or name path, e.g. "Class/method") of the symbol
    :param new_body: the replacement body text
    """

    def __init__(self, language: Language, rel_path: str, symbol_name: str, new_body: str):
        super().__init__(language, rel_path)
        self.symbol_name = symbol_name
        self.new_body = new_body

    def _apply_edit(self, code_editor: CodeEditor) -> None:
        """Replace the configured symbol's body via the code editor."""
        code_editor.replace_body(self.symbol_name, self.rel_path, self.new_body)
@pytest.mark.parametrize(
    "test_case",
    [
        pytest.param(
            ReplaceBodyTest(
                Language.PYTHON,
                PYTHON_TEST_REL_FILE_PATH,
                "VariableContainer/modify_instance_var",
                PYTHON_REPLACED_BODY,
            ),
            marks=pytest.mark.python,
        ),
        pytest.param(
            ReplaceBodyTest(
                Language.TYPESCRIPT,
                TYPESCRIPT_TEST_FILE,
                "DemoClass/printValue",
                TYPESCRIPT_REPLACED_BODY,
            ),
            marks=pytest.mark.typescript,
        ),
    ],
)
def test_replace_body(test_case: ReplaceBodyTest, snapshot: SnapshotAssertion) -> None:
    """Replace a method body and compare the edited file against its snapshot."""
    test_case.run_test(content_after_ground_truth=snapshot)
# Minimal Nix attribute replacement (note the trailing semicolon) used below
NIX_ATTR_REPLACEMENT = """c = 3;"""
class NixAttrReplacementTest(ReplaceBodyTest):
    """Test for replacing individual attributes in Nix that should NOT result in double semicolons.

    Behaves exactly like ReplaceBodyTest, whose __init__ and _apply_edit this
    class previously duplicated verbatim; it is kept as a distinct class only
    so the Nix-specific intent is visible at the test site.
    """
@pytest.mark.nix
@pytest.mark.skipif(sys.platform == "win32", reason="nixd language server doesn't run on Windows")
def test_nix_symbol_replacement_no_double_semicolon(snapshot: SnapshotAssertion):
    """
    Test that replacing a Nix attribute does not result in double semicolons.

    This test exercises the bug where:
    - Original: users.users.example = { isSystemUser = true; group = "example"; description = "Example service user"; };
    - Replacement: c = 3;
    - Bug result would be: c = 3;; (double semicolon)
    - Correct result should be: c = 3; (single semicolon)

    The replacement body includes a semicolon, but the language server's range extension
    logic should prevent double semicolons.
    """
    NixAttrReplacementTest(
        Language.NIX,
        "default.nix",
        "testUser",  # Simple attrset with multiple key-value pairs
        NIX_ATTR_REPLACEMENT,
    ).run_test(content_after_ground_truth=snapshot)
class RenameSymbolTest(EditingTest):
    """Editing test that renames a symbol (including its references) via the code editor."""

    def __init__(self, language: Language, rel_path: str, symbol_name: str, new_name: str):
        super().__init__(language, rel_path)
        self.symbol_name = symbol_name
        self.new_name = new_name

    def _apply_edit(self, code_editor: CodeEditor) -> None:
        """Rename the configured symbol to the new name."""
        code_editor.rename_symbol(self.symbol_name, self.rel_path, self.new_name)

    @overrides
    def _test_diff(self, code_diff: CodeDiff, snapshot: SnapshotAssertion) -> None:
        # sanity check (e.g., for newly generated snapshots) that the new name is actually in the modified content
        assert self.new_name in code_diff.modified_content, f"New name '{self.new_name}' not found in modified content."
        return super()._test_diff(code_diff, snapshot)
@pytest.mark.python
def test_rename_symbol(snapshot: SnapshotAssertion):
    """Rename a module-level variable and snapshot the resulting file."""
    RenameSymbolTest(
        Language.PYTHON,
        PYTHON_TEST_REL_FILE_PATH,
        "typed_module_var",
        "renamed_typed_module_var",
    ).run_test(content_after_ground_truth=snapshot)
# ===== VUE WRITE OPERATIONS TESTS =====
VUE_TEST_FILE = os.path.join("src", "components", "CalculatorButton.vue")
VUE_STORE_FILE = os.path.join("src", "stores", "calculator.ts")
NEW_VUE_HANDLER = """const handleDoubleClick = () => {
pressCount.value++;
emit('click', props.label);
}"""
@pytest.mark.parametrize(
    "test_case",
    [
        pytest.param(
            DeleteSymbolTest(
                Language.VUE,
                VUE_TEST_FILE,
                "handleMouseEnter",
            ),
            marks=pytest.mark.vue,
        ),
    ],
)
def test_delete_symbol_vue(test_case: DeleteSymbolTest, snapshot: SnapshotAssertion) -> None:
    """Delete a handler inside a .vue single-file component and compare to the snapshot."""
    test_case.run_test(content_after_ground_truth=snapshot)
@pytest.mark.parametrize("mode", ["before", "after"])
@pytest.mark.parametrize(
    "test_case",
    [
        pytest.param(
            InsertInRelToSymbolTest(
                Language.VUE,
                VUE_TEST_FILE,
                "handleClick",
                NEW_VUE_HANDLER,
            ),
            marks=pytest.mark.vue,
        ),
    ],
)
def test_insert_in_rel_to_symbol_vue(
    test_case: InsertInRelToSymbolTest,
    mode: Literal["before", "after"],
    snapshot: SnapshotAssertion,
) -> None:
    """Insert a new handler before/after handleClick in a .vue file (one snapshot per mode)."""
    test_case.set_mode(mode)
    test_case.run_test(content_after_ground_truth=snapshot)
VUE_REPLACED_HANDLECLICK_BODY = """const handleClick = () => {
if (!props.disabled) {
pressCount.value = 0; // Reset instead of incrementing
emit('click', props.label);
}
}"""
@pytest.mark.parametrize(
    "test_case",
    [
        pytest.param(
            ReplaceBodyTest(
                Language.VUE,
                VUE_TEST_FILE,
                "handleClick",
                VUE_REPLACED_HANDLECLICK_BODY,
            ),
            marks=pytest.mark.vue,
        ),
    ],
)
def test_replace_body_vue(test_case: ReplaceBodyTest, snapshot: SnapshotAssertion) -> None:
    """Replace a handler body inside a .vue single-file component and compare to the snapshot."""
    test_case.run_test(content_after_ground_truth=snapshot)
# Replacement for the pressCount ref definition (exercises symbol disambiguation)
VUE_REPLACED_PRESSCOUNT_BODY = """const pressCount = ref(100)"""
@pytest.mark.parametrize(
    "test_case",
    [
        pytest.param(
            ReplaceBodyTest(
                Language.VUE,
                VUE_TEST_FILE,
                "pressCount",
                VUE_REPLACED_PRESSCOUNT_BODY,
            ),
            marks=pytest.mark.vue,
        ),
    ],
)
def test_replace_body_vue_with_disambiguation(test_case: ReplaceBodyTest, snapshot: SnapshotAssertion) -> None:
    """Test symbol disambiguation when replacing body in Vue files.

    This test verifies the fix for the Vue LSP symbol duplication issue.
    When the LSP returns two symbols with the same name (e.g., pressCount appears both as
    a definition `const pressCount = ref(0)` and as a shorthand property in `defineExpose({ pressCount })`),
    the _find_unique_symbol method should prefer the symbol with the larger range (the definition).

    The test exercises this by calling replace_body on 'pressCount', which internally calls
    _find_unique_symbol and should correctly select the definition (line 40, 19 chars) over
    the reference (line 97, 10 chars).
    """
    test_case.run_test(content_after_ground_truth=snapshot)
VUE_STORE_REPLACED_CLEAR_BODY = """function clear() {
// Modified: Reset to initial state with a log
console.log('Clearing calculator state');
displayValue.value = '0';
expression.value = '';
operationHistory.value = [];
lastResult.value = undefined;
}"""
@pytest.mark.parametrize(
    "test_case",
    [
        pytest.param(
            ReplaceBodyTest(
                Language.VUE,
                VUE_STORE_FILE,
                "clear",
                VUE_STORE_REPLACED_CLEAR_BODY,
            ),
            marks=pytest.mark.vue,
        ),
    ],
)
def test_replace_body_vue_ts_file(test_case: ReplaceBodyTest, snapshot: SnapshotAssertion) -> None:
    """Test that TypeScript files within Vue projects can be edited."""
    test_case.run_test(content_after_ground_truth=snapshot)
| {
"repo_id": "oraios/serena",
"file_path": "test/serena/test_symbol_editing.py",
"license": "MIT License",
"lines": 542,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:test/serena/util/test_file_system.py | import os
import shutil
import tempfile
from pathlib import Path
# Assuming the gitignore parser code is in a module named 'gitignore_parser'
from serena.util.file_system import GitignoreParser, GitignoreSpec
class TestGitignoreParser:
"""Test class for GitignoreParser functionality."""
    def setup_method(self):
        """Set up test environment before each test method."""
        # Create a temporary directory for testing; removed again in teardown_method
        self.test_dir = tempfile.mkdtemp()
        self.repo_path = Path(self.test_dir)
        # Create test repository structure
        self._create_repo_structure()
    def teardown_method(self):
        """Clean up test environment after each test method."""
        # Remove the temporary directory created in setup_method
        shutil.rmtree(self.test_dir)
    def _create_repo_structure(self):
        """
        Create a test repository structure with multiple gitignore files.

        Several tests below rely on this exact layout and on the patterns
        written into the four .gitignore files.

        Structure:
        repo/
        ├── .gitignore
        ├── file1.txt
        ├── test.log
        ├── src/
        │   ├── .gitignore
        │   ├── main.py
        │   ├── test.log
        │   ├── build/
        │   │   └── output.o
        │   └── lib/
        │       ├── .gitignore
        │       └── cache.tmp
        └── docs/
            ├── .gitignore
            ├── api.md
            └── temp/
                └── draft.md
        """
        # Create directories
        (self.repo_path / "src").mkdir()
        (self.repo_path / "src" / "build").mkdir()
        (self.repo_path / "src" / "lib").mkdir()
        (self.repo_path / "docs").mkdir()
        (self.repo_path / "docs" / "temp").mkdir()

        # Create files
        (self.repo_path / "file1.txt").touch()
        (self.repo_path / "test.log").touch()
        (self.repo_path / "src" / "main.py").touch()
        (self.repo_path / "src" / "test.log").touch()
        (self.repo_path / "src" / "build" / "output.o").touch()
        (self.repo_path / "src" / "lib" / "cache.tmp").touch()
        (self.repo_path / "docs" / "api.md").touch()
        (self.repo_path / "docs" / "temp" / "draft.md").touch()

        # Create root .gitignore
        root_gitignore = self.repo_path / ".gitignore"
        root_gitignore.write_text(
            """# Root gitignore
*.log
/build/
"""
        )

        # Create src/.gitignore
        src_gitignore = self.repo_path / "src" / ".gitignore"
        src_gitignore.write_text(
            """# Source gitignore
*.o
build/
!important.log
"""
        )

        # Create src/lib/.gitignore (deeply nested)
        src_lib_gitignore = self.repo_path / "src" / "lib" / ".gitignore"
        src_lib_gitignore.write_text(
            """# Library gitignore
*.tmp
*.cache
"""
        )

        # Create docs/.gitignore
        docs_gitignore = self.repo_path / "docs" / ".gitignore"
        docs_gitignore.write_text(
            """# Docs gitignore
temp/
*.tmp
"""
        )
    def test_initialization(self):
        """Test GitignoreParser initialization."""
        parser = GitignoreParser(str(self.repo_path))
        assert parser.repo_root == str(self.repo_path.absolute())
        # the fixture repo contains exactly four .gitignore files
        assert len(parser.get_ignore_specs()) == 4
def test_find_gitignore_files(self):
"""Test finding all gitignore files in repository, including deeply nested ones."""
parser = GitignoreParser(str(self.repo_path))
# Get file paths from specs
gitignore_files = [spec.file_path for spec in parser.get_ignore_specs()]
# Convert to relative paths for easier testing
rel_paths = [os.path.relpath(f, self.repo_path) for f in gitignore_files]
rel_paths.sort()
assert len(rel_paths) == 4
assert ".gitignore" in rel_paths
assert os.path.join("src", ".gitignore") in rel_paths
assert os.path.join("src", "lib", ".gitignore") in rel_paths # Deeply nested
assert os.path.join("docs", ".gitignore") in rel_paths
    def test_parse_patterns_root_directory(self):
        """Test parsing gitignore patterns in root directory."""
        # Create a simple test case with only root gitignore
        test_dir = self.repo_path / "test_root"
        test_dir.mkdir()

        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """*.log
build/
/temp.txt
"""
        )

        parser = GitignoreParser(str(test_dir))
        specs = parser.get_ignore_specs()

        assert len(specs) == 1
        # root-level patterns are stored verbatim (no directory prefixing)
        patterns = specs[0].patterns
        assert "*.log" in patterns
        assert "build/" in patterns
        assert "/temp.txt" in patterns
    def test_parse_patterns_subdirectory(self):
        """Test parsing gitignore patterns in subdirectory."""
        # Create a test case with subdirectory gitignore
        test_dir = self.repo_path / "test_sub"
        test_dir.mkdir()
        subdir = test_dir / "src"
        subdir.mkdir()

        gitignore = subdir / ".gitignore"
        gitignore.write_text(
            """*.o
/build/
test.log
"""
        )

        parser = GitignoreParser(str(test_dir))
        specs = parser.get_ignore_specs()

        assert len(specs) == 1
        # subdirectory patterns are rewritten relative to the repo root
        patterns = specs[0].patterns
        # Non-anchored pattern should get ** prefix
        assert "src/**/*.o" in patterns
        # Anchored pattern should not get ** prefix
        assert "src/build/" in patterns
        # Non-anchored pattern without slash
        assert "src/**/test.log" in patterns
    def test_should_ignore_root_patterns(self):
        """Test ignoring files based on root .gitignore."""
        parser = GitignoreParser(str(self.repo_path))

        # Files that should be ignored; both relative and absolute paths are accepted
        assert parser.should_ignore("test.log")
        assert parser.should_ignore(str(self.repo_path / "test.log"))

        # Files that should NOT be ignored
        assert not parser.should_ignore("file1.txt")
        assert not parser.should_ignore("src/main.py")
    def test_should_ignore_subdirectory_patterns(self):
        """Test ignoring files based on subdirectory .gitignore files."""
        parser = GitignoreParser(str(self.repo_path))

        # .o files in src should be ignored
        assert parser.should_ignore("src/build/output.o")

        # build/ directory in src should be ignored
        assert parser.should_ignore("src/build/")

        # temp/ directory in docs should be ignored
        assert parser.should_ignore("docs/temp/draft.md")

        # But temp/ outside docs should not be ignored by docs/.gitignore
        assert not parser.should_ignore("temp/file.txt")

        # Test deeply nested .gitignore in src/lib/
        # .tmp files in src/lib should be ignored
        assert parser.should_ignore("src/lib/cache.tmp")

        # .cache files in src/lib should also be ignored
        assert parser.should_ignore("src/lib/data.cache")

        # But .tmp files outside src/lib should not be ignored by src/lib/.gitignore
        assert not parser.should_ignore("src/other.tmp")
    def test_anchored_vs_non_anchored_patterns(self):
        """Test the difference between anchored and non-anchored patterns."""
        # Create new test structure
        test_dir = self.repo_path / "test_anchored"
        test_dir.mkdir()
        (test_dir / "src").mkdir()
        (test_dir / "src" / "subdir").mkdir()
        (test_dir / "src" / "subdir" / "deep").mkdir()

        # Create src/.gitignore with both anchored and non-anchored patterns
        gitignore = test_dir / "src" / ".gitignore"
        gitignore.write_text(
            """/temp.txt
data.json
"""
        )

        # Create test files
        (test_dir / "src" / "temp.txt").touch()
        (test_dir / "src" / "data.json").touch()
        (test_dir / "src" / "subdir" / "temp.txt").touch()
        (test_dir / "src" / "subdir" / "data.json").touch()
        (test_dir / "src" / "subdir" / "deep" / "data.json").touch()

        parser = GitignoreParser(str(test_dir))

        # Anchored pattern /temp.txt should only match directly in src/
        assert parser.should_ignore("src/temp.txt")
        assert not parser.should_ignore("src/subdir/temp.txt")

        # Non-anchored pattern data.json should match anywhere under src/
        assert parser.should_ignore("src/data.json")
        assert parser.should_ignore("src/subdir/data.json")
        assert parser.should_ignore("src/subdir/deep/data.json")
    def test_root_anchored_patterns(self):
        """Test anchored patterns in root .gitignore only match root-level files."""
        # Create new test structure for root anchored patterns
        test_dir = self.repo_path / "test_root_anchored"
        test_dir.mkdir()
        (test_dir / "src").mkdir()
        (test_dir / "docs").mkdir()
        (test_dir / "src" / "nested").mkdir()

        # Create root .gitignore with anchored patterns (and one non-anchored *.pyc)
        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """/config.json
/temp.log
/build
*.pyc
"""
        )

        # Create test files at root level
        (test_dir / "config.json").touch()
        (test_dir / "temp.log").touch()
        (test_dir / "build").mkdir()
        (test_dir / "file.pyc").touch()

        # Create same-named files in subdirectories
        (test_dir / "src" / "config.json").touch()
        (test_dir / "src" / "temp.log").touch()
        (test_dir / "src" / "build").mkdir()
        (test_dir / "src" / "file.pyc").touch()
        (test_dir / "docs" / "config.json").touch()
        (test_dir / "docs" / "temp.log").touch()
        (test_dir / "src" / "nested" / "config.json").touch()
        (test_dir / "src" / "nested" / "temp.log").touch()
        (test_dir / "src" / "nested" / "build").mkdir()

        parser = GitignoreParser(str(test_dir))

        # Anchored patterns should only match root-level files
        assert parser.should_ignore("config.json")
        assert not parser.should_ignore("src/config.json")
        assert not parser.should_ignore("docs/config.json")
        assert not parser.should_ignore("src/nested/config.json")

        assert parser.should_ignore("temp.log")
        assert not parser.should_ignore("src/temp.log")
        assert not parser.should_ignore("docs/temp.log")
        assert not parser.should_ignore("src/nested/temp.log")

        assert parser.should_ignore("build")
        assert not parser.should_ignore("src/build")
        assert not parser.should_ignore("src/nested/build")

        # Non-anchored patterns should match everywhere
        assert parser.should_ignore("file.pyc")
        assert parser.should_ignore("src/file.pyc")
    def test_mixed_anchored_and_non_anchored_root_patterns(self):
        """Test mix of anchored and non-anchored patterns in root .gitignore."""
        test_dir = self.repo_path / "test_mixed_patterns"
        test_dir.mkdir()
        (test_dir / "app").mkdir()
        (test_dir / "tests").mkdir()
        (test_dir / "app" / "modules").mkdir()

        # Create root .gitignore with mixed patterns; /app/local.config is
        # anchored to a specific subpath, not just to the root directory itself
        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """/secrets.env
/dist/
node_modules/
*.tmp
/app/local.config
debug.log
"""
        )

        # Create test files and directories
        (test_dir / "secrets.env").touch()
        (test_dir / "dist").mkdir()
        (test_dir / "node_modules").mkdir()
        (test_dir / "file.tmp").touch()
        (test_dir / "app" / "local.config").touch()
        (test_dir / "debug.log").touch()

        # Create same files in subdirectories
        (test_dir / "app" / "secrets.env").touch()
        (test_dir / "app" / "dist").mkdir()
        (test_dir / "app" / "node_modules").mkdir()
        (test_dir / "app" / "file.tmp").touch()
        (test_dir / "app" / "debug.log").touch()
        (test_dir / "tests" / "secrets.env").touch()
        (test_dir / "tests" / "node_modules").mkdir()
        (test_dir / "tests" / "debug.log").touch()
        (test_dir / "app" / "modules" / "local.config").touch()

        parser = GitignoreParser(str(test_dir))

        # Anchored patterns should only match at root
        assert parser.should_ignore("secrets.env")
        assert not parser.should_ignore("app/secrets.env")
        assert not parser.should_ignore("tests/secrets.env")

        assert parser.should_ignore("dist")
        assert not parser.should_ignore("app/dist")

        assert parser.should_ignore("app/local.config")
        assert not parser.should_ignore("app/modules/local.config")

        # Non-anchored patterns should match everywhere
        assert parser.should_ignore("node_modules")
        assert parser.should_ignore("app/node_modules")
        assert parser.should_ignore("tests/node_modules")

        assert parser.should_ignore("file.tmp")
        assert parser.should_ignore("app/file.tmp")

        assert parser.should_ignore("debug.log")
        assert parser.should_ignore("app/debug.log")
        assert parser.should_ignore("tests/debug.log")
    def test_negation_patterns(self):
        """Test negation patterns are parsed correctly (stored, not evaluated here)."""
        test_dir = self.repo_path / "test_negation"
        test_dir.mkdir()

        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """*.log
!important.log
!src/keep.log
"""
        )

        parser = GitignoreParser(str(test_dir))
        specs = parser.get_ignore_specs()

        assert len(specs) == 1
        # negation patterns keep their leading "!"
        patterns = specs[0].patterns
        assert "*.log" in patterns
        assert "!important.log" in patterns
        assert "!src/keep.log" in patterns
    def test_comments_and_empty_lines(self):
        """Test that comments and empty lines are ignored."""
        test_dir = self.repo_path / "test_comments"
        test_dir.mkdir()

        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """# This is a comment
*.log

# Another comment
    # Indented comment
build/
"""
        )

        parser = GitignoreParser(str(test_dir))
        specs = parser.get_ignore_specs()

        assert len(specs) == 1
        # only the two real patterns survive parsing
        patterns = specs[0].patterns
        assert len(patterns) == 2
        assert "*.log" in patterns
        assert "build/" in patterns
    def test_escaped_characters(self):
        """Test escaped special characters (\\# and \\! become literal)."""
        test_dir = self.repo_path / "test_escaped"
        test_dir.mkdir()

        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """\\#not-a-comment.txt
\\!not-negation.txt
"""
        )

        parser = GitignoreParser(str(test_dir))
        specs = parser.get_ignore_specs()

        assert len(specs) == 1
        # the backslash escape is stripped, leaving a literal leading # / !
        patterns = specs[0].patterns
        assert "#not-a-comment.txt" in patterns
        assert "!not-negation.txt" in patterns
    def test_escaped_negation_patterns(self):
        """Test that an escaped '!' is a literal filename character while a real '!' negates."""
        test_dir = self.repo_path / "test_escaped_negation"
        test_dir.mkdir()

        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """*.log
\\!not-negation.log
!actual-negation.log
"""
        )

        parser = GitignoreParser(str(test_dir))
        specs = parser.get_ignore_specs()

        assert len(specs) == 1
        patterns = specs[0].patterns

        # Key assertions: escaped exclamation becomes literal, real negation preserved
        assert "!not-negation.log" in patterns  # escaped -> literal
        assert "!actual-negation.log" in patterns  # real negation preserved

        # Test the actual behavioral difference between escaped and real negation:
        # *.log pattern should ignore test.log
        assert parser.should_ignore("test.log")

        # Escaped negation file should still be ignored by *.log pattern
        assert parser.should_ignore("!not-negation.log")

        # Actual negation should override the *.log pattern
        assert not parser.should_ignore("actual-negation.log")
    def test_glob_patterns(self):
        """Test various glob patterns work correctly."""
        test_dir = self.repo_path / "test_glob"
        test_dir.mkdir()

        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """*.pyc
**/*.tmp
src/*.o
!src/important.o
[Tt]est*
"""
        )

        # Create test files
        (test_dir / "src").mkdir()
        (test_dir / "src" / "nested").mkdir()
        (test_dir / "file.pyc").touch()
        (test_dir / "src" / "file.pyc").touch()
        (test_dir / "file.tmp").touch()
        (test_dir / "src" / "nested" / "file.tmp").touch()
        (test_dir / "src" / "file.o").touch()
        (test_dir / "src" / "important.o").touch()
        (test_dir / "Test.txt").touch()
        (test_dir / "test.log").touch()

        parser = GitignoreParser(str(test_dir))

        # *.pyc should match everywhere
        assert parser.should_ignore("file.pyc")
        assert parser.should_ignore("src/file.pyc")

        # **/*.tmp should match all .tmp files
        assert parser.should_ignore("file.tmp")
        assert parser.should_ignore("src/nested/file.tmp")

        # src/*.o should only match .o files directly in src/
        assert parser.should_ignore("src/file.o")

        # Character class patterns ([Tt] matches both cases)
        assert parser.should_ignore("Test.txt")
        assert parser.should_ignore("test.log")
    def test_empty_gitignore(self):
        """Test handling of empty gitignore files."""
        test_dir = self.repo_path / "test_empty"
        test_dir.mkdir()

        gitignore = test_dir / ".gitignore"
        gitignore.write_text("")

        parser = GitignoreParser(str(test_dir))

        # Should not crash and should return empty list (no specs for empty files)
        assert len(parser.get_ignore_specs()) == 0
    def test_malformed_gitignore(self):
        """Test handling of gitignore content that yields no patterns."""
        test_dir = self.repo_path / "test_malformed"
        test_dir.mkdir()

        gitignore = test_dir / ".gitignore"
        gitignore.write_text(
            """# Only comments and empty lines

# More comments

"""
        )

        parser = GitignoreParser(str(test_dir))

        # Should handle gracefully: a comment-only file produces no spec
        assert len(parser.get_ignore_specs()) == 0
def test_reload(self):
    """reload() picks up .gitignore changes made on disk after the initial parse."""
    workdir = self.repo_path / "test_reload"
    workdir.mkdir()
    ignore_file = workdir / ".gitignore"
    ignore_file.write_text("*.log")
    parser = GitignoreParser(str(workdir))
    assert len(parser.get_ignore_specs()) == 1
    assert parser.should_ignore("test.log")
    # Rewrite the file on disk; the parser must keep the old patterns until reload().
    ignore_file.write_text("*.tmp")
    assert parser.should_ignore("test.log")
    assert not parser.should_ignore("test.tmp")
    # After an explicit reload the new patterns take effect.
    parser.reload()
    assert not parser.should_ignore("test.log")
    assert parser.should_ignore("test.tmp")
def test_gitignore_spec_matches(self):
    """GitignoreSpec.matches applies its patterns to repo-relative paths."""
    spec = GitignoreSpec("/path/to/.gitignore", ["*.log", "build/", "!important.log"])
    for path in ("test.log", "build/output.o", "src/test.log"):
        assert spec.matches(path)
    # Note: Negation patterns in pathspec work differently than in git;
    # this is a limitation of the pathspec library.
def test_subdirectory_gitignore_pattern_scoping(self):
    """Patterns from a subdirectory .gitignore must only apply within that subtree."""
    root = self.repo_path / "test_subdir_scoping"
    root.mkdir()
    (root / "foo" / "bar").mkdir(parents=True)
    # Same file name at three depths: root, foo/, foo/bar/.
    for rel in ("foo.txt", "foo/foo.txt", "foo/bar/foo.txt"):
        (root / rel).touch()
    # Unanchored 'foo.txt' in foo/.gitignore covers foo/ and everything below it.
    (root / "foo" / ".gitignore").write_text("foo.txt\n")
    parser = GitignoreParser(str(root))
    assert not parser.should_ignore("foo.txt"), "Root foo.txt should not be ignored by foo/.gitignore"
    assert parser.should_ignore("foo/foo.txt"), "foo/foo.txt should be ignored"
    assert parser.should_ignore("foo/bar/foo.txt"), "foo/bar/foo.txt should be ignored"
def test_anchored_pattern_in_subdirectory(self):
    """An anchored pattern in a subdirectory .gitignore matches only its immediate children."""
    root = self.repo_path / "test_anchored_subdir"
    root.mkdir()
    (root / "foo" / "bar").mkdir(parents=True)
    # Same file name at three depths: root, foo/, foo/bar/.
    for rel in ("foo.txt", "foo/foo.txt", "foo/bar/foo.txt"):
        (root / rel).touch()
    # '/foo.txt' in foo/.gitignore is anchored to foo/ itself.
    (root / "foo" / ".gitignore").write_text("/foo.txt\n")
    parser = GitignoreParser(str(root))
    assert not parser.should_ignore("foo.txt"), "Root foo.txt should not be ignored"
    assert parser.should_ignore("foo/foo.txt"), "foo/foo.txt should be ignored by /foo.txt pattern"
    # Anchoring excludes deeper levels.
    assert not parser.should_ignore("foo/bar/foo.txt"), "foo/bar/foo.txt should NOT be ignored by /foo.txt pattern"
def test_double_star_pattern_scoping(self):
    """A '**/pattern' in a subdirectory .gitignore applies only within that subtree."""
    root = self.repo_path / "test_doublestar_scope"
    root.mkdir()
    (root / "foo" / "bar").mkdir(parents=True)
    (root / "other").mkdir()
    # Same file name inside and outside the foo/ subtree.
    for rel in ("foo.txt", "foo/foo.txt", "foo/bar/foo.txt", "other/foo.txt"):
        (root / rel).touch()
    # '**/foo.txt' in foo/.gitignore covers foo/ recursively, nothing else.
    (root / "foo" / ".gitignore").write_text("**/foo.txt\n")
    parser = GitignoreParser(str(root))
    assert not parser.should_ignore("foo.txt"), "Root foo.txt should not be ignored by foo/.gitignore"
    assert parser.should_ignore("foo/foo.txt"), "foo/foo.txt should be ignored"
    assert parser.should_ignore("foo/bar/foo.txt"), "foo/bar/foo.txt should be ignored"
    assert not parser.should_ignore("other/foo.txt"), "other/foo.txt should NOT be ignored by foo/.gitignore"
def test_anchored_double_star_pattern(self):
    """A '/**/pattern' in a subdirectory .gitignore ignores only within that subtree."""
    root = self.repo_path / "test_anchored_doublestar"
    root.mkdir()
    (root / "foo" / "bar").mkdir(parents=True)
    (root / "other").mkdir()
    # Same file name inside and outside the foo/ subtree.
    for rel in ("foo.txt", "foo/foo.txt", "foo/bar/foo.txt", "other/foo.txt"):
        (root / rel).touch()
    # '/**/foo.txt' in foo/.gitignore: anchored double-star, recursive within foo/.
    (root / "foo" / ".gitignore").write_text("/**/foo.txt\n")
    parser = GitignoreParser(str(root))
    assert not parser.should_ignore("foo.txt"), "Root foo.txt should not be ignored"
    assert parser.should_ignore("foo/foo.txt"), "foo/foo.txt should be ignored"
    assert parser.should_ignore("foo/bar/foo.txt"), "foo/bar/foo.txt should be ignored"
    assert not parser.should_ignore("other/foo.txt"), "other/foo.txt should NOT be ignored by foo/.gitignore"
| {
"repo_id": "oraios/serena",
"file_path": "test/serena/util/test_file_system.py",
"license": "MIT License",
"lines": 560,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
oraios/serena:src/serena/dashboard.py | import os
import socket
import threading
from collections.abc import Callable
from pathlib import Path
from typing import TYPE_CHECKING, Any, Self
from flask import Flask, Response, request, send_from_directory
from pydantic import BaseModel
from sensai.util import logging
from serena.analytics import ToolUsageStats
from serena.config.serena_config import SerenaConfig, SerenaPaths
from serena.constants import SERENA_DASHBOARD_DIR
from serena.task_executor import TaskExecutor
from serena.util.logging import MemoryLogHandler
if TYPE_CHECKING:
from serena.agent import SerenaAgent
# Module-level logger for the dashboard.
log = logging.getLogger(__name__)

# disable Werkzeug's logging to avoid cluttering the output
logging.getLogger("werkzeug").setLevel(logging.WARNING)
class RequestLog(BaseModel):
    """Request body for fetching log messages starting from a given index."""

    # index of the first log message to return (0 = from the beginning)
    start_idx: int = 0


class ResponseLog(BaseModel):
    """Response carrying buffered log messages and the name of the active project."""

    messages: list[str]
    # highest message index contained in `messages`; pass as next start_idx for polling
    max_idx: int
    active_project: str | None = None


class ResponseToolNames(BaseModel):
    """Response listing the names of the agent's tools."""

    tool_names: list[str]


class ResponseToolStats(BaseModel):
    """Response with per-tool usage statistics (tool name -> stat name -> value)."""

    stats: dict[str, dict[str, int]]
class ResponseConfigOverview(BaseModel):
    """Aggregated configuration snapshot rendered on the dashboard's overview page."""

    # active project info: name, language(s), path
    active_project: dict[str, str | None]
    # active context: name, description, path
    context: dict[str, str]
    # active modes, each with name/description/path
    modes: list[dict[str, str]]
    active_tools: list[str]
    # tool name -> {"num_calls": ...} summary
    tool_stats_summary: dict[str, dict[str, int]]
    registered_projects: list[dict[str, str | bool]]
    # tools that exist but are not currently active
    available_tools: list[dict[str, str | bool]]
    available_modes: list[dict[str, str | bool]]
    available_contexts: list[dict[str, str | bool]]
    # None when memories are unavailable (no active project or read_memory tool inactive)
    available_memories: list[str] | None
    jetbrains_mode: bool
    languages: list[str]
    encoding: str | None
    current_client: str | None
class ResponseAvailableLanguages(BaseModel):
    """Languages that can still be added to the active project."""

    languages: list[str]


class RequestAddLanguage(BaseModel):
    """Request to add a language to the active project."""

    language: str


class RequestRemoveLanguage(BaseModel):
    """Request to remove a language from the active project."""

    language: str


class RequestGetMemory(BaseModel):
    """Request to load a named memory of the active project."""

    memory_name: str


class ResponseGetMemory(BaseModel):
    """Response carrying a memory's content together with its name."""

    content: str
    memory_name: str


class RequestSaveMemory(BaseModel):
    """Request to create or overwrite a named memory."""

    memory_name: str
    content: str


class RequestDeleteMemory(BaseModel):
    """Request to delete a named memory."""

    memory_name: str


class RequestRenameMemory(BaseModel):
    """Request to rename a memory from old_name to new_name."""

    old_name: str
    new_name: str


class ResponseGetSerenaConfig(BaseModel):
    """Response carrying the raw text of the Serena config file."""

    content: str


class RequestSaveSerenaConfig(BaseModel):
    """Request to overwrite the Serena config file with the given text."""

    content: str


class RequestCancelTaskExecution(BaseModel):
    """Request to cancel the queued/running task with the given id."""

    task_id: int
class QueuedExecution(BaseModel):
    """Serializable snapshot of a task in the agent's execution queue."""

    task_id: int
    is_running: bool
    name: str
    finished_successfully: bool
    logged: bool

    @classmethod
    def from_task_info(cls, task_info: TaskExecutor.TaskInfo) -> Self:
        """Convert a TaskExecutor.TaskInfo into its transport representation."""
        return cls(
            task_id=task_info.task_id,
            is_running=task_info.is_running,
            name=task_info.name,
            finished_successfully=task_info.finished_successfully(),
            logged=task_info.logged,
        )
class SerenaDashboardAPI:
    """Flask-based web API backing the Serena dashboard.

    Serves the static dashboard files and exposes JSON endpoints for logs,
    tool statistics, configuration, memories, task-queue inspection and
    lifecycle control (shutdown). Operations that touch agent state are
    funneled through ``self._agent.execute_task`` so they run on the agent's
    task executor.
    """

    log = logging.getLogger(__qualname__)

    def __init__(
        self,
        memory_log_handler: MemoryLogHandler,
        tool_names: list[str],
        agent: "SerenaAgent",
        shutdown_callback: Callable[[], None] | None = None,
        tool_usage_stats: ToolUsageStats | None = None,
    ) -> None:
        """
        :param memory_log_handler: in-memory log buffer the dashboard reads from
        :param tool_names: names of the agent's tools to expose
        :param agent: the agent whose state this API reflects
        :param shutdown_callback: invoked on /shutdown; falls back to os._exit(0) if None
        :param tool_usage_stats: optional usage statistics provider
        """
        self._memory_log_handler = memory_log_handler
        self._tool_names = tool_names
        self._agent = agent
        self._shutdown_callback = shutdown_callback
        self._app = Flask(__name__)
        self._tool_usage_stats = tool_usage_stats
        self._setup_routes()

    @property
    def memory_log_handler(self) -> MemoryLogHandler:
        return self._memory_log_handler

    def _setup_routes(self) -> None:
        """Register all static and JSON API routes on the Flask app."""
        # Static files
        @self._app.route("/dashboard/<path:filename>")
        def serve_dashboard(filename: str) -> Response:
            return send_from_directory(SERENA_DASHBOARD_DIR, filename)

        @self._app.route("/dashboard/")
        def serve_dashboard_index() -> Response:
            return send_from_directory(SERENA_DASHBOARD_DIR, "index.html")

        # API routes
        @self._app.route("/heartbeat", methods=["GET"])
        def get_heartbeat() -> dict[str, Any]:
            # liveness probe used by the frontend
            return {"status": "alive"}

        @self._app.route("/get_log_messages", methods=["POST"])
        def get_log_messages() -> dict[str, Any]:
            request_data = request.get_json()
            # an empty/missing body means "from the beginning"
            if not request_data:
                request_log = RequestLog()
            else:
                request_log = RequestLog.model_validate(request_data)
            result = self._get_log_messages(request_log)
            return result.model_dump()

        @self._app.route("/get_tool_names", methods=["GET"])
        def get_tool_names() -> dict[str, Any]:
            result = self._get_tool_names()
            return result.model_dump()

        @self._app.route("/get_tool_stats", methods=["GET"])
        def get_tool_stats_route() -> dict[str, Any]:
            result = self._get_tool_stats()
            return result.model_dump()

        @self._app.route("/clear_tool_stats", methods=["POST"])
        def clear_tool_stats_route() -> dict[str, str]:
            self._clear_tool_stats()
            return {"status": "cleared"}

        @self._app.route("/clear_logs", methods=["POST"])
        def clear_logs() -> dict[str, str]:
            self._memory_log_handler.clear_log_messages()
            return {"status": "cleared"}

        @self._app.route("/get_token_count_estimator_name", methods=["GET"])
        def get_token_count_estimator_name() -> dict[str, str]:
            # "unknown" when no usage-stats provider was configured
            estimator_name = self._tool_usage_stats.token_estimator_name if self._tool_usage_stats else "unknown"
            return {"token_count_estimator_name": estimator_name}

        @self._app.route("/get_config_overview", methods=["GET"])
        def get_config_overview() -> dict[str, Any]:
            # runs on the agent's executor to read agent state consistently
            result = self._agent.execute_task(self._get_config_overview, logged=False)
            return result.model_dump()

        @self._app.route("/shutdown", methods=["PUT"])
        def shutdown() -> dict[str, str]:
            self._shutdown()
            return {"status": "shutting down"}

        @self._app.route("/get_available_languages", methods=["GET"])
        def get_available_languages() -> dict[str, Any]:
            result = self._get_available_languages()
            return result.model_dump()

        @self._app.route("/add_language", methods=["POST"])
        def add_language() -> dict[str, str]:
            request_data = request.get_json()
            if not request_data:
                return {"status": "error", "message": "No data provided"}
            request_add_language = RequestAddLanguage.model_validate(request_data)
            try:
                self._add_language(request_add_language)
                return {"status": "success", "message": f"Language {request_add_language.language} added successfully"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/remove_language", methods=["POST"])
        def remove_language() -> dict[str, str]:
            request_data = request.get_json()
            if not request_data:
                return {"status": "error", "message": "No data provided"}
            request_remove_language = RequestRemoveLanguage.model_validate(request_data)
            try:
                self._remove_language(request_remove_language)
                return {"status": "success", "message": f"Language {request_remove_language.language} removed successfully"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/get_memory", methods=["POST"])
        def get_memory() -> dict[str, Any]:
            request_data = request.get_json()
            if not request_data:
                return {"status": "error", "message": "No data provided"}
            request_get_memory = RequestGetMemory.model_validate(request_data)
            try:
                result = self._get_memory(request_get_memory)
                return result.model_dump()
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/save_memory", methods=["POST"])
        def save_memory() -> dict[str, str]:
            request_data = request.get_json()
            if not request_data:
                return {"status": "error", "message": "No data provided"}
            request_save_memory = RequestSaveMemory.model_validate(request_data)
            try:
                self._save_memory(request_save_memory)
                return {"status": "success", "message": f"Memory {request_save_memory.memory_name} saved successfully"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/delete_memory", methods=["POST"])
        def delete_memory() -> dict[str, str]:
            request_data = request.get_json()
            if not request_data:
                return {"status": "error", "message": "No data provided"}
            request_delete_memory = RequestDeleteMemory.model_validate(request_data)
            try:
                self._delete_memory(request_delete_memory)
                return {"status": "success", "message": f"Memory {request_delete_memory.memory_name} deleted successfully"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/rename_memory", methods=["POST"])
        def rename_memory() -> dict[str, str]:
            request_data = request.get_json()
            if not request_data:
                return {"status": "error", "message": "No data provided"}
            request_rename_memory = RequestRenameMemory.model_validate(request_data)
            try:
                result_message = self._rename_memory(request_rename_memory)
                return {"status": "success", "message": result_message}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/get_serena_config", methods=["GET"])
        def get_serena_config() -> dict[str, Any]:
            try:
                result = self._get_serena_config()
                return result.model_dump()
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/save_serena_config", methods=["POST"])
        def save_serena_config() -> dict[str, str]:
            request_data = request.get_json()
            if not request_data:
                return {"status": "error", "message": "No data provided"}
            request_save_config = RequestSaveSerenaConfig.model_validate(request_data)
            try:
                self._save_serena_config(request_save_config)
                return {"status": "success", "message": "Serena config saved successfully"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/queued_task_executions", methods=["GET"])
        def get_queued_executions() -> dict[str, Any]:
            try:
                current_executions = self._agent.get_current_tasks()
                response = [QueuedExecution.from_task_info(task_info).model_dump() for task_info in current_executions]
                return {"queued_executions": response, "status": "success"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/cancel_task_execution", methods=["POST"])
        def cancel_task_execution() -> dict[str, Any]:
            request_data = request.get_json()
            try:
                request_cancel_task = RequestCancelTaskExecution.model_validate(request_data)
                # find the task by id; it may already have finished and disappeared
                for task in self._agent.get_current_tasks():
                    if task.task_id == request_cancel_task.task_id:
                        task.cancel()
                        return {"status": "success", "was_cancelled": True}
                return {
                    "status": "success",
                    "was_cancelled": False,
                    "message": f"Task with id {request_data.get('task_id')} not found, maybe execution was already finished",
                }
            except Exception as e:
                return {"status": "error", "message": str(e), "was_cancelled": False}

        @self._app.route("/last_execution", methods=["GET"])
        def get_last_execution() -> dict[str, Any]:
            try:
                last_execution_info = self._agent.get_last_executed_task()
                response = QueuedExecution.from_task_info(last_execution_info).model_dump() if last_execution_info is not None else None
                return {"last_execution": response, "status": "success"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/news_snippet_ids", methods=["GET"])
        def get_news_snippet_ids() -> dict[str, str | list[int]]:
            def _get_unread_news_ids() -> list[int]:
                # news snippets are shipped as <YYYYMMDD>.html files in the dashboard dir
                all_news_files = (Path(SERENA_DASHBOARD_DIR) / "news").glob("*.html")
                all_news_ids = [int(f.stem) for f in all_news_files]
                """News ids are ints of format YYYYMMDD (publication dates)"""
                # Filter news items by installation date
                serena_config_creation_date = SerenaConfig.get_config_file_creation_date()
                if serena_config_creation_date is None:
                    # should not normally happen, since config file should exist when the dashboard is started
                    # We assume a fresh installation in this case
                    log.error("Serena config file not found when starting the dashboard")
                    return []
                serena_config_creation_date_int = int(serena_config_creation_date.strftime("%Y%m%d"))
                # Only include news items published on or after the installation date
                post_installation_news_ids = [news_id for news_id in all_news_ids if news_id >= serena_config_creation_date_int]
                news_snippet_id_file = SerenaPaths().news_snippet_id_file
                if not os.path.exists(news_snippet_id_file):
                    # no read-marker yet: everything post-installation is unread
                    return post_installation_news_ids
                with open(news_snippet_id_file, encoding="utf-8") as f:
                    last_read_news_id = int(f.read().strip())
                return [news_id for news_id in post_installation_news_ids if news_id > last_read_news_id]

            try:
                unread_news_ids = _get_unread_news_ids()
                return {"news_snippet_ids": unread_news_ids, "status": "success"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

        @self._app.route("/mark_news_snippet_as_read", methods=["POST"])
        def mark_news_snippet_as_read() -> dict[str, str]:
            try:
                request_data = request.get_json()
                news_snippet_id = int(request_data.get("news_snippet_id"))
                # persist the id as the read-marker; ids <= this are considered read
                news_snippet_id_file = SerenaPaths().news_snippet_id_file
                with open(news_snippet_id_file, "w", encoding="utf-8") as f:
                    f.write(str(news_snippet_id))
                return {"status": "success", "message": f"Marked news snippet {news_snippet_id} as read"}
            except Exception as e:
                return {"status": "error", "message": str(e)}

    def _get_log_messages(self, request_log: RequestLog) -> ResponseLog:
        """Return buffered log messages from the requested index plus the active project name."""
        messages = self._memory_log_handler.get_log_messages(from_idx=request_log.start_idx)
        project = self._agent.get_active_project()
        project_name = project.project_name if project else None
        return ResponseLog(messages=messages.messages, max_idx=messages.max_idx, active_project=project_name)

    def _get_tool_names(self) -> ResponseToolNames:
        return ResponseToolNames(tool_names=self._tool_names)

    def _get_tool_stats(self) -> ResponseToolStats:
        """Return tool usage statistics, or an empty mapping when stats are disabled."""
        if self._tool_usage_stats is not None:
            return ResponseToolStats(stats=self._tool_usage_stats.get_tool_stats_dict())
        else:
            return ResponseToolStats(stats={})

    def _clear_tool_stats(self) -> None:
        if self._tool_usage_stats is not None:
            self._tool_usage_stats.clear()

    def _get_config_overview(self) -> ResponseConfigOverview:
        """Assemble the full configuration overview; expected to run on the agent's executor."""
        from serena.config.context_mode import SerenaAgentContext, SerenaAgentMode
        from serena.tools.tools_base import Tool

        # Get active project info
        project = self._agent.get_active_project()
        active_project_name = project.project_name if project else None
        project_info = {
            "name": active_project_name,
            "language": ", ".join([l.value for l in project.project_config.languages]) if project else None,
            "path": str(project.project_root) if project else None,
        }
        # Get context info
        context = self._agent.get_context()
        context_info = {
            "name": context.name,
            "description": context.description,
            "path": SerenaAgentContext.get_path(context.name, instance=context),
        }
        # Get active modes
        modes = self._agent.get_active_modes()
        modes_info = [
            {"name": mode.name, "description": mode.description, "path": SerenaAgentMode.get_path(mode.name, instance=mode)}
            for mode in modes
        ]
        active_mode_names = [mode.name for mode in modes]
        # Get active tools
        active_tools = self._agent.get_active_tool_names()
        # Get registered projects
        registered_projects: list[dict[str, str | bool]] = []
        for proj in self._agent.serena_config.projects:
            registered_projects.append(
                {
                    "name": proj.project_name,
                    "path": str(proj.project_root),
                    "is_active": proj.project_name == active_project_name,
                }
            )
        # Get all available tools (excluding active ones)
        all_tool_names = sorted([tool.get_name_from_cls() for tool in self._agent._all_tools.values()])
        available_tools: list[dict[str, str | bool]] = []
        for tool_name in all_tool_names:
            if tool_name not in active_tools:
                available_tools.append(
                    {
                        "name": tool_name,
                        "is_active": False,
                    }
                )
        # Get all available modes
        all_mode_names = SerenaAgentMode.list_registered_mode_names()
        available_modes: list[dict[str, str | bool]] = []
        for mode_name in all_mode_names:
            try:
                mode_path = SerenaAgentMode.get_path(mode_name)
            except FileNotFoundError:
                # Skip modes that can't be found (shouldn't happen for registered modes)
                continue
            available_modes.append(
                {
                    "name": mode_name,
                    "is_active": mode_name in active_mode_names,
                    "path": mode_path,
                }
            )
        # Get all available contexts
        all_context_names = SerenaAgentContext.list_registered_context_names()
        available_contexts: list[dict[str, str | bool]] = []
        for context_name in all_context_names:
            try:
                context_path = SerenaAgentContext.get_path(context_name)
            except FileNotFoundError:
                # Skip contexts that can't be found (shouldn't happen for registered contexts)
                continue
            available_contexts.append(
                {
                    "name": context_name,
                    "is_active": context_name == context.name,
                    "path": context_path,
                }
            )
        # Get basic tool stats (just num_calls for overview)
        tool_stats_summary = {}
        if self._tool_usage_stats is not None:
            full_stats = self._tool_usage_stats.get_tool_stats_dict()
            tool_stats_summary = {name: {"num_calls": stats["num_times_called"]} for name, stats in full_stats.items()}
        # Get available memories if ReadMemoryTool is active
        available_memories = None
        if self._agent.tool_is_active("read_memory") and project is not None:
            available_memories = project.memories_manager.list_memories().get_full_list()
        # Get list of languages for the active project
        languages = []
        if project is not None:
            languages = [lang.value for lang in project.project_config.languages]
        # Get file encoding for the active project
        encoding = None
        if project is not None:
            encoding = project.project_config.encoding
        return ResponseConfigOverview(
            active_project=project_info,
            context=context_info,
            modes=modes_info,
            active_tools=active_tools,
            tool_stats_summary=tool_stats_summary,
            registered_projects=registered_projects,
            available_tools=available_tools,
            available_modes=available_modes,
            available_contexts=available_contexts,
            available_memories=available_memories,
            jetbrains_mode=self._agent.get_language_backend().is_jetbrains(),
            languages=languages,
            encoding=encoding,
            current_client=Tool.get_last_tool_call_client_str(),
        )

    def _shutdown(self) -> None:
        """Invoke the configured shutdown callback, or hard-exit the process as a fallback."""
        log.info("Shutting down Serena")
        if self._shutdown_callback:
            self._shutdown_callback()
        else:
            # noinspection PyProtectedMember
            # noinspection PyUnresolvedReferences
            os._exit(0)

    def _get_available_languages(self) -> ResponseAvailableLanguages:
        """Return all non-experimental languages not yet added to the active project."""
        from solidlsp.ls_config import Language

        def run() -> ResponseAvailableLanguages:
            all_languages = [lang.value for lang in Language.iter_all(include_experimental=False)]
            # Filter out already added languages for the active project
            project = self._agent.get_active_project()
            if project:
                current_languages = [lang.value for lang in project.project_config.languages]
                available_languages = [lang for lang in all_languages if lang not in current_languages]
            else:
                available_languages = all_languages
            return ResponseAvailableLanguages(languages=sorted(available_languages))

        return self._agent.execute_task(run, logged=False)

    def _get_memory(self, request_get_memory: RequestGetMemory) -> ResponseGetMemory:
        """Load a memory of the active project; raises ValueError if no project is active."""
        def run() -> ResponseGetMemory:
            project = self._agent.get_active_project()
            if project is None:
                raise ValueError("No active project")
            content = project.memories_manager.load_memory(request_get_memory.memory_name)
            return ResponseGetMemory(content=content, memory_name=request_get_memory.memory_name)

        return self._agent.execute_task(run, logged=False)

    def _save_memory(self, request_save_memory: RequestSaveMemory) -> None:
        """Persist a memory of the active project; raises ValueError if no project is active."""
        def run() -> None:
            project = self._agent.get_active_project()
            if project is None:
                raise ValueError("No active project")
            project.memories_manager.save_memory(request_save_memory.memory_name, request_save_memory.content, is_tool_context=False)

        self._agent.execute_task(run, logged=True, name="SaveMemory")

    def _delete_memory(self, request_delete_memory: RequestDeleteMemory) -> None:
        """Delete a memory of the active project; raises ValueError if no project is active."""
        def run() -> None:
            project = self._agent.get_active_project()
            if project is None:
                raise ValueError("No active project")
            project.memories_manager.delete_memory(request_delete_memory.memory_name, is_tool_context=False)

        self._agent.execute_task(run, logged=True, name="DeleteMemory")

    def _rename_memory(self, request_rename_memory: RequestRenameMemory) -> str:
        """Rename a memory; returns the manager's result message.

        Raises ValueError if no project is active.
        """
        def run() -> str:
            project = self._agent.get_active_project()
            if project is None:
                raise ValueError("No active project")
            return project.memories_manager.move_memory(
                request_rename_memory.old_name, request_rename_memory.new_name, is_tool_context=False
            )

        return self._agent.execute_task(run, logged=True, name="RenameMemory")

    def _get_serena_config(self) -> ResponseGetSerenaConfig:
        """Read the Serena config file's raw text; raises ValueError if it does not exist."""
        config_path = self._agent.serena_config.config_file_path
        if config_path is None or not os.path.exists(config_path):
            raise ValueError("Serena config file not found")
        with open(config_path, encoding="utf-8") as f:
            content = f.read()
        return ResponseGetSerenaConfig(content=content)

    def _save_serena_config(self, request_save_config: RequestSaveSerenaConfig) -> None:
        """Overwrite the Serena config file with the provided text."""
        def run() -> None:
            config_path = self._agent.serena_config.config_file_path
            if config_path is None:
                raise ValueError("Serena config file path not set")
            with open(config_path, "w", encoding="utf-8") as f:
                f.write(request_save_config.content)

        self._agent.execute_task(run, logged=True, name="SaveSerenaConfig")

    def _add_language(self, request_add_language: RequestAddLanguage) -> None:
        """Add a language to the active project; raises ValueError for unknown language names."""
        from solidlsp.ls_config import Language

        try:
            language = Language(request_add_language.language)
        except ValueError:
            raise ValueError(f"Invalid language: {request_add_language.language}")
        # add_language is already thread-safe
        self._agent.add_language(language)

    def _remove_language(self, request_remove_language: RequestRemoveLanguage) -> None:
        """Remove a language from the active project; raises ValueError for unknown language names."""
        from solidlsp.ls_config import Language

        try:
            language = Language(request_remove_language.language)
        except ValueError:
            raise ValueError(f"Invalid language: {request_remove_language.language}")
        # remove_language is already thread-safe
        self._agent.remove_language(language)

    @staticmethod
    def _find_first_free_port(start_port: int, host: str) -> int:
        """Return the first port >= start_port that can be bound on host.

        :raises RuntimeError: if no port up to 65535 is free
        """
        port = start_port
        while port <= 65535:
            try:
                # bind-and-release to probe availability; note the port could be
                # taken again by another process before the caller uses it
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                    sock.bind((host, port))
                    return port
            except OSError:
                port += 1
        raise RuntimeError(f"No free ports found starting from {start_port}")

    def run(self, host: str, port: int) -> int:
        """
        Runs the dashboard on the given host and port and returns the port number.
        """
        # patch flask.cli.show_server to avoid printing the server info
        from flask import cli

        cli.show_server_banner = lambda *args, **kwargs: None
        self._app.run(host=host, port=port, debug=False, use_reloader=False, threaded=True)
        return port

    def run_in_thread(self, host: str) -> tuple[threading.Thread, int]:
        """Start the dashboard in a daemon thread; returns the thread and the chosen port."""
        # 0x5EDA = 24282, the dashboard's preferred base port
        port = self._find_first_free_port(0x5EDA, host)
        log.info("Starting dashboard (listen_address=%s, port=%d)", host, port)
        thread = threading.Thread(target=lambda: self.run(host=host, port=port), daemon=True)
        thread.start()
        return thread, port
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/dashboard.py",
"license": "MIT License",
"lines": 548,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
oraios/serena:scripts/print_mode_context_options.py | from serena.config.context_mode import SerenaAgentContext, SerenaAgentMode
if __name__ == "__main__":
    # Print an overview of every registered mode, then every registered context.
    print("---------- Available modes: ----------")
    for name in SerenaAgentMode.list_registered_mode_names():
        SerenaAgentMode.load(name).print_overview()
        print("\n")
    print("---------- Available contexts: ----------")
    for name in SerenaAgentContext.list_registered_context_names():
        SerenaAgentContext.load(name).print_overview()
        print("\n")
| {
"repo_id": "oraios/serena",
"file_path": "scripts/print_mode_context_options.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:src/interprompt/jinja_template.py | from typing import Any
import jinja2
import jinja2.meta
import jinja2.nodes
import jinja2.visitor
from interprompt.util.class_decorators import singleton
class ParameterizedTemplateInterface:
    """Interface for templates that can report the parameter names they expect."""

    def get_parameters(self) -> list[str]: ...
@singleton
class _JinjaEnvProvider:
    """Singleton provider of a lazily created, shared jinja2 environment."""

    def __init__(self) -> None:
        self._env: jinja2.Environment | None = None

    def get_env(self) -> jinja2.Environment:
        """Return the shared environment, creating it on first use."""
        if self._env is None:
            self._env = jinja2.Environment()
        return self._env
class JinjaTemplate(ParameterizedTemplateInterface):
    """Wraps a jinja2 template string and exposes its undeclared variables as parameters."""

    def __init__(self, template_string: str) -> None:
        self._template_string = template_string
        # compile via the shared environment, then statically extract the
        # template's undeclared variables as its parameter names
        self._template = _JinjaEnvProvider().get_env().from_string(self._template_string)
        parsed_content = self._template.environment.parse(self._template_string)
        self._parameters = sorted(jinja2.meta.find_undeclared_variables(parsed_content))

    def render(self, **params: Any) -> str:
        """Renders the template with the given kwargs. You can find out which parameters are required by calling get_parameter_names()."""
        return self._template.render(**params)

    def get_parameters(self) -> list[str]:
        """A sorted list of parameter names that are extracted from the template string. It is impossible to know the types of the parameter
        values, they can be primitives, dicts or dict-like objects.

        :return: the list of parameter names
        """
        return self._parameters
| {
"repo_id": "oraios/serena",
"file_path": "src/interprompt/jinja_template.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:src/interprompt/util/class_decorators.py | from typing import Any
def singleton(cls: type[Any]) -> Any:
    """Class decorator returning a factory that creates the instance once and reuses it.

    The constructor arguments of the first call are used; arguments of later
    calls are ignored, since the cached instance is returned.
    """
    box: list[Any] = []

    def factory(*args: Any, **kwargs: Any) -> Any:
        # construct lazily on first call, then always hand back the same object
        if not box:
            box.append(cls(*args, **kwargs))
        return box[0]

    return factory
| {
"repo_id": "oraios/serena",
"file_path": "src/interprompt/util/class_decorators.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:src/serena/constants.py | from pathlib import Path
# Resolved filesystem anchors: repository root (three levels above this file)
# and the serena package directory itself.
_repo_root_path = Path(__file__).parent.parent.parent.resolve()
_serena_pkg_path = Path(__file__).parent.resolve()

SERENA_MANAGED_DIR_NAME = ".serena"

# TODO: Path-related constants should be moved to SerenaPaths; don't add further constants here.
REPO_ROOT = str(_repo_root_path)
PROMPT_TEMPLATES_DIR_INTERNAL = str(_serena_pkg_path / "resources" / "config" / "prompt_templates")
SERENAS_OWN_CONTEXT_YAMLS_DIR = str(_serena_pkg_path / "resources" / "config" / "contexts")
"""The contexts that are shipped with the Serena package, i.e. the default contexts."""
SERENAS_OWN_MODE_YAMLS_DIR = str(_serena_pkg_path / "resources" / "config" / "modes")
"""The modes that are shipped with the Serena package, i.e. the default modes."""
INTERNAL_MODE_YAMLS_DIR = str(_serena_pkg_path / "resources" / "config" / "internal_modes")
"""Internal modes, never overridden by user modes."""
SERENA_DASHBOARD_DIR = str(_serena_pkg_path / "resources" / "dashboard")
SERENA_ICON_DIR = str(_serena_pkg_path / "resources" / "icons")
DEFAULT_SOURCE_FILE_ENCODING = "utf-8"
"""The default encoding assumed for project source files."""
DEFAULT_CONTEXT = "desktop-app"
SERENA_FILE_ENCODING = "utf-8"
"""The encoding used for Serena's own files, such as configuration files and memories."""
# Template files used when generating new project / global configuration files.
PROJECT_TEMPLATE_FILE = str(_serena_pkg_path / "resources" / "project.template.yml")
PROJECT_LOCAL_TEMPLATE_FILE = str(_serena_pkg_path / "resources" / "project.local.template.yml")
SERENA_CONFIG_TEMPLATE_FILE = str(_serena_pkg_path / "resources" / "serena_config.template.yml")
SERENA_LOG_FORMAT = "%(levelname)-5s %(asctime)-15s [%(threadName)s] %(name)s:%(funcName)s:%(lineno)d - %(message)s"
LOG_MESSAGES_BUFFER_SIZE = 2500
"""The maximum number of log messages to keep in the buffer (for the dashboard)."""
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/constants.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:src/serena/prompt_factory.py | import os
from serena.config.serena_config import SerenaPaths
from serena.constants import PROMPT_TEMPLATES_DIR_INTERNAL
from serena.generated.generated_prompt_factory import PromptFactory
class SerenaPromptFactory(PromptFactory):
"""
A class for retrieving and rendering prompt templates and prompt lists.
"""
def __init__(self) -> None:
user_templates_dir = SerenaPaths().user_prompt_templates_dir
os.makedirs(user_templates_dir, exist_ok=True)
super().__init__(prompts_dir=[user_templates_dir, PROMPT_TEMPLATES_DIR_INTERNAL])
| {
"repo_id": "oraios/serena",
"file_path": "src/serena/prompt_factory.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
oraios/serena:test/serena/test_symbol.py | from unittest.mock import MagicMock
import pytest
from serena.jetbrains.jetbrains_types import SymbolDTO, SymbolDTOKey
from serena.symbol import LanguageServerSymbol, LanguageServerSymbolRetriever, NamePathComponent, NamePathMatcher
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
class TestSymbolNameMatching:
def _create_assertion_error_message(
self,
name_path_pattern: str,
name_path_components: list[NamePathComponent],
is_substring_match: bool,
expected_result: bool,
actual_result: bool,
) -> str:
"""Helper to create a detailed error message for assertions."""
qnp_repr = "/".join(map(str, name_path_components))
return (
f"Pattern '{name_path_pattern}' (substring: {is_substring_match}) vs "
f"Name path components {name_path_components} (as '{qnp_repr}'). "
f"Expected: {expected_result}, Got: {actual_result}"
)
@pytest.mark.parametrize(
"name_path_pattern, symbol_name_path_parts, is_substring_match, expected",
[
# Exact matches, anywhere in the name (is_substring_match=False)
pytest.param("foo", ["foo"], False, True, id="'foo' matches 'foo' exactly (simple)"),
pytest.param("foo/", ["foo"], False, True, id="'foo/' matches 'foo' exactly (simple)"),
pytest.param("foo", ["bar", "foo"], False, True, id="'foo' matches ['bar', 'foo'] exactly (simple, last element)"),
pytest.param("foo", ["foobar"], False, False, id="'foo' does not match 'foobar' exactly (simple)"),
pytest.param(
"foo", ["bar", "foobar"], False, False, id="'foo' does not match ['bar', 'foobar'] exactly (simple, last element)"
),
pytest.param(
"foo", ["path", "to", "foo"], False, True, id="'foo' matches ['path', 'to', 'foo'] exactly (simple, last element)"
),
# Exact matches, absolute patterns (is_substring_match=False)
pytest.param("/foo", ["foo"], False, True, id="'/foo' matches ['foo'] exactly (absolute simple)"),
pytest.param("/foo", ["foo", "bar"], False, False, id="'/foo' does not match ['foo', 'bar'] (absolute simple, len mismatch)"),
pytest.param("/foo", ["bar"], False, False, id="'/foo' does not match ['bar'] (absolute simple, name mismatch)"),
pytest.param(
"/foo", ["bar", "foo"], False, False, id="'/foo' does not match ['bar', 'foo'] (absolute simple, position mismatch)"
),
# Substring matches, anywhere in the name (is_substring_match=True)
pytest.param("foo", ["foobar"], True, True, id="'foo' matches 'foobar' as substring (simple)"),
pytest.param("foo", ["bar", "foobar"], True, True, id="'foo' matches ['bar', 'foobar'] as substring (simple, last element)"),
pytest.param(
"foo", ["barfoo"], True, True, id="'foo' matches 'barfoo' as substring (simple)"
), # This was potentially ambiguous before
pytest.param("foo", ["baz"], True, False, id="'foo' does not match 'baz' as substring (simple)"),
pytest.param("foo", ["bar", "baz"], True, False, id="'foo' does not match ['bar', 'baz'] as substring (simple, last element)"),
pytest.param("foo", ["my_foobar_func"], True, True, id="'foo' matches 'my_foobar_func' as substring (simple)"),
pytest.param(
"foo",
["ClassA", "my_foobar_method"],
True,
True,
id="'foo' matches ['ClassA', 'my_foobar_method'] as substring (simple, last element)",
),
pytest.param("foo", ["my_bar_func"], True, False, id="'foo' does not match 'my_bar_func' as substring (simple)"),
# Substring matches, absolute patterns (is_substring_match=True)
pytest.param("/foo", ["foobar"], True, True, id="'/foo' matches ['foobar'] as substring (absolute simple)"),
pytest.param("/foo/", ["foobar"], True, True, id="'/foo/' matches ['foobar'] as substring (absolute simple, last element)"),
pytest.param("/foo", ["barfoobaz"], True, True, id="'/foo' matches ['barfoobaz'] as substring (absolute simple)"),
pytest.param(
"/foo", ["foo", "bar"], True, False, id="'/foo' does not match ['foo', 'bar'] as substring (absolute simple, len mismatch)"
),
pytest.param("/foo", ["bar"], True, False, id="'/foo' does not match ['bar'] (absolute simple, no substr)"),
pytest.param(
"/foo", ["bar", "foo"], True, False, id="'/foo' does not match ['bar', 'foo'] (absolute simple, position mismatch)"
),
pytest.param(
"/foo/", ["bar", "foo"], True, False, id="'/foo/' does not match ['bar', 'foo'] (absolute simple, position mismatch)"
),
],
)
def test_match_simple_name(self, name_path_pattern, symbol_name_path_parts, is_substring_match, expected):
"""Tests matching for simple names (no '/' in pattern)."""
symbol_name_path_components = [NamePathComponent(part) for part in symbol_name_path_parts]
result = NamePathMatcher(name_path_pattern, is_substring_match).matches_reversed_components(reversed(symbol_name_path_components))
error_msg = self._create_assertion_error_message(name_path_pattern, symbol_name_path_parts, is_substring_match, expected, result)
assert result == expected, error_msg
@pytest.mark.parametrize(
"name_path_pattern, symbol_name_path_parts, is_substring_match, expected",
[
# --- Relative patterns (suffix matching) ---
# Exact matches, relative patterns (is_substring_match=False)
pytest.param("bar/foo", ["bar", "foo"], False, True, id="R: 'bar/foo' matches ['bar', 'foo'] exactly"),
pytest.param("bar/foo", ["mod", "bar", "foo"], False, True, id="R: 'bar/foo' matches ['mod', 'bar', 'foo'] exactly (suffix)"),
pytest.param(
"bar/foo", ["bar", "foo", "baz"], False, False, id="R: 'bar/foo' does not match ['bar', 'foo', 'baz'] (pattern shorter)"
),
pytest.param("bar/foo", ["bar"], False, False, id="R: 'bar/foo' does not match ['bar'] (pattern longer)"),
pytest.param("bar/foo", ["baz", "foo"], False, False, id="R: 'bar/foo' does not match ['baz', 'foo'] (first part mismatch)"),
pytest.param("bar/foo", ["bar", "baz"], False, False, id="R: 'bar/foo' does not match ['bar', 'baz'] (last part mismatch)"),
pytest.param("bar/foo", ["foo"], False, False, id="R: 'bar/foo' does not match ['foo'] (pattern longer)"),
pytest.param(
"bar/foo", ["other", "foo"], False, False, id="R: 'bar/foo' does not match ['other', 'foo'] (first part mismatch)"
),
pytest.param(
"bar/foo", ["bar", "otherfoo"], False, False, id="R: 'bar/foo' does not match ['bar', 'otherfoo'] (last part mismatch)"
),
# Substring matches, relative patterns (is_substring_match=True)
pytest.param("bar/foo", ["bar", "foobar"], True, True, id="R: 'bar/foo' matches ['bar', 'foobar'] as substring"),
pytest.param(
"bar/foo", ["mod", "bar", "foobar"], True, True, id="R: 'bar/foo' matches ['mod', 'bar', 'foobar'] as substring (suffix)"
),
pytest.param("bar/foo", ["bar", "bazfoo"], True, True, id="R: 'bar/foo' matches ['bar', 'bazfoo'] as substring"),
pytest.param("bar/fo", ["bar", "foo"], True, True, id="R: 'bar/fo' matches ['bar', 'foo'] as substring"), # codespell:ignore
pytest.param("bar/foo", ["bar", "baz"], True, False, id="R: 'bar/foo' does not match ['bar', 'baz'] (last no substr)"),
pytest.param(
"bar/foo", ["baz", "foobar"], True, False, id="R: 'bar/foo' does not match ['baz', 'foobar'] (first part mismatch)"
),
pytest.param(
"bar/foo", ["bar", "my_foobar_method"], True, True, id="R: 'bar/foo' matches ['bar', 'my_foobar_method'] as substring"
),
pytest.param(
"bar/foo",
["mod", "bar", "my_foobar_method"],
True,
True,
id="R: 'bar/foo' matches ['mod', 'bar', 'my_foobar_method'] as substring (suffix)",
),
pytest.param(
"bar/foo",
["bar", "another_method"],
True,
False,
id="R: 'bar/foo' does not match ['bar', 'another_method'] (last no substr)",
),
pytest.param(
"bar/foo",
["other", "my_foobar_method"],
True,
False,
id="R: 'bar/foo' does not match ['other', 'my_foobar_method'] (first part mismatch)",
),
pytest.param("bar/f", ["bar", "foo"], True, True, id="R: 'bar/f' matches ['bar', 'foo'] as substring"),
# Exact matches, absolute patterns (is_substring_match=False)
pytest.param("/bar/foo", ["bar", "foo"], False, True, id="A: '/bar/foo' matches ['bar', 'foo'] exactly"),
pytest.param(
"/bar/foo", ["bar", "foo", "baz"], False, False, id="A: '/bar/foo' does not match ['bar', 'foo', 'baz'] (pattern shorter)"
),
pytest.param("/bar/foo", ["bar"], False, False, id="A: '/bar/foo' does not match ['bar'] (pattern longer)"),
pytest.param("/bar/foo", ["baz", "foo"], False, False, id="A: '/bar/foo' does not match ['baz', 'foo'] (first part mismatch)"),
pytest.param(
"/bar/foo",
["baz", "bar", "foo"],
False,
False,
id="A: '/bar/foo' does not match ['baz', 'bar', 'foo'] (only suffix match for abs pattern)",
),
pytest.param("/bar/foo", ["bar", "baz"], False, False, id="A: '/bar/foo' does not match ['bar', 'baz'] (last part mismatch)"),
# Substring matches (is_substring_match=True)
pytest.param("/bar/foo", ["bar", "foobar"], True, True, id="A: '/bar/foo' matches ['bar', 'foobar'] as substring"),
pytest.param("/bar/foo", ["bar", "bazfoo"], True, True, id="A: '/bar/foo' matches ['bar', 'bazfoo'] as substring"),
pytest.param("/bar/fo", ["bar", "foo"], True, True, id="A: '/bar/fo' matches ['bar', 'foo'] as substring"), # codespell:ignore
pytest.param("/bar/foo", ["bar", "baz"], True, False, id="A: '/bar/foo' does not match ['bar', 'baz'] (last no substr)"),
pytest.param(
"/bar/foo", ["baz", "foobar"], True, False, id="A: '/bar/foo' does not match ['baz', 'foobar'] (first part mismatch)"
),
],
)
def test_match_name_path_pattern_path_len_2(self, name_path_pattern, symbol_name_path_parts, is_substring_match, expected):
"""Tests matching for qualified names (e.g. 'module/class/func')."""
symbol_name_path_components = [NamePathComponent(part) for part in symbol_name_path_parts]
result = NamePathMatcher(name_path_pattern, is_substring_match).matches_reversed_components(reversed(symbol_name_path_components))
error_msg = self._create_assertion_error_message(name_path_pattern, symbol_name_path_parts, is_substring_match, expected, result)
assert result == expected, error_msg
@pytest.mark.parametrize(
"name_path_pattern, symbol_name_path_components, expected",
[
pytest.param(
"bar/foo",
[NamePathComponent("bar"), NamePathComponent("foo", 0)],
True,
id="R: 'bar/foo' matches ['bar', 'foo'] with overload_index=0",
),
pytest.param(
"bar/foo",
[NamePathComponent("bar"), NamePathComponent("foo", 1)],
True,
id="R: 'bar/foo' matches ['bar', 'foo'] with overload_index=1",
),
pytest.param(
"bar/foo[0]",
[NamePathComponent("bar"), NamePathComponent("foo", 0)],
True,
id="R: 'bar/foo[0]' matches ['bar', 'foo'] with overload_index=0",
),
pytest.param(
"bar/foo[1]",
[NamePathComponent("bar"), NamePathComponent("foo", 0)],
False,
id="R: 'bar/foo[1]' does not match ['bar', 'foo'] with overload_index=0",
),
pytest.param(
"bar/foo", [NamePathComponent("bar", 0), NamePathComponent("foo")], True, id="R: 'bar/foo' matches ['bar[0]', 'foo']"
),
pytest.param(
"bar/foo", [NamePathComponent("bar", 0), NamePathComponent("foo", 1)], True, id="R: 'bar/foo' matches ['bar[0]', 'foo[1]']"
),
pytest.param(
"bar[0]/foo", [NamePathComponent("bar", 0), NamePathComponent("foo")], True, id="R: 'bar[0]/foo' matches ['bar[0]', 'foo']"
),
pytest.param(
"bar[0]/foo[1]",
[NamePathComponent("bar", 0), NamePathComponent("foo", 1)],
True,
id="R: 'bar[0]/foo[1]' matches ['bar[0]', 'foo[1]']",
),
pytest.param(
"bar[0]/foo[1]",
[NamePathComponent("bar", 1), NamePathComponent("foo", 0)],
False,
id="R: 'bar[0]/foo[1]' does not match ['bar[1]', 'foo[0]']",
),
],
)
def test_match_name_path_pattern_with_overload_idx(self, name_path_pattern, symbol_name_path_components, expected):
"""Tests matching for qualified names (e.g. 'module/class/func')."""
matcher = NamePathMatcher(name_path_pattern, False)
result = matcher.matches_reversed_components(reversed(symbol_name_path_components))
error_msg = self._create_assertion_error_message(name_path_pattern, symbol_name_path_components, False, expected, result)
assert result == expected, error_msg
@pytest.mark.python
class TestLanguageServerSymbolRetriever:
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_request_info(self, language_server: SolidLanguageServer):
symbol_retriever = LanguageServerSymbolRetriever(language_server)
create_user_method_symbol = symbol_retriever.find("UserService/create_user", within_relative_path="test_repo/services.py")[0]
create_user_method_symbol_info = symbol_retriever.request_info_for_symbol(create_user_method_symbol)
assert "Create a new user and store it" in create_user_method_symbol_info
class TestSymbolDictTypes:
@staticmethod
def check_key_type(dict_type: type, key_type: type):
"""
:param dict_type: a TypedDict type
:param key_type: the corresponding key type (Literal[...]) that the dict should have for keys
"""
dict_type_keys = dict_type.__annotations__.keys()
assert len(dict_type_keys) == len(
key_type.__args__ # type: ignore
), f"Expected {len(key_type.__args__)} keys in {dict_type}, but got {len(dict_type_keys)}" # type: ignore
for expected_key in key_type.__args__: # type: ignore
assert expected_key in dict_type_keys, f"Expected key '{expected_key}' not found in {dict_type}"
def test_ls_symbol_dict_type(self):
self.check_key_type(LanguageServerSymbol.OutputDict, LanguageServerSymbol.OutputDictKey)
def test_jb_symbol_dict_type(self):
self.check_key_type(SymbolDTO, SymbolDTOKey)
def _make_mock_symbols(count: int, *, relative_path: str = "test_repo/services.py") -> list[MagicMock]:
symbols: list[MagicMock] = []
for i in range(count):
sym = MagicMock()
sym.relative_path = relative_path
sym.line = i + 1
sym.column = 0
sym.symbol_root = {}
symbols.append(sym)
return symbols
@pytest.mark.python
class TestHoverBudget:
"""Tests for symbol_info_budget time budget behavior."""
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_budget_not_exceeded_all_lookups_performed(self, language_server: SolidLanguageServer, monkeypatch: pytest.MonkeyPatch):
"""With a large budget, all hover lookups are performed."""
# Create symbol retriever with a mock agent that has large budget
mock_agent = MagicMock()
mock_agent.serena_config.symbol_info_budget = 10.0
mock_agent.get_active_project.return_value = None
symbol_retriever = LanguageServerSymbolRetriever(language_server, agent=mock_agent)
# Track _request_info calls
call_count = 0
def counting_request_info(file_path, line, column, **kwargs):
nonlocal call_count
call_count += 1
return f"info:{line}:{column}"
monkeypatch.setattr(symbol_retriever, "_request_info", counting_request_info)
# Create mock symbols with unique (line, col) pairs
symbols = _make_mock_symbols(3)
result = symbol_retriever.request_info_for_symbol_batch(symbols)
# All 3 symbols should have info (no budget exceeded)
assert call_count == 3
assert all(info is not None for info in result.values())
assert len(result) == 3
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_budget_exceeded_partial_info(self, language_server: SolidLanguageServer, monkeypatch: pytest.MonkeyPatch):
"""With a small budget, hover lookups stop and remaining symbols get None info."""
# Create symbol retriever with a mock agent that has small budget (0.1s)
mock_agent = MagicMock()
mock_agent.serena_config.symbol_info_budget = 0.1
mock_agent.get_active_project.return_value = None
symbol_retriever = LanguageServerSymbolRetriever(language_server, agent=mock_agent)
# Track _request_info calls and simulate 0.05s per call
call_count = 0
simulated_time = [0.0]
def slow_request_info(file_path, line, column, **kwargs):
nonlocal call_count
call_count += 1
# Simulate each hover taking 0.05s
simulated_time[0] += 0.05
return f"info:{line}:{column}"
# Mock perf_counter to return simulated time for hover duration
def mock_perf_counter():
return simulated_time[0]
monkeypatch.setattr(symbol_retriever, "_request_info", slow_request_info)
monkeypatch.setattr("serena.symbol.perf_counter", mock_perf_counter)
# Create 5 mock symbols with unique (line, col) pairs
symbols = _make_mock_symbols(5)
result = symbol_retriever.request_info_for_symbol_batch(symbols)
# Budget is 0.1s, each call takes 0.05s, so only 2 calls should succeed
# After 2 calls: 0.1s >= 0.1s budget, remaining 3 should be skipped
assert call_count == 2
assert len(result) == 5
# First 2 symbols should have info, last 3 should be None
result_list = list(result.values())
assert result_list[0] is not None
assert result_list[1] is not None
assert result_list[2] is None
assert result_list[3] is None
assert result_list[4] is None
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_budget_zero_means_unlimited(self, language_server: SolidLanguageServer, monkeypatch: pytest.MonkeyPatch):
"""With budget=0, all hover lookups proceed (no early stopping)."""
# Create symbol retriever with budget=0 (unlimited)
mock_agent = MagicMock()
mock_agent.serena_config.symbol_info_budget = 0.0
mock_agent.get_active_project.return_value = None
symbol_retriever = LanguageServerSymbolRetriever(language_server, agent=mock_agent)
# Track _request_info calls
call_count = 0
def counting_request_info(file_path, line, column, **kwargs):
nonlocal call_count
call_count += 1
return f"info:{line}:{column}"
monkeypatch.setattr(symbol_retriever, "_request_info", counting_request_info)
# Create mock symbols
symbols = _make_mock_symbols(5)
result = symbol_retriever.request_info_for_symbol_batch(symbols)
# All 5 symbols should be looked up (no budget limit)
assert call_count == 5
assert all(info is not None for info in result.values())
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_project_budget_overrides_global(self, language_server: SolidLanguageServer, monkeypatch: pytest.MonkeyPatch):
"""Project-level budget overrides global budget."""
# Create symbol retriever with global budget 10.0 but project budget 0.05
mock_project = MagicMock()
mock_project.project_config.symbol_info_budget = 0.05
mock_agent = MagicMock()
mock_agent.serena_config.symbol_info_budget = 10.0
mock_agent.get_active_project.return_value = mock_project
symbol_retriever = LanguageServerSymbolRetriever(language_server, agent=mock_agent)
# Track _request_info calls and simulate time
call_count = 0
simulated_time = [0.0]
def slow_request_info(file_path, line, column, **kwargs):
nonlocal call_count
call_count += 1
simulated_time[0] += 0.03
return f"info:{line}:{column}"
def mock_perf_counter():
return simulated_time[0]
monkeypatch.setattr(symbol_retriever, "_request_info", slow_request_info)
monkeypatch.setattr("serena.symbol.perf_counter", mock_perf_counter)
# Create 5 mock symbols
symbols = _make_mock_symbols(5)
symbol_retriever.request_info_for_symbol_batch(symbols)
# Project budget is 0.05s, each call takes 0.03s
# Budget check happens BEFORE starting a new call:
# - Before call 1: spent=0 < 0.05, proceed, spent becomes 0.03
# - Before call 2: spent=0.03 < 0.05, proceed, spent becomes 0.06
# - Before call 3: spent=0.06 >= 0.05, skip
# So 2 calls succeed (proving project budget 0.05 overrode global 10.0)
assert call_count == 2
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_project_null_inherits_global(self, language_server: SolidLanguageServer, monkeypatch: pytest.MonkeyPatch):
"""When project budget is None, global budget is used."""
# Create symbol retriever with project budget=None (inherit global)
mock_project = MagicMock()
mock_project.project_config.symbol_info_budget = None
mock_agent = MagicMock()
mock_agent.serena_config.symbol_info_budget = 10.0
mock_agent.get_active_project.return_value = mock_project
symbol_retriever = LanguageServerSymbolRetriever(language_server, agent=mock_agent)
# Track _request_info calls
call_count = 0
def counting_request_info(file_path, line, column, **kwargs):
nonlocal call_count
call_count += 1
return f"info:{line}:{column}"
monkeypatch.setattr(symbol_retriever, "_request_info", counting_request_info)
# Create 3 mock symbols
symbols = _make_mock_symbols(3)
result = symbol_retriever.request_info_for_symbol_batch(symbols)
# Global budget is 10s, all 3 should succeed
assert call_count == 3
assert all(info is not None for info in result.values())
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_no_agent_uses_default_budget(self, language_server: SolidLanguageServer, monkeypatch: pytest.MonkeyPatch):
"""When agent is None, default budget of 5s is used."""
# Create symbol retriever without agent
symbol_retriever = LanguageServerSymbolRetriever(language_server, agent=None)
# Track _request_info calls
call_count = 0
def counting_request_info(file_path, line, column, **kwargs):
nonlocal call_count
call_count += 1
return f"info:{line}:{column}"
monkeypatch.setattr(symbol_retriever, "_request_info", counting_request_info)
# Create 3 mock symbols
symbols = _make_mock_symbols(3)
result = symbol_retriever.request_info_for_symbol_batch(symbols)
# Default budget is 5s, all 3 should succeed
assert call_count == 3
assert all(info is not None for info in result.values())
| {
"repo_id": "oraios/serena",
"file_path": "test/serena/test_symbol.py",
"license": "MIT License",
"lines": 415,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pandas-dev/pandas:pandas/tests/extension/uuid/test_uuid.py | from __future__ import annotations
from typing import (
TYPE_CHECKING,
ClassVar,
Self,
)
from uuid import UUID
import numpy as np
from pandas.core.dtypes.dtypes import ExtensionDtype
import pandas as pd
from pandas.core.arrays.base import ExtensionArray
if TYPE_CHECKING:
import builtins
from collections.abc import Iterable
from numpy.typing import NDArray
from pandas._typing import (
Dtype,
ScalarIndexer,
)
# 16 void bytes: 128 bit, every pattern valid, no funky behavior like 0 stripping.
_UuidNumpyDtype = np.dtype("V16")
class UuidDtype(ExtensionDtype):
# ExtensionDtype essential API (3 class attrs and methods)
name: ClassVar[str] = "uuid"
type: ClassVar[builtins.type[UUID]] = UUID
@classmethod
def construct_array_type(cls) -> builtins.type[UuidExtensionArray]:
return UuidExtensionArray
# ExtensionDtype overrides
kind: ClassVar[str] = _UuidNumpyDtype.kind
class UuidExtensionArray(ExtensionArray):
# Implementation details and convenience
_data: NDArray[np.void]
def __init__(self, values: Iterable[UUID], *, copy: bool = False) -> None:
self._data = np.array([x.bytes for x in values], dtype=_UuidNumpyDtype)
# Parts of ExtensionArray's essential API required for tests:
dtype: ClassVar[UuidDtype] = UuidDtype()
@classmethod
def _from_sequence(
cls,
scalars: Iterable[UUID],
*,
dtype: Dtype | None = None,
copy: bool = False,
) -> Self:
if dtype is None:
dtype = UuidDtype()
return cls(scalars, copy=copy)
def __getitem__(self, index: ScalarIndexer) -> UUID: # type: ignore[override]
assert isinstance(index, int | np.integer)
return UUID(bytes=self._data[index].tobytes())
def __len__(self) -> int:
return len(self._data)
def test_construct() -> None:
"""Tests that we can construct UuidExtensionArray from a list of valid values."""
from uuid import uuid4
a = UuidExtensionArray([UUID(int=0), u := uuid4()])
assert a[0].int == 0
assert a[1] == u
def test_series() -> None:
"""Tests that Series accepts (unstructured) void ExtensionDtypes."""
from uuid import uuid4
s = pd.Series([u := uuid4()], dtype=UuidDtype(), name="s")
assert str(u) in str(s)
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/tests/extension/uuid/test_uuid.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pandas-dev/pandas:pandas/tests/copy_view/index/test_intervalindex.py | import numpy as np
from pandas import (
Interval,
IntervalIndex,
Series,
array,
)
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array
def test_constructor_copy_input_interval_ea_default():
# GH 63388
arr = array([Interval(0, 1), Interval(1, 2)])
idx = IntervalIndex(arr)
assert not tm.shares_memory(arr, idx.array)
def test_series_from_temporary_intervalindex_readonly_data():
# GH 63388
arr = array([Interval(0, 1), Interval(1, 2)])
arr._left.flags.writeable = False
arr._right.flags.writeable = False
ser = Series(IntervalIndex(arr))
assert not np.shares_memory(arr._left, get_array(ser)._left)
ser.iloc[0] = Interval(5, 6)
expected = Series([Interval(5, 6), Interval(1, 2)], dtype="interval[int64, right]")
tm.assert_series_equal(ser, expected)
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/tests/copy_view/index/test_intervalindex.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pandas-dev/pandas:pandas/tests/dtypes/cast/test_box_unbox.py | from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.core.dtypes.cast import (
maybe_box_native,
maybe_unbox_numpy_scalar,
)
from pandas import (
Interval,
Period,
Timedelta,
Timestamp,
)
@pytest.mark.parametrize(
"obj,expected_dtype",
[
(b"\x00\x10", bytes),
(4, int),
(np.uint(4), int),
(np.int32(-4), int),
(np.uint8(4), int),
(float(454.98), float),
(np.float16(0.4), float),
(np.float64(1.4), float),
(np.bool_(False), bool),
(datetime(2005, 2, 25), datetime),
(np.datetime64("2005-02-25"), Timestamp),
(Timestamp("2005-02-25"), Timestamp),
(np.timedelta64(1, "D"), Timedelta),
(Timedelta(1, "D"), Timedelta),
(Interval(0, 1), Interval),
(Period("4Q2005"), Period),
],
)
def test_maybe_box_native(obj, expected_dtype):
boxed_obj = maybe_box_native(obj)
result_dtype = type(boxed_obj)
assert result_dtype is expected_dtype
@pytest.mark.parametrize("typecode", np.typecodes["All"])
def test_maybe_unbox_numpy_scalar(typecode, using_python_scalars):
# https://github.com/pandas-dev/pandas/pull/63016
if typecode == "?":
scalar = False
expected = bool
elif typecode in "bhilqnpBHILQNP":
scalar = 0
expected = int
elif typecode in "efdg":
scalar = 0.0
expected = float
elif typecode in "FDG":
scalar = 0.0 + 0.0j
expected = complex
elif typecode in "SV":
scalar = b""
expected = bytes
elif typecode == "U":
scalar = ""
expected = str
elif typecode == "O":
scalar = 0
expected = int
elif typecode == "M":
scalar = datetime(2025, 1, 1)
expected = Timestamp
elif typecode == "m":
scalar = timedelta(seconds=3)
expected = Timedelta
else:
raise ValueError(f"typecode {typecode} not recognized")
value = np.array([scalar], dtype=typecode)[0]
result = maybe_unbox_numpy_scalar(value)
if using_python_scalars:
assert type(result) == expected
else:
assert result is value
def test_maybe_unbox_numpy_scalar_timestamp(unit, using_python_scalars):
# https://github.com/pandas-dev/pandas/pull/63016
value = np.datetime64(1, unit)
expected = Timestamp(1, unit=unit) if using_python_scalars else value
result = maybe_unbox_numpy_scalar(value)
assert result == expected
assert type(result) == type(expected)
def test_maybe_unbox_numpy_scalar_datetime(unit, using_python_scalars):
# https://github.com/pandas-dev/pandas/pull/63016
value = np.timedelta64(1, unit)
expected = Timedelta(1, unit=unit) if using_python_scalars else value
result = maybe_unbox_numpy_scalar(value)
assert result == expected
assert type(result) == type(expected)
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/tests/dtypes/cast/test_box_unbox.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pandas-dev/pandas:pandas/tests/arithmetic/test_string.py | import operator
from pathlib import Path
import numpy as np
import pytest
from pandas.compat import HAS_PYARROW
from pandas.errors import Pandas4Warning
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
NA,
ArrowDtype,
Series,
StringDtype,
)
import pandas._testing as tm
from pandas.core.construction import extract_array
def string_dtype_highest_priority(dtype1, dtype2):
if HAS_PYARROW:
DTYPE_HIERARCHY = [
StringDtype("python", na_value=np.nan),
StringDtype("pyarrow", na_value=np.nan),
StringDtype("python", na_value=NA),
StringDtype("pyarrow", na_value=NA),
]
else:
DTYPE_HIERARCHY = [
StringDtype("python", na_value=np.nan),
StringDtype("python", na_value=NA),
]
h1 = DTYPE_HIERARCHY.index(dtype1)
h2 = DTYPE_HIERARCHY.index(dtype2)
return DTYPE_HIERARCHY[max(h1, h2)]
def test_eq_all_na():
pytest.importorskip("pyarrow")
a = pd.array([NA, NA], dtype=StringDtype("pyarrow"))
result = a == a
expected = pd.array([NA, NA], dtype="boolean[pyarrow]")
tm.assert_extension_array_equal(result, expected)
def test_reversed_logical_ops(any_string_dtype):
# GH#60234
dtype = any_string_dtype
warn = None if dtype == object else Pandas4Warning
left = Series([True, False, False, True])
right = Series(["", "", "b", "c"], dtype=dtype)
msg = "operations between boolean dtype and"
with tm.assert_produces_warning(warn, match=msg):
result = left | right
expected = left | right.astype(bool)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(warn, match=msg):
result = left & right
expected = left & right.astype(bool)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(warn, match=msg):
result = left ^ right
expected = left ^ right.astype(bool)
tm.assert_series_equal(result, expected)
def test_pathlib_path_division(any_string_dtype, request):
# GH#61940
if any_string_dtype == object:
mark = pytest.mark.xfail(
reason="with NA present we go through _masked_arith_op which "
"raises TypeError bc Path is not recognized by lib.is_scalar."
)
request.applymarker(mark)
item = Path("/Users/Irv/")
ser = Series(["A", "B", NA], dtype=any_string_dtype)
result = item / ser
expected = Series([item / "A", item / "B", ser.dtype.na_value], dtype=object)
tm.assert_series_equal(result, expected)
result = ser / item
expected = Series(["A" / item, "B" / item, ser.dtype.na_value], dtype=object)
tm.assert_series_equal(result, expected)
def test_mixed_object_comparison(any_string_dtype):
# GH#60228
dtype = any_string_dtype
ser = Series(["a", "b"], dtype=dtype)
mixed = Series([1, "b"], dtype=object)
result = ser == mixed
expected = Series([False, True], dtype=bool)
if dtype == object:
pass
elif dtype.storage == "python" and dtype.na_value is NA:
expected = expected.astype("boolean")
elif dtype.storage == "pyarrow" and dtype.na_value is NA:
expected = expected.astype("bool[pyarrow]")
tm.assert_series_equal(result, expected)
def test_pyarrow_numpy_string_invalid():
# GH#56008
pa = pytest.importorskip("pyarrow")
ser = Series([False, True])
ser2 = Series(["a", "b"], dtype=StringDtype(na_value=np.nan))
result = ser == ser2
expected_eq = Series(False, index=ser.index)
tm.assert_series_equal(result, expected_eq)
result = ser != ser2
expected_ne = Series(True, index=ser.index)
tm.assert_series_equal(result, expected_ne)
with pytest.raises(TypeError, match="Invalid comparison"):
ser > ser2
# GH#59505
ser3 = ser2.astype("string[pyarrow]")
result3_eq = ser3 == ser
tm.assert_series_equal(result3_eq, expected_eq.astype("bool[pyarrow]"))
result3_ne = ser3 != ser
tm.assert_series_equal(result3_ne, expected_ne.astype("bool[pyarrow]"))
with pytest.raises(TypeError, match="Invalid comparison"):
ser > ser3
ser4 = ser2.astype(ArrowDtype(pa.string()))
result4_eq = ser4 == ser
tm.assert_series_equal(result4_eq, expected_eq.astype("bool[pyarrow]"))
result4_ne = ser4 != ser
tm.assert_series_equal(result4_ne, expected_ne.astype("bool[pyarrow]"))
with pytest.raises(TypeError, match="Invalid comparison"):
ser > ser4
def test_mul_bool_invalid(any_string_dtype):
    # GH#62595: multiplying a string array by booleans must raise rather
    # than silently treating True/False as 1/0.
    dtype = any_string_dtype
    ser = Series(["a", "b", "c"], dtype=dtype)
    if dtype == object:
        # fix: typo in skip reason ("expect" -> "expected")
        pytest.skip("This is not expected to raise")
    elif dtype.storage == "python":
        msg = "Cannot multiply StringArray by bools. Explicitly cast to integers"
    else:
        msg = "Can only string multiply by an integer"
    # scalar bools, both operand orders
    with pytest.raises(TypeError, match=msg):
        False * ser
    with pytest.raises(TypeError, match=msg):
        ser * True
    # bool ndarray, both operand orders
    mask = np.array([True, False, True], dtype=bool)
    with pytest.raises(TypeError, match=msg):
        ser * mask
    with pytest.raises(TypeError, match=msg):
        mask * ser
def test_add(any_string_dtype, request):
dtype = any_string_dtype
if dtype == object:
mark = pytest.mark.xfail(
reason="Need to update expected for numpy object dtype"
)
request.applymarker(mark)
a = Series(["a", "b", "c", None, None], dtype=dtype)
b = Series(["x", "y", None, "z", None], dtype=dtype)
result = a + b
expected = Series(["ax", "by", None, None, None], dtype=dtype)
tm.assert_series_equal(result, expected)
result = a.add(b)
tm.assert_series_equal(result, expected)
result = a.radd(b)
expected = Series(["xa", "yb", None, None, None], dtype=dtype)
tm.assert_series_equal(result, expected)
result = a.add(b, fill_value="-")
expected = Series(["ax", "by", "c-", "-z", None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_add_2d(any_string_dtype, request):
    # Adding a 2D object ndarray to a 1D string array must raise.
    dtype = any_string_dtype
    if dtype == object or dtype.storage == "pyarrow":
        request.applymarker(
            pytest.mark.xfail(
                raises=None, reason="Failed: DID NOT RAISE <class 'ValueError'>"
            )
        )
    arr = pd.array(["a", "b", "c"], dtype=dtype)
    other = np.array([["a", "b", "c"]], dtype=object)

    with pytest.raises(ValueError, match="3 != 1"):
        arr + other
    # same failure when the array is wrapped in a Series
    with pytest.raises(ValueError, match="3 != 1"):
        Series(arr) + other
def test_add_sequence(any_string_dtype, request, using_infer_string):
    # Adding a plain Python list of strings to a string array aligns
    # elementwise, propagating missing values from either side.
    dtype = any_string_dtype
    if (
        dtype != object
        and dtype.storage == "python"
        and dtype.na_value is np.nan
        and HAS_PYARROW
        and using_infer_string
    ):
        # NOTE(review): xfail, not skip -- remove once GH#62522 is resolved
        mark = pytest.mark.xfail(
            reason="As of GH#62522, the list gets wrapped with sanitize_array, "
            "which casts to a higher-priority StringArray, so we get "
            "NotImplemented."
        )
        request.applymarker(mark)
    if dtype == np.dtype(object) and using_infer_string:
        mark = pytest.mark.xfail(reason="Cannot broadcast list")
        request.applymarker(mark)
    a = pd.array(["a", "b", None, None], dtype=dtype)
    other = ["x", None, "y", None]
    # forward add: result is missing wherever either operand is missing
    result = a + other
    expected = pd.array(["ax", None, None, None], dtype=dtype)
    tm.assert_extension_array_equal(result, expected)
    # reflected add concatenates in the opposite order
    result = other + a
    expected = pd.array(["xa", None, None, None], dtype=dtype)
    tm.assert_extension_array_equal(result, expected)
def test_mul(any_string_dtype):
dtype = any_string_dtype
a = pd.array(["a", "b", None], dtype=dtype)
result = a * 2
expected = pd.array(["aa", "bb", None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = 2 * a
tm.assert_extension_array_equal(result, expected)
def test_add_strings(any_string_dtype, request):
dtype = any_string_dtype
if dtype != np.dtype(object):
mark = pytest.mark.xfail(reason="GH-28527")
request.applymarker(mark)
arr = pd.array(["a", "b", "c", "d"], dtype=dtype)
df = pd.DataFrame([["t", "y", "v", "w"]], dtype=object)
assert arr.__add__(df) is NotImplemented
result = arr + df
expected = pd.DataFrame([["at", "by", "cv", "dw"]]).astype(dtype)
tm.assert_frame_equal(result, expected)
result = df + arr
expected = pd.DataFrame([["ta", "yb", "vc", "wd"]]).astype(dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH-28527")
def test_add_frame(any_string_dtype, using_infer_string):
    if not using_infer_string:
        pytest.skip(
            "This doesn't fail on this build, but this build is going away, "
            "so not worth more invasive fix."
        )
    dtype = any_string_dtype
    arr = pd.array(["a", "b", np.nan, np.nan], dtype=dtype)
    df = pd.DataFrame([["x", np.nan, "y", np.nan]])

    # binary-op dispatch defers to the DataFrame
    assert arr.__add__(df) is NotImplemented

    # missing values propagate through the broadcast addition
    tm.assert_frame_equal(
        arr + df, pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype(dtype)
    )
    tm.assert_frame_equal(
        df + arr, pd.DataFrame([["xa", np.nan, np.nan, np.nan]]).astype(dtype)
    )
def test_comparison_methods_scalar(comparison_op, any_string_dtype):
dtype = any_string_dtype
op_name = f"__{comparison_op.__name__}__"
a = pd.array(["a", None, "c"], dtype=dtype)
other = "a"
result = getattr(a, op_name)(other)
if dtype == object or dtype.na_value is np.nan:
expected = np.array([getattr(item, op_name)(other) for item in a])
if comparison_op == operator.ne:
expected[1] = True
else:
expected[1] = False
result = extract_array(result, extract_numpy=True)
tm.assert_numpy_array_equal(result, expected.astype(np.bool_))
else:
expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean"
expected = np.array([getattr(item, op_name)(other) for item in a], dtype=object)
expected = pd.array(expected, dtype=expected_dtype)
tm.assert_extension_array_equal(result, expected)
def test_comparison_methods_scalar_pd_na(comparison_op, any_string_dtype):
dtype = any_string_dtype
op_name = f"__{comparison_op.__name__}__"
a = pd.array(["a", None, "c"], dtype=dtype)
result = getattr(a, op_name)(NA)
if dtype == np.dtype(object) or dtype.na_value is np.nan:
if operator.ne == comparison_op:
expected = np.array([True, True, True])
else:
expected = np.array([False, False, False])
result = extract_array(result, extract_numpy=True)
tm.assert_numpy_array_equal(result, expected)
else:
expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean"
expected = pd.array([None, None, None], dtype=expected_dtype)
tm.assert_extension_array_equal(result, expected)
tm.assert_extension_array_equal(result, expected)
def test_comparison_methods_scalar_not_string(comparison_op, any_string_dtype):
op_name = f"__{comparison_op.__name__}__"
dtype = any_string_dtype
a = pd.array(["a", None, "c"], dtype=dtype)
other = 42
if op_name not in ["__eq__", "__ne__"]:
with pytest.raises(TypeError, match="Invalid comparison|not supported between"):
getattr(a, op_name)(other)
return
result = getattr(a, op_name)(other)
result = extract_array(result, extract_numpy=True)
if dtype == np.dtype(object) or dtype.na_value is np.nan:
expected_data = {
"__eq__": [False, False, False],
"__ne__": [True, True, True],
}[op_name]
expected = np.array(expected_data)
tm.assert_numpy_array_equal(result, expected)
else:
expected_data = {"__eq__": [False, None, False], "__ne__": [True, None, True]}[
op_name
]
expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean"
expected = pd.array(expected_data, dtype=expected_dtype)
tm.assert_extension_array_equal(result, expected)
def test_comparison_methods_array(comparison_op, any_string_dtype, any_string_dtype2):
    # Compare two string arrays of (possibly different) string dtypes:
    # a=["a", <missing>, "c"] vs other=[<missing>, <missing>, "c"]; only the
    # last pair has values on both sides.
    op_name = f"__{comparison_op.__name__}__"
    dtype = any_string_dtype
    dtype2 = any_string_dtype2
    a = pd.array(["a", None, "c"], dtype=dtype)
    other = pd.array([None, None, "c"], dtype=dtype2)
    result = comparison_op(a, other)
    result = extract_array(result, extract_numpy=True)
    # ensure operation is commutative
    result2 = comparison_op(other, a)
    result2 = extract_array(result2, extract_numpy=True)
    tm.assert_equal(result, result2)
    if (dtype == object or dtype.na_value is np.nan) and (
        dtype2 == object or dtype2.na_value is np.nan
    ):
        # both sides NaN-backed: plain bool result; pairs with a missing
        # side compare unequal (so True only for "ne")
        if operator.ne == comparison_op:
            expected = np.array([True, True, False])
        else:
            expected = np.array([False, False, False])
        # the last pair has real values -> use the actual comparison result
        expected[-1] = getattr(other[-1], op_name)(a[-1])
        result = extract_array(result, extract_numpy=True)
        tm.assert_numpy_array_equal(result, expected)
    else:
        # at least one NA-backed side: missing propagates, and the result
        # dtype follows the storage of the higher-priority string dtype
        if dtype == object:
            max_dtype = dtype2
        elif dtype2 == object:
            max_dtype = dtype
        else:
            max_dtype = string_dtype_highest_priority(dtype, dtype2)
        if max_dtype.storage == "python":
            expected_dtype = "boolean"
        else:
            expected_dtype = "bool[pyarrow]"
        expected = np.full(len(a), fill_value=None, dtype="object")
        expected[-1] = getattr(other[-1], op_name)(a[-1])
        expected = pd.array(expected, dtype=expected_dtype)
        tm.assert_equal(result, expected)
@td.skip_if_no("pyarrow")
def test_comparison_methods_array_arrow_extension(comparison_op, any_string_dtype):
    # Test pd.ArrowDtype(pa.string()) against other string arrays
    import pyarrow as pa

    dtype2 = any_string_dtype
    op_name = f"__{comparison_op.__name__}__"
    dtype = ArrowDtype(pa.string())
    a = pd.array(["a", None, "c"], dtype=dtype)
    other = pd.array([None, None, "c"], dtype=dtype2)
    result = comparison_op(a, other)
    # ensure operation is commutative
    result2 = comparison_op(other, a)
    tm.assert_equal(result, result2)
    # missing entries propagate as NA; only the last pair is comparable,
    # so its slot is filled with the actual elementwise comparison result
    expected = pd.array([None, None, True], dtype="bool[pyarrow]")
    expected[-1] = getattr(other[-1], op_name)(a[-1])
    tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("box", [pd.array, pd.Index, Series])
def test_comparison_methods_list(comparison_op, any_string_dtype, box, request):
    # Compare a boxed string array against a plain Python list.
    dtype = any_string_dtype
    if box is pd.array and dtype != object and dtype.na_value is np.nan:
        mark = pytest.mark.xfail(
            reason="After wrapping list, op returns NotImplemented, see GH#62522"
        )
        request.applymarker(mark)
    op_name = f"__{comparison_op.__name__}__"
    a = box(pd.array(["a", None, "c"], dtype=dtype))
    item = "c"
    other = [None, None, "c"]
    result = comparison_op(a, other)
    # ensure operation is commutative
    result2 = comparison_op(other, a)
    tm.assert_equal(result, result2)
    if dtype == np.dtype(object) or dtype.na_value is np.nan:
        # NaN-backed: plain bool result; missing entries compare unequal
        if operator.ne == comparison_op:
            expected = np.array([True, True, False])
        else:
            expected = np.array([False, False, False])
        expected[-1] = getattr(item, op_name)(item)
        if box is not pd.Index:
            # if GH#62766 is addressed this check can be removed
            expected = box(expected, dtype=expected.dtype)
        tm.assert_equal(result, expected)
    else:
        # NA-backed: missing propagates into a masked/arrow boolean result
        expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean"
        expected = np.full(len(a), fill_value=None, dtype="object")
        expected[-1] = getattr(item, op_name)(item)
        expected = pd.array(expected, dtype=expected_dtype)
        expected = extract_array(expected, extract_numpy=True)
        if box is not pd.Index:
            # if GH#62766 is addressed this check can be removed
            expected = tm.box_expected(expected, box)
        tm.assert_equal(result, expected)
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/tests/arithmetic/test_string.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 381,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pandas-dev/pandas:pandas/api/typing/aliases.py | from pandas._typing import (
AggFuncType,
AlignJoin,
AnyAll,
AnyArrayLike,
ArrayLike,
AstypeArg,
Axes,
Axis,
ColspaceArgType,
CompressionOptions,
CorrelationMethod,
CSVEngine,
DropKeep,
Dtype,
DtypeArg,
DtypeBackend,
DtypeObj,
ExcelWriterIfSheetExists,
ExcelWriterMergeCells,
FilePath,
FillnaOptions,
FloatFormatType,
FormattersType,
FromDictOrient,
HTMLFlavors,
IgnoreRaise,
IndexLabel,
InterpolateOptions,
IntervalClosedType,
IntervalLeftRight,
JoinHow,
JoinValidate,
JSONEngine,
JSONSerializable,
ListLike,
MergeHow,
MergeValidate,
NaPosition,
NsmallestNlargestKeep,
OpenFileErrors,
Ordered,
ParquetCompressionOptions,
QuantileInterpolation,
ReadBuffer,
ReadCsvBuffer,
ReadPickleBuffer,
ReindexMethod,
Scalar,
ScalarIndexer,
SequenceIndexer,
SequenceNotStr,
SliceType,
SortKind,
StorageOptions,
Suffixes,
TakeIndexer,
TimeAmbiguous,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
TimeNonexistent,
TimestampConvertibleTypes,
TimeUnit,
ToStataByteorder,
ToTimestampHow,
UpdateJoin,
UsecolsArgType,
WindowingRankType,
WriteBuffer,
WriteExcelBuffer,
XMLParsers,
)
# Public re-exports of the type aliases imported from pandas._typing above.
# Keep this list in sync with the import block (one entry per imported name),
# sorted ASCII-alphabetically (uppercase acronyms sort before mixed case).
__all__ = [
    "AggFuncType",
    "AlignJoin",
    "AnyAll",
    "AnyArrayLike",
    "ArrayLike",
    "AstypeArg",
    "Axes",
    "Axis",
    "CSVEngine",
    "ColspaceArgType",
    "CompressionOptions",
    "CorrelationMethod",
    "DropKeep",
    "Dtype",
    "DtypeArg",
    "DtypeBackend",
    "DtypeObj",
    "ExcelWriterIfSheetExists",
    "ExcelWriterMergeCells",
    "FilePath",
    "FillnaOptions",
    "FloatFormatType",
    "FormattersType",
    "FromDictOrient",
    "HTMLFlavors",
    "IgnoreRaise",
    "IndexLabel",
    "InterpolateOptions",
    "IntervalClosedType",
    "IntervalLeftRight",
    "JSONEngine",
    "JSONSerializable",
    "JoinHow",
    "JoinValidate",
    "ListLike",
    "MergeHow",
    "MergeValidate",
    "NaPosition",
    "NsmallestNlargestKeep",
    "OpenFileErrors",
    "Ordered",
    "ParquetCompressionOptions",
    "QuantileInterpolation",
    "ReadBuffer",
    "ReadCsvBuffer",
    "ReadPickleBuffer",
    "ReindexMethod",
    "Scalar",
    "ScalarIndexer",
    "SequenceIndexer",
    "SequenceNotStr",
    "SliceType",
    "SortKind",
    "StorageOptions",
    "Suffixes",
    "TakeIndexer",
    "TimeAmbiguous",
    "TimeGrouperOrigin",
    "TimeNonexistent",
    "TimeUnit",
    "TimedeltaConvertibleTypes",
    "TimestampConvertibleTypes",
    "ToStataByteorder",
    "ToTimestampHow",
    "UpdateJoin",
    "UsecolsArgType",
    "WindowingRankType",
    "WriteBuffer",
    "WriteExcelBuffer",
    "XMLParsers",
]
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/api/typing/aliases.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
pandas-dev/pandas:pandas/tests/arithmetic/test_bool.py | import pytest
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
def test_divmod_bool_raises(box_with_array):
    # GH#46043: // raises for bool dtypes, so divmod should too.
    obj = tm.box_expected(Series([True, False]), box_with_array)

    floordiv_msg = "operator 'floordiv' not implemented for bool dtypes"
    with pytest.raises(NotImplementedError, match=floordiv_msg):
        obj // obj

    # the error message differs by container type
    if box_with_array is DataFrame:
        msg = floordiv_msg
    else:
        msg = "operator 'divmod' not implemented for bool dtypes"
    with pytest.raises(NotImplementedError, match=msg):
        divmod(obj, obj)
    # reflected path (__rdivmod__) raises the same way
    with pytest.raises(NotImplementedError, match=msg):
        divmod(True, obj)
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/tests/arithmetic/test_bool.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pandas-dev/pandas:pandas/core/col.py | from __future__ import annotations
from collections.abc import (
Callable,
Hashable,
)
from typing import (
TYPE_CHECKING,
Any,
NoReturn,
)
from pandas.util._decorators import set_module
if TYPE_CHECKING:
from pandas import (
DataFrame,
Series,
)
# Used only for generating the str repr of expressions.
# Maps dunder method names to the operator symbol they render as.
# NOTE(review): nothing in the visible part of this module reads this mapping
# (the reprs below are built with inline f-strings) -- presumably kept for
# external use or a future refactor; verify before removing.
_OP_SYMBOLS = {
    "__add__": "+",
    "__radd__": "+",
    "__sub__": "-",
    "__rsub__": "-",
    "__mul__": "*",
    "__rmul__": "*",
    "__truediv__": "/",
    "__rtruediv__": "/",
    "__floordiv__": "//",
    "__rfloordiv__": "//",
    "__mod__": "%",
    "__rmod__": "%",
    "__ge__": ">=",
    "__gt__": ">",
    "__le__": "<=",
    "__lt__": "<",
    "__eq__": "==",
    "__ne__": "!=",
    "__and__": "&",
    "__rand__": "&",
    "__or__": "|",
    "__ror__": "|",
    "__xor__": "^",
    "__rxor__": "^",
}
def _parse_args(df: DataFrame, *args: Any) -> tuple[Series]:
# Parse `args`, evaluating any expressions we encounter.
return tuple(
[x._eval_expression(df) if isinstance(x, Expression) else x for x in args]
)
def _parse_kwargs(df: DataFrame, **kwargs: Any) -> dict[str, Any]:
# Parse `kwargs`, evaluating any expressions we encounter.
return {
key: val._eval_expression(df) if isinstance(val, Expression) else val
for key, val in kwargs.items()
}
def _pretty_print_args_kwargs(*args: Any, **kwargs: Any) -> str:
inputs_repr = ", ".join(repr(arg) for arg in args)
kwargs_repr = ", ".join(f"{k}={v!r}" for k, v in kwargs.items())
all_args = []
if inputs_repr:
all_args.append(inputs_repr)
if kwargs_repr:
all_args.append(kwargs_repr)
return ", ".join(all_args)
@set_module("pandas.api.typing")
class Expression:
    """
    Class representing a deferred column.

    This is not meant to be instantiated directly. Instead, use :meth:`pandas.col`.
    """

    def __init__(
        self,
        func: Callable[[DataFrame], Any],
        repr_str: str,
        needs_parenthese: bool = False,
    ) -> None:
        # func: callable evaluating this expression against a DataFrame.
        # repr_str: human-readable form used by __repr__.
        # needs_parenthese: whether this expression's repr must be wrapped in
        # parentheses when embedded in a larger expression's repr.
        self._func = func
        self._repr_str = repr_str
        self._needs_parentheses = needs_parenthese

    def _eval_expression(self, df: DataFrame) -> Any:
        """Evaluate the deferred expression against ``df``."""
        return self._func(df)

    def _with_op(
        self, op: str, other: Any, repr_str: str, needs_parentheses: bool = True
    ) -> Expression:
        """Return a new Expression applying dunder ``op`` against ``other``,
        evaluating ``other`` first if it is itself an Expression."""
        if isinstance(other, Expression):
            return Expression(
                lambda df: getattr(self._eval_expression(df), op)(
                    other._eval_expression(df)
                ),
                repr_str,
                needs_parenthese=needs_parentheses,
            )
        else:
            return Expression(
                lambda df: getattr(self._eval_expression(df), op)(other),
                repr_str,
                needs_parenthese=needs_parentheses,
            )

    def _maybe_wrap_parentheses(self, other: Any) -> tuple[str, str]:
        """Return reprs of ``self`` and ``other``, parenthesized as needed."""
        if self._needs_parentheses:
            self_repr = f"({self!r})"
        else:
            self_repr = f"{self!r}"
        if isinstance(other, Expression) and other._needs_parentheses:
            other_repr = f"({other!r})"
        else:
            other_repr = f"{other!r}"
        return self_repr, other_repr

    # Binary ops
    def __add__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__add__", other, f"{self_repr} + {other_repr}")

    def __radd__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__radd__", other, f"{other_repr} + {self_repr}")

    def __sub__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__sub__", other, f"{self_repr} - {other_repr}")

    def __rsub__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rsub__", other, f"{other_repr} - {self_repr}")

    def __mul__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__mul__", other, f"{self_repr} * {other_repr}")

    def __rmul__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rmul__", other, f"{other_repr} * {self_repr}")

    def __matmul__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__matmul__", other, f"{self_repr} @ {other_repr}")

    def __rmatmul__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rmatmul__", other, f"{other_repr} @ {self_repr}")

    def __pow__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__pow__", other, f"{self_repr} ** {other_repr}")

    def __rpow__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rpow__", other, f"{other_repr} ** {self_repr}")

    def __truediv__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__truediv__", other, f"{self_repr} / {other_repr}")

    def __rtruediv__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rtruediv__", other, f"{other_repr} / {self_repr}")

    def __floordiv__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__floordiv__", other, f"{self_repr} // {other_repr}")

    def __rfloordiv__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rfloordiv__", other, f"{other_repr} // {self_repr}")

    def __ge__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__ge__", other, f"{self_repr} >= {other_repr}")

    def __gt__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__gt__", other, f"{self_repr} > {other_repr}")

    def __le__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__le__", other, f"{self_repr} <= {other_repr}")

    def __lt__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__lt__", other, f"{self_repr} < {other_repr}")

    def __eq__(self, other: object) -> Expression:  # type: ignore[override]
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__eq__", other, f"{self_repr} == {other_repr}")

    def __ne__(self, other: object) -> Expression:  # type: ignore[override]
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__ne__", other, f"{self_repr} != {other_repr}")

    def __mod__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__mod__", other, f"{self_repr} % {other_repr}")

    def __rmod__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rmod__", other, f"{other_repr} % {self_repr}")

    # Logical ops
    def __and__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__and__", other, f"{self_repr} & {other_repr}")

    def __rand__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rand__", other, f"{other_repr} & {self_repr}")

    def __or__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__or__", other, f"{self_repr} | {other_repr}")

    def __ror__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__ror__", other, f"{other_repr} | {self_repr}")

    def __xor__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__xor__", other, f"{self_repr} ^ {other_repr}")

    def __rxor__(self, other: Any) -> Expression:
        self_repr, other_repr = self._maybe_wrap_parentheses(other)
        return self._with_op("__rxor__", other, f"{other_repr} ^ {self_repr}")

    def __invert__(self) -> Expression:
        return Expression(
            lambda df: ~self._eval_expression(df),
            f"~{self._repr_str}",
            needs_parenthese=True,
        )

    def __neg__(self) -> Expression:
        if self._needs_parentheses:
            repr_str = f"-({self._repr_str})"
        else:
            repr_str = f"-{self._repr_str}"
        return Expression(
            lambda df: -self._eval_expression(df),
            repr_str,
            needs_parenthese=True,
        )

    def __pos__(self) -> Expression:
        if self._needs_parentheses:
            repr_str = f"+({self._repr_str})"
        else:
            repr_str = f"+{self._repr_str}"
        return Expression(
            lambda df: +self._eval_expression(df),
            repr_str,
            needs_parenthese=True,
        )

    def __abs__(self) -> Expression:
        return Expression(
            lambda df: abs(self._eval_expression(df)),
            f"abs({self._repr_str})",
            needs_parenthese=True,
        )

    def __array_ufunc__(
        self, ufunc: Callable[..., Any], method: str, *inputs: Any, **kwargs: Any
    ) -> Expression:
        # Defer NumPy ufunc application, e.g. np.power(pd.col("a"), 2).
        def func(df: DataFrame) -> Any:
            parsed_inputs = _parse_args(df, *inputs)
            # BUG FIX: kwargs must be forwarded as keyword arguments;
            # `_parse_kwargs(df, *kwargs)` unpacked the keys positionally and
            # raised TypeError whenever a ufunc was called with kwargs.
            parsed_kwargs = _parse_kwargs(df, **kwargs)
            return ufunc(*parsed_inputs, **parsed_kwargs)

        args_str = _pretty_print_args_kwargs(*inputs, **kwargs)
        repr_str = f"{ufunc.__name__}({args_str})"
        return Expression(func, repr_str)

    def __getitem__(self, item: Any) -> Expression:
        return self._with_op(
            "__getitem__", item, f"{self!r}[{item!r}]", needs_parentheses=True
        )

    def _call_with_func(self, func: Callable, **kwargs: Any) -> Expression:
        """Defer a call to ``func``, evaluating Expression kwargs lazily."""

        def wrapped(df: DataFrame) -> Any:
            parsed_kwargs = _parse_kwargs(df, **kwargs)
            return func(**parsed_kwargs)

        args_str = _pretty_print_args_kwargs(**kwargs)
        repr_str = func.__name__ + "(" + args_str + ")"
        return Expression(wrapped, repr_str)

    def __call__(self, *args: Any, **kwargs: Any) -> Expression:
        # Defer calling the object this expression evaluates to, e.g. the
        # bound method produced by `pd.col("a").sum`.
        def func(df: DataFrame, *args: Any, **kwargs: Any) -> Any:
            parsed_args = _parse_args(df, *args)
            parsed_kwargs = _parse_kwargs(df, **kwargs)
            return self._eval_expression(df)(*parsed_args, **parsed_kwargs)

        args_str = _pretty_print_args_kwargs(*args, **kwargs)
        repr_str = f"{self._repr_str}({args_str})"
        return Expression(lambda df: func(df, *args, **kwargs), repr_str)

    def __getattr__(self, name: str, /) -> Any:
        # Defer attribute access (accessors like `.str`, `.dt`, methods, ...).
        repr_str = f"{self!r}"
        if self._needs_parentheses:
            repr_str = f"({repr_str})"
        repr_str += f".{name}"
        return Expression(lambda df: getattr(self._eval_expression(df), name), repr_str)

    def __repr__(self) -> str:
        return self._repr_str or "Expr(...)"

    # Unsupported ops
    def __bool__(self) -> NoReturn:
        raise TypeError("boolean value of an expression is ambiguous")

    def __iter__(self) -> NoReturn:
        raise TypeError("Expression objects are not iterable")

    def __copy__(self) -> NoReturn:
        raise TypeError("Expression objects are not copiable")

    def __deepcopy__(self, memo: dict[int, Any] | None) -> NoReturn:
        raise TypeError("Expression objects are not copiable")
@set_module("pandas")
def col(col_name: Hashable) -> Expression:
    """
    Generate deferred object representing a column of a DataFrame.

    ``pd.col(col_name)`` can be used anywhere that accepts
    ``lambda df: df[col_name]``, such as :meth:`DataFrame.assign` or
    :meth:`DataFrame.loc`.

    .. versionadded:: 3.0.0

    Parameters
    ----------
    col_name : Hashable
        Column name.

    Returns
    -------
    `pandas.api.typing.Expression`
        A deferred object representing a column of a DataFrame.

    See Also
    --------
    DataFrame.query : Query columns of a dataframe using string expressions.

    Examples
    --------
    >>> df = pd.DataFrame({"name": ["beluga", "narwhal"], "speed": [100, 110]})
    >>> df.assign(name_titlecase=pd.col("name").str.title())
          name  speed name_titlecase
    0   beluga    100         Beluga
    1  narwhal    110        Narwhal

    >>> df.loc[pd.col("speed") > 105]
          name  speed
    1  narwhal    110
    """
    if not isinstance(col_name, Hashable):
        raise TypeError(f"Expected Hashable, got: {type(col_name)}")

    def lookup(df: DataFrame) -> Series:
        # Fail with a helpful (truncated) listing of available columns.
        if col_name in df.columns:
            return df[col_name]
        columns_str = str(df.columns.tolist())
        if len(columns_str) > 90:
            columns_str = columns_str[:90] + "...]"
        raise ValueError(
            f"Column '{col_name}' not found in given DataFrame.\n\n"
            f"Hint: did you mean one of {columns_str} instead?"
        )

    return Expression(lookup, f"col({col_name!r})")
# Public API of this module.
__all__ = ["Expression", "col"]
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/core/col.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
pandas-dev/pandas:pandas/tests/test_col.py | from collections.abc import Callable
import copy
from datetime import datetime
import operator
import re
import numpy as np
import pytest
from pandas._libs.properties import cache_readonly
import pandas as pd
import pandas._testing as tm
from pandas.api.typing import Expression
from pandas.tests.test_register_accessor import ensure_removed
@pytest.mark.parametrize(
    ("expr", "expected_values", "expected_str"),
    [
        (pd.col("a"), [1, 2], "col('a')"),
        (pd.col("a") * 2, [2, 4], "col('a') * 2"),
        (pd.col("a").sum(), [3, 3], "col('a').sum()"),
        (pd.col("a") + 1, [2, 3], "col('a') + 1"),
        (1 + pd.col("a"), [2, 3], "1 + col('a')"),
        (pd.col("a") - 1, [0, 1], "col('a') - 1"),
        (1 - pd.col("a"), [0, -1], "1 - col('a')"),
        (pd.col("a") * 1, [1, 2], "col('a') * 1"),
        (1 * pd.col("a"), [1, 2], "1 * col('a')"),
        (2 ** pd.col("a"), [2, 4], "2 ** col('a')"),
        (pd.col("a") ** 2, [1, 4], "col('a') ** 2"),
        (pd.col("a") / 1, [1.0, 2.0], "col('a') / 1"),
        (1 / pd.col("a"), [1.0, 0.5], "1 / col('a')"),
        (pd.col("a") // 1, [1, 2], "col('a') // 1"),
        (1 // pd.col("a"), [1, 0], "1 // col('a')"),
        (pd.col("a") % 1, [0, 0], "col('a') % 1"),
        (1 % pd.col("a"), [0, 1], "1 % col('a')"),
        (pd.col("a") > 1, [False, True], "col('a') > 1"),
        (pd.col("a") >= 1, [True, True], "col('a') >= 1"),
        (pd.col("a") < 1, [False, False], "col('a') < 1"),
        (pd.col("a") <= 1, [True, False], "col('a') <= 1"),
        (pd.col("a") == 1, [True, False], "col('a') == 1"),
        (np.power(pd.col("a"), 2), [1, 4], "power(col('a'), 2)"),
        (np.divide(pd.col("a"), pd.col("a")), [1.0, 1.0], "divide(col('a'), col('a'))"),
        (
            (pd.col("a") + 1) * (pd.col("b") + 2),
            [10, 18],
            "(col('a') + 1) * (col('b') + 2)",
        ),
        (
            (pd.col("a") - 1).astype("bool"),
            [False, True],
            "(col('a') - 1).astype('bool')",
        ),
        # Unary operators
        (-pd.col("a"), [-1, -2], "-col('a')"),
        (+pd.col("a"), [1, 2], "+col('a')"),
        (-(pd.col("a") + 1), [-2, -3], "-(col('a') + 1)"),
        (-pd.col("a") * 2, [-2, -4], "(-col('a')) * 2"),
        (abs(pd.col("a")), [1, 2], "abs(col('a'))"),
        (abs(pd.col("a") - 2), [1, 0], "abs(col('a') - 2)"),
    ],
)
def test_col_simple(
    expr: Expression, expected_values: list[object], expected_str: str
) -> None:
    # https://github.com/pandas-dev/pandas/pull/64267
    # For each case: assigning `expr` yields `expected_values`, and the
    # expression's str() round-trips to `expected_str`.
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    result = df.assign(c=expr)
    expected = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": expected_values})
    tm.assert_frame_equal(result, expected)
    assert str(expr) == expected_str
@pytest.mark.parametrize(
    ("op", "expected_values", "expected_str"),
    [
        (operator.iadd, [3, 4], "col('a') + 2"),
        (operator.iand, [0, 2], "col('a') & 2"),
        (operator.ifloordiv, [0, 1], "col('a') // 2"),
        (operator.imod, [1, 0], "col('a') % 2"),
        (operator.imul, [2, 4], "col('a') * 2"),
        (operator.ior, [3, 2], "col('a') | 2"),
        (operator.ipow, [1, 4], "col('a') ** 2"),
        (operator.isub, [-1, 0], "col('a') - 2"),
        (operator.itruediv, [0.5, 1.0], "col('a') / 2"),
        (operator.ixor, [3, 0], "col('a') ^ 2"),
    ],
)
def test_inplace_ops(
    op: Callable, expected_values: list[object], expected_str: str
) -> None:
    # https://github.com/pandas-dev/pandas/pull/64267
    # In-place operators rebind the name to a new Expression (same repr as
    # the plain binary form).
    df = pd.DataFrame({"a": [1, 2]})
    expr = pd.col("a")
    expr = op(expr, 2)
    result = df.assign(c=expr)
    expected = pd.DataFrame({"a": [1, 2], "c": expected_values})
    tm.assert_frame_equal(result, expected)
    assert str(expr) == expected_str
def test_matmul():
    # https://github.com/pandas-dev/pandas/pull/64267
    frame = pd.DataFrame({"a": [1, 2]})
    expected = pd.DataFrame({"a": [1, 2], "c": [11, 11]})

    # col on the left of @
    left_expr = pd.col("a") @ [3, 4]
    tm.assert_frame_equal(frame.assign(c=left_expr), expected)
    assert str(left_expr) == "col('a') @ [3, 4]"

    # col on the right of @ (reflected matmul)
    right_expr = [3, 4] @ pd.col("a")
    tm.assert_frame_equal(frame.assign(c=right_expr), expected)
    assert str(right_expr) == "[3, 4] @ col('a')"
def test_frame_getitem() -> None:
    # https://github.com/pandas-dev/pandas/pull/63439
    frame = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    mask = pd.col("a") == 2
    # boolean-expression indexing selects the matching rows
    tm.assert_frame_equal(frame[mask], frame.iloc[[1]])
def test_frame_setitem() -> None:
    # https://github.com/pandas-dev/pandas/pull/63439
    result = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # assignment through a boolean expression updates the matching rows
    result[pd.col("a") == 2] = 100
    expected = pd.DataFrame({"a": [1, 100], "b": [3, 100]})
    tm.assert_frame_equal(result, expected)
def test_frame_loc() -> None:
    # https://github.com/pandas-dev/pandas/pull/63439
    result = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # .loc accepts an Expression as the row indexer
    result.loc[pd.col("a") == 2, "b"] = 100
    expected = pd.DataFrame({"a": [1, 2], "b": [3, 100]})
    tm.assert_frame_equal(result, expected)
def test_frame_iloc() -> None:
    # https://github.com/pandas-dev/pandas/pull/63439
    result = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # .iloc accepts an Expression as the row indexer
    result.iloc[pd.col("a") == 2, 1] = 100
    expected = pd.DataFrame({"a": [1, 2], "b": [3, 100]})
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    ("expr", "expected_values", "expected_str"),
    [
        (pd.col("a").dt.year, [2020], "col('a').dt.year"),
        (pd.col("a").dt.strftime("%B"), ["January"], "col('a').dt.strftime('%B')"),
        (pd.col("b").str.upper(), ["FOO"], "col('b').str.upper()"),
    ],
)
def test_namespaces(
    expr: Expression, expected_values: list[object], expected_str: str
) -> None:
    # Accessor namespaces (.dt, .str) resolve through deferred expressions.
    df = pd.DataFrame({"a": [datetime(2020, 1, 1)], "b": ["foo"]})
    result = df.assign(c=expr)
    expected = pd.DataFrame(
        {"a": [datetime(2020, 1, 1)], "b": ["foo"], "c": expected_values}
    )
    tm.assert_frame_equal(result, expected, check_dtype=False)
    assert str(expr) == expected_str
def test_invalid() -> None:
    """Referencing a missing column raises ValueError with a suggestion list.

    With many columns the suggestion list is truncated with an ellipsis.
    """
    # Fix: removed a stray no-op string-literal statement ("") that was left
    # between the message construction and the second assertion.
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    with pytest.raises(ValueError, match=r"did you mean one of \['a', 'b'\] instead"):
        df.assign(c=pd.col("c").mean())

    df = pd.DataFrame({f"col_{i}": [0] for i in range(11)})
    msg = (
        "did you mean one of "
        r"\['col_0', 'col_1', 'col_2', 'col_3', "
        "'col_4', 'col_5', 'col_6', 'col_7', "
        r"'col_8', 'col_9',\.\.\.\] instead"
    )
    with pytest.raises(ValueError, match=msg):
        df.assign(c=pd.col("c").mean())
def test_custom_accessor() -> None:
    """Expressions resolve accessors registered via register_series_accessor."""

    class XYZAccessor:
        def __init__(self, pandas_obj):
            self._obj = pandas_obj

        def mean(self):
            return self._obj.mean()

    df = pd.DataFrame({"a": [1, 2, 3]})
    with ensure_removed(pd.Series, "xyz"):
        pd.api.extensions.register_series_accessor("xyz")(XYZAccessor)
        result = df.assign(b=pd.col("a").xyz.mean())
        expected = pd.DataFrame({"a": [1, 2, 3], "b": [2.0, 2.0, 2.0]})
        tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    ("expr", "expected_values", "expected_str"),
    [
        (
            pd.col("a") & pd.col("b"),
            [False, False, True, False],
            "col('a') & col('b')",
        ),
        (
            pd.col("a") & True,
            [True, False, True, False],
            "col('a') & True",
        ),
        (
            pd.col("a") | pd.col("b"),
            [True, True, True, True],
            "col('a') | col('b')",
        ),
        (
            pd.col("a") | False,
            [True, False, True, False],
            "col('a') | False",
        ),
        (
            pd.col("a") ^ pd.col("b"),
            [True, True, False, True],
            "col('a') ^ col('b')",
        ),
        (
            pd.col("a") ^ True,
            [False, True, False, True],
            "col('a') ^ True",
        ),
        (
            ~pd.col("a"),
            [False, True, False, True],
            "~col('a')",
        ),
    ],
)
def test_col_logical_ops(
    expr: Expression, expected_values: list[bool], expected_str: str
) -> None:
    """Logical operators on expressions evaluate element-wise and repr cleanly."""
    # https://github.com/pandas-dev/pandas/issues/63322
    data = {"a": [True, False, True, False], "b": [False, True, True, True]}
    df = pd.DataFrame(data)
    expected = pd.DataFrame({**data, "c": expected_values})
    tm.assert_frame_equal(df.assign(c=expr), expected)
    assert str(expr) == expected_str
    # The same expression must also be usable as a .loc row mask.
    tm.assert_frame_equal(df.loc[expr], df[expected_values])
def test_expression_getitem() -> None:
    """Indexing into an expression defers __getitem__ and reprs accordingly."""
    # https://github.com/pandas-dev/pandas/pull/63439
    expr = pd.col("a")[1]
    assert str(expr) == "col('a')[1]"
    df = pd.DataFrame({"a": [1, 2, 3]})
    tm.assert_frame_equal(
        df.assign(b=expr), pd.DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]})
    )
def test_property() -> None:
    """Attribute access on an expression resolves Series properties lazily."""
    # https://github.com/pandas-dev/pandas/pull/63439
    expr = pd.col("a").index
    assert str(expr) == "col('a').index"
    df = pd.DataFrame({"a": [1, 2, 3]})
    tm.assert_frame_equal(
        df.assign(b=expr), pd.DataFrame({"a": [1, 2, 3], "b": [0, 1, 2]})
    )
def test_cached_property() -> None:
    """Expressions resolve cache_readonly attributes such as Index.dtype."""
    # https://github.com/pandas-dev/pandas/pull/63439
    # Sanity check that Index.dtype really is a cached property, otherwise
    # this test would not exercise the intended code path.
    assert isinstance(pd.Index.dtype, cache_readonly)
    expr = pd.col("a").index.dtype
    assert str(expr) == "col('a').index.dtype"
    df = pd.DataFrame({"a": [1, 2, 3]})
    tm.assert_frame_equal(
        df.assign(b=expr), pd.DataFrame({"a": [1, 2, 3], "b": np.int64})
    )
def test_qcut() -> None:
    """pd.qcut accepts an expression and reprs with its keyword arguments."""
    # https://github.com/pandas-dev/pandas/pull/63439
    df = pd.DataFrame({"a": [1, 2, 3]})
    expr = pd.qcut(pd.col("a"), 3)
    expected_repr = "qcut(x=col('a'), q=3, labels=None, retbins=False, precision=3)"
    assert str(expr) == expected_repr, str(expr)
    expected = pd.DataFrame({"a": [1, 2, 3], "b": pd.qcut(df["a"], 3)})
    tm.assert_frame_equal(df.assign(b=expr), expected)
def test_where() -> None:
    """Expression.where supports scalar and expression replacement values."""
    # https://github.com/pandas-dev/pandas/pull/63439
    df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

    # Scalar replacement value.
    scalar_expr = pd.col("a").where(pd.col("b") == 5, 100)
    assert str(scalar_expr) == "col('a').where(col('b') == 5, 100)", str(scalar_expr)
    tm.assert_frame_equal(
        df.assign(c=scalar_expr),
        pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [100, 2, 100]}),
    )

    # Expression replacement value.
    expr_repl = pd.col("a").where(pd.col("b") == 5, pd.col("a") + 1)
    assert str(expr_repl) == "col('a').where(col('b') == 5, col('a') + 1)", str(expr_repl)
    tm.assert_frame_equal(
        df.assign(c=expr_repl),
        pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [2, 2, 4]}),
    )
# Unsupported ops
def test_bool():
    """Expressions refuse implicit truthiness."""
    msg = "boolean value of an expression is ambiguous"
    with pytest.raises(TypeError, match=msg):
        bool(pd.col("a"))
def test_iter():
    """Expressions are not iterable."""
    msg = "Expression objects are not iterable"
    with pytest.raises(TypeError, match=msg):
        iter(pd.col("a"))
def test_contains():
    """Membership tests against an expression raise TypeError."""
    # Python 3.14 changes the message from "is not iterable" to
    # "is not a container or iterable"
    msg = "argument of type 'Expression' is not .*iterable"
    with pytest.raises(TypeError, match=msg):
        1 in pd.col("a")
def test_copy():
    """Shallow copying an expression is rejected."""
    msg = "Expression objects are not copiable"
    with pytest.raises(TypeError, match=msg):
        copy.copy(pd.col("a"))
def test_deepcopy():
    """Deep copying an expression is rejected."""
    msg = "Expression objects are not copiable"
    with pytest.raises(TypeError, match=msg):
        copy.deepcopy(pd.col("a"))
def test_divmod():
    """divmod is not defined for expressions."""
    msg = re.escape("unsupported operand type(s) for divmod(): 'Expression' and 'int'")
    with pytest.raises(TypeError, match=msg):
        divmod(pd.col("a"), 2)
def test_len():
    """len() is not defined for expressions."""
    msg = "object of type 'Expression' has no len()"
    with pytest.raises(TypeError, match=msg):
        len(pd.col("a"))
def test_round():
    """round() is not defined for expressions."""
    msg = "type Expression doesn't define __round__ method"
    with pytest.raises(TypeError, match=msg):
        round(pd.col("a"), 2)
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/tests/test_col.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 324,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pandas-dev/pandas:pandas/io/iceberg.py | from typing import (
Any,
)
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import set_module
from pandas import DataFrame
@set_module("pandas")
def read_iceberg(
    table_identifier: str,
    catalog_name: str | None = None,
    *,
    catalog_properties: dict[str, Any] | None = None,
    columns: list[str] | None = None,
    row_filter: str | None = None,
    case_sensitive: bool = True,
    snapshot_id: int | None = None,
    limit: int | None = None,
    scan_properties: dict[str, Any] | None = None,
) -> DataFrame:
    """
    Read an Apache Iceberg table into a pandas DataFrame.

    .. versionadded:: 3.0.0

    .. warning::

        read_iceberg is experimental and may change without warning.

    Parameters
    ----------
    table_identifier : str
        Table identifier.
    catalog_name : str, optional
        The name of the catalog.
    catalog_properties : dict of {str: str}, optional
        The properties that are used next to the catalog configuration.
    columns : list of str, optional
        A list of strings representing the column names to return in the output
        dataframe.
    row_filter : str, optional
        A string that describes the desired rows.
    case_sensitive : bool, default True
        If True column matching is case sensitive.
    snapshot_id : int, optional
        Snapshot ID to time travel to. By default the table will be scanned as of the
        current snapshot ID.
    limit : int, optional
        An integer representing the number of rows to return in the scan result.
        By default all matching rows will be fetched.
    scan_properties : dict of {str: obj}, optional
        Additional Table properties as a dictionary of string key value pairs to use
        for this scan.

    Returns
    -------
    DataFrame
        DataFrame based on the Iceberg table.

    See Also
    --------
    read_parquet : Read a Parquet file.

    Examples
    --------
    >>> df = pd.read_iceberg(
    ...     table_identifier="my_table",
    ...     catalog_name="my_catalog",
    ...     catalog_properties={"s3.secret-access-key": "my-secret"},
    ...     row_filter="trip_distance >= 10.0",
    ...     columns=["VendorID", "tpep_pickup_datetime"],
    ... )  # doctest: +SKIP
    """
    pyiceberg_catalog = import_optional_dependency("pyiceberg.catalog")
    pyiceberg_expressions = import_optional_dependency("pyiceberg.expressions")

    catalog = pyiceberg_catalog.load_catalog(
        catalog_name,
        **(catalog_properties or {}),
    )
    table = catalog.load_table(table_identifier)
    # An unset filter means "match everything" in PyIceberg terms, and an
    # unset column list means "all columns".
    scan = table.scan(
        row_filter=(
            pyiceberg_expressions.AlwaysTrue() if row_filter is None else row_filter
        ),
        selected_fields=("*",) if columns is None else tuple(columns),
        case_sensitive=case_sensitive,
        snapshot_id=snapshot_id,
        options=scan_properties or {},
        limit=limit,
    )
    return scan.to_pandas()
def to_iceberg(
    df: DataFrame,
    table_identifier: str,
    catalog_name: str | None = None,
    *,
    catalog_properties: dict[str, Any] | None = None,
    location: str | None = None,
    append: bool = False,
    snapshot_properties: dict[str, str] | None = None,
) -> None:
    """
    Write a DataFrame to an Apache Iceberg table.

    .. versionadded:: 3.0.0

    Parameters
    ----------
    table_identifier : str
        Table identifier.
    catalog_name : str, optional
        The name of the catalog.
    catalog_properties : dict of {str: str}, optional
        The properties that are used next to the catalog configuration.
    location : str, optional
        Location for the table.
    append : bool, default False
        If ``True``, append data to the table, instead of replacing the content.
    snapshot_properties : dict of {str: str}, optional
        Custom properties to be added to the snapshot summary

    See Also
    --------
    read_iceberg : Read an Apache Iceberg table.
    DataFrame.to_parquet : Write a DataFrame in Parquet format.
    """
    pa = import_optional_dependency("pyarrow")
    pyiceberg_catalog = import_optional_dependency("pyiceberg.catalog")

    catalog = pyiceberg_catalog.load_catalog(
        catalog_name,
        **(catalog_properties or {}),
    )
    arrow_table = pa.Table.from_pandas(df)
    # we could add `partition_spec`, `sort_order` and `properties` in the
    # future, but it may not be trivial without exposing PyIceberg objects
    table = catalog.create_table_if_not_exists(
        identifier=table_identifier,
        schema=arrow_table.schema,
        location=location,
    )
    writer = table.append if append else table.overwrite
    writer(arrow_table, snapshot_properties=snapshot_properties or {})
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/io/iceberg.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
pandas-dev/pandas:pandas/tests/io/test_iceberg.py | """
Tests for the Apache Iceberg format.
Tests in this file use a simple Iceberg catalog based on SQLite, with the same
data used for Parquet tests (``pandas/tests/io/data/parquet/simple.parquet``).
"""
import collections
import importlib
import pathlib
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.io.iceberg import read_iceberg
pytestmark = pytest.mark.single_cpu
pyiceberg = pytest.importorskip("pyiceberg")
pyiceberg_catalog = pytest.importorskip("pyiceberg.catalog")
pq = pytest.importorskip("pyarrow.parquet")
Catalog = collections.namedtuple("Catalog", ["name", "uri", "warehouse"])
@pytest.fixture
def catalog(request, tmp_path):
    """SQLite-backed Iceberg catalog seeded with the simple.parquet data.

    The catalog stores the full path of data files, so it has to be created
    dynamically rather than saved in pandas/tests/io/data like other formats.
    When parametrized with a name, the catalog is also registered in
    ~/.pyiceberg.yaml so it can be resolved by name.
    """
    uri = f"sqlite:///{tmp_path}/catalog.sqlite"
    warehouse = f"file://{tmp_path}"
    name = getattr(request, "param", None)

    cat = pyiceberg_catalog.load_catalog(
        name or "default",
        type="sql",
        uri=uri,
        warehouse=warehouse,
    )
    cat.create_namespace("ns")
    arrow_table = pq.read_table(
        pathlib.Path(__file__).parent / "data" / "parquet" / "simple.parquet"
    )
    cat.create_table("ns.my_table", schema=arrow_table.schema).append(arrow_table)

    config_path = pathlib.Path.home() / ".pyiceberg.yaml"
    if name is not None:
        with open(config_path, "w", encoding="utf-8") as f:
            f.write(f"""\
catalog:
  {name}:
    type: sql
    uri: {uri}
    warehouse: {warehouse}""")
        importlib.reload(pyiceberg_catalog)  # needed to reload the config file
    yield Catalog(name=name or "default", uri=uri, warehouse=warehouse)
    if name is not None:
        config_path.unlink()
class TestIceberg:
    """Round-trip and option coverage for read_iceberg / DataFrame.to_iceberg."""

    def test_read(self, catalog):
        result = read_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
        )
        expected = pd.DataFrame({"A": [1, 2, 3], "B": ["foo", "foo", "foo"]})
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("catalog", ["default", "pandas_tests"], indirect=True)
    def test_read_by_catalog_name(self, catalog):
        result = read_iceberg("ns.my_table", catalog_name=catalog.name)
        expected = pd.DataFrame({"A": [1, 2, 3], "B": ["foo", "foo", "foo"]})
        tm.assert_frame_equal(result, expected)

    def test_read_with_row_filter(self, catalog):
        result = read_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
            row_filter="A > 1",
        )
        tm.assert_frame_equal(
            result, pd.DataFrame({"A": [2, 3], "B": ["foo", "foo"]})
        )

    def test_read_with_case_sensitive(self, catalog):
        # Case-insensitive matching resolves "a" to column "A".
        result = read_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
            columns=["a"],
            case_sensitive=False,
        )
        tm.assert_frame_equal(result, pd.DataFrame({"A": [1, 2, 3]}))
        # Case-sensitive matching must fail for the wrong case.
        with pytest.raises(ValueError, match="^Could not find column"):
            read_iceberg(
                "ns.my_table",
                catalog_properties={"uri": catalog.uri},
                columns=["a"],
                case_sensitive=True,
            )

    def test_read_with_limit(self, catalog):
        result = read_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
            limit=2,
        )
        tm.assert_frame_equal(
            result, pd.DataFrame({"A": [1, 2], "B": ["foo", "foo"]})
        )

    def test_write(self, catalog):
        df = pd.DataFrame({"A": [1, 2, 3], "B": ["foo", "foo", "foo"]})
        df.to_iceberg(
            "ns.new_table",
            catalog_properties={"uri": catalog.uri},
            location=catalog.warehouse,
        )
        roundtrip = read_iceberg(
            "ns.new_table",
            catalog_properties={"uri": catalog.uri},
        )
        tm.assert_frame_equal(roundtrip, df)

    @pytest.mark.parametrize("catalog", ["default", "pandas_tests"], indirect=True)
    def test_write_by_catalog_name(self, catalog):
        df = pd.DataFrame({"A": [1, 2, 3], "B": ["foo", "foo", "foo"]})
        df.to_iceberg("ns.new_table", catalog_name=catalog.name)
        roundtrip = read_iceberg("ns.new_table", catalog_name=catalog.name)
        tm.assert_frame_equal(roundtrip, df)

    def test_write_existing_table_with_append_true(self, catalog):
        original = read_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
        )
        new = pd.DataFrame({"A": [1, 2, 3], "B": ["foo", "foo", "foo"]})
        new.to_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
            location=catalog.warehouse,
            append=True,
        )
        result = read_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
        )
        tm.assert_frame_equal(result, pd.concat([original, new], ignore_index=True))

    def test_write_existing_table_with_append_false(self, catalog):
        df = pd.DataFrame({"A": [1, 2, 3], "B": ["foo", "foo", "foo"]})
        df.to_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
            location=catalog.warehouse,
            append=False,
        )
        result = read_iceberg(
            "ns.my_table",
            catalog_properties={"uri": catalog.uri},
        )
        tm.assert_frame_equal(result, df)
| {
"repo_id": "pandas-dev/pandas",
"file_path": "pandas/tests/io/test_iceberg.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless/settings/custom.py | import os
from pathlib import Path
from typing import Any
from paperless.settings.parsers import get_choice_from_env
from paperless.settings.parsers import get_int_from_env
from paperless.settings.parsers import parse_dict_from_str
def parse_db_settings(data_dir: Path) -> dict[str, dict[str, Any]]:
    """Build the Django ``DATABASES`` dict from environment variables.

    Core connection variables (no deprecation):
      - PAPERLESS_DBENGINE (sqlite/postgresql/mariadb)
      - PAPERLESS_DBHOST, PAPERLESS_DBPORT
      - PAPERLESS_DBNAME, PAPERLESS_DBUSER, PAPERLESS_DBPASS

    Advanced options can be set via:
      - Legacy individual env vars (deprecated in v3.0, removed in v3.2)
      - PAPERLESS_DB_OPTIONS (recommended v3+ approach)

    Args:
        data_dir: Data directory that holds the SQLite database file.

    Returns:
        A mapping with a single "default" entry suitable for ``DATABASES``.
    """
    try:
        backend = get_choice_from_env(
            "PAPERLESS_DBENGINE",
            {"sqlite", "postgresql", "mariadb"},
            default="sqlite",
        )
    except ValueError:
        # MariaDB users already had to set PAPERLESS_DBENGINE, so it was picked
        # up above. SQLite users didn't need to set anything, so the presence
        # of a host is the PostgreSQL signal.
        backend = "postgresql" if "PAPERLESS_DBHOST" in os.environ else "sqlite"

    if backend == "sqlite":
        config: dict[str, Any] = {
            "ENGINE": "django.db.backends.sqlite3",
            "NAME": str((data_dir / "db.sqlite3").resolve()),
        }
        engine_defaults: dict[str, Any] = {}
    elif backend == "postgresql":
        config = {
            "ENGINE": "django.db.backends.postgresql",
            "HOST": os.getenv("PAPERLESS_DBHOST"),
            "NAME": os.getenv("PAPERLESS_DBNAME", "paperless"),
            "USER": os.getenv("PAPERLESS_DBUSER", "paperless"),
            "PASSWORD": os.getenv("PAPERLESS_DBPASS", "paperless"),
        }
        engine_defaults = {
            "sslmode": os.getenv("PAPERLESS_DBSSLMODE", "prefer"),
            "sslrootcert": os.getenv("PAPERLESS_DBSSLROOTCERT"),
            "sslcert": os.getenv("PAPERLESS_DBSSLCERT"),
            "sslkey": os.getenv("PAPERLESS_DBSSLKEY"),
        }
        pool_size = get_int_from_env("PAPERLESS_DB_POOLSIZE")
        if pool_size is not None:
            engine_defaults["pool"] = {"min_size": 1, "max_size": pool_size}
    elif backend == "mariadb":
        config = {
            "ENGINE": "django.db.backends.mysql",
            "HOST": os.getenv("PAPERLESS_DBHOST"),
            "NAME": os.getenv("PAPERLESS_DBNAME", "paperless"),
            "USER": os.getenv("PAPERLESS_DBUSER", "paperless"),
            "PASSWORD": os.getenv("PAPERLESS_DBPASS", "paperless"),
        }
        engine_defaults = {
            "read_default_file": "/etc/mysql/my.cnf",
            "charset": "utf8mb4",
            "collation": "utf8mb4_unicode_ci",
            "ssl_mode": os.getenv("PAPERLESS_DBSSLMODE", "PREFERRED"),
            "ssl": {
                "ca": os.getenv("PAPERLESS_DBSSLROOTCERT"),
                "cert": os.getenv("PAPERLESS_DBSSLCERT"),
                "key": os.getenv("PAPERLESS_DBSSLKEY"),
            },
        }
    else:  # pragma: no cover
        raise NotImplementedError(backend)

    # External databases may specify a port; SQLite never does.
    if backend != "sqlite":
        port = get_int_from_env("PAPERLESS_DBPORT")
        if port is not None:
            config["PORT"] = port

    # Timeout is common to all engines but uses engine-specific key names.
    timeout = get_int_from_env("PAPERLESS_DB_TIMEOUT")
    if timeout is not None:
        key = "timeout" if backend == "sqlite" else "connect_timeout"
        engine_defaults[key] = timeout

    # Entries from PAPERLESS_DB_OPTIONS override the defaults built above.
    config["OPTIONS"] = parse_dict_from_str(
        os.getenv("PAPERLESS_DB_OPTIONS"),
        defaults=engine_defaults,
        separator=";",
        type_map={
            # SQLite options
            "timeout": int,
            # Postgres/MariaDB options
            "connect_timeout": int,
            "pool.min_size": int,
            "pool.max_size": int,
        },
    )
    return {"default": config}
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/settings/custom.py",
"license": "GNU General Public License v3.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless/tests/settings/test_custom_parsers.py | import os
from pathlib import Path
import pytest
from pytest_mock import MockerFixture
from paperless.settings.custom import parse_db_settings
class TestParseDbSettings:
    """Test suite for parse_db_settings function."""

    # Each case maps a full environment (set with clear=True) to the exact
    # Django DATABASES dict parse_db_settings must produce. The sqlite NAME
    # is a placeholder (None) filled in with tmp_path inside the test body.
    @pytest.mark.parametrize(
        ("env_vars", "expected_database_settings"),
        [
            pytest.param(
                {},
                {
                    "default": {
                        "ENGINE": "django.db.backends.sqlite3",
                        "NAME": None,  # Will be replaced with tmp_path
                        "OPTIONS": {},
                    },
                },
                id="default-sqlite",
            ),
            pytest.param(
                {
                    "PAPERLESS_DBENGINE": "sqlite",
                    "PAPERLESS_DB_OPTIONS": "timeout=30",
                },
                {
                    "default": {
                        "ENGINE": "django.db.backends.sqlite3",
                        "NAME": None,  # Will be replaced with tmp_path
                        "OPTIONS": {
                            "timeout": 30,
                        },
                    },
                },
                id="sqlite-with-timeout-override",
            ),
            pytest.param(
                {
                    "PAPERLESS_DBENGINE": "postgresql",
                    "PAPERLESS_DBHOST": "localhost",
                },
                {
                    "default": {
                        "ENGINE": "django.db.backends.postgresql",
                        "HOST": "localhost",
                        "NAME": "paperless",
                        "USER": "paperless",
                        "PASSWORD": "paperless",
                        "OPTIONS": {
                            "sslmode": "prefer",
                            "sslrootcert": None,
                            "sslcert": None,
                            "sslkey": None,
                        },
                    },
                },
                id="postgresql-defaults",
            ),
            pytest.param(
                {
                    "PAPERLESS_DBENGINE": "postgresql",
                    "PAPERLESS_DBHOST": "paperless-db-host",
                    "PAPERLESS_DBPORT": "1111",
                    "PAPERLESS_DBNAME": "customdb",
                    "PAPERLESS_DBUSER": "customuser",
                    "PAPERLESS_DBPASS": "custompass",
                    "PAPERLESS_DB_OPTIONS": "pool.max_size=50;pool.min_size=2;sslmode=require",
                },
                {
                    "default": {
                        "ENGINE": "django.db.backends.postgresql",
                        "HOST": "paperless-db-host",
                        "PORT": 1111,
                        "NAME": "customdb",
                        "USER": "customuser",
                        "PASSWORD": "custompass",
                        "OPTIONS": {
                            "sslmode": "require",
                            "sslrootcert": None,
                            "sslcert": None,
                            "sslkey": None,
                            "pool": {
                                "min_size": 2,
                                "max_size": 50,
                            },
                        },
                    },
                },
                id="postgresql-overrides",
            ),
            pytest.param(
                {
                    "PAPERLESS_DBENGINE": "postgresql",
                    "PAPERLESS_DBHOST": "pghost",
                    "PAPERLESS_DB_POOLSIZE": "10",
                },
                {
                    "default": {
                        "ENGINE": "django.db.backends.postgresql",
                        "HOST": "pghost",
                        "NAME": "paperless",
                        "USER": "paperless",
                        "PASSWORD": "paperless",
                        "OPTIONS": {
                            "sslmode": "prefer",
                            "sslrootcert": None,
                            "sslcert": None,
                            "sslkey": None,
                            "pool": {
                                "min_size": 1,
                                "max_size": 10,
                            },
                        },
                    },
                },
                id="postgresql-legacy-poolsize",
            ),
            pytest.param(
                {
                    "PAPERLESS_DBENGINE": "postgresql",
                    "PAPERLESS_DBHOST": "pghost",
                    "PAPERLESS_DBSSLMODE": "require",
                    "PAPERLESS_DBSSLROOTCERT": "/certs/ca.crt",
                    "PAPERLESS_DB_TIMEOUT": "30",
                },
                {
                    "default": {
                        "ENGINE": "django.db.backends.postgresql",
                        "HOST": "pghost",
                        "NAME": "paperless",
                        "USER": "paperless",
                        "PASSWORD": "paperless",
                        "OPTIONS": {
                            "sslmode": "require",
                            "sslrootcert": "/certs/ca.crt",
                            "sslcert": None,
                            "sslkey": None,
                            "connect_timeout": 30,
                        },
                    },
                },
                id="postgresql-legacy-ssl-and-timeout",
            ),
            pytest.param(
                {
                    "PAPERLESS_DBENGINE": "mariadb",
                    "PAPERLESS_DBHOST": "localhost",
                },
                {
                    "default": {
                        "ENGINE": "django.db.backends.mysql",
                        "HOST": "localhost",
                        "NAME": "paperless",
                        "USER": "paperless",
                        "PASSWORD": "paperless",
                        "OPTIONS": {
                            "read_default_file": "/etc/mysql/my.cnf",
                            "charset": "utf8mb4",
                            "collation": "utf8mb4_unicode_ci",
                            "ssl_mode": "PREFERRED",
                            "ssl": {
                                "ca": None,
                                "cert": None,
                                "key": None,
                            },
                        },
                    },
                },
                id="mariadb-defaults",
            ),
            pytest.param(
                {
                    "PAPERLESS_DBENGINE": "mariadb",
                    "PAPERLESS_DBHOST": "paperless-mariadb-host",
                    "PAPERLESS_DBPORT": "5555",
                    "PAPERLESS_DBUSER": "my-cool-user",
                    "PAPERLESS_DBPASS": "my-secure-password",
                    "PAPERLESS_DB_OPTIONS": "ssl.ca=/path/to/ca.pem;ssl_mode=REQUIRED",
                },
                {
                    "default": {
                        "ENGINE": "django.db.backends.mysql",
                        "HOST": "paperless-mariadb-host",
                        "PORT": 5555,
                        "NAME": "paperless",
                        "USER": "my-cool-user",
                        "PASSWORD": "my-secure-password",
                        "OPTIONS": {
                            "read_default_file": "/etc/mysql/my.cnf",
                            "charset": "utf8mb4",
                            "collation": "utf8mb4_unicode_ci",
                            "ssl_mode": "REQUIRED",
                            "ssl": {
                                "ca": "/path/to/ca.pem",
                                "cert": None,
                                "key": None,
                            },
                        },
                    },
                },
                id="mariadb-overrides",
            ),
            pytest.param(
                {
                    "PAPERLESS_DBENGINE": "mariadb",
                    "PAPERLESS_DBHOST": "mariahost",
                    "PAPERLESS_DBSSLMODE": "REQUIRED",
                    "PAPERLESS_DBSSLROOTCERT": "/certs/ca.pem",
                    "PAPERLESS_DBSSLCERT": "/certs/client.pem",
                    "PAPERLESS_DBSSLKEY": "/certs/client.key",
                    "PAPERLESS_DB_TIMEOUT": "25",
                },
                {
                    "default": {
                        "ENGINE": "django.db.backends.mysql",
                        "HOST": "mariahost",
                        "NAME": "paperless",
                        "USER": "paperless",
                        "PASSWORD": "paperless",
                        "OPTIONS": {
                            "read_default_file": "/etc/mysql/my.cnf",
                            "charset": "utf8mb4",
                            "collation": "utf8mb4_unicode_ci",
                            "ssl_mode": "REQUIRED",
                            "ssl": {
                                "ca": "/certs/ca.pem",
                                "cert": "/certs/client.pem",
                                "key": "/certs/client.key",
                            },
                            "connect_timeout": 25,
                        },
                    },
                },
                id="mariadb-legacy-ssl-and-timeout",
            ),
        ],
    )
    def test_parse_db_settings(
        self,
        tmp_path: Path,
        mocker: MockerFixture,
        env_vars: dict[str, str],
        expected_database_settings: dict[str, dict],
    ) -> None:
        """Test various database configurations with defaults and overrides."""
        # Clear environment and set test vars
        mocker.patch.dict(os.environ, env_vars, clear=True)
        # Update expected paths with actual tmp_path
        if (
            "default" in expected_database_settings
            and expected_database_settings["default"]["NAME"] is None
        ):
            expected_database_settings["default"]["NAME"] = str(
                tmp_path / "db.sqlite3",
            )
        settings = parse_db_settings(tmp_path)
        assert settings == expected_database_settings
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/tests/settings/test_custom_parsers.py",
"license": "GNU General Public License v3.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless/tests/settings/test_environment_parsers.py | import os
from pathlib import Path
import pytest
from pytest_mock import MockerFixture
from paperless.settings.parsers import get_choice_from_env
from paperless.settings.parsers import get_int_from_env
from paperless.settings.parsers import parse_dict_from_str
from paperless.settings.parsers import str_to_bool
class TestStringToBool:
    """Tests for str_to_bool's accepted truthy/falsy spellings."""

    @pytest.mark.parametrize(
        "true_value",
        [
            pytest.param("true", id="lowercase_true"),
            pytest.param("1", id="digit_1"),
            pytest.param("T", id="capital_T"),
            pytest.param("y", id="lowercase_y"),
            pytest.param("YES", id="uppercase_YES"),
            pytest.param(" True ", id="whitespace_true"),
        ],
    )
    def test_true_conversion(self, true_value: str):
        """Every accepted 'true' spelling maps to True."""
        result = str_to_bool(true_value)
        assert result is True

    @pytest.mark.parametrize(
        "false_value",
        [
            pytest.param("false", id="lowercase_false"),
            pytest.param("0", id="digit_0"),
            pytest.param("f", id="capital_f"),
            pytest.param("N", id="capital_N"),
            pytest.param("no", id="lowercase_no"),
            pytest.param(" False ", id="whitespace_false"),
        ],
    )
    def test_false_conversion(self, false_value: str):
        """Every accepted 'false' spelling maps to False."""
        result = str_to_bool(false_value)
        assert result is False

    def test_invalid_conversion(self):
        """An unrecognised string raises a ValueError."""
        with pytest.raises(ValueError, match="Cannot convert 'maybe' to a boolean\\."):
            str_to_bool("maybe")
class TestParseDictFromString:
    """Behavioural coverage for parse_dict_from_str."""

    def test_empty_and_none_input(self):
        """None/empty input yields an empty dict; defaults are copied."""
        assert parse_dict_from_str(None) == {}
        assert parse_dict_from_str("") == {}
        base = {"a": 1}
        parsed = parse_dict_from_str(None, defaults=base)
        assert parsed == base
        # A copy must be returned, never the defaults object itself.
        assert parsed is not base

    def test_basic_parsing(self):
        """Simple key-value parsing without defaults or types."""
        assert parse_dict_from_str("key1=val1, key2=val2") == {
            "key1": "val1",
            "key2": "val2",
        }

    def test_with_defaults(self):
        """Environment values override defaults correctly."""
        parsed = parse_dict_from_str(
            "port=9090, host=db.example.com",
            defaults={"host": "localhost", "port": 8000, "user": "default"},
        )
        assert parsed == {"host": "db.example.com", "port": "9090", "user": "default"}

    def test_type_casting(self):
        """Values are cast to the types named in type_map."""
        parsed = parse_dict_from_str(
            "port=9090, debug=true, timeout=12.5, user=admin",
            type_map={"port": int, "debug": bool, "timeout": float},
        )
        assert parsed == {"port": 9090, "debug": True, "timeout": 12.5, "user": "admin"}

    def test_type_casting_with_defaults(self):
        """Casting applies when values come from both defaults and env string."""
        # 'retries' comes from defaults and is already an int, so the caster
        # must leave it untouched.
        parsed = parse_dict_from_str(
            "port=9090, debug=true",
            defaults={"port": 8000, "debug": False, "retries": 3},
            type_map={"port": int, "debug": bool, "retries": int},
        )
        assert parsed == {"port": 9090, "debug": True, "retries": 3}
        assert isinstance(parsed["retries"], int)

    def test_path_casting(self, tmp_path: Path):
        """Strings are cast to resolved pathlib.Path objects."""
        # Create a dummy file to resolve against.
        target = tmp_path / "test_file.txt"
        target.touch()
        parsed = parse_dict_from_str(
            f"config_path={target}",
            type_map={"config_path": Path},
        )
        assert isinstance(parsed["config_path"], Path)
        assert parsed["config_path"] == target.resolve()

    def test_custom_separator(self):
        """Parsing honours a custom separator such as a semicolon."""
        parsed = parse_dict_from_str("host=db; port=5432; user=test", separator=";")
        assert parsed == {"host": "db", "port": "5432", "user": "test"}

    def test_edge_cases_in_string(self):
        """Malformed strings are handled robustly."""
        # Malformed pair 'debug' is skipped, the extra comma is ignored.
        assert parse_dict_from_str("key=val,, debug, foo=bar") == {
            "key": "val",
            "foo": "bar",
        }
        # Values may themselves contain the equals sign.
        assert parse_dict_from_str("url=postgres://user:pass@host:5432/db") == {
            "url": "postgres://user:pass@host:5432/db"
        }

    def test_casting_error_handling(self):
        """Invalid casts raise ValueError naming key, value and type."""
        with pytest.raises(ValueError) as excinfo:
            parse_dict_from_str("port=not-a-number", type_map={"port": int})
        message = str(excinfo.value)
        assert "Error casting key 'port'" in message
        assert "value 'not-a-number'" in message
        assert "to type 'int'" in message

    def test_bool_casting_error(self):
        """An invalid boolean string raises a ValueError."""
        with pytest.raises(ValueError, match="Error casting key 'debug'"):
            parse_dict_from_str("debug=maybe", type_map={"debug": bool})

    def test_nested_key_parsing_basic(self):
        """Dot-notation keys build nested dictionaries."""
        parsed = parse_dict_from_str(
            "database.host=db.example.com, database.port=5432, logging.level=INFO"
        )
        assert parsed == {
            "database": {"host": "db.example.com", "port": "5432"},
            "logging": {"level": "INFO"},
        }

    def test_nested_overrides_defaults_and_deepcopy(self):
        """Nested env keys override defaults and defaults are deep-copied."""
        base = {"database": {"host": "127.0.0.1", "port": 3306, "user": "default"}}
        parsed = parse_dict_from_str(
            "database.host=db.example.com, debug=true",
            defaults=base,
            type_map={"debug": bool},
        )
        assert parsed["database"]["host"] == "db.example.com"
        # Untouched defaults are preserved.
        assert parsed["database"]["port"] == 3306
        assert parsed["database"]["user"] == "default"
        # No shared object identity with the defaults (deep copy).
        assert parsed is not base
        assert parsed["database"] is not base["database"]

    def test_nested_type_casting(self):
        """Type casting works for dot-notation keys."""
        parsed = parse_dict_from_str(
            "database.host=db.example.com, database.port=5433, debug=false",
            type_map={"database.port": int, "debug": bool},
        )
        assert parsed["database"]["host"] == "db.example.com"
        assert parsed["database"]["port"] == 5433
        assert isinstance(parsed["database"]["port"], int)
        assert parsed["debug"] is False
        assert isinstance(parsed["debug"], bool)

    def test_nested_casting_error_message(self):
        """Failures report the full dotted key name."""
        with pytest.raises(ValueError) as excinfo:
            parse_dict_from_str(
                "database.port=not-a-number",
                type_map={"database.port": int},
            )
        message = str(excinfo.value)
        assert "Error casting key 'database.port'" in message
        assert "value 'not-a-number'" in message
        assert "to type 'int'" in message

    def test_type_map_does_not_recast_non_string_defaults(self):
        """Non-string default values are not re-cast by the type map."""
        parsed = parse_dict_from_str(
            None,
            defaults={"database": {"port": 3306}},
            type_map={"database.port": int},
        )
        assert parsed["database"]["port"] == 3306
        assert isinstance(parsed["database"]["port"], int)
class TestGetIntFromEnv:
    """Tests for get_int_from_env()."""

    @pytest.mark.parametrize(
        ("env_value", "expected"),
        [
            pytest.param("42", 42, id="positive"),
            pytest.param("-10", -10, id="negative"),
            pytest.param("0", 0, id="zero"),
            pytest.param("999", 999, id="large_positive"),
            pytest.param("-999", -999, id="large_negative"),
        ],
    )
    def test_existing_env_var_valid_ints(self, mocker, env_value, expected):
        """A set environment variable holding a valid integer is parsed."""
        mocker.patch.dict(os.environ, {"INT_VAR": env_value})
        result = get_int_from_env("INT_VAR")
        assert result == expected

    @pytest.mark.parametrize(
        ("default", "expected"),
        [
            pytest.param(100, 100, id="positive_default"),
            pytest.param(0, 0, id="zero_default"),
            pytest.param(-50, -50, id="negative_default"),
            pytest.param(None, None, id="none_default"),
        ],
    )
    def test_missing_env_var_with_defaults(self, mocker, default, expected):
        """An unset variable falls back to the supplied default."""
        mocker.patch.dict(os.environ, {}, clear=True)
        result = get_int_from_env("MISSING_VAR", default=default)
        assert result == expected

    def test_missing_env_var_no_default(self, mocker):
        """An unset variable with no default yields None."""
        mocker.patch.dict(os.environ, {}, clear=True)
        assert get_int_from_env("MISSING_VAR") is None

    @pytest.mark.parametrize(
        "invalid_value",
        [
            pytest.param("not_a_number", id="text"),
            pytest.param("42.5", id="float"),
            pytest.param("42a", id="alpha_suffix"),
            pytest.param("", id="empty"),
            pytest.param(" ", id="whitespace"),
            pytest.param("true", id="boolean"),
            pytest.param("1.0", id="decimal"),
        ],
    )
    def test_invalid_int_values_raise_error(self, mocker, invalid_value):
        """Anything that is not a base-10 integer raises ValueError."""
        mocker.patch.dict(os.environ, {"INVALID_INT": invalid_value})
        with pytest.raises(ValueError):
            get_int_from_env("INVALID_INT")
class TestGetEnvChoice:
    """Tests for get_choice_from_env()."""

    @pytest.fixture
    def valid_choices(self) -> set[str]:
        """A representative set of allowed values."""
        return {"development", "staging", "production"}

    def test_returns_valid_env_value(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """A set variable whose value is an allowed choice is returned as-is."""
        mocker.patch.dict("os.environ", {"TEST_ENV": "development"})
        assert get_choice_from_env("TEST_ENV", valid_choices) == "development"

    def test_returns_default_when_env_not_set(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """An unset variable falls back to the provided default."""
        mocker.patch.dict("os.environ", {}, clear=True)
        assert (
            get_choice_from_env("TEST_ENV", valid_choices, default="staging")
            == "staging"
        )

    def test_raises_error_when_env_not_set_and_no_default(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """Missing variable plus missing default is an error."""
        mocker.patch.dict("os.environ", {}, clear=True)
        with pytest.raises(ValueError) as exc_info:
            get_choice_from_env("TEST_ENV", valid_choices)
        assert "Environment variable 'TEST_ENV' is required but not set" in str(
            exc_info.value,
        )

    def test_raises_error_when_env_value_invalid(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """A value outside the choice set raises and lists every valid choice."""
        mocker.patch.dict("os.environ", {"TEST_ENV": "invalid_value"})
        with pytest.raises(ValueError) as exc_info:
            get_choice_from_env("TEST_ENV", valid_choices)
        message = str(exc_info.value)
        assert (
            "Environment variable 'TEST_ENV' has invalid value 'invalid_value'"
            in message
        )
        assert "Valid choices are:" in message
        for choice in ("development", "staging", "production"):
            assert choice in message

    def test_raises_error_when_default_invalid(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """Even the default value must belong to the choice set."""
        mocker.patch.dict("os.environ", {}, clear=True)
        with pytest.raises(ValueError) as exc_info:
            get_choice_from_env("TEST_ENV", valid_choices, default="invalid_default")
        assert (
            "Environment variable 'TEST_ENV' has invalid value 'invalid_default'"
            in str(exc_info.value)
        )

    def test_case_sensitive_validation(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """Validation does not fold case: 'DEVELOPMENT' is rejected."""
        mocker.patch.dict("os.environ", {"TEST_ENV": "DEVELOPMENT"})
        with pytest.raises(ValueError):
            get_choice_from_env("TEST_ENV", valid_choices)

    def test_empty_string_env_value(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """An empty string is treated as an invalid value, not as unset."""
        mocker.patch.dict("os.environ", {"TEST_ENV": ""})
        with pytest.raises(ValueError) as exc_info:
            get_choice_from_env("TEST_ENV", valid_choices)
        assert "has invalid value ''" in str(exc_info.value)

    def test_whitespace_env_value(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """Surrounding whitespace is not stripped before validation."""
        mocker.patch.dict("os.environ", {"TEST_ENV": " development "})
        with pytest.raises(ValueError):
            get_choice_from_env("TEST_ENV", valid_choices)

    def test_single_choice_set(self, mocker: MockerFixture) -> None:
        """A one-element choice set works."""
        single_choice: set[str] = {"production"}
        mocker.patch.dict("os.environ", {"TEST_ENV": "production"})
        assert get_choice_from_env("TEST_ENV", single_choice) == "production"

    def test_large_choice_set(self, mocker: MockerFixture) -> None:
        """A 100-element choice set works."""
        large_choices: set[str] = {f"option_{i}" for i in range(100)}
        mocker.patch.dict("os.environ", {"TEST_ENV": "option_50"})
        assert get_choice_from_env("TEST_ENV", large_choices) == "option_50"

    def test_different_env_keys(
        self,
        mocker: MockerFixture,
        valid_choices: set[str],
    ) -> None:
        """The function is agnostic to the environment variable's name."""
        for env_key, env_value in (
            ("DJANGO_ENV", "development"),
            ("DATABASE_BACKEND", "staging"),
            ("LOG_LEVEL", "production"),
            ("APP_MODE", "development"),
        ):
            mocker.patch.dict("os.environ", {env_key: env_value})
            assert get_choice_from_env(env_key, valid_choices) == env_value
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/tests/settings/test_environment_parsers.py",
"license": "GNU General Public License v3.0",
"lines": 346,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/management/commands/base.py | """
Base command class for Paperless-ngx management commands.
Provides automatic progress bar and multiprocessing support with minimal boilerplate.
"""
from __future__ import annotations
import os
from collections.abc import Iterable
from collections.abc import Sized
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import as_completed
from dataclasses import dataclass
from typing import TYPE_CHECKING
from typing import Any
from typing import ClassVar
from typing import Generic
from typing import TypeVar
from django import db
from django.core.management import CommandError
from django.db.models import QuerySet
from django_rich.management import RichCommand
from rich.console import Console
from rich.progress import BarColumn
from rich.progress import MofNCompleteColumn
from rich.progress import Progress
from rich.progress import SpinnerColumn
from rich.progress import TextColumn
from rich.progress import TimeElapsedColumn
from rich.progress import TimeRemainingColumn
if TYPE_CHECKING:
from collections.abc import Callable
from collections.abc import Generator
from collections.abc import Iterable
from collections.abc import Sequence
from django.core.management import CommandParser
# Item / result type parameters shared by ProcessResult and the
# parallel-processing helpers below.
T = TypeVar("T")  # type of the items being processed
R = TypeVar("R")  # type of the per-item processing result
@dataclass(frozen=True, slots=True)
class ProcessResult(Generic[T, R]):
"""
Result of processing a single item in parallel.
Attributes:
item: The input item that was processed.
result: The return value from the processing function, or None if an error occurred.
error: The exception if processing failed, or None on success.
"""
item: T
result: R | None
error: BaseException | None
@property
def success(self) -> bool:
"""Return True if the item was processed successfully."""
return self.error is None
class PaperlessCommand(RichCommand):
    """
    Base command class with automatic progress bar and multiprocessing support.

    Features are opt-in via class attributes:

        supports_progress_bar: Adds --no-progress-bar argument (default: True)
        supports_multiprocessing: Adds --processes argument (default: False)

    Example usage:

        class Command(PaperlessCommand):
            help = "Process all documents"

            def handle(self, *args, **options):
                documents = Document.objects.all()
                for doc in self.track(documents, description="Processing..."):
                    process_document(doc)

        class Command(PaperlessCommand):
            help = "Regenerate thumbnails"
            supports_multiprocessing = True

            def handle(self, *args, **options):
                ids = list(Document.objects.values_list("id", flat=True))
                for result in self.process_parallel(process_doc, ids):
                    if result.error:
                        self.console.print(f"[red]Failed: {result.error}[/red]")
    """

    supports_progress_bar: ClassVar[bool] = True
    supports_multiprocessing: ClassVar[bool] = False

    # Instance attributes set by execute() before handle() runs
    no_progress_bar: bool
    process_count: int

    def add_arguments(self, parser: CommandParser) -> None:
        """Add arguments based on supported features."""
        super().add_arguments(parser)
        if self.supports_progress_bar:
            parser.add_argument(
                "--no-progress-bar",
                default=False,
                action="store_true",
                help="Disable the progress bar",
            )
        if self.supports_multiprocessing:
            # Default to a quarter of the CPUs (but at least one) so commands
            # do not monopolize the host unless explicitly asked to.
            default_processes = max(1, (os.cpu_count() or 1) // 4)
            parser.add_argument(
                "--processes",
                default=default_processes,
                type=int,
                help=f"Number of processes to use (default: {default_processes})",
            )

    def execute(self, *args: Any, **options: Any) -> str | None:
        """
        Set up instance state before handle() is called.

        This is called by Django's command infrastructure after argument parsing
        but before handle(). We use it to set instance attributes from options.

        Raises:
            CommandError: If --processes is less than 1.
        """
        # Set progress bar state
        if self.supports_progress_bar:
            self.no_progress_bar = options.get("no_progress_bar", False)
        else:
            self.no_progress_bar = True

        # Set multiprocessing state
        if self.supports_multiprocessing:
            self.process_count = options.get("processes", 1)
            if self.process_count < 1:
                raise CommandError("--processes must be at least 1")
        else:
            self.process_count = 1

        return super().execute(*args, **options)

    def _create_progress(self, description: str) -> Progress:
        """
        Create a configured Progress instance.

        Progress output is directed to stderr to match the convention that
        progress bars are transient UI feedback, not command output. This
        mirrors tqdm's default behavior and prevents progress bar rendering
        from interfering with stdout-based assertions in tests or piped
        command output.

        Args:
            description: Text to display alongside the progress bar.

        Returns:
            A Progress instance configured with appropriate columns.
        """
        return Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            MofNCompleteColumn(),
            TimeElapsedColumn(),
            TimeRemainingColumn(),
            console=Console(stderr=True),
            transient=False,
        )

    def _get_iterable_length(self, iterable: Iterable[object]) -> int | None:
        """
        Attempt to determine the length of an iterable without consuming it.

        Tries .count() first (for Django querysets - executes SELECT COUNT(*)),
        then falls back to len() for sequences.

        Args:
            iterable: The iterable to measure.

        Returns:
            The length if determinable, None otherwise.
        """
        if isinstance(iterable, QuerySet):
            return iterable.count()
        if isinstance(iterable, Sized):
            return len(iterable)
        return None

    def track(
        self,
        iterable: Iterable[T],
        *,
        description: str = "Processing...",
        total: int | None = None,
    ) -> Generator[T, None, None]:
        """
        Iterate over items with an optional progress bar.

        Respects --no-progress-bar flag. When disabled, simply yields items
        without any progress display.

        Args:
            iterable: The items to iterate over.
            description: Text to display alongside the progress bar.
            total: Total number of items. If None, attempts to determine
                automatically via .count() (for querysets) or len().

        Yields:
            Items from the iterable.

        Example:
            for doc in self.track(documents, description="Renaming..."):
                process(doc)
        """
        if self.no_progress_bar:
            yield from iterable
            return

        # Attempt to determine total if not provided
        if total is None:
            total = self._get_iterable_length(iterable)

        with self._create_progress(description) as progress:
            task_id = progress.add_task(description, total=total)
            for item in iterable:
                yield item
                progress.advance(task_id)

    def process_parallel(
        self,
        fn: Callable[[T], R],
        items: Sequence[T],
        *,
        description: str = "Processing...",
    ) -> Generator[ProcessResult[T, R], None, None]:
        """
        Process items in parallel with progress tracking.

        When --processes=1, runs sequentially in the main process without
        spawning subprocesses. This is critical for testing, as multiprocessing
        breaks fixtures, mocks, and database transactions.

        When --processes > 1, uses ProcessPoolExecutor and automatically closes
        database connections before spawning workers (required for PostgreSQL).

        Args:
            fn: Function to apply to each item. Must be picklable for parallel
                execution (i.e., defined at module level, not a lambda or closure).
            items: Sequence of items to process.
            description: Text to display alongside the progress bar.

        Yields:
            ProcessResult for each item, containing the item, result, and any error.

        Example:
            def regenerate_thumbnail(doc_id: int) -> Path:
                ...

            for result in self.process_parallel(regenerate_thumbnail, doc_ids):
                if result.error:
                    self.console.print(f"[red]Failed {result.item}[/red]")
        """
        total = len(items)

        if self.process_count == 1:
            # Sequential execution in main process - critical for testing
            yield from self._process_sequential(fn, items, description, total)
        else:
            # Parallel execution with ProcessPoolExecutor
            yield from self._process_parallel(fn, items, description, total)

    def _process_sequential(
        self,
        fn: Callable[[T], R],
        items: Sequence[T],
        description: str,
        total: int,
    ) -> Generator[ProcessResult[T, R], None, None]:
        """
        Process items sequentially in the main process.

        Only the call to ``fn`` is guarded by the try block. The yield happens
        outside it so that an exception thrown into the generator at the
        suspension point (e.g. via gen.throw()) is not swallowed and
        misreported as a processing failure of the current item.
        """
        for item in self.track(items, description=description, total=total):
            try:
                result = fn(item)
            except Exception as e:
                outcome = ProcessResult(item=item, result=None, error=e)
            else:
                outcome = ProcessResult(item=item, result=result, error=None)
            yield outcome

    def _process_parallel(
        self,
        fn: Callable[[T], R],
        items: Sequence[T],
        description: str,
        total: int,
    ) -> Generator[ProcessResult[T, R], None, None]:
        """Process items in parallel using ProcessPoolExecutor."""
        # Close database connections before forking - required for PostgreSQL
        db.connections.close_all()

        with self._create_progress(description) as progress:
            task_id = progress.add_task(description, total=total)

            with ProcessPoolExecutor(max_workers=self.process_count) as executor:
                # Submit all tasks and map futures back to items
                future_to_item = {executor.submit(fn, item): item for item in items}

                # Yield results as they complete. Only future.result() is
                # guarded: the progress update and the yield stay outside the
                # try so exceptions thrown into the generator at the yield
                # point are neither swallowed nor double-reported.
                for future in as_completed(future_to_item):
                    item = future_to_item[future]
                    try:
                        result = future.result()
                    except Exception as e:
                        outcome = ProcessResult(item=item, result=None, error=e)
                    else:
                        outcome = ProcessResult(item=item, result=result, error=None)
                    progress.advance(task_id)
                    yield outcome
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/management/commands/base.py",
"license": "GNU General Public License v3.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/documents/tests/management/test_management_base_cmd.py | """Tests for PaperlessCommand base class."""
from __future__ import annotations
import io
from typing import TYPE_CHECKING
import pytest
from django.core.management import CommandError
from django.db.models import QuerySet
from rich.console import Console
from documents.management.commands.base import PaperlessCommand
from documents.management.commands.base import ProcessResult
if TYPE_CHECKING:
from pytest_mock import MockerFixture
# --- Test Commands ---
# These simulate real command implementations for testing
class SimpleCommand(PaperlessCommand):
    """Command using the defaults: progress bar on, multiprocessing off."""

    help = "Simple test command"

    def handle(self, *args, **options):
        doubled = [
            value * 2
            for value in self.track(list(range(5)), description="Processing...")
        ]
        self.stdout.write(f"Results: {doubled}")
class NoProgressBarCommand(PaperlessCommand):
    """Command that opts out of the --no-progress-bar argument."""

    help = "No progress bar command"
    supports_progress_bar = False

    def handle(self, *args, **options):
        # Just drain the iterator; the items themselves are irrelevant.
        for _ in self.track(list(range(3))):
            pass
        self.stdout.write("Done")
class MultiprocessCommand(PaperlessCommand):
    """Command that opts in to the --processes argument."""

    help = "Multiprocess test command"
    supports_multiprocessing = True

    def handle(self, *args, **options):
        outcomes = list(
            self.process_parallel(
                _double_value,
                list(range(5)),
                description="Processing...",
            ),
        )
        ok_count = sum(1 for outcome in outcomes if outcome.success)
        self.stdout.write(f"Successes: {ok_count}")
# --- Helper Functions for Multiprocessing ---
# Must be at module level to be picklable
def _double_value(x: int) -> int:
"""Double the input value."""
return x * 2
def _divide_ten_by(x: int) -> float:
"""Divide 10 by x. Raises ZeroDivisionError if x is 0."""
return 10 / x
# --- Fixtures ---
@pytest.fixture
def console() -> Console:
    """Provide a plain console so tests never emit terminal control codes."""
    return Console(force_interactive=False, force_terminal=False)
@pytest.fixture
def simple_command(console: Console) -> SimpleCommand:
    """SimpleCommand wired for direct method calls (no argv parsing)."""
    cmd = SimpleCommand()
    cmd.console = console
    cmd.stdout = io.StringIO()
    cmd.stderr = io.StringIO()
    # State normally set by execute(); set directly since handle() is bypassed.
    cmd.no_progress_bar = True
    cmd.process_count = 1
    return cmd
@pytest.fixture
def multiprocess_command(console: Console) -> MultiprocessCommand:
    """MultiprocessCommand wired for direct method calls (no argv parsing)."""
    cmd = MultiprocessCommand()
    cmd.console = console
    cmd.stdout = io.StringIO()
    cmd.stderr = io.StringIO()
    # State normally set by execute(); set directly since handle() is bypassed.
    cmd.no_progress_bar = True
    cmd.process_count = 1
    return cmd
@pytest.fixture
def mock_queryset():
    """
    Factory for a QuerySet stand-in that records whether .count() was used.

    Raises if len() is ever called, proving the production code prefers
    .count() (a SELECT COUNT(*)) over materializing the queryset.
    """

    class MockQuerySet(QuerySet):
        def __init__(self, items: list):
            self._items = items
            self.count_called = False

        def __iter__(self):
            return iter(self._items)

        def __len__(self):
            raise AssertionError("len() should not be called on querysets")

        def count(self) -> int:
            self.count_called = True
            return len(self._items)

    return MockQuerySet
# --- Test Classes ---
@pytest.mark.management
class TestProcessResult:
    """Tests for the ProcessResult dataclass."""

    def test_success_result(self):
        outcome = ProcessResult(item=1, result=2, error=None)
        assert (outcome.item, outcome.result, outcome.error) == (1, 2, None)
        assert outcome.success is True

    def test_error_result(self):
        exc = ValueError("test error")
        outcome = ProcessResult(item=1, result=None, error=exc)
        assert outcome.item == 1
        assert outcome.result is None
        assert outcome.error is exc
        assert outcome.success is False
@pytest.mark.management
class TestPaperlessCommandArguments:
    """Tests for argument parsing behavior."""

    @staticmethod
    def _parser(command_cls, name: str):
        """Build the argparse parser Django would create for the command."""
        return command_cls().create_parser("manage.py", name)

    def test_progress_bar_argument_added_by_default(self):
        parser = self._parser(SimpleCommand, "simple")
        assert parser.parse_args(["--no-progress-bar"]).no_progress_bar is True
        assert parser.parse_args([]).no_progress_bar is False

    def test_progress_bar_argument_not_added_when_disabled(self):
        parser = self._parser(NoProgressBarCommand, "noprogress")
        assert not hasattr(parser.parse_args([]), "no_progress_bar")

    def test_processes_argument_added_when_multiprocessing_enabled(self):
        parser = self._parser(MultiprocessCommand, "multiprocess")
        assert parser.parse_args(["--processes", "4"]).processes == 4
        # The default is CPU-derived; only its lower bound is stable.
        assert parser.parse_args([]).processes >= 1

    def test_processes_argument_not_added_when_multiprocessing_disabled(self):
        parser = self._parser(SimpleCommand, "simple")
        assert not hasattr(parser.parse_args([]), "processes")
@pytest.mark.management
class TestPaperlessCommandExecute:
    """Tests for the execute() setup behavior."""

    @pytest.fixture
    def base_options(self) -> dict:
        """Minimal options Django's execute() machinery expects."""
        return {
            "verbosity": 1,
            "no_color": True,
            "force_color": False,
            "skip_checks": True,
        }

    @staticmethod
    def _fresh(command_cls):
        """Instantiate a command with stdout/stderr captured in memory."""
        command = command_cls()
        command.stdout = io.StringIO()
        command.stderr = io.StringIO()
        return command

    @pytest.mark.parametrize(
        ("no_progress_bar_flag", "expected"),
        [
            pytest.param(False, False, id="progress-bar-enabled"),
            pytest.param(True, True, id="progress-bar-disabled"),
        ],
    )
    def test_no_progress_bar_state_set(
        self,
        base_options: dict,
        *,
        no_progress_bar_flag: bool,
        expected: bool,
    ):
        command = self._fresh(SimpleCommand)
        command.execute(**{**base_options, "no_progress_bar": no_progress_bar_flag})
        assert command.no_progress_bar is expected

    def test_no_progress_bar_always_true_when_not_supported(self, base_options: dict):
        command = self._fresh(NoProgressBarCommand)
        command.execute(**base_options)
        assert command.no_progress_bar is True

    @pytest.mark.parametrize(
        ("processes", "expected"),
        [
            pytest.param(1, 1, id="single-process"),
            pytest.param(4, 4, id="four-processes"),
        ],
    )
    def test_process_count_set(
        self,
        base_options: dict,
        processes: int,
        expected: int,
    ):
        command = self._fresh(MultiprocessCommand)
        command.execute(
            **{**base_options, "processes": processes, "no_progress_bar": True},
        )
        assert command.process_count == expected

    @pytest.mark.parametrize(
        "invalid_count",
        [
            pytest.param(0, id="zero"),
            pytest.param(-1, id="negative"),
        ],
    )
    def test_process_count_validation_rejects_invalid(
        self,
        base_options: dict,
        invalid_count: int,
    ):
        command = self._fresh(MultiprocessCommand)
        with pytest.raises(CommandError, match="--processes must be at least 1"):
            command.execute(
                **{
                    **base_options,
                    "processes": invalid_count,
                    "no_progress_bar": True,
                },
            )

    def test_process_count_defaults_to_one_when_not_supported(self, base_options: dict):
        command = self._fresh(SimpleCommand)
        command.execute(**{**base_options, "no_progress_bar": True})
        assert command.process_count == 1
@pytest.mark.management
class TestGetIterableLength:
    """Tests for the _get_iterable_length() method."""

    def test_uses_count_for_querysets(
        self,
        simple_command: SimpleCommand,
        mock_queryset,
    ):
        """Django querysets are measured via .count(), never len()."""
        qs = mock_queryset([1, 2, 3, 4, 5])
        assert simple_command._get_iterable_length(qs) == 5
        assert qs.count_called is True

    def test_uses_len_for_sized(self, simple_command: SimpleCommand):
        """Plain sequences are measured with len()."""
        assert simple_command._get_iterable_length([1, 2, 3, 4]) == 4

    def test_returns_none_for_unsized_iterables(self, simple_command: SimpleCommand):
        """Generators and other unsized iterables produce None."""
        assert simple_command._get_iterable_length(x for x in [1, 2, 3]) is None
@pytest.mark.management
class TestTrack:
    """Tests for the track() method."""

    def test_with_progress_bar_disabled(self, simple_command: SimpleCommand):
        simple_command.no_progress_bar = True
        source = ["a", "b", "c"]
        assert list(simple_command.track(source, description="Test...")) == source

    def test_with_progress_bar_enabled(self, simple_command: SimpleCommand):
        simple_command.no_progress_bar = False
        source = [1, 2, 3]
        assert (
            list(simple_command.track(source, description="Processing...")) == source
        )

    def test_with_explicit_total(self, simple_command: SimpleCommand):
        simple_command.no_progress_bar = False

        def produce():
            yield from [1, 2, 3]

        assert list(simple_command.track(produce(), total=3)) == [1, 2, 3]

    def test_with_generator_no_total(self, simple_command: SimpleCommand):
        def produce():
            yield from [1, 2, 3]

        assert list(simple_command.track(produce())) == [1, 2, 3]

    def test_empty_iterable(self, simple_command: SimpleCommand):
        assert list(simple_command.track([])) == []

    def test_uses_queryset_count(
        self,
        simple_command: SimpleCommand,
        mock_queryset,
        mocker: MockerFixture,
    ):
        """track() measures querysets through _get_iterable_length/.count()."""
        simple_command.no_progress_bar = False
        qs = mock_queryset([1, 2, 3])
        length_spy = mocker.spy(simple_command, "_get_iterable_length")
        assert list(simple_command.track(qs)) == [1, 2, 3]
        length_spy.assert_called_once_with(qs)
        assert qs.count_called is True
@pytest.mark.management
class TestProcessParallel:
    """Tests for the process_parallel() method."""

    def test_sequential_processing_single_process(
        self,
        multiprocess_command: MultiprocessCommand,
    ):
        multiprocess_command.process_count = 1
        outcomes = list(
            multiprocess_command.process_parallel(_double_value, [1, 2, 3, 4, 5]),
        )
        assert len(outcomes) == 5
        assert all(outcome.success for outcome in outcomes)
        assert {o.item: o.result for o in outcomes} == {1: 2, 2: 4, 3: 6, 4: 8, 5: 10}

    def test_sequential_processing_handles_errors(
        self,
        multiprocess_command: MultiprocessCommand,
    ):
        multiprocess_command.process_count = 1
        # The 0 input triggers a ZeroDivisionError inside the worker function.
        outcomes = list(
            multiprocess_command.process_parallel(_divide_ten_by, [1, 2, 0, 4]),
        )
        assert len(outcomes) == 4
        failures = [o for o in outcomes if not o.success]
        assert len([o for o in outcomes if o.success]) == 3
        assert len(failures) == 1
        assert failures[0].item == 0
        assert isinstance(failures[0].error, ZeroDivisionError)

    def test_parallel_closes_db_connections(
        self,
        multiprocess_command: MultiprocessCommand,
        mocker: MockerFixture,
    ):
        multiprocess_command.process_count = 2
        patched_connections = mocker.patch(
            "documents.management.commands.base.db.connections",
        )
        outcomes = list(
            multiprocess_command.process_parallel(_double_value, [1, 2, 3]),
        )
        patched_connections.close_all.assert_called_once()
        assert len(outcomes) == 3

    def test_parallel_processing_handles_errors(
        self,
        multiprocess_command: MultiprocessCommand,
        mocker: MockerFixture,
    ):
        multiprocess_command.process_count = 2
        mocker.patch("documents.management.commands.base.db.connections")
        outcomes = list(
            multiprocess_command.process_parallel(_divide_ten_by, [1, 2, 0, 4]),
        )
        failures = [o for o in outcomes if not o.success]
        assert len(failures) == 1
        assert failures[0].item == 0

    def test_empty_items(self, multiprocess_command: MultiprocessCommand):
        assert list(multiprocess_command.process_parallel(_double_value, [])) == []

    def test_result_contains_original_item(
        self,
        multiprocess_command: MultiprocessCommand,
    ):
        inputs = [10, 20, 30]
        for outcome in multiprocess_command.process_parallel(_double_value, inputs):
            assert outcome.item in inputs
            assert outcome.result == outcome.item * 2

    def test_sequential_path_used_for_single_process(
        self,
        multiprocess_command: MultiprocessCommand,
        mocker: MockerFixture,
    ):
        """One process must run in-process (fixtures/mocks/transactions survive)."""
        multiprocess_command.process_count = 1
        sequential_spy = mocker.spy(multiprocess_command, "_process_sequential")
        parallel_spy = mocker.spy(multiprocess_command, "_process_parallel")
        list(multiprocess_command.process_parallel(_double_value, [1, 2, 3]))
        sequential_spy.assert_called_once()
        parallel_spy.assert_not_called()

    def test_parallel_path_used_for_multiple_processes(
        self,
        multiprocess_command: MultiprocessCommand,
        mocker: MockerFixture,
    ):
        """More than one process takes the ProcessPoolExecutor path."""
        multiprocess_command.process_count = 2
        mocker.patch("documents.management.commands.base.db.connections")
        sequential_spy = mocker.spy(multiprocess_command, "_process_sequential")
        parallel_spy = mocker.spy(multiprocess_command, "_process_parallel")
        list(multiprocess_command.process_parallel(_double_value, [1, 2, 3]))
        parallel_spy.assert_called_once()
        sequential_spy.assert_not_called()
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/management/test_management_base_cmd.py",
"license": "GNU General Public License v3.0",
"lines": 377,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/tests/test_api_document_versions.py | from __future__ import annotations
from typing import TYPE_CHECKING
from unittest import TestCase
from unittest import mock
from auditlog.models import LogEntry # type: ignore[import-untyped]
from django.contrib.auth.models import Permission
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.core.files.uploadedfile import SimpleUploadedFile
from rest_framework import status
from rest_framework.test import APITestCase
from documents.data_models import DocumentSource
from documents.filters import EffectiveContentFilter
from documents.filters import TitleContentFilter
from documents.models import Document
from documents.tests.utils import DirectoriesMixin
if TYPE_CHECKING:
from pathlib import Path
class TestDocumentVersioningApi(DirectoriesMixin, APITestCase):
    def setUp(self) -> None:
        # Every test runs authenticated as a superuser unless the test itself
        # re-authenticates with a restricted user.
        super().setUp()
        self.user = User.objects.create_superuser(username="temp_admin")
        self.client.force_authenticate(user=self.user)
def _make_pdf_upload(self, name: str = "version.pdf") -> SimpleUploadedFile:
return SimpleUploadedFile(
name,
b"%PDF-1.4\n1 0 obj\n<<>>\nendobj\n%%EOF",
content_type="application/pdf",
)
def _write_file(self, path: Path, content: bytes = b"data") -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_bytes(content)
def _create_pdf(
self,
*,
title: str,
checksum: str,
root_document: Document | None = None,
) -> Document:
doc = Document.objects.create(
title=title,
checksum=checksum,
mime_type="application/pdf",
root_document=root_document,
)
self._write_file(doc.source_path, b"pdf")
self._write_file(doc.thumbnail_path, b"thumb")
return doc
def test_root_endpoint_returns_root_for_version_and_root(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
)
resp_root = self.client.get(f"/api/documents/{root.id}/root/")
self.assertEqual(resp_root.status_code, status.HTTP_200_OK)
self.assertEqual(resp_root.data["root_id"], root.id)
resp_version = self.client.get(f"/api/documents/{version.id}/root/")
self.assertEqual(resp_version.status_code, status.HTTP_200_OK)
self.assertEqual(resp_version.data["root_id"], root.id)
def test_root_endpoint_returns_404_for_missing_document(self) -> None:
resp = self.client.get("/api/documents/9999/root/")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_root_endpoint_returns_403_when_user_lacks_permission(self) -> None:
owner = User.objects.create_user(username="owner")
viewer = User.objects.create_user(username="viewer")
viewer.user_permissions.add(
Permission.objects.get(codename="view_document"),
)
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
owner=owner,
)
self.client.force_authenticate(user=viewer)
resp = self.client.get(f"/api/documents/{root.id}/root/")
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
def test_delete_version_disallows_deleting_root(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
with mock.patch("documents.index.remove_document_from_index"):
resp = self.client.delete(f"/api/documents/{root.id}/versions/{root.id}/")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(Document.objects.filter(id=root.id).exists())
def test_delete_version_deletes_version_and_returns_current_version(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
content="root-content",
)
v1 = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
content="v1-content",
)
v2 = Document.objects.create(
title="v2",
checksum="v2",
mime_type="application/pdf",
root_document=root,
content="v2-content",
)
with (
mock.patch("documents.index.remove_document_from_index"),
mock.patch("documents.index.add_or_update_document"),
):
resp = self.client.delete(f"/api/documents/{root.id}/versions/{v2.id}/")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertFalse(Document.objects.filter(id=v2.id).exists())
self.assertEqual(resp.data["current_version_id"], v1.id)
root.refresh_from_db()
self.assertEqual(root.content, "root-content")
with (
mock.patch("documents.index.remove_document_from_index"),
mock.patch("documents.index.add_or_update_document"),
):
resp = self.client.delete(f"/api/documents/{root.id}/versions/{v1.id}/")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertFalse(Document.objects.filter(id=v1.id).exists())
self.assertEqual(resp.data["current_version_id"], root.id)
root.refresh_from_db()
self.assertEqual(root.content, "root-content")
def test_delete_version_writes_audit_log_entry(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
)
version_id = version.id
with (
mock.patch("documents.index.remove_document_from_index"),
mock.patch("documents.index.add_or_update_document"),
):
resp = self.client.delete(
f"/api/documents/{root.id}/versions/{version_id}/",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# Audit log entry is created against the root document.
entry = (
LogEntry.objects.filter(
content_type=ContentType.objects.get_for_model(Document),
object_id=root.id,
)
.order_by("-timestamp")
.first()
)
self.assertIsNotNone(entry)
assert entry is not None
self.assertIsNotNone(entry.actor)
assert entry.actor is not None
self.assertEqual(entry.actor.id, self.user.id)
self.assertEqual(entry.action, LogEntry.Action.UPDATE)
self.assertEqual(
entry.changes,
{"Version Deleted": ["None", version_id]},
)
additional_data = entry.additional_data or {}
self.assertEqual(additional_data.get("version_id"), version_id)
def test_delete_version_returns_404_when_version_not_related(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
other_root = Document.objects.create(
title="other",
checksum="other",
mime_type="application/pdf",
)
other_version = Document.objects.create(
title="other-v1",
checksum="other-v1",
mime_type="application/pdf",
root_document=other_root,
)
with mock.patch("documents.index.remove_document_from_index"):
resp = self.client.delete(
f"/api/documents/{root.id}/versions/{other_version.id}/",
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_version_accepts_version_id_as_root_parameter(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
)
with (
mock.patch("documents.index.remove_document_from_index"),
mock.patch("documents.index.add_or_update_document"),
):
resp = self.client.delete(
f"/api/documents/{version.id}/versions/{version.id}/",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertFalse(Document.objects.filter(id=version.id).exists())
self.assertEqual(resp.data["current_version_id"], root.id)
def test_delete_version_returns_404_when_root_missing(self) -> None:
resp = self.client.delete("/api/documents/9999/versions/123/")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_version_reindexes_root_document(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
)
with (
mock.patch("documents.index.remove_document_from_index") as remove_index,
mock.patch("documents.index.add_or_update_document") as add_or_update,
):
resp = self.client.delete(
f"/api/documents/{root.id}/versions/{version.id}/",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
remove_index.assert_called_once_with(version)
add_or_update.assert_called_once()
self.assertEqual(add_or_update.call_args[0][0].id, root.id)
def test_delete_version_returns_403_without_permission(self) -> None:
owner = User.objects.create_user(username="owner")
other = User.objects.create_user(username="other")
other.user_permissions.add(
Permission.objects.get(codename="delete_document"),
)
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
owner=owner,
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
)
self.client.force_authenticate(user=other)
resp = self.client.delete(
f"/api/documents/{root.id}/versions/{version.id}/",
)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
def test_delete_version_returns_404_when_version_missing(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
resp = self.client.delete(f"/api/documents/{root.id}/versions/9999/")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_update_version_label_updates_and_trims(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
version_label="old",
)
resp = self.client.patch(
f"/api/documents/{root.id}/versions/{version.id}/",
{"version_label": " Label 1 "},
format="json",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
version.refresh_from_db()
self.assertEqual(version.version_label, "Label 1")
self.assertEqual(resp.data["version_label"], "Label 1")
self.assertEqual(resp.data["id"], version.id)
self.assertFalse(resp.data["is_root"])
def test_update_version_label_clears_on_blank(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
version_label="Root Label",
)
resp = self.client.patch(
f"/api/documents/{root.id}/versions/{root.id}/",
{"version_label": " "},
format="json",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
root.refresh_from_db()
self.assertIsNone(root.version_label)
self.assertIsNone(resp.data["version_label"])
self.assertTrue(resp.data["is_root"])
def test_update_version_label_returns_403_without_permission(self) -> None:
owner = User.objects.create_user(username="owner")
other = User.objects.create_user(username="other")
other.user_permissions.add(
Permission.objects.get(codename="change_document"),
)
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
owner=owner,
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
)
self.client.force_authenticate(user=other)
resp = self.client.patch(
f"/api/documents/{root.id}/versions/{version.id}/",
{"version_label": "Blocked"},
format="json",
)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
def test_update_version_label_returns_404_for_unrelated_version(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
other_root = Document.objects.create(
title="other",
checksum="other",
mime_type="application/pdf",
)
other_version = Document.objects.create(
title="other-v1",
checksum="other-v1",
mime_type="application/pdf",
root_document=other_root,
)
resp = self.client.patch(
f"/api/documents/{root.id}/versions/{other_version.id}/",
{"version_label": "Nope"},
format="json",
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_download_version_param_errors(self) -> None:
root = self._create_pdf(title="root", checksum="root")
resp = self.client.get(
f"/api/documents/{root.id}/download/?version=not-a-number",
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
resp = self.client.get(f"/api/documents/{root.id}/download/?version=9999")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
other_root = self._create_pdf(title="other", checksum="other")
other_version = self._create_pdf(
title="other-v1",
checksum="other-v1",
root_document=other_root,
)
resp = self.client.get(
f"/api/documents/{root.id}/download/?version={other_version.id}",
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_download_preview_thumb_with_version_param(self) -> None:
root = self._create_pdf(title="root", checksum="root")
version = self._create_pdf(
title="v1",
checksum="v1",
root_document=root,
)
self._write_file(version.source_path, b"version")
self._write_file(version.thumbnail_path, b"thumb")
resp = self.client.get(
f"/api/documents/{root.id}/download/?version={version.id}",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.content, b"version")
resp = self.client.get(
f"/api/documents/{root.id}/preview/?version={version.id}",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.content, b"version")
resp = self.client.get(
f"/api/documents/{root.id}/thumb/?version={version.id}",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.content, b"thumb")
def test_metadata_version_param_uses_version(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
)
with mock.patch("documents.views.DocumentViewSet.get_metadata") as metadata:
metadata.return_value = []
resp = self.client.get(
f"/api/documents/{root.id}/metadata/?version={version.id}",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertTrue(metadata.called)
def test_metadata_version_param_errors(self) -> None:
root = self._create_pdf(title="root", checksum="root")
resp = self.client.get(
f"/api/documents/{root.id}/metadata/?version=not-a-number",
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
resp = self.client.get(f"/api/documents/{root.id}/metadata/?version=9999")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
other_root = self._create_pdf(title="other", checksum="other")
other_version = self._create_pdf(
title="other-v1",
checksum="other-v1",
root_document=other_root,
)
resp = self.client.get(
f"/api/documents/{root.id}/metadata/?version={other_version.id}",
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_metadata_returns_403_when_user_lacks_permission(self) -> None:
owner = User.objects.create_user(username="owner")
other = User.objects.create_user(username="other")
other.user_permissions.add(
Permission.objects.get(codename="view_document"),
)
doc = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
owner=owner,
)
self.client.force_authenticate(user=other)
resp = self.client.get(f"/api/documents/{doc.id}/metadata/")
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
def test_update_version_enqueues_consume_with_overrides(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
upload = self._make_pdf_upload()
async_task = mock.Mock()
async_task.id = "task-123"
with mock.patch("documents.views.consume_file") as consume_mock:
consume_mock.delay.return_value = async_task
resp = self.client.post(
f"/api/documents/{root.id}/update_version/",
{"document": upload, "version_label": " New Version "},
format="multipart",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data, "task-123")
consume_mock.delay.assert_called_once()
input_doc, overrides = consume_mock.delay.call_args[0]
self.assertEqual(input_doc.root_document_id, root.id)
self.assertEqual(input_doc.source, DocumentSource.ApiUpload)
self.assertEqual(overrides.version_label, "New Version")
self.assertEqual(overrides.actor_id, self.user.id)
def test_update_version_with_version_pk_normalizes_to_root(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
version = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
)
upload = self._make_pdf_upload()
async_task = mock.Mock()
async_task.id = "task-123"
with mock.patch("documents.views.consume_file") as consume_mock:
consume_mock.delay.return_value = async_task
resp = self.client.post(
f"/api/documents/{version.id}/update_version/",
{"document": upload, "version_label": " New Version "},
format="multipart",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data, "task-123")
consume_mock.delay.assert_called_once()
input_doc, overrides = consume_mock.delay.call_args[0]
self.assertEqual(input_doc.root_document_id, root.id)
self.assertEqual(overrides.version_label, "New Version")
self.assertEqual(overrides.actor_id, self.user.id)
def test_update_version_returns_500_on_consume_failure(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
upload = self._make_pdf_upload()
with mock.patch("documents.views.consume_file") as consume_mock:
consume_mock.delay.side_effect = Exception("boom")
resp = self.client.post(
f"/api/documents/{root.id}/update_version/",
{"document": upload},
format="multipart",
)
self.assertEqual(resp.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
def test_update_version_returns_403_without_permission(self) -> None:
owner = User.objects.create_user(username="owner")
other = User.objects.create_user(username="other")
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
owner=owner,
)
self.client.force_authenticate(user=other)
resp = self.client.post(
f"/api/documents/{root.id}/update_version/",
{"document": self._make_pdf_upload()},
format="multipart",
)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
def test_update_version_returns_404_for_missing_document(self) -> None:
resp = self.client.post(
"/api/documents/9999/update_version/",
{"document": self._make_pdf_upload()},
format="multipart",
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_update_version_requires_document(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
)
resp = self.client.post(
f"/api/documents/{root.id}/update_version/",
{"version_label": "label"},
format="multipart",
)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_patch_content_updates_latest_version_content(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
content="root-content",
)
v1 = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
content="v1-content",
)
v2 = Document.objects.create(
title="v2",
checksum="v2",
mime_type="application/pdf",
root_document=root,
content="v2-content",
)
resp = self.client.patch(
f"/api/documents/{root.id}/",
{"content": "edited-content"},
format="json",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data["content"], "edited-content")
root.refresh_from_db()
v1.refresh_from_db()
v2.refresh_from_db()
self.assertEqual(v2.content, "edited-content")
self.assertEqual(root.content, "root-content")
self.assertEqual(v1.content, "v1-content")
def test_patch_content_updates_selected_version_content(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
content="root-content",
)
v1 = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
content="v1-content",
)
v2 = Document.objects.create(
title="v2",
checksum="v2",
mime_type="application/pdf",
root_document=root,
content="v2-content",
)
resp = self.client.patch(
f"/api/documents/{root.id}/?version={v1.id}",
{"content": "edited-v1"},
format="json",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data["content"], "edited-v1")
root.refresh_from_db()
v1.refresh_from_db()
v2.refresh_from_db()
self.assertEqual(v1.content, "edited-v1")
self.assertEqual(v2.content, "v2-content")
self.assertEqual(root.content, "root-content")
def test_retrieve_returns_latest_version_content(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
content="root-content",
)
Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
content="v1-content",
)
resp = self.client.get(f"/api/documents/{root.id}/")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data["content"], "v1-content")
def test_retrieve_with_version_param_returns_selected_version_content(self) -> None:
root = Document.objects.create(
title="root",
checksum="root",
mime_type="application/pdf",
content="root-content",
)
v1 = Document.objects.create(
title="v1",
checksum="v1",
mime_type="application/pdf",
root_document=root,
content="v1-content",
)
resp = self.client.get(f"/api/documents/{root.id}/?version={v1.id}")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data["content"], "v1-content")
class TestVersionAwareFilters(TestCase):
    """Unit tests for filters that retry on ``content`` when the
    ``effective_content`` annotation is absent from the queryset."""

    def test_title_content_filter_falls_back_to_content(self) -> None:
        """TitleContentFilter retries the filter once after a FieldError."""
        qs = mock.Mock()
        fallback = mock.Mock()
        # First filter() call blows up (annotation missing), second succeeds.
        qs.filter.side_effect = [FieldError("missing field"), fallback]

        result = TitleContentFilter().filter(qs, " latest ")

        self.assertIs(result, fallback)
        self.assertEqual(qs.filter.call_count, 2)

    def test_effective_content_filter_falls_back_to_content_lookup(self) -> None:
        """EffectiveContentFilter swaps the lookup field on fallback, keeping the expr."""
        qs = mock.Mock()
        fallback = mock.Mock()
        qs.filter.side_effect = [FieldError("missing field"), fallback]

        result = EffectiveContentFilter(lookup_expr="icontains").filter(
            qs,
            " latest ",
        )

        self.assertIs(result, fallback)
        calls = qs.filter.call_args_list
        self.assertEqual(calls[0].kwargs, {"effective_content__icontains": "latest"})
        self.assertEqual(calls[1].kwargs, {"content__icontains": "latest"})

    def test_effective_content_filter_returns_input_for_empty_values(self) -> None:
        """Whitespace-only values are a no-op: the queryset is returned untouched."""
        qs = mock.Mock()

        result = EffectiveContentFilter(lookup_expr="icontains").filter(qs, " ")

        self.assertIs(result, qs)
        qs.filter.assert_not_called()
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/test_api_document_versions.py",
"license": "GNU General Public License v3.0",
"lines": 693,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/tests/test_version_conditionals.py | from types import SimpleNamespace
from unittest import mock
from django.test import TestCase
from documents.conditionals import metadata_etag
from documents.conditionals import preview_etag
from documents.conditionals import thumbnail_last_modified
from documents.models import Document
from documents.tests.utils import DirectoriesMixin
from documents.versioning import resolve_effective_document_by_pk
class TestConditionals(DirectoriesMixin, TestCase):
    """Tests for the HTTP conditional helpers (ETag / Last-Modified) and
    version resolution used by the document endpoints."""

    def test_metadata_etag_uses_latest_version_for_root_request(self) -> None:
        """ETags for a root document come from its latest version's checksums."""
        root = Document.objects.create(
            title="root",
            checksum="root-checksum",
            archive_checksum="root-archive",
            mime_type="application/pdf",
        )
        latest = Document.objects.create(
            title="v1",
            checksum="version-checksum",
            archive_checksum="version-archive",
            mime_type="application/pdf",
            root_document=root,
        )
        request = SimpleNamespace(query_params={})
        self.assertEqual(metadata_etag(request, root.id), latest.checksum)
        self.assertEqual(preview_etag(request, root.id), latest.archive_checksum)

    def test_resolve_effective_doc_returns_none_for_invalid_or_unrelated_version(
        self,
    ) -> None:
        """Invalid or foreign ?version= values resolve to no document."""
        root = Document.objects.create(
            title="root",
            checksum="root",
            mime_type="application/pdf",
        )
        other_root = Document.objects.create(
            title="other",
            checksum="other",
            mime_type="application/pdf",
        )
        other_version = Document.objects.create(
            title="other-v1",
            checksum="other-v1",
            mime_type="application/pdf",
            root_document=other_root,
        )
        invalid_request = SimpleNamespace(query_params={"version": "not-a-number"})
        unrelated_request = SimpleNamespace(
            query_params={"version": str(other_version.id)},
        )
        self.assertIsNone(
            resolve_effective_document_by_pk(root.id, invalid_request).document,
        )
        self.assertIsNone(
            resolve_effective_document_by_pk(root.id, unrelated_request).document,
        )

    def test_thumbnail_last_modified_uses_effective_document_for_cache_key(
        self,
    ) -> None:
        """The thumbnail cache key is derived from the latest version, not the root."""
        root = Document.objects.create(
            title="root",
            checksum="root",
            mime_type="application/pdf",
        )
        latest = Document.objects.create(
            title="v2",
            checksum="v2",
            mime_type="application/pdf",
            root_document=root,
        )
        latest.thumbnail_path.parent.mkdir(parents=True, exist_ok=True)
        latest.thumbnail_path.write_bytes(b"thumb")
        request = SimpleNamespace(query_params={})
        with mock.patch(
            "documents.conditionals.get_thumbnail_modified_key",
            return_value="thumb-modified-key",
        ) as get_thumb_key:
            result = thumbnail_last_modified(request, root.id)
        self.assertIsNotNone(result)
        get_thumb_key.assert_called_once_with(latest.id)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/test_version_conditionals.py",
"license": "GNU General Public License v3.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/versioning.py | from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING
from typing import Any
from documents.models import Document
if TYPE_CHECKING:
from django.http import HttpRequest
class VersionResolutionError(str, Enum):
    """Reason a requested document version could not be resolved."""

    # The ?version= value was not a valid integer.
    INVALID = "invalid"
    # The version does not exist or is not part of the requested root's chain.
    NOT_FOUND = "not_found"
@dataclass(frozen=True, slots=True)
class VersionResolution:
    """Result of a version lookup: the resolved document or a failure reason."""

    # The resolved document; None when resolution failed.
    document: Document | None
    # Set only when `document` is None.
    error: VersionResolutionError | None = None
def _document_manager(*, include_deleted: bool) -> Any:
    """Pick the Document manager: global (includes soft-deleted) or default."""
    if include_deleted:
        return Document.global_objects
    return Document.objects
def get_request_version_param(request: HttpRequest) -> str | None:
    """Extract the raw ``?version=`` value from a DRF request, if present.

    Plain Django requests (no ``query_params`` attribute) yield ``None``.
    """
    if not hasattr(request, "query_params"):
        return None
    return request.query_params.get("version")
def get_root_document(doc: Document, *, include_deleted: bool = False) -> Document:
    """Return the root of the version chain *doc* belongs to.

    Uses ``root_document_id`` so no extra query runs when *doc* is already a
    root; falls back to a direct lookup (and finally to *doc* itself) when the
    related object cannot be loaded.
    """
    root_id = doc.root_document_id
    if root_id is None:
        # doc is itself the root of its chain.
        return doc
    if doc.root_document is not None:
        return doc.root_document
    # Relation unavailable (e.g. filtered out): resolve the id directly.
    manager = _document_manager(include_deleted=include_deleted)
    resolved = manager.only("id").filter(id=root_id).first()
    if resolved is not None:
        return resolved
    return doc
def get_latest_version_for_root(
    root_doc: Document,
    *,
    include_deleted: bool = False,
) -> Document:
    """Return the newest version (highest id) in *root_doc*'s chain.

    Falls back to *root_doc* itself when no separate versions exist.
    """
    manager = _document_manager(include_deleted=include_deleted)
    newest = manager.filter(root_document=root_doc).order_by("-id").first()
    if newest is None:
        return root_doc
    return newest
def resolve_requested_version_for_root(
    root_doc: Document,
    request: Any,
    *,
    include_deleted: bool = False,
) -> VersionResolution:
    """Resolve the ``?version=`` parameter against *root_doc*'s chain.

    With no (or an empty) parameter the latest version is returned. A
    non-integer value yields ``INVALID``; an unknown id, or one outside this
    root's chain, yields ``NOT_FOUND``.
    """
    raw_version = get_request_version_param(request)
    if not raw_version:
        latest = get_latest_version_for_root(
            root_doc,
            include_deleted=include_deleted,
        )
        return VersionResolution(document=latest)

    try:
        requested_id = int(raw_version)
    except (TypeError, ValueError):
        return VersionResolution(document=None, error=VersionResolutionError.INVALID)

    manager = _document_manager(include_deleted=include_deleted)
    candidate = manager.only("id", "root_document_id").filter(id=requested_id).first()
    related = candidate is not None and (
        candidate.id == root_doc.id or candidate.root_document_id == root_doc.id
    )
    if not related:
        return VersionResolution(document=None, error=VersionResolutionError.NOT_FOUND)
    return VersionResolution(document=candidate)
def resolve_effective_document(
    request_doc: Document,
    request: Any,
    *,
    include_deleted: bool = False,
) -> VersionResolution:
    """Determine which document a request actually targets.

    An explicit ``?version=`` parameter wins. Otherwise addressing a root
    yields its latest version, while addressing a version yields that version
    unchanged.
    """
    root_doc = get_root_document(request_doc, include_deleted=include_deleted)

    if get_request_version_param(request) is not None:
        # Caller asked for a specific version explicitly.
        return resolve_requested_version_for_root(
            root_doc,
            request,
            include_deleted=include_deleted,
        )

    if request_doc.root_document_id is not None:
        # A concrete version was addressed directly: keep it.
        return VersionResolution(document=request_doc)

    latest = get_latest_version_for_root(root_doc, include_deleted=include_deleted)
    return VersionResolution(document=latest)
def resolve_effective_document_by_pk(
    pk: int,
    request: Any,
    *,
    include_deleted: bool = False,
) -> VersionResolution:
    """Like :func:`resolve_effective_document`, starting from a primary key.

    Unknown pks resolve to ``NOT_FOUND`` instead of raising.
    """
    manager = _document_manager(include_deleted=include_deleted)
    if (
        request_doc := manager.only("id", "root_document_id").filter(pk=pk).first()
    ) is None:
        return VersionResolution(document=None, error=VersionResolutionError.NOT_FOUND)
    return resolve_effective_document(
        request_doc,
        request,
        include_deleted=include_deleted,
    )
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/versioning.py",
"license": "GNU General Public License v3.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/documents/plugins/date_parsing/base.py | import datetime
import logging
from abc import ABC
from abc import abstractmethod
from collections.abc import Iterator
from dataclasses import dataclass
from types import TracebackType
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import dateparser
logger = logging.getLogger(__name__)
@dataclass(frozen=True, slots=True)
class DateParserConfig:
    """
    Configuration for a DateParser instance.
    This object is created by the factory and passed to the
    parser's constructor, decoupling the parser from settings.
    """

    # Locales passed through to dateparser (e.g. ["en", "de"]).
    languages: list[str]
    # IANA timezone name used for timezone-aware parsing.
    timezone_str: str
    # Dates that must never be accepted as a document date.
    ignore_dates: set[datetime.date]
    # A "now" timestamp for filtering future dates.
    # Passed in by the factory.
    reference_time: datetime.datetime
    # Settings for the default RegexDateParser
    # Other plugins should use or consider these, but it is not required
    filename_date_order: str | None
    content_date_order: str
class DateParserPluginBase(ABC):
    """
    Abstract base class for date parsing strategies.

    Concrete plugins implement :meth:`parse`; the shared helpers here handle
    string parsing via ``dateparser`` and plausibility filtering. Instances
    are configured via a :class:`DateParserConfig` object and may be used as
    context managers for resource management.
    """

    def __init__(self, config: DateParserConfig):
        """Store the configuration this parser operates with."""
        self.config = config

    def __enter__(self) -> Self:
        """
        Enter the runtime context.

        Subclasses can override this to acquire resources
        (connections, handles).
        """
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        """
        Exit the runtime context.

        The default releases nothing and, by returning None,
        lets any exception propagate. Subclasses can override
        this to release resources.
        """

    def _parse_string(
        self,
        date_string: str,
        date_order: str,
    ) -> datetime.datetime | None:
        """
        Parse a single date string with ``dateparser``, using the configured
        locales and timezone. Returns None (and logs) on any parser error.
        """
        parser_settings = {
            "DATE_ORDER": date_order,
            "PREFER_DAY_OF_MONTH": "first",
            "RETURN_AS_TIMEZONE_AWARE": True,
            "TIMEZONE": self.config.timezone_str,
        }
        try:
            return dateparser.parse(
                date_string,
                settings=parser_settings,
                locales=self.config.languages,
            )
        except Exception as e:
            logger.error(f"Error while parsing date string '{date_string}': {e}")
            return None

    def _filter_date(
        self,
        date: datetime.datetime | None,
    ) -> datetime.datetime | None:
        """
        Validate a parsed datetime: reject None, implausibly old years,
        dates after the configured reference time, and ignored dates.
        Returns the date if it passes all checks, else None.
        """
        if date is None:
            return None
        if date.year <= 1900:
            return None
        if date > self.config.reference_time:
            return None
        if date.date() in self.config.ignore_dates:
            return None
        return date

    @abstractmethod
    def parse(self, filename: str, content: str) -> Iterator[datetime.datetime]:
        """
        Parses a document's filename and content, yielding valid datetime objects.
        """
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/plugins/date_parsing/base.py",
"license": "GNU General Public License v3.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/documents/tests/date_parsing/test_date_parser_plugin_loading.py | import datetime
import logging
from collections.abc import Iterator
from importlib.metadata import EntryPoint
import pytest
import pytest_mock
from django.utils import timezone
from documents.plugins.date_parsing import DATE_PARSER_ENTRY_POINT_GROUP
from documents.plugins.date_parsing import _discover_parser_class
from documents.plugins.date_parsing import get_date_parser
from documents.plugins.date_parsing.base import DateParserConfig
from documents.plugins.date_parsing.base import DateParserPluginBase
from documents.plugins.date_parsing.regex_parser import RegexDateParserPlugin
class AlphaParser(DateParserPluginBase):
    """Stub plugin used to test entry-point discovery; yields the current time."""

    def parse(self, filename: str, content: str) -> Iterator[datetime.datetime]:
        yield timezone.now()
class BetaParser(DateParserPluginBase):
    """Second stub plugin; lets tests check ordering when multiple plugins exist."""

    def parse(self, filename: str, content: str) -> Iterator[datetime.datetime]:
        yield timezone.now()
@pytest.mark.date_parsing
@pytest.mark.usefixtures("clear_lru_cache")
class TestDiscoverParserClass:
    """Tests for the _discover_parser_class() function.

    The ``clear_lru_cache`` fixture presumably resets the memoized discovery
    result between tests (defined in conftest — TODO confirm); the cache test
    at the bottom of this class depends on that behavior.
    """
    def test_returns_default_when_no_plugins_found(
        self,
        mocker: pytest_mock.MockerFixture,
    ) -> None:
        """With no entry points registered, discovery falls back to the regex parser."""
        mocker.patch(
            "documents.plugins.date_parsing.entry_points",
            return_value=(),
        )
        result = _discover_parser_class()
        assert result is RegexDateParserPlugin
    def test_returns_default_when_entrypoint_query_fails(
        self,
        mocker: pytest_mock.MockerFixture,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """If querying entry points raises, the error is logged and the default is used."""
        mocker.patch(
            "documents.plugins.date_parsing.entry_points",
            side_effect=RuntimeError("boom"),
        )
        result = _discover_parser_class()
        assert result is RegexDateParserPlugin
        assert "Could not query entry points" in caplog.text
    def test_filters_out_invalid_plugins(
        self,
        mocker: pytest_mock.MockerFixture,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """An entry point whose loaded object is not a DateParser subclass is rejected."""
        fake_ep = mocker.MagicMock(spec=EntryPoint)
        fake_ep.name = "bad_plugin"
        fake_ep.load.return_value = object  # not subclass of DateParser
        mocker.patch(
            "documents.plugins.date_parsing.entry_points",
            return_value=(fake_ep,),
        )
        result = _discover_parser_class()
        assert result is RegexDateParserPlugin
        assert "does not subclass DateParser" in caplog.text
    def test_skips_plugins_that_fail_to_load(
        self,
        mocker: pytest_mock.MockerFixture,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """An entry point whose load() raises is skipped with a log message."""
        fake_ep = mocker.MagicMock(spec=EntryPoint)
        fake_ep.name = "failing_plugin"
        fake_ep.load.side_effect = ImportError("cannot import")
        mocker.patch(
            "documents.plugins.date_parsing.entry_points",
            return_value=(fake_ep,),
        )
        result = _discover_parser_class()
        assert result is RegexDateParserPlugin
        assert "Unable to load date parser plugin failing_plugin" in caplog.text
    def test_returns_single_valid_plugin_without_warning(
        self,
        mocker: pytest_mock.MockerFixture,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """If exactly one valid plugin is discovered, it should be returned without logging a warning."""
        ep = mocker.MagicMock(spec=EntryPoint)
        ep.name = "alpha"
        ep.load.return_value = AlphaParser
        mock_entry_points = mocker.patch(
            "documents.plugins.date_parsing.entry_points",
            return_value=(ep,),
        )
        with caplog.at_level(
            logging.WARNING,
            logger="documents.plugins.date_parsing",
        ):
            result = _discover_parser_class()
        # It should have called entry_points with the correct group
        mock_entry_points.assert_called_once_with(group=DATE_PARSER_ENTRY_POINT_GROUP)
        # The discovered class should be exactly our AlphaParser
        assert result is AlphaParser
        # No warnings should have been logged
        assert not any(
            "Multiple date parsers found" in record.message for record in caplog.records
        ), "Unexpected warning logged when only one plugin was found"
    def test_returns_first_valid_plugin_by_name(
        self,
        mocker: pytest_mock.MockerFixture,
    ) -> None:
        """Plugins are selected by entry-point name order ("alpha" before "beta"),
        regardless of the order in which entry_points() yields them."""
        ep_a = mocker.MagicMock(spec=EntryPoint)
        ep_a.name = "alpha"
        ep_a.load.return_value = AlphaParser
        ep_b = mocker.MagicMock(spec=EntryPoint)
        ep_b.name = "beta"
        ep_b.load.return_value = BetaParser
        # Deliberately yield beta first to prove ordering is by name, not position.
        mocker.patch(
            "documents.plugins.date_parsing.entry_points",
            return_value=(ep_b, ep_a),
        )
        result = _discover_parser_class()
        assert result is AlphaParser
    def test_logs_warning_if_multiple_plugins_found(
        self,
        mocker: pytest_mock.MockerFixture,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """When several valid plugins are registered, one is chosen and a warning is logged."""
        ep1 = mocker.MagicMock(spec=EntryPoint)
        ep1.name = "a"
        ep1.load.return_value = AlphaParser
        ep2 = mocker.MagicMock(spec=EntryPoint)
        ep2.name = "b"
        ep2.load.return_value = BetaParser
        mocker.patch(
            "documents.plugins.date_parsing.entry_points",
            return_value=(ep1, ep2),
        )
        with caplog.at_level(
            logging.WARNING,
            logger="documents.plugins.date_parsing",
        ):
            result = _discover_parser_class()
        # Should select alphabetically first plugin ("a")
        assert result is AlphaParser
        # Should log a warning mentioning multiple parsers
        assert any(
            "Multiple date parsers found" in record.message for record in caplog.records
        ), "Expected a warning about multiple date parsers"
    def test_cache_behavior_only_runs_once(
        self,
        mocker: pytest_mock.MockerFixture,
    ) -> None:
        """The discovery result is cached; entry_points() is queried only once."""
        mock_entry_points = mocker.patch(
            "documents.plugins.date_parsing.entry_points",
            return_value=(),
        )
        # First call populates cache
        _discover_parser_class()
        # Second call should not re-invoke entry_points
        _discover_parser_class()
        mock_entry_points.assert_called_once()
@pytest.mark.django_db
@pytest.mark.date_parsing
@pytest.mark.usefixtures("mock_date_parser_settings")
class TestGetDateParser:
    """Tests for the get_date_parser() factory function."""

    def test_returns_instance_of_discovered_class(
        self,
        mocker: pytest_mock.MockerFixture,
    ) -> None:
        """The factory instantiates the discovered class with a settings-derived config."""
        mocker.patch(
            "documents.plugins.date_parsing._discover_parser_class",
            return_value=AlphaParser,
        )
        parser = get_date_parser()
        assert isinstance(parser, AlphaParser)
        cfg = parser.config
        assert isinstance(cfg, DateParserConfig)
        # Values below come from the mock_date_parser_settings fixture.
        assert cfg.languages == ["en", "de"]
        assert cfg.timezone_str == "UTC"
        assert cfg.ignore_dates == [datetime.date(1900, 1, 1)]
        assert cfg.filename_date_order == "YMD"
        assert cfg.content_date_order == "DMY"
        # reference_time should be "now" at construction time (small tolerance).
        drift = abs((cfg.reference_time - timezone.now()).total_seconds())
        assert drift < 2

    def test_uses_default_regex_parser_when_no_plugins(
        self,
        mocker: pytest_mock.MockerFixture,
    ) -> None:
        """When discovery reports the default class, a RegexDateParserPlugin is built."""
        mocker.patch(
            "documents.plugins.date_parsing._discover_parser_class",
            return_value=RegexDateParserPlugin,
        )
        assert isinstance(get_date_parser(), RegexDateParserPlugin)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/date_parsing/test_date_parser_plugin_loading.py",
"license": "GNU General Public License v3.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/tests/date_parsing/test_date_parsing.py | import datetime
import logging
from typing import Any
import pytest
import pytest_mock
from documents.plugins.date_parsing.base import DateParserConfig
from documents.plugins.date_parsing.regex_parser import RegexDateParserPlugin
@pytest.mark.date_parsing
class TestParseString:
    """Tests for DateParser._parse_string method via RegexDateParser."""
    # Each row: raw text, DATE_ORDER hint passed to dateparser, expected year.
    @pytest.mark.parametrize(
        ("date_string", "date_order", "expected_year"),
        [
            pytest.param("15/01/2024", "DMY", 2024, id="dmy_slash"),
            pytest.param("01/15/2024", "MDY", 2024, id="mdy_slash"),
            pytest.param("2024/01/15", "YMD", 2024, id="ymd_slash"),
            pytest.param("January 15, 2024", "DMY", 2024, id="month_name_comma"),
            pytest.param("15 Jan 2024", "DMY", 2024, id="day_abbr_month_year"),
            pytest.param("15.01.2024", "DMY", 2024, id="dmy_dot"),
            pytest.param("2024-01-15", "YMD", 2024, id="ymd_dash"),
        ],
    )
    def test_parse_string_valid_formats(
        self,
        regex_parser: RegexDateParserPlugin,
        date_string: str,
        date_order: str,
        expected_year: int,
    ) -> None:
        """Should correctly parse various valid date formats."""
        result = regex_parser._parse_string(date_string, date_order)
        assert result is not None
        assert result.year == expected_year
    @pytest.mark.parametrize(
        "invalid_string",
        [
            pytest.param("not a date", id="plain_text"),
            pytest.param("32/13/2024", id="invalid_day_month"),
            pytest.param("", id="empty_string"),
            pytest.param("abc123xyz", id="alphanumeric_gibberish"),
            pytest.param("99/99/9999", id="out_of_range"),
        ],
    )
    def test_parse_string_invalid_input(
        self,
        regex_parser: RegexDateParserPlugin,
        invalid_string: str,
    ) -> None:
        """Should return None for invalid date strings."""
        result = regex_parser._parse_string(invalid_string, "DMY")
        assert result is None
    def test_parse_string_handles_exceptions(
        self,
        caplog: pytest.LogCaptureFixture,
        mocker: pytest_mock.MockerFixture,
        regex_parser: RegexDateParserPlugin,
    ) -> None:
        """Should handle and log exceptions from dateparser gracefully."""
        with caplog.at_level(
            logging.ERROR,
            logger="documents.plugins.date_parsing.base",
        ):
            # We still need to mock dateparser.parse to force the exception
            mocker.patch(
                "documents.plugins.date_parsing.base.dateparser.parse",
                side_effect=ValueError(
                    "Parsing error: 01/01/2024",
                ),
            )
            # 1. Execute the function under test
            result = regex_parser._parse_string("01/01/2024", "DMY")
        assert result is None
        # Check if an error was logged
        assert len(caplog.records) == 1
        assert caplog.records[0].levelname == "ERROR"
        # Check if the specific error message is present
        assert "Error while parsing date string" in caplog.text
        # Optional: Check for the exact exception message if it's included in the log
        assert "Parsing error: 01/01/2024" in caplog.text
@pytest.mark.date_parsing
class TestFilterDate:
    """Tests for DateParser._filter_date method via RegexDateParser.

    NOTE(review): the ``config_with_ignore_dates`` fixture appears to pin
    reference_time to 2024-01-15 12:00 UTC (see the "exactly_at_reference"
    case) and to ignore Jan 1 and Dec 25 of 2024 — confirm against conftest.
    """
    @pytest.mark.parametrize(
        ("date", "expected_output"),
        [
            # Valid Dates
            pytest.param(
                datetime.datetime(2024, 1, 10, tzinfo=datetime.timezone.utc),
                datetime.datetime(2024, 1, 10, tzinfo=datetime.timezone.utc),
                id="valid_past_date",
            ),
            pytest.param(
                datetime.datetime(2024, 1, 15, 12, 0, 0, tzinfo=datetime.timezone.utc),
                datetime.datetime(2024, 1, 15, 12, 0, 0, tzinfo=datetime.timezone.utc),
                id="exactly_at_reference",
            ),
            pytest.param(
                datetime.datetime(1901, 1, 1, tzinfo=datetime.timezone.utc),
                datetime.datetime(1901, 1, 1, tzinfo=datetime.timezone.utc),
                id="year_1901_valid",
            ),
            # Date is > reference_time
            pytest.param(
                datetime.datetime(2024, 1, 16, tzinfo=datetime.timezone.utc),
                None,
                id="future_date_day_after",
            ),
            # date.date() in ignore_dates
            pytest.param(
                datetime.datetime(2024, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
                None,
                id="ignored_date_midnight_jan1",
            ),
            pytest.param(
                datetime.datetime(2024, 1, 1, 10, 30, 0, tzinfo=datetime.timezone.utc),
                None,
                id="ignored_date_midday_jan1",
            ),
            pytest.param(
                datetime.datetime(2024, 12, 25, 15, 0, 0, tzinfo=datetime.timezone.utc),
                None,
                id="ignored_date_dec25_future",
            ),
            # date.year <= 1900
            pytest.param(
                datetime.datetime(1899, 12, 31, tzinfo=datetime.timezone.utc),
                None,
                id="year_1899",
            ),
            pytest.param(
                datetime.datetime(1900, 1, 1, tzinfo=datetime.timezone.utc),
                None,
                id="year_1900_boundary",
            ),
            # date is None
            pytest.param(None, None, id="none_input"),
        ],
    )
    def test_filter_date_validation_rules(
        self,
        config_with_ignore_dates: DateParserConfig,
        date: datetime.datetime | None,
        expected_output: datetime.datetime | None,
    ) -> None:
        """Should correctly validate dates against various rules."""
        parser = RegexDateParserPlugin(config_with_ignore_dates)
        result = parser._filter_date(date)
        assert result == expected_output
    def test_filter_date_respects_ignore_dates(
        self,
        config_with_ignore_dates: DateParserConfig,
    ) -> None:
        """Should filter out dates in the ignore_dates set."""
        parser = RegexDateParserPlugin(config_with_ignore_dates)
        # Ignored regardless of time-of-day: comparison is on .date() only.
        ignored_date = datetime.datetime(
            2024,
            1,
            1,
            12,
            0,
            tzinfo=datetime.timezone.utc,
        )
        another_ignored = datetime.datetime(
            2024,
            12,
            25,
            15,
            30,
            tzinfo=datetime.timezone.utc,
        )
        allowed_date = datetime.datetime(
            2024,
            1,
            2,
            12,
            0,
            tzinfo=datetime.timezone.utc,
        )
        assert parser._filter_date(ignored_date) is None
        assert parser._filter_date(another_ignored) is None
        assert parser._filter_date(allowed_date) == allowed_date
    def test_filter_date_timezone_aware(
        self,
        regex_parser: RegexDateParserPlugin,
    ) -> None:
        """Should work with timezone-aware datetimes."""
        date_utc = datetime.datetime(2024, 1, 10, 12, 0, tzinfo=datetime.timezone.utc)
        result = regex_parser._filter_date(date_utc)
        assert result is not None
        assert result.tzinfo is not None
@pytest.mark.date_parsing
class TestRegexDateParser:
    """End-to-end tests for RegexDateParserPlugin.parse.

    dateparser.parse is replaced by a deterministic fake in every test, so
    these tests exercise only the plugin's extraction, ordering and filtering.
    """

    @pytest.mark.parametrize(
        ("filename", "content", "expected"),
        [
            pytest.param(
                "report-2023-12-25.txt",
                "Event recorded on 25/12/2022.",
                [
                    datetime.datetime(2023, 12, 25, tzinfo=datetime.timezone.utc),
                    datetime.datetime(2022, 12, 25, tzinfo=datetime.timezone.utc),
                ],
                id="filename-y-m-d_and_content-d-m-y",
            ),
            pytest.param(
                "img_2023.01.02.jpg",
                "Taken on 01/02/2023",
                [
                    datetime.datetime(2023, 1, 2, tzinfo=datetime.timezone.utc),
                    datetime.datetime(2023, 2, 1, tzinfo=datetime.timezone.utc),
                ],
                id="ambiguous-dates-respect-orders",
            ),
            pytest.param(
                "notes.txt",
                "bad date 99/99/9999 and 25/12/2022",
                [
                    datetime.datetime(2022, 12, 25, tzinfo=datetime.timezone.utc),
                ],
                id="parse-exception-skips-bad-and-yields-good",
            ),
        ],
    )
    def test_parse_returns_expected_dates(
        self,
        base_config: DateParserConfig,
        mocker: pytest_mock.MockerFixture,
        filename: str,
        content: str,
        expected: list[datetime.datetime],
    ) -> None:
        """
        High-level tests that exercise RegexDateParser.parse only.
        dateparser.parse is mocked so tests are deterministic.
        """
        parser = RegexDateParserPlugin(base_config)
        # Patch the dateparser.parse
        target = "documents.plugins.date_parsing.base.dateparser.parse"
        def fake_parse(
            date_string: str,
            settings: dict[str, Any] | None = None,
            locales: None = None,
        ) -> datetime.datetime | None:
            date_order = settings.get("DATE_ORDER") if settings else None
            # Filename-style YYYY-MM-DD / YYYY.MM.DD
            if "2023-12-25" in date_string or "2023.12.25" in date_string:
                return datetime.datetime(2023, 12, 25, tzinfo=datetime.timezone.utc)
            # content DMY 25/12/2022
            if "25/12/2022" in date_string or "25-12-2022" in date_string:
                return datetime.datetime(2022, 12, 25, tzinfo=datetime.timezone.utc)
            # filename YMD 2023.01.02
            if "2023.01.02" in date_string or "2023-01-02" in date_string:
                return datetime.datetime(2023, 1, 2, tzinfo=datetime.timezone.utc)
            # ambiguous 01/02/2023 -> respect DATE_ORDER setting
            if "01/02/2023" in date_string:
                if date_order == "DMY":
                    return datetime.datetime(2023, 2, 1, tzinfo=datetime.timezone.utc)
                if date_order == "YMD":
                    return datetime.datetime(2023, 1, 2, tzinfo=datetime.timezone.utc)
                # fallback
                return datetime.datetime(2023, 2, 1, tzinfo=datetime.timezone.utc)
            # simulate parse failure for malformed input
            if "99/99/9999" in date_string or "bad date" in date_string:
                raise Exception("parse failed for malformed date")
            return None
        mocker.patch(target, side_effect=fake_parse)
        results = list(parser.parse(filename, content))
        assert results == expected
        # Every yielded datetime must be timezone-aware.
        for dt in results:
            assert dt.tzinfo is not None

    def test_parse_filters_future_and_ignored_dates(
        self,
        mocker: pytest_mock.MockerFixture,
    ) -> None:
        """
        Ensure parser filters out:
        - dates after reference_time
        - dates whose .date() are in ignore_dates
        """
        cfg = DateParserConfig(
            languages=["en"],
            timezone_str="UTC",
            ignore_dates={datetime.date(2023, 12, 10)},
            reference_time=datetime.datetime(
                2024,
                1,
                15,
                12,
                0,
                0,
                tzinfo=datetime.timezone.utc,
            ),
            filename_date_order="YMD",
            content_date_order="DMY",
        )
        parser = RegexDateParserPlugin(cfg)
        target = "documents.plugins.date_parsing.base.dateparser.parse"
        def fake_parse(
            date_string: str,
            settings: dict[str, Any] | None = None,
            locales: None = None,
        ) -> datetime.datetime | None:
            if "10/12/2023" in date_string or "10-12-2023" in date_string:
                # ignored date
                return datetime.datetime(2023, 12, 10, tzinfo=datetime.timezone.utc)
            if "01/02/2024" in date_string or "01-02-2024" in date_string:
                # future relative to reference_time -> filtered
                return datetime.datetime(2024, 2, 1, tzinfo=datetime.timezone.utc)
            if "05/01/2023" in date_string or "05-01-2023" in date_string:
                # valid
                return datetime.datetime(2023, 1, 5, tzinfo=datetime.timezone.utc)
            return None
        mocker.patch(target, side_effect=fake_parse)
        content = "Ignored: 10/12/2023, Future: 01/02/2024, Keep: 05/01/2023"
        results = list(parser.parse("whatever.txt", content))
        # Only the "Keep" date survives the filters.
        assert results == [datetime.datetime(2023, 1, 5, tzinfo=datetime.timezone.utc)]

    def test_parse_handles_no_matches_and_returns_empty_list(
        self,
        base_config: DateParserConfig,
    ) -> None:
        """
        When there are no matching date-like substrings, parse should yield nothing.
        """
        parser = RegexDateParserPlugin(base_config)
        results = list(
            parser.parse("no-dates.txt", "this has no dates whatsoever"),
        )
        assert results == []

    def test_parse_skips_filename_when_filename_date_order_none(
        self,
        mocker: pytest_mock.MockerFixture,
    ) -> None:
        """
        When filename_date_order is None the parser must not attempt to parse the filename.
        Only dates found in the content should be passed to dateparser.parse.
        """
        cfg = DateParserConfig(
            languages=["en"],
            timezone_str="UTC",
            ignore_dates=set(),
            reference_time=datetime.datetime(
                2024,
                1,
                15,
                12,
                0,
                0,
                tzinfo=datetime.timezone.utc,
            ),
            filename_date_order=None,
            content_date_order="DMY",
        )
        parser = RegexDateParserPlugin(cfg)
        # Patch the module's dateparser.parse so we can inspect calls
        target = "documents.plugins.date_parsing.base.dateparser.parse"
        def fake_parse(
            date_string: str,
            settings: dict[str, Any] | None = None,
            locales: None = None,
        ) -> datetime.datetime | None:
            # return distinct datetimes so we can tell which source was parsed
            if "25/12/2022" in date_string:
                return datetime.datetime(2022, 12, 25, tzinfo=datetime.timezone.utc)
            if "2023-12-25" in date_string:
                return datetime.datetime(2023, 12, 25, tzinfo=datetime.timezone.utc)
            return None
        mock = mocker.patch(target, side_effect=fake_parse)
        filename = "report-2023-12-25.txt"
        content = "Event recorded on 25/12/2022."
        results = list(parser.parse(filename, content))
        # Only the content date should have been parsed -> one call
        assert mock.call_count == 1
        # first call, first positional arg
        called_date_string = mock.call_args_list[0][0][0]
        assert "25/12/2022" in called_date_string
        # And the parser should have yielded the corresponding datetime
        assert results == [
            datetime.datetime(2022, 12, 25, tzinfo=datetime.timezone.utc),
        ]
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/date_parsing/test_date_parsing.py",
"license": "GNU General Public License v3.0",
"lines": 383,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/tests/test_migration_share_link_bundle.py | from documents.tests.utils import TestMigrations
class TestMigrateShareLinkBundlePermissions(TestMigrations):
    """Forward migration: add_document holders gain the sharelinkbundle perms."""

    migrate_from = "0007_document_content_length"
    migrate_to = "0008_sharelinkbundle"

    def setUpBeforeMigration(self, apps) -> None:
        """Create a user and a group holding add_document before migrating."""
        # Resolve historical models through the migration state, not imports.
        user_model = apps.get_model("auth", "User")
        group_model = apps.get_model("auth", "Group")
        self.Permission = apps.get_model("auth", "Permission")
        self.user = user_model.objects.create(username="user1")
        self.group = group_model.objects.create(name="group1")
        perm = self.Permission.objects.get(codename="add_document")
        self.user.user_permissions.add(perm.id)
        self.group.permissions.add(perm.id)

    def test_share_link_permissions_granted_to_add_document_holders(self) -> None:
        """Both holders of add_document end up with the new bundle permissions."""
        bundle_perms = self.Permission.objects.filter(
            codename__contains="sharelinkbundle",
        )
        self.assertTrue(
            self.user.user_permissions.filter(pk__in=bundle_perms).exists(),
        )
        self.assertTrue(
            self.group.permissions.filter(pk__in=bundle_perms).exists(),
        )
class TestReverseMigrateShareLinkBundlePermissions(TestMigrations):
    """Reverse migration: the sharelinkbundle permissions are revoked again."""

    migrate_from = "0008_sharelinkbundle"
    migrate_to = "0007_document_content_length"

    def setUpBeforeMigration(self, apps) -> None:
        """Grant add_document plus all bundle permissions before reversing."""
        user_model = apps.get_model("auth", "User")
        group_model = apps.get_model("auth", "Group")
        self.Permission = apps.get_model("auth", "Permission")
        self.user = user_model.objects.create(username="user1")
        self.group = group_model.objects.create(name="group1")
        perm = self.Permission.objects.get(codename="add_document")
        bundle_perms = self.Permission.objects.filter(
            codename__contains="sharelinkbundle",
        )
        # Remember the ids so the test can assert on them after the reverse run.
        self.share_perm_ids = list(bundle_perms.values_list("id", flat=True))
        self.user.user_permissions.add(perm.id, *self.share_perm_ids)
        self.group.permissions.add(perm.id, *self.share_perm_ids)

    def test_share_link_permissions_revoked_on_reverse(self) -> None:
        """Neither the user nor the group keeps any bundle permission."""
        for holder in (self.user.user_permissions, self.group.permissions):
            self.assertFalse(holder.filter(pk__in=self.share_perm_ids).exists())
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/test_migration_share_link_bundle.py",
"license": "GNU General Public License v3.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/tests/test_share_link_bundles.py | from __future__ import annotations
import zipfile
from datetime import timedelta
from pathlib import Path
from unittest import mock
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone
from rest_framework import serializers
from rest_framework import status
from rest_framework.test import APITestCase
from documents.filters import ShareLinkBundleFilterSet
from documents.models import ShareLink
from documents.models import ShareLinkBundle
from documents.serialisers import ShareLinkBundleSerializer
from documents.tasks import build_share_link_bundle
from documents.tasks import cleanup_expired_share_link_bundles
from documents.tests.factories import DocumentFactory
from documents.tests.utils import DirectoriesMixin
class ShareLinkBundleAPITests(DirectoriesMixin, APITestCase):
    """API tests for the share-link-bundle endpoints and the public /share/ view."""
    ENDPOINT = "/api/share_link_bundles/"
    def setUp(self) -> None:
        """Authenticate as a superuser and create one document to bundle."""
        super().setUp()
        self.user = User.objects.create_superuser(username="bundle_admin")
        self.client.force_authenticate(self.user)
        self.document = DocumentFactory.create()
    @mock.patch("documents.views.build_share_link_bundle.delay")
    def test_create_bundle_triggers_build_job(self, delay_mock) -> None:
        """Creating a bundle stores it as PENDING and queues the build task."""
        payload = {
            "document_ids": [self.document.pk],
            "file_version": ShareLink.FileVersion.ARCHIVE,
            "expiration_days": 7,
        }
        response = self.client.post(self.ENDPOINT, payload, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        bundle = ShareLinkBundle.objects.get(pk=response.data["id"])
        self.assertEqual(bundle.documents.count(), 1)
        self.assertEqual(bundle.status, ShareLinkBundle.Status.PENDING)
        delay_mock.assert_called_once_with(bundle.pk)
    def test_create_bundle_rejects_missing_documents(self) -> None:
        """A document id that does not exist yields a 400 on document_ids."""
        payload = {
            "document_ids": [9999],
            "file_version": ShareLink.FileVersion.ARCHIVE,
            "expiration_days": 7,
        }
        response = self.client.post(self.ENDPOINT, payload, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn("document_ids", response.data)
    @mock.patch("documents.views.has_perms_owner_aware", return_value=False)
    def test_create_bundle_rejects_insufficient_permissions(self, perms_mock) -> None:
        """Documents the user may not access are rejected as if invalid."""
        payload = {
            "document_ids": [self.document.pk],
            "file_version": ShareLink.FileVersion.ARCHIVE,
            "expiration_days": 7,
        }
        response = self.client.post(self.ENDPOINT, payload, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn("document_ids", response.data)
        perms_mock.assert_called()
    @mock.patch("documents.views.build_share_link_bundle.delay")
    def test_rebuild_bundle_resets_state(self, delay_mock) -> None:
        """Rebuilding a FAILED bundle clears its error state and re-queues the build."""
        bundle = ShareLinkBundle.objects.create(
            slug="rebuild-slug",
            file_version=ShareLink.FileVersion.ARCHIVE,
            status=ShareLinkBundle.Status.FAILED,
        )
        bundle.documents.set([self.document])
        bundle.last_error = {"message": "Something went wrong"}
        bundle.size_bytes = 100
        bundle.file_path = "path/to/file.zip"
        bundle.save()
        response = self.client.post(f"{self.ENDPOINT}{bundle.pk}/rebuild/")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        bundle.refresh_from_db()
        # All build artifacts/state must be reset before re-queuing.
        self.assertEqual(bundle.status, ShareLinkBundle.Status.PENDING)
        self.assertIsNone(bundle.last_error)
        self.assertIsNone(bundle.size_bytes)
        self.assertEqual(bundle.file_path, "")
        delay_mock.assert_called_once_with(bundle.pk)
    def test_rebuild_bundle_rejects_processing_status(self) -> None:
        """A bundle currently PROCESSING cannot be rebuilt."""
        bundle = ShareLinkBundle.objects.create(
            slug="processing-slug",
            file_version=ShareLink.FileVersion.ARCHIVE,
            status=ShareLinkBundle.Status.PROCESSING,
        )
        bundle.documents.set([self.document])
        response = self.client.post(f"{self.ENDPOINT}{bundle.pk}/rebuild/")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn("detail", response.data)
    def test_create_bundle_rejects_duplicate_documents(self) -> None:
        """Repeating the same document id in the payload is a validation error."""
        payload = {
            "document_ids": [self.document.pk, self.document.pk],
            "file_version": ShareLink.FileVersion.ARCHIVE,
            "expiration_days": 7,
        }
        response = self.client.post(self.ENDPOINT, payload, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn("document_ids", response.data)
    def test_download_ready_bundle_streams_file(self) -> None:
        """A READY bundle is streamed as a zip attachment, without authentication."""
        bundle_file = Path(self.dirs.media_dir) / "bundles" / "ready.zip"
        bundle_file.parent.mkdir(parents=True, exist_ok=True)
        bundle_file.write_bytes(b"binary-zip-content")
        bundle = ShareLinkBundle.objects.create(
            slug="readyslug",
            file_version=ShareLink.FileVersion.ARCHIVE,
            status=ShareLinkBundle.Status.READY,
            file_path=str(bundle_file),
        )
        bundle.documents.set([self.document])
        # The share view is public: log out before fetching.
        self.client.logout()
        response = self.client.get(f"/share/{bundle.slug}/")
        content = b"".join(response.streaming_content)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response["Content-Type"], "application/zip")
        self.assertEqual(content, b"binary-zip-content")
        self.assertIn("attachment;", response["Content-Disposition"])
    def test_download_pending_bundle_returns_202(self) -> None:
        """A bundle still being built answers 202 Accepted."""
        bundle = ShareLinkBundle.objects.create(
            slug="pendingslug",
            file_version=ShareLink.FileVersion.ARCHIVE,
            status=ShareLinkBundle.Status.PENDING,
        )
        bundle.documents.set([self.document])
        self.client.logout()
        response = self.client.get(f"/share/{bundle.slug}/")
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
    def test_download_failed_bundle_returns_503(self) -> None:
        """A FAILED bundle answers 503 Service Unavailable."""
        bundle = ShareLinkBundle.objects.create(
            slug="failedslug",
            file_version=ShareLink.FileVersion.ARCHIVE,
            status=ShareLinkBundle.Status.FAILED,
        )
        bundle.documents.set([self.document])
        self.client.logout()
        response = self.client.get(f"/share/{bundle.slug}/")
        self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)
    def test_expired_share_link_redirects(self) -> None:
        """An expired single-document share link redirects with an expiry flag."""
        share_link = ShareLink.objects.create(
            slug="expiredlink",
            document=self.document,
            file_version=ShareLink.FileVersion.ORIGINAL,
            expiration=timezone.now() - timedelta(hours=1),
        )
        self.client.logout()
        response = self.client.get(f"/share/{share_link.slug}/")
        self.assertEqual(response.status_code, status.HTTP_302_FOUND)
        self.assertIn("sharelink_expired=1", response["Location"])
    def test_unknown_share_link_redirects(self) -> None:
        """An unknown slug redirects with a not-found flag."""
        self.client.logout()
        response = self.client.get("/share/unknownsharelink/")
        self.assertEqual(response.status_code, status.HTTP_302_FOUND)
        self.assertIn("sharelink_notfound=1", response["Location"])
class ShareLinkBundleTaskTests(DirectoriesMixin, APITestCase):
    """Tests for the periodic cleanup task for share link bundles."""

    def setUp(self) -> None:
        super().setUp()
        self.document = DocumentFactory.create()

    def _ready_bundle(self, *, slug, expiration, file_path=None):
        """Create a READY archive-version bundle linked to the test document."""
        kwargs = {
            "slug": slug,
            "file_version": ShareLink.FileVersion.ARCHIVE,
            "status": ShareLinkBundle.Status.READY,
            "expiration": expiration,
        }
        if file_path is not None:
            kwargs["file_path"] = file_path
        bundle = ShareLinkBundle.objects.create(**kwargs)
        bundle.documents.set([self.document])
        return bundle

    def test_cleanup_expired_share_link_bundles(self) -> None:
        """Expired bundles and their files are removed; active ones survive."""
        media_root = Path(self.dirs.media_dir)
        stale_file = media_root / "expired.zip"
        stale_file.parent.mkdir(parents=True, exist_ok=True)
        stale_file.write_bytes(b"expired")
        live_file = media_root / "active.zip"
        live_file.write_bytes(b"active")
        stale = self._ready_bundle(
            slug="expired-bundle",
            expiration=timezone.now() - timedelta(days=1),
            file_path=str(stale_file),
        )
        live = self._ready_bundle(
            slug="active-bundle",
            expiration=timezone.now() + timedelta(days=1),
            file_path=str(live_file),
        )
        cleanup_expired_share_link_bundles()
        self.assertFalse(ShareLinkBundle.objects.filter(pk=stale.pk).exists())
        self.assertTrue(ShareLinkBundle.objects.filter(pk=live.pk).exists())
        self.assertFalse(stale_file.exists())
        self.assertTrue(live_file.exists())

    def test_cleanup_expired_share_link_bundles_logs_on_failure(self) -> None:
        """A deletion error is caught and reported via the paperless.tasks logger."""
        self._ready_bundle(
            slug="expired-bundle",
            expiration=timezone.now() - timedelta(days=1),
        )
        delete_patch = mock.patch.object(
            ShareLinkBundle,
            "delete",
            side_effect=RuntimeError("fail"),
        )
        with delete_patch:
            with self.assertLogs("paperless.tasks", level="WARNING") as logs:
                cleanup_expired_share_link_bundles()
        self.assertTrue(
            any(
                "Failed to delete expired share link bundle" in msg
                for msg in logs.output
            ),
        )
class ShareLinkBundleBuildTaskTests(DirectoriesMixin, APITestCase):
    """Tests for the build_share_link_bundle task (zip creation and failure handling)."""
    def setUp(self) -> None:
        """Create a PDF document and point SHARE_LINK_BUNDLE_DIR into the temp media root."""
        super().setUp()
        self.document = DocumentFactory.create(
            mime_type="application/pdf",
            checksum="123",
        )
        self.document.archive_checksum = ""
        self.document.save()
        # Restore the original setting after the test; addCleanup captures the
        # current value before we overwrite it below.
        self.addCleanup(
            setattr,
            settings,
            "SHARE_LINK_BUNDLE_DIR",
            settings.SHARE_LINK_BUNDLE_DIR,
        )
        settings.SHARE_LINK_BUNDLE_DIR = (
            Path(settings.MEDIA_ROOT) / "documents" / "share_link_bundles"
        )
    def _write_document_file(self, *, archive: bool, content: bytes) -> Path:
        """Write bytes to the document's archive or source file and return its path."""
        if archive:
            self.document.archive_filename = f"{self.document.pk:07}.pdf"
            self.document.save()
            path = self.document.archive_path
        else:
            path = self.document.source_path
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_bytes(content)
        return path
    def test_build_share_link_bundle_creates_zip_and_sets_metadata(self) -> None:
        """A successful build produces a zip of the archive file and fills metadata."""
        self._write_document_file(archive=False, content=b"source")
        archive_path = self._write_document_file(archive=True, content=b"archive")
        bundle = ShareLinkBundle.objects.create(
            slug="build-archive",
            file_version=ShareLink.FileVersion.ARCHIVE,
        )
        bundle.documents.set([self.document])
        build_share_link_bundle(bundle.pk)
        bundle.refresh_from_db()
        self.assertEqual(bundle.status, ShareLinkBundle.Status.READY)
        self.assertIsNone(bundle.last_error)
        self.assertIsNotNone(bundle.built_at)
        self.assertGreater(bundle.size_bytes or 0, 0)
        final_path = bundle.absolute_file_path
        self.assertIsNotNone(final_path)
        self.assertTrue(final_path.exists())
        # ARCHIVE version: the zip must contain the archive bytes, not the source.
        with zipfile.ZipFile(final_path) as zipf:
            names = zipf.namelist()
            self.assertEqual(len(names), 1)
            self.assertEqual(zipf.read(names[0]), archive_path.read_bytes())
    def test_build_share_link_bundle_overwrites_existing_file(self) -> None:
        """A pre-existing zip at the destination is replaced by the new build."""
        self._write_document_file(archive=False, content=b"source")
        bundle = ShareLinkBundle.objects.create(
            slug="overwrite",
            file_version=ShareLink.FileVersion.ORIGINAL,
        )
        bundle.documents.set([self.document])
        existing = settings.SHARE_LINK_BUNDLE_DIR / "overwrite.zip"
        existing.parent.mkdir(parents=True, exist_ok=True)
        existing.write_bytes(b"old")
        build_share_link_bundle(bundle.pk)
        bundle.refresh_from_db()
        final_path = bundle.absolute_file_path
        self.assertIsNotNone(final_path)
        self.assertTrue(final_path.exists())
        self.assertNotEqual(final_path.read_bytes(), b"old")
    def test_build_share_link_bundle_failure_marks_failed(self) -> None:
        """A build error marks the bundle FAILED and records structured error info."""
        self._write_document_file(archive=False, content=b"source")
        bundle = ShareLinkBundle.objects.create(
            slug="fail-bundle",
            file_version=ShareLink.FileVersion.ORIGINAL,
        )
        bundle.documents.set([self.document])
        with (
            mock.patch(
                "documents.tasks.OriginalsOnlyStrategy.add_document",
                side_effect=RuntimeError("zip failure"),
            ),
            mock.patch("pathlib.Path.unlink") as unlink_mock,
        ):
            # First two unlink calls fail too, exercising the task's cleanup
            # error handling; subsequent calls succeed.
            unlink_mock.side_effect = [OSError("unlink"), OSError("unlink-finally")] + [
                None,
            ] * 5
            with self.assertRaises(RuntimeError):
                build_share_link_bundle(bundle.pk)
        bundle.refresh_from_db()
        self.assertEqual(bundle.status, ShareLinkBundle.Status.FAILED)
        self.assertIsInstance(bundle.last_error, dict)
        self.assertEqual(bundle.last_error.get("message"), "zip failure")
        self.assertEqual(bundle.last_error.get("exception_type"), "RuntimeError")
        # The mocked unlink prevented scratch cleanup; remove leftovers manually.
        scratch_zips = list(Path(settings.SCRATCH_DIR).glob("*.zip"))
        self.assertTrue(scratch_zips)
        for path in scratch_zips:
            path.unlink(missing_ok=True)
    def test_build_share_link_bundle_missing_bundle_noop(self) -> None:
        """Building a nonexistent bundle id must be a silent no-op."""
        # Should not raise when bundle does not exist
        build_share_link_bundle(99999)
class ShareLinkBundleFilterSetTests(DirectoriesMixin, APITestCase):
    """Tests for ShareLinkBundleFilterSet's custom ``documents`` filter."""

    def setUp(self) -> None:
        super().setUp()
        self.document = self._make_document("doc1checksum")
        self.other_document = self._make_document("doc2checksum")
        self.bundle_one = self._make_bundle("bundle-one", self.document)
        self.bundle_two = self._make_bundle("bundle-two", self.other_document)

    @staticmethod
    def _make_document(checksum: str):
        """Create a document with a distinct checksum."""
        doc = DocumentFactory.create()
        doc.checksum = checksum
        doc.save()
        return doc

    @staticmethod
    def _make_bundle(slug, document):
        """Create an ORIGINAL-version bundle containing a single document."""
        bundle = ShareLinkBundle.objects.create(
            slug=slug,
            file_version=ShareLink.FileVersion.ORIGINAL,
        )
        bundle.documents.set([document])
        return bundle

    def _filtered(self, raw: str):
        """Run the filterset over all bundles with the given raw query value."""
        return ShareLinkBundleFilterSet(
            data={"documents": raw},
            queryset=ShareLinkBundle.objects.all(),
        ).qs

    def test_filter_documents_returns_all_for_empty_value(self) -> None:
        self.assertCountEqual(self._filtered(""), [self.bundle_one, self.bundle_two])

    def test_filter_documents_handles_invalid_input(self) -> None:
        # Non-numeric input should match nothing rather than raise.
        self.assertFalse(self._filtered("invalid").exists())

    def test_filter_documents_filters_by_multiple_ids(self) -> None:
        raw = f"{self.document.pk},{self.other_document.pk}"
        self.assertCountEqual(self._filtered(raw), [self.bundle_one, self.bundle_two])

    def test_filter_documents_returns_queryset_for_empty_ids(self) -> None:
        # A separator with no ids behaves like no filter at all.
        self.assertCountEqual(self._filtered(","), [self.bundle_one, self.bundle_two])
class ShareLinkBundleModelTests(DirectoriesMixin, APITestCase):
    """Unit tests for ShareLinkBundle model helpers."""

    def _make_bundle(self, slug: str, **extra) -> ShareLinkBundle:
        # All bundles in these tests use the ORIGINAL file version.
        return ShareLinkBundle.objects.create(
            slug=slug,
            file_version=ShareLink.FileVersion.ORIGINAL,
            **extra,
        )

    def _write_zip(self, name: str) -> Path:
        # Create a small on-disk zip placeholder inside the bundle dir.
        path = settings.SHARE_LINK_BUNDLE_DIR / name
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_bytes(b"remove-me")
        return path

    def test_absolute_file_path_handles_relative_and_absolute(self) -> None:
        """Relative paths resolve against SHARE_LINK_BUNDLE_DIR; absolute ones pass through."""
        relative = Path("relative.zip")
        bundle = self._make_bundle("relative-bundle", file_path=str(relative))
        self.assertEqual(
            bundle.absolute_file_path,
            (settings.SHARE_LINK_BUNDLE_DIR / relative).resolve(),
        )
        absolute = Path(self.dirs.media_dir) / "absolute.zip"
        bundle.file_path = str(absolute)
        self.assertEqual(bundle.absolute_file_path.resolve(), absolute.resolve())

    def test_str_returns_translated_slug(self) -> None:
        """The string representation contains the bundle slug."""
        bundle = self._make_bundle("string-slug")
        self.assertIn("string-slug", str(bundle))

    def test_remove_file_deletes_existing_file(self) -> None:
        """remove_file() unlinks the backing zip."""
        path = self._write_zip("remove.zip")
        bundle = self._make_bundle(
            "remove-bundle",
            file_path=str(path.relative_to(settings.SHARE_LINK_BUNDLE_DIR)),
        )
        bundle.remove_file()
        self.assertFalse(path.exists())

    def test_remove_file_handles_oserror(self) -> None:
        """An OSError during unlink is swallowed and the file remains."""
        path = self._write_zip("remove-error.zip")
        bundle = self._make_bundle(
            "remove-error",
            file_path=str(path.relative_to(settings.SHARE_LINK_BUNDLE_DIR)),
        )
        with mock.patch("pathlib.Path.unlink", side_effect=OSError("fail")):
            bundle.remove_file()
        self.assertTrue(path.exists())

    def test_delete_calls_remove_file(self) -> None:
        """Deleting the model instance also removes the backing file."""
        path = self._write_zip("delete.zip")
        bundle = self._make_bundle(
            "delete-bundle",
            file_path=str(path.relative_to(settings.SHARE_LINK_BUNDLE_DIR)),
        )
        bundle.delete()
        self.assertFalse(path.exists())
class ShareLinkBundleSerializerTests(DirectoriesMixin, APITestCase):
    """Validation and creation behaviour of ShareLinkBundleSerializer."""

    def setUp(self) -> None:
        super().setUp()
        self.document = DocumentFactory.create()

    def _serializer(self, **payload) -> ShareLinkBundleSerializer:
        # Keyword args become the serializer's input data dict.
        return ShareLinkBundleSerializer(data=payload)

    def test_validate_document_ids_rejects_duplicates(self) -> None:
        """Duplicate document ids fail validation with a document_ids error."""
        serializer = self._serializer(
            document_ids=[self.document.pk, self.document.pk],
            file_version=ShareLink.FileVersion.ORIGINAL,
        )
        self.assertFalse(serializer.is_valid())
        self.assertIn("document_ids", serializer.errors)

    def test_create_assigns_documents_and_expiration(self) -> None:
        """save() links the documents and sets expiration from expiration_days."""
        serializer = self._serializer(
            document_ids=[self.document.pk],
            file_version=ShareLink.FileVersion.ORIGINAL,
            expiration_days=3,
        )
        self.assertTrue(serializer.is_valid(), serializer.errors)
        bundle = serializer.save()
        self.assertEqual(list(bundle.documents.all()), [self.document])
        # Allow generous slack for the wall-clock delta between save and check.
        self.assertAlmostEqual(
            bundle.expiration,
            timezone.now() + timedelta(days=3),
            delta=timedelta(seconds=10),
        )

    def test_create_raises_when_missing_documents(self) -> None:
        """save() raises when a requested document id does not exist."""
        serializer = self._serializer(
            document_ids=[self.document.pk, 9999],
            file_version=ShareLink.FileVersion.ORIGINAL,
        )
        self.assertTrue(serializer.is_valid(), serializer.errors)
        with self.assertRaises(serializers.ValidationError):
            serializer.save(documents=[self.document])
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/test_share_link_bundles.py",
"license": "GNU General Public License v3.0",
"lines": 438,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/management/commands/document_llmindex.py | from typing import Any
from documents.management.commands.base import PaperlessCommand
from documents.tasks import llmindex_index
class Command(PaperlessCommand):
    """Management command that rebuilds or incrementally updates the LLM vector index."""

    help = "Manages the LLM-based vector index for Paperless."

    def add_arguments(self, parser: Any) -> None:
        super().add_arguments(parser)
        # Positional subcommand: full rebuild vs. incremental update.
        parser.add_argument("command", choices=["rebuild", "update"])

    def handle(self, *args: Any, **options: Any) -> None:
        wants_rebuild = options["command"] == "rebuild"
        llmindex_index(
            rebuild=wants_rebuild,
            scheduled=False,
            # Wrap document iteration with the command's progress tracker.
            iter_wrapper=lambda docs: self.track(
                docs,
                description="Indexing documents...",
            ),
        )
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/management/commands/document_llmindex.py",
"license": "GNU General Public License v3.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless_ai/ai_classifier.py | import logging
from django.contrib.auth.models import User
from documents.models import Document
from documents.permissions import get_objects_for_user_owner_aware
from paperless.config import AIConfig
from paperless_ai.client import AIClient
from paperless_ai.indexing import query_similar_documents
from paperless_ai.indexing import truncate_content
logger = logging.getLogger("paperless_ai.rag_classifier")
def build_prompt_without_rag(document: Document) -> str:
    """Build the base classification prompt for *document*.

    The prompt asks the LLM for a title, tags, correspondents, document
    types, storage paths and dates. Content is pre-truncated to 4000 chars
    before token-level truncation.

    Fix: ``filename`` was computed but never interpolated — the prompt
    contained a literal placeholder instead of the document's filename.
    """
    filename = document.filename or ""
    content = truncate_content(document.content[:4000] or "")
    return f"""
You are a document classification assistant.
Analyze the following document and extract the following information:
- A short descriptive title
- Tags that reflect the content
- Names of people or organizations mentioned
- The type or category of the document
- Suggested folder paths for storing the document
- Up to 3 relevant dates in YYYY-MM-DD format
Filename:
{filename}
Content:
{content}
""".strip()
def build_prompt_with_rag(document: Document, user: User | None = None) -> str:
    """Base classification prompt plus truncated context from similar documents."""
    prompt = build_prompt_without_rag(document)
    rag_context = truncate_content(get_context_for_document(document, user))
    return f"""{prompt}
Additional context from similar documents:
{rag_context}
""".strip()
def get_context_for_document(
    doc: Document,
    user: User | None = None,
    max_docs: int = 5,
) -> str:
    """Build a RAG context string from up to *max_docs* documents similar to *doc*.

    When *user* is given, similarity search is restricted to documents the
    user may view. Each context block is ``TITLE: <label>`` followed by up
    to 1000 characters of content.

    Fix: ``similar.content[:1000]`` raised TypeError for documents whose
    content is None; slicing now happens on ``content or ""``.
    """
    visible_documents = (
        get_objects_for_user_owner_aware(
            user,
            "view_document",
            Document,
        )
        if user
        else None
    )
    similar_docs = query_similar_documents(
        document=doc,
        document_ids=[document.pk for document in visible_documents]
        if visible_documents
        else None,
    )[:max_docs]
    context_blocks = []
    for similar in similar_docs:
        # Guard against None content before slicing.
        text = (similar.content or "")[:1000]
        title = similar.title or similar.filename or "Untitled"
        context_blocks.append(f"TITLE: {title}\n{text}")
    return "\n\n".join(context_blocks)
def parse_ai_response(raw: dict) -> dict:
    """Normalize a raw LLM payload into the classification result dict.

    Missing keys fall back to an empty string (title) or empty list.
    """
    fields = (
        ("title", ""),
        ("tags", []),
        ("correspondents", []),
        ("document_types", []),
        ("storage_paths", []),
        ("dates", []),
    )
    return {key: raw.get(key, fallback) for key, fallback in fields}
def get_ai_document_classification(
    document: Document,
    user: User | None = None,
) -> dict:
    """Classify *document* via the configured LLM.

    Uses the RAG prompt when an embedding backend is configured,
    otherwise the plain prompt. Returns the normalized result dict.
    """
    config = AIConfig()
    if config.llm_embedding_backend:
        prompt = build_prompt_with_rag(document, user)
    else:
        prompt = build_prompt_without_rag(document)
    raw = AIClient().run_llm_query(prompt)
    return parse_ai_response(raw)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/ai_classifier.py",
"license": "GNU General Public License v3.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless_ai/base_model.py | from llama_index.core.bridge.pydantic import BaseModel
class DocumentClassifierSchema(BaseModel):
    """Structured output schema the LLM must fill when classifying a document.

    Used as a forced tool call so the model returns machine-parseable fields
    rather than free text.
    """

    title: str
    tags: list[str]
    correspondents: list[str]
    document_types: list[str]
    storage_paths: list[str]
    # Dates as strings; the prompt requests YYYY-MM-DD format.
    dates: list[str]
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/base_model.py",
"license": "GNU General Public License v3.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless_ai/chat.py | import logging
import sys
from llama_index.core import VectorStoreIndex
from llama_index.core.prompts import PromptTemplate
from llama_index.core.query_engine import RetrieverQueryEngine
from documents.models import Document
from paperless_ai.client import AIClient
from paperless_ai.indexing import load_or_build_index
logger = logging.getLogger("paperless_ai.chat")
# Hard cap on how much of a single document's raw content is inlined into the prompt.
MAX_SINGLE_DOC_CONTEXT_CHARS = 15000
# Maximum characters quoted from each retrieved node when building context.
SINGLE_DOC_SNIPPET_CHARS = 800

# Grounded-answer template: the model must answer only from the provided context.
CHAT_PROMPT_TMPL = PromptTemplate(
    template="""Context information is below.
---------------------
{context_str}
---------------------
Given the context information and not prior knowledge, answer the query.
Query: {query_str}
Answer:""",
)
def stream_chat_with_documents(query_str: str, documents: list[Document]):
    """Stream an LLM chat answer for *query_str* grounded in *documents*.

    Yields response text chunks. If no indexed content exists for the given
    documents, yields a single apology string and returns.
    """
    client = AIClient()
    index = load_or_build_index()
    doc_ids = [str(doc.pk) for doc in documents]
    # Filter only the node(s) that match the document IDs
    nodes = [
        node
        for node in index.docstore.docs.values()
        if node.metadata.get("document_id") in doc_ids
    ]
    if len(nodes) == 0:
        logger.warning("No nodes found for the given documents.")
        yield "Sorry, I couldn't find any content to answer your question."
        return
    # Build a throwaway index over just these nodes so retrieval is scoped.
    local_index = VectorStoreIndex(nodes=nodes)
    retriever = local_index.as_retriever(
        # Fewer matches suffice when chatting about a single document.
        similarity_top_k=3 if len(documents) == 1 else 5,
    )
    if len(documents) == 1:
        # Just one doc — provide full content
        doc = documents[0]
        # TODO: include document metadata in the context
        content = doc.content or ""
        context_body = content
        if len(content) > MAX_SINGLE_DOC_CONTEXT_CHARS:
            logger.info(
                "Truncating single-document context from %s to %s characters",
                len(content),
                MAX_SINGLE_DOC_CONTEXT_CHARS,
            )
            context_body = content[:MAX_SINGLE_DOC_CONTEXT_CHARS]
        top_nodes = retriever.retrieve(query_str)
        if len(top_nodes) > 0:
            # Append best-matching snippets after the (possibly truncated) body.
            snippets = "\n\n".join(
                f"TITLE: {node.metadata.get('title')}\n{node.text[:SINGLE_DOC_SNIPPET_CHARS]}"
                for node in top_nodes
            )
            context_body = f"{context_body}\n\nTOP MATCHES:\n{snippets}"
        context = f"TITLE: {doc.title or doc.filename}\n{context_body}"
    else:
        # Multi-document chat: context comes solely from retrieved snippets.
        top_nodes = retriever.retrieve(query_str)
        if len(top_nodes) == 0:
            logger.warning("Retriever returned no nodes for the given documents.")
            yield "Sorry, I couldn't find any content to answer your question."
            return
        context = "\n\n".join(
            f"TITLE: {node.metadata.get('title')}\n{node.text[:SINGLE_DOC_SNIPPET_CHARS]}"
            for node in top_nodes
        )
    prompt = CHAT_PROMPT_TMPL.partial_format(
        context_str=context,
        query_str=query_str,
    ).format(llm=client.llm)
    query_engine = RetrieverQueryEngine.from_args(
        retriever=retriever,
        llm=client.llm,
        streaming=True,
    )
    logger.debug("Document chat prompt: %s", prompt)
    response_stream = query_engine.query(prompt)
    for chunk in response_stream.response_gen:
        yield chunk
        # Flush so CLI consumers see tokens as they arrive.
        sys.stdout.flush()
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/chat.py",
"license": "GNU General Public License v3.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/paperless_ai/client.py | import logging
from llama_index.core.llms import ChatMessage
from llama_index.core.program.function_program import get_function_tool
from llama_index.llms.ollama import Ollama
from llama_index.llms.openai import OpenAI
from paperless.config import AIConfig
from paperless_ai.base_model import DocumentClassifierSchema
logger = logging.getLogger("paperless_ai.client")
class AIClient:
    """
    A client for interacting with an LLM backend (Ollama or OpenAI).
    """

    def __init__(self) -> None:
        self.settings = AIConfig()
        self.llm = self.get_llm()

    def get_llm(self) -> Ollama | OpenAI:
        """Instantiate the configured LLM backend.

        Raises:
            ValueError: if ``llm_backend`` is neither "ollama" nor "openai".
        """
        if self.settings.llm_backend == "ollama":
            return Ollama(
                model=self.settings.llm_model or "llama3.1",
                base_url=self.settings.llm_endpoint or "http://localhost:11434",
                request_timeout=120,
            )
        elif self.settings.llm_backend == "openai":
            return OpenAI(
                model=self.settings.llm_model or "gpt-3.5-turbo",
                api_base=self.settings.llm_endpoint or None,
                api_key=self.settings.llm_api_key,
            )
        else:
            raise ValueError(f"Unsupported LLM backend: {self.settings.llm_backend}")

    def run_llm_query(self, prompt: str) -> dict:
        """Run *prompt* as a forced tool call and return the parsed result.

        Fix: previously annotated ``-> str`` but it returns
        ``DocumentClassifierSchema.model_dump()``, which is a dict.

        Raises whatever the backend raises when no tool call is produced
        (``error_on_no_tool_call=True``).
        """
        logger.debug(
            "Running LLM query against %s with model %s",
            self.settings.llm_backend,
            self.settings.llm_model,
        )
        user_msg = ChatMessage(role="user", content=prompt)
        tool = get_function_tool(DocumentClassifierSchema)
        result = self.llm.chat_with_tools(
            tools=[tool],
            user_msg=user_msg,
            chat_history=[],
        )
        tool_calls = self.llm.get_tool_calls_from_response(
            result,
            error_on_no_tool_call=True,
        )
        logger.debug("LLM query result: %s", tool_calls)
        # Validate the first tool call against the schema before returning.
        parsed = DocumentClassifierSchema(**tool_calls[0].tool_kwargs)
        return parsed.model_dump()

    def run_chat(self, messages: list[ChatMessage]):
        """Run a plain chat completion.

        Returns the backend's chat response object (not a str — the previous
        ``-> str`` annotation was wrong; annotation removed rather than
        importing the backend response type here).
        """
        logger.debug(
            "Running chat query against %s with model %s",
            self.settings.llm_backend,
            self.settings.llm_model,
        )
        result = self.llm.chat(messages)
        logger.debug("Chat result: %s", result)
        return result
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/client.py",
"license": "GNU General Public License v3.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless_ai/embedding.py | import json
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pathlib import Path
from django.conf import settings
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from documents.models import Document
from documents.models import Note
from paperless.config import AIConfig
from paperless.models import LLMEmbeddingBackend
def get_embedding_model() -> BaseEmbedding:
    """Return the embedding model for the configured backend.

    Raises:
        ValueError: for an unknown ``llm_embedding_backend``.
    """
    config = AIConfig()
    backend = config.llm_embedding_backend
    if backend == LLMEmbeddingBackend.OPENAI:
        return OpenAIEmbedding(
            model=config.llm_embedding_model or "text-embedding-3-small",
            api_key=config.llm_api_key,
            api_base=config.llm_endpoint or None,
        )
    if backend == LLMEmbeddingBackend.HUGGINGFACE:
        return HuggingFaceEmbedding(
            model_name=config.llm_embedding_model
            or "sentence-transformers/all-MiniLM-L6-v2",
        )
    raise ValueError(
        f"Unsupported embedding backend: {config.llm_embedding_backend}",
    )
def get_embedding_dim() -> int:
    """
    Loads embedding dimension from meta.json if available, otherwise infers it
    from a dummy embedding and stores it for future use.

    Raises RuntimeError when the stored meta records a different model than
    currently configured (the index must then be rebuilt).
    """
    config = AIConfig()
    fallback_model = (
        "text-embedding-3-small"
        if config.llm_embedding_backend == "openai"
        else "sentence-transformers/all-MiniLM-L6-v2"
    )
    model = config.llm_embedding_model or fallback_model
    meta_path: Path = settings.LLM_INDEX_DIR / "meta.json"

    if meta_path.exists():
        with meta_path.open() as fh:
            meta = json.load(fh)
        if meta.get("embedding_model") != model:
            raise RuntimeError(
                f"Embedding model changed from {meta.get('embedding_model')} to {model}. "
                "You must rebuild the index.",
            )
        return meta["dim"]

    # No meta yet: probe the model once and persist the detected dimension.
    dim = len(get_embedding_model().get_text_embedding("test"))
    with meta_path.open("w") as fh:
        json.dump({"embedding_model": model, "dim": dim}, fh)
    return dim
def build_llm_index_text(doc: Document) -> str:
    """Render a document's metadata and content as one indexable text blob."""
    parts = [
        f"Title: {doc.title}",
        f"Filename: {doc.filename}",
        f"Created: {doc.created}",
        f"Added: {doc.added}",
        f"Modified: {doc.modified}",
        f"Tags: {', '.join(tag.name for tag in doc.tags.all())}",
        f"Document Type: {doc.document_type.name if doc.document_type else ''}",
        f"Correspondent: {doc.correspondent.name if doc.correspondent else ''}",
        f"Storage Path: {doc.storage_path.name if doc.storage_path else ''}",
        f"Archive Serial Number: {doc.archive_serial_number or ''}",
        f"Notes: {','.join([str(c.note) for c in Note.objects.filter(document=doc)])}",
    ]
    parts.extend(
        f"Custom Field - {instance.field.name}: {instance}"
        for instance in doc.custom_fields.all()
    )
    parts.append("\nContent:\n")
    parts.append(doc.content or "")
    return "\n".join(parts)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/embedding.py",
"license": "GNU General Public License v3.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/paperless_ai/indexing.py | import logging
import shutil
from collections.abc import Callable
from collections.abc import Iterable
from datetime import timedelta
from pathlib import Path
from typing import TypeVar
import faiss
import llama_index.core.settings as llama_settings
from celery import states
from django.conf import settings
from django.utils import timezone
from llama_index.core import Document as LlamaDocument
from llama_index.core import StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.core import load_index_from_storage
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.prompts import PromptTemplate
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.schema import BaseNode
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.storage.index_store import SimpleIndexStore
from llama_index.core.text_splitter import TokenTextSplitter
from llama_index.vector_stores.faiss import FaissVectorStore
from documents.models import Document
from documents.models import PaperlessTask
from paperless_ai.embedding import build_llm_index_text
from paperless_ai.embedding import get_embedding_dim
from paperless_ai.embedding import get_embedding_model
_T = TypeVar("_T")
IterWrapper = Callable[[Iterable[_T]], Iterable[_T]]
def _identity(iterable: Iterable[_T]) -> Iterable[_T]:
return iterable
logger = logging.getLogger("paperless_ai.indexing")
def queue_llm_index_update_if_needed(*, rebuild: bool, reason: str) -> bool:
    """Schedule a background LLM index update unless one is active or recent.

    Returns True when a task was queued, False when suppressed.
    """
    from documents.tasks import llmindex_index

    update_tasks = PaperlessTask.objects.filter(
        task_name=PaperlessTask.TaskName.LLMINDEX_UPDATE,
    )
    is_running = update_tasks.filter(
        status__in=[states.PENDING, states.STARTED],
    ).exists()
    # Debounce: skip if an update was created within the last five minutes.
    is_recent = update_tasks.filter(
        date_created__gte=(timezone.now() - timedelta(minutes=5)),
    ).exists()
    if is_running or is_recent:
        return False

    llmindex_index.delay(rebuild=rebuild, scheduled=False, auto=True)
    logger.warning(
        "Queued LLM index update%s: %s",
        " (rebuild)" if rebuild else "",
        reason,
    )
    return True
def get_or_create_storage_context(*, rebuild=False):
    """
    Loads or creates the StorageContext (vector store, docstore, index store).
    If rebuild=True, deletes and recreates everything.
    """
    if rebuild:
        # Wipe the whole index directory before recreating it below.
        shutil.rmtree(settings.LLM_INDEX_DIR, ignore_errors=True)
    settings.LLM_INDEX_DIR.mkdir(parents=True, exist_ok=True)
    # NOTE(review): mkdir above guarantees the directory exists, so the
    # `not ...exists()` half of this condition can never be True at this
    # point — presumably it was meant to test for persisted index *files*
    # (cf. vector_store_file_exists); confirm before changing.
    if rebuild or not settings.LLM_INDEX_DIR.exists():
        embedding_dim = get_embedding_dim()
        # Fresh in-memory stores; FAISS flat L2 index sized to the model dim.
        faiss_index = faiss.IndexFlatL2(embedding_dim)
        vector_store = FaissVectorStore(faiss_index=faiss_index)
        docstore = SimpleDocumentStore()
        index_store = SimpleIndexStore()
    else:
        # Reload previously persisted stores from disk.
        vector_store = FaissVectorStore.from_persist_dir(settings.LLM_INDEX_DIR)
        docstore = SimpleDocumentStore.from_persist_dir(settings.LLM_INDEX_DIR)
        index_store = SimpleIndexStore.from_persist_dir(settings.LLM_INDEX_DIR)
    return StorageContext.from_defaults(
        docstore=docstore,
        index_store=index_store,
        vector_store=vector_store,
        persist_dir=settings.LLM_INDEX_DIR,
    )
def build_document_node(document: Document) -> list[BaseNode]:
    """
    Given a Document, returns parsed Nodes ready for indexing.
    """
    correspondent_name = (
        document.correspondent.name if document.correspondent else None
    )
    doc_type_name = (
        document.document_type.name if document.document_type else None
    )
    metadata = {
        "document_id": str(document.id),
        "title": document.title,
        "tags": [t.name for t in document.tags.all()],
        "correspondent": correspondent_name,
        "document_type": doc_type_name,
        "created": document.created.isoformat() if document.created else None,
        "added": document.added.isoformat() if document.added else None,
        # "modified" drives incremental-update change detection.
        "modified": document.modified.isoformat(),
    }
    llama_doc = LlamaDocument(
        text=build_llm_index_text(document),
        metadata=metadata,
    )
    return SimpleNodeParser().get_nodes_from_documents([llama_doc])
def load_or_build_index(nodes=None):
    """
    Load an existing VectorStoreIndex if present,
    or build a new one using provided nodes if storage is empty.

    Raises the underlying ValueError when no index is loadable and no
    nodes were supplied (after scheduling a background rebuild).
    """
    embed_model = get_embedding_model()
    # Set the process-global embed model so llama_index uses ours.
    llama_settings.Settings.embed_model = embed_model
    storage_context = get_or_create_storage_context()
    try:
        return load_index_from_storage(storage_context=storage_context)
    except ValueError as e:
        # Storage dir exists but contains no loadable index (fresh or corrupt).
        logger.warning("Failed to load index from storage: %s", e)
        if not nodes:
            # Nothing to build from here — queue a background (re)build instead.
            queue_llm_index_update_if_needed(
                rebuild=vector_store_file_exists(),
                reason="LLM index missing or invalid while loading.",
            )
            logger.info("No nodes provided for index creation.")
            raise
        return VectorStoreIndex(
            nodes=nodes,
            storage_context=storage_context,
            embed_model=embed_model,
        )
def remove_document_docstore_nodes(document: Document, index: VectorStoreIndex):
    """
    Delete every docstore node that belongs to *document* from *index*.

    Only docstore entries are removed — the FAISS IndexFlatL2 vectors are
    append-only and stay behind.
    """
    target_id = str(document.id)
    all_node_ids = list(index.docstore.docs.keys())
    for node in index.docstore.get_nodes(all_node_ids):
        if node.metadata.get("document_id") == target_id:
            index.docstore.delete_document(node.node_id)
def vector_store_file_exists() -> bool:
    """
    Check if the vector store file exists in the LLM index directory.

    Fix: the ``/`` operator already yields a Path, so the redundant
    ``Path(...)`` wrapper was dropped; a return annotation was added.
    """
    return (settings.LLM_INDEX_DIR / "default__vector_store.json").exists()
def update_llm_index(
    *,
    iter_wrapper: IterWrapper[Document] = _identity,
    rebuild=False,
) -> str:
    """
    Rebuild or update the LLM index.

    iter_wrapper wraps document iteration (e.g. for progress display).
    Returns a human-readable status message.
    """
    nodes = []
    documents = Document.objects.all()
    if not documents.exists():
        msg = "No documents found to index."
        logger.warning(msg)
        return msg

    if rebuild or not vector_store_file_exists():
        # remove meta.json to force re-detection of embedding dim
        (settings.LLM_INDEX_DIR / "meta.json").unlink(missing_ok=True)
        # Rebuild index from scratch
        logger.info("Rebuilding LLM index.")
        embed_model = get_embedding_model()
        llama_settings.Settings.embed_model = embed_model
        storage_context = get_or_create_storage_context(rebuild=True)
        for document in iter_wrapper(documents):
            document_nodes = build_document_node(document)
            nodes.extend(document_nodes)
        index = VectorStoreIndex(
            nodes=nodes,
            storage_context=storage_context,
            embed_model=embed_model,
            show_progress=False,
        )
        msg = "LLM index rebuilt successfully."
    else:
        # Update existing index
        index = load_or_build_index()
        all_node_ids = list(index.docstore.docs.keys())
        # Map document_id -> node for change detection below.
        existing_nodes = {
            node.metadata.get("document_id"): node
            for node in index.docstore.get_nodes(all_node_ids)
        }
        for document in iter_wrapper(documents):
            doc_id = str(document.id)
            document_modified = document.modified.isoformat()
            if doc_id in existing_nodes:
                node = existing_nodes[doc_id]
                node_modified = node.metadata.get("modified")
                # Unchanged documents are skipped entirely.
                if node_modified == document_modified:
                    continue
                # Again, delete from docstore, FAISS IndexFlatL2 are append-only
                index.docstore.delete_document(node.node_id)
                nodes.extend(build_document_node(document))
            else:
                # New document, add it
                nodes.extend(build_document_node(document))
        if nodes:
            msg = "LLM index updated successfully."
            logger.info(
                "Updating %d nodes in LLM index.",
                len(nodes),
            )
            index.insert_nodes(nodes)
        else:
            msg = "No changes detected in LLM index."
            logger.info(msg)

    index.storage_context.persist(persist_dir=settings.LLM_INDEX_DIR)
    return msg
def llm_index_add_or_update_document(document: Document):
    """
    Adds or updates a document in the LLM index.
    If the document already exists, it will be replaced.
    """
    fresh_nodes = build_document_node(document)
    # Passing nodes lets load_or_build_index bootstrap an empty store.
    index = load_or_build_index(nodes=fresh_nodes)
    remove_document_docstore_nodes(document, index)
    index.insert_nodes(fresh_nodes)
    index.storage_context.persist(persist_dir=settings.LLM_INDEX_DIR)
def llm_index_remove_document(document: Document):
    """
    Removes a document from the LLM index and persists the change.
    """
    index = load_or_build_index()
    remove_document_docstore_nodes(document, index)
    index.storage_context.persist(persist_dir=settings.LLM_INDEX_DIR)
def truncate_content(content: str) -> str:
    """Trim *content* to fit an 8192-token context window with 512 output tokens."""
    helper = PromptHelper(
        context_window=8192,
        num_output=512,
        chunk_overlap_ratio=0.1,
        chunk_size_limit=None,
    )
    chunker = TokenTextSplitter(separator=" ", chunk_size=512, chunk_overlap=50)
    kept_chunks = helper.truncate(
        prompt=PromptTemplate(template="{content}"),
        text_chunks=chunker.split_text(content),
        padding=5,
    )
    return " ".join(kept_chunks)
def query_similar_documents(
    document: Document,
    top_k: int = 5,
    document_ids: list[int] | None = None,
) -> list[Document]:
    """
    Runs a similarity query and returns top-k similar Document objects.

    When *document_ids* is given, retrieval is constrained to nodes of
    those documents.

    Fix: node metadata stores ``document_id`` as a *string*, but the
    constraint compared it against a list of ints, so the filter never
    matched. Ids are now normalized to strings before comparison.
    """
    if not vector_store_file_exists():
        queue_llm_index_update_if_needed(
            rebuild=False,
            reason="LLM index not found for similarity query.",
        )
        return []

    index = load_or_build_index()

    # constrain only the node(s) that match the document IDs, if given
    doc_node_ids = None
    if document_ids:
        wanted = {str(pk) for pk in document_ids}
        doc_node_ids = [
            node.node_id
            for node in index.docstore.docs.values()
            if node.metadata.get("document_id") in wanted
        ]

    retriever = VectorIndexRetriever(
        index=index,
        similarity_top_k=top_k,
        doc_ids=doc_node_ids,
    )
    query_text = truncate_content(
        (document.title or "") + "\n" + (document.content or ""),
    )
    results = retriever.retrieve(query_text)
    # Separate name from the parameter to avoid shadowing it with results.
    result_ids = [
        int(node.metadata["document_id"])
        for node in results
        if "document_id" in node.metadata
    ]
    return list(Document.objects.filter(pk__in=result_ids))
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/indexing.py",
"license": "GNU General Public License v3.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/paperless_ai/matching.py | import difflib
import logging
import re
from django.contrib.auth.models import User
from documents.models import Correspondent
from documents.models import DocumentType
from documents.models import StoragePath
from documents.models import Tag
from documents.permissions import get_objects_for_user_owner_aware
MATCH_THRESHOLD = 0.8
logger = logging.getLogger("paperless_ai.matching")
def match_tags_by_name(names: list[str], user: User) -> list[Tag]:
    """Resolve suggested tag names against tags visible to *user*."""
    visible = get_objects_for_user_owner_aware(user, ["view_tag"], Tag)
    return _match_names_to_queryset(names, visible, "name")
def match_correspondents_by_name(names: list[str], user: User) -> list[Correspondent]:
    """Resolve suggested correspondent names against those visible to *user*."""
    visible = get_objects_for_user_owner_aware(
        user,
        ["view_correspondent"],
        Correspondent,
    )
    return _match_names_to_queryset(names, visible, "name")
def match_document_types_by_name(names: list[str], user: User) -> list[DocumentType]:
    """Resolve suggested document type names against those visible to *user*."""
    visible = get_objects_for_user_owner_aware(
        user,
        ["view_documenttype"],
        DocumentType,
    )
    return _match_names_to_queryset(names, visible, "name")
def match_storage_paths_by_name(names: list[str], user: User) -> list[StoragePath]:
    """Resolve suggested storage path names against those visible to *user*."""
    visible = get_objects_for_user_owner_aware(
        user,
        ["view_storagepath"],
        StoragePath,
    )
    return _match_names_to_queryset(names, visible, "name")
def _normalize(s: str) -> str:
s = s.lower()
s = re.sub(r"[^\w\s]", "", s) # remove punctuation
s = s.strip()
return s
def _match_names_to_queryset(names: list[str], queryset, attr: str):
    """Match *names* to queryset objects by normalized exact, then fuzzy, comparison.

    Each object is matched at most once; matched objects are removed from the
    candidate pool. Empty names are skipped. Returns matched objects in the
    order their names appear.

    Fixes/cleanup: removed the dead ``else: pass`` branch and factored the
    duplicated pop-from-both-lists bookkeeping into a helper.
    """
    results = []
    objects = list(queryset)
    object_names = [_normalize(getattr(obj, attr)) for obj in objects]

    def _take(index: int):
        # Pop from both lists to keep them aligned and prevent re-matching.
        object_names.pop(index)
        return objects.pop(index)

    for name in names:
        if not name:
            continue
        target = _normalize(name)
        # First try exact match
        if target in object_names:
            results.append(_take(object_names.index(target)))
            continue
        # Fuzzy match fallback
        matches = difflib.get_close_matches(
            target,
            object_names,
            n=1,
            cutoff=MATCH_THRESHOLD,
        )
        if matches:
            results.append(_take(object_names.index(matches[0])))
    return results
def extract_unmatched_names(
    names: list[str],
    matched_objects: list,
    attr="name",
) -> list[str]:
    """Return the names (case-insensitively) absent from *matched_objects*."""
    taken = {getattr(obj, attr).lower() for obj in matched_objects}
    return [candidate for candidate in names if candidate.lower() not in taken]
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/matching.py",
"license": "GNU General Public License v3.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless_ai/tests/test_ai_classifier.py | import json
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from django.test import override_settings
from documents.models import Document
from paperless_ai.ai_classifier import build_prompt_with_rag
from paperless_ai.ai_classifier import build_prompt_without_rag
from paperless_ai.ai_classifier import get_ai_document_classification
from paperless_ai.ai_classifier import get_context_for_document
@pytest.fixture
def mock_document():
    """A MagicMock standing in for a fully-populated Document instance."""
    doc = MagicMock(spec=Document)
    doc.title = "Test Title"
    doc.filename = "test_file.pdf"
    doc.created = "2023-01-01"
    doc.added = "2023-01-02"
    doc.modified = "2023-01-03"
    # Tag mocks: .name must be set after construction (MagicMock(name=...)
    # would configure the mock's own name instead).
    tag1 = MagicMock()
    tag1.name = "Tag1"
    tag2 = MagicMock()
    tag2.name = "Tag2"
    doc.tags.all = MagicMock(return_value=[tag1, tag2])
    doc.document_type = MagicMock()
    doc.document_type.name = "Invoice"
    doc.correspondent = MagicMock()
    doc.correspondent.name = "Test Correspondent"
    doc.archive_serial_number = "12345"
    doc.content = "This is the document content."
    # Custom field mocks render as their value when str()-formatted.
    cf1 = MagicMock(__str__=lambda x: "Value1")
    cf1.field = MagicMock()
    cf1.field.name = "Field1"
    cf1.value = "Value1"
    cf2 = MagicMock(__str__=lambda x: "Value2")
    cf2.field = MagicMock()
    cf2.field.name = "Field2"
    cf2.value = "Value2"
    doc.custom_fields.all = MagicMock(return_value=[cf1, cf2])
    return doc
@pytest.fixture
def mock_similar_documents():
    """Three mock documents: fully populated, title-less, and entirely empty."""
    specs = [
        ("Content of document 1", "Title 1", "file1.txt"),
        ("Content of document 2", None, "file2.txt"),
        (None, None, None),
    ]
    docs = []
    for content, title, filename in specs:
        mock_doc = MagicMock()
        mock_doc.content = content
        mock_doc.title = title
        mock_doc.filename = filename
        docs.append(mock_doc)
    return docs
@pytest.mark.django_db
@patch("paperless_ai.client.AIClient.run_llm_query")
@override_settings(
    LLM_BACKEND="ollama",
    LLM_MODEL="some_model",
)
def test_get_ai_document_classification_success(mock_run_llm_query, mock_document):
    """A successful LLM query payload is passed through to the result dict."""
    mock_run_llm_query.return_value = {
        "title": "Test Title",
        "tags": ["test", "document"],
        "correspondents": ["John Doe"],
        "document_types": ["report"],
        "storage_paths": ["Reports"],
        "dates": ["2023-01-01"],
    }
    result = get_ai_document_classification(mock_document)
    assert result["title"] == "Test Title"
    assert result["tags"] == ["test", "document"]
    assert result["correspondents"] == ["John Doe"]
    assert result["document_types"] == ["report"]
    assert result["storage_paths"] == ["Reports"]
    assert result["dates"] == ["2023-01-01"]
@pytest.mark.django_db
@patch("paperless_ai.client.AIClient.run_llm_query")
def test_get_ai_document_classification_failure(mock_run_llm_query, mock_document):
    """LLM errors propagate out of get_ai_document_classification."""
    mock_run_llm_query.side_effect = Exception("LLM query failed")
    # assert raises an exception
    with pytest.raises(Exception):
        get_ai_document_classification(mock_document)
@pytest.mark.django_db
@patch("paperless_ai.client.AIClient.run_llm_query")
@patch("paperless_ai.ai_classifier.build_prompt_with_rag")
@override_settings(
    LLM_EMBEDDING_BACKEND="huggingface",
    LLM_EMBEDDING_MODEL="some_model",
    LLM_BACKEND="ollama",
    LLM_MODEL="some_model",
)
def test_use_rag_if_configured(
    mock_build_prompt_with_rag,
    mock_run_llm_query,
    mock_document,
):
    """With an embedding backend configured, the RAG prompt builder is used."""
    mock_build_prompt_with_rag.return_value = "Prompt with RAG"
    mock_run_llm_query.return_value.text = json.dumps({})
    get_ai_document_classification(mock_document)
    mock_build_prompt_with_rag.assert_called_once()
@pytest.mark.django_db
@patch("paperless_ai.client.AIClient.run_llm_query")
@patch("paperless_ai.ai_classifier.build_prompt_without_rag")
@patch("paperless.config.AIConfig")
@override_settings(
    LLM_BACKEND="ollama",
    LLM_MODEL="some_model",
)
def test_use_without_rag_if_not_configured(
    mock_ai_config,
    mock_build_prompt_without_rag,
    mock_run_llm_query,
    mock_document,
):
    """Without an embedding backend, the plain (non-RAG) prompt builder is used."""
    mock_ai_config.llm_embedding_backend = None
    mock_build_prompt_without_rag.return_value = "Prompt without RAG"
    mock_run_llm_query.return_value.text = json.dumps({})
    get_ai_document_classification(mock_document)
    mock_build_prompt_without_rag.assert_called_once()
@pytest.mark.django_db
@override_settings(
    LLM_EMBEDDING_BACKEND="huggingface",
    LLM_BACKEND="ollama",
    LLM_MODEL="some_model",
)
def test_prompt_with_without_rag(mock_document):
    """Only the RAG prompt builder embeds the similar-document context section."""
    with patch(
        "paperless_ai.ai_classifier.get_context_for_document",
        return_value="Context from similar documents",
    ):
        marker = "Additional context from similar documents:"
        assert marker not in build_prompt_without_rag(mock_document)
        assert marker in build_prompt_with_rag(mock_document)
@patch("paperless_ai.ai_classifier.query_similar_documents")
def test_get_context_for_document(
    mock_query_similar_documents,
    mock_document,
    mock_similar_documents,
):
    """Context is capped at max_docs and falls back to filename when untitled."""
    mock_query_similar_documents.return_value = mock_similar_documents

    context = get_context_for_document(mock_document, max_docs=2)

    assert context == (
        "TITLE: Title 1\nContent of document 1\n\n"
        "TITLE: file2.txt\nContent of document 2"
    )
    mock_query_similar_documents.assert_called_once()
def test_get_context_for_document_no_similar_docs(mock_document):
    """No similar documents yields an empty context string."""
    with patch("paperless_ai.ai_classifier.query_similar_documents", return_value=[]):
        assert get_context_for_document(mock_document) == ""
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/tests/test_ai_classifier.py",
"license": "GNU General Public License v3.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless_ai/tests/test_ai_indexing.py | import json
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from celery import states
from django.test import override_settings
from django.utils import timezone
from llama_index.core.base.embeddings.base import BaseEmbedding
from documents.models import Document
from documents.models import PaperlessTask
from paperless_ai import indexing
@pytest.fixture
def temp_llm_index_dir(tmp_path):
    """Point LLM_INDEX_DIR at a per-test temp dir, restoring it afterwards."""
    saved = indexing.settings.LLM_INDEX_DIR
    indexing.settings.LLM_INDEX_DIR = tmp_path
    yield tmp_path
    indexing.settings.LLM_INDEX_DIR = saved
@pytest.fixture
def real_document(db):
    """A minimal persisted Document row for the indexing tests."""
    return Document.objects.create(
        title="Test Document",
        content="This is some test content.",
        added=timezone.now(),
    )
@pytest.fixture
def mock_embed_model():
    """Patch both embedding-model factories to return one shared FakeEmbedding."""
    fake_model = FakeEmbedding()
    with (
        patch("paperless_ai.indexing.get_embedding_model") as patched_indexing,
        patch(
            "paperless_ai.embedding.get_embedding_model",
        ) as patched_embedding,
    ):
        patched_indexing.return_value = fake_model
        patched_embedding.return_value = fake_model
        yield patched_indexing
class FakeEmbedding(BaseEmbedding):
    """Deterministic stand-in embedding model returning constant 384-dim vectors."""

    # TODO: maybe a better way to do this?
    # NOTE(review): in llama-index, _aget_query_embedding is normally an async
    # coroutine; this sync override appears to satisfy these tests — confirm
    # against the installed BaseEmbedding API.
    def _aget_query_embedding(self, query: str) -> list[float]:
        return [0.1] * self.get_query_embedding_dim()

    def _get_query_embedding(self, query: str) -> list[float]:
        return [0.1] * self.get_query_embedding_dim()

    def _get_text_embedding(self, text: str) -> list[float]:
        return [0.1] * self.get_query_embedding_dim()

    def get_query_embedding_dim(self) -> int:
        return 384  # Match your real FAISS config
@pytest.mark.django_db
def test_build_document_node(real_document) -> None:
    """Nodes built from a document carry its id (as a string) in their metadata."""
    nodes = indexing.build_document_node(real_document)

    assert nodes
    assert nodes[0].metadata["document_id"] == str(real_document.id)
@pytest.mark.django_db
def test_update_llm_index(
    temp_llm_index_dir,
    real_document,
    mock_embed_model,
) -> None:
    """A full rebuild writes index JSON files into the index directory."""
    with patch("documents.models.Document.objects.all") as mock_all:
        fake_qs = MagicMock()
        fake_qs.exists.return_value = True
        fake_qs.__iter__.return_value = iter([real_document])
        mock_all.return_value = fake_qs

        indexing.update_llm_index(rebuild=True)

        assert any(temp_llm_index_dir.glob("*.json"))
@pytest.mark.django_db
def test_update_llm_index_removes_meta(
    temp_llm_index_dir,
    real_document,
    mock_embed_model,
) -> None:
    """Rebuilding the index replaces a stale meta.json with current model info."""
    # Pre-create a meta.json with incorrect data
    (temp_llm_index_dir / "meta.json").write_text(
        json.dumps({"embedding_model": "old", "dim": 1}),
    )
    with patch("documents.models.Document.objects.all") as mock_all:
        mock_queryset = MagicMock()
        mock_queryset.exists.return_value = True
        mock_queryset.__iter__.return_value = iter([real_document])
        mock_all.return_value = mock_queryset
        indexing.update_llm_index(rebuild=True)
    meta = json.loads((temp_llm_index_dir / "meta.json").read_text())
    # Mirror the default-model fallback the embedding code applies per backend.
    from paperless.config import AIConfig

    config = AIConfig()
    expected_model = config.llm_embedding_model or (
        "text-embedding-3-small"
        if config.llm_embedding_backend == "openai"
        else "sentence-transformers/all-MiniLM-L6-v2"
    )
    assert meta == {"embedding_model": expected_model, "dim": 384}
@pytest.mark.django_db
def test_update_llm_index_partial_update(
    temp_llm_index_dir,
    real_document,
    mock_embed_model,
) -> None:
    """An incremental update only touches modified and newly added documents."""
    doc2 = Document.objects.create(
        title="Test Document 2",
        content="This is some test content 2.",
        added=timezone.now(),
        checksum="1234567890abcdef",
    )
    # Initial index
    with patch("documents.models.Document.objects.all") as mock_all:
        mock_queryset = MagicMock()
        mock_queryset.exists.return_value = True
        mock_queryset.__iter__.return_value = iter([real_document, doc2])
        mock_all.return_value = mock_queryset
        indexing.update_llm_index(rebuild=True)
    # modify document
    updated_document = real_document
    updated_document.modified = timezone.now()  # simulate modification
    # new doc
    doc3 = Document.objects.create(
        title="Test Document 3",
        content="This is some test content 3.",
        added=timezone.now(),
        checksum="abcdef1234567890",
    )
    with patch("documents.models.Document.objects.all") as mock_all:
        mock_queryset = MagicMock()
        mock_queryset.exists.return_value = True
        mock_queryset.__iter__.return_value = iter([updated_document, doc2, doc3])
        mock_all.return_value = mock_queryset
        # The incremental pass should log "Updating %d nodes in LLM index."
        # for the modified document plus the new one (2 nodes).
        with patch("paperless_ai.indexing.logger") as mock_logger:
            indexing.update_llm_index(rebuild=False)
            mock_logger.info.assert_called_once_with(
                "Updating %d nodes in LLM index.",
                2,
            )
        indexing.update_llm_index(rebuild=False)
    assert any(temp_llm_index_dir.glob("*.json"))
def test_get_or_create_storage_context_raises_exception(
    temp_llm_index_dir,
    mock_embed_model,
) -> None:
    """Without an existing on-disk index and rebuild=False, loading must fail."""
    with pytest.raises(Exception):
        indexing.get_or_create_storage_context(rebuild=False)
@override_settings(
    LLM_EMBEDDING_BACKEND="huggingface",
)
def test_load_or_build_index_builds_when_nodes_given(
    temp_llm_index_dir,
    real_document,
    mock_embed_model,
) -> None:
    """When loading from storage fails but nodes are supplied, a new index is built."""
    with (
        patch(
            "paperless_ai.indexing.load_index_from_storage",
            side_effect=ValueError("Index not found"),
        ),
        patch(
            "paperless_ai.indexing.VectorStoreIndex",
            return_value=MagicMock(),
        ) as fake_index_cls,
        patch(
            "paperless_ai.indexing.get_or_create_storage_context",
            return_value=MagicMock(),
        ) as fake_storage_ctx,
    ):
        fake_storage_ctx.return_value.persist_dir = temp_llm_index_dir
        indexing.load_or_build_index(
            nodes=[indexing.build_document_node(real_document)],
        )
        fake_index_cls.assert_called_once()
def test_load_or_build_index_raises_exception_when_no_nodes(
    temp_llm_index_dir,
    mock_embed_model,
) -> None:
    """When loading fails and no nodes are supplied, the error propagates."""
    with (
        patch(
            "paperless_ai.indexing.load_index_from_storage",
            side_effect=ValueError("Index not found"),
        ),
        patch(
            "paperless_ai.indexing.get_or_create_storage_context",
            return_value=MagicMock(),
        ),
        pytest.raises(Exception),
    ):
        indexing.load_or_build_index()
@pytest.mark.django_db
def test_load_or_build_index_succeeds_when_nodes_given(
    temp_llm_index_dir,
    mock_embed_model,
) -> None:
    """Building from explicit nodes succeeds even when the storage load fails."""
    with (
        patch(
            "paperless_ai.indexing.load_index_from_storage",
            side_effect=ValueError("Index not found"),
        ),
        patch(
            "paperless_ai.indexing.VectorStoreIndex",
            return_value=MagicMock(),
        ) as fake_index_cls,
        patch(
            "paperless_ai.indexing.get_or_create_storage_context",
            return_value=MagicMock(),
        ) as fake_storage_ctx,
    ):
        fake_storage_ctx.return_value.persist_dir = temp_llm_index_dir
        indexing.load_or_build_index(
            nodes=[MagicMock()],
        )
        fake_index_cls.assert_called_once()
@pytest.mark.django_db
def test_add_or_update_document_updates_existing_entry(
    temp_llm_index_dir,
    real_document,
    mock_embed_model,
) -> None:
    """Re-adding an already-indexed document keeps the on-disk index files valid."""
    indexing.update_llm_index(rebuild=True)
    indexing.llm_index_add_or_update_document(real_document)
    assert any(temp_llm_index_dir.glob("*.json"))
@pytest.mark.django_db
def test_remove_document_deletes_node_from_docstore(
    temp_llm_index_dir,
    real_document,
    mock_embed_model,
) -> None:
    """Removing a document empties its node out of the persisted docstore."""
    indexing.update_llm_index(rebuild=True)

    before = indexing.load_or_build_index()
    assert len(before.docstore.docs) == 1

    indexing.llm_index_remove_document(real_document)

    after = indexing.load_or_build_index()
    assert len(after.docstore.docs) == 0
@pytest.mark.django_db
def test_update_llm_index_no_documents(
    temp_llm_index_dir,
    mock_embed_model,
) -> None:
    """With no documents at all, the update warns and skips indexing."""
    with patch("documents.models.Document.objects.all") as mock_all:
        empty_qs = MagicMock()
        empty_qs.exists.return_value = False
        empty_qs.__iter__.return_value = iter([])
        mock_all.return_value = empty_qs

        # The implementation should log a warning and bail out early.
        with patch("paperless_ai.indexing.logger") as mock_logger:
            indexing.update_llm_index(rebuild=True)
            mock_logger.warning.assert_called_once_with(
                "No documents found to index.",
            )
@pytest.mark.django_db
def test_queue_llm_index_update_if_needed_enqueues_when_idle_or_skips_recent() -> None:
    """An update is enqueued when idle, and skipped when one is already running."""
    # No existing tasks
    with patch("documents.tasks.llmindex_index") as mock_task:
        result = indexing.queue_llm_index_update_if_needed(
            rebuild=True,
            reason="test enqueue",
        )
        assert result is True
        mock_task.delay.assert_called_once_with(rebuild=True, scheduled=False, auto=True)
    # Simulate an in-flight LLM index task in the task table.
    PaperlessTask.objects.create(
        task_id="task-1",
        task_name=PaperlessTask.TaskName.LLMINDEX_UPDATE,
        status=states.STARTED,
        date_created=timezone.now(),
    )
    # Existing running task
    with patch("documents.tasks.llmindex_index") as mock_task:
        result = indexing.queue_llm_index_update_if_needed(
            rebuild=False,
            reason="should skip",
        )
        assert result is False
        mock_task.delay.assert_not_called()
@override_settings(
    LLM_EMBEDDING_BACKEND="huggingface",
    LLM_BACKEND="ollama",
)
def test_query_similar_documents(
    temp_llm_index_dir,
    real_document,
) -> None:
    """A similarity query retrieves node ids and resolves them to Document rows."""
    with (
        patch("paperless_ai.indexing.get_or_create_storage_context") as mock_storage,
        patch("paperless_ai.indexing.load_or_build_index") as mock_load_or_build_index,
        patch(
            "paperless_ai.indexing.vector_store_file_exists",
        ) as mock_vector_store_exists,
        patch("paperless_ai.indexing.VectorIndexRetriever") as mock_retriever_cls,
        patch("paperless_ai.indexing.Document.objects.filter") as mock_filter,
    ):
        mock_storage.return_value = MagicMock()
        mock_storage.return_value.persist_dir = temp_llm_index_dir
        # Pretend an index already exists so no rebuild gets queued.
        mock_vector_store_exists.return_value = True
        mock_index = MagicMock()
        mock_load_or_build_index.return_value = mock_index
        mock_retriever = MagicMock()
        mock_retriever_cls.return_value = mock_retriever
        # Two retrieved nodes pointing at document ids 1 and 2.
        mock_node1 = MagicMock()
        mock_node1.metadata = {"document_id": 1}
        mock_node2 = MagicMock()
        mock_node2.metadata = {"document_id": 2}
        mock_retriever.retrieve.return_value = [mock_node1, mock_node2]
        mock_filtered_docs = [MagicMock(pk=1), MagicMock(pk=2)]
        mock_filter.return_value = mock_filtered_docs
        result = indexing.query_similar_documents(real_document, top_k=3)
        mock_load_or_build_index.assert_called_once()
        mock_retriever_cls.assert_called_once()
        # The query text is "<title>\n<content>" of the source document.
        mock_retriever.retrieve.assert_called_once_with(
            "Test Document\nThis is some test content.",
        )
        mock_filter.assert_called_once_with(pk__in=[1, 2])
        assert result == mock_filtered_docs
@pytest.mark.django_db
def test_query_similar_documents_triggers_update_when_index_missing(
    temp_llm_index_dir,
    real_document,
) -> None:
    """A missing vector store queues a background index update and returns []."""
    with (
        patch(
            "paperless_ai.indexing.vector_store_file_exists",
            return_value=False,
        ),
        patch(
            "paperless_ai.indexing.queue_llm_index_update_if_needed",
        ) as mock_queue,
        patch("paperless_ai.indexing.load_or_build_index") as mock_load,
    ):
        result = indexing.query_similar_documents(
            real_document,
            top_k=2,
        )
        mock_queue.assert_called_once_with(
            rebuild=False,
            reason="LLM index not found for similarity query.",
        )
        # No attempt is made to load the index when the store is absent.
        mock_load.assert_not_called()
        assert result == []
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/tests/test_ai_indexing.py",
"license": "GNU General Public License v3.0",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless_ai/tests/test_chat.py | from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from llama_index.core import VectorStoreIndex
from llama_index.core.schema import TextNode
from paperless_ai.chat import stream_chat_with_documents
@pytest.fixture(autouse=True)
def patch_embed_model():
    """Install a MockEmbedding globally so index construction has a valid model."""
    from llama_index.core import settings as llama_settings
    from llama_index.core.embeddings.mock_embed_model import MockEmbedding
    # Use a real BaseEmbedding subclass to satisfy llama-index 0.14 validation
    llama_settings.Settings.embed_model = MockEmbedding(embed_dim=1536)
    yield
    llama_settings.Settings.embed_model = None
@pytest.fixture(autouse=True)
def patch_embed_nodes():
    """Replace embed_nodes with a stub producing constant 1536-dim vectors."""

    def fake_embed(nodes, *_args, **_kwargs):
        return {node.node_id: [0.1] * 1536 for node in nodes}

    with patch(
        "llama_index.core.indices.vector_store.base.embed_nodes",
    ) as mock_embed_nodes:
        mock_embed_nodes.side_effect = fake_embed
        yield
@pytest.fixture
def mock_document():
    """A minimal document stub with the attributes the chat code reads."""
    document = MagicMock()
    document.pk = 1
    document.title = "Test Document"
    document.filename = "test_file.pdf"
    document.content = "This is the document content."
    return document
def test_stream_chat_with_one_document_full_content(mock_document) -> None:
    """With a single document, its full node content is fed to the query engine."""
    with (
        patch("paperless_ai.chat.AIClient") as mock_client_cls,
        patch("paperless_ai.chat.load_or_build_index") as mock_load_index,
        patch(
            "paperless_ai.chat.RetrieverQueryEngine.from_args",
        ) as mock_query_engine_cls,
    ):
        mock_client = MagicMock()
        mock_client_cls.return_value = mock_client
        mock_client.llm = MagicMock()
        # One node whose metadata ties it to the queried document.
        mock_node = TextNode(
            text="This is node content.",
            metadata={"document_id": str(mock_document.pk), "title": "Test Document"},
        )
        mock_index = MagicMock()
        mock_index.docstore.docs.values.return_value = [mock_node]
        mock_load_index.return_value = mock_index
        # Streamed answer comes back as a generator of text chunks.
        mock_response_stream = MagicMock()
        mock_response_stream.response_gen = iter(["chunk1", "chunk2"])
        mock_query_engine = MagicMock()
        mock_query_engine_cls.return_value = mock_query_engine
        mock_query_engine.query.return_value = mock_response_stream
        output = list(stream_chat_with_documents("What is this?", [mock_document]))
        assert output == ["chunk1", "chunk2"]
def test_stream_chat_with_multiple_documents_retrieval(patch_embed_nodes) -> None:
    """With several documents, retrieval selects nodes before querying the LLM."""
    with (
        patch("paperless_ai.chat.AIClient") as mock_client_cls,
        patch("paperless_ai.chat.load_or_build_index") as mock_load_index,
        patch(
            "paperless_ai.chat.RetrieverQueryEngine.from_args",
        ) as mock_query_engine_cls,
        patch.object(VectorStoreIndex, "as_retriever") as mock_as_retriever,
    ):
        # Mock AIClient and LLM
        mock_client = MagicMock()
        mock_client_cls.return_value = mock_client
        mock_client.llm = MagicMock()
        # Create two real TextNodes
        mock_node1 = TextNode(
            text="Content for doc 1.",
            metadata={"document_id": "1", "title": "Document 1"},
        )
        mock_node2 = TextNode(
            text="Content for doc 2.",
            metadata={"document_id": "2", "title": "Document 2"},
        )
        mock_index = MagicMock()
        mock_index.docstore.docs.values.return_value = [mock_node1, mock_node2]
        mock_load_index.return_value = mock_index
        # Patch as_retriever to return a retriever whose retrieve() returns mock_node1 and mock_node2
        mock_retriever = MagicMock()
        mock_retriever.retrieve.return_value = [mock_node1, mock_node2]
        mock_as_retriever.return_value = mock_retriever
        # Mock response stream
        mock_response_stream = MagicMock()
        mock_response_stream.response_gen = iter(["chunk1", "chunk2"])
        # Mock RetrieverQueryEngine
        mock_query_engine = MagicMock()
        mock_query_engine_cls.return_value = mock_query_engine
        mock_query_engine.query.return_value = mock_response_stream
        # Fake documents
        doc1 = MagicMock(pk=1)
        doc2 = MagicMock(pk=2)
        output = list(stream_chat_with_documents("What's up?", [doc1, doc2]))
        assert output == ["chunk1", "chunk2"]
def test_stream_chat_no_matching_nodes() -> None:
    """With an empty docstore, the stream yields a single fallback message."""
    with (
        patch("paperless_ai.chat.AIClient") as mock_client_cls,
        patch("paperless_ai.chat.load_or_build_index") as mock_load_index,
    ):
        fake_client = MagicMock()
        fake_client.llm = MagicMock()
        mock_client_cls.return_value = fake_client

        empty_index = MagicMock()
        empty_index.docstore.docs.values.return_value = []
        mock_load_index.return_value = empty_index

        chunks = list(stream_chat_with_documents("Any info?", [MagicMock(pk=1)]))

        assert chunks == ["Sorry, I couldn't find any content to answer your question."]
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/tests/test_chat.py",
"license": "GNU General Public License v3.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless_ai/tests/test_client.py | from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from llama_index.core.llms import ChatMessage
from llama_index.core.llms.llm import ToolSelection
from paperless_ai.client import AIClient
@pytest.fixture
def mock_ai_config():
    """Patch AIClient's AIConfig so tests can set backend/model fields directly."""
    with patch("paperless_ai.client.AIConfig") as config_cls:
        config = MagicMock()
        config_cls.return_value = config
        yield config
@pytest.fixture
def mock_ollama_llm():
    """Patch the Ollama LLM class used by AIClient."""
    with patch("paperless_ai.client.Ollama") as ollama_cls:
        yield ollama_cls
@pytest.fixture
def mock_openai_llm():
    """Patch the OpenAI LLM class used by AIClient."""
    with patch("paperless_ai.client.OpenAI") as openai_cls:
        yield openai_cls
def test_get_llm_ollama(mock_ai_config, mock_ollama_llm):
    """An ollama backend builds an Ollama LLM with model, base URL and timeout."""
    mock_ai_config.llm_backend = "ollama"
    mock_ai_config.llm_model = "test_model"
    mock_ai_config.llm_endpoint = "http://test-url"

    client = AIClient()

    mock_ollama_llm.assert_called_once_with(
        model="test_model",
        base_url="http://test-url",
        request_timeout=120,
    )
    assert client.llm is mock_ollama_llm.return_value
def test_get_llm_openai(mock_ai_config, mock_openai_llm):
    """An openai backend builds an OpenAI LLM with model, base URL and API key."""
    mock_ai_config.llm_backend = "openai"
    mock_ai_config.llm_model = "test_model"
    mock_ai_config.llm_api_key = "test_api_key"
    mock_ai_config.llm_endpoint = "http://test-url"

    client = AIClient()

    mock_openai_llm.assert_called_once_with(
        model="test_model",
        api_base="http://test-url",
        api_key="test_api_key",
    )
    assert client.llm is mock_openai_llm.return_value
def test_get_llm_unsupported_backend(mock_ai_config):
    """An unknown backend name makes AIClient construction fail with ValueError."""
    mock_ai_config.llm_backend = "unsupported"
    with pytest.raises(ValueError, match="Unsupported LLM backend: unsupported"):
        AIClient()
def test_run_llm_query(mock_ai_config, mock_ollama_llm):
    """run_llm_query surfaces the tool-call kwargs as the parsed result mapping."""
    mock_ai_config.llm_backend = "ollama"
    mock_ai_config.llm_model = "test_model"
    mock_ai_config.llm_endpoint = "http://test-url"
    mock_llm_instance = mock_ollama_llm.return_value
    # Simulate the structured tool call the LLM returns for classification.
    tool_selection = ToolSelection(
        tool_id="call_test",
        tool_name="DocumentClassifierSchema",
        tool_kwargs={
            "title": "Test Title",
            "tags": ["test", "document"],
            "correspondents": ["John Doe"],
            "document_types": ["report"],
            "storage_paths": ["Reports"],
            "dates": ["2023-01-01"],
        },
    )
    mock_llm_instance.chat_with_tools.return_value = MagicMock()
    mock_llm_instance.get_tool_calls_from_response.return_value = [tool_selection]
    client = AIClient()
    result = client.run_llm_query("test_prompt")
    assert result["title"] == "Test Title"
def test_run_chat(mock_ai_config, mock_ollama_llm):
    """run_chat forwards the messages to the LLM and returns its reply verbatim."""
    mock_ai_config.llm_backend = "ollama"
    mock_ai_config.llm_model = "test_model"
    mock_ai_config.llm_endpoint = "http://test-url"
    llm = mock_ollama_llm.return_value
    llm.chat.return_value = "test_chat_result"

    chat_history = [ChatMessage(role="user", content="Hello")]
    reply = AIClient().run_chat(chat_history)

    llm.chat.assert_called_once_with(chat_history)
    assert reply == "test_chat_result"
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/tests/test_client.py",
"license": "GNU General Public License v3.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless_ai/tests/test_embedding.py | import json
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from django.conf import settings
from documents.models import Document
from paperless.models import LLMEmbeddingBackend
from paperless_ai.embedding import build_llm_index_text
from paperless_ai.embedding import get_embedding_dim
from paperless_ai.embedding import get_embedding_model
@pytest.fixture
def mock_ai_config():
    """Patch the AIConfig class used by the embedding module."""
    with patch("paperless_ai.embedding.AIConfig") as config_cls:
        yield config_cls
@pytest.fixture
def temp_llm_index_dir(tmp_path):
    """Point settings.LLM_INDEX_DIR at a temp dir for the test's duration."""
    saved = settings.LLM_INDEX_DIR
    settings.LLM_INDEX_DIR = tmp_path
    yield tmp_path
    settings.LLM_INDEX_DIR = saved
@pytest.fixture
def mock_document():
    """A fully-populated Document stub covering every field the index text uses."""
    doc = MagicMock(spec=Document)
    doc.title = "Test Title"
    doc.filename = "test_file.pdf"
    doc.created = "2023-01-01"
    doc.added = "2023-01-02"
    doc.modified = "2023-01-03"
    # "name" is reserved by MagicMock's constructor, so it is set afterwards.
    tag1 = MagicMock()
    tag1.name = "Tag1"
    tag2 = MagicMock()
    tag2.name = "Tag2"
    doc.tags.all = MagicMock(return_value=[tag1, tag2])
    doc.document_type = MagicMock()
    doc.document_type.name = "Invoice"
    doc.correspondent = MagicMock()
    doc.correspondent.name = "Test Correspondent"
    doc.archive_serial_number = "12345"
    doc.content = "This is the document content."
    # Custom fields stringify to their value.
    cf1 = MagicMock(__str__=lambda x: "Value1")
    cf1.field = MagicMock()
    cf1.field.name = "Field1"
    cf1.value = "Value1"
    cf2 = MagicMock(__str__=lambda x: "Value2")
    cf2.field = MagicMock()
    cf2.field.name = "Field2"
    cf2.value = "Value2"
    doc.custom_fields.all = MagicMock(return_value=[cf1, cf2])
    return doc
def test_get_embedding_model_openai(mock_ai_config):
    """The OpenAI backend constructs an OpenAIEmbedding with key and endpoint."""
    mock_ai_config.return_value.llm_embedding_backend = LLMEmbeddingBackend.OPENAI
    mock_ai_config.return_value.llm_embedding_model = "text-embedding-3-small"
    mock_ai_config.return_value.llm_api_key = "test_api_key"
    mock_ai_config.return_value.llm_endpoint = "http://test-url"

    with patch("paperless_ai.embedding.OpenAIEmbedding") as embedding_cls:
        model = get_embedding_model()

        embedding_cls.assert_called_once_with(
            model="text-embedding-3-small",
            api_key="test_api_key",
            api_base="http://test-url",
        )
        assert model is embedding_cls.return_value
def test_get_embedding_model_huggingface(mock_ai_config):
    """The HuggingFace backend constructs a HuggingFaceEmbedding by model name."""
    mock_ai_config.return_value.llm_embedding_backend = LLMEmbeddingBackend.HUGGINGFACE
    mock_ai_config.return_value.llm_embedding_model = (
        "sentence-transformers/all-MiniLM-L6-v2"
    )

    with patch(
        "paperless_ai.embedding.HuggingFaceEmbedding",
    ) as embedding_cls:
        model = get_embedding_model()

        embedding_cls.assert_called_once_with(
            model_name="sentence-transformers/all-MiniLM-L6-v2",
        )
        assert model is embedding_cls.return_value
def test_get_embedding_model_invalid_backend(mock_ai_config):
    """An unrecognised embedding backend raises a descriptive ValueError."""
    mock_ai_config.return_value.llm_embedding_backend = "INVALID_BACKEND"
    with pytest.raises(
        ValueError,
        match="Unsupported embedding backend: INVALID_BACKEND",
    ):
        get_embedding_model()
def test_get_embedding_dim_infers_and_saves(temp_llm_index_dir, mock_ai_config):
    """An unknown dim is probed with a sample embedding and persisted to meta.json."""
    mock_ai_config.return_value.llm_embedding_backend = "openai"
    mock_ai_config.return_value.llm_embedding_model = None

    class DummyEmbedding:
        # Returns a 7-dimensional vector, so the inferred dim must be 7.
        def get_text_embedding(self, text):
            return [0.0] * 7

    with patch(
        "paperless_ai.embedding.get_embedding_model",
        return_value=DummyEmbedding(),
    ) as mock_get:
        dim = get_embedding_dim()
        mock_get.assert_called_once()
    assert dim == 7
    # The probed model name and dimension are written to meta.json.
    meta = json.loads((temp_llm_index_dir / "meta.json").read_text())
    assert meta == {"embedding_model": "text-embedding-3-small", "dim": 7}
def test_get_embedding_dim_reads_existing_meta(temp_llm_index_dir, mock_ai_config):
    """An existing meta.json short-circuits probing the embedding model."""
    mock_ai_config.return_value.llm_embedding_backend = "openai"
    mock_ai_config.return_value.llm_embedding_model = None

    meta_file = temp_llm_index_dir / "meta.json"
    meta_file.write_text(
        json.dumps({"embedding_model": "text-embedding-3-small", "dim": 11}),
    )

    with patch("paperless_ai.embedding.get_embedding_model") as mock_get:
        assert get_embedding_dim() == 11
        mock_get.assert_not_called()
def test_get_embedding_dim_raises_on_model_change(temp_llm_index_dir, mock_ai_config):
    """A meta.json recorded for a different model triggers a hard RuntimeError."""
    mock_ai_config.return_value.llm_embedding_backend = "openai"
    mock_ai_config.return_value.llm_embedding_model = None
    (temp_llm_index_dir / "meta.json").write_text(
        json.dumps({"embedding_model": "old", "dim": 11}),
    )
    with pytest.raises(
        RuntimeError,
        match="Embedding model changed from old to text-embedding-3-small",
    ):
        get_embedding_dim()
def test_build_llm_index_text(mock_document):
    """The index text contains every document field in its labelled section."""
    with patch("documents.models.Note.objects.filter") as mock_notes_filter:
        mock_notes_filter.return_value = [
            MagicMock(note="Note1"),
            MagicMock(note="Note2"),
        ]
        result = build_llm_index_text(mock_document)
        assert "Title: Test Title" in result
        assert "Filename: test_file.pdf" in result
        assert "Created: 2023-01-01" in result
        assert "Tags: Tag1, Tag2" in result
        assert "Document Type: Invoice" in result
        assert "Correspondent: Test Correspondent" in result
        assert "Notes: Note1,Note2" in result
        assert "Content:\n\nThis is the document content." in result
        assert "Custom Field - Field1: Value1\nCustom Field - Field2: Value2" in result
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/tests/test_embedding.py",
"license": "GNU General Public License v3.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless_ai/tests/test_matching.py | from unittest.mock import patch
from django.test import TestCase
from documents.models import Correspondent
from documents.models import DocumentType
from documents.models import StoragePath
from documents.models import Tag
from paperless_ai.matching import extract_unmatched_names
from paperless_ai.matching import match_correspondents_by_name
from paperless_ai.matching import match_document_types_by_name
from paperless_ai.matching import match_storage_paths_by_name
from paperless_ai.matching import match_tags_by_name
class TestAIMatching(TestCase):
    """Exercise the AI name-matching helpers against real database objects."""

    def setUp(self) -> None:
        """Create two of each matchable object type for the tests below."""
        # Create test data for Tag
        self.tag1 = Tag.objects.create(name="Test Tag 1")
        self.tag2 = Tag.objects.create(name="Test Tag 2")
        # Create test data for Correspondent
        self.correspondent1 = Correspondent.objects.create(name="Test Correspondent 1")
        self.correspondent2 = Correspondent.objects.create(name="Test Correspondent 2")
        # Create test data for DocumentType
        self.document_type1 = DocumentType.objects.create(name="Test Document Type 1")
        self.document_type2 = DocumentType.objects.create(name="Test Document Type 2")
        # Create test data for StoragePath
        self.storage_path1 = StoragePath.objects.create(name="Test Storage Path 1")
        self.storage_path2 = StoragePath.objects.create(name="Test Storage Path 2")

    @patch("paperless_ai.matching.get_objects_for_user_owner_aware")
    def test_match_tags_by_name(self, mock_get_objects) -> None:
        """Only names with an existing Tag are matched; unknown names drop out."""
        mock_get_objects.return_value = Tag.objects.all()
        names = ["Test Tag 1", "Nonexistent Tag"]
        result = match_tags_by_name(names, user=None)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0].name, "Test Tag 1")

    @patch("paperless_ai.matching.get_objects_for_user_owner_aware")
    def test_match_correspondents_by_name(self, mock_get_objects) -> None:
        """Only names with an existing Correspondent are matched."""
        mock_get_objects.return_value = Correspondent.objects.all()
        names = ["Test Correspondent 1", "Nonexistent Correspondent"]
        result = match_correspondents_by_name(names, user=None)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0].name, "Test Correspondent 1")

    @patch("paperless_ai.matching.get_objects_for_user_owner_aware")
    def test_match_document_types_by_name(self, mock_get_objects) -> None:
        """Only names with an existing DocumentType are matched."""
        mock_get_objects.return_value = DocumentType.objects.all()
        names = ["Test Document Type 1", "Nonexistent Document Type"]
        result = match_document_types_by_name(names, user=None)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0].name, "Test Document Type 1")

    @patch("paperless_ai.matching.get_objects_for_user_owner_aware")
    def test_match_storage_paths_by_name(self, mock_get_objects) -> None:
        """Only names with an existing StoragePath are matched."""
        mock_get_objects.return_value = StoragePath.objects.all()
        names = ["Test Storage Path 1", "Nonexistent Storage Path"]
        result = match_storage_paths_by_name(names, user=None)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0].name, "Test Storage Path 1")

    def test_extract_unmatched_names(self) -> None:
        """Names the matcher could not resolve are returned for later creation."""
        llm_names = ["Test Tag 1", "Nonexistent Tag"]
        matched_objects = [self.tag1]
        unmatched_names = extract_unmatched_names(llm_names, matched_objects)
        self.assertEqual(unmatched_names, ["Nonexistent Tag"])

    @patch("paperless_ai.matching.get_objects_for_user_owner_aware")
    def test_match_tags_by_name_with_empty_names(self, mock_get_objects) -> None:
        """None, empty and whitespace-only names produce no matches."""
        mock_get_objects.return_value = Tag.objects.all()
        names = [None, "", " "]
        result = match_tags_by_name(names, user=None)
        self.assertEqual(result, [])

    @patch("paperless_ai.matching.get_objects_for_user_owner_aware")
    def test_match_tags_with_fuzzy_matching(self, mock_get_objects) -> None:
        """Slightly misspelled names still resolve to the closest existing tag."""
        mock_get_objects.return_value = Tag.objects.all()
        names = ["Test Taag 1", "Teest Tag 2"]
        result = match_tags_by_name(names, user=None)
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0].name, "Test Tag 1")
        self.assertEqual(result[1].name, "Test Tag 2")
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_ai/tests/test_matching.py",
"license": "GNU General Public License v3.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless_remote/apps.py | from django.apps import AppConfig
from paperless_remote.signals import remote_consumer_declaration
class PaperlessRemoteParserConfig(AppConfig):
    """Django app config registering the remote OCR parser with the consumer."""

    name = "paperless_remote"

    def ready(self) -> None:
        """Connect the remote parser declaration to the consumer signal on startup."""
        # Imported here (not at module level) to avoid touching the documents
        # app before Django's app registry is ready.
        from documents.signals import document_consumer_declaration

        document_consumer_declaration.connect(remote_consumer_declaration)
        # Idiomatic equivalent of the former `AppConfig.ready(self)`; robust
        # against future changes in the class hierarchy.
        super().ready()
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_remote/apps.py",
"license": "GNU General Public License v3.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless_remote/checks.py | from django.conf import settings
from django.core.checks import Error
from django.core.checks import register
@register()
def check_remote_parser_configured(app_configs, **kwargs):
    """System check: the Azure AI remote OCR engine needs endpoint and API key."""
    engine_is_azure = settings.REMOTE_OCR_ENGINE == "azureai"
    fully_configured = bool(
        settings.REMOTE_OCR_ENDPOINT and settings.REMOTE_OCR_API_KEY,
    )
    if engine_is_azure and not fully_configured:
        return [
            Error(
                "Azure AI remote parser requires endpoint and API key to be configured.",
            ),
        ]
    return []
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_remote/checks.py",
"license": "GNU General Public License v3.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.