| | |
| |
|
| | TEXT_DATA = r""" |
| | # The bytecode interpreter |
| | This document describes the workings and implementation of the bytecode |
| | interpreter, the part of python that executes compiled Python code. Its |
| | entry point is in [Python/ceval.c](../Python/ceval.c). |
| | At a high level, the interpreter consists of a loop that iterates over the |
| | bytecode instructions, executing each of them via a switch statement that |
| | has a case implementing each opcode. This switch statement is generated |
| | from the instruction definitions in [Python/bytecodes.c](../Python/bytecodes.c) |
| | which are written in [a DSL](../Tools/cases_generator/interpreter_definition.md) |
| | developed for this purpose. |
| | Recall that the [Python Compiler](compiler.md) produces a [`CodeObject`](code_objects.md), |
| | which contains the bytecode instructions along with static data that is required to execute them, |
| | such as the consts list, variable names, |
| | [exception table](exception_handling.md#format-of-the-exception-table), and so on. |
| | When the interpreter's |
| | [`PyEval_EvalCode()`](https://docs.python.org/3.14/c-api/veryhigh.html#c.PyEval_EvalCode) |
| | function is called to execute a `CodeObject`, it constructs a [`Frame`](frames.md) and calls |
| | [`_PyEval_EvalFrame()`](https://docs.python.org/3.14/c-api/veryhigh.html#c.PyEval_EvalCode) |
| | to execute the code object in this frame. The frame holds the dynamic state of the |
| | `CodeObject`'s execution, including the instruction pointer, the globals and builtins. |
| | It also has a reference to the `CodeObject` itself. |
| | In addition to the frame, `_PyEval_EvalFrame()` also receives a |
| | [`Thread State`](https://docs.python.org/3/c-api/init.html#c.PyThreadState) |
| | object, `tstate`, which includes things like the exception state and the |
| | recursion depth. The thread state also provides access to the per-interpreter |
| | state (`tstate->interp`), which has a pointer to the per-runtime (that is, |
| | truly global) state (`tstate->interp->runtime`). |
| | Finally, `_PyEval_EvalFrame()` receives an integer argument `throwflag` |
| | which, when nonzero, indicates that the interpreter should just raise the current exception |
(this is used in the implementation of
[`gen.throw`](https://docs.python.org/3.14/reference/expressions.html#generator.throw)).
| | By default, [`_PyEval_EvalFrame()`](https://docs.python.org/3.14/c-api/veryhigh.html#c.PyEval_EvalCode) |
| | simply calls [`_PyEval_EvalFrameDefault()`] to execute the frame. However, as per |
| | [`PEP 523`](https://peps.python.org/pep-0523/) this is configurable by setting |
| | `interp->eval_frame`. In the following, we describe the default function, |
| | `_PyEval_EvalFrameDefault()`. |
| | ## Instruction decoding |
| | The first task of the interpreter is to decode the bytecode instructions. |
| | Bytecode is stored as an array of 16-bit code units (`_Py_CODEUNIT`). |
| | Each code unit contains an 8-bit `opcode` and an 8-bit argument (`oparg`), both unsigned. |
| | In order to make the bytecode format independent of the machine byte order when stored on disk, |
| | `opcode` is always the first byte and `oparg` is always the second byte. |
| | Macros are used to extract the `opcode` and `oparg` from a code unit |
| | (`_Py_OPCODE(word)` and `_Py_OPARG(word)`). |
| | Some instructions (for example, `NOP` or `POP_TOP`) have no argument -- in this case |
| | we ignore `oparg`. |
| | A simplified version of the interpreter's main loop looks like this: |
| | ```c |
| | _Py_CODEUNIT *first_instr = code->co_code_adaptive; |
| | _Py_CODEUNIT *next_instr = first_instr; |
| | while (1) { |
| | _Py_CODEUNIT word = *next_instr++; |
| | unsigned char opcode = _Py_OPCODE(word); |
| | unsigned int oparg = _Py_OPARG(word); |
| | switch (opcode) { |
| | // ... A case for each opcode ... |
| | } |
| | } |
| | ``` |
| | This loop iterates over the instructions, decoding each into its `opcode` |
| | and `oparg`, and then executes the switch case that implements this `opcode`. |
| | The instruction format supports 256 different opcodes, which is sufficient. |
| | However, it also limits `oparg` to 8-bit values, which is too restrictive. |
| | To overcome this, the `EXTENDED_ARG` opcode allows us to prefix any instruction |
| | with one or more additional data bytes, which combine into a larger oparg. |
| | For example, this sequence of code units: |
| | EXTENDED_ARG 1 |
| | EXTENDED_ARG 0 |
| | LOAD_CONST 2 |
| | would set `opcode` to `LOAD_CONST` and `oparg` to `65538` (that is, `0x1_00_02`). |
| | The compiler should limit itself to at most three `EXTENDED_ARG` prefixes, to allow the |
| | resulting `oparg` to fit in 32 bits, but the interpreter does not check this. |
| | In the following, a `code unit` is always two bytes, while an `instruction` is a |
| | sequence of code units consisting of zero to three `EXTENDED_ARG` opcodes followed by |
| | a primary opcode. |
| | The following loop, to be inserted just above the `switch` statement, will make the above |
| | snippet decode a complete instruction: |
| | ```c |
| | while (opcode == EXTENDED_ARG) { |
| | word = *next_instr++; |
| | opcode = _Py_OPCODE(word); |
| | oparg = (oparg << 8) | _Py_OPARG(word); |
| | } |
| | ``` |
| | For various reasons we'll get to later (mostly efficiency, given that `EXTENDED_ARG` |
| | is rare) the actual code is different. |
| | ## Jumps |
| | Note that when the `switch` statement is reached, `next_instr` (the "instruction offset") |
| | already points to the next instruction. |
| | Thus, jump instructions can be implemented by manipulating `next_instr`: |
| | - A jump forward (`JUMP_FORWARD`) sets `next_instr += oparg`. |
| | - A jump backward (`JUMP_BACKWARD`) sets `next_instr -= oparg`. |
| | ## Inline cache entries |
| | Some (specialized or specializable) instructions have an associated "inline cache". |
| | The inline cache consists of one or more two-byte entries included in the bytecode |
| | array as additional words following the `opcode`/`oparg` pair. |
| | The size of the inline cache for a particular instruction is fixed by its `opcode`. |
| | Moreover, the inline cache size for all instructions in a |
| | [family of specialized/specializable instructions](#Specialization) |
| | (for example, `LOAD_ATTR`, `LOAD_ATTR_SLOT`, `LOAD_ATTR_MODULE`) must all be |
| | the same. Cache entries are reserved by the compiler and initialized with zeros. |
| | Although they are represented by code units, cache entries do not conform to the |
| | `opcode` / `oparg` format. |
| | If an instruction has an inline cache, the layout of its cache is described in |
| | the instruction's definition in [`Python/bytecodes.c`](../Python/bytecodes.c). |
| | The structs defined in [`pycore_code.h`](../Include/internal/pycore_code.h) |
| | allow us to access the cache by casting `next_instr` to a pointer to the relevant |
| | `struct`. The size of such a `struct` must be independent of the machine |
| | architecture, word size and alignment requirements. For a 32-bit field, the |
| | `struct` should use `_Py_CODEUNIT field[2]`. |
| | The instruction implementation is responsible for advancing `next_instr` past the inline cache. |
| | For example, if an instruction's inline cache is four bytes (that is, two code units) in size, |
| | the code for the instruction must contain `next_instr += 2;`. |
| | This is equivalent to a relative forward jump by that many code units. |
| | (In the interpreter definition DSL, this is coded as `JUMPBY(n)`, where `n` is the number |
| | of code units to jump, typically given as a named constant.) |
| | Serializing non-zero cache entries would present a problem because the serialization |
| | (:mod:`marshal`) format must be independent of the machine byte order. |
| | More information about the use of inline caches can be found in |
| | [PEP 659](https://peps.python.org/pep-0659/#ancillary-data). |
| | ## The evaluation stack |
| | Most instructions read or write some data in the form of object references (`PyObject *`). |
| | The CPython bytecode interpreter is a stack machine, meaning that its instructions operate |
| | by pushing data onto and popping it off the stack. |
| | The stack forms part of the frame for the code object. Its maximum depth is calculated |
| | by the compiler and stored in the `co_stacksize` field of the code object, so that the |
| | stack can be pre-allocated as a contiguous array of `PyObject*` pointers, when the frame |
| | is created. |
| | The stack effects of each instruction are also exposed through the |
| | [opcode metadata](../Include/internal/pycore_opcode_metadata.h) through two |
functions that report how many stack elements the instruction consumes,
| | and how many it produces (`_PyOpcode_num_popped` and `_PyOpcode_num_pushed`). |
| | For example, the `BINARY_OP` instruction pops two objects from the stack and pushes the |
| | result back onto the stack. |
| | The stack grows up in memory; the operation `PUSH(x)` is equivalent to `*stack_pointer++ = x`, |
| | whereas `x = POP()` means `x = *--stack_pointer`. |
| | Overflow and underflow checks are active in debug mode, but are otherwise optimized away. |
| | At any point during execution, the stack level is knowable based on the instruction pointer |
| | alone, and some properties of each item on the stack are also known. |
| | In particular, only a few instructions may push a `NULL` onto the stack, and the positions |
| | that may be `NULL` are known. |
| | A few other instructions (`GET_ITER`, `FOR_ITER`) push or pop an object that is known to |
| | be an iterator. |
| | Instruction sequences that do not allow statically knowing the stack depth are deemed illegal; |
| | the bytecode compiler never generates such sequences. |
| | For example, the following sequence is illegal, because it keeps pushing items on the stack: |
| | LOAD_FAST 0 |
| | JUMP_BACKWARD 2 |
| | > [!NOTE] |
| | > Do not confuse the evaluation stack with the call stack, which is used to implement calling |
| | > and returning from functions. |
| | ## Error handling |
| | When the implementation of an opcode raises an exception, it jumps to the |
| | `exception_unwind` label in [Python/ceval.c](../Python/ceval.c). |
| | The exception is then handled as described in the |
| | [`exception handling documentation`](exception_handling.md#handling-exceptions). |
| | ## Python-to-Python calls |
| | The `_PyEval_EvalFrameDefault()` function is recursive, because sometimes |
| | the interpreter calls some C function that calls back into the interpreter. |
| | In 3.10 and before, this was the case even when a Python function called |
| | another Python function: |
| | The `CALL` opcode would call the `tp_call` dispatch function of the |
| | callee, which would extract the code object, create a new frame for the call |
| | stack, and then call back into the interpreter. This approach is very general |
| | but consumes several C stack frames for each nested Python call, thereby |
| | increasing the risk of an (unrecoverable) C stack overflow. |
| | Since 3.11, the `CALL` instruction special-cases function objects to "inline" |
| | the call. When a call gets inlined, a new frame gets pushed onto the call |
| | stack and the interpreter "jumps" to the start of the callee's bytecode. |
| | When an inlined callee executes a `RETURN_VALUE` instruction, the frame is |
| | popped off the call stack and the interpreter returns to its caller, |
| | by popping a frame off the call stack and "jumping" to the return address. |
| | There is a flag in the frame (`frame->is_entry`) that indicates whether |
| | the frame was inlined (set if it wasn't). |
| | If `RETURN_VALUE` finds this flag set, it performs the usual cleanup and |
| | returns from `_PyEval_EvalFrameDefault()` altogether, to a C caller. |
| | A similar check is performed when an unhandled exception occurs. |
| | ## The call stack |
| | Up through 3.10, the call stack was implemented as a singly-linked list of |
| | [frame objects](frames.md). This was expensive because each call would require a |
| | heap allocation for the stack frame. |
| | Since 3.11, frames are no longer fully-fledged objects. Instead, a leaner internal |
| | `_PyInterpreterFrame` structure is used. Most frames are allocated contiguously in a |
| | per-thread stack (see `_PyThreadState_PushFrame` in [Python/pystate.c](../Python/pystate.c)), |
| | which improves memory locality and reduces overhead. |
| | If the current `datastack_chunk` has enough space (`_PyThreadState_HasStackSpace`) |
| | then the lightweight `_PyFrame_PushUnchecked` can be used instead of `_PyThreadState_PushFrame`. |
| | Sometimes an actual `PyFrameObject` is needed, such as when Python code calls |
| | `sys._getframe()` or an extension module calls |
| | [`PyEval_GetFrame()`](https://docs.python.org/3/c-api/reflection.html#c.PyEval_GetFrame). |
| | In this case we allocate a proper `PyFrameObject` and initialize it from the |
| | `_PyInterpreterFrame`. |
| | Things get more complicated when generators are involved, since those do not |
| | follow the push/pop model. This includes async functions, which are based on |
| | the same mechanism. A generator object has space for a `_PyInterpreterFrame` |
| | structure, including the variable-size part (used for locals and the eval stack). |
| | When a generator (or async) function is first called, a special opcode |
| | `RETURN_GENERATOR` is executed, which is responsible for creating the |
| | generator object. The generator object's `_PyInterpreterFrame` is initialized |
| | with a copy of the current stack frame. The current stack frame is then popped |
| | off the frame stack and the generator object is returned. |
| | (Details differ depending on the `is_entry` flag.) |
| | When the generator is resumed, the interpreter pushes its `_PyInterpreterFrame` |
| | onto the frame stack and resumes execution. |
| | See also the [generators](generators.md) section. |
| | <!-- |
| | ## All sorts of variables |
| | The bytecode compiler determines the scope in which each variable name is defined, |
| | and generates instructions accordingly. For example, loading a local variable |
| | onto the stack is done using `LOAD_FAST`, while loading a global is done using |
| | `LOAD_GLOBAL`. |
| | The key types of variables are: |
| | - fast locals: used in functions |
| | - (slow or regular) locals: used in classes and at the top level |
| | - globals and builtins: the compiler cannot distinguish between globals and |
| | builtins (though at runtime, the specializing interpreter can) |
| | - cells: used for nonlocal references |
| | (TODO: Write the rest of this section. Alas, the author got distracted and won't have time to continue this for a while.) |
| | --> |
| | <!-- |
| | Other topics |
| | ------------ |
| | (TODO: Each of the following probably deserves its own section.) |
| | - co_consts, co_names, co_varnames, and their ilk |
| | - How calls work (how args are transferred, return, exceptions) |
| | - Eval breaker (interrupts, GIL) |
| | - Tracing |
| | - Setting the current lineno (debugger-induced jumps) |
| | - Specialization, inline caches etc. |
| | --> |
| | ## Introducing a new bytecode instruction |
| | It is occasionally necessary to add a new opcode in order to implement |
| | a new feature or change the way that existing features are compiled. |
| | This section describes the changes required to do this. |
| | First, you must choose a name for the bytecode, implement it in |
| | [`Python/bytecodes.c`](../Python/bytecodes.c) and add a documentation |
| | entry in [`Doc/library/dis.rst`](../Doc/library/dis.rst). |
| | Then run `make regen-cases` to assign a number for it (see |
| | [`Include/opcode_ids.h`](../Include/opcode_ids.h)) and regenerate a |
| | number of files with the actual implementation of the bytecode in |
| | [`Python/generated_cases.c.h`](../Python/generated_cases.c.h) and |
| | metadata about it in additional files. |
| | With a new bytecode you must also change what is called the "magic number" for |
| | .pyc files: bump the value of the variable `MAGIC_NUMBER` in |
| | [`Lib/importlib/_bootstrap_external.py`](../Lib/importlib/_bootstrap_external.py). |
| | Changing this number will lead to all .pyc files with the old `MAGIC_NUMBER` |
| | to be recompiled by the interpreter on import. Whenever `MAGIC_NUMBER` is |
| | changed, the ranges in the `magic_values` array in |
| | [`PC/launcher.c`](../PC/launcher.c) may also need to be updated. Changes to |
| | [`Lib/importlib/_bootstrap_external.py`](../Lib/importlib/_bootstrap_external.py) |
| | will take effect only after running `make regen-importlib`. |
| | > [!NOTE] |
| | > Running `make regen-importlib` before adding the new bytecode target to |
| | > [`Python/bytecodes.c`](../Python/bytecodes.c) |
| | > (followed by `make regen-cases`) will result in an error. You should only run |
| | > `make regen-importlib` after the new bytecode target has been added. |
| | > [!NOTE] |
| | > On Windows, running the `./build.bat` script will automatically |
| | > regenerate the required files without requiring additional arguments. |
| | Finally, you need to introduce the use of the new bytecode. Update |
| | [`Python/codegen.c`](../Python/codegen.c) to emit code with this bytecode. |
| | Optimizations in [`Python/flowgraph.c`](../Python/flowgraph.c) may also |
| | need to be updated. If the new opcode affects a control flow or the block |
| | stack, you may have to update the `frame_setlineno()` function in |
| | [`Objects/frameobject.c`](../Objects/frameobject.c). It may also be necessary |
| | to update [`Lib/dis.py`](../Lib/dis.py) if the new opcode interprets its |
| | argument in a special way (like `FORMAT_VALUE` or `MAKE_FUNCTION`). |
| | If you make a change here that can affect the output of bytecode that |
| | is already in existence and you do not change the magic number, make |
| | sure to delete your old .py(c|o) files! Even though you will end up changing |
| | the magic number if you change the bytecode, while you are debugging your work |
| | you may be changing the bytecode output without constantly bumping up the |
| | magic number. This can leave you with stale .pyc files that will not be |
| | recreated. |
| | Running `find . -name '*.py[co]' -exec rm -f '{}' +` should delete all .pyc |
| | files you have, forcing new ones to be created and thus allow you test out your |
| | new bytecode properly. Run `make regen-importlib` for updating the |
| | bytecode of frozen importlib files. You have to run `make` again after this |
| | to recompile the generated C files. |
| | ## Specialization |
| | Bytecode specialization, which was introduced in |
| | [PEP 659](https://peps.python.org/pep-0659/), speeds up program execution by |
| | rewriting instructions based on runtime information. This is done by replacing |
| | a generic instruction with a faster version that works for the case that this |
| | program encounters. Each specializable instruction is responsible for rewriting |
| | itself, using its [inline caches](#inline-cache-entries) for |
| | bookkeeping. |
| | When an adaptive instruction executes, it may attempt to specialize itself, |
| | depending on the argument and the contents of its cache. This is done |
| | by calling one of the `_Py_Specialize_XXX` functions in |
| | [`Python/specialize.c`](../Python/specialize.c). |
| | The specialized instructions are responsible for checking that the special-case |
| | assumptions still apply, and de-optimizing back to the generic version if not. |
| | ## Families of instructions |
| | A *family* of instructions consists of an adaptive instruction along with the |
| | specialized instructions that it can be replaced by. |
| | It has the following fundamental properties: |
| | * It corresponds to a single instruction in the code |
| | generated by the bytecode compiler. |
| | * It has a single adaptive instruction that records an execution count and, |
| | at regular intervals, attempts to specialize itself. If not specializing, |
| | it executes the base implementation. |
| | * It has at least one specialized form of the instruction that is tailored |
| | for a particular value or set of values at runtime. |
| | * All members of the family must have the same number of inline cache entries, |
| | to ensure correct execution. |
| | Individual family members do not need to use all of the entries, |
| | but must skip over any unused entries when executing. |
| | The current implementation also requires the following, |
| | although these are not fundamental and may change: |
| | * All families use one or more inline cache entries, |
| | the first entry is always the counter. |
| | * All instruction names should start with the name of the adaptive |
| | instruction. |
| | * Specialized forms should have names describing their specialization. |
| | ## Example family |
| | The `LOAD_GLOBAL` instruction (in [Python/bytecodes.c](../Python/bytecodes.c)) |
| | already has an adaptive family that serves as a relatively simple example. |
| | The `LOAD_GLOBAL` instruction performs adaptive specialization, |
| | calling `_Py_Specialize_LoadGlobal()` when the counter reaches zero. |
| | There are two specialized instructions in the family, `LOAD_GLOBAL_MODULE` |
| | which is specialized for global variables in the module, and |
| | `LOAD_GLOBAL_BUILTIN` which is specialized for builtin variables. |
| | ## Performance analysis |
| | The benefit of a specialization can be assessed with the following formula: |
| | `Tbase/Tadaptive`. |
| | Where `Tbase` is the mean time to execute the base instruction, |
| | and `Tadaptive` is the mean time to execute the specialized and adaptive forms. |
| | `Tadaptive = (sum(Ti*Ni) + Tmiss*Nmiss)/(sum(Ni)+Nmiss)` |
| | `Ti` is the time to execute the `i`th instruction in the family and `Ni` is |
| | the number of times that instruction is executed. |
`Tmiss` is the time to process a miss, including de-optimization
| | and the time to execute the base instruction. |
| | The ideal situation is where misses are rare and the specialized |
| | forms are much faster than the base instruction. |
| | `LOAD_GLOBAL` is near ideal, `Nmiss/sum(Ni) ≈ 0`. |
| | In which case we have `Tadaptive ≈ sum(Ti*Ni)`. |
| | Since we can expect the specialized forms `LOAD_GLOBAL_MODULE` and |
| | `LOAD_GLOBAL_BUILTIN` to be much faster than the adaptive base instruction, |
| | we would expect the specialization of `LOAD_GLOBAL` to be profitable. |
| | ## Design considerations |
| | While `LOAD_GLOBAL` may be ideal, instructions like `LOAD_ATTR` and |
| | `CALL_FUNCTION` are not. For maximum performance we want to keep `Ti` |
| | low for all specialized instructions and `Nmiss` as low as possible. |
| | Keeping `Nmiss` low means that there should be specializations for almost |
| | all values seen by the base instruction. Keeping `sum(Ti*Ni)` low means |
| | keeping `Ti` low which means minimizing branches and dependent memory |
| | accesses (pointer chasing). These two objectives may be in conflict, |
| | requiring judgement and experimentation to design the family of instructions. |
The size of the inline cache should be as small as possible,
| | without impairing performance, to reduce the number of |
| | `EXTENDED_ARG` jumps, and to reduce pressure on the CPU's data cache. |
| | ### Gathering data |
| | Before choosing how to specialize an instruction, it is important to gather |
| | some data. What are the patterns of usage of the base instruction? |
| | Data can best be gathered by instrumenting the interpreter. Since a |
| | specialization function and adaptive instruction are going to be required, |
| | instrumentation can most easily be added in the specialization function. |
| | ### Choice of specializations |
| | The performance of the specializing adaptive interpreter relies on the |
| | quality of specialization and keeping the overhead of specialization low. |
| | Specialized instructions must be fast. In order to be fast, |
| | specialized instructions should be tailored for a particular |
| | set of values that allows them to: |
1. Verify that the incoming value is part of that set with low overhead.
| | 2. Perform the operation quickly. |
| | This requires that the set of values is chosen such that membership can be |
| | tested quickly and that membership is sufficient to allow the operation to be |
| | performed quickly. |
| | For example, `LOAD_GLOBAL_MODULE` is specialized for `globals()` |
dictionaries that have keys with the expected version.
| | This can be tested quickly: |
| | * `globals->keys->dk_version == expected_version` |
| | and the operation can be performed quickly: |
| | * `value = entries[cache->index].me_value;`. |
| | Because it is impossible to measure the performance of an instruction without |
| | also measuring unrelated factors, the assessment of the quality of a |
| | specialization will require some judgement. |
| | As a general rule, specialized instructions should be much faster than the |
| | base instruction. |
| | ### Implementation of specialized instructions |
| | In general, specialized instructions should be implemented in two parts: |
| | 1. A sequence of guards, each of the form |
| | `DEOPT_IF(guard-condition-is-false, BASE_NAME)`. |
| | 2. The operation, which should ideally have no branches and |
| | a minimum number of dependent memory accesses. |
| | In practice, the parts may overlap, as data required for guards |
| | can be re-used in the operation. |
| | If there are branches in the operation, then consider further specialization |
| | to eliminate the branches. |
| | ### Maintaining stats |
| | Finally, take care that stats are gathered correctly. |
| | After the last `DEOPT_IF` has passed, a hit should be recorded with |
| | `STAT_INC(BASE_INSTRUCTION, hit)`. |
| | After an optimization has been deferred in the adaptive instruction, |
| | that should be recorded with `STAT_INC(BASE_INSTRUCTION, deferred)`. |
| | Additional resources |
| | -------------------- |
| | * Brandt Bucher's talk about the specializing interpreter at PyCon US 2023. |
| | [Slides](https://github.com/brandtbucher/brandtbucher/blob/master/2023/04/21/inside_cpython_311s_new_specializing_adaptive_interpreter.pdf) |
| | [Video](https://www.youtube.com/watch?v=PGZPSWZSkJI&t=1470s) |
| | |
| | #!/usr/bin/env python3 |
| | import asyncio |
| | import argparse |
| | import json |
| | import os |
| | import platform |
| | import re |
| | import shlex |
| | import shutil |
| | import signal |
| | import subprocess |
| | import sys |
| | import sysconfig |
| | from asyncio import wait_for |
| | from contextlib import asynccontextmanager |
| | from datetime import datetime, timezone |
| | from glob import glob |
| | from os.path import abspath, basename, relpath |
| | from pathlib import Path |
| | from subprocess import CalledProcessError |
| | from tempfile import TemporaryDirectory |
# Name of this script, used in user-facing error messages.
SCRIPT_NAME = Path(__file__).name
# This script lives in the Android/ directory of a CPython checkout.
ANDROID_DIR = Path(__file__).resolve().parent
PYTHON_DIR = ANDROID_DIR.parent
# True when running from a full CPython source tree rather than an
# installed/standalone copy (pyconfig.h.in only exists in the source tree).
in_source_tree = (
    ANDROID_DIR.name == "Android" and (PYTHON_DIR / "pyconfig.h.in").exists()
)
ENV_SCRIPT = ANDROID_DIR / "android-env.sh"
TESTBED_DIR = ANDROID_DIR / "testbed"
CROSS_BUILD_DIR = PYTHON_DIR / "cross-build"
# Android target triplets this script can build for.
HOSTS = ["aarch64-linux-android", "x86_64-linux-android"]
APP_ID = "org.python.testbed"
# (encoding, errors) arguments used when decoding subprocess output.
DECODE_ARGS = ("UTF-8", "backslashreplace")
try:
    android_home = Path(os.environ['ANDROID_HOME'])
except KeyError:
    sys.exit("The ANDROID_HOME environment variable is required.")
# adb ships with the SDK platform-tools; it has a .exe suffix on Windows.
adb = Path(
    f"{android_home}/platform-tools/adb"
    + (".exe" if os.name == "nt" else "")
)
# The Gradle wrapper is a .bat script on Windows.
gradlew = Path(
    f"{TESTBED_DIR}/gradlew"
    + (".bat" if os.name == "nt" else "")
)
# Whether we've seen any output from Python yet.
python_started = False
# Buffer for verbose output which will be displayed only if a test fails and
# there has been no output from Python.
hidden_output = []
def log_verbose(context, line, stream=sys.stdout):
    """Write `line` to `stream` when verbose; otherwise buffer it in
    `hidden_output` so it can be replayed later if a test fails."""
    if not context.verbose:
        hidden_output.append((stream, line))
        return
    stream.write(line)
def delete_glob(pattern):
    """Delete every filesystem entry matching the glob `pattern`."""
    # Path.glob doesn't accept non-relative patterns.
    for hit in glob(str(pattern)):
        path = Path(hit)
        print(f"Deleting {path} ...")
        if path.is_symlink() or not path.is_dir():
            path.unlink()
        else:
            shutil.rmtree(path)
def subdir(*parts, create=False):
    """Return CROSS_BUILD_DIR joined with `parts`.

    If the directory doesn't exist, either create it (when `create` is true)
    or exit with an error telling the user how to create it.
    """
    path = CROSS_BUILD_DIR.joinpath(*parts)
    if path.exists():
        return path
    if not create:
        sys.exit(
            f"{path} does not exist. Create it by running the appropriate "
            f"`configure` subcommand of {SCRIPT_NAME}.")
    path.mkdir(parents=True)
    return path
def run(command, *, host=None, env=None, log=True, **kwargs):
    """Wrapper around subprocess.run.

    Defaults to check=True; when `host` is given, merges the Android
    cross-compile environment for that host into `env` (or a copy of
    os.environ); when `log` is true, echoes the command first.
    """
    kwargs.setdefault("check", True)
    effective_env = os.environ.copy() if env is None else env
    if host:
        extra = android_env(host)
        print_env(extra)
        effective_env.update(extra)
    if log:
        print(">", join_command(command))
    return subprocess.run(command, env=effective_env, **kwargs)
# Format a command so it can be copied into a shell. Like shlex.join, but also
# accepts arguments which are Paths, or a single string/Path outside of a list.
def join_command(args):
    if isinstance(args, (str, Path)):
        return str(args)
    return shlex.join(str(arg) for arg in args)
# Format the environment so it can be pasted into a shell.
def print_env(env):
    for key in sorted(env):
        print(f"export {key}={shlex.quote(env[key])}")
def android_env(host):
    """Return the environment variables set by android-env.sh for `host`.

    If `host` is falsy, the host triplet is recovered from the name of the
    _sysconfigdata file found under ANDROID_DIR / "prefix" (assumes exactly
    one architecture is installed there — TODO confirm with callers).
    Only variables whose value differs from the current os.environ are
    returned.  Raises ValueError if the script produced no variables.
    """
    if host:
        prefix = subdir(host) / "prefix"
    else:
        prefix = ANDROID_DIR / "prefix"
        # Derive the host triplet from the sysconfig data filename.
        sysconfig_files = prefix.glob("lib/python*/_sysconfigdata__android_*.py")
        sysconfig_filename = next(sysconfig_files).name
        host = re.fullmatch(r"_sysconfigdata__android_(.+).py", sysconfig_filename)[1]
    # Source android-env.sh in a strict shell and dump the resulting
    # environment with `export`.
    env_output = subprocess.run(
        f"set -eu; "
        f"HOST={host}; "
        f"PREFIX={prefix}; "
        f". {ENV_SCRIPT}; "
        f"export",
        check=True, shell=True, capture_output=True, encoding='utf-8',
    ).stdout
    env = {}
    for line in env_output.splitlines():
        # We don't require every line to match, as there may be some other
        # output from installing the NDK.
        if match := re.search(
            "^(declare -x |export )?(\\w+)=['\"]?(.*?)['\"]?$", line
        ):
            key, value = match[2], match[3]
            # Keep only variables that the script changed or added.
            if os.environ.get(key) != value:
                env[key] = value
    if not env:
        raise ValueError(f"Found no variables in {ENV_SCRIPT.name} output:\n"
                         + env_output)
    return env
def build_python_path():
    """The path to the build Python binary."""
    build_dir = subdir("build")
    for candidate in (build_dir / "python", build_dir / "python.exe"):
        if candidate.is_file():
            return candidate
    raise FileNotFoundError("Unable to find `python(.exe)` in "
                            f"{build_dir}")
def configure_build_python(context):
    """Run `configure` for the build (host-machine) Python."""
    if context.clean:
        clean("build")
    os.chdir(subdir("build", create=True))
    command = [relpath(PYTHON_DIR / "configure")]
    command.extend(context.args or [])
    run(command)
def make_build_python(context):
    """Build the previously-configured build Python with make."""
    os.chdir(subdir("build"))
    jobs = str(os.cpu_count())
    run(["make", "-j", jobs])
| | # To create new builds of these dependencies, usually all that's necessary is to |
| | # push a tag to the cpython-android-source-deps repository, and GitHub Actions |
| | # will do the rest. |
| | # |
| | # If you're a member of the Python core team, and you'd like to be able to push |
| | # these tags yourself, please contact Malcolm Smith or Russell Keith-Magee. |
def unpack_deps(host, prefix_dir):
    """Download and extract the prebuilt dependency libraries for `host`.

    Each release archive is downloaded into `prefix_dir`, unpacked in
    place, and the archive file is then deleted.
    """
    os.chdir(prefix_dir)
    deps_url = "https://github.com/beeware/cpython-android-source-deps/releases/download"
    for name_ver in ["bzip2-1.0.8-3", "libffi-3.4.4-3", "openssl-3.0.18-0",
                     "sqlite-3.50.4-0", "xz-5.4.6-1", "zstd-1.5.7-1"]:
        filename = f"{name_ver}-{host}.tar.gz"
        # Fix: the URL must end with the archive filename; it previously
        # contained a literal "(unknown)" placeholder, so the download (and
        # the subsequent unpack of `filename`) could never succeed.
        download(f"{deps_url}/{name_ver}/{filename}")
        shutil.unpack_archive(filename)
        os.remove(filename)
def download(url, target_dir="."):
    """Download `url` into `target_dir` with curl; return the output path.

    curl follows redirects (-L), fails on HTTP errors (-f), and retries
    up to 5 times on any error.
    """
    destination = f"{target_dir}/{basename(url)}"
    curl_command = [
        "curl", "-Lf", "--retry", "5", "--retry-all-errors",
        "-o", destination, url,
    ]
    run(curl_command)
    return destination
def configure_host_python(context):
    """Run `configure` for the Android (host) Python.

    On first run, creates the prefix directory and unpacks the prebuilt
    dependency libraries into it, then cross-compile-configures Python
    against them. Extra command-line arguments are forwarded to
    `configure`.
    """
    if context.clean:
        clean(context.host)

    host_dir = subdir(context.host, create=True)
    prefix_dir = host_dir / "prefix"
    if not prefix_dir.exists():
        prefix_dir.mkdir()
        unpack_deps(context.host, prefix_dir)

    os.chdir(host_dir)
    command = [
        # Basic cross-compiling configuration
        relpath(PYTHON_DIR / "configure"),
        f"--host={context.host}",
        f"--build={sysconfig.get_config_var('BUILD_GNU_TYPE')}",
        f"--with-build-python={build_python_path()}",
        "--without-ensurepip",

        # Android always uses a shared libpython.
        "--enable-shared",
        "--without-static-libpython",

        # Dependent libraries. The others are found using pkg-config: see
        # android-env.sh.
        f"--with-openssl={prefix_dir}",
    ]

    if context.args:
        command.extend(context.args)
    run(command, host=context.host)
def make_host_python(context):
    """Run `make` and `make install` for the Android Python."""
    # The CFLAGS and LDFLAGS set in android-env include the prefix dir, so
    # delete any previous Python installation to prevent it being used during
    # the build.
    host_dir = subdir(context.host)
    prefix_dir = host_dir / "prefix"
    for pattern in ("include/python*", "lib/libpython*", "lib/python*"):
        delete_glob(f"{prefix_dir}/{pattern}")

    # The Android environment variables were already captured in the Makefile by
    # `configure`, and passing them again when running `make` may cause some
    # flags to be duplicated. So we don't use the `host` argument here.
    os.chdir(host_dir)
    run(["make", "-j", str(os.cpu_count())])

    # The `make install` output is very verbose and rarely useful, so
    # suppress it by default.
    run(
        ["make", "install", f"prefix={prefix_dir}"],
        capture_output=not context.verbose,
    )
def build_all(context):
    """Run the complete build: configure and make, for both the build
    Python and the Android host Python, in order."""
    for build_step in (configure_build_python, make_build_python,
                       configure_host_python, make_host_python):
        build_step(context)
def clean(host):
    """Delete the cross-build directory for `host` (or "build")."""
    delete_glob(CROSS_BUILD_DIR / host)
def clean_all(context):
    """Delete every per-host build directory plus the build Python's."""
    for target in [*HOSTS, "build"]:
        clean(target)
def setup_ci():
    """One-time setup for GitHub Actions runners; no-op elsewhere."""
    if "GITHUB_ACTIONS" in os.environ:
        # Enable emulator hardware acceleration
        # (https://github.blog/changelog/2024-04-02-github-actions-hardware-accelerated-android-virtualization-now-available/).
        if platform.system() == "Linux":
            run(
                ["sudo", "tee", "/etc/udev/rules.d/99-kvm4all.rules"],
                input='KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"\n',
                text=True,
            )
            run(["sudo", "udevadm", "control", "--reload-rules"])
            run(["sudo", "udevadm", "trigger", "--name-match=kvm"])

        # Free up disk space by deleting unused versions of the NDK
        # (https://github.com/freakboy3742/pyspamsum/pull/108).
        for line in ENV_SCRIPT.read_text().splitlines():
            if match := re.fullmatch(r"ndk_version=(.+)", line):
                ndk_version = match[1]
                break
        else:
            raise ValueError(f"Failed to find NDK version in {ENV_SCRIPT.name}")
        for item in (android_home / "ndk").iterdir():
            # NDK install directories are named by version number; keep only
            # the version that android-env.sh declares.
            if item.name[0].isdigit() and item.name != ndk_version:
                delete_glob(item)
def setup_sdk():
    """Accept SDK licenses and ensure platform-tools (adb) is installed."""
    sdkmanager = android_home / (
        "cmdline-tools/latest/bin/sdkmanager"
        + (".bat" if os.name == "nt" else "")
    )

    # Gradle will fail if it needs to install an SDK package whose license
    # hasn't been accepted, so pre-accept all licenses.
    if not all((android_home / "licenses" / path).exists() for path in [
        "android-sdk-arm-dbt-license", "android-sdk-license"
    ]):
        run(
            [sdkmanager, "--licenses"],
            text=True,
            capture_output=True,
            # Answer "y" to every license prompt; 100 is comfortably more
            # than sdkmanager will ever ask.
            input="y\n" * 100,
        )

    # Gradle may install this automatically, but we can't rely on that because
    # we need to run adb within the logcat task.
    if not adb.exists():
        run([sdkmanager, "platform-tools"])
| | # To avoid distributing compiled artifacts without corresponding source code, |
| | # the Gradle wrapper is not included in the CPython repository. Instead, we |
| | # extract it from the Gradle GitHub repository. |
def setup_testbed():
    """Download the Gradle wrapper into the testbed directory if missing."""
    paths = ["gradlew", "gradlew.bat", "gradle/wrapper/gradle-wrapper.jar"]
    if all((TESTBED_DIR / path).exists() for path in paths):
        return

    # The wrapper version isn't important, as any version of the wrapper can
    # download any version of Gradle. The Gradle version actually used for the
    # build is specified in testbed/gradle/wrapper/gradle-wrapper.properties.
    version = "8.9.0"

    for path in paths:
        out_path = TESTBED_DIR / path
        out_path.parent.mkdir(exist_ok=True)
        download(
            f"https://raw.githubusercontent.com/gradle/gradle/v{version}/{path}",
            out_path.parent,
        )
        # The gradlew scripts must be executable; harmless for the .jar.
        os.chmod(out_path, 0o755)
| | # run_testbed will build the app automatically, but it's useful to have this as |
| | # a separate command to allow running the app outside of this script. |
def build_testbed(context):
    """Build the testbed app's debug APK and its androidTest APK."""
    setup_sdk()
    setup_testbed()
    gradle_args = [
        gradlew, "--console", "plain",
        "packageDebug", "packageDebugAndroidTest",
    ]
    run(gradle_args, cwd=TESTBED_DIR)
| | # Work around a bug involving sys.exit and TaskGroups |
| | # (https://github.com/python/cpython/issues/101515). |
def exit(*args):
    """Raise MySystemExit instead of SystemExit.

    Works around a bug involving sys.exit and TaskGroups
    (https://github.com/python/cpython/issues/101515); run_testbed
    converts MySystemExit back into SystemExit.
    """
    raise MySystemExit(*args)
class MySystemExit(Exception):
    """Substitute for SystemExit that propagates cleanly out of a TaskGroup."""
    pass
| | # The `test` subcommand runs all subprocesses through this context manager so |
| | # that no matter what happens, they can always be cancelled from another task, |
| | # and they will always be cleaned up on exit. |
@asynccontextmanager
async def async_process(*args, **kwargs):
    """Context manager wrapping asyncio.create_subprocess_exec.

    On exit, if the process is still running, it is terminated and waited
    for; if it doesn't stop within the timeout, it is killed.
    """
    process = await asyncio.create_subprocess_exec(*args, **kwargs)
    try:
        yield process
    finally:
        if process.returncode is None:
            # Allow a reasonably long time for Gradle to clean itself up,
            # because we don't want stale emulators left behind.
            timeout = 10
            process.terminate()
            try:
                await wait_for(process.wait(), timeout)
            except TimeoutError:
                print(
                    f"Command {args} did not terminate after {timeout} seconds "
                    f" - sending SIGKILL"
                )
                process.kill()

                # Even after killing the process we must still wait for it,
                # otherwise we'll get the warning "Exception ignored in __del__".
                await wait_for(process.wait(), timeout=1)
async def async_check_output(*args, **kwargs):
    """Run a command asynchronously and return its decoded stdout.

    Raises CalledProcessError (carrying the captured stdout and stderr)
    on a non-zero exit status.
    """
    async with async_process(
        *args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs
    ) as process:
        stdout, stderr = await process.communicate()
        if process.returncode == 0:
            return stdout.decode(*DECODE_ARGS)
        else:
            raise CalledProcessError(
                process.returncode, args,
                stdout.decode(*DECODE_ARGS), stderr.decode(*DECODE_ARGS)
            )
| | # Return a list of the serial numbers of connected devices. Emulators will have |
| | # serials of the form "emulator-5678". |
async def list_devices():
    """Return the serials of all connected devices in the "device" state.

    Parses `adb devices` output. Raises ValueError if the header line is
    missing, or if a device line doesn't split into exactly two fields.
    """
    lines = (await async_check_output(adb, "devices")).splitlines()
    header_found = False
    serials = []
    for raw_line in lines:
        stripped = raw_line.strip()
        if stripped == "List of devices attached":
            header_found = True
            continue
        # Ignore blank lines, and all lines before the header.
        if not (header_found and stripped):
            continue
        fields = stripped.split()
        if len(fields) != 2:
            raise ValueError(f"failed to parse {stripped!r}")
        serial, status = fields
        if status == "device":
            serials.append(serial)
    if not header_found:
        raise ValueError(f"failed to parse {lines}")
    return serials
async def find_device(context, initial_devices):
    """Return the serial of the device to test on.

    In --connected mode, this is simply the user-supplied serial. In
    --managed mode, poll the device list until exactly one device appears
    that wasn't in `initial_devices`.
    """
    if not context.managed:
        return context.connected

    print("Waiting for managed device - this may take several minutes")
    while True:
        new_devices = set(await list_devices()) - set(initial_devices)
        if not new_devices:
            await asyncio.sleep(1)
            continue
        if len(new_devices) > 1:
            exit(f"Found more than one new device: {new_devices}")
        serial = new_devices.pop()
        print(f"Serial: {serial}")
        return serial
| | # An older version of this script in #121595 filtered the logs by UID instead. |
| | # But logcat can't filter by UID until API level 31. If we ever switch back to |
| | # filtering by UID, we'll also have to filter by time so we only show messages |
| | # produced after the initial call to `stop_app`. |
| | # |
| | # We're more likely to miss the PID because it's shorter-lived, so there's a |
| | # workaround in PythonSuite.kt to stop it being *too* short-lived. |
async def find_pid(serial):
    """Poll the device until the testbed app's process appears; return its PID."""
    print("Waiting for app to start - this may take several minutes")
    shown_error = False
    while True:
        try:
            # `pidof` requires API level 24 or higher. The level 23 emulator
            # includes it, but it doesn't work (it returns all processes).
            pid = (await async_check_output(
                adb, "-s", serial, "shell", "pidof", "-s", APP_ID
            )).strip()
        except CalledProcessError as e:
            # If the app isn't running yet, pidof gives no output. So if there
            # is output, there must have been some other error. However, this
            # sometimes happens transiently, especially when running a managed
            # emulator for the first time, so don't make it fatal.
            if (e.stdout or e.stderr) and not shown_error:
                print_called_process_error(e)
                print("This may be transient, so continuing to wait")
                shown_error = True
        else:
            # Some older devices (e.g. Nexus 4) return zero even when no process
            # was found, so check whether we actually got any output.
            if pid:
                print(f"PID: {pid}")
                return pid

        # Loop fairly rapidly to avoid missing a short-lived process.
        await asyncio.sleep(0.2)
async def logcat_task(context, initial_devices):
    """Stream the app's logcat output until the device disconnects.

    Waits for the device and for the app's PID, then runs `adb logcat`
    filtered to that PID, passing Python's own stdout/stderr through
    unchanged and routing other messages by severity.
    """
    # Gradle may need to do some large downloads of libraries and emulator
    # images. This will happen during find_device in --managed mode, or find_pid
    # in --connected mode.
    startup_timeout = 600
    serial = await wait_for(find_device(context, initial_devices), startup_timeout)
    pid = await wait_for(find_pid(serial), startup_timeout)

    # `--pid` requires API level 24 or higher.
    args = [adb, "-s", serial, "logcat", "--pid", pid, "--format", "tag"]
    logcat_started = False
    async with async_process(
        *args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
    ) as process:
        while line := (await process.stdout.readline()).decode(*DECODE_ARGS):
            if match := re.fullmatch(r"([A-Z])/(.*)", line, re.DOTALL):
                logcat_started = True
                level, message = match.groups()
            else:
                # If the regex doesn't match, this is either a logcat startup
                # error, or the second or subsequent line of a multi-line
                # message. Python won't produce multi-line messages, but other
                # components might.
                level, message = None, line

            # Exclude high-volume messages which are rarely useful.
            if context.verbose < 2 and "from python test_syslog" in message:
                continue

            # Put high-level messages on stderr so they're highlighted in the
            # buildbot logs. This will include Python's own stderr.
            stream = (
                sys.stderr
                if level in ["W", "E", "F"]  # WARNING, ERROR, FATAL (aka ASSERT)
                else sys.stdout
            )

            # To simplify automated processing of the output, e.g. a buildbot
            # posting a failure notice on a GitHub PR, we strip the level and
            # tag indicators from Python's stdout and stderr.
            for prefix in ["python.stdout: ", "python.stderr: "]:
                if message.startswith(prefix):
                    global python_started
                    python_started = True
                    stream.write(message.removeprefix(prefix))
                    break
            else:
                # Non-Python messages add a lot of noise, but they may
                # sometimes help explain a failure.
                log_verbose(context, line, stream)

        # If the device disconnects while logcat is running, which always
        # happens in --managed mode, some versions of adb return non-zero.
        # Distinguish this from a logcat startup error by checking whether we've
        # received any logcat messages yet.
        status = await wait_for(process.wait(), timeout=1)
        if status != 0 and not logcat_started:
            raise CalledProcessError(status, args)
def stop_app(serial):
    """Force-stop the testbed app on the given device (quietly: log=False)."""
    run([adb, "-s", serial, "shell", "am", "force-stop", APP_ID], log=False)
async def gradle_task(context):
    """Run the Gradle androidTest task, streaming its output.

    Calls exit(0) on success, raises CalledProcessError on a non-zero
    Gradle status, and always force-stops the app afterwards in
    --connected mode.
    """
    env = os.environ.copy()
    if context.managed:
        task_prefix = context.managed
    else:
        task_prefix = "connected"
        env["ANDROID_SERIAL"] = context.connected

    if context.ci_mode:
        context.args[0:0] = [
            # See _add_ci_python_opts in libregrtest/main.py.
            "-W", "error", "-bb", "-E",

            # Randomization is disabled because order-dependent failures are
            # much less likely to pass on a rerun in single-process mode.
            "-m", "test",
            f"--{context.ci_mode}-ci", "--single-process", "--no-randomize"
        ]
    # Default to running Python's own test suite.
    if not any(arg in context.args for arg in ["-c", "-m"]):
        context.args[0:0] = ["-m", "test"]

    args = [
        gradlew, "--console", "plain", f"{task_prefix}DebugAndroidTest",
    ] + [
        # Pass each non-empty setting through as a Gradle project property.
        f"-P{name}={value}"
        for name, value in [
            ("python.sitePackages", context.site_packages),
            ("python.cwd", context.cwd),
            (
                "android.testInstrumentationRunnerArguments.pythonArgs",
                json.dumps(context.args),
            ),
        ]
        if value
    ]
    if context.verbose >= 2:
        args.append("--info")
    log_verbose(context, f"> {join_command(args)}\n")

    try:
        async with async_process(
            *args, cwd=TESTBED_DIR, env=env,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        ) as process:
            while line := (await process.stdout.readline()).decode(*DECODE_ARGS):
                # Gradle may take several minutes to install SDK packages, so
                # it's worth showing those messages even in non-verbose mode.
                if line.startswith('Preparing "Install'):
                    sys.stdout.write(line)
                else:
                    log_verbose(context, line)

            status = await wait_for(process.wait(), timeout=1)
            if status == 0:
                exit(0)
            else:
                raise CalledProcessError(status, args)
    finally:
        # Gradle does not stop the tests when interrupted.
        if context.connected:
            stop_app(context.connected)
async def run_testbed(context):
    """Top-level `test` command: run the logcat and Gradle tasks together."""
    setup_ci()
    setup_sdk()
    setup_testbed()

    if context.managed:
        # In this mode, Gradle will create a device with an unpredictable name.
        # So we save a list of the running devices before starting Gradle, and
        # find_device then waits for a new device to appear.
        initial_devices = await list_devices()
    else:
        # In case the previous shutdown was unclean, make sure the app isn't
        # running, otherwise we might show logs from a previous run. This is
        # unnecessary in --managed mode, because Gradle creates a new emulator
        # every time.
        stop_app(context.connected)
        initial_devices = None

    try:
        async with asyncio.TaskGroup() as tg:
            tg.create_task(logcat_task(context, initial_devices))
            tg.create_task(gradle_task(context))
    except* MySystemExit as e:
        # `exit` raises MySystemExit instead of SystemExit; convert it back
        # here so the process exits with the right status.
        raise SystemExit(*e.exceptions[0].args) from None
    except* CalledProcessError as e:
        # If Python produced no output, then the user probably wants to see the
        # verbose output to explain why the test failed.
        if not python_started:
            for stream, line in hidden_output:
                stream.write(line)

        # Extract it from the ExceptionGroup so it can be handled by `main`.
        raise e.exceptions[0]
def package_version(prefix_dir):
    """Return the version string for a package built from `prefix_dir`.

    The version is read from the PY_VERSION define in the installed
    patchlevel.h. If the build isn't from a tagged commit (the version
    ends with "+"), a UTC timestamp is appended, following the PyPA
    version number rules.
    """
    patchlevel_glob = f"{prefix_dir}/include/python*/patchlevel.h"
    patchlevel_paths = glob(patchlevel_glob)
    if len(patchlevel_paths) != 1:
        sys.exit(f"{patchlevel_glob} matched {len(patchlevel_paths)} paths.")

    version = None
    with open(patchlevel_paths[0]) as patchlevel_file:
        for line in patchlevel_file:
            match = re.fullmatch(r'\s*#define\s+PY_VERSION\s+"(.+)"\s*', line)
            if match:
                version = match[1]
                break
    if version is None:
        sys.exit(f"Failed to find Python version in {patchlevel_paths[0]}.")

    # If not building against a tagged commit, add a timestamp to the version,
    # following the PyPA version number rules.
    if version.endswith("+"):
        version += datetime.now(timezone.utc).strftime("%Y%m%d.%H%M%S")
    return version
def package(context):
    """Build a release tarball for the given host; return its path."""
    prefix_dir = subdir(context.host, "prefix")
    version = package_version(prefix_dir)

    with TemporaryDirectory(prefix=SCRIPT_NAME) as temp_dir:
        temp_dir = Path(temp_dir)

        # Include all tracked files from the Android directory.
        for line in run(
            ["git", "ls-files"],
            cwd=ANDROID_DIR, capture_output=True, text=True, log=False,
        ).stdout.splitlines():
            src = ANDROID_DIR / line
            dst = temp_dir / line
            dst.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(src, dst, follow_symlinks=False)

        # Include anything from the prefix directory which could be useful
        # either for embedding Python in an app, or building third-party
        # packages against it.
        for rel_dir, patterns in [
            ("include", ["openssl*", "python*", "sqlite*"]),
            ("lib", ["engines-3", "libcrypto*.so", "libpython*", "libsqlite*",
                     "libssl*.so", "ossl-modules", "python*"]),
            ("lib/pkgconfig", ["*crypto*", "*ssl*", "*python*", "*sqlite*"]),
        ]:
            for pattern in patterns:
                for src in glob(f"{prefix_dir}/{rel_dir}/{pattern}"):
                    dst = temp_dir / relpath(src, prefix_dir.parent)
                    dst.parent.mkdir(parents=True, exist_ok=True)
                    if Path(src).is_dir():
                        shutil.copytree(
                            src, dst, symlinks=True,
                            ignore=lambda *args: ["__pycache__"]
                        )
                    else:
                        shutil.copy2(src, dst, follow_symlinks=False)

        # Strip debug information.
        if not context.debug:
            so_files = glob(f"{temp_dir}/**/*.so", recursive=True)
            run([android_env(context.host)["STRIP"], *so_files], log=False)

        dist_dir = subdir(context.host, "dist", create=True)
        package_path = shutil.make_archive(
            f"{dist_dir}/python-{version}-{context.host}", "gztar", temp_dir
        )
        print(f"Wrote {package_path}")
        return package_path
def ci(context):
    """Run the full CI pipeline: build, package, then test the package."""
    for step in [
        configure_build_python,
        make_build_python,
        configure_host_python,
        make_host_python,
        package,
    ]:
        # Derive a readable GitHub Actions group caption from the function
        # name, e.g. "Configure host Python".
        caption = (
            step.__name__.replace("_", " ")
            .capitalize()
            .replace("python", "Python")
        )
        print(f"::group::{caption}")
        result = step(context)
        if step is package:
            package_path = result
        print("::endgroup::")

    if (
        "GITHUB_ACTIONS" in os.environ
        and (platform.system(), platform.machine()) != ("Linux", "x86_64")
    ):
        print(
            "Skipping tests: GitHub Actions does not support the Android "
            "emulator on this platform."
        )
    else:
        with TemporaryDirectory(prefix=SCRIPT_NAME) as temp_dir:
            print("::group::Tests")
            # Prove the package is self-contained by using it to run the tests.
            shutil.unpack_archive(package_path, temp_dir)
            launcher_args = [
                "--managed", "maxVersion", "-v", f"--{context.ci_mode}-ci"
            ]
            run(
                ["./android.py", "test", *launcher_args],
                cwd=temp_dir
            )
            print("::endgroup::")
def env(context):
    """Print the Android build environment variables for the given host."""
    print_env(android_env(getattr(context, "host", None)))
| | # Handle SIGTERM the same way as SIGINT. This ensures that if we're terminated |
| | # by the buildbot worker, we'll make an attempt to clean up our subprocesses. |
def install_signal_handler():
    """Handle SIGTERM the same way as SIGINT.

    This ensures that if we're terminated by the buildbot worker, we'll
    make an attempt to clean up our subprocesses.
    """
    def forward_to_sigint(*_args):
        # Re-raise as SIGINT so the normal KeyboardInterrupt path runs.
        os.kill(os.getpid(), signal.SIGINT)

    signal.signal(signal.SIGTERM, forward_to_sigint)
def parse_args():
    """Parse the command line; return the argparse namespace."""
    parser = argparse.ArgumentParser()
    subcommands = parser.add_subparsers(dest="subcommand", required=True)

    def add_parser(*args, **kwargs):
        # Every subcommand gets a -v/--verbose flag.
        parser = subcommands.add_parser(*args, **kwargs)
        parser.add_argument(
            "-v", "--verbose", action="count", default=0,
            help="Show verbose output. Use twice to be even more verbose.")
        return parser

    # Subcommands
    build = add_parser(
        "build", help="Run configure-build, make-build, configure-host and "
        "make-host")
    configure_build = add_parser(
        "configure-build", help="Run `configure` for the build Python")
    add_parser(
        "make-build", help="Run `make` for the build Python")
    configure_host = add_parser(
        "configure-host", help="Run `configure` for Android")
    make_host = add_parser(
        "make-host", help="Run `make` for Android")
    add_parser("clean", help="Delete all build directories")
    add_parser("build-testbed", help="Build the testbed app")
    test = add_parser("test", help="Run the testbed app")
    package = add_parser("package", help="Make a release package")
    ci = add_parser("ci", help="Run build, package and test")
    env = add_parser("env", help="Print environment variables")

    # Common arguments
    for subcommand in [build, configure_build, configure_host, ci]:
        subcommand.add_argument(
            "--clean", action="store_true", default=False, dest="clean",
            help="Delete the relevant build directories first")

    host_commands = [build, configure_host, make_host, package, ci]
    if in_source_tree:
        host_commands.append(env)
    for subcommand in host_commands:
        subcommand.add_argument(
            "host", metavar="HOST", choices=HOSTS,
            help="Host triplet: choices=[%(choices)s]")

    for subcommand in [build, configure_build, configure_host, ci]:
        subcommand.add_argument("args", nargs="*",
                                help="Extra arguments to pass to `configure`")

    # Test arguments
    device_group = test.add_mutually_exclusive_group(required=True)
    device_group.add_argument(
        "--connected", metavar="SERIAL", help="Run on a connected device. "
        "Connect it yourself, then get its serial from `adb devices`.")
    device_group.add_argument(
        "--managed", metavar="NAME", help="Run on a Gradle-managed device. "
        "These are defined in `managedDevices` in testbed/app/build.gradle.kts.")
    test.add_argument(
        "--site-packages", metavar="DIR", type=abspath,
        help="Directory to copy as the app's site-packages.")
    test.add_argument(
        "--cwd", metavar="DIR", type=abspath,
        help="Directory to copy as the app's working directory.")
    test.add_argument(
        "args", nargs="*", help=f"Python command-line arguments. "
        f"Separate them from {SCRIPT_NAME}'s own arguments with `--`. "
        f"If neither -c nor -m are included, `-m test` will be prepended, "
        f"which will run Python's own test suite.")

    # Package arguments.
    for subcommand in [package, ci]:
        subcommand.add_argument(
            "-g", action="store_true", default=False, dest="debug",
            help="Include debug information in package")

    # CI arguments
    for subcommand in [test, ci]:
        # `ci` must specify a mode; for `test` it's optional.
        group = subcommand.add_mutually_exclusive_group(required=subcommand is ci)
        group.add_argument(
            "--fast-ci", action="store_const", dest="ci_mode", const="fast",
            help="Add test arguments for GitHub Actions")
        group.add_argument(
            "--slow-ci", action="store_const", dest="ci_mode", const="slow",
            help="Add test arguments for buildbots")

    return parser.parse_args()
def main():
    """Script entry point: dispatch the chosen subcommand."""
    install_signal_handler()

    # Under the buildbot, stdout is not a TTY, but we must still flush after
    # every line to make sure our output appears in the correct order relative
    # to the output of our subprocesses.
    for stream in [sys.stdout, sys.stderr]:
        stream.reconfigure(line_buffering=True)

    context = parse_args()
    dispatch = {
        "configure-build": configure_build_python,
        "make-build": make_build_python,
        "configure-host": configure_host_python,
        "make-host": make_host_python,
        "build": build_all,
        "clean": clean_all,
        "build-testbed": build_testbed,
        "test": run_testbed,
        "package": package,
        "ci": ci,
        "env": env,
    }

    try:
        result = dispatch[context.subcommand](context)
        # Async subcommands return a coroutine, which we run to completion.
        if asyncio.iscoroutine(result):
            asyncio.run(result)
    except CalledProcessError as e:
        print_called_process_error(e)
        sys.exit(1)
def print_called_process_error(e):
    """Print a CalledProcessError's captured output plus a summary line.

    stdout goes to sys.stdout and stderr to sys.stderr, each terminated
    with a newline if it didn't already end with one.
    """
    for stream_name in ["stdout", "stderr"]:
        content = getattr(e, stream_name)
        if isinstance(content, bytes):
            content = content.decode(*DECODE_ARGS)
        stream = getattr(sys, stream_name)
        if content:
            stream.write(content)
            if not content.endswith("\n"):
                stream.write("\n")

    # shlex uses single quotes, so we surround the command with double quotes.
    print(
        f'Command "{join_command(e.cmd)}" returned exit status {e.returncode}'
    )
# Script entry point.
if __name__ == "__main__":
    main()
| | |
| | # Python for Android |
| | If you obtained this README as part of a release package, then the only |
| | applicable sections are "Prerequisites", "Testing", and "Using in your own app". |
| | If you obtained this README as part of the CPython source tree, then you can |
| | also follow the other sections to compile Python for Android yourself. |
| | However, most app developers should not need to do any of these things manually. |
| | Instead, use one of the tools listed |
| | [here](https://docs.python.org/3/using/android.html), which will provide a much |
| | easier experience. |
| | ## Prerequisites |
| | If you already have an Android SDK installed, export the `ANDROID_HOME` |
| | environment variable to point at its location. Otherwise, here's how to install |
| | it: |
| | * Download the "Command line tools" from <https://developer.android.com/studio>. |
| | * Create a directory `android-sdk/cmdline-tools`, and unzip the command line |
| | tools package into it. |
| | * Rename `android-sdk/cmdline-tools/cmdline-tools` to |
| | `android-sdk/cmdline-tools/latest`. |
| | * `export ANDROID_HOME=/path/to/android-sdk` |
| | The `android.py` script will automatically use the SDK's `sdkmanager` to install |
| | any packages it needs. |
| | The script also requires the following commands to be on the `PATH`: |
| | * `curl` |
| | * `java` (or set the `JAVA_HOME` environment variable) |
| | ## Building |
| | Python can be built for Android on any POSIX platform supported by the Android |
| | development tools, which currently means Linux or macOS. |
| | First we'll make a "build" Python (for your development machine), then use it to |
| | help produce a "host" Python for Android. So make sure you have all the usual |
| | tools and libraries needed to build Python for your development machine. |
| | The easiest way to do a build is to use the `android.py` script. You can either |
| | have it perform the entire build process from start to finish in one step, or |
| | you can do it in discrete steps that mirror running `configure` and `make` for |
| | each of the two builds of Python you end up producing. |
| | The discrete steps for building via `android.py` are: |
| | ```sh |
| | ./android.py configure-build |
| | ./android.py make-build |
| | ./android.py configure-host HOST |
| | ./android.py make-host HOST |
| | ``` |
| | `HOST` identifies which architecture to build. To see the possible values, run |
| | `./android.py configure-host --help`. |
| | To do all steps in a single command, run: |
| | ```sh |
| | ./android.py build HOST |
| | ``` |
| | In the end you should have a build Python in `cross-build/build`, and a host |
| | Python in `cross-build/HOST`. |
| | You can use `--` as a separator for any of the `configure`-related commands – |
| | including `build` itself – to pass arguments to the underlying `configure` |
| | call. For example, if you want a pydebug build that also caches the results from |
| | `configure`, you can do: |
| | ```sh |
| | ./android.py build HOST -- -C --with-pydebug |
| | ``` |
| | ## Packaging |
| | After building an architecture as described in the section above, you can |
| | package it for release with this command: |
| | ```sh |
| | ./android.py package HOST |
| | ``` |
| | `HOST` is defined in the section above. |
| | This will generate a tarball in `cross-build/HOST/dist`, whose structure is |
| | similar to the `Android` directory of the CPython source tree. |
| | ## Testing |
| | The Python test suite can be run on Linux, macOS, or Windows. |
| | On Linux, the emulator needs access to the KVM virtualization interface. This may |
| | require adding your user to a group, or changing your udev rules. On GitHub |
| | Actions, the test script will do this automatically using the commands shown |
| | [here](https://github.blog/changelog/2024-04-02-github-actions-hardware-accelerated-android-virtualization-now-available/). |
| | The test suite can usually be run on a device with 2 GB of RAM, but this is |
| | borderline, so you may need to increase it to 4 GB. As of Android |
| | Studio Koala, 2 GB is the default for all emulators, although the user interface |
| | may indicate otherwise. Locate the emulator's directory under `~/.android/avd`, |
| | and find `hw.ramSize` in both config.ini and hardware-qemu.ini. Either set these |
| | manually to the same value, or use the Android Studio Device Manager, which will |
| | update both files. |
| | You can run the test suite either: |
| | * Within the CPython repository, after doing a build as described above. On |
| | Windows, you won't be able to do the build on the same machine, so you'll have |
| | to copy the `cross-build/HOST/prefix` directory from somewhere else. |
| | * Or by taking a release package built using the `package` command, extracting |
| | it wherever you want, and using its own copy of `android.py`. |
| | The test script supports the following modes: |
| | * In `--connected` mode, it runs on a device or emulator you have already |
| | connected to the build machine. List the available devices with |
| | `$ANDROID_HOME/platform-tools/adb devices -l`, then pass a device ID to the |
| | script like this: |
| | ```sh |
| | ./android.py test --connected emulator-5554 |
| | ``` |
| | * In `--managed` mode, it uses a temporary headless emulator defined in the |
| | `managedDevices` section of testbed/app/build.gradle.kts. This mode is slower, |
| | but more reproducible. |
| | We currently define two devices: `minVersion` and `maxVersion`, corresponding |
| | to our minimum and maximum supported Android versions. For example: |
| | ```sh |
| | ./android.py test --managed maxVersion |
| | ``` |
| | By default, the only messages the script will show are Python's own stdout and |
| | stderr. Add the `-v` option to also show Gradle output, and non-Python logcat |
| | messages. |
| | Any other arguments on the `android.py test` command line will be passed through |
| | to `python -m test` – use `--` to separate them from android.py's own options. |
| | See the [Python Developer's |
| | Guide](https://devguide.python.org/testing/run-write-tests/) for common options |
| | – most of them will work on Android, except for those that involve subprocesses, |
| | such as `-j`. |
| | Every time you run `android.py test`, changes in pure-Python files in the |
| | repository's `Lib` directory will be picked up immediately. Changes in C files, |
| | and architecture-specific files such as sysconfigdata, will not take effect |
| | until you re-run `android.py make-host` or `build`. |
| | The testbed app can also be used to test third-party packages. For more details, |
| | run `android.py test --help`, paying attention to the options `--site-packages`, |
| | `--cwd`, `-c` and `-m`. |
| | ## Using in your own app |
| | See https://docs.python.org/3/using/android.html. |
| | |
#include <android/log.h>
#include <errno.h>
#include <jni.h>
#include <limits.h>
#include <pthread.h>
#include <Python.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
| | static void throw_runtime_exception(JNIEnv *env, const char *message) { |
| | (*env)->ThrowNew( |
| | env, |
| | (*env)->FindClass(env, "java/lang/RuntimeException"), |
| | message); |
| | } |
| | static void throw_errno(JNIEnv *env, const char *error_prefix) { |
| | char error_message[1024]; |
| | snprintf(error_message, sizeof(error_message), |
| | "%s: %s", error_prefix, strerror(errno)); |
| | throw_runtime_exception(env, error_message); |
| | } |
| | // --- Stdio redirection ------------------------------------------------------ |
| | // Most apps won't need this, because the Python-level sys.stdout and sys.stderr |
| | // are redirected to the Android logcat by Python itself. However, in the |
| | // testbed it's useful to redirect the native streams as well, to debug problems |
| | // in the Python startup or redirection process. |
| | // |
| | // Based on |
| | // https://github.com/beeware/briefcase-android-gradle-template/blob/v0.3.11/%7B%7B%20cookiecutter.safe_formal_name%20%7D%7D/app/src/main/cpp/native-lib.cpp |
// Describes one native stdio stream being mirrored to the Android logcat.
typedef struct {
    FILE *file;  // the C stream (stdout/stderr); set at runtime, NULL = sentinel
    int fd;      // the underlying file descriptor that gets redirected
    android_LogPriority priority;  // logcat priority used for this stream
    char *tag;   // logcat tag used for this stream
    int pipe[2];  // fd is dup2'd onto pipe[1]; a thread drains pipe[0] to logcat
} StreamInfo;
| | // The FILE member can't be initialized here because stdout and stderr are not |
| | // compile-time constants. Instead, it's initialized immediately before the |
| | // redirection. |
static StreamInfo STREAMS[] = {
    {NULL, STDOUT_FILENO, ANDROID_LOG_INFO, "native.stdout", {-1, -1}},
    {NULL, STDERR_FILENO, ANDROID_LOG_WARN, "native.stderr", {-1, -1}},
    // Sentinel: `file` stays NULL, which terminates iteration over this array.
    {NULL, -1, ANDROID_LOG_UNKNOWN, NULL, {-1, -1}},
};
// The maximum length of a log message in bytes, including the level marker and
// tag, is defined as LOGGER_ENTRY_MAX_PAYLOAD in
// platform/system/logging/liblog/include/log/log.h. As of API level 30, messages
// longer than this will be truncated by logcat. This limit has already been
// reduced at least once in the history of Android (from 4076 to 4068 between API
// level 23 and 26), so leave some headroom.
static const int MAX_BYTES_PER_WRITE = 4000;
| | static void *redirection_thread(void *arg) { |
| | StreamInfo *si = (StreamInfo*)arg; |
| | ssize_t read_size; |
| | char buf[MAX_BYTES_PER_WRITE]; |
| | while ((read_size = read(si->pipe[0], buf, sizeof buf - 1)) > 0) { |
| | buf[read_size] = '\0'; /* add null-terminator */ |
| | __android_log_write(si->priority, si->tag, buf); |
| | } |
| | return 0; |
| | } |
| | static char *redirect_stream(StreamInfo *si) { |
| | /* make the FILE unbuffered, to ensure messages are never lost */ |
| | if (setvbuf(si->file, 0, _IONBF, 0)) { |
| | return "setvbuf"; |
| | } |
| | /* create the pipe and redirect the file descriptor */ |
| | if (pipe(si->pipe)) { |
| | return "pipe"; |
| | } |
| | if (dup2(si->pipe[1], si->fd) == -1) { |
| | return "dup2"; |
| | } |
| | /* start the logging thread */ |
| | pthread_t thr; |
| | if ((errno = pthread_create(&thr, 0, redirection_thread, si))) { |
| | return "pthread_create"; |
| | } |
| | if ((errno = pthread_detach(thr))) { |
| | return "pthread_detach"; |
| | } |
| | return 0; |
| | } |
| | JNIEXPORT void JNICALL Java_org_python_testbed_PythonTestRunner_redirectStdioToLogcat( |
| | JNIEnv *env, jobject obj |
| | ) { |
| | STREAMS[0].file = stdout; |
| | STREAMS[1].file = stderr; |
| | for (StreamInfo *si = STREAMS; si->file; si++) { |
| | char *error_prefix; |
| | if ((error_prefix = redirect_stream(si))) { |
| | throw_errno(env, error_prefix); |
| | return; |
| | } |
| | } |
| | } |
| | // --- Python initialization --------------------------------------------------- |
| | static char *init_signals() { |
| | // Some tests use SIGUSR1, but that's blocked by default in an Android app in |
| | // order to make it available to `sigwait` in the Signal Catcher thread. |
| | // (https://cs.android.com/android/platform/superproject/+/android14-qpr3-release:art/runtime/signal_catcher.cc). |
| | // That thread's functionality is only useful for debugging the JVM, so disabling |
| | // it should not weaken the tests. |
| | // |
| | // There's no safe way of stopping the thread completely (#123982), but simply |
| | // unblocking SIGUSR1 is enough to fix most tests. |
| | // |
| | // However, in tests that generate multiple different signals in quick |
| | // succession, it's possible for SIGUSR1 to arrive while the main thread is busy |
| | // running the C-level handler for a different signal. In that case, the SIGUSR1 |
| | // may be sent to the Signal Catcher thread instead, which will generate a log |
| | // message containing the text "reacting to signal". |
| | // |
| | // Such tests may need to be changed in one of the following ways: |
| | // * Use a signal other than SIGUSR1 (e.g. test_stress_delivery_simultaneous in |
| | // test_signal.py). |
| | // * Send the signal to a specific thread rather than the whole process (e.g. |
| | // test_signals in test_threadsignals.py. |
| | sigset_t set; |
| | if (sigemptyset(&set)) { |
| | return "sigemptyset"; |
| | } |
| | if (sigaddset(&set, SIGUSR1)) { |
| | return "sigaddset"; |
| | } |
| | if ((errno = pthread_sigmask(SIG_UNBLOCK, &set, NULL))) { |
| | return "pthread_sigmask"; |
| | } |
| | return NULL; |
| | } |
| | static void throw_status(JNIEnv *env, PyStatus status) { |
| | throw_runtime_exception(env, status.err_msg ? status.err_msg : ""); |
| | } |
| | JNIEXPORT int JNICALL Java_org_python_testbed_PythonTestRunner_runPython( |
| | JNIEnv *env, jobject obj, jstring home, jarray args |
| | ) { |
| | const char *home_utf8 = (*env)->GetStringUTFChars(env, home, NULL); |
| | char cwd[PATH_MAX]; |
| | snprintf(cwd, sizeof(cwd), "%s/%s", home_utf8, "cwd"); |
| | if (chdir(cwd)) { |
| | throw_errno(env, "chdir"); |
| | return 1; |
| | } |
| | char *error_prefix; |
| | if ((error_prefix = init_signals())) { |
| | throw_errno(env, error_prefix); |
| | return 1; |
| | } |
| | PyConfig config; |
| | PyStatus status; |
| | PyConfig_InitPythonConfig(&config); |
| | jsize argc = (*env)->GetArrayLength(env, args); |
| | const char *argv[argc + 1]; |
| | for (int i = 0; i < argc; i++) { |
| | jobject arg = (*env)->GetObjectArrayElement(env, args, i); |
| | argv[i] = (*env)->GetStringUTFChars(env, arg, NULL); |
| | } |
| | argv[argc] = NULL; |
| | // PyConfig_SetBytesArgv "must be called before other methods, since the |
| | // preinitialization configuration depends on command line arguments" |
| | if (PyStatus_Exception(status = PyConfig_SetBytesArgv(&config, argc, (char**)argv))) { |
| | throw_status(env, status); |
| | return 1; |
| | } |
| | status = PyConfig_SetBytesString(&config, &config.home, home_utf8); |
| | if (PyStatus_Exception(status)) { |
| | throw_status(env, status); |
| | return 1; |
| | } |
| | status = Py_InitializeFromConfig(&config); |
| | if (PyStatus_Exception(status)) { |
| | throw_status(env, status); |
| | return 1; |
| | } |
| | return Py_RunMain(); |
| | } |
| | |
# Build script for the testbed app's native library (main_activity.c), built
# against the cross-compiled CPython in PYTHON_PREFIX_DIR.
cmake_minimum_required(VERSION 3.4.1)
project(testbed)
# Resolve variables from the command line: replace the {{triplet}} placeholder
# in PYTHON_PREFIX_DIR with the current build architecture.
string(
    REPLACE {{triplet}} ${CMAKE_LIBRARY_ARCHITECTURE}
    PYTHON_PREFIX_DIR ${PYTHON_PREFIX_DIR}
)
# Headers and libraries come from the cross-build prefix; `log` is the Android
# logging library used for stdio redirection.
include_directories(${PYTHON_PREFIX_DIR}/include/python${PYTHON_VERSION})
link_directories(${PYTHON_PREFIX_DIR}/lib)
link_libraries(log python${PYTHON_VERSION})
add_library(main_activity SHARED main_activity.c)
| | |
| | |
| | """ |
| |
|