text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_X86_INCLUDE_IA32_EXCEPTION_H_
#define ZEPHYR_ARCH_X86_INCLUDE_IA32_EXCEPTION_H_
#ifndef _ASMLANGUAGE
#include <zephyr/toolchain/common.h>
/* Generate an ISR_LIST record in the .intList section for exception
 * @vector with descriptor privilege level @dpl. The forward reference
 * "1f" resolves to the "1:" label emitted by __EXCEPTION_CONNECT()
 * below; irq/priority are -1 because this entry describes an exception,
 * not an interrupt.
 */
#define _EXCEPTION_INTLIST(vector, dpl) \
	".pushsection .gnu.linkonce.intList.exc_" #vector "\n\t" \
	".long 1f\n\t" /* ISR_LIST.fnc */ \
	".long -1\n\t" /* ISR_LIST.irq */ \
	".long -1\n\t" /* ISR_LIST.priority */ \
	".long " STRINGIFY(vector) "\n\t" /* ISR_LIST.vec */ \
	".long " STRINGIFY(dpl) "\n\t" /* ISR_LIST.dpl */ \
	".long 0\n\t" /* ISR_LIST.tss */ \
	".popsection\n\t" \
/* Extra preprocessor indirection to ensure arguments get expanded before
* concatenation takes place
*/
/* Pastes together the stub symbol _<handler>_vector_<vec>_stub; the
 * single-underscore wrapper below provides the extra expansion pass.
 */
#define __EXCEPTION_STUB_NAME(handler, vec) \
	_ ## handler ## _vector_ ## vec ## _stub

#define _EXCEPTION_STUB_NAME(handler, vec) \
	__EXCEPTION_STUB_NAME(handler, vec) \
/* Unfortunately, GCC extended asm doesn't work at toplevel so we need
* to stringify stuff.
*
* What we are doing here is generating entries in the .intList section
* and also the assembly language stubs for the exception. We use
* .gnu.linkonce section prefix so that the linker only includes the
* first one of these it encounters for a particular vector. In this
* way it's easy for applications or drivers to install custom exception
* handlers without having to #ifdef out previous instances such as in
* arch/x86/core/fatal.c
*/
/* Emit the intList record plus the exception stub itself. The stub is
 * placed in a .gnu.linkonce text section so the linker keeps only the
 * first definition seen for a given vector. At runtime the stub executes
 * @codepush (a dummy error code push, or nothing when the CPU pushes a
 * real one), pushes the C handler address, and jumps to the common
 * _exception_enter path.
 */
#define __EXCEPTION_CONNECT(handler, vector, dpl, codepush) \
	__asm__ ( \
	_EXCEPTION_INTLIST(vector, dpl) \
	".pushsection .gnu.linkonce.t.exc_" STRINGIFY(vector) \
	"_stub, \"ax\"\n\t" \
	".global " STRINGIFY(_EXCEPTION_STUB_NAME(handler, vector)) "\n\t" \
	STRINGIFY(_EXCEPTION_STUB_NAME(handler, vector)) ":\n\t" \
	"1:\n\t" \
	codepush \
	"push $" STRINGIFY(handler) "\n\t" \
	"jmp _exception_enter\n\t" \
	".popsection\n\t" \
	)
/**
 * @brief Connect an exception handler that doesn't expect error code
 *
 * Assign an exception handler to a particular vector in the IDT.
 * A dummy error code of 0 is pushed so the stack frame layout matches
 * the exceptions where the CPU pushes a real error code.
 *
 * @param handler A handler function of the prototype
 *                void handler(const struct arch_esf *esf)
 * @param vector Vector index in the IDT
 * @param dpl Descriptor privilege level for the IDT entry
 */
#define _EXCEPTION_CONNECT_NOCODE(handler, vector, dpl) \
	__EXCEPTION_CONNECT(handler, vector, dpl, "push $0\n\t")
/**
 * @brief Connect an exception handler that does expect error code
 *
 * Assign an exception handler to a particular vector in the IDT.
 * No dummy error code is pushed because the CPU supplies the real one;
 * it will be accessible in esf->errorCode.
 *
 * @param handler A handler function of the prototype
 *                void handler(const struct arch_esf *esf)
 * @param vector Vector index in the IDT
 * @param dpl Descriptor privilege level for the IDT entry
 */
#define _EXCEPTION_CONNECT_CODE(handler, vector, dpl) \
	__EXCEPTION_CONNECT(handler, vector, dpl, "")
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_X86_INCLUDE_IA32_EXCEPTION_H_ */
``` | /content/code_sandbox/arch/x86/include/ia32/exception.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 764 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_X86_INCLUDE_IA32_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_X86_INCLUDE_IA32_OFFSETS_SHORT_ARCH_H_
#include <zephyr/offsets.h>
/* kernel */
/* Byte offset of the arch-specific interrupt stack frame pointer (isf)
 * within struct _kernel, for use from assembly code.
 */
#define _kernel_offset_to_isf \
	(___kernel_t_arch_OFFSET + ___kernel_arch_t_isf_OFFSET)
/* end - kernel */
/* threads */
/* Offset of the exception nesting counter within struct k_thread */
#define _thread_offset_to_excNestCount \
	(___thread_t_arch_OFFSET + ___thread_arch_t_excNestCount_OFFSET)
/* Offset of the saved stack pointer in the callee-saved register area */
#define _thread_offset_to_esp \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_esp_OFFSET)
/* Offset of the preemptively-saved floating point register area */
#define _thread_offset_to_preempFloatReg \
	(___thread_t_arch_OFFSET + ___thread_arch_t_preempFloatReg_OFFSET)
/* end - threads */
#endif /* ZEPHYR_ARCH_X86_INCLUDE_IA32_OFFSETS_SHORT_ARCH_H_ */
``` | /content/code_sandbox/arch/x86/include/ia32/offsets_short_arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 185 |
```c
/*
*
*/
#include <zephyr/toolchain.h>
__weak void *__dso_handle;
/* Stub for the Itanium C++ ABI routine that registers a static-object
 * destructor. Exit-time destructors are never run here, so the arguments
 * are discarded and success (0) is reported to the caller.
 */
int __cxa_atexit(void (*destructor)(void *), void *objptr, void *dso)
{
	ARG_UNUSED(destructor);
	ARG_UNUSED(objptr);
	ARG_UNUSED(dso);
	return 0;
}
/* Stub for the C library atexit(); exit handlers are never invoked, so
 * the callback is ignored and success (0) is reported.
 */
int atexit(void (*function)(void))
{
	/* Mark the ignored parameter, matching __cxa_atexit() above */
	ARG_UNUSED(function);
	return 0;
}
``` | /content/code_sandbox/arch/arc/arcmwdt/arcmwdt-dtr-stubs.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 84 |
```c
/* cache.c - d-cache support for ARC CPUs */
/*
*
*/
/**
* @file
* @brief d-cache manipulation
*
* This module contains functions for manipulation of the d-cache.
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#include <zephyr/cache.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
#include <kernel_internal.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/init.h>
#include <stdbool.h>
#if defined(CONFIG_DCACHE_LINE_SIZE_DETECT)
size_t sys_cache_line_size;
#endif
#define DC_CTRL_DC_ENABLE 0x0 /* enable d-cache */
#define DC_CTRL_DC_DISABLE 0x1 /* disable d-cache */
#define DC_CTRL_INVALID_ONLY 0x0 /* invalid d-cache only */
#define DC_CTRL_INVALID_FLUSH 0x40 /* invalid and flush d-cache */
#define DC_CTRL_ENABLE_FLUSH_LOCKED 0x80 /* locked d-cache can be flushed */
#define DC_CTRL_DISABLE_FLUSH_LOCKED 0x0 /* locked d-cache cannot be flushed */
#define DC_CTRL_FLUSH_STATUS 0x100/* flush status */
#define DC_CTRL_DIRECT_ACCESS 0x0 /* direct access mode */
#define DC_CTRL_INDIRECT_ACCESS 0x20 /* indirect access mode */
#define DC_CTRL_OP_SUCCEEDED 0x4 /* d-cache operation succeeded */
/* Return true when a data cache is present, i.e. the version field
 * (bits 7:0) of the D-cache build configuration register is non-zero.
 */
static bool dcache_available(void)
{
	unsigned long val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);

	val &= 0xff; /* extract version */
	/* idiomatic boolean result instead of (val == 0) ? false : true */
	return val != 0;
}
/* Write @dcache_en_mask to the DC_CTRL aux register, but only when a
 * d-cache is actually present on this core.
 */
static void dcache_dc_ctrl(uint32_t dcache_en_mask)
{
	if (dcache_available()) {
		z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, dcache_en_mask);
	}
}
/* Enable the data cache by writing the enable value to DC_CTRL */
void arch_dcache_enable(void)
{
	dcache_dc_ctrl(DC_CTRL_DC_ENABLE);
}

/* Disabling the data cache is intentionally not implemented */
void arch_dcache_disable(void)
{
	/* nothing */
}
/* Flush (write back) every d-cache line overlapping [start, start+size).
 *
 * Returns 0 on success, or -ENOTSUP when no d-cache is present, the
 * size is zero, or the line size is unknown.
 */
int arch_dcache_flush_range(void *start_addr_ptr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t addr = (uintptr_t)start_addr_ptr;
	uintptr_t limit;
	unsigned int key;

	if (!dcache_available() || (size == 0U) || line_size == 0U) {
		return -ENOTSUP;
	}

	limit = addr + size;
	/* align to the start of the first line covered by the range */
	addr = ROUND_DOWN(addr, line_size);

	key = arch_irq_lock(); /* --enter critical section-- */

	while (addr < limit) {
		z_arc_v2_aux_reg_write(_ARC_V2_DC_FLDL, addr);
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		/* spin until the line flush completes */
		while ((z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) &
			DC_CTRL_FLUSH_STATUS) != 0) {
		}
		addr += line_size;
	}

	arch_irq_unlock(key); /* --exit critical section-- */

	return 0;
}
/* Invalidate every d-cache line overlapping [start, start+size) without
 * writing dirty data back.
 *
 * Returns 0 on success, or -ENOTSUP when no d-cache is present, the
 * size is zero, or the line size is unknown.
 */
int arch_dcache_invd_range(void *start_addr_ptr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t start_addr = (uintptr_t)start_addr_ptr;
	uintptr_t end_addr;
	unsigned int key;

	if (!dcache_available() || (size == 0U) || line_size == 0U) {
		return -ENOTSUP;
	}
	end_addr = start_addr + size;
	start_addr = ROUND_DOWN(start_addr, line_size);

	key = arch_irq_lock(); /* -enter critical section- */

	do {
		z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDL, start_addr);
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		start_addr += line_size;
	} while (start_addr < end_addr);

	/* Pair with arch_irq_lock() above: use the arch-level unlock rather
	 * than the kernel irq_unlock() wrapper, matching
	 * arch_dcache_flush_range().
	 */
	arch_irq_unlock(key); /* -exit critical section- */

	return 0;
}
/* Combined flush+invalidate of a range is not supported on this core */
int arch_dcache_flush_and_invd_range(void *start_addr_ptr, size_t size)
{
	/* Mark ignored parameters, matching the icache stubs in this file */
	ARG_UNUSED(start_addr_ptr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
/* Whole-cache d-cache flush is not supported on this core */
int arch_dcache_flush_all(void)
{
	return -ENOTSUP;
}

/* Whole-cache d-cache invalidate is not supported on this core */
int arch_dcache_invd_all(void)
{
	return -ENOTSUP;
}

/* Whole-cache d-cache flush+invalidate is not supported on this core */
int arch_dcache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}
#if defined(CONFIG_DCACHE_LINE_SIZE_DETECT)
/* Detect the d-cache line size from the D_CACHE_BUILD aux register and
 * store it in sys_cache_line_size.
 */
static void init_dcache_line_size(void)
{
	uint32_t val;

	val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
	/* version field (bits 7:0) is zero when no d-cache is configured */
	__ASSERT((val&0xff) != 0U, "d-cache is not present");
	/* line size in bytes = (field in bits 19:16 + 1) * 16 */
	val = ((val>>16) & 0xf) + 1;
	val *= 16U;
	sys_cache_line_size = (size_t) val;
}

/* Return the detected d-cache line size in bytes */
size_t arch_dcache_line_size_get(void)
{
	return sys_cache_line_size;
}
#endif
/* Enabling the instruction cache is intentionally not implemented */
void arch_icache_enable(void)
{
	/* nothing */
}

/* Disabling the instruction cache is intentionally not implemented */
void arch_icache_disable(void)
{
	/* nothing */
}

/* Whole-cache i-cache flush is not supported on this core */
int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}

/* Whole-cache i-cache invalidate is not supported on this core */
int arch_icache_invd_all(void)
{
	return -ENOTSUP;
}

/* Whole-cache i-cache flush+invalidate is not supported on this core */
int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

/* Ranged i-cache flush is not supported on this core */
int arch_icache_flush_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

/* Ranged i-cache invalidate is not supported on this core */
int arch_icache_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

/* Ranged i-cache flush+invalidate is not supported on this core */
int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}
/* Boot-time hook (see SYS_INIT below): enable the data cache and, when
 * configured, detect its line size.
 */
static int init_dcache(void)
{
	sys_cache_data_enable();

#if defined(CONFIG_DCACHE_LINE_SIZE_DETECT)
	init_dcache_line_size();
#endif

	return 0;
}
SYS_INIT(init_dcache, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
``` | /content/code_sandbox/arch/arc/core/cache.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,330 |
```unknown
# ARC options
menu "ARC Options"
depends on ARC
config ARCH
default "arc"
config CPU_ARCEM
bool
select ATOMIC_OPERATIONS_C
help
This option signifies the use of an ARC EM CPU
config CPU_ARCHS
bool
select ATOMIC_OPERATIONS_BUILTIN
select BARRIER_OPERATIONS_BUILTIN
help
This option signifies the use of an ARC HS CPU
choice
prompt "ARC Instruction Set"
default ISA_ARCV2
config ISA_ARCV2
bool "ARC ISA v2"
select ARCH_HAS_STACK_PROTECTION if ARC_HAS_STACK_CHECKING || (ARC_MPU && ARC_MPU_VER !=2)
select ARCH_HAS_USERSPACE if ARC_MPU
select ARCH_HAS_SINGLE_THREAD_SUPPORT if !SMP
select USE_SWITCH
select USE_SWITCH_SUPPORTED
help
v2 ISA for the ARC-HS & ARC-EM cores
config ISA_ARCV3
bool "ARC ISA v3"
select ARCH_HAS_SINGLE_THREAD_SUPPORT if !SMP
select USE_SWITCH
select USE_SWITCH_SUPPORTED
endchoice
if ISA_ARCV2
config CPU_EM4
bool
select CPU_ARCEM
help
If y, the SoC uses an ARC EM4 CPU
config CPU_EM4_DMIPS
bool
select CPU_ARCEM
help
If y, the SoC uses an ARC EM4 DMIPS CPU
config CPU_EM4_FPUS
bool
select CPU_ARCEM
help
If y, the SoC uses an ARC EM4 DMIPS CPU with the single-precision
floating-point extension
config CPU_EM4_FPUDA
bool
select CPU_ARCEM
help
If y, the SoC uses an ARC EM4 DMIPS CPU with single-precision
floating-point and double assist instructions
config CPU_EM6
bool
select CPU_ARCEM
select CPU_HAS_DCACHE
select CPU_HAS_ICACHE
help
If y, the SoC uses an ARC EM6 CPU
config CPU_HS3X
bool
select CPU_ARCHS
select CPU_HAS_DCACHE
select CPU_HAS_ICACHE
help
If y, the SoC uses an ARC HS3x CPU
config CPU_HS4X
bool
select CPU_ARCHS
select CPU_HAS_DCACHE
select CPU_HAS_ICACHE
help
If y, the SoC uses an HS4X CPU
endif #ISA_ARCV2
if ISA_ARCV3
config CPU_HS5X
bool
select CPU_ARCHS
select CPU_HAS_DCACHE
select CPU_HAS_ICACHE
help
If y, the SoC uses an ARC HS5x CPU
config CPU_HS6X
bool
select CPU_ARCHS
select 64BIT
select CPU_HAS_DCACHE
select CPU_HAS_ICACHE
help
If y, the SoC uses an ARC HS6x CPU
endif #ISA_ARCV3
config FP_FPU_DA
bool
menu "ARC CPU Options"
config ARC_HAS_ZOL
bool
depends on ISA_ARCV2
default y
help
ARCv2 CPUs have ZOL hardware loop mechanism which the ARCv3 ISA drops.
Architecturally ZOL provides
- LPcc instruction
- LP_COUNT core reg
- LP_START, LP_END aux regs
Disabling this option removes usage of ZOL regs from code
config NUM_IRQ_PRIO_LEVELS
int "Number of supported interrupt priority levels"
range 1 16
help
Interrupt priorities available will be 0 to NUM_IRQ_PRIO_LEVELS-1.
The minimum value is 1.
The BSP must provide a valid default for proper operation.
config NUM_IRQS
int "Upper limit of interrupt numbers/IDs used"
range 17 256
help
Interrupts available will be 0 to NUM_IRQS-1.
The minimum value is 17 as the first 16 entries in the vector
table are for CPU exceptions.
The BSP must provide a valid default. This drives the size of the
vector table.
config RGF_NUM_BANKS
int "Number of General Purpose Register Banks"
depends on ARC_FIRQ
depends on NUM_IRQ_PRIO_LEVELS > 1
range 1 2
default 2
help
The ARC CPU can be configured to have more than one register
bank. If fast interrupts are supported (FIRQ), the 2nd
register bank, in the set, will be used by FIRQ interrupts.
If fast interrupts are supported but there is only 1
register bank, the fast interrupt handler must save
and restore general purpose registers.
NOTE: it's required to have more than one interrupt priority level
to use second register bank - otherwise all interrupts will use
same register bank. Such configuration isn't supported in software
and it is not beneficial from the performance point of view.
config ARC_FIRQ
bool "FIRQ enable"
depends on ISA_ARCV2
depends on NUM_IRQ_PRIO_LEVELS > 1
depends on !ARC_HAS_SECURE
default y
help
Fast interrupts are supported (FIRQ). If FIRQ enabled, for interrupts
with highest priority, status32 and pc will be saved in aux regs,
other regs will be saved according to the number of register bank;
If FIRQ is disabled, the handle of interrupts with highest priority
will be same with other interrupts.
NOTE: we don't allow the configuration with FIRQ enabled and only one
interrupt priority level (so all interrupts are FIRQ). Such
configuration isn't supported in software and it is not beneficial
from the performance point of view.
config ARC_FIRQ_STACK
bool "Separate firq stack"
depends on ARC_FIRQ && RGF_NUM_BANKS > 1
help
Use separate stack for FIRQ handing. When the fast irq is also a direct
irq, this will get the minimal interrupt latency.
config ARC_FIRQ_STACK_SIZE
int "FIRQ stack size"
depends on ARC_FIRQ_STACK
default 1024
help
The size of firq stack.
config ARC_HAS_STACK_CHECKING
bool "ARC has STACK_CHECKING"
depends on ISA_ARCV2
default y
help
ARC is configured with STACK_CHECKING which is a mechanism for
checking stack accesses and raising an exception when a stack
overflow or underflow is detected.
config ARC_CONNECT
bool "ARC has ARC connect"
select SCHED_IPI_SUPPORTED
help
ARC is configured with ARC CONNECT which is a hardware for connecting
multi cores.
config ARC_STACK_CHECKING
bool
select NO_UNUSED_STACK_INSPECTION
help
Use ARC STACK_CHECKING to do stack protection
config ARC_STACK_PROTECTION
bool
default y if HW_STACK_PROTECTION
select ARC_STACK_CHECKING if ARC_HAS_STACK_CHECKING
select MPU_STACK_GUARD if (!ARC_STACK_CHECKING && ARC_MPU && ARC_MPU_VER !=2)
select THREAD_STACK_INFO
help
This option enables either:
- The ARC stack checking, or
- the MPU-based stack guard
to cause a system fatal error
if the bounds of the current process stack are overflowed.
The two stack guard options are mutually exclusive. The
selection of the ARC stack checking is
prioritized over the MPU-based stack guard.
config ARC_USE_UNALIGNED_MEM_ACCESS
bool "Unaligned access in HW"
default y if CPU_ARCHS
depends on (CPU_ARCEM && !ARC_HAS_SECURE) || CPU_ARCHS
help
ARC EM cores without Secure Shield 2+2 mode support might be configured
to support unaligned memory access, which is then disabled by default.
Enable unaligned access in hardware and make software use it.
config ARC_CURRENT_THREAD_USE_NO_TLS
bool
select CURRENT_THREAD_USE_NO_TLS
default y if (RGF_NUM_BANKS > 1) || ("$(ZEPHYR_TOOLCHAIN_VARIANT)" = "arcmwdt")
help
Disable current Thread Local Storage for ARC. For cores with more than one
RGF_NUM_BANKS the parameter is disabled by default because bank synchronization
requires significant time, and it slows down performance.
ARCMWDT works with the TLS pointer in a different way than GCC. Optimized access to
the TLS pointer via the _current variable does not provide significant advantages
in case of MetaWare.
config GEN_ISR_TABLES
default y
config GEN_IRQ_START_VECTOR
default 16
config HARVARD
bool "Harvard Architecture"
help
The ARC CPU can be configured to have two busses;
one for instruction fetching and another that serves as a data bus.
config CODE_DENSITY
bool "Code Density Option"
help
Enable code density option to get better code density
config ARC_HAS_ACCL_REGS
bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6 and/or DSP)"
default y if CPU_HS3X || CPU_HS4X || CPU_HS5X || CPU_HS6X
help
Depending on the configuration, CPU can contain accumulator reg-pair
(also referred to as r58:r59). These can also be used by gcc as GPR so
kernel needs to save/restore per process
config ARC_HAS_SECURE
bool "ARC has SecureShield"
depends on ISA_ARCV2
select CPU_HAS_TEE
select ARCH_HAS_TRUSTED_EXECUTION
help
This option is enabled when ARC core supports secure mode
config SJLI_TABLE_SIZE
int "SJLI table size"
depends on ARC_SECURE_FIRMWARE
default 8
help
The size of sjli (Secure Jump and Link Indexed) table. The
code in normal mode call secure services in secure mode through
sjli instruction.
config ARC_SECURE_FIRMWARE
bool "Generate Secure Firmware"
depends on ARC_HAS_SECURE
default y if TRUSTED_EXECUTION_SECURE
help
This option indicates that we are building a Zephyr image that
is intended to execute in secure mode. The option is only
applicable to ARC processors that implement the SecureShield.
This option enables Zephyr to include code that executes in
secure mode, as well as to exclude code that is designed to
execute only in normal mode.
Code executing in secure mode has access to both the secure
and normal resources of the ARC processors.
config ARC_NORMAL_FIRMWARE
bool "Generate Normal Firmware"
depends on !ARC_SECURE_FIRMWARE
depends on ARC_HAS_SECURE
default y if TRUSTED_EXECUTION_NONSECURE
help
This option indicates that we are building a Zephyr image that
is intended to execute in normal mode. Execution of this
image is triggered by secure firmware that executes in secure
mode. The option is only applicable to ARC processors that
implement the SecureShield.
This option enables Zephyr to include code that executes in
normal mode only, as well as to exclude code that is
designed to execute only in secure mode.
Code executing in normal mode has no access to secure
resources of the ARC processors, and, therefore, it shall avoid
accessing them.
source "arch/arc/core/dsp/Kconfig"
menu "ARC MPU Options"
depends on CPU_HAS_MPU
config ARC_MPU_ENABLE
bool "Memory Protection Unit (MPU)"
select ARC_MPU
help
Enable MPU
source "arch/arc/core/mpu/Kconfig"
endmenu
config DCACHE_LINE_SIZE
default 32
config ARC_EXCEPTION_STACK_SIZE
int "ARC exception handling stack size"
default 768 if !64BIT
default 2048 if 64BIT
help
Size in bytes of exception handling stack which is at the top of
interrupt stack to get smaller memory footprint because exception
is not frequent. To reduce the impact on interrupt handling,
especially nested interrupt, it cannot be too large.
endmenu
config ARC_EARLY_SOC_INIT
bool "Make early stage SoC-specific initialization"
help
Call SoC per-core setup code on early stage initialization
(before C runtime initialization). Setup code is called in form of
soc_early_asm_init_percpu assembler macro.
config MAIN_STACK_SIZE
default 4096 if 64BIT
config ISR_STACK_SIZE
default 4096 if 64BIT
config SYSTEM_WORKQUEUE_STACK_SIZE
default 4096 if 64BIT
config IDLE_STACK_SIZE
default 1024 if 64BIT
config IPM_CONSOLE_STACK_SIZE
default 2048 if 64BIT
config TEST_EXTRA_STACK_SIZE
default 2048 if 64BIT
config CMSIS_THREAD_MAX_STACK_SIZE
default 2048 if 64BIT
config CMSIS_V2_THREAD_MAX_STACK_SIZE
default 2048 if 64BIT
config CMSIS_V2_THREAD_DYNAMIC_STACK_SIZE
default 2048 if 64BIT
endmenu
``` | /content/code_sandbox/arch/arc/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,752 |
```unknown
/*
*
*/
/**
* @file
* @brief Thread context switching
*
* This module implements the routines necessary for thread context switching
* on ARCv2 CPUs.
*
* See isr_wrapper.S for details.
*/
#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <v2/irq.h>
#include <swap_macros.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
GTEXT(z_arc_switch)
/**
*
* @brief Initiate a cooperative context switch
*
* The arch_switch routine is invoked by various kernel services to effect
* a cooperative context switch. Prior to invoking arch_switch, the caller
* disables interrupts via irq_lock()
* Given that arch_switch() is called to effect a cooperative context switch,
* the caller-saved integer registers are saved on the stack by the function
* call preamble to arch_switch. This creates a custom stack frame that will
* be popped when returning from arch_switch, but is not suitable for handling
* a return from an exception. Thus, the fact that the thread is pending because
* of a cooperative call to arch_switch() has to be recorded via the
* _CAUSE_COOP code in the relinquish_cause of the thread's k_thread structure.
* The _rirq_exit()/_firq_exit() code will take care of doing the right thing
* to restore the thread status.
*
* When arch_switch() is invoked, we know the decision to perform a context
* switch or not has already been taken and a context switch must happen.
*
*
* C function prototype:
*
* void arch_switch(void *switch_to, void **switched_from);
*
*/
SECTION_FUNC(TEXT, z_arc_switch)
	/*
	 * r0 = new_thread->switch_handle = switch_to thread,
	 * r1 = &old_thread->switch_handle
	 * get old_thread from r1
	 */
	SUBR r2, r1, ___thread_t_switch_handle_OFFSET

	/* _thread_arch.relinquish_cause is 32 bit despite of platform bitness */
	_st32_huge_offset _CAUSE_COOP, r2, _thread_offset_to_relinquish_cause, r3

	/*
	 * Save status32 and blink on the stack before the callee-saved registers.
	 * This is the same layout as the start of an IRQ stack frame.
	 */
	LRR r3, [_ARC_V2_STATUS32]
	PUSHR r3

#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr r3, [_ARC_V2_SEC_STAT]
#else
	mov_s r3, 0
#endif
	push_s r3
#endif

	PUSHR blink

	_store_old_thread_callee_regs

	/* disable stack checking here, as sp will be changed to target
	 * thread's sp
	 */
	_disable_stack_checking r3

	MOVR r2, r0

	_load_new_thread_callee_regs

	/* dispatch on the cause the incoming thread was switched out with */
	breq r3, _CAUSE_RIRQ, _switch_return_from_rirq
	nop_s
	breq r3, _CAUSE_FIRQ, _switch_return_from_firq
	nop_s

	/* fall through to _switch_return_from_coop */

.align 4
_switch_return_from_coop:
	POPR blink /* pc into blink */
#ifdef CONFIG_ARC_HAS_SECURE
	pop_s r3 /* pop SEC_STAT */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	sflag r3
#endif
#endif
	POPR r3 /* status32 into r3 */
	kflag r3 /* write status32 */

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	PUSHR blink
	bl z_thread_mark_switched_in
	POPR blink
#endif
	j_s [blink]

.align 4
_switch_return_from_rirq:
_switch_return_from_firq:
	_set_misc_regs_irq_switch_from_irq

	/* use lowest interrupt priority to simulate
	 * an interrupt return to load the remaining regs of the new
	 * thread
	 */
	LRR r3, [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	or r3, r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else
	ORR r3, r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
	mov_s r0, _ARC_V2_AUX_IRQ_ACT
	mov_s r1, r3
	mov_s r6, ARC_S_CALL_AUX_WRITE
	sjli SJLI_CALL_ARC_SECURE
#else
	SRR r3, [_ARC_V2_AUX_IRQ_ACT]
#endif
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	PUSHR blink
	bl z_thread_mark_switched_in
	POPR blink
#endif
	rtie
``` | /content/code_sandbox/arch/arc/core/switch.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,042 |
```unknown
/*
*
*/
/**
* @file
* @brief Wrapper for z_thread_entry
*
* Wrapper for z_thread_entry routine when called from the initial context.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <v2/irq.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
GTEXT(z_thread_entry_wrapper)
GTEXT(z_thread_entry_wrapper1)
/**
* @brief Wrapper for z_thread_entry
*
* The routine pops parameters for the z_thread_entry from stack frame, prepared
* by the arch_new_thread() routine.
*/
SECTION_FUNC(TEXT, z_thread_entry_wrapper)
	/* set interrupt state from the initial IRQ lock key */
	seti _ARC_V2_INIT_IRQ_LOCK_KEY
z_thread_entry_wrapper1:
	/* pop the four z_thread_entry() arguments placed on the stack by
	 * arch_new_thread()
	 */
	POPR r3
	POPR r2
	POPR r1
	POPR r0
	j z_thread_entry
	nop

#if !defined(CONFIG_MULTITHREADING)
GTEXT(z_main_no_multithreading_entry_wrapper)
/* void z_main_no_multithreading_entry_wrapper(*p1, *p2, *p3, *main_stack, *main_entry) */
SECTION_FUNC(TEXT, z_main_no_multithreading_entry_wrapper)
	/* switch to the dedicated main stack passed in r3 */
	MOVR sp, r3
	/* *p1, *p2, *p3 are in r0, r1, r2 already */
	jl [r4]
	nop
	/*
	 * If we return from main we'll return from main wrapper and appear here.
	 * Go to infinite loop as there is nothing more to do.
	 */
z_main_no_multithreading_entry_wrapper_end:
	b z_main_no_multithreading_entry_wrapper_end
#endif /* !CONFIG_MULTITHREADING */
``` | /content/code_sandbox/arch/arc/core/thread_entry_wrapper.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 357 |
```c
/*
*
*/
/**
* @file
* @brief Common fault handler for ARCv2
*
* Common fault handler for ARCv2 processors.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <inttypes.h>
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/logging/log.h>
#include <err_dump_handling.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_arc_user_string_nlen);
static const struct z_exc_handle exceptions[] = {
Z_EXC_HANDLE(z_arc_user_string_nlen)
};
#endif
#if defined(CONFIG_MPU_STACK_GUARD)
/**
* @brief Assess occurrence of current thread's stack corruption
*
* This function performs an assessment whether a memory fault (on a given
* memory address) is the result of a stack overflow of the current thread.
*
* When called, we know at this point that we received an ARC
* protection violation, with any cause code, with the protection access
* error either "MPU" or "Secure MPU". In other words, an MPU fault of
* some kind. Need to determine whether this is a general MPU access
* exception or the specific case of a stack overflow.
*
* @param fault_addr memory address on which memory access violation
* has been reported.
* @param sp stack pointer when exception comes out
* @retval True if this appears to be a stack overflow
* @retval False if this does not appear to be a stack overflow
*/
static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
{
#if defined(CONFIG_MULTITHREADING)
	uint32_t guard_end, guard_start;
	const struct k_thread *thread = _current;

	if (!thread) {
		/* TODO: Under what circumstances could we get here ? */
		return false;
	}
#ifdef CONFIG_USERSPACE
	if ((thread->base.user_options & K_USER) != 0) {
		if ((z_arc_v2_aux_reg_read(_ARC_V2_ERSTATUS) &
		     _ARC_V2_STATUS32_U) != 0) {
			/* Normal user mode context. There is no specific
			 * "guard" installed in this case, instead what's
			 * happening is that the stack pointer is crashing
			 * into the privilege mode stack buffer which
			 * immediately precedes it.
			 */
			guard_end = thread->stack_info.start;
			guard_start = (uint32_t)thread->stack_obj;
		} else {
			/* Special case: handling a syscall on privilege stack.
			 * There is guard memory reserved immediately before
			 * it.
			 */
			guard_end = thread->arch.priv_stack_start;
			guard_start = guard_end - Z_ARC_STACK_GUARD_SIZE;
		}
	} else
#endif /* CONFIG_USERSPACE */
	{
		/* Supervisor thread */
		guard_end = thread->stack_info.start;
		guard_start = guard_end - Z_ARC_STACK_GUARD_SIZE;
	}
	/* Treat any MPU exception within the guard region as a stack
	 * overflow, as some instructions
	 * (like enter_s {r13-r26, fp, blink}) push a collection of
	 * registers onto the stack. In this situation, the fault_addr
	 * will be less than guard_end while sp is still above guard_end.
	 */
	if (fault_addr < guard_end && fault_addr >= guard_start) {
		return true;
	}
#endif /* CONFIG_MULTITHREADING */
	return false;
}
#endif
#ifdef CONFIG_EXCEPTION_DEBUG
/* For EV_ProtV, the numbering/semantics of the parameter are consistent across
* several codes, although not all combination will be reported.
*
* These codes and parameters do not have associated* names in
* the technical manual, just switch on the values in Table 6-5
*/
/* Map an EV_ProtV parameter value to the name of the protection scheme
 * that raised the violation; unrecognized values yield "unknown".
 */
static const char *get_protv_access_err(uint32_t parameter)
{
	static const struct {
		uint32_t code;
		const char *name;
	} schemes[] = {
		{ 0x1, "code protection scheme" },
		{ 0x2, "stack checking scheme" },
		{ 0x4, "MPU" },
		{ 0x8, "MMU" },
		{ 0x10, "NVM" },
		{ 0x24, "Secure MPU" },
		{ 0x44, "Secure MPU with SID mismatch" },
	};

	for (size_t i = 0; i < sizeof(schemes) / sizeof(schemes[0]); i++) {
		if (schemes[i].code == parameter) {
			return schemes[i].name;
		}
	}

	return "unknown";
}
/* Print a description of an EV_ProtV exception from its cause code;
 * causes 0x0-0x3 also report which protection scheme fired.
 */
static void dump_protv_exception(uint32_t cause, uint32_t parameter)
{
	if (cause == 0x0) {
		ARC_EXCEPTION_DUMP("Instruction fetch violation (%s)",
				   get_protv_access_err(parameter));
	} else if (cause == 0x1) {
		ARC_EXCEPTION_DUMP("Memory read protection violation (%s)",
				   get_protv_access_err(parameter));
	} else if (cause == 0x2) {
		ARC_EXCEPTION_DUMP("Memory write protection violation (%s)",
				   get_protv_access_err(parameter));
	} else if (cause == 0x3) {
		ARC_EXCEPTION_DUMP("Memory read-modify-write violation (%s)",
				   get_protv_access_err(parameter));
	} else if (cause == 0x10) {
		ARC_EXCEPTION_DUMP("Normal vector table in secure memory");
	} else if (cause == 0x11) {
		ARC_EXCEPTION_DUMP("NS handler code located in S memory");
	} else if (cause == 0x12) {
		ARC_EXCEPTION_DUMP("NSC Table Range Violation");
	} else {
		ARC_EXCEPTION_DUMP("unknown");
	}
}
/* Print a description of an EV_MachineCheck exception from its cause
 * code; for cause 0x6 the parameter further qualifies the fault.
 */
static void dump_machine_check_exception(uint32_t cause, uint32_t parameter)
{
	switch (cause) {
	case 0x0:
		ARC_EXCEPTION_DUMP("double fault");
		break;
	case 0x1:
		ARC_EXCEPTION_DUMP("overlapping TLB entries");
		break;
	case 0x2:
		ARC_EXCEPTION_DUMP("fatal TLB error");
		break;
	case 0x3:
		ARC_EXCEPTION_DUMP("fatal cache error");
		break;
	case 0x4:
		ARC_EXCEPTION_DUMP("internal memory error on instruction fetch");
		break;
	case 0x5:
		ARC_EXCEPTION_DUMP("internal memory error on data fetch");
		break;
	case 0x6:
		ARC_EXCEPTION_DUMP("illegal overlapping MPU entries");
		if (parameter == 0x1) {
			ARC_EXCEPTION_DUMP(" - jump and branch target");
		}
		break;
	case 0x10:
		ARC_EXCEPTION_DUMP("secure vector table not located in secure memory");
		break;
	case 0x11:
		ARC_EXCEPTION_DUMP("NSC jump table not located in secure memory");
		break;
	case 0x12:
		ARC_EXCEPTION_DUMP("secure handler code not located in secure memory");
		break;
	case 0x13:
		ARC_EXCEPTION_DUMP("NSC target address not located in secure memory");
		break;
	case 0x80:
		ARC_EXCEPTION_DUMP("uncorrectable ECC or parity error in vector memory");
		break;
	default:
		ARC_EXCEPTION_DUMP("unknown");
		break;
	}
}
/* Print a description of an EV_PrivilegeV exception; for causes 0x10 and
 * 0x13 the parameter selects the specific secure-mode violation.
 */
static void dump_privilege_exception(uint32_t cause, uint32_t parameter)
{
	switch (cause) {
	case 0x0:
		ARC_EXCEPTION_DUMP("Privilege violation");
		break;
	case 0x1:
		ARC_EXCEPTION_DUMP("disabled extension");
		break;
	case 0x2:
		ARC_EXCEPTION_DUMP("action point hit");
		break;
	case 0x10:
		switch (parameter) {
		case 0x1:
			ARC_EXCEPTION_DUMP("N to S return using incorrect return mechanism");
			break;
		case 0x2:
			ARC_EXCEPTION_DUMP("N to S return with incorrect operating mode");
			break;
		case 0x3:
			ARC_EXCEPTION_DUMP("IRQ/exception return fetch from wrong mode");
			break;
		case 0x4:
			ARC_EXCEPTION_DUMP("attempt to halt secure processor in NS mode");
			break;
		case 0x20:
			ARC_EXCEPTION_DUMP("attempt to access secure resource from normal mode");
			break;
		case 0x40:
			ARC_EXCEPTION_DUMP("SID violation on resource access (APEX/UAUX/key NVM)");
			break;
		default:
			ARC_EXCEPTION_DUMP("unknown");
			break;
		}
		break;
	case 0x13:
		switch (parameter) {
		case 0x20:
			ARC_EXCEPTION_DUMP("attempt to access secure APEX feature from NS mode");
			break;
		case 0x40:
			ARC_EXCEPTION_DUMP("SID violation on access to APEX feature");
			break;
		default:
			ARC_EXCEPTION_DUMP("unknown");
			break;
		}
		break;
	default:
		ARC_EXCEPTION_DUMP("unknown");
		break;
	}
}
static void dump_exception_info(uint32_t vector, uint32_t cause, uint32_t parameter)
{
if (vector >= 0x10 && vector <= 0xFF) {
ARC_EXCEPTION_DUMP("interrupt %u", vector);
return;
}
/* Names are exactly as they appear in Designware ARCv2 ISA
* Programmer's reference manual for easy searching
*/
switch (vector) {
case ARC_EV_RESET:
ARC_EXCEPTION_DUMP("Reset");
break;
case ARC_EV_MEM_ERROR:
ARC_EXCEPTION_DUMP("Memory Error");
break;
case ARC_EV_INS_ERROR:
ARC_EXCEPTION_DUMP("Instruction Error");
break;
case ARC_EV_MACHINE_CHECK:
ARC_EXCEPTION_DUMP("EV_MachineCheck");
dump_machine_check_exception(cause, parameter);
break;
case ARC_EV_TLB_MISS_I:
ARC_EXCEPTION_DUMP("EV_TLBMissI");
break;
case ARC_EV_TLB_MISS_D:
ARC_EXCEPTION_DUMP("EV_TLBMissD");
break;
case ARC_EV_PROT_V:
ARC_EXCEPTION_DUMP("EV_ProtV");
dump_protv_exception(cause, parameter);
break;
case ARC_EV_PRIVILEGE_V:
ARC_EXCEPTION_DUMP("EV_PrivilegeV");
dump_privilege_exception(cause, parameter);
break;
case ARC_EV_SWI:
ARC_EXCEPTION_DUMP("EV_SWI");
break;
case ARC_EV_TRAP:
ARC_EXCEPTION_DUMP("EV_Trap");
break;
case ARC_EV_EXTENSION:
ARC_EXCEPTION_DUMP("EV_Extension");
break;
case ARC_EV_DIV_ZERO:
ARC_EXCEPTION_DUMP("EV_DivZero");
break;
case ARC_EV_DC_ERROR:
ARC_EXCEPTION_DUMP("EV_DCError");
break;
case ARC_EV_MISALIGNED:
ARC_EXCEPTION_DUMP("EV_Misaligned");
break;
case ARC_EV_VEC_UNIT:
ARC_EXCEPTION_DUMP("EV_VecUnit");
break;
default:
ARC_EXCEPTION_DUMP("unknown");
break;
}
}
#endif /* CONFIG_EXCEPTION_DEBUG */
/**
 * @brief Fault handler
 *
 * This routine is called when fatal error conditions are detected by hardware
 * and is responsible only for reporting the error. Once reported, it then
 * invokes the user provided routine k_sys_fatal_error_handler() which is
 * responsible for implementing the error handling policy.
 *
 * @param esf    Exception stack frame captured by the low-level fault entry.
 * @param old_sp Stack pointer of the faulting context, used for MPU stack
 *               guard analysis.
 */
void _Fault(struct arch_esf *esf, uint32_t old_sp)
{
	uint32_t vector, cause, parameter;
	/* EFA = faulting address, ECR = exception cause register */
	uint32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
	uint32_t ecr = z_arc_v2_aux_reg_read(_ARC_V2_ECR);
#ifdef CONFIG_USERSPACE
	/* If the faulting PC lies inside one of the registered fixup regions
	 * (e.g. the user-string access window in userspace.S), redirect
	 * execution to that region's fixup handler instead of dying.
	 */
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		uint32_t start = (uint32_t)exceptions[i].start;
		uint32_t end = (uint32_t)exceptions[i].end;
		if (esf->pc >= start && esf->pc < end) {
			esf->pc = (uint32_t)(exceptions[i].fixup);
			return;
		}
	}
#endif
	vector = Z_ARC_V2_ECR_VECTOR(ecr);
	cause = Z_ARC_V2_ECR_CODE(ecr);
	parameter = Z_ARC_V2_ECR_PARAMETER(ecr);
	/* exception raised by kernel */
	if (vector == ARC_EV_TRAP && parameter == _TRAP_S_CALL_RUNTIME_EXCEPT) {
		/*
		 * in user mode software-triggered system fatal exceptions only allow
		 * K_ERR_KERNEL_OOPS and K_ERR_STACK_CHK_FAIL
		 */
#ifdef CONFIG_USERSPACE
		if ((esf->status32 & _ARC_V2_STATUS32_U) &&
		    esf->r0 != K_ERR_STACK_CHK_FAIL) {
			esf->r0 = K_ERR_KERNEL_OOPS;
		}
#endif
		z_arc_fatal_error(esf->r0, esf);
		return;
	}
#ifdef CONFIG_EXCEPTION_DEBUG
	ARC_EXCEPTION_DUMP("***** Exception vector: 0x%x, cause code: 0x%x, parameter 0x%x",
			   vector, cause, parameter);
	ARC_EXCEPTION_DUMP("Address 0x%x", exc_addr);
	dump_exception_info(vector, cause, parameter);
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
	/* Vector 6 = EV_ProtV. Regardless of cause, parameter 2 means stack
	 * check violation
	 * stack check and mpu violation can come out together, then
	 * parameter = 0x2 | [0x4 | 0x8 | 0x1]
	 */
	if (vector == ARC_EV_PROT_V && parameter & 0x2) {
		z_arc_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
		return;
	}
#endif
#ifdef CONFIG_MPU_STACK_GUARD
	/* MPU read (0x4) or read/write (0x24) violation: check whether it
	 * was the thread's stack guard region that tripped.
	 */
	if (vector == ARC_EV_PROT_V && ((parameter == 0x4) ||
					(parameter == 0x24))) {
		if (z_check_thread_stack_fail(exc_addr, old_sp)) {
			z_arc_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
			return;
		}
	}
#endif
	/* Anything not handled above is a generic fatal CPU exception */
	z_arc_fatal_error(K_ERR_CPU_EXCEPTION, esf);
}
``` | /content/code_sandbox/arch/arc/core/fault.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,011 |
```c
/*
*
*/
/**
* @file Software interrupts utility code - ARC implementation
*/
#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>
#include <zephyr/init.h>
/* Choose a reasonable default for interrupt line which is used for irq_offload with the option
* to override it by setting interrupt line via device tree.
*/
#if DT_NODE_EXISTS(DT_NODELABEL(test_irq_offload_line_0))
#define IRQ_OFFLOAD_LINE DT_IRQN(DT_NODELABEL(test_irq_offload_line_0))
#else
/* The last two lines are already used in the IRQ tests, so we choose the 3rd line from the end */
#define IRQ_OFFLOAD_LINE (CONFIG_NUM_IRQS - 3)
#endif
#define IRQ_OFFLOAD_PRIO 0
#define CURR_CPU (IS_ENABLED(CONFIG_SMP) ? arch_curr_cpu()->id : 0)
/* Per-CPU parameters for the pending offload call; written by
 * arch_irq_offload() and consumed inside arc_irq_offload_handler().
 */
static struct {
	volatile irq_offload_routine_t fn;   /* routine to invoke in IRQ context */
	const void *volatile arg;            /* opaque argument passed to fn */
} offload_params[CONFIG_MP_MAX_NUM_CPUS];
/* ISR for the dedicated offload interrupt line: invoke the routine that
 * this CPU's arch_irq_offload() call staged in offload_params[].
 */
static void arc_irq_offload_handler(const void *unused)
{
	unsigned int cpu = CURR_CPU;

	ARG_UNUSED(unused);

	offload_params[cpu].fn(offload_params[cpu].arg);
}
/* Run @routine(@parameter) in interrupt context on the current CPU by
 * software-triggering the reserved offload interrupt line.
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
	unsigned int cpu = CURR_CPU;

	offload_params[cpu].fn = routine;
	offload_params[cpu].arg = parameter;
	compiler_barrier();

	/* Assert the offload line via the IRQ hint aux register, then wait
	 * for the core to take the interrupt.
	 */
	z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_HINT, IRQ_OFFLOAD_LINE);
	__asm__ volatile("sync");

	/* If _current was aborted in the offload routine, we shouldn't be here */
	__ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0);
}
/* need to be executed on every core in the system */
int arc_irq_offload_init(void)
{
	/* Bind the offload handler to the reserved line; priority 0. */
	IRQ_CONNECT(IRQ_OFFLOAD_LINE, IRQ_OFFLOAD_PRIO, arc_irq_offload_handler, NULL, 0);

	/* The line is triggered and controlled with core private interrupt controller,
	 * so even in case common (IDU) interrupt line usage on SMP we need to enable it not
	 * with generic irq_enable() but via z_arc_v2_irq_unit_int_enable().
	 */
	z_arc_v2_irq_unit_int_enable(IRQ_OFFLOAD_LINE);
	return 0;
}
/* Registered at POST_KERNEL so the interrupt subsystem is already up. */
SYS_INIT(arc_irq_offload_init, POST_KERNEL, 0);
``` | /content/code_sandbox/arch/arc/core/irq_offload.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 509 |
```linker script
/*
*
*/
/* when !XIP, .text is in RAM, and vector table must be at its very start */
KEEP(*(.exc_vector_table))
KEEP(*(".exc_vector_table.*"))
``` | /content/code_sandbox/arch/arc/core/vector_table.ld | linker script | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 41 |
```c
/*
*
*/
/**
* @file
* @brief New thread creation for ARCv2
*
* Core thread related primitives for the ARCv2 processor architecture.
*/
#include <zephyr/kernel.h>
#include <ksched.h>
#include <offsets_short.h>
#ifdef CONFIG_USERSPACE
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#endif
#if defined(CONFIG_ARC_DSP) && defined(CONFIG_DSP_SHARING)
#include <zephyr/arch/arc/v2/dsp/arc_dsp.h>
static struct k_spinlock lock;
#endif
/* Initial stack frame placed at the top of a new thread's stack.
 * Field order matches what the thread-entry/exception-return path pops:
 * pc and status32 seed the first "return" into the thread, r0-r3 carry
 * the entry-point arguments (see arch_new_thread()).
 */
struct init_stack_frame {
	uintptr_t pc;        /* initial program counter */
#ifdef CONFIG_ARC_HAS_SECURE
	uint32_t sec_stat;   /* initial secure status (from _ARC_V2_SEC_STAT) */
#endif
	uintptr_t status32;  /* initial STATUS32 image */
	uintptr_t r3;        /* r0=entry, r1=p1, r2=p2, r3=p3 */
	uintptr_t r2;
	uintptr_t r1;
	uintptr_t r0;
};
#ifdef CONFIG_USERSPACE
/* User threads get the common iframe plus the initial user-mode stack
 * pointer, which z_user_thread_entry_wrapper pops off the privilege stack.
 */
struct user_init_stack_frame {
	struct init_stack_frame iframe;
	uint32_t user_sp;    /* initial user-mode stack pointer */
};

/* Return true if @thread was created with the K_USER option. */
static bool is_user(struct k_thread *thread)
{
	return (thread->base.user_options & K_USER) != 0;
}
#endif
/* Set all stack-related architecture variables for the provided thread:
 * the privileged stack start (userspace builds) and the kernel/user stack
 * bounds used by hardware stack checking (CONFIG_ARC_STACK_CHECKING).
 */
static void setup_stack_vars(struct k_thread *thread)
{
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
#ifdef CONFIG_GEN_PRIV_STACKS
		thread->arch.priv_stack_start =
			(uint32_t)z_priv_stack_find(thread->stack_obj);
#else
		thread->arch.priv_stack_start = (uint32_t)(thread->stack_obj);
#endif /* CONFIG_GEN_PRIV_STACKS */
		/* skip the MPU guard region at the bottom of the object */
		thread->arch.priv_stack_start += Z_ARC_STACK_GUARD_SIZE;
	} else {
		thread->arch.priv_stack_start = 0;
	}
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
		/* kernel-mode bounds cover the privileged stack, user-mode
		 * bounds cover the thread's own stack object
		 */
		thread->arch.k_stack_top = thread->arch.priv_stack_start;
		thread->arch.k_stack_base = (thread->arch.priv_stack_start +
					     CONFIG_PRIVILEGED_STACK_SIZE);
		thread->arch.u_stack_top = thread->stack_info.start;
		thread->arch.u_stack_base = (thread->stack_info.start +
					     thread->stack_info.size);
	} else
#endif /* CONFIG_USERSPACE */
	{
		thread->arch.k_stack_top = (uint32_t)thread->stack_info.start;
		thread->arch.k_stack_base = (uint32_t)(thread->stack_info.start +
						       thread->stack_info.size);
#ifdef CONFIG_USERSPACE
		thread->arch.u_stack_top = 0;
		thread->arch.u_stack_base = 0;
#endif /* CONFIG_USERSPACE */
	}
#endif /* CONFIG_ARC_STACK_CHECKING */
}
/* Get the initial stack frame pointer from the thread's stack buffer.
 * For kernel threads the frame sits at @stack_ptr; for user threads it
 * sits at the top of the privileged stack and records @stack_ptr as the
 * initial user stack pointer.
 */
static struct init_stack_frame *get_iframe(struct k_thread *thread,
					   char *stack_ptr)
{
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
		/* Initial stack frame for a user thread is slightly larger;
		 * we land in z_user_thread_entry_wrapper on the privilege
		 * stack, and pop off an additional value for the user
		 * stack pointer.
		 */
		struct user_init_stack_frame *uframe;
		uframe = Z_STACK_PTR_TO_FRAME(struct user_init_stack_frame,
					      thread->arch.priv_stack_start +
					      CONFIG_PRIVILEGED_STACK_SIZE);
		uframe->user_sp = (uint32_t)stack_ptr;
		return &uframe->iframe;
	}
#endif
	return Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);
}
/*
 * Pre-populate values in the registers inside _callee_saved_stack struct
 * so these registers have pre-defined values when new thread begins
 * execution. For example, setting up the thread pointer for thread local
 * storage here so the thread starts with thread pointer already set up.
 *
 * @param thread    Thread whose saved-register area is being initialized.
 * @param stack_ptr Address of the thread's _callee_saved_stack area.
 */
static inline void arch_setup_callee_saved_regs(struct k_thread *thread,
						uintptr_t stack_ptr)
{
	_callee_saved_stack_t *regs = UINT_TO_POINTER(stack_ptr);

	/* regs is unused when no TLS register needs seeding */
	ARG_UNUSED(regs);

	/* GCC uses tls pointer cached in register, MWDT just call for _mwget_tls */
#if defined(CONFIG_THREAD_LOCAL_STORAGE) && !defined(__CCAC__)
#ifdef CONFIG_ISA_ARCV2
#if __ARC_TLS_REGNO__ <= 0
#error Compiler not configured for thread local storage
#endif
#define TLSREG _CONCAT(r, __ARC_TLS_REGNO__)
	/* __ARC_TLS_REGNO__ is used for thread pointer for ARCv2 */
	regs->TLSREG = thread->tls;
#else
	/* R30 is used for thread pointer for ARCv3 */
	regs->r30 = thread->tls;
#endif /* CONFIG_ISA_ARCV2 */
#endif
}
/*
 * The initial context is a basic stack frame that contains arguments for
 * z_thread_entry() return address, that points at z_thread_entry()
 * and status register.
 *
 * @param thread    Thread being initialized
 * @param stack     Stack object (unused here; bounds come from stack_info)
 * @param stack_ptr Initial stack pointer for the thread
 * @param entry     Thread entry function, passed in r0
 * @param p1,p2,p3  Entry arguments, passed in r1-r3
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct init_stack_frame *iframe;

	setup_stack_vars(thread);

	/* Set up initial stack frame */
	iframe = get_iframe(thread, stack_ptr);

#ifdef CONFIG_USERSPACE
	/* enable US bit, US is read as zero in user mode. This will allow user
	 * mode sleep instructions, and it enables a form of denial-of-service
	 * attack by putting the processor in sleep mode, but since interrupt
	 * level/mask can't be set from user space that's not worse than
	 * executing a loop without yielding.
	 */
	iframe->status32 = _ARC_V2_STATUS32_US | _ARC_V2_STATUS32_DZ;
	if (is_user(thread)) {
		iframe->pc = (uint32_t)z_user_thread_entry_wrapper;
	} else {
		iframe->pc = (uint32_t)z_thread_entry_wrapper;
	}
#else
	iframe->status32 = _ARC_V2_STATUS32_DZ;
	iframe->pc = ((uintptr_t)z_thread_entry_wrapper);
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* inherit the current secure state */
	iframe->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
#endif
	/* entry point and its three arguments, popped into r0-r3 */
	iframe->r0 = (uintptr_t)entry;
	iframe->r1 = (uintptr_t)p1;
	iframe->r2 = (uintptr_t)p2;
	iframe->r3 = (uintptr_t)p3;
#ifdef CONFIG_ARC_STACK_CHECKING
	/* enable hardware stack checking for the new thread */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	iframe->sec_stat |= _ARC_V2_SEC_STAT_SSC;
#else
	iframe->status32 |= _ARC_V2_STATUS32_SC;
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#endif /* CONFIG_ARC_STACK_CHECKING */
#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
	iframe->status32 |= _ARC_V2_STATUS32_AD;
#endif
	/* Set required thread members */
	thread->switch_handle = thread;
	thread->arch.relinquish_cause = _CAUSE_COOP;
	/* leave room below the iframe for the callee-saved register area */
	thread->callee_saved.sp =
		(uintptr_t)iframe - ___callee_saved_stack_t_SIZEOF;

	arch_setup_callee_saved_regs(thread, thread->callee_saved.sp);

	/* initial values in all other regs/k_thread entries are irrelevant */
}
#ifdef CONFIG_MULTITHREADING
/* Record the outgoing thread in *old_thread and return the switch handle
 * of the next thread chosen by the scheduler.
 */
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
	*old_thread = _current;

	return z_get_next_switch_handle(NULL);
}
#else
/* Single-threaded build: there is never another thread to switch to. */
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
	ARG_UNUSED(old_thread);
	return NULL;
}
#endif
#ifdef CONFIG_USERSPACE
/* Drop the current thread into user mode, never to return. Recomputes the
 * thread's stack variables, loads its MPU configuration, then performs the
 * one-way transition in z_arc_userspace_enter (userspace.S).
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	setup_stack_vars(_current);

	/* possible optimization: no need to load mem domain anymore */
	/* need to lock cpu here ? */
	configure_mpu_thread(_current);

	z_arc_userspace_enter(user_entry, p1, p2, p3,
			      (uint32_t)_current->stack_info.start,
			      (_current->stack_info.size -
			       _current->stack_info.delta), _current);
	CODE_UNREACHABLE;
}
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Stop preserving floating point context for @thread by clearing its
 * K_FP_REGS option. Interrupts are locked around the read-modify-write so
 * a preemptive context switch cannot intervene. Always returns 0.
 */
int arch_float_disable(struct k_thread *thread)
{
	unsigned int lock_key = irq_lock();

	thread->base.user_options &= ~K_FP_REGS;
	irq_unlock(lock_key);

	return 0;
}

/* Start preserving floating point context for @thread by setting its
 * K_FP_REGS option; @options is not consulted on ARC. Interrupts are
 * locked around the update. Always returns 0.
 */
int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	unsigned int lock_key = irq_lock();

	thread->base.user_options |= K_FP_REGS;
	irq_unlock(lock_key);

	return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#if !defined(CONFIG_MULTITHREADING)
K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE);
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);

extern void z_main_no_multithreading_entry_wrapper(void *p1, void *p2, void *p3,
						   void *main_stack, void *main_entry);

/* Hand control to main() in a build without the scheduler: set up CPU 0's
 * interrupt stack, compute the top of the main stack, unlock interrupts,
 * and jump to the assembly entry wrapper. Never returns.
 */
FUNC_NORETURN void z_arc_switch_to_main_no_multithreading(k_thread_entry_t main_entry,
							  void *p1, void *p2, void *p3)
{
	_kernel.cpus[0].id = 0;
	_kernel.cpus[0].irq_stack = (K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
				     K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));

	/* stacks grow down: start main at the top of z_main_stack */
	void *main_stack = (K_THREAD_STACK_BUFFER(z_main_stack) +
			    K_THREAD_STACK_SIZEOF(z_main_stack));

	arch_irq_unlock(_ARC_V2_INIT_IRQ_LOCK_KEY);

	z_main_no_multithreading_entry_wrapper(p1, p2, p3, main_stack, main_entry);

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */
#if defined(CONFIG_ARC_DSP) && defined(CONFIG_DSP_SHARING)
/* Clear the DSP/AGU sharing option bits given in @options for @thread.
 * The module spinlock is held so the update cannot race a preemptive
 * context switch.
 */
void arc_dsp_disable(struct k_thread *thread, unsigned int options)
{
	k_spinlock_key_t lock_key = k_spin_lock(&lock);

	thread->base.user_options &= ~(uint8_t)options;
	k_spin_unlock(&lock, lock_key);
}

/* Set the DSP/AGU sharing option bits given in @options for @thread,
 * under the same spinlock protection as arc_dsp_disable().
 */
void arc_dsp_enable(struct k_thread *thread, unsigned int options)
{
	k_spinlock_key_t lock_key = k_spin_lock(&lock);

	thread->base.user_options |= (uint8_t)options;
	k_spin_unlock(&lock, lock_key);
}
#endif /* CONFIG_ARC_DSP && CONFIG_DSP_SHARING */
``` | /content/code_sandbox/arch/arc/core/thread.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,338 |
```unknown
/*
*
*/
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include <swap_macros.h>
#include <v2/irq.h>
/* Zero all caller-saved (scratch) GPRs r1-r12 so no kernel data leaks
 * into user mode through them.
 */
.macro clear_scratch_regs
	mov_s r1, 0
	mov_s r2, 0
	mov_s r3, 0
	mov_s r4, 0
	mov_s r5, 0
	mov_s r6, 0
	mov_s r7, 0
	mov_s r8, 0
	mov_s r9, 0
	mov_s r10, 0
	mov_s r11, 0
	mov_s r12, 0
.endm

/* Zero all callee-saved GPRs r13-r25 for the same reason. */
.macro clear_callee_regs
	mov_s r25, 0
	mov_s r24, 0
	mov_s r23, 0
	mov_s r22, 0
	mov_s r21, 0
	mov_s r20, 0
	mov_s r19, 0
	mov_s r18, 0
	mov_s r17, 0
	mov_s r16, 0
	mov_s r15, 0
	mov_s r14, 0
	mov_s r13, 0
.endm
GTEXT(z_arc_userspace_enter)
GTEXT(_arc_do_syscall)
GTEXT(z_user_thread_entry_wrapper)
GTEXT(arch_user_string_nlen)
GTEXT(z_arc_user_string_nlen_fault_start)
GTEXT(z_arc_user_string_nlen_fault_end)
GTEXT(z_arc_user_string_nlen_fixup)
/**
 * @brief Wrapper for z_thread_entry in the case of user thread
 *
 * The init parameters are in privileged stack
 */
SECTION_FUNC(TEXT, z_user_thread_entry_wrapper)
	seti _ARC_V2_INIT_IRQ_LOCK_KEY
	/* pop the entry args (r0=entry, r1-r3=p1-p3) placed by arch_new_thread */
	pop_s r3
	pop_s r2
	pop_s r1
	pop_s r0
	/* the start of user sp is in r5 */
	pop r5
	/* start of privilege stack in blink */
	mov_s blink, sp
	/* copy the four entry args onto the user stack */
	st.aw r0, [r5, -4]
	st.aw r1, [r5, -4]
	st.aw r2, [r5, -4]
	st.aw r3, [r5, -4]
	/*
	 * when CONFIG_INIT_STACKS is enabled, stack will be initialized
	 * in z_new_thread_init.
	 */
	j _arc_go_to_user_space
/**
 *
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The conversion is one way, and threads which transition to user mode do
 * not transition back later, unless they are doing system calls.
 *
 * On entry: r0-r3 = entry/args, r4 = user stack start, r5 = user stack
 * size, r6 = current thread (see arch_user_mode_enter()).
 */
SECTION_FUNC(TEXT, z_arc_userspace_enter)
	/*
	 * In ARCv2, the U bit can only be set through exception return
	 */
	/* disable stack checking as the stack should be initialized */
	_disable_stack_checking blink
	/* the end of user stack in r5 */
	add r5, r4, r5
	/* get start of privilege stack, r6 points to current thread */
	ld blink, [r6, _thread_offset_to_priv_stack_start]
	add blink, blink, CONFIG_PRIVILEGED_STACK_SIZE
	mov_s sp, r5
	/* park the entry args at the top of the user stack */
	push_s r0
	push_s r1
	push_s r2
	push_s r3
	mov r5, sp /* skip r0, r1, r2, r3 */
	/* to avoid the leakage of kernel info, the thread stack needs to be
	 * re-initialized
	 */
#ifdef CONFIG_INIT_STACKS
	mov_s r0, 0xaaaaaaaa
#else
	mov_s r0, 0x0
#endif
_clear_user_stack:
	st.ab r0, [r4, 4]
	cmp r4, r5
	jlt _clear_user_stack
	/* reload the stack checking regs as the original kernel stack
	 * becomes user stack
	 */
#ifdef CONFIG_ARC_STACK_CHECKING
	/* current thread in r6, SMP case is also considered */
	mov r2, r6
	_load_stack_check_regs
	_enable_stack_checking r0
#endif
	/* the following codes are used to switch from kernel mode
	 * to user mode by fake exception, because U bit can only be set
	 * by exception
	 */
_arc_go_to_user_space:
	lr r0, [_ARC_V2_STATUS32]
	bset r0, r0, _ARC_V2_STATUS32_U_BIT
	mov_s r1, z_thread_entry_wrapper1
	sr r0, [_ARC_V2_ERSTATUS]
	sr r1, [_ARC_V2_ERET]
	/* fake exception return */
	lr r0, [_ARC_V2_STATUS32]
	bset r0, r0, _ARC_V2_STATUS32_AE_BIT
	kflag r0
	/* when exception returns from kernel to user, sp and _ARC_V2_USER_SP
	 * /_ARC_V2_SECU_SP will be switched
	 */
#if defined(CONFIG_ARC_HAS_SECURE) && defined(CONFIG_ARC_SECURE_FIRMWARE)
	lr r0, [_ARC_V2_SEC_STAT]
	/* the mode returns from exception return is secure mode */
	bset r0, r0, 31
	sr r0, [_ARC_V2_ERSEC_STAT]
	sr r5, [_ARC_V2_SEC_U_SP]
#else
	sr r5, [_ARC_V2_USER_SP]
#endif
	mov_s sp, blink
	mov_s r0, 0
	/* wipe all GPRs so no kernel data leaks into user mode */
	clear_callee_regs
	clear_scratch_regs
	mov fp, 0
	mov r29, 0
	mov r30, 0
	mov blink, 0
	rtie
/**
 *
 * Userspace system call function
 *
 * This function is used to do system calls from unprivileged code. This
 * function is responsible for the following:
 * 1) Dispatching the system call
 * 2) Restoring stack and calling back to the caller of the system call
 *
 */
SECTION_FUNC(TEXT, _arc_do_syscall)
	/*
	 * r0-r5: arg1-arg6, r6 is call id which is already checked in
	 * trap_s handler, r7 is the system call stack frame pointer
	 * need to recover r0, r1, r2 because they will be modified in
	 * _create_irq_stack_frame. If a specific syscall frame (different
	 * with irq stack frame) is defined, the cover of r0, r1, r2 can be
	 * optimized.
	 */
	ld_s r0, [sp, ___isf_t_r0_OFFSET]
	ld_s r1, [sp, ___isf_t_r1_OFFSET]
	ld_s r2, [sp, ___isf_t_r2_OFFSET]
	mov r7, sp
	/* dispatch: index _k_syscall_table by call id and call the handler */
	mov_s blink, _k_syscall_table
	ld.as r6, [blink, r6]
	jl [r6]
	/* save return value */
	st_s r0, [sp, ___isf_t_r0_OFFSET]
	mov r29, 0
	mov r30, 0
	/* through fake exception return, go back to the caller */
	lr r0, [_ARC_V2_STATUS32]
	bset r0, r0, _ARC_V2_STATUS32_AE_BIT
	kflag r0
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	ld_s r0, [sp, ___isf_t_sec_stat_OFFSET]
	sr r0,[_ARC_V2_ERSEC_STAT]
#endif
	ld_s r0, [sp, ___isf_t_status32_OFFSET]
	sr r0,[_ARC_V2_ERSTATUS]
	ld_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */
	sr r0,[_ARC_V2_ERET]
	_pop_irq_stack_frame
	rtie
/*
 * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
 *
 * Measure the length of a possibly-unmapped user string, up to maxsize
 * bytes. A fault inside the marked window is redirected by _Fault() to
 * z_arc_user_string_nlen_fixup, which returns with *err_arg still -1.
 */
SECTION_FUNC(TEXT, arch_user_string_nlen)
	/* int err; */
	sub_s sp,sp,0x4
	/* Initial error value (-1 failure), store at [sp,0] */
	mov_s r3, -1
	st_s r3, [sp, 0]
	/* Loop setup.
	 * r12 (position locator) = s - 1
	 * r0 (length counter return value)) = 0
	 * lp_count = maxsize + 1
	 * */
	sub r12, r0, 0x1
	mov_s r0, 0
	add_s r1, r1, 1
	mov lp_count, r1
strlen_loop:
z_arc_user_string_nlen_fault_start:
	/* is the byte at ++r12 a NULL? if so, we're done. Might fault! */
	ldb.aw r1, [r12, 1]
z_arc_user_string_nlen_fault_end:
	brne_s r1, 0, not_null
strlen_done:
	/* Success, set err to 0 */
	mov_s r1, 0
	st_s r1, [sp, 0]
z_arc_user_string_nlen_fixup:
	/* *err_arg = err; Pop stack and return */
	ld_s r1, [sp, 0]
	add_s sp, sp, 4
	j_s.d [blink]
	st_s r1, [r2, 0]
not_null:
	/* check if we've hit the maximum, if so we're done. */
	brne.d.nt lp_count, 0x1, inc_len
	sub lp_count, lp_count, 0x1
	b_s strlen_done
inc_len:
	/* increment length measurement, loop again */
	add_s r0, r0, 1
	b_s strlen_loop
``` | /content/code_sandbox/arch/arc/core/userspace.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,095 |
```unknown
/*
*
*/
/**
* @file
* @brief Handling of transitions to-and-from fast IRQs (FIRQ)
*
* This module implements the code for handling entry to and exit from Fast IRQs.
*
* See isr_wrapper.S for details.
*/
#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <swap_macros.h>
GTEXT(_firq_enter)
GTEXT(_firq_exit)
/**
 * @brief Work to be done before handing control to a FIRQ ISR
 *
 * The processor switches to a second register bank so registers from the
 * current bank do not have to be preserved yet. The only issue is the LP_START/
 * LP_COUNT/LP_END registers, which are not banked. These can be saved
 * in available callee saved registers.
 *
 * If all FIRQ ISRs are programmed such that there are no use of the LP
 * registers (ie. no LPcc instruction), and CONFIG_ARC_STACK_CHECKING is
 * not set, then the kernel can be configured to not save and restore them.
 *
 * When entering a FIRQ, interrupts might as well be locked: the processor is
 * running at its highest priority, and cannot be interrupted by any other
 * interrupt. An exception, however, can be taken.
 *
 * Assumption by _isr_demux: r3 is untouched by _firq_enter.
 */
SECTION_FUNC(TEXT, _firq_enter)
	/*
	 * ATTENTION:
	 * If CONFIG_RGF_NUM_BANKS>1, firq uses a 2nd register bank so GPRs do
	 * not need to be saved.
	 * If CONFIG_RGF_NUM_BANKS==1, firq must use the stack to save registers.
	 * This has already been done by _isr_wrapper.
	 */
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* disable stack checking (secure build: via SEC_STAT) */
	lr r2, [_ARC_V2_SEC_STAT]
	bclr r2, r2, _ARC_V2_SEC_STAT_SSC_BIT
	sflag r2
#else
	/* disable stack checking */
	lr r2, [_ARC_V2_STATUS32]
	bclr r2, r2, _ARC_V2_STATUS32_SC_BIT
	kflag r2
#endif
#endif
#if CONFIG_RGF_NUM_BANKS != 1
	/*
	 * Save LP_START/LP_COUNT/LP_END because called handler might use.
	 * Save these in callee saved registers to avoid using memory.
	 * These will be saved by the compiler if it needs to spill them.
	 */
	mov r23,lp_count
	lr r24, [_ARC_V2_LP_START]
	lr r25, [_ARC_V2_LP_END]
#endif
	/* check whether irq stack is used */
	_check_and_inc_int_nest_counter r0, r1

	bne.d firq_nest
	mov_s r0, sp

	_get_curr_cpu_irq_stack sp
#if CONFIG_RGF_NUM_BANKS != 1
	b firq_nest_1
firq_nest:
	/*
	 * because firq and rirq share the same interrupt stack,
	 * switch back to original register bank to get correct sp.
	 * to get better firq latency, an approach is to prepare
	 * separate interrupt stack for firq and do not do thread
	 * switch in firq.
	 */
	lr r1, [_ARC_V2_STATUS32]
	and r1, r1, ~_ARC_V2_STATUS32_RB(7)
	kflag r1

	/* here use _ARC_V2_USER_SP and ilink to exchange sp
	 * save original value of _ARC_V2_USER_SP and ilink into
	 * the stack of interrupted context first, then restore them later
	 */
	push ilink
	PUSHAX ilink, _ARC_V2_USER_SP
	/* sp here is the sp of interrupted context */
	sr sp, [_ARC_V2_USER_SP]

	/* here, bank 0 sp must go back to the value before push and
	 * PUSHAX as we will switch to bank1, the pop and POPAX later will
	 * change bank1's sp, not bank0's sp
	 */
	add sp, sp, 8

	/* switch back to banked reg, only ilink can be used */
	lr ilink, [_ARC_V2_STATUS32]
	or ilink, ilink, _ARC_V2_STATUS32_RB(1)
	kflag ilink
	lr sp, [_ARC_V2_USER_SP]
	POPAX ilink, _ARC_V2_USER_SP
	pop ilink
firq_nest_1:
#else
firq_nest:
#endif
	/* saved sp of the interrupted context goes on the current stack */
	push_s r0
	j _isr_demux
/**
 * @brief Work to be done exiting a FIRQ
 *
 * Either returns straight to the interrupted context (no reschedule or
 * nested interrupt) or performs a context switch to the thread returned
 * by the scheduler.
 */
SECTION_FUNC(TEXT, _firq_exit)
#if CONFIG_RGF_NUM_BANKS != 1
	/* restore lp_count, lp_start, lp_end from r23-r25 */
	mov lp_count,r23
	sr r24, [_ARC_V2_LP_START]
	sr r25, [_ARC_V2_LP_END]
#endif
	_dec_int_nest_counter r0, r1

	_check_nest_int_by_irq_act r0, r1

	jne _firq_no_switch

	/* sp is struct k_thread **old of z_arc_switch_in_isr which is a wrapper of
	 * z_get_next_switch_handle. r0 contains the 1st thread in ready queue. If it isn't NULL,
	 * then do switch to this thread.
	 */
	_get_next_switch_handle

	CMPR r0, 0
	bne _firq_switch

	/* fall to no switch */

.align 4
_firq_no_switch:
	/* restore interrupted context' sp */
	pop sp

	/*
	 * Keeping this code block close to those that use it allows using brxx
	 * instruction instead of a pair of cmp and bxx
	 */
#if CONFIG_RGF_NUM_BANKS == 1
	_pop_irq_stack_frame
#endif
	rtie

.align 4
_firq_switch:
	/* restore interrupted context' sp */
	pop sp

#if CONFIG_RGF_NUM_BANKS != 1
	/*
	 * save r0, r2 in irq stack for a while, as they will be changed by register
	 * bank switch
	 */
	_get_curr_cpu_irq_stack r1
	st r0, [r1, -4]
	st r2, [r1, -8]

	/*
	 * We know there is no interrupted interrupt of lower priority at this
	 * point, so when switching back to register bank 0, it will contain the
	 * registers from the interrupted thread.
	 */

#if defined(CONFIG_USERSPACE)
	/* when USERSPACE is configured, here need to consider the case where firq comes
	 * out in user mode, according to ARCv2 ISA and nsim, the following micro ops
	 * will be executed:
	 * sp<-reg bank1'sp
	 * switch between sp and _ARC_V2_USER_SP
	 * then:
	 * sp is the sp of kernel stack of interrupted thread
	 * _ARC_V2_USER_SP is reg bank1'sp
	 * the sp of user stack of interrupted thread is reg bank0'sp
	 * if firq comes out in kernel mode, the following micro ops will be executed:
	 * sp<-reg bank'sp
	 * so, sw needs to do necessary handling to set up the correct sp
	 */
	lr r0, [_ARC_V2_AUX_IRQ_ACT]
	bbit0 r0, 31, _firq_from_kernel
	aex sp, [_ARC_V2_USER_SP]
	lr r0, [_ARC_V2_STATUS32]
	and r0, r0, ~_ARC_V2_STATUS32_RB(7)
	kflag r0
	aex sp, [_ARC_V2_USER_SP]
	b _firq_create_irq_stack_frame

_firq_from_kernel:
#endif
	/* chose register bank #0 */
	lr r0, [_ARC_V2_STATUS32]
	and r0, r0, ~_ARC_V2_STATUS32_RB(7)
	kflag r0

_firq_create_irq_stack_frame:
	/* we're back on the outgoing thread's stack */
	_create_irq_stack_frame

	/*
	 * In a FIRQ, STATUS32 of the outgoing thread is in STATUS32_P0 and the
	 * PC in ILINK: save them in status32/pc respectively.
	 */
	lr r0, [_ARC_V2_STATUS32_P0]
	st_s r0, [sp, ___isf_t_status32_OFFSET]

	st ilink, [sp, ___isf_t_pc_OFFSET] /* ilink into pc */

	/*
	 * load r0, r2 from irq stack
	 */
	_get_curr_cpu_irq_stack r1
	ld r0, [r1, -4]
	ld r2, [r1, -8]
#endif
	/* r2 is old thread */
	st _CAUSE_FIRQ, [r2, _thread_offset_to_relinquish_cause]

	_irq_store_old_thread_callee_regs

	/* mov new thread (r0) to r2 */
	mov r2, r0

	_load_new_thread_callee_regs

	/* dispatch on how the incoming thread last gave up the CPU */
	breq r3, _CAUSE_RIRQ, _firq_switch_from_rirq
	nop_s
	breq r3, _CAUSE_FIRQ, _firq_switch_from_firq
	nop_s

	/* fall through */

.align 4
_firq_switch_from_coop:
	_set_misc_regs_irq_switch_from_coop

	/* pc into ilink */
	pop_s r0
	mov ilink, r0

	pop_s r0 /* status32 into r0 */
	sr r0, [_ARC_V2_STATUS32_P0]

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	push_s blink
	bl z_thread_mark_switched_in
	pop_s blink
#endif
	rtie

.align 4
_firq_switch_from_rirq:
_firq_switch_from_firq:
	_set_misc_regs_irq_switch_from_irq

	_pop_irq_stack_frame

	ld ilink, [sp, -4] /* status32 into ilink */
	sr ilink, [_ARC_V2_STATUS32_P0]

	ld ilink, [sp, -8] /* pc into ilink */
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	push_s blink
	bl z_thread_mark_switched_in
	pop_s blink
#endif
	/* LP registers are already restored, just switch back to bank 0 */
	rtie
``` | /content/code_sandbox/arch/arc/core/fast_irq.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,250 |
```c
/*
*
*/
/**
* @file
* @brief Full C support initialization
*
*
* Initialization of full C support: zero the .bss, copy the .data if XIP,
* call z_cstart().
*
* Stack is available in this module, but not the global data/bss until their
* initialization is performed.
*/
#include <zephyr/types.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
#include <zephyr/arch/arc/cluster.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
/* XXX - keep for future use in full-featured cache APIs */
#if 0
/**
* @brief Disable the i-cache if present
*
* For those ARC CPUs that have a i-cache present,
* invalidate the i-cache and then disable it.
*/
static void disable_icache(void)
{
unsigned int val;
val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
val &= 0xff; /* version field */
if (val == 0) {
return; /* skip if i-cache is not present */
}
z_arc_v2_aux_reg_write(_ARC_V2_IC_IVIC, 0);
__builtin_arc_nop();
z_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, 1);
}
/**
* @brief Invalidate the data cache if present
*
* For those ARC CPUs that have a data cache present,
* invalidate the data cache.
*/
static void invalidate_dcache(void)
{
unsigned int val;
val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
val &= 0xff; /* version field */
if (val == 0) {
return; /* skip if d-cache is not present */
}
z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 1);
}
#endif
#ifdef CONFIG_ISA_ARCV3
/* Enable the cluster's shared cache (SCM) if present: bail out early when
 * the cluster version is too old or no SCM exists, otherwise disable,
 * invalidate, then enable the cache.
 *
 * NOTE: it will be called from early C code - we must NOT use global / static variables in it!
 */
static void arc_cluster_scm_enable(void)
{
	unsigned int cluster_version;

	/* Check that we have cluster and its version is supported */
	cluster_version = z_arc_v2_aux_reg_read(_ARC_REG_CLN_BCR) & _ARC_CLN_BCR_VER_MAJOR_MASK;
	if (cluster_version < _ARC_REG_CLN_BCR_VER_MAJOR_ARCV3_MIN) {
		return;
	}

	/* Check that we have shared cache in cluster */
	if (!(z_arc_v2_aux_reg_read(_ARC_CLNR_BCR_0) & _ARC_CLNR_BCR_0_HAS_SCM)) {
		return;
	}

	/* Disable SCM, just in case. */
	arc_cln_write_reg_nolock(ARC_CLN_CACHE_STATUS, 0);

	/* Invalidate SCM before enabling. */
	arc_cln_write_reg_nolock(ARC_CLN_CACHE_CMD,
				 ARC_CLN_CACHE_CMD_OP_REG_INV | ARC_CLN_CACHE_CMD_INCR);
	/* busy-wait until the invalidate command completes */
	while (arc_cln_read_reg_nolock(ARC_CLN_CACHE_STATUS) & ARC_CLN_CACHE_STATUS_BUSY)
		;

	arc_cln_write_reg_nolock(ARC_CLN_CACHE_STATUS, ARC_CLN_CACHE_STATUS_EN);
}
#endif /* CONFIG_ISA_ARCV3 */
#ifdef __CCAC__
/* Section bounds emitted by the MetaWare linker. */
extern char __device_states_start[];
extern char __device_states_end[];
/**
 * @brief Clear device_states section
 *
 * This routine clears the device_states section,
 * as MW compiler marks the section with NOLOAD flag.
 */
static void dev_state_zero(void)
{
	z_early_memset(__device_states_start, 0, __device_states_end - __device_states_start);
}
#endif
extern FUNC_NORETURN void z_cstart(void);
/**
 * @brief Prepare to and run C code
 *
 * This routine prepares for the execution of and runs C code:
 * it enables the shared cluster cache (ARCv3), zeroes .bss (and the
 * MetaWare device_states section), copies .data for XIP images, then
 * enters the kernel via z_cstart(). Never returns.
 */
void z_prep_c(void)
{
#ifdef CONFIG_ISA_ARCV3
	/* must run before touching memory backed by the shared cache */
	arc_cluster_scm_enable();
#endif
	z_bss_zero();
#ifdef __CCAC__
	dev_state_zero();
#endif
	z_data_copy();
	z_cstart();
	CODE_UNREACHABLE;
}
``` | /content/code_sandbox/arch/arc/core/prep_c.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 887 |
```c
/*
*
*/
/**
* @file
* @brief ARCv2 ARC CONNECT driver
*
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/spinlock.h>
#include <kernel_internal.h>
/* Serializes all ARC CONNECT command/readback sequences. */
static struct k_spinlock arc_connect_spinlock;

/* Generate an inter-core interrupt to the target core
 *
 * @param core Index of the core to interrupt.
 */
void z_arc_connect_ici_generate(uint32_t core)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_GENERATE_IRQ, core);
	}
}

/* Acknowledge the inter-core interrupt raised by core
 *
 * @param core Index of the core whose ICI is being acknowledged.
 */
void z_arc_connect_ici_ack(uint32_t core)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_GENERATE_ACK, core);
	}
}
/* Read the inter-core interrupt status for @core.
 * Returns the raw command readback value.
 */
uint32_t z_arc_connect_ici_read_status(uint32_t core)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_READ_STATUS, core);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/* Check the source of pending inter-core interrupts.
 * Returns a bitmask of originating cores (raw readback value).
 */
uint32_t z_arc_connect_ici_check_src(void)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_CHECK_SOURCE, 0);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/* Acknowledge (and thereby clear) every pending inter-core interrupt
 * directed at this core.
 */
void z_arc_connect_ici_clear(void)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		uint32_t pending;

		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_CHECK_SOURCE, 0);
		pending = z_arc_connect_cmd_readback(); /* bitmask: 1,2,4,8... */

		/*
		 * In rare cases, multiple concurrent ICIs sent to the same
		 * target may be coalesced by MCIP into one asserted IRQ, so
		 * @pending can have several bits set rather than just one.
		 */
		while (pending != 0) {
			uint32_t src = find_lsb_set(pending) - 1;

			z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_GENERATE_ACK,
					  src);
			pending &= ~(1U << src);
		}
	}
}
/* Reset the cores selected by bitmask @core_mask via the debug unit */
void z_arc_connect_debug_reset(uint32_t core_mask)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_RESET,
			0, core_mask);
	}
}
/* Halt the cores selected by bitmask @core_mask via the debug unit */
void z_arc_connect_debug_halt(uint32_t core_mask)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_HALT,
			0, core_mask);
	}
}
/* Resume (run) the cores selected by bitmask @core_mask via the debug unit */
void z_arc_connect_debug_run(uint32_t core_mask)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_RUN,
			0, core_mask);
	}
}
/* Program the debug MASK register (@mask) for the cores in @core_mask */
void z_arc_connect_debug_mask_set(uint32_t core_mask, uint32_t mask)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_SET_MASK,
			mask, core_mask);
	}
}
/* Read back the debug MASK register for the cores in @core_mask.
 * Returns the raw readback value.
 */
uint32_t z_arc_connect_debug_mask_read(uint32_t core_mask)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_READ_MASK,
			0, core_mask);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/*
 * Select the cores (bitmask @core_mask) that should be halted if the core
 * issuing the command is halted.
 */
void z_arc_connect_debug_select_set(uint32_t core_mask)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_SET_SELECT,
			0, core_mask);
	}
}
/* Read the debug SELECT value (core bitmask) previously programmed */
uint32_t z_arc_connect_debug_select_read(void)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_SELECT, 0);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/* Read the halt/run status of all cores in the system (raw readback) */
uint32_t z_arc_connect_debug_en_read(void)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_EN, 0);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/* Read the last debug command sent (raw readback value) */
uint32_t z_arc_connect_debug_cmd_read(void)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_CMD, 0);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/* Read the value of the internal MCD_CORE register (raw readback) */
uint32_t z_arc_connect_debug_core_read(void)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_CORE, 0);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/* Clear (reset to zero) the global free running counter */
void z_arc_connect_gfrc_clear(void)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_CLEAR, 0);
	}
}
/* Read total 64 bits of global free running counter */
uint64_t z_arc_connect_gfrc_read(void)
{
uint32_t low;
uint32_t high;
uint32_t key;
/*
* each core has its own arc connect interface, i.e.,
* CMD/READBACK. So several concurrent commands to ARC
* connect are of if they are trying to access different
* sub-components. For GFRC, HW allows simultaneously accessing to
* counters. So an irq lock is enough.
*/
key = arch_irq_lock();
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_LO, 0);
low = z_arc_connect_cmd_readback();
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_HI, 0);
high = z_arc_connect_cmd_readback();
arch_irq_unlock(key);
return (((uint64_t)high) << 32) | low;
}
/* Enable (start) the global free running counter */
void z_arc_connect_gfrc_enable(void)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_ENABLE, 0);
	}
}
/* Disable (stop) the global free running counter */
void z_arc_connect_gfrc_disable(void)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_DISABLE, 0);
	}
}
/* Program the GFRC CORE register with the core bitmask @core_mask.
 * (The previous comment here wrongly said "Disable global free running
 * counter" - this issues GFRC_SET_CORE, not GFRC_DISABLE.)
 */
void z_arc_connect_gfrc_core_set(uint32_t core_mask)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_GFRC_SET_CORE,
			0, core_mask);
	}
}
/* Read the GFRC HALT register (raw readback value).
 * (The previous comment here wrongly described this as setting cores to
 * halt the counter - this issues GFRC_READ_HALT, a read.)
 */
uint32_t z_arc_connect_gfrc_halt_read(void)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_HALT, 0);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/* Read the internal CORE register of the GFRC (raw readback value) */
uint32_t z_arc_connect_gfrc_core_read(void)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_CORE, 0);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/* Enable the interrupt distribution unit (IDU) */
void z_arc_connect_idu_enable(void)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_ENABLE, 0);
	}
}
/* Disable the interrupt distribution unit (IDU) */
void z_arc_connect_idu_disable(void)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_DISABLE, 0);
	}
}
/* Read the enable status of the interrupt distribution unit (raw readback) */
uint32_t z_arc_connect_idu_read_enable(void)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_ENABLE, 0);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
/*
 * Program the triggering mode and distribution mode of the specified
 * common interrupt: the distribution mode occupies the low bits and the
 * trigger mode is shifted up by 4, matching the IDU MODE register layout.
 */
void z_arc_connect_idu_set_mode(uint32_t irq_num,
	uint16_t trigger_mode, uint16_t distri_mode)
{
	uint32_t mode = distri_mode | (trigger_mode << 4);

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_MODE,
			irq_num, mode);
	}
}
/* Read the internal MODE register of the specified common interrupt */
uint32_t z_arc_connect_idu_read_mode(uint32_t irq_num)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_MODE, irq_num);
		ret = z_arc_connect_cmd_readback();
	}

	/* raw readback value of the MODE register */
	return ret;
}
/*
 * Set the target cores (bitmask @core_mask) to receive the specified
 * common interrupt when it is triggered.
 */
void z_arc_connect_idu_set_dest(uint32_t irq_num, uint32_t core_mask)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_DEST,
			irq_num, core_mask);
	}
}
/* Read the internal DEST register of the specified common interrupt */
uint32_t z_arc_connect_idu_read_dest(uint32_t irq_num)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_DEST, irq_num);
		ret = z_arc_connect_cmd_readback();
	}

	/* raw readback value of the DEST register */
	return ret;
}
/* Assert (software-generate) the specified common interrupt */
void z_arc_connect_idu_gen_cirq(uint32_t irq_num)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_GEN_CIRQ, irq_num);
	}
}
/* Acknowledge the specified common interrupt */
void z_arc_connect_idu_ack_cirq(uint32_t irq_num)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_ACK_CIRQ, irq_num);
	}
}
/* Read the internal STATUS register of the specified common interrupt */
uint32_t z_arc_connect_idu_check_status(uint32_t irq_num)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_STATUS, irq_num);
		ret = z_arc_connect_cmd_readback();
	}

	/* raw readback value of the STATUS register */
	return ret;
}
/* Read the internal SOURCE register of the specified common interrupt */
uint32_t z_arc_connect_idu_check_source(uint32_t irq_num)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_SOURCE, irq_num);
		ret = z_arc_connect_cmd_readback();
	}

	/* raw readback value of the SOURCE register */
	return ret;
}
/* Mask or unmask the specified common interrupt (@mask is the raw MASK
 * register value to program).
 */
void z_arc_connect_idu_set_mask(uint32_t irq_num, uint32_t mask)
{
	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_MASK,
			irq_num, mask);
	}
}
/* Read the internal MASK register of the specified common interrupt */
uint32_t z_arc_connect_idu_read_mask(uint32_t irq_num)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_MASK, irq_num);
		ret = z_arc_connect_cmd_readback();
	}

	/* raw readback value of the MASK register */
	return ret;
}
/*
 * Check if this is the first-acknowledging core for the common interrupt,
 * when the IDU is programmed in first-acknowledged mode.
 * Returns the raw CHECK_FIRST readback value.
 */
uint32_t z_arc_connect_idu_check_first(uint32_t irq_num)
{
	uint32_t ret = 0;

	K_SPINLOCK(&arc_connect_spinlock) {
		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_FIRST, irq_num);
		ret = z_arc_connect_cmd_readback();
	}

	return ret;
}
``` | /content/code_sandbox/arch/arc/core/arc_connect.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,737 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <kernel_tls.h>
#include <zephyr/sys/util.h>
#ifdef __CCAC__
extern char _arcmwdt_tls_start[];
extern char _arcmwdt_tls_size[];
/* Reserve and initialize the MWDT TLS area at the top of @new_thread's
 * stack; returns the number of bytes reserved (aligned to the stack
 * pointer alignment).
 */
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
{
	/* _arcmwdt_tls_size is a linker symbol: its address encodes the size */
	size_t tls_size = (size_t)_arcmwdt_tls_size;
	size_t tls_size_aligned = ROUND_UP(tls_size, ARCH_STACK_PTR_ALIGN);

	/* copy the TLS initialization image just below the stack top */
	stack_ptr -= tls_size_aligned;
	memcpy(stack_ptr, _arcmwdt_tls_start, tls_size);

	new_thread->tls = POINTER_TO_UINT(stack_ptr);

	return tls_size_aligned;
}
/* MWDT runtime hook: return the TLS base address of the current thread */
void *_Preserve_flags _mwget_tls(void)
{
	return (void *)(_current->tls);
}
#else
/* GCC variant: set up the TLS area at the top of @new_thread's stack and
 * return the number of bytes reserved for it.
 */
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
{
	/*
	 * TLS area for ARC has some data fields following by
	 * thread data and bss. These fields are supposed to be
	 * used by toolchain and OS TLS code to aid in locating
	 * the TLS data/bss. Zephyr currently has no use for
	 * this so we can simply skip these. However, since GCC
	 * is generating code assuming these fields are there,
	 * we simply skip them when setting the TLS pointer.
	 */

	/*
	 * Since we are populating things backwards,
	 * setup the TLS data/bss area first.
	 */
	stack_ptr -= z_tls_data_size();
	z_tls_copy(stack_ptr);

	/* Skip two pointers due to toolchain */
	stack_ptr -= sizeof(uintptr_t) * 2;

	/*
	 * Set thread TLS pointer which is used in
	 * context switch to point to TLS area.
	 */
	new_thread->tls = POINTER_TO_UINT(stack_ptr);

	return (z_tls_data_size() + (sizeof(uintptr_t) * 2));
}
#endif
``` | /content/code_sandbox/arch/arc/core/tls.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 422 |
```unknown
/*
*
*/
/**
* @file
* @brief Wrapper around ISRs with logic for context switching
*
*
* Wrapper installed in vector table for handling dynamic interrupts that accept
* a parameter.
*/
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/cpu.h>
#include <swap_macros.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
GTEXT(_isr_wrapper)
GTEXT(_isr_demux)
#if defined(CONFIG_PM)
GTEXT(pm_system_resume)
#endif
/*
The symbols in this file are not real functions, and neither are
_rirq_enter/_firq_enter: they are jump points.
The flow is the following:
ISR -> _isr_wrapper -- + -> _rirq_enter -> _isr_demux -> ISR -> _rirq_exit
|
+ -> _firq_enter -> _isr_demux -> ISR -> _firq_exit
Context switch explanation:
The context switch code is spread in these files:
isr_wrapper.s, switch.s, swap_macros.h, fast_irq.s, regular_irq.s
IRQ stack frame layout:
high address
status32
pc
lp_count
lp_start
lp_end
blink
r13
...
sp -> r0
low address
The context switch code adopts this standard so that it is easier to follow:
- r2 contains _kernel.current ASAP, and the incoming thread when we
transition from outgoing thread to incoming thread
Not loading _kernel into r0 allows loading _kernel without stomping on
the parameter in r0 in arch_switch().
ARCv2 processors have two kinds of interrupts: fast (FIRQ) and regular. The
official documentation calls the regular interrupts 'IRQs', but the internals
of the kernel call them 'RIRQs' to differentiate from the 'irq' subsystem,
which is the interrupt API/layer of abstraction.
For FIRQ, there are two cases, depending upon the value of
CONFIG_RGF_NUM_BANKS.
CONFIG_RGF_NUM_BANKS==1 case:
Scratch registers are pushed onto the current stack just as they are with
RIRQ. See the above frame layout. Unlike RIRQ, the status32_p0 and ilink
registers are where status32 and the program counter are located, so these
need to be pushed.
CONFIG_RGF_NUM_BANKS!=1 case:
The FIRQ handler has its own register bank for general purpose registers,
and thus it doesn't have to save them on a stack. The 'loop' registers
(lp_count, lp_end, lp_start), however, are not present in the
second bank. The handler saves these special registers in unused callee saved
registers (to avoid stack accesses). It is possible to register a FIRQ
handler that operates outside of the kernel, but care must be taken to only
use instructions that only use the banked registers.
The kernel is able to handle transitions to and from FIRQ, RIRQ and threads.
The contexts are saved 'lazily': the minimum amount of work is
done upfront, and the rest is done when needed:
o RIRQ
All needed registers to run C code in the ISR are saved automatically
on the outgoing thread's stack: loop, status32, pc, and the caller-
saved GPRs. That stack frame layout is pre-determined. If returning
to a thread, the stack is popped and no registers have to be saved by
the kernel. If a context switch is required, the callee-saved GPRs
are then saved in the thread's stack.
o FIRQ
	First, a FIRQ can be interrupting a lower-priority RIRQ: if this is
	the case, the FIRQ does not take a scheduling decision and leaves it
	to the RIRQ to handle. This limits the amount of code that has to run
	at interrupt-level.
CONFIG_RGF_NUM_BANKS==1 case:
Registers are saved on the stack frame just as they are for RIRQ.
Context switch can happen just as it does in the RIRQ case, however,
if the FIRQ interrupted a RIRQ, the FIRQ will return from interrupt
and let the RIRQ do the context switch. At entry, one register is
needed in order to have code to save other registers. r0 is saved
first in the stack and restored later
CONFIG_RGF_NUM_BANKS!=1 case:
During early initialization, the sp in the 2nd register bank is made to
refer to _firq_stack. This allows for the FIRQ handler to use its own
stack. GPRs are banked, loop registers are saved in unused callee saved
regs upon interrupt entry. If returning to a thread, loop registers are
restored and the CPU switches back to bank 0 for the GPRs. If a context
switch is needed, at this point only are all the registers saved.
First, a stack frame with the same layout as the automatic RIRQ one is
created and then the callee-saved GPRs are saved in the stack.
status32_p0 and ilink are saved in this case, not status32 and pc.
To create the stack frame, the FIRQ handling code must first go back to
using bank0 of registers, since that is where the registers containing
the exiting thread are saved. Care must be taken not to touch any
register before saving them: the only one usable at that point is the
stack pointer.
o coop
When a coop context switch is done, the callee-saved registers are
saved in the stack. The other GPRs do not need to be saved, since the
compiler has already placed them on the stack.
For restoring the contexts, there are six cases. In all cases, the
callee-saved registers of the incoming thread have to be restored. Then, there
are specifics for each case:
From coop:
o to coop
Do a normal function call return.
o to any irq
The incoming interrupted thread has an IRQ stack frame containing the
caller-saved registers that has to be popped. status32 has to be
restored, then we jump to the interrupted instruction.
From FIRQ:
When CONFIG_RGF_NUM_BANKS==1, context switch is done as it is for RIRQ.
When CONFIG_RGF_NUM_BANKS!=1, the processor is put back to using bank0,
not bank1 anymore, because it had to save the outgoing context from
bank0, and now has to load the incoming one into bank0.
o to coop
The address of the returning instruction from arch_switch() is loaded
in ilink and the saved status32 in status32_p0.
o to any irq
The IRQ has saved the caller-saved registers in a stack frame, which
must be popped, and status32 and pc loaded in status32_p0 and ilink.
From RIRQ:
o to coop
The interrupt return mechanism in the processor expects a stack frame,
but the outgoing context did not create one. A fake one is created
here, with only the relevant values filled in: pc, status32.
There is a discrepancy between the ABI from the ARCv2 docs,
including the way the processor pushes GPRs in pairs in the IRQ stack
frame, and the ABI GCC uses. r13 should be a callee-saved register,
but GCC treats it as caller-saved. This means that the processor pushes
it in the stack frame along with r12, but the compiler does not save it
before entering a function. So, it is saved as part of the callee-saved
registers, and restored there, but the processor restores it _a second
time_ when popping the IRQ stack frame. Thus, the correct value must
also be put in the fake stack frame when returning to a thread that
context switched out cooperatively.
o to any irq
Both types of IRQs already have an IRQ stack frame: simply return from
interrupt.
*/
SECTION_FUNC(TEXT, _isr_wrapper)
#ifdef CONFIG_ARC_FIRQ
#if CONFIG_RGF_NUM_BANKS == 1
	/* free r0 here, use r0 to check whether irq is firq.
	 * for rirq, as sp will not change and r0 already saved, this action
	 * in fact is useless
	 * for firq, r0 will be restored later
	 */
	push r0
#endif
	/* find the priority level of the active interrupt: the lowest set
	 * bit of AUX_IRQ_ACT is the level being serviced
	 */
	lr r0, [_ARC_V2_AUX_IRQ_ACT]
	ffs r0, r0
	cmp r0, 0 /* Z flag set => priority 0, i.e. FIRQ */
#if CONFIG_RGF_NUM_BANKS == 1
	bnz rirq_path
	pop r0
	/* 1-register bank FIRQ handling must save registers on stack */
	_create_irq_stack_frame
	/* FIRQ keeps status32/pc in STATUS32_P0/ilink; store them into the
	 * newly built frame so exit code sees a regular IRQ stack layout
	 */
	lr r0, [_ARC_V2_STATUS32_P0]
	st_s r0, [sp, ___isf_t_status32_OFFSET]
	st ilink, [sp, ___isf_t_pc_OFFSET]
	mov_s r3, _firq_exit
	mov_s r2, _firq_enter
	j_s [r2]
rirq_path:
	/* drop the scratch r0 pushed above: the RIRQ prologue saved r0 */
	add sp, sp, 4
	mov_s r3, _rirq_exit
	mov_s r2, _rirq_enter
	j_s [r2]
#else
	/* choose FIRQ or RIRQ entry/exit stubs based on the Z flag */
	mov.z r3, _firq_exit
	mov.z r2, _firq_enter
	mov.nz r3, _rirq_exit
	mov.nz r2, _rirq_enter
	j_s [r2]
#endif
#else
	/* no FIRQ support: every interrupt takes the regular IRQ path */
	MOVR r3, _rirq_exit
	MOVR r2, _rirq_enter
	j_s [r2]
#endif
/* r0, r1, and r3 will be used in exit_tickless_idle macro */
.macro exit_tickless_idle
#if defined(CONFIG_PM)
	clri r0 /* do not interrupt exiting tickless idle operations */
	MOVR r1, _kernel
	/* NOTE(review): r3 is compared against 0 as if it held the idle
	 * duration, but no load from _kernel_offset_to_idle into r3 is
	 * visible in this file - confirm the caller guarantees r3's value.
	 */
	breq r3, 0, _skip_pm_save_idle_exit
	st 0, [r1, _kernel_offset_to_idle] /* zero idle duration */
	PUSHR blink
	jl pm_system_resume
	POPR blink
_skip_pm_save_idle_exit:
	seti r0 /* restore the interrupt state saved by clri above */
#endif
.endm
/* when getting here, r3 contains the interrupt exit stub to call */
SECTION_FUNC(TEXT, _isr_demux)
	PUSHR r3
	/* according to ARCv2 ISA, r25, r30, r58, r59 are caller-saved
	 * scratch registers, possibly used by interrupt handlers
	 */
	PUSHR r25
	PUSHR r30
#ifdef CONFIG_ARC_HAS_ACCL_REGS
	PUSHR r58
#ifndef CONFIG_64BIT
	PUSHR r59
#endif /* !CONFIG_64BIT */
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
	bl z_sched_usage_stop
#endif

#ifdef CONFIG_TRACING_ISR
	bl sys_trace_isr_enter
#endif

	/* cannot be done before this point because we must be able to run C */
	exit_tickless_idle

	/* ICAUSE holds the vector number of the interrupt being serviced */
	lr r0, [_ARC_V2_ICAUSE]
	/* handle software triggered interrupt: if the active cause matches
	 * AUX_IRQ_HINT, clear the hint to deassert the software interrupt
	 */
	lr r3, [_ARC_V2_AUX_IRQ_HINT]
	brne r3, r0, irq_hint_handled
	sr 0, [_ARC_V2_AUX_IRQ_HINT]
irq_hint_handled:

	/* the first 16 vectors are not in _sw_isr_table; convert the vector
	 * number into a zero-based table index
	 */
	sub r0, r0, 16

	MOVR r1, _sw_isr_table
	/* SW ISR table entries are 8-bytes wide for 32bit ISA and
	 * 16-bytes wide for 64bit ISA */
	ASLR r0, r0, (ARC_REGSHIFT + 1)
	ADDR r0, r1, r0
	/* ISR into r1 */
	LDR r1, r0, ARC_REGSZ
	jl_s.d [r1]
	/* delay slot: ISR parameter into r0 */
	LDR r0, r0

#ifdef CONFIG_TRACING_ISR
	bl sys_trace_isr_exit
#endif

#ifdef CONFIG_ARC_HAS_ACCL_REGS
#ifndef CONFIG_64BIT
	POPR r59
#endif /* !CONFIG_64BIT */
	POPR r58
#endif
	POPR r30
	POPR r25

	/* back from ISR, jump to exit stub */
	POPR r3
	j_s [r3]
	nop_s
``` | /content/code_sandbox/arch/arc/core/isr_wrapper.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,646 |
```unknown
/*
*
*/
/**
* @file
* @brief CPU power management
*
* CPU power management routines.
*/
#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
GTEXT(arch_cpu_idle)
GTEXT(arch_cpu_atomic_idle)
GDATA(z_arc_cpu_sleep_mode)

/* operand for the SLEEP instruction used below; 32-bit, zero-initialized */
SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
	.align 4
	.word 0
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
/*
 * @brief Put the CPU in low-power mode
 *
 * This function always exits with interrupts unlocked.
 *
 * void arch_cpu_idle(void)
 */
SECTION_FUNC(TEXT, arch_cpu_idle)

#ifdef CONFIG_TRACING
	PUSHR blink
	jl sys_trace_idle
	POPR blink
#endif
	/* z_arc_cpu_sleep_mode is 32 bit regardless of platform bitness */
	ld r1, [z_arc_cpu_sleep_mode]
	or r1, r1, (1 << 4) /* set IRQ-enabled bit so sleep is interruptible */
	sleep r1
	j_s [blink]
	nop
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
/*
 * @brief Put the CPU in low-power mode, entered with IRQs locked
 *
 * This function exits with interrupts restored to <key>.
 *
 * void arch_cpu_atomic_idle(unsigned int key)
 */
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)

#ifdef CONFIG_TRACING
	PUSHR blink
	jl sys_trace_idle
	POPR blink
#endif
	/* z_arc_cpu_sleep_mode is 32 bit regardless of platform bitness */
	ld r1, [z_arc_cpu_sleep_mode]
	or r1, r1, (1 << 4) /* set IRQ-enabled bit */
	sleep r1
	/* delay slot: restore the interrupt state from the key in r0 */
	j_s.d [blink]
	seti r0
#endif
``` | /content/code_sandbox/arch/arc/core/cpu_idle.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 415 |
```unknown
/*
*
*/
/**
* @file
* @brief Handling of transitions to-and-from regular IRQs (RIRQ)
*
* This module implements the code for handling entry to and exit from regular
* IRQs.
*
* See isr_wrapper.S for details.
*/
#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <swap_macros.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
GTEXT(_rirq_newthread_switch)
/*
===========================================================
RETURN FROM INTERRUPT TO COOPERATIVE THREAD
===========================================================
That's a special case because:
1. We return from IRQ handler to a cooperative thread
2. During IRQ handling context switch did happen
3. Returning to a thread which previously gave control
to another thread because of:
- Calling k_sleep()
- Explicitly yielding
- Bumping into locked sync primitive etc
What (3) means is before passing control to another thread our thread
in question:
a. Stashed all precious caller-saved registers on its stack
b. Pushed return address to the top of the stack as well
That's how thread's stack looks like right before jumping to another thread:
----------------------------->8---------------------------------
PRE-CONTEXT-SWITCH STACK
lower_addr, let's say: 0x1000
--------------------------------------
SP -> | Return address; PC (Program Counter), in fact value taken from
| BLINK register in arch_switch()
--------------------------------------
| STATUS32 value, we explicitly save it here for later usage, read-on
--------------------------------------
| Caller-saved registers: some of R0-R12
--------------------------------------
|...
|...
higher_addr, let's say: 0x2000
----------------------------->8---------------------------------
When context gets switched the kernel saves callee-saved registers in the
thread's stack right on top of pre-switch contents so that's what we have:
----------------------------->8---------------------------------
POST-CONTEXT-SWITCH STACK
lower_addr, let's say: 0x1000
--------------------------------------
SP -> | Callee-saved registers: see struct _callee_saved_stack{}
| |- R13
| |- R14
| | ...
| \- FP
| ...
--------------------------------------
| Return address; PC (Program Counter)
--------------------------------------
| STATUS32 value
--------------------------------------
| Caller-saved registers: some of R0-R12
--------------------------------------
|...
|...
higher_addr, let's say: 0x2000
----------------------------->8---------------------------------
So how do we return in such a complex scenario.
First we restore callee-saved regs with help of _load_callee_saved_regs().
Now we're back to PRE-CONTEXT-SWITCH STACK (see above).
Logically our next step is to load return address from the top of the stack
and jump to that address to continue execution of the desired thread, but
we're still in interrupt handling mode and the only way to return to normal
execution mode is to execute "rtie" instruction. And here we need to deal
with peculiarities of return from IRQ on ARCv2 cores.
Instead of simple jump to a return address stored in the tip of thread's stack
(with subsequent interrupt enable) ARCv2 core additionally automatically
	restores some registers from stack. The most important ones are
	PC ("Program Counter"), which holds the address of the next instruction
	to execute, and STATUS32, which holds important flags including global
	interrupt enable, zero, carry etc.
To make things worse depending on ARC core configuration and run-time setup
of certain features different set of registers will be restored.
Typically those same registers are automatically saved on stack on entry to
an interrupt, but remember we're returning to the thread which was
not interrupted by interrupt and so on its stack there're no automatically
saved registers, still inevitably on RTIE execution register restoration
will happen. So if we do nothing special we'll end-up with that:
----------------------------->8---------------------------------
lower_addr, let's say: 0x1000
--------------------------------------
# | Return address; PC (Program Counter)
| --------------------------------------
| | STATUS32 value
| --------------------------------------
|
sizeof(_irq_stack_frame)
|
| | Caller-saved registers: R0-R12
V --------------------------------------
|...
SP -> | < Some data on thread's stack>
|...
higher_addr, let's say: 0x2000
----------------------------->8---------------------------------
	I.e. we'll go much deeper down the stack, past the needed return address,
	read some value from an unexpected location in the stack and try to jump
	there. Nobody knows where we'd end up then.
	To work around that problem we need to mimic the existence of an IRQ stack
	frame, of which we really only need the return address, obviously to return
	where we need to. For that we just shift SP so that it points sizeof(_irq_stack_frame)
above like that:
----------------------------->8---------------------------------
lower_addr, let's say: 0x1000
SP -> |
A | < Some unrelated data >
| |
|
sizeof(_irq_stack_frame)
|
| --------------------------------------
| | Return address; PC (Program Counter)
| --------------------------------------
# | STATUS32 value
--------------------------------------
| Caller-saved registers: R0-R12
--------------------------------------
|...
| < Some data on thread's stack>
|...
higher_addr, let's say: 0x2000
----------------------------->8---------------------------------
Indeed R0-R13 "restored" from IRQ stack frame will contain garbage but
it makes no difference because we're returning to execution of code as if
we're returning from yet another function call and so we will restore
all needed registers from the stack.
One other important remark here is R13.
CPU hardware automatically save/restore registers in pairs and since we
wanted to save/restore R12 in IRQ stack frame as a caller-saved register we
just happen to do that for R13 as well. But given compiler treats it as
a callee-saved register we save/restore it separately in _callee_saved_stack
structure. And when we restore callee-saved registers from stack we among
other registers recover R13. But later on return from IRQ with RTIE
instruction, R13 will be "restored" again from fake IRQ stack frame and
if we don't copy correct R13 value to fake IRQ stack frame R13 value
will be corrupted.
*/
/**
 * @brief Work to be done before handing control to an IRQ ISR
 *
 * The processor pushes automatically all registers that need to be saved.
 * However, since the processor always runs at kernel privilege there is no
 * automatic switch to the IRQ stack: this must be done in software.
 *
 * Assumption by _isr_demux: r3 is untouched by _rirq_enter.
 */
SECTION_FUNC(TEXT, _rirq_enter)

	/* the ISR will be handled on a separate interrupt stack, so stack
	 * checking must be disabled for it, or an exception would be raised
	 */
	_disable_stack_checking r2
	clri

	/* if this is the outermost interrupt, switch sp to the per-CPU
	 * interrupt stack; either way the previous sp (captured in r0, also
	 * in the branch delay slot) is pushed so _rirq_exit can restore it
	 */
	_check_and_inc_int_nest_counter r0, r1

	bne.d rirq_nest
	MOVR r0, sp

	_get_curr_cpu_irq_stack sp
rirq_nest:
	PUSHR r0

	seti
	j _isr_demux
/**
 * @brief Work to be done when exiting an IRQ
 *
 * Pops the stack pointer saved by _rirq_enter, and either returns straight
 * to the interrupted context (nested interrupt, or no reschedule needed)
 * or performs a context switch to the next ready thread.
 */
SECTION_FUNC(TEXT, _rirq_exit)

	clri

	/* restore the sp saved by _rirq_enter (leave the interrupt stack) */
	POPR sp

	_dec_int_nest_counter r0, r1

	/* still inside another interrupt: plain return, no rescheduling */
	_check_nest_int_by_irq_act r0, r1

	jne _rirq_no_switch

	/* sp is struct k_thread **old of z_arc_switch_in_isr which is a wrapper of
	 * z_get_next_switch_handle. r0 contains the 1st thread in ready queue. If it isn't NULL,
	 * then do switch to this thread.
	 */
	_get_next_switch_handle

	CMPR r0, 0
	beq _rirq_no_switch

#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* here need to remember SEC_STAT.IRM bit */
	lr r3, [_ARC_V2_SEC_STAT]
	push_s r3
#endif

	/* r2 is old thread
	 * _thread_arch.relinquish_cause is 32 bit regardless of platform bitness
	 */
	_st32_huge_offset _CAUSE_RIRQ, r2, _thread_offset_to_relinquish_cause, r1

	_irq_store_old_thread_callee_regs

	/* mov new thread (r0) to r2 */
	MOVR r2, r0

	/* _rirq_newthread_switch required by exception handling */
.align 4
_rirq_newthread_switch:

	_load_new_thread_callee_regs

	/* dispatch on how the incoming thread last relinquished the CPU */
	breq r3, _CAUSE_RIRQ, _rirq_switch_from_rirq
	nop_s
	breq r3, _CAUSE_FIRQ, _rirq_switch_from_firq
	nop_s

	/* fall through */
.align 4
_rirq_switch_from_coop:

	/* for a cooperative switch, it's not in irq, so
	 * need to set some regs for irq return
	 */
	_set_misc_regs_irq_switch_from_coop

	/*
	 * See verbose explanation of
	 * RETURN FROM INTERRUPT TO COOPERATIVE THREAD above
	 */

	/* carve fake stack */
	SUBR sp, sp, ___isf_t_pc_OFFSET

#ifdef CONFIG_ARC_HAS_ZOL
	/* reset zero-overhead loops */
	STR 0, sp, ___isf_t_lp_end_OFFSET
#endif /* CONFIG_ARC_HAS_ZOL */

	/*
	 * r13 is part of both the callee and caller-saved register sets because
	 * the processor is only able to save registers in pair in the regular
	 * IRQ prologue. r13 thus has to be set to its correct value in the IRQ
	 * stack frame.
	 */
	STR r13, sp, ___isf_t_r13_OFFSET

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	PUSHR blink
	bl z_thread_mark_switched_in
	POPR blink
#endif
	/* stack now has the IRQ stack frame layout, pointing to sp */
	/* rtie will pop the rest from the stack */
	rtie

.align 4
_rirq_switch_from_firq:
_rirq_switch_from_rirq:

	_set_misc_regs_irq_switch_from_irq

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	PUSHR blink
	bl z_thread_mark_switched_in
	POPR blink
#endif
_rirq_no_switch:
	rtie
``` | /content/code_sandbox/arch/arc/core/regular_irq.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,335 |
```c
/*
*
*/
/**
* @file
* @brief codes required for ARC multicore and Zephyr smp support
*
*/
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <ksched.h>
#include <ipi.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <arc_irq_offload.h>
/* Per-CPU startup descriptor, published by the master core before the
 * slave core is woken (see arch_cpu_start / arch_secondary_cpu_init).
 */
volatile struct {
	arch_cpustart_t fn;	/* entry function the slave core will call */
	void *arg;		/* argument handed to fn */
} arc_cpu_init[CONFIG_MP_MAX_NUM_CPUS];

/*
 * arc_cpu_wake_flag is used to sync up master core and slave cores
 * Slave core will spin for arc_cpu_wake_flag until master core sets
 * it to the core id of slave core. Then, slave core clears it to notify
 * master core that it has woken up.
 */
volatile uint32_t arc_cpu_wake_flag;

/* initial stack pointer handed to the just-woken slave core; only valid
 * while arc_cpu_wake_flag holds that core's id
 */
volatile char *arc_cpu_sp;

/*
 * _curr_cpu is used to record the struct of _cpu_t of each cpu.
 * for efficient usage in assembly
 */
volatile _cpu_t *_curr_cpu[CONFIG_MP_MAX_NUM_CPUS];
/* Called from Zephyr initialization */
/*
 * Wake secondary CPU @cpu_num and have it enter @fn(@arg) on the given
 * stack. Statement order matters: the descriptor and stack pointer must
 * be published before arc_cpu_wake_flag releases the spinning slave.
 * Blocks until the slave clears the flag to acknowledge it is running.
 */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	_curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);

	/* publish entry point and argument before waking the core */
	arc_cpu_init[cpu_num].fn = fn;
	arc_cpu_init[cpu_num].arg = arg;

	/* set the initial sp of target sp through arc_cpu_sp
	 * arc_cpu_wake_flag will protect arc_cpu_sp that
	 * only one slave cpu can read it per time
	 */
	arc_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz;

	arc_cpu_wake_flag = cpu_num;

	/* wait for the slave cpu to start and clear the flag */
	while (arc_cpu_wake_flag != 0U) {
		;
	}
}
#ifdef CONFIG_SMP
/*
 * Add @cpu_num to the ARC-connect debug-select mask and configure the
 * debug unit so a halt condition on any selected core halts them all.
 */
static void arc_connect_debug_mask_update(int cpu_num)
{
	uint32_t mask = BIT(cpu_num);

	/*
	 * MDB debugger may modify debug_select and debug_mask registers on start, so we can't
	 * rely on debug_select reset value.
	 */
	if (cpu_num != ARC_MP_PRIMARY_CPU_ID) {
		mask |= z_arc_connect_debug_select_read();
	}

	z_arc_connect_debug_select_set(mask);

	/* Debugger halts cores at all conditions:
	 * ARC_CONNECT_CMD_DEBUG_MASK_H: Core global halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_AH: Actionpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_BH: Software breakpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_SH: Self halt.
	 */
	z_arc_connect_debug_mask_set(mask,
				     (ARC_CONNECT_CMD_DEBUG_MASK_H |
				      ARC_CONNECT_CMD_DEBUG_MASK_AH |
				      ARC_CONNECT_CMD_DEBUG_MASK_BH |
				      ARC_CONNECT_CMD_DEBUG_MASK_SH));
}
#endif
/* defined elsewhere; initializes this core's private interrupt controller */
void arc_core_private_intc_init(void);

/* the C entry of slave cores */
/*
 * Runs on the slave core right after the assembly boot path; brings up
 * the per-core interrupt hardware (SMP only) and then jumps into the
 * entry function published by arch_cpu_start(). Does not return.
 */
void arch_secondary_cpu_init(int cpu_num)
{
	arch_cpustart_t fn;

#ifdef CONFIG_SMP
	struct arc_connect_bcr bcr;

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(cpu_num);
	}

	z_irq_setup();

	arc_core_private_intc_init();

	arc_irq_offload_init_smp();

	/* enable the inter-core interrupt (ICI) on this core */
	z_arc_connect_ici_clear();
	z_irq_priority_set(DT_IRQN(DT_NODELABEL(ici)),
			   DT_IRQ(DT_NODELABEL(ici), priority), 0);
	irq_enable(DT_IRQN(DT_NODELABEL(ici)));
#endif
	/* call the function set by arch_cpu_start */
	fn = arc_cpu_init[cpu_num].fn;

	fn(arc_cpu_init[cpu_num].arg);
}
#ifdef CONFIG_SMP
/* ICI (inter-core interrupt) ISR: acknowledge the IPI, then invoke the
 * scheduler's IPI hook so this core re-evaluates what to run.
 */
static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	z_arc_connect_ici_clear();
	z_sched_ipi();
}
/*
 * Raise a scheduling IPI on every core selected in @cpu_bitmap.
 * A request targeting the current core is ignored by the hardware,
 * so no self-filtering is needed here.
 */
void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int cpu = 0U; cpu < num_cpus; cpu++) {
		if ((cpu_bitmap & BIT(cpu)) != 0U) {
			z_arc_connect_ici_generate(cpu);
		}
	}
}
/* Broadcast the scheduling IPI to all cores (the sender's own request
 * is ignored by hardware — see arch_sched_directed_ipi).
 */
void arch_sched_broadcast_ipi(void)
{
	arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
}
/*
 * Master-core SMP bring-up: wires the inter-core interrupt and the
 * global free-running counter via the ARC-connect block.
 *
 * @return 0 on success, -ENODEV if the ARC-connect block lacks the
 *         IPI unit or the global free-running counter.
 */
int arch_smp_init(void)
{
	struct arc_connect_bcr bcr;

	/* necessary master core init */
	_curr_cpu[0] = &(_kernel.cpus[0]);

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(ARC_MP_PRIMARY_CPU_ID);
	}

	if (bcr.ipi) {
		/* register ici interrupt, just need master core to register once */
		z_arc_connect_ici_clear();
		IRQ_CONNECT(DT_IRQN(DT_NODELABEL(ici)),
			    DT_IRQ(DT_NODELABEL(ici), priority),
			    sched_ipi_handler, NULL, 0);

		irq_enable(DT_IRQN(DT_NODELABEL(ici)));
	} else {
		__ASSERT(0,
			"ARC connect has no inter-core interrupt\n");
		return -ENODEV;
	}

	if (bcr.gfrc) {
		/* global free running count init */
		z_arc_connect_gfrc_enable();

		/* when all cores halt, gfrc halt */
		/* NOTE(review): (1 << arch_num_cpus()) is undefined behavior
		 * when arch_num_cpus() == 32; BIT_MASK() would be safer —
		 * confirm the supported maximum core count.
		 */
		z_arc_connect_gfrc_core_set((1 << arch_num_cpus()) - 1);
		z_arc_connect_gfrc_clear();
	} else {
		__ASSERT(0,
			"ARC connect has no global free running counter\n");
		return -ENODEV;
	}

	return 0;
}
#endif
``` | /content/code_sandbox/arch/arc/core/smp.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,300 |
```unknown
/*
*
*/
/**
* @file
* @brief Fault handlers for ARCv2
*
* Fault handlers for ARCv2 processors.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <swap_macros.h>
#include <zephyr/syscall.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
GTEXT(_Fault)
GTEXT(__reset)
GTEXT(__memory_error)
GTEXT(__instruction_error)
GTEXT(__ev_machine_check)
GTEXT(__ev_tlb_miss_i)
GTEXT(__ev_tlb_miss_d)
GTEXT(__ev_prot_v)
GTEXT(__ev_privilege_v)
GTEXT(__ev_swi)
GTEXT(__ev_trap)
GTEXT(__ev_extension)
GTEXT(__ev_div_zero)
GTEXT(__ev_dc_error)
GTEXT(__ev_maligned)
/*
 * Save the exception-time PC (ERET) and STATUS32 (ERSTATUS) — plus
 * SEC_STAT (ERSEC_STAT) when SecureShield is present — into the IRQ
 * stack frame currently pointed to by sp. Clobbers r0.
 */
.macro _save_exc_regs_into_stack
#ifdef CONFIG_ARC_HAS_SECURE
	/* ERSEC_STAT is IOW/RAZ in normal mode */
	lr r0,[_ARC_V2_ERSEC_STAT]
	st_s r0, [sp, ___isf_t_sec_stat_OFFSET]
#endif
	LRR r0, [_ARC_V2_ERET]
	STR r0, sp, ___isf_t_pc_OFFSET
	LRR r0, [_ARC_V2_ERSTATUS]
	STR r0, sp, ___isf_t_status32_OFFSET
.endm
/*
* The exception handling will use top part of interrupt stack to
* get smaller memory footprint, because exception is not frequent.
* To reduce the impact on interrupt handling, especially nested interrupt
* the top part of interrupt stack cannot be too large, so add a check
* here
*/
#if CONFIG_ARC_EXCEPTION_STACK_SIZE > (CONFIG_ISR_STACK_SIZE >> 1)
#error "interrupt stack size is too small"
#endif
/*
 * @brief Fault handler installed in the fault and reserved vectors
 *
 * All fault-class vectors funnel into _exc_entry, which builds an
 * exception frame on the reserved top portion of the interrupt stack
 * and calls the C handler _Fault(). On return, either the faulting
 * context is resumed (_exc_return_from_exc) or a thread switch is
 * performed via _rirq_newthread_switch.
 */
SECTION_SUBSEC_FUNC(TEXT,__fault,__memory_error)
SECTION_SUBSEC_FUNC(TEXT,__fault,__instruction_error)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_machine_check)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_tlb_miss_i)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_tlb_miss_d)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_prot_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_privilege_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_swi)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_extension)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_div_zero)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_dc_error)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_maligned)

_exc_entry:
	/*
	 * re-use the top part of interrupt stack as exception
	 * stack. If this top part is used by interrupt handling,
	 * and exception is raised, then here it's guaranteed that
	 * exception handling has necessary stack to use
	 */
	MOVR ilink, sp
	_get_curr_cpu_irq_stack sp
	SUBR sp, sp, (CONFIG_ISR_STACK_SIZE - CONFIG_ARC_EXCEPTION_STACK_SIZE)

	/*
	 * save caller saved registers
	 * this stack frame is set up in exception stack,
	 * not in the original sp (thread stack or interrupt stack).
	 * Because the exception may be raised by stack checking or
	 * mpu protect violation related to stack. If this stack frame
	 * is setup in original sp, double exception may be raised during
	 * _create_irq_stack_frame, which is unrecoverable.
	 */
	_create_irq_stack_frame

	_save_exc_regs_into_stack

	/* sp is parameter of _Fault */
	MOVR r0, sp
	/* ilink is the thread's original sp */
	MOVR r1, ilink
	jl _Fault

_exc_return:
	/* the exception cause must be fixed in exception handler when exception returns
	 * directly, or exception will be repeated.
	 *
	 * If thread switch is raised in exception handler, the context of old thread will
	 * not be saved, i.e., it cannot be recovered, because we don't know where the
	 * exception came from: thread context? irq context? nested irq context?
	 */
	_get_next_switch_handle

	BREQR r0, 0, _exc_return_from_exc

	/* Save old thread into switch handle which is required by z_sched_switch_spin which
	 * will be called during old thread abort.
	 */
	STR r2, r2, ___thread_t_switch_handle_OFFSET

	/* r2 <- next thread, as expected by _rirq_newthread_switch */
	MOVR r2, r0

#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/*
	 * sync up the ERSEC_STAT.ERM and SEC_STAT.IRM.
	 * use a fake interrupt return to simulate an exception turn.
	 * ERM and IRM record which mode the cpu should return, 1: secure
	 * 0: normal
	 */
	lr r3,[_ARC_V2_ERSEC_STAT]
	btst r3, 31
	bset.nz r3, r3, _ARC_V2_SEC_STAT_IRM_BIT
	bclr.z r3, r3, _ARC_V2_SEC_STAT_IRM_BIT
	sflag r3
#endif
	/* clear AE bit to forget this was an exception, and go to
	 * register bank0 (if exception is raised in firq with 2 reg
	 * banks, then we may be bank1)
	 */
#if defined(CONFIG_ARC_FIRQ) && CONFIG_RGF_NUM_BANKS != 1
	/* save r2 in ilink because of the possible following reg
	 * bank switch
	 */
	mov ilink, r2
#endif
	LRR r3, [_ARC_V2_STATUS32]
	ANDR r3, r3, (~(_ARC_V2_STATUS32_AE | _ARC_V2_STATUS32_RB(7)))
	kflag r3

	/* pretend lowest priority interrupt happened to use common handler
	 * if exception is raised in irq, i.e., _ARC_V2_AUX_IRQ_ACT !=0,
	 * ignore irq handling, we cannot return to irq handling which may
	 * raise exception again. The ignored interrupts will be re-triggered
	 * if not cleared, or re-triggered by interrupt sources, or just missed
	 */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	mov_s r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else
	MOVR r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
#endif

#ifdef CONFIG_ARC_NORMAL_FIRMWARE
	/* normal-world firmware cannot write AUX_IRQ_ACT directly; go
	 * through the secure-service trampoline (clobbers r0/r1/r6)
	 */
	push_s r2
	mov_s r0, _ARC_V2_AUX_IRQ_ACT
	mov_s r1, r3
	mov_s r6, ARC_S_CALL_AUX_WRITE
	sjli SJLI_CALL_ARC_SECURE
	pop_s r2
#else
	SRR r3, [_ARC_V2_AUX_IRQ_ACT]
#endif

#if defined(CONFIG_ARC_FIRQ) && CONFIG_RGF_NUM_BANKS != 1
	mov r2, ilink
#endif
	/* Assumption: r2 has next thread */
	b _rirq_newthread_switch

_exc_return_from_exc:
	/* exception handler may change return address.
	 * reload it
	 */
	LDR r0, sp, ___isf_t_pc_OFFSET
	SRR r0, [_ARC_V2_ERET]

	_pop_irq_stack_frame
	/* restore the faulting context's original stack pointer */
	MOVR sp, ilink
	rtie
/* separate entry for trap_s, which may be used by irq_offload, USERSPACE */
/*
 * Trap vector: for the syscall trap id (userspace builds) it builds a
 * frame, drops the U bit and redirects ERET to _arc_do_syscall so the
 * syscall runs in kernel mode; every other trap id falls back to the
 * common fault path (_exc_entry).
 */
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
	/* get the id of trap_s */
	LRR ilink, [_ARC_V2_ECR]
	ANDR ilink, ilink, 0x3f
#ifdef CONFIG_USERSPACE
	cmp ilink, _TRAP_S_CALL_SYSTEM_CALL
	bne _do_non_syscall_trap

	/* do sys_call */
	mov ilink, K_SYSCALL_LIMIT
	cmp r6, ilink
	blo valid_syscall_id

	/* out-of-range syscall id: route to the bad-syscall handler,
	 * passing the offending id as its first argument
	 */
	mov_s r0, r6
	mov_s r6, K_SYSCALL_BAD

valid_syscall_id:
	/* create a sys call frame
	 * caller regs (r0 - 12) are saved in _create_irq_stack_frame
	 * ok to use them later
	 */
	_create_irq_stack_frame
	_save_exc_regs_into_stack

	/* exc return and do sys call in kernel mode,
	 * so need to clear U bit, r0 is already loaded
	 * with ERSTATUS in _save_exc_regs_into_stack
	 */
	bclr r0, r0, _ARC_V2_STATUS32_U_BIT
	sr r0, [_ARC_V2_ERSTATUS]

	mov_s r0, _arc_do_syscall
	sr r0, [_ARC_V2_ERET]
	rtie

_do_non_syscall_trap:
#endif /* CONFIG_USERSPACE */
	b _exc_entry
``` | /content/code_sandbox/arch/arc/core/fault_s.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,867 |
```c
/*
*
*/
/**
* @file
* @brief Time Stamp API for ARCv2
*
* Provide 64-bit time stamp API
*/
#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/kernel_structs.h>
/*
* @brief Read 64-bit timestamp value
*
* This function returns a 64-bit bit time stamp value that is clocked
* at the same frequency as the CPU.
*
* @return 64-bit time stamp value
*/
uint64_t z_tsc_read(void)
{
unsigned int key;
uint64_t t;
uint32_t count;
key = arch_irq_lock();
t = (uint64_t)sys_clock_tick_get();
count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
arch_irq_unlock(key);
t *= k_ticks_to_cyc_floor64(1);
t += (uint64_t)count;
return t;
}
``` | /content/code_sandbox/arch/arc/core/timestamp.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 199 |
```c
/*
*
*/
/**
* @file
* @brief Fatal fault handling
*
* This module implements the routines necessary for handling fatal faults on
* ARCv2 CPUs.
*/
#include <zephyr/kernel.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/logging/log.h>
#include <kernel_arch_data.h>
#include <zephyr/arch/arc/v2/exception.h>
#include <err_dump_handling.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_EXCEPTION_DEBUG
/* Pretty-print the exception stack frame (caller-saved regs, pc,
 * status32 and, when ZOL is present, the loop registers) via the
 * error-dump facility. Debug aid only; no side effects on @esf.
 */
static void dump_arc_esf(const struct arch_esf *esf)
{
	ARC_EXCEPTION_DUMP(" r0: 0x%" PRIxPTR "  r1: 0x%" PRIxPTR "  r2: 0x%" PRIxPTR
		"  r3: 0x%" PRIxPTR "", esf->r0, esf->r1, esf->r2, esf->r3);
	ARC_EXCEPTION_DUMP(" r4: 0x%" PRIxPTR "  r5: 0x%" PRIxPTR "  r6: 0x%" PRIxPTR
		"  r7: 0x%" PRIxPTR "", esf->r4, esf->r5, esf->r6, esf->r7);
	ARC_EXCEPTION_DUMP(" r8: 0x%" PRIxPTR "  r9: 0x%" PRIxPTR " r10: 0x%" PRIxPTR
		" r11: 0x%" PRIxPTR "", esf->r8, esf->r9, esf->r10, esf->r11);
	ARC_EXCEPTION_DUMP("r12: 0x%" PRIxPTR " r13: 0x%" PRIxPTR "  pc: 0x%" PRIxPTR "",
		esf->r12, esf->r13, esf->pc);
	ARC_EXCEPTION_DUMP(" blink: 0x%" PRIxPTR " status32: 0x%" PRIxPTR "",
		esf->blink, esf->status32);
#ifdef CONFIG_ARC_HAS_ZOL
	ARC_EXCEPTION_DUMP("lp_end: 0x%" PRIxPTR " lp_start: 0x%" PRIxPTR
		" lp_count: 0x%" PRIxPTR "", esf->lp_end, esf->lp_start, esf->lp_count);
#endif /* CONFIG_ARC_HAS_ZOL */
}
#endif
/* ARC-specific fatal error entry: dump the exception frame (when
 * available and exception debugging is on), then hand off to the
 * kernel's common fatal-error handling.
 */
void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
	if (esf != NULL) {
		dump_arc_esf(esf);
	}
#endif /* CONFIG_EXCEPTION_DEBUG */

	z_fatal_error(reason, esf);
}
/* Report a kernel oops raised from a system call. The syscall stack
 * frame is currently discarded (it does not match the esf layout).
 */
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
	/* TODO: convert ssf_ptr contents into an esf, they are not the same */
	ARG_UNUSED(ssf_ptr);
	z_arc_fatal_error(K_ERR_KERNEL_OOPS, NULL);
	CODE_UNREACHABLE;
}
/* Halt the system permanently via the ARC "brk" instruction. */
FUNC_NORETURN void arch_system_halt(unsigned int reason)
{
	ARG_UNUSED(reason);

	__asm__("brk");

	CODE_UNREACHABLE;
}
``` | /content/code_sandbox/arch/arc/core/fatal.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 692 |
```c
/*
*
*/
/**
* @file
* @brief ARCv2 interrupt management
*
*
* Interrupt management:
*
* - enabling/disabling
*
* An IRQ number passed to the @a irq parameters found in this file is a
* number from 16 to last IRQ number on the platform.
*/
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/irq.h>
#include <zephyr/sys/printk.h>
/*
* storage space for the interrupt stack of fast_irq
*/
#if defined(CONFIG_ARC_FIRQ_STACK)
#if defined(CONFIG_SMP)
/* one FIRQ stack per CPU in SMP builds */
K_KERNEL_STACK_ARRAY_DEFINE(_firq_interrupt_stack, CONFIG_MP_MAX_NUM_CPUS,
			    CONFIG_ARC_FIRQ_STACK_SIZE);
#else
K_KERNEL_STACK_DEFINE(_firq_interrupt_stack, CONFIG_ARC_FIRQ_STACK_SIZE);
#endif

/**
 * @brief Set the stack pointer for firq handling
 *
 * Writes the FIRQ stack top into register bank 1's sp by temporarily
 * switching STATUS32.RB to bank 1 and back, using ilink (the only
 * unbanked scratch register) to carry values between banks.
 */
void z_arc_firq_stack_set(void)
{
#ifdef CONFIG_SMP
	char *firq_sp = K_KERNEL_STACK_BUFFER(
		_firq_interrupt_stack[z_arc_v2_core_id()]) +
		CONFIG_ARC_FIRQ_STACK_SIZE;
#else
	char *firq_sp = K_KERNEL_STACK_BUFFER(_firq_interrupt_stack) +
		CONFIG_ARC_FIRQ_STACK_SIZE;
#endif

/* the z_arc_firq_stack_set must be called when irq disabled, as
 * it can be called not only in the init phase but also other places
 */
	unsigned int key = arch_irq_lock();

	__asm__ volatile (
/* only ilink will not be banked, so use ilink as channel
 * between 2 banks
 */
		"mov %%ilink, %0\n\t"
		"lr %0, [%1]\n\t"
		"or %0, %0, %2\n\t"
		"kflag %0\n\t"		/* switch to register bank 1 */
		"mov %%sp, %%ilink\n\t"
/* switch back to bank0, use ilink to avoid the pollution of
 * bank1's gp regs.
 */
		"lr %%ilink, [%1]\n\t"
		"and %%ilink, %%ilink, %3\n\t"
		"kflag %%ilink\n\t"
		:
		: "r"(firq_sp), "i"(_ARC_V2_STATUS32),
		  "i"(_ARC_V2_STATUS32_RB(1)),
		  "i"(~_ARC_V2_STATUS32_RB(7))
		);
	arch_irq_unlock(key);
}
#endif
#endif
/*
* ARC CPU interrupt controllers hierarchy.
*
* Single-core (UP) case:
*
* --------------------------
* | CPU core 0 |
* --------------------------
* | core 0 (private) |
* | interrupt controller |
* --------------------------
* |
* [internal interrupts]
* [external interrupts]
*
*
* Multi-core (SMP) case:
*
* -------------------------- --------------------------
* | CPU core 0 | | CPU core 1 |
* -------------------------- --------------------------
* | core 0 (private) | | core 1 (private) |
* | interrupt controller | | interrupt controller |
* -------------------------- --------------------------
* | | | | | |
* | | [core 0 private internal interrupts] | | [core 1 private internal interrupts]
* | | | |
* | | | |
* | ------------------------------------------- |
* | | IDU (Interrupt Distribution Unit) | |
* | ------------------------------------------- |
* | | |
* | [common (shared) interrupts] |
* | |
* | |
* [core 0 private external interrupts] [core 1 private external interrupts]
*
*
*
* The interrupts are grouped in HW in the same order - firstly internal interrupts
* (with lowest line numbers in IVT), than common interrupts (if present), than external
* interrupts (with highest line numbers in IVT).
*
* NOTE: in case of SMP system we currently support in Zephyr only private internal and common
* interrupts, so the core-private external interrupts are currently not supported for SMP.
*/
/**
* @brief Enable an interrupt line
*
* Clear possible pending interrupts on the line, and enable the interrupt
* line. After this call, the CPU will receive interrupts for the specified
* @a irq.
*/
void arch_irq_enable(unsigned int irq);
/**
* @brief Disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified @a irq.
*/
void arch_irq_disable(unsigned int irq);
/**
* @brief Return IRQ enable state
*
* @param irq IRQ line
* @return interrupt enable state, true or false
*/
int arch_irq_is_enabled(unsigned int irq);
#ifdef CONFIG_ARC_CONNECT
/* map a global IRQ line number to its IDU (shared interrupt) index */
#define IRQ_NUM_TO_IDU_NUM(id)	((id) - ARC_CONNECT_IDU_IRQ_START)
/* true when the IRQ line is a common (IDU-routed) interrupt */
#define IRQ_IS_COMMON(id)	((id) >= ARC_CONNECT_IDU_IRQ_START)

void arch_irq_enable(unsigned int irq)
{
	if (IRQ_IS_COMMON(irq)) {
		/* common interrupt: unmask it at the IDU (mask bit 0) */
		z_arc_connect_idu_set_mask(IRQ_NUM_TO_IDU_NUM(irq), 0x0);
	} else {
		/* core-private interrupt: enable at the local controller */
		z_arc_v2_irq_unit_int_enable(irq);
	}
}

void arch_irq_disable(unsigned int irq)
{
	if (IRQ_IS_COMMON(irq)) {
		/* common interrupt: mask it at the IDU (mask bit 1) */
		z_arc_connect_idu_set_mask(IRQ_NUM_TO_IDU_NUM(irq), 0x1);
	} else {
		z_arc_v2_irq_unit_int_disable(irq);
	}
}

int arch_irq_is_enabled(unsigned int irq)
{
	if (IRQ_IS_COMMON(irq)) {
		/* IDU mask is inverted sense: masked == disabled */
		return !z_arc_connect_idu_read_mask(IRQ_NUM_TO_IDU_NUM(irq));
	} else {
		return z_arc_v2_irq_unit_int_enabled(irq);
	}
}
#else
/* no ARC-connect: all interrupts are handled by the core-private unit */
void arch_irq_enable(unsigned int irq)
{
	z_arc_v2_irq_unit_int_enable(irq);
}

void arch_irq_disable(unsigned int irq)
{
	z_arc_v2_irq_unit_int_disable(irq);
}

int arch_irq_is_enabled(unsigned int irq)
{
	return z_arc_v2_irq_unit_int_enabled(irq);
}
#endif /* CONFIG_ARC_CONNECT */
/**
 * @internal
 *
 * @brief Set an interrupt's priority
 *
 * Lower values take priority over higher values. Special case priorities are
 * expressed via mutually exclusive flags.
 * The priority is verified if ASSERT_ON is enabled; max priority level
 * depends on CONFIG_NUM_IRQ_PRIO_LEVELS.
 */
void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
	ARG_UNUSED(flags);

	__ASSERT(prio < CONFIG_NUM_IRQ_PRIO_LEVELS,
		 "invalid priority %d for irq %d", prio, irq);

	/* Levels 0 .. ARC_N_IRQ_START_LEVEL-1 are reserved for the secure
	 * world; the remaining levels belong to the normal world. Clamp the
	 * requested priority into the range this firmware may use.
	 */
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
	if (prio >= ARC_N_IRQ_START_LEVEL) {
		prio = ARC_N_IRQ_START_LEVEL - 1;
	}
#elif defined(CONFIG_ARC_NORMAL_FIRMWARE)
	if (prio < ARC_N_IRQ_START_LEVEL) {
		prio = ARC_N_IRQ_START_LEVEL;
	}
#endif
	z_arc_v2_irq_unit_prio_set(irq, prio);
}
/**
 * @brief Spurious interrupt handler
 *
 * Installed in all dynamic interrupt slots at boot time. Throws an error if
 * called.
 */
void z_irq_spurious(const void *unused)
{
	ARG_UNUSED(unused);

	z_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}
#ifdef CONFIG_DYNAMIC_INTERRUPTS
/* Install @routine/@parameter into the SW ISR table for @irq at runtime
 * and program its priority. Returns the IRQ line number.
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags)
{
	z_isr_install(irq, routine, parameter);
	z_irq_priority_set(irq, priority, flags);

	return irq;
}
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
``` | /content/code_sandbox/arch/arc/core/irq_manage.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,755 |
```c
/*
*
*/
/**
* @file
* @brief Populated exception vector table
*
* Vector table with exceptions filled in. The reset vector is the system entry
* point, ie. the first instruction executed.
*
* The table is populated with all the system exception handlers. No exception
* should not be triggered until the kernel is ready to handle them.
*
* We are using a C file instead of an assembly file (like the ARM vector table)
* to work around an issue with the assembler where:
*
* .word <function>
*
* statements would end up with the two half-words of the functions' addresses
* swapped.
*/
#include <zephyr/types.h>
#include <zephyr/toolchain.h>
#include "vector_table.h"
/* Layout of the ARC exception vector table: one word per vector, in the
 * order the hardware expects them.
 */
struct vector_table {
	uintptr_t reset;
	uintptr_t memory_error;
	uintptr_t instruction_error;
	uintptr_t ev_machine_check;
	uintptr_t ev_tlb_miss_i;
	uintptr_t ev_tlb_miss_d;
	uintptr_t ev_prot_v;
	uintptr_t ev_privilege_v;
	uintptr_t ev_swi;
	uintptr_t ev_trap;
	uintptr_t ev_extension;
	uintptr_t ev_div_zero;
	/* ev_dc_error is unused in ARCv3 and de-facto unused in ARCv2 as well */
	uintptr_t ev_dc_error;
	uintptr_t ev_maligned;
	uintptr_t unused_1;
	uintptr_t unused_2;
};
/* Populated vector table, placed in the .exc_vector_table section so the
 * linker puts it where the hardware's vector base points. Designated
 * initializers keep each handler bound to its named slot, so the table
 * stays correct even if struct vector_table's layout evolves.
 */
struct vector_table _VectorTable Z_GENERIC_SECTION(.exc_vector_table) = {
	.reset = (uintptr_t)__reset,
	.memory_error = (uintptr_t)__memory_error,
	.instruction_error = (uintptr_t)__instruction_error,
	.ev_machine_check = (uintptr_t)__ev_machine_check,
	.ev_tlb_miss_i = (uintptr_t)__ev_tlb_miss_i,
	.ev_tlb_miss_d = (uintptr_t)__ev_tlb_miss_d,
	.ev_prot_v = (uintptr_t)__ev_prot_v,
	.ev_privilege_v = (uintptr_t)__ev_privilege_v,
	.ev_swi = (uintptr_t)__ev_swi,
	.ev_trap = (uintptr_t)__ev_trap,
	.ev_extension = (uintptr_t)__ev_extension,
	.ev_div_zero = (uintptr_t)__ev_div_zero,
	.ev_dc_error = (uintptr_t)__ev_dc_error,
	.ev_maligned = (uintptr_t)__ev_maligned,
	.unused_1 = 0,
	.unused_2 = 0,
};
``` | /content/code_sandbox/arch/arc/core/vector_table.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 447 |
```unknown
/*
*
*/
/**
* @file
* @brief Reset handler
*
* Reset handler that prepares the system for running C code.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <swap_macros.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
#ifdef CONFIG_ARC_EARLY_SOC_INIT
#include <soc_ctrl.h>
#endif
GDATA(z_interrupt_stacks)
GDATA(z_main_stack)
GDATA(_VectorTable)
/* use one of the available interrupt stacks during init */
#define INIT_STACK z_interrupt_stacks
#define INIT_STACK_SIZE CONFIG_ISR_STACK_SIZE
GTEXT(__reset)
GTEXT(__start)
/**
* @brief Reset vector
*
* Ran when the system comes out of reset. The processor is at supervisor level.
*
* Locking interrupts prevents anything from interrupting the CPU.
*
* When these steps are completed, jump to z_prep_c(), which will finish setting
* up the system for running C code.
*/
SECTION_SUBSEC_FUNC(TEXT,_reset_and__start,__reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_and__start,__start)

	/* lock interrupts: will get unlocked when switch to main task
	 * also make sure the processor in the correct status
	 */
	mov_s r0, 0
	kflag r0
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	sflag r0
#endif
	/* interrupt related init */
#ifndef CONFIG_ARC_NORMAL_FIRMWARE
	/* IRQ_ACT and IRQ_CTRL should be initialized and set in secure mode */
	sr r0, [_ARC_V2_AUX_IRQ_ACT]
	sr r0, [_ARC_V2_AUX_IRQ_CTRL]
#endif
	sr r0, [_ARC_V2_AUX_IRQ_HINT]

	/* set the vector table base early,
	 * so that exception vectors can be handled.
	 */
	MOVR r0, _VectorTable
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	sr r0, [_ARC_V2_IRQ_VECT_BASE_S]
#else
	SRR r0, [_ARC_V2_IRQ_VECT_BASE]
#endif

	/* enable the divide-by-zero exception (STATUS32.DZ) */
	lr r0, [_ARC_V2_STATUS32]
	bset r0, r0, _ARC_V2_STATUS32_DZ_BIT
	kflag r0

#if defined(CONFIG_USERSPACE)
	/* userspace support: set the US bit in STATUS32 */
	lr r0, [_ARC_V2_STATUS32]
	bset r0, r0, _ARC_V2_STATUS32_US_BIT
	kflag r0
#endif

#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
	/* allow unaligned memory accesses (STATUS32.AD) */
	lr r0, [_ARC_V2_STATUS32]
	bset r0, r0, _ARC_V2_STATUS32_AD_BIT
	kflag r0
#endif

	/* Invalidate icache */
	lr r0, [_ARC_V2_I_CACHE_BUILD]
	and.f r0, r0, 0xff	/* skip if no icache is built in */
	bz.nd done_icache_invalidate

	mov_s r2, 0
	sr r2, [_ARC_V2_IC_IVIC]
	/* writing to IC_IVIC needs 3 NOPs */
	nop_s
	nop_s
	nop_s
done_icache_invalidate:

	/* Invalidate dcache */
	lr r3, [_ARC_V2_D_CACHE_BUILD]
	and.f r3, r3, 0xff	/* skip if no dcache is built in */
	bz.nd done_dcache_invalidate

	mov_s r1, 1
	sr r1, [_ARC_V2_DC_IVDC]

done_dcache_invalidate:

#ifdef CONFIG_ARC_EARLY_SOC_INIT
	soc_early_asm_init_percpu
#endif

	_dsp_extension_probe

/*
 * Init ARC internal architecture state
 * Force to initialize internal architecture state to reset values
 * For scenarios where board hardware is not re-initialized between tests,
 * some settings need to be restored to its default initial states as a
 * substitution of normal hardware reset sequence.
 */
#ifdef CONFIG_INIT_ARCH_HW_AT_BOOT
	/* Set MPU (v4 or v8) registers to default */
#if CONFIG_ARC_MPU_VER == 4 || CONFIG_ARC_MPU_VER == 8
	/* Set default reset value to _ARC_V2_MPU_EN register */
#define ARC_MPU_EN_RESET_VALUE 0x400181C0
	mov_s r1, ARC_MPU_EN_RESET_VALUE
	sr r1, [_ARC_V2_MPU_EN]

	/* Get MPU region numbers */
	lr r3, [_ARC_V2_MPU_BUILD]
	lsr_s r3, r3, 8
	and r3, r3, 0xff

	mov_s r1, 0
	mov_s r2, 0

	/* Set all MPU regions by iterating index */
mpu_regions_reset:
	brge r2, r3, done_mpu_regions_reset
	sr r2, [_ARC_V2_MPU_INDEX]
	sr r1, [_ARC_V2_MPU_RSTART]
	sr r1, [_ARC_V2_MPU_REND]
	sr r1, [_ARC_V2_MPU_RPER]
	add_s r2, r2, 1
	b_s mpu_regions_reset
done_mpu_regions_reset:
#endif
#endif

#ifdef CONFIG_ISA_ARCV3
	/* Enable HW prefetcher if exist */
	lr r0, [_ARC_HW_PF_BUILD]
	breq r0, 0, hw_pf_setup_done
	lr r1, [_ARC_HW_PF_CTRL]
	or r1, r1, _ARC_HW_PF_CTRL_ENABLE
	sr r1, [_ARC_HW_PF_CTRL]
hw_pf_setup_done:
#endif

#if defined(CONFIG_SMP) || CONFIG_MP_MAX_NUM_CPUS > 1
	_get_cpu_id r0
	breq r0, 0, _master_core_startup

/*
 * Non-masters wait for master core (core 0) to boot enough
 */
_slave_core_wait:
#if CONFIG_MP_MAX_NUM_CPUS == 1
	/* single-CPU build on multi-core hardware: halt extra cores */
	kflag 1
#endif
	ld r1, [arc_cpu_wake_flag]
	brne r0, r1, _slave_core_wait

	/* pick up the stack published by arch_cpu_start() */
	LDR sp, arc_cpu_sp
	/* signal master core that slave core runs */
	st 0, [arc_cpu_wake_flag]

#if defined(CONFIG_ARC_FIRQ_STACK)
	push r0
	jl z_arc_firq_stack_set
	pop r0
#endif
	j arch_secondary_cpu_init

_master_core_startup:
#endif

#ifdef CONFIG_INIT_STACKS
	/*
	 * use the main stack to call memset on the interrupt stack and the
	 * FIRQ stack when CONFIG_INIT_STACKS is enabled before switching to
	 * one of them for the rest of the early boot
	 */
	mov_s sp, z_main_stack
	add sp, sp, CONFIG_MAIN_STACK_SIZE

	mov_s r0, z_interrupt_stacks
	mov_s r1, 0xaa
	mov_s r2, CONFIG_ISR_STACK_SIZE
	jl memset
#endif /* CONFIG_INIT_STACKS */

	/* switch to the interrupt stack for the remainder of early boot */
	mov_s sp, INIT_STACK
	add sp, sp, INIT_STACK_SIZE

#if defined(CONFIG_ARC_FIRQ_STACK)
	jl z_arc_firq_stack_set
#endif

	j z_prep_c
``` | /content/code_sandbox/arch/arc/core/reset.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,496 |
```unknown
/*
*
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
/* Zero the caller-saved (scratch) registers r1-r12 so no secure-world
 * state leaks to the normal world across a secure-service return.
 */
.macro clear_scratch_regs
	mov r1, 0
	mov r2, 0
	mov r3, 0
	mov r4, 0
	mov r5, 0
	mov r6, 0
	mov r7, 0
	mov r8, 0
	mov r9, 0
	mov r10, 0
	mov r11, 0
	mov r12, 0
.endm
/* Zero the callee-saved registers r13-r25 (used before handing control
 * to the normal world, where no secure state may remain visible).
 */
.macro clear_callee_regs
	mov r25, 0
	mov r24, 0
	mov r23, 0
	mov r22, 0
	mov r21, 0
	mov r20, 0
	mov r19, 0
	mov r18, 0
	mov r17, 0
	mov r16, 0
	mov r15, 0
	mov r14, 0
	mov r13, 0
.endm
GTEXT(arc_go_to_normal)
GTEXT(_arc_do_secure_call)
GDATA(arc_s_call_table)
/*
 * Secure-service dispatcher, entered from normal world via sjli.
 * r0-r5 carry arg1-arg6; r6 is the service id used to index
 * arc_s_call_table. CLRI/SETI get dedicated fast paths; all other
 * services go through the table, with scratch registers scrubbed
 * before returning to normal mode.
 */
SECTION_FUNC(TEXT, _arc_do_secure_call)
	/* r0-r5: arg1-arg6, r6 is call id */
	/* the call id should be checked */

	/* disable normal interrupt happened when processor in secure mode ? */
	/* seti (0x30 | (ARC_N_IRQ_START_LEVEL-1)) */

	breq r6, ARC_S_CALL_CLRI, _s_clri
	breq r6, ARC_S_CALL_SETI, _s_seti
	push_s blink
	mov blink, arc_s_call_table
	ld.as r6, [blink, r6]
	jl [r6]
/*
 * no need to clear callee regs, as they will be saved and restored
 * automatically
 */
	clear_scratch_regs

	mov r29, 0
	mov r30, 0

_arc_do_secure_call_exit:
	pop_s blink
	j [blink]
	/* enable normal interrupt */

/*
 * j.d [blink]
 * seti (0x30 | (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
 */

/* CLRI emulation: return the caller's interrupt state encoded like the
 * real clri instruction, then raise the threshold so only secure-level
 * interrupts stay enabled.
 */
_s_clri:
	lr r0, [_ARC_V2_STATUS32]
	and r0, r0, 0x1e
	asr r0, r0
	or r0, r0, 0x30
	mov r6, (0x30 | (ARC_N_IRQ_START_LEVEL-1))
	j.d [blink]
	seti r6

/* SETI emulation: restore/cap the interrupt threshold requested in r0,
 * never letting the normal world unmask secure-level priorities.
 * NOTE(review): the bit-4 test distinguishes the seti operand encodings —
 * confirm against the ARCv2 ISA seti description.
 */
_s_seti:
	btst r0, 4
	jnz __seti_0
	mov r0, (CONFIG_NUM_IRQ_PRIO_LEVELS - 1)
	lr r6, [_ARC_V2_STATUS32]
	and r6, r6, 0x1e
	asr r6, r6
	cmp r0, r6
	mov.hs r0, r6
__seti_0:
	and r0, r0, 0xf
	brhs r0, ARC_N_IRQ_START_LEVEL, __seti_1
	mov r0, ARC_N_IRQ_START_LEVEL
__seti_1:
	or r0, r0, 0x30
	j.d [blink]
	seti r0
/*
 * Transfer control to the normal world entry point in r0. All general
 * purpose registers are scrubbed first so no secure-world data leaks.
 * The target must never return; if it does, halt the core.
 */
SECTION_FUNC(TEXT, arc_go_to_normal)
	clear_callee_regs
	clear_scratch_regs
	mov fp, 0
	mov r29, 0
	mov r30, 0
	mov blink, 0
	jl [r0]
	/* should not come here */
	kflag 1
``` | /content/code_sandbox/arch/arc/core/secureshield/arc_secure.S | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 776 |
```c
/*
*
*/
/**
* @file
* @brief ARCv2 kernel structure member offset definition file
*
* This module is responsible for the generation of the absolute symbols whose
* value represents the member offsets for various ARCv2 kernel structures.
*
* All of the absolute symbols defined by this module will be present in the
* final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
* symbol).
*
* INTERNAL
* It is NOT necessary to define the offset for every member of a structure.
* Typically, only those members that are accessed by assembly language routines
* are defined; however, it doesn't hurt to define all fields for the sake of
* completeness.
*/
#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <gen_offset.h>
#include <kernel_offsets.h>
#ifdef CONFIG_DSP_SHARING
#include "../dsp/dsp_offsets.c"
#endif
/* per-thread arch state consumed by the assembly switch code */
GEN_OFFSET_SYM(_thread_arch_t, relinquish_cause);

#ifdef CONFIG_ARC_STACK_CHECKING
/* hardware stack-checking bounds */
GEN_OFFSET_SYM(_thread_arch_t, k_stack_base);
GEN_OFFSET_SYM(_thread_arch_t, k_stack_top);
#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_thread_arch_t, u_stack_base);
GEN_OFFSET_SYM(_thread_arch_t, u_stack_top);
#endif
#endif

#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#endif

/* ARCv2-specific IRQ stack frame structure member offsets */
GEN_OFFSET_SYM(_isf_t, r0);
GEN_OFFSET_SYM(_isf_t, r1);
GEN_OFFSET_SYM(_isf_t, r2);
GEN_OFFSET_SYM(_isf_t, r3);
GEN_OFFSET_SYM(_isf_t, r4);
GEN_OFFSET_SYM(_isf_t, r5);
GEN_OFFSET_SYM(_isf_t, r6);
GEN_OFFSET_SYM(_isf_t, r7);
GEN_OFFSET_SYM(_isf_t, r8);
GEN_OFFSET_SYM(_isf_t, r9);
GEN_OFFSET_SYM(_isf_t, r10);
GEN_OFFSET_SYM(_isf_t, r11);
GEN_OFFSET_SYM(_isf_t, r12);
GEN_OFFSET_SYM(_isf_t, r13);
GEN_OFFSET_SYM(_isf_t, blink);
#ifdef CONFIG_ARC_HAS_ZOL
/* zero-overhead loop registers, present only with ZOL hardware */
GEN_OFFSET_SYM(_isf_t, lp_end);
GEN_OFFSET_SYM(_isf_t, lp_start);
GEN_OFFSET_SYM(_isf_t, lp_count);
#endif /* CONFIG_ARC_HAS_ZOL */
#ifdef CONFIG_CODE_DENSITY
GEN_OFFSET_SYM(_isf_t, ei_base);
GEN_OFFSET_SYM(_isf_t, ldi_base);
GEN_OFFSET_SYM(_isf_t, jli_base);
#endif
GEN_OFFSET_SYM(_isf_t, pc);
#ifdef CONFIG_ARC_HAS_SECURE
GEN_OFFSET_SYM(_isf_t, sec_stat);
#endif
GEN_OFFSET_SYM(_isf_t, status32);
GEN_ABSOLUTE_SYM(___isf_t_SIZEOF, sizeof(_isf_t));

GEN_OFFSET_SYM(_callee_saved_t, sp);

/* callee-saved registers as laid out on the stack by the switch code */
GEN_OFFSET_SYM(_callee_saved_stack_t, r13);
GEN_OFFSET_SYM(_callee_saved_stack_t, r14);
GEN_OFFSET_SYM(_callee_saved_stack_t, r15);
GEN_OFFSET_SYM(_callee_saved_stack_t, r16);
GEN_OFFSET_SYM(_callee_saved_stack_t, r17);
GEN_OFFSET_SYM(_callee_saved_stack_t, r18);
GEN_OFFSET_SYM(_callee_saved_stack_t, r19);
GEN_OFFSET_SYM(_callee_saved_stack_t, r20);
GEN_OFFSET_SYM(_callee_saved_stack_t, r21);
GEN_OFFSET_SYM(_callee_saved_stack_t, r22);
GEN_OFFSET_SYM(_callee_saved_stack_t, r23);
GEN_OFFSET_SYM(_callee_saved_stack_t, r24);
GEN_OFFSET_SYM(_callee_saved_stack_t, r25);
GEN_OFFSET_SYM(_callee_saved_stack_t, r26);
GEN_OFFSET_SYM(_callee_saved_stack_t, fp);

#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
GEN_OFFSET_SYM(_callee_saved_stack_t, kernel_sp);
GEN_OFFSET_SYM(_callee_saved_stack_t, user_sp);
#else
GEN_OFFSET_SYM(_callee_saved_stack_t, user_sp);
#endif
#endif

GEN_OFFSET_SYM(_callee_saved_stack_t, r30);

#ifdef CONFIG_ARC_HAS_ACCL_REGS
/* accumulator register pair (single 64-bit r58 on ARCv3/64-bit) */
GEN_OFFSET_SYM(_callee_saved_stack_t, r58);
#ifndef CONFIG_64BIT
GEN_OFFSET_SYM(_callee_saved_stack_t, r59);
#endif /* !CONFIG_64BIT */
#endif

#ifdef CONFIG_FPU_SHARING
GEN_OFFSET_SYM(_callee_saved_stack_t, fpu_status);
GEN_OFFSET_SYM(_callee_saved_stack_t, fpu_ctrl);

#ifdef CONFIG_FP_FPU_DA
GEN_OFFSET_SYM(_callee_saved_stack_t, dpfp2h);
GEN_OFFSET_SYM(_callee_saved_stack_t, dpfp2l);
GEN_OFFSET_SYM(_callee_saved_stack_t, dpfp1h);
GEN_OFFSET_SYM(_callee_saved_stack_t, dpfp1l);
#endif

#endif

GEN_ABSOLUTE_SYM(___callee_saved_stack_t_SIZEOF, sizeof(_callee_saved_stack_t));

GEN_ABS_SYM_END
``` | /content/code_sandbox/arch/arc/core/offsets/offsets.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,043 |
```c
/*
*
*/
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/types.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/arc/v2/secureshield/arc_secure.h>
#define IRQ_PRIO_MASK (0xffff << ARC_N_IRQ_START_LEVEL)
/*
* @brief read secure auxiliary regs on behalf of normal mode
*
* @param aux_reg address of aux reg
*
 * Some aux regs require secure privilege; this function implements
 * a secure service to access secure aux regs. A check should be done
 * to decide whether the access is valid.
*/
static int32_t arc_s_aux_read(uint32_t aux_reg)
{
	/* No secure aux register is exposed to the normal world yet; every
	 * read request is rejected with -1. Add per-register checks here
	 * before granting access to specific registers.
	 */
	return -1;
}
/*
* @brief write secure auxiliary regs on behalf of normal mode
*
* @param aux_reg address of aux reg
* @param val, the val to write
*
 * Some aux regs require secure privilege; this function implements
 * a secure service to access secure aux regs. A check should be done
 * to decide whether the access is valid.
*/
static int32_t arc_s_aux_write(uint32_t aux_reg, uint32_t val)
{
	uint32_t secure_bits;

	if (aux_reg != _ARC_V2_AUX_IRQ_ACT) {
		/* only IRQ_ACT writes are serviced for normal mode */
		return -1;
	}

	/* Priority levels 0 .. CONFIG_NUM_IRQ_PRIO_LEVELS-1 belong to the
	 * secure world; preserve those bits and accept only the
	 * normal-world priority bits supplied by the caller.
	 */
	secure_bits = z_arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT) &
		      ~IRQ_PRIO_MASK;
	z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_ACT,
			       secure_bits | (val & IRQ_PRIO_MASK));

	return 0;
}
/*
* @brief allocate interrupt for normal world
*
* @param intno, the interrupt to be allocated to normal world
*
* By default, most interrupts are configured to be secure in initialization.
 * If the normal world wants to use an interrupt, it requests one through
 * this secure service. A check should be done to decide whether the
 * request is valid.
*/
static int32_t arc_s_irq_alloc(uint32_t intno)
{
	/* Hand the interrupt over to the normal world by clearing its
	 * secure flag.
	 * NOTE(review): despite the comment above, no validation of intno
	 * is performed here -- confirm callers cannot reassign interrupts
	 * the secure world still needs.
	 */
	z_arc_v2_irq_uinit_secure_set(intno, 0);
	return 0;
}
/*
* \todo, to access MPU from normal mode, secure mpu service should be
* created. In the secure mpu service, the parameters should be checked
* (e.g., not overwrite the mpu regions for secure world)that operations
* are valid
*/
/*
* \todo, how to add secure service easily
*/
/* Dispatch table for secure service calls made from the normal world;
 * indexed by the ARC_S_CALL_* service id (presumably dispatched by
 * _arc_do_secure_call -- see the SJLI table in arc_sjli.c).
 */
const _arc_s_call_handler_t arc_s_call_table[ARC_S_CALL_LIMIT] = {
	[ARC_S_CALL_AUX_READ] = (_arc_s_call_handler_t)arc_s_aux_read,
	[ARC_S_CALL_AUX_WRITE] = (_arc_s_call_handler_t)arc_s_aux_write,
	[ARC_S_CALL_IRQ_ALLOC] = (_arc_s_call_handler_t)arc_s_irq_alloc,
};
``` | /content/code_sandbox/arch/arc/core/secureshield/secure_sys_services.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 630 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <errno.h>
#include <zephyr/types.h>
#include <zephyr/init.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/arc/v2/secureshield/arc_secure.h>
static void _default_sjli_entry(void);
/*
* sjli vector table must be in instruction space
* \todo: how to let user to install customized sjli entry easily, e.g.
* through macros or with the help of compiler?
*/
/* SJLI (secure call) vector table: slot 0 is the secure-call gateway;
 * all remaining slots fall back to _default_sjli_entry.
 */
const static uint32_t _sjli_vector_table[CONFIG_SJLI_TABLE_SIZE] = {
	[0] = (uint32_t)_arc_do_secure_call,
	[1 ... (CONFIG_SJLI_TABLE_SIZE - 1)] = (uint32_t)_default_sjli_entry,
};
/*
* @brief default entry of sjli call
*
*/
/* Catch-all handler for SJLI slots with no installed service; it only
 * logs that an unimplemented secure call was made.
 */
static void _default_sjli_entry(void)
{
	printk("default sjli entry\n");
}
/*
* @brief initialization of sjli related functions
*
*/
static void sjli_table_init(void)
{
	/* Publish the SJLI vector table to hardware: BASE gets the table
	 * start, TOP its one-past-the-end address.
	 * NOTE(review): pointer values are passed where the aux-reg write
	 * takes a 32-bit value -- assumes 32-bit pointers; confirm whether
	 * an explicit (uint32_t) cast is needed to keep the build clean.
	 */
	z_arc_v2_aux_reg_write(_ARC_V2_NSC_TABLE_BASE, _sjli_vector_table);
	z_arc_v2_aux_reg_write(_ARC_V2_NSC_TABLE_TOP,
			       (_sjli_vector_table + CONFIG_SJLI_TABLE_SIZE));
}
/*
* @brief initialization of secureshield related functions.
*/
/* One-time secureshield setup: installs the SJLI table and opens up the
 * instructions the normal world is allowed to execute directly.
 * Always returns 0 (SYS_INIT success).
 */
static int arc_secureshield_init(void)
{
	sjli_table_init();

	/* Set the NIC bit to enable seti/clri and sleep/wevt in normal
	 * mode. If not set, a direct call of seti/clri etc. will raise an
	 * exception, and those instructions would have to be replaced with
	 * secure services (sjli calls).
	 */
	__asm__ volatile("sflag 0x20");

	return 0;
}
/* Run before the kernel is up so the normal world can take interrupts */
SYS_INIT(arc_secureshield_init, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
``` | /content/code_sandbox/arch/arc/core/secureshield/arc_sjli.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 443 |
```c
/*
*
*/
/**
* @file
* @brief ARCv2 DSP and AGU structure member offset definition file
*
*/
#ifdef CONFIG_DSP_SHARING
/* DSP context saved/restored on context switch (see swap_dsp_macros.h) */
GEN_OFFSET_SYM(_callee_saved_stack_t, dsp_ctrl);
GEN_OFFSET_SYM(_callee_saved_stack_t, acc0_glo);
GEN_OFFSET_SYM(_callee_saved_stack_t, acc0_ghi);
#ifdef CONFIG_ARC_DSP_BFLY_SHARING
/* butterfly-instruction state */
GEN_OFFSET_SYM(_callee_saved_stack_t, dsp_bfly0);
GEN_OFFSET_SYM(_callee_saved_stack_t, dsp_fft_ctrl);
#endif
#endif
#ifdef CONFIG_ARC_AGU_SHARING
/* AGU small register set: 4 address pointers, 2 offsets, 4 modifiers */
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap0);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap1);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap2);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap3);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_os0);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_os1);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod0);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod1);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod2);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod3);
#ifdef CONFIG_ARC_AGU_MEDIUM
/* AGU medium additions: 4 more pointers, 2 offsets, 8 modifiers */
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap4);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap5);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap6);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap7);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_os2);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_os3);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod4);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod5);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod6);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod7);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod8);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod9);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod10);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod11);
#endif
#ifdef CONFIG_ARC_AGU_LARGE
/* AGU large additions: 4 more pointers, 4 offsets, 12 modifiers */
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap8);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap9);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap10);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_ap11);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_os4);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_os5);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_os6);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_os7);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod12);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod13);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod14);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod15);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod16);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod17);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod18);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod19);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod20);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod21);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod22);
GEN_OFFSET_SYM(_callee_saved_stack_t, agu_mod23);
#endif
#endif
``` | /content/code_sandbox/arch/arc/core/dsp/dsp_offsets.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 762 |
```unknown
# Digital Signal Processing (DSP) configuration options
menu "ARC DSP Options"
depends on CPU_HAS_DSP
config ARC_DSP
bool "digital signal processing (DSP)"
help
This option enables DSP and DSP instructions.
config ARC_DSP_TURNED_OFF
	bool "Turn off DSP if it is present"
depends on !ARC_DSP
help
	  This option disables the DSP block by resetting the DSP_CTRL register.
config DSP_SHARING
bool "DSP register sharing"
depends on ARC_DSP && MULTITHREADING
select ARC_HAS_ACCL_REGS
help
This option enables preservation of the hardware DSP registers
across context switches to allow multiple threads to perform concurrent
DSP operations.
config ARC_DSP_BFLY_SHARING
bool "ARC complex DSP operation"
depends on ARC_DSP && CPU_ARCEM
help
This option is to enable Zephyr to store and restore DSP_BFLY0
and FFT_CTRL registers during context switch. This option is
only required when butterfly instructions are used in
multi-thread.
config ARC_XY_ENABLE
bool "ARC address generation unit registers"
help
	  Processors with XY memory and AGU registers can configure this
	  option to accelerate DSP instructions.
config ARC_AGU_SHARING
bool "ARC address generation unit register sharing"
depends on ARC_XY_ENABLE && MULTITHREADING
default y if DSP_SHARING
help
This option enables preservation of the hardware AGU registers
across context switches to allow multiple threads to perform concurrent
	  operations on XY memory. Saving and restoring the small AGU register
	  set is the default: 4 address pointer regs, 2 address offset regs
	  and 4 modifier regs.
config ARC_AGU_MEDIUM
bool "ARC AGU medium size register"
depends on ARC_AGU_SHARING
help
Save and restore medium AGU registers, including 8 address pointers regs,
4 address offset regs and 12 modifiers regs.
config ARC_AGU_LARGE
bool "ARC AGU large size register"
depends on ARC_AGU_SHARING
select ARC_AGU_MEDIUM
help
Save and restore large AGU registers, including 12 address pointers regs,
8 address offset regs and 24 modifiers regs.
endmenu
``` | /content/code_sandbox/arch/arc/core/dsp/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 473 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/arc/v2/aux_regs.h>
#include <zephyr/arch/arc/v2/mpu/arc_mpu.h>
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#include <zephyr/linker/linker-defs.h>
#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(mpu);
/**
* @brief Get the number of supported MPU regions
*
*/
static inline uint8_t get_num_regions(void)
{
	/* region count is encoded in bits [15:8] of the MPU build register */
	uint32_t build = z_arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);

	return (uint8_t)((build >> 8U) & 0xFFU);
}
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (i.e. THREAD_STACK_REGION) and return the correct parameter set.
*/
static inline uint32_t get_region_attr_by_type(uint32_t type)
{
	switch (type) {
	case THREAD_STACK_USER_REGION:
	case THREAD_APP_DATA_REGION:
		/* both map to the standard RAM attributes */
		return REGION_RAM_ATTR;
	case THREAD_STACK_REGION:
		/* kernel-only read/write */
		return AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR;
	case THREAD_STACK_GUARD_REGION:
		/* read-only: no write or execute in the guard region */
		return AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR;
	default:
		/* unknown type: no access */
		return 0;
	}
}
#if (CONFIG_ARC_MPU_VER == 4) || (CONFIG_ARC_MPU_VER == 8)
#include "arc_mpu_v4_internal.h"
#else
#include "arc_mpu_common_internal.h"
#endif
``` | /content/code_sandbox/arch/arc/core/mpu/arc_mpu.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 384 |
```objective-c
/*
*
*/
/**
* @file
* @brief save and load macro for ARCv2 DSP and AGU regs
*
*/
/* Save the outgoing thread's DSP context (DSP_CTRL, ACC0_GLO/GHI and,
 * with butterfly sharing, DSP_BFLY0/FFT_CTRL) into the callee-saved area
 * on its stack, then chain into the AGU save.
 * The save is skipped when the thread's user_options does not have the
 * DSP bit (K_DSP_IDX) set. Clobbers r13.
 * Assumes r2 points at the thread being switched out and sp at its
 * callee-saved area -- TODO confirm against the swap/switch code.
 */
.macro _save_dsp_regs
#ifdef CONFIG_DSP_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	bbit0 r13, K_DSP_IDX, dsp_skip_save
	lr r13, [_ARC_V2_DSP_CTRL]
	st_s r13, [sp, ___callee_saved_stack_t_dsp_ctrl_OFFSET]
	lr r13, [_ARC_V2_ACC0_GLO]
	st_s r13, [sp, ___callee_saved_stack_t_acc0_glo_OFFSET]
	lr r13, [_ARC_V2_ACC0_GHI]
	st_s r13, [sp, ___callee_saved_stack_t_acc0_ghi_OFFSET]
#ifdef CONFIG_ARC_DSP_BFLY_SHARING
	lr r13, [_ARC_V2_DSP_BFLY0]
	st_s r13, [sp, ___callee_saved_stack_t_dsp_bfly0_OFFSET]
	lr r13, [_ARC_V2_DSP_FFT_CTRL]
	st_s r13, [sp, ___callee_saved_stack_t_dsp_fft_ctrl_OFFSET]
#endif
#endif
/* NOTE(review): named label in a macro -- assembles only if the macro is
 * expanded once per object; confirm single use.
 */
dsp_skip_save :
#ifdef CONFIG_ARC_AGU_SHARING
	_save_agu_regs
#endif
.endm
/* Save the outgoing thread's AGU registers (small set always; medium and
 * large sets per Kconfig) into the callee-saved area on its stack.
 * Skipped when the thread's user_options does not have the AGU bit
 * (K_AGU_IDX) set. Clobbers r13 (and r1 for the _st32_huge_offset
 * stores at the end of the large set).
 */
.macro _save_agu_regs
#ifdef CONFIG_ARC_AGU_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	btst r13, K_AGU_IDX
	jeq agu_skip_save
	/* small set: AP0-AP3, OS0-OS1, MOD0-MOD3 */
	lr r13, [_ARC_V2_AGU_AP0]
	st r13, [sp, ___callee_saved_stack_t_agu_ap0_OFFSET]
	lr r13, [_ARC_V2_AGU_AP1]
	st r13, [sp, ___callee_saved_stack_t_agu_ap1_OFFSET]
	lr r13, [_ARC_V2_AGU_AP2]
	st r13, [sp, ___callee_saved_stack_t_agu_ap2_OFFSET]
	lr r13, [_ARC_V2_AGU_AP3]
	st r13, [sp, ___callee_saved_stack_t_agu_ap3_OFFSET]
	lr r13, [_ARC_V2_AGU_OS0]
	st r13, [sp, ___callee_saved_stack_t_agu_os0_OFFSET]
	lr r13, [_ARC_V2_AGU_OS1]
	st r13, [sp, ___callee_saved_stack_t_agu_os1_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD0]
	st r13, [sp, ___callee_saved_stack_t_agu_mod0_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD1]
	st r13, [sp, ___callee_saved_stack_t_agu_mod1_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD2]
	st r13, [sp, ___callee_saved_stack_t_agu_mod2_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD3]
	st r13, [sp, ___callee_saved_stack_t_agu_mod3_OFFSET]
#ifdef CONFIG_ARC_AGU_MEDIUM
	/* medium set additions: AP4-AP7, OS2-OS3, MOD4-MOD11 */
	lr r13, [_ARC_V2_AGU_AP4]
	st r13, [sp, ___callee_saved_stack_t_agu_ap4_OFFSET]
	lr r13, [_ARC_V2_AGU_AP5]
	st r13, [sp, ___callee_saved_stack_t_agu_ap5_OFFSET]
	lr r13, [_ARC_V2_AGU_AP6]
	st r13, [sp, ___callee_saved_stack_t_agu_ap6_OFFSET]
	lr r13, [_ARC_V2_AGU_AP7]
	st r13, [sp, ___callee_saved_stack_t_agu_ap7_OFFSET]
	lr r13, [_ARC_V2_AGU_OS2]
	st r13, [sp, ___callee_saved_stack_t_agu_os2_OFFSET]
	lr r13, [_ARC_V2_AGU_OS3]
	st r13, [sp, ___callee_saved_stack_t_agu_os3_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD4]
	st r13, [sp, ___callee_saved_stack_t_agu_mod4_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD5]
	st r13, [sp, ___callee_saved_stack_t_agu_mod5_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD6]
	st r13, [sp, ___callee_saved_stack_t_agu_mod6_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD7]
	st r13, [sp, ___callee_saved_stack_t_agu_mod7_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD8]
	st r13, [sp, ___callee_saved_stack_t_agu_mod8_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD9]
	st r13, [sp, ___callee_saved_stack_t_agu_mod9_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD10]
	st r13, [sp, ___callee_saved_stack_t_agu_mod10_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD11]
	st r13, [sp, ___callee_saved_stack_t_agu_mod11_OFFSET]
#endif
#ifdef CONFIG_ARC_AGU_LARGE
	/* large set additions: AP8-AP11, OS4-OS7, MOD12-MOD23 */
	lr r13, [_ARC_V2_AGU_AP8]
	st r13, [sp, ___callee_saved_stack_t_agu_ap8_OFFSET]
	lr r13, [_ARC_V2_AGU_AP9]
	st r13, [sp, ___callee_saved_stack_t_agu_ap9_OFFSET]
	lr r13, [_ARC_V2_AGU_AP10]
	st r13, [sp, ___callee_saved_stack_t_agu_ap10_OFFSET]
	lr r13, [_ARC_V2_AGU_AP11]
	st r13, [sp, ___callee_saved_stack_t_agu_ap11_OFFSET]
	lr r13, [_ARC_V2_AGU_OS4]
	st r13, [sp, ___callee_saved_stack_t_agu_os4_OFFSET]
	lr r13, [_ARC_V2_AGU_OS5]
	st r13, [sp, ___callee_saved_stack_t_agu_os5_OFFSET]
	lr r13, [_ARC_V2_AGU_OS6]
	st r13, [sp, ___callee_saved_stack_t_agu_os6_OFFSET]
	lr r13, [_ARC_V2_AGU_OS7]
	st r13, [sp, ___callee_saved_stack_t_agu_os7_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD12]
	st r13, [sp, ___callee_saved_stack_t_agu_mod12_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD13]
	st r13, [sp, ___callee_saved_stack_t_agu_mod13_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD14]
	st r13, [sp, ___callee_saved_stack_t_agu_mod14_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD15]
	st r13, [sp, ___callee_saved_stack_t_agu_mod15_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD16]
	st r13, [sp, ___callee_saved_stack_t_agu_mod16_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD17]
	st r13, [sp, ___callee_saved_stack_t_agu_mod17_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD18]
	st r13, [sp, ___callee_saved_stack_t_agu_mod18_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD19]
	st r13, [sp, ___callee_saved_stack_t_agu_mod19_OFFSET]
	lr r13, [_ARC_V2_AGU_MOD20]
	st r13, [sp, ___callee_saved_stack_t_agu_mod20_OFFSET]
	/* the last offsets exceed the short-form st range, hence the helper */
	lr r13, [_ARC_V2_AGU_MOD21]
	_st32_huge_offset r13, sp, ___callee_saved_stack_t_agu_mod21_OFFSET, r1
	lr r13, [_ARC_V2_AGU_MOD22]
	_st32_huge_offset r13, sp, ___callee_saved_stack_t_agu_mod22_OFFSET, r1
	lr r13, [_ARC_V2_AGU_MOD23]
	_st32_huge_offset r13, sp, ___callee_saved_stack_t_agu_mod23_OFFSET, r1
#endif
#endif
agu_skip_save :
.endm
/* Restore the incoming thread's DSP context from its callee-saved area,
 * mirroring _save_dsp_regs, then chain into the AGU restore.
 * Skipped when the thread's user_options does not have the DSP bit
 * (K_DSP_IDX) set. Clobbers r13; assumes r2 points at the incoming
 * thread and sp at its callee-saved area -- TODO confirm.
 */
.macro _load_dsp_regs
#ifdef CONFIG_DSP_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	bbit0 r13, K_DSP_IDX, dsp_skip_load
	ld_s r13, [sp, ___callee_saved_stack_t_dsp_ctrl_OFFSET]
	sr r13, [_ARC_V2_DSP_CTRL]
	ld_s r13, [sp, ___callee_saved_stack_t_acc0_glo_OFFSET]
	sr r13, [_ARC_V2_ACC0_GLO]
	ld_s r13, [sp, ___callee_saved_stack_t_acc0_ghi_OFFSET]
	sr r13, [_ARC_V2_ACC0_GHI]
#ifdef CONFIG_ARC_DSP_BFLY_SHARING
	ld_s r13, [sp, ___callee_saved_stack_t_dsp_bfly0_OFFSET]
	sr r13, [_ARC_V2_DSP_BFLY0]
	ld_s r13, [sp, ___callee_saved_stack_t_dsp_fft_ctrl_OFFSET]
	sr r13, [_ARC_V2_DSP_FFT_CTRL]
#endif
#endif
dsp_skip_load :
#ifdef CONFIG_ARC_AGU_SHARING
	_load_agu_regs
#endif
.endm
/* Restore the incoming thread's AGU registers from its callee-saved
 * area, mirroring _save_agu_regs (small set always; medium and large
 * sets per Kconfig). Skipped when the thread's user_options does not
 * have the AGU bit (K_AGU_IDX) set. Clobbers r13.
 */
.macro _load_agu_regs
#ifdef CONFIG_ARC_AGU_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	btst r13, K_AGU_IDX
	jeq agu_skip_load
	/* small set: AP0-AP3, OS0-OS1, MOD0-MOD3 */
	ld r13, [sp, ___callee_saved_stack_t_agu_ap0_OFFSET]
	sr r13, [_ARC_V2_AGU_AP0]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap1_OFFSET]
	sr r13, [_ARC_V2_AGU_AP1]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap2_OFFSET]
	sr r13, [_ARC_V2_AGU_AP2]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap3_OFFSET]
	sr r13, [_ARC_V2_AGU_AP3]
	ld r13, [sp, ___callee_saved_stack_t_agu_os0_OFFSET]
	sr r13, [_ARC_V2_AGU_OS0]
	ld r13, [sp, ___callee_saved_stack_t_agu_os1_OFFSET]
	sr r13, [_ARC_V2_AGU_OS1]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod0_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD0]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod1_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD1]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod2_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD2]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod3_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD3]
#ifdef CONFIG_ARC_AGU_MEDIUM
	/* medium set additions: AP4-AP7, OS2-OS3, MOD4-MOD11 */
	ld r13, [sp, ___callee_saved_stack_t_agu_ap4_OFFSET]
	sr r13, [_ARC_V2_AGU_AP4]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap5_OFFSET]
	sr r13, [_ARC_V2_AGU_AP5]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap6_OFFSET]
	sr r13, [_ARC_V2_AGU_AP6]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap7_OFFSET]
	sr r13, [_ARC_V2_AGU_AP7]
	ld r13, [sp, ___callee_saved_stack_t_agu_os2_OFFSET]
	sr r13, [_ARC_V2_AGU_OS2]
	ld r13, [sp, ___callee_saved_stack_t_agu_os3_OFFSET]
	sr r13, [_ARC_V2_AGU_OS3]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod4_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD4]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod5_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD5]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod6_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD6]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod7_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD7]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod8_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD8]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod9_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD9]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod10_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD10]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod11_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD11]
#endif
#ifdef CONFIG_ARC_AGU_LARGE
	/* large set additions: AP8-AP11, OS4-OS7, MOD12-MOD23 */
	ld r13, [sp, ___callee_saved_stack_t_agu_ap8_OFFSET]
	sr r13, [_ARC_V2_AGU_AP8]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap9_OFFSET]
	sr r13, [_ARC_V2_AGU_AP9]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap10_OFFSET]
	sr r13, [_ARC_V2_AGU_AP10]
	ld r13, [sp, ___callee_saved_stack_t_agu_ap11_OFFSET]
	sr r13, [_ARC_V2_AGU_AP11]
	ld r13, [sp, ___callee_saved_stack_t_agu_os4_OFFSET]
	sr r13, [_ARC_V2_AGU_OS4]
	ld r13, [sp, ___callee_saved_stack_t_agu_os5_OFFSET]
	sr r13, [_ARC_V2_AGU_OS5]
	ld r13, [sp, ___callee_saved_stack_t_agu_os6_OFFSET]
	sr r13, [_ARC_V2_AGU_OS6]
	ld r13, [sp, ___callee_saved_stack_t_agu_os7_OFFSET]
	sr r13, [_ARC_V2_AGU_OS7]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod12_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD12]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod13_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD13]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod14_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD14]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod15_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD15]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod16_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD16]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod17_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD17]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod18_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD18]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod19_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD19]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod20_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD20]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod21_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD21]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod22_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD22]
	ld r13, [sp, ___callee_saved_stack_t_agu_mod23_OFFSET]
	sr r13, [_ARC_V2_AGU_MOD23]
#endif
#endif
agu_skip_load :
.endm
/* When CONFIG_ARC_DSP_TURNED_OFF is set, disable the DSP block at boot
 * by clearing the DSP_CTRL aux register. Clobbers r0.
 */
.macro _dsp_extension_probe
#ifdef CONFIG_ARC_DSP_TURNED_OFF
	mov r0, 0 /* DSP_CTRL_DISABLED_ALL */
	sr r0, [_ARC_V2_DSP_CTRL]
#endif
.endm
``` | /content/code_sandbox/arch/arc/core/dsp/swap_dsp_macros.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,623 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V6_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V6_INTERNAL_H_
#define AUX_MPU_EN_BANK_MASK BIT(0)
#define AUX_MPU_EN_IC BIT(12)
#define AUX_MPU_EN_DC BIT(13)
#define AUX_MPU_EN_ENABLE BIT(30)
#define AUX_MPU_EN_DISABLE ~BIT(30)
/*
* The size of the region is a 5-bit field, the three MSB bits are
* represented in [11:9] and the two LSB bits are represented in [1:0].
* Together these fields specify the size of the region in bytes:
* 00000-00011 Reserved
* 0x4 32 0x5 64 0x6 128 0x7 256
* 0x8 512 0x9 1k 0xA 2K 0xB 4K
* 0xC 8K 0xD 16K 0xE 32K 0xF 64K
* 0x10 128K 0x11 256K 0x12 512K 0x13 1M
* 0x14 2M 0x15 4M 0x16 8M 0x17 16M
* 0x18 32M 0x19 64M 0x1A 128M 0x1B 256M
* 0x1C 512M 0x1D 1G 0x1E 2G 0x1F 4G
*
* Bit ... 12 11 10 9 8 3 2 1 0
* ------+------------+------+---+-----------+
* ... | SIZE[11:9] | ATTR | R | SIZE[1:0] |
* ------+------------+------+---+-----------+
*/
/* arrange size into proper bit field in RDP aux reg*/
#define AUX_MPU_RDP_REGION_SIZE(size) (((size - 1) & BIT_MASK(2)) | \
(((size - 1) & (BIT_MASK(3) << 2)) << 7))
/* recover size from bit fields in RDP aux reg*/
#define AUX_MPU_RDP_SIZE_SHIFT(rdp) ((rdp & BIT_MASK(2)) | (((rdp >> 9) & BIT_MASK(3)) << 2))
#define AUX_MPU_RDB_VALID_MASK BIT(0)
#define AUX_MPU_RDP_ATTR_MASK (BIT_MASK(6) << 3)
#define AUX_MPU_RDP_SIZE_MASK ((BIT_MASK(3) << 9) | BIT_MASK(2))
/* Global code cacheability that applies to a region
* 0x0: (Default) Code is cacheable in all levels of the cache hierarchy
* 0x1: Code is not cacheable in any level of the cache hierarchy
*/
#define AUX_MPU_RDB_IC BIT(12)
/* Global data cacheability that applies to a region
* 0x0: (Default) Data is cacheable in all levels of the cache hierarchy
* 0x1: Data is not cacheable in any level of the cache hierarchy
*/
#define AUX_MPU_RDB_DC BIT(13)
/* Define a MPU region as non-volatile
* 0x0: (Default) The memory space for this MPU region is treated as a volatile uncached space.
* 0x1: The memory space for this MPU region is non-volatile
*/
#define AUX_MPU_RDB_NV BIT(14)
/* For MPU version 6, the minimum protection region size is 32 bytes */
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 5
#define ARC_FEATURE_MPU_BANK_SIZE 16
/**
* This internal function select a MPU bank
*/
/* Select the active MPU bank in the low bit of the MPU_EN aux register,
 * leaving every other enable/cache bit untouched.
 */
static inline void _bank_select(uint32_t bank)
{
	uint32_t en = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN);

	en &= ~AUX_MPU_EN_BANK_MASK;
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, en | bank);
}
/**
* This internal function initializes a MPU region
*/
static inline void _region_init(uint32_t index, uint32_t region_addr,
				uint32_t size, uint32_t region_attr)
{
	/* Regions are grouped in banks of ARC_FEATURE_MPU_BANK_SIZE; the
	 * RDB/RDP aux regs come in pairs, hence the *2 stride in a bank.
	 */
	uint32_t bank = index / ARC_FEATURE_MPU_BANK_SIZE;

	index = (index % ARC_FEATURE_MPU_BANK_SIZE) * 2U;
	if (size > 0) {
		/* Round size up to a power of two, with a floor of
		 * 32 bytes (2^ARC_FEATURE_MPU_ALIGNMENT_BITS).
		 */
		uint8_t bits = find_msb_set(size) - 1;

		if (bits < ARC_FEATURE_MPU_ALIGNMENT_BITS) {
			bits = ARC_FEATURE_MPU_ALIGNMENT_BITS;
		}
		if (BIT(bits) < size) {
			bits++;
		}
		/* Clear size bits and IC, DC bits, and set NV bit
		 * The default value of NV bit is 0 which means the region is
		 * volatile and uncached. Setting the NV bit here has no
		 * effect on mpu v6 but is for the forward compatibility to
		 * mpu v7. Currently we do not allow to toggle these bits
		 * until we implement the control of these region properties
		 * TODO: support uncacheable regions and volatile uncached
		 * regions
		 */
		region_attr &= ~(AUX_MPU_RDP_SIZE_MASK | AUX_MPU_RDB_IC | AUX_MPU_RDB_DC);
		region_attr |= AUX_MPU_RDP_REGION_SIZE(bits) | AUX_MPU_RDB_NV;
		region_addr |= AUX_MPU_RDB_VALID_MASK;
	} else {
		/* size 0 disables the region: clear base and valid bit */
		region_addr = 0U;
	}
	_bank_select(bank);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
}
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (i.e. THREAD_STACK_REGION) and return the correct region index.
*/
static inline int get_region_index_by_type(uint32_t type)
{
	/*
	 * The new MPU regions are allocated per type after the statically
	 * configured regions. The type is one-indexed rather than
	 * zero-indexed.
	 *
	 * For ARC MPU v6, the smaller index has higher priority, so the
	 * index is allocated in reverse order. Static regions start from
	 * the biggest index, then thread related regions.
	 *
	 */
	switch (type) {
	case THREAD_STACK_USER_REGION:
		/* one slot below the THREAD_STACK_REGION slot */
		return get_num_regions() - mpu_config.num_regions - THREAD_STACK_REGION;
	case THREAD_STACK_REGION:
	case THREAD_APP_DATA_REGION:
	case THREAD_DOMAIN_PARTITION_REGION:
		/*
		 * Start domain partition region from stack guard region
		 * since stack guard is not supported.
		 */
		return get_num_regions() - mpu_config.num_regions - type + 1;
	default:
		__ASSERT(0, "Unsupported type");
		return -EINVAL;
	}
}
/**
* This internal function checks if region is enabled or not
*/
/* Report whether region r_index has its valid bit set in RDB */
static inline bool _is_enabled_region(uint32_t r_index)
{
	uint32_t idx = (r_index % ARC_FEATURE_MPU_BANK_SIZE) * 2U;
	uint32_t rdb;

	_bank_select(r_index / ARC_FEATURE_MPU_BANK_SIZE);
	rdb = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + idx);

	return (rdb & AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK;
}
/**
* This internal function check if the given buffer is in the region
*/
static inline bool _is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{
	uint32_t r_addr_start;
	uint32_t r_addr_end;
	uint32_t r_size_lshift;
	uint32_t bank = r_index / ARC_FEATURE_MPU_BANK_SIZE;
	uint32_t index = (r_index % ARC_FEATURE_MPU_BANK_SIZE) * 2U;

	_bank_select(bank);
	/* region base lives in RDB with the valid bit masked off */
	r_addr_start = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + index) & (~AUX_MPU_RDB_VALID_MASK);
	/* decode the split size field; an encoded value s means 2^(s+1) bytes
	 * (see the size table at the top of this file)
	 */
	r_size_lshift = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + index) & AUX_MPU_RDP_SIZE_MASK;
	r_size_lshift = AUX_MPU_RDP_SIZE_SHIFT(r_size_lshift);
	r_addr_end = r_addr_start + (1 << (r_size_lshift + 1));
	/* NOTE(review): r_addr_end and (start + size) can wrap for regions
	 * reaching the top of the 4GB address space -- confirm callers
	 * never pass such ranges.
	 */
	if (start >= r_addr_start && (start + size) <= r_addr_end) {
		return true;
	}
	return false;
}
/**
* This internal function check if the region is user accessible or not
*/
/* Check whether user mode may access region r_index: for a write both
 * user and kernel write permission bits must be set, for a read both
 * read permission bits.
 */
static inline bool _is_user_accessible_region(uint32_t r_index, int write)
{
	uint32_t idx = (r_index % ARC_FEATURE_MPU_BANK_SIZE) * 2U;
	uint32_t perms;
	uint32_t required;

	_bank_select(r_index / ARC_FEATURE_MPU_BANK_SIZE);
	perms = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + idx) &
		AUX_MPU_RDP_ATTR_MASK;

	required = write ? (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)
			 : (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR);

	return (perms & required) == required;
}
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V6_INTERNAL_H_ */
``` | /content/code_sandbox/arch/arc/core/mpu/arc_mpu_v6_internal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,133 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V4_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V4_INTERNAL_H_
#define AUX_MPU_RPER_SID1 0x10000
/* valid mask: SID1+secure+valid */
#define AUX_MPU_RPER_VALID_MASK ((0x1) | AUX_MPU_RPER_SID1 | AUX_MPU_ATTR_S)
#define AUX_MPU_RPER_ATTR_MASK (0x1FF)
/* For MPU version 4, the minimum protection region size is 32 bytes */
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 5
#define CALC_REGION_END_ADDR(start, size) \
(start + size - (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS))
/* ARC MPU version 4 does not support mpu region overlap in hardware
* so if we want to allocate MPU region dynamically, e.g. thread stack,
* memory domain from a background region, a dynamic region splitting
* approach is designed. pls see comments in
* _dynamic_region_allocate_and_init
* But this approach has an impact on performance of thread switch.
* As a trade off, we can use the default mpu region as the background region
* to avoid the dynamic region splitting. This will give more privilege to
* codes in kernel mode which can access the memory region not covered by
* explicit mpu entry. Considering memory protection is mainly used to
* isolate malicious codes in user mode, it makes sense to get better
* thread switch performance through default mpu region.
* CONFIG_MPU_GAP_FILLING is used to turn this on/off.
*
*/
#if defined(CONFIG_MPU_GAP_FILLING)
#if defined(CONFIG_USERSPACE) && defined(CONFIG_MPU_STACK_GUARD)
/* 1 for stack guard , 1 for user thread, 1 for split */
#define MPU_REGION_NUM_FOR_THREAD 3
#elif defined(CONFIG_USERSPACE) || defined(CONFIG_MPU_STACK_GUARD)
/* 1 for stack guard or user thread stack , 1 for split */
#define MPU_REGION_NUM_FOR_THREAD 2
#else
#define MPU_REGION_NUM_FOR_THREAD 0
#endif
#define MPU_DYNAMIC_REGION_AREAS_NUM 2
/**
* @brief internal structure holding information of
* memory areas where dynamic MPU programming is allowed.
*/
struct dynamic_region_info {
uint8_t index;
uint32_t base;
uint32_t size;
uint32_t attr;
};
static uint8_t dynamic_regions_num;
static uint8_t dynamic_region_index;
/**
* Global array, holding the MPU region index of
* the memory region inside which dynamic memory
* regions may be configured.
*/
static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
#endif /* CONFIG_MPU_GAP_FILLING */
static uint8_t static_regions_num;
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
/* \todo through secure service to access mpu */
/* Normal (non-secure) firmware cannot touch the MPU hardware directly;
 * these no-op stubs keep the common code building until a secure MPU
 * service exists (see the \todo above). Probe/permission queries report
 * "not found" / "not accessible".
 */
static inline void _region_init(uint32_t index, uint32_t region_addr, uint32_t size,
				uint32_t region_attr)
{
}
static inline void _region_set_attr(uint32_t index, uint32_t attr)
{
}
static inline uint32_t _region_get_attr(uint32_t index)
{
	return 0;
}
static inline uint32_t _region_get_start(uint32_t index)
{
	return 0;
}
static inline void _region_set_start(uint32_t index, uint32_t start)
{
}
static inline uint32_t _region_get_end(uint32_t index)
{
	return 0;
}
static inline void _region_set_end(uint32_t index, uint32_t end)
{
}
/**
 * This internal function probes the given addr's MPU index. If it is not
 * in the MPU, it returns an error.
 */
static inline int _mpu_probe(uint32_t addr)
{
	return -EINVAL;
}
/**
 * This internal function checks if an MPU region is enabled or not
 */
static inline bool _is_enabled_region(uint32_t r_index)
{
	return false;
}
/**
 * This internal function checks if the region is user accessible or not
 */
static inline bool _is_user_accessible_region(uint32_t r_index, int write)
{
	return false;
}
#else /* CONFIG_ARC_NORMAL_FIRMWARE */
/* the following functions are prepared for SECURE_FIRMWARE */
/* Program region 'index' with the given base, size and attributes; a
 * region_attr of 0 leaves the valid bits clear (region disabled).
 */
static inline void _region_init(uint32_t index, uint32_t region_addr, uint32_t size,
				uint32_t region_attr)
{
	/* hardware minimum region size is 32 bytes; clamp up */
	if (size < (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS)) {
		size = (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
	}
	/* non-zero attrs are marked valid (SID1 + secure + valid bits) */
	if (region_attr) {
		region_attr &= AUX_MPU_RPER_ATTR_MASK;
		region_attr |= AUX_MPU_RPER_VALID_MASK;
	}
	/* select the region via the INDEX aux reg, then program it */
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
			       CALC_REGION_END_ADDR(region_addr, size));
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
}
/* Set region 'index' permissions/attributes; marks the region valid. */
static inline void _region_set_attr(uint32_t index, uint32_t attr)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, attr |
			       AUX_MPU_RPER_VALID_MASK);
}
/* Read back region 'index' permissions/attributes (raw RPER value). */
static inline uint32_t _region_get_attr(uint32_t index)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
}
/* Read region 'index' start address. */
static inline uint32_t _region_get_start(uint32_t index)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RSTART);
}
/* Set region 'index' start address. */
static inline void _region_set_start(uint32_t index, uint32_t start)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, start);
}
/* Read region 'index' exclusive end address (REND holds the last
 * 32-byte-aligned address, hence the +32 adjustment).
 */
static inline uint32_t _region_get_end(uint32_t index)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	return z_arc_v2_aux_reg_read(_ARC_V2_MPU_REND) +
	       (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
}
/* Set region 'index' exclusive end address (inverse of _region_get_end). */
static inline void _region_set_end(uint32_t index, uint32_t end)
{
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND, end -
			       (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS));
}
/**
 * This internal function probes the given addr's MPU index. If the
 * address is not covered by any MPU region, an error is returned.
*/
static inline int _mpu_probe(uint32_t addr)
{
	uint32_t index;

	/* writing the address to MPU_PROBE makes the hardware report the
	 * matching region index in MPU_INDEX
	 */
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
	index = z_arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);

	/* the two top bits signal "no match" / "multiple regions match" */
	return (index & 0xC0000000) ? -EINVAL : (int)index;
}
/**
* This internal function checks if MPU region is enabled or not
*/
static inline bool _is_enabled_region(uint32_t r_index)
{
	uint32_t rper;

	/* select the region, then read back its permission register */
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
	rper = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);

	return (rper & AUX_MPU_RPER_VALID_MASK) == AUX_MPU_RPER_VALID_MASK;
}
/**
* This internal function check if the region is user accessible or not
*/
static inline bool _is_user_accessible_region(uint32_t r_index, int write)
{
	uint32_t perms;
	uint32_t required;

	/* select the region and fetch its permission attribute bits */
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
	perms = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) & AUX_MPU_RPER_ATTR_MASK;

	/* access requires both the user and the kernel bit to be granted */
	required = write ? (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)
			 : (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR);

	return (perms & required) == required;
}
#endif /* CONFIG_ARC_NORMAL_FIRMWARE */
/**
* This internal function checks the area given by (start, size)
* and returns the index if the area match one MPU entry
*/
/**
 * This internal function checks the area given by (start, size)
 * and returns the index if the whole area lies inside one MPU entry
 *
 * @param start area start address
 * @param size  area size in bytes
 * @return region index on match, -EINVAL otherwise
 */
static inline int _get_region_index(uint32_t start, uint32_t size)
{
	int index;

	if (size == 0U) {
		/* avoid probing start - 1 for an empty area */
		return -EINVAL;
	}

	index = _mpu_probe(start);
	/*
	 * _mpu_probe() returns any non-negative region index on a unique
	 * match, and region 0 is a valid entry (static regions are
	 * programmed from index 0 in arc_mpu_init()).  The previous
	 * "index > 0" test rejected region 0 and was inconsistent with
	 * arc_core_mpu_buffer_validate(), which accepts r_index >= 0.
	 */
	if (index >= 0 && index == _mpu_probe(start + size - 1)) {
		return index;
	}
	return -EINVAL;
}
#if defined(CONFIG_MPU_GAP_FILLING)
/**
* This internal function allocates a dynamic MPU region and returns
* the index or error
*/
static inline int _dynamic_region_allocate_index(void)
{
	/* hand out the next free entry after the static regions, if any */
	if (dynamic_region_index < get_num_regions()) {
		return dynamic_region_index++;
	}

	LOG_ERR("no enough mpu entries %d", dynamic_region_index);
	return -EINVAL;
}
/* @brief allocate and init a dynamic MPU region
*
* This internal function performs the allocation and initialization of
* a dynamic MPU region
*
* @param base region base
* @param size region size
* @param attr region attribute
* @return <0 failure, >0 allocated dynamic region index
*/
static int _dynamic_region_allocate_and_init(uint32_t base, uint32_t size,
					     uint32_t attr)
{
	int u_region_index = _get_region_index(base, size);
	int region_index;
	LOG_DBG("Region info: base 0x%x size 0x%x attr 0x%x", base, size, attr);
	if (u_region_index == -EINVAL) {
		/* no underlying region */
		region_index = _dynamic_region_allocate_index();
		/* NOTE(review): "> 0" skips index 0; dynamic indices start at
		 * static_regions_num, so this only matters when there are no
		 * static regions — confirm intent.
		 */
		if (region_index > 0) {
			/* a new region */
			_region_init(region_index, base, size, attr);
		}
		return region_index;
	}
	/*
	 * The new memory region is to be placed inside the underlying
	 * region, possibly splitting the underlying region into two.
	 */
	uint32_t u_region_start = _region_get_start(u_region_index);
	uint32_t u_region_end = _region_get_end(u_region_index);
	uint32_t u_region_attr = _region_get_attr(u_region_index);
	uint32_t end = base + size;
	if ((base == u_region_start) && (end == u_region_end)) {
		/* The new region overlaps entirely with the
		 * underlying region. In this case we simply
		 * update the partition attributes of the
		 * underlying region with those of the new
		 * region.
		 */
		_region_init(u_region_index, base, size, attr);
		region_index = u_region_index;
	} else if (base == u_region_start) {
		/* The new region starts exactly at the start of the
		 * underlying region; the start of the underlying
		 * region needs to be set to the end of the new region.
		 */
		_region_set_start(u_region_index, base + size);
		_region_set_attr(u_region_index, u_region_attr);
		region_index = _dynamic_region_allocate_index();
		if (region_index > 0) {
			_region_init(region_index, base, size, attr);
		}
	} else if (end == u_region_end) {
		/* The new region ends exactly at the end of the
		 * underlying region; the end of the underlying
		 * region needs to be set to the start of the
		 * new region.
		 */
		_region_set_end(u_region_index, base);
		_region_set_attr(u_region_index, u_region_attr);
		region_index = _dynamic_region_allocate_index();
		if (region_index > 0) {
			_region_init(region_index, base, size, attr);
		}
	} else {
		/* The new region lies strictly inside the
		 * underlying region, which needs to split
		 * into two regions.
		 */
		_region_set_end(u_region_index, base);
		_region_set_attr(u_region_index, u_region_attr);
		region_index = _dynamic_region_allocate_index();
		if (region_index > 0) {
			_region_init(region_index, base, size, attr);
			/* the second allocation covers the tail left over
			 * after the split
			 */
			region_index = _dynamic_region_allocate_index();
			if (region_index > 0) {
				_region_init(region_index, base + size,
					     u_region_end - end, u_region_attr);
			}
		}
	}
	return region_index;
}
/* @brief reset the dynamic MPU regions
*
* This internal function performs the reset of dynamic MPU regions
*/
static void _mpu_reset_dynamic_regions(void)
{
	uint32_t i;
	uint32_t num_regions = get_num_regions();
	/* invalidate every entry past the static ones */
	for (i = static_regions_num; i < num_regions; i++) {
		_region_init(i, 0, 0, 0);
	}
	/* restore the splittable static regions recorded by arc_mpu_init() */
	for (i = 0U; i < dynamic_regions_num; i++) {
		_region_init(
			dyn_reg_info[i].index,
			dyn_reg_info[i].base,
			dyn_reg_info[i].size,
			dyn_reg_info[i].attr);
	}
	/* dynamic regions are after static regions */
	dynamic_region_index = static_regions_num;
}
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
static inline int _mpu_configure(uint8_t type, uint32_t base, uint32_t size)
{
	uint32_t region_attr = get_region_attr_by_type(type);
	/* with gap filling, every per-type region is carved out of the
	 * underlying static region dynamically
	 */
	return _dynamic_region_allocate_and_init(base, size, region_attr);
}
#else
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (i.e. THREAD_STACK_REGION) and return the correct region index.
*/
static inline int get_region_index_by_type(uint32_t type)
{
	/*
	 * The new MPU regions are allocated per type after the statically
	 * configured regions. The type is one-indexed rather than
	 * zero-indexed.
	 *
	 * NOTE(review): unlike the v2 driver (which allocates in reverse
	 * order because a smaller index wins on overlap there), this code
	 * allocates indices forward from static_regions_num.
	 */
	switch (type) {
	case THREAD_STACK_USER_REGION:
		/* the user stack reuses the slot of THREAD_STACK_REGION */
		return static_regions_num + THREAD_STACK_REGION;
	case THREAD_STACK_REGION:
	case THREAD_APP_DATA_REGION:
	case THREAD_STACK_GUARD_REGION:
		return static_regions_num + type;
	case THREAD_DOMAIN_PARTITION_REGION:
#if defined(CONFIG_MPU_STACK_GUARD)
		return static_regions_num + type;
#else
		/*
		 * Start domain partition region from stack guard region
		 * since stack guard is not enabled.
		 */
		return static_regions_num + type - 1;
#endif
	default:
		__ASSERT(0, "Unsupported type");
		return -EINVAL;
	}
}
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
static inline int _mpu_configure(uint8_t type, uint32_t base, uint32_t size)
{
	int idx = get_region_index_by_type(type);
	uint32_t attr = get_region_attr_by_type(type);

	LOG_DBG("Region info: 0x%x 0x%x", base, size);

	/* reject unknown types and types without attributes */
	if (idx < 0 || attr == 0U) {
		return -EINVAL;
	}

	_region_init(idx, base, size, attr);
	return 0;
}
#endif
/* ARC Core MPU Driver API Implementation for ARC MPUv3 */
/**
* @brief enable the MPU
*/
void arc_core_mpu_enable(void)
{
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* the default region:
	 * secure:0x8000, SID:0x10000, KW:0x100 KR:0x80
	 */
#define MPU_ENABLE_ATTR 0x18180
#else
#define MPU_ENABLE_ATTR 0
#endif
	/* this MPU is always on (see arc_core_mpu_disable()); "enable" just
	 * restricts the default region's attributes
	 */
	arc_core_mpu_default(MPU_ENABLE_ATTR);
}
/**
* @brief disable the MPU
*/
void arc_core_mpu_disable(void)
{
	/* MPU is always enabled, use default region to
	 * simulate MPU disable: grant all accesses by default
	 */
	arc_core_mpu_default(REGION_ALL_ATTR | AUX_MPU_ATTR_S |
			     AUX_MPU_RPER_SID1);
}
/**
* @brief configure the thread's mpu regions
*
* @param thread the target thread
*/
void arc_core_mpu_configure_thread(struct k_thread *thread)
{
#if defined(CONFIG_MPU_GAP_FILLING)
	/* the mpu entries of ARC MPUv4 are divided into 2 parts:
	 * static entries: global mpu entries, not changed in context switch
	 * dynamic entries: MPU entries changed in context switch and
	 * memory domain configure, including:
	 *    MPU entries for user thread stack
	 *    MPU entries for stack guard
	 *    MPU entries for mem domain
	 *    MPU entries for other thread specific regions
	 * before configuring thread specific mpu entries, need to reset dynamic
	 * entries
	 */
	_mpu_reset_dynamic_regions();
#endif
#if defined(CONFIG_MPU_STACK_GUARD)
	uint32_t guard_start;
	/* Set location of guard area when the thread is running in
	 * supervisor mode. For a supervisor thread, this is just low
	 * memory in the stack buffer. For a user thread, it only runs
	 * in supervisor mode when handling a system call on the privilege
	 * elevation stack.
	 */
#if defined(CONFIG_USERSPACE)
	if ((thread->base.user_options & K_USER) != 0U) {
		guard_start = thread->arch.priv_stack_start;
	} else
#endif
	{
		guard_start = thread->stack_info.start;
	}
	/* the guard sits immediately below the stack chosen above */
	guard_start -= Z_ARC_STACK_GUARD_SIZE;
	if (_mpu_configure(THREAD_STACK_GUARD_REGION, guard_start,
			   Z_ARC_STACK_GUARD_SIZE) < 0) {
		LOG_ERR("thread %p's stack guard failed", thread);
		return;
	}
#endif /* CONFIG_MPU_STACK_GUARD */
#if defined(CONFIG_USERSPACE)
	/* configure stack region of user thread */
	if (thread->base.user_options & K_USER) {
		LOG_DBG("configure user thread %p's stack", thread);
		if (_mpu_configure(THREAD_STACK_USER_REGION,
				   (uint32_t)thread->stack_info.start,
				   thread->stack_info.size) < 0) {
			LOG_ERR("thread %p's stack failed", thread);
			return;
		}
	}
#if defined(CONFIG_MPU_GAP_FILLING)
	uint32_t num_partitions;
	struct k_mem_partition *pparts;
	struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;
	/* configure thread's memory domain */
	if (mem_domain) {
		LOG_DBG("configure thread %p's domain: %p",
			thread, mem_domain);
		num_partitions = mem_domain->num_partitions;
		pparts = mem_domain->partitions;
	} else {
		num_partitions = 0U;
		pparts = NULL;
	}
	for (uint32_t i = 0; i < num_partitions; i++) {
		if (pparts->size) {
			/* carve each partition out of its underlying region */
			if (_dynamic_region_allocate_and_init(pparts->start,
				pparts->size, pparts->attr) < 0) {
				LOG_ERR(
					"thread %p's mem region: %p failed",
					thread, pparts);
				return;
			}
		}
		pparts++;
	}
#else
	arc_core_mpu_configure_mem_domain(thread);
#endif
#endif
}
/**
* @brief configure the default region
*
* @param region_attr region attribute of default region
*/
void arc_core_mpu_default(uint32_t region_attr)
{
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
	/* \todo through secure service to access mpu */
#else
	/* the default-region attributes live in the MPU_EN aux register */
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr);
#endif
}
/**
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
* @param size region size
* @param region_attr region attribute
*/
int arc_core_mpu_region(uint32_t index, uint32_t base, uint32_t size,
			uint32_t region_attr)
{
	/* only attribute bits may be programmed by callers */
	if (index < get_num_regions()) {
		_region_init(index, base, size,
			     region_attr & AUX_MPU_RPER_ATTR_MASK);
		return 0;
	}

	return -EINVAL;
}
#if defined(CONFIG_USERSPACE)
/**
* @brief configure MPU regions for the memory partitions of the memory domain
*
* @param thread the thread which has memory domain
*/
#if defined(CONFIG_MPU_GAP_FILLING)
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
	/* with gap filling, domain regions are rebuilt together with all
	 * other per-thread regions, so reconfigure the whole thread
	 */
	arc_core_mpu_configure_thread(thread);
}
#else
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
	uint32_t region_index;
	uint32_t num_partitions;
	uint32_t num_regions;
	struct k_mem_partition *pparts;
	struct k_mem_domain *mem_domain = NULL;
	/* a NULL thread (or a thread without a domain) disables all
	 * domain partition regions
	 */
	if (thread) {
		mem_domain = thread->mem_domain_info.mem_domain;
	}
	if (mem_domain) {
		LOG_DBG("configure domain: %p", mem_domain);
		num_partitions = mem_domain->num_partitions;
		pparts = mem_domain->partitions;
	} else {
		LOG_DBG("disable domain partition regions");
		num_partitions = 0U;
		pparts = NULL;
	}
	num_regions = get_num_regions();
	region_index = get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
	/* program one MPU entry per non-empty partition, in order */
	while (num_partitions && region_index < num_regions) {
		if (pparts->size > 0) {
			LOG_DBG("set region 0x%x 0x%lx 0x%x",
				region_index, pparts->start, pparts->size);
			_region_init(region_index, pparts->start,
				     pparts->size, pparts->attr);
			region_index++;
		}
		pparts++;
		num_partitions--;
	}
	while (region_index < num_regions) {
		/* clear the left mpu entries */
		_region_init(region_index, 0, 0, 0);
		region_index++;
	}
}
#endif
/**
* @brief remove MPU regions for the memory partitions of the memory domain
*
* @param mem_domain the target memory domain
*/
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
{
	uint32_t num_partitions;
	struct k_mem_partition *pparts;
	int index;
	if (mem_domain) {
		LOG_DBG("configure domain: %p", mem_domain);
		num_partitions = mem_domain->num_partitions;
		pparts = mem_domain->partitions;
	} else {
		LOG_DBG("disable domain partition regions");
		num_partitions = 0U;
		pparts = NULL;
	}
	for (uint32_t i = 0; i < num_partitions; i++) {
		if (pparts->size) {
			index = _get_region_index(pparts->start,
						  pparts->size);
			/* NOTE(review): "index > 0" skips region index 0,
			 * while arc_core_mpu_buffer_validate() accepts
			 * indices >= 0 — confirm intent.
			 */
			if (index > 0) {
#if defined(CONFIG_MPU_GAP_FILLING)
				/* keep the entry, downgrade to kernel RAM */
				_region_set_attr(index,
						 REGION_KERNEL_RAM_ATTR);
#else
				_region_init(index, 0, 0, 0);
#endif
			}
		}
		pparts++;
	}
}
/**
* @brief reset MPU region for a single memory partition
*
* @param partition_id memory partition id
*/
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
				       uint32_t partition_id)
{
	struct k_mem_partition *partition = &domain->partitions[partition_id];
	int region_index = _get_region_index(partition->start,
					     partition->size);
	if (region_index < 0) {
		/* partition is not currently mapped to any MPU entry */
		return;
	}
	LOG_DBG("remove region 0x%x", region_index);
#if defined(CONFIG_MPU_GAP_FILLING)
	/* keep the entry but downgrade it to kernel-only RAM access */
	_region_set_attr(region_index, REGION_KERNEL_RAM_ATTR);
#else
	_region_init(region_index, 0, 0, 0);
#endif
}
/**
* @brief get the maximum number of free regions for memory domain partitions
*/
int arc_core_mpu_get_max_domain_partition_regions(void)
{
#if defined(CONFIG_MPU_GAP_FILLING)
	/* consider the worst case: each partition requires split */
	return (get_num_regions() - MPU_REGION_NUM_FOR_THREAD) / 2;
#else
	/* everything below THREAD_DOMAIN_PARTITION_REGION is reserved for
	 * static and per-thread regions.
	 * NOTE(review): the extra "- 1" holds back one more entry — TODO
	 * confirm what it is reserved for.
	 */
	return get_num_regions() -
	       get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) - 1;
#endif
}
/**
* @brief validate the given buffer is user accessible or not
*/
int arc_core_mpu_buffer_validate(const void *addr, size_t size, int write)
{
	int r_index;
	/* lock IRQs so the two probes below observe a consistent MPU state */
	int key = arch_irq_lock();
	/*
	 * For ARC MPU v4, overlapping is not supported.
	 * we can stop the iteration immediately once we find the
	 * matched region that grants permission or denies access.
	 */
	r_index = _mpu_probe((uint32_t)addr);
	/* match and the area is in one region */
	if (r_index >= 0 && r_index == _mpu_probe((uint32_t)addr + (size - 1))) {
		if (_is_user_accessible_region(r_index, write)) {
			r_index = 0;
		} else {
			r_index = -EPERM;
		}
	} else {
		/* no unique region covers the whole buffer */
		r_index = -EPERM;
	}
	arch_irq_unlock(key);
	return r_index;
}
#endif /* CONFIG_USERSPACE */
/* ARC MPU Driver Initial Setup */
/*
* @brief MPU default initialization and configuration
*
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
static int arc_mpu_init(void)
{
	uint32_t num_regions;
	uint32_t i;
	num_regions = get_num_regions();
	/* the static configuration must fit the hardware's region count */
	if (mpu_config.num_regions > num_regions) {
		__ASSERT(0,
			 "Request to configure: %u regions (supported: %u)\n",
			 mpu_config.num_regions, num_regions);
		return -EINVAL;
	}
	static_regions_num = 0U;
	/* Disable MPU */
	arc_core_mpu_disable();
	for (i = 0U; i < mpu_config.num_regions; i++) {
		/* skip empty region */
		if (mpu_config.mpu_regions[i].size == 0) {
			continue;
		}
#if defined(CONFIG_MPU_GAP_FILLING)
		_region_init(static_regions_num,
			     mpu_config.mpu_regions[i].base,
			     mpu_config.mpu_regions[i].size,
			     mpu_config.mpu_regions[i].attr);
		/* record the static region which can be split */
		if (mpu_config.mpu_regions[i].attr & REGION_DYNAMIC) {
			if (dynamic_regions_num >=
			    MPU_DYNAMIC_REGION_AREAS_NUM) {
				LOG_ERR("not enough dynamic regions %d",
					dynamic_regions_num);
				return -EINVAL;
			}
			dyn_reg_info[dynamic_regions_num].index = i;
			dyn_reg_info[dynamic_regions_num].base =
				mpu_config.mpu_regions[i].base;
			dyn_reg_info[dynamic_regions_num].size =
				mpu_config.mpu_regions[i].size;
			dyn_reg_info[dynamic_regions_num].attr =
				mpu_config.mpu_regions[i].attr;
			dynamic_regions_num++;
		}
		static_regions_num++;
#else
		/* dynamic region will be covered by default mpu setting
		 * no need to configure
		 */
		if (!(mpu_config.mpu_regions[i].attr & REGION_DYNAMIC)) {
			_region_init(static_regions_num,
				     mpu_config.mpu_regions[i].base,
				     mpu_config.mpu_regions[i].size,
				     mpu_config.mpu_regions[i].attr);
			static_regions_num++;
		}
#endif
	}
	/* invalidate all remaining (unused) entries */
	for (i = static_regions_num; i < num_regions; i++) {
		_region_init(i, 0, 0, 0);
	}
	/* Enable MPU */
	arc_core_mpu_enable();
	return 0;
}
/* run MPU setup before the kernel starts scheduling threads */
SYS_INIT(arc_mpu_init, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V4_INTERNAL_H_ */
``` | /content/code_sandbox/arch/arc/core/mpu/arc_mpu_v4_internal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,140 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_
#define AUX_MPU_EN_ENABLE BIT(30)
#define AUX_MPU_EN_DISABLE ~BIT(30)
/*
* The size of the region is a 5-bit field, the three MSB bits are
* represented in [11:9] and the two LSB bits are represented in [1:0].
* Together these fields specify the size of the region in bytes:
* 00000-00011 Reserved
* 0x4 32 0x5 64 0x6 128 0x7 256
* 0x8 512 0x9 1k 0xA 2K 0xB 4K
* 0xC 8K 0xD 16K 0xE 32K 0xF 64K
* 0x10 128K 0x11 256K 0x12 512K 0x13 1M
* 0x14 2M 0x15 4M 0x16 8M 0x17 16M
* 0x18 32M 0x19 64M 0x1A 128M 0x1B 256M
* 0x1C 512M 0x1D 1G 0x1E 2G 0x1F 4G
*
* Bit ... 12 11 10 9 8 3 2 1 0
* ------+------------+------+---+-----------+
* ... | SIZE[11:9] | ATTR | R | SIZE[1:0] |
* ------+------------+------+---+-----------+
*/
/* arrange size into proper bit field in RDP aux reg*/
#define AUX_MPU_RDP_REGION_SIZE(size) (((size - 1) & BIT_MASK(2)) | \
(((size - 1) & (BIT_MASK(3) << 2)) << 7))
/* recover size from bit fields in RDP aux reg*/
#define AUX_MPU_RDP_SIZE_SHIFT(rdp) ((rdp & BIT_MASK(2)) | (((rdp >> 9) & BIT_MASK(3)) << 2))
#define AUX_MPU_RDB_VALID_MASK BIT(0)
#define AUX_MPU_RDP_ATTR_MASK (BIT_MASK(6) << 3)
#define AUX_MPU_RDP_SIZE_MASK ((BIT_MASK(3) << 9) | BIT_MASK(2))
/* For MPU version 2, the minimum protection region size is 2048 bytes */
#if CONFIG_ARC_MPU_VER == 2
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 11
/* For MPU version 3, the minimum protection region size is 32 bytes */
#else
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 5
#endif
/**
* This internal function initializes a MPU region
*/
static inline void _region_init(uint32_t index, uint32_t region_addr, uint32_t size,
				uint32_t region_attr)
{
	/* each region owns a pair of aux registers: RDB (base + valid bit)
	 * and RDP (attributes + encoded power-of-two size)
	 */
	index = index * 2U;
	if (size > 0) {
		uint8_t bits = find_msb_set(size) - 1;
		/* enforce the minimum region size for this MPU version */
		if (bits < ARC_FEATURE_MPU_ALIGNMENT_BITS) {
			bits = ARC_FEATURE_MPU_ALIGNMENT_BITS;
		}
		/* round a non-power-of-two size up to the next power of two */
		if (BIT(bits) < size) {
			bits++;
		}
		region_attr &= ~(AUX_MPU_RDP_SIZE_MASK);
		region_attr |= AUX_MPU_RDP_REGION_SIZE(bits);
		region_addr |= AUX_MPU_RDB_VALID_MASK;
	} else {
		/* size 0 disables the region: clear base and valid bit */
		region_addr = 0U;
	}
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
}
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (i.e. THREAD_STACK_REGION) and return the correct region index.
*/
static inline int get_region_index_by_type(uint32_t type)
{
	/*
	 * The new MPU regions are allocated per type after the statically
	 * configured regions. The type is one-indexed rather than
	 * zero-indexed.
	 *
	 * For ARC MPU v2, the smaller index has higher priority, so the
	 * index is allocated in reverse order. Static regions start from
	 * the biggest index, then thread related regions.
	 *
	 */
	switch (type) {
	case THREAD_STACK_USER_REGION:
		/* user stack reuses the slot computed for THREAD_STACK_REGION */
		return get_num_regions() - mpu_config.num_regions - THREAD_STACK_REGION;
	case THREAD_STACK_REGION:
	case THREAD_APP_DATA_REGION:
	case THREAD_DOMAIN_PARTITION_REGION:
		/*
		 * Start domain partition region from stack guard region
		 * since stack guard is not supported.
		 */
		return get_num_regions() - mpu_config.num_regions - type + 1;
	default:
		__ASSERT(0, "Unsupported type");
		return -EINVAL;
	}
}
/**
* This internal function checks if region is enabled or not
*/
static inline bool _is_enabled_region(uint32_t r_index)
{
	/* a region is enabled when the valid bit of its RDB register is set */
	uint32_t rdb = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + r_index * 2U);

	return (rdb & AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK;
}
/**
* This internal function check if the given buffer is in the region
*/
/**
 * This internal function check if the given buffer is in the region
 *
 * @param r_index MPU region index
 * @param start   buffer start address
 * @param size    buffer size in bytes
 * @return true if [start, start + size) lies within the region
 */
static inline bool _is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{
	uint32_t r_addr_start;
	uint32_t r_size_lshift;
	uint64_t r_addr_end;

	r_addr_start = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + r_index * 2U)
		       & (~AUX_MPU_RDB_VALID_MASK);
	r_size_lshift = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + r_index * 2U)
			& AUX_MPU_RDP_SIZE_MASK;
	r_size_lshift = AUX_MPU_RDP_SIZE_SHIFT(r_size_lshift);
	/*
	 * Region size is 2^(r_size_lshift + 1) bytes.  Do the arithmetic in
	 * 64 bits: for a 4 GiB region (r_size_lshift == 31) the former
	 * "1 << 32" was undefined behavior and the end address wrapped to 0
	 * (and "1 << 31" overflowed signed int); "start + size" could also
	 * wrap for buffers near the top of the 32-bit address space.
	 */
	r_addr_end = (uint64_t)r_addr_start +
		     ((uint64_t)1 << (r_size_lshift + 1));
	if (start >= r_addr_start && ((uint64_t)start + size) <= r_addr_end) {
		return true;
	}
	return false;
}
/**
* This internal function check if the region is user accessible or not
*/
static inline bool _is_user_accessible_region(uint32_t r_index, int write)
{
	/* fetch the region's permission attribute bits from RDP */
	uint32_t perms = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + r_index * 2U)
			 & AUX_MPU_RDP_ATTR_MASK;
	/* both the user and the kernel access bit must be granted */
	uint32_t required = write ? (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)
				  : (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR);

	return (perms & required) == required;
}
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_ */
``` | /content/code_sandbox/arch/arc/core/mpu/arc_mpu_v2_internal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,602 |
```unknown
# Memory Protection Unit (MPU) configuration options
config ARC_MPU_VER
int "ARC MPU version"
range 2 8
default 2
help
ARC MPU has several versions. For MPU v2, the minimum region is 2048 bytes;
For other versions, the minimum region is 32 bytes; v4 has secure features,
v6 supports up to 32 regions. Note: MPU v5 & v7 are not supported.
config ARC_CORE_MPU
bool "ARC Core MPU functionalities"
help
ARC core MPU functionalities
config MPU_STACK_GUARD
bool "Thread Stack Guards"
depends on ARC_CORE_MPU && ARC_MPU_VER !=2
help
Enable thread stack guards via MPU. ARC supports built-in stack protection.
If your core supports that, it is preferred over MPU stack guard.
	  For ARC_MPU_VER == 2, a guard would require 2048 extra bytes and strict
	  start-address alignment, wasting a large amount of memory, so it is not
	  supported there.
config ARC_MPU
bool "ARC MPU Support"
select MPU
select SRAM_REGION_PERMISSIONS
select ARC_CORE_MPU
select THREAD_STACK_INFO
select GEN_PRIV_STACKS if !(ARC_MPU_VER = 4 || ARC_MPU_VER = 8)
select MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT if !(ARC_MPU_VER = 4 || ARC_MPU_VER = 8)
select MPU_REQUIRES_NON_OVERLAPPING_REGIONS if (ARC_MPU_VER = 4 || ARC_MPU_VER = 8)
select ARCH_MEM_DOMAIN_SUPPORTS_ISOLATED_STACKS
select MEM_DOMAIN_ISOLATED_STACKS
help
Target has ARC MPU
``` | /content/code_sandbox/arch/arc/core/mpu/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 356 |
```c
/*
*
*/
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#include <zephyr/kernel_structs.h>
/*
* @brief Configure MPU for the thread
*
* This function configures per thread memory map reprogramming the MPU.
*
* @param thread thread info data structure.
*/
void configure_mpu_thread(struct k_thread *thread)
{
	/* keep the MPU disabled while its regions are being reprogrammed */
	arc_core_mpu_disable();
	arc_core_mpu_configure_thread(thread);
	arc_core_mpu_enable();
}
#if defined(CONFIG_USERSPACE)
int arch_mem_domain_max_partitions_get(void)
{
	/* delegate to the MPU driver, which knows the free region budget */
	return arc_core_mpu_get_max_domain_partition_regions();
}
/*
 * Validate the given buffer is user accessible or not
 */
int arch_buffer_validate(const void *addr, size_t size, int write)
{
	/* thin wrapper: the MPU driver performs the actual permission probe */
	return arc_core_mpu_buffer_validate(addr, size, write);
}
#endif
``` | /content/code_sandbox/arch/arc/core/mpu/arc_core_mpu.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 203 |
```objective-c
/*
*
*/
/**
* @file
* @brief Private kernel definitions
*
* This file contains private kernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtains structure offset values via "absolute
* symbols" in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_FUNC_H_
#if !defined(_ASMLANGUAGE)
#include <kernel_arch_data.h>
#include <v2/irq.h>
#ifdef __cplusplus
extern "C" {
#endif
static ALWAYS_INLINE void arch_kernel_init(void)
{
	/* arch-specific kernel init: set up the interrupt unit */
	z_irq_setup();
}
/**
*
* @brief Indicates the interrupt number of the highest priority
* active interrupt
*
* @return IRQ number
*/
static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void)
{
	/* ICAUSE holds the number of the highest-priority active interrupt */
	return (int)z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
}
static inline bool arch_is_in_isr(void)
{
	/* query the ARCv2 interrupt unit for active-interrupt state */
	return z_arc_v2_irq_unit_is_in_isr();
}
extern void z_thread_entry_wrapper(void);
extern void z_user_thread_entry_wrapper(void);
extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
void *p2, void *p3, uint32_t stack, uint32_t size,
struct k_thread *thread);
extern void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf);
extern void z_arc_switch(void *switch_to, void **switched_from);
static inline void arch_switch(void *switch_to, void **switched_from)
{
	/* thin wrapper over the assembly context-switch routine */
	z_arc_switch(switch_to, switched_from);
}
#if !defined(CONFIG_MULTITHREADING)
extern FUNC_NORETURN void z_arc_switch_to_main_no_multithreading(
k_thread_entry_t main_func, void *p1, void *p2, void *p3);
#define ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING \
z_arc_switch_to_main_no_multithreading
#endif /* !CONFIG_MULTITHREADING */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_FUNC_H_ */
``` | /content/code_sandbox/arch/arc/include/kernel_arch_func.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 485 |
```objective-c
/*
*
*/
/**
* @file
* @brief Private kernel definitions
*
* This file contains private kernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtains structure offset values via "absolute
* symbols" in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_DATA_H_
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <vector_table.h>
#ifndef _ASMLANGUAGE
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/dlist.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_ARC_HAS_SECURE
/* Exception stack frame (secure configuration).
 * NOTE(review): the field order presumably mirrors the assembly
 * save/restore sequence (offsets come from offsets.o) — confirm against
 * the ISR entry code before reordering any field.
 */
struct arch_esf {
#ifdef CONFIG_ARC_HAS_ZOL
	/* zero-overhead loop state */
	uintptr_t lp_end;
	uintptr_t lp_start;
	uintptr_t lp_count;
#endif /* CONFIG_ARC_HAS_ZOL */
#ifdef CONFIG_CODE_DENSITY
	/*
	 * Currently unsupported. This is where those registers are
	 * automatically pushed on the stack by the CPU when taking a regular
	 * IRQ.
	 */
	uintptr_t ei_base;
	uintptr_t ldi_base;
	uintptr_t jli_base;
#endif
	uintptr_t r0;
	uintptr_t r1;
	uintptr_t r2;
	uintptr_t r3;
	uintptr_t r4;
	uintptr_t r5;
	uintptr_t r6;
	uintptr_t r7;
	uintptr_t r8;
	uintptr_t r9;
	uintptr_t r10;
	uintptr_t r11;
	uintptr_t r12;
	uintptr_t r13;
	uintptr_t blink;
	uintptr_t pc;
	uintptr_t sec_stat;
	uintptr_t status32;
};
#else
/* Exception stack frame (non-secure configuration): caller-saved registers
 * first, then ZOL / code-density state, then pc and status32.
 */
struct arch_esf {
	uintptr_t r0;
	uintptr_t r1;
	uintptr_t r2;
	uintptr_t r3;
	uintptr_t r4;
	uintptr_t r5;
	uintptr_t r6;
	uintptr_t r7;
	uintptr_t r8;
	uintptr_t r9;
	uintptr_t r10;
	uintptr_t r11;
	uintptr_t r12;
	uintptr_t r13;
	uintptr_t blink;
#ifdef CONFIG_ARC_HAS_ZOL
	/* zero-overhead loop state */
	uintptr_t lp_end;
	uintptr_t lp_start;
	uintptr_t lp_count;
#endif /* CONFIG_ARC_HAS_ZOL */
#ifdef CONFIG_CODE_DENSITY
	/*
	 * Currently unsupported. This is where those registers are
	 * automatically pushed on the stack by the CPU when taking a regular
	 * IRQ.
	 */
	uintptr_t ei_base;
	uintptr_t ldi_base;
	uintptr_t jli_base;
#endif
	uintptr_t pc;
	uintptr_t status32;
};
#endif
typedef struct arch_esf _isf_t;
/* callee-saved registers pushed on the stack, not in k_thread */
/* NOTE(review): this layout is consumed through offsets generated in the
 * offsets.o module (see file header) — keep field order in sync with the
 * assembly context-switch code.
 */
struct _callee_saved_stack {
	uintptr_t r13;
	uintptr_t r14;
	uintptr_t r15;
	uintptr_t r16;
	uintptr_t r17;
	uintptr_t r18;
	uintptr_t r19;
	uintptr_t r20;
	uintptr_t r21;
	uintptr_t r22;
	uintptr_t r23;
	uintptr_t r24;
	uintptr_t r25;
	uintptr_t r26;
	uintptr_t fp; /* r27 */
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
	uintptr_t user_sp;
	uintptr_t kernel_sp;
#else
	uintptr_t user_sp;
#endif
#endif
	/* r28 is the stack pointer and saved separately */
	/* r29 is ILINK and does not need to be saved */
	uintptr_t r30;
#ifdef CONFIG_ARC_HAS_ACCL_REGS
	/* accumulator register(s) */
	uintptr_t r58;
#ifndef CONFIG_64BIT
	uintptr_t r59;
#endif /* !CONFIG_64BIT */
#endif
#ifdef CONFIG_FPU_SHARING
	/* per-thread floating-point unit state */
	uintptr_t fpu_status;
	uintptr_t fpu_ctrl;
#ifdef CONFIG_FP_FPU_DA
	uintptr_t dpfp2h;
	uintptr_t dpfp2l;
	uintptr_t dpfp1h;
	uintptr_t dpfp1l;
#endif
#endif
#ifdef CONFIG_DSP_SHARING
	/* per-thread DSP unit state */
#ifdef CONFIG_ARC_DSP_BFLY_SHARING
	uintptr_t dsp_fft_ctrl;
	uintptr_t dsp_bfly0;
#endif
	uintptr_t acc0_ghi;
	uintptr_t acc0_glo;
	uintptr_t dsp_ctrl;
#endif
#ifdef CONFIG_ARC_AGU_SHARING
	/* per-thread address-generation unit state */
	uintptr_t agu_ap0;
	uintptr_t agu_ap1;
	uintptr_t agu_ap2;
	uintptr_t agu_ap3;
	uintptr_t agu_os0;
	uintptr_t agu_os1;
	uintptr_t agu_mod0;
	uintptr_t agu_mod1;
	uintptr_t agu_mod2;
	uintptr_t agu_mod3;
#ifdef CONFIG_ARC_AGU_MEDIUM
	uintptr_t agu_ap4;
	uintptr_t agu_ap5;
	uintptr_t agu_ap6;
	uintptr_t agu_ap7;
	uintptr_t agu_os2;
	uintptr_t agu_os3;
	uintptr_t agu_mod4;
	uintptr_t agu_mod5;
	uintptr_t agu_mod6;
	uintptr_t agu_mod7;
	uintptr_t agu_mod8;
	uintptr_t agu_mod9;
	uintptr_t agu_mod10;
	uintptr_t agu_mod11;
#endif
#ifdef CONFIG_ARC_AGU_LARGE
	uintptr_t agu_ap8;
	uintptr_t agu_ap9;
	uintptr_t agu_ap10;
	uintptr_t agu_ap11;
	uintptr_t agu_os4;
	uintptr_t agu_os5;
	uintptr_t agu_os6;
	uintptr_t agu_os7;
	uintptr_t agu_mod12;
	uintptr_t agu_mod13;
	uintptr_t agu_mod14;
	uintptr_t agu_mod15;
	uintptr_t agu_mod16;
	uintptr_t agu_mod17;
	uintptr_t agu_mod18;
	uintptr_t agu_mod19;
	uintptr_t agu_mod20;
	uintptr_t agu_mod21;
	uintptr_t agu_mod22;
	uintptr_t agu_mod23;
#endif
#endif
	/*
	 * No need to save r31 (blink), it's either already pushed as the pc or
	 * blink on an irq stack frame.
	 */
};
typedef struct _callee_saved_stack _callee_saved_stack_t;
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_DATA_H_ */
``` | /content/code_sandbox/arch/arc/include/kernel_arch_data.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,445 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_COMMON_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_COMMON_INTERNAL_H_
#if CONFIG_ARC_MPU_VER == 2 || CONFIG_ARC_MPU_VER == 3
#include "arc_mpu_v2_internal.h"
#elif CONFIG_ARC_MPU_VER == 6
#include "arc_mpu_v6_internal.h"
#else
#error "Unsupported MPU version"
#endif
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
static inline int _mpu_configure(uint8_t type, uint32_t base, uint32_t size)
{
	int32_t idx = get_region_index_by_type(type);
	uint32_t attr = get_region_attr_by_type(type);

	LOG_DBG("Region info: 0x%x 0x%x", base, size);

	/* reject unknown types and types without attributes */
	if (idx < 0 || attr == 0U) {
		return -EINVAL;
	}

	/*
	 * For ARC MPU, MPU regions can be overlapped, smaller
	 * region index has higher priority.
	 */
	_region_init(idx, base, size, attr);
	return 0;
}
/* ARC Core MPU Driver API Implementation for ARC MP */
/**
* @brief enable the MPU
*/
void arc_core_mpu_enable(void)
{
/* Enable MPU */
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);
}
/**
* @brief disable the MPU
*/
void arc_core_mpu_disable(void)
{
/* Disable MPU */
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
}
/**
 * @brief configure the thread's MPU regions
 *
 * For user threads the stack region is mapped first; the thread's memory
 * domain partitions are then programmed. No-op unless CONFIG_USERSPACE.
 *
 * @param thread the target thread
 */
void arc_core_mpu_configure_thread(struct k_thread *thread)
{
#if defined(CONFIG_USERSPACE)
	/* A user-mode thread additionally needs its own stack mapped */
	if ((thread->base.user_options & K_USER) != 0U) {
		LOG_DBG("configure user thread %p's stack", thread);

		int ret = _mpu_configure(THREAD_STACK_USER_REGION,
					 (uint32_t)thread->stack_info.start,
					 thread->stack_info.size);

		if (ret < 0) {
			LOG_ERR("user thread %p's stack failed", thread);
			return;
		}
	}

	LOG_DBG("configure thread %p's domain", thread);
	arc_core_mpu_configure_mem_domain(thread);
#endif
}
/**
 * @brief configure the default region
 *
 * Replaces the attribute bits of the MPU_EN aux register with the given
 * attributes while keeping the remaining (non-attribute) bits intact.
 *
 * @param region_attr region attribute of default region
 */
void arc_core_mpu_default(uint32_t region_attr)
{
	uint32_t en = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN);

	en &= ~AUX_MPU_RDP_ATTR_MASK;
	en |= (region_attr & AUX_MPU_RDP_ATTR_MASK);
	z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, en);
}
/**
 * @brief configure the MPU region
 *
 * @param index MPU region index
 * @param base base address
 * @param size size of the region
 * @param region_attr region attribute (masked to valid attribute bits)
 * @return 0 on success, -EINVAL if index is out of range
 */
int arc_core_mpu_region(uint32_t index, uint32_t base, uint32_t size, uint32_t region_attr)
{
	if (index >= get_num_regions()) {
		return -EINVAL;
	}

	_region_init(index, base, size, region_attr & AUX_MPU_RDP_ATTR_MASK);

	return 0;
}
#if defined(CONFIG_USERSPACE)
/**
 * @brief configure MPU regions for the memory partitions of the memory domain
 *
 * Walks the domain-partition MPU slots from the highest index down, programming
 * one partition per slot and clearing any slots left over. Passing a thread
 * without a domain (or NULL) clears all partition slots.
 *
 * @param thread the thread which has memory domain (may be NULL)
 */
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
	int region_index = get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
	uint32_t num_partitions;
	struct k_mem_partition *pparts;
	struct k_mem_domain *mem_domain = NULL;

	if (thread) {
		mem_domain = thread->mem_domain_info.mem_domain;
	}

	if (mem_domain) {
		LOG_DBG("configure domain: %p", mem_domain);
		num_partitions = mem_domain->num_partitions;
		pparts = mem_domain->partitions;
	} else {
		LOG_DBG("disable domain partition regions");
		num_partitions = 0U;
		pparts = NULL;
	}

	for (; region_index >= 0; region_index--) {
		if (num_partitions) {
			LOG_DBG("set region 0x%x 0x%lx 0x%x",
				region_index, pparts->start, pparts->size);
			_region_init(region_index, pparts->start, pparts->size, pparts->attr);
			num_partitions--;
			/*
			 * Only advance while a partition was actually consumed.
			 * The previous code incremented pparts unconditionally,
			 * performing arithmetic on a NULL pointer (no domain) or
			 * past one-past-the-end of the partition array, which is
			 * undefined behavior.
			 */
			pparts++;
		} else {
			/* clear the remaining MPU entries */
			_region_init(region_index, 0, 0, 0);
		}
	}
}
/**
 * @brief remove MPU regions for the memory partitions of the memory domain
 *
 * Clears every domain-partition MPU slot.
 *
 * @param mem_domain the target memory domain (unused)
 */
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
{
	ARG_UNUSED(mem_domain);

	int idx = get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);

	while (idx >= 0) {
		_region_init(idx, 0, 0, 0);
		idx--;
	}
}
/**
 * @brief reset MPU region for a single memory partition
 *
 * @param domain the target memory domain (unused)
 * @param part_id memory partition id, offset from the first partition slot
 */
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain, uint32_t part_id)
{
	ARG_UNUSED(domain);

	int base_index = get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);

	LOG_DBG("disable region 0x%x", base_index + part_id);
	/* Disable region */
	_region_init(base_index + part_id, 0, 0, 0);
}
/**
 * @brief get the maximum number of free regions for memory domain partitions
 *
 * Slots 0..index (inclusive) are available, hence index + 1 regions.
 */
int arc_core_mpu_get_max_domain_partition_regions(void)
{
	int last_index = get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);

	return last_index + 1;
}
/**
 * @brief validate the given buffer is user accessible or not
 *
 * On ARC the lowest-numbered region has priority, so the first enabled
 * region that contains the buffer decides the outcome; anything not covered
 * by a region is denied.
 *
 * @return 0 if accessible, -EPERM otherwise
 */
int arc_core_mpu_buffer_validate(const void *addr, size_t size, int write)
{
	int num = get_num_regions();

	for (int i = 0; i < num; i++) {
		if (!_is_enabled_region(i)) {
			continue;
		}
		if (!_is_in_region(i, (uint32_t)addr, size)) {
			continue;
		}
		return _is_user_accessible_region(i, write) ? 0 : -EPERM;
	}

	return -EPERM;
}
#endif /* CONFIG_USERSPACE */
/* ARC MPU Driver Initial Setup */
/*
 * @brief MPU default initialization and configuration
 *
 * This function provides the default configuration mechanism for the Memory
 * Protection Unit (MPU): it clears unused slots, programs the static
 * background regions from mpu_config, sets a deny-all default region and
 * enables the MPU.
 *
 * @return 0 on success, -EINVAL if more regions are requested than supported
 */
static int arc_mpu_init(void)
{
	uint32_t num_regions = get_num_regions();

	if (mpu_config.num_regions > num_regions) {
		__ASSERT(0, "Request to configure: %u regions (supported: %u)\n",
			 mpu_config.num_regions, num_regions);
		return -EINVAL;
	}

	/* Disable MPU */
	arc_core_mpu_disable();

	/*
	 * the MPU regions are filled in the reverse order.
	 * According to ARCv2 ISA, the MPU region with smaller
	 * index has higher priority. The static background MPU
	 * regions in mpu_config will be in the bottom. Then
	 * the special type regions will be above.
	 *
	 * r_index is unsigned: the range check above guarantees it cannot
	 * go negative, and this avoids the signed/unsigned comparison the
	 * previous 'int r_index' caused in the loops below.
	 */
	uint32_t r_index = num_regions - mpu_config.num_regions;

	/* clear all the regions first */
	for (uint32_t i = 0U; i < r_index; i++) {
		_region_init(i, 0, 0, 0);
	}

	/* configure the static regions */
	for (uint32_t i = 0U; i < mpu_config.num_regions; i++) {
		_region_init(r_index, mpu_config.mpu_regions[i].base,
			     mpu_config.mpu_regions[i].size, mpu_config.mpu_regions[i].attr);
		r_index++;
	}

	/* default region: no read, write and execute */
	arc_core_mpu_default(0);

	/* Enable MPU */
	arc_core_mpu_enable();

	return 0;
}
SYS_INIT(arc_mpu_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_COMMON_INTERNAL_H_ */
``` | /content/code_sandbox/arch/arc/core/mpu/arc_mpu_common_internal.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,916 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_ERR_DUMP_HANDLING_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_ERR_DUMP_HANDLING_H_
#if defined CONFIG_LOG
/* Route exception-dump output through the logging subsystem when enabled */
#define ARC_EXCEPTION_DUMP(...) LOG_ERR(__VA_ARGS__)
#else
/* Logging disabled: fall back to raw printk, appending the newline LOG_ERR
 * would otherwise provide
 */
#define ARC_EXCEPTION_DUMP(format, ...) printk(format "\n", ##__VA_ARGS__)
#endif
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_ERR_DUMP_HANDLING_H_ */
``` | /content/code_sandbox/arch/arc/include/err_dump_handling.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 88 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_ARC_IRQ_OFFLOAD_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_ARC_IRQ_OFFLOAD_H_
#ifdef CONFIG_IRQ_OFFLOAD
/* Arch IRQ-offload setup; the 'unused' device pointer exists only to match
 * the init-function signature
 */
int arc_irq_offload_init(const struct device *unused);

/* Wrapper presumably invoked on each CPU during SMP bring-up — confirm at
 * call sites; simply delegates to arc_irq_offload_init()
 */
static inline void arc_irq_offload_init_smp(void)
{
	arc_irq_offload_init(NULL);
}
#else
/* IRQ offload disabled: nothing to initialize */
static inline void arc_irq_offload_init_smp(void) {}
#endif /* CONFIG_IRQ_OFFLOAD */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_ARC_IRQ_OFFLOAD_H_ */
``` | /content/code_sandbox/arch/arc/include/arc_irq_offload.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 116 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
#include <zephyr/offsets.h>
/* kernel */
/* nothing for now */
/* end - kernel */
/* threads */
#define _thread_offset_to_relinquish_cause \
(___thread_t_arch_OFFSET + ___thread_arch_t_relinquish_cause_OFFSET)
#define _thread_offset_to_k_stack_base \
(___thread_t_arch_OFFSET + ___thread_arch_t_k_stack_base_OFFSET)
#define _thread_offset_to_k_stack_top \
(___thread_t_arch_OFFSET + ___thread_arch_t_k_stack_top_OFFSET)
#define _thread_offset_to_u_stack_base \
(___thread_t_arch_OFFSET + ___thread_arch_t_u_stack_base_OFFSET)
#define _thread_offset_to_u_stack_top \
(___thread_t_arch_OFFSET + ___thread_arch_t_u_stack_top_OFFSET)
#define _thread_offset_to_priv_stack_start \
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
#define _thread_offset_to_sp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
/* end - threads */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_OFFSETS_SHORT_ARCH_H_ */
``` | /content/code_sandbox/arch/arc/include/offsets_short_arch.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 261 |
```objective-c
/*
*
*/
/**
* @file
* @brief Definitions for the exception vector table
*
*
* Definitions for the boot vector table.
*
* System exception handler names all have the same format:
*
* __<exception name with underscores>
*
* Refer to the ARCv2 manual for an explanation of the exceptions.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_VECTOR_TABLE_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_VECTOR_TABLE_H_
#define EXC_EV_TRAP 0x9
#ifdef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
GTEXT(__start)
GTEXT(_VectorTable)
GTEXT(__reset)
GTEXT(__memory_error)
GTEXT(__instruction_error)
GTEXT(__ev_machine_check)
GTEXT(__ev_tlb_miss_i)
GTEXT(__ev_tlb_miss_d)
GTEXT(__ev_prot_v)
GTEXT(__ev_privilege_v)
GTEXT(__ev_swi)
GTEXT(__ev_trap)
GTEXT(__ev_extension)
GTEXT(__ev_div_zero)
GTEXT(__ev_dc_error)
GTEXT(__ev_maligned)
GTEXT(z_prep_c)
GTEXT(_isr_wrapper)
#else
#ifdef __cplusplus
extern "C" {
#endif
extern void __reset(void);
extern void __memory_error(void);
extern void __instruction_error(void);
extern void __ev_machine_check(void);
extern void __ev_tlb_miss_i(void);
extern void __ev_tlb_miss_d(void);
extern void __ev_prot_v(void);
extern void __ev_privilege_v(void);
extern void __ev_swi(void);
extern void __ev_trap(void);
extern void __ev_extension(void);
extern void __ev_div_zero(void);
extern void __ev_dc_error(void);
extern void __ev_maligned(void);
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_VECTOR_TABLE_H_ */
``` | /content/code_sandbox/arch/arc/include/vector_table.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 405 |
```objective-c
/*
*
*/
/**
* @file
* @brief Interrupt helper functions (ARC)
*
* This file contains private kernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_V2_IRQ_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_V2_IRQ_H_
#include <zephyr/arch/cpu.h>
#ifdef __cplusplus
extern "C" {
#endif
#define _ARC_V2_AUX_IRQ_CTRL_BLINK (1 << 9)
#define _ARC_V2_AUX_IRQ_CTRL_LOOP_REGS (1 << 10)
#define _ARC_V2_AUX_IRQ_CTRL_U (1 << 11)
#define _ARC_V2_AUX_IRQ_CTRL_LP (1 << 13)
#define _ARC_V2_AUX_IRQ_CTRL_14_REGS 7
#define _ARC_V2_AUX_IRQ_CTRL_16_REGS 8
#define _ARC_V2_AUX_IRQ_CTRL_32_REGS 16
#ifdef CONFIG_ARC_SECURE_FIRMWARE
#define _ARC_V2_DEF_IRQ_LEVEL (ARC_N_IRQ_START_LEVEL - 1)
#else
#define _ARC_V2_DEF_IRQ_LEVEL (CONFIG_NUM_IRQ_PRIO_LEVELS - 1)
#endif
#define _ARC_V2_WAKE_IRQ_LEVEL _ARC_V2_DEF_IRQ_LEVEL
/*
* INIT_IRQ_LOCK_KEY is init interrupt level setting of a thread.
* It's configured by seti instruction when a thread starts to run
*, i.e., z_thread_entry_wrapper and z_user_thread_entry_wrapper
*/
#define _ARC_V2_INIT_IRQ_LOCK_KEY (0x10 | _ARC_V2_DEF_IRQ_LEVEL)
#ifndef _ASMLANGUAGE
/*
 * z_irq_setup
 *
 * Configures interrupt handling parameters: selects which registers the
 * hardware auto-saves on interrupt entry (via AUX_IRQ_CTRL) and sets the
 * wake-from-sleep interrupt level.
 */
static ALWAYS_INLINE void z_irq_setup(void)
{
	uint32_t aux_irq_ctrl_value = (
#ifdef CONFIG_ARC_HAS_ZOL
		_ARC_V2_AUX_IRQ_CTRL_LOOP_REGS | /* save lp_xxx registers */
#endif /* CONFIG_ARC_HAS_ZOL */
#ifdef CONFIG_CODE_DENSITY
		_ARC_V2_AUX_IRQ_CTRL_LP | /* save code density registers */
#endif
		_ARC_V2_AUX_IRQ_CTRL_BLINK | /* save blink */
		_ARC_V2_AUX_IRQ_CTRL_14_REGS /* save r0 -> r13 (caller-saved) */
	);

	z_arc_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;

#ifdef CONFIG_ARC_NORMAL_FIRMWARE
	/* normal mode cannot write irq_ctrl; discard the computed value
	 * explicitly rather than via self-assignment, which some compilers
	 * still warn about
	 */
	(void)aux_irq_ctrl_value;
#else
	z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
#endif
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_V2_IRQ_H_ */
``` | /content/code_sandbox/arch/arc/include/v2/irq.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 573 |
```objective-c
/* swap_macros.h - helper macros for context switch */
/*
*
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_
#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/arc/tool-compat.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
#include <zephyr/kernel.h>
#include "../core/dsp/swap_dsp_macros.h"
#ifdef _ASMLANGUAGE
/* Save the callee-saved registers of the current thread.
 * r2 must hold a pointer to the outgoing thread's struct k_thread; the
 * registers are pushed onto that thread's stack and the resulting stack
 * pointer is stored back into the thread struct at the end.
 */
.macro _save_callee_saved_regs
SUBR sp, sp, ___callee_saved_stack_t_SIZEOF
/* save regs on stack */
STR r13, sp, ___callee_saved_stack_t_r13_OFFSET
STR r14, sp, ___callee_saved_stack_t_r14_OFFSET
STR r15, sp, ___callee_saved_stack_t_r15_OFFSET
STR r16, sp, ___callee_saved_stack_t_r16_OFFSET
STR r17, sp, ___callee_saved_stack_t_r17_OFFSET
STR r18, sp, ___callee_saved_stack_t_r18_OFFSET
STR r19, sp, ___callee_saved_stack_t_r19_OFFSET
STR r20, sp, ___callee_saved_stack_t_r20_OFFSET
STR r21, sp, ___callee_saved_stack_t_r21_OFFSET
STR r22, sp, ___callee_saved_stack_t_r22_OFFSET
STR r23, sp, ___callee_saved_stack_t_r23_OFFSET
STR r24, sp, ___callee_saved_stack_t_r24_OFFSET
STR r25, sp, ___callee_saved_stack_t_r25_OFFSET
STR r26, sp, ___callee_saved_stack_t_r26_OFFSET
STR fp, sp, ___callee_saved_stack_t_fp_OFFSET
#ifdef CONFIG_USERSPACE
/* also bank the user (and, where present, kernel) stack-pointer aux regs */
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr r13, [_ARC_V2_SEC_U_SP]
st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
lr r13, [_ARC_V2_SEC_K_SP]
st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#else
lr r13, [_ARC_V2_USER_SP]
st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
lr r13, [_ARC_V2_KERNEL_SP]
st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
lr r13, [_ARC_V2_USER_SP]
st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
#endif
#endif
STR r30, sp, ___callee_saved_stack_t_r30_OFFSET
#ifdef CONFIG_ARC_HAS_ACCL_REGS
/* accumulator registers (r59 only exists as a separate reg on 32-bit) */
STR r58, sp, ___callee_saved_stack_t_r58_OFFSET
#ifndef CONFIG_64BIT
STR r59, sp, ___callee_saved_stack_t_r59_OFFSET
#endif /* !CONFIG_64BIT */
#endif
#ifdef CONFIG_FPU_SHARING
/* skip the FPU save unless the thread's user_options has the FP bit set */
ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
bbit0 r13, K_FP_IDX, fpu_skip_save
lr r13, [_ARC_V2_FPU_STATUS]
st_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
lr r13, [_ARC_V2_FPU_CTRL]
st_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]
#ifdef CONFIG_FP_FPU_DA
lr r13, [_ARC_V2_FPU_DPFP1L]
st_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
lr r13, [_ARC_V2_FPU_DPFP1H]
st_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
lr r13, [_ARC_V2_FPU_DPFP2L]
st_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
lr r13, [_ARC_V2_FPU_DPFP2H]
st_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
#endif
#endif
fpu_skip_save :
_save_dsp_regs
/* save stack pointer in struct k_thread */
STR sp, r2, _thread_offset_to_sp
.endm
/* Load the callee-saved registers of the incoming thread (pointed to by r2).
 * Mirror image of _save_callee_saved_regs: restores sp from the thread
 * struct first, then pops the register block and releases the stack space.
 */
.macro _load_callee_saved_regs
/* restore stack pointer from struct k_thread */
LDR sp, r2, _thread_offset_to_sp
#ifdef CONFIG_ARC_HAS_ACCL_REGS
LDR r58, sp, ___callee_saved_stack_t_r58_OFFSET
#ifndef CONFIG_64BIT
LDR r59, sp, ___callee_saved_stack_t_r59_OFFSET
#endif /* !CONFIG_64BIT */
#endif
#ifdef CONFIG_FPU_SHARING
/* skip the FPU restore unless the thread's user_options has the FP bit set */
ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
bbit0 r13, K_FP_IDX, fpu_skip_load
ld_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
sr r13, [_ARC_V2_FPU_STATUS]
ld_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]
sr r13, [_ARC_V2_FPU_CTRL]
#ifdef CONFIG_FP_FPU_DA
ld_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
sr r13, [_ARC_V2_FPU_DPFP1L]
ld_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
sr r13, [_ARC_V2_FPU_DPFP1H]
ld_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
sr r13, [_ARC_V2_FPU_DPFP2L]
ld_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
sr r13, [_ARC_V2_FPU_DPFP2H]
#endif
#endif
fpu_skip_load :
_load_dsp_regs
#ifdef CONFIG_USERSPACE
/* restore the banked user (and kernel) stack-pointer aux regs */
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_SEC_U_SP]
ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
sr r13, [_ARC_V2_SEC_K_SP]
#else
ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_USER_SP]
ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
sr r13, [_ARC_V2_KERNEL_SP]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_USER_SP]
#endif
#endif
LDR r13, sp, ___callee_saved_stack_t_r13_OFFSET
LDR r14, sp, ___callee_saved_stack_t_r14_OFFSET
LDR r15, sp, ___callee_saved_stack_t_r15_OFFSET
LDR r16, sp, ___callee_saved_stack_t_r16_OFFSET
LDR r17, sp, ___callee_saved_stack_t_r17_OFFSET
LDR r18, sp, ___callee_saved_stack_t_r18_OFFSET
LDR r19, sp, ___callee_saved_stack_t_r19_OFFSET
LDR r20, sp, ___callee_saved_stack_t_r20_OFFSET
LDR r21, sp, ___callee_saved_stack_t_r21_OFFSET
LDR r22, sp, ___callee_saved_stack_t_r22_OFFSET
LDR r23, sp, ___callee_saved_stack_t_r23_OFFSET
LDR r24, sp, ___callee_saved_stack_t_r24_OFFSET
LDR r25, sp, ___callee_saved_stack_t_r25_OFFSET
LDR r26, sp, ___callee_saved_stack_t_r26_OFFSET
LDR fp, sp, ___callee_saved_stack_t_fp_OFFSET
LDR r30, sp, ___callee_saved_stack_t_r30_OFFSET
/* release the register-save area */
ADDR sp, sp, ___callee_saved_stack_t_SIZEOF
.endm
/* Discard callee regs: drop the saved-register area from the stack without
 * restoring anything (used when the saved context is no longer needed).
 */
.macro _discard_callee_saved_regs
ADDR sp, sp, ___callee_saved_stack_t_SIZEOF
.endm
/*
 * Build an interrupt stack frame (caller-saved regs, blink, and the
 * loop/code-density aux state where configured).
 * Must be called with interrupts locked or in P0.
 * Upon exit, sp will be pointing to the stack frame.
 */
.macro _create_irq_stack_frame
SUBR sp, sp, ___isf_t_SIZEOF
STR blink, sp, ___isf_t_blink_OFFSET
/* store these right away so we can use them if needed */
STR r13, sp, ___isf_t_r13_OFFSET
STR r12, sp, ___isf_t_r12_OFFSET
STR r11, sp, ___isf_t_r11_OFFSET
STR r10, sp, ___isf_t_r10_OFFSET
STR r9, sp, ___isf_t_r9_OFFSET
STR r8, sp, ___isf_t_r8_OFFSET
STR r7, sp, ___isf_t_r7_OFFSET
STR r6, sp, ___isf_t_r6_OFFSET
STR r5, sp, ___isf_t_r5_OFFSET
STR r4, sp, ___isf_t_r4_OFFSET
STR r3, sp, ___isf_t_r3_OFFSET
STR r2, sp, ___isf_t_r2_OFFSET
STR r1, sp, ___isf_t_r1_OFFSET
STR r0, sp, ___isf_t_r0_OFFSET
#ifdef CONFIG_ARC_HAS_ZOL
/* zero-overhead-loop state: lp_count plus LP_START/LP_END aux regs */
MOVR r0, lp_count
STR r0, sp, ___isf_t_lp_count_OFFSET
LRR r1, [_ARC_V2_LP_START]
LRR r0, [_ARC_V2_LP_END]
STR r1, sp, ___isf_t_lp_start_OFFSET
STR r0, sp, ___isf_t_lp_end_OFFSET
#endif /* CONFIG_ARC_HAS_ZOL */
#ifdef CONFIG_CODE_DENSITY
/* code-density base registers (JLI/LDI/EI tables) */
lr r1, [_ARC_V2_JLI_BASE]
lr r0, [_ARC_V2_LDI_BASE]
lr r2, [_ARC_V2_EI_BASE]
st_s r1, [sp, ___isf_t_jli_base_OFFSET]
st_s r0, [sp, ___isf_t_ldi_base_OFFSET]
st_s r2, [sp, ___isf_t_ei_base_OFFSET]
#endif
.endm
/*
 * Tear down the interrupt stack frame built by _create_irq_stack_frame.
 * Must be called with interrupts locked or in P0.
 * sp must be pointing the to stack frame.
 */
.macro _pop_irq_stack_frame
LDR blink, sp, ___isf_t_blink_OFFSET
#ifdef CONFIG_CODE_DENSITY
/* restore code-density base registers (JLI/LDI/EI tables) */
ld_s r1, [sp, ___isf_t_jli_base_OFFSET]
ld_s r0, [sp, ___isf_t_ldi_base_OFFSET]
ld_s r2, [sp, ___isf_t_ei_base_OFFSET]
sr r1, [_ARC_V2_JLI_BASE]
sr r0, [_ARC_V2_LDI_BASE]
sr r2, [_ARC_V2_EI_BASE]
#endif
#ifdef CONFIG_ARC_HAS_ZOL
/* restore zero-overhead-loop state */
LDR r0, sp, ___isf_t_lp_count_OFFSET
MOVR lp_count, r0
LDR r1, sp, ___isf_t_lp_start_OFFSET
LDR r0, sp, ___isf_t_lp_end_OFFSET
SRR r1, [_ARC_V2_LP_START]
SRR r0, [_ARC_V2_LP_END]
#endif /* CONFIG_ARC_HAS_ZOL */
LDR r13, sp, ___isf_t_r13_OFFSET
LDR r12, sp, ___isf_t_r12_OFFSET
LDR r11, sp, ___isf_t_r11_OFFSET
LDR r10, sp, ___isf_t_r10_OFFSET
LDR r9, sp, ___isf_t_r9_OFFSET
LDR r8, sp, ___isf_t_r8_OFFSET
LDR r7, sp, ___isf_t_r7_OFFSET
LDR r6, sp, ___isf_t_r6_OFFSET
LDR r5, sp, ___isf_t_r5_OFFSET
LDR r4, sp, ___isf_t_r4_OFFSET
LDR r3, sp, ___isf_t_r3_OFFSET
LDR r2, sp, ___isf_t_r2_OFFSET
LDR r1, sp, ___isf_t_r1_OFFSET
LDR r0, sp, ___isf_t_r0_OFFSET
/*
 * All gprs have been reloaded, the only one that is still usable is
 * ilink.
 *
 * The pc and status32 values will still be on the stack. We cannot
 * pop them yet because the callers of _pop_irq_stack_frame must reload
 * status32 differently depending on the execution context they are
 * running in (arch_switch(), firq or exception).
 */
ADDR sp, sp, ___isf_t_SIZEOF
.endm
/*
 * Program the hardware stack-checking bound registers from the thread
 * struct (kernel stack always; user stack only with CONFIG_USERSPACE).
 * To use this macro, r2 should have the value of thread struct pointer to
 * _kernel.current. r3 is a scratch reg.
 */
.macro _load_stack_check_regs
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
/* secure firmware uses the secure-banked stack-check aux registers */
ld r3, [r2, _thread_offset_to_k_stack_base]
sr r3, [_ARC_V2_S_KSTACK_BASE]
ld r3, [r2, _thread_offset_to_k_stack_top]
sr r3, [_ARC_V2_S_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
ld r3, [r2, _thread_offset_to_u_stack_base]
sr r3, [_ARC_V2_S_USTACK_BASE]
ld r3, [r2, _thread_offset_to_u_stack_top]
sr r3, [_ARC_V2_S_USTACK_TOP]
#endif
#else /* CONFIG_ARC_HAS_SECURE */
ld r3, [r2, _thread_offset_to_k_stack_base]
sr r3, [_ARC_V2_KSTACK_BASE]
ld r3, [r2, _thread_offset_to_k_stack_top]
sr r3, [_ARC_V2_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
ld r3, [r2, _thread_offset_to_u_stack_base]
sr r3, [_ARC_V2_USTACK_BASE]
ld r3, [r2, _thread_offset_to_u_stack_top]
sr r3, [_ARC_V2_USTACK_TOP]
#endif
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
.endm
/* check and increase the interrupt nest counter
 * after increase, check whether nest counter == 1 (i.e. this is the
 * outermost interrupt); the result will be EQ bit of status32
 * two temp regs are needed
 */
.macro _check_and_inc_int_nest_counter, reg1, reg2
#ifdef CONFIG_SMP
/* get pointer to _cpu_t of this CPU */
_get_cpu_id MACRO_ARG(reg1)
ASLR MACRO_ARG(reg1), MACRO_ARG(reg1), ARC_REGSHIFT
LDR MACRO_ARG(reg1), MACRO_ARG(reg1), _curr_cpu
/* _cpu_t.nested is 32 bit regardless of platform bitness */
ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
MOVR MACRO_ARG(reg1), _kernel
/* z_kernel.nested is 32 bit regardless of platform bitness */
ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
add MACRO_ARG(reg2), MACRO_ARG(reg2), 1
#ifdef CONFIG_SMP
st MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
st MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
/* sets the EQ flag when the post-increment count is exactly 1 */
cmp MACRO_ARG(reg2), 1
.endm
/* decrease interrupt stack nest counter
 * while the counter > 0, the interrupt stack is in use; otherwise
 * it is not used
 */
.macro _dec_int_nest_counter, reg1, reg2
#ifdef CONFIG_SMP
/* get pointer to _cpu_t of this CPU */
_get_cpu_id MACRO_ARG(reg1)
ASLR MACRO_ARG(reg1), MACRO_ARG(reg1), ARC_REGSHIFT
LDR MACRO_ARG(reg1), MACRO_ARG(reg1), _curr_cpu
/* _cpu_t.nested is 32 bit regardless of platform bitness */
ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
MOVR MACRO_ARG(reg1), _kernel
/* z_kernel.nested is 32 bit regardless of platform bitness */
ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
sub MACRO_ARG(reg2), MACRO_ARG(reg2), 1
#ifdef CONFIG_SMP
st MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
st MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
.endm
/* If multiple bits in IRQ_ACT are set, i.e. last bit != first bit, it's
 * a nested interrupt. The result will be EQ bit of status32
 * (EQ set means NOT nested: only one active level).
 * need two temp regs to do this
 */
.macro _check_nest_int_by_irq_act, reg1, reg2
lr MACRO_ARG(reg1), [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* only consider the levels below ARC_N_IRQ_START_LEVEL */
and MACRO_ARG(reg1), MACRO_ARG(reg1), ((1 << ARC_N_IRQ_START_LEVEL) - 1)
#else
/* mask to the active-level bits (low 16 bits) */
and MACRO_ARG(reg1), MACRO_ARG(reg1), 0xffff
#endif
/* find-first-set vs find-last-set: equal iff a single bit is active */
ffs MACRO_ARG(reg2), MACRO_ARG(reg1)
fls MACRO_ARG(reg1), MACRO_ARG(reg1)
cmp MACRO_ARG(reg1), MACRO_ARG(reg2)
.endm
/* macro to get id of current cpu
 * the result will be in reg (a reg)
 * xbfu extracts the CPU-id bit-field from the IDENTITY aux register
 * (operand 0xe8 encodes the field position/width; see the ARCv2 ISA)
 */
.macro _get_cpu_id, reg
LRR MACRO_ARG(reg), [_ARC_V2_IDENTITY]
xbfu MACRO_ARG(reg), MACRO_ARG(reg), 0xe8
.endm
/* macro to get the interrupt stack of current cpu
 * the result will be in irq_sp (a reg)
 */
.macro _get_curr_cpu_irq_stack, irq_sp
#ifdef CONFIG_SMP
/* get pointer to _cpu_t of this CPU */
_get_cpu_id MACRO_ARG(irq_sp)
ASLR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), ARC_REGSHIFT
LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), _curr_cpu
/* get pointer to irq_stack itself */
LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), ___cpu_t_irq_stack_OFFSET
#else
/* uniprocessor: read it straight out of z_kernel */
MOVR MACRO_ARG(irq_sp), _kernel
LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), _kernel_offset_to_irq_stack
#endif
.endm
/* macro to push an aux reg onto the stack, staged through a GPR */
.macro PUSHAX, reg, aux
LRR MACRO_ARG(reg), [MACRO_ARG(aux)]
PUSHR MACRO_ARG(reg)
.endm
/* macro to pop an aux reg from the stack, staged through a GPR */
.macro POPAX, reg, aux
POPR MACRO_ARG(reg)
SRR MACRO_ARG(reg), [MACRO_ARG(aux)]
.endm
/* macro to store the outgoing thread's callee-saved regs (thread in r2)
 * and publish the thread as its own switch_handle
 */
.macro _store_old_thread_callee_regs
_save_callee_saved_regs
/* Save old thread into switch handle which is required by z_sched_switch_spin.
 * NOTE: we shouldn't save anything related to old thread context after this point!
 * TODO: we should add SMP write-after-write data memory barrier here, as we want all
 * previous writes completed before setting switch_handle which is polled by other cores
 * in z_sched_switch_spin in case of SMP. Though it's not likely that this issue
 * will reproduce in real world as there is some gap before reading switch_handle and
 * reading rest of the data we've stored before.
 */
STR r2, r2, ___thread_t_switch_handle_OFFSET
.endm
/* macro to store the outgoing thread's callee-saved regs from interrupt
 * context; with CONFIG_USERSPACE it also preserves the interrupted
 * thread's user/kernel (U bit) state
 */
.macro _irq_store_old_thread_callee_regs
#if defined(CONFIG_USERSPACE)
/*
 * when USERSPACE is enabled, according to ARCv2 ISA, SP will be switched
 * if interrupt comes out in user mode, and will be recorded in bit 31
 * (U bit) of IRQ_ACT. when interrupt exits, SP will be switched back
 * according to U bit.
 *
 * need to remember the user/kernel status of interrupted thread, will be
 * restored when thread switched back
 *
 */
lr r1, [_ARC_V2_AUX_IRQ_ACT]
and r3, r1, 0x80000000
push_s r3
/* clear the U bit so the remainder of the switch runs as kernel */
bclr r1, r1, 31
sr r1, [_ARC_V2_AUX_IRQ_ACT]
#endif
_store_old_thread_callee_regs
.endm
/* macro to load the incoming thread's callee-saved regs (thread in r2);
 * also programs stack checking / MPU for the new thread where configured,
 * and leaves the thread's relinquish cause in r3
 */
.macro _load_new_thread_callee_regs
#ifdef CONFIG_ARC_STACK_CHECKING
_load_stack_check_regs
#endif
/*
 * _load_callee_saved_regs expects incoming thread in r2.
 * _load_callee_saved_regs restores the stack pointer.
 */
_load_callee_saved_regs
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
/* r2 is caller-saved across the C call: preserve it around the MPU setup */
push_s r2
bl configure_mpu_thread
pop_s r2
#endif
/* _thread_arch.relinquish_cause is 32 bit regardless of platform bitness */
ld r3, [r2, _thread_offset_to_relinquish_cause]
.endm
/* when switching to a thread that relinquished the CPU cooperatively,
 * some status regs need to be set
 */
.macro _set_misc_regs_irq_switch_from_coop
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* must return to secure mode, so set IRM bit to 1 */
lr r0, [_ARC_V2_SEC_STAT]
bset r0, r0, _ARC_V2_SEC_STAT_IRM_BIT
sflag r0
#endif
.endm
/* when switching to a thread that was preempted by an interrupt,
 * some status regs need to be set; pops undo the pushes made at save time
 * (order matters: IRQ_ACT state first, then SEC_STAT)
 */
.macro _set_misc_regs_irq_switch_from_irq
#if defined(CONFIG_USERSPACE)
/*
 * need to recover the user/kernel status of interrupted thread
 */
pop_s r3
lr r2, [_ARC_V2_AUX_IRQ_ACT]
or r2, r2, r3
sr r2, [_ARC_V2_AUX_IRQ_ACT]
#endif
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to recover SEC_STAT.IRM bit */
pop_s r3
sflag r3
#endif
.endm
/* macro to get next switch handle in assembly: calls the C helper with the
 * current stack pointer, preserving r2 across the call; result is in r0
 */
.macro _get_next_switch_handle
PUSHR r2
MOVR r0, sp
bl z_arch_get_next_switch_handle
POPR r2
.endm
/* macro to disable hardware stack checking in assembly, need a GPR
 * to do this (secure firmware uses SEC_STAT.SSC, otherwise STATUS32.SC)
 */
.macro _disable_stack_checking, reg
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr MACRO_ARG(reg), [_ARC_V2_SEC_STAT]
bclr MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_SEC_STAT_SSC_BIT
sflag MACRO_ARG(reg)
#else
lr MACRO_ARG(reg), [_ARC_V2_STATUS32]
bclr MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_STATUS32_SC_BIT
kflag MACRO_ARG(reg)
#endif
#endif
.endm
/* macro to enable hardware stack checking in assembly, need a GPR
 * to do this (secure firmware uses SEC_STAT.SSC, otherwise STATUS32.SC)
 */
.macro _enable_stack_checking, reg
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr MACRO_ARG(reg), [_ARC_V2_SEC_STAT]
bset MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_SEC_STAT_SSC_BIT
sflag MACRO_ARG(reg)
#else
lr MACRO_ARG(reg), [_ARC_V2_STATUS32]
bset MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_STATUS32_SC_BIT
kflag MACRO_ARG(reg)
#endif
#endif
.endm
/* signed 9-bit store-offset range and the scaled (.as) shift for 32-bit
 * loads/stores
 */
#define __arc_u9_max (255)
#define __arc_u9_min (-256)
#define __arc_ldst32_as_shift 2
/*
 * When accessing a bloated struct member we can exceed the u9 offset operand
 * of the store instruction. _st32_huge_offset picks the cheapest encoding:
 * a plain st when the offset fits, st.as for aligned offsets within the
 * scaled range, and an explicit address computation otherwise.
 */
.macro _st32_huge_offset, d, s, offset, temp
.if MACRO_ARG(offset) <= __arc_u9_max && MACRO_ARG(offset) >= __arc_u9_min
st MACRO_ARG(d), [MACRO_ARG(s), MACRO_ARG(offset)]
/* Technically we can optimize with .as both big positive and negative offsets here, but
 * as we use only positive offsets in hand-written assembly code we keep only
 * positive offset case here for simplicity.
 */
.elseif !(MACRO_ARG(offset) % (1 << __arc_ldst32_as_shift)) && \
MACRO_ARG(offset) <= (__arc_u9_max << __arc_ldst32_as_shift) && \
MACRO_ARG(offset) >= 0
st.as MACRO_ARG(d), [MACRO_ARG(s), MACRO_ARG(offset) >> __arc_ldst32_as_shift]
.else
/* fall back: materialize the address in a temp register */
ADDR MACRO_ARG(temp), MACRO_ARG(s), MACRO_ARG(offset)
st MACRO_ARG(d), [MACRO_ARG(temp)]
.endif
.endm
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_ */
``` | /content/code_sandbox/arch/arc/include/swap_macros.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,667 |
```python
#!/usr/bin/env python3
import argparse
from dataclasses import dataclass
from pathlib import Path, PurePath
import pykwalify.core
import sys
from typing import List
import yaml
import re
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
# Validation schemas are loaded once at import time; they are applied to every
# soc.yml / archs.yml file parsed later in this module.
SOC_SCHEMA_PATH = str(Path(__file__).parent / 'schemas' / 'soc-schema.yml')
with open(SOC_SCHEMA_PATH, 'r') as f:
    soc_schema = yaml.load(f.read(), Loader=SafeLoader)
ARCH_SCHEMA_PATH = str(Path(__file__).parent / 'schemas' / 'arch-schema.yml')
with open(ARCH_SCHEMA_PATH, 'r') as f:
    arch_schema = yaml.load(f.read(), Loader=SafeLoader)
# Well-known file name / relative location conventions
SOC_YML = 'soc.yml'
ARCHS_YML_PATH = PurePath('arch/archs.yml')
class Systems:
def __init__(self, folder='', soc_yaml=None):
self._socs = []
self._series = []
self._families = []
if soc_yaml is None:
return
try:
data = yaml.load(soc_yaml, Loader=SafeLoader)
pykwalify.core.Core(source_data=data,
schema_data=soc_schema).validate()
except (yaml.YAMLError, pykwalify.errors.SchemaError) as e:
sys.exit(f'ERROR: Malformed yaml {soc_yaml.as_posix()}', e)
for f in data.get('family', []):
family = Family(f['name'], folder, [], [])
for s in f.get('series', []):
series = Series(s['name'], folder, f['name'], [])
socs = [(Soc(soc['name'],
[c['name'] for c in soc.get('cpuclusters', [])],
folder, s['name'], f['name']))
for soc in s.get('socs', [])]
series.socs.extend(socs)
self._series.append(series)
self._socs.extend(socs)
family.series.append(series)
family.socs.extend(socs)
socs = [(Soc(soc['name'],
[c['name'] for c in soc.get('cpuclusters', [])],
folder, None, f['name']))
for soc in f.get('socs', [])]
self._socs.extend(socs)
self._families.append(family)
for s in data.get('series', []):
series = Series(s['name'], folder, '', [])
socs = [(Soc(soc['name'],
[c['name'] for c in soc.get('cpuclusters', [])],
folder, s['name'], ''))
for soc in s.get('socs', [])]
series.socs.extend(socs)
self._series.append(series)
self._socs.extend(socs)
socs = [(Soc(soc['name'],
[c['name'] for c in soc.get('cpuclusters', [])],
folder, '', ''))
for soc in data.get('socs', [])]
self._socs.extend(socs)
# Ensure that any runner configuration matches socs and cpuclusters declared in the same
# soc.yml file
if 'runners' in data and 'run_once' in data['runners']:
for grp in data['runners']['run_once']:
for item_data in data['runners']['run_once'][grp]:
for group in item_data['groups']:
for qualifiers in group['qualifiers']:
soc_name, *components = qualifiers.split('/')
found_match = False
# Allow 'ns' as final qualifier until "virtual" CPUs are ported to soc.yml
# path_to_url
if components and components[-1] == 'ns':
components.pop()
for soc in self._socs:
if re.match(fr'^{soc_name}$', soc.name) is not None:
if soc.cpuclusters and components:
check_string = '/'.join(components)
for cpucluster in soc.cpuclusters:
if re.match(fr'^{check_string}$', cpucluster) is not None:
found_match = True
break
elif not soc.cpuclusters and not components:
found_match = True
break
if found_match is False:
sys.exit(f'ERROR: SoC qualifier match unresolved: {qualifiers}')
@staticmethod
def from_file(socs_file):
    '''Load SoCs from a soc.yml file.

    Returns a Systems instance whose folder is the file's parent directory.
    Exits the process with an error message if the file does not exist.
    '''
    try:
        with open(socs_file, 'r') as f:
            socs_yaml = f.read()
    except FileNotFoundError:
        # Bug fix: sys.exit() accepts at most one argument; the old
        # sys.exit(msg, e) raised TypeError inside the handler and hid
        # the real error message.
        sys.exit(f'ERROR: socs.yml file not found: {socs_file.as_posix()}')
    return Systems(str(socs_file.parent), socs_yaml)
@staticmethod
def from_yaml(socs_yaml):
    '''Load socs from a string with YAML contents.

    The resulting Systems instance has no folder associated with it.
    '''
    return Systems('', socs_yaml)

def extend(self, systems):
    # Merge families, series and SoCs from another Systems instance.
    self._families.extend(systems.get_families())
    self._series.extend(systems.get_series())
    self._socs.extend(systems.get_socs())

def get_families(self):
    # All Family objects collected so far.
    return self._families

def get_series(self):
    # All Series objects collected so far.
    return self._series

def get_socs(self):
    # All Soc objects collected so far.
    return self._socs
def get_soc(self, name):
    '''Return the Soc named `name`, exiting the process when absent.'''
    for candidate in self._socs:
        if candidate.name == name:
            return candidate
    sys.exit(f"ERROR: SoC '{name}' is not found, please ensure that the SoC exists "
             f"and that soc-root containing '{name}' has been correctly defined.")
@dataclass
class Soc:
    # A system-on-chip as declared in soc.yml.  series/family are empty
    # strings (or the family name only) when the SoC is declared outside
    # a series/family hierarchy.
    name: str
    cpuclusters: List[str]
    folder: str
    series: str = ''
    family: str = ''


@dataclass
class Series:
    # A series of SoCs; family is '' when the series is declared standalone.
    name: str
    folder: str
    family: str
    socs: List[Soc]


@dataclass
class Family:
    # A family grouping one or more series and/or SoCs.
    name: str
    folder: str
    series: List[Series]
    socs: List[Soc]
def unique_paths(paths):
    '''Yield each path resolved, deduplicated, in first-seen order.'''
    # dict keys preserve insertion order (Python 3.7+), giving both
    # uniqueness and a deterministic order.
    seen = dict.fromkeys(p.resolve() for p in paths)
    yield from seen
def find_v2_archs(args):
    '''Discover hardware-model-v2 architectures under args.arch_roots.

    Reads <root>/ARCHS_YML_PATH for every unique root, validates it against
    arch_schema, and returns {'archs': [...]} where each entry is annotated
    with an absolute 'path', 'hwm' = 'v2' and 'type' = 'arch'.
    Exits the process on schema validation failure.
    '''
    ret = {'archs': []}
    for root in unique_paths(args.arch_roots):
        archs_yml = root / ARCHS_YML_PATH

        if Path(archs_yml).is_file():
            with Path(archs_yml).open('r') as f:
                archs = yaml.load(f.read(), Loader=SafeLoader)

            try:
                pykwalify.core.Core(source_data=archs, schema_data=arch_schema).validate()
            except pykwalify.errors.SchemaError as e:
                sys.exit('ERROR: Malformed "build" section in file: {}\n{}'
                         .format(archs_yml.as_posix(), e))

            if args.arch is not None:
                # Keep only the architecture requested on the command line.
                archs = {'archs': list(filter(
                    lambda arch: arch.get('name') == args.arch, archs['archs']))}

            for arch in archs['archs']:
                arch.update({'path': root / 'arch' / arch['path']})
                arch.update({'hwm': 'v2'})
                arch.update({'type': 'arch'})

            ret['archs'].extend(archs['archs'])
    return ret
def find_v2_systems(args):
    '''Discover hardware-model-v2 SoC systems under args.soc_roots.

    Collects every soc.yml below <root>/soc (sorted per root for a
    deterministic order) and merges them into one Systems instance.
    '''
    yml_files = []
    systems = Systems()
    for root in unique_paths(args.soc_roots):
        yml_files.extend(sorted((root / 'soc').rglob(SOC_YML)))

    for soc_yml in yml_files:
        if soc_yml.is_file():
            systems.extend(Systems.from_file(soc_yml))
    return systems
def parse_args():
    '''Parse and return this script's command-line arguments.'''
    parser = argparse.ArgumentParser(allow_abbrev=False)
    add_args(parser)
    return parser.parse_args()


def add_args(parser):
    '''Register the soc/arch lookup options on an existing parser.

    Shared so other tools can embed the same options.
    '''
    # Default: print just the name of each matched item.
    default_fmt = '{name}'
    parser.add_argument("--soc-root", dest='soc_roots', default=[],
                        type=Path, action='append',
                        help='add a SoC root, may be given more than once')
    parser.add_argument("--soc", default=None, help='lookup the specific soc')
    parser.add_argument("--soc-series", default=None, help='lookup the specific soc series')
    parser.add_argument("--soc-family", default=None, help='lookup the specific family')
    parser.add_argument("--socs", action='store_true', help='lookup all socs')
    parser.add_argument("--arch-root", dest='arch_roots', default=[],
                        type=Path, action='append',
                        help='add a arch root, may be given more than once')
    parser.add_argument("--arch", default=None, help='lookup the specific arch')
    parser.add_argument("--archs", action='store_true', help='lookup all archs')
    parser.add_argument("--format", default=default_fmt,
                        help='''Format string to use to list each soc.''')
    parser.add_argument("--cmakeformat", default=None,
                        help='''CMake format string to use to list each arch/soc.''')
def dump_v2_archs(args):
    '''Print every discovered v2 arch using --cmakeformat or --format.'''
    archs = find_v2_archs(args)

    for arch in archs['archs']:
        if args.cmakeformat is not None:
            info = args.cmakeformat.format(
                TYPE='TYPE;' + arch['type'],
                NAME='NAME;' + arch['name'],
                DIR='DIR;' + str(arch['path'].as_posix()),
                HWM='HWM;' + arch['hwm'],
                # Below is non-existing for arch but is defined here to
                # support a common formatting string.
                SERIES='',
                FAMILY='',
                ARCH='',
                VENDOR=''
            )
        else:
            info = args.format.format(
                type=arch.get('type'),
                name=arch.get('name'),
                dir=arch.get('path'),
                hwm=arch.get('hwm'),
                # Below is non-existing for arch but is defined here to
                # support a common formatting string.
                series='',
                family='',
                arch='',
                vendor=''
            )
        print(info)
def dump_v2_system(args, type, system):
    '''Print one family/series/SoC entry using --cmakeformat or --format.

    Note: the 'type' parameter shadows the builtin of the same name; the
    name is kept for interface compatibility with existing callers.
    '''
    if args.cmakeformat is not None:
        info = args.cmakeformat.format(
            TYPE='TYPE;' + type,
            NAME='NAME;' + system.name,
            DIR='DIR;' + Path(system.folder).as_posix(),
            HWM='HWM;' + 'v2'
        )
    else:
        info = args.format.format(
            type=type,
            name=system.name,
            dir=system.folder,
            hwm='v2'
        )
    print(info)
def dump_v2_systems(args):
    '''Print every v2 family, series and SoC found under args.soc_roots.'''
    systems = find_v2_systems(args)

    # Families first, then series, then SoCs — same order as before.
    for kind, items in (('family', systems.get_families()),
                        ('series', systems.get_series()),
                        ('soc', systems.get_socs())):
        for system in items:
            dump_v2_system(args, kind, system)
if __name__ == '__main__':
    args = parse_args()

    # Any SoC-related query triggers a dump of families/series/SoCs.
    if any([args.socs, args.soc, args.soc_series, args.soc_family]):
        dump_v2_systems(args)
    if args.archs or args.arch is not None:
        dump_v2_archs(args)
``` | /content/code_sandbox/scripts/list_hardware.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,445 |
```python
#!/usr/bin/env python3
import argparse
import sys
import os
import time
import datetime
from github import Github, GithubException
from github.GithubException import UnknownObjectException
from collections import defaultdict
from west.manifest import Manifest
from west.manifest import ManifestProject
TOP_DIR = os.path.join(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(TOP_DIR, "scripts"))
from get_maintainer import Maintainers
def log(s):
    '''Print s when verbose output was requested; otherwise stay quiet.'''
    if args.verbose > 0:
        # print() writes to sys.stdout by default; the explicit file=
        # argument of the original was redundant.
        print(s)
def parse_args():
    '''Parse command-line options into the module-level `args` global.'''
    global args
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False)

    parser.add_argument("-M", "--maintainer-file", required=False, default="MAINTAINERS.yml",
                        help="Maintainer file to be used.")

    # -P / -I / -s / -m select exactly one mode of operation.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-P", "--pull_request", required=False, default=None, type=int,
                       help="Operate on one pull-request only.")
    group.add_argument("-I", "--issue", required=False, default=None, type=int,
                       help="Operate on one issue only.")
    group.add_argument("-s", "--since", required=False,
                       help="Process pull-requests since date.")
    group.add_argument("-m", "--modules", action="store_true",
                       help="Process pull-requests from modules.")

    parser.add_argument("-y", "--dry-run", action="store_true", default=False,
                        help="Dry run only.")
    parser.add_argument("-o", "--org", default="zephyrproject-rtos",
                        help="Github organisation")
    parser.add_argument("-r", "--repo", default="zephyr",
                        help="Github repository")
    parser.add_argument("-v", "--verbose", action="count", default=0,
                        help="Verbose Output")

    args = parser.parse_args()
def process_pr(gh, maintainer_file, number):
    '''Label a pull request and pick reviewers/assignees from MAINTAINERS.

    Matches every changed file against MAINTAINERS areas, accumulates area
    and maintainer hit counts, then applies labels, requests reviews from
    area maintainers/collaborators and assigns the best-matching maintainer.
    '''
    gh_repo = gh.get_repo(f"{args.org}/{args.repo}")
    pr = gh_repo.get_pull(number)

    log(f"working on path_to_url{args.org}/{args.repo}/pull/{pr.number} : {pr.title}")

    labels = set()
    area_counter = defaultdict(int)       # Area -> weighted file-match count
    found_maintainers = defaultdict(int)  # maintainer login -> hit count

    num_files = 0
    all_areas = set()
    fn = list(pr.get_files())

    # NOTE(review): this loop only breaks when a manifest file is touched
    # and has no other effect — presumably it was meant to skip or flag
    # manifest PRs; confirm the intended behavior.
    for changed_file in fn:
        if changed_file.filename in ['west.yml','submanifests/optional.yaml']:
            break

    # Trivial single-commit, one-line changes get the XS size label.
    if pr.commits == 1 and (pr.additions <= 1 and pr.deletions <= 1):
        labels = {'size: XS'}

    if len(fn) > 500:
        log(f"Too many files changed ({len(fn)}), skipping....")
        return

    for changed_file in fn:
        num_files += 1
        log(f"file: {changed_file.filename}")
        areas = maintainer_file.path2areas(changed_file.filename)

        if not areas:
            continue

        all_areas.update(areas)
        is_instance = False
        # Sort 'Platform' areas first: once a platform area matches, the
        # remaining areas for this file are counted with weight 0.
        sorted_areas = sorted(areas, key=lambda x: 'Platform' in x.name, reverse=True)
        for area in sorted_areas:
            c = 1 if not is_instance else 0

            area_counter[area] += c
            labels.update(area.labels)
            # FIXME: Here we count the same file multiple times if it exists in
            # multiple areas with same maintainer
            for area_maintainer in area.maintainers:
                found_maintainers[area_maintainer] += c

            if 'Platform' in area.name:
                is_instance = True

    # Order areas by number of matched files, most first.
    area_counter = dict(sorted(area_counter.items(), key=lambda item: item[1], reverse=True))
    log(f"Area matches: {area_counter}")
    log(f"labels: {labels}")

    # Create a list of collaborators ordered by the area match
    collab = list()
    for area in area_counter:
        collab += maintainer_file.areas[area.name].maintainers
        collab += maintainer_file.areas[area.name].collaborators
    collab = list(dict.fromkeys(collab))  # dedupe while keeping order
    log(f"collab: {collab}")

    _all_maintainers = dict(sorted(found_maintainers.items(), key=lambda item: item[1], reverse=True))

    log(f"Submitted by: {pr.user.login}")
    log(f"candidate maintainers: {_all_maintainers}")

    assignees = []
    tmp_assignees = []

    # we start with areas with most files changed and pick the maintainer from the first one.
    # if the first area is an implementation, i.e. driver or platform, we
    # continue searching for any other areas involved
    for area, count in area_counter.items():
        if count == 0:
            continue
        if len(area.maintainers) > 0:
            tmp_assignees = area.maintainers
            if pr.user.login in area.maintainers:
                # submitter = assignee, try to pick next area and
                # assign someone else other than the submitter
                continue
            else:
                assignees = area.maintainers
                if 'Platform' not in area.name:
                    break

    # Fall back to the submitter's own area when nobody else matched.
    if tmp_assignees and not assignees:
        assignees = tmp_assignees

    if assignees:
        prop = (found_maintainers[assignees[0]] / num_files) * 100
        log(f"Picked assignees: {assignees} ({prop:.2f}% ownership)")
        log("+++++++++++++++++++++++++")

    # Set labels
    if labels:
        if len(labels) < 10:
            for l in labels:
                log(f"adding label {l}...")
                if not args.dry_run:
                    pr.add_to_labels(l)
        else:
            log(f"Too many labels to be applied")

    if collab:
        reviewers = []
        existing_reviewers = set()

        # People who already reviewed...
        revs = pr.get_reviews()
        for review in revs:
            existing_reviewers.add(review.user)

        # ...plus people with a pending review request.
        rl = pr.get_review_requests()
        page = 0
        for r in rl:
            existing_reviewers |= set(r.get_page(page))
            page += 1

        # check for reviewers that remove themselves from list of reviewer and
        # do not attempt to add them again based on MAINTAINERS file.
        self_removal = []
        for event in pr.get_issue_events():
            if event.event == 'review_request_removed' and event.actor == event.requested_reviewer:
                self_removal.append(event.actor)

        for collaborator in collab:
            try:
                gh_user = gh.get_user(collaborator)
                if pr.user == gh_user or gh_user in existing_reviewers:
                    continue
                if not gh_repo.has_in_collaborators(gh_user):
                    log(f"Skip '{collaborator}': not in collaborators")
                    continue
                if gh_user in self_removal:
                    log(f"Skip '{collaborator}': self removed")
                    continue
                reviewers.append(collaborator)
            except UnknownObjectException as e:
                log(f"Can't get user '{collaborator}', account does not exist anymore? ({e})")

        # Cap the total number of requested reviewers at 15; only fill the
        # remaining vacancy.
        if len(existing_reviewers) < 15:
            reviewer_vacancy = 15 - len(existing_reviewers)
            reviewers = reviewers[:reviewer_vacancy]

            if reviewers:
                try:
                    log(f"adding reviewers {reviewers}...")
                    if not args.dry_run:
                        pr.create_review_request(reviewers=reviewers)
                except GithubException:
                    log("cant add reviewer")
        else:
            log("not adding reviewers because the existing reviewer count is greater than or "
                "equal to 15")

    ms = []
    # assignees
    if assignees and not pr.assignee:
        try:
            for assignee in assignees:
                u = gh.get_user(assignee)
                ms.append(u)
        except GithubException:
            log(f"Error: Unknown user")

        for mm in ms:
            log(f"Adding assignee {mm}...")
            if not args.dry_run:
                pr.add_to_assignees(mm)
    else:
        log("not setting assignee")

    # Pause between PRs — presumably to stay inside GitHub API rate limits.
    time.sleep(1)
def process_issue(gh, maintainer_file, number):
    '''Assign maintainers to an issue based on its labels.

    Builds a mapping from sorted, lowercased label tuples to maintainers
    out of the MAINTAINERS areas, then assigns the maintainers whose label
    set matches the issue's relevant labels.  Bails out early when the
    issue is already assigned.
    '''
    gh_repo = gh.get_repo(f"{args.org}/{args.repo}")
    issue = gh_repo.get_issue(number)

    log(f"Working on {issue.url}: {issue.title}")

    if issue.assignees:
        print(f"Already assigned {issue.assignees}, bailing out")
        return

    label_to_maintainer = defaultdict(set)
    for _, area in maintainer_file.areas.items():
        if not area.labels:
            continue

        labels = set()
        for label in area.labels:
            labels.add(label.lower())
        # Sorted tuples are hashable and order-independent dict keys.
        labels = tuple(sorted(labels))
        for maintainer in area.maintainers:
            label_to_maintainer[labels].add(maintainer)

    # Add extra entries for areas with multiple labels so they match with just
    # one label if it's specific enough.
    for areas, maintainers in dict(label_to_maintainer).items():
        for area in areas:
            if tuple([area]) not in label_to_maintainer:
                label_to_maintainer[tuple([area])] = maintainers

    issue_labels = set()
    for label in issue.labels:
        label_name = label.name.lower()
        # Only keep labels that map to some maintainer on their own.
        if tuple([label_name]) not in label_to_maintainer:
            print(f"Ignoring label: {label}")
            continue
        issue_labels.add(label_name)
    issue_labels = tuple(sorted(issue_labels))
    print(f"Using labels: {issue_labels}")

    if issue_labels not in label_to_maintainer:
        print(f"no match for the label set, not assigning")
        return

    for maintainer in label_to_maintainer[issue_labels]:
        log(f"Adding {maintainer} to {issue.html_url}")
        if not args.dry_run:
            issue.add_to_assignees(maintainer)
def process_modules(gh, maintainers_file):
    '''Assign maintainers to open, unassigned PRs in active module repos.

    Maps each active west project to its "West project: <name>" MAINTAINERS
    area, then searches GitHub for open PRs without assignees across those
    repositories and assigns/requests reviews accordingly.
    '''
    manifest = Manifest.from_file()

    repos = {}
    for project in manifest.get_projects([]):
        if not manifest.is_active(project):
            continue
        if isinstance(project, ManifestProject):
            # The manifest repository itself is not a module.
            continue
        area = f"West project: {project.name}"
        if area not in maintainers_file.areas:
            log(f"No area for: {area}")
            continue
        maintainers = maintainers_file.areas[area].maintainers
        if not maintainers:
            log(f"No maintainers for: {area}")
            continue
        collaborators = maintainers_file.areas[area].collaborators
        log(f"Found {area}, maintainers={maintainers}, collaborators={collaborators}")

        repo_name = f"{args.org}/{project.name}"
        repos[repo_name] = maintainers_file.areas[area]

    # Single combined search query across all module repositories.
    query = f"is:open is:pr no:assignee"
    for repo in repos:
        query += f" repo:{repo}"

    issues = gh.search_issues(query=query)
    for issue in issues:
        pull = issue.as_pull_request()

        if pull.draft:
            continue

        if pull.assignees:
            log(f"ERROR: {pull.html_url} should have no assignees, found {pull.assignees}")
            continue

        repo_name = f"{args.org}/{issue.repository.name}"
        area = repos[repo_name]

        for maintainer in area.maintainers:
            log(f"Assigning {maintainer} to {pull.html_url}")
            if not args.dry_run:
                pull.add_to_assignees(maintainer)
                # NOTE(review): PyGithub's create_review_request() normally
                # takes reviewers=[...] (a list of logins); passing a bare
                # string here may misbehave — confirm against PyGithub docs.
                pull.create_review_request(maintainer)

        for collaborator in area.collaborators:
            log(f"Adding {collaborator} to {pull.html_url}")
            if not args.dry_run:
                pull.create_review_request(collaborator)
def main():
    '''Entry point: dispatch to PR / issue / modules / date-range mode.'''
    parse_args()

    token = os.environ.get('GITHUB_TOKEN', None)
    if not token:
        sys.exit('Github token not set in environment, please set the '
                 'GITHUB_TOKEN environment variable and retry.')

    gh = Github(token)
    maintainer_file = Maintainers(args.maintainer_file)

    if args.pull_request:
        process_pr(gh, maintainer_file, args.pull_request)
    elif args.issue:
        process_issue(gh, maintainer_file, args.issue)
    elif args.modules:
        process_modules(gh, maintainer_file)
    else:
        # No explicit target: process recent PRs (default: last 24 hours).
        if args.since:
            since = args.since
        else:
            today = datetime.date.today()
            since = today - datetime.timedelta(days=1)
        common_prs = f'repo:{args.org}/{args.repo} is:open is:pr base:main -is:draft no:assignee created:>{since}'
        pulls = gh.search_issues(query=f'{common_prs}')
        for issue in pulls:
            process_pr(gh, maintainer_file, issue.number)


if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/set_assignees.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,789 |
```shell
#!/bin/sh
#
#
#
# Arguments handed to a push hook by git:
# <remote name> <url> <local ref> <local sha> <remote ref> <remote sha>
remote=$1
url=$2
local_ref=$3
local_sha=$4
remote_ref=$5
remote_sha=$6

# All-zero SHA: the ref is being deleted on the remote.
z40=0000000000000000000000000000000000000000

# Abort on the first failing command.  The previous 'set -e exec' also
# (accidentally) assigned the word "exec" to $1; nothing reads the
# positional parameters after this point, but the stray word was a typo.
set -e

echo "Run push "

if [ "$local_sha" = "$z40" ]
then
	# Handle delete
	:
else
	# At each (forced) push, examine all commits since $remote/main
	base_commit=$(git rev-parse "$remote/main")
	range="$base_commit..$local_sha"
	echo "Perform check patch"
	${ZEPHYR_BASE}/scripts/checkpatch.pl --git $range
fi
``` | /content/code_sandbox/scripts/series-push-hook.sh | shell | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 147 |
```unknown
#!/usr/bin/env bash
#
# Bug fix: DIR must be computed before ZEPHYR_BASE, which is derived from
# it.  Previously ZEPHYR_BASE was evaluated while $DIR was still unset, so
# it silently resolved to the current working directory.
DIR="$(dirname $(readlink -f $0))/.."
ZEPHYR_BASE=$( builtin cd "$( dirname "$DIR" )" && pwd ${PWD_OPT})

# Locate the spatch binary (overridable through the SPATCH env var).
SPATCH="`which ${SPATCH:=spatch}`"
if [ ! -x "$SPATCH" ]; then
	echo 'spatch is part of the Coccinelle project and is available at path_to_url'
	exit 1
fi
VERBOSE=0
usage="Usage: ./scripts/coccicheck [OPTIONS]... [DIRECTORY|FILE]...
OPTIONS:
-------
-m= , --mode= specify the mode use {report, patch, org, context, chain}
-v= , --verbose= enable verbose output {1}
-j= , --jobs= number of jobs to use {0 - `nproc`}
-c= , --cocci= specify cocci script to use
-d= , --debug= specify file to store debug log
-f= , --sp-flag= pass additional flag to spatch
-h , --help display help and exit
Default values if any OPTION is not supplied:
--------------------------------------------
mode = report
verbose = 0 (disabled)
jobs = maximum jobs available on the machine
cocci = all cocci scripts available at scripts/coccinelle/*
If no [DIRECTORY|FILE] is supplied, entire codebase is processed.
For detailed documentation refer: doc/guides/coccinelle.rst"
# Parse --key=value style options; any other argument is treated as the
# directory or file to process and must exist.
for i in "$@"
do
case $i in
	-m=*|--mode=*)
		MODE="${i#*=}"
		shift # past argument=value
		;;
	-v=*|--verbose=*)
		VERBOSE="${i#*=}"
		shift # past argument=value
		;;
	-j=*|--jobs=*)
		J="${i#*=}"
		shift
		;;
	-c=*|--cocci=*)
		COCCI="${i#*=}"
		shift
		;;
	-d=*|--debug=*)
		DEBUG_FILE="${i#*=}"
		shift
		;;
	-f=*|--sp-flag=*)
		SPFLAGS="${i#*=}"
		shift
		;;
	-h|--help)
		echo "$usage"
		exit 1
		;;
	*)
		# Positional argument: the file or directory to check.
		FILE="${i#*=}"
		if [ ! -e "$FILE" ]; then
			echo "unknown option: '${i#*=}'"
			echo "$usage"
			exit 2
		fi
		;;
esac
done
FLAGS="--very-quiet"

# Scope: a specific file/dir when given, else the whole tree.
if [ "$FILE" = "" ] ; then
    OPTIONS="--dir $ZEPHYR_BASE"
else
    OPTIONS="--dir $FILE"
fi

# Parallelism: -j/--jobs wins, otherwise use every online CPU.
if [ -z "$J" ]; then
    NPROC=$(getconf _NPROCESSORS_ONLN)
else
    NPROC="$J"
fi

OPTIONS="--macro-file $ZEPHYR_BASE/scripts/coccinelle/macros.h $OPTIONS"

if [ "$FILE" != "" ] ; then
    OPTIONS="--patch $ZEPHYR_BASE $OPTIONS"
fi

if [ "$NPROC" != "1" ]; then
    # Using 0 should work as well, refer to _SC_NPROCESSORS_ONLN use on
    # path_to_url
    OPTIONS="$OPTIONS --jobs $NPROC --chunksize 1"
fi

# Default mode when none was requested.
if [ "$MODE" = "" ] ; then
    echo 'You have not explicitly specified the mode to use. Using default "report" mode.'
    echo 'Available modes are the following: 'patch', 'report', 'context', 'org''
    echo 'You can specify the mode with "./scripts/coccicheck --mode=<mode>"'
    echo 'Note however that some modes are not implemented by some semantic patches.'
    MODE="report"
fi

if [ "$MODE" = "chain" ] ; then
    echo 'You have selected the "chain" mode.'
    echo 'All available modes will be tried (in that order): patch, report, context, org'
elif [ "$MODE" = "report" -o "$MODE" = "org" ] ; then
    FLAGS="--no-show-diff $FLAGS"
fi

echo ''
echo 'Please check for false positives in the output before submitting a patch.'
echo 'When using "patch" mode, carefully review the patch before submitting it.'
echo ''
# Run a command, logging it to $DEBUG_FILE and aborting on failure.
run_cmd_parmap() {
	if [ $VERBOSE -ne 0 ] ; then
		echo "Running ($NPROC in parallel): $@"
	fi
	echo "$@" >>$DEBUG_FILE
	# Bug fix: quote "$@" so arguments containing spaces or glob
	# characters are passed through unchanged instead of being
	# re-split and glob-expanded by the shell.
	"$@" 2>>$DEBUG_FILE
	err=$?
	if [[ $err -ne 0 ]]; then
		echo "coccicheck failed"
		exit $err
	fi
}
# You can override heuristics with SPFLAGS, these must always go last
OPTIONS="$OPTIONS $SPFLAGS"
# Run one semantic patch ($1) under the selected $MODE.
coccinelle () {
	COCCI="$1"

	# Per-script metadata: extra spatch options and the virtual rules
	# (supported modes) declared inside the .cocci file itself.
	OPT=`grep "Options:" $COCCI | cut -d':' -f2`
	VIRTUAL=`grep "virtual" $COCCI | cut -d' ' -f2`

	if [[ $VIRTUAL = "" ]]; then
		echo "No available modes found in \"$COCCI\" script."
		echo "Consider adding virtual rules to the script."
		exit 1
	elif [[ $VIRTUAL != *"$MODE"* ]]; then
		# Requested mode unsupported by this script: fall back to the
		# first supported mode in a fixed preference order.
		echo "Invalid mode \"$MODE\" supplied!"
		echo "Available modes for \"`basename $COCCI`\" are: "$VIRTUAL""
		if [[ $VIRTUAL == *report* ]]; then
			MODE=report
		elif [[ $VIRTUAL == *context* ]]; then
			MODE=context
		elif [[ $VIRTUAL == *patch* ]]; then
			MODE=patch
		else
			MODE=org
		fi
		echo "Using random available mode: \"$MODE\""
		echo ''
	fi

	if [ $VERBOSE -ne 0 ] ; then
		FILE=${COCCI#$ZEPHYR_BASE/}
		echo "Processing `basename $COCCI`"
		echo "with option(s) \"$OPT\""
		echo ''
		echo 'Message example to submit a patch:'
		# Lines beginning with /// hold the commit-message template.
		sed -ne 's|^///||p' $COCCI
		if [ "$MODE" = "patch" ] ; then
			echo ' The semantic patch that makes this change is available'
		elif [ "$MODE" = "report" ] ; then
			echo ' The semantic patch that makes this report is available'
		elif [ "$MODE" = "context" ] ; then
			echo ' The semantic patch that spots this code is available'
		elif [ "$MODE" = "org" ] ; then
			echo ' The semantic patch that makes this Org report is available'
		else
			echo ' The semantic patch that makes this output is available'
		fi
		echo " in $FILE."
		echo ''
		echo ' More information about semantic patching is available at'
		echo ' path_to_url
		echo ''
		# Lines beginning with //# carry extra notes about the patch.
		if [ "`sed -ne 's|^//#||p' $COCCI`" ] ; then
			echo 'Semantic patch information:'
			sed -ne 's|^//#||p' $COCCI
			echo ''
		fi
	fi

	if [ "$MODE" = "chain" ] ; then
		# Try each mode in turn until one succeeds.
		run_cmd_parmap $SPATCH -D patch \
			$FLAGS --cocci-file $COCCI $OPT $OPTIONS || \
		run_cmd_parmap $SPATCH -D report \
			$FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff || \
		run_cmd_parmap $SPATCH -D context \
			$FLAGS --cocci-file $COCCI $OPT $OPTIONS || \
		run_cmd_parmap $SPATCH -D org \
			$FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff || exit 1
	elif [ "$MODE" = "rep+ctxt" ] ; then
		run_cmd_parmap $SPATCH -D report \
			$FLAGS --cocci-file $COCCI $OPT $OPTIONS --no-show-diff && \
		run_cmd_parmap $SPATCH -D context \
			$FLAGS --cocci-file $COCCI $OPT $OPTIONS || exit 1
	else
		run_cmd_parmap $SPATCH -D $MODE $FLAGS --cocci-file $COCCI $OPT $OPTIONS || exit 1
	fi

	# Unconditionally set MODE back to report — presumably so the next
	# script in a multi-script run starts from the default; confirm.
	MODE=report
}
# Validate the debug log destination before any processing: refuse to
# clobber an existing file; default to /dev/null when none was given.
if [ "$DEBUG_FILE" != "/dev/null" -a "$DEBUG_FILE" != "" ]; then
	if [ -f $DEBUG_FILE ]; then
		echo "Debug file \"$DEBUG_FILE\" exists, bailing ..."
		exit
	fi
else
	DEBUG_FILE="/dev/null"
fi

if [ "$COCCI" = "" ] ; then
	# No script selected: run every semantic patch in the tree.
	for f in `find $ZEPHYR_BASE/scripts/coccinelle/ -name '*.cocci' -type f | sort`; do
		coccinelle $f
		echo your_sha256_hash---------'
		echo ''
	done
else
	coccinelle $COCCI
fi
``` | /content/code_sandbox/scripts/coccicheck | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,930 |
```python
#!/usr/bin/env python3
import argparse
from collections import defaultdict
from dataclasses import dataclass, field
import itertools
from pathlib import Path
import pykwalify.core
import sys
from typing import List
import yaml
import list_hardware
from list_hardware import unique_paths
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
BOARD_SCHEMA_PATH = str(Path(__file__).parent / 'schemas' / 'board-schema.yml')
with open(BOARD_SCHEMA_PATH, 'r') as f:
board_schema = yaml.load(f.read(), Loader=SafeLoader)
BOARD_YML = 'board.yml'
#
# This is shared code between the build system's 'boards' target
# and the 'west boards' extension command. If you change it, make
# sure to test both ways it can be used.
#
# (It's done this way to keep west optional, making it possible to run
# 'ninja boards' in a build directory without west installed.)
#
@dataclass
class Revision:
    # One node in a board's revision tree.
    name: str
    variants: List[str] = field(default_factory=list)

    @staticmethod
    def from_dict(revision):
        '''Recursively build a Revision tree from a parsed board.yml mapping.'''
        children = [Revision.from_dict(child)
                    for child in revision.get('revisions', [])]
        return Revision(revision['name'], children)
@dataclass
class Variant:
    # One node in a variant tree (board- or SoC-level).
    name: str
    variants: List[str] = field(default_factory=list)

    @staticmethod
    def from_dict(variant):
        '''Recursively build a Variant tree from a parsed board.yml mapping.'''
        children = [Variant.from_dict(child)
                    for child in variant.get('variants', [])]
        return Variant(variant['name'], children)
@dataclass
class Cpucluster:
    # A CPU cluster within an SoC; variants nest beneath the cluster.
    name: str
    variants: List[str] = field(default_factory=list)
@dataclass
class Soc:
    # Board-level view of an SoC: either a flat list of variants, or a set
    # of CPU clusters each carrying its own variants.
    name: str
    cpuclusters: List[str] = field(default_factory=list)
    variants: List[str] = field(default_factory=list)

    @staticmethod
    def from_soc(soc, variants):
        '''Build a board-level Soc from a hardware-model soc plus variant dicts.

        Returns None when soc is None.  Variants are attached to the CPU
        cluster named in their 'cpucluster' key when the SoC has clusters,
        otherwise directly to the SoC.
        '''
        if soc is None:
            return None
        if not soc.cpuclusters:
            return Soc(soc.name,
                       variants=[Variant.from_dict(v) for v in variants])
        clusters = [
            Cpucluster(cluster,
                       [Variant.from_dict(v) for v in variants
                        if v['cpucluster'] == cluster])
            for cluster in soc.cpuclusters
        ]
        return Soc(soc.name, cpuclusters=clusters)
@dataclass(frozen=True)
class Board:
    name: str
    dir: Path
    hwm: str  # hardware model: 'v1' (defconfig dirs) or 'v2' (board.yml)
    arch: str = None
    vendor: str = None
    revision_format: str = None
    revision_default: str = None
    revision_exact: bool = False
    # compare=False: boards compare/hash by the identity fields above only.
    revisions: List[str] = field(default_factory=list, compare=False)
    socs: List[Soc] = field(default_factory=list, compare=False)
    variants: List[str] = field(default_factory=list, compare=False)
def board_key(board):
    # Sort key used when ordering boards for output.
    return board.name


def find_arch2boards(args):
    '''Return {arch: [Board, ...]} with each list sorted by board name.'''
    arch2board_set = find_arch2board_set(args)
    return {arch: sorted(arch2board_set[arch], key=board_key)
            for arch in arch2board_set}


def find_boards(args):
    '''Return all boards across every arch, sorted by name.'''
    return sorted(itertools.chain(*find_arch2board_set(args).values()),
                  key=board_key)


def find_arch2board_set(args):
    '''Return {arch: set(Board)} found under all board roots.

    When args.board is given, only boards with that exact name are kept.
    '''
    arches = sorted(find_arches(args))
    ret = defaultdict(set)

    for root in unique_paths(args.board_roots):
        for arch, boards in find_arch2board_set_in(root, arches, args.board_dir).items():
            if args.board is not None:
                ret[arch] |= {b for b in boards if b.name == args.board}
            else:
                ret[arch] |= boards

    return ret
def find_arches(args):
    '''Collect architecture names from every unique arch root.'''
    found = set()
    for root in unique_paths(args.arch_roots):
        found.update(find_arches_in(root))
    return found
def find_arches_in(root):
    '''Return the set of architecture names under root/arch.

    Only directories count, and the shared 'common' directory is excluded.
    '''
    arch_dir = root / 'arch'
    common = arch_dir / 'common'
    if not arch_dir.is_dir():
        return set()
    return {entry.name
            for entry in arch_dir.iterdir()
            if entry.is_dir() and entry != common}
def find_arch2board_set_in(root, arches, board_dir):
    '''Find hardware-model-v1 boards under root/boards for the given arches.

    A directory with *_defconfig files but no board.yml is a v1 board; the
    board name is the defconfig file-name prefix.  board_dir, when given,
    restricts the search to that single board directory.
    '''
    ret = defaultdict(set)
    boards = root / 'boards'

    for arch in arches:
        if not (boards / arch).is_dir():
            continue

        for maybe_board in (boards / arch).iterdir():
            if not maybe_board.is_dir():
                continue
            if board_dir is not None and board_dir != maybe_board:
                continue
            for maybe_defconfig in maybe_board.iterdir():
                file_name = maybe_defconfig.name
                # A board.yml means hwm v2 and is handled elsewhere.
                if file_name.endswith('_defconfig') and not (maybe_board / BOARD_YML).is_file():
                    board_name = file_name[:-len('_defconfig')]
                    ret[arch].add(Board(board_name, maybe_board, 'v1', arch=arch))
    return ret
def load_v2_boards(board_name, board_yml, systems):
    '''Load hardware-model-v2 boards from one board.yml.

    A board_name of None loads every board in the file; otherwise only the
    matching board is returned.  Exits on schema violations, when the
    mutually-exclusive 'board'/'boards' (or 'socs'/'variants') keys are
    used together, or when a non-custom revision lacks required keys.
    '''
    boards = []
    if board_yml.is_file():
        with board_yml.open('r') as f:
            b = yaml.load(f.read(), Loader=SafeLoader)

        try:
            pykwalify.core.Core(source_data=b, schema_data=board_schema).validate()
        except pykwalify.errors.SchemaError as e:
            sys.exit('ERROR: Malformed "build" section in file: {}\n{}'
                     .format(board_yml.as_posix(), e))

        mutual_exclusive = {'board', 'boards'}
        # Fewer than one key left after subtraction means both were present.
        if len(mutual_exclusive - b.keys()) < 1:
            sys.exit(f'ERROR: Malformed content in file: {board_yml.as_posix()}\n'
                     f'{mutual_exclusive} are mutual exclusive at this level.')

        board_array = b.get('boards', [b.get('board', None)])
        for board in board_array:
            if board_name is not None:
                if board['name'] != board_name:
                    # Not the board we're looking for, ignore.
                    continue

            board_revision = board.get('revision')
            if board_revision is not None and board_revision.get('format') != 'custom':
                # Non-custom revision schemes require a default and a list.
                if board_revision.get('default') is None:
                    sys.exit(f'ERROR: Malformed "board" section in file: {board_yml.as_posix()}\n'
                             "Cannot find required key 'default'. Path: '/board/revision.'")
                if board_revision.get('revisions') is None:
                    sys.exit(f'ERROR: Malformed "board" section in file: {board_yml.as_posix()}\n'
                             "Cannot find required key 'revisions'. Path: '/board/revision.'")

            mutual_exclusive = {'socs', 'variants'}
            if len(mutual_exclusive - board.keys()) < 1:
                sys.exit(f'ERROR: Malformed "board" section in file: {board_yml.as_posix()}\n'
                         f'{mutual_exclusive} are mutual exclusive at this level.')

            socs = [Soc.from_soc(systems.get_soc(s['name']), s.get('variants', []))
                    for s in board.get('socs', {})]

            board = Board(
                name=board['name'],
                dir=board_yml.parent,
                vendor=board.get('vendor'),
                revision_format=board.get('revision', {}).get('format'),
                revision_default=board.get('revision', {}).get('default'),
                revision_exact=board.get('revision', {}).get('exact', False),
                revisions=[Revision.from_dict(v) for v in
                           board.get('revision', {}).get('revisions', [])],
                socs=socs,
                variants=[Variant.from_dict(v) for v in board.get('variants', [])],
                hwm='v2',
            )
            boards.append(board)
    return boards
# Note that this does not share the args.board functionality of find_v2_boards
def find_v2_board_dirs(args):
    '''Return every directory under the board roots containing a board.yml.'''
    yml_files = []
    for root in unique_paths(args.board_roots):
        yml_files.extend((root / 'boards').rglob(BOARD_YML))
    return [yml.parent for yml in yml_files if yml.is_file()]
def find_v2_boards(args):
    '''Load all v2 boards under the board roots, honoring args.board.'''
    root_args = argparse.Namespace(soc_roots=args.soc_roots)
    systems = list_hardware.find_v2_systems(root_args)

    found = []
    for root in unique_paths(args.board_roots):
        for board_yml in (root / 'boards').rglob(BOARD_YML):
            found.extend(load_v2_boards(args.board, board_yml, systems))
    return found
def parse_args():
    '''Parse and return this script's command-line arguments.'''
    parser = argparse.ArgumentParser(allow_abbrev=False)
    add_args(parser)
    add_args_formatting(parser)
    return parser.parse_args()


def add_args(parser):
    '''Register lookup options shared with other tools (e.g. west boards).'''
    # Remember to update west-completion.bash if you add or remove
    # flags
    parser.add_argument("--arch-root", dest='arch_roots', default=[],
                        type=Path, action='append',
                        help='add an architecture root, may be given more than once')
    parser.add_argument("--board-root", dest='board_roots', default=[],
                        type=Path, action='append',
                        help='add a board root, may be given more than once')
    parser.add_argument("--soc-root", dest='soc_roots', default=[],
                        type=Path, action='append',
                        help='add a soc root, may be given more than once')
    parser.add_argument("--board", dest='board', default=None,
                        help='lookup the specific board, fail if not found')
    parser.add_argument("--board-dir", default=None, type=Path,
                        help='Only look for boards at the specific location')


def add_args_formatting(parser):
    '''Register output-formatting options.'''
    parser.add_argument("--cmakeformat", default=None,
                        help='''CMake Format string to use to list each board''')
def variant_v2_qualifiers(variant, qualifiers = None):
    '''Return the qualifier strings for a variant and its nested variants.

    The first entry is this variant's own qualifier: 'variant.name' at the
    top level, or '<qualifiers>/<variant.name>' when a prefix is given.
    '''
    qualifiers_list = [variant.name] if qualifiers is None else [qualifiers + '/' + variant.name]
    for v in variant.variants:
        # qualifiers_list[0] is this variant's qualifier, which prefixes
        # all of its children.
        qualifiers_list.extend(variant_v2_qualifiers(v, qualifiers_list[0]))
    return qualifiers_list
def board_v2_qualifiers(board):
    '''Return all <soc>[/<cpucluster>][/<variant>...] qualifiers of a board.

    SoC-level qualifiers come first, then board-level variants.
    '''
    qualifiers_list = []

    for s in board.socs:
        if s.cpuclusters:
            # Each cluster contributes '<soc>/<cluster>' plus its variants.
            for c in s.cpuclusters:
                id_str = s.name + '/' + c.name
                qualifiers_list.append(id_str)
                for v in c.variants:
                    qualifiers_list.extend(variant_v2_qualifiers(v, id_str))
        else:
            qualifiers_list.append(s.name)
            for v in s.variants:
                qualifiers_list.extend(variant_v2_qualifiers(v, s.name))
    # Board-level variants have no SoC prefix.
    for v in board.variants:
        qualifiers_list.extend(variant_v2_qualifiers(v))
    return qualifiers_list
def board_v2_qualifiers_csv(board):
    '''Return the board's qualifiers as one comma-separated string.'''
    return ",".join(board_v2_qualifiers(board))
def dump_v2_boards(args):
    '''Print each v2 board, via --cmakeformat when given, else by name.'''
    if args.board_dir:
        # Restrict the search to a single board directory.
        root_args = argparse.Namespace(**{'soc_roots': args.soc_roots})
        systems = list_hardware.find_v2_systems(root_args)
        boards = load_v2_boards(args.board, args.board_dir / BOARD_YML, systems)
    else:
        boards = find_v2_boards(args)

    for b in boards:
        qualifiers_list = board_v2_qualifiers(b)
        if args.cmakeformat is not None:
            # Missing optional fields render as CMake's NOTFOUND.
            notfound = lambda x: x or 'NOTFOUND'
            info = args.cmakeformat.format(
                NAME='NAME;' + b.name,
                DIR='DIR;' + str(b.dir.as_posix()),
                VENDOR='VENDOR;' + notfound(b.vendor),
                HWM='HWM;' + b.hwm,
                REVISION_DEFAULT='REVISION_DEFAULT;' + notfound(b.revision_default),
                REVISION_FORMAT='REVISION_FORMAT;' + notfound(b.revision_format),
                REVISION_EXACT='REVISION_EXACT;' + str(b.revision_exact),
                REVISIONS='REVISIONS;' + ';'.join(
                    [x.name for x in b.revisions]),
                SOCS='SOCS;' + ';'.join([s.name for s in b.socs]),
                QUALIFIERS='QUALIFIERS;' + ';'.join(qualifiers_list)
            )
            print(info)
        else:
            print(f'{b.name}')
def dump_boards(args):
    '''Print each v1 board grouped by arch, via --cmakeformat when given.'''
    arch2boards = find_arch2boards(args)
    for arch, boards in arch2boards.items():
        if args.cmakeformat is None:
            print(f'{arch}:')
        for board in boards:
            if args.cmakeformat is not None:
                # v1 boards have no vendor/revision/soc metadata, so those
                # fields are emitted as NOTFOUND/empty placeholders.
                info = args.cmakeformat.format(
                    NAME='NAME;' + board.name,
                    DIR='DIR;' + str(board.dir.as_posix()),
                    HWM='HWM;' + board.hwm,
                    VENDOR='VENDOR;NOTFOUND',
                    REVISION_DEFAULT='REVISION_DEFAULT;NOTFOUND',
                    REVISION_FORMAT='REVISION_FORMAT;NOTFOUND',
                    REVISION_EXACT='REVISION_EXACT;NOTFOUND',
                    REVISIONS='REVISIONS;NOTFOUND',
                    VARIANT_DEFAULT='VARIANT_DEFAULT;NOTFOUND',
                    SOCS='SOCS;',
                    QUALIFIERS='QUALIFIERS;'
                )
                print(info)
            else:
                print(f'  {board.name}')
if __name__ == '__main__':
    args = parse_args()
    # v1 (per-arch defconfig) boards first, then v2 (board.yml) boards.
    dump_boards(args)
    dump_v2_boards(args)
``` | /content/code_sandbox/scripts/list_boards.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,982 |
```python
#
'''
Generic GitHub helper routines which may be useful to other scripts.
This file is not meant to be run directly, but rather to be imported
as a module from other scripts.
'''
# Note that the type annotations are not currently checked by mypy.
# Unless that changes, they serve as documentation, rather than
# guarantees from a type checker.
# stdlib
import getpass
import os
import netrc
import sys
from typing import Dict
# third party
import github
def get_github_credentials(ask: bool = True) -> Dict[str, str]:
    '''Build the **kwargs needed to construct a github.Github object.

    Credentials for github.com are looked up from these sources, in
    order:

    1. a ~/.netrc file, if one exists and has a github.com entry
    2. the GITHUB_TOKEN environment variable
    3. an interactive prompt, when the 'ask' kwarg is truthy

    Scripts often need credentials because anonymous access to
    api.github.com is rate limited more aggressively than
    authenticated access, and is therefore more likely to fail.

    :param ask: if truthy, the user will be prompted for credentials
                if none are found from other sources
    :raises RuntimeError: if no credentials could be obtained
    '''
    try:
        auth = netrc.netrc().authenticators('github.com')
    except (FileNotFoundError, netrc.NetrcParseError):
        auth = None

    if auth is not None:
        return {'login_or_token': auth[0], 'password': auth[2]}

    token = os.environ.get('GITHUB_TOKEN')
    if token:
        return {'login_or_token': token}

    if not ask:
        raise RuntimeError('no credentials found')

    print('Missing GitHub credentials:\n'
          '~/.netrc file not found or has no github.com credentials, '
          'and GITHUB_TOKEN is not set in the environment. '
          'Please give your GitHub token.',
          file=sys.stderr)
    return {'login_or_token': getpass.getpass('token: ')}
def get_github_object(ask: bool = True) -> github.Github:
    '''Get a github.Github object, created with credentials.

    :param ask: passed to get_github_credentials(); if falsy, no
        interactive prompt is made and RuntimeError is raised when no
        credentials are found
    '''
    # Forward 'ask' so that ask=False actually disables the interactive
    # prompt; previously the argument was silently ignored.
    return github.Github(**get_github_credentials(ask))
``` | /content/code_sandbox/scripts/github_helpers.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 545 |
```unknown
{
getpwuid() libC issue
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
fun:realloc
fun:load_blacklist
fun:bindresvport
}
{
POSIX arch no thread cleanup
Memcheck:Leak
match-leak-kinds: reachable,possible
...
fun:posix_new_thread
fun:arch_new_thread
}
{
POSIX soc no cpu cleanup
Memcheck:Leak
match-leak-kinds: reachable,possible
...
fun:posix_boot_cpu
...
fun:main
}
{
POSIX arch no cpu cleanup
Memcheck:Leak
match-leak-kinds: reachable
...
fun:nct_init
fun:posix_arch_init
}
{
lvgl no cleanup
Memcheck:Leak
match-leak-kinds: possible
fun:malloc
fun:lv_mem_alloc
}
{
lvgl no cleanup 2
Memcheck:Leak
match-leak-kinds: possible
fun:malloc
fun:lvgl_allocate_rendering_buffers
}
``` | /content/code_sandbox/scripts/valgrind.supp | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 256 |
```python
#!/usr/bin/env python3
import argparse
from dataclasses import dataclass
from pathlib import Path
#
# This is shared code between the build system's 'shields' target
# and the 'west shields' extension command. If you change it, make
# sure to test both ways it can be used.
#
# (It's done this way to keep west optional, making it possible to run
# 'ninja shields' in a build directory without west installed.)
#
@dataclass(frozen=True)
class Shield:
    # One shield discovered under <board-root>/boards/shields/:
    # 'name' is the overlay file's base name (without '.overlay'),
    # 'dir' is the shield directory containing that overlay.
    name: str
    dir: Path
def shield_key(shield):
    """Sort key for Shield objects: order alphabetically by name."""
    return shield.name
def find_shields(args):
    """Collect the shields from every configured board root and return
    them sorted by name."""
    found = [shield
             for root in args.board_roots
             for shield in find_shields_in(root)]
    return sorted(found, key=shield_key)
def find_shields_in(root):
    """Return sorted Shield objects for the shields under
    root/boards/shields/.

    A subdirectory is treated as a shield directory only if it contains
    a Kconfig.shield file; each '*.overlay' file inside it then defines
    one shield, named after the overlay's base name.
    """
    shields = root / 'boards' / 'shields'
    ret = []
    # A board root is not required to provide any shields at all;
    # previously a missing directory made iterdir() raise
    # FileNotFoundError.
    if not shields.is_dir():
        return ret
    for maybe_shield in shields.iterdir():
        if not maybe_shield.is_dir():
            continue
        if not (maybe_shield / 'Kconfig.shield').is_file():
            continue
        for maybe_overlay in maybe_shield.iterdir():
            file_name = maybe_overlay.name
            if file_name.endswith('.overlay'):
                shield_name = file_name[:-len('.overlay')]
                ret.append(Shield(shield_name, maybe_shield))
    return sorted(ret, key=shield_key)
def parse_args():
    """Parse command-line arguments for standalone invocation."""
    p = argparse.ArgumentParser(allow_abbrev=False)
    add_args(p)
    return p.parse_args()
def add_args(parser):
    """Register the list_shields options on 'parser'.

    Shared between standalone use and the 'west shields' command.
    """
    # Remember to update west-completion.bash if you add or remove
    # flags
    parser.add_argument(
        "--board-root", dest='board_roots', default=[], type=Path,
        action='append',
        help='add a board root, may be given more than once')
def dump_shields(shields):
    """Print one indented line per shield name."""
    for s in shields:
        print('  {}'.format(s.name))
# Standalone entry point: list all shields found under the given roots.
if __name__ == '__main__':
    dump_shields(find_shields(parse_args()))
``` | /content/code_sandbox/scripts/list_shields.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 445 |
```shell
#!/bin/sh
# Generate tags or cscope files
# Usage tags.sh <mode>
#
# mode may be any of: tags, TAGS, cscope
#
# Uses the following environment variables:
# ARCH, SOC, BOARD, ZEPHYR_BASE
# find_sources <dir> <pattern>: print all files under <dir> whose name
# matches <pattern>.
find_sources()
{
	find "$1" -name "$2"
}
# Looks for assembly, c and header files of selected architectures.
# NOTE: $ALLSOURCES is deliberately unquoted so it word-splits into the
# individual directories set up at the bottom of this script.
all_sources()
{
	for dir in $ALLSOURCES
	do
		find_sources "$dir" '*.[chS]'
	done
}
# All sources the index should cover; currently just all_sources, kept
# as a separate hook for future target-specific filtering.
all_target_sources()
{
	all_sources
}
# List all Kconfig* files in the selected source directories.
all_kconfigs()
{
	for dir in $ALLSOURCES; do
		# Quote "$dir" (matching all_sources) so a directory name
		# containing spaces or glob characters is passed intact.
		find_sources "$dir" 'Kconfig*'
	done
}
# List all defconfig* files in the selected source directories.
all_defconfigs()
{
	for dir in $ALLSOURCES; do
		# Quote "$dir" (matching all_sources) so a directory name
		# containing spaces or glob characters is passed intact.
		find_sources "$dir" 'defconfig*'
	done
}
# Build a cscope database: write the file list (with -k/-q options) to
# cscope.files, then generate cscope.out from it.
docscope()
{
	(echo \-k; echo \-q; all_target_sources) > cscope.files
	cscope -b -f cscope.out
}
# Build a GNU GLOBAL (gtags) database from the target sources read on
# stdin.
dogtags()
{
	all_target_sources | gtags -i -f -
}
# Generate a tags file with Exuberant Ctags ($1). The -I options hide
# kernel-style annotation macros from the parser; the --regex-* options
# teach ctags about macro-generated identifiers (syscalls, page flags,
# static initializers, ...) inherited from the Linux tags.sh.
exuberant()
{
	all_target_sources | xargs $1 -a \
	-I __initdata,__exitdata,__initconst, \
	-I __initdata_memblock \
	-I __refdata,__attribute,__maybe_unused,__always_unused \
	-I __acquires,__releases,__deprecated \
	-I __read_mostly,__aligned,____cacheline_aligned \
	-I ____cacheline_aligned_in_smp \
	-I __cacheline_aligned,__cacheline_aligned_in_smp \
	-I ____cacheline_internodealigned_in_smp \
	-I __used,__packed,__packed2__,__must_check,__must_hold \
	-I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL \
	-I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
	-I static,const \
	--extra=+f --c-kinds=+px \
	--regex-asm='/^(ENTRY|_GLOBAL)\(([^)]*)\).*/\2/' \
	--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \
	--regex-c='/^COMPAT_SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/compat_sys_\1/' \
	--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/' \
	--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1_rcuidle/' \
	--regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1/' \
	--regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1_rcuidle/' \
	--regex-c++='/PAGEFLAG\(([^,)]*).*/Page\1/' \
	--regex-c++='/PAGEFLAG\(([^,)]*).*/SetPage\1/' \
	--regex-c++='/PAGEFLAG\(([^,)]*).*/ClearPage\1/' \
	--regex-c++='/TESTSETFLAG\(([^,)]*).*/TestSetPage\1/' \
	--regex-c++='/TESTPAGEFLAG\(([^,)]*).*/Page\1/' \
	--regex-c++='/SETPAGEFLAG\(([^,)]*).*/SetPage\1/' \
	--regex-c++='/__SETPAGEFLAG\(([^,)]*).*/__SetPage\1/' \
	--regex-c++='/TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/' \
	--regex-c++='/__TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/' \
	--regex-c++='/CLEARPAGEFLAG\(([^,)]*).*/ClearPage\1/' \
	--regex-c++='/__CLEARPAGEFLAG\(([^,)]*).*/__ClearPage\1/' \
	--regex-c++='/__PAGEFLAG\(([^,)]*).*/__SetPage\1/' \
	--regex-c++='/__PAGEFLAG\(([^,)]*).*/__ClearPage\1/' \
	--regex-c++='/PAGEFLAG_FALSE\(([^,)]*).*/Page\1/' \
	--regex-c++='/TESTSCFLAG\(([^,)]*).*/TestSetPage\1/' \
	--regex-c++='/TESTSCFLAG\(([^,)]*).*/TestClearPage\1/' \
	--regex-c++='/SETPAGEFLAG_NOOP\(([^,)]*).*/SetPage\1/' \
	--regex-c++='/CLEARPAGEFLAG_NOOP\(([^,)]*).*/ClearPage\1/' \
	--regex-c++='/__CLEARPAGEFLAG_NOOP\(([^,)]*).*/__ClearPage\1/' \
	--regex-c++='/TESTCLEARFLAG_FALSE\(([^,)]*).*/TestClearPage\1/' \
	--regex-c++='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/' \
	--regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/' \
	--regex-c++='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/' \
	--regex-c++='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/' \
	--regex-c++='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/'\
	--regex-c='/PCI_OP_READ\((\w*).*[1-4]\)/pci_bus_read_config_\1/' \
	--regex-c='/PCI_OP_WRITE\((\w*).*[1-4]\)/pci_bus_write_config_\1/' \
	--regex-c='/DEFINE_(MUTEX|SEMAPHORE|SPINLOCK)\((\w*)/\2/v/' \
	--regex-c='/DEFINE_(RAW_SPINLOCK|RWLOCK|SEQLOCK)\((\w*)/\2/v/' \
	--regex-c='/DECLARE_(RWSEM|COMPLETION)\((\w*)/\2/v/' \
	--regex-c='/DECLARE_BITMAP\((\w*)/\1/v/' \
	--regex-c='/(^|\s)(|L|H)LIST_HEAD\((\w*)/\3/v/' \
	--regex-c='/(^|\s)RADIX_TREE\((\w*)/\2/v/' \
	--regex-c='/DEFINE_PER_CPU\(([^,]*,\s*)(\w*).*\)/\2/v/' \
	--regex-c='/DEFINE_PER_CPU_SHARED_ALIGNED\(([^,]*,\s*)(\w*).*\)/\2/v/' \
	--regex-c='/DECLARE_WAIT_QUEUE_HEAD\((\w*)/\1/v/' \
	--regex-c='/DECLARE_(TASKLET|WORK|DELAYED_WORK)\((\w*)/\2/v/' \
	--regex-c='/DEFINE_PCI_DEVICE_TABLE\((\w*)/\1/v/' \
	--regex-c='/(^\s)OFFSET\((\w*)/\2/v/' \
	--regex-c='/(^\s)DEFINE\((\w*)/\2/v/' \
	--regex-c='/DEFINE_HASHTABLE\((\w*)/\1/v/'

	# Kconfig symbols are indexed twice: once under their bare name ...
	all_kconfigs | xargs $1 -a \
	--langdef=kconfig --language-force=kconfig \
	--regex-kconfig='/^[[:blank:]]*(menu|)config[[:blank:]]+([[:alnum:]_]+)/\2/'

	# ... and once with the CONFIG_ prefix used in C sources.
	all_kconfigs | xargs $1 -a \
	--langdef=kconfig --language-force=kconfig \
	--regex-kconfig='/^[[:blank:]]*(menu|)config[[:blank:]]+([[:alnum:]_]+)/CONFIG_\2/'

	# Index CONFIG_ symbols appearing in defconfig fragments too.
	all_defconfigs | xargs -r $1 -a \
	--langdef=dotconfig --language-force=dotconfig \
	--regex-dotconfig='/^#?[[:blank:]]*(CONFIG_[[:alnum:]_]+)/\1/'
}
# Generate an Emacs TAGS file with etags ($1), using Emacs-flavoured
# regexps equivalent to the Exuberant Ctags rules above.
emacs()
{
	all_target_sources | xargs $1 -a \
	--regex='/^\(ENTRY\|_GLOBAL\)(\([^)]*\)).*/\2/' \
	--regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/' \
	--regex='/^COMPAT_SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/compat_sys_\1/' \
	--regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/' \
	--regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1_rcuidle/' \
	--regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/' \
	--regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1_rcuidle/' \
	--regex='/PAGEFLAG(\([^,)]*\).*/Page\1/' \
	--regex='/PAGEFLAG(\([^,)]*\).*/SetPage\1/' \
	--regex='/PAGEFLAG(\([^,)]*\).*/ClearPage\1/' \
	--regex='/TESTSETFLAG(\([^,)]*\).*/TestSetPage\1/' \
	--regex='/TESTPAGEFLAG(\([^,)]*\).*/Page\1/' \
	--regex='/SETPAGEFLAG(\([^,)]*\).*/SetPage\1/' \
	--regex='/__SETPAGEFLAG(\([^,)]*\).*/__SetPage\1/' \
	--regex='/TESTCLEARFLAG(\([^,)]*\).*/TestClearPage\1/' \
	--regex='/__TESTCLEARFLAG(\([^,)]*\).*/TestClearPage\1/' \
	--regex='/CLEARPAGEFLAG(\([^,)]*\).*/ClearPage\1/' \
	--regex='/__CLEARPAGEFLAG(\([^,)]*\).*/__ClearPage\1/' \
	--regex='/__PAGEFLAG(\([^,)]*\).*/__SetPage\1/' \
	--regex='/__PAGEFLAG(\([^,)]*\).*/__ClearPage\1/' \
	--regex='/PAGEFLAG_FALSE(\([^,)]*\).*/Page\1/' \
	--regex='/TESTSCFLAG(\([^,)]*\).*/TestSetPage\1/' \
	--regex='/TESTSCFLAG(\([^,)]*\).*/TestClearPage\1/' \
	--regex='/SETPAGEFLAG_NOOP(\([^,)]*\).*/SetPage\1/' \
	--regex='/CLEARPAGEFLAG_NOOP(\([^,)]*\).*/ClearPage\1/' \
	--regex='/__CLEARPAGEFLAG_NOOP(\([^,)]*\).*/__ClearPage\1/' \
	--regex='/TESTCLEARFLAG_FALSE(\([^,)]*\).*/TestClearPage\1/' \
	--regex='/__TESTCLEARFLAG_FALSE(\([^,)]*\).*/__TestClearPage\1/' \
	--regex='/TASK_PFA_TEST\([^,]*,\s*([^)]*)\)/task_\1/' \
	--regex='/TASK_PFA_SET\([^,]*,\s*([^)]*)\)/task_set_\1/' \
	--regex='/TASK_PFA_CLEAR\([^,]*,\s*([^)]*)\)/task_clear_\1/' \
	--regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' \
	--regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \
	--regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'\
	--regex='/[^#]*DEFINE_HASHTABLE(\([^,)]*\)/\1/'

	# Kconfig symbols, both bare and with the CONFIG_ prefix.
	all_kconfigs | xargs $1 -a \
	--regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'

	all_kconfigs | xargs $1 -a \
	--regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/CONFIG_\3/'

	# CONFIG_ symbols from defconfig fragments.
	all_defconfigs | xargs -r $1 -a \
	--regex='/^#?[ \t]?\(CONFIG_[a-zA-Z0-9_]+\)/\1/'
}
# Dispatch to the matching tag generator for the ctags/etags flavour
# named in $1, falling back to the tool's built-in parsing.
xtags()
{
	if $1 --version 2>&1 | grep -iq exuberant; then
		exuberant $1
	elif $1 --version 2>&1 | grep -iq emacs; then
		emacs $1
	else
		all_target_sources | xargs $1 -a
	fi
}
# Used for debugging
if [ "$TAGS_VERBOSE" = "1" ]; then
	set -x
fi

# Set project base directory
if [ "${ZEPHYR_BASE}" = "" ]; then
	echo "error: run zephyr-env.sh before $0"
	exit 1
fi

tree="${ZEPHYR_BASE}/"

# List of always explored directories
COMMON_DIRS="drivers dts include kernel lib misc subsys"

# Detect if ARCH is set. If not, we look for all archs
if [ "${ARCH}" = "" ]; then
	ALLSOURCES="${COMMON_DIRS} arch boards soc"
else
	ALLSOURCES="${COMMON_DIRS} arch/${ARCH} boards/${ARCH} soc/${ARCH}"
fi

# TODO: detect if BOARD is set so we can select certain files

# Perform main action: $1 selects the index format to build.
remove_structs=
case "$1" in
	"cscope")
		docscope
		;;
	"gtags")
		dogtags
		;;
	"tags")
		rm -f tags
		xtags ctags
		remove_structs=y
		;;
	"TAGS")
		rm -f TAGS
		xtags etags
		remove_structs=y
		;;
	*)
		echo "error: incorrect parameter"
		;;
esac

# Remove structure forward declarations. Only done for ctags/etags
# output; note $1 ("tags" or "TAGS") doubles as the output file name.
if [ -n "$remove_structs" ]; then
	LANG=C sed -i -e '/^\([a-zA-Z_][a-zA-Z0-9_]*\)\t.*\t\/\^struct \1;.*\$\/;"\tx$/d' $1
fi
``` | /content/code_sandbox/scripts/tags.sh | shell | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,227 |
```prolog
#!/usr/bin/perl
# Check the stack usage of functions
#
# Inspired by Linus Torvalds
# Original idea maybe from Keith Owens
# s390 port and big speedup by Arnd Bergmann <arnd@bergmann-dalldorf.de>
# Mips port by Juan Quintela <quintela@mandrakesoft.com>
# IA64 port via Andreas Dilger
# Arm port by Holger Schurig
# sh64 port by Paul Mundt
# Random bits by Matt Mackall <mpm@selenic.com>
# M68k port by Geert Uytterhoeven and Andreas Schwab
# AVR32 port by Haavard Skinnemoen (Atmel)
# AArch64, PARISC ports by Kyle McMartin
# sparc port by Martin Habets <errandir_news@mph.eclipse.co.uk>
#
# Usage:
# objdump -d vmlinux | scripts/checkstack.pl [arch]
#
# TODO : Port to all architectures (one regex per arch)
use strict;
# check for arch
#
# $re is used for two matches:
# $& (whole re) matches the complete objdump line with the stack growth
# $1 (first bracket) matches the size of the stack growth
#
# $dre is similar, but for dynamic stack redutions:
# $& (whole re) matches the complete objdump line with the stack growth
# $1 (first bracket) matches the dynamic amount of the stack growth
#
# use anything else and feel the pain ;)
my (@stack, $re, $dre, $x, $xs, $funcre);
# Pick the stack-growth regexes for the target architecture, taken from
# the first command-line argument or `uname -m` as a default.
{
	my $arch = shift;
	if ($arch eq "") {
		$arch = `uname -m`;
		chomp($arch);
	}

	$x	= "[0-9a-f]";	# hex character
	$xs	= "[0-9a-f ]";	# hex character or space
	$funcre = qr/^$x* <(.*)>:$/;
	if ($arch eq 'aarch64') {
		#ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]!
		$re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
	} elsif ($arch eq 'arm') {
		#c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
		$re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
	} elsif ($arch eq 'avr32') {
		#8000008a: 20 1d sub sp,4
		#80000ca8: fa cd 05 b0 sub sp,sp,1456
		$re = qr/^.*sub.*sp.*,([0-9]{1,8})/o;
	} elsif ($arch =~ /^x86(_64)?$/ || $arch =~ /^i[3456]86$/) {
		#c0105234: 81 ec ac 05 00 00 sub $0x5ac,%esp
		# or
		# 2f60: 48 81 ec e8 05 00 00 sub $0x5e8,%rsp
		$re = qr/^.*[as][du][db] \$(0x$x{1,8}),\%(e|r)sp$/o;
		# x86 also grows the stack dynamically (sub %reg,%esp)
		$dre = qr/^.*[as][du][db] (%.*),\%(e|r)sp$/o;
	} elsif ($arch eq 'ia64') {
		#e0000000044011fc: 01 0f fc 8c adds r12=-384,r12
		$re = qr/.*adds.*r12=-(([0-9]{2}|[3-9])[0-9]{2}),r12/o;
	} elsif ($arch eq 'm68k') {
		# 2b6c: 4e56 fb70 linkw %fp,#-1168
		# 1df770: defc ffe4 addaw #-28,%sp
		$re = qr/.*(?:linkw %fp,|addaw )#-([0-9]{1,4})(?:,%sp)?$/o;
	} elsif ($arch eq 'metag') {
		#400026fc: 40 00 00 82 ADD A0StP,A0StP,#0x8
		$re = qr/.*ADD.*A0StP,A0StP,\#(0x$x{1,8})/o;
		# metag function labels never start with '$'
		$funcre = qr/^$x* <[^\$](.*)>:$/;
	} elsif ($arch eq 'mips64') {
		#8800402c: 67bdfff0 daddiu sp,sp,-16
		$re = qr/.*daddiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
	} elsif ($arch eq 'mips') {
		#88003254: 27bdffe0 addiu sp,sp,-32
		$re = qr/.*addiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
	} elsif ($arch eq 'parisc' || $arch eq 'parisc64') {
		$re = qr/.*ldo ($x{1,8})\(sp\),sp/o;
	} elsif ($arch eq 'ppc') {
		#c00029f4: 94 21 ff 30 stwu r1,-208(r1)
		$re = qr/.*stwu.*r1,-($x{1,8})\(r1\)/o;
	} elsif ($arch eq 'ppc64') {
		#XXX
		$re = qr/.*stdu.*r1,-($x{1,8})\(r1\)/o;
	} elsif ($arch eq 'powerpc') {
		$re = qr/.*st[dw]u.*r1,-($x{1,8})\(r1\)/o;
	} elsif ($arch =~ /^s390x?$/) {
		# 11160: a7 fb ff 60 aghi %r15,-160
		# or
		# 100092: e3 f0 ff c8 ff 71 lay %r15,-56(%r15)
		$re = qr/.*(?:lay|ag?hi).*\%r15,-(([0-9]{2}|[3-9])[0-9]{2})
		      (?:\(\%r15\))?$/ox;
	} elsif ($arch =~ /^sh64$/) {
		#XXX: we only check for the immediate case presently,
		# though we will want to check for the movi/sub
		# pair for larger users. -- PFM.
		#a00048e0: d4fc40f0 addi.l r15,-240,r15
		$re = qr/.*addi\.l.*r15,-(([0-9]{2}|[3-9])[0-9]{2}),r15/o;
	} elsif ($arch =~ /^blackfin$/) {
		# 0: 00 e8 38 01 LINK 0x4e0;
		$re = qr/.*[[:space:]]LINK[[:space:]]*(0x$x{1,8})/o;
	} elsif ($arch eq 'sparc' || $arch eq 'sparc64') {
		# f0019d10: 9d e3 bf 90 save %sp, -112, %sp
		$re = qr/.*save.*%sp, -(([0-9]{2}|[3-9])[0-9]{2}), %sp/o;
	} else {
		print("wrong or unknown architecture \"$arch\"\n");
		exit
	}
}
#
# main(): scan objdump disassembly on stdin, remembering the current
# function/file, and record every static stack adjustment >= 100 bytes
# (plus any dynamic adjustments, when the arch defines $dre).
#
my ($func, $file, $lastslash);

while (my $line = <STDIN>) {
	if ($line =~ m/$funcre/) {
		# Function label: remember the enclosing function name.
		$func = $1;
	}
	elsif ($line =~ m/(.*):\s*file format/) {
		# objdump file banner: remember the object's base name.
		$file = $1;
		$file =~ s/\.ko//;
		$lastslash = rindex($file, "/");
		if ($lastslash != -1) {
			$file = substr($file, $lastslash + 1);
		}
	}
	elsif ($line =~ m/$re/) {
		my $size = $1;
		$size = hex($size) if ($size =~ /^0x/);

		# Treat huge values as negative immediates encoded in two's
		# complement (e.g. 0xfffffff0 really means -16).
		if ($size > 0xf0000000) {
			$size = - $size;
			$size += 0x80000000;
			$size += 0x80000000;
		}
		next if ($size > 0x10000000);

		next if $line !~ m/^($xs*)/;
		my $addr = $1;
		$addr =~ s/ /0/g;
		$addr = "0x$addr";

		my $intro = "$addr $func [$file]:";
		my $padlen = 56 - length($intro);
		while ($padlen > 0) {
			$intro .= '	';
			$padlen -= 8;
		}
		# Ignore small (< 100 byte) frames to cut down on noise.
		next if ($size < 100);
		push @stack, "$intro$size\n";
	}
	elsif (defined $dre && $line =~ m/$dre/) {
		# Dynamic stack growth (register-sized adjustment).
		my $size = "Dynamic ($1)";

		next if $line !~ m/^($xs*)/;
		my $addr = $1;
		$addr =~ s/ /0/g;
		$addr = "0x$addr";

		my $intro = "$addr $func [$file]:";
		my $padlen = 56 - length($intro);
		while ($padlen > 0) {
			$intro .= '	';
			$padlen -= 8;
		}
		push @stack, "$intro$size\n";
	}
}

# Sort output by size (last field)
print sort { ($b =~ /:\t*(\d+)$/)[0] <=> ($a =~ /:\t*(\d+)$/)[0] } @stack;
``` | /content/code_sandbox/scripts/checkstack.pl | prolog | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,372 |
```python
#!/usr/bin/env python3
"""
Lists maintainers for files or commits. Similar in function to
scripts/get_maintainer.pl from Linux, but geared towards GitHub. The mapping is
in MAINTAINERS.yml.
The comment at the top of MAINTAINERS.yml in Zephyr documents the file format.
See the help texts for the various subcommands for more information. They can
be viewed with e.g.
./get_maintainer.py path --help
This executable doubles as a Python library. Identifiers not prefixed with '_'
are part of the library API. The library documentation can be viewed with this
command:
$ pydoc get_maintainer
"""
import argparse
import operator
import os
import pathlib
import re
import shlex
import subprocess
import sys
from yaml import load, YAMLError
try:
# Use the speedier C LibYAML parser if available
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
def _main():
    """Executable entry point: parse arguments and dispatch the chosen
    subcommand, turning known errors into a clean exit message."""
    args = _parse_args()
    try:
        maints = Maintainers(args.maintainers)
        args.cmd_fn(maints, args)
    except (MaintainersError, GitError) as err:
        _serr(err)
def _parse_args():
    # Parses arguments when run as an executable. Returns the argparse
    # namespace; each subcommand sets 'cmd_fn' to the Maintainers method
    # implementing it.

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__, allow_abbrev=False)

    parser.add_argument(
        "-m", "--maintainers",
        metavar="MAINTAINERS_FILE",
        help="Maintainers file to load. If not specified, MAINTAINERS.yml in "
             "the top-level repository directory is used, and must exist. "
             "Paths in the maintainers file will always be taken as relative "
             "to the top-level directory.")

    subparsers = parser.add_subparsers(
        help="Available commands (each has a separate --help text)")

    # 'path': map file/directory paths to areas
    id_parser = subparsers.add_parser(
        "path",
        help="List area(s) for paths")
    id_parser.add_argument(
        "paths",
        metavar="PATH",
        nargs="*",
        help="Path to list areas for")
    id_parser.set_defaults(cmd_fn=Maintainers._path_cmd)

    # 'commits': map the files touched by a commit range to areas
    commits_parser = subparsers.add_parser(
        "commits",
        help="List area(s) for commit range")
    commits_parser.add_argument(
        "commits",
        metavar="COMMIT_RANGE",
        nargs="*",
        help="Commit range to list areas for (default: HEAD~..)")
    commits_parser.set_defaults(cmd_fn=Maintainers._commits_cmd)

    # 'list': enumerate the files belonging to an area
    list_parser = subparsers.add_parser(
        "list",
        help="List files in areas")
    list_parser.add_argument(
        "area",
        metavar="AREA",
        nargs="?",
        help="Name of area to list files in. If not specified, all "
             "non-orphaned files are listed (all files that do not appear in "
             "any area).")
    list_parser.set_defaults(cmd_fn=Maintainers._list_cmd)

    # 'areas': enumerate areas and their maintainers
    areas_parser = subparsers.add_parser(
        "areas",
        help="List areas and maintainers")
    areas_parser.add_argument(
        "maintainer",
        metavar="MAINTAINER",
        nargs="?",
        help="List all areas maintained by maintainer.")

    areas_parser.set_defaults(cmd_fn=Maintainers._areas_cmd)

    # 'orphaned': files not covered by any area
    orphaned_parser = subparsers.add_parser(
        "orphaned",
        help="List orphaned files (files that do not appear in any area)")
    orphaned_parser.add_argument(
        "path",
        metavar="PATH",
        nargs="?",
        help="Limit to files under PATH")
    orphaned_parser.set_defaults(cmd_fn=Maintainers._orphaned_cmd)

    # 'count': summary statistics over the maintainers file
    count_parser = subparsers.add_parser(
        "count",
        help="Count areas, unique maintainers, and / or unique collaborators")
    count_parser.add_argument(
        "-a",
        "--count-areas",
        action="store_true",
        help="Count the number of areas")
    count_parser.add_argument(
        "-c",
        "--count-collaborators",
        action="store_true",
        help="Count the number of unique collaborators")
    count_parser.add_argument(
        "-n",
        "--count-maintainers",
        action="store_true",
        help="Count the number of unique maintainers")
    count_parser.add_argument(
        "-o",
        "--count-unmaintained",
        action="store_true",
        help="Count the number of unmaintained areas")
    count_parser.set_defaults(cmd_fn=Maintainers._count_cmd)

    args = parser.parse_args()
    if not hasattr(args, "cmd_fn"):
        # Called without a subcommand
        sys.exit(parser.format_usage().rstrip())

    return args
class Maintainers:
    """
    Represents the contents of a maintainers YAML file.

    These attributes are available:

    areas:
        A dictionary that maps area names to Area instances, for all areas
        defined in the maintainers file

    filename:
        The path to the maintainers file
    """
    def __init__(self, filename=None):
        """
        Creates a Maintainers instance.

        filename (default: None):
            Path to the maintainers file to parse. If None, MAINTAINERS.yml in
            the top-level directory of the Git repository is used, and must
            exist.
        """
        if (filename is not None) and (pathlib.Path(filename).exists()):
            self.filename = pathlib.Path(filename)
            self._toplevel = self.filename.parent
        else:
            # No usable file given: fall back to MAINTAINERS.yml at the
            # Git top level (requires running inside a repository).
            self._toplevel = pathlib.Path(_git("rev-parse", "--show-toplevel"))
            self.filename = self._toplevel / "MAINTAINERS.yml"

        self.areas = {}
        for area_name, area_dict in _load_maintainers(self.filename).items():
            area = Area()
            area.name = area_name
            area.status = area_dict.get("status")
            area.maintainers = area_dict.get("maintainers", [])
            area.collaborators = area_dict.get("collaborators", [])
            area.inform = area_dict.get("inform", [])
            area.labels = area_dict.get("labels", [])
            area.tests = area_dict.get("tests", [])
            area.tags = area_dict.get("tags", [])
            area.description = area_dict.get("description")

            # area._match_fn(path) tests if the path matches files and/or
            # files-regex
            area._match_fn = \
                _get_match_fn(area_dict.get("files"),
                              area_dict.get("files-regex"))

            # Like area._match_fn(path), but for files-exclude and
            # files-regex-exclude
            area._exclude_match_fn = \
                _get_match_fn(area_dict.get("files-exclude"),
                              area_dict.get("files-regex-exclude"))

            self.areas[area_name] = area

    def path2areas(self, path):
        """
        Returns a list of Area instances for the areas that contain 'path',
        taken as relative to the current directory
        """
        # Make directory paths end in '/' so that foo/bar matches foo/bar/.
        # Skip this check in _contains() itself, because the isdir() makes it
        # twice as slow in cases where it's not needed.
        is_dir = os.path.isdir(path)

        # Make 'path' relative to the repository root and normalize it.
        # normpath() would remove a trailing '/', so we add it afterwards.
        path = os.path.normpath(os.path.join(
            os.path.relpath(os.getcwd(), self._toplevel),
            path))
        if is_dir:
            path += "/"

        return [area for area in self.areas.values()
                if area._contains(path)]

    def commits2areas(self, commits):
        """
        Returns a set() of Area instances for the areas that contain files that
        are modified by the commit range in 'commits'. 'commits' could be e.g.
        "HEAD~..", to inspect the tip commit
        """
        res = set()
        # Final '--' is to make sure 'commits' is interpreted as a commit range
        # rather than a path. That might give better error messages.
        for path in _git("diff", "--name-only", commits, "--").splitlines():
            res.update(self.path2areas(path))
        return res

    def __repr__(self):
        return "<Maintainers for '{}'>".format(self.filename)

    #
    # Command-line subcommands
    #

    def _path_cmd(self, args):
        # 'path' subcommand implementation

        for path in args.paths:
            if not os.path.exists(path):
                _serr("'{}': no such file or directory".format(path))

        res = set()
        orphaned = []
        for path in args.paths:
            areas = self.path2areas(path)
            res.update(areas)
            if not areas:
                orphaned.append(path)

        _print_areas(res)
        if orphaned:
            if res:
                print()
            print("Orphaned paths (not in any area):\n" + "\n".join(orphaned))

    def _commits_cmd(self, args):
        # 'commits' subcommand implementation
        commits = args.commits or ("HEAD~..",)
        _print_areas({area for commit_range in commits
                      for area in self.commits2areas(commit_range)})

    def _areas_cmd(self, args):
        # 'areas' subcommand implementation; optionally filtered down to
        # the areas listing args.maintainer as a maintainer
        for area in self.areas.values():
            if args.maintainer:
                if args.maintainer in area.maintainers:
                    print("{:25}\t{}".format(area.name, ",".join(area.maintainers)))
            else:
                print("{:25}\t{}".format(area.name, ",".join(area.maintainers)))

    def _count_cmd(self, args):
        # 'count' subcommand implementation

        if not (args.count_areas or args.count_collaborators or args.count_maintainers or args.count_unmaintained):
            # if no specific count is provided, print them all
            args.count_areas = True
            args.count_collaborators = True
            args.count_maintainers = True
            args.count_unmaintained = True

        unmaintained = 0
        collaborators = set()
        maintainers = set()

        for area in self.areas.values():
            if area.status == 'maintained':
                maintainers = maintainers.union(set(area.maintainers))
            elif area.status == 'odd fixes':
                unmaintained += 1
            collaborators = collaborators.union(set(area.collaborators))

        if args.count_areas:
            print('{:14}\t{}'.format('areas:', len(self.areas)))
        if args.count_maintainers:
            print('{:14}\t{}'.format('maintainers:', len(maintainers)))
        if args.count_collaborators:
            print('{:14}\t{}'.format('collaborators:', len(collaborators)))
        if args.count_unmaintained:
            print('{:14}\t{}'.format('unmaintained:', unmaintained))

    def _list_cmd(self, args):
        # 'list' subcommand implementation

        if args.area is None:
            # List all files that appear in some area
            for path in _ls_files():
                for area in self.areas.values():
                    if area._contains(path):
                        print(path)
                        break
        else:
            # List all files that appear in the given area
            area = self.areas.get(args.area)
            if area is None:
                _serr("'{}': no such area defined in '{}'"
                      .format(args.area, self.filename))

            for path in _ls_files():
                if area._contains(path):
                    print(path)

    def _orphaned_cmd(self, args):
        # 'orphaned' subcommand implementation

        if args.path is not None and not os.path.exists(args.path):
            _serr("'{}': no such file or directory".format(args.path))

        for path in _ls_files(args.path):
            for area in self.areas.values():
                if area._contains(path):
                    break
            else:
                print(path)  # We get here if we never hit the 'break'
class Area:
    """
    Represents an entry for an area in MAINTAINERS.yml.

    These attributes are available:

    status:
        The status of the area, as a string. None if the area has no 'status'
        key. See MAINTAINERS.yml.

    maintainers:
        List of maintainers. Empty if the area has no 'maintainers' key.

    collaborators:
        List of collaborators. Empty if the area has no 'collaborators' key.

    inform:
        List of people to inform on pull requests. Empty if the area has no
        'inform' key.

    labels:
        List of GitHub labels for the area. Empty if the area has no 'labels'
        key.

    description:
        Text from 'description' key, or None if the area has no 'description'
        key
    """
    def _contains(self, path):
        # True iff 'path' matches the area's include patterns and is not
        # knocked out by its exclude patterns. Either match function may
        # be None (no patterns of that kind).
        if not (self._match_fn and self._match_fn(path)):
            return False
        return not (self._exclude_match_fn and self._exclude_match_fn(path))

    def __repr__(self):
        return "<Area " + self.name + ">"
def _print_areas(areas):
    # Prints a human-readable summary of each Area, sorted by name, with
    # a blank line separating consecutive entries.
    first = True
    for area in sorted(areas, key=operator.attrgetter("name")):
        if not first:
            print()
        first = False

        print("""\
{}
\tstatus: {}
\tmaintainers: {}
\tcollaborators: {}
\tinform: {}
\tlabels: {}
\ttests: {}
\ttags: {}
\tdescription: {}""".format(area.name,
                            area.status,
                            ", ".join(area.maintainers),
                            ", ".join(area.collaborators),
                            ", ".join(area.inform),
                            ", ".join(area.labels),
                            ", ".join(area.tests),
                            ", ".join(area.tags),
                            area.description or ""))
def _get_match_fn(globs, regexes):
# Constructs a single regex that tests for matches against the globs in
# 'globs' and the regexes in 'regexes'. Parts are joined with '|' (OR).
# Returns the search() method of the compiled regex.
#
# Returns None if there are neither globs nor regexes, which should be
# interpreted as no match.
if not (globs or regexes):
return None
regex = ""
if globs:
glob_regexes = []
for glob in globs:
# Construct a regex equivalent to the glob
glob_regex = glob.replace(".", "\\.").replace("*", "[^/]*") \
.replace("?", "[^/]")
if not glob.endswith("/"):
# Require a full match for globs that don't end in /
glob_regex += "$"
glob_regexes.append(glob_regex)
# The glob regexes must anchor to the beginning of the path, since we
# return search(). (?:) is a non-capturing group.
regex += "^(?:{})".format("|".join(glob_regexes))
if regexes:
if regex:
regex += "|"
regex += "|".join(regexes)
return re.compile(regex).search
def _load_maintainers(path):
    """Parse and validate the maintainers file at 'path'.

    Returns the raw parsed structure (plain dicts/lists/strings,
    mirroring the file). Raises MaintainersError on YAML syntax errors
    or failed sanity checks.
    """
    with open(path, encoding="utf-8") as f:
        try:
            data = load(f, Loader=SafeLoader)
        except YAMLError as e:
            raise MaintainersError("{}: YAML error: {}".format(path, e))

    _check_maintainers(path, data)
    return data
def _check_maintainers(maints_path, yaml):
    # Checks the maintainers data in 'yaml', which comes from the maintainers
    # file at maints_path, which is a pathlib.Path instance. Raises
    # MaintainersError (via _err/ferr) on the first problem found.

    root = maints_path.parent

    def ferr(msg):
        _err("{}: {}".format(maints_path, msg))  # Prepend the filename

    if not isinstance(yaml, dict):
        ferr("empty or malformed YAML (not a dict)")

    ok_keys = {"status", "maintainers", "collaborators", "inform", "files",
               "files-exclude", "files-regex", "files-regex-exclude",
               "labels", "description", "tests", "tags"}

    ok_status = {"maintained", "odd fixes", "unmaintained", "obsolete"}
    ok_status_s = ", ".join('"' + s + '"' for s in ok_status)  # For messages

    for area_name, area_dict in yaml.items():
        if not isinstance(area_dict, dict):
            ferr("malformed entry for area '{}' (not a dict)"
                 .format(area_name))

        for key in area_dict:
            if key not in ok_keys:
                ferr("unknown key '{}' in area '{}'"
                     .format(key, area_name))

        if "status" in area_dict and \
           area_dict["status"] not in ok_status:
            ferr("bad 'status' key on area '{}', should be one of {}"
                 .format(area_name, ok_status_s))

        # Every area must match at least some files
        if not area_dict.keys() & {"files", "files-regex"}:
            ferr("either 'files' or 'files-regex' (or both) must be specified "
                 "for area '{}'".format(area_name))

        if not area_dict.get("maintainers") and area_dict.get("status") == "maintained":
            ferr("maintained area '{}' with no maintainers".format(area_name))

        # All list-valued keys must be lists of strings
        for list_name in "maintainers", "collaborators", "inform", "files", \
                         "files-regex", "labels", "tags", "tests":
            if list_name in area_dict:
                lst = area_dict[list_name]
                if not (isinstance(lst, list) and
                        all(isinstance(elm, str) for elm in lst)):
                    ferr("malformed '{}' value for area '{}' -- should "
                         "be a list of strings".format(list_name, area_name))

        # Each glob pattern must match something that actually exists
        for files_key in "files", "files-exclude":
            if files_key in area_dict:
                for glob_pattern in area_dict[files_key]:
                    # This could be changed if it turns out to be too slow,
                    # e.g. to only check non-globbing filenames. The tuple() is
                    # needed due to pathlib's glob() returning a generator.
                    paths = tuple(root.glob(glob_pattern))
                    if not paths:
                        ferr("glob pattern '{}' in '{}' in area '{}' does not "
                             "match any files".format(glob_pattern, files_key,
                                                      area_name))
                    if not glob_pattern.endswith("/"):
                        if all(path.is_dir() for path in paths):
                            ferr("glob pattern '{}' in '{}' in area '{}' "
                                 "matches only directories, but has no "
                                 "trailing '/'"
                                 .format(glob_pattern, files_key,
                                         area_name))

        # All regexes must compile
        for files_regex_key in "files-regex", "files-regex-exclude":
            if files_regex_key in area_dict:
                for regex in area_dict[files_regex_key]:
                    try:
                        re.compile(regex)
                    except re.error as e:
                        ferr("bad regular expression '{}' in '{}' in "
                             "'{}': {}".format(regex, files_regex_key,
                                               area_name, e.msg))

        if "description" in area_dict and \
           not isinstance(area_dict["description"], str):
            ferr("malformed 'description' value for area '{}' -- should be a "
                 "string".format(area_name))
def _git(*args):
    """Run a Git command and return its rstrip()ed stdout.

    Called like _git("diff"). Reports failures through _giterr(), which
    raises GitError: git missing from PATH, spawn failure, or a
    non-zero exit status.
    """
    cmd = ("git",) + args
    cmd_str = " ".join(shlex.quote(word) for word in cmd)  # For error messages

    try:
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        _giterr("git executable not found (when running '{}'). Check that "
                "it's in listed in the PATH environment variable"
                .format(cmd_str))
    except OSError as e:
        _giterr("error running '{}': {}".format(cmd_str, e))

    stdout, stderr = process.communicate()
    if process.returncode:
        _giterr("error running '{}'\n\nstdout:\n{}\nstderr:\n{}".format(
            cmd_str, stdout.decode("utf-8"), stderr.decode("utf-8")))

    return stdout.decode("utf-8").rstrip()
def _ls_files(path=None):
    # Run 'git ls-files', optionally restricted to 'path', and return
    # the output split into a list of lines.
    args = ["ls-files"]
    if path is not None:
        args.append(path)
    return _git(*args).splitlines()
def _err(msg):
    # Report a problem in MAINTAINERS.yml itself by raising
    # MaintainersError.
    raise MaintainersError(msg)

def _giterr(msg):
    # Report a failure while running a Git command by raising GitError.
    raise GitError(msg)

def _serr(msg):
    # For reporting errors when get_maintainer.py is run as a script.
    # sys.exit() shouldn't be used otherwise.
    sys.exit("{}: error: {}".format(sys.argv[0], msg))

class MaintainersError(Exception):
    "Exception raised for MAINTAINERS.yml-related errors"

class GitError(Exception):
    "Exception raised for Git-related errors"

# Script entry point (no-op when imported as a library)
if __name__ == "__main__":
    _main()
``` | /content/code_sandbox/scripts/get_maintainer.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,625 |
```yaml
# Keep the help strings in sync with the values in the .py files!
west-commands:
- file: scripts/west_commands/completion.py
commands:
- name: completion
class: Completion
help: output shell completion scripts
- file: scripts/west_commands/boards.py
commands:
- name: boards
class: Boards
help: display information about supported boards
- file: scripts/west_commands/shields.py
commands:
- name: shields
class: Shields
help: display list of supported shields
- file: scripts/west_commands/build.py
commands:
- name: build
class: Build
help: compile a Zephyr application
- file: scripts/west_commands/twister_cmd.py
commands:
- name: twister
class: Twister
help: west twister wrapper
- file: scripts/west_commands/sign.py
commands:
- name: sign
class: Sign
help: sign a Zephyr binary for bootloader chain-loading
- file: scripts/west_commands/flash.py
commands:
- name: flash
class: Flash
help: flash and run a binary on a board
- file: scripts/west_commands/debug.py
commands:
- name: debug
class: Debug
help: flash and interactively debug a Zephyr application
- name: debugserver
class: DebugServer
help: connect to board and launch a debug server
- name: attach
class: Attach
help: interactively debug a board
- file: scripts/west_commands/export.py
commands:
- name: zephyr-export
class: ZephyrExport
help: export Zephyr installation as a CMake config package
- file: scripts/west_commands/spdx.py
commands:
- name: spdx
class: ZephyrSpdx
help: create SPDX bill of materials
- file: scripts/west_commands/blobs.py
commands:
- name: blobs
class: Blobs
help: work with binary blobs
- file: scripts/west_commands/bindesc.py
commands:
- name: bindesc
class: Bindesc
help: work with Binary Descriptors
- file: scripts/west_commands/robot.py
commands:
- name: robot
class: Robot
help: run RobotFramework test suites
- file: scripts/west_commands/simulate.py
commands:
- name: simulate
class: Simulate
help: simulate board
``` | /content/code_sandbox/scripts/west-commands.yml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 586 |
```python
#!/usr/bin/env python3
#
# stdlib
import argparse
import pickle
from pathlib import Path
from typing import List
# third party
from github.Issue import Issue
def parse_args() -> argparse.Namespace:
    """Parse and return the command-line arguments."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False)
    arg_parser.add_argument('pickle_file', metavar='PICKLE-FILE', type=Path,
                            help='pickle file containing list of issues')
    return arg_parser.parse_args()
def issue_has_label(issue: Issue, label: str) -> bool:
    """Return True if 'issue' has a label named 'label'."""
    return any(lbl.name == label for lbl in issue.labels)
def is_open_bug(issue: Issue) -> bool:
    """True for open issues (not pull requests) labeled 'bug'."""
    if issue.pull_request is not None:
        return False
    if issue.state != 'open':
        return False
    return issue_has_label(issue, 'bug')
def get_bugs(args: argparse.Namespace) -> List[Issue]:
    '''Get the bugs to use for analysis, given command line arguments.'''
    with open(args.pickle_file, 'rb') as f:
        all_issues = pickle.load(f)
    return [issue for issue in all_issues if is_open_bug(issue)]
def main() -> None:
    # Print one reST list item per open bug, ordered by issue number.
    bugs = get_bugs(parse_args())
    for bug in sorted(bugs, key=lambda b: b.number):
        title = bug.title.strip()
        print(f'- :github:`{bug.number}` - {title}')
# Entry point when run as a script
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/scripts/dump_bugs_pickle.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 311 |
```python
#!/usr/bin/env python3
#
#
'''Internal snippets tool.
This is part of the build system's support for snippets.
It is not meant for use outside of the build system.
Output CMake variables:
- SNIPPET_NAMES: CMake list of discovered snippet names
- SNIPPET_FOUND_{snippet}: one per discovered snippet
'''
from collections import defaultdict, UserDict
from dataclasses import dataclass, field
from pathlib import Path, PurePosixPath
from typing import Dict, Iterable, List, Set
import argparse
import logging
import os
import pykwalify.core
import pykwalify.errors
import re
import sys
import textwrap
import yaml
import platform
# Marker type for an 'append:' configuration. Maps variables
# to the list of values to append to them.
Appends = Dict[str, List[str]]

def _new_append():
    # default_factory for Snippet.appends
    return defaultdict(list)

def _new_board2appends():
    # default_factory for Snippet.board2appends
    return defaultdict(_new_append)

@dataclass
class Snippet:
    '''Class for keeping track of all the settings discovered for an
    individual snippet.'''

    name: str
    appends: Appends = field(default_factory=_new_append)
    board2appends: Dict[str, Appends] = field(default_factory=_new_board2appends)

    def process_data(self, pathobj: Path, snippet_data: dict):
        '''Process the data in a snippet.yml file, after it is loaded into a
        python object and validated by pykwalify.'''

        def append_value(variable, value):
            # Validate and quote one appended value; raises via _err()
            # for unknown variables or missing files.
            if variable in ('EXTRA_DTC_OVERLAY_FILE', 'EXTRA_CONF_FILE'):
                path = pathobj.parent / value
                if not path.is_file():
                    _err(f'snippet file {pathobj}: {variable}: file not found: {path}')
                return f'"{path.as_posix()}"'
            # Bug fix: this was "in ('DTS_EXTRA_CPPFLAGS')" -- the
            # parentheses do not make a tuple, so it performed a
            # *substring* test against the string 'DTS_EXTRA_CPPFLAGS'
            # (e.g. a variable named 'CPPFLAGS' would wrongly match).
            # A one-element tuple gives an exact membership test.
            if variable in ('DTS_EXTRA_CPPFLAGS',):
                return f'"{value}"'
            _err(f'unknown append variable: {variable}')

        for variable, value in snippet_data.get('append', {}).items():
            self.appends[variable].append(append_value(variable, value))

        for board, settings in snippet_data.get('boards', {}).items():
            if board.startswith('/') and not board.endswith('/'):
                _err(f"snippet file {pathobj}: board {board} starts with '/', so "
                     "it must end with '/' to use a regular expression")
            for variable, value in settings.get('append', {}).items():
                self.board2appends[board][variable].append(
                    append_value(variable, value))
class Snippets(UserDict):
    '''Type for all the information we have discovered about all snippets.
    As a dict, this maps a snippet's name onto the Snippet object.
    Any additional global attributes about all snippets go here as
    instance attributes.'''

    def __init__(self, requested: Iterable[str] = None):
        super().__init__()
        # Every snippet.yml path discovered so far.
        self.paths: Set[Path] = set()
        # Snippet names the user requested, in order.
        self.requested: List[str] = [] if requested is None else list(requested)
class SnippetsError(Exception):
    '''Class for signalling expected errors'''

    def __init__(self, msg):
        # Human-readable description of the failure.
        self.msg = msg
class SnippetToCMakePrinter:
    '''Helper class for printing a Snippets's semantics to a .cmake
    include file for use by snippets.cmake.'''

    def __init__(self, snippets: Snippets, out_file):
        # snippets: the discovered Snippets to render
        # out_file: open text file object the CMake code is written to
        self.snippets = snippets
        self.out_file = out_file
        # Section separator line for the generated file.
        self.section = '#' * 79

    def print_cmake(self):
        '''Print to the output file provided to the constructor.'''
        # TODO: add source file info
        snippets = self.snippets
        snippet_names = sorted(snippets.keys())
        if platform.system() == "Windows":
            # Change to linux-style paths for windows to avoid cmake escape character code issues
            snippets.paths = set(map(lambda x: str(PurePosixPath(x)), snippets.paths))
            # NOTE(review): this also converts each per-variable append
            # list into a set of backslash-normalized strings
            # (Windows-only) -- the dedup/ordering change appears
            # intentional but is undocumented; confirm before touching.
            for this_snippet in snippets:
                for snippet_append in (snippets[this_snippet].appends):
                    snippets[this_snippet].appends[snippet_append] = \
                        set(map(lambda x: str(x.replace("\\", "/")), \
                                snippets[this_snippet].appends[snippet_append]))
        snippet_path_list = " ".join(
            sorted(f'"{path}"' for path in snippets.paths))
        self.print('''\
# WARNING. THIS FILE IS AUTO-GENERATED. DO NOT MODIFY!
#
# This file contains build system settings derived from your snippets.
# Its contents are an implementation detail that should not be used outside
# of Zephyr's snippets CMake module.
#
# See the Snippets guide in the Zephyr documentation for more information.
''')
        self.print(f'''\
{self.section}
# Global information about all snippets.
# The name of every snippet that was discovered.
set(SNIPPET_NAMES {' '.join(f'"{name}"' for name in snippet_names)})
# The paths to all the snippet.yml files. One snippet
# can have multiple snippet.yml files.
set(SNIPPET_PATHS {snippet_path_list})
# Create variable scope for snippets build variables
zephyr_create_scope(snippets)
''')
        # Only the *requested* snippets get their settings emitted.
        for snippet_name in snippets.requested:
            self.print_cmake_for(snippets[snippet_name])
            self.print()

    def print_cmake_for(self, snippet: Snippet):
        # Emit the common appends, then the per-board conditional appends.
        self.print(f'''\
{self.section}
# Snippet '{snippet.name}'
# Common variable appends.''')
        self.print_appends(snippet.appends, 0)
        for board, appends in snippet.board2appends.items():
            self.print_appends_for_board(board, appends)

    def print_appends_for_board(self, board: str, appends: Appends):
        # A board key wrapped in '/.../' is treated as a regex on
        # BOARD+QUALIFIERS; otherwise it is an exact match.
        if board.startswith('/'):
            board_re = board[1:-1]
            self.print(f'''\
# Appends for board regular expression '{board_re}'
if("${{BOARD}}${{BOARD_QUALIFIERS}}" MATCHES "^{board_re}$")''')
        else:
            self.print(f'''\
# Appends for board '{board}'
if("${{BOARD}}${{BOARD_QUALIFIERS}}" STREQUAL "{board}")''')
        self.print_appends(appends, 1)
        self.print('endif()')

    def print_appends(self, appends: Appends, indent: int):
        # One zephyr_set(... APPEND) call per value, in the snippets scope.
        space = ' ' * indent
        for name, values in appends.items():
            for value in values:
                self.print(f'{space}zephyr_set({name} {value} SCOPE snippets APPEND)')

    def print(self, *args, **kwargs):
        # print() redirected to the output file.
        kwargs['file'] = self.out_file
        print(*args, **kwargs)
# Name of the file containing the pykwalify schema for snippet.yml
# files.
SCHEMA_PATH = str(Path(__file__).parent / 'schemas' / 'snippet-schema.yml')
# Loaded once at import time; the schema file must sit next to this
# script.
with open(SCHEMA_PATH, 'r') as f:
    SNIPPET_SCHEMA = yaml.safe_load(f.read())

# The name of the file which contains metadata about the snippets
# being defined in a directory.
SNIPPET_YML = 'snippet.yml'

# Regular expression for validating snippet names. Snippet names must
# begin with an alphanumeric character, and may contain alphanumeric
# characters or underscores. This is intentionally very restrictive to
# keep things consistent and easy to type and remember. We can relax
# this a bit later if needed.
SNIPPET_NAME_RE = re.compile('[A-Za-z0-9][A-Za-z0-9_-]*')

# Logger for this module.
LOG = logging.getLogger('snippets')

def _err(msg):
    # Signal an expected, user-reportable failure.
    raise SnippetsError(f'error: {msg}')
def parse_args():
    '''Parse and return the command-line arguments.'''
    ap = argparse.ArgumentParser(description='snippets helper',
                                 allow_abbrev=False)
    ap.add_argument('--snippet-root', default=[], action='append', type=Path,
                    help='''a SNIPPET_ROOT element; may be given
                    multiple times''')
    ap.add_argument('--snippet', dest='snippets', default=[], action='append',
                    help='''a SNIPPET element; may be given
                    multiple times''')
    ap.add_argument('--cmake-out', type=Path,
                    help='''file to write cmake output to; include()
                    this file after calling this script''')
    return ap.parse_args()
def setup_logging():
    '''Configure console logging for this script.'''
    logging.basicConfig(level=logging.INFO,
                        format=' %(name)s: %(message)s')
    # pykwalify logs validation failures itself at ERROR level; we
    # report them ourselves as needed, so silence its logger.
    logging.getLogger('pykwalify').setLevel(logging.CRITICAL)
def process_snippets(args: argparse.Namespace) -> Snippets:
    '''Process snippet.yml files under each *snippet_root*
    by recursive search. Return a Snippets object describing
    the results of the search.
    '''
    # Accumulates everything discovered across the snippet_root
    # elements, processed in command-line order.
    result = Snippets(requested=args.snippets)
    for snippet_root in args.snippet_root:
        process_snippets_in(snippet_root, result)
    return result
def find_snippets_in_roots(requested_snippets, snippet_roots) -> Snippets:
    '''Process snippet.yml files under each *snippet_root*
    by recursive search. Return a Snippets object describing
    the results of the search.
    '''
    # Library-friendly variant of process_snippets(): takes plain
    # arguments instead of an argparse.Namespace.
    result = Snippets(requested=requested_snippets)
    for snippet_root in snippet_roots:
        process_snippets_in(snippet_root, result)
    return result
def process_snippets_in(root_dir: Path, snippets: Snippets) -> None:
    '''Process snippet.yml files in *root_dir*,
    updating *snippets* as needed.'''

    if not root_dir.is_dir():
        LOG.warning(f'SNIPPET_ROOT {root_dir} '
                    'is not a directory; ignoring it')
        return

    snippets_dir = root_dir / 'snippets'
    if not snippets_dir.is_dir():
        return

    # Recursively look for snippet.yml files below <root>/snippets.
    for dirpath, _, filenames in os.walk(snippets_dir):
        if SNIPPET_YML not in filenames:
            continue
        yml_path = Path(dirpath) / SNIPPET_YML
        data = load_snippet_yml(yml_path)
        snippet_name = data['name']
        if snippet_name not in snippets:
            snippets[snippet_name] = Snippet(name=snippet_name)
        snippets[snippet_name].process_data(yml_path, data)
        snippets.paths.add(yml_path)
def load_snippet_yml(snippet_yml: Path) -> dict:
    '''Load a snippet.yml file *snippet_yml*, validate the contents
    against the schema, and do other basic checks. Return the dict
    of the resulting YAML data.

    All failures are reported through _err(), which raises
    SnippetsError.'''

    with open(snippet_yml, 'r') as f:
        try:
            snippet_data = yaml.safe_load(f.read())
        except yaml.scanner.ScannerError:
            _err(f'snippets file {snippet_yml} is invalid YAML')

    def pykwalify_err(e):
        # Format a pykwalify validation exception for the user.
        return f'''\
invalid {SNIPPET_YML} file: {snippet_yml}
{textwrap.indent(e.msg, ' ')}
'''

    # Structural validation against SNIPPET_SCHEMA.
    try:
        pykwalify.core.Core(source_data=snippet_data,
                            schema_data=SNIPPET_SCHEMA).validate()
    except pykwalify.errors.PyKwalifyException as e:
        _err(pykwalify_err(e))

    # Additional check beyond the schema: restrict snippet names to
    # SNIPPET_NAME_RE.
    name = snippet_data['name']
    if not SNIPPET_NAME_RE.fullmatch(name):
        _err(f"snippet file {snippet_yml}: invalid snippet name '{name}'; "
             'snippet names must begin with a letter '
             'or number, and may only contain letters, numbers, '
             'dashes (-), and underscores (_)')

    return snippet_data
def check_for_errors(snippets: Snippets) -> None:
    # Verify that every requested snippet was discovered; on failure,
    # raise SnippetsError (via _err()) listing the available choices.
    unknown_snippets = sorted(snippet for snippet in snippets.requested
                              if snippet not in snippets)
    if unknown_snippets:
        all_snippets = '\n '.join(sorted(snippets))
        _err(f'''\
snippets not found: {', '.join(unknown_snippets)}
Please choose from among the following snippets:
 {all_snippets}''')
def write_cmake_out(snippets: Snippets, cmake_out: Path) -> None:
    '''Write a cmake include file to *cmake_out* which
    reflects the information in *snippets*.
    The contents of this file should be considered an implementation
    detail and are not meant to be used outside of snippets.cmake.'''
    out_dir = cmake_out.parent
    if not out_dir.exists():
        out_dir.mkdir()
    with open(cmake_out, 'w', encoding="utf-8") as out:
        SnippetToCMakePrinter(snippets, out).print_cmake()
def main():
    # Entry point: discover snippets, validate the requested ones, and
    # emit the generated CMake include file.
    args = parse_args()
    setup_logging()
    try:
        snippets = process_snippets(args)
        check_for_errors(snippets)
    except SnippetsError as e:
        LOG.critical(e.msg)
        sys.exit(1)
    # NOTE(review): args.cmake_out defaults to None; if --cmake-out is
    # omitted this raises AttributeError inside write_cmake_out --
    # presumably the build system always passes it; confirm.
    write_cmake_out(snippets, args.cmake_out)

if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/snippets.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,873 |
```python
#!/usr/bin/env python3
#
#
'''Tool for parsing a list of projects to determine if they are Zephyr
projects. If no projects are given then the output from `west list` will be
used as project list.
Include file is generated for Kconfig using --kconfig-out.
A <name>:<path> text file is generated for use with CMake using --cmake-out.
Using --twister-out <filename> an argument file for twister script will
be generated which would point to test and sample roots available in modules
that can be included during a twister run. This allows testing code
maintained in modules in addition to what is available in the main Zephyr tree.
'''
import argparse
import hashlib
import os
import re
import subprocess
import sys
import yaml
import pykwalify.core
from pathlib import Path, PurePath
from collections import namedtuple
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
METADATA_SCHEMA = '''
## A pykwalify schema for basic validation of the structure of a
## metadata YAML file.
##
# The zephyr/module.yml file is a simple list of key value pairs to be used by
# the build system.
type: map
mapping:
name:
required: false
type: str
build:
required: false
type: map
mapping:
cmake:
required: false
type: str
kconfig:
required: false
type: str
cmake-ext:
required: false
type: bool
default: false
kconfig-ext:
required: false
type: bool
default: false
sysbuild-cmake:
required: false
type: str
sysbuild-kconfig:
required: false
type: str
sysbuild-cmake-ext:
required: false
type: bool
default: false
sysbuild-kconfig-ext:
required: false
type: bool
default: false
depends:
required: false
type: seq
sequence:
- type: str
settings:
required: false
type: map
mapping:
board_root:
required: false
type: str
dts_root:
required: false
type: str
snippet_root:
required: false
type: str
soc_root:
required: false
type: str
arch_root:
required: false
type: str
module_ext_root:
required: false
type: str
sca_root:
required: false
type: str
tests:
required: false
type: seq
sequence:
- type: str
samples:
required: false
type: seq
sequence:
- type: str
boards:
required: false
type: seq
sequence:
- type: str
blobs:
required: false
type: seq
sequence:
- type: map
mapping:
path:
required: true
type: str
sha256:
required: true
type: str
type:
required: true
type: str
enum: ['img', 'lib']
version:
required: true
type: str
license-path:
required: true
type: str
url:
required: true
type: str
description:
required: true
type: str
doc-url:
required: false
type: str
security:
required: false
type: map
mapping:
external-references:
required: false
type: seq
sequence:
- type: str
'''
# Relative path, inside a module, of its metadata file.
MODULE_YML_PATH = PurePath('zephyr/module.yml')
# Path to the blobs folder
MODULE_BLOBS_PATH = PurePath('zephyr/blobs')
# Single-letter blob status codes (git-status-like letters).
BLOB_PRESENT = 'A'
BLOB_NOT_PRESENT = 'D'
BLOB_OUTDATED = 'M'

# pykwalify schema object used to validate zephyr/module.yml files.
schema = yaml.load(METADATA_SCHEMA, Loader=SafeLoader)
def validate_setting(setting, module_path, filename=None):
    # True if 'setting' is None, or if 'module_path'/'setting'
    # (optionally joined with 'filename') resolves to an existing file.
    if setting is None:
        return True
    target = Path(module_path) / setting
    if filename is not None:
        target = target / filename
    return target.resolve().is_file()
def process_module(module):
    # Returns the metadata dict for 'module' (with 'name' and
    # 'name-sanitized' filled in), or None if it is not a Zephyr module.
    module_path = PurePath(module)

    # The input is a module if zephyr/module.{yml,yaml} is a valid yaml file
    # or if both zephyr/CMakeLists.txt and zephyr/Kconfig are present.
    for module_yml in [module_path / MODULE_YML_PATH,
                       module_path / MODULE_YML_PATH.with_suffix('.yaml')]:
        if Path(module_yml).is_file():
            with Path(module_yml).open('r') as f:
                meta = yaml.load(f.read(), Loader=SafeLoader)

            # Schema validation; exits the process on malformed files.
            try:
                pykwalify.core.Core(source_data=meta, schema_data=schema)\
                    .validate()
            except pykwalify.errors.SchemaError as e:
                sys.exit('ERROR: Malformed "build" section in file: {}\n{}'
                         .format(module_yml.as_posix(), e))

            meta['name'] = meta.get('name', module_path.name)
            # Sanitized form is safe for CMake/Kconfig identifiers.
            meta['name-sanitized'] = re.sub('[^a-zA-Z0-9]', '_', meta['name'])
            return meta

    # Fallback: treat as a module when both glue files are present.
    if Path(module_path.joinpath('zephyr/CMakeLists.txt')).is_file() and \
            Path(module_path.joinpath('zephyr/Kconfig')).is_file():
        return {'name': module_path.name,
                'name-sanitized': re.sub('[^a-zA-Z0-9]', '_', module_path.name),
                'build': {'cmake': 'zephyr', 'kconfig': 'zephyr/Kconfig'}}

    return None
def process_cmake(module, meta):
    # Produce one '"name":"path":"cmake-dir"' record for the CMake
    # modules file, or exit on an invalid 'cmake' setting.
    build = meta.get('build', dict())
    mod_path = PurePath(module)
    yml_path = mod_path.joinpath('zephyr/module.yml')

    if build.get('cmake-ext', False):
        # External CMake dir: left for CMake itself to resolve.
        return('\"{}\":\"{}\":\"{}\"\n'
               .format(meta['name'],
                       mod_path.as_posix(),
                       "${ZEPHYR_" + meta['name-sanitized'].upper() + "_CMAKE_DIR}"))

    cmake_setting = build.get('cmake', None)
    if not validate_setting(cmake_setting, module, 'CMakeLists.txt'):
        sys.exit('ERROR: "cmake" key in {} has folder value "{}" which '
                 'does not contain a CMakeLists.txt file.'
                 .format(yml_path.as_posix(), cmake_setting))

    cmake_path = os.path.join(module, cmake_setting or 'zephyr')
    cmake_file = os.path.join(cmake_path, 'CMakeLists.txt')
    if os.path.isfile(cmake_file):
        return('\"{}\":\"{}\":\"{}\"\n'
               .format(meta['name'],
                       mod_path.as_posix(),
                       Path(cmake_path).resolve().as_posix()))
    # No CMakeLists.txt: record the module with an empty cmake dir.
    return('\"{}\":\"{}\":\"\"\n'
           .format(meta['name'],
                   mod_path.as_posix()))
def process_sysbuildcmake(module, meta):
    """Return the sysbuild CMake modules-file record for 'module', or ''
    when the module declares no sysbuild CMake integration.

    Exits via sys.exit() if 'sysbuild-cmake' names a folder without a
    CMakeLists.txt.
    """
    section = meta.get('build', dict())
    module_path = PurePath(module)
    module_yml = module_path.joinpath('zephyr/module.yml')

    cmake_extern = section.get('sysbuild-cmake-ext', False)
    if cmake_extern:
        # External dir: left for sysbuild CMake to resolve.
        return('\"{}\":\"{}\":\"{}\"\n'
               .format(meta['name'],
                       module_path.as_posix(),
                       "${SYSBUILD_" + meta['name-sanitized'].upper() + "_CMAKE_DIR}"))

    cmake_setting = section.get('sysbuild-cmake', None)
    if not validate_setting(cmake_setting, module, 'CMakeLists.txt'):
        # Bug fix: the message previously said '"cmake" key' (copy-paste
        # from process_cmake()); this function validates the
        # 'sysbuild-cmake' key.
        sys.exit('ERROR: "sysbuild-cmake" key in {} has folder value "{}" which '
                 'does not contain a CMakeLists.txt file.'
                 .format(module_yml.as_posix(), cmake_setting))

    if cmake_setting is None:
        return ""

    cmake_path = os.path.join(module, cmake_setting or 'zephyr')
    cmake_file = os.path.join(cmake_path, 'CMakeLists.txt')
    if os.path.isfile(cmake_file):
        return('\"{}\":\"{}\":\"{}\"\n'
               .format(meta['name'],
                       module_path.as_posix(),
                       Path(cmake_path).resolve().as_posix()))
    else:
        return('\"{}\":\"{}\":\"\"\n'
               .format(meta['name'],
                       module_path.as_posix()))
def process_settings(module, meta):
    # Emit one '"<ROOT>_ROOT":"<path>"' line per *_root key found under
    # build.settings in the module metadata; '' when none are set.
    settings = meta.get('build', dict()).get('settings', None)
    if settings is None:
        return ""
    out = ""
    for root in ['board', 'dts', 'snippet', 'soc', 'arch', 'module_ext', 'sca']:
        value = settings.get(root + '_root', None)
        if value is not None:
            resolved = PurePath(module) / value
            out += f'"{root.upper()}_ROOT":"{resolved.as_posix()}"\n'
    return out
def get_blob_status(path, sha256):
    # Classify a blob file: absent, present with a matching SHA-256, or
    # present but with a different hash (outdated).
    if not path.is_file():
        return BLOB_NOT_PRESENT
    digest = hashlib.sha256()
    with path.open('rb') as f:
        digest.update(f.read())
    if digest.hexdigest() == sha256.lower():
        return BLOB_PRESENT
    return BLOB_OUTDATED
def process_blobs(module, meta):
    # Return the metadata 'blobs' entries augmented (in place) with the
    # owning module name, absolute path, and on-disk status.
    declared = meta.get('blobs', None)
    if not declared:
        return []
    base = Path(module) / MODULE_BLOBS_PATH
    result = []
    for blob in declared:
        blob['module'] = meta.get('name', None)
        blob['abspath'] = base / Path(blob['path'])
        blob['status'] = get_blob_status(blob['abspath'], blob['sha256'])
        result.append(blob)
    return result
def kconfig_snippet(meta, path, kconfig_file=None, blobs=False, sysbuild=False):
    # Build the Kconfig glue for one module: a menu wrapping an osource
    # of the module's Kconfig plus an always-on ZEPHYR_<NAME>_MODULE bool.
    name = meta['name']
    name_sanitized = meta['name-sanitized']
    if kconfig_file:
        osource_line = f'osource "{kconfig_file.resolve().as_posix()}"'
    elif sysbuild is True:
        osource_line = f'osource "$(SYSBUILD_{name_sanitized.upper()}_KCONFIG)"'
    else:
        osource_line = f'osource "$(ZEPHYR_{name_sanitized.upper()}_KCONFIG)"'
    snippet = [f'menu "{name} ({path.as_posix()})"',
               osource_line,
               f'config ZEPHYR_{name_sanitized.upper()}_MODULE',
               ' bool',
               ' default y',
               'endmenu\n']
    if blobs:
        # Present blobs taint the build.
        snippet.insert(-1, ' select TAINT_BLOBS')
    return '\n'.join(snippet)
def process_kconfig(module, meta):
    # Return the Kconfig snippet for 'module'. The build is tainted if
    # any declared blob is present on disk.
    blobs = process_blobs(module, meta)
    taint_blobs = any(b['status'] != BLOB_NOT_PRESENT for b in blobs)
    build = meta.get('build', dict())
    module_path = PurePath(module)
    module_yml = module_path.joinpath('zephyr/module.yml')

    if build.get('kconfig-ext', False):
        return kconfig_snippet(meta, module_path, blobs=taint_blobs)

    kconfig_setting = build.get('kconfig', None)
    if not validate_setting(kconfig_setting, module):
        sys.exit('ERROR: "kconfig" key in {} has value "{}" which does '
                 'not point to a valid Kconfig file.'
                 .format(module_yml, kconfig_setting))

    kconfig_file = os.path.join(module, kconfig_setting or 'zephyr/Kconfig')
    if os.path.isfile(kconfig_file):
        return kconfig_snippet(meta, module_path, Path(kconfig_file),
                               blobs=taint_blobs)

    # No Kconfig file at all: still emit the module's bool symbol.
    name_sanitized = meta['name-sanitized']
    return (f'config ZEPHYR_{name_sanitized.upper()}_MODULE\n'
            f' bool\n'
            f' default y\n')
def process_sysbuildkconfig(module, meta):
    """Return the sysbuild Kconfig snippet for 'module'.

    Falls back to a bare ZEPHYR_<NAME>_MODULE config symbol when the
    module declares no (valid) sysbuild Kconfig file.
    """
    section = meta.get('build', dict())
    module_path = PurePath(module)
    module_yml = module_path.joinpath('zephyr/module.yml')

    kconfig_extern = section.get('sysbuild-kconfig-ext', False)
    if kconfig_extern:
        return kconfig_snippet(meta, module_path, sysbuild=True)

    kconfig_setting = section.get('sysbuild-kconfig', None)
    if not validate_setting(kconfig_setting, module):
        # Bug fix: the message previously said '"kconfig" key'
        # (copy-paste from process_kconfig()); this function validates
        # the 'sysbuild-kconfig' key.
        sys.exit('ERROR: "sysbuild-kconfig" key in {} has value "{}" which does '
                 'not point to a valid Kconfig file.'
                 .format(module_yml, kconfig_setting))

    if kconfig_setting is not None:
        kconfig_file = os.path.join(module, kconfig_setting)
        if os.path.isfile(kconfig_file):
            return kconfig_snippet(meta, module_path, Path(kconfig_file))

    name_sanitized = meta['name-sanitized']
    return (f'config ZEPHYR_{name_sanitized.upper()}_MODULE\n'
            f' bool\n'
            f' default y\n')
def process_twister(module, meta):
    # Build twister argument-file content pointing at the module's
    # test, sample, and board roots.
    args = ""
    tests = meta.get('tests', [])
    samples = meta.get('samples', [])
    boards = meta.get('boards', [])

    for rel in tests + samples:
        if rel:
            root = os.path.join(module, rel)
            args += '-T\n{}\n'.format(PurePath(os.path.abspath(root))
                                      .as_posix())
    for rel in boards:
        if rel:
            root = os.path.join(module, rel)
            args += '--board-root\n{}\n'.format(PurePath(os.path.abspath(root))
                                                .as_posix())

    return args
def _create_meta_project(project_path):
    # Collect git metadata for the repository at 'project_path'.
    # Returns (meta_project, dirty): meta_project is a dict with 'path'
    # and 'revision' keys plus optional 'remote' and 'tags'; dirty is
    # True when the work tree differs from HEAD.

    def git_revision(path):
        # Returns (revision, dirty). revision is the HEAD SHA, suffixed
        # with '-dirty' when the index differs from HEAD, or None when
        # 'path' is not inside a git work tree or git fails.
        rc = subprocess.Popen(['git', 'rev-parse', '--is-inside-work-tree'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              cwd=path).wait()
        if rc == 0:
            # A git repo.
            popen = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     cwd=path)
            stdout, stderr = popen.communicate()
            stdout = stdout.decode('utf-8')
            if not (popen.returncode or stderr):
                revision = stdout.rstrip()
                # Non-zero 'git diff-index --quiet' means local changes.
                rc = subprocess.Popen(['git', 'diff-index', '--quiet', 'HEAD',
                                       '--'],
                                      stdout=None,
                                      stderr=None,
                                      cwd=path).wait()
                if rc:
                    return revision + '-dirty', True
                return revision, False
        return None, False

    def git_remote(path):
        # Returns the repo's remote URL, or None when there is no
        # remote or more than one (ambiguous).
        popen = subprocess.Popen(['git', 'remote'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=path)
        stdout, stderr = popen.communicate()
        stdout = stdout.decode('utf-8')
        remotes_name = []
        if not (popen.returncode or stderr):
            remotes_name = stdout.rstrip().split('\n')
        remote_url = None
        # If more than one remote, do not return any remote
        if len(remotes_name) == 1:
            remote = remotes_name[0]
            popen = subprocess.Popen(['git', 'remote', 'get-url', remote],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     cwd=path)
            stdout, stderr = popen.communicate()
            stdout = stdout.decode('utf-8')
            if not (popen.returncode or stderr):
                remote_url = stdout.rstrip()
        return remote_url

    def git_tags(path, revision):
        # Returns the list of tags pointing at 'revision', or None when
        # the revision is unknown or git fails.
        if not revision or len(revision) == 0:
            return None
        popen = subprocess.Popen(['git', '-P', 'tag', '--points-at', revision],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=path)
        stdout, stderr = popen.communicate()
        stdout = stdout.decode('utf-8')
        tags = None
        if not (popen.returncode or stderr):
            tags = stdout.rstrip().splitlines()
        return tags

    workspace_dirty = False
    path = PurePath(project_path).as_posix()
    revision, dirty = git_revision(path)
    workspace_dirty |= dirty
    remote = git_remote(path)
    tags = git_tags(path, revision)
    meta_project = {'path': path,
                    'revision': revision}
    if remote:
        meta_project['remote'] = remote
    if tags:
        meta_project['tags'] = tags
    return meta_project, workspace_dirty
def _get_meta_project(meta_projects_list, project_path):
projects = [ prj for prj in meta_projects_list[1:] if prj["path"] == project_path ]
return projects[0] if len(projects) == 1 else None
def process_meta(zephyr_base, west_projs, modules, extra_modules=None,
                 propagate_state=False):
    # Process zephyr_base, projects, and modules and create a dictionary
    # with meta information for each input.
    #
    # The dictionary will contain meta info in the following lists:
    # - zephyr: path and revision
    # - modules: name, path, and revision
    # - west-projects: path and revision
    #
    # Revisions get marker suffixes: '-dirty' (local changes), '-off'
    # (no/mismatched remote), '-extra' (extra modules in use). With
    # propagate_state=True, workspace-wide state is also appended to the
    # zephyr and manifest revisions.
    #
    # returns the dictionary with said lists

    meta = {'zephyr': None, 'modules': None, 'workspace': None}

    zephyr_project, zephyr_dirty = _create_meta_project(zephyr_base)
    zephyr_off = zephyr_project.get("remote") is None

    workspace_dirty = zephyr_dirty
    workspace_extra = extra_modules is not None
    workspace_off = zephyr_off

    if zephyr_off:
        zephyr_project['revision'] += '-off'

    meta['zephyr'] = zephyr_project
    meta['workspace'] = {}

    if west_projs is not None:
        from west.manifest import MANIFEST_REV_BRANCH
        projects = west_projs['projects']
        meta_projects = []

        manifest_path = projects[0].posixpath

        # Special treatment of manifest project
        # Git information (remote/revision) are not provided by west for the Manifest (west.yml)
        # To mitigate this, we check if we don't use the manifest from the zephyr repository or an other project.
        # If it's from zephyr, reuse zephyr information
        # If it's from an other project, ignore it, it will be added later
        # If it's not found, we extract data manually (remote/revision) from the directory
        manifest_project = None
        manifest_dirty = False
        manifest_off = False

        if zephyr_base == manifest_path:
            manifest_project = zephyr_project
            manifest_dirty = zephyr_dirty
            manifest_off = zephyr_off
        elif not [prj for prj in projects[1:] if prj.posixpath == manifest_path]:
            manifest_project, manifest_dirty = _create_meta_project(
                projects[0].posixpath)
            manifest_off = manifest_project.get("remote") is None
            if manifest_off:
                manifest_project["revision"] += "-off"

        if manifest_project:
            workspace_off |= manifest_off
            workspace_dirty |= manifest_dirty
            meta_projects.append(manifest_project)

        # Iterates on all projects except the first one (manifest)
        for project in projects[1:]:
            meta_project, dirty = _create_meta_project(project.posixpath)
            workspace_dirty |= dirty
            meta_projects.append(meta_project)

            off = False
            # 'off' when HEAD doesn't match the manifest revision...
            if not meta_project.get("remote") or project.sha(MANIFEST_REV_BRANCH) != meta_project['revision'].removesuffix("-dirty"):
                off = True
            # ...or when the remote URL differs from the manifest URL.
            if not meta_project.get('remote') or project.url != meta_project['remote']:
                # Force manifest URL and set commit as 'off'
                meta_project['url'] = project.url
                off = True

            if off:
                meta_project['revision'] += '-off'
                workspace_off |= off

            # If manifest is in project, updates related variables
            if project.posixpath == manifest_path:
                manifest_dirty |= dirty
                manifest_off |= off
                manifest_project = meta_project

        meta.update({'west': {'manifest': west_projs['manifest_path'],
                              'projects': meta_projects}})
        meta['workspace'].update({'off': workspace_off})

    # Iterates on all modules
    meta_modules = []
    for module in modules:
        # Check if modules is not in projects
        # It allows to have the "-off" flag since `modules` variable` does not provide URL/remote
        meta_module = _get_meta_project(meta_projects, module.project)

        if not meta_module:
            meta_module, dirty = _create_meta_project(module.project)
            workspace_dirty |= dirty

        meta_module['name'] = module.meta.get('name')

        if module.meta.get('security'):
            meta_module['security'] = module.meta.get('security')
        meta_modules.append(meta_module)

    meta['modules'] = meta_modules

    meta['workspace'].update({'dirty': workspace_dirty,
                              'extra': workspace_extra})

    if propagate_state:
        # Push workspace-wide dirty/extra/off state onto the zephyr (and
        # manifest) revision strings, without duplicating suffixes.
        zephyr_revision = zephyr_project['revision']
        if workspace_dirty and not zephyr_dirty:
            zephyr_revision += '-dirty'
        if workspace_extra:
            zephyr_revision += '-extra'
        if workspace_off and not zephyr_off:
            zephyr_revision += '-off'
        zephyr_project.update({'revision': zephyr_revision})

        if west_projs is not None:
            manifest_revision = manifest_project['revision']
            if workspace_dirty and not manifest_dirty:
                manifest_revision += '-dirty'
            if workspace_extra:
                manifest_revision += '-extra'
            if workspace_off and not manifest_off:
                manifest_revision += '-off'
            manifest_project.update({'revision': manifest_revision})

    return meta
def west_projects(manifest=None):
    """Return the active west projects and manifest path for this workspace.

    Returns a dict ``{'manifest_path': str, 'projects': list}`` when west is
    installed and we are inside a west workspace.  Returns ``None`` when west
    is not installed or no workspace is found.  Exits the process on a
    malformed or version-incompatible manifest.
    """
    manifest_path = None
    projects = []
    # West is imported here, as it is optional
    # (and thus maybe not installed)
    # if user is providing a specific modules list.
    try:
        from west.manifest import Manifest
    except ImportError:
        # West is not installed, so don't return any projects.
        return None
    # If west *is* installed, we need all of the following imports to
    # work. West versions that are excessively old may fail here:
    # west.configuration.MalformedConfig was
    # west.manifest.MalformedConfig until west v0.14.0, for example.
    # These should be hard errors.
    from west.manifest import \
        ManifestImportFailed, MalformedManifest, ManifestVersionError
    from west.configuration import MalformedConfig
    from west.util import WestNotFound
    from west.version import __version__ as WestVersion
    from packaging import version
    try:
        if not manifest:
            manifest = Manifest.from_file()
        # manifest.is_active() only exists from west 0.9.0 on.
        if version.parse(WestVersion) >= version.parse('0.9.0'):
            projects = [p for p in manifest.get_projects([])
                        if manifest.is_active(p)]
        else:
            projects = manifest.get_projects([])
        manifest_path = manifest.path
        return {'manifest_path': manifest_path, 'projects': projects}
    except (ManifestImportFailed, MalformedManifest,
            ManifestVersionError, MalformedConfig) as e:
        sys.exit(f'ERROR: {e}')
    except WestNotFound:
        # Only accept WestNotFound, meaning we are not in a west
        # workspace. Such setup is allowed, as west may be installed
        # but the project is not required to use west.
        pass
    return None
def parse_modules(zephyr_base, manifest=None, west_projs=None, modules=None,
                  extra_modules=None):
    """Discover Zephyr modules and return them topologically sorted.

    When ``modules`` is None, the module candidate list is taken from the
    west workspace (if available).  Each candidate is validated through
    process_module(); invalid entries given via ``extra_modules`` are a
    hard error.  The result is a list of ``Module(project, meta, depends)``
    tuples ordered so every module appears after its dependencies; unmet or
    cyclic dependencies abort the process.
    """
    # Local import: the file-level collections import only pulls in namedtuple.
    from collections import deque
    if modules is None:
        west_projs = west_projs or west_projects(manifest)
        modules = ([p.posixpath for p in west_projs['projects']]
                   if west_projs else [])
    if extra_modules is None:
        extra_modules = []
    Module = namedtuple('Module', ['project', 'meta', 'depends'])
    # NOTE(review): two modules declaring the same name silently overwrite
    # each other here -- last one wins, matching the original behavior.
    all_modules_by_name = {}
    # dep_modules: modules with at least one unresolved dependency.
    dep_modules = []
    # start_modules: modules with no remaining dependencies (no incoming
    # edge).  A deque makes the FIFO pop below O(1) instead of list.pop(0)'s
    # O(n) per removal.
    start_modules = deque()
    # sorted_modules: the topologically sorted output.
    sorted_modules = []
    for project in modules + extra_modules:
        # Avoid including Zephyr base project as module.
        if project == zephyr_base:
            continue
        meta = process_module(project)
        if meta:
            depends = meta.get('build', {}).get('depends', [])
            all_modules_by_name[meta['name']] = Module(project, meta, depends)
        elif project in extra_modules:
            sys.exit(f'{project}, given in ZEPHYR_EXTRA_MODULES, '
                     'is not a valid zephyr module')
    for module in all_modules_by_name.values():
        if not module.depends:
            start_modules.append(module)
        else:
            dep_modules.append(module)
    # Kahn's algorithm: repeatedly emit a dependency-free module and release
    # the modules that were waiting on it.
    while start_modules:
        node = start_modules.popleft()
        sorted_modules.append(node)
        node_name = node.meta['name']
        to_remove = []
        for module in dep_modules:
            if node_name in module.depends:
                module.depends.remove(node_name)
                if not module.depends:
                    start_modules.append(module)
                    to_remove.append(module)
        for module in to_remove:
            dep_modules.remove(module)
    if dep_modules:
        # If there are any modules with unresolved dependencies, then the
        # modules contains unmet or cyclic dependencies. Error out.
        error = 'Unmet or cyclic dependencies in modules:\n'
        for module in dep_modules:
            error += f'{module.project} depends on: {module.depends}\n'
        sys.exit(error)
    return sorted_modules
def main():
    """CLI entry point.

    Discovers Zephyr modules (via west and/or explicit lists), then writes
    whichever Kconfig / CMake / sysbuild / settings / twister / meta output
    files were requested on the command line.
    """
    parser = argparse.ArgumentParser(description='''
    Process a list of projects and create Kconfig / CMake include files for
    projects which are also a Zephyr module''', allow_abbrev=False)
    parser.add_argument('--kconfig-out',
                        help="""File to write with resulting KConfig import
                             statements.""")
    parser.add_argument('--twister-out',
                        help="""File to write with resulting twister
                             parameters.""")
    parser.add_argument('--cmake-out',
                        help="""File to write with resulting <name>:<path>
                             values to use for including in CMake""")
    parser.add_argument('--sysbuild-kconfig-out',
                        help="""File to write with resulting KConfig import
                             statements.""")
    parser.add_argument('--sysbuild-cmake-out',
                        help="""File to write with resulting <name>:<path>
                             values to use for including in CMake""")
    parser.add_argument('--meta-out',
                        help="""Write a build meta YaML file containing a list
                             of Zephyr modules and west projects.
                             If a module or project is also a git repository
                             the current SHA revision will also be written.""")
    parser.add_argument('--meta-state-propagate', action='store_true',
                        help="""Propagate state of modules and west projects
                             to the suffix of the Zephyr SHA and if west is
                             used, to the suffix of the manifest SHA""")
    parser.add_argument('--settings-out',
                        help="""File to write with resulting <name>:<value>
                             values to use for including in CMake""")
    parser.add_argument('-m', '--modules', nargs='+',
                        help="""List of modules to parse instead of using `west
                             list`""")
    parser.add_argument('-x', '--extra-modules', nargs='+',
                        help='List of extra modules to parse')
    parser.add_argument('-z', '--zephyr-base',
                        help='Path to zephyr repository')
    args = parser.parse_args()
    kconfig = ""
    cmake = ""
    sysbuild_kconfig = ""
    sysbuild_cmake = ""
    settings = ""
    twister = ""
    west_projs = west_projects()
    modules = parse_modules(args.zephyr_base, None, west_projs,
                            args.modules, args.extra_modules)
    # Accumulate each generator's output over all discovered modules.
    for module in modules:
        kconfig += process_kconfig(module.project, module.meta)
        cmake += process_cmake(module.project, module.meta)
        sysbuild_kconfig += process_sysbuildkconfig(
            module.project, module.meta)
        sysbuild_cmake += process_sysbuildcmake(module.project, module.meta)
        settings += process_settings(module.project, module.meta)
        twister += process_twister(module.project, module.meta)
    # Each output file is only written when its option was given.
    if args.kconfig_out:
        with open(args.kconfig_out, 'w', encoding="utf-8") as fp:
            fp.write(kconfig)
    if args.cmake_out:
        with open(args.cmake_out, 'w', encoding="utf-8") as fp:
            fp.write(cmake)
    if args.sysbuild_kconfig_out:
        with open(args.sysbuild_kconfig_out, 'w', encoding="utf-8") as fp:
            fp.write(sysbuild_kconfig)
    if args.sysbuild_cmake_out:
        with open(args.sysbuild_cmake_out, 'w', encoding="utf-8") as fp:
            fp.write(sysbuild_cmake)
    if args.settings_out:
        with open(args.settings_out, 'w', encoding="utf-8") as fp:
            fp.write('''\
# WARNING. THIS FILE IS AUTO-GENERATED. DO NOT MODIFY!
#
# This file contains build system settings derived from your modules.
#
# Modules may be set via ZEPHYR_MODULES, ZEPHYR_EXTRA_MODULES,
# and/or the west manifest file.
#
# See the Modules guide for more information.
''')
            fp.write(settings)
    if args.twister_out:
        with open(args.twister_out, 'w', encoding="utf-8") as fp:
            fp.write(twister)
    if args.meta_out:
        meta = process_meta(args.zephyr_base, west_projs, modules,
                            args.extra_modules, args.meta_state_propagate)
        with open(args.meta_out, 'w', encoding="utf-8") as fp:
            # Ignore references and insert data instead
            yaml.Dumper.ignore_aliases = lambda self, data: True
            fp.write(yaml.dump(meta))
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/zephyr_module.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,653 |
```python
#!/usr/bin/env python3
#
# stdlib
import argparse
import pickle
import sys
from pathlib import Path
from typing import BinaryIO, List
# third party
from github.Issue import Issue
# other zephyr/scripts modules
from github_helpers import get_github_object
# Note that type annotations are not currently statically checked, and
# should only be considered documentation.
def parse_args() -> argparse.Namespace:
    '''Build the command-line parser and parse sys.argv.'''
    description = '''
A helper script which loads all open bugs in the
zephyrproject-rtos/zephyr repository using the GitHub API, and writes
them to a new pickle file as a list of github.Issue.Issue objects.
For more information, see:
- GitHub API: path_to_url
- github.Issue.Issue:
path_to_url
- pickle: path_to_url
'''
    arg_parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        allow_abbrev=False)
    # Positional output file; optional so that stdout can be the default sink.
    arg_parser.add_argument('out_file', metavar='OUTFILE', type=Path, nargs='?',
                            help='''file to write pickle data to (default:
stdout)''')
    return arg_parser.parse_args()
def get_open_bugs() -> List[Issue]:
    """Fetch all open issues labelled 'bug' from zephyrproject-rtos/zephyr.

    Performs network I/O via the GitHub API; the returned list also contains
    pull requests carrying the label (callers filter those out).
    """
    zephyr_repo = get_github_object().get_repo('zephyrproject-rtos/zephyr')
    return list(zephyr_repo.get_issues(state='open', labels=['bug']))
def open_out_file(args: argparse.Namespace) -> BinaryIO:
    '''Return a writable binary stream: the requested file, or stdout if none.'''
    out_path = args.out_file
    if out_path is not None:
        return open(out_path, 'wb')
    # No output file requested: wrap stdout's fd without taking ownership,
    # so closing the returned stream leaves sys.stdout usable.
    return open(sys.stdout.fileno(), 'wb', closefd=False)
def main() -> None:
    """Fetch open bugs (excluding pull requests) and pickle them.

    Writes a list of github.Issue.Issue objects to the chosen output stream.
    """
    args = parse_args()
    # get_issues() also returns pull requests carrying the label; drop them.
    open_bugs = [issue for issue in get_open_bugs() if not issue.pull_request]
    with open_out_file(args) as out_file:
        pickle.dump(open_bugs, out_file)
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/scripts/make_bugs_pickle.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 417 |
```unknown
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
"""Zephyr Test Runner (twister)
Also check the "User and Developer Guides" at path_to_url
This script scans for the set of unit test applications in the git
repository and attempts to execute them. By default, it tries to
build each test case on one platform per architecture, using a precedence
list defined in an architecture configuration file, and if possible
run the tests in any available emulators or simulators on the system.
Test cases are detected by the presence of a 'testcase.yaml' or a sample.yaml
files in the application's project directory. This file may contain one or more
blocks, each identifying a test scenario. The title of the block is a name for
the test case, which only needs to be unique for the test cases specified in
that testsuite meta-data. The full canonical name for each test case is <path to
test case>/<block>.
Each test block in the testsuite meta data can define the following key/value
pairs:
tags: <list of tags> (required)
A set of string tags for the testsuite. Usually pertains to
functional domains but can be anything. Command line invocations
of this script can filter the set of tests to run based on tag.
skip: <True|False> (default False)
skip testsuite unconditionally. This can be used for broken tests.
slow: <True|False> (default False)
Don't build or run this test case unless --enable-slow was passed
in on the command line. Intended for time-consuming test cases
that are only run under certain circumstances, like daily
builds.
extra_args: <list of extra arguments>
Extra cache entries to pass to CMake when building or running the
test case.
extra_configs: <list of extra configurations>
Extra configuration options to be merged with a master prj.conf
when building or running the test case.
required_snippets: <list of snippets>
Snippets that must be applied for the test case to run.
sysbuild: <True|False> (default False)
If true, build the sample using the sysbuild infrastructure. Filtering
will only be enabled for the main project, and is not supported for
other projects included by sysbuild.
build_only: <True|False> (default False)
If true, don't try to run the test even if the selected platform
supports it.
build_on_all: <True|False> (default False)
If true, attempt to build test on all available platforms.
depends_on: <list of features>
A board or platform can announce what features it supports, this option
will enable the test only those platforms that provide this feature.
min_ram: <integer>
minimum amount of RAM needed for this test to build and run. This is
compared with information provided by the board metadata.
min_flash: <integer>
minimum amount of ROM needed for this test to build and run. This is
compared with information provided by the board metadata.
modules: <list of modules>
Add list of modules needed for this sample to build and run.
timeout: <number of seconds>
Length of time to run test in emulator before automatically killing it.
Default to 60 seconds.
arch_allow: <list of arches, such as x86, arm, arc>
Set of architectures that this test case should only be run for.
arch_exclude: <list of arches, such as x86, arm, arc>
Set of architectures that this test case should not run on.
platform_allow: <list of platforms>
Set of platforms that this test case should only be run for.
platform_exclude: <list of platforms>
Set of platforms that this test case should not run on.
simulation_exclude: <list of simulators>
Set of simulators that this test case should not run on.
extra_sections: <list of extra binary sections>
When computing sizes, twister will report errors if it finds
extra, unexpected sections in the Zephyr binary unless they are named
here. They will not be included in the size calculation.
filter: <expression>
Filter whether the testsuite should be run by evaluating an expression
against an environment containing the following values:
{ ARCH : <architecture>,
PLATFORM : <platform>,
<all CONFIG_* key/value pairs in the test's generated defconfig>,
<all DT_* key/value pairs in the test's generated device tree file>,
<all CMake key/value pairs in the test's generated CMakeCache.txt file>,
*<env>: any environment variable available
}
The grammar for the expression language is as follows:
expression ::= expression "and" expression
| expression "or" expression
| "not" expression
| "(" expression ")"
| symbol "==" constant
| symbol "!=" constant
| symbol "<" number
| symbol ">" number
| symbol ">=" number
| symbol "<=" number
| symbol "in" list
| symbol ":" string
| symbol
list ::= "[" list_contents "]"
list_contents ::= constant
| list_contents "," constant
constant ::= number
| string
For the case where expression ::= symbol, it evaluates to true
if the symbol is defined to a non-empty string.
Operator precedence, starting from lowest to highest:
or (left associative)
and (left associative)
not (right associative)
all comparison operators (non-associative)
The ':' operator compiles the string argument as a regular expression,
and then returns a true value only if the symbol's value in the environment
matches. For example, if CONFIG_SOC="stm32f107xc" then
filter = CONFIG_SOC : "stm.*"
Would match it.
Note that arch_allow, arch_exclude, platform_allow, platform_exclude
are not just syntactic sugar for filter expressions. For instance
arch_exclude = x86 arc
Can appear at first glance to have a similar effect to
filter = not ARCH in ["x86", "arc"]
but unlike "filter", these cause platforms to be filtered already during the testplan
generation. While "filter" does not exclude platforms at the testplan generation, and instead
relies on the result of running the build configuration stage. That is, to evaluate the filter
expression, cmake is run for that target, and then the filter evaluated as a gate for the
build and run steps.
Therefore filtering by using {platform|arch}_{exclude|allow} is much faster.
The set of test cases that actually run depends on directives in the testsuite
files and options passed in on the command line. If there is any confusion,
running with -v or examining the test plan report (testplan.json)
can help show why particular test cases were skipped.
To load arguments from a file, write '+' before the file name, e.g.,
+file_name. File content must be one or more valid arguments separated by
line break instead of white spaces.
Most everyday users will run with no arguments.
"""
import os
import sys
from pathlib import Path
# Resolve the Zephyr tree root; the environment variable wins when set.
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    # This file has been zephyr/scripts/twister for years,
    # and that is not going to change anytime soon. Let the user
    # run this script as ./scripts/twister without making them
    # set ZEPHYR_BASE.
    ZEPHYR_BASE = str(Path(__file__).resolve().parents[1])
    # Propagate this decision to child processes.
    os.environ['ZEPHYR_BASE'] = ZEPHYR_BASE
    print(f'ZEPHYR_BASE unset, using "{ZEPHYR_BASE}"')
# Make the in-tree twister libraries importable before importing them below.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister/"))
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/build_helpers"))
from twisterlib.environment import add_parse_arguments, parse_arguments
from twisterlib.twister_main import main
if __name__ == "__main__":
    ret = 0
    try:
        parser = add_parse_arguments()
        options = parse_arguments(parser, sys.argv[1:])
        # Re-parse with an empty argv to capture the pristine defaults,
        # so twister_main can tell user-supplied values from defaults.
        default_options = parse_arguments(parser, [], on_init=False)
        ret = main(options, default_options)
    finally:
        if (os.name != "nt") and os.isatty(1):
            # (OS is not Windows) and (stdout is interactive)
            # Emulators may leave the terminal in a raw state; restore it.
            os.system("stty sane <&1")
    sys.exit(ret)
``` | /content/code_sandbox/scripts/twister | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,877 |
```python
#!/usr/bin/env python3
#
#
"""This script will parse the serial console log file and create the required
gcda files.
"""
import argparse
import os
import re
def retrieve_data(input_file):
    """Extract per-file gcov hex dumps from a serial console log.

    Scans `input_file` for the region between GCOV_COVERAGE_DUMP_START and
    GCOV_COVERAGE_DUMP_END.  Each record inside that region has the form
    ``*<path><<hexdump>``; returns a dict mapping path -> hexdump string.
    Warns when the END marker was never seen.
    """
    extracted_coverage_info = {}
    capture_data = False
    reached_end = False
    with open(input_file, 'r') as fp:
        for line in fp:
            # Plain substring checks; no regex metacharacters involved.
            if "GCOV_COVERAGE_DUMP_START" in line:
                capture_data = True
                continue
            if "GCOV_COVERAGE_DUMP_END" in line:
                reached_end = True
                break
            # Loop until the coverage data is found.
            if not capture_data:
                continue
            # Bug fix: the original indexed split("<")[1] and crashed with
            # IndexError on any stray console line inside the dump region.
            file_name, sep, hex_dump = line.partition("<")
            if not sep:
                print("skipping malformed coverage line in %s" % input_file)
                continue
            # Remove the leading delimiter "*" from the file name and the
            # trailing newline (if any) from the dump; rstrip avoids chopping
            # a data character when the final line lacks a newline.
            extracted_coverage_info[file_name[1:]] = hex_dump.rstrip('\n')
    if not reached_end:
        print("incomplete data captured from %s" % input_file)
    return extracted_coverage_info
def create_gcda_files(extracted_coverage_info):
    """Write each captured hex dump to its .gcda file on disk.

    Takes the dict produced by retrieve_data().  Relies on the module-level
    `args` namespace (set by parse_args()) for verbosity control.
    """
    if args.verbose:
        print("Generating gcda files")
    for filename, hexdump_val in extracted_coverage_info.items():
        if args.verbose:
            print(filename)
        # if kobject_hash is given for coverage gcovr fails
        # hence skipping it problem only in gcovr v4.1
        if "kobject_hash" in filename:
            # Swap the .gcda suffix for .gcno and delete the stale note file.
            filename = filename[:-4] + "gcno"
            try:
                # Best effort: the file may not exist at all.
                os.remove(filename)
            except Exception:
                pass
            continue
        with open(filename, 'wb') as fp:
            fp.write(bytes.fromhex(hexdump_val))
def parse_args():
    """Parse command-line options into the module-level `args` namespace."""
    global args
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        allow_abbrev=False)
    arg_parser.add_argument("-i", "--input", required=True,
                            help="Input dump data")
    arg_parser.add_argument("-v", "--verbose", action="count", default=0,
                            help="Verbose Output")
    args = arg_parser.parse_args()
def main():
    """Entry point: parse args, extract the coverage dump, emit .gcda files."""
    parse_args()
    input_file = args.input
    extracted_coverage_info = retrieve_data(input_file)
    create_gcda_files(extracted_coverage_info)
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/scripts/gen_gcov_files.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 499 |
```python
from __future__ import annotations
from pylint.checkers import BaseChecker
import astroid
from astroid import nodes
class ZephyrArgParseChecker(BaseChecker):
    """Pylint checker enforcing `allow_abbrev=False` on ArgumentParser calls."""
    # The name defines a custom section of the config for this checker.
    name = "zephyr-arg-parse"
    # Register messages emitted by the checker.
    msgs = {
        "E9901": (
            "Argument parser with abbreviations is disallowed",
            "argument-parser-with-abbreviations",
            "An ArgumentParser object must set `allow_abbrev=false` to disable "
            "abbreviations and prevent issues with these being used by projects"
            " and/or scripts."
        )
    }
    # Visitor invoked for every function call, looking for ArgumentParser
    # invocations.  Only attribute-style calls (e.g. argparse.ArgumentParser)
    # are matched; a bare `ArgumentParser(...)` call would not be flagged.
    def visit_call(self, node: nodes.Call) -> None:
        if isinstance(node.func, astroid.nodes.node_classes.Attribute) and \
           node.func.attrname == "ArgumentParser":
            abbrev_disabled = False
            # Check that allow_abbrev is set and that the value is False
            for keyword in node.keywords:
                if keyword.arg == "allow_abbrev":
                    # Only a literal boolean False counts; a variable or
                    # non-bool constant cannot be verified statically.
                    if not isinstance(keyword.value, astroid.nodes.node_classes.Const):
                        continue
                    if keyword.value.pytype() != "builtins.bool":
                        continue
                    if keyword.value.value is False:
                        abbrev_disabled = True
            if abbrev_disabled is False:
                self.add_message(
                    "argument-parser-with-abbreviations", node=node
                )
        # NOTE(review): pylint ignores visit_* return values; this empty
        # tuple has no effect.
        return ()
# This is called from pylint, hence PyLinter not being declared in this file
# pylint: disable=undefined-variable
def register(linter: PyLinter) -> None:
    """Plugin entry point: pylint calls this to register our checkers."""
    linter.register_checker(ZephyrArgParseChecker(linter))
``` | /content/code_sandbox/scripts/pylint/checkers/argparse-checker.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 377 |
```python
#!/usr/bin/env python3
#
from anytree.importer import DictImporter
from anytree import PreOrderIter
from anytree.search import find
importer = DictImporter()
from datetime import datetime
from dateutil.relativedelta import relativedelta
import os
import json
from git import Repo
from git.exc import BadName
from influxdb import InfluxDBClient
import glob
import argparse
from tabulate import tabulate
TODAY = datetime.utcnow()
two_mon_rel = relativedelta(months=4)
influx_dsn = 'influxdb://localhost:8086/footprint_tracking'
def create_event(data, board, feature, commit, current_time, typ, application):
    """Push one set of footprint measurements to the tracking database.

    `data` maps measurement names to sizes; each entry becomes one InfluxDB
    point tagged with board/commit/application/type/feature and stamped with
    `current_time`.  Performs network I/O against the DSN in `influx_dsn`.
    """
    footprint_data = []
    client = InfluxDBClient.from_dsn(influx_dsn)
    # Idempotent: creating an already-existing database is a no-op.
    client.create_database('footprint_tracking')
    for d in data.keys():
        footprint_data.append({
            "measurement": d,
            "tags": {
                "board": board,
                "commit": commit,
                "application": application,
                "type": typ,
                "feature": feature
            },
            "time": current_time,
            "fields": {
                "value": data[d]
            }
        })
    client.write_points(footprint_data, time_precision='s', database='footprint_tracking')
def parse_args():
    """Parse command-line options into the module-level `args` namespace."""
    global args
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        allow_abbrev=False)
    arg_parser.add_argument("-d", "--data", help="Data Directory")
    arg_parser.add_argument("-y", "--dryrun", action="store_true", help="Dry run, do not upload to database")
    arg_parser.add_argument("-z", "--zephyr-base", help="Zephyr tree")
    arg_parser.add_argument("-f", "--file", help="JSON file with footprint data")
    args = arg_parser.parse_args()
def parse_file(json_file):
    """Flatten a ram/rom symbol-tree JSON report into {component: size}.

    The report's 'symbols' entry is an anytree-style nested dict.  When the
    tree is split into ZEPHYR_BASE and WORKSPACE subtrees both are walked;
    otherwise the whole tree is.  The root/'Symbols' node's own size is
    recorded under the key 'all'.
    """
    with open(json_file, "r") as fp:
        contents = json.load(fp)
    root = importer.import_(contents['symbols'])
    zr = find(root, lambda node: node.name == 'ZEPHYR_BASE')
    ws = find(root, lambda node: node.name == 'WORKSPACE')
    data = {}
    if zr and ws:
        trees = [zr, ws]
    else:
        trees = [root]
    # First pass: record top-level components of the full tree, skipping the
    # two workspace roots (their children are handled in the second pass).
    for node in PreOrderIter(root, maxlevel=2):
        if node.name not in ['WORKSPACE', 'ZEPHYR_BASE']:
            if node.name in ['Root', 'Symbols']:
                data['all'] = node.size
            else:
                data[node.name] = node.size
    # Second pass: record top-level components inside each selected subtree.
    # NOTE(review): when trees == [root] this repeats the first pass with the
    # same keys, which is harmless but redundant -- confirm before changing.
    for t in trees:
        root = t.name
        for node in PreOrderIter(t, maxlevel=2):
            if node.name == root:
                continue
            comp = node.name
            if comp in ['Root', 'Symbols']:
                data['all'] = node.size
            else:
                data[comp] = node.size
    return data
def process_files(data_dir, zephyr_base, dry_run):
    """Import every footprint JSON found under `data_dir` into InfluxDB.

    Expected layout: <data_dir>/<commit>/<app>/<feature>/<board>/{ram,rom}.json.
    Commits already present in the database are skipped.  With `dry_run`,
    nothing is queried or written; records are only parsed and printed.
    """
    repo = Repo(zephyr_base)
    for hash in os.listdir(f'{data_dir}'):
        if not dry_run:
            client = InfluxDBClient.from_dsn(influx_dsn)
            result = client.query(f"select * from kernel where commit = '{hash}';")
            if result:
                print(f"Skipping {hash}...")
                continue
        print(f"Importing {hash}...")
        # Bug fix: use the data_dir parameter instead of the global
        # `args.data` (identical when called from main(), but the global
        # made the parameter a lie and broke any other caller).
        for file in glob.glob(f"{data_dir}/{hash}/**/*json", recursive=True):
            file_data = file.split("/")
            json_file = os.path.basename(file)
            if 'ram' in json_file:
                typ = 'ram'
            else:
                typ = 'rom'
            # NOTE(review): these indices assume data_dir is a single
            # relative path component -- confirm for nested data dirs.
            commit = file_data[1]
            app = file_data[2]
            feature = file_data[3]
            board = file_data[4]
            data = parse_file(file)
            try:
                gitcommit = repo.commit(f'{commit}')
                current_time = gitcommit.committed_datetime
            except BadName:
                # Describe-style ids (e.g. v3.2.0-12-gabcdef0): resolve the
                # SHA that follows the '-g' marker.
                cidx = commit.find('-g') + 2
                gitcommit = repo.commit(f'{commit[cidx:]}')
                current_time = gitcommit.committed_datetime
            print(current_time)
            if not dry_run:
                create_event(data, board, feature, commit, current_time, typ, app)
def main():
    """Entry point: bulk-import a data directory and/or tabulate one file."""
    parse_args()
    # Directory import mode requires both the data dir and the zephyr tree.
    if args.data and args.zephyr_base:
        process_files(args.data, args.zephyr_base, args.dryrun)
    # Single-file mode: print a component/size table instead of uploading.
    if args.file:
        data = parse_file(args.file)
        items = []
        for component,value in data.items():
            items.append([component,value])
        table = tabulate(items, headers=['Component', 'Size'], tablefmt='orgtbl')
        print(table)
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/footprint/upload_data.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,038 |
```python
#!/usr/bin/env python3
#
import csv
import subprocess
from git import Git
import pathlib
import shutil
import argparse
def parse_args():
    """Build and run the option parser; returns the parsed namespace."""
    cli = argparse.ArgumentParser(
        description="Generate footprint data based on a predefined plan.",
        allow_abbrev=False)
    cli.add_argument("-p", "--plan", help="Path of test plan", required=True)
    return cli.parse_args()
def main():
    """Build every app/board combination in the CSV plan and collect the
    generated ram.json/rom.json under footprint_data/<git describe>/."""
    args = parse_args()
    g = Git(".")
    version = g.describe("--abbrev=12")
    pathlib.Path(f'footprint_data/{version}').mkdir(exist_ok=True, parents=True)
    with open(args.plan) as csvfile:
        csvreader = csv.reader(csvfile)
        for row in csvreader:
            # Plan columns: name, feature, board, app, extra cmake options.
            name=row[0]
            feature=row[1]
            board=row[2]
            app=row[3]
            options=row[4]
            cmd = ['west',
                   'build',
                   '-d',
                   f'out/{name}/{feature}/{board}',
                   '-b',
                   board,
                   f'{app}',
                   '-t',
                   'footprint']
            if options != '':
                cmd += ['--', f'{options}']
            print(" ".join(cmd))
            try:
                subprocess.check_output(cmd, stderr=subprocess.STDOUT, timeout=120, universal_newlines=True)
                print("Copying files...")
                pathlib.Path(f'footprint_data/{version}/{name}/{feature}/{board}').mkdir(parents=True, exist_ok=True)
                shutil.copy(f'out/{name}/{feature}/{board}/ram.json', f'footprint_data/{version}/{name}/{feature}/{board}')
                shutil.copy(f'out/{name}/{feature}/{board}/rom.json', f'footprint_data/{version}/{name}/{feature}/{board}')
            except subprocess.CalledProcessError as exc:
                # A failed build logs and continues; it must not abort the plan.
                print("Status : FAIL", exc.returncode, exc.output)
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/footprint/track.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 405 |
```unknown
#!/usr/bin/env python3
"""
This script help you to compare footprint results with previous commits in git.
If you don't have a git repository, it will compare your current tree
against the last release results.
To run it you need to set up the same environment as twister.
The scripts take 2 optional args COMMIT and BASE_COMMIT, which tell the scripts
which commit to use as current commit and as base for comparing, respectively.
The script can take any SHA commit recognized for git.
COMMIT is the commit to compare against BASE_COMMIT.
Default:
current working directory if we have changes in git tree or we don't have git.
HEAD in any other case.
BASE_COMMIT is the commit used as base to compare results.
Default:
twister_last_release.csv if we don't have git tree.
HEAD is we have changes in the working tree.
HEAD~1 if we don't have changes and we have default COMMIT.
COMMIT~1 if we have a valid COMMIT.
"""
import argparse
import os
import csv
import subprocess
import logging
import tempfile
import shutil
if "ZEPHYR_BASE" not in os.environ:
logging.error("$ZEPHYR_BASE environment variable undefined.\n")
exit(1)
logger = None
GIT_ENABLED = False
RELEASE_DATA = 'twister_last_release.csv'
def is_git_enabled():
    """Set the GIT_ENABLED global: True iff ZEPHYR_BASE is inside a git work tree."""
    global GIT_ENABLED
    proc = subprocess.Popen('git rev-parse --is-inside-work-tree',
                            stdout=subprocess.PIPE,
                            cwd=os.environ.get('ZEPHYR_BASE'), shell=True)
    # Bug fix: the original set GIT_ENABLED = False on failure but then fell
    # through and unconditionally overwrote it with True, so git always
    # appeared enabled.
    GIT_ENABLED = proc.wait() == 0
def init_logs():
    """Configure the root logger; the LOG_LEVEL environment variable may
    select DEBUG or ERROR, anything else means INFO."""
    global logger
    requested = os.environ.get('LOG_LEVEL', None)
    level = {"DEBUG": logging.DEBUG,
             "ERROR": logging.ERROR}.get(requested, logging.INFO)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(levelname)-8s: %(message)s'))
    logger = logging.getLogger('')
    logger.addHandler(handler)
    logger.setLevel(level)
    logging.debug("Log init completed")
def parse_args():
    """Build and run the option parser; returns the parsed namespace."""
    cli = argparse.ArgumentParser(
        description="Compare footprint apps RAM and ROM sizes. Note: "
        "To run it you need to set up the same environment as twister.",
        allow_abbrev=False)
    cli.add_argument('-b', '--base-commit', default=None,
                     help="Commit ID to use as base for footprint "
                     "compare. Default is parent current commit."
                     " or twister_last_release.csv if we don't have git.")
    cli.add_argument('-c', '--commit', default=None,
                     help="Commit ID to use compare footprint against base. "
                     "Default is HEAD or working tree.")
    return cli.parse_args()
def get_git_commit(commit):
    """Resolve a commit-ish to a full SHA via `git rev-parse`; None on failure."""
    commit_id = None
    proc = subprocess.Popen('git rev-parse %s' % commit, stdout=subprocess.PIPE,
                            cwd=os.environ.get('ZEPHYR_BASE'), shell=True)
    if proc.wait() == 0:
        commit_id = proc.stdout.read().decode("utf-8").strip()
    return commit_id
def sanity_results_filename(commit=None, cwd=os.environ.get('ZEPHYR_BASE')):
    """Map a commit id to its CSV results path under <cwd>/scripts/sanity_chk.

    NOTE(review): the default for `cwd` is captured once at import time.
    """
    if not commit:
        # Working-tree builds use a scratch file name.
        name = "tmp.csv"
    elif commit == RELEASE_DATA:
        # The shipped release baseline keeps its own fixed name.
        name = RELEASE_DATA
    else:
        name = "%s.csv" % commit
    return os.path.join(cwd, 'scripts', 'sanity_chk', name)
def git_checkout(commit, cwd=os.environ.get('ZEPHYR_BASE')):
    """Hard-reset the tree at `cwd` to `commit`.

    Raises if the working tree has unstaged changes; returns True on success,
    False (after logging git's output) otherwise.
    NOTE(review): the default for `cwd` is captured once at import time.
    """
    proc = subprocess.Popen('git diff --quiet', stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, cwd=cwd, shell=True)
    if proc.wait() != 0:
        raise Exception("Cannot continue, you have unstaged changes in your working tree")
    proc = subprocess.Popen('git reset %s --hard' % commit,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            cwd=cwd, shell=True)
    if proc.wait() == 0:
        return True
    else:
        logger.error(proc.stdout.read())
        return False
def run_sanity_footprint(commit=None, cwd=os.environ.get('ZEPHYR_BASE'),
                         output_file=None):
    """Build the footprint app set with twister, writing results to CSV.

    Returns True on success and raises on build failure.
    NOTE(review): the default for `cwd` is captured once at import time.
    """
    if not output_file:
        output_file = sanity_results_filename(commit)
    # zephyr-env.sh must be sourced in the (possibly cloned) tree first.
    cmd = '/bin/bash -c "source ./zephyr-env.sh && twister'
    cmd += ' +scripts/sanity_chk/sanity_compare.args -o %s"' % output_file
    logger.debug('Sanity (%s) %s' %(commit, cmd))
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            cwd=cwd, shell=True)
    output,_ = proc.communicate()
    if proc.wait() == 0:
        logger.debug(output)
        return True
    logger.error("Couldn't build footprint apps in commit %s" % commit)
    logger.error(output)
    raise Exception("Couldn't build footprint apps in commit %s" % commit)
def run_footprint_build(commit=None):
    """Build the footprint apps for `commit`.

    With no commit, builds the current working tree in place.  Otherwise
    clones ZEPHYR_BASE into a temp directory, resets the clone to `commit`,
    builds there, and removes the clone afterwards.
    """
    logging.debug("footprint build for %s" % commit)
    if not commit:
        run_sanity_footprint()
    else:
        cmd = "git clone --no-hardlinks %s" % os.environ.get('ZEPHYR_BASE')
        tmp_location = os.path.join(tempfile.gettempdir(),
                                    os.path.basename(os.environ.get('ZEPHYR_BASE')))
        # Start from a clean clone location.
        if os.path.exists(tmp_location):
            shutil.rmtree(tmp_location)
        logging.debug("cloning into %s" % tmp_location)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                cwd=tempfile.gettempdir(), shell=True)
        if proc.wait() == 0:
            if git_checkout(commit, tmp_location):
                run_sanity_footprint(commit, tmp_location)
        else:
            logger.error(proc.stdout.read())
        # Always clean up the clone, even after a failed build.
        shutil.rmtree(tmp_location, ignore_errors=True)
    return True
def read_sanity_report(filename):
    """Load a twister CSV report as a list of per-row dicts (values are strings)."""
    with open(filename) as report:
        return list(csv.DictReader(report))
def get_footprint_results(commit=None):
    """Return parsed footprint rows for `commit`, building them if missing.

    The release baseline CSV (RELEASE_DATA) ships with the tree and is never
    rebuilt; the working tree (commit=None) is always rebuilt.
    """
    sanity_file = sanity_results_filename(commit)
    if (not os.path.exists(sanity_file) or not commit) and commit != RELEASE_DATA:
        run_footprint_build(commit)
    return read_sanity_report(sanity_file)
def tree_changes():
    """Return True when the ZEPHYR_BASE working tree has unstaged changes."""
    proc = subprocess.Popen('git diff --quiet', stdout=subprocess.PIPE,
                            cwd=os.environ.get('ZEPHYR_BASE'), shell=True)
    if proc.wait() != 0:
        return True
    return False
def get_default_current_commit():
    """Default 'current' side: the working tree (None) if dirty, else HEAD's SHA."""
    if tree_changes():
        return None
    else:
        return get_git_commit('HEAD')
def get_default_base_commit(current_commit):
    """Default base commit: HEAD when comparing a dirty tree, otherwise the
    parent of the current commit (HEAD~1 when current is HEAD itself)."""
    if not current_commit:
        if tree_changes():
            return get_git_commit('HEAD')
        else:
            return get_git_commit('HEAD~1')
    else:
        return get_git_commit('%s~1'%current_commit)
def build_history(b_commit=None, c_commit=None):
    """Resolve base/current revisions, build both footprint result sets,
    and print the RAM/ROM deltas between them."""
    if not GIT_ENABLED:
        # Without git we can only compare the working tree against the
        # shipped release baseline CSV.
        logger.info('Working on current tree, not git enabled.')
        current_commit = None
        base_commit = RELEASE_DATA
    else:
        if not c_commit:
            current_commit = get_default_current_commit()
        else:
            current_commit = get_git_commit(c_commit)
        if not b_commit:
            base_commit = get_default_base_commit(current_commit)
        else:
            base_commit = get_git_commit(b_commit)
        if not base_commit:
            logger.error("Cannot resolve base commit")
            return
    logger.info("Base: %s" % base_commit)
    logger.info("Current: %s" % (current_commit if current_commit else
                                 'working space'))
    current_results = get_footprint_results(current_commit)
    base_results = get_footprint_results(base_commit)
    deltas = compare_results(base_results, current_results)
    print_deltas(deltas)
def compare_results(base_results, current_results):
    """Compare RAM/ROM footprint metrics between two sanity reports.

    :param base_results: rows (dicts) from the baseline report.
    :param current_results: rows (dicts) from the report under test.
    :return: nested dict {test: {platform: {metric: {'delta', 'current'}}}}
             containing only metrics whose size actually changed.
    """
    interesting_metrics = [("ram_size", int),
                           ("rom_size", int)]
    results = {}
    metrics = {}
    # Index both reports as metrics[kind][test][platform] -> {metric: value}.
    # ('kind' instead of 'type' to avoid shadowing the builtin.)
    for kind, data in {'base': base_results, 'current': current_results}.items():
        metrics[kind] = {}
        for row in data:
            d = {}
            for m, mtype in interesting_metrics:
                # Skip empty cells so int("") does not raise; as a result
                # every stored metric value is already an int.
                if row[m]:
                    d[m] = mtype(row[m])
            metrics[kind].setdefault(row["test"], {})[row["platform"]] = d
    # Compute deltas only for test/platform pairs present in both reports.
    for test, platforms in metrics['current'].items():
        if test not in metrics['base']:
            continue
        tests = {}
        for platform, test_data in platforms.items():
            if platform not in metrics['base'][test]:
                continue
            golden_metric = metrics['base'][test][platform]
            tmp = {}
            for metric, _ in interesting_metrics:
                if metric not in golden_metric or metric not in test_data:
                    continue
                delta = test_data[metric] - golden_metric[metric]
                if delta == 0:
                    continue
                tmp[metric] = {
                    'delta': delta,
                    'current': test_data[metric],
                }
            if tmp:
                tests[platform] = tmp
        if tests:
            results[test] = tests
    return results
def print_deltas(deltas):
    """Pretty-print per-test footprint deltas.

    :param deltas: output of compare_results().
    :return: number of changed metrics reported (0 when nothing changed).
    """
    error_count = 0
    for test in sorted(deltas):
        print("\n{:<25}".format(test))
        for platform, data in deltas[test].items():
            print(" {:<25}".format(platform))
            for metric, value in data.items():
                delta = value['delta']
                current = value['current']
                # Percentage change relative to the baseline size.
                percentage = float(delta) / float(current - delta)
                label = "RAM" if metric == "ram_size" else "ROM"
                print(" {} ({:+.2%}) {:+6} current size {:>7} bytes".format(
                    label, percentage, delta, current))
                error_count += 1
    if error_count == 0:
        print("There are no changes in RAM neither in ROM of footprint apps.")
    return error_count
def main():
    """Entry point: compare footprint between the requested commits."""
    options = parse_args()
    build_history(options.base_commit, options.commit)
# Script entry: configure logging and detect git availability before
# running the comparison.
if __name__ == "__main__":
    init_logs()
    is_git_enabled()
    main()
``` | /content/code_sandbox/scripts/footprint/compare_footprint | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,224 |
```unknown
#!/usr/bin/env python3
#
#
# Based on a script by:
# Chereau, Fabien <fabien.chereau@intel.com>
"""
Process an ELF file to generate size report on RAM and ROM.
"""
import argparse
import locale
import os
import sys
import re
from pathlib import Path
import json
from packaging import version
from colorama import init, Fore
from anytree import RenderTree, NodeMixin, findall_by_attr
from anytree.exporter import DictExporter
import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
from elftools.dwarf.descriptions import describe_form_class
from elftools.dwarf.descriptions import (
describe_DWARF_expr, set_global_machine_arch)
from elftools.dwarf.locationlists import (
LocationExpr, LocationParser)
if version.parse(elftools.__version__) < version.parse('0.24'):
sys.exit("pyelftools is out of date, need version 0.24 or later")
# ELF section flags
SHF_WRITE = 0x1
SHF_ALLOC = 0x2
SHF_EXEC = 0x4
SHF_WRITE_ALLOC = SHF_WRITE | SHF_ALLOC
SHF_ALLOC_EXEC = SHF_ALLOC | SHF_EXEC
# Extracts the hex address from a described "(DW_OP_addr: <hex>)" expression.
DT_LOCATION = re.compile(r"\(DW_OP_addr: ([0-9a-f]+)\)")
# Source file extensions used to highlight file nodes in the tree output.
SRC_FILE_EXT = ('.h', '.c', '.hpp', '.cpp', '.hxx', '.cxx', '.c++')
def get_symbol_addr(sym):
    """Return the address (st_value) of an ELF symbol."""
    return sym['st_value']
def get_symbol_size(sym):
    """Return the size in bytes (st_size) of an ELF symbol."""
    return sym['st_size']
def is_symbol_in_ranges(sym, ranges):
    """Return the first address range containing the symbol, or None.

    Each range is a dict with inclusive 'start' and 'end' addresses.
    """
    addr = sym['st_value']
    return next((bound for bound in ranges
                 if bound['start'] <= addr <= bound['end']),
                None)
def get_die_mapped_address(die, parser, dwarfinfo):
    """Get the bounding addresses from a DIE variable or subprogram.

    :param die: DWARF DIE to examine.
    :param parser: elftools LocationParser instance.
    :param dwarfinfo: DWARFInfo used to decode location expressions.
    :return: (low, high) half-open address range, or (None, None) when
             the DIE carries no usable address.
    """
    low = None
    high = None
    # Variables: address comes from a DW_AT_location expression.
    if die.tag == 'DW_TAG_variable':
        if 'DW_AT_location' in die.attributes:
            loc_attr = die.attributes['DW_AT_location']
            if parser.attribute_has_location(loc_attr, die.cu['version']):
                loc = parser.parse_from_attribute(loc_attr, die.cu['version'], die)
                if isinstance(loc, LocationExpr):
                    addr = describe_DWARF_expr(loc.loc_expr,
                                               dwarfinfo.structs)
                    # DT_LOCATION extracts the hex address from the
                    # "(DW_OP_addr: <hex>)" description.
                    matcher = DT_LOCATION.match(addr)
                    if matcher:
                        low = int(matcher.group(1), 16)
                        # A variable is a single address; use a width-1
                        # half-open range so range checks still work.
                        high = low + 1
    # Subprograms: range comes from DW_AT_low_pc / DW_AT_high_pc.
    if die.tag == 'DW_TAG_subprogram':
        if 'DW_AT_low_pc' in die.attributes:
            low = die.attributes['DW_AT_low_pc'].value
            high_pc = die.attributes['DW_AT_high_pc']
            high_pc_class = describe_form_class(high_pc.form)
            # DWARF encodes high_pc either as an absolute address or as
            # a constant offset from low_pc.
            if high_pc_class == 'address':
                high = high_pc.value
            elif high_pc_class == 'constant':
                high = low + high_pc.value
    return low, high
def match_symbol_address(symlist, die, parser, dwarfinfo):
    """Return the first symbol entry whose address falls inside the
    DIE's address range, or None.

    Works for DIE variables (single-address range) as well as
    subprograms (low/high PC range).
    """
    low, high = get_die_mapped_address(die, parser, dwarfinfo)
    if low is None:
        return None
    for candidate in symlist:
        if low <= candidate['symbol']['st_value'] < high:
            return candidate
    return None
def get_symbols(elf, addr_ranges):
    """
    Fetch the symbols from the symbol table and put them
    into ROM, RAM, unassigned buckets.

    :param elf: ELFFile to scan for symbol tables.
    :param addr_ranges: dict from get_section_ranges() providing the
                        'rom', 'ram' and 'unassigned' range lists.
    :return: dict {'rom', 'ram', 'unassigned'} where each value maps a
             symbol name to a list of entry dicts
             ('name', 'symbol', 'mapped_files', 'section').
    """
    rom_syms = dict()
    ram_syms = dict()
    unassigned_syms = dict()
    rom_addr_ranges = addr_ranges['rom']
    ram_addr_ranges = addr_ranges['ram']
    unassigned_addr_ranges = addr_ranges['unassigned']
    for section in elf.iter_sections():
        if isinstance(section, SymbolTableSection):
            for sym in section.iter_symbols():
                # Ignore symbols with size == 0
                if get_symbol_size(sym) == 0:
                    continue
                found_sec = False
                entry = {'name': sym.name,
                         'symbol': sym,
                         'mapped_files': set(),
                         'section': None}
                # If symbol is in ROM area?
                bound = is_symbol_in_ranges(sym, rom_addr_ranges)
                if bound:
                    if sym.name not in rom_syms:
                        rom_syms[sym.name] = list()
                    entry['section'] = bound['name']
                    rom_syms[sym.name].append(entry)
                    found_sec = True
                # If symbol is in RAM area?
                # Note: a symbol may land in both buckets (e.g. .data
                # lives in ROM and RAM); the same entry dict is shared.
                bound = is_symbol_in_ranges(sym, ram_addr_ranges)
                if bound:
                    if sym.name not in ram_syms:
                        ram_syms[sym.name] = list()
                    entry['section'] = bound['name']
                    ram_syms[sym.name].append(entry)
                    found_sec = True
                if not found_sec:
                    # Neither ROM nor RAM: record it as unassigned so the
                    # caller can warn about it.
                    bound = is_symbol_in_ranges(sym, unassigned_addr_ranges)
                    if bound:
                        entry['section'] = bound['name']
                    if sym.name not in unassigned_syms:
                        unassigned_syms[sym.name] = list()
                    unassigned_syms[sym.name].append(entry)
    ret = {'rom': rom_syms,
           'ram': ram_syms,
           'unassigned': unassigned_syms}
    return ret
def print_section_info(section, descr=""):
    """In verbose mode, dump a section's address range, size, type and
    flags to stdout; otherwise do nothing."""
    if not args.verbose:
        return
    size = section['sh_size']
    start = section['sh_addr']
    # Inclusive end address; zero-size sections collapse to start.
    end = start + (size - 1 if size else 0)
    print(f"DEBUG: "
          f"0x{start:08x}-0x{end:08x} "
          f"{descr} '{section.name}': size={size}, "
          f"{section['sh_type']}, 0x{section['sh_flags']:08x}")
#
def get_section_ranges(elf):
    """
    Parse ELF header to find out the address ranges of ROM or RAM sections
    and their total sizes.

    :param elf: ELFFile to scan.
    :return: dict with 'rom'/'ram'/'unassigned' range lists (consumable
             by is_symbol_in_ranges) plus matching
             '<bucket>_total_size' byte totals.
    """
    rom_addr_ranges = list()
    ram_addr_ranges = list()
    unassigned_addr_ranges = list()
    rom_size = 0
    ram_size = 0
    unassigned_size = 0
    for section in elf.iter_sections():
        size = section['sh_size']
        sec_start = section['sh_addr']
        # 'end' is inclusive; a zero-size section collapses to its start.
        sec_end = sec_start + (size - 1 if size else 0)
        bound = {'start': sec_start, 'end': sec_end, 'name': section.name}
        is_assigned = False
        if section['sh_type'] == 'SHT_NOBITS':
            # BSS and noinit sections
            ram_addr_ranges.append(bound)
            ram_size += size
            is_assigned = True
            print_section_info(section, "RAM bss section")
        elif section['sh_type'] == 'SHT_PROGBITS':
            # Sections to be in flash or memory
            flags = section['sh_flags']
            if (flags & SHF_ALLOC_EXEC) == SHF_ALLOC_EXEC:
                # Text section
                rom_addr_ranges.append(bound)
                rom_size += size
                is_assigned = True
                print_section_info(section, "ROM txt section")
            elif (flags & SHF_WRITE_ALLOC) == SHF_WRITE_ALLOC:
                # Data occupies both ROM and RAM
                # since at boot, content is copied from ROM to RAM
                rom_addr_ranges.append(bound)
                rom_size += size
                ram_addr_ranges.append(bound)
                ram_size += size
                is_assigned = True
                print_section_info(section, "ROM,RAM section")
            elif (flags & SHF_ALLOC) == SHF_ALLOC:
                # Read only data
                rom_addr_ranges.append(bound)
                rom_size += size
                is_assigned = True
                print_section_info(section, "ROM r/o section")
        if not is_assigned:
            print_section_info(section, "unassigned section")
            unassigned_addr_ranges.append(bound)
            unassigned_size += size
    ret = {'rom': rom_addr_ranges,
           'rom_total_size': rom_size,
           'ram': ram_addr_ranges,
           'ram_total_size': ram_size,
           'unassigned': unassigned_addr_ranges,
           'unassigned_total_size': unassigned_size}
    return ret
def get_die_filename(die, lineprog):
    """Get the source code filename associated with a DIE.

    :param die: DIE carrying a DW_AT_decl_file attribute.
    :param lineprog: line program of the DIE's compile unit.
    :return: resolved pathlib.Path of the declaring source file.
    """
    file_index = die.attributes['DW_AT_decl_file'].value
    # File/dir indices are treated as 1-based here (hence the -1).
    file_entry = lineprog['file_entry'][file_index - 1]
    dir_index = file_entry['dir_index']
    if dir_index == 0:
        filename = file_entry.name
    else:
        directory = lineprog.header['include_directory'][dir_index - 1]
        filename = os.path.join(directory, file_entry.name)
    # DWARF strings are bytes; decode with the preferred locale encoding.
    path = Path(filename.decode(locale.getpreferredencoding()))
    # Prepend output path to relative path
    if not path.is_absolute():
        output = Path(args.output)
        path = output.joinpath(path)
    # Change path to relative to Zephyr base
    try:
        path = path.resolve()
    except OSError as e:
        # built-ins can't be resolved, so it's not an issue
        if '<built-in>' not in str(path):
            raise e
    return path
def do_simple_name_matching(dwarfinfo, symbol_dict, processed):
    """
    Sequentially process DIEs in compiler units with direct file mappings
    within the DIEs themselves, and do simply matching between DIE names
    and symbol names.

    :param dwarfinfo: DWARFInfo of the ELF under analysis.
    :param symbol_dict: symbol-name -> entry-list mapping (one bucket
                        from get_symbols()).
    :param processed: in/out dict with 'mapped_symbols', 'mapped_addr'
                      and 'unmapped_symbols' sets; this function also
                      stores the 'unmapped_dies' set for the later
                      address-range matching pass.
    """
    mapped_symbols = processed['mapped_symbols']
    mapped_addresses = processed['mapped_addr']
    unmapped_symbols = processed['unmapped_symbols']
    newly_mapped_syms = set()
    location_lists = dwarfinfo.location_lists()
    location_parser = LocationParser(location_lists)
    unmapped_dies = set()
    # Loop through all compile units
    for compile_unit in dwarfinfo.iter_CUs():
        lineprog = dwarfinfo.line_program_for_CU(compile_unit)
        if lineprog is None:
            continue
        # Loop through each DIE and find variables and
        # subprograms (i.e. functions)
        for die in compile_unit.iter_DIEs():
            sym_name = None
            # Process variables
            if die.tag == 'DW_TAG_variable':
                # DW_AT_declaration
                # having 'DW_AT_location' means this maps
                # to an actual address (e.g. not an extern)
                if 'DW_AT_location' in die.attributes:
                    sym_name = die.get_full_path()
            # Process subprograms (i.e. functions) if they are valid
            if die.tag == 'DW_TAG_subprogram':
                # Refer to another DIE for name
                if ('DW_AT_abstract_origin' in die.attributes) or (
                        'DW_AT_specification' in die.attributes):
                    unmapped_dies.add(die)
                # having 'DW_AT_low_pc' means it maps to
                # an actual address
                elif 'DW_AT_low_pc' in die.attributes:
                    # DW_AT_low_pc == 0 is a weak function
                    # which has been overriden
                    if die.attributes['DW_AT_low_pc'].value != 0:
                        sym_name = die.get_full_path()
                # For mangled function names, the linkage name
                # is what appears in the symbol list
                if 'DW_AT_linkage_name' in die.attributes:
                    linkage = die.attributes['DW_AT_linkage_name']
                    sym_name = linkage.value.decode()
            if sym_name is not None:
                # Skip DIE with no reference back to a file
                if not 'DW_AT_decl_file' in die.attributes:
                    continue
                is_die_mapped = False
                if sym_name in symbol_dict:
                    mapped_symbols.add(sym_name)
                    symlist = symbol_dict[sym_name]
                    symbol = match_symbol_address(symlist, die,
                                                  location_parser,
                                                  dwarfinfo)
                    if symbol is not None:
                        symaddr = symbol['symbol']['st_value']
                        # Only map each address once to avoid counting
                        # aliases multiple times.
                        if symaddr not in mapped_addresses:
                            is_die_mapped = True
                            path = get_die_filename(die, lineprog)
                            symbol['mapped_files'].add(path)
                            mapped_addresses.add(symaddr)
                            newly_mapped_syms.add(sym_name)
                if not is_die_mapped:
                    # Leave it for the address-range matching pass.
                    unmapped_dies.add(die)
    mapped_symbols = mapped_symbols.union(newly_mapped_syms)
    unmapped_symbols = unmapped_symbols.difference(newly_mapped_syms)
    processed['mapped_symbols'] = mapped_symbols
    processed['mapped_addr'] = mapped_addresses
    processed['unmapped_symbols'] = unmapped_symbols
    processed['unmapped_dies'] = unmapped_dies
def mark_address_aliases(symbol_dict, processed):
    """
    Mark alias symbols as already mapped to prevent double counting.

    Some functions and variables are aliases of others at the same
    address; treating them as mapped keeps them from being counted
    again when the display tree is built.
    """
    mapped_symbols = processed['mapped_symbols']
    mapped_addresses = processed['mapped_addr']
    unmapped_symbols = processed['unmapped_symbols']
    # A symbol is an alias when any of its entries sits at an address
    # that has already been attributed to another symbol.
    aliases = {
        name
        for name in unmapped_symbols
        for entry in symbol_dict[name]
        if entry['symbol']['st_value'] in mapped_addresses
    }
    processed['mapped_symbols'] = mapped_symbols.union(aliases)
    processed['mapped_addr'] = mapped_addresses
    processed['unmapped_symbols'] = unmapped_symbols.difference(aliases)
def do_address_range_matching(dwarfinfo, symbol_dict, processed):
    """
    Match symbols indirectly using address ranges.
    This uses the address ranges of DIEs and map them to symbols
    residing within those ranges, and works on DIEs that have not
    been mapped in previous steps. This works on symbol names
    that do not match the names in DIEs, e.g. "<func>" in DIE,
    but "<func>.constprop.*" in symbol name list. This also
    helps with mapping the mangled function names in C++,
    since the names in DIE are actual function names in source
    code and not mangled version of them.

    :param dwarfinfo: DWARFInfo of the ELF under analysis.
    :param symbol_dict: symbol-name -> entry-list mapping.
    :param processed: in/out dict populated by do_simple_name_matching();
                      must contain 'unmapped_dies' or this is a no-op.
    """
    if 'unmapped_dies' not in processed:
        return
    mapped_symbols = processed['mapped_symbols']
    mapped_addresses = processed['mapped_addr']
    unmapped_symbols = processed['unmapped_symbols']
    newly_mapped_syms = set()
    location_lists = dwarfinfo.location_lists()
    location_parser = LocationParser(location_lists)
    unmapped_dies = processed['unmapped_dies']
    # Group DIEs by compile units
    cu_list = dict()
    for die in unmapped_dies:
        cu = die.cu
        if cu not in cu_list:
            cu_list[cu] = {'dies': set()}
        cu_list[cu]['dies'].add(die)
    # Loop through all compile units
    for cu in cu_list:
        lineprog = dwarfinfo.line_program_for_CU(cu)
        # Map offsets from DIEs
        offset_map = dict()
        for die in cu.iter_DIEs():
            offset_map[die.offset] = die
        for die in cu_list[cu]['dies']:
            if not die.tag == 'DW_TAG_subprogram':
                continue
            path = None
            # Has direct reference to file, so use it
            if 'DW_AT_decl_file' in die.attributes:
                path = get_die_filename(die, lineprog)
            # Loop through indirect reference until a direct
            # reference to file is found
            if ('DW_AT_abstract_origin' in die.attributes) or (
                    'DW_AT_specification' in die.attributes):
                die_ptr = die
                while path is None:
                    if not (die_ptr.tag == 'DW_TAG_subprogram') or not (
                            ('DW_AT_abstract_origin' in die_ptr.attributes) or
                            ('DW_AT_specification' in die_ptr.attributes)):
                        break
                    if 'DW_AT_abstract_origin' in die_ptr.attributes:
                        ofname = 'DW_AT_abstract_origin'
                    elif 'DW_AT_specification' in die_ptr.attributes:
                        ofname = 'DW_AT_specification'
                    # Attribute values are CU-relative; rebase to the
                    # global DIE offset before the lookup.
                    offset = die_ptr.attributes[ofname].value
                    offset += die_ptr.cu.cu_offset
                    # There is nothing to reference so no need to continue
                    if offset not in offset_map:
                        break
                    die_ptr = offset_map[offset]
                    if 'DW_AT_decl_file' in die_ptr.attributes:
                        path = get_die_filename(die_ptr, lineprog)
            # Nothing to map
            if path is not None:
                low, high = get_die_mapped_address(die, location_parser,
                                                   dwarfinfo)
                if low is None:
                    continue
                # Attribute every not-yet-mapped symbol inside the DIE's
                # address range to this source file.
                for ums in unmapped_symbols:
                    for one_sym in symbol_dict[ums]:
                        symbol = one_sym['symbol']
                        symaddr = symbol['st_value']
                        if symaddr not in mapped_addresses:
                            if low <= symaddr < high:
                                one_sym['mapped_files'].add(path)
                                mapped_addresses.add(symaddr)
                                newly_mapped_syms.add(ums)
    mapped_symbols = mapped_symbols.union(newly_mapped_syms)
    unmapped_symbols = unmapped_symbols.difference(newly_mapped_syms)
    processed['mapped_symbols'] = mapped_symbols
    processed['mapped_addr'] = mapped_addresses
    processed['unmapped_symbols'] = unmapped_symbols
def set_root_path_for_unmapped_symbols(symbol_dict, addr_range, processed):
    """
    Attach remaining unmapped symbols directly under the tree root.

    Any still-unmapped symbol whose address lies within the desired
    memory ranges (e.g. ROM or RAM) gets the synthetic ':' path so it
    is listed under the root node instead of being dropped.
    """
    mapped_symbols = processed['mapped_symbols']
    mapped_addresses = processed['mapped_addr']
    unmapped_symbols = processed['unmapped_symbols']
    newly_mapped_syms = set()
    for name in unmapped_symbols:
        for entry in symbol_dict[name]:
            symbol = entry['symbol']
            addr = symbol['st_value']
            if not is_symbol_in_ranges(symbol, addr_range):
                continue
            if addr in mapped_addresses:
                continue
            entry['mapped_files'].add(Path(':'))
            mapped_addresses.add(addr)
            newly_mapped_syms.add(name)
    processed['mapped_symbols'] = mapped_symbols.union(newly_mapped_syms)
    processed['mapped_addr'] = mapped_addresses
    processed['unmapped_symbols'] = unmapped_symbols.difference(newly_mapped_syms)
def find_common_path_prefix(symbol_dict):
    """
    Find the common path prefix of all mapped files.
    Must be called before set_root_path_for_unmapped_symbols(), which
    injects the synthetic ':' path and would corrupt the result.

    :param symbol_dict: symbol-name -> entry-list mapping.
    :return: common prefix string, or None when there is no common path
             (e.g. no mapped files at all, or mixed path anchors).
    """
    paths = [file
             for sym in symbol_dict.values()
             for symbol in sym
             for file in symbol['mapped_files']]
    try:
        return os.path.commonpath(paths)
    except ValueError:
        # Raised for an empty sequence or for paths with different anchors.
        return None
class TreeNode(NodeMixin):
    """
    A symbol node.

    Wraps anytree's NodeMixin; assigning 'parent' (or 'children')
    attaches the node into the tree.  'address' and 'section' are only
    set on terminal symbol nodes, and their presence is what the
    renderer uses to distinguish symbols from file/directory nodes.
    """
    def __init__(self, name, identifier, size=0, parent=None, children=None, address=None, section=None):
        super().__init__()
        # _name: display label; _identifier: unique path-like key used
        # for lookups via findall_by_attr().
        self._name = name
        self._size = size
        self.parent = parent
        self._identifier = identifier
        if address is not None:
            self.address = address
        if section is not None:
            self.section = section
        if children:
            self.children = children
    def __repr__(self):
        return self._name
def sum_node_children_size(node):
    """
    Return the total symbol size of a node's direct children.

    :param node: tree node exposing 'children', each with a '_size'.
    :return: sum of the children's sizes (0 for a leaf).
    """
    # Generator form of the original accumulation loop.
    return sum(child._size for child in node.children)
def generate_any_tree(symbol_dict, total_size, path_prefix):
    """
    Generate a symbol tree for output.

    :param symbol_dict: symbol-name -> entry-list mapping with
                        'mapped_files' already populated.
    :param total_size: total bucket size; becomes the root node's size.
    :param path_prefix: common path prefix of all mapped files (used to
                        decide whether a ZEPHYR_BASE level is needed).
    :return: root TreeNode of the generated tree.
    """
    root = TreeNode('Root', "root")
    node_no_paths = TreeNode('(no paths)', ":", parent=root)
    if path_prefix and Path(path_prefix) == Path(args.zephyrbase):
        # All source files are under ZEPHYR_BASE so there is
        # no need for another level.
        node_zephyr_base = root
        node_output_dir = root
        node_workspace = root
        node_others = root
    else:
        node_zephyr_base = TreeNode('ZEPHYR_BASE', args.zephyrbase)
        node_output_dir = TreeNode('OUTPUT_DIR', args.output)
        node_others = TreeNode("/", "/")
        if args.workspace:
            node_workspace = TreeNode('WORKSPACE', args.workspace)
        else:
            node_workspace = node_others
    # A set of helper function for building a simple tree with a path-like
    # hierarchy.
    def _insert_one_elem(root, path, size, addr, section):
        cur = None
        node = None
        parent = root
        # Walk the path parts, accumulating each part's full identifier
        # and reusing existing intermediate nodes along the way.
        for part in path.parts:
            if cur is None:
                cur = part
            else:
                cur = str(Path(cur, part))
            results = findall_by_attr(root, cur, name="_identifier")
            if results:
                item = results[0]
                if not hasattr(item, 'address'):
                    # Passing down through a non-terminal parent node.
                    parent = item
                    parent._size += size
                else:
                    # Another symbol node here with the same name; stick to its parent as well.
                    parent = item.parent
                    node = TreeNode(name=str(part), identifier=cur, size=size, parent=parent)
            else:
                # There is no such terminal symbol in the tree yet; let's add it.
                if node:
                    parent = node
                node = TreeNode(name=str(part), identifier=cur, size=size, parent=parent)
        if node:
            # Set memory block address and section name properties only for terminal symbol nodes.
            # Don't do it on file- and directory- level parent nodes.
            node.address = addr
            node.section = section
        else:
            # normally this shouldn't happen; just to detect data or logic errors.
            print(f"ERROR: no end node created for {root}, {path}, 0x{addr:08x}+{size}@{section}")
    #
    # Mapping paths to tree nodes
    path_node_map = [
        [Path(args.zephyrbase), node_zephyr_base],
        [Path(args.output), node_output_dir],
    ]
    if args.workspace:
        path_node_map.append(
            [Path(args.workspace), node_workspace]
        )
    for name, sym in symbol_dict.items():
        for symbol in sym:
            size = get_symbol_size(symbol['symbol'])
            addr = get_symbol_addr(symbol['symbol'])
            section = symbol['section']
            for file in symbol['mapped_files']:
                path = Path(file, name)
                if path.is_absolute():
                    # Rebase under the first matching anchor node.
                    has_node = False
                    for one_path in path_node_map:
                        if one_path[0] in path.parents:
                            path = path.relative_to(one_path[0])
                            dest_node = one_path[1]
                            has_node = True
                            break
                    if not has_node:
                        dest_node = node_others
                else:
                    dest_node = node_no_paths
                _insert_one_elem(dest_node, path, size, addr, section)
    if node_zephyr_base is not root:
        # ZEPHYR_BASE and OUTPUT_DIR nodes don't have sum of symbol size
        # so calculate them here.
        node_zephyr_base._size = sum_node_children_size(node_zephyr_base)
        node_output_dir._size = sum_node_children_size(node_output_dir)
    # Find out which nodes need to be in the tree.
    # "(no path)", ZEPHYR_BASE nodes are essential.
    children = [node_no_paths, node_zephyr_base]
    if node_output_dir.height != 0:
        # OUTPUT_DIR may be under ZEPHYR_BASE.
        children.append(node_output_dir)
    if node_others.height != 0:
        # Only include "others" node if there is something.
        children.append(node_others)
    if args.workspace:
        node_workspace._size = sum_node_children_size(node_workspace)
        if node_workspace.height != 0:
            children.append(node_workspace)
    root.children = children
    root._size = total_size
    # Need to account for code and data where there are not emitted
    # symbols associated with them.
    node_hidden_syms = TreeNode('(hidden)', "(hidden)", parent=root)
    node_hidden_syms._size = root._size - sum_node_children_size(root)
    return root
def node_sort(items):
    """
    Sort tree nodes alphabetically by display name (for RenderTree).
    """
    return sorted(items, key=lambda node: node._name)
def print_any_tree(root, total_size, depth):
    """
    Print the symbol tree.

    :param root: tree root from generate_any_tree().
    :param total_size: total bucket size used for the percentage column.
    :param depth: maximum tree depth to render (None = unlimited).
    """
    print('{:98s} {:>7s} {:>7s} {:11s} {:16s}'.format(
        Fore.YELLOW + "Path", "Size", "%", " Address", "Section" + Fore.RESET))
    print('=' * 138)
    for row in RenderTree(root, childiter=node_sort, maxlevel=depth):
        # Right-justify the size so the column lines up regardless of
        # how deep (and thus how wide) the tree prefix is.
        f = len(row.pre) + len(row.node._name)
        s = str(row.node._size).rjust(100-f)
        percent = 100 * float(row.node._size) / float(total_size)
        hex_addr = "-"
        section_name = ""
        cc = cr = ""
        if not row.node.children:
            # Leaf node: a terminal symbol; show address/section in cyan.
            if hasattr(row.node, 'section'):
                section_name = row.node.section
            if hasattr(row.node, 'address'):
                hex_addr = "0x{:08x}".format(row.node.address)
                cc = Fore.CYAN
                cr = Fore.RESET
        elif row.node._name.endswith(SRC_FILE_EXT):
            # Source file nodes get highlighted in green.
            cc = Fore.GREEN
            cr = Fore.RESET
        print(f"{row.pre}{cc}{row.node._name} {s} {cr}{Fore.BLUE}{percent:6.2f}%{Fore.RESET} {hex_addr} {section_name}")
    print('=' * 138)
    print(f'{total_size:>101}')
def parse_args():
    """
    Parse command line arguments.

    Results are stored in the module-level 'args' namespace used
    throughout this script.
    """
    global args
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument("-k", "--kernel", required=True,
                        help="Zephyr ELF binary")
    parser.add_argument("-z", "--zephyrbase", required=True,
                        help="Zephyr base path")
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Do not output anything on the screen.")
    parser.add_argument("-o", "--output", required=True,
                        help="Output path")
    parser.add_argument("-w", "--workspace", default=None,
                        help="Workspace path (Usually the same as WEST_TOPDIR)")
    parser.add_argument("target", choices=['rom', 'ram', 'all'])
    parser.add_argument("-d", "--depth", dest="depth",
                        type=int, default=None,
                        help="How deep should we go into the tree",
                        metavar="DEPTH")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    parser.add_argument("--json", help="store results in a JSON file.")
    args = parser.parse_args()
def main():
    """
    Main program: parse arguments, bucket the ELF symbols into ROM/RAM,
    map them to source files via DWARF info, then print and JSON-export
    a size tree per requested target.
    """
    parse_args()
    sys.stdout.reconfigure(encoding='utf-8')
    # Init colorama
    init()
    assert os.path.exists(args.kernel), "{0} does not exist.".format(args.kernel)
    if args.target == 'ram':
        targets = ['ram']
    elif args.target == 'rom':
        targets = ['rom']
    else:
        # argparse 'choices' guarantees the only remaining value is 'all'.
        targets = ['rom', 'ram']
    # Keep the file open for the whole analysis (pyelftools reads it on
    # demand), but close it on exit instead of leaking the handle as the
    # previous bare open() did.
    with open(args.kernel, "rb") as kernel_fp:
        elf = ELFFile(kernel_fp)
        assert elf.has_dwarf_info(), "ELF file has no DWARF information"
        set_global_machine_arch(elf.get_machine_arch())
        addr_ranges = get_section_ranges(elf)
        dwarfinfo = elf.get_dwarf_info()
        for t in targets:
            symbols = get_symbols(elf, addr_ranges)
            # Warn about symbols that landed in neither bucket.
            for sym in symbols['unassigned'].values():
                for sym_entry in sym:
                    print(f"WARN: Symbol '{sym_entry['name']}' section '{sym_entry['section']}' "
                          "is not in RAM or ROM.")
            if args.json:
                jsonout = args.json
            else:
                jsonout = os.path.join(args.output, f'{t}.json')
            symbol_dict = symbols[t]
            symsize = addr_ranges[f'{t}_total_size']
            ranges = addr_ranges[t]
            if symbol_dict is not None:
                processed = {"mapped_symbols": set(),
                             "mapped_addr": set(),
                             "unmapped_symbols": set(symbol_dict.keys())}
                # Mapping passes: direct name match first, then address
                # ranges; aliases are re-marked after each pass.
                do_simple_name_matching(dwarfinfo, symbol_dict, processed)
                mark_address_aliases(symbol_dict, processed)
                do_address_range_matching(dwarfinfo, symbol_dict, processed)
                mark_address_aliases(symbol_dict, processed)
                # Must run before the ':' root paths are injected below.
                common_path_prefix = find_common_path_prefix(symbol_dict)
                set_root_path_for_unmapped_symbols(symbol_dict, ranges, processed)
                if args.verbose:
                    for sym in processed['unmapped_symbols']:
                        print("INFO: Unmapped symbol: {0}".format(sym))
                root = generate_any_tree(symbol_dict, symsize, common_path_prefix)
                if not args.quiet:
                    print_any_tree(root, symsize, args.depth)
                # Strip the leading underscore from private attrs so the
                # JSON keys are 'name'/'size'/'identifier'.
                exporter = DictExporter(attriter=lambda attrs: [(k.lstrip('_'), v) for k, v in attrs])
                data = dict()
                data["symbols"] = exporter.export(root)
                data["total_size"] = symsize
                with open(jsonout, "w") as fp:
                    json.dump(data, fp, indent=4)
# Script entry point.
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/footprint/size_report | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,518 |
```python
#!/usr/bin/env python3
#
# A script to diff between two ram or rom reports generated by
# size_report. When you call call the ram_report or rom_report targets you
# end up with a json file in the build directory that can be used as input
# for this script.
# The output shows which symbols increased and which decreased in size and
# also tracked added/remove symbols as well.
# Example:
# ./scripts/footprint/fpdiff.py ram1.json ram2.json
from anytree.importer import DictImporter
from anytree import PreOrderIter, AnyNode
from anytree.search import find
import colorama
from colorama import Fore
import json
import argparse
importer = DictImporter()
def parse_args():
    """Parse command line options: the two JSON report files to diff."""
    cli = argparse.ArgumentParser(
        description="Compare footprint sizes of two builds.", allow_abbrev=False)
    cli.add_argument("file1", help="First file")
    cli.add_argument("file2", help="Second file")
    return cli.parse_args()
def main():
    """Diff two size_report JSON trees and print per-symbol size changes."""
    colorama.init()
    args = parse_args()
    with open(args.file1, "r") as f:
        data1 = json.load(f)
    with open(args.file2, "r") as f:
        data2 = json.load(f)
    # Top-level children are the report's root buckets (e.g. ZEPHYR_BASE,
    # WORKSPACE); compare them pairwise by index.
    for idx, ch in enumerate(data1['symbols']['children']):
        root1 = importer.import_(ch)
        if idx >= len(data2['symbols']['children']):
            # No counterpart in the second file; diff against an empty node.
            root2 = AnyNode(identifier=None)
        else:
            root2 = importer.import_(data2['symbols']['children'][idx])
        print(f"{root1.name}\n+++++++++++++++++++++")
        # Pass 1: symbols in file1 -- report size changes and removals.
        for node in PreOrderIter(root1):
            # pylint: disable=undefined-loop-variable
            n = find(root2, lambda node2: node2.identifier == node.identifier)
            if n:
                if n.size != node.size:
                    diff = n.size - node.size
                    if diff == 0:
                        continue
                    # Only report leaf (or root-level) nodes; parents just
                    # aggregate their children's sizes.
                    if not n.children or not n.parent:
                        if diff < 0:
                            print(f"{n.identifier} -> {Fore.GREEN}{diff}{Fore.RESET}")
                        else:
                            print(f"{n.identifier} -> {Fore.RED}+{diff}{Fore.RESET}")
            else:
                if not node.children:
                    print(f"{node.identifier} ({Fore.GREEN}-{node.size}{Fore.RESET}) disappeared.")
        # Pass 2: symbols only present in file2 are new.
        for node in PreOrderIter(root2):
            n = find(root1, lambda node2: node2.identifier == node.identifier)
            if not n:
                if not node.children and node.size != 0:
                    print(f"{node.identifier} ({Fore.RED}+{node.size}{Fore.RESET}) is new.")
# Script entry point.
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/footprint/fpdiff.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 578 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions concerning addons to normal functions
"""
import importlib
import mock
import os
import pkg_resources
import pytest
import re
import shutil
import subprocess
import sys
from conftest import ZEPHYR_BASE, TEST_DATA, sample_filename_mock, testsuite_filename_mock
from twisterlib.testplan import TestPlan
class TestAddon:
    @classmethod
    def setup_class(cls):
        # Load the twister entry script as an importable module so each
        # test can execute it in-process with a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)
    @classmethod
    def teardown_class(cls):
        # Nothing to clean up; the module object is garbage-collected.
        pass
    @pytest.mark.parametrize(
        'ubsan_flags, expected_exit_value',
        [
            # No sanitiser, no problem
            ([], '0'),
            # Sanitiser catches a mistake, error is raised
            (['--enable-ubsan'], '1')
        ],
        ids=['no sanitiser', 'ubsan']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_enable_ubsan(self, out_path, ubsan_flags, expected_exit_value):
        # Run twister on a testcase containing deliberate undefined
        # behaviour; the run should only fail when UBSAN is enabled.
        test_platforms = ['native_sim']
        test_path = os.path.join(TEST_DATA, 'tests', 'san', 'ubsan')
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               ubsan_flags + \
               [] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == expected_exit_value
    @pytest.mark.parametrize(
        'lsan_flags, expected_exit_value',
        [
            # No sanitiser, no problem
            ([], '0'),
            # Sanitiser catches a mistake, error is raised
            (['--enable-asan', '--enable-lsan'], '1')
        ],
        ids=['no sanitiser', 'lsan']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_enable_lsan(self, out_path, lsan_flags, expected_exit_value):
        # Run twister on a testcase with a deliberate memory leak; the
        # run should only fail when LSAN (which requires ASAN) is enabled.
        test_platforms = ['native_sim']
        test_path = os.path.join(TEST_DATA, 'tests', 'san', 'lsan')
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               lsan_flags + \
               [] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == expected_exit_value
    @pytest.mark.parametrize(
        'asan_flags, expected_exit_value, expect_asan',
        [
            # No sanitiser, no problem
            # Note that on some runs it may fail,
            # as the process is killed instead of ending normally.
            # This is not 100% repeatable, so this test is removed for now.
            # ([], '0', False),
            # Sanitiser catches a mistake, error is raised
            (['--enable-asan'], '1', True)
        ],
        ids=[
            #'no sanitiser',
            'asan'
        ]
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_enable_asan(self, capfd, out_path, asan_flags, expected_exit_value, expect_asan):
        # Run twister on a testcase with a deliberate memory error and
        # verify both the exit code and the ASAN report on stderr.
        test_platforms = ['native_sim']
        test_path = os.path.join(TEST_DATA, 'tests', 'san', 'asan')
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               asan_flags + \
               [] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == expected_exit_value
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        # ASAN error banners look like "==<pid>==ERROR: AddressSanitizer:".
        asan_template = r'^==\d+==ERROR:\s+AddressSanitizer:'
        assert expect_asan == bool(re.search(asan_template, err, re.MULTILINE))
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_extra_test_args(self, capfd, out_path):
        # Pass extra args after '--' down to the test binary ('-list'
        # makes ztest only list its testcases instead of running them).
        test_platforms = ['native_sim']
        test_path = os.path.join(TEST_DATA, 'tests', 'params', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [] + \
               ['-vvv'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair] + \
               ['--', '-list']
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        # Use of -list makes tests not run.
        # Thus, the tests 'failed'.
        assert str(sys_exit.value) == '1'
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        expected_test_names = [
            'param_tests::test_assert1',
            'param_tests::test_assert2',
            'param_tests::test_assert3',
        ]
        assert all([testname in err for testname in expected_test_names])
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_extra_args(self, caplog, out_path):
        # Verify that --extra-args values are forwarded to cmake as -D
        # definitions (checked in the recorded twister.log).
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2')
        args = ['--outdir', out_path, '-T', path] + \
               ['--extra-args', 'USE_CCACHE=0', '--extra-args', 'DUMMY=1'] + \
               [] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        with open(os.path.join(out_path, 'twister.log')) as f:
            twister_log = f.read()
        pattern_cache = r'Calling cmake: [^\n]+ -DUSE_CCACHE=0 [^\n]+\n'
        pattern_dummy = r'Calling cmake: [^\n]+ -DDUMMY=1 [^\n]+\n'
        assert ' -DUSE_CCACHE=0 ' in twister_log
        res = re.search(pattern_cache, twister_log)
        assert res
        assert ' -DDUMMY=1 ' in twister_log
        res = re.search(pattern_dummy, twister_log)
        assert res
    # This test is not side-effect free.
    # It installs and uninstalls pytest-twister-harness using pip
    # It uses pip to check whether that plugin is previously installed
    # and reinstalls it if detected at the start of its run.
    # However, it does NOT restore the original plugin, ONLY reinstalls it.
    @pytest.mark.parametrize(
        'allow_flags, do_install, expected_exit_value, expected_logs',
        [
            ([], True, '1', ['By default Twister should work without pytest-twister-harness'
                             ' plugin being installed, so please, uninstall it by'
                             ' `pip uninstall pytest-twister-harness` and'
                             ' `git clean -dxf scripts/pylib/pytest-twister-harness`.']),
            (['--allow-installed-plugin'], True, '0', ['You work with installed version'
                                                       ' of pytest-twister-harness plugin.']),
            ([], False, '0', []),
            (['--allow-installed-plugin'], False, '0', []),
        ],
        ids=['installed, but not allowed', 'installed, allowed',
             'not installed, not allowed', 'not installed, but allowed']
    )
    @mock.patch.object(TestPlan, 'SAMPLE_FILENAME', sample_filename_mock)
    def test_allow_installed_plugin(self, caplog, out_path, allow_flags, do_install,
                                    expected_exit_value, expected_logs):
        """Check twister's behavior when the pytest-twister-harness plugin is
        pip-installed, with and without --allow-installed-plugin.

        The plugin is (un)installed with real pip calls, so module state must be
        reloaded after each change for the detection to be re-evaluated.
        """
        # Modules whose import-time plugin detection must be refreshed after
        # any pip install/uninstall of the plugin.
        environment_twister_module = importlib.import_module('twisterlib.environment')
        harness_twister_module = importlib.import_module('twisterlib.harness')
        runner_twister_module = importlib.import_module('twisterlib.runner')
        pth_path = os.path.join(ZEPHYR_BASE, 'scripts', 'pylib', 'pytest-twister-harness')
        check_installed_command = [sys.executable, '-m', 'pip', 'list']
        install_command = [sys.executable, '-m', 'pip', 'install', '--no-input', pth_path]
        uninstall_command = [sys.executable, '-m', 'pip', 'uninstall', '--yes',
                             'pytest-twister-harness']
        def big_uninstall():
            # Uninstall the plugin and wipe its build/import caches so a later
            # reinstall re-registers cleanly.
            # NOTE(review): this local pth_path shadows the enclosing one with
            # the same value — harmless but redundant.
            pth_path = os.path.join(ZEPHYR_BASE, 'scripts', 'pylib', 'pytest-twister-harness')
            subprocess.run(uninstall_command, check=True,)
            # For our registration to work, we have to delete the installation cache
            additional_cache_paths = [
                # Plugin cache
                os.path.join(pth_path, 'src', 'pytest_twister_harness.egg-info'),
                # Additional caches
                os.path.join(pth_path, 'src', 'pytest_twister_harness', '__pycache__'),
                os.path.join(pth_path, 'src', 'pytest_twister_harness', 'device', '__pycache__'),
                os.path.join(pth_path, 'src', 'pytest_twister_harness', 'helpers', '__pycache__'),
                os.path.join(pth_path, 'src', 'pytest_twister_harness', 'build'),
            ]
            for additional_cache_path in additional_cache_paths:
                if os.path.exists(additional_cache_path):
                    if os.path.isfile(additional_cache_path):
                        os.unlink(additional_cache_path)
                    else:
                        shutil.rmtree(additional_cache_path)
        # To refresh the PYTEST_PLUGIN_INSTALLED global variable
        def refresh_plugin_installed_variable():
            pkg_resources._initialize_master_working_set()
            importlib.reload(environment_twister_module)
            importlib.reload(harness_twister_module)
            importlib.reload(runner_twister_module)
        # Record whether the plugin was installed before we started, so we can
        # reinstall it at the end (best-effort restoration only).
        check_installed_result = subprocess.run(check_installed_command, check=True,
                                                capture_output=True, text=True)
        previously_installed = 'pytest-twister-harness' in check_installed_result.stdout
        # To ensure consistent test start
        big_uninstall()
        if do_install:
            subprocess.run(install_command, check=True)
        # Refresh before the test, no matter the testcase
        refresh_plugin_installed_variable()
        test_platforms = ['native_sim']
        test_path = os.path.join(TEST_DATA, 'samples', 'pytest', 'shell')
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
            allow_flags + \
            [] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        # To ensure consistent test exit, prevent dehermetisation
        if do_install:
            big_uninstall()
        # To restore previously-installed plugin as well as we can
        if previously_installed:
            subprocess.run(install_command, check=True)
        if previously_installed or do_install:
            refresh_plugin_installed_variable()
        assert str(sys_exit.value) == expected_exit_value
        assert all([log in caplog.text for log in expected_logs])
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
def test_pytest_args(self, out_path):
test_platforms = ['native_sim']
test_path = os.path.join(TEST_DATA, 'tests', 'pytest')
args = ['-i', '--outdir', out_path, '-T', test_path] + \
['--pytest-args=--custom-pytest-arg', '--pytest-args=foo',
'--pytest-args=--cmdopt', '--pytest-args=.'] + \
[] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
# YAML was modified so that the test will fail without command line override.
assert str(sys_exit.value) == '0'
@pytest.mark.parametrize(
'valgrind_flags, expected_exit_value',
[
# No sanitiser, leak is ignored
([], '0'),
# Sanitiser catches a mistake, error is raised
(['--enable-valgrind'], '1')
],
ids=['no valgrind', 'valgrind']
)
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
def test_enable_valgrind(self, capfd, out_path, valgrind_flags, expected_exit_value):
test_platforms = ['native_sim']
test_path = os.path.join(TEST_DATA, 'tests', 'san', 'val')
args = ['-i', '--outdir', out_path, '-T', test_path] + \
valgrind_flags + \
[] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
assert str(sys_exit.value) == expected_exit_value
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_addon.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,208 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions
"""
# pylint: disable=duplicate-code
import importlib
import mock
import os
import pytest
import re
import sys
import time
from conftest import TEST_DATA, ZEPHYR_BASE, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestRunner:
    """Blackbox tests for twister's runner-related command line options
    (build-only, test-only, dry-run, retries, timeouts, tags, ...).

    Each test invokes the real twister entry point via exec_module with a
    patched sys.argv and inspects the SystemExit code and log output.
    """
    # (test path, platforms, expected build statistics)
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'executed_on_platform': 0,
                'only_built': 6
            }
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'executed_on_platform': 0,
                'only_built': 1
            }
        ),
    ]
    # (test path, platforms, expected full run statistics) for test_runtest_only
    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'selected_test_scenarios': 3,
                'selected_test_instances': 6,
                'skipped_configurations': 0,
                'skipped_by_static_filter': 0,
                'skipped_at_runtime': 0,
                'passed_configurations': 4,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 8,
                'skipped_test_cases': 0,
                'platform_count': 0,
                'executed_on_platform': 4,
                'only_built': 2
            }
        )
    ]
    # (test path, platforms) for tests that only check the exit code
    TESTDATA_3 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
        ),
    ]
    # (test path, platforms, expected statistics) for test_cmake_only
    TESTDATA_4 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'qemu_x86_64'],
            {
                'passed_configurations': 6,
                'selected_test_instances': 6,
                'executed_on_platform': 0,
                'only_built': 6,
            }
        ),
    ]
    # (test path, platforms, pre-script path)
    TESTDATA_5 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            os.path.join(TEST_DATA, "pre_script.sh")
        ),
    ]
    # (test path, platforms, retry count as string) for test_retry
    TESTDATA_6 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_fail', 'dummy'),
            ['qemu_x86_64'],
            '1',
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_fail', 'dummy'),
            ['qemu_x86'],
            '2',
        ),
    ]
    # (test path, platforms, retry interval in seconds as string)
    TESTDATA_7 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_fail', 'dummy'),
            ['qemu_x86'],
            '15',
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_fail', 'dummy'),
            ['qemu_x86'],
            '30',
        ),
    ]
    # (test path, platforms, timeout multiplier as string)
    TESTDATA_8 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_timeout', 'dummy'),
            ['qemu_x86'],
            '2',
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_timeout', 'dummy'),
            ['qemu_x86'],
            '0.5',
        ),
    ]
    # (test path, platforms, tags, expected log lines) for test_tag
    TESTDATA_9 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            ['qemu_x86'],
            ['device'],
            ['dummy.agnostic.group2 SKIPPED: Command line testsuite tag filter',
             'dummy.agnostic.group1.subgroup2 SKIPPED: Command line testsuite tag filter',
             'dummy.agnostic.group1.subgroup1 SKIPPED: Command line testsuite tag filter',
             r'0 of 4 test configurations passed \(0.00%\), 0 failed, 0 errored, 4 skipped'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            ['qemu_x86'],
            ['subgrouped'],
            ['dummy.agnostic.group2 SKIPPED: Command line testsuite tag filter',
             r'2 of 4 test configurations passed \(100.00%\), 0 failed, 0 errored, 2 skipped'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            ['qemu_x86'],
            ['agnostic', 'device'],
            [r'3 of 4 test configurations passed \(100.00%\), 0 failed, 0 errored, 1 skipped']
        ),
    ]
    # (test path, platforms, expected statistics) for test_only_failed
    TESTDATA_10 = [
        (
            os.path.join(TEST_DATA, 'tests', 'one_fail_one_pass'),
            ['qemu_x86'],
            {
                'selected_test_instances': 2,
                'skipped_configurations': 0,
                'passed_configurations': 0,
                'failed_configurations': 1,
                'errored_configurations': 0,
            }
        )
    ]
    # (test path, platforms, retry count as string) for test_retry_build_errors
    TESTDATA_11 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_build_error'),
            ['qemu_x86_64'],
            '1',
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_build_error'),
            ['qemu_x86'],
            '4',
        ),
    ]
    @classmethod
    def setup_class(cls):
        # Load the twister script as a module named '__main__' so each test
        # can execute it with a patched sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)
    @classmethod
    def teardown_class(cls):
        # Nothing to clean up; fixtures handle per-test output directories.
        pass
@pytest.mark.parametrize(
'test_path, test_platforms, expected',
TESTDATA_1,
ids=[
'build_only tests/dummy/agnostic',
'build_only tests/dummy/device',
],
)
@pytest.mark.parametrize(
'flag',
['--build-only', '-b']
)
def test_build_only(self, capfd, out_path, test_path, test_platforms, expected, flag):
args = ['-i', '--outdir', out_path, '-T', test_path, flag] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
r' test configurations were only built.$'
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
built_search = re.search(built_regex, err, re.MULTILINE)
assert built_search
assert int(built_search.group('executed_on_platform')) == \
expected['executed_on_platform']
assert int(built_search.group('only_built')) == \
expected['only_built']
assert str(sys_exit.value) == '0'
    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_2,
        ids=[
            'test_only'
        ],
    )
    def test_runtest_only(self, capfd, out_path, test_path, test_platforms, expected):
        """Build everything with --build-only, then rerun with --test-only in the
        same outdir and verify the full run statistics against `expected`."""
        # Phase 1: build only; output is discarded below.
        args = ['--outdir', out_path,'-i', '-T', test_path, '--build-only'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        # Drop phase-1 output and log so phase-2 assertions see a clean slate.
        capfd.readouterr()
        clear_log_in_test()
        # Phase 2: run previously built binaries only.
        args = ['--outdir', out_path,'-i', '-T', test_path, '--test-only'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        # Summary-line regexes for selection, pass/fail, case counts, and builds.
        select_regex = r'^INFO - (?P<test_scenarios>[0-9]+) test scenarios' \
            r' \((?P<test_instances>[0-9]+) test instances\) selected,' \
            r' (?P<skipped_configurations>[0-9]+) configurations skipped' \
            r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
            r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'
        pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
            r' (?P<test_instances>[0-9]+) test configurations passed' \
            r' \([0-9]+\.[0-9]+%\), (?P<failed_configurations>[0-9]+) failed,' \
            r' (?P<errored_configurations>[0-9]+) errored,' \
            r' (?P<skipped_configurations>[0-9]+) skipped with' \
            r' [0-9]+ warnings in [0-9]+\.[0-9]+ seconds$'
        case_regex = r'^INFO - In total (?P<executed_test_cases>[0-9]+)' \
            r' test cases were executed, (?P<skipped_test_cases>[0-9]+) skipped' \
            r' on (?P<platform_count>[0-9]+) out of total [0-9]+ platforms' \
            r' \([0-9]+\.[0-9]+%\)$'
        built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
            r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
            r' test configurations were only built.$'
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        select_search = re.search(select_regex, err, re.MULTILINE)
        assert select_search
        assert int(select_search.group('test_scenarios')) == \
            expected['selected_test_scenarios']
        assert int(select_search.group('test_instances')) == \
            expected['selected_test_instances']
        assert int(select_search.group('skipped_configurations')) == \
            expected['skipped_configurations']
        assert int(select_search.group('skipped_by_static_filter')) == \
            expected['skipped_by_static_filter']
        assert int(select_search.group('skipped_at_runtime')) == \
            expected['skipped_at_runtime']
        pass_search = re.search(pass_regex, err, re.MULTILINE)
        assert pass_search
        assert int(pass_search.group('passed_configurations')) == \
            expected['passed_configurations']
        assert int(pass_search.group('test_instances')) == \
            expected['selected_test_instances']
        assert int(pass_search.group('failed_configurations')) == \
            expected['failed_configurations']
        assert int(pass_search.group('errored_configurations')) == \
            expected['errored_configurations']
        assert int(pass_search.group('skipped_configurations')) == \
            expected['skipped_configurations']
        case_search = re.search(case_regex, err, re.MULTILINE)
        assert case_search
        assert int(case_search.group('executed_test_cases')) == \
            expected['executed_test_cases']
        assert int(case_search.group('skipped_test_cases')) == \
            expected['skipped_test_cases']
        assert int(case_search.group('platform_count')) == \
            expected['platform_count']
        built_search = re.search(built_regex, err, re.MULTILINE)
        assert built_search
        assert int(built_search.group('executed_on_platform')) == \
            expected['executed_on_platform']
        assert int(built_search.group('only_built')) == \
            expected['only_built']
        assert str(sys_exit.value) == '0'
@pytest.mark.parametrize(
'test_path, test_platforms',
TESTDATA_3,
ids=[
'dry_run',
],
)
@pytest.mark.parametrize(
'flag',
['--dry-run', '-y']
)
def test_dry_run(self, capfd, out_path, test_path, test_platforms, flag):
args = ['--outdir', out_path, '-T', test_path, flag] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
assert str(sys_exit.value) == '0'
@pytest.mark.parametrize(
'test_path, test_platforms, expected',
TESTDATA_4,
ids=[
'cmake_only',
],
)
def test_cmake_only(self, capfd, out_path, test_path, test_platforms, expected):
args = ['--outdir', out_path, '-T', test_path, '--cmake-only'] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
r' (?P<test_instances>[0-9]+) test configurations passed'
built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
r' test configurations were only built.$'
pass_search = re.search(pass_regex, err, re.MULTILINE)
assert pass_search
assert int(pass_search.group('passed_configurations')) == \
expected['passed_configurations']
assert int(pass_search.group('test_instances')) == \
expected['selected_test_instances']
built_search = re.search(built_regex, err, re.MULTILINE)
assert built_search
assert int(built_search.group('executed_on_platform')) == \
expected['executed_on_platform']
assert int(built_search.group('only_built')) == \
expected['only_built']
assert str(sys_exit.value) == '0'
@pytest.mark.parametrize(
'test_path, test_platforms, file_name',
TESTDATA_5,
ids=[
'pre_script',
],
)
def test_pre_script(self, capfd, out_path, test_path, test_platforms, file_name):
args = ['--outdir', out_path, '-T', test_path, '--pre-script', file_name] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
assert str(sys_exit.value) == '0'
@pytest.mark.parametrize(
'test_path, test_platforms',
TESTDATA_3,
ids=[
'device_flash_timeout',
],
)
def test_device_flash_timeout(self, capfd, out_path, test_path, test_platforms):
args = ['--outdir', out_path, '-T', test_path, '--device-flash-timeout', "240"] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
assert str(sys_exit.value) == '0'
@pytest.mark.parametrize(
'test_path, test_platforms, iterations',
TESTDATA_6,
ids=[
'retry 2',
'retry 3'
],
)
def test_retry(self, capfd, out_path, test_path, test_platforms, iterations):
args = ['--outdir', out_path, '-T', test_path, '--retry-failed', iterations, '--retry-interval', '1'] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
pattern = re.compile(r'INFO\s+-\s+(\d+)\s+Iteration:[\s\S]*?ERROR\s+-\s+(\w+)')
matches = pattern.findall(err)
if matches:
last_iteration = max(int(match[0]) for match in matches)
last_match = next(match for match in matches if int(match[0]) == last_iteration)
iteration_number, platform_name = int(last_match[0]), last_match[1]
assert int(iteration_number) == int(iterations) + 1
assert [platform_name] == test_platforms
else:
assert 'Pattern not found in the output'
assert str(sys_exit.value) == '1'
@pytest.mark.parametrize(
'test_path, test_platforms, interval',
TESTDATA_7,
ids=[
'retry interval 15',
'retry interval 30'
],
)
def test_retry_interval(self, capfd, out_path, test_path, test_platforms, interval):
args = ['--outdir', out_path, '-T', test_path, '--retry-failed', '1', '--retry-interval', interval] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
start_time = time.time()
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
end_time = time.time()
elapsed_time = end_time - start_time
if elapsed_time < int(interval):
assert 'interval was too short'
assert str(sys_exit.value) == '1'
@pytest.mark.parametrize(
'test_path, test_platforms, timeout',
TESTDATA_8,
ids=[
'timeout-multiplier 2 - 20s',
'timeout-multiplier 0.5 - 5s'
],
)
def test_timeout_multiplier(self, capfd, out_path, test_path, test_platforms, timeout):
args = ['--outdir', out_path, '-T', test_path, '--timeout-multiplier', timeout, '-v'] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
tolerance = 1.0
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
elapsed_time = float(re.search(r'Timeout \(qemu (\d+\.\d+)s\)', err).group(1))
assert abs(
elapsed_time - float(timeout) * 10) <= tolerance, f"Time is different from expected"
assert str(sys_exit.value) == '1'
@pytest.mark.parametrize(
'test_path, test_platforms, tags, expected',
TESTDATA_9,
ids=[
'tags device',
'tags subgruped',
'tag agnostic and device'
],
)
def test_tag(self, capfd, out_path, test_path, test_platforms, tags, expected):
args = ['--outdir', out_path, '-T', test_path, '-vv'] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair] + \
[val for pairs in zip(
['-t'] * len(tags), tags
) for val in pairs]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
for line in expected:
assert re.search(line, err)
assert str(sys_exit.value) == '0'
    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_10,
        ids=[
            'only_failed'
        ],
    )
    def test_only_failed(self, capfd, out_path, test_path, test_platforms, expected):
        """Run a suite with one failing and one passing configuration, then rerun
        with --only-failed and verify only the failing one is re-executed."""
        # Phase 1: full run; its output and log are discarded below.
        args = ['--outdir', out_path,'-i', '-T', test_path, '-v'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        capfd.readouterr()
        clear_log_in_test()
        # Phase 2: rerun only the configurations that failed in phase 1.
        args = ['--outdir', out_path,'-i', '-T', test_path, '--only-failed'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        # Summary line with pass/fail/error/skip counts for the rerun.
        pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
            r' (?P<test_instances>[0-9]+) test configurations passed' \
            r' \([0-9]+\.[0-9]+%\), (?P<failed_configurations>[0-9]+) failed,' \
            r' (?P<errored_configurations>[0-9]+) errored,' \
            r' (?P<skipped_configurations>[0-9]+) skipped with' \
            r' [0-9]+ warnings in [0-9]+\.[0-9]+ seconds$'
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        # The known-failing configuration must appear as failed in the rerun.
        assert re.search(
            r'one_fail_one_pass.agnostic.group1.subgroup2 on qemu_x86 failed \(.*\)', err)
        pass_search = re.search(pass_regex, err, re.MULTILINE)
        assert pass_search
        assert int(pass_search.group('passed_configurations')) == \
            expected['passed_configurations']
        assert int(pass_search.group('test_instances')) == \
            expected['selected_test_instances']
        assert int(pass_search.group('failed_configurations')) == \
            expected['failed_configurations']
        assert int(pass_search.group('errored_configurations')) == \
            expected['errored_configurations']
        assert int(pass_search.group('skipped_configurations')) == \
            expected['skipped_configurations']
        assert str(sys_exit.value) == '1'
@pytest.mark.parametrize(
'test_path, test_platforms, iterations',
TESTDATA_11,
ids=[
'retry 2',
'retry 3'
],
)
def test_retry_build_errors(self, capfd, out_path, test_path, test_platforms, iterations):
args = ['--outdir', out_path, '-T', test_path, '--retry-build-errors', '--retry-failed', iterations,
'--retry-interval', '10'] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
pattern = re.compile(r'INFO\s+-\s+(\d+)\s+Iteration:[\s\S]*?ERROR\s+-\s+(\w+)')
matches = pattern.findall(err)
if matches:
last_iteration = max(int(match[0]) for match in matches)
last_match = next(match for match in matches if int(match[0]) == last_iteration)
iteration_number, platform_name = int(last_match[0]), last_match[1]
assert int(iteration_number) == int(iterations) + 1
assert [platform_name] == test_platforms
else:
assert 'Pattern not found in the output'
assert str(sys_exit.value) == '1'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_runner.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,759 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to Zephyr platforms.
"""
import importlib
import re
import mock
import os
import pytest
import sys
import json
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestPlatform:
    """Blackbox tests for twister's platform-related command line options
    (--board-root, --platform, --force-platform, --exclude-platform, ...)."""
    # (test path, platforms, expected run statistics) for test_emulation_only
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'selected_test_scenarios': 3,
                'selected_test_instances': 9,
                'skipped_configurations': 3,
                'skipped_by_static_filter': 3,
                'skipped_at_runtime': 0,
                'passed_configurations': 6,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 10,
                'skipped_test_cases': 5,
                'platform_count': 3,
                'executed_on_platform': 4,
                'only_built': 2
            }
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
            {
                'selected_test_scenarios': 1,
                'selected_test_instances': 3,
                'skipped_configurations': 3,
                'skipped_by_static_filter': 3,
                'skipped_at_runtime': 0,
                'passed_configurations': 0,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 0,
                'skipped_test_cases': 3,
                'platform_count': 3,
                'executed_on_platform': 0,
                'only_built': 0
            }
        ),
    ]
    @classmethod
    def setup_class(cls):
        # Load the twister script as a module named '__main__' so each test
        # can execute it with a patched sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)
    @classmethod
    def teardown_class(cls):
        # Nothing to clean up; fixtures handle per-test output directories.
        pass
@pytest.mark.parametrize(
'board_root, expected_returncode',
[(True, '0'), (False, '2')],
ids=['dummy in additional board root', 'no additional board root, crash']
)
def test_board_root(self, out_path, board_root, expected_returncode):
test_platforms = ['qemu_x86', 'dummy_board/dummy_soc']
board_root_path = os.path.join(TEST_DATA, 'boards')
path = os.path.join(TEST_DATA, 'tests', 'dummy')
args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
(['--board-root', board_root_path] if board_root else []) + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
# Checking twister.log increases coupling,
# but we need to differentiate crashes.
with open(os.path.join(out_path, 'twister.log')) as f:
log = f.read()
error_regex = r'ERROR.*platform_filter\s+-\s+unrecognized\s+platform\s+-\s+dummy_board/dummy_soc$'
board_error = re.search(error_regex, log)
assert board_error if not board_root else not board_error
assert str(sys_exit.value) == expected_returncode
def test_force_platform(self, out_path):
test_platforms = ['qemu_x86', 'intel_adl_crb']
path = os.path.join(TEST_DATA, 'tests', 'dummy')
args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
['--force-platform'] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
with open(os.path.join(out_path, 'testplan.json')) as f:
j = json.load(f)
filtered_j = [
(ts['platform'], ts['name'], tc['identifier']) \
for ts in j['testsuites'] \
for tc in ts['testcases'] if 'reason' not in tc
]
assert str(sys_exit.value) == '0'
assert len(filtered_j) == 12
def test_platform(self, out_path):
path = os.path.join(TEST_DATA, 'tests', 'dummy')
args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
['--platform', 'qemu_x86']
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
with open(os.path.join(out_path, 'testplan.json')) as f:
j = json.load(f)
filtered_j = [
(ts['platform'], ts['name'], tc['identifier']) \
for ts in j['testsuites'] \
for tc in ts['testcases'] if 'reason' not in tc
]
assert str(sys_exit.value) == '0'
assert all([platform == 'qemu_x86' for platform, _, _ in filtered_j])
@pytest.mark.parametrize(
'test_path, test_platforms',
[
(
os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
['qemu_x86'],
),
],
ids=[
'any_platform',
],
)
@pytest.mark.parametrize(
'flag',
['-l', '--all']
)
def test_any_platform(self, capfd, out_path, test_path, test_platforms, flag):
args = ['--outdir', out_path, '-T', test_path, '-y'] + \
[flag] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
assert str(sys_exit.value) == '0'
@pytest.mark.parametrize(
'test_path, test_platforms, expected',
[
(
os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
['qemu_x86', 'qemu_x86_64'],
{
'passed_configurations': 3,
'selected_test_instances': 6,
'executed_on_platform': 2,
'only_built': 1,
}
),
],
ids=[
'exclude_platform',
],
)
def test_exclude_platform(self, capfd, out_path, test_path, test_platforms, expected):
args = ['--outdir', out_path, '-T', test_path] + \
['--exclude-platform', "qemu_x86"] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
r' (?P<test_instances>[0-9]+) test configurations passed'
built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
r' test configurations were only built.$'
pass_search = re.search(pass_regex, err, re.MULTILINE)
assert pass_search
assert int(pass_search.group('passed_configurations')) == \
expected['passed_configurations']
assert int(pass_search.group('test_instances')) == \
expected['selected_test_instances']
built_search = re.search(built_regex, err, re.MULTILINE)
assert built_search
assert int(built_search.group('executed_on_platform')) == \
expected['executed_on_platform']
assert int(built_search.group('only_built')) == \
expected['only_built']
assert str(sys_exit.value) == '0'
# TESTDATA_1 (defined earlier in this file) supplies per-path expected
# counts for the emulation-only runs — TODO confirm its shape against
# the definition above this chunk.
@pytest.mark.parametrize(
'test_path, test_platforms, expected',
TESTDATA_1,
ids=[
'emulation_only tests/dummy/agnostic',
'emulation_only tests/dummy/device',
]
)
def test_emulation_only(self, capfd, out_path, test_path, test_platforms, expected):
# Verify that --emulation-only restricts the run to emulated targets
# and that every count in twister's summary output matches 'expected'.
args = ['-i', '--outdir', out_path, '-T', test_path] + \
['--emulation-only'] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
# Run twister as if invoked from the command line; it always exits
# via SystemExit, captured here for the final status check.
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
# Regexes for the INFO summary lines twister emits on stderr:
# scenario/instance selection counts ...
select_regex = r'^INFO - (?P<test_scenarios>[0-9]+) test scenarios' \
r' \((?P<test_instances>[0-9]+) test instances\) selected,' \
r' (?P<skipped_configurations>[0-9]+) configurations skipped' \
r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'
# ... pass/fail/error/skip totals ...
pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
r' (?P<test_instances>[0-9]+) test configurations passed' \
r' \([0-9]+\.[0-9]+%\), (?P<failed_configurations>[0-9]+) failed,' \
r' (?P<errored_configurations>[0-9]+) errored,' \
r' (?P<skipped_configurations>[0-9]+) skipped with' \
r' [0-9]+ warnings in [0-9]+\.[0-9]+ seconds$'
# ... per-case execution counts ...
case_regex = r'^INFO - In total (?P<executed_test_cases>[0-9]+)' \
r' test cases were executed, (?P<skipped_test_cases>[0-9]+) skipped' \
r' on (?P<platform_count>[0-9]+) out of total [0-9]+ platforms' \
r' \([0-9]+\.[0-9]+%\)$'
# ... and executed-vs-build-only configuration counts.
built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
r' test configurations were only built.$'
out, err = capfd.readouterr()
sys.stdout.write(out)
sys.stderr.write(err)
select_search = re.search(select_regex, err, re.MULTILINE)
assert select_search
assert int(select_search.group('test_scenarios')) == \
expected['selected_test_scenarios']
assert int(select_search.group('test_instances')) == \
expected['selected_test_instances']
assert int(select_search.group('skipped_configurations')) == \
expected['skipped_configurations']
assert int(select_search.group('skipped_by_static_filter')) == \
expected['skipped_by_static_filter']
assert int(select_search.group('skipped_at_runtime')) == \
expected['skipped_at_runtime']
pass_search = re.search(pass_regex, err, re.MULTILINE)
assert pass_search
assert int(pass_search.group('passed_configurations')) == \
expected['passed_configurations']
assert int(pass_search.group('test_instances')) == \
expected['selected_test_instances']
assert int(pass_search.group('failed_configurations')) == \
expected['failed_configurations']
assert int(pass_search.group('errored_configurations')) == \
expected['errored_configurations']
assert int(pass_search.group('skipped_configurations')) == \
expected['skipped_configurations']
case_search = re.search(case_regex, err, re.MULTILINE)
assert case_search
assert int(case_search.group('executed_test_cases')) == \
expected['executed_test_cases']
assert int(case_search.group('skipped_test_cases')) == \
expected['skipped_test_cases']
assert int(case_search.group('platform_count')) == \
expected['platform_count']
built_search = re.search(built_regex, err, re.MULTILINE)
assert built_search
assert int(built_search.group('executed_on_platform')) == \
expected['executed_on_platform']
assert int(built_search.group('only_built')) == \
expected['only_built']
# Twister exits with status '0' on a fully successful run.
assert str(sys_exit.value) == '0'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_platform.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,030 |
```prolog
#!/usr/bin/env perl
#
# (c) 2001, Dave Jones. (the file handling bit)
# (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)
# (c) 2007,2008, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite)
# (c) 2008-2010 Andy Whitcroft <apw@canonical.com>
# (c) 2010-2018 Joe Perches <joe@perches.com>
use strict;
use warnings;
use POSIX;
use File::Basename;
use Cwd 'abs_path';
use Term::ANSIColor qw(:constants);
use Encode qw(decode encode);
my $P = $0;
my $D = dirname(abs_path($P));
my $V = '0.32';
use Getopt::Long qw(:config no_auto_abbrev);
my $quiet = 0;
my $tree = 1;
my $chk_signoff = 1;
my $chk_patch = 1;
my $tst_only;
my $emacs = 0;
my $terse = 0;
my $showfile = 0;
my $file = 0;
my $git = 0;
my %git_commits = ();
my $check = 0;
my $check_orig = 0;
my $summary = 1;
my $mailback = 0;
my $summary_file = 0;
my $show_types = 0;
my $list_types = 0;
my $fix = 0;
my $fix_inplace = 0;
my $root;
my %debug;
my %camelcase = ();
my %use_type = ();
my @use = ();
my %ignore_type = ();
my @ignore = ();
my @exclude = ();
my $help = 0;
my $configuration_file = ".checkpatch.conf";
my $max_line_length = 80;
my $ignore_perl_version = 0;
my $minimum_perl_version = 5.10.0;
my $min_conf_desc_length = 4;
my $spelling_file = "$D/spelling.txt";
my $codespell = 0;
my $codespellfile = "/usr/share/codespell/dictionary.txt";
my $conststructsfile = "$D/const_structs.checkpatch";
my $typedefsfile;
my $color = "auto";
my $allow_c99_comments = 0;
# git output parsing needs US English output, so first set backtick child process LANGUAGE
my $git_command ='export LANGUAGE=en_US.UTF-8; git';
my $tabsize = 8;
# Print the usage/help text and terminate the process.
#
# $exitcode: exit status; 0 for -h/--help/--version, non-zero when
# called after an option-parsing failure.
sub help {
my ($exitcode) = @_;
# The heredoc interpolates $P, $V, $max_line_length and $tabsize, so
# the printed text always reflects the current defaults.
print << "EOM";
Usage: $P [OPTION]... [FILE]...
Version: $V
Options:
-q, --quiet quiet
--no-tree run without a kernel tree
--no-signoff do not check for 'Signed-off-by' line
--patch treat FILE as patchfile (default)
--emacs emacs compile window format
--terse one line per report
--showfile emit diffed file position, not input file position
-g, --git treat FILE as a single commit or git revision range
single git commit with:
<rev>
<rev>^
<rev>~n
multiple git commits with:
<rev1>..<rev2>
<rev1>...<rev2>
<rev>-<count>
git merges are ignored
-f, --file treat FILE as regular source file
--subjective, --strict enable more subjective tests
--list-types list the possible message types
--types TYPE(,TYPE2...) show only these comma separated message types
--ignore TYPE(,TYPE2...) ignore various comma separated message types
--exclude DIR (--exclude DIR2...) exclude directories
--show-types show the specific message type in the output
--max-line-length=n set the maximum line length, (default $max_line_length)
if exceeded, warn on patches
requires --strict for use with --file
--min-conf-desc-length=n set the min description length, if shorter, warn
--tab-size=n set the number of spaces for tab (default $tabsize)
--root=PATH PATH to the kernel tree root
--no-summary suppress the per-file summary
--mailback only produce a report in case of warnings/errors
--summary-file include the filename in summary
--debug KEY=[0|1] turn on/off debugging of KEY, where KEY is one of
'values', 'possible', 'type', and 'attr' (default
is all off)
--test-only=WORD report only warnings/errors containing WORD
literally
--fix EXPERIMENTAL - may create horrible results
If correctable single-line errors exist, create
"<inputfile>.EXPERIMENTAL-checkpatch-fixes"
with potential errors corrected to the preferred
checkpatch style
--fix-inplace EXPERIMENTAL - may create horrible results
Is the same as --fix, but overwrites the input
file. It's your fault if there's no backup or git
--ignore-perl-version override checking of perl version. expect
runtime errors.
--codespell Use the codespell dictionary for spelling/typos
(default:/usr/share/codespell/dictionary.txt)
--codespellfile Use this codespell dictionary
--typedefsfile Read additional types from this file
--color[=WHEN] Use colors 'always', 'never', or only when output
is a terminal ('auto'). Default is 'auto'.
-h, --help, --version display this help and exit
When FILE is - read standard input.
EOM
exit($exitcode);
}
# Return the arguments with duplicates removed, keeping first-seen order.
sub uniq {
	my %already_seen;
	my @unique;

	foreach my $item (@_) {
		push(@unique, $item) unless ($already_seen{$item}++);
	}
	return @unique;
}
# Print every message type this script can emit, then exit.
#
# Scans the script's own source (abs_path($P)) for CHK/WARN/ERROR
# invocations — including ones routed through the $msg_level and
# $msg_type variables — and prints the sorted, de-duplicated names.
sub list_types {
my ($exitcode) = @_;
my $count = 0;
# Slurp mode: read the whole script with a single <>.
local $/ = undef;
open(my $script, '<', abs_path($P)) or
die "$P: Can't read '$P' $!\n";
my $text = <$script>;
close($script);
my @types = ();
# Also catch when type or level is passed through a variable
for ($text =~ /(?:(?:\bCHK|\bWARN|\bERROR|&\{\$msg_level})\s*\(|\$msg_type\s*=)\s*"([^"]+)"/g) {
push (@types, $_);
}
@types = sort(uniq(@types));
print("#\tMessage type\n\n");
foreach my $type (@types) {
print(++$count . "\t" . $type . "\n");
}
exit($exitcode);
}
my $conf = which_conf($configuration_file);
if (-f $conf) {
my @conf_args;
open(my $conffile, '<', "$conf")
or warn "$P: Can't find a readable $configuration_file file $!\n";
while (<$conffile>) {
my $line = $_;
$line =~ s/\s*\n?$//g;
$line =~ s/^\s*//g;
$line =~ s/\s+/ /g;
next if ($line =~ m/^\s*#/);
next if ($line =~ m/^\s*$/);
my @words = split(" ", $line);
foreach my $word (@words) {
last if ($word =~ m/^#/);
push (@conf_args, $word);
}
}
close($conffile);
unshift(@ARGV, @conf_args) if @conf_args;
}
# Perl's Getopt::Long allows options to take optional arguments after a space.
# Prevent --color by itself from consuming other arguments
foreach (@ARGV) {
if ($_ eq "--color" || $_ eq "-color") {
$_ = "--color=$color";
}
}
GetOptions(
'q|quiet+' => \$quiet,
'tree!' => \$tree,
'signoff!' => \$chk_signoff,
'patch!' => \$chk_patch,
'emacs!' => \$emacs,
'terse!' => \$terse,
'showfile!' => \$showfile,
'f|file!' => \$file,
'g|git!' => \$git,
'subjective!' => \$check,
'strict!' => \$check,
'ignore=s' => \@ignore,
'exclude=s' => \@exclude,
'types=s' => \@use,
'show-types!' => \$show_types,
'list-types!' => \$list_types,
'max-line-length=i' => \$max_line_length,
'min-conf-desc-length=i' => \$min_conf_desc_length,
'tab-size=i' => \$tabsize,
'root=s' => \$root,
'summary!' => \$summary,
'mailback!' => \$mailback,
'summary-file!' => \$summary_file,
'fix!' => \$fix,
'fix-inplace!' => \$fix_inplace,
'ignore-perl-version!' => \$ignore_perl_version,
'debug=s' => \%debug,
'test-only=s' => \$tst_only,
'codespell!' => \$codespell,
'codespellfile=s' => \$codespellfile,
'typedefsfile=s' => \$typedefsfile,
'color=s' => \$color,
'no-color' => \$color, #keep old behaviors of -nocolor
'nocolor' => \$color, #keep old behaviors of -nocolor
'h|help' => \$help,
'version' => \$help
) or help(1);
help(0) if ($help);
list_types(0) if ($list_types);
$fix = 1 if ($fix_inplace);
$check_orig = $check;
die "$P: --git cannot be used with --file or --fix\n" if ($git && ($file || $fix));
my $exit = 0;
my $perl_version_ok = 1;
if ($^V && $^V lt $minimum_perl_version) {
$perl_version_ok = 0;
printf "$P: requires at least perl version %vd\n", $minimum_perl_version;
exit(1) if (!$ignore_perl_version);
}
#if no filenames are given, push '-' to read patch from stdin
if ($#ARGV < 0) {
push(@ARGV, '-');
}
if ($color =~ /^[01]$/) {
$color = !$color;
} elsif ($color =~ /^always$/i) {
$color = 1;
} elsif ($color =~ /^never$/i) {
$color = 0;
} elsif ($color =~ /^auto$/i) {
$color = (-t STDOUT);
} else {
die "$P: Invalid color mode: $color\n";
}
# skip TAB size 1 to avoid additional checks on $tabsize - 1
die "$P: Invalid TAB size: $tabsize\n" if ($tabsize < 2);
# Count normalized words from @$arrayRef into %$hashRef.
# Each element may itself be a comma-separated list; entries are
# trimmed, whitespace-squeezed and uppercased, and '#'-comment or
# empty entries are skipped.
sub hash_save_array_words {
	my ($hashRef, $arrayRef) = @_;

	# Flatten: join everything with commas, then split back apart so
	# "a,b" and ("a", "b") behave identically.
	foreach my $word (split(/,/, join(',', @$arrayRef))) {
		$word =~ s/\s*\n?$//g;
		$word =~ s/^\s*//g;
		$word =~ s/\s+/ /g;
		$word =~ tr/[a-z]/[A-Z]/;
		next if ($word =~ m/^\s*#/);
		next if ($word =~ m/^\s*$/);
		$hashRef->{$word}++;
	}
}
# Print the message types collected in %$hashRef on one line, tagged
# with $prefix (e.g. "Used"/"Ignored"); prints nothing for an empty hash.
sub hash_show_words {
	my ($hashRef, $prefix) = @_;

	return unless (keys %$hashRef);

	print "\nNOTE: $prefix message types:";
	print " $_" for (sort keys %$hashRef);
	print "\n";
}
hash_save_array_words(\%ignore_type, \@ignore);
hash_save_array_words(\%use_type, \@use);
my $dbg_values = 0;
my $dbg_possible = 0;
my $dbg_type = 0;
my $dbg_attr = 0;
for my $key (keys %debug) {
## no critic
eval "\${dbg_$key} = '$debug{$key}';";
die "$@" if ($@);
}
my $rpt_cleaners = 0;
if ($terse) {
$emacs = 1;
$quiet++;
}
if ($tree) {
if (defined $root) {
if (!top_of_kernel_tree($root)) {
die "$P: $root: --root does not point at a valid tree\n";
}
} else {
if (top_of_kernel_tree('.')) {
$root = '.';
} elsif ($0 =~ m@(.*)/scripts/[^/]*$@ &&
top_of_kernel_tree($1)) {
$root = $1;
}
}
if (!defined $root) {
print "Must be run from the top-level dir. of a kernel tree\n";
exit(2);
}
}
my $emitted_corrupt = 0;
our $Ident = qr{
[A-Za-z_][A-Za-z\d_]*
(?:\s*\#\#\s*[A-Za-z_][A-Za-z\d_]*)*
}x;
our $Storage = qr{extern|static|asmlinkage};
our $Sparse = qr{
__user|
__force|
__iomem|
__must_check|
__kprobes|
__ref|
__refconst|
__refdata|
__rcu|
__private
}x;
our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};
our $InitAttributeConst = qr{$InitAttributePrefix(?:initconst\b)};
our $InitAttributeInit = qr{$InitAttributePrefix(?:init\b)};
our $InitAttribute = qr{$InitAttributeData|$InitAttributeConst|$InitAttributeInit};
# Notes to $Attribute:
# We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
our $Attribute = qr{
const|
__percpu|
__nocast|
__safe|
__bitwise|
__packed__|
__packed2__|
__naked|
__maybe_unused|
__always_unused|
__noreturn|
__used|
__unused|
__cold|
__pure|
__noclone|
__deprecated|
__read_mostly|
__ro_after_init|
__kprobes|
$InitAttribute|
____cacheline_aligned|
____cacheline_aligned_in_smp|
____cacheline_internodealigned_in_smp|
__weak|
__syscall
}x;
our $Modifier;
our $Inline = qr{inline|__always_inline|noinline|__inline|__inline__};
our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
our $Lval = qr{$Ident(?:$Member)*};
our $Int_type = qr{(?i)llu|ull|ll|lu|ul|l|u};
our $Binary = qr{(?i)0b[01]+$Int_type?};
our $Hex = qr{(?i)0x[0-9a-f]+$Int_type?};
our $Int = qr{[0-9]+$Int_type?};
our $Octal = qr{0[0-7]+$Int_type?};
our $String = qr{"[X\t]*"};
our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?};
our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?};
our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?};
our $Float = qr{$Float_hex|$Float_dec|$Float_int};
our $Constant = qr{$Float|$Binary|$Octal|$Hex|$Int};
our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=};
our $Compare = qr{<=|>=|==|!=|<|(?<!-)>};
our $Arithmetic = qr{\+|-|\*|\/|%};
our $Operators = qr{
<=|>=|==|!=|
=>|->|<<|>>|<|>|!|~|
&&|\|\||,|\^|\+\+|--|&|\||$Arithmetic
}x;
our $c90_Keywords = qr{do|for|while|if|else|return|goto|continue|switch|default|case|break}x;
our $BasicType;
our $NonptrType;
our $NonptrTypeMisordered;
our $NonptrTypeWithAttr;
our $Type;
our $TypeMisordered;
our $Declare;
our $DeclareMisordered;
our $NON_ASCII_UTF8 = qr{
[\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte
| \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs
| [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte
| \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates
| \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3
| [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15
| \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16
}x;
our $UTF8 = qr{
[\x09\x0A\x0D\x20-\x7E] # ASCII
| $NON_ASCII_UTF8
}x;
our $typeC99Typedefs = qr{(?:__)?(?:[us]_?)?int_?(?:8|16|32|64)_t};
our $typeOtherOSTypedefs = qr{(?x:
u_(?:char|short|int|long) | # bsd
u(?:nchar|short|int|long) # sysv
)};
our $typeKernelTypedefs = qr{(?x:
(?:__)?(?:u|s|be|le)(?:8|16|32|64)|
atomic_t
)};
our $typeTypedefs = qr{(?x:
$typeC99Typedefs\b|
$typeOtherOSTypedefs\b|
$typeKernelTypedefs\b
)};
our $zero_initializer = qr{(?:(?:0[xX])?0+$Int_type?|NULL|false)\b};
our $logFunctions = qr{(?x:
printk(?:_ratelimited|_once|_deferred_once|_deferred|)|
(?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)|
TP_printk|
WARN(?:_RATELIMIT|_ONCE|)|
panic|
MODULE_[A-Z_]+|
seq_vprintf|seq_printf|seq_puts
)};
our $allocFunctions = qr{(?x:
(?:(?:devm_)?
(?:kv|k|v)[czm]alloc(?:_node|_array)? |
kstrdup(?:_const)? |
kmemdup(?:_nul)?) |
(?:\w+)?alloc_skb(?:_ip_align)? |
# dev_alloc_skb/netdev_alloc_skb, et al
dma_alloc_coherent
)};
our $signature_tags = qr{(?xi:
Signed-off-by:|
Co-authored-by:|
Co-developed-by:|
Acked-by:|
Tested-by:|
Reviewed-by:|
Reported-by:|
Suggested-by:|
To:|
Cc:
)};
our @typeListMisordered = (
qr{char\s+(?:un)?signed},
qr{int\s+(?:(?:un)?signed\s+)?short\s},
qr{int\s+short(?:\s+(?:un)?signed)},
qr{short\s+int(?:\s+(?:un)?signed)},
qr{(?:un)?signed\s+int\s+short},
qr{short\s+(?:un)?signed},
qr{long\s+int\s+(?:un)?signed},
qr{int\s+long\s+(?:un)?signed},
qr{long\s+(?:un)?signed\s+int},
qr{int\s+(?:un)?signed\s+long},
qr{int\s+(?:un)?signed},
qr{int\s+long\s+long\s+(?:un)?signed},
qr{long\s+long\s+int\s+(?:un)?signed},
qr{long\s+long\s+(?:un)?signed\s+int},
qr{long\s+long\s+(?:un)?signed},
qr{long\s+(?:un)?signed},
);
our @typeList = (
qr{void},
qr{(?:(?:un)?signed\s+)?char},
qr{(?:(?:un)?signed\s+)?short\s+int},
qr{(?:(?:un)?signed\s+)?short},
qr{(?:(?:un)?signed\s+)?int},
qr{(?:(?:un)?signed\s+)?long\s+int},
qr{(?:(?:un)?signed\s+)?long\s+long\s+int},
qr{(?:(?:un)?signed\s+)?long\s+long},
qr{(?:(?:un)?signed\s+)?long},
qr{(?:un)?signed},
qr{float},
qr{double},
qr{bool},
qr{struct\s+$Ident},
qr{union\s+$Ident},
qr{enum\s+$Ident},
qr{${Ident}_t},
qr{${Ident}_handler},
qr{${Ident}_handler_fn},
@typeListMisordered,
);
our $C90_int_types = qr{(?x:
long\s+long\s+int\s+(?:un)?signed|
long\s+long\s+(?:un)?signed\s+int|
long\s+long\s+(?:un)?signed|
(?:(?:un)?signed\s+)?long\s+long\s+int|
(?:(?:un)?signed\s+)?long\s+long|
int\s+long\s+long\s+(?:un)?signed|
int\s+(?:(?:un)?signed\s+)?long\s+long|
long\s+int\s+(?:un)?signed|
long\s+(?:un)?signed\s+int|
long\s+(?:un)?signed|
(?:(?:un)?signed\s+)?long\s+int|
(?:(?:un)?signed\s+)?long|
int\s+long\s+(?:un)?signed|
int\s+(?:(?:un)?signed\s+)?long|
int\s+(?:un)?signed|
(?:(?:un)?signed\s+)?int
)};
our @typeListFile = ();
our @typeListWithAttr = (
@typeList,
qr{struct\s+$InitAttribute\s+$Ident},
qr{union\s+$InitAttribute\s+$Ident},
);
our @modifierList = (
qr{fastcall},
);
our @modifierListFile = ();
our @mode_permission_funcs = (
["module_param", 3],
["module_param_(?:array|named|string)", 4],
["module_param_array_named", 5],
["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2],
["proc_create(?:_data|)", 2],
["(?:CLASS|DEVICE|SENSOR|SENSOR_DEVICE|IIO_DEVICE)_ATTR", 2],
["IIO_DEV_ATTR_[A-Z_]+", 1],
["SENSOR_(?:DEVICE_|)ATTR_2", 2],
["SENSOR_TEMPLATE(?:_2|)", 3],
["__ATTR", 2],
);
our $api_defines = qr{(?x:
_ATFILE_SOURCE|
_BSD_SOURCE|
_DEFAULT_SOURCE|
_GNU_SOURCE|
_ISOC11_SOURCE|
_ISOC99_SOURCE|
_POSIX_SOURCE|
_SVID_SOURCE|
_XOPEN_SOURCE_EXTENDED
)};
my $word_pattern = '\b[A-Z]?[a-z]{2,}\b';
#Create a search pattern for all these functions to speed up a loop below
our $mode_perms_search = "";
foreach my $entry (@mode_permission_funcs) {
$mode_perms_search .= '|' if ($mode_perms_search ne "");
$mode_perms_search .= $entry->[0];
}
$mode_perms_search = "(?:${mode_perms_search})";
our %deprecated_apis = (
"synchronize_rcu_bh" => "synchronize_rcu",
"synchronize_rcu_bh_expedited" => "synchronize_rcu_expedited",
"call_rcu_bh" => "call_rcu",
"rcu_barrier_bh" => "rcu_barrier",
"synchronize_sched" => "synchronize_rcu",
"synchronize_sched_expedited" => "synchronize_rcu_expedited",
"call_rcu_sched" => "call_rcu",
"rcu_barrier_sched" => "rcu_barrier",
"get_state_synchronize_sched" => "get_state_synchronize_rcu",
"cond_synchronize_sched" => "cond_synchronize_rcu",
);
#Create a search pattern for all these strings to speed up a loop below
our $deprecated_apis_search = "";
foreach my $entry (keys %deprecated_apis) {
$deprecated_apis_search .= '|' if ($deprecated_apis_search ne "");
$deprecated_apis_search .= $entry;
}
$deprecated_apis_search = "(?:${deprecated_apis_search})";
our $mode_perms_world_writable = qr{
S_IWUGO |
S_IWOTH |
S_IRWXUGO |
S_IALLUGO |
0[0-7][0-7][2367]
}x;
our %mode_permission_string_types = (
"S_IRWXU" => 0700,
"S_IRUSR" => 0400,
"S_IWUSR" => 0200,
"S_IXUSR" => 0100,
"S_IRWXG" => 0070,
"S_IRGRP" => 0040,
"S_IWGRP" => 0020,
"S_IXGRP" => 0010,
"S_IRWXO" => 0007,
"S_IROTH" => 0004,
"S_IWOTH" => 0002,
"S_IXOTH" => 0001,
"S_IRWXUGO" => 0777,
"S_IRUGO" => 0444,
"S_IWUGO" => 0222,
"S_IXUGO" => 0111,
);
#Create a search pattern for all these strings to speed up a loop below
our $mode_perms_string_search = "";
foreach my $entry (keys %mode_permission_string_types) {
$mode_perms_string_search .= '|' if ($mode_perms_string_search ne "");
$mode_perms_string_search .= $entry;
}
our $single_mode_perms_string_search = "(?:${mode_perms_string_search})";
our $multi_mode_perms_string_search = qr{
${single_mode_perms_string_search}
(?:\s*\|\s*${single_mode_perms_string_search})*
}x;
# Convert a string of S_I* permission macros (possibly OR-ed with '|')
# into a 4-digit octal string, e.g. "S_IRUSR | S_IWUSR" -> "0600".
# A string that is already a bare 4-digit octal literal is returned
# trimmed and unchanged.
sub perms_to_octal {
my ($string) = @_;
return trim($string) if ($string =~ /^\s*0[0-7]{3,3}\s*$/);
my $val = "";
my $oval = "";
my $to = 0;
my $curpos = 0;
my $lastpos = 0;
# Walk successive "S_IFOO |" matches; bail at the first gap so that
# only one contiguous run of macros is folded into the result.
while ($string =~ /\b(($single_mode_perms_string_search)\b(?:\s*\|\s*)?\s*)/g) {
$curpos = pos($string);
my $match = $2;
my $omatch = $1;
last if ($lastpos > 0 && ($curpos - length($omatch) != $lastpos));
$lastpos = $curpos;
# Accumulate the numeric mode bits for this macro.
$to |= $mode_permission_string_types{$match};
$val .= '\s*\|\s*' if ($val ne "");
$val .= $match;
$oval .= $omatch;
}
# $val/$oval are built but only $to is used here — presumably kept
# for callers of a fuller upstream version; verify before removing.
$oval =~ s/^\s*\|\s*//;
$oval =~ s/\s*\|\s*$//;
return sprintf("%04o", $to);
}
our $allowed_asm_includes = qr{(?x:
irq|
memory|
time|
reboot
)};
# memory.h: ARM has a custom one
# Load common spelling mistakes and build regular expression list.
my $misspellings;
my %spelling_fix;
if (open(my $spelling, '<', $spelling_file)) {
while (<$spelling>) {
my $line = $_;
$line =~ s/\s*\n?$//g;
$line =~ s/^\s*//g;
next if ($line =~ m/^\s*#/);
next if ($line =~ m/^\s*$/);
my ($suspect, $fix) = split(/\|\|/, $line);
$spelling_fix{$suspect} = $fix;
}
close($spelling);
} else {
warn "No typos will be found - file '$spelling_file': $!\n";
}
if ($codespell) {
if (open(my $spelling, '<', $codespellfile)) {
while (<$spelling>) {
my $line = $_;
$line =~ s/\s*\n?$//g;
$line =~ s/^\s*//g;
next if ($line =~ m/^\s*#/);
next if ($line =~ m/^\s*$/);
next if ($line =~ m/, disabled/i);
$line =~ s/,.*$//;
my ($suspect, $fix) = split(/->/, $line);
$spelling_fix{$suspect} = $fix;
}
close($spelling);
} else {
warn "No codespell typos will be found - file '$codespellfile': $!\n";
}
}
$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix;
# Read newline-separated words from $file and append them to the
# '|'-separated string referenced by $wordsRef (building up a regex
# alternation). '#'-comment lines, blank lines and entries containing
# internal whitespace are skipped (the latter with a diagnostic).
# Returns 1 on success, 0 when the file cannot be opened.
sub read_words {
	my ($wordsRef, $file) = @_;

	if (open(my $words, '<', $file)) {
		while (<$words>) {
			my $line = $_;
			$line =~ s/\s*\n?$//g;
			$line =~ s/^\s*//g;
			next if ($line =~ m/^\s*#/);
			next if ($line =~ m/^\s*$/);
			if ($line =~ /\s/) {
				print("$file: '$line' invalid - ignored\n");
				next;
			}
			$$wordsRef .= '|' if (defined $$wordsRef);
			$$wordsRef .= $line;
		}
		# Fix: close the lexical filehandle; the original close($file)
		# tried to close the file *name* string and leaked the handle.
		close($words);
		return 1;
	}
	return 0;
}
# Regex of struct types that should be const; seeding it from
# const_structs.checkpatch is currently disabled (commented block below).
my $const_structs;
#if (show_type("CONST_STRUCT")) {
# read_words(\$const_structs, $conststructsfile)
# or warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
#}
# --typedefsfile: extend the typedef-matching regex with user-supplied
# type names, one per line.
if (defined($typedefsfile)) {
my $typeOtherTypedefs;
read_words(\$typeOtherTypedefs, $typedefsfile)
or warn "No additional types will be considered - file '$typedefsfile': $!\n";
$typeTypedefs .= '|' . $typeOtherTypedefs if (defined $typeOtherTypedefs);
}
# (Re)build the composite type-matching regexes ($Type, $Declare, etc.)
# from the current modifier/type lists. Called once at startup and again
# per processed file, since --file runs may have appended entries to
# @typeListFile / @modifierListFile.
sub build_types {
my $mods = "(?x: \n" . join("|\n ", (@modifierList, @modifierListFile)) . "\n)";
my $all = "(?x: \n" . join("|\n ", (@typeList, @typeListFile)) . "\n)";
my $Misordered = "(?x: \n" . join("|\n ", @typeListMisordered) . "\n)";
my $allWithAttr = "(?x: \n" . join("|\n ", @typeListWithAttr) . "\n)";
$Modifier = qr{(?:$Attribute|$Sparse|$mods)};
$BasicType = qr{
(?:$typeTypedefs\b)|
(?:${all}\b)
}x;
$NonptrType = qr{
(?:$Modifier\s+|const\s+)*
(?:
(?:typeof|__typeof__)\s*\([^\)]*\)|
(?:$typeTypedefs\b)|
(?:${all}\b)
)
(?:\s+$Modifier|\s+const)*
}x;
$NonptrTypeMisordered = qr{
(?:$Modifier\s+|const\s+)*
(?:
(?:${Misordered}\b)
)
(?:\s+$Modifier|\s+const)*
}x;
$NonptrTypeWithAttr = qr{
(?:$Modifier\s+|const\s+)*
(?:
(?:typeof|__typeof__)\s*\([^\)]*\)|
(?:$typeTypedefs\b)|
(?:${allWithAttr}\b)
)
(?:\s+$Modifier|\s+const)*
}x;
$Type = qr{
$NonptrType
(?:(?:\s|\*|\[\])+\s*const|(?:\s|\*\s*(?:const\s*)?|\[\])+|(?:\s*\[\s*\])+){0,4}
(?:\s+$Inline|\s+$Modifier)*
}x;
$TypeMisordered = qr{
$NonptrTypeMisordered
(?:(?:\s|\*|\[\])+\s*const|(?:\s|\*\s*(?:const\s*)?|\[\])+|(?:\s*\[\s*\])+){0,4}
(?:\s+$Inline|\s+$Modifier)*
}x;
$Declare = qr{(?:$Storage\s+(?:$Inline\s+)?)?$Type};
$DeclareMisordered = qr{(?:$Storage\s+(?:$Inline\s+)?)?$TypeMisordered};
}
build_types();
our $Typecast = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*};
# Using $balanced_parens, $LvalOrFunc, or $FuncArg
# requires at least perl version v5.10.0
# Any use must be runtime checked with $^V
# Recursive pattern: matches one fully balanced (...) group.
our $balanced_parens = qr/(\((?:[^\(\)]++|(?-1))*\))/;
our $LvalOrFunc = qr{((?:[\&\*]\s*)?$Lval)\s*($balanced_parens{0,1})\s*};
our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant|$String)};
# Matches DEFINE_*/DECLARE_* style declaration macros, LIST_HEAD
# variants, and the *_ON_STACK crypto request macros.
our $declaration_macros = qr{(?x:
(?:$Storage\s+)?(?:[A-Z_][A-Z0-9]*_){0,2}(?:DEFINE|DECLARE)(?:_[A-Z0-9]+){1,6}\s*\(|
(?:$Storage\s+)?[HLP]?LIST_HEAD\s*\(|
(?:SKCIPHER_REQUEST|SHASH_DESC|AHASH_REQUEST)_ON_STACK\s*\(
)};
# Strip matched outer parentheses (with surrounding whitespace) from
# $string, one layer per iteration, then squeeze internal whitespace
# runs to single spaces. Returns "" for undef input.
sub deparenthesize {
	my ($string) = @_;

	return "" unless (defined($string));

	# Peel off one balanced outer (...) layer per pass.
	while ($string =~ /^\s*\(.*\)\s*$/) {
		$string =~ s@^\s*\(\s*@@;
		$string =~ s@\s*\)\s*$@@;
	}
	$string =~ s@\s+@ @g;

	return $string;
}
# Harvest CamelCase identifiers from one header file into the global
# %camelcase hash. Picks up #define/typedef names, declarations and
# union/struct/enum tags whose names mix upper and lower case.
sub seed_camelcase_file {
my ($file) = @_;
return if (!(-f $file));
# Slurp mode for a single-read of the whole file.
local $/;
open(my $include_file, '<', "$file")
or warn "$P: Can't read '$file' $!\n";
my $text = <$include_file>;
close($include_file);
my @lines = split('\n', $text);
foreach my $line (@lines) {
# Quick reject: the line must contain a case transition at all.
next if ($line !~ /(?:[A-Z][a-z]|[a-z][A-Z])/);
if ($line =~ /^[ \t]*(?:#[ \t]*define|typedef\s+$Type)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)/) {
$camelcase{$1} = 1;
} elsif ($line =~ /^\s*$Declare\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*[\(\[,;]/) {
$camelcase{$1} = 1;
} elsif ($line =~ /^\s*(?:union|struct|enum)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*[;\{]/) {
$camelcase{$1} = 1;
}
}
}
# Cache of get_maintainer.pl status output, keyed by filename, so each
# file is queried at most once per run.
our %maintained_status = ();
# Return true when the maintainers data marks $filename as obsolete.
# Requires a tree with scripts/get_maintainer.pl; otherwise always 0.
sub is_maintained_obsolete {
my ($filename) = @_;
return 0 if (!$tree || !(-e "$root/scripts/get_maintainer.pl"));
if (!exists($maintained_status{$filename})) {
$maintained_status{$filename} = `perl $root/scripts/get_maintainer.pl --status --nom --nol --nogit --nogit-fallback -f $filename 2>&1`;
}
return $maintained_status{$filename} =~ /obsolete/i;
}
# Validate an SPDX license expression by piping it through the tree's
# scripts/spdxcheck.py. Returns 1 (valid, or unverifiable) when there
# is no tree, no python, no spdxcheck.py or no .git; returns 0 when
# the checker produces any output (i.e. complains).
#
# NOTE(review): the 'sub is_SPDX_License_valid {' header line was
# missing here, leaving an orphan body and dangling brace at top
# level; restored to match upstream checkpatch.pl.
sub is_SPDX_License_valid {
	my ($license) = @_;

	return 1 if (!$tree || which("python") eq "" || !(-e "$root/scripts/spdxcheck.py") || !(-e "$root/.git"));

	my $root_path = abs_path($root);
	my $status = `cd "$root_path"; echo "$license" | python scripts/spdxcheck.py -`;
	return 0 if ($status ne "");
	return 1;
}
# Run-once latch for seed_camelcase_includes().
my $camelcase_seeded = 0;
# Populate %camelcase from the tree's include/ headers, using an
# on-disk cache file keyed by the last include/ commit (git trees) or
# the newest header mtime (non-git trees) to avoid rescanning.
sub seed_camelcase_includes {
return if ($camelcase_seeded);
my $files;
my $camelcase_cache = "";
my @include_files = ();
$camelcase_seeded = 1;
if (-e ".git") {
my $git_last_include_commit = `${git_command} log --no-merges --pretty=format:"%h%n" -1 -- include`;
chomp $git_last_include_commit;
$camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
} else {
# No git: key the cache on the most recent header modification time.
my $last_mod_date = 0;
$files = `find $root/include -name "*.h"`;
@include_files = split('\n', $files);
foreach my $file (@include_files) {
my $date = POSIX::strftime("%Y%m%d%H%M",
localtime((stat $file)[9]));
$last_mod_date = $date if ($last_mod_date < $date);
}
$camelcase_cache = ".checkpatch-camelcase.date.$last_mod_date";
}
# Fast path: load identifiers straight from a matching cache file.
if ($camelcase_cache ne "" && -f $camelcase_cache) {
open(my $camelcase_file, '<', "$camelcase_cache")
or warn "$P: Can't read '$camelcase_cache' $!\n";
while (<$camelcase_file>) {
chomp;
$camelcase{$_} = 1;
}
close($camelcase_file);
return;
}
if (-e ".git") {
$files = `${git_command} ls-files "include/*.h"`;
@include_files = split('\n', $files);
}
foreach my $file (@include_files) {
seed_camelcase_file($file);
}
# Drop any stale caches and write a fresh, sorted one.
if ($camelcase_cache ne "") {
unlink glob ".checkpatch-camelcase.*";
open(my $camelcase_file, '>', "$camelcase_cache")
or warn "$P: Can't write '$camelcase_cache' $!\n";
foreach (sort { lc($a) cmp lc($b) } keys(%camelcase)) {
print $camelcase_file ("$_\n");
}
close($camelcase_file);
}
}
# Resolve $commit to a (12-char id, subject) pair via 'git log'.
# Returns the incoming ($id, $desc) unchanged when git is unavailable
# or there is no .git directory; sets $id to undef for an unknown
# revision; leaves both untouched for an ambiguous short SHA1.
sub git_commit_info {
my ($commit, $id, $desc) = @_;
return ($id, $desc) if ((which("git") eq "") || !(-e ".git"));
my $output = `${git_command} log --no-color --format='%H %s' -1 $commit 2>&1`;
$output =~ s/^\s*//gm;
my @lines = split("\n", $output);
return ($id, $desc) if ($#lines < 0);
if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous/) {
# Maybe one day convert this block of bash into something that returns
# all matching commit ids, but it's very slow...
#
# echo "checking commits $1..."
# git rev-list --remotes | grep -i "^$1" |
# while read line ; do
# git log --format='%H %s' -1 $line |
# echo "commit $(cut -c 1-12,41-)"
# done
} elsif ($lines[0] =~ /^fatal: ambiguous argument '$commit': unknown revision or path not in the working tree\./) {
$id = undef;
} else {
# "%H %s" output: 40-char hash + space, so the subject starts at 41.
$id = substr($lines[0], 0, 12);
$desc = substr($lines[0], 41);
}
return ($id, $desc);
}
$chk_signoff = 0 if ($file);
my @rawlines = ();
my @lines = ();
my @fixed = ();
# Per-run --fix bookkeeping: lines queued for insertion/deletion and the
# index into @fixed currently being patched (-1 = none yet).
my @fixed_inserted = ();
my @fixed_deleted = ();
my $fixlinenr = -1;
# If input is git commits, extract all commits from the commit expressions.
# For example, HEAD-3 means we need check 'HEAD, HEAD~1, HEAD~2'.
die "$P: No git repository found\n" if ($git && !-e ".git");
# Expand each commit expression on the command line into individual SHA1s.
if ($git) {
my @commits = ();
foreach my $commit_expr (@ARGV) {
my $git_range;
if ($commit_expr =~ m/^(.*)-(\d+)$/) {
$git_range = "-$2 $1";
} elsif ($commit_expr =~ m/\.\./) {
$git_range = "$commit_expr";
} else {
$git_range = "-1 $commit_expr";
}
my $lines = `${git_command} log --no-color --no-merges --pretty=format:'%H %s' $git_range`;
foreach my $line (split(/\n/, $lines)) {
$line =~ /^([0-9a-fA-F]{40,40}) (.*)$/;
next if (!defined($1) || !defined($2));
my $sha1 = $1;
my $subject = $2;
# unshift so commits end up oldest-first.
unshift(@commits, $sha1);
$git_commits{$sha1} = $subject;
}
}
die "$P: no git commits after extraction!\n" if (@commits == 0);
@ARGV = @commits;
}
# Human-readable name of the item being checked, used in report headers.
my $vname;
$allow_c99_comments = !defined $ignore_type{"C99_COMMENT_TOLERANCE"};
# Main loop: read each patch/file/commit into @rawlines and process it.
for my $filename (@ARGV) {
my $FILE;
if ($git) {
open($FILE, '-|', "git format-patch -M --stdout -1 $filename") ||
die "$P: $filename: git format-patch failed - $!\n";
} elsif ($file) {
# -f/--file mode: synthesize a patch adding the whole file.
open($FILE, '-|', "diff -u /dev/null $filename") ||
die "$P: $filename: diff failed - $!\n";
} elsif ($filename eq '-') {
open($FILE, '<&STDIN');
} else {
open($FILE, '<', "$filename") ||
die "$P: $filename: open failed - $!\n";
}
if ($filename eq '-') {
$vname = 'Your patch';
} elsif ($git) {
$vname = "Commit " . substr($filename, 0, 12) . ' ("' . $git_commits{$filename} . '")';
} else {
$vname = $filename;
}
while (<$FILE>) {
chomp;
push(@rawlines, $_);
# For stdin patches, prefer the Subject: line as the display name.
$vname = qq("$1") if ($filename eq '-' && $_ =~ m/^Subject:\s+(.+)/i);
}
close($FILE);
if ($#ARGV > 0 && $quiet == 0) {
print '-' x length($vname) . "\n";
print "$vname\n";
print '-' x length($vname) . "\n";
}
if (!process($filename)) {
$exit = 1;
}
# Reset per-input state before the next patch/file/commit.
@rawlines = ();
@lines = ();
@fixed = ();
@fixed_inserted = ();
@fixed_deleted = ();
$fixlinenr = -1;
@modifierListFile = ();
@typeListFile = ();
build_types();
}
# Final summary: type statistics and closing notes.
if (!$quiet) {
hash_show_words(\%use_type, "Used");
hash_show_words(\%ignore_type, "Ignored");
if (!$perl_version_ok) {
print << "EOM"
NOTE: perl $^V is not modern enough to detect all possible issues.
An upgrade to at least perl $minimum_perl_version is suggested.
EOM
}
if ($exit) {
print << "EOM"
NOTE: If any of the errors are false positives, please report
them to the maintainers.
EOM
}
}
exit($exit);
# Heuristic: does $root look like the top of a Zephyr source tree?
# Returns 1 when every landmark file/directory below exists, else 0.
sub top_of_kernel_tree {
	my ($root) = @_;

	my @landmarks = (
		"LICENSE", "CODEOWNERS", "Kconfig", "README.rst",
		"doc", "arch", "include", "drivers", "boards",
		"kernel", "lib", "scripts",
	);
	foreach my $landmark (@landmarks) {
		# A single missing landmark disqualifies the directory.
		return 0 unless -e "$root/$landmark";
	}
	return 1;
}
# Split a formatted email ("Name <addr> (comment)") into its parts.
# Returns ($name, $address, $comment); $name comes back quoted/escaped
# when it contains characters that need quoting in a mail header.
sub parse_email {
my ($formatted_email) = @_;
my $name = "";
my $address = "";
my $comment = "";
# "Name <addr@host> trailing-comment" form.
if ($formatted_email =~ /^(.*)<(\S+\@\S+)>(.*)$/) {
$name = $1;
$address = $2;
$comment = $3 if defined $3;
# "<addr@host> trailing-comment" form (no name).
} elsif ($formatted_email =~ /^\s*<(\S+\@\S+)>(.*)$/) {
$address = $1;
$comment = $2 if defined $2;
# Bare "addr@host" form; whatever precedes the address is the name.
} elsif ($formatted_email =~ /(\S+\@\S+)(.*)$/) {
$address = $1;
$comment = $2 if defined $2;
$formatted_email =~ s/\Q$address\E.*$//;
$name = $formatted_email;
$name = trim($name);
$name =~ s/^\"|\"$//g;
# If there's a name left after stripping spaces and
# leading quotes, and the address doesn't have both
# leading and trailing angle brackets, the address
# is invalid. ie:
# "joe smith joe@smith.com" bad
# "joe smith <joe@smith.com" bad
if ($name ne "" && $address !~ /^<[^>]+>$/) {
$name = "";
$address = "";
$comment = "";
}
}
$name = trim($name);
$name =~ s/^\"|\"$//g;
$address = trim($address);
$address =~ s/^\<|\>$//g;
# Quote (and escape embedded quotes in) names that need it.
if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
$name =~ s/(?<!\\)"/\\"/g; ##escape quotes
$name = "\"$name\"";
}
return ($name, $address, $comment);
}
# Build a canonical "Name <addr>" string (or just the address when the
# name is empty), quoting names that contain "must quote" characters.
sub format_email {
	my ($name, $address) = @_;

	$name = trim($name);
	$name =~ s/^\"|\"$//g;	# strip any surrounding quotes first
	$address = trim($address);

	if ($name =~ /[^\w \-]/i) {	# name requires quoting
		$name =~ s/(?<!\\)"/\\"/g;	# escape embedded quotes
		$name = "\"$name\"";
	}

	return $name eq "" ? "$address" : "$name <$address>";
}
# Search the directories of $PATH for an executable named $bin;
# return its full path, or "" when not found.
sub which {
	my ($bin) = @_;

	for my $dir (split(/:/, $ENV{PATH})) {
		my $candidate = "$dir/$bin";
		return $candidate if -e $candidate;
	}
	return "";
}
# Look for a configuration file $conf in the current directory, the
# user's home directory and .scripts; return its path or "".
sub which_conf {
	my ($conf) = @_;

	for my $dir (split(/:/, ".:$ENV{HOME}:.scripts")) {
		my $candidate = "$dir/$conf";
		return $candidate if -e $candidate;
	}
	return "";
}
# Expand tab characters in $str to spaces using $tabsize-column tab
# stops; all other characters are copied through unchanged.
sub expand_tabs {
	my ($str) = @_;

	my $out = '';
	my $col = 0;
	foreach my $ch (split(//, $str)) {
		if ($ch eq "\t") {
			# Pad with at least one space, up to the next
			# multiple of $tabsize columns.
			do {
				$out .= ' ';
				$col++;
			} while ($col % $tabsize);
			next;
		}
		$out .= $ch;
		$col++;
	}
	return $out;
}
sub copy_spacing {
(my $res = shift) =~ tr/\t/ /c;
return $res;
}
# Return (total length, indent length) of a patch line after dropping
# its one-character diff marker and expanding tabs.
sub line_stats {
	my ($line) = @_;

	$line =~ s/^.//;	# strip the +/-/space diff prefix
	$line = expand_tabs($line);

	my ($indent) = ($line =~ /^(\s*)/);
	return (length($line), length($indent));
}
# Quote/comment state carried between successive sanitise_line() calls:
# '' (none), "'" or '"' (inside a string/char literal), '*/' (inside a
# block comment) or '//' (inside a C99 comment).
my $sanitise_quote = '';
# Reset the sanitiser state at the start of a hunk; $in_comment tells
# whether the hunk begins inside a block comment.
sub sanitise_line_reset {
my ($in_comment) = @_;
if ($in_comment) {
$sanitise_quote = '*/';
} else {
$sanitise_quote = '';
}
}
# Mask comment and string/char contents of a raw patch line so later
# regex checks don't trip over punctuation inside them. Comment bytes
# become $; (the subscript separator) and string bytes become 'X';
# quote/comment state persists across lines via $sanitise_quote.
sub sanitise_line {
my ($line) = @_;
my $res = '';
my $l = '';
my $qlen = 0;
my $off = 0;
my $c;
# Always copy over the diff marker.
$res = substr($line, 0, 1);
for ($off = 1; $off < length($line); $off++) {
$c = substr($line, $off, 1);
# Comments we are whacking completely including the begin
# and end, all to $;.
if ($sanitise_quote eq '' && substr($line, $off, 2) eq '/*') {
$sanitise_quote = '*/';
substr($res, $off, 2, "$;$;");
$off++;
next;
}
if ($sanitise_quote eq '*/' && substr($line, $off, 2) eq '*/') {
$sanitise_quote = '';
substr($res, $off, 2, "$;$;");
$off++;
next;
}
if ($sanitise_quote eq '' && substr($line, $off, 2) eq '//') {
$sanitise_quote = '//';
substr($res, $off, 2, $sanitise_quote);
$off++;
next;
}
# A \ in a string means ignore the next character.
if (($sanitise_quote eq "'" || $sanitise_quote eq '"') &&
$c eq "\\") {
substr($res, $off, 2, 'XX');
$off++;
next;
}
# Regular quotes.
if ($c eq "'" || $c eq '"') {
if ($sanitise_quote eq '') {
$sanitise_quote = $c;
substr($res, $off, 1, $c);
next;
} elsif ($sanitise_quote eq $c) {
$sanitise_quote = '';
}
}
#print "c<$c> SQ<$sanitise_quote>\n";
if ($off != 0 && $sanitise_quote eq '*/' && $c ne "\t") {
substr($res, $off, 1, $;);
} elsif ($off != 0 && $sanitise_quote eq '//' && $c ne "\t") {
substr($res, $off, 1, $;);
} elsif ($off != 0 && $sanitise_quote && $c ne "\t") {
substr($res, $off, 1, 'X');
} else {
substr($res, $off, 1, $c);
}
}
# C99 comments never continue past the end of the line.
if ($sanitise_quote eq '//') {
$sanitise_quote = '';
}
# The pathname on a #include may be surrounded by '<' and '>'.
if ($res =~ /^.\s*\#\s*include\s+\<(.*)\>/) {
my $clean = 'X' x length($1);
$res =~ s@\<.*\>@<$clean>@;
# The whole of a #error is a string.
} elsif ($res =~ /^.\s*\#\s*(?:error|warning)\s+(.*)\b/) {
my $clean = 'X' x length($1);
$res =~ s@(\#\s*(?:error|warning)\s+).*@$1$clean@;
}
# Mask C99 comment text too when the project tolerates them.
if ($allow_c99_comments && $res =~ m@(//.*$)@) {
my $match = $1;
$res =~ s/\Q$match\E/"$;" x length($match)/e;
}
return $res;
}
# Return the raw text of the first quoted string in $line: positions
# come from matching $String against the sanitised line, the bytes from
# the corresponding span of $rawline. "" when no string is present.
sub get_quoted_string {
	my ($line, $rawline) = @_;

	return "" unless defined($line) && defined($rawline);
	return "" unless $line =~ m/($String)/g;
	# @- / @+ hold the offsets of the match just made.
	return substr($rawline, $-[0], $+[0] - $-[0]);
}
# Collect the C statement (and, for conditionals, its condition part)
# that begins at character offset $off on patch line $linenr, pulling in
# further lines as needed ($remain lines are available). Tracks '()',
# '{}' and preprocessor nesting. Returns ($statement, $condition,
# $next_linenr, $remain, $off, $level).
sub ctx_statement_block {
my ($linenr, $remain, $off) = @_;
my $line = $linenr - 1;
my $blk = '';
my $soff = $off;
my $coff = $off - 1;
my $coff_set = 0;
my $loff = 0;
my $type = '';
my $level = 0;
my @stack = ();
my $p;
my $c;
my $len = 0;
my $remainder;
while (1) {
@stack = (['', 0]) if ($#stack == -1);
#warn "CSB: blk<$blk> remain<$remain>\n";
# If we are about to drop off the end, pull in more
# context.
if ($off >= $len) {
for (; $remain > 0; $line++) {
last if (!defined $lines[$line]);
next if ($lines[$line] =~ /^-/);
$remain--;
$loff = $len;
$blk .= $lines[$line] . "\n";
$len = length($blk);
$line++;
last;
}
# Bail if there is no further context.
#warn "CSB: blk<$blk> off<$off> len<$len>\n";
if ($off >= $len) {
last;
}
if ($level == 0 && substr($blk, $off) =~ /^.\s*#\s*define/) {
$level++;
$type = '#';
}
}
$p = $c;
$c = substr($blk, $off, 1);
$remainder = substr($blk, $off);
#warn "CSB: c<$c> type<$type> level<$level> remainder<$remainder> coff_set<$coff_set>\n";
# Handle nested #if/#else.
if ($remainder =~ /^#\s*(?:ifndef|ifdef|if)\s/) {
push(@stack, [ $type, $level ]);
} elsif ($remainder =~ /^#\s*(?:else|elif)\b/) {
($type, $level) = @{$stack[$#stack - 1]};
} elsif ($remainder =~ /^#\s*endif\b/) {
($type, $level) = @{pop(@stack)};
}
# Statement ends at the ';' or a close '}' at the
# outermost level.
if ($level == 0 && $c eq ';') {
last;
}
# An else is really a conditional as long as its not else if
if ($level == 0 && $coff_set == 0 &&
(!defined($p) || $p =~ /(?:\s|\}|\+)/) &&
$remainder =~ /^(else)(?:\s|{)/ &&
$remainder !~ /^else\s+if\b/) {
$coff = $off + length($1) - 1;
$coff_set = 1;
#warn "CSB: mark coff<$coff> soff<$soff> 1<$1>\n";
#warn "[" . substr($blk, $soff, $coff - $soff + 1) . "]\n";
}
if (($type eq '' || $type eq '(') && $c eq '(') {
$level++;
$type = '(';
}
if ($type eq '(' && $c eq ')') {
$level--;
$type = ($level != 0)? '(' : '';
if ($level == 0 && $coff < $soff) {
$coff = $off;
$coff_set = 1;
#warn "CSB: mark coff<$coff>\n";
}
}
if (($type eq '' || $type eq '{') && $c eq '{') {
$level++;
$type = '{';
}
if ($type eq '{' && $c eq '}') {
$level--;
$type = ($level != 0)? '{' : '';
if ($level == 0) {
if (substr($blk, $off + 1, 1) eq ';') {
$off++;
}
last;
}
}
# Preprocessor commands end at the newline unless escaped.
if ($type eq '#' && $c eq "\n" && $p ne "\\") {
$level--;
$type = '';
$off++;
last;
}
$off++;
}
# We are truly at the end, so shuffle to the next line.
if ($off == $len) {
$loff = $len + 1;
$line++;
$remain--;
}
my $statement = substr($blk, $soff, $off - $soff + 1);
my $condition = substr($blk, $soff, $coff - $soff + 1);
#warn "STATEMENT<$statement>\n";
#warn "CONDITION<$condition>\n";
#print "coff<$coff> soff<$off> loff<$loff>\n";
return ($statement, $condition,
$line, $remain + 1, $off - $loff + 1, $level);
}
# Count source lines in a statement blob, ignoring the one-character
# diff prefix on each line and surrounding blank space.
sub statement_lines {
	my ($stmt) = @_;

	$stmt =~ s/(^|\n)./$1/g;	# strip the diff marker per line
	$stmt =~ s/^\s*//;
	$stmt =~ s/\s*$//;

	my $newlines = () = $stmt =~ /\n/g;
	return $newlines + 1;
}
# Number of raw lines spanned by a statement blob: newline count + 1.
sub statement_rawlines {
	my ($stmt) = @_;

	my $newlines = () = $stmt =~ /\n/g;
	return $newlines + 1;
}
# Size of a '{ ... }' statement block: the larger of its line count and
# its ';'-terminated statement count, after stripping the diff prefixes
# and the outer braces.
sub statement_block_size {
	my ($stmt) = @_;

	$stmt =~ s/(^|\n)./$1/g;	# drop diff markers
	$stmt =~ s/^\s*{//;		# drop the opening brace
	$stmt =~ s/}\s*$//;		# and the closing brace
	$stmt =~ s/^\s*//;
	$stmt =~ s/\s*$//;

	my $newlines = () = $stmt =~ /\n/g;
	my $semis = () = $stmt =~ /;/g;
	my $line_cnt = $newlines + 1;

	return $line_cnt > $semis ? $line_cnt : $semis;
}
# Gather a complete compound statement: the leading conditional/block
# pair plus any chained else/do continuations. Returns ($level, $linenr,
# @chunks) where each chunk is [condition, statement].
sub ctx_statement_full {
my ($linenr, $remain, $off) = @_;
my ($statement, $condition, $level);
my (@chunks);
# Grab the first conditional/block pair.
($statement, $condition, $linenr, $remain, $off, $level) =
ctx_statement_block($linenr, $remain, $off);
#print "F: c<$condition> s<$statement> remain<$remain>\n";
push(@chunks, [ $condition, $statement ]);
# Only if/else/do statements can continue across blocks.
if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:if|else|do)\b/s)) {
return ($level, $linenr, @chunks);
}
# Pull in the following conditional/block pairs and see if they
# could continue the statement.
for (;;) {
($statement, $condition, $linenr, $remain, $off, $level) =
ctx_statement_block($linenr, $remain, $off);
#print "C: c<$condition> s<$statement> remain<$remain>\n";
last if (!($remain > 0 && $condition =~ /^(?:\s*\n[+-])*\s*(?:else|do)\b/s));
#print "C: push\n";
push(@chunks, [ $condition, $statement ]);
}
return ($level, $linenr, @chunks);
}
# Walk the raw lines from $linenr collecting a block delimited by $open
# and $close (e.g. '{'/'}'), honouring #if/#else nesting levels. $outer
# restricts the result to outermost-level lines; $off skips that many
# leading characters before delimiter counting starts. Returns
# ($level, @collected_raw_lines).
sub ctx_block_get {
my ($linenr, $remain, $outer, $open, $close, $off) = @_;
my $line;
my $start = $linenr - 1;
my $blk = '';
my @o;
my @c;
my @res = ();
my $level = 0;
my @stack = ($level);
for ($line = $start; $remain > 0; $line++) {
next if ($rawlines[$line] =~ /^-/);
$remain--;
$blk .= $rawlines[$line];
# Handle nested #if/#else.
if ($lines[$line] =~ /^.\s*#\s*(?:ifndef|ifdef|if)\s/) {
push(@stack, $level);
} elsif ($lines[$line] =~ /^.\s*#\s*(?:else|elif)\b/) {
$level = $stack[$#stack - 1];
} elsif ($lines[$line] =~ /^.\s*#\s*endif\b/) {
$level = pop(@stack);
}
# Track delimiter nesting character by character.
foreach my $c (split(//, $lines[$line])) {
##print "C<$c>L<$level><$open$close>O<$off>\n";
if ($off > 0) {
$off--;
next;
}
if ($c eq $close && $level > 0) {
$level--;
last if ($level == 0);
} elsif ($c eq $open) {
$level++;
}
}
if (!$outer || $level <= 1) {
push(@res, $rawlines[$line]);
}
last if ($level == 0);
}
return ($level, @res);
}
# Outermost-level '{ ... }' context lines around the given line.
sub ctx_block_outer {
	my ($linenr, $remain) = @_;
	my ($depth, @body) = ctx_block_get($linenr, $remain, 1, '{', '}', 0);
	return @body;
}
# All '{ ... }' context lines around the given line.
sub ctx_block {
	my ($linenr, $remain) = @_;
	my ($depth, @body) = ctx_block_get($linenr, $remain, 0, '{', '}', 0);
	return @body;
}
# '( ... )' context lines starting $off characters into the given line.
sub ctx_statement {
	my ($linenr, $remain, $off) = @_;
	my ($depth, @body) = ctx_block_get($linenr, $remain, 0, '(', ')', $off);
	return @body;
}
# As ctx_block(), but also returns the final '{}' nesting level.
sub ctx_block_level {
	my ($linenr, $remain) = @_;
	return ctx_block_get($linenr, $remain, 0, '{', '}', 0);
}
# As ctx_statement(), but also returns the final '()' nesting level.
sub ctx_statement_level {
	my ($linenr, $remain, $off) = @_;
	return ctx_block_get($linenr, $remain, 0, '(', ')', $off);
}
# Find the comment (C99 or block) associated with the statement ending
# at $end_line: check the line itself and its immediate neighbours for
# a // comment or a trailing /* */, then scan the context starting at
# $first_line for an enclosing block comment. Returns the comment text
# or '' when none is found.
sub ctx_locate_comment {
my ($first_line, $end_line) = @_;
# If c99 comment on the current line, or the line before or after
my ($current_comment) = ($rawlines[$end_line - 1] =~ m@^\+.*(//.*$)@);
return $current_comment if (defined $current_comment);
($current_comment) = ($rawlines[$end_line - 2] =~ m@^[\+ ].*(//.*$)@);
return $current_comment if (defined $current_comment);
($current_comment) = ($rawlines[$end_line] =~ m@^[\+ ].*(//.*$)@);
return $current_comment if (defined $current_comment);
# Catch a comment on the end of the line itself.
($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*(?:\\\s*)?$@);
return $current_comment if (defined $current_comment);
# Look through the context and try and figure out if there is a
# comment.
my $in_comment = 0;
$current_comment = '';
for (my $linenr = $first_line; $linenr < $end_line; $linenr++) {
my $line = $rawlines[$linenr - 1];
#warn " $line\n";
# A leading ' *' on the first context line implies we started
# inside a block comment.
if ($linenr == $first_line and $line =~ m@^.\s*\*@) {
$in_comment = 1;
}
if ($line =~ m@/\*@) {
$in_comment = 1;
}
# A gap between comments discards the earlier one.
if (!$in_comment && $current_comment ne '') {
$current_comment = '';
}
$current_comment .= $line . "\n" if ($in_comment);
if ($line =~ m@\*/@) {
$in_comment = 0;
}
}
chomp($current_comment);
return($current_comment);
}
# True when a comment is associated with the statement ending at
# $end_line (see ctx_locate_comment for the search strategy).
sub ctx_has_comment {
	my ($first_line, $end_line) = @_;
	return ctx_locate_comment($first_line, $end_line) ne '';
}
# Return the $cnt'th raw line at or after $linenr, skipping '-'
# (removed) diff lines; $cnt == 0 yields the line at $linenr itself.
sub raw_line {
	my ($linenr, $cnt) = @_;

	my $idx = $linenr - 1;
	my $line;
	for ($cnt++; $cnt > 0; ) {
		$line = $rawlines[$idx++];
		# Removed lines don't count toward the requested offset.
		next if defined($line) && $line =~ /^-/;
		$cnt--;
	}
	return $line;
}
# Join the raw lines from $linenr through $lc into one newline-separated
# string (skipping removed lines via raw_line).
sub get_stat_real {
	my ($linenr, $lc) = @_;

	my $stat_real = raw_line($linenr, 0);
	$stat_real .= "\n" . raw_line($_, 0) for (($linenr + 1) .. $lc);
	return $stat_real;
}
# Build a "here" context blob: the $here header followed by $cnt raw
# lines starting at $linenr, one per line.
sub get_stat_here {
	my ($linenr, $cnt, $here) = @_;

	my $herectx = $here . "\n";
	$herectx .= raw_line($linenr, $_) . "\n" for (0 .. $cnt - 1);
	return $herectx;
}
# Render control characters visibly in caret notation (TAB -> ^I) and
# append a literal '$' at the end of the line, like "cat -vet".
sub cat_vet {
	my ($vet) = @_;

	my $out = '';
	# Each match is a run of printable chars followed by one control
	# char (or end of string).
	while ($vet =~ /([^[:cntrl:]]*)([[:cntrl:]]|$)/g) {
		$out .= $1;
		# Shift the control char into the printable range and
		# prefix it with '^'.
		$out .= sprintf("^%c", unpack('C', $2) + 64) if $2 ne '';
	}
	$out =~ s/$/\$/;
	return $out;
}
# State shared across annotate_values() calls:
# $av_preprocessor - currently inside a preprocessor directive
# $av_pending      - type to push when the next '(' is seen
# @av_paren_type   - stack of types saved at '(' / preprocessor nesting
# $av_pend_colon   - meaning of a following ':' (Case/Label/Bitfield/O=none)
my $av_preprocessor = 0;
my $av_pending;
my @av_paren_type;
my $av_pend_colon;
# Reset the annotation state machine (called at the start of each hunk).
sub annotate_reset {
$av_preprocessor = 0;
$av_pending = '_';
@av_paren_type = ('E');
$av_pend_colon = 'O';
}
# Annotate a sanitised statement stream character by character with a
# parallel string of context codes (T=type, V=value, N=expecting value,
# E=statement end, c=cast, C=comma, ...). Returns ($res, $var): $res is
# the per-character annotation, $var marks unary/binary operator
# variants and colon kinds. State persists in the av_* globals above.
sub annotate_values {
my ($stream, $type) = @_;
my $res;
my $var = '_' x length($stream);
my $cur = $stream;
print "$stream\n" if ($dbg_values > 1);
while (length($cur)) {
@av_paren_type = ('E') if ($#av_paren_type < 0);
print " <" . join('', @av_paren_type) .
"> <$type> <$av_pending>" if ($dbg_values > 1);
if ($cur =~ /^(\s+)/o) {
print "WS($1)\n" if ($dbg_values > 1);
if ($1 =~ /\n/ && $av_preprocessor) {
$type = pop(@av_paren_type);
$av_preprocessor = 0;
}
} elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
print "CAST($1)\n" if ($dbg_values > 1);
push(@av_paren_type, $type);
$type = 'c';
} elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
print "DECLARE($1)\n" if ($dbg_values > 1);
$type = 'T';
} elsif ($cur =~ /^($Modifier)\s*/) {
print "MODIFIER($1)\n" if ($dbg_values > 1);
$type = 'T';
} elsif ($cur =~ /^(\#\s*define\s*$Ident)(\(?)/o) {
print "DEFINE($1,$2)\n" if ($dbg_values > 1);
$av_preprocessor = 1;
push(@av_paren_type, $type);
if ($2 ne '') {
$av_pending = 'N';
}
$type = 'E';
} elsif ($cur =~ /^(\#\s*(?:undef\s*$Ident|include\b))/o) {
print "UNDEF($1)\n" if ($dbg_values > 1);
$av_preprocessor = 1;
push(@av_paren_type, $type);
} elsif ($cur =~ /^(\#\s*(?:ifdef|ifndef|if))/o) {
print "PRE_START($1)\n" if ($dbg_values > 1);
$av_preprocessor = 1;
push(@av_paren_type, $type);
push(@av_paren_type, $type);
$type = 'E';
} elsif ($cur =~ /^(\#\s*(?:else|elif))/o) {
print "PRE_RESTART($1)\n" if ($dbg_values > 1);
$av_preprocessor = 1;
push(@av_paren_type, $av_paren_type[$#av_paren_type]);
$type = 'E';
} elsif ($cur =~ /^(\#\s*(?:endif))/o) {
print "PRE_END($1)\n" if ($dbg_values > 1);
$av_preprocessor = 1;
# Assume all arms of the conditional end as this
# one does, and continue as if the #endif was not here.
pop(@av_paren_type);
push(@av_paren_type, $type);
$type = 'E';
} elsif ($cur =~ /^(\\\n)/o) {
print "PRECONT($1)\n" if ($dbg_values > 1);
} elsif ($cur =~ /^(__attribute__)\s*\(?/o) {
print "ATTR($1)\n" if ($dbg_values > 1);
$av_pending = $type;
$type = 'N';
} elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
print "SIZEOF($1)\n" if ($dbg_values > 1);
if (defined $2) {
$av_pending = 'V';
}
$type = 'N';
} elsif ($cur =~ /^(if|while|for)\b/o) {
print "COND($1)\n" if ($dbg_values > 1);
$av_pending = 'E';
$type = 'N';
} elsif ($cur =~/^(case)/o) {
print "CASE($1)\n" if ($dbg_values > 1);
$av_pend_colon = 'C';
$type = 'N';
} elsif ($cur =~/^(return|else|goto|typeof|__typeof__)\b/o) {
print "KEYWORD($1)\n" if ($dbg_values > 1);
$type = 'N';
} elsif ($cur =~ /^(\()/o) {
print "PAREN('$1')\n" if ($dbg_values > 1);
push(@av_paren_type, $av_pending);
$av_pending = '_';
$type = 'N';
} elsif ($cur =~ /^(\))/o) {
my $new_type = pop(@av_paren_type);
if ($new_type ne '_') {
$type = $new_type;
print "PAREN('$1') -> $type\n"
if ($dbg_values > 1);
} else {
print "PAREN('$1')\n" if ($dbg_values > 1);
}
} elsif ($cur =~ /^($Ident)\s*\(/o) {
print "FUNC($1)\n" if ($dbg_values > 1);
$type = 'V';
$av_pending = 'V';
} elsif ($cur =~ /^($Ident\s*):(?:\s*\d+\s*(,|=|;))?/) {
if (defined $2 && $type eq 'C' || $type eq 'T') {
$av_pend_colon = 'B';
} elsif ($type eq 'E') {
$av_pend_colon = 'L';
}
print "IDENT_COLON($1,$type>$av_pend_colon)\n" if ($dbg_values > 1);
$type = 'V';
} elsif ($cur =~ /^($Ident|$Constant)/o) {
print "IDENT($1)\n" if ($dbg_values > 1);
$type = 'V';
} elsif ($cur =~ /^($Assignment)/o) {
print "ASSIGN($1)\n" if ($dbg_values > 1);
$type = 'N';
} elsif ($cur =~/^(;|{|})/) {
print "END($1)\n" if ($dbg_values > 1);
$type = 'E';
$av_pend_colon = 'O';
} elsif ($cur =~/^(,)/) {
print "COMMA($1)\n" if ($dbg_values > 1);
$type = 'C';
} elsif ($cur =~ /^(\?)/o) {
print "QUESTION($1)\n" if ($dbg_values > 1);
$type = 'N';
} elsif ($cur =~ /^(:)/o) {
print "COLON($1,$av_pend_colon)\n" if ($dbg_values > 1);
substr($var, length($res), 1, $av_pend_colon);
if ($av_pend_colon eq 'C' || $av_pend_colon eq 'L') {
$type = 'E';
} else {
$type = 'N';
}
$av_pend_colon = 'O';
} elsif ($cur =~ /^(\[)/o) {
print "CLOSE($1)\n" if ($dbg_values > 1);
$type = 'N';
} elsif ($cur =~ /^(-(?![->])|\+(?!\+)|\*|\&\&|\&)/o) {
my $variant;
print "OPV($1)\n" if ($dbg_values > 1);
if ($type eq 'V') {
$variant = 'B';
} else {
$variant = 'U';
}
substr($var, length($res), 1, $variant);
$type = 'N';
} elsif ($cur =~ /^($Operators)/o) {
print "OP($1)\n" if ($dbg_values > 1);
if ($1 ne '++' && $1 ne '--') {
$type = 'N';
}
} elsif ($cur =~ /(^.)/o) {
print "C($1)\n" if ($dbg_values > 1);
}
# Consume the matched text and emit its annotation.
if (defined $1) {
$cur = substr($cur, length($1));
$res .= $type x length($1);
}
}
return ($res, $var);
}
# Record a candidate new type or modifier seen in the patch, unless it
# matches keywords/forms that can never be a type. Candidates go onto
# @typeListFile/@modifierListFile and the matcher regexes are rebuilt
# via build_types().
sub possible {
my ($possible, $line) = @_;
# Words that are definitely not types: known types/modifiers/storage
# classes themselves, statement keywords, preprocessor tokens, etc.
my $notPermitted = qr{(?:
^(?:
$Modifier|
$Storage|
$Type|
DEFINE_\S+
)$|
^(?:
goto|
return|
case|
else|
asm|__asm__|
do|
\#|
\#\#|
)(?:\s|$)|
^(?:typedef|struct|enum)\b
)}x;
warn "CHECK<$possible> ($line)\n" if ($dbg_possible > 2);
if ($possible !~ $notPermitted) {
# Check for modifiers.
$possible =~ s/\s*$Storage\s*//g;
$possible =~ s/\s*$Sparse\s*//g;
if ($possible =~ /^\s*$/) {
} elsif ($possible =~ /\s/) {
# Multi-word: strip the known type and record the
# remaining words as modifiers.
$possible =~ s/\s*$Type\s*//g;
for my $modifier (split(' ', $possible)) {
if ($modifier !~ $notPermitted) {
warn "MODIFIER: $modifier ($possible) ($line)\n" if ($dbg_possible);
push(@modifierListFile, $modifier);
}
}
} else {
warn "POSSIBLE: $possible ($line)\n" if ($dbg_possible);
push(@typeListFile, $possible);
}
build_types();
} else {
warn "NOTPOSS: $possible ($line)\n" if ($dbg_possible > 1);
}
}
# Prefix prepended to every report line (file:line information).
my $prefix = '';
# Decide whether a message $type should be emitted: when --types was
# given only those types show; otherwise everything not in --ignore.
sub show_type {
my ($type) = @_;
$type =~ tr/[a-z]/[A-Z]/;
return defined $use_type{$type} if (scalar keys %use_type > 0);
return !defined $ignore_type{$type};
}
# Queue one diagnostic at $level ("ERROR"/"WARNING"/"CHECK") with
# message type $type onto @report. Honours type filtering, --test-only,
# --color, --show-types, --showfile and --terse. Returns 1 when the
# message was queued, 0 when suppressed.
sub report {
my ($level, $type, $msg) = @_;
if (!show_type($type) ||
(defined $tst_only && $msg !~ /\Q$tst_only\E/)) {
return 0;
}
my $output = '';
if ($color) {
if ($level eq 'ERROR') {
$output .= RED;
} elsif ($level eq 'WARNING') {
$output .= YELLOW;
} else {
$output .= GREEN;
}
}
$output .= $prefix . $level . ':';
if ($show_types) {
$output .= BLUE if ($color);
$output .= "$type:";
}
$output .= RESET if ($color);
$output .= ' ' . $msg . "\n";
if ($showfile) {
# --showfile: drop the duplicated source line from the output.
my @lines = split("\n", $output, -1);
splice(@lines, 1, 1);
$output = join("\n", @lines);
}
# --terse: keep only the first line of the message.
$output = (split('\n', $output))[0] . "\n" if ($terse);
push(our @report, $output);
return 1;
}
# Return the report lines accumulated for the current file/commit.
sub report_dump {
our @report;
}
# Adjust the "+start,count" half of a unified-diff hunk header in place:
# shift the start line by $offset and the line count by $length.
sub fixup_current_range {
	my ($lineRef, $offset, $length) = @_;

	if ($$lineRef =~ /^\@\@ -\d+,\d+ \+(\d+),(\d+) \@\@/) {
		my ($start, $count) = ($1, $2);
		my $new_start = $start + $offset;
		my $new_count = $count + $length;
		$$lineRef =~ s/\+$start,$count \@\@/\+$new_start,$new_count \@\@/;
	}
}
# Apply the queued --fix insertions/deletions to the patch in $linesRef,
# rewriting each hunk's "+start,count" header (via fixup_current_range)
# so line offsets and counts stay consistent. $insertedRef/$deletedRef
# hold {LINENR, LINE} records sorted by LINENR. Returns the fixed-up
# list of lines.
sub fix_inserted_deleted_lines {
my ($linesRef, $insertedRef, $deletedRef) = @_;
my $range_last_linenr = 0;
my $delta_offset = 0;
my $old_linenr = 0;
my $new_linenr = 0;
my $next_insert = 0;
my $next_delete = 0;
my @lines = ();
my $inserted = @{$insertedRef}[$next_insert++];
my $deleted = @{$deletedRef}[$next_delete++];
foreach my $old_line (@{$linesRef}) {
my $save_line = 1;
my $line = $old_line; #don't modify the array
if ($line =~ /^(?:\+\+\+|\-\-\-)\s+\S+/) { #new filename
$delta_offset = 0;
} elsif ($line =~ /^\@\@ -\d+,\d+ \+\d+,\d+ \@\@/) { #new hunk
# Remember where this hunk header landed so later
# insert/delete fixups can patch its counts.
$range_last_linenr = $new_linenr;
fixup_current_range(\$line, $delta_offset, 0);
}
# Drop every queued deletion matching the current old line.
while (defined($deleted) && ${$deleted}{'LINENR'} == $old_linenr) {
$deleted = @{$deletedRef}[$next_delete++];
$save_line = 0;
fixup_current_range(\$lines[$range_last_linenr], $delta_offset--, -1);
}
# Emit every queued insertion attached to the current old line.
while (defined($inserted) && ${$inserted}{'LINENR'} == $old_linenr) {
push(@lines, ${$inserted}{'LINE'});
$inserted = @{$insertedRef}[$next_insert++];
$new_linenr++;
fixup_current_range(\$lines[$range_last_linenr], $delta_offset++, 1);
}
if ($save_line) {
push(@lines, $line);
$new_linenr++;
}
$old_linenr++;
}
return @lines;
}
# Queue $line for insertion at original line $linenr when --fix
# rewrites the patch (consumed by fix_inserted_deleted_lines).
sub fix_insert_line {
	my ($linenr, $line) = @_;
	push(@fixed_inserted, { LINENR => $linenr, LINE => $line });
}
# Queue original line $linenr (content $line) for deletion when --fix
# rewrites the patch (consumed by fix_inserted_deleted_lines).
sub fix_delete_line {
	my ($linenr, $line) = @_;
	push(@fixed_deleted, { LINENR => $linenr, LINE => $line });
}
# Emit an ERROR-level report; bumps the error count and marks the patch
# unclean. Returns 1 when the message was actually shown.
sub ERROR {
	my ($type, $msg) = @_;

	return 0 unless report("ERROR", $type, $msg);
	our $clean = 0;
	our $cnt_error++;
	return 1;
}
# Emit a WARNING-level report; bumps the warning count and marks the
# patch unclean. Returns 1 when the message was actually shown.
sub WARN {
	my ($type, $msg) = @_;

	return 0 unless report("WARNING", $type, $msg);
	our $clean = 0;
	our $cnt_warn++;
	return 1;
}
# Emit a CHECK-level report, but only when --subjective/--strict mode
# ($check) is enabled. Returns 1 when the message was actually shown.
sub CHK {
	my ($type, $msg) = @_;

	return 0 unless $check && report("CHECK", $type, $msg);
	our $clean = 0;
	our $cnt_chk++;
	return 1;
}
# Warn when changelog text references a file by absolute path that also
# exists relative to the tree root — a relative path should be used
# instead (paths already prefixed with ".../" are accepted).
sub check_absolute_file {
my ($absolute, $herecurr) = @_;
my $file = $absolute;
##print "absolute<$absolute>\n";
# See if any suffix of this path is a path within the tree.
while ($file =~ s@^[^/]*/@@) {
if (-f "$root/$file") {
##print "file<$file>\n";
last;
}
}
# '-f _' reuses the stat buffer from the last -f test above.
if (! -f _) {
return 0;
}
# It is, so see if the prefix is acceptable.
my $prefix = $absolute;
substr($prefix, -length($file)) = '';
##print "prefix<$prefix>\n";
if ($prefix ne ".../") {
WARN("USE_RELATIVE_PATH",
"use relative pathname instead of absolute in changelog text\n" . $herecurr);
}
}
# Strip leading and trailing whitespace from $text.
sub trim {
	my ($text) = @_;
	$text =~ s/^\s+//;
	$text =~ s/\s+$//;
	return $text;
}
# Strip leading whitespace from $text.
sub ltrim {
	my ($text) = @_;
	$text =~ s/\A\s+//;
	return $text;
}
# Strip trailing whitespace from $text.
sub rtrim {
	my ($text) = @_;
	$text =~ s/\s+$//;
	return $text;
}
# Return a copy of $string with every match of the pattern $find
# replaced by $replace ($find is interpreted as a regex).
sub string_find_replace {
	my ($string, $find, $replace) = @_;
	(my $result = $string) =~ s/$find/$replace/g;
	return $result;
}
# Convert leading spaces in $leading to tabs ($tabsize columns each)
# and drop stray spaces sitting directly before a tab.
sub tabify {
	my ($leading) = @_;

	my $src_indent = $tabsize;
	my $max_pad = $src_indent - 1;
	my $tab_as_spaces = " " x $src_indent;

	# Collapse each full run of $tabsize spaces (after any existing
	# tabs) into one tab; repeat until nothing changes.
	1 while $leading =~ s@^([\t]*)$tab_as_spaces@$1\t@g;
	# Remove up to $max_pad spaces immediately preceding a tab.
	1 while $leading =~ s@^([\t]*)( {1,$max_pad})\t@$1\t@g;

	return "$leading";
}
# Column (tab-expanded, 1-based) just past the last unmatched '(' on
# $line, used for open-parenthesis alignment checks. Returns -1 when
# the line has no '(' or its parens are already balanced/overclosed.
sub pos_last_openparen {
my ($line) = @_;
my $pos = 0;
# tr in scalar context counts occurrences.
my $opens = $line =~ tr/\(/\(/;
my $closes = $line =~ tr/\)/\)/;
my $last_openparen = 0;
if (($opens == 0) || ($closes >= $opens)) {
return -1;
}
my $len = length($line);
for ($pos = 0; $pos < $len; $pos++) {
my $string = substr($line, $pos);
# Skip over complete argument/balanced-paren groups.
if ($string =~ /^($FuncArg|$balanced_parens)/) {
$pos += length($1) - 1;
} elsif (substr($line, $pos, 1) eq '(') {
$last_openparen = $pos;
} elsif (index($string, '(') == -1) {
last;
}
}
return length(expand_tabs(substr($line, 0, $last_openparen))) + 1;
}
sub process {
my $filename = shift;
my $linenr=0;
my $prevline="";
my $prevrawline="";
my $stashline="";
my $stashrawline="";
my $length;
my $indent;
my $previndent=0;
my $stashindent=0;
our $clean = 1;
my $signoff = 0;
my $author = '';
my $authorsignoff = 0;
my $is_patch = 0;
my $is_binding_patch = -1;
my $in_header_lines = $file ? 0 : 1;
my $in_commit_log = 0; #Scanning lines before patch
my $has_patch_separator = 0; #Found a --- line
my $has_commit_log = 0; #Encountered lines before patch
my $commit_log_lines = 0; #Number of commit log lines
my $commit_log_possible_stack_dump = 0;
my $commit_log_long_line = 0;
my $commit_log_has_diff = 0;
my $reported_maintainer_file = 0;
my $non_utf8_charset = 0;
my $last_blank_line = 0;
my $last_coalesced_string_linenr = -1;
our @report = ();
our $cnt_lines = 0;
our $cnt_error = 0;
our $cnt_warn = 0;
our $cnt_chk = 0;
# Trace the real file/line as we go.
my $realfile = '';
my $realline = 0;
my $realcnt = 0;
my $here = '';
my $context_function; #undef'd unless there's a known function
my $in_comment = 0;
my $comment_edge = 0;
my $first_line = 0;
my $p1_prefix = '';
my $prev_values = 'E';
# suppression flags
my %suppress_ifbraces;
my %suppress_whiletrailers;
my %suppress_export;
my $suppress_statement = 0;
my %signatures = ();
# Pre-scan the patch sanitizing the lines.
# Pre-scan the patch looking for any __setup documentation.
#
my @setup_docs = ();
my $setup_docs = 0;
my $camelcase_file_seeded = 0;
my $checklicenseline = 1;
sanitise_line_reset();
my $line;
foreach my $rawline (@rawlines) {
$linenr++;
$line = $rawline;
push(@fixed, $rawline) if ($fix);
if ($rawline=~/^\+\+\+\s+(\S+)/) {
$setup_docs = 0;
if ($1 =~ m@Documentation/admin-guide/kernel-parameters.txt$@) {
$setup_docs = 1;
}
#next;
}
if ($rawline =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
$realline=$1-1;
if (defined $2) {
$realcnt=$3+1;
} else {
$realcnt=1+1;
}
$in_comment = 0;
# Guestimate if this is a continuing comment. Run
# the context looking for a comment "edge". If this
# edge is a close comment then we must be in a comment
# at context start.
my $edge;
my $cnt = $realcnt;
for (my $ln = $linenr + 1; $cnt > 0; $ln++) {
next if (defined $rawlines[$ln - 1] &&
$rawlines[$ln - 1] =~ /^-/);
$cnt--;
#print "RAW<$rawlines[$ln - 1]>\n";
last if (!defined $rawlines[$ln - 1]);
if ($rawlines[$ln - 1] =~ m@(/\*|\*/)@ &&
$rawlines[$ln - 1] !~ m@"[^"]*(?:/\*|\*/)[^"]*"@) {
($edge) = $1;
last;
}
}
if (defined $edge && $edge eq '*/') {
$in_comment = 1;
}
# Guestimate if this is a continuing comment. If this
# is the start of a diff block and this line starts
# ' *' then it is very likely a comment.
if (!defined $edge &&
$rawlines[$linenr] =~ m@^.\s*(?:\*\*+| \*)(?:\s|$)@)
{
$in_comment = 1;
}
##print "COMMENT:$in_comment edge<$edge> $rawline\n";
sanitise_line_reset($in_comment);
} elsif ($realcnt && $rawline =~ /^(?:\+| |$)/) {
# Standardise the strings and chars within the input to
# simplify matching -- only bother with positive lines.
$line = sanitise_line($rawline);
}
push(@lines, $line);
if ($realcnt > 1) {
$realcnt-- if ($line =~ /^(?:\+| |$)/);
} else {
$realcnt = 0;
}
#print "==>$rawline\n";
#print "-->$line\n";
if ($setup_docs && $line =~ /^\+/) {
push(@setup_docs, $line);
}
}
$prefix = '';
$realcnt = 0;
$linenr = 0;
$fixlinenr = -1;
foreach my $line (@lines) {
$linenr++;
$fixlinenr++;
my $sline = $line; #copy of $line
$sline =~ s/$;/ /g; #with comments as spaces
my $rawline = $rawlines[$linenr - 1];
# check if it's a mode change, rename or start of a patch
if (!$in_commit_log &&
($line =~ /^ mode change [0-7]+ => [0-7]+ \S+\s*$/ ||
($line =~ /^rename (?:from|to) \S+\s*$/ ||
$line =~ /^diff --git a\/[\w\/\.\_\-]+ b\/\S+\s*$/))) {
$is_patch = 1;
}
#extract the line range in the file after the patch is applied
if (!$in_commit_log &&
$line =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@(.*)/) {
my $context = $4;
$is_patch = 1;
$first_line = $linenr + 1;
$realline=$1-1;
if (defined $2) {
$realcnt=$3+1;
} else {
$realcnt=1+1;
}
annotate_reset();
$prev_values = 'E';
%suppress_ifbraces = ();
%suppress_whiletrailers = ();
%suppress_export = ();
$suppress_statement = 0;
if ($context =~ /\b(\w+)\s*\(/) {
$context_function = $1;
} else {
undef $context_function;
}
next;
# track the line number as we move through the hunk, note that
# new versions of GNU diff omit the leading space on completely
# blank context lines so we need to count that too.
} elsif ($line =~ /^( |\+|$)/) {
$realline++;
$realcnt-- if ($realcnt != 0);
# Measure the line length and indent.
($length, $indent) = line_stats($rawline);
# Track the previous line.
($prevline, $stashline) = ($stashline, $line);
($previndent, $stashindent) = ($stashindent, $indent);
($prevrawline, $stashrawline) = ($stashrawline, $rawline);
#warn "line<$line>\n";
} elsif ($realcnt == 1) {
$realcnt--;
}
my $hunk_line = ($realcnt != 0);
$here = "#$linenr: " if (!$file);
$here = "#$realline: " if ($file);
my $found_file = 0;
# extract the filename as it passes
if ($line =~ /^diff --git.*?(\S+)$/) {
$realfile = $1;
$realfile =~ s@^([^/]*)/@@ if (!$file);
$in_commit_log = 0;
$found_file = 1;
} elsif ($line =~ /^\+\+\+\s+(\S+)/) {
$realfile = $1;
$realfile =~ s@^([^/]*)/@@ if (!$file);
$in_commit_log = 0;
$p1_prefix = $1;
if (!$file && $tree && $p1_prefix ne '' &&
-e "$root/$p1_prefix") {
WARN("PATCH_PREFIX",
"patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n");
}
if ($realfile =~ m@^include/asm/@) {
ERROR("MODIFIED_INCLUDE_ASM",
"do not modify files in include/asm, change architecture specific files in include/asm-<architecture>\n" . "$here$rawline\n");
}
$found_file = 1;
}
my $skipme = 0;
foreach (@exclude) {
if ($realfile =~ m@^(?:$_/)@) {
$skipme = 1;
}
}
if ($skipme) {
next;
}
#make up the handle for any error we report on this line
if ($showfile) {
$prefix = "$realfile:$realline: "
} elsif ($emacs) {
if ($file) {
$prefix = "$filename:$realline: ";
} else {
$prefix = "$filename:$linenr: ";
}
}
if ($found_file) {
if (is_maintained_obsolete($realfile)) {
WARN("OBSOLETE",
"$realfile is marked as 'obsolete' in the MAINTAINERS hierarchy. No unnecessary modifications please.\n");
}
if ($realfile =~ m@^(?:drivers/net/|net/|drivers/staging/)@) {
$check = 1;
} else {
$check = $check_orig;
}
$checklicenseline = 1;
if ($realfile !~ /^MAINTAINERS/) {
my $last_binding_patch = $is_binding_patch;
$is_binding_patch = () = $realfile =~ m@^(?:Documentation/devicetree/|include/dt-bindings/)@;
if (($last_binding_patch != -1) &&
($last_binding_patch ^ $is_binding_patch)) {
WARN("DT_SPLIT_BINDING_PATCH",
"DT binding docs and includes should be a separate patch. See: Documentation/devicetree/bindings/submitting-patches.rst\n");
}
}
next;
}
$here .= "FILE: $realfile:$realline:" if ($realcnt != 0);
my $hereline = "$here\n$rawline\n";
my $herecurr = "$here\n$rawline\n";
my $hereprev = "$here\n$prevrawline\n$rawline\n";
$cnt_lines++ if ($realcnt != 0);
# Verify the existence of a commit log if appropriate
# 2 is used because a $signature is counted in $commit_log_lines
if ($in_commit_log) {
if ($line !~ /^\s*$/) {
$commit_log_lines++; #could be a $signature
}
} elsif ($has_commit_log && $commit_log_lines < 2) {
WARN("COMMIT_MESSAGE",
"Missing commit description - Add an appropriate one\n");
$commit_log_lines = 2; #warn only once
}
# Check if the commit log has what seems like a diff which can confuse patch
if ($in_commit_log && !$commit_log_has_diff &&
(($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
$line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
$line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
$line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
ERROR("DIFF_IN_COMMIT_MSG",
"Avoid using diff content in the commit message - patch(1) might not work\n" . $herecurr);
$commit_log_has_diff = 1;
}
# Check for incorrect file permissions
if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
my $permhere = $here . "FILE: $realfile\n";
if ($realfile !~ m@scripts/@ &&
$realfile !~ /\.(py|pl|awk|sh)$/) {
ERROR("EXECUTE_PERMISSIONS",
"do not set execute permissions for source files\n" . $permhere);
}
}
# Check the patch for a From:
if (decode("MIME-Header", $line) =~ /^From:\s*(.*)/) {
$author = $1;
$author = encode("utf8", $author) if ($line =~ /=\?utf-8\?/i);
$author =~ s/"//g;
}
# Check the patch for a signoff:
if ($line =~ /^\s*signed-off-by:/i) {
$signoff++;
$in_commit_log = 0;
if ($author ne '') {
my $l = $line;
$l =~ s/"//g;
if ($l =~ /^\s*signed-off-by:\s*\Q$author\E/i) {
$authorsignoff = 1;
}
}
}
# Check for patch separator
if ($line =~ /^---$/) {
$has_patch_separator = 1;
$in_commit_log = 0;
}
# Check if CODEOWNERS is being updated. If so, there's probably no need to
# emit the "does CODEOWNERS need updating?" message on file add/move/delete
if ($line =~ /^\s*CODEOWNERS\s*\|/) {
$reported_maintainer_file = 1;
}
# Check signature styles
if (!$in_header_lines &&
$line =~ /^(\s*)([a-z0-9_-]+by:|$signature_tags)(\s*)(.*)/i) {
my $space_before = $1;
my $sign_off = $2;
my $space_after = $3;
my $email = $4;
my $ucfirst_sign_off = ucfirst(lc($sign_off));
if ($sign_off !~ /$signature_tags/) {
WARN("BAD_SIGN_OFF",
"Non-standard signature: $sign_off\n" . $herecurr);
}
if (defined $space_before && $space_before ne "") {
if (WARN("BAD_SIGN_OFF",
"Do not use whitespace before $ucfirst_sign_off\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =
"$ucfirst_sign_off $email";
}
}
if ($sign_off =~ /-by:$/i && $sign_off ne $ucfirst_sign_off) {
if (WARN("BAD_SIGN_OFF",
"'$ucfirst_sign_off' is the preferred signature form\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =
"$ucfirst_sign_off $email";
}
}
if (!defined $space_after || $space_after ne " ") {
if (WARN("BAD_SIGN_OFF",
"Use a single space after $ucfirst_sign_off\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =
"$ucfirst_sign_off $email";
}
}
my ($email_name, $email_address, $comment) = parse_email($email);
my $suggested_email = format_email(($email_name, $email_address));
if ($suggested_email eq "") {
ERROR("BAD_SIGN_OFF",
"Unrecognized email address: '$email'\n" . $herecurr);
} else {
my $dequoted = $suggested_email;
$dequoted =~ s/^"//;
$dequoted =~ s/" </ </;
# Don't force email to have quotes
# Allow just an angle bracketed address
if ("$dequoted$comment" ne $email &&
"<$email_address>$comment" ne $email &&
"$suggested_email$comment" ne $email) {
WARN("BAD_SIGN_OFF",
"email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
}
}
# Check for duplicate signatures
my $sig_nospace = $line;
$sig_nospace =~ s/\s//g;
$sig_nospace = lc($sig_nospace);
if (defined $signatures{$sig_nospace}) {
WARN("BAD_SIGN_OFF",
"Duplicate signature\n" . $herecurr);
} else {
$signatures{$sig_nospace} = 1;
}
# Check Co-developed-by: immediately followed by Signed-off-by: with same name and email
if ($sign_off =~ /^co-developed-by:$/i) {
if ($email eq $author) {
WARN("BAD_SIGN_OFF",
"Co-developed-by: should not be used to attribute nominal patch author '$author'\n" . "$here\n" . $rawline);
}
if (!defined $lines[$linenr]) {
WARN("BAD_SIGN_OFF",
"Co-developed-by: must be immediately followed by Signed-off-by:\n" . "$here\n" . $rawline);
} elsif ($rawlines[$linenr] !~ /^\s*signed-off-by:\s*(.*)/i) {
WARN("BAD_SIGN_OFF",
"Co-developed-by: must be immediately followed by Signed-off-by:\n" . "$here\n" . $rawline . "\n" .$rawlines[$linenr]);
} elsif ($1 ne $email) {
WARN("BAD_SIGN_OFF",
"Co-developed-by and Signed-off-by: name/email do not match \n" . "$here\n" . $rawline . "\n" .$rawlines[$linenr]);
}
}
}
# Check email subject for common tools that don't need to be mentioned
if ($in_header_lines &&
$line =~ /^Subject:.*\b(?:checkpatch|sparse|smatch)\b[^:]/i) {
WARN("EMAIL_SUBJECT",
"A patch subject line should describe the change not the tool that found it\n" . $herecurr);
}
# Check for Gerrit Change-Ids not in any patch context
if ($realfile eq '' && !$has_patch_separator && $line =~ /^\s*change-id:/i) {
ERROR("GERRIT_CHANGE_ID",
"Remove Gerrit Change-Id's before submitting upstream\n" . $herecurr);
}
# Check if the commit log is in a possible stack dump
if ($in_commit_log && !$commit_log_possible_stack_dump &&
($line =~ /^\s*(?:WARNING:|BUG:)/ ||
$line =~ /^\s*\[\s*\d+\.\d{6,6}\s*\]/ ||
# timestamp
$line =~ /^\s*\[\<[0-9a-fA-F]{8,}\>\]/) ||
$line =~ /^(?:\s+\w+:\s+[0-9a-fA-F]+){3,3}/ ||
$line =~ /^\s*\#\d+\s*\[[0-9a-fA-F]+\]\s*\w+ at [0-9a-fA-F]+/) {
# stack dump address styles
$commit_log_possible_stack_dump = 1;
}
# Check for line lengths > 75 in commit log, warn once
if ($in_commit_log && !$commit_log_long_line &&
length($line) > 75 &&
!($line =~ /^\s*[a-zA-Z0-9_\/\.]+\s+\|\s+\d+/ ||
# file delta changes
$line =~ /^\s*(?:[\w\.\-]+\/)++[\w\.\-]+:/ ||
# filename then :
$line =~ /^\s*(?:Fixes:|Link:)/i ||
# A Fixes: or Link: line
$commit_log_possible_stack_dump)) {
WARN("COMMIT_LOG_LONG_LINE",
"Possible unwrapped commit description (prefer a maximum 75 chars per line)\n" . $herecurr);
$commit_log_long_line = 1;
}
# Reset possible stack dump if a blank line is found
if ($in_commit_log && $commit_log_possible_stack_dump &&
$line =~ /^\s*$/) {
$commit_log_possible_stack_dump = 0;
}
# Check for git id commit length and improperly formed commit descriptions
if ($in_commit_log && !$commit_log_possible_stack_dump &&
$line !~ /^\s*(?:Link|Patchwork|http|https|BugLink|base-commit):/i &&
$line !~ /^This reverts commit [0-9a-f]{7,40}/ &&
($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i &&
$line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
$line !~ /\bfixes:\s*[0-9a-f]{12,40}/i))) {
my $init_char = "c";
my $orig_commit = "";
my $short = 1;
my $long = 0;
my $case = 1;
my $space = 1;
my $hasdesc = 0;
my $hasparens = 0;
my $id = '0123456789ab';
my $orig_desc = "commit description";
my $description = "";
if ($line =~ /\b(c)ommit\s+([0-9a-f]{5,})\b/i) {
$init_char = $1;
$orig_commit = lc($2);
} elsif ($line =~ /\b([0-9a-f]{12,40})\b/i) {
$orig_commit = lc($1);
}
$short = 0 if ($line =~ /\bcommit\s+[0-9a-f]{12,40}/i);
$long = 1 if ($line =~ /\bcommit\s+[0-9a-f]{41,}/i);
$space = 0 if ($line =~ /\bcommit [0-9a-f]/i);
$case = 0 if ($line =~ /\b[Cc]ommit\s+[0-9a-f]{5,40}[^A-F]/);
if ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)"\)/i) {
$orig_desc = $1;
$hasparens = 1;
} elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s*$/i &&
defined $rawlines[$linenr] &&
$rawlines[$linenr] =~ /^\s*\("([^"]+)"\)/) {
$orig_desc = $1;
$hasparens = 1;
} elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("[^"]+$/i &&
defined $rawlines[$linenr] &&
$rawlines[$linenr] =~ /^\s*[^"]+"\)/) {
$line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)$/i;
$orig_desc = $1;
$rawlines[$linenr] =~ /^\s*([^"]+)"\)/;
$orig_desc .= " " . $1;
$hasparens = 1;
}
($id, $description) = git_commit_info($orig_commit,
$id, $orig_desc);
if (defined($id) &&
($short || $long || $space || $case || ($orig_desc ne $description) || !$hasparens)) {
ERROR("GIT_COMMIT_ID",
"Please use git commit description style 'commit <12+ chars of sha1> (\"<title line>\")' - ie: '${init_char}ommit $id (\"$description\")'\n" . $herecurr);
}
}
# Check for added, moved or deleted files
if (!$reported_maintainer_file && !$in_commit_log &&
($line =~ /^(?:new|deleted) file mode\s*\d+\s*$/ ||
$line =~ /^rename (?:from|to) [\w\/\.\-]+\s*$/ ||
($line =~ /\{\s*([\w\/\.\-]*)\s*\=\>\s*([\w\/\.\-]*)\s*\}/ &&
(defined($1) || defined($2))))) {
$is_patch = 1;
$reported_maintainer_file = 1;
WARN("FILE_PATH_CHANGES",
"added, moved or deleted file(s), does CODEOWNERS need updating?\n" . $herecurr);
}
# Check for adding new DT bindings not in schema format
if (!$in_commit_log &&
($line =~ /^new file mode\s*\d+\s*$/) &&
($realfile =~ m@^Documentation/devicetree/bindings/.*\.txt$@)) {
WARN("DT_SCHEMA_BINDING_PATCH",
"DT bindings should be in DT schema format. See: Documentation/devicetree/writing-schema.rst\n");
}
# Check for wrappage within a valid hunk of the file
if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) {
ERROR("CORRUPTED_PATCH",
"patch seems to be corrupt (line wrapped?)\n" .
$herecurr) if (!$emitted_corrupt++);
}
# UTF-8 regex found at path_to_url
if (($realfile =~ /^$/ || $line =~ /^\+/) &&
$rawline !~ m/^$UTF8*$/) {
my ($utf8_prefix) = ($rawline =~ /^($UTF8*)/);
my $blank = copy_spacing($rawline);
my $ptr = substr($blank, 0, length($utf8_prefix)) . "^";
my $hereptr = "$hereline$ptr\n";
CHK("INVALID_UTF8",
"Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $hereptr);
}
# Check if it's the start of a commit log
# (not a header line and we haven't seen the patch filename)
if ($in_header_lines && $realfile =~ /^$/ &&
!($rawline =~ /^\s+(?:\S|$)/ ||
$rawline =~ /^(?:commit\b|from\b|[\w-]+:)/i)) {
$in_header_lines = 0;
$in_commit_log = 1;
$has_commit_log = 1;
}
# Check if there is UTF-8 in a commit log when a mail header has explicitly
# declined it, i.e defined some charset where it is missing.
if ($in_header_lines &&
$rawline =~ /^Content-Type:.+charset="(.+)".*$/ &&
$1 !~ /utf-8/i) {
$non_utf8_charset = 1;
}
if ($in_commit_log && $non_utf8_charset && $realfile =~ /^$/ &&
$rawline =~ /$NON_ASCII_UTF8/) {
WARN("UTF8_BEFORE_PATCH",
"8-bit UTF-8 used in possible commit log\n" . $herecurr);
}
# Check for absolute kernel paths in commit message
if ($tree && $in_commit_log) {
while ($line =~ m{(?:^|\s)(/\S*)}g) {
my $file = $1;
if ($file =~ m{^(.*?)(?::\d+)+:?$} &&
check_absolute_file($1, $herecurr)) {
#
} else {
check_absolute_file($file, $herecurr);
}
}
}
# Check for various typo / spelling mistakes
if (defined($misspellings) &&
($spelling_file !~ /$realfile/) &&
($in_commit_log || $line =~ /^(?:\+|Subject:)/i)) {
while ($rawline =~ /(?:^|[^a-z@])($misspellings)(?:\b|$|[^a-z@])/gi) {
my $typo = $1;
my $typo_fix = $spelling_fix{lc($typo)};
$typo_fix = ucfirst($typo_fix) if ($typo =~ /^[A-Z]/);
$typo_fix = uc($typo_fix) if ($typo =~ /^[A-Z]+$/);
my $msg_level = \&WARN;
$msg_level = \&CHK if ($file);
if (&{$msg_level}("TYPO_SPELLING",
"'$typo' may be misspelled - perhaps '$typo_fix'?\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/(^|[^A-Za-z@])($typo)($|[^A-Za-z@])/$1$typo_fix$3/;
}
}
}
# check for invalid commit id
if ($in_commit_log && $line =~ /(^fixes:|\bcommit)\s+([0-9a-f]{6,40})\b/i) {
my $id;
my $description;
($id, $description) = git_commit_info($2, undef, undef);
if (!defined($id)) {
WARN("UNKNOWN_COMMIT_ID",
"Unknown commit id '$2', maybe rebased or not pulled?\n" . $herecurr);
}
}
# ignore non-hunk lines and lines being removed
next if (!$hunk_line || $line =~ /^-/);
#trailing whitespace
# An added line containing a CR (\015) is a DOS line ending; with --fix the
# CR and any trailing whitespace are stripped from the fixed copy.
# $herevet uses cat_vet() -- presumably renders control/space chars visibly
# (helper defined elsewhere in this file).
if ($line =~ /^\+.*\015/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
if (ERROR("DOS_LINE_ENDINGS",
"DOS line endings\n" . $herevet) &&
$fix) {
$fixed[$fixlinenr] =~ s/[\s\015]+$//;
}
# Otherwise: whitespace after the last non-blank character, or an added line
# that is nothing but whitespace, is plain trailing whitespace.
} elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
if (ERROR("TRAILING_WHITESPACE",
"trailing whitespace\n" . $herevet) &&
$fix) {
$fixed[$fixlinenr] =~ s/\s+$//;
}
# flag that "cleanup" style problems were seen (consumed outside this hunk)
$rpt_cleaners = 1;
}
# Check for FSF mailing addresses.
# Old GPL boilerplate often embeds the FSF's postal address; match the known
# street-address variants (and the "write to the Free ..." phrasing).
if ($rawline =~ /\bwrite to the Free/i ||
$rawline =~ /\b675\s+Mass\s+Ave/i ||
$rawline =~ /\b59\s+Temple\s+Pl/i ||
$rawline =~ /\b51\s+Franklin\s+St/i) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
# ERROR for patches, downgraded to CHK when scanning whole files (--file).
my $msg_level = \&ERROR;
$msg_level = \&CHK if ($file);
&{$msg_level}("FSF_MAILING_ADDRESS",
"Do not include the paragraph about writing to the Free Software Foundation's mailing address from the sample GPL notice. The FSF has changed addresses in the past, and may do so again. Linux already includes a copy of the GPL.\n" . $herevet)
}
# check for Kconfig help text having a real description
# Only applies when adding the entry originally, after that we do not have
# sufficient context to determine whether it is indeed long enough.
if ($realfile =~ /Kconfig/ &&
# 'choice' is usually the last thing on the line (though
# Kconfig supports named choices), so use a word boundary
# (\b) rather than a whitespace character (\s)
$line =~ /^\+\s*(?:config|menuconfig|choice)\b/) {
my $length = 0;
my $cnt = $realcnt;
my $ln = $linenr + 1;
my $f;
my $is_start = 0;
my $is_end = 0;
for (; $cnt > 0 && defined $lines[$ln - 1]; $ln++) {
$f = $lines[$ln - 1];
$cnt-- if ($lines[$ln - 1] !~ /^-/);
$is_end = $lines[$ln - 1] =~ /^\+/;
next if ($f =~ /^-/);
last if (!$file && $f =~ /^\@\@/);
if ($lines[$ln - 1] =~ /^\+\s*(?:bool|tristate|prompt)\s*["']/) {
$is_start = 1;
} elsif ($lines[$ln - 1] =~ /^\+\s*(?:---)?help(?:---)?$/) {
$length = -1;
}
$f =~ s/^.//;
$f =~ s/#.*//;
$f =~ s/^\s+//;
next if ($f =~ /^$/);
# This only checks context lines in the patch
# and so hopefully shouldn't trigger false
# positives, even though some of these are
# common words in help texts
if ($f =~ /^\s*(?:config|menuconfig|choice|endchoice|
if|endif|menu|endmenu|source)\b/x) {
$is_end = 1;
last;
}
$length++;
}
if ($is_start && $is_end && $length < $min_conf_desc_length) {
WARN("CONFIG_DESCRIPTION",
"please write a paragraph that describes the config symbol fully\n" . $herecurr);
}
#print "is_start<$is_start> is_end<$is_end> length<$length>\n";
}
# check MAINTAINERS entries
if ($realfile =~ /^MAINTAINERS$/) {
# check MAINTAINERS entries for the right form
if ($rawline =~ /^\+[A-Z]:/ &&
$rawline !~ /^\+[A-Z]:\t\S/) {
if (WARN("MAINTAINERS_STYLE",
"MAINTAINERS entries use one tab after TYPE:\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/^(\+[A-Z]):\s*/$1:\t/;
}
}
# check MAINTAINERS entries for the right ordering too
my $preferred_order = 'MRLSWQBCPTFXNK';
if ($rawline =~ /^\+[A-Z]:/ &&
$prevrawline =~ /^[\+ ][A-Z]:/) {
$rawline =~ /^\+([A-Z]):\s*(.*)/;
my $cur = $1;
my $curval = $2;
$prevrawline =~ /^[\+ ]([A-Z]):\s*(.*)/;
my $prev = $1;
my $prevval = $2;
my $curindex = index($preferred_order, $cur);
my $previndex = index($preferred_order, $prev);
if ($curindex < 0) {
WARN("MAINTAINERS_STYLE",
"Unknown MAINTAINERS entry type: '$cur'\n" . $herecurr);
} else {
if ($previndex >= 0 && $curindex < $previndex) {
WARN("MAINTAINERS_STYLE",
"Misordered MAINTAINERS entry - list '$cur:' before '$prev:'\n" . $hereprev);
} elsif ((($prev eq 'F' && $cur eq 'F') ||
($prev eq 'X' && $cur eq 'X')) &&
($prevval cmp $curval) > 0) {
WARN("MAINTAINERS_STYLE",
"Misordered MAINTAINERS entry - list file patterns in alphabetic order\n" . $hereprev);
}
}
}
}
# discourage the use of boolean for type definition attributes of Kconfig options
# Kconfig's "boolean" keyword is a deprecated spelling of "bool".
if ($realfile =~ /Kconfig/ &&
$line =~ /^\+\s*\bboolean\b/) {
WARN("CONFIG_TYPE_BOOLEAN",
"Use of boolean is deprecated, please use bool instead.\n" . $herecurr);
}
# In Makefile/Kbuild files the EXTRA_<x>FLAGS variables are deprecated in
# favor of the per-directory <x>flags-y forms; suggest the replacement.
if (($realfile =~ /Makefile.*/ || $realfile =~ /Kbuild.*/) &&
($line =~ /\+(EXTRA_[A-Z]+FLAGS).*/)) {
my $flag = $1;
# Map of deprecated variable -> preferred replacement.
my $replacement = {
'EXTRA_AFLAGS' => 'asflags-y',
'EXTRA_CFLAGS' => 'ccflags-y',
'EXTRA_CPPFLAGS' => 'cppflags-y',
'EXTRA_LDFLAGS' => 'ldflags-y',
};
# Only warn for flags present in the map (trailing "if" guard).
WARN("DEPRECATED_VARIABLE",
"Use of $flag is deprecated, please use \`$replacement->{$flag} instead.\n" . $herecurr) if ($replacement->{$flag});
}
# Kconfig use tabs and no spaces in line
if ($realfile =~ /Kconfig/ && $rawline =~ /^\+ /) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
WARN("LEADING_SPACE",
"please, no spaces at the start of a line\n" . $herevet);
}
# check for DT compatible documentation
# Zephyr-specific: each "compatible" string added in DT sources (or as a
# .compatible struct member in C) should have a binding under
# $root/dts/bindings/, and its vendor prefix should appear in
# vendor-prefixes.txt.
if (defined $root &&
(($realfile =~ /\.(dts|dtsi|overlay)$/ && $line =~ /^\+\s*compatible\s*=\s*\"/) ||
($realfile =~ /\.[ch]$/ && $line =~ /^\+.*\.compatible\s*=\s*\"/))) {
# Collect every quoted compatible-looking string on the raw line.
my @compats = $rawline =~ /\"([a-zA-Z0-9\-\,\.\+_]+)\"/g;
my $dt_path = $root . "/dts/bindings/";
my $vp_file = $dt_path . "vendor-prefixes.txt";
foreach my $compat (@compats) {
# Build wildcarded variants so e.g. "vnd,chip1234-bus" can match a
# binding documented with a "<.*>" placeholder in place of the model.
my $compat2 = $compat;
$compat2 =~ s/\,[a-zA-Z0-9]*\-/\,<\.\*>\-/;
my $compat3 = $compat;
$compat3 =~ s/\,([a-z]*)[0-9]*\-/\,$1<\.\*>\-/;
# grep -q exit status (in $? >> 8) non-zero => no binding mentions it.
`grep -Erq "$compat|$compat2|$compat3" $dt_path`;
if ( $? >> 8 ) {
WARN("UNDOCUMENTED_DT_STRING",
"DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr);
}
# If the compatible has a "vendor," prefix, that prefix must be listed
# in vendor-prefixes.txt; strings without a prefix are skipped.
next if $compat !~ /^([a-zA-Z0-9\-]+)\,/;
my $vendor = $1;
`grep -Eq "^$vendor\\b" $vp_file`;
if ( $? >> 8 ) {
WARN("UNDOCUMENTED_DT_STRING",
"DT compatible string vendor \"$vendor\" appears un-documented -- check $vp_file\n" . $herecurr);
}
}
}
# check for using SPDX license tag at beginning of files
#
# NOTE(review): the middle of this section was corrupt in this copy (a
# truncated "if ($comment !~ /^$/ && WARN(" with no condition or body, a
# dangling use of $spdx_license, and an empty $fix branch). Restored from
# upstream checkpatch.pl; verify against the fork's intended baseline.
if ($realline == $checklicenseline) {
	# A "#!" interpreter line pushes the expected license tag to line 2.
	if ($rawline =~ /^[ \+]\s*\#\!\s*\//) {
		$checklicenseline = 2;
	} elsif ($rawline =~ /^\+/) {
		# Pick the comment leader expected for this file type.
		my $comment = "";
		if ($realfile =~ /\.(h|s|S)$/) {
			$comment = '/*';
		} elsif ($realfile =~ /\.(c|dts|dtsi|overlay)$/) {
			$comment = '//';
		} elsif (($checklicenseline == 2) || $realfile =~ /\.(sh|pl|py|awk|tc|yaml)$/) {
			$comment = '#';
		} elsif ($realfile =~ /\.rst$/) {
			$comment = '..';
		}

# check SPDX comment style for .[chsS] files
		if ($realfile =~ /\.[chsS]$/ &&
		    $rawline !~ m@^\+\s*\Q$comment\E\s*@) {
			WARN("SPDX_LICENSE_TAG",
			     "Improper SPDX comment style for '$realfile', please use '$comment' instead\n" . $herecurr);
		}

		# For known file types, require a well-formed
		# "<comment> SPDX-License-Identifier: " tag on this line;
		# otherwise validate the named license.
		if ($comment !~ /^$/ &&
		    $rawline !~ m@^\+\Q$comment\E *SPDX-License-Identifier: @) {
			WARN("SPDX_LICENSE_TAG",
			     "Missing or malformed SPDX-License-Identifier tag in line $checklicenseline\n" . $herecurr);
		} elsif ($rawline =~ /(SPDX-License-Identifier: .*)/) {
			my $spdx_license = $1;
			# is_SPDX_License_valid() is defined elsewhere in
			# this script (upstream helper) -- TODO confirm it
			# exists in this fork.
			if (!is_SPDX_License_valid($spdx_license)) {
				WARN("SPDX_LICENSE_TAG",
				     "'$spdx_license' is not supported in LICENSES/...\n" . $herecurr);
			}
			# DT binding docs must be dual GPL-2.0/BSD-2-Clause;
			# --fix rewrites the tag accordingly.
			if ($realfile =~ m@^Documentation/devicetree/bindings/@ &&
			    not $spdx_license =~ /GPL-2\.0.*BSD-2-Clause/) {
				my $msg_level = \&WARN;
				$msg_level = \&CHK if ($file);
				if (&{$msg_level}("SPDX_LICENSE_TAG",
						  "DT binding documents should be licensed (GPL-2.0-only OR BSD-2-Clause)\n" . $herecurr) &&
				    $fix) {
					$fixed[$fixlinenr] =~ s/SPDX-License-Identifier: .*/SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)/;
				}
			}
		}
	}
}
# check we are in a valid source file if not then ignore this hunk
next if ($realfile !~ /\.(h|c|s|S|sh|dtsi|dts|overlay)$/);

# check for using SPDX-License-Identifier on the wrong line number
#
# NOTE(review): this section was corrupt in this copy -- the match that
# populates @-/@+ for the substr() comment test and the WARN message text
# were missing, leaving a bare "WARN(" that does not parse. Restored per
# upstream checkpatch.pl. The substr/$; test checks that the matched tag
# sits inside a comment (comment contents are masked to "$;" in $line).
if ($realline != $checklicenseline &&
    $rawline =~ /\bSPDX-License-Identifier:/ &&
    substr($line, @-, @+ - @-) eq "$;" x (@+ - @-)) {
	WARN("SPDX_LICENSE_TAG",
	     "Misplaced SPDX-License-Identifier tag - use line $checklicenseline instead\n" . $herecurr);
}
# line length limit (with some exclusions)
#
# There are a few types of lines that may extend beyond $max_line_length:
# logging functions like pr_info that end in a string
# lines with a single string
# #defines that are a single string
# lines with an RFC3986 like URL
#
# There are 3 different line length message types:
# LONG_LINE_COMMENT a comment starts before but extends beyond $max_line_length
# LONG_LINE_STRING a string starts before but extends beyond $max_line_length
# LONG_LINE all other lines longer than $max_line_length
#
# if LONG_LINE is ignored, the other 2 types are also ignored
#
if ($line =~ /^\+/ && $length > $max_line_length) {
my $msg_type = "LONG_LINE";
# Check the allowed long line types first
# logging functions that end in a string that starts
# before $max_line_length
if ($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(?:KERN_\S+\s*|[^"]*))?($String\s*(?:|,|\)\s*;)\s*)$/ &&
length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
$msg_type = "";
# lines with only strings (w/ possible termination)
# #defines with only strings
} elsif ($line =~ /^\+\s*$String\s*(?:\s*|,|\)\s*;)\s*$/ ||
$line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) {
$msg_type = "";
# More special cases
} elsif ($line =~ /^\+.*\bEFI_GUID\s*\(/ ||
$line =~ /^\+\s*(?:\w+)?\s*DEFINE_PER_CPU/) {
$msg_type = "";
# URL ($rawline is used in case the URL is in a comment)
} elsif ($rawline =~ /^\+.*\b[a-z][\w\.\+\-]*:\/\/\S+/i) {
$msg_type = "";
# Otherwise set the alternate message types
# a comment starts before $max_line_length
} elsif ($line =~ /($;[\s$;]*)$/ &&
length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
$msg_type = "LONG_LINE_COMMENT"
# a quoted string starts before $max_line_length
} elsif ($sline =~ /\s*($String(?:\s*(?:\\|,\s*|\)\s*;\s*))?)$/ &&
length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
$msg_type = "LONG_LINE_STRING"
}
if ($msg_type ne "" &&
(show_type("LONG_LINE") || show_type($msg_type))) {
my $msg_level = \&WARN;
$msg_level = \&CHK if ($file);
&{$msg_level}($msg_type,
"line length of $length exceeds $max_line_length columns\n" . $herecurr);
}
}
# check for adding lines without a newline.
if ($line =~ /^\+/ && defined $lines[$linenr] && $lines[$linenr] =~ /^\\ No newline at end of file/) {
WARN("MISSING_EOF_NEWLINE",
"adding a line without newline at end of file\n" . $herecurr);
}
# check we are in a valid source file C or perl if not then ignore this hunk
next if ($realfile !~ /\.(h|c|pl|dtsi|dts|overlay)$/);
# at the beginning of a line any tabs must come first and anything
# more than $tabsize must use tabs, except multi-line macros which may start
# with spaces on empty lines
#
# NOTE(review): the second regex's literal run of $tabsize (8) spaces had
# been collapsed to a single space in this copy, which would flag ANY
# leading space; restored the 8-space run as in upstream checkpatch.pl.
# Note && binds tighter than ||, so the "\\$" macro-continuation exception
# applies only to the second (all-spaces) pattern.
if ($rawline =~ /^\+\s* \t\s*\S/ ||
    $rawline =~ /^\+\s*        \s*/ &&
    $rawline !~ /^\+\s*\\$/) {
	my $herevet = "$here\n" . cat_vet($rawline) . "\n";
	$rpt_cleaners = 1;
	# --fix: retab the leading whitespace of the added line.
	if (ERROR("CODE_INDENT",
		  "code indent should use tabs where possible\n" . $herevet) &&
	    $fix) {
		$fixed[$fixlinenr] =~ s/^\+([ \t]+)/"\+" . tabify($1)/e;
	}
}
# check for repeated words separated by a single space
if ($rawline =~ /^\+/) {
while ($rawline =~ /\b($word_pattern) (?=($word_pattern))/g) {
my $first = $1;
my $second = $2;
if ($first =~ /(?:struct|union|enum)/) {
pos($rawline) += length($first) + length($second) + 1;
next;
}
next if ($first ne $second);
next if ($first eq 'long');
if (WARN("REPEATED_WORD",
"Possible repeated word: '$first'\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b$first $second\b/$first/;
}
}
# if it's a repeated word on consecutive lines in a comment block
if ($prevline =~ /$;+\s*$/ &&
$prevrawline =~ /($word_pattern)\s*$/) {
my $last_word = $1;
if ($rawline =~ /^\+\s*\*\s*$last_word /) {
if (WARN("REPEATED_WORD",
"Possible repeated word: '$last_word'\n" . $hereprev) &&
$fix) {
$fixed[$fixlinenr] =~ s/(\+\s*\*\s*)$last_word /$1/;
}
}
}
}
# check for space before tabs.
if ($rawline =~ /^\+/ && $rawline =~ / \t/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
if (WARN("SPACE_BEFORE_TAB",
"please, no space before tabs\n" . $herevet) &&
$fix) {
# --fix: first turn each run of exactly $tabsize spaces before a tab
# into two tabs, then drop any remaining spaces preceding a tab.
# Loops repeat until no more substitutions apply.
while ($fixed[$fixlinenr] =~
s/(^\+.*) {$tabsize,$tabsize}\t/$1\t\t/) {}
while ($fixed[$fixlinenr] =~
s/(^\+.*) +\t/$1\t/) {}
}
}
# check for assignments on the start of a line
# A continuation line beginning with an assignment operator means the
# operator was wrapped; it belongs at the end of the previous line.
# The trailing [^=] keeps "==" comparisons from matching "=".
if ($sline =~ /^\+\s+($Assignment)[^=]/) {
CHK("ASSIGNMENT_CONTINUATIONS",
"Assignment operator '$1' should be on the previous line\n" . $hereprev);
}
# check for && or || at the start of a line
# Logical operators should trail the previous line, not lead a new one.
if ($rawline =~ /^\+\s*(&&|\|\|)/) {
CHK("LOGICAL_CONTINUATIONS",
"Logical continuations should be on the previous line\n" . $hereprev);
}
# check indentation starts on a tab stop
if ($perl_version_ok &&
$sline =~ /^\+\t+( +)(?:$c90_Keywords\b|\{\s*$|\}\s*(?:else\b|while\b|\s*$)|$Declare\s*$Ident\s*[;=])/) {
my $indent = length($1);
if ($indent % $tabsize) {
if (WARN("TABSTOP",
"Statements should start on a tabstop\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s@(^\+\t+) +@$1 . "\t" x ($indent/$tabsize)@e;
}
}
}
# check multi-line statement indentation matches previous line
if ($perl_version_ok &&
$prevline =~ /^\+([ \t]*)((?:$c90_Keywords(?:\s+if)\s*)|(?:$Declare\s*)?(?:$Ident|\(\s*\*\s*$Ident\s*\))\s*|(?:\*\s*)*$Lval\s*=\s*$Ident\s*)\(.*(\&\&|\|\||,)\s*$/) {
$prevline =~ /^\+(\t*)(.*)$/;
my $oldindent = $1;
my $rest = $2;
my $pos = pos_last_openparen($rest);
if ($pos >= 0) {
$line =~ /^(\+| )([ \t]*)/;
my $newindent = $2;
my $goodtabindent = $oldindent .
"\t" x ($pos / $tabsize) .
" " x ($pos % $tabsize);
my $goodspaceindent = $oldindent . " " x $pos;
if ($newindent ne $goodtabindent &&
$newindent ne $goodspaceindent) {
if (CHK("PARENTHESIS_ALIGNMENT",
"Alignment should match open parenthesis\n" . $hereprev) &&
$fix && $line =~ /^\+/) {
$fixed[$fixlinenr] =~
s/^\+[ \t]*/\+$goodtabindent/;
}
}
}
}
# check for space after cast like "(int) foo" or "(struct foo) bar"
# avoid checking a few false positives:
# "sizeof(<type>)" or "__alignof__(<type>)"
# function pointer declarations like "(*foo)(int) = bar;"
# structure definitions like "(struct foo) { 0 };"
# multiline macros that define functions
# known attributes or the __attribute__ keyword
if ($line =~ /^\+(.*)\(\s*$Type\s*\)([ \t]++)((?![={]|\\$|$Attribute|__attribute__))/ &&
(!defined($1) || $1 !~ /\b(?:sizeof|__alignof__)\s*$/)) {
if (CHK("SPACING",
"No space is necessary after a cast\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/(\(\s*$Type\s*\))[ \t]+/$1/;
}
}
# Block comment styles
# Networking with an initial /*
if ($realfile =~ m@^(drivers/net/|net/)@ &&
$prevrawline =~ /^\+[ \t]*\/\*[ \t]*$/ &&
$rawline =~ /^\+[ \t]*\*/ &&
$realline > 2) {
WARN("NETWORKING_BLOCK_COMMENT_STYLE",
"networking block comments don't use an empty /* line, use /* Comment...\n" . $hereprev);
}
# Block comments use * on subsequent lines
if ($prevline =~ /$;[ \t]*$/ && #ends in comment
$prevrawline =~ /^\+.*?\/\*/ && #starting /*
$prevrawline !~ /\*\/[ \t]*$/ && #no trailing */
$rawline =~ /^\+/ && #line is new
$rawline !~ /^\+[ \t]*\*/) { #no leading *
WARN("BLOCK_COMMENT_STYLE",
"Block comments use * on subsequent lines\n" . $hereprev);
}
# Block comments use */ on trailing lines
if ($rawline !~ m@^\+[ \t]*\*/[ \t]*$@ && #trailing */
$rawline !~ m@^\+.*/\*.*\*/[ \t]*$@ && #inline /*...*/
$rawline !~ m@^\+.*\*{2,}/[ \t]*$@ && #trailing **/
$rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) { #non blank */
WARN("BLOCK_COMMENT_STYLE",
"Block comments use a trailing */ on a separate line\n" . $herecurr);
}
# Block comment * alignment
if ($prevline =~ /$;[ \t]*$/ && #ends in comment
$line =~ /^\+[ \t]*$;/ && #leading comment
$rawline =~ /^\+[ \t]*\*/ && #leading *
(($prevrawline =~ /^\+.*?\/\*/ && #leading /*
$prevrawline !~ /\*\/[ \t]*$/) || #no trailing */
$prevrawline =~ /^\+[ \t]*\*/)) { #leading *
my $oldindent;
$prevrawline =~ m@^\+([ \t]*/?)\*@;
if (defined($1)) {
$oldindent = expand_tabs($1);
} else {
$prevrawline =~ m@^\+(.*/?)\*@;
$oldindent = expand_tabs($1);
}
$rawline =~ m@^\+([ \t]*)\*@;
my $newindent = $1;
$newindent = expand_tabs($newindent);
if (length($oldindent) ne length($newindent)) {
WARN("BLOCK_COMMENT_STYLE",
"Block comments should align the * on each line\n" . $hereprev);
}
}
# check for missing blank lines after struct/union declarations
# with exceptions for various attributes and macros
if ($prevline =~ /^[\+ ]};?\s*$/ &&
$line =~ /^\+/ &&
!($line =~ /^\+\s*$/ ||
$line =~ /^\+\s*EXPORT_SYMBOL/ ||
$line =~ /^\+\s*MODULE_/i ||
$line =~ /^\+\s*\#\s*(?:end|elif|else)/ ||
$line =~ /^\+[a-z_]*init/ ||
$line =~ /^\+\s*(?:static\s+)?[A-Z_]*ATTR/ ||
$line =~ /^\+\s*DECLARE/ ||
$line =~ /^\+\s*builtin_[\w_]*driver/ ||
$line =~ /^\+\s*__setup/)) {
if (CHK("LINE_SPACING",
"Please use a blank line after function/struct/union/enum declarations\n" . $hereprev) &&
$fix) {
fix_insert_line($fixlinenr, "\+");
}
}
# check for multiple consecutive blank lines
if ($prevline =~ /^[\+ ]\s*$/ &&
$line =~ /^\+\s*$/ &&
$last_blank_line != ($linenr - 1)) {
if (CHK("LINE_SPACING",
"Please don't use multiple blank lines\n" . $hereprev) &&
$fix) {
fix_delete_line($fixlinenr, $rawline);
}
$last_blank_line = $linenr;
}
# check for missing blank lines after declarations
if ($sline =~ /^\+\s+\S/ && #Not at char 1
# actual declarations
($prevline =~ /^\+\s+$Declare\s*$Ident\s*[=,;:\[]/ ||
# function pointer declarations
$prevline =~ /^\+\s+$Declare\s*\(\s*\*\s*$Ident\s*\)\s*[=,;:\[\(]/ ||
# foo bar; where foo is some local typedef or #define
$prevline =~ /^\+\s+$Ident(?:\s+|\s*\*\s*)$Ident\s*[=,;\[]/ ||
# known declaration macros
$prevline =~ /^\+\s+$declaration_macros/) &&
# for "else if" which can look like "$Ident $Ident"
!($prevline =~ /^\+\s+$c90_Keywords\b/ ||
# other possible extensions of declaration lines
$prevline =~ /(?:$Compare|$Assignment|$Operators)\s*$/ ||
# not starting a section or a macro "\" extended line
$prevline =~ /(?:\{\s*|\\)$/) &&
# looks like a declaration
!($sline =~ /^\+\s+$Declare\s*$Ident\s*[=,;:\[]/ ||
# function pointer declarations
$sline =~ /^\+\s+$Declare\s*\(\s*\*\s*$Ident\s*\)\s*[=,;:\[\(]/ ||
# foo bar; where foo is some local typedef or #define
$sline =~ /^\+\s+(?:volatile\s+)?$Ident(?:\s+|\s*\*\s*)$Ident\s*[=,;\[]/ ||
# known declaration macros
$sline =~ /^\+\s+$declaration_macros/ ||
# start of struct or union or enum
$sline =~ /^\+\s+(?:volatile\s+)?(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ ||
# start or end of block or continuation of declaration
$sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
# bitfield continuation
$sline =~ /^\+\s+$Ident\s*:\s*\d+\s*[,;]/ ||
# other possible extensions of declaration lines
$sline =~ /^\+\s+\(?\s*(?:$Compare|$Assignment|$Operators)/) &&
# indentation of previous and current line are the same
(($prevline =~ /\+(\s+)\S/) && $sline =~ /^\+$1\S/)) {
if (WARN("LINE_SPACING",
"Missing a blank line after declarations\n" . $hereprev) &&
$fix) {
fix_insert_line($fixlinenr, "\+");
}
}
# check for spaces at the beginning of a line.
# Exceptions:
# 1) within comments
# 2) indented preprocessor commands
# 3) hanging labels
# 4) empty lines in multi-line macros
if ($rawline =~ /^\+ / && $line !~ /^\+ *(?:$;|#|$Ident:)/ &&
$rawline !~ /^\+\s+\\$/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
if (WARN("LEADING_SPACE",
"please, no spaces at the start of a line\n" . $herevet) &&
$fix) {
$fixed[$fixlinenr] =~ s/^\+([ \t]+)/"\+" . tabify($1)/e;
}
}
# check we are in a valid C source file if not then ignore this hunk
next if ($realfile !~ /\.(h|c)$/);
# check for unusual line ending [ or (
if ($line =~ /^\+.*([\[\(])\s*$/) {
CHK("OPEN_ENDED_LINE",
"Lines should not end with a '$1'\n" . $herecurr);
}
# check if this appears to be the start function declaration, save the name
if ($sline =~ /^\+\{\s*$/ &&
$prevline =~ /^\+(?:(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*)?($Ident)\(/) {
$context_function = $1;
}
# check if this appears to be the end of function declaration
if ($sline =~ /^\+\}\s*$/) {
undef $context_function;
}
# check indentation of any line with a bare else
# (but not if it is a multiple line "if (foo) return bar; else return baz;")
# if the previous line is a break or return and is indented 1 tab more...
if ($sline =~ /^\+([\t]+)(?:}[ \t]*)?else(?:[ \t]*{)?\s*$/) {
my $tabs = length($1) + 1;
if ($prevline =~ /^\+\t{$tabs,$tabs}break\b/ ||
($prevline =~ /^\+\t{$tabs,$tabs}return\b/ &&
defined $lines[$linenr] &&
$lines[$linenr] !~ /^[ \+]\t{$tabs,$tabs}return/)) {
WARN("UNNECESSARY_ELSE",
"else is not generally useful after a break or return\n" . $hereprev);
}
}
# check indentation of a line with a break;
# if the previous line is a goto or return and is indented the same # of tabs
if ($sline =~ /^\+([\t]+)break\s*;\s*$/) {
my $tabs = $1;
if ($prevline =~ /^\+$tabs(?:goto|return)\b/) {
WARN("UNNECESSARY_BREAK",
"break is not useful after a goto or return\n" . $hereprev);
}
}
# check for RCS/CVS revision markers
# $Revision$ / $Log$ / $Id$ keywords (with or without the closing '$')
# are legacy RCS/CVS expansion markers; warn that they will stay static.
if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
WARN("CVS_KEYWORD",
"CVS style keyword markers, these will _not_ be updated\n". $herecurr);
}
# check for old HOTPLUG __dev<foo> section markings
# Flags obsolete __devinit/__devexit (and their data/const variants);
# per the message below they are unnecessary.
if ($line =~ /\b(__dev(init|exit)(data|const|))\b/) {
WARN("HOTPLUG_SECTION",
"Using $1 is unnecessary\n" . $herecurr);
}
# Check for potential 'bare' types
my ($stat, $cond, $line_nr_next, $remain_next, $off_next,
$realline_next);
#print "LINE<$line>\n";
if ($linenr > $suppress_statement &&
$realcnt && $sline =~ /.\s*\S/) {
($stat, $cond, $line_nr_next, $remain_next, $off_next) =
ctx_statement_block($linenr, $realcnt, 0);
$stat =~ s/\n./\n /g;
$cond =~ s/\n./\n /g;
#print "linenr<$linenr> <$stat>\n";
# If this statement has no statement boundaries within
# it there is no point in retrying a statement scan
# until we hit end of it.
my $frag = $stat; $frag =~ s/;+\s*$//;
if ($frag !~ /(?:{|;)/) {
#print "skip<$line_nr_next>\n";
$suppress_statement = $line_nr_next;
}
# Find the real next line.
$realline_next = $line_nr_next;
if (defined $realline_next &&
(!defined $lines[$realline_next - 1] ||
substr($lines[$realline_next - 1], $off_next) =~ /^\s*$/)) {
$realline_next++;
}
my $s = $stat;
$s =~ s/{.*$//s;
# Ignore goto labels.
if ($s =~ /$Ident:\*$/s) {
# Ignore functions being called
} elsif ($s =~ /^.\s*$Ident\s*\(/s) {
} elsif ($s =~ /^.\s*else\b/s) {
# declarations always start with types
} elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?((?:\s*$Ident)+?)\b(?:\s+$Sparse)?\s*\**\s*(?:$Ident|\(\*[^\)]*\))(?:\s*$Modifier)?\s*(?:;|=|,|\()/s) {
my $type = $1;
$type =~ s/\s+/ /g;
possible($type, "A:" . $s);
# definitions in global scope can only start with types
} elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b\s*(?!:)/s) {
possible($1, "B:" . $s);
}
# any (foo ... *) is a pointer cast, and foo is a type
while ($s =~ /\(($Ident)(?:\s+$Sparse)*[\s\*]+\s*\)/sg) {
possible($1, "C:" . $s);
}
# Check for any sort of function declaration.
# int foo(something bar, other baz);
# void (*store_gdt)(x86_descr_ptr *);
if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/s) {
my ($name_len) = length($1);
my $ctx = $s;
substr($ctx, 0, $name_len + 1, '');
$ctx =~ s/\)[^\)]*$//;
for my $arg (split(/\s*,\s*/, $ctx)) {
if ($arg =~ /^(?:const\s+)?($Ident)(?:\s+$Sparse)*\s*\**\s*(:?\b$Ident)?$/s || $arg =~ /^($Ident)$/s) {
possible($1, "D:" . $s);
}
}
}
}
#
# Checks which may be anchored in the context.
#
# Check for switch () and associated case and default
# statements should be at the same indent.
if ($line=~/\bswitch\s*\(.*\)/) {
my $err = '';
my $sep = '';
my @ctx = ctx_block_outer($linenr, $realcnt);
shift(@ctx);
for my $ctx (@ctx) {
my ($clen, $cindent) = line_stats($ctx);
if ($ctx =~ /^\+\s*(case\s+|default:)/ &&
$indent != $cindent) {
$err .= "$sep$ctx\n";
$sep = '';
} else {
$sep = "[...]\n";
}
}
if ($err ne '') {
ERROR("SWITCH_CASE_INDENT_LEVEL",
"switch and case should be at the same indent\n$hereline$err");
}
}
# if/while/etc brace do not go on next line, unless defining a do while loop,
# or if that brace on the next line is for something else
if ($line =~ /(.*)\b((?:if|while|for|switch|(?:[A-Z_]+|)FOR_EACH(?!_NONEMPTY_TERM)[A-Z_]+)\s*\(|do\b|else\b)/ && $line !~ /^.\s*\#/) {
my $pre_ctx = "$1$2";
my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
if ($line =~ /^\+\t{6,}/) {
WARN("DEEP_INDENTATION",
"Too many leading tabs - consider code refactoring\n" . $herecurr);
}
my $ctx_cnt = $realcnt - $#ctx - 1;
my $ctx = join("\n", @ctx);
my $ctx_ln = $linenr;
my $ctx_skip = $realcnt;
while ($ctx_skip > $ctx_cnt || ($ctx_skip == $ctx_cnt &&
defined $lines[$ctx_ln - 1] &&
$lines[$ctx_ln - 1] =~ /^-/)) {
##print "SKIP<$ctx_skip> CNT<$ctx_cnt>\n";
$ctx_skip-- if (!defined $lines[$ctx_ln - 1] || $lines[$ctx_ln - 1] !~ /^-/);
$ctx_ln++;
}
#print "realcnt<$realcnt> ctx_cnt<$ctx_cnt>\n";
#print "pre<$pre_ctx>\nline<$line>\nctx<$ctx>\nnext<$lines[$ctx_ln - 1]>\n";
if ($ctx !~ /{\s*/ && defined($lines[$ctx_ln - 1]) && $lines[$ctx_ln - 1] =~ /^\+\s*{/) {
ERROR("OPEN_BRACE",
"that open brace { should be on the previous line\n" .
"$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
}
if ($level == 0 && $pre_ctx !~ /}\s*while\s*\($/ &&
$ctx =~ /\)\s*\;\s*$/ &&
defined $lines[$ctx_ln - 1])
{
my ($nlength, $nindent) = line_stats($lines[$ctx_ln - 1]);
if ($nindent > $indent) {
WARN("TRAILING_SEMICOLON",
"trailing semicolon indicates no statements, indent implies otherwise\n" .
"$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
}
}
}
# Check relative indent for conditionals and blocks.
if ($line =~ /\b(?:(?:if|while|for|(?:[A-Z_]+|)FOR_EACH(?!_NONEMPTY_TERM|_IDX|_FIXED_ARG|_IDX_FIXED_ARG)[A-Z_]+)\s*\(|(?:do|else)\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
($stat, $cond, $line_nr_next, $remain_next, $off_next) =
ctx_statement_block($linenr, $realcnt, 0)
if (!defined $stat);
my ($s, $c) = ($stat, $cond);
substr($s, 0, length($c), '');
# remove inline comments
$s =~ s/$;/ /g;
$c =~ s/$;/ /g;
# Find out how long the conditional actually is.
my @newlines = ($c =~ /\n/gs);
my $cond_lines = 1 + $#newlines;
# Make sure we remove the line prefixes as we have
# none on the first line, and are going to readd them
# where necessary.
$s =~ s/\n./\n/gs;
while ($s =~ /\n\s+\\\n/) {
$cond_lines += $s =~ s/\n\s+\\\n/\n/g;
}
# We want to check the first line inside the block
# starting at the end of the conditional, so remove:
# 1) any blank line termination
# 2) any opening brace { on end of the line
# 3) any do (...) {
my $continuation = 0;
my $check = 0;
$s =~ s/^.*\bdo\b//;
$s =~ s/^\s*{//;
if ($s =~ s/^\s*\\//) {
$continuation = 1;
}
if ($s =~ s/^\s*?\n//) {
$check = 1;
$cond_lines++;
}
# Also ignore a loop construct at the end of a
# preprocessor statement.
if (($prevline =~ /^.\s*#\s*define\s/ ||
$prevline =~ /\\\s*$/) && $continuation == 0) {
$check = 0;
}
my $cond_ptr = -1;
$continuation = 0;
while ($cond_ptr != $cond_lines) {
$cond_ptr = $cond_lines;
# If we see an #else/#elif then the code
# is not linear.
if ($s =~ /^\s*\#\s*(?:else|elif)/) {
$check = 0;
}
# Ignore:
# 1) blank lines, they should be at 0,
# 2) preprocessor lines, and
# 3) labels.
if ($continuation ||
$s =~ /^\s*?\n/ ||
$s =~ /^\s*#\s*?/ ||
$s =~ /^\s*$Ident\s*:/) {
$continuation = ($s =~ /^.*?\\\n/) ? 1 : 0;
if ($s =~ s/^.*?\n//) {
$cond_lines++;
}
}
}
my (undef, $sindent) = line_stats("+" . $s);
my $stat_real = raw_line($linenr, $cond_lines);
# Check if either of these lines are modified, else
# this is not this patch's fault.
if (!defined($stat_real) ||
$stat !~ /^\+/ && $stat_real !~ /^\+/) {
$check = 0;
}
if (defined($stat_real) && $cond_lines > 1) {
$stat_real = "[...]\n$stat_real";
}
#print "line<$line> prevline<$prevline> indent<$indent> sindent<$sindent> check<$check> continuation<$continuation> s<$s> cond_lines<$cond_lines> stat_real<$stat_real> stat<$stat>\n";
if ($check && $s ne '' &&
(($sindent % $tabsize) != 0 ||
($sindent < $indent) ||
($sindent == $indent &&
($s !~ /^\s*(?:\}|\{|else\b)/)) ||
($sindent > $indent + $tabsize))) {
WARN("SUSPECT_CODE_INDENT",
"suspect code indent for conditional statements ($indent, $sindent)\n" . $herecurr . "$stat_real\n");
}
}
# Track the 'values' across context and added lines.
my $opline = $line; $opline =~ s/^./ /;
my ($curr_values, $curr_vars) =
annotate_values($opline . "\n", $prev_values);
$curr_values = $prev_values . $curr_values;
if ($dbg_values) {
my $outline = $opline; $outline =~ s/\t/ /g;
print "$linenr > .$outline\n";
print "$linenr > $curr_values\n";
print "$linenr > $curr_vars\n";
}
$prev_values = substr($curr_values, -1);
#ignore lines not being added
next if ($line =~ /^[^\+]/);
# check for dereferences that span multiple lines
# i.e. previous added line ends in "foo." or "foo->" and this added line
# starts with the member; the suggested single-line form is rebuilt in $ref
if ($prevline =~ /^\+.*$Lval\s*(?:\.|->)\s*$/ &&
$line =~ /^\+\s*(?!\#\s*(?!define\s+|if))\s*$Lval/) {
$prevline =~ /($Lval\s*(?:\.|->))\s*$/;
my $ref = $1;
$line =~ /^.\s*($Lval)/;
$ref .= $1;
# strip whitespace so the suggestion reads as one expression
$ref =~ s/\s//g;
WARN("MULTILINE_DEREFERENCE",
"Avoid multiple line dereference - prefer '$ref'\n" . $hereprev);
}
# check for declarations of signed or unsigned without int
while ($line =~ m{\b($Declare)\s*(?!char\b|short\b|int\b|long\b)\s*($Ident)?\s*[=,;\[\)\(]}g) {
my $type = $1;
my $var = $2;
$var = "" if (!defined $var);
# only bare "signed"/"unsigned" (optionally with '*'s) triggers the warning
if ($type =~ /^(?:(?:$Storage|$Inline|$Attribute)\s+)*((?:un)?signed)((?:\s*\*)*)\s*$/) {
my $sign = $1;
my $pointer = $2;
$pointer = "" if (!defined $pointer);
if (WARN("UNSPECIFIED_INT",
"Prefer '" . trim($sign) . " int" . rtrim($pointer) . "' to bare use of '$sign" . rtrim($pointer) . "'\n" . $herecurr) &&
$fix) {
# build the replacement declaration, e.g. "unsigned int *"
my $decl = trim($sign) . " int ";
my $comp_pointer = $pointer;
$comp_pointer =~ s/\s//g;
$decl .= $comp_pointer;
$decl = rtrim($decl) if ($var eq "");
$fixed[$fixlinenr] =~ s@\b$sign\s*\Q$pointer\E\s*$var\b@$decl$var@;
}
}
}
# TEST: allow direct testing of the type matcher.
# developer aid ($dbg_type command-line switch): report whether the whole
# line matches $Declare; skips all remaining checks via 'next'
if ($dbg_type) {
if ($line =~ /^.\s*$Declare\s*$/) {
ERROR("TEST_TYPE",
"TEST: is type\n" . $herecurr);
} elsif ($dbg_type > 1 && $line =~ /^.+($Declare)/) {
ERROR("TEST_NOT_TYPE",
"TEST: is not type ($1 is)\n". $herecurr);
}
next;
}
# TEST: allow direct testing of the attribute matcher.
# same idea for $Modifier (attributes) when $dbg_attr is set
if ($dbg_attr) {
if ($line =~ /^.\s*$Modifier\s*$/) {
ERROR("TEST_ATTR",
"TEST: is attr\n" . $herecurr);
} elsif ($dbg_attr > 1 && $line =~ /^.+($Modifier)/) {
ERROR("TEST_NOT_ATTR",
"TEST: is not attr ($1 is)\n". $herecurr);
}
next;
}
# check for initialisation to aggregates open brace on the next line
# "x = \n {" should be "x = {"; the fixer joins the two lines when both
# sides of the split are added ('+') lines
if ($line =~ /^.\s*{/ &&
$prevline =~ /(?:^|[^=])=\s*$/) {
if (ERROR("OPEN_BRACE",
"that open brace { should be on the previous line\n" . $hereprev) &&
$fix && $prevline =~ /^\+/ && $line =~ /^\+/) {
fix_delete_line($fixlinenr - 1, $prevrawline);
fix_delete_line($fixlinenr, $rawline);
my $fixedline = $prevrawline;
$fixedline =~ s/\s*=\s*$/ = {/;
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $line;
# drop the now-redundant leading '{' from the second line
$fixedline =~ s/^(.\s*)\{\s*/$1/;
fix_insert_line($fixlinenr, $fixedline);
}
}
#
# Checks which are anchored on the added line.
#
# check for malformed paths in #include statements (uses RAW line)
if ($rawline =~ m{^.\s*\#\s*include\s+[<"](.*)[">]}) {
my $path = $1;
# doubled slashes in an include path are always a mistake
if ($path =~ m{//}) {
ERROR("MALFORMED_INCLUDE",
"malformed #include filename\n" . $herecurr);
}
# files already under include/uapi must not use a uapi/ prefix
# (string on the right of =~ is treated as a regex pattern)
if ($path =~ "^uapi/" && $realfile =~ m@\binclude/uapi/@) {
ERROR("UAPI_INCLUDE",
"No #include in ...include/uapi/... should use a uapi/ path prefix\n" . $herecurr);
}
}
# no C99 // comments
if ($line =~ m{//}) {
if (ERROR("C99_COMMENTS",
"do not use C99 // comments\n" . $herecurr) &&
$fix) {
# note: this 'my $line' deliberately shadows the outer $line with the
# fixable copy of the raw line
my $line = $fixed[$fixlinenr];
if ($line =~ /\/\/(.*)$/) {
my $comment = trim($1);
$fixed[$fixlinenr] =~ s@\/\/(.*)$@/\* $comment \*/@;
}
}
}
# Remove C99 comments.
# done on the working copies so later checks never see // comment text
$line =~ s@//.*@@;
$opline =~ s@//.*@@;
# EXPORT_SYMBOL should immediately follow the thing it is exporting, consider
# the whole statement.
#print "APW <$lines[$realline_next - 1]>\n";
if (defined $realline_next &&
exists $lines[$realline_next - 1] &&
!defined $suppress_export{$realline_next} &&
($lines[$realline_next - 1] =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
$lines[$realline_next - 1] =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
# Handle definitions which produce identifiers with
# a prefix:
# XXX(foo);
# EXPORT_SYMBOL(something_foo);
my $name = $1;
if ($stat =~ /^(?:.\s*}\s*\n)?.([A-Z_]+)\s*\(\s*($Ident)/ &&
$name =~ /^${Ident}_$2/) {
#print "FOO C name<$name>\n";
$suppress_export{$realline_next} = 1;
} elsif ($stat !~ /(?:
\n.}\s*$|
^.DEFINE_$Ident\(\Q$name\E\)|
^.DECLARE_$Ident\(\Q$name\E\)|
^.LIST_HEAD\(\Q$name\E\)|
^.(?:$Storage\s+)?$Type\s*\(\s*\*\s*\Q$name\E\s*\)\s*\(|
\b\Q$name\E(?:\s+$Attribute)*\s*(?:;|=|\[|\()
)/x) {
#print "FOO A<$lines[$realline_next - 1]> stat<$stat> name<$name>\n";
$suppress_export{$realline_next} = 2;
} else {
$suppress_export{$realline_next} = 1;
}
}
if (!defined $suppress_export{$linenr} &&
$prevline =~ /^.\s*$/ &&
($line =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
$line =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
#print "FOO B <$lines[$linenr - 1]>\n";
$suppress_export{$linenr} = 2;
}
if (defined $suppress_export{$linenr} &&
$suppress_export{$linenr} == 2) {
WARN("EXPORT_SYMBOL",
"EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
}
# check for global initialisers.
# globals are zeroed by the loader; "= 0/NULL/false" just bloats .data
if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*($zero_initializer)\s*;/) {
if (ERROR("GLOBAL_INITIALISERS",
"do not initialise globals to $1\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*$zero_initializer\s*;/$1;/;
}
}
# check for static initialisers.
# same argument for statics: they live in .bss when left uninitialised
if ($line =~ /^\+.*\bstatic\s.*=\s*($zero_initializer)\s*;/) {
if (ERROR("INITIALISED_STATIC",
"do not initialise statics to $1\n" .
$herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/(\bstatic\s.*?)\s*=\s*$zero_initializer\s*;/$1;/;
}
}
# check for misordered declarations of char/short/int/long with signed/unsigned
while ($sline =~ m{(\b$TypeMisordered\b)}g) {
my $tmp = trim($1);
WARN("MISORDERED_TYPE",
"type '$tmp' should be specified in [[un]signed] [short|int|long|long long] order\n" . $herecurr);
}
# check for unnecessary <signed> int declarations of short/long/long long
# e.g. "unsigned long int" -> "unsigned long"
while ($sline =~ m{\b($TypeMisordered(\s*\*)*|$C90_int_types)\b}g) {
my $type = trim($1);
next if ($type !~ /\bint\b/);
next if ($type !~ /\b(?:short|long\s+long|long)\b/);
# rebuild the preferred spelling, preserving const and (un)signedness
my $new_type = $type;
$new_type =~ s/\b\s*int\s*\b/ /;
$new_type =~ s/\b\s*(?:un)?signed\b\s*/ /;
$new_type =~ s/^const\s+//;
$new_type = "unsigned $new_type" if ($type =~ /\bunsigned\b/);
$new_type = "const $new_type" if ($type =~ /^const\b/);
$new_type =~ s/\s+/ /g;
$new_type = trim($new_type);
if (WARN("UNNECESSARY_INT",
"Prefer '$new_type' over '$type' as the int is unnecessary\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b\Q$type\E\b/$new_type/;
}
}
# check for static const char * arrays.
# the pointers themselves should also be const so the table is fully RO
if ($line =~ /\bstatic\s+const\s+char\s*\*\s*(\w+)\s*\[\s*\]\s*=\s*/) {
WARN("STATIC_CONST_CHAR_ARRAY",
"static const char * array should probably be static const char * const\n" .
$herecurr);
}
# check for initialized const char arrays that should be static const
# a function-local const array is rebuilt on every call unless static
if ($line =~ /^\+\s*const\s+(char|unsigned\s+char|_*u8|(?:[us]_)?int8_t)\s+\w+\s*\[\s*(?:\w+\s*)?\]\s*=\s*"/) {
if (WARN("STATIC_CONST_CHAR_ARRAY",
"const array should probably be static const\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/(^.\s*)const\b/${1}static const/;
}
}
# check for static char foo[] = "bar" declarations.
if ($line =~ /\bstatic\s+char\s+(\w+)\s*\[\s*\]\s*=\s*"/) {
WARN("STATIC_CONST_CHAR_ARRAY",
"static char array declaration should probably be static const char\n" .
$herecurr);
}
# check for const <foo> const where <foo> is not a pointer or array type
# "const int const" is a duplicate qualifier; with a '*' it usually means
# the author wanted "const int * const"
if ($sline =~ /\bconst\s+($BasicType)\s+const\b/) {
my $found = $1;
if ($sline =~ /\bconst\s+\Q$found\E\s+const\b\s*\*/) {
WARN("CONST_CONST",
"'const $found const *' should probably be 'const $found * const'\n" . $herecurr);
} elsif ($sline !~ /\bconst\s+\Q$found\E\s+const\s+\w+\s*\[/) {
WARN("CONST_CONST",
"'const $found const' should probably be 'const $found'\n" . $herecurr);
}
}
# check for non-global char *foo[] = {"bar", ...} declarations.
# indented (block-scope) string tables should normally be static const
if ($line =~ /^.\s+(?:static\s+|const\s+)?char\s+\*\s*\w+\s*\[\s*\]\s*=\s*\{/) {
WARN("STATIC_CONST_CHAR_ARRAY",
"char * array declaration might be better as static const\n" .
$herecurr);
}
# check for sizeof(foo)/sizeof(foo[0]) that could be ARRAY_SIZE(foo)
if ($line =~ m@\bsizeof\s*\(\s*($Lval)\s*\)@) {
my $array = $1;
# require the full division idiom with the SAME lvalue on both sides
if ($line =~ m@\b(sizeof\s*\(\s*\Q$array\E\s*\)\s*/\s*sizeof\s*\(\s*\Q$array\E\s*\[\s*0\s*\]\s*\))@) {
my $array_div = $1;
if (WARN("ARRAY_SIZE",
"Prefer ARRAY_SIZE($array)\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\Q$array_div\E/ARRAY_SIZE($array)/;
}
}
}
# check for function declarations without arguments like "int foo()"
# in C (pre-C23) an empty parameter list declares an unspecified-args
# function, not a zero-args one; "(void)" is required
if ($line =~ /(\b$Type\s*$Ident)\s*\(\s*\)/) {
if (ERROR("FUNCTION_WITHOUT_ARGS",
"Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/(\b($Type)\s+($Ident))\s*\(\s*\)/$2 $3(void)/;
}
}
# check for new typedefs, only function parameters and sparse annotations
# make sense.
# NOTE: the original pattern /\/include\/zephyr\/posix\/*.h/ was a shell
# glob mistakenly used as a regex ('/*' = zero-or-more slashes, '.' = any
# character), so it failed to match real headers such as
# include/zephyr/posix/unistd.h and the check almost never fired.
# Use a proper regex that matches any .h file under include/zephyr/posix/.
if ($realfile =~ m{/include/zephyr/posix/.*\.h$}) {
# allow function-pointer typedefs, function-prototype typedefs,
# known typedef'd types and sparse __bitwise annotations
if ($line =~ /\btypedef\s/ &&
$line !~ /\btypedef\s+$Type\s*\(\s*\*?$Ident\s*\)\s*\(/ &&
$line !~ /\btypedef\s+$Type\s+$Ident\s*\(/ &&
$line !~ /\b$typeTypedefs\b/ &&
$line !~ /\b__bitwise\b/) {
WARN("NEW_TYPEDEFS",
"do not add new typedefs\n" . $herecurr);
}
}
# * goes on variable not on type
# (char*[ const])
while ($line =~ m{(\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\))}g) {
#print "AA<$1>\n";
my ($ident, $from, $to) = ($1, $2, $2);
# Should start with a space.
$to =~ s/^(\S)/ $1/;
# Should not end with a space.
$to =~ s/\s+$//;
# '*'s should not have spaces between.
while ($to =~ s/\*\s+\*/\*\*/) {
}
## print "1: from<$from> to<$to> ident<$ident>\n";
if ($from ne $to) {
if (ERROR("POINTER_LOCATION",
"\"(foo$from)\" should be \"(foo$to)\"\n" . $herecurr) &&
$fix) {
my $sub_from = $ident;
my $sub_to = $ident;
$sub_to =~ s/\Q$from\E/$to/;
$fixed[$fixlinenr] =~
s@\Q$sub_from\E@$sub_to@;
}
}
}
while ($line =~ m{(\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident))}g) {
#print "BB<$1>\n";
my ($match, $from, $to, $ident) = ($1, $2, $2, $3);
# Should start with a space.
$to =~ s/^(\S)/ $1/;
# Should not end with a space.
$to =~ s/\s+$//;
# '*'s should not have spaces between.
while ($to =~ s/\*\s+\*/\*\*/) {
}
# Modifiers should have spaces.
$to =~ s/(\b$Modifier$)/$1 /;
## print "2: from<$from> to<$to> ident<$ident>\n";
if ($from ne $to && $ident !~ /^$Modifier$/) {
if (ERROR("POINTER_LOCATION",
"\"foo${from}bar\" should be \"foo${to}bar\"\n" . $herecurr) &&
$fix) {
my $sub_from = $match;
my $sub_to = $match;
$sub_to =~ s/\Q$from\E/$to/;
$fixed[$fixlinenr] =~
s@\Q$sub_from\E@$sub_to@;
}
}
}
# avoid BUG() or BUG_ON()
# severity drops from WARN to CHK when checking a whole file ($file mode)
if ($line =~ /\b(?:BUG|BUG_ON)\b/) {
my $msg_level = \&WARN;
$msg_level = \&CHK if ($file);
&{$msg_level}("AVOID_BUG",
"Avoid crashing the kernel - try using WARN_ON & recovery code rather than BUG() or BUG_ON()\n" . $herecurr);
}
# avoid LINUX_VERSION_CODE
if ($line =~ /\bLINUX_VERSION_CODE\b/) {
WARN("LINUX_VERSION_CODE",
"LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
}
# check for uses of printk_ratelimit
if ($line =~ /\bprintk_ratelimit\s*\(/) {
WARN("PRINTK_RATELIMITED",
"Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
}
# printk should use KERN_* levels
# negative lookahead: flag printk( calls whose first token is not KERN_*
if ($line =~ /\bprintk\s*\(\s*(?!KERN_[A-Z]+\b)/) {
WARN("PRINTK_WITHOUT_KERN_LEVEL",
"printk() should include KERN_<LEVEL> facility level\n" . $herecurr);
}
# suggest pr_<level>/dev_<level> helpers over raw printk(KERN_*)
if ($line =~ /\bprintk\s*\(\s*KERN_([A-Z]+)/) {
my $orig = $1;
# map KERN_WARNING -> warn, KERN_DEBUG -> dbg for the suggested helper names
my $level = lc($orig);
$level = "warn" if ($level eq "warning");
my $level2 = $level;
$level2 = "dbg" if ($level eq "debug");
WARN("PREFER_PR_LEVEL",
"Prefer [subsystem eg: netdev]_$level2([subsystem]dev, ... then dev_$level2(dev, ... then pr_$level(... to printk(KERN_$orig ...\n" . $herecurr);
}
# same mapping for dev_printk(KERN_*) -> dev_<level>()
if ($line =~ /\bdev_printk\s*\(\s*KERN_([A-Z]+)/) {
my $orig = $1;
my $level = lc($orig);
$level = "warn" if ($level eq "warning");
$level = "dbg" if ($level eq "debug");
WARN("PREFER_DEV_LEVEL",
"Prefer dev_$level(... to dev_printk(KERN_$orig, ...\n" . $herecurr);
}
# ENOSYS means "bad syscall nr" and nothing else. This will have a small
# number of false positives, but assembly files are not checked, so at
# least the arch entry code will not trigger this warning.
if ($line =~ /\bENOSYS\b/) {
WARN("ENOSYS",
"ENOSYS means 'invalid syscall nr' and nothing else\n" . $herecurr);
}
# ENOTSUPP is not a standard error code and should be avoided in new patches.
# Folks usually mean EOPNOTSUPP (also called ENOTSUP), when they type ENOTSUPP.
# Similarly to ENOSYS warning a small number of false positives is expected.
# skipped in whole-file mode ($file) since existing uses may be deliberate
if (!$file && $line =~ /\bENOTSUPP\b/) {
if (WARN("ENOTSUPP",
"ENOTSUPP is not a SUSV4 error code, prefer EOPNOTSUPP\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\bENOTSUPP\b/EOPNOTSUPP/;
}
}
# function brace can't be on same line, except for #defines of do while,
# or if closed on same line
if ($perl_version_ok &&
$sline =~ /$Type\s*$Ident\s*$balanced_parens\s*\{/ &&
$sline !~ /\#\s*define\b.*do\s*\{/ &&
$sline !~ /}/) {
if (ERROR("OPEN_BRACE",
"open brace '{' following function definitions go on the next line\n" . $herecurr) &&
$fix) {
# split "type ident(args) { body" into three lines:
# signature / "{" / indented body remainder
fix_delete_line($fixlinenr, $rawline);
my $fixed_line = $rawline;
$fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/;
my $line1 = $1;
my $line2 = $2;
fix_insert_line($fixlinenr, ltrim($line1));
fix_insert_line($fixlinenr, "\+{");
if ($line2 !~ /^\s*$/) {
fix_insert_line($fixlinenr, "\+\t" . trim($line2));
}
}
}
# open braces for enum, union and struct go on the same line.
if ($line =~ /^.\s*{/ &&
$prevline =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?\s*$/) {
if (ERROR("OPEN_BRACE",
"open brace '{' following $1 go on the same line\n" . $hereprev) &&
$fix && $prevline =~ /^\+/ && $line =~ /^\+/) {
# join the tag line and the brace line into one
fix_delete_line($fixlinenr - 1, $prevrawline);
fix_delete_line($fixlinenr, $rawline);
my $fixedline = rtrim($prevrawline) . " {";
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $rawline;
$fixedline =~ s/^(.\s*)\{\s*/$1\t/;
# drop the second line entirely if only the brace was on it
if ($fixedline !~ /^\+\s*$/) {
fix_insert_line($fixlinenr, $fixedline);
}
}
}
# missing space after union, struct or enum definition
if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident){1,2}[=\{]/) {
if (WARN("SPACING",
"missing space after $1 definition\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/^(.\s*(?:typedef\s+)?(?:enum|union|struct)(?:\s+$Ident){1,2})([=\{])/$1 $2/;
}
}
# Function pointer declarations
# check spacing between type, funcptr, and args
# canonical declaration is "type (*funcptr)(args...)"
if ($line =~ /^.\s*($Declare)\((\s*)\*(\s*)($Ident)(\s*)\)(\s*)\(/) {
my $declare = $1;
my $pre_pointer_space = $2;
my $post_pointer_space = $3;
my $funcname = $4;
my $post_funcname_space = $5;
my $pre_args_space = $6;
# the $Declare variable will capture all spaces after the type
# so check it for a missing trailing missing space but pointer return types
# don't need a space so don't warn for those.
my $post_declare_space = "";
if ($declare =~ /(\s+)$/) {
$post_declare_space = $1;
$declare = rtrim($declare);
}
if ($declare !~ /\*$/ && $post_declare_space =~ /^$/) {
WARN("SPACING",
"missing space after return type\n" . $herecurr);
$post_declare_space = " ";
}
# unnecessary space "type (*funcptr)(args...)"
# This test is not currently implemented because these declarations are
# equivalent to
# int foo(int bar, ...)
# and this is form shouldn't/doesn't generate a checkpatch warning.
#
# elsif ($declare =~ /\s{2,}$/) {
# WARN("SPACING",
# "Multiple spaces after return type\n" . $herecurr);
# }
# unnecessary space "type ( *funcptr)(args...)"
if (defined $pre_pointer_space &&
$pre_pointer_space =~ /^\s/) {
WARN("SPACING",
"Unnecessary space after function pointer open parenthesis\n" . $herecurr);
}
# unnecessary space "type (* funcptr)(args...)"
if (defined $post_pointer_space &&
$post_pointer_space =~ /^\s/) {
WARN("SPACING",
"Unnecessary space before function pointer name\n" . $herecurr);
}
# unnecessary space "type (*funcptr )(args...)"
if (defined $post_funcname_space &&
$post_funcname_space =~ /^\s/) {
WARN("SPACING",
"Unnecessary space after function pointer name\n" . $herecurr);
}
# unnecessary space "type (*funcptr) (args...)"
if (defined $pre_args_space &&
$pre_args_space =~ /^\s/) {
WARN("SPACING",
"Unnecessary space before function pointer arguments\n" . $herecurr);
}
if (show_type("SPACING") && $fix) {
$fixed[$fixlinenr] =~
s/^(.\s*)$Declare\s*\(\s*\*\s*$Ident\s*\)\s*\(/$1 . $declare . $post_declare_space . '(*' . $funcname . ')('/ex;
}
}
# check for spacing round square brackets; allowed:
# 1. with a type on the left -- int [] a;
# 2. at the beginning of a line for slice initialisers -- [0...10] = 5,
# 3. inside a curly brace -- = { [0...10] = 5 }
# 4. inside macro arguments, example: #define HCI_ERR(err) [err] = #err
while ($line =~ /(.*?\s)\[/g) {
my ($where, $prefix) = ($-[1], $1);
if ($prefix !~ /$Type\s+$/ &&
($where != 0 || $prefix !~ /^.\s+$/) &&
$prefix !~ /[{,:]\s+$/ &&
$prefix !~ /\#define\s+.+\s+$/ &&
$prefix !~ /:\s+$/) {
if (ERROR("BRACKET_SPACE",
"space prohibited before open square bracket '['\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/^(\+.*?)\s+\[/$1\[/;
}
}
}
# check for spaces between functions and their parentheses.
while ($line =~ /($Ident)\s+\(/g) {
my $name = $1;
my $ctx_before = substr($line, 0, $-[1]);
my $ctx = "$ctx_before$name";
# Ignore those directives where spaces _are_ permitted.
if ($name =~ /^(?:
if|for|while|switch|return|case|
volatile|__volatile__|
__attribute__|format|__extension__|
asm|__asm__)$/x)
{
# cpp #define statements have non-optional spaces, ie
# if there is a space between the name and the open
# parenthesis it is simply not a parameter group.
} elsif ($ctx_before =~ /^.\s*\#\s*define\s*$/) {
# cpp #elif statement condition may start with a (
} elsif ($ctx =~ /^.\s*\#\s*elif\s*$/) {
# If this whole things ends with a type its most
# likely a typedef for a function.
} elsif ($ctx =~ /$Type$/) {
} else {
if (WARN("SPACING",
"space prohibited between function name and open parenthesis '('\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/\b$name\s+\(/$name\(/;
}
}
}
# Check operator spacing.
if (!($line=~/\#\s*include/)) {
my $fixed_line = "";
my $line_fixed = 0;
my $ops = qr{
<<=|>>=|<=|>=|==|!=|
\+=|-=|\*=|\/=|%=|\^=|\|=|&=|
=>|->|<<|>>|<|>|=|!|~|
&&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%|
\?:|\?|:
}x;
my @elements = split(/($ops|;)/, $opline);
## print("element count: <" . $#elements . ">\n");
## foreach my $el (@elements) {
## print("el: <$el>\n");
## }
my @fix_elements = ();
my $off = 0;
foreach my $el (@elements) {
push(@fix_elements, substr($rawline, $off, length($el)));
$off += length($el);
}
$off = 0;
my $blank = copy_spacing($opline);
my $last_after = -1;
for (my $n = 0; $n < $#elements; $n += 2) {
my $good = $fix_elements[$n] . $fix_elements[$n + 1];
## print("n: <$n> good: <$good>\n");
$off += length($elements[$n]);
# Pick up the preceding and succeeding characters.
my $ca = substr($opline, 0, $off);
my $cc = '';
if (length($opline) >= ($off + length($elements[$n + 1]))) {
$cc = substr($opline, $off + length($elements[$n + 1]));
}
my $cb = "$ca$;$cc";
my $a = '';
$a = 'V' if ($elements[$n] ne '');
$a = 'W' if ($elements[$n] =~ /\s$/);
$a = 'C' if ($elements[$n] =~ /$;$/);
$a = 'B' if ($elements[$n] =~ /(\[|\()$/);
$a = 'O' if ($elements[$n] eq '');
$a = 'E' if ($ca =~ /^\s*$/);
my $op = $elements[$n + 1];
my $c = '';
if (defined $elements[$n + 2]) {
$c = 'V' if ($elements[$n + 2] ne '');
$c = 'W' if ($elements[$n + 2] =~ /^\s/);
$c = 'C' if ($elements[$n + 2] =~ /^$;/);
$c = 'B' if ($elements[$n + 2] =~ /^(\)|\]|;)/);
$c = 'O' if ($elements[$n + 2] eq '');
$c = 'E' if ($elements[$n + 2] =~ /^\s*\\$/);
} else {
$c = 'E';
}
my $ctx = "${a}x${c}";
my $at = "(ctx:$ctx)";
my $ptr = substr($blank, 0, $off) . "^";
my $hereptr = "$hereline$ptr\n";
# Pull out the value of this operator.
my $op_type = substr($curr_values, $off + 1, 1);
# Get the full operator variant.
my $opv = $op . substr($curr_vars, $off, 1);
# Ignore operators passed as parameters.
if ($op_type ne 'V' &&
$ca =~ /\s$/ && $cc =~ /^\s*[,\)]/) {
# # Ignore comments
# } elsif ($op =~ /^$;+$/) {
# ; should have either the end of line or a space or \ after it
} elsif ($op eq ';') {
if ($ctx !~ /.x[WEBC]/ &&
$cc !~ /^\\/ && $cc !~ /^;/) {
if (ERROR("SPACING",
"space required after that '$op' $at\n" . $hereptr)) {
$good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
$line_fixed = 1;
}
}
# // is a comment
} elsif ($op eq '//') {
# : when part of a bitfield
} elsif ($opv eq ':B') {
# skip the bitfield test for now
# No spaces for:
# ->
} elsif ($op eq '->') {
if ($ctx =~ /Wx.|.xW/) {
if (ERROR("SPACING",
"spaces prohibited around that '$op' $at\n" . $hereptr)) {
$good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
if (defined $fix_elements[$n + 2]) {
$fix_elements[$n + 2] =~ s/^\s+//;
}
$line_fixed = 1;
}
}
# , must not have a space before and must have a space on the right.
} elsif ($op eq ',') {
my $rtrim_before = 0;
my $space_after = 0;
if ($ctx =~ /Wx./) {
if (ERROR("SPACING",
"space prohibited before that '$op' $at\n" . $hereptr)) {
$line_fixed = 1;
$rtrim_before = 1;
}
}
if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/ && $cc !~ /^\)/) {
if (ERROR("SPACING",
"space required after that '$op' $at\n" . $hereptr)) {
$line_fixed = 1;
$last_after = $n;
$space_after = 1;
}
}
if ($rtrim_before || $space_after) {
if ($rtrim_before) {
$good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
} else {
$good = $fix_elements[$n] . trim($fix_elements[$n + 1]);
}
if ($space_after) {
$good .= " ";
}
}
# '*' as part of a type definition -- reported already.
} elsif ($opv eq '*_') {
#warn "'*' is part of type\n";
# unary operators should have a space before and
# none after. May be left adjacent to another
# unary operator, or a cast
} elsif ($op eq '!' || $op eq '~' ||
$opv eq '*U' || $opv eq '-U' ||
$opv eq '&U' || $opv eq '&&U') {
if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
if (ERROR("SPACING",
"space required before that '$op' $at\n" . $hereptr)) {
if ($n != $last_after + 2) {
$good = $fix_elements[$n] . " " . ltrim($fix_elements[$n + 1]);
$line_fixed = 1;
}
}
}
if ($op eq '*' && $cc =~/\s*$Modifier\b/) {
# A unary '*' may be const
} elsif ($ctx =~ /.xW/) {
if (ERROR("SPACING",
"space prohibited after that '$op' $at\n" . $hereptr)) {
$good = $fix_elements[$n] . rtrim($fix_elements[$n + 1]);
if (defined $fix_elements[$n + 2]) {
$fix_elements[$n + 2] =~ s/^\s+//;
}
$line_fixed = 1;
}
}
# unary ++ and unary -- are allowed no space on one side.
} elsif ($op eq '++' or $op eq '--') {
if ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {
if (ERROR("SPACING",
"space required one side of that '$op' $at\n" . $hereptr)) {
$good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
$line_fixed = 1;
}
}
if ($ctx =~ /Wx[BE]/ ||
($ctx =~ /Wx./ && $cc =~ /^;/)) {
if (ERROR("SPACING",
"space prohibited before that '$op' $at\n" . $hereptr)) {
$good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
$line_fixed = 1;
}
}
if ($ctx =~ /ExW/) {
if (ERROR("SPACING",
"space prohibited after that '$op' $at\n" . $hereptr)) {
$good = $fix_elements[$n] . trim($fix_elements[$n + 1]);
if (defined $fix_elements[$n + 2]) {
$fix_elements[$n + 2] =~ s/^\s+//;
}
$line_fixed = 1;
}
}
# << and >> may either have or not have spaces both sides
} elsif ($op eq '<<' or $op eq '>>' or
$op eq '&' or $op eq '^' or $op eq '|' or
$op eq '+' or $op eq '-' or
$op eq '*' or $op eq '/' or
$op eq '%')
{
if ($check) {
if (defined $fix_elements[$n + 2] && $ctx !~ /[EW]x[EW]/) {
if (CHK("SPACING",
"spaces preferred around that '$op' $at\n" . $hereptr)) {
$good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
$fix_elements[$n + 2] =~ s/^\s+//;
$line_fixed = 1;
}
} elsif (!defined $fix_elements[$n + 2] && $ctx !~ /Wx[OE]/) {
if (CHK("SPACING",
"space preferred before that '$op' $at\n" . $hereptr)) {
$good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]);
$line_fixed = 1;
}
}
} elsif ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {
if (ERROR("SPACING",
"need consistent spacing around '$op' $at\n" . $hereptr)) {
$good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
if (defined $fix_elements[$n + 2]) {
$fix_elements[$n + 2] =~ s/^\s+//;
}
$line_fixed = 1;
}
}
# A colon needs no spaces before when it is
# terminating a case value or a label.
} elsif ($opv eq ':C' || $opv eq ':L') {
if ($ctx =~ /Wx./) {
if (ERROR("SPACING",
"space prohibited before that '$op' $at\n" . $hereptr)) {
$good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
$line_fixed = 1;
}
}
# All the others need spaces both sides.
} elsif ($ctx !~ /[EWC]x[CWE]/) {
my $ok = 0;
# Ignore email addresses <foo@bar>
if (($op eq '<' &&
$cc =~ /^\S+\@\S+>/) ||
($op eq '>' &&
$ca =~ /<\S+\@\S+$/))
{
$ok = 1;
}
# for asm volatile statements
# ignore a colon with another
# colon immediately before or after
if (($op eq ':') &&
($ca =~ /:$/ || $cc =~ /^:/)) {
$ok = 1;
}
# some macros require a separator
# argument to be in parentheses,
# e.g. (||).
if ($ca =~ /\($/ || $cc =~ /^\)/) {
$ok = 1;
}
# messages are ERROR, but ?: are CHK
if ($ok == 0) {
my $msg_level = \&ERROR;
$msg_level = \&CHK if (($op eq '?:' || $op eq '?' || $op eq ':') && $ctx =~ /VxV/);
if (&{$msg_level}("SPACING",
"spaces required around that '$op' $at\n" . $hereptr)) {
$good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
if (defined $fix_elements[$n + 2]) {
$fix_elements[$n + 2] =~ s/^\s+//;
}
$line_fixed = 1;
}
}
}
$off += length($elements[$n + 1]);
## print("n: <$n> GOOD: <$good>\n");
$fixed_line = $fixed_line . $good;
}
if (($#elements % 2) == 0) {
$fixed_line = $fixed_line . $fix_elements[$#elements];
}
if ($fix && $line_fixed && $fixed_line ne $fixed[$fixlinenr]) {
$fixed[$fixlinenr] = $fixed_line;
}
}
# check for whitespace before a non-naked semicolon
if ($line =~ /^\+.*\S\s+;\s*$/) {
if (WARN("SPACING",
"space prohibited before semicolon\n" . $herecurr) &&
$fix) {
# "1 while" loops until no more substitutions are possible
1 while $fixed[$fixlinenr] =~
s/^(\+.*\S)\s+;/$1;/;
}
}
# check for multiple assignments
# "a = b = c" is legal but obscures intent; (?!=) excludes "==" comparisons
if ($line =~ /^.\s*$Lval\s*=\s*$Lval\s*=(?!=)/) {
CHK("MULTIPLE_ASSIGNMENTS",
"multiple assignments should be avoided\n" . $herecurr);
}
## # check for multiple declarations, allowing for a function declaration
## # continuation.
## if ($line =~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Ident.*/ &&
## $line !~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Type\s*$Ident.*/) {
##
## # Remove any bracketed sections to ensure we do not
## # falsly report the parameters of functions.
## my $ln = $line;
## while ($ln =~ s/\([^\(\)]*\)//g) {
## }
## if ($ln =~ /,/) {
## WARN("MULTIPLE_DECLARATION",
## "declaring multiple variables together should be avoided\n" . $herecurr);
## }
## }
#need space before brace following if, while, etc
if (($line =~ /\(.*\)\{/ && $line !~ /\($Type\)\{/) ||
$line =~ /\b(?:else|do)\{/) {
if (ERROR("SPACING",
"space required before the open brace '{'\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/^(\+.*(?:do|else|\)))\{/$1 {/;
}
}
## # check for blank lines before declarations
## if ($line =~ /^.\t+$Type\s+$Ident(?:\s*=.*)?;/ &&
## $prevrawline =~ /^.\s*$/) {
## WARN("SPACING",
## "No blank lines before declarations\n" . $hereprev);
## }
##
# closing brace should have a space following it when it has anything
# on the line
if ($line =~ /}(?!(?:,|;|\)|\}))\S/) {
if (ERROR("SPACING",
"space required after that close brace '}'\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/}((?!(?:,|;|\)))\S)/} $1/;
}
}
# check spacing on square brackets
if ($line =~ /\[\s/ && $line !~ /\[\s*$/) {
if (ERROR("SPACING",
"space prohibited after that open square bracket '['\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/\[\s+/\[/;
}
}
if ($line =~ /\s\]/) {
if (ERROR("SPACING",
"space prohibited before that close square bracket ']'\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/\s+\]/\]/;
}
}
# check spacing on parentheses
if ($line =~ /\(\s/ && $line !~ /\(\s*(?:\\)?$/ &&
$line !~ /for\s*\(\s+;/) {
if (ERROR("SPACING",
"space prohibited after that open parenthesis '('\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/\(\s+/\(/;
}
}
if ($line =~ /(\s+)\)/ && $line !~ /^.\s*\)/ &&
$line !~ /for\s*\(.*;\s+\)/ &&
$line !~ /:\s+\)/) {
if (ERROR("SPACING",
"space prohibited before that close parenthesis ')'\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/\s+\)/\)/;
}
}
# check unnecessary parentheses around addressof/dereference single $Lvals
# ie: &(foo->bar) should be &foo->bar and *(foo->bar) should be *foo->bar
while ($line =~ /(?:[^&]&\s*|\*)\(\s*($Ident\s*(?:$Member\s*)+)\s*\)/g) {
my $var = $1;
if (CHK("UNNECESSARY_PARENTHESES",
"Unnecessary parentheses around $var\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\(\s*\Q$var\E\s*\)/$var/;
}
}
# check for unnecessary parentheses around function pointer uses
# ie: (foo->bar)(); should be foo->bar();
# but not "if (foo->bar) (" to avoid some false positives
if ($line =~ /(\bif\s*|)(\(\s*$Ident\s*(?:$Member\s*)+\))[ \t]*\(/ && $1 !~ /^if/) {
my $var = $2;
if (CHK("UNNECESSARY_PARENTHESES",
"Unnecessary parentheses around function pointer $var\n" . $herecurr) &&
$fix) {
my $var2 = deparenthesize($var);
$var2 =~ s/\s//g;
$fixed[$fixlinenr] =~ s/\Q$var\E/$var2/;
}
}
# check for unnecessary parentheses around comparisons in if uses
# when !drivers/staging or command-line uses --strict
if (($realfile !~ m@^(?:drivers/staging/)@ || $check_orig) &&
$perl_version_ok && defined($stat) &&
$stat =~ /(^.\s*if\s*($balanced_parens))/) {
my $if_stat = $1;
my $test = substr($2, 1, -1);
my $herectx;
while ($test =~ /(?:^|[^\w\&\!\~])+\s*\(\s*([\&\!\~]?\s*$Lval\s*(?:$Compare\s*$FuncArg)?)\s*\)/g) {
my $match = $1;
# avoid parentheses around potential macro args
next if ($match =~ /^\s*\w+\s*$/);
if (!defined($herectx)) {
$herectx = $here . "\n";
my $cnt = statement_rawlines($if_stat);
for (my $n = 0; $n < $cnt; $n++) {
my $rl = raw_line($linenr, $n);
$herectx .= $rl . "\n";
last if $rl =~ /^[ \+].*\{/;
}
}
CHK("UNNECESSARY_PARENTHESES",
"Unnecessary parentheses around '$match'\n" . $herectx);
}
}
# check that goto labels aren't indented (allow a single space indentation)
# and ignore bitfield definitions like foo:1
# Strictly, labels can have whitespace after the identifier and before the :
# but this is not allowed here as many ?: uses would appear to be labels
if ($sline =~ /^.\s+[A-Za-z_][A-Za-z\d_]*:(?!\s*\d+)/ &&
$sline !~ /^. [A-Za-z\d_][A-Za-z\d_]*:/ &&
$sline !~ /^.\s+default:/) {
if (WARN("INDENTED_LABEL",
"labels should not be indented\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/^(.)\s+/$1/;
}
}
# return is not a function
if (defined($stat) && $stat =~ /^.\s*return(\s*)\(/s) {
my $spacing = $1;
if ($perl_version_ok &&
$stat =~ /^.\s*return\s*($balanced_parens)\s*;\s*$/) {
my $value = $1;
$value = deparenthesize($value);
if ($value =~ m/^\s*$FuncArg\s*(?:\?|$)/) {
ERROR("RETURN_PARENTHESES",
"return is not a function, parentheses are not required\n" . $herecurr);
}
} elsif ($spacing !~ /\s+/) {
ERROR("SPACING",
"space required before the open parenthesis '('\n" . $herecurr);
}
}
# unnecessary return in a void function
# at end-of-function, with the previous line a single leading tab, then return;
# and the line before that not a goto label target like "out:"
if ($sline =~ /^[ \+]}\s*$/ &&
$prevline =~ /^\+\treturn\s*;\s*$/ &&
$linenr >= 3 &&
$lines[$linenr - 3] =~ /^[ +]/ &&
$lines[$linenr - 3] !~ /^[ +]\s*$Ident\s*:/) {
WARN("RETURN_VOID",
"void function return statements are not generally useful\n" . $hereprev);
}
# if statements using unnecessary parentheses - ie: if ((foo == bar))
# Count the run of consecutive '(' after 'if', then require the same
# number of ')' closing a simple "lval <compare> lval" expression, so
# only fully doubled-up conditions are flagged.
if ($perl_version_ok &&
$line =~ /\bif\s*((?:\(\s*){2,})/) {
my $openparens = $1;
# tr/// in scalar context returns the number of '(' characters
my $count = $openparens =~ tr@\(@\(@;
my $msg = "";
if ($line =~ /\bif\s*(?:\(\s*){$count,$count}$LvalOrFunc\s*($Compare)\s*$LvalOrFunc(?:\s*\)){$count,$count}/) {
my $comp = $4; #Not $1 because of $LvalOrFunc
# "if ((a == b))" is a common typo for an intended assignment
$msg = " - maybe == should be = ?" if ($comp eq "==");
WARN("UNNECESSARY_PARENTHESES",
"Unnecessary parentheses$msg\n" . $herecurr);
}
}
# comparisons with a constant or upper case identifier on the left
# avoid cases like "foo + BAR < baz"
# only fix matches surrounded by parentheses to avoid incorrect
# conversions like "FOO < baz() + 5" being "misfixed" to "baz() > FOO + 5"
if ($perl_version_ok &&
!($line =~ /^\+(.*)($Constant|[A-Z_][A-Z0-9_]*)\s*($Compare)\s*(.*)($Constant|[A-Z_][A-Z0-9_]*)(.*)/) &&
$line =~ /^\+(.*)\b($Constant|[A-Z_][A-Z0-9_]*)\s*($Compare)\s*($LvalOrFunc)/) {
my $lead = $1;
my $const = $2;
my $comp = $3;
my $to = $4;
my $newcomp = $comp;
if ($lead !~ /(?:$Operators|\.)\s*$/ &&
$to !~ /^(?:Constant|[A-Z_][A-Z0-9_]*)$/ &&
WARN("CONSTANT_COMPARISON",
"Comparisons should place the constant on the right side of the test\n" . $herecurr) &&
$fix) {
if ($comp eq "<") {
$newcomp = ">";
} elsif ($comp eq "<=") {
$newcomp = ">=";
} elsif ($comp eq ">") {
$newcomp = "<";
} elsif ($comp eq ">=") {
$newcomp = "<=";
}
$fixed[$fixlinenr] =~ s/\(\s*\Q$const\E\s*$Compare\s*\Q$to\E\s*\)/($to $newcomp $const)/;
}
}
# Return of what appears to be an errno should normally be negative
# (convention: "return -EINVAL;", not "return EINVAL;").
if ($sline =~ /\breturn(?:\s*\(+\s*|\s+)(E[A-Z]+)(?:\s*\)+\s*|\s*)[;:,]/) {
	my $name = $1;
	# EOF and ERROR merely look like errno names; skip them.
	if ($name ne 'EOF' && $name ne 'ERROR') {
		# Only warn when NOT dealing with 'lib/posix/*.c': the POSIX
		# compatibility layer returns positive errno values by design.
		# The original pattern /.*\/lib\/posix\/*.c/ both inverted this
		# intent (it warned only *inside* lib/posix) and was a malformed
		# glob-as-regex ('/*' matches zero or more slashes and '.'
		# matches any character).
		if ($realfile !~ m@(?:^|/)lib/posix/.*\.c$@) {
			WARN("USE_NEGATIVE_ERRNO",
			     "return of an errno should typically be negative (ie: return -$name)\n" . $herecurr);
		}
	}
}
# Need a space before open parenthesis after if, while etc
if ($line =~ /\b(if|while|for|switch)\(/) {
if (ERROR("SPACING",
"space required before the open parenthesis '('\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/\b(if|while|for|switch)\(/$1 \(/;
}
}
# Check for illegal assignment in if conditional -- and check for trailing
# statements after the conditional.
if ($line =~ /do\s*(?!{)/) {
($stat, $cond, $line_nr_next, $remain_next, $off_next) =
ctx_statement_block($linenr, $realcnt, 0)
if (!defined $stat);
my ($stat_next) = ctx_statement_block($line_nr_next,
$remain_next, $off_next);
$stat_next =~ s/\n./\n /g;
##print "stat<$stat> stat_next<$stat_next>\n";
if ($stat_next =~ /^\s*while\b/) {
# If the statement carries leading newlines,
# then count those as offsets.
my ($whitespace) =
($stat_next =~ /^((?:\s*\n[+-])*\s*)/s);
my $offset =
statement_rawlines($whitespace) - 1;
$suppress_whiletrailers{$line_nr_next +
$offset} = 1;
}
}
if (!defined $suppress_whiletrailers{$linenr} &&
defined($stat) && defined($cond) &&
$line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
my ($s, $c) = ($stat, $cond);
if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/s) {
if (ERROR("ASSIGN_IN_IF",
"do not use assignment in if condition\n" . $herecurr) &&
$fix && $perl_version_ok) {
if ($rawline =~ /^\+(\s+)if\s*\(\s*(\!)?\s*\(\s*(($Lval)\s*=\s*$LvalOrFunc)\s*\)\s*(?:($Compare)\s*($FuncArg))?\s*\)\s*(\{)?\s*$/) {
my $space = $1;
my $not = $2;
my $statement = $3;
my $assigned = $4;
my $test = $8;
my $against = $9;
my $brace = $15;
fix_delete_line($fixlinenr, $rawline);
fix_insert_line($fixlinenr, "$space$statement;");
my $newline = "${space}if (";
$newline .= '!' if defined($not);
$newline .= '(' if (defined $not && defined($test) && defined($against));
$newline .= "$assigned";
$newline .= " $test $against" if (defined($test) && defined($against));
$newline .= ')' if (defined $not && defined($test) && defined($against));
$newline .= ')';
$newline .= " {" if (defined($brace));
fix_insert_line($fixlinenr + 1, $newline);
}
}
}
# Find out what is on the end of the line after the
# conditional.
substr($s, 0, length($c), '');
$s =~ s/\n.*//g;
$s =~ s/$;//g; # Remove any comments
if (length($c) && $s !~ /^\s*{?\s*\\*\s*$/ &&
$c !~ /}\s*while\s*/)
{
# Find out how long the conditional actually is.
my @newlines = ($c =~ /\n/gs);
my $cond_lines = 1 + $#newlines;
my $stat_real = '';
$stat_real = raw_line($linenr, $cond_lines)
. "\n" if ($cond_lines);
if (defined($stat_real) && $cond_lines > 1) {
$stat_real = "[...]\n$stat_real";
}
ERROR("TRAILING_STATEMENTS",
"trailing statements should be on next line\n" . $herecurr . $stat_real);
}
}
# Check for bitwise tests written as boolean
# Flags a hex constant used directly as an operand of '&&' or '||'
# inside a bracketed/parenthesised expression, e.g. "if (flags && 0x10)",
# which almost always meant a bitwise '&' or '|' mask test instead.
# (Comments cannot be placed inside the /x regex below without changing
# the compiled pattern, so the structure is: a hex literal either
# followed by, or preceded by, a boolean operator, bounded by [ ( ) ].)
if ($line =~ /
(?:
(?:\[|\(|\&\&|\|\|)
\s*0[xX][0-9]+\s*
(?:\&\&|\|\|)
|
(?:\&\&|\|\|)
\s*0[xX][0-9]+\s*
(?:\&\&|\|\||\)|\])
)/x)
{
# NOTE(review): [0-9]+ matches only decimal digits after the 0x
# prefix, so constants such as 0xaf are never flagged -- confirm
# whether that restriction is intentional.
WARN("HEXADECIMAL_BOOLEAN_TEST",
"boolean test with hexadecimal, perhaps just 1 \& or \|?\n" . $herecurr);
}
# if and else should not have general statements after it
if ($line =~ /^.\s*(?:}\s*)?else\b(.*)/) {
my $s = $1;
$s =~ s/$;//g; # Remove any comments
if ($s !~ /^\s*(?:\sif|(?:{|)\s*\\?\s*$)/) {
ERROR("TRAILING_STATEMENTS",
"trailing statements should be on next line\n" . $herecurr);
}
}
# if should not continue a brace
if ($line =~ /}\s*if\b/) {
ERROR("TRAILING_STATEMENTS",
"trailing statements should be on next line (or did you mean 'else if'?)\n" .
$herecurr);
}
# case and default should not have general statements after them
if ($line =~ /^.\s*(?:case\s*.*|default\s*):/g &&
$line !~ /\G(?:
(?:\s*$;*)(?:\s*{)?(?:\s*$;*)(?:\s*\\)?\s*$|
\s*return\s+
)/xg)
{
ERROR("TRAILING_STATEMENTS",
"trailing statements should be on next line\n" . $herecurr);
}
# Check for }<nl>else {, these must be at the same
# indent level to be relevant to each other.
if ($prevline=~/}\s*$/ and $line=~/^.\s*else\s*/ &&
$previndent == $indent) {
if (ERROR("ELSE_AFTER_BRACE",
"else should follow close brace '}'\n" . $hereprev) &&
$fix && $prevline =~ /^\+/ && $line =~ /^\+/) {
fix_delete_line($fixlinenr - 1, $prevrawline);
fix_delete_line($fixlinenr, $rawline);
my $fixedline = $prevrawline;
$fixedline =~ s/}\s*$//;
if ($fixedline !~ /^\+\s*$/) {
fix_insert_line($fixlinenr, $fixedline);
}
$fixedline = $rawline;
$fixedline =~ s/^(.\s*)else/$1} else/;
fix_insert_line($fixlinenr, $fixedline);
}
}
if ($prevline=~/}\s*$/ and $line=~/^.\s*while\s*/ &&
$previndent == $indent) {
my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
# Find out what is on the end of the line after the
# conditional.
substr($s, 0, length($c), '');
$s =~ s/\n.*//g;
if ($s =~ /^\s*;/) {
if (ERROR("WHILE_AFTER_BRACE",
"while should follow close brace '}'\n" . $hereprev) &&
$fix && $prevline =~ /^\+/ && $line =~ /^\+/) {
fix_delete_line($fixlinenr - 1, $prevrawline);
fix_delete_line($fixlinenr, $rawline);
my $fixedline = $prevrawline;
my $trailing = $rawline;
$trailing =~ s/^\+//;
$trailing = trim($trailing);
$fixedline =~ s/}\s*$/} $trailing/;
fix_insert_line($fixlinenr, $fixedline);
}
}
}
#Specific variable tests
while ($line =~ m{($Constant|$Lval)}g) {
my $var = $1;
#CamelCase
if ($var !~ /^$Constant$/ &&
$var =~ /[A-Z][a-z]|[a-z][A-Z]/ &&
#Ignore Page<foo> variants
$var !~ /^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
#Ignore SI style variants like nS, mV and dB
#(ie: max_uV, regulator_min_uA_show, RANGE_mA_VALUE)
$var !~ /^(?:[a-z0-9_]*|[A-Z0-9_]*)?_?[a-z][A-Z](?:_[a-z0-9_]+|_[A-Z0-9_]+)?$/ &&
#Ignore some three character SI units explicitly, like MiB and KHz
$var !~ /^(?:[a-z_]*?)_?(?:[KMGT]iB|[KMGT]?Hz)(?:_[a-z_]+)?$/) {
while ($var =~ m{($Ident)}g) {
my $word = $1;
next if ($word !~ /[A-Z][a-z]|[a-z][A-Z]/);
if ($check) {
seed_camelcase_includes();
if (!$file && !$camelcase_file_seeded) {
seed_camelcase_file($realfile);
$camelcase_file_seeded = 1;
}
}
if (!defined $camelcase{$word}) {
$camelcase{$word} = 1;
CHK("CAMELCASE",
"Avoid CamelCase: <$word>\n" . $herecurr);
}
}
}
}
#no spaces allowed after \ in define
if ($line =~ /\#\s*define.*\\\s+$/) {
if (WARN("WHITESPACE_AFTER_LINE_CONTINUATION",
"Whitespace after \\ makes next lines useless\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\s+$//;
}
}
# warn if <asm/foo.h> is #included and <linux/foo.h> is available and includes
# itself <asm/foo.h> (uses RAW line)
if ($tree && $rawline =~ m{^.\s*\#\s*include\s*\<asm\/(.*)\.h\>}) {
my $file = "$1.h";
my $checkfile = "include/linux/$file";
if (-f "$root/$checkfile" &&
$realfile ne $checkfile &&
$1 !~ /$allowed_asm_includes/)
{
my $asminclude = `grep -Ec "#include\\s+<asm/$file>" $root/$checkfile`;
if ($asminclude > 0) {
if ($realfile =~ m{^arch/}) {
CHK("ARCH_INCLUDE_LINUX",
"Consider using #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
} else {
WARN("INCLUDE_LINUX",
"Use #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
}
}
}
}
# multi-statement macros should be enclosed in a do while loop, grab the
# first statement and ensure its the whole macro if its not enclosed
# in a known good container
if ($realfile !~ m@/vmlinux.lds.h$@ &&
$line =~ /^.\s*\#\s*define\s*$Ident(\()?/) {
my $ln = $linenr;
my $cnt = $realcnt;
my ($off, $dstat, $dcond, $rest);
my $ctx = '';
my $has_flow_statement = 0;
my $has_arg_concat = 0;
($dstat, $dcond, $ln, $cnt, $off) =
ctx_statement_block($linenr, $realcnt, 0);
$ctx = $dstat;
#print "dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\n";
#print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n";
$has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/);
$has_arg_concat = 1 if (($ctx =~ /\#\#/ || $ctx =~ /UTIL_CAT/) && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/);
$dstat =~ s/^.\s*\#\s*define\s+$Ident(\([^\)]*\))?\s*//;
my $define_args = $1;
my $define_stmt = $dstat;
my @def_args = ();
if (defined $define_args && $define_args ne "") {
$define_args = substr($define_args, 1, length($define_args) - 2);
$define_args =~ s/\s*//g;
$define_args =~ s/\\\+?//g;
@def_args = split(",", $define_args);
}
$dstat =~ s/$;//g;
$dstat =~ s/\\\n.//g;
$dstat =~ s/^\s*//s;
$dstat =~ s/\s*$//s;
# Flatten any parentheses and braces
while ($dstat =~ s/\([^\(\)]*\)/1/ ||
$dstat =~ s/\{[^\{\}]*\}/1/ ||
$dstat =~ s/.\[[^\[\]]*\]/1/)
{
}
# Flatten any obvious string concatenation.
while ($dstat =~ s/($String)\s*$Ident/$1/ ||
$dstat =~ s/$Ident\s*($String)/$1/)
{
}
# Make asm volatile uses seem like a generic function
$dstat =~ s/\b_*asm_*\s+_*volatile_*\b/asm_volatile/g;
my $exceptions = qr{
$Declare|
module_param_named|
MODULE_PARM_DESC|
DECLARE_PER_CPU|
DEFINE_PER_CPU|
__typeof__\(|
union|
struct|
\.$Ident\s*=\s*|
^\"|\"$|
^\[
}x;
#print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
$ctx =~ s/\n*$//;
my $stmt_cnt = statement_rawlines($ctx);
my $herectx = get_stat_here($linenr, $stmt_cnt, $here);
if ($dstat ne '' &&
$dstat !~ /^(?:$Ident|-?$Constant),$/ && # 10, // foo(),
$dstat !~ /^(?:$Ident|-?$Constant);$/ && # foo();
$dstat !~ /^[!~-]?(?:$Lval|$Constant)$/ && # 10 // foo() // !foo // ~foo // -foo // foo->bar // foo.bar->baz
$dstat !~ /^'X'$/ && $dstat !~ /^'XX'$/ && # character constants
$dstat !~ /$exceptions/ &&
$dstat !~ /^\.$Ident\s*=/ && # .foo =
$dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ && # stringification #foo
$dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ && # do {...} while (...); // do {...} while (...)
$dstat !~ /^for\s*$Constant$/ && # for (...)
$dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ && # for (...) bar()
$dstat !~ /^do\s*{/ && # do {...
$dstat !~ /^\(\{/ && # ({...
$ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
{
if ($dstat =~ /^\s*if\b/) {
ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
"Macros starting with if should be enclosed by a do - while loop to avoid possible if/else logic defects\n" . "$herectx");
} elsif ($dstat =~ /;/) {
ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
"Macros with multiple statements should be enclosed in a do - while loop\n" . "$herectx");
} else {
WARN("COMPLEX_MACRO",
"Macros with complex values should be enclosed in parentheses\n" . "$herectx");
}
}
# Make $define_stmt single line, comment-free, etc
my @stmt_array = split('\n', $define_stmt);
my $first = 1;
$define_stmt = "";
foreach my $l (@stmt_array) {
$l =~ s/\\$//;
if ($first) {
$define_stmt = $l;
$first = 0;
} elsif ($l =~ /^[\+ ]/) {
$define_stmt .= substr($l, 1);
}
}
$define_stmt =~ s/$;//g;
$define_stmt =~ s/\s+/ /g;
$define_stmt = trim($define_stmt);
# check if any macro arguments are reused (ignore '...' and 'type')
foreach my $arg (@def_args) {
next if ($arg =~ /\.\.\./);
next if ($arg =~ /^type$/i);
my $tmp_stmt = $define_stmt;
$tmp_stmt =~ s/\b(sizeof|typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
$tmp_stmt =~ s/\#+\s*$arg\b//g;
$tmp_stmt =~ s/\b$arg\s*\#\#//g;
my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g;
if ($use_cnt > 1) {
CHK("MACRO_ARG_REUSE",
"Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx");
}
# check if any macro arguments may have other precedence issues
if ($tmp_stmt =~ m/($Operators)?\s*\b$arg\b\s*($Operators)?/m &&
((defined($1) && $1 ne ',') ||
(defined($2) && $2 ne ','))) {
CHK("MACRO_ARG_PRECEDENCE",
"Macro argument '$arg' may be better as '($arg)' to avoid precedence issues\n" . "$herectx");
}
}
# check for macros with flow control, but without ## concatenation
# ## concatenation is commonly a macro that defines a function so ignore those
if ($has_flow_statement && !$has_arg_concat) {
my $cnt = statement_rawlines($ctx);
my $herectx = get_stat_here($linenr, $cnt, $here);
WARN("MACRO_WITH_FLOW_CONTROL",
"Macros with flow control statements should be avoided\n" . "$herectx");
}
# check for line continuations outside of #defines, preprocessor #, and asm
} else {
if ($prevline !~ /^..*\\$/ &&
$line !~ /^\+\s*\#.*\\$/ && # preprocessor
$line !~ /^\+.*\b(__asm__|asm)\b.*\\$/ && # asm
$line =~ /^\+.*\\$/) {
WARN("LINE_CONTINUATIONS",
"Avoid unnecessary line continuations\n" . $herecurr);
}
}
# do {} while (0) macro tests:
# single-statement macros do not need to be enclosed in do while (0) loop,
# macro should not end with a semicolon
if ($perl_version_ok &&
$realfile !~ m@/vmlinux.lds.h$@ &&
$line =~ /^.\s*\#\s*define\s+$Ident(\()?/) {
my $ln = $linenr;
my $cnt = $realcnt;
my ($off, $dstat, $dcond, $rest);
my $ctx = '';
($dstat, $dcond, $ln, $cnt, $off) =
ctx_statement_block($linenr, $realcnt, 0);
$ctx = $dstat;
$dstat =~ s/\\\n.//g;
$dstat =~ s/$;/ /g;
if ($dstat =~ /^\+\s*#\s*define\s+$Ident\s*${balanced_parens}\s*do\s*{(.*)\s*}\s*while\s*\(\s*0\s*\)\s*([;\s]*)\s*$/) {
my $stmts = $2;
my $semis = $3;
$ctx =~ s/\n*$//;
my $cnt = statement_rawlines($ctx);
my $herectx = get_stat_here($linenr, $cnt, $here);
if (($stmts =~ tr/;/;/) == 1 &&
$stmts !~ /^\s*(if|while|for|switch)\b/) {
WARN("SINGLE_STATEMENT_DO_WHILE_MACRO",
"Single statement macros should not use a do {} while (0) loop\n" . "$herectx");
}
if (defined $semis && $semis ne "") {
WARN("DO_WHILE_MACRO_WITH_TRAILING_SEMICOLON",
"do {} while (0) macros should not be semicolon terminated\n" . "$herectx");
}
} elsif ($dstat =~ /^\+\s*#\s*define\s+$Ident.*;\s*$/) {
$ctx =~ s/\n*$//;
my $cnt = statement_rawlines($ctx);
my $herectx = get_stat_here($linenr, $cnt, $here);
WARN("TRAILING_SEMICOLON",
"macros should not use a trailing semicolon\n" . "$herectx");
}
}
# check for redundant bracing round if etc
if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) {
my ($level, $endln, @chunks) =
ctx_statement_full($linenr, $realcnt, 1);
#print "chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\n";
#print "APW: <<$chunks[1][0]>><<$chunks[1][1]>>\n";
if ($#chunks > 0 && $level == 0) {
my @allowed = ();
my $allow = 0;
my $seen = 0;
my $herectx = $here . "\n";
my $ln = $linenr - 1;
for my $chunk (@chunks) {
my ($cond, $block) = @{$chunk};
# If the condition carries leading newlines, then count those as offsets.
my ($whitespace) = ($cond =~ /^((?:\s*\n[+-])*\s*)/s);
my $offset = statement_rawlines($whitespace) - 1;
$allowed[$allow] = 0;
#print "COND<$cond> whitespace<$whitespace> offset<$offset>\n";
# We have looked at and allowed this specific line.
$suppress_ifbraces{$ln + $offset} = 1;
$herectx .= "$rawlines[$ln + $offset]\n[...]\n";
$ln += statement_rawlines($block) - 1;
substr($block, 0, length($cond), '');
$seen++ if ($block =~ /^\s*{/);
#print "cond<$cond> block<$block> allowed<$allowed[$allow]>\n";
if (statement_lines($cond) > 1) {
#print "APW: ALLOWED: cond<$cond>\n";
$allowed[$allow] = 1;
}
if ($block =~/\b(?:if|for|while)\b/) {
#print "APW: ALLOWED: block<$block>\n";
$allowed[$allow] = 1;
}
if (statement_block_size($block) > 1) {
#print "APW: ALLOWED: lines block<$block>\n";
$allowed[$allow] = 1;
}
$allow++;
}
if ($seen) {
my $sum_allowed = 0;
foreach (@allowed) {
$sum_allowed += $_;
}
if ($sum_allowed == 0) {
WARN("BRACES",
"braces {} are not necessary for any arm of this statement\n" . $herectx);
} elsif ($sum_allowed != $allow &&
$seen != $allow) {
CHK("BRACES",
"braces {} should be used on all arms of this statement\n" . $herectx);
}
}
}
}
if (!defined $suppress_ifbraces{$linenr - 1} &&
$line =~ /\b(if|while|for|else)\b/) {
my $allowed = 0;
# Check the pre-context.
if (substr($line, 0, $-[0]) =~ /(\}\s*)$/) {
#print "APW: ALLOWED: pre<$1>\n";
$allowed = 1;
}
my ($level, $endln, @chunks) =
ctx_statement_full($linenr, $realcnt, $-[0]);
# Check the condition.
my ($cond, $block) = @{$chunks[0]};
#print "CHECKING<$linenr> cond<$cond> block<$block>\n";
if (defined $cond) {
substr($block, 0, length($cond), '');
}
if (statement_lines($cond) > 1) {
#print "APW: ALLOWED: cond<$cond>\n";
$allowed = 1;
}
if ($block =~/\b(?:if|for|while)\b/) {
#print "APW: ALLOWED: block<$block>\n";
$allowed = 1;
}
if (statement_block_size($block) > 1) {
#print "APW: ALLOWED: lines block<$block>\n";
$allowed = 1;
}
# Check the post-context.
if (defined $chunks[1]) {
my ($cond, $block) = @{$chunks[1]};
if (defined $cond) {
substr($block, 0, length($cond), '');
}
if ($block =~ /^\s*\{/) {
#print "APW: ALLOWED: chunk-1 block<$block>\n";
$allowed = 1;
}
}
if ($level == 0 && $block =~ /^\s*\{/ && !$allowed) {
my $cnt = statement_rawlines($block);
my $herectx = get_stat_here($linenr, $cnt, $here);
WARN("BRACES",
"braces {} are not necessary for single statement blocks\n" . $herectx);
}
}
# check for single line unbalanced braces
if ($sline =~ /^.\s*\}\s*else\s*$/ ||
$sline =~ /^.\s*else\s*\{\s*$/) {
CHK("BRACES", "Unbalanced braces around else statement\n" . $herecurr);
}
# check for unnecessary blank lines around braces
if (($line =~ /^.\s*}\s*$/ && $prevrawline =~ /^.\s*$/)) {
if (CHK("BRACES",
"Blank lines aren't necessary before a close brace '}'\n" . $hereprev) &&
$fix && $prevrawline =~ /^\+/) {
fix_delete_line($fixlinenr - 1, $prevrawline);
}
}
if (($rawline =~ /^.\s*$/ && $prevline =~ /^..*{\s*$/)) {
if (CHK("BRACES",
"Blank lines aren't necessary after an open brace '{'\n" . $hereprev) &&
$fix) {
fix_delete_line($fixlinenr, $rawline);
}
}
# no volatiles please
my $asm_volatile = qr{\b(__asm__|asm)\s+(__volatile__|volatile)\b};
if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/) {
WARN("VOLATILE",
"Use of volatile is usually wrong: see Documentation/process/volatile-considered-harmful.rst\n" . $herecurr);
}
# Check for user-visible strings broken across lines, which breaks the ability
# to grep for the string. Make exceptions when the previous string ends in a
# newline (multiple lines in one string constant) or '\t', '\r', ';', or '{'
# (common in inline assembly) or is a octal \123 or hexadecimal \xaf value
if ($line =~ /^\+\s*$String/ &&
$prevline =~ /"\s*$/ &&
$prevrawline !~ /(?:\\(?:[ntr]|[0-7]{1,3}|x[0-9a-fA-F]{1,2})|;\s*|\{\s*)"\s*$/) {
if (WARN("SPLIT_STRING",
"quoted string split across lines\n" . $hereprev) &&
$fix &&
$prevrawline =~ /^\+.*"\s*$/ &&
$last_coalesced_string_linenr != $linenr - 1) {
my $extracted_string = get_quoted_string($line, $rawline);
my $comma_close = "";
if ($rawline =~ /\Q$extracted_string\E(\s*\)\s*;\s*$|\s*,\s*)/) {
$comma_close = $1;
}
fix_delete_line($fixlinenr - 1, $prevrawline);
fix_delete_line($fixlinenr, $rawline);
my $fixedline = $prevrawline;
$fixedline =~ s/"\s*$//;
$fixedline .= substr($extracted_string, 1) . trim($comma_close);
fix_insert_line($fixlinenr - 1, $fixedline);
$fixedline = $rawline;
$fixedline =~ s/\Q$extracted_string\E\Q$comma_close\E//;
if ($fixedline !~ /\+\s*$/) {
fix_insert_line($fixlinenr, $fixedline);
}
$last_coalesced_string_linenr = $linenr;
}
}
# check for missing a space in a string concatenation
if ($prevrawline =~ /[^\\]\w"$/ && $rawline =~ /^\+[\t ]+"\w/) {
WARN('MISSING_SPACE',
"break quoted strings at a space character\n" . $hereprev);
}
# check for an embedded function name in a string when the function is known
# This does not work very well for -f --file checking as it depends on patch
# context providing the function name or a single line form for in-file
# function declarations
if ($line =~ /^\+.*$String/ &&
defined($context_function) &&
get_quoted_string($line, $rawline) =~ /\b$context_function\b/ &&
length(get_quoted_string($line, $rawline)) != (length($context_function) + 2)) {
WARN("EMBEDDED_FUNCTION_NAME",
"Prefer using '\"%s...\", __func__' to using '$context_function', this function's name, in a string\n" . $herecurr);
}
# check for spaces before a quoted newline
if ($rawline =~ /^.*\".*\s\\n/) {
if (WARN("QUOTED_WHITESPACE_BEFORE_NEWLINE",
"unnecessary whitespace before a quoted newline\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/^(\+.*\".*)\s+\\n/$1\\n/;
}
}
# concatenated string without spaces between elements
if ($line =~ /$String[A-Za-z0-9_]/ || $line =~ /[A-Za-z0-9_]$String/) {
if (CHK("CONCATENATED_STRING",
"Concatenated strings should use spaces between elements\n" . $herecurr) &&
$fix) {
while ($line =~ /($String)/g) {
my $extracted_string = substr($rawline, $-[0], $+[0] - $-[0]);
$fixed[$fixlinenr] =~ s/\Q$extracted_string\E([A-Za-z0-9_])/$extracted_string $1/;
$fixed[$fixlinenr] =~ s/([A-Za-z0-9_])\Q$extracted_string\E/$1 $extracted_string/;
}
}
}
# uncoalesced string fragments
if ($line =~ /$String\s*"/) {
if (WARN("STRING_FRAGMENTS",
"Consecutive strings are generally better as a single string\n" . $herecurr) &&
$fix) {
while ($line =~ /($String)(?=\s*")/g) {
my $extracted_string = substr($rawline, $-[0], $+[0] - $-[0]);
$fixed[$fixlinenr] =~ s/\Q$extracted_string\E\s*"/substr($extracted_string, 0, -1)/e;
}
}
}
# check for non-standard and hex prefixed decimal printf formats
# $line has string-literal contents masked out (runs of 'X'), so the
# loop matches the masked literal in $line and then recovers the real
# text from $rawline using the match offsets @-/@+ -- presumably the
# sanitisation happens earlier in this script; verify against caller.
my $show_L = 1; #don't show the same defect twice
my $show_Z = 1;
while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) {
my $string = substr($rawline, $-[1], $+[1] - $-[1]);
# neutralise literal %% so it can't be misread as a conversion
$string =~ s/%%/__/g;
# check for %L
# %Ld/%Lu etc. are a glibc-ism; ISO C spells it %lld/%llu
if ($show_L && $string =~ /%[\*\d\.\$]*L([diouxX])/) {
WARN("PRINTF_L",
"\%L$1 is non-standard C, use %ll$1\n" . $herecurr);
$show_L = 0;
}
# check for %Z
# %Zd/%Zu are a glibc-ism; ISO C spells it %zd/%zu for size_t
if ($show_Z && $string =~ /%[\*\d\.\$]*Z([diouxX])/) {
WARN("PRINTF_Z",
"%Z$1 is non-standard C, use %z$1\n" . $herecurr);
$show_Z = 0;
}
# check for 0x<decimal>
# a literal "0x" prefix followed by a decimal conversion prints
# misleading output like "0x10" for the value ten
# NOTE(review): '\L' inside the character class is Perl's lowercase
# escape, not a literal 'L', so "0x%Ld" may escape this check --
# confirm against upstream checkpatch behaviour.
if ($string =~ /0x%[\*\d\.\$\Llzth]*[diou]/) {
ERROR("PRINTF_0XDECIMAL",
"Prefixing 0x with decimal output is defective\n" . $herecurr);
}
}
# check for line continuations in quoted strings with odd counts of "
if ($rawline =~ /\\$/ && $sline =~ tr/"/"/ % 2) {
WARN("LINE_CONTINUATIONS",
"Avoid line continuations in quoted strings\n" . $herecurr);
}
# warn about #if 0
if ($line =~ /^.\s*\#\s*if\s+0\b/) {
WARN("IF_0",
"Consider removing the code enclosed by this #if 0 and its #endif\n" . $herecurr);
}
# warn about #if 1
if ($line =~ /^.\s*\#\s*if\s+1\b/) {
WARN("IF_1",
"Consider removing the #if 1 and its #endif\n" . $herecurr);
}
# check for needless "if (<foo>) fn(<foo>)" uses
if ($prevline =~ /\bif\s*\(\s*($Lval)\s*\)/) {
my $tested = quotemeta($1);
my $expr = '\s*\(\s*' . $tested . '\s*\)\s*;';
if ($line =~ /\b(kfree|usb_free_urb|debugfs_remove(?:_recursive)?|(?:kmem_cache|mempool|dma_pool)_destroy)$expr/) {
my $func = $1;
if (WARN('NEEDLESS_IF',
"$func(NULL) is safe and this check is probably not required\n" . $hereprev) &&
$fix) {
my $do_fix = 1;
my $leading_tabs = "";
my $new_leading_tabs = "";
if ($lines[$linenr - 2] =~ /^\+(\t*)if\s*\(\s*$tested\s*\)\s*$/) {
$leading_tabs = $1;
} else {
$do_fix = 0;
}
if ($lines[$linenr - 1] =~ /^\+(\t+)$func\s*\(\s*$tested\s*\)\s*;\s*$/) {
$new_leading_tabs = $1;
if (length($leading_tabs) + 1 ne length($new_leading_tabs)) {
$do_fix = 0;
}
} else {
$do_fix = 0;
}
if ($do_fix) {
fix_delete_line($fixlinenr - 1, $prevrawline);
$fixed[$fixlinenr] =~ s/^\+$new_leading_tabs/\+$leading_tabs/;
}
}
}
}
# check for unnecessary "Out of Memory" messages
if ($line =~ /^\+.*\b$logFunctions\s*\(/ &&
$prevline =~ /^[ \+]\s*if\s*\(\s*(\!\s*|NULL\s*==\s*)?($Lval)(\s*==\s*NULL\s*)?\s*\)/ &&
(defined $1 || defined $3) &&
$linenr > 3) {
my $testval = $2;
my $testline = $lines[$linenr - 3];
my ($s, $c) = ctx_statement_block($linenr - 3, $realcnt, 0);
# print("line: <$line>\nprevline: <$prevline>\ns: <$s>\nc: <$c>\n\n\n");
if ($s =~ /(?:^|\n)[ \+]\s*(?:$Type\s*)?\Q$testval\E\s*=\s*(?:\([^\)]*\)\s*)?\s*$allocFunctions\s*\(/ &&
$s !~ /\b__GFP_NOWARN\b/ ) {
WARN("OOM_MESSAGE",
"Possible unnecessary 'out of memory' message\n" . $hereprev);
}
}
# check for logging functions with KERN_<LEVEL>
if ($line !~ /printk(?:_ratelimited|_once)?\s*\(/ &&
$line =~ /\b$logFunctions\s*\(.*\b(KERN_[A-Z]+)\b/) {
my $level = $1;
if (WARN("UNNECESSARY_KERN_LEVEL",
"Possible unnecessary $level\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\s*$level\s*//;
}
}
# check for logging continuations
if ($line =~ /\bprintk\s*\(\s*KERN_CONT\b|\bpr_cont\s*\(/) {
WARN("LOGGING_CONTINUATION",
"Avoid logging continuation uses where feasible\n" . $herecurr);
}
# check for mask then right shift without a parentheses
if ($perl_version_ok &&
$line =~ /$LvalOrFunc\s*\&\s*($LvalOrFunc)\s*>>/ &&
$4 !~ /^\&/) { # $LvalOrFunc may be &foo, ignore if so
WARN("MASK_THEN_SHIFT",
"Possible precedence defect with mask then right shift - may need parentheses\n" . $herecurr);
}
# check for pointer comparisons to NULL
if ($perl_version_ok) {
while ($line =~ /\b$LvalOrFunc\s*(==|\!=)\s*NULL\b/g) {
my $val = $1;
my $equal = "!";
$equal = "" if ($4 eq "!=");
if (CHK("COMPARISON_TO_NULL",
"Comparison to NULL could be written \"${equal}${val}\"\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b\Q$val\E\s*(?:==|\!=)\s*NULL\b/$equal$val/;
}
}
}
# check for bad placement of section $InitAttribute (e.g.: __initdata)
if ($line =~ /(\b$InitAttribute\b)/) {
my $attr = $1;
if ($line =~ /^\+\s*static\s+(?:const\s+)?(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*[=;]/) {
my $ptr = $1;
my $var = $2;
if ((($ptr =~ /\b(union|struct)\s+$attr\b/ &&
ERROR("MISPLACED_INIT",
"$attr should be placed after $var\n" . $herecurr)) ||
($ptr !~ /\b(union|struct)\s+$attr\b/ &&
WARN("MISPLACED_INIT",
"$attr should be placed after $var\n" . $herecurr))) &&
$fix) {
$fixed[$fixlinenr] =~ s/(\bstatic\s+(?:const\s+)?)(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*([=;])\s*/"$1" . trim(string_find_replace($2, "\\s*$attr\\s*", " ")) . " " . trim(string_find_replace($3, "\\s*$attr\\s*", "")) . " $attr" . ("$4" eq ";" ? ";" : " = ")/e;
}
}
}
# check for $InitAttributeData (ie: __initdata) with const
if ($line =~ /\bconst\b/ && $line =~ /($InitAttributeData)/) {
my $attr = $1;
$attr =~ /($InitAttributePrefix)(.*)/;
my $attr_prefix = $1;
my $attr_type = $2;
if (ERROR("INIT_ATTRIBUTE",
"Use of const init definition must use ${attr_prefix}initconst\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/$InitAttributeData/${attr_prefix}initconst/;
}
}
# check for $InitAttributeConst (ie: __initconst) without const
if ($line !~ /\bconst\b/ && $line =~ /($InitAttributeConst)/) {
my $attr = $1;
if (ERROR("INIT_ATTRIBUTE",
"Use of $attr requires a separate use of const\n" . $herecurr) &&
$fix) {
my $lead = $fixed[$fixlinenr] =~
/(^\+\s*(?:static\s+))/;
$lead = rtrim($1);
$lead = "$lead " if ($lead !~ /^\+$/);
$lead = "${lead}const ";
$fixed[$fixlinenr] =~ s/(^\+\s*(?:static\s+))/$lead/;
}
}
# check for __read_mostly with const non-pointer (should just be const)
if ($line =~ /\b__read_mostly\b/ &&
$line =~ /($Type)\s*$Ident/ && $1 !~ /\*\s*$/ && $1 =~ /\bconst\b/) {
if (ERROR("CONST_READ_MOSTLY",
"Invalid use of __read_mostly with const type\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\s+__read_mostly\b//;
}
}
# don't use __constant_<foo> functions outside of include/uapi/
if ($realfile !~ m@^include/uapi/@ &&
$line =~ /(__constant_(?:htons|ntohs|[bl]e(?:16|32|64)_to_cpu|cpu_to_[bl]e(?:16|32|64)))\s*\(/) {
my $constant_func = $1;
my $func = $constant_func;
$func =~ s/^__constant_//;
if (WARN("CONSTANT_CONVERSION",
"$constant_func should be $func\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b$constant_func\b/$func/g;
}
}
# prefer usleep_range over udelay
if ($line =~ /\budelay\s*\(\s*(\d+)\s*\)/) {
my $delay = $1;
# ignore udelay's < 10, however
if (! ($delay < 10) ) {
CHK("USLEEP_RANGE",
"usleep_range is preferred over udelay; see Documentation/timers/timers-howto.rst\n" . $herecurr);
}
if ($delay > 2000) {
WARN("LONG_UDELAY",
"long udelay - prefer mdelay; see arch/arm/include/asm/delay.h\n" . $herecurr);
}
}
# warn about unexpectedly long msleep's
if ($line =~ /\bmsleep\s*\((\d+)\);/) {
if ($1 < 20) {
WARN("MSLEEP",
"msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.rst\n" . $herecurr);
}
}
# check for comparisons of jiffies
if ($line =~ /\bjiffies\s*$Compare|$Compare\s*jiffies\b/) {
WARN("JIFFIES_COMPARISON",
"Comparing jiffies is almost always wrong; prefer time_after, time_before and friends\n" . $herecurr);
}
# check for comparisons of get_jiffies_64()
if ($line =~ /\bget_jiffies_64\s*\(\s*\)\s*$Compare|$Compare\s*get_jiffies_64\s*\(\s*\)/) {
WARN("JIFFIES_COMPARISON",
"Comparing get_jiffies_64() is almost always wrong; prefer time_after64, time_before64 and friends\n" . $herecurr);
}
# warn about #ifdefs in C files
# if ($line =~ /^.\s*\#\s*if(|n)def/ && ($realfile =~ /\.c$/)) {
# print "#ifdef in C files should be avoided\n";
# print "$herecurr";
# $clean = 0;
# }
# warn about spacing in #ifdefs
if ($line =~ /^.\s*\#\s*(ifdef|ifndef|elif)\s\s+/) {
if (ERROR("SPACING",
"exactly one space required after that #$1\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~
s/^(.\s*\#\s*(ifdef|ifndef|elif))\s{2,}/$1 /;
}
}
# check for spinlock_t definitions without a comment.
if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/ ||
$line =~ /^.\s*(DEFINE_MUTEX)\s*\(/) {
my $which = $1;
if (!ctx_has_comment($first_line, $linenr)) {
CHK("UNCOMMENTED_DEFINITION",
"$1 definition without comment\n" . $herecurr);
}
}
# check for memory barriers without a comment.
my $barriers = qr{
mb|
rmb|
wmb
}x;
my $barrier_stems = qr{
mb__before_atomic|
mb__after_atomic|
store_release|
load_acquire|
store_mb|
(?:$barriers)
}x;
my $all_barriers = qr{
(?:$barriers)|
smp_(?:$barrier_stems)|
virt_(?:$barrier_stems)
}x;
if ($line =~ /\b(?:$all_barriers)\s*\(/) {
if (!ctx_has_comment($first_line, $linenr)) {
WARN("MEMORY_BARRIER",
"memory barrier without comment\n" . $herecurr);
}
}
my $underscore_smp_barriers = qr{__smp_(?:$barrier_stems)}x;
if ($realfile !~ m@^include/asm-generic/@ &&
$realfile !~ m@/barrier\.h$@ &&
$line =~ m/\b(?:$underscore_smp_barriers)\s*\(/ &&
$line !~ m/^.\s*\#\s*define\s+(?:$underscore_smp_barriers)\s*\(/) {
WARN("MEMORY_BARRIER",
"__smp memory barriers shouldn't be used outside barrier.h and asm-generic\n" . $herecurr);
}
# check for waitqueue_active without a comment.
if ($line =~ /\bwaitqueue_active\s*\(/) {
if (!ctx_has_comment($first_line, $linenr)) {
WARN("WAITQUEUE_ACTIVE",
"waitqueue_active without comment\n" . $herecurr);
}
}
# check for data_race without a comment.
if ($line =~ /\bdata_race\s*\(/) {
if (!ctx_has_comment($first_line, $linenr)) {
WARN("DATA_RACE",
"data_race without comment\n" . $herecurr);
}
}
# check of hardware specific defines
if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
CHK("ARCH_DEFINES",
"architecture specific defines should be avoided\n" . $herecurr);
}
# check that the storage class is not after a type
if ($line =~ /\b($Type)\s+($Storage)\b/) {
WARN("STORAGE_CLASS",
"storage class '$2' should be located before type '$1'\n" . $herecurr);
}
# Check that the storage class is at the beginning of a declaration
if ($line =~ /\b$Storage\b/ &&
$line !~ /^.\s*$Storage/ &&
$line =~ /^.\s*(.+?)\$Storage\s/ &&
$1 !~ /[\,\)]\s*$/) {
WARN("STORAGE_CLASS",
"storage class should be at the beginning of the declaration\n" . $herecurr);
}
# check the location of the inline attribute, that it is between
# storage class and type.
if ($line =~ /\b$Type\s+$Inline\b/ ||
$line =~ /\b$Inline\s+$Storage\b/) {
ERROR("INLINE_LOCATION",
"inline keyword should sit between storage class and type\n" . $herecurr);
}
# Check for __inline__ and __inline, prefer inline
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b(__inline__|__inline)\b/) {
if (WARN("INLINE",
"plain inline is preferred over $1\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b(__inline__|__inline)\b/inline/;
}
}
# Check for __attribute__ packed, prefer __packed
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) {
WARN("PREFER_PACKED",
"__packed is preferred over __attribute__((packed))\n" . $herecurr);
}
# Check for __attribute__ aligned, prefer __aligned
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b__attribute__\s*\(\s*\(.*aligned/) {
WARN("PREFER_ALIGNED",
"__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
}
# Check for __attribute__ section, prefer __section
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b__attribute__\s*\(\s*\(.*_*section_*\s*\(\s*("[^"]*")/) {
my $old = substr($rawline, $-[1], $+[1] - $-[1]);
my $new = substr($old, 1, -1);
if (WARN("PREFER_SECTION",
"__section($new) is preferred over __attribute__((section($old)))\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b__attribute__\s*\(\s*\(\s*_*section_*\s*\(\s*\Q$old\E\s*\)\s*\)\s*\)/__section($new)/;
}
}
# Check for __attribute__ format(printf, prefer __printf
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
if (WARN("PREFER_PRINTF",
"__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf\s*,\s*(.*)\)\s*\)\s*\)/"__printf(" . trim($1) . ")"/ex;
}
}
# Check for __attribute__ format(scanf, prefer __scanf
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\b/) {
if (WARN("PREFER_SCANF",
"__scanf(string-index, first-to-check) is preferred over __attribute__((format(scanf, string-index, first-to-check)))\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\s*,\s*(.*)\)\s*\)\s*\)/"__scanf(" . trim($1) . ")"/ex;
}
}
# Check for __attribute__ weak, or __weak declarations (may have link issues)
if ($perl_version_ok &&
$line =~ /(?:$Declare|$DeclareMisordered)\s*$Ident\s*$balanced_parens\s*(?:$Attribute)?\s*;/ &&
($line =~ /\b__attribute__\s*\(\s*\(.*\bweak\b/ ||
$line =~ /\b__weak\b/)) {
ERROR("WEAK_DECLARATION",
"Using weak declarations can have unintended link defects\n" . $herecurr);
}
# check for c99 types like uint8_t used outside of uapi/
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b($Declare)\s*$Ident\s*[=;,\[]/) {
my $type = $1;
if ($type =~ /\b($typeC99Typedefs)\b/) {
$type = $1;
my $kernel_type = 'u';
$kernel_type = 's' if ($type =~ /^_*[si]/);
$type =~ /(\d+)/;
$kernel_type .= $1;
if (CHK("PREFER_KERNEL_TYPES",
"Prefer kernel type '$kernel_type' over '$type'\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b$type\b/$kernel_type/;
}
}
}
# check for cast of C90 native int or longer types constants
if ($line =~ /(\(\s*$C90_int_types\s*\)\s*)($Constant)\b/) {
my $cast = $1;
my $const = $2;
if (WARN("TYPECAST_INT_CONSTANT",
"Unnecessary typecast of c90 int constant\n" . $herecurr) &&
$fix) {
my $suffix = "";
my $newconst = $const;
$newconst =~ s/${Int_type}$//;
$suffix .= 'U' if ($cast =~ /\bunsigned\b/);
if ($cast =~ /\blong\s+long\b/) {
$suffix .= 'LL';
} elsif ($cast =~ /\blong\b/) {
$suffix .= 'L';
}
$fixed[$fixlinenr] =~ s/\Q$cast\E$const\b/$newconst$suffix/;
}
}
# check for sizeof(&)
if ($line =~ /\bsizeof\s*\(\s*\&/) {
WARN("SIZEOF_ADDRESS",
"sizeof(& should be avoided\n" . $herecurr);
}
# check for sizeof without parenthesis
if ($line =~ /\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/) {
if (WARN("SIZEOF_PARENTHESIS",
"sizeof $1 should be sizeof($1)\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/"sizeof(" . trim($1) . ")"/ex;
}
}
# check for struct spinlock declarations
if ($line =~ /^.\s*\bstruct\s+spinlock\s+\w+\s*;/) {
WARN("USE_SPINLOCK_T",
"struct spinlock should be spinlock_t\n" . $herecurr);
}
# check for seq_printf uses that could be seq_puts
if ($sline =~ /\bseq_printf\s*\(.*"\s*\)\s*;\s*$/) {
my $fmt = get_quoted_string($line, $rawline);
$fmt =~ s/%%//g;
if ($fmt !~ /%/) {
if (WARN("PREFER_SEQ_PUTS",
"Prefer seq_puts to seq_printf\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\bseq_printf\b/seq_puts/;
}
}
}
# check for vsprintf extension %p<foo> misuses
if ($perl_version_ok &&
defined $stat &&
$stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s &&
$1 !~ /^_*volatile_*$/) {
my $stat_real;
my $lc = $stat =~ tr@\n@@;
$lc = $lc + $linenr;
for (my $count = $linenr; $count <= $lc; $count++) {
my $specifier;
my $extension;
my $qualifier;
my $bad_specifier = "";
my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
$fmt =~ s/%%//g;
while ($fmt =~ /(\%[\*\d\.]*p(\w)(\w*))/g) {
$specifier = $1;
$extension = $2;
$qualifier = $3;
if ($extension !~ /[SsBKRraEehMmIiUDdgVCbGNOxtf]/ ||
($extension eq "f" &&
defined $qualifier && $qualifier !~ /^w/)) {
$bad_specifier = $specifier;
last;
}
if ($extension eq "x" && !defined($stat_real)) {
if (!defined($stat_real)) {
$stat_real = get_stat_real($linenr, $lc);
}
WARN("VSPRINTF_SPECIFIER_PX",
"Using vsprintf specifier '\%px' potentially exposes the kernel memory layout, if you don't really need the address please consider using '\%p'.\n" . "$here\n$stat_real\n");
}
}
if ($bad_specifier ne "") {
my $stat_real = get_stat_real($linenr, $lc);
my $ext_type = "Invalid";
my $use = "";
if ($bad_specifier =~ /p[Ff]/) {
$use = " - use %pS instead";
$use =~ s/pS/ps/ if ($bad_specifier =~ /pf/);
}
WARN("VSPRINTF_POINTER_EXTENSION",
"$ext_type vsprintf pointer extension '$bad_specifier'$use\n" . "$here\n$stat_real\n");
}
}
}
# Check for misused memsets
if ($perl_version_ok &&
defined $stat &&
$stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*$FuncArg\s*\)/) {
my $ms_addr = $2;
my $ms_val = $7;
my $ms_size = $12;
if ($ms_size =~ /^(0x|)0$/i) {
ERROR("MEMSET",
"memset to 0's uses 0 as the 2nd argument, not the 3rd\n" . "$here\n$stat\n");
} elsif ($ms_size =~ /^(0x|)1$/i) {
WARN("MEMSET",
"single byte memset is suspicious. Swapped 2nd/3rd argument?\n" . "$here\n$stat\n");
}
}
# Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar)
# if ($perl_version_ok &&
# defined $stat &&
# $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
# if (WARN("PREFER_ETHER_ADDR_COPY",
# "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") &&
# $fix) {
# $fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/;
# }
# }
# Check for memcmp(foo, bar, ETH_ALEN) that could be ether_addr_equal*(foo, bar)
# if ($perl_version_ok &&
# defined $stat &&
# $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
# WARN("PREFER_ETHER_ADDR_EQUAL",
# "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n")
# }
# check for memset(foo, 0x0, ETH_ALEN) that could be eth_zero_addr
# check for memset(foo, 0xFF, ETH_ALEN) that could be eth_broadcast_addr
# if ($perl_version_ok &&
# defined $stat &&
# $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
#
# my $ms_val = $7;
#
# if ($ms_val =~ /^(?:0x|)0+$/i) {
# if (WARN("PREFER_ETH_ZERO_ADDR",
# "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") &&
# $fix) {
# $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/;
# }
# } elsif ($ms_val =~ /^(?:0xff|255)$/i) {
# if (WARN("PREFER_ETH_BROADCAST_ADDR",
# "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") &&
# $fix) {
# $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/;
# }
# }
# }
# typecasts on min/max could be min_t/max_t
if ($perl_version_ok &&
defined $stat &&
$stat =~ /^\+(?:.*?)\b(min|max)\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\)/) {
if (defined $2 || defined $7) {
my $call = $1;
my $cast1 = deparenthesize($2);
my $arg1 = $3;
my $cast2 = deparenthesize($7);
my $arg2 = $8;
my $cast;
if ($cast1 ne "" && $cast2 ne "" && $cast1 ne $cast2) {
$cast = "$cast1 or $cast2";
} elsif ($cast1 ne "") {
$cast = $cast1;
} else {
$cast = $cast2;
}
WARN("MINMAX",
"$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . "$here\n$stat\n");
}
}
# check usleep_range arguments
if ($perl_version_ok &&
defined $stat &&
$stat =~ /^\+(?:.*?)\busleep_range\s*\(\s*($FuncArg)\s*,\s*($FuncArg)\s*\)/) {
my $min = $1;
my $max = $7;
if ($min eq $max) {
WARN("USLEEP_RANGE",
"usleep_range should not use min == max args; see Documentation/timers/timers-howto.rst\n" . "$here\n$stat\n");
} elsif ($min =~ /^\d+$/ && $max =~ /^\d+$/ &&
$min > $max) {
WARN("USLEEP_RANGE",
"usleep_range args reversed, use min then max; see Documentation/timers/timers-howto.rst\n" . "$here\n$stat\n");
}
}
# check for naked sscanf
if ($perl_version_ok &&
defined $stat &&
$line =~ /\bsscanf\b/ &&
($stat !~ /$Ident\s*=\s*sscanf\s*$balanced_parens/ &&
$stat !~ /\bsscanf\s*$balanced_parens\s*(?:$Compare)/ &&
$stat !~ /(?:$Compare)\s*\bsscanf\s*$balanced_parens/)) {
my $lc = $stat =~ tr@\n@@;
$lc = $lc + $linenr;
my $stat_real = get_stat_real($linenr, $lc);
WARN("NAKED_SSCANF",
"unchecked sscanf return value\n" . "$here\n$stat_real\n");
}
# check for simple sscanf that should be kstrto<foo>
if ($perl_version_ok &&
defined $stat &&
$line =~ /\bsscanf\b/) {
my $lc = $stat =~ tr@\n@@;
$lc = $lc + $linenr;
my $stat_real = get_stat_real($linenr, $lc);
if ($stat_real =~ /\bsscanf\b\s*\(\s*$FuncArg\s*,\s*("[^"]+")/) {
my $format = $6;
my $count = $format =~ tr@%@%@;
if ($count == 1 &&
$format =~ /^"\%(?i:ll[udxi]|[udxi]ll|ll|[hl]h?[udxi]|[udxi][hl]h?|[hl]h?|[udxi])"$/) {
WARN("SSCANF_TO_KSTRTO",
"Prefer kstrto<type> to single variable sscanf\n" . "$here\n$stat_real\n");
}
}
}
# check for new externs in .h files.
if ($realfile =~ /\.h$/ &&
$line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
if (CHK("AVOID_EXTERNS",
"extern prototypes should be avoided in .h files\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
}
}
# check for new externs in .c files.
if ($realfile =~ /\.c$/ && defined $stat &&
$stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
{
my $function_name = $1;
my $paren_space = $2;
my $s = $stat;
if (defined $cond) {
substr($s, 0, length($cond), '');
}
if ($s =~ /^\s*;/)
{
WARN("AVOID_EXTERNS",
"externs should be avoided in .c files\n" . $herecurr);
}
if ($paren_space =~ /\n/) {
WARN("FUNCTION_ARGUMENTS",
"arguments for function declarations should follow identifier\n" . $herecurr);
}
} elsif ($realfile =~ /\.c$/ && defined $stat &&
$stat =~ /^.\s*extern\s+/)
{
WARN("AVOID_EXTERNS",
"externs should be avoided in .c files\n" . $herecurr);
}
# check for function declarations that have arguments without identifier names
if (defined $stat &&
$stat =~ /^.\s*(?:extern\s+)?$Type\s*(?:$Ident|\(\s*\*\s*$Ident\s*\))\s*\(\s*([^{]+)\s*\)\s*;/s &&
$1 ne "void") {
my $args = trim($1);
while ($args =~ m/\s*($Type\s*(?:$Ident|\(\s*\*\s*$Ident?\s*\)\s*$balanced_parens)?)/g) {
my $arg = trim($1);
if ($arg =~ /^$Type$/ && $arg !~ /enum\s+$Ident$/) {
WARN("FUNCTION_ARGUMENTS",
"function definition argument '$arg' should also have an identifier name\n" . $herecurr);
}
}
}
# check for function definitions
if ($perl_version_ok &&
defined $stat &&
$stat =~ /^.\s*(?:$Storage\s+)?$Type\s*($Ident)\s*$balanced_parens\s*{/s) {
$context_function = $1;
# check for multiline function definition with misplaced open brace
my $ok = 0;
my $cnt = statement_rawlines($stat);
my $herectx = $here . "\n";
for (my $n = 0; $n < $cnt; $n++) {
my $rl = raw_line($linenr, $n);
$herectx .= $rl . "\n";
$ok = 1 if ($rl =~ /^[ \+]\{/);
$ok = 1 if ($rl =~ /\{/ && $n == 0);
last if $rl =~ /^[ \+].*\{/;
}
if (!$ok) {
ERROR("OPEN_BRACE",
"open brace '{' following function definitions go on the next line\n" . $herectx);
}
}
# checks for new __setup's
if ($rawline =~ /\b__setup\("([^"]*)"/) {
my $name = $1;
if (!grep(/$name/, @setup_docs)) {
CHK("UNDOCUMENTED_SETUP",
"__setup appears un-documented -- check Documentation/admin-guide/kernel-parameters.txt\n" . $herecurr);
}
}
# check for pointless casting of alloc functions
if ($line =~ /\*\s*\)\s*$allocFunctions\b/) {
WARN("UNNECESSARY_CASTS",
"unnecessary cast may hide bugs, see path_to_url" . $herecurr);
}
# alloc style
# p = alloc(sizeof(struct foo), ...) should be p = alloc(sizeof(*p), ...)
if ($perl_version_ok &&
$line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*((?:kv|k|v)[mz]alloc(?:_node)?)\s*\(\s*(sizeof\s*\(\s*struct\s+$Lval\s*\))/) {
CHK("ALLOC_SIZEOF_STRUCT",
"Prefer $3(sizeof(*$1)...) over $3($4...)\n" . $herecurr);
}
# check for k[mz]alloc with multiplies that could be kmalloc_array/kcalloc
if ($perl_version_ok &&
defined $stat &&
$stat =~ /^\+\s*($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)\s*,/) {
my $oldfunc = $3;
my $a1 = $4;
my $a2 = $10;
my $newfunc = "kmalloc_array";
$newfunc = "kcalloc" if ($oldfunc eq "kzalloc");
my $r1 = $a1;
my $r2 = $a2;
if ($a1 =~ /^sizeof\s*\S/) {
$r1 = $a2;
$r2 = $a1;
}
if ($r1 !~ /^sizeof\b/ && $r2 =~ /^sizeof\s*\S/ &&
!($r1 =~ /^$Constant$/ || $r1 =~ /^[A-Z_][A-Z0-9_]*$/)) {
my $cnt = statement_rawlines($stat);
my $herectx = get_stat_here($linenr, $cnt, $here);
if (WARN("ALLOC_WITH_MULTIPLY",
"Prefer $newfunc over $oldfunc with multiply\n" . $herectx) &&
$cnt == 1 &&
$fix) {
$fixed[$fixlinenr] =~ s/\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)/$1 . ' = ' . "$newfunc(" . trim($r1) . ', ' . trim($r2)/e;
}
}
}
# check for krealloc arg reuse
if ($perl_version_ok &&
$line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*krealloc\s*\(\s*($Lval)\s*,/ &&
$1 eq $3) {
WARN("KREALLOC_ARG_REUSE",
"Reusing the krealloc arg is almost always a bug\n" . $herecurr);
}
# check for alloc argument mismatch
if ($line =~ /\b(kcalloc|kmalloc_array)\s*\(\s*sizeof\b/) {
WARN("ALLOC_ARRAY_ARGS",
"$1 uses number as first arg, sizeof is generally wrong\n" . $herecurr);
}
# check for multiple semicolons
if ($line =~ /;\s*;\s*$/) {
if (WARN("ONE_SEMICOLON",
"Statements terminations use 1 semicolon\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/(\s*;\s*){2,}$/;/g;
}
}
# check for #defines like: 1 << <digit> that could be BIT(digit), it is not exported to uapi
if ($realfile !~ m@^include/uapi/@ &&
$line =~ /#\s*define\s+\w+\s+\(?\s*1\s*([ulUL]*)\s*\<\<\s*(?:\d+|$Ident)\s*\)?/) {
my $ull = "";
$ull = "_ULL" if (defined($1) && $1 =~ /ll/i);
if (CHK("BIT_MACRO",
"Prefer using the BIT$ull macro\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\(?\s*1\s*[ulUL]*\s*<<\s*(\d+|$Ident)\s*\)?/BIT${ull}($1)/;
}
}
# check for feature test macros that request C library API extensions, violating rules A.4 and A.5
if ($line =~ /#\s*define\s+$api_defines/) {
ERROR("API_DEFINE",
"do not specify non-standard feature test macros for embedded code\n" . "$here$rawline\n");
}
# check for IS_ENABLED() without CONFIG_<FOO> ($rawline for comments too)
if ($rawline =~ /\bIS_ENABLED\s*\(\s*(\w+)\s*\)/ && $1 !~ /^CONFIG_/) {
WARN("IS_ENABLED_CONFIG",
"IS_ENABLED($1) is normally used as IS_ENABLED(CONFIG_$1)\n" . $herecurr);
}
# check for #if defined CONFIG_<FOO> || defined CONFIG_<FOO>_MODULE
if ($line =~ /^\+\s*#\s*if\s+defined(?:\s*\(?\s*|\s+)(CONFIG_[A-Z_]+)\s*\)?\s*\|\|\s*defined(?:\s*\(?\s*|\s+)\1_MODULE\s*\)?\s*$/) {
my $config = $1;
if (WARN("PREFER_IS_ENABLED",
"Prefer IS_ENABLED(<FOO>) to CONFIG_<FOO> || CONFIG_<FOO>_MODULE\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] = "\+#if IS_ENABLED($config)";
}
}
# check for switch/default statements without a break;
if ($perl_version_ok &&
defined $stat &&
$stat =~ /^\+[$;\s]*(?:case[$;\s]+\w+[$;\s]*:[$;\s]*|)*[$;\s]*\bdefault[$;\s]*:[$;\s]*;/g) {
my $cnt = statement_rawlines($stat);
my $herectx = get_stat_here($linenr, $cnt, $here);
WARN("DEFAULT_NO_BREAK",
"switch default: should use break\n" . $herectx);
}
# check for gcc specific __FUNCTION__
if ($line =~ /\b__FUNCTION__\b/) {
if (WARN("USE_FUNC",
"__func__ should be used instead of gcc specific __FUNCTION__\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\b__FUNCTION__\b/__func__/g;
}
}
# check for uses of __DATE__, __TIME__, __TIMESTAMP__
while ($line =~ /\b(__(?:DATE|TIME|TIMESTAMP)__)\b/g) {
ERROR("DATE_TIME",
"Use of the '$1' macro makes the build non-deterministic\n" . $herecurr);
}
# check for uses of __BYTE_ORDER__
while ($line =~ /\b(__BYTE_ORDER__)\b/g) {
ERROR("BYTE_ORDER",
"Use of the '$1' macro is disallowed. Use CONFIG_(BIG|LITTLE)_ENDIAN instead\n" . $herecurr);
}
# check for use of yield()
if ($line =~ /\byield\s*\(\s*\)/) {
WARN("YIELD",
"Using yield() is generally wrong. See yield() kernel-doc (sched/core.c)\n" . $herecurr);
}
# check for comparisons against true and false
if ($line =~ /\+\s*(.*?)\b(true|false|$Lval)\s*(==|\!=)\s*(true|false|$Lval)\b(.*)$/i) {
my $lead = $1;
my $arg = $2;
my $test = $3;
my $otype = $4;
my $trail = $5;
my $op = "!";
($arg, $otype) = ($otype, $arg) if ($arg =~ /^(?:true|false)$/i);
my $type = lc($otype);
if ($type =~ /^(?:true|false)$/) {
if (("$test" eq "==" && "$type" eq "true") ||
("$test" eq "!=" && "$type" eq "false")) {
$op = "";
}
CHK("BOOL_COMPARISON",
"Using comparison to $otype is error prone\n" . $herecurr);
## maybe suggesting a correct construct would be better
## "Using comparison to $otype is error prone. Perhaps use '${lead}${op}${arg}${trail}'\n" . $herecurr);
}
}
# check for semaphores initialized locked
if ($line =~ /^.\s*sema_init.+,\W?0\W?\)/) {
WARN("CONSIDER_COMPLETION",
"consider using a completion\n" . $herecurr);
}
# recommend kstrto* over simple_strto* and strict_strto*
if ($line =~ /\b((simple|strict)_(strto(l|ll|ul|ull)))\s*\(/) {
WARN("CONSIDER_KSTRTO",
"$1 is obsolete, use k$3 instead\n" . $herecurr);
}
# check for __initcall(), use device_initcall() explicitly or more appropriate function please
if ($line =~ /^.\s*__initcall\s*\(/) {
WARN("USE_DEVICE_INITCALL",
"please use device_initcall() or more appropriate function instead of __initcall() (see include/linux/init.h)\n" . $herecurr);
}
# check for spin_is_locked(), suggest lockdep instead
if ($line =~ /\bspin_is_locked\(/) {
WARN("USE_LOCKDEP",
"Where possible, use lockdep_assert_held instead of assertions based on spin_is_locked\n" . $herecurr);
}
# check for deprecated apis
if ($line =~ /\b($deprecated_apis_search)\b\s*\(/) {
my $deprecated_api = $1;
my $new_api = $deprecated_apis{$deprecated_api};
WARN("DEPRECATED_API",
"Deprecated use of '$deprecated_api', prefer '$new_api' instead\n" . $herecurr);
}
# check for various structs that are normally const (ops, kgdb, device_tree)
# and avoid what seem like struct definitions 'struct foo {'
if (defined($const_structs) &&
$line !~ /\bconst\b/ &&
$line =~ /\bstruct\s+($const_structs)\b(?!\s*\{)/) {
WARN("CONST_STRUCT",
"struct $1 should normally be const\n" . $herecurr);
}
# use of NR_CPUS is usually wrong
# ignore definitions of NR_CPUS and usage to define arrays as likely right
if ($line =~ /\bNR_CPUS\b/ &&
$line !~ /^.\s*\s*#\s*if\b.*\bNR_CPUS\b/ &&
$line !~ /^.\s*\s*#\s*define\b.*\bNR_CPUS\b/ &&
$line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/ &&
$line !~ /\[[^\]]*\.\.\.[^\]]*NR_CPUS[^\]]*\]/ &&
$line !~ /\[[^\]]*NR_CPUS[^\]]*\.\.\.[^\]]*\]/)
{
WARN("NR_CPUS",
"usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr);
}
# Use of __ARCH_HAS_<FOO> or ARCH_HAVE_<BAR> is wrong.
if ($line =~ /\+\s*#\s*define\s+((?:__)?ARCH_(?:HAS|HAVE)\w*)\b/) {
ERROR("DEFINE_ARCH_HAS",
"#define of '$1' is wrong - use Kconfig variables or standard guards instead\n" . $herecurr);
}
# likely/unlikely comparisons similar to "(likely(foo) > 0)"
if ($perl_version_ok &&
$line =~ /\b((?:un)?likely)\s*\(\s*$FuncArg\s*\)\s*$Compare/) {
WARN("LIKELY_MISUSE",
"Using $1 should generally have parentheses around the comparison\n" . $herecurr);
}
# nested likely/unlikely calls
if ($line =~ /\b(?:(?:un)?likely)\s*\(\s*!?\s*(IS_ERR(?:_OR_NULL|_VALUE)?|WARN)/) {
WARN("LIKELY_MISUSE",
"nested (un)?likely() calls, $1 already uses unlikely() internally\n" . $herecurr);
}
# whine mightily about in_atomic
if ($line =~ /\bin_atomic\s*\(/) {
if ($realfile =~ m@^drivers/@) {
ERROR("IN_ATOMIC",
"do not use in_atomic in drivers\n" . $herecurr);
} elsif ($realfile !~ m@^kernel/@) {
WARN("IN_ATOMIC",
"use of in_atomic() is incorrect outside core kernel code\n" . $herecurr);
}
}
# check for mutex_trylock_recursive usage
if ($line =~ /mutex_trylock_recursive/) {
ERROR("LOCKING",
"recursive locking is bad, do not use this ever.\n" . $herecurr);
}
# check for lockdep_set_novalidate_class
if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
$line =~ /__lockdep_no_validate__\s*\)/ ) {
if ($realfile !~ m@^kernel/lockdep@ &&
$realfile !~ m@^include/linux/lockdep@ &&
$realfile !~ m@^drivers/base/core@) {
ERROR("LOCKDEP",
"lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
}
}
if ($line =~ /debugfs_create_\w+.*\b$mode_perms_world_writable\b/ ||
$line =~ /DEVICE_ATTR.*\b$mode_perms_world_writable\b/) {
WARN("EXPORTED_WORLD_WRITABLE",
"Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
}
# check for DEVICE_ATTR uses that could be DEVICE_ATTR_<FOO>
# and whether or not function naming is typical and if
# DEVICE_ATTR permissions uses are unusual too
if ($perl_version_ok &&
defined $stat &&
$stat =~ /\bDEVICE_ATTR\s*\(\s*(\w+)\s*,\s*\(?\s*(\s*(?:${multi_mode_perms_string_search}|0[0-7]{3,3})\s*)\s*\)?\s*,\s*(\w+)\s*,\s*(\w+)\s*\)/) {
my $var = $1;
my $perms = $2;
my $show = $3;
my $store = $4;
my $octal_perms = perms_to_octal($perms);
if ($show =~ /^${var}_show$/ &&
$store =~ /^${var}_store$/ &&
$octal_perms eq "0644") {
if (WARN("DEVICE_ATTR_RW",
"Use DEVICE_ATTR_RW\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\bDEVICE_ATTR\s*\(\s*$var\s*,\s*\Q$perms\E\s*,\s*$show\s*,\s*$store\s*\)/DEVICE_ATTR_RW(${var})/;
}
} elsif ($show =~ /^${var}_show$/ &&
$store =~ /^NULL$/ &&
$octal_perms eq "0444") {
if (WARN("DEVICE_ATTR_RO",
"Use DEVICE_ATTR_RO\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\bDEVICE_ATTR\s*\(\s*$var\s*,\s*\Q$perms\E\s*,\s*$show\s*,\s*NULL\s*\)/DEVICE_ATTR_RO(${var})/;
}
} elsif ($show =~ /^NULL$/ &&
$store =~ /^${var}_store$/ &&
$octal_perms eq "0200") {
if (WARN("DEVICE_ATTR_WO",
"Use DEVICE_ATTR_WO\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\bDEVICE_ATTR\s*\(\s*$var\s*,\s*\Q$perms\E\s*,\s*NULL\s*,\s*$store\s*\)/DEVICE_ATTR_WO(${var})/;
}
} elsif ($octal_perms eq "0644" ||
$octal_perms eq "0444" ||
$octal_perms eq "0200") {
my $newshow = "$show";
$newshow = "${var}_show" if ($show ne "NULL" && $show ne "${var}_show");
my $newstore = $store;
$newstore = "${var}_store" if ($store ne "NULL" && $store ne "${var}_store");
my $rename = "";
if ($show ne $newshow) {
$rename .= " '$show' to '$newshow'";
}
if ($store ne $newstore) {
$rename .= " '$store' to '$newstore'";
}
WARN("DEVICE_ATTR_FUNCTIONS",
"Consider renaming function(s)$rename\n" . $herecurr);
} else {
WARN("DEVICE_ATTR_PERMS",
"DEVICE_ATTR unusual permissions '$perms' used\n" . $herecurr);
}
}
# Mode permission misuses where it seems decimal should be octal
# This uses a shortcut match to avoid unnecessary uses of a slow foreach loop
# o Ignore module_param*(...) uses with a decimal 0 permission as that has a
# specific definition of not visible in sysfs.
# o Ignore proc_create*(...) uses with a decimal 0 permission as that means
# use the default permissions
if ($perl_version_ok &&
defined $stat &&
$line =~ /$mode_perms_search/) {
foreach my $entry (@mode_permission_funcs) {
my $func = $entry->[0];
my $arg_pos = $entry->[1];
my $lc = $stat =~ tr@\n@@;
$lc = $lc + $linenr;
my $stat_real = get_stat_real($linenr, $lc);
my $skip_args = "";
if ($arg_pos > 1) {
$arg_pos--;
$skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}";
}
my $test = "\\b$func\\s*\\(${skip_args}($FuncArg(?:\\|\\s*$FuncArg)*)\\s*[,\\)]";
if ($stat =~ /$test/) {
my $val = $1;
$val = $6 if ($skip_args ne "");
if (!($func =~ /^(?:module_param|proc_create)/ && $val eq "0") &&
(($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
($val =~ /^$Octal$/ && length($val) ne 4))) {
ERROR("NON_OCTAL_PERMISSIONS",
"Use 4 digit octal (0777) not decimal permissions\n" . "$here\n" . $stat_real);
}
if ($val =~ /^$Octal$/ && (oct($val) & 02)) {
ERROR("EXPORTED_WORLD_WRITABLE",
"Exporting writable files is usually an error. Consider more restrictive permissions.\n" . "$here\n" . $stat_real);
}
}
}
}
# check for uses of S_<PERMS> that could be octal for readability
while ($line =~ m{\b($multi_mode_perms_string_search)\b}g) {
my $oval = $1;
my $octal = perms_to_octal($oval);
if (WARN("SYMBOLIC_PERMS",
"Symbolic permissions '$oval' are not preferred. Consider using octal permissions '$octal'.\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/\Q$oval\E/$octal/;
}
}
# validate content of MODULE_LICENSE against list from include/linux/module.h
if ($line =~ /\bMODULE_LICENSE\s*\(\s*($String)\s*\)/) {
my $extracted_string = get_quoted_string($line, $rawline);
my $valid_licenses = qr{
GPL|
GPL\ v2|
GPL\ and\ additional\ rights|
Dual\ BSD/GPL|
Dual\ MIT/GPL|
Dual\ MPL/GPL|
Proprietary
}x;
if ($extracted_string !~ /^"(?:$valid_licenses)"$/x) {
WARN("MODULE_LICENSE",
"unknown module license " . $extracted_string . "\n" . $herecurr);
}
}
# check for sysctl duplicate constants
if ($line =~ /\.extra[12]\s*=\s*&(zero|one|int_max)\b/) {
WARN("DUPLICATED_SYSCTL_CONST",
"duplicated sysctl range checking value '$1', consider using the shared one in include/linux/sysctl.h\n" . $herecurr);
}
}
# If we have no input at all, then there is nothing to report on
# so just keep quiet.
if ($#rawlines == -1) {
exit(0);
}
# In mailback mode only produce a report in the negative, for
# things that appear to be patches.
if ($mailback && ($clean == 1 || !$is_patch)) {
exit(0);
}
# This is not a patch, and we are in 'no-patch' mode so
# just keep quiet.
if (!$chk_patch && !$is_patch) {
exit(0);
}
if (!$is_patch && $filename !~ /cover-letter\.patch$/) {
ERROR("NOT_UNIFIED_DIFF",
"Does not appear to be a unified-diff format patch\n");
}
if ($is_patch && $has_commit_log && $chk_signoff) {
if ($signoff == 0) {
ERROR("MISSING_SIGN_OFF",
"Missing Signed-off-by: line(s)\n");
} elsif (!$authorsignoff) {
WARN("NO_AUTHOR_SIGN_OFF",
"Missing Signed-off-by: line by nominal patch author '$author'\n");
}
}
print report_dump();
if ($summary && !($clean == 1 && $quiet == 1)) {
print "$filename " if ($summary_file);
print "total: $cnt_error errors, $cnt_warn warnings, " .
(($check)? "$cnt_chk checks, " : "") .
"$cnt_lines lines checked\n";
}
if ($quiet == 0) {
# If there were any defects found and not already fixing them
if (!$clean and !$fix) {
print << "EOM"
NOTE: For some of the reported defects, checkpatch may be able to
mechanically convert to the typical style using --fix or --fix-inplace.
EOM
}
# If there were whitespace errors which cleanpatch can fix
# then suggest that.
if ($rpt_cleaners) {
$rpt_cleaners = 0;
print << "EOM"
NOTE: Whitespace errors detected.
You may wish to use scripts/cleanpatch or scripts/cleanfile
EOM
}
}
if ($clean == 0 && $fix &&
("@rawlines" ne "@fixed" ||
$#fixed_inserted >= 0 || $#fixed_deleted >= 0)) {
my $newfile = $filename;
$newfile .= ".EXPERIMENTAL-checkpatch-fixes" if (!$fix_inplace);
my $linecount = 0;
my $f;
@fixed = fix_inserted_deleted_lines(\@fixed, \@fixed_inserted, \@fixed_deleted);
open($f, '>', $newfile)
or die "$P: Can't open $newfile for write\n";
foreach my $fixed_line (@fixed) {
$linecount++;
if ($file) {
if ($linecount > 3) {
$fixed_line =~ s/^\+//;
print $f $fixed_line . "\n";
}
} else {
print $f $fixed_line . "\n";
}
}
close($f);
if (!$quiet) {
print << "EOM";
Wrote EXPERIMENTAL --fix correction(s) to '$newfile'
Do _NOT_ trust the results written to this file.
Do _NOT_ submit these changes without inspecting them for correctness.
This EXPERIMENTAL file is simply a convenience to help rewrite patches.
No warranties, expressed or implied...
EOM
}
}
if ($quiet == 0) {
print "\n";
if ($clean == 1) {
print "$vname has no obvious style problems and is ready for submission.\n";
} else {
print "$vname has style problems, please review.\n";
}
}
return $clean;
}
``` | /content/code_sandbox/scripts/checkpatch.pl | prolog | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 72,465 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions
"""
import importlib
import json
import mock
import os
import pytest
import shutil
import sys
import re
from lxml import etree
# pylint: disable=no-name-in-module
from conftest import TEST_DATA, ZEPHYR_BASE, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestReport:
    """Blackbox tests for twister's report-generation command line options."""

    # (test path, platforms, files expected in the output directory)
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'mps2/an385'],
            [
                'qemu_x86.xml', 'mps2_an385.xml',
                'testplan.json', 'twister.json',
                'twister.log', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ]
        ),
    ]
    # Files expected when --report-suffix=TEST is passed
    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'mps2/an385'],
            [
                'mps2_an385_TEST.xml', 'qemu_x86_TEST.xml',
                'twister_TEST.json', 'twister_TEST_report.xml',
                'twister_TEST_suite_report.xml', 'twister_TEST.xml'
            ]
        ),
    ]
    # (test path, platforms, report-name related flags, files expected)
    TESTDATA_3 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'mps2/an385'],
            ['--report-name', 'abcd'],
            [
                'abcd.json', 'abcd_report.xml',
                'abcd_suite_report.xml', 'abcd.xml'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'mps2/an385'],
            ['--report-name', '1234', '--platform-reports'],
            [
                'mps2_an385.xml', 'qemu_x86.xml',
                '1234.json', '1234_report.xml',
                '1234_suite_report.xml', '1234.xml'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'mps2/an385'],
            ['--report-name', 'Final', '--platform-reports', '--report-suffix=Test'],
            [
                'mps2_an385_Test.xml', 'qemu_x86_Test.xml',
                'Final_Test.json', 'Final_Test_report.xml',
                'Final_Test_suite_report.xml', 'Final_Test.xml'
            ]
        ),
    ]
    # Files expected inside the directory given to --report-dir
    TESTDATA_4 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            [
                'twister.json', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ],
            "TEST_DIR"
        ),
    ]
    # Files expected inside the directory given to --outdir
    TESTDATA_5 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            [
                'testplan.json', 'twister.log',
                'twister.json', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ],
            "OUT_DIR"
        ),
    ]
    # Custom log file name handed to --log-file
    TESTDATA_6 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            "TEST_LOG_FILE.log"
        ),
    ]
    # Failure/error lines expected in the --report-summary output
    TESTDATA_7 = [
        (
            os.path.join(TEST_DATA, 'tests', 'one_fail_two_error_one_pass'),
            ['qemu_x86'],
            [r'one_fail_two_error_one_pass.agnostic.group1.subgroup2 on qemu_x86 FAILED \(.*\)',
             r'one_fail_two_error_one_pass.agnostic.group1.subgroup3 on qemu_x86 ERROR \(Build failure\)',
             r'one_fail_two_error_one_pass.agnostic.group1.subgroup4 on qemu_x86 ERROR \(Build failure\)'],
        )
    ]

    @classmethod
    def setup_class(cls):
        # Load the twister entry script as a module so each test can execute
        # it in-process under a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_1,
        ids=[
            'platform_reports'
        ]
    )
    def test_platform_reports(self, capfd, out_path, test_path, test_platforms, file_name):
        """--platform-reports should produce the expected, non-empty files."""
        args = ['-i', '--outdir', out_path, '-T', test_path, '--platform-reports'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            # BUGFIX: message previously omitted the missing file's name.
            assert os.path.exists(path), f'file not found {f_name}'

            # Each report type must also be non-empty, not merely present.
            if path.endswith(".json"):
                with open(path, "r") as json_file:
                    data = json.load(json_file)
                    assert data, f"JSON file '{path}' is empty"
            elif path.endswith(".xml"):
                tree = etree.parse(path)
                xml_text = etree.tostring(tree, encoding="utf-8").decode("utf-8")
                assert xml_text.strip(), f"XML file '{path}' is empty"
            elif path.endswith(".log"):
                with open(path, "r") as log_file:
                    text_content = log_file.read()
                    assert text_content.strip(), f"LOG file '{path}' is empty"
            else:
                pytest.fail(f"Unsupported file type: '{path}'")

        # Per-platform build directories use '_' instead of '/'.
        for f_platform in test_platforms:
            platform_path = os.path.join(out_path, f_platform.replace("/", "_"))
            assert os.path.exists(platform_path), f'file not found {f_platform}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_2,
        ids=[
            'report_suffix',
        ]
    )
    def test_report_suffix(self, capfd, out_path, test_path, test_platforms, file_name):
        """--report-suffix should be appended to every report file name."""
        args = ['-i', '--outdir', out_path, '-T', test_path, '--platform-reports', '--report-suffix=TEST'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found {f_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, report_arg, file_name',
        TESTDATA_3,
        ids=[
            'only_report_name',
            'report_name + platform_reports',
            'report-name + platform-reports + report-suffix'
        ]
    )
    def test_report_name(self, capfd, out_path, test_path, test_platforms, report_arg, file_name):
        """--report-name (with optional suffix/platform flags) renames reports."""
        # `report_arg` is already a flat list of CLI tokens; the previous
        # zip()-based flattening of it was an identity no-op.
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair] + \
               list(report_arg)

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found {f_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name, dir_name',
        TESTDATA_4,
        ids=[
            'report_dir',
        ]
    )
    def test_report_dir(self, capfd, out_path, test_path, test_platforms, file_name, dir_name):
        """--report-dir should redirect report files into the given directory."""
        args = ['-i', '--outdir', out_path, '-T', test_path, "--report-dir", dir_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        # --report-dir is resolved relative to ZEPHYR_BASE; start clean.
        twister_path = os.path.join(ZEPHYR_BASE, dir_name)
        if os.path.exists(twister_path):
            shutil.rmtree(twister_path)

        try:
            with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                    pytest.raises(SystemExit) as sys_exit:
                self.loader.exec_module(self.twister_module)

            out, err = capfd.readouterr()
            sys.stdout.write(out)
            sys.stderr.write(err)

            for f_name in file_name:
                path = os.path.join(twister_path, f_name)
                assert os.path.exists(path), f'file not found {f_name}'

            assert str(sys_exit.value) == '0'
        finally:
            # Always remove the directory created inside the source tree.
            twister_path = os.path.join(ZEPHYR_BASE, dir_name)
            if os.path.exists(twister_path):
                shutil.rmtree(twister_path)

    @pytest.mark.noclearout
    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name, dir_name',
        TESTDATA_5,
        ids=[
            'outdir',
        ]
    )
    def test_outdir(self, capfd, test_path, test_platforms, file_name, dir_name):
        """--outdir should place all outputs in the given directory."""
        args = ['-i', '-T', test_path, "--outdir", dir_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        twister_path = os.path.join(ZEPHYR_BASE, dir_name)
        if os.path.exists(twister_path):
            shutil.rmtree(twister_path)

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        try:
            for f_name in file_name:
                path = os.path.join(twister_path, f_name)
                # BUGFIX: message string was missing its f-prefix, so
                # '{f_name}' was printed literally instead of the file name.
                assert os.path.exists(path), f'file not found {f_name}'
            for f_platform in test_platforms:
                platform_path = os.path.join(twister_path, f_platform)
                assert os.path.exists(platform_path), f'file not found {f_platform}'
            assert str(sys_exit.value) == '0'
        finally:
            twister_path = os.path.join(ZEPHYR_BASE, dir_name)
            if os.path.exists(twister_path):
                shutil.rmtree(twister_path)

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_6,
        ids=[
            'log_file',
        ]
    )
    def test_log_file(self, capfd, test_path, test_platforms, out_path, file_name):
        """--log-file should create the named log file under ZEPHYR_BASE."""
        args = ['-i', '--outdir', out_path, '-T', test_path, "--log-file", file_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        file_path = os.path.join(ZEPHYR_BASE, file_name)
        if os.path.exists(file_path):
            os.remove(file_path)

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        # BUGFIX: message referenced an undefined `f_name` in a non-f string;
        # use the actual `file_name` parameter with interpolation.
        assert os.path.exists(file_path), f'file not found {file_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, flags, expected_testcase_counts',
        [
            (
                os.path.join(TEST_DATA, 'tests', 'dummy'),
                ['--detailed-skipped-report'],
                {'qemu_x86': 5, 'intel_adl_crb': 1}
            ),
            (
                os.path.join(TEST_DATA, 'tests', 'dummy'),
                ['--detailed-skipped-report', '--report-filtered'],
                {'qemu_x86': 6, 'intel_adl_crb': 6}
            ),
        ],
        ids=['dummy tests', 'dummy tests with filtered']
    )
    def test_detailed_skipped_report(self, out_path, test_path, flags, expected_testcase_counts):
        """--detailed-skipped-report should list skipped testcases in the XML."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               flags + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        testsuite_counter = 0
        xml_data = etree.parse(os.path.join(out_path, 'twister_report.xml')).getroot()
        for ts in xml_data.iter('testsuite'):
            testsuite_counter += 1
            # Without the tested flag, filtered testcases would be missing
            # from the report.
            testcase_count = len(list(ts.iter('testcase')))
            expected_tc_count = expected_testcase_counts[ts.get('name')]
            assert testcase_count == expected_tc_count, \
                f'Not all expected testcases appear in the report.' \
                f' (In {ts.get("name")}, expected {expected_tc_count}, got {testcase_count}.)'

        assert testsuite_counter == len(test_platforms), \
            'Some platforms are missing from the XML report.'

    @pytest.mark.parametrize(
        'test_path, report_filtered, expected_filtered_count',
        [
            (os.path.join(TEST_DATA, 'tests', 'dummy'), False, 0),
            (os.path.join(TEST_DATA, 'tests', 'dummy'), True, 4),
        ],
        ids=['no filtered', 'with filtered']
    )
    def test_report_filtered(self, out_path, test_path, report_filtered, expected_filtered_count):
        """--report-filtered should include filtered suites in twister.json."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               (['--report-filtered'] if report_filtered else []) + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)

        testsuites = j.get('testsuites')
        assert testsuites, 'No testsuites found.'
        statuses = [testsuite.get('status') for testsuite in testsuites]
        filtered_status_count = statuses.count("filtered")
        assert filtered_status_count == expected_filtered_count, \
            f'Expected {expected_filtered_count} filtered statuses, got {filtered_status_count}.'

    def test_enable_size_report(self, out_path):
        """--enable-size-report should add RAM/ROM usage keys to twister.json."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)

        expected_rel_path = os.path.relpath(os.path.join(path, 'dummy.device.group'), ZEPHYR_BASE)

        # twister.json will contain [used/available]_[ram/rom] keys if the
        # flag works, except for those keys that would have values of 0.
        # In this testcase, availables are equal to 0, so they are missing.
        assert all(
            [
                'used_ram' in ts for ts in j['testsuites']
                if ts['name'] == expected_rel_path and 'reason' not in ts
            ]
        )
        assert all(
            [
                'used_rom' in ts for ts in j['testsuites']
                if ts['name'] == expected_rel_path and 'reason' not in ts
            ]
        )

    @pytest.mark.parametrize(
        'test_path, test_platforms, expected_content',
        TESTDATA_7,
        ids=[
            'Report summary test'
        ]
    )
    def test_report_summary(self, out_path, capfd, test_path, test_platforms, expected_content):
        """--report-summary should re-log failures, capped by its argument."""
        # First run: produce results (expected to fail with exit code 1).
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '1'

        capfd.readouterr()
        clear_log_in_test()

        # Second run: unbounded --report-summary must show every failure.
        args += ['--report-summary']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for line in expected_content:
            result = re.search(line, err)
            assert result, f'missing information in log: {line}'

        capfd.readouterr()
        clear_log_in_test()

        # Third run: '--report-summary 2' must show exactly two failures.
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               ['--report-summary', '2'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        lines = 0
        for line in expected_content:
            result = re.search(line, err)
            if result:
                lines += 1
        assert lines == 2, f'expected 2 summary lines, got {lines}'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_report.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,368 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions
"""
import importlib
import mock
import os
import pytest
import sys
from conftest import ZEPHYR_BASE, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister/twisterlib"))
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestHardwaremap:
    """Blackbox tests for twister's --generate-hardware-map option."""

    # (manufacturers, products, serials, expected runner): every
    # manufacturer x product x serial combination should be mapped to
    # the same `runner` in the generated hardware map.
    TESTDATA_1 = [
        (
            [
                'ARM',
                'SEGGER',
                'MBED'
            ],
            [
                'DAPLink CMSIS-DAP',
                'MBED CMSIS-DAP'
            ],
            [1234, 'abcd'],
            'pyocd'
        ),
        (
            [
                'STMicroelectronics',
                'Atmel Corp.'
            ],
            [
                'J-Link',
                'J-Link OB'
            ],
            [1234, 'abcd'],
            'jlink'
        ),
        (
            [
                'Silicon Labs',
                'NXP Semiconductors',
                'Microchip Technology Inc.'
            ],
            [
                'STM32 STLink',
                '^XDS110.*',
                'STLINK-V3'
            ],
            [1234, 'abcd'],
            'openocd'
        ),
        (
            [
                'FTDI',
                'Digilent',
                'Microsoft'
            ],
            [
                'TTL232R-3V3',
                'MCP2200 USB Serial Port Emulator'
            ],
            [1234, 'abcd'],
            'dediprog'
        )
    ]
    # (manufacturer, product, base serial, runner) for the multi-device
    # detection test; serials base+0..base+3 are enumerated.
    TESTDATA_2 = [
        (
            'FTDI',
            'DAPLink CMSIS-DAP',
            1234,
            'pyocd'
        )
    ]
    # (manufacturer, product, serial, usb location): Texas Instruments
    # probes are only added when the USB location ends in '0'.
    TESTDATA_3 = [
        (
            'Texas Instruments',
            'DAPLink CMSIS-DAP',
            'abcd', 'las'
        ),
        (
            'Texas Instruments',
            'DAPLink CMSIS-DAP',
            'abcd', 'dse0'
        )
    ]

    @classmethod
    def setup_class(cls):
        # Load the twister entry script as a module so each test can run
        # it in-process under a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        ('manufacturer', 'product', 'serial', 'runner'),
        TESTDATA_1,
    )
    def test_generate(self, capfd, out_path, manufacturer, product, serial, runner):
        """Each detected serial device should map to the expected runner."""
        file_name = "test-map.yaml"
        # The map file is written relative to ZEPHYR_BASE; start clean.
        path = os.path.join(ZEPHYR_BASE, file_name)
        args = ['--outdir', out_path, '--generate-hardware-map', file_name]

        if os.path.exists(path):
            os.remove(path)

        def mocked_comports():
            # Fake serial.tools.list_ports.comports() returning one device
            # built from the current (manufacturer, product, serial) triple.
            return [
                mock.Mock(device='/dev/ttyUSB23',
                          manufacturer=id_man,
                          product=id_pro,
                          serial_number=id_serial
                          )
            ]

        for id_man in manufacturer:
            for id_pro in product:
                for id_serial in serial:
                    with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                            mock.patch('serial.tools.list_ports.comports',
                                       side_effect=mocked_comports), \
                            pytest.raises(SystemExit) as sys_exit:
                        self.loader.exec_module(self.twister_module)

                    out, err = capfd.readouterr()
                    sys.stdout.write(out)
                    sys.stderr.write(err)

                    assert os.path.exists(path)

                    # The YAML dump is compared literally (keys sorted).
                    expected_data = '- connected: true\n' \
                                    f'  id: {id_serial}\n' \
                                    '  platform: unknown\n' \
                                    f'  product: {id_pro}\n' \
                                    f'  runner: {runner}\n' \
                                    '  serial: /dev/ttyUSB23\n'

                    load_data = open(path).read()
                    assert load_data == expected_data
                    if os.path.exists(path):
                        os.remove(path)
                    assert str(sys_exit.value) == '0'
                    clear_log_in_test()

    @pytest.mark.parametrize(
        ('manufacturer', 'product', 'serial', 'runner'),
        TESTDATA_2,
    )
    def test_few_generate(self, capfd, out_path, manufacturer, product, serial, runner):
        """Multiple detected devices should all appear in the map, in order."""
        file_name = "test-map.yaml"
        path = os.path.join(ZEPHYR_BASE, file_name)
        args = ['--outdir', out_path, '--generate-hardware-map', file_name]

        if os.path.exists(path):
            os.remove(path)

        def mocked_comports():
            # Four fake devices; two share /dev/ttyUSB24 but have distinct
            # serial numbers, so all four must be listed.
            return [
                mock.Mock(device='/dev/ttyUSB23',
                          manufacturer=manufacturer,
                          product=product,
                          serial_number=serial
                          ),
                mock.Mock(device='/dev/ttyUSB24',
                          manufacturer=manufacturer,
                          product=product,
                          serial_number=serial + 1
                          ),
                mock.Mock(device='/dev/ttyUSB24',
                          manufacturer=manufacturer,
                          product=product,
                          serial_number=serial + 2
                          ),
                mock.Mock(device='/dev/ttyUSB25',
                          manufacturer=manufacturer,
                          product=product,
                          serial_number=serial + 3
                          )
            ]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                mock.patch('serial.tools.list_ports.comports',
                           side_effect=mocked_comports), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert os.path.exists(path)

        expected_data = '- connected: true\n' \
                        f'  id: {serial}\n' \
                        '  platform: unknown\n' \
                        f'  product: {product}\n' \
                        f'  runner: {runner}\n' \
                        '  serial: /dev/ttyUSB23\n' \
                        '- connected: true\n' \
                        f'  id: {serial + 1}\n' \
                        '  platform: unknown\n' \
                        f'  product: {product}\n' \
                        f'  runner: {runner}\n' \
                        '  serial: /dev/ttyUSB24\n' \
                        '- connected: true\n' \
                        f'  id: {serial + 2}\n' \
                        '  platform: unknown\n' \
                        f'  product: {product}\n' \
                        f'  runner: {runner}\n' \
                        '  serial: /dev/ttyUSB24\n' \
                        '- connected: true\n' \
                        f'  id: {serial + 3}\n' \
                        '  platform: unknown\n' \
                        f'  product: {product}\n' \
                        f'  runner: {runner}\n' \
                        '  serial: /dev/ttyUSB25\n'

        load_data = open(path).read()
        assert load_data == expected_data
        if os.path.exists(path):
            os.remove(path)
        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        ('manufacturer', 'product', 'serial', 'location'),
        TESTDATA_3,
    )
    def test_texas_exeption(self, capfd, out_path, manufacturer, product, serial, location):
        """TI probes are only mapped when their USB location ends in '0'."""
        # NOTE(review): method name contains a typo ('exeption'); kept as-is
        # because pytest selects tests by name.
        file_name = "test-map.yaml"
        path = os.path.join(ZEPHYR_BASE, file_name)
        args = ['--outdir', out_path, '--generate-hardware-map', file_name]

        if os.path.exists(path):
            os.remove(path)

        def mocked_comports():
            # One fake TI device whose `location` drives the filtering.
            return [
                mock.Mock(device='/dev/ttyUSB23',
                          manufacturer=manufacturer,
                          product=product,
                          serial_number=serial,
                          location=location
                          )
            ]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                mock.patch('serial.tools.list_ports.comports',
                           side_effect=mocked_comports), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert os.path.exists(path)

        # Accepted device -> full YAML entry; rejected device -> empty list.
        expected_data = '- connected: true\n' \
                        f'  id: {serial}\n' \
                        '  platform: unknown\n' \
                        f'  product: {product}\n' \
                        '  runner: pyocd\n' \
                        '  serial: /dev/ttyUSB23\n'
        expected_data2 = '[]\n'

        load_data = open(path).read()
        if location.endswith('0'):
            assert load_data == expected_data
        else:
            assert load_data == expected_data2
        if os.path.exists(path):
            os.remove(path)
        assert str(sys_exit.value) == '0'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_hardwaremap.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,948 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to test filtering.
"""
import importlib
import mock
import os
import pytest
import sys
import json
import re
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
class TestFilter:
    """Blackbox tests for twister's test-filtering command line options."""

    # (arch filter value, regexes expected in the verbose (-vv) log output)
    TESTDATA_1 = [
        (
            'x86',
            [
                r'(it8xxx2_evb).*?(SKIPPED: Command line testsuite arch filter)',
                r'(DEBUG\s+- adding qemu_x86)',
            ],
        ),
        (
            'arm',
            [
                r'(it8xxx2_evb).*?(SKIPPED: Command line testsuite arch filter)',
                r'(qemu_x86).*?(SKIPPED: Command line testsuite arch filter)',
                r'(hsdk).*?(SKIPPED: Command line testsuite arch filter)',
            ]
        ),
        (
            'riscv',
            [
                r'(qemu_x86).*?(SKIPPED: Command line testsuite arch filter)',
                r'(hsdk).*?(SKIPPED: Command line testsuite arch filter)',
                r'(DEBUG\s+- adding it8xxx2_evb)'
            ]
        )
    ]
    # (vendor filter value, regexes expected in the verbose log output)
    TESTDATA_2 = [
        (
            'nxp',
            [
                r'(it8xxx2_evb).*?(SKIPPED: Not a selected vendor platform)',
                r'(hsdk).*?(SKIPPED: Not a selected vendor platform)',
                r'(qemu_x86).*?(SKIPPED: Not a selected vendor platform)',
            ],
        ),
        (
            'intel',
            [
                r'(it8xxx2_evb).*?(SKIPPED: Not a selected vendor platform)',
                r'(qemu_x86).*?(SKIPPED: Not a selected vendor platform)',
                r'(DEBUG\s+- adding intel_adl_crb)'
            ]
        ),
        (
            'ite',
            [
                r'(qemu_x86).*?(SKIPPED: Not a selected vendor platform)',
                r'(intel_adl_crb).*?(SKIPPED: Not a selected vendor platform)',
                r'(hsdk).*?(SKIPPED: Not a selected vendor platform)',
                r'(DEBUG\s+- adding it8xxx2_evb)'
            ]
        )
    ]

    @classmethod
    def setup_class(cls):
        # Load the twister entry script as a module so each test can run
        # it in-process under a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'tag, expected_test_count',
        [
            ('device', 5),   # dummy.agnostic.group1.subgroup1.assert
                             # dummy.agnostic.group1.subgroup2.assert
                             # dummy.agnostic.group2.assert1
                             # dummy.agnostic.group2.assert2
                             # dummy.agnostic.group2.assert3
            ('agnostic', 1)  # dummy.device.group.assert
        ],
        ids=['no device', 'no agnostic']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_exclude_tag(self, out_path, tag, expected_test_count):
        """--exclude-tag should drop all suites carrying the given tag."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
               ['--exclude-tag', tag] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        # Testcases without a 'reason' key are the ones that were not
        # filtered out of the plan.
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in j['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert len(filtered_j) == expected_test_count

        assert str(sys_exit.value) == '0'

    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_enable_slow(self, out_path):
        """--enable-slow should include slow testcases in the plan."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic')
        alt_config_root = os.path.join(TEST_DATA, 'alt-test-configs', 'dummy', 'agnostic')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-slow'] + \
               ['--alt-config-root', alt_config_root] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in j['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(sys_exit.value) == '0'

        # Slow and regular testcases together: 5 in total.
        assert len(filtered_j) == 5

    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_enable_slow_only(self, out_path):
        """--enable-slow-only should select only the slow testcases."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic')
        alt_config_root = os.path.join(TEST_DATA, 'alt-test-configs', 'dummy', 'agnostic')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-slow-only'] + \
               ['--alt-config-root', alt_config_root] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in j['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(sys_exit.value) == '0'

        # Only the slow subset remains: 3 testcases.
        assert len(filtered_j) == 3

    @pytest.mark.parametrize(
        'arch, expected',
        TESTDATA_1,
        ids=[
            'arch x86',
            'arch arm',
            'arch riscv'
        ],
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_arch(self, capfd, out_path, arch, expected):
        """--arch should keep matching platforms and skip the rest."""
        path = os.path.join(TEST_DATA, 'tests', 'no_filter')
        test_platforms = ['qemu_x86', 'hsdk', 'intel_adl_crb', 'it8xxx2_evb']
        args = ['--outdir', out_path, '-T', path, '-vv'] + \
               ['--arch', arch] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert str(sys_exit.value) == '0'

        # Skip/add decisions are only visible in the verbose log (stderr).
        for line in expected:
            assert re.search(line, err)

    @pytest.mark.parametrize(
        'vendor, expected',
        TESTDATA_2,
        ids=[
            'vendor nxp',
            'vendor intel',
            'vendor ite'
        ],
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_vendor(self, capfd, out_path, vendor, expected):
        """--vendor should keep matching platforms and skip the rest."""
        path = os.path.join(TEST_DATA, 'tests', 'no_filter')
        test_platforms = ['qemu_x86', 'hsdk', 'intel_adl_crb', 'it8xxx2_evb']
        args = ['--outdir', out_path, '-T', path, '-vv'] + \
               ['--vendor', vendor] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for line in expected:
            assert re.search(line, err)

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'flag, expected_test_count',
        [
            (['--ignore-platform-key'], 2),
            ([], 1)
        ],
        ids=['ignore_platform_key', 'without ignore_platform_key']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_ignore_platform_key(self, out_path, flag, expected_test_count):
        """--ignore-platform-key should run a keyed suite on every platform."""
        test_platforms = ['qemu_x86', 'qemu_x86_64']
        path = os.path.join(TEST_DATA, 'tests', 'platform_key')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               flag + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in j['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(sys_exit.value) == '0'

        assert len(filtered_j) == expected_test_count
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_filter.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,399 |
```python
#!/usr/bin/env python3
#
'''Common fixtures for use in testing the twister tool.'''
import logging
import shutil
import mock
import os
import pytest
import sys
ZEPHYR_BASE = os.getenv('ZEPHYR_BASE')
TEST_DATA = os.path.join(ZEPHYR_BASE, 'scripts', 'tests',
'twister_blackbox', 'test_data')
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts"))
# PropertyMock stand-ins used to patch TestPlan's SAMPLE_FILENAME /
# TESTSUITE_FILENAME class attributes, so blackbox tests discover the
# `test_sample.yaml` / `test_data.yaml` fixtures instead of production suites.
# (A redundant duplicate assignment of sample_filename_mock was removed.)
sample_filename_mock = mock.PropertyMock(return_value='test_sample.yaml')
testsuite_filename_mock = mock.PropertyMock(return_value='test_data.yaml')
def pytest_configure(config):
    """Register the custom markers used by the autouse fixtures below."""
    for marker_line in (
        "noclearlog: disable the clear_log autouse fixture",
        "noclearout: disable the provide_out autouse fixture",
    ):
        config.addinivalue_line("markers", marker_line)
@pytest.fixture(name='zephyr_base')
def zephyr_base_directory():
    """Expose the Zephyr tree root (the $ZEPHYR_BASE env var) as a fixture."""
    return ZEPHYR_BASE
@pytest.fixture(name='zephyr_test_data')
def zephyr_test_directory():
    """Expose the blackbox test-data directory as a fixture."""
    return TEST_DATA
@pytest.fixture(autouse=True)
def clear_log(request):
    """Autouse fixture detaching stale log handlers before each test.

    Tests marked with ``@pytest.mark.noclearlog`` opt out of this cleanup.
    The actual work lives in ``clear_log_in_test`` so non-fixture code can
    call it directly wherever required.
    """
    if 'noclearlog' not in request.keywords:
        clear_log_in_test()
def clear_log_in_test():
    """Detach every handler from every logger the logging module knows about.

    Needed to avoid pytest logging teardown errors when the twister script
    is executed repeatedly in-process.
    """
    loggers = [logging.getLogger()] \
        + list(logging.Logger.manager.loggerDict.values()) \
        + [logging.getLogger(name) for
           name in logging.root.manager.loggerDict]
    for logger in loggers:
        # loggerDict may contain PlaceHolder objects with no 'handlers'.
        # BUGFIX: iterate over a copy — removeHandler() mutates the live
        # `logger.handlers` list, and removing while iterating it made the
        # loop skip every other handler.
        for handler in list(getattr(logger, 'handlers', [])):
            logger.removeHandler(handler)
# Autouse fixture handing every blackbox test an `out_path` directory, meant
# to be passed as twister's `-O` (`--outdir`) argument. Tests marked with
# `@pytest.mark.noclearout` skip both the setup and the cleanup — most of the
# time simply omitting the `out_path` parameter is sufficient.
# APPRECIATED: method of using this out_path wholly outside of test code
@pytest.fixture(name='out_path', autouse=True)
def provide_out(tmp_path, request):
    if 'noclearout' in request.keywords:
        yield
        return

    # Setup: a fresh container directory under pytest's tmp_path.
    container = tmp_path / 'blackbox-out-container'
    container.mkdir()

    # The test body runs with the nested output directory path.
    yield os.path.join(container, 'blackbox-out')

    # Teardown: tmp_path would be reaped eventually, but twister output can
    # get large quickly and is not needed after the test, so drop it now.
    shutil.rmtree(container)
``` | /content/code_sandbox/scripts/tests/twister_blackbox/conftest.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 697 |
```ini
[pytest]
norecursedirs = test_data *.egg .* _darcs build CVS dist node_modules venv {arch}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/pytest.ini | ini | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 27 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to Twister's tooling.
"""
# pylint: disable=duplicate-code
import importlib
import mock
import os
import pytest
import sys
import json
from conftest import ZEPHYR_BASE, TEST_DATA, sample_filename_mock, testsuite_filename_mock
from twisterlib.statuses import TwisterStatus
from twisterlib.testplan import TestPlan
class TestTooling:
    """Blackbox tests for twister CLI options related to its tooling."""

    @staticmethod
    def _platform_args(platforms):
        # Expand ['a', 'b'] into ['-p', 'a', '-p', 'b'].
        expanded = []
        for platform in platforms:
            expanded += ['-p', platform]
        return expanded

    @classmethod
    def setup_class(cls):
        # Load scripts/twister under the name __main__ so that
        # exec_module() re-runs its command-line entry point.
        script_path = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', script_path)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'jobs',
        ['1', '2'],
        ids=['single job', 'two jobs']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_jobs(self, out_path, jobs):
        """--jobs N must be echoed in twister.log; twister must exit 0."""
        suite_dir = os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2')
        argv = ['-i', '--outdir', out_path, '-T', suite_dir, '--jobs', jobs]
        argv += self._platform_args(['qemu_x86', 'intel_adl_crb'])

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + argv), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'twister.log')) as log_file:
            log_text = log_file.read()

        assert f'JOBS: {jobs}' in log_text
        assert str(sys_exit.value) == '0'

    @mock.patch.object(TestPlan, 'SAMPLE_FILENAME', sample_filename_mock)
    def test_force_toolchain(self, out_path):
        """--force-toolchain keeps a toolchain-unsupported platform unfiltered."""
        # nsim_vpx5 is one of the rare platforms that do not support the
        # zephyr toolchain, so without the flag it would be filtered out.
        sample_dir = os.path.join(TEST_DATA, 'samples', 'hello_world')
        argv = ['-i', '--outdir', out_path, '-T', sample_dir, '-y',
                '--force-toolchain']
        argv += self._platform_args(['nsim/nsim_vpx5'])

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + argv), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        with open(os.path.join(out_path, 'testplan.json')) as f:
            plan = json.load(f)
        results = [
            (ts['platform'], ts['name'], tc['identifier'], tc['status'])
            for ts in plan['testsuites']
            for tc in ts['testcases']
        ]

        # Normally a board not supporting our toolchain would be filtered,
        # so we check against that.
        assert len(results) == 1
        assert results[0][3] != TwisterStatus.FILTER

    @pytest.mark.parametrize(
        'test_path, test_platforms',
        [
            (
                os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
                ['qemu_x86'],
            )
        ],
        ids=[
            'ninja',
        ]
    )
    @pytest.mark.parametrize(
        'flag',
        ['--ninja', '-N']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_ninja(self, capfd, out_path, test_path, test_platforms, flag):
        """Both the long and short ninja flags must run to a clean exit."""
        argv = ['--outdir', out_path, '-T', test_path, flag]
        argv += self._platform_args(test_platforms)

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + argv), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        # Replay captured output so it is visible in the pytest report.
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert str(sys_exit.value) == '0'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_tooling.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 995 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to the shuffling of the test order.
"""
import importlib
import mock
import os
import pytest
import re
import sys
import json
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
class TestShuffle:
    """Blackbox tests for --shuffle-tests/--shuffle-tests-seed with --subset."""

    @classmethod
    def setup_class(cls):
        # Load scripts/twister as '__main__' so each test can re-run its
        # command-line entry point via exec_module() with a mocked argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        # seed: value for --shuffle-tests-seed
        # ratio: value for --subset, formatted as part/total
        # expected_order: testsuite names expected in that subset, in order
        'seed, ratio, expected_order',
        [
            ('123', '1/2', ['dummy.agnostic.group1.subgroup1', 'dummy.agnostic.group1.subgroup2']),
            ('123', '2/2', ['dummy.agnostic.group2', 'dummy.device.group']),
            ('321', '1/2', ['dummy.agnostic.group1.subgroup1', 'dummy.agnostic.group2']),
            ('321', '2/2', ['dummy.device.group', 'dummy.agnostic.group1.subgroup2']),
            ('123', '1/3', ['dummy.agnostic.group1.subgroup1', 'dummy.agnostic.group1.subgroup2']),
            ('123', '2/3', ['dummy.agnostic.group2']),
            ('123', '3/3', ['dummy.device.group']),
            ('321', '1/3', ['dummy.agnostic.group1.subgroup1', 'dummy.agnostic.group2']),
            ('321', '2/3', ['dummy.device.group']),
            ('321', '3/3', ['dummy.agnostic.group1.subgroup2'])
        ],
        ids=['first half, 123', 'second half, 123', 'first half, 321', 'second half, 321',
             'first third, 123', 'middle third, 123', 'last third, 123',
             'first third, 321', 'middle third, 321', 'last third, 321']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_shuffle_tests(self, out_path, seed, ratio, expected_order):
        """A given seed must produce a deterministic suite order per subset."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
            ['--shuffle-tests', '--shuffle-tests-seed', seed] + \
            ['--subset', ratio] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        # Keep only testcases without a 'reason' key (i.e. not filtered).
        # NOTE(review): the comprehension variable shadows the outer `j`.
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier']) \
            for ts in j['testsuites'] \
            for tc in ts['testcases'] if 'reason' not in tc
        ]
        # Strip trailing '.assertN' testcase suffixes, then deduplicate
        # while preserving order to recover the testsuite execution order.
        testcases = [re.sub(r'\.assert[^\.]*?$', '', j[2]) for j in filtered_j]
        testsuites = list(dict.fromkeys(testcases))

        assert testsuites == expected_order

        assert str(sys_exit.value) == '0'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_shuffle.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 810 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions - those requiring testplan.json
"""
import importlib
import mock
import os
import pytest
import sys
import json
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
from twisterlib.error import TwisterRuntimeError
class TestTestPlan:
    """Blackbox tests for CLI options whose effect shows in testplan.json."""

    # (--sub-test value, expected exception, expected testcase count)
    # The second entry passes a path instead of a test name, which is
    # expected to raise TwisterRuntimeError rather than exiting cleanly.
    TESTDATA_1 = [
        ('dummy.agnostic.group2.assert1', SystemExit, 3),
        (
            os.path.join('scripts', 'tests', 'twister_blackbox', 'test_data', 'tests',
                         'dummy', 'agnostic', 'group1', 'subgroup1',
                         'dummy.agnostic.group2.assert1'),
            TwisterRuntimeError,
            None
        ),
    ]
    # (--filter value, expected testcase count)
    TESTDATA_2 = [
        ('buildable', 6),
        ('runnable', 5),
    ]
    # (whether --integration is passed, expected testcase count)
    TESTDATA_3 = [
        (True, 1),
        (False, 6),
    ]

    @classmethod
    def setup_class(cls):
        # Load scripts/twister as '__main__' so exec_module() re-runs its
        # command-line entry point under a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'test, expected_exception, expected_subtest_count',
        TESTDATA_1,
        ids=['valid', 'invalid']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_subtest(self, out_path, test, expected_exception, expected_subtest_count):
        """--sub-test selects the expected testcases or raises on bad input."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '--sub-test', test, '-y'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(expected_exception) as exc:
            self.loader.exec_module(self.twister_module)

        # For the error case, raising the expected exception is the whole
        # check; there is no testplan to inspect.
        if expected_exception != SystemExit:
            assert True
            return

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        # Keep only testcases without a 'reason' key (i.e. not filtered).
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier']) \
            for ts in j['testsuites'] \
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(exc.value) == '0'
        assert len(filtered_j) == expected_subtest_count

    @pytest.mark.parametrize(
        'filter, expected_count',
        TESTDATA_2,
        ids=['buildable', 'runnable']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_filter(self, out_path, filter, expected_count):
        """--filter buildable/runnable yields the expected testcase count."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '--filter', filter, '-y'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as exc:
            self.loader.exec_module(self.twister_module)

        assert str(exc.value) == '0'

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        # Keep only testcases without a 'reason' key (i.e. not filtered).
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier']) \
            for ts in j['testsuites'] \
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert expected_count == len(filtered_j)

    @pytest.mark.parametrize(
        'integration, expected_count',
        TESTDATA_3,
        ids=['integration', 'no integration']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    @mock.patch.object(TestPlan, 'SAMPLE_FILENAME', '')
    def test_integration(self, out_path, integration, expected_count):
        """--integration restricts the plan to integration platforms only."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
            (['--integration'] if integration else []) + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as exc:
            self.loader.exec_module(self.twister_module)

        assert str(exc.value) == '0'

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        # Keep only testcases without a 'reason' key (i.e. not filtered).
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier']) \
            for ts in j['testsuites'] \
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert expected_count == len(filtered_j)
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_testplan.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,221 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to test filtering.
"""
import importlib
import mock
import os
import pytest
import sys
import re
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
class TestDevice:
    """Blackbox tests for twister CLI options related to device handling."""

    # Seeds passed to --seed. Plain ints: the previous 1-tuple wrapping
    # served no purpose and forced an awkward `seed[0]` at every use site.
    TESTDATA_1 = [
        1234,
        4321,
        1324
    ]

    @classmethod
    def setup_class(cls):
        # Load scripts/twister as '__main__' so exec_module() re-runs its
        # command-line entry point under a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'seed',
        TESTDATA_1,
        ids=[
            'seed 1234',
            'seed 4321',
            'seed 1324'
        ],
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_seed(self, capfd, out_path, seed):
        """A failing native_sim run must report the injected --seed value."""
        test_platforms = ['native_sim']
        path = os.path.join(TEST_DATA, 'tests', 'seed_native_sim')
        args = ['--outdir', out_path, '-i', '-T', path, '-vv'] + \
            ['--seed', str(seed)] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        # Replay captured output so it is visible in the pytest report.
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        # The dummy test is built to fail, so twister exits non-zero and
        # the FAILED line must echo the seed we passed in.
        assert str(sys_exit.value) == '1'
        expected_line = r'seed_native_sim.dummy FAILED Failed \(native (\d+\.\d+)s/seed: {}\)'.format(seed)
        assert re.search(expected_line, err)
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_device.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 493 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions changing the output files.
"""
import importlib
import re
import mock
import os
import shutil
import pytest
import sys
import tarfile
from conftest import ZEPHYR_BASE, TEST_DATA, sample_filename_mock, testsuite_filename_mock
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
@mock.patch.object(TestPlan, 'SAMPLE_FILENAME', sample_filename_mock)
class TestOutfile:
    """Blackbox tests for twister CLI options that change output files."""

    @classmethod
    def setup_class(cls):
        # Load scripts/twister as '__main__' so exec_module() re-runs its
        # command-line entry point under a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        # flag_section: extra CLI flags under test
        # clobber: whether the pre-existing outdir is expected to be moved
        #          aside as 'blackbox-out.1'
        # expect_straggler: whether a pre-existing file survives in outdir
        'flag_section, clobber, expect_straggler',
        [
            ([], True, False),
            (['--clobber-output'], False, False),
            (['--no-clean'], False, True),
            (['--clobber-output', '--no-clean'], False, True),
        ],
        ids=['clobber', 'do not clobber', 'do not clean', 'do not clobber, do not clean']
    )
    def test_clobber_output(self, out_path, flag_section, clobber, expect_straggler):
        """Check how --clobber-output/--no-clean treat an existing outdir."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
            flag_section + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        # We create an empty 'blackbox-out' to trigger the clobbering
        os.mkdir(os.path.join(out_path))

        # We want to have a single straggler to check for
        straggler_name = 'atavi.sm'
        straggler_path = os.path.join(out_path, straggler_name)
        open(straggler_path, 'a').close()

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        # Default behaviour moves the old outdir aside as blackbox-out.1.
        expected_dirs = ['blackbox-out']
        if clobber:
            expected_dirs += ['blackbox-out.1']
        current_dirs = os.listdir(os.path.normpath(os.path.join(out_path, '..')))
        print(current_dirs)
        assert sorted(current_dirs) == sorted(expected_dirs)

        out_contents = os.listdir(os.path.join(out_path))
        print(out_contents)
        if expect_straggler:
            assert straggler_name in out_contents
        else:
            assert straggler_name not in out_contents

    def test_runtime_artifact_cleanup(self, out_path):
        """--runtime-artifact-cleanup leaves only a whitelisted set of files."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'samples', 'hello_world')
        args = ['-i', '--outdir', out_path, '-T', path] + \
            ['--runtime-artifact-cleanup'] + \
            [] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        relpath = os.path.relpath(path, ZEPHYR_BASE)
        sample_path = os.path.join(out_path, 'qemu_x86', relpath, 'sample.basic.helloworld')
        listdir = os.listdir(sample_path)
        zephyr_listdir = os.listdir(os.path.join(sample_path, 'zephyr'))

        # Anything beyond these lists means the cleanup missed something.
        expected_contents = ['CMakeFiles', 'handler.log', 'build.ninja', 'CMakeCache.txt',
                             'zephyr', 'build.log']
        expected_zephyr_contents = ['.config']

        assert all([content in expected_zephyr_contents for content in zephyr_listdir]), \
            'Cleaned zephyr directory has unexpected files.'
        assert all([content in expected_contents for content in listdir]), \
            'Cleaned directory has unexpected files.'

    def test_short_build_path(self, out_path):
        """--short-build-path must shorten the -B and QEMU pipe paths CMake sees."""
        test_platforms = ['qemu_x86']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2')
        # twister_links dir does not exist in a dry run.
        args = ['-i', '--outdir', out_path, '-T', path] + \
            ['--short-build-path'] + \
            ['--ninja'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        relative_test_path = os.path.relpath(path, ZEPHYR_BASE)
        test_result_path = os.path.join(out_path, 'qemu_x86',
                                        relative_test_path, 'dummy.agnostic.group2')

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        with open(os.path.join(out_path, 'twister.log')) as f:
            twister_log = f.read()

        # Locate the 'Running cmake on ...' line for our platform first,
        # then search for the cmake invocation after it.
        pattern_running = r'Running\s+cmake\s+on\s+(?P<full_path>[\\\/].*)\s+for\s+qemu_x86\s*\n'
        res_running = re.search(pattern_running, twister_log)
        assert res_running

        # Spaces, forward slashes, etc. in the path as well as CMake peculiarities
        # require us to forgo simple RegExes.
        pattern_calling_line = r'Calling cmake: [^\n]+$'
        res_calling = re.search(pattern_calling_line, twister_log[res_running.end():], re.MULTILINE)
        calling_line = res_calling.group()

        # HIGHLY DANGEROUS pattern!
        # If the checked text is not CMake flags only, it is exponential!
        # Where N is the length of non-flag space-delimited text section.
        flag_pattern = r'(?:\S+(?: \\)?)+- '

        cmake_path = shutil.which('cmake')
        if not cmake_path:
            assert False, 'Cmake not found.'
        cmake_call_section = r'^Calling cmake: ' + re.escape(cmake_path)
        calling_line = re.sub(cmake_call_section, '', calling_line)
        # The flag regex matches right-to-left, so scan the line reversed;
        # each match is reversed back before inspection below.
        calling_line = calling_line[::-1]

        flag_iterable = re.finditer(flag_pattern, calling_line)

        for match in flag_iterable:
            reversed_flag = match.group()
            flag = reversed_flag[::-1]

            # Build flag
            if flag.startswith(' -B'):
                flag_value = flag[3:]
                build_filename = os.path.basename(os.path.normpath(flag_value))
                unshortened_build_path = os.path.join(test_result_path, build_filename)
                assert flag_value != unshortened_build_path, 'Build path unchanged.'
                assert len(flag_value) < len(unshortened_build_path), 'Build path not shortened.'

            # Pipe flag
            if flag.startswith(' -DQEMU_PIPE='):
                flag_value = flag[13:]
                pipe_filename = os.path.basename(os.path.normpath(flag_value))
                unshortened_pipe_path = os.path.join(test_result_path, pipe_filename)
                assert flag_value != unshortened_pipe_path, 'Pipe path unchanged.'
                assert len(flag_value) < len(unshortened_pipe_path), 'Pipe path not shortened.'

    def test_prep_artifacts_for_testing(self, out_path):
        """--prep-artifacts-for-testing must keep zephyr.elf in the build dir."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'samples', 'hello_world')
        relative_test_path = os.path.relpath(path, ZEPHYR_BASE)
        zephyr_out_path = os.path.join(out_path, 'qemu_x86', relative_test_path,
                                       'sample.basic.helloworld', 'zephyr')
        args = ['-i', '--outdir', out_path, '-T', path] + \
            ['--prep-artifacts-for-testing'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        zephyr_artifact_list = os.listdir(zephyr_out_path)

        # --build-only and normal run leave more files than --prep-artifacts-for-testing
        # However, the cost of testing that this leaves less seems to outweigh the benefits.
        # So we'll only check for the most important artifact.
        assert 'zephyr.elf' in zephyr_artifact_list

    def test_package_artifacts(self, out_path):
        """--package-artifacts must produce a tarball usable with --test-only."""
        test_platforms = ['qemu_x86']
        path = os.path.join(TEST_DATA, 'samples', 'hello_world')
        package_name = 'PACKAGE'
        package_path = os.path.join(out_path, package_name)
        args = ['-i', '--outdir', out_path, '-T', path] + \
            ['--package-artifacts', package_path] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        # Check whether we have something as basic as zephyr.elf file
        with tarfile.open(package_path, "r") as tar:
            assert any([path.endswith('zephyr.elf') for path in tar.getnames()])

        # Delete everything but for the package
        for clean_up in os.listdir(os.path.join(out_path)):
            if not clean_up.endswith(package_name):
                clean_up_path = os.path.join(out_path, clean_up)
                if os.path.isfile(clean_up_path):
                    os.remove(clean_up_path)
                else:
                    shutil.rmtree(os.path.join(out_path, clean_up))

        # Unpack the package
        with tarfile.open(package_path, "r") as tar:
            tar.extractall(path=out_path)

        # Why does package.py put files inside the out_path folder?
        # It forces us to move files up one directory after extraction.
        file_names = os.listdir(os.path.join(out_path, os.path.basename(out_path)))
        for file_name in file_names:
            shutil.move(os.path.join(out_path, os.path.basename(out_path), file_name), out_path)

        # Re-run in --test-only mode against the unpacked artifacts.
        args = ['-i', '--outdir', out_path, '-T', path] + \
            ['--test-only'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_outfile.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,523 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to the quarantine.
"""
import importlib
import mock
import os
import pytest
import re
import sys
import json
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
class TestQuarantine:
    """Blackbox tests for twister's quarantine-related CLI options."""

    @classmethod
    def setup_class(cls):
        # Load scripts/twister as '__main__' so exec_module() re-runs its
        # command-line entry point under a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_quarantine_verify(self, out_path):
        """--quarantine-verify selects only the quarantined testcases."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        quarantine_path = os.path.join(TEST_DATA, 'twister-quarantine-list.yml')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
            ['--quarantine-verify'] + \
            ['--quarantine-list', quarantine_path] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        # Keep only testcases without a 'reason' key (i.e. not filtered).
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in j['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(sys_exit.value) == '0'
        assert len(filtered_j) == 2

    @pytest.mark.parametrize(
        'test_path, test_platforms, quarantine_directory',
        [
            (
                os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
                ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb'],
                os.path.join(TEST_DATA, 'twister-quarantine-list.yml'),
            ),
        ],
        ids=[
            'quarantine',
        ],
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_quarantine_list(self, capfd, out_path, test_path, test_platforms, quarantine_directory):
        """--quarantine-list skips the listed tests and logs why."""
        args = ['--outdir', out_path, '-T', test_path] +\
            ['--quarantine-list', quarantine_directory] + \
            ['-vv'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        # Replay captured output so it is visible in the pytest report.
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        # Platform-wide quarantine: both scenarios skipped on intel_adl_crb.
        frdm_match = re.search('agnostic/group2/dummy.agnostic.group2 SKIPPED: Quarantine: test '
                               'intel_adl_crb', err)
        frdm_match2 = re.search(
            'agnostic/group1/subgroup2/dummy.agnostic.group1.subgroup2 SKIPPED: Quarantine: test '
            'intel_adl_crb',
            err)
        # Scenario quarantined on a single platform only.
        qemu_64_match = re.search(
            'agnostic/group1/subgroup2/dummy.agnostic.group1.subgroup2 SKIPPED: Quarantine: test '
            'qemu_x86_64',
            err)
        # Scenario quarantined on all platforms. The original code performed
        # this exact same search three times; once is sufficient.
        all_platforms_match = re.search(
            'agnostic/group1/subgroup1/dummy.agnostic.group1.subgroup1 SKIPPED: Quarantine: test '
            'all platforms',
            err)

        assert frdm_match and frdm_match2, 'platform quarantine not work properly'
        assert qemu_64_match, 'platform quarantine on scenario not work properly'
        assert all_platforms_match, 'scenario quarantine not work properly'
        assert str(sys_exit.value) == '0'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_quarantine.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,083 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to test configuration files.
"""
import importlib
import mock
import os
import pytest
import sys
import json
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
class TestConfig:
    """Blackbox tests for CLI options around test configuration files."""

    @classmethod
    def setup_class(cls):
        # Load scripts/twister under the name __main__ so that
        # exec_module() re-runs its command-line entry point.
        script_path = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', script_path)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_alt_config_root(self, out_path):
        """--alt-config-root picks up the tagged alternate configurations."""
        suite_dir = os.path.join(TEST_DATA, 'tests', 'dummy')
        alt_root = os.path.join(TEST_DATA, 'alt-test-configs', 'dummy')
        argv = ['-i', '--outdir', out_path, '-T', suite_dir, '-y',
                '--alt-config-root', alt_root,
                '--tag', 'alternate-config-root']
        for platform in ['qemu_x86', 'intel_adl_crb']:
            argv += ['-p', platform]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + argv), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            plan = json.load(f)
        # Testcases without a 'reason' key are the ones actually selected.
        selected = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in plan['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(sys_exit.value) == '0'
        assert len(selected) == 3

    @pytest.mark.parametrize(
        'level, expected_tests',
        [
            ('smoke', 5),
            ('acceptance', 6),
        ],
        ids=['smoke', 'acceptance']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_level(self, out_path, level, expected_tests):
        """--level with a --test-config yields the expected testcase count."""
        suite_dir = os.path.join(TEST_DATA, 'tests', 'dummy')
        config_file = os.path.join(TEST_DATA, 'test_config.yaml')
        argv = ['-i', '--outdir', out_path, '-T', suite_dir, '--level', level, '-y',
                '--test-config', config_file]
        for platform in ['qemu_x86', 'intel_adl_crb']:
            argv += ['-p', platform]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + argv), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        with open(os.path.join(out_path, 'testplan.json')) as f:
            plan = json.load(f)
        # Testcases without a 'reason' key are the ones actually selected.
        selected = [
            (ts['platform'], ts['name'], tc['identifier'])
            for ts in plan['testsuites']
            for tc in ts['testcases'] if 'reason' not in tc
        ]

        assert str(sys_exit.value) == '0'
        assert expected_tests == len(selected)
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_config.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 789 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions changing test output.
"""
import importlib
import re
import mock
import os
import pytest
import sys
import json
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestOutput:
TESTDATA_1 = [
(
os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic')
),
]
@classmethod
def setup_class(cls):
apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
cls.twister_module = importlib.util.module_from_spec(cls.spec)
@classmethod
def teardown_class(cls):
pass
@pytest.mark.parametrize(
'flag, expect_paths',
[
('--no-detailed-test-id', False),
('--detailed-test-id', True)
],
ids=['no-detailed-test-id', 'detailed-test-id']
)
def test_detailed_test_id(self, out_path, flag, expect_paths):
test_platforms = ['qemu_x86', 'intel_adl_crb']
path = os.path.join(TEST_DATA, 'tests', 'dummy')
args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
[flag] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
assert str(sys_exit.value) == '0'
with open(os.path.join(out_path, 'testplan.json')) as f:
j = json.load(f)
filtered_j = [
(ts['platform'], ts['name'], tc['identifier']) \
for ts in j['testsuites'] \
for tc in ts['testcases'] if 'reason' not in tc
]
assert len(filtered_j) > 0, "No dummy tests found."
expected_start = os.path.relpath(TEST_DATA, ZEPHYR_BASE) if expect_paths else 'dummy.'
assert all([testsuite.startswith(expected_start)for _, testsuite, _ in filtered_j])
def test_inline_logs(self, out_path):
test_platforms = ['qemu_x86', 'intel_adl_crb']
path = os.path.join(TEST_DATA, 'tests', 'always_build_error', 'dummy')
args = ['--outdir', out_path, '-T', path] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
assert str(sys_exit.value) == '1'
rel_path = os.path.relpath(path, ZEPHYR_BASE)
build_path = os.path.join(out_path, 'qemu_x86', rel_path, 'always_fail.dummy', 'build.log')
with open(build_path) as f:
build_log = f.read()
clear_log_in_test()
args = ['--outdir', out_path, '-T', path] + \
['--inline-logs'] + \
[val for pair in zip(
['-p'] * len(test_platforms), test_platforms
) for val in pair]
with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
pytest.raises(SystemExit) as sys_exit:
self.loader.exec_module(self.twister_module)
assert str(sys_exit.value) == '1'
with open(os.path.join(out_path, 'twister.log')) as f:
inline_twister_log = f.read()
# Remove information that differs between the runs
removal_patterns = [
# Remove tmp filepaths, as they will differ
r'(/|\\)tmp(/|\\)\S+',
# Remove object creation order, as it can change
r'^\[[0-9]+/[0-9]+\] ',
# Remove variable CMake flag
r'-DTC_RUNID=[0-9a-zA-Z]+',
# Remove variable order CMake flags
r'-I[0-9a-zA-Z/\\]+',
# Remove duration-sensitive entries
r'-- Configuring done \([0-9.]+s\)',
r'-- Generating done \([0-9.]+s\)',
# Cache location may vary between CI runs
r'^.*-- Cache files will be written to:.*$'
]
for pattern in removal_patterns:
c_pattern = re.compile(pattern, flags=re.MULTILINE)
inline_twister_log = re.sub(c_pattern, '', inline_twister_log)
build_log = re.sub(c_pattern, '', build_log)
split_build_log = build_log.split('\n')
for r in split_build_log:
assert r in inline_twister_log
def _get_matches(self, err, regex_line):
matches = []
for line in err.split('\n'):
columns = line.split()
if len(columns) == 8:
for i in range(8):
match = re.fullmatch(regex_line[i], columns[i])
if match:
matches.append(match)
if len(matches) == 8:
return matches
else:
matches = []
return matches
    @pytest.mark.parametrize(
        'test_path',
        TESTDATA_1,
        ids=[
            'single_v',
        ]
    )
    def test_single_v(self, capfd, out_path, test_path):
        """With a single ``-v``, twister's stderr must contain at least one
        INFO progress line of the form
        ``INFO - <n>/<m> <platform> <test> <STATUS> (<type> <time>s)``.
        """
        args = ['--outdir', out_path, '-T', test_path, '-v']
        # Run the real twister entry point in-process with a mocked argv;
        # it always terminates via SystemExit.
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        # Re-emit captured output so it is visible in the pytest log.
        sys.stdout.write(out)
        sys.stderr.write(err)
        # Eight columns of a verbose per-test progress line.
        regex_line = [r'INFO', r'-', r'\d+/\d+', r'\S+', r'\S+', r'[A-Z]+', r'\(\w+', r'[\d.]+s\)']
        matches = self._get_matches(err, regex_line)
        print(matches)
        assert str(sys_exit.value) == '0'
        assert len(matches) > 0
    @pytest.mark.parametrize(
        'test_path',
        TESTDATA_1,
        ids=[
            'double_v',
        ]
    )
    def test_double_v(self, capfd, out_path, test_path):
        """With ``-vv``, stderr must additionally contain DEBUG output,
        including the Zephyr boot banner captured from the target.
        """
        args = ['--outdir', out_path, '-T', test_path, '-vv']
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        # Re-emit captured output so it is visible in the pytest log.
        sys.stdout.write(out)
        sys.stderr.write(err)
        # Same verbose progress line checked by test_single_v.
        regex_line = [r'INFO', r'-', r'\d+/\d+', r'\S+', r'\S+', r'[A-Z]+', r'\(\w+', r'[\d.]+s\)']
        matches = self._get_matches(err, regex_line)
        # -vv should surface the device console, including the boot banner.
        booting_zephyr_regex = re.compile(r'^DEBUG\s+-\s+([^*]+)\*\*\*\s+Booting\s+Zephyr\s+OS\s+build.*$', re.MULTILINE)
        info_debug_line_regex = r'^\s*(INFO|DEBUG)'
        assert str(sys_exit.value) == '0'
        assert re.search(booting_zephyr_regex, err) is not None
        assert re.search(info_debug_line_regex, err) is not None
        assert len(matches) > 0
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_output.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,791 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions
"""
import importlib
import re
import mock
import os
import pytest
import sys
import json
from conftest import TEST_DATA, ZEPHYR_BASE, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestCoverage:
    """Blackbox tests for twister's code-coverage command line options.

    Each test executes the real twister script in-process (loaded once in
    ``setup_class``) with a mocked ``sys.argv`` and then inspects the
    artifacts and logs produced under ``out_path``.
    """
    # (test root, platforms, coverage artifacts expected in the outdir)
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            [
                'coverage.log', 'coverage.json',
                'coverage'
            ],
        ),
    ]
    # (test root, platforms, markers expected in verbose output)
    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            [
                'GCOV_COVERAGE_DUMP_START', 'GCOV_COVERAGE_DUMP_END'
            ],
        ),
    ]
    # (test root, platforms, expected artifacts, regex the produced
    # coverage.json must match when the basedir excludes all sources)
    TESTDATA_3 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2'),
            ['qemu_x86'],
            [
                'coverage.log', 'coverage.json',
                'coverage'
            ],
            r'{"files": \[], "gcovr/format_version": ".*"}'
        ),
    ]
    # (coverage tool, expected artifacts, --coverage-formats value)
    TESTDATA_4 = [
        (
            'gcovr',
            [
                'coverage.log', 'coverage.json',
                'coverage', os.path.join('coverage','coverage.xml')
            ],
            'xml'
        ),
        (
            'gcovr',
            [
                'coverage.log', 'coverage.json',
                'coverage', os.path.join('coverage','coverage.sonarqube.xml')
            ],
            'sonarqube'
        ),
        (
            'gcovr',
            [
                'coverage.log', 'coverage.json',
                'coverage', os.path.join('coverage','coverage.txt')
            ],
            'txt'
        ),
        (
            'gcovr',
            [
                'coverage.log', 'coverage.json',
                'coverage', os.path.join('coverage','coverage.csv')
            ],
            'csv'
        ),
        (
            'gcovr',
            [
                'coverage.log', 'coverage.json',
                'coverage', os.path.join('coverage','coverage.coveralls.json')
            ],
            'coveralls'
        ),
        (
            'gcovr',
            [
                'coverage.log', 'coverage.json',
                'coverage', os.path.join('coverage','index.html')
            ],
            'html'
        ),
        (
            'lcov',
            [
                'coverage.log', 'coverage.info',
                'ztest.info', 'coverage',
                os.path.join('coverage','index.html')
            ],
            'html'
        ),
        (
            'lcov',
            [
                'coverage.log', 'coverage.info',
                'ztest.info'
            ],
            'lcov'
        ),
    ]
    # (test root, platforms, coverage tool, log line proving the tool ran)
    TESTDATA_5 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2'),
            ['qemu_x86'],
            'gcovr',
            'Running gcovr -r'
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2'),
            ['qemu_x86'],
            'lcov',
            'Running lcov --gcov-tool'
        )
    ]
    # (test root, platforms, error messages expected when --gcov-tool
    # points at something that is not an executable)
    TESTDATA_6 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2'),
            ['qemu_x86'],
            ['The specified file does not exist.', r'\[Errno 13\] Permission denied:'],
        )
    ]
    # (test root, platforms, --coverage-platform selections; the last
    # entry is a list selecting both platforms at once)
    TESTDATA_7 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2'),
            ['qemu_x86_64', 'qemu_x86'],
            ['qemu_x86_64', 'qemu_x86', ['qemu_x86_64', 'qemu_x86']],
        )
    ]
    @classmethod
    def setup_class(cls):
        # Load the twister script as an importable module so each test can
        # execute it in-process with a mocked argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)
    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_1,
        ids=[
            'coverage',
        ]
    )
    def test_coverage(self, capfd, test_path, test_platforms, out_path, file_name):
        """--coverage with gcovr must produce the expected artifacts."""
        args = ['-i','--outdir', out_path, '-T', test_path] + \
               ['--coverage', '--coverage-tool', 'gcovr'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        assert str(sys_exit.value) == '0'
        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found {f_name}'
    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_2,
        ids=[
            'enable_coverage',
        ]
    )
    def test_enable_coverage(self, capfd, test_path, test_platforms, out_path, expected):
        """--enable-coverage alone must make the target emit the GCOV
        dump markers in the verbose log."""
        args = ['-i','--outdir', out_path, '-T', test_path] + \
               ['--enable-coverage', '-vv'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        assert str(sys_exit.value) == '0'
        for line in expected:
            match = re.search(line, err)
            assert match, f'line not found: {line}'
    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name, expected_content',
        TESTDATA_3,
        ids=[
            'coverage_basedir',
        ]
    )
    def test_coverage_basedir(self, capfd, test_path, test_platforms, out_path, file_name, expected_content):
        """--coverage-basedir pointing at an empty directory must yield a
        coverage.json with no files in it."""
        # NOTE(review): os.rmdir fails on a non-empty directory, and the
        # trailing cleanup is skipped if any assert above it fails —
        # consider a try/finally or shutil.rmtree here.
        base_dir = os.path.join(TEST_DATA, "test_dir")
        if os.path.exists(base_dir):
            os.rmdir(base_dir)
        os.mkdir(base_dir)
        args = ['--outdir', out_path,'-i', '-T', test_path] + \
               ['--coverage', '--coverage-tool', 'gcovr', '-v', '--coverage-basedir', base_dir] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        assert str(sys_exit.value) == '0'
        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found {f_name}'
            if f_name == 'coverage.json':
                with open(path, "r") as json_file:
                    json_content = json.load(json_file)
                pattern = re.compile(expected_content)
                assert pattern.match(json.dumps(json_content))
        if os.path.exists(base_dir):
            os.rmdir(base_dir)
    @pytest.mark.parametrize(
        'cov_tool, file_name, cov_format',
        TESTDATA_4,
        ids=[
            'coverage_format gcovr xml',
            'coverage_format gcovr sonarqube',
            'coverage_format gcovr txt',
            'coverage_format gcovr csv',
            'coverage_format gcovr coveralls',
            'coverage_format gcovr html',
            'coverage_format lcov html',
            'coverage_format lcov lcov',
        ]
    )
    def test_coverage_format(self, capfd, out_path, cov_tool, file_name, cov_format):
        """Each supported --coverage-formats value must produce its
        corresponding report file for the selected tool."""
        test_path = os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic', 'group2')
        test_platforms = ['qemu_x86']
        args = ['--outdir', out_path,'-i', '-T', test_path] + \
               ['--coverage', '--coverage-tool', cov_tool, '--coverage-formats', cov_format, '-v'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        assert str(sys_exit.value) == '0'
        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found {f_name}, probably format {cov_format} not work properly'
    @pytest.mark.parametrize(
        'test_path, test_platforms, cov_tool, expected_content',
        TESTDATA_5,
        ids=[
            'coverage_tool gcovr',
            'coverage_tool lcov'
        ]
    )
    def test_coverage_tool(self, capfd, caplog, test_path, test_platforms, out_path, cov_tool, expected_content):
        """--coverage-tool must actually invoke the selected tool, as
        evidenced by its invocation line in the log."""
        args = ['--outdir', out_path,'-i', '-T', test_path] + \
               ['--coverage', '--coverage-tool', cov_tool, '-v'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        assert str(sys_exit.value) == '0'
        assert re.search(expected_content, caplog.text), f'{cov_tool} line not found'
    @pytest.mark.parametrize(
        'test_path, test_platforms, expected_content',
        TESTDATA_6,
        ids=[
            'missing tool'
        ]
    )
    def test_gcov_tool(self, capfd, test_path, test_platforms, out_path, expected_content):
        """An unusable --gcov-tool (here: a directory) must make twister
        fail with a diagnostic."""
        args = ['--outdir', out_path, '-i', '-T', test_path] + \
               ['--coverage', '--gcov-tool', TEST_DATA, '-v'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        assert str(sys_exit.value) == '1'
        for line in expected_content:
            result = re.search(line, err)
            assert result, f'missing information in log: {line}'
    @pytest.mark.parametrize(
        'test_path, test_platforms, cov_platform',
        TESTDATA_7,
        ids=[
            'coverage platform'
        ]
    )
    def test_coverage_platform(self, capfd, test_path, test_platforms, out_path, cov_platform):
        """Selecting more --coverage-platform targets must yield a larger
        TOTAL in the txt coverage report than selecting a single one."""
        def search_cov():
            # Extract the TOTAL line count from coverage.txt.
            # NOTE(review): implicitly returns None when the pattern is
            # missing, which would make the comparisons below raise
            # TypeError rather than fail cleanly — confirm intended.
            pattern = r'TOTAL\s+(\d+)'
            coverage_file_path = os.path.join(out_path, 'coverage', 'coverage.txt')
            with open(coverage_file_path, 'r') as file:
                data = file.read()
            match = re.search(pattern, data)
            if match:
                total = int(match.group(1))
                return total
            else:
                print('Error, pattern not found')
        run = []
        for element in cov_platform:
            args = ['--outdir', out_path, '-i', '-T', test_path] + \
                   ['--coverage', '--coverage-formats', 'txt', '-v'] + \
                   [val for pair in zip(
                       ['-p'] * len(test_platforms), test_platforms
                   ) for val in pair]
            # A list element selects several coverage platforms at once.
            if isinstance(element, list):
                for nested_element in element:
                    args += ['--coverage-platform', nested_element]
            else:
                args += ['--coverage-platform', element]
            with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
                self.loader.exec_module(self.twister_module)
            assert str(sys_exit.value) == '0'
            run += [search_cov()]
            capfd.readouterr()
            clear_log_in_test()
        # run[2] corresponds to both platforms selected together.
        assert run[2] > run[0], 'Broader coverage platform selection did not result in broader coverage'
        assert run[2] > run[1], 'Broader coverage platform selection did not result in broader coverage'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_coverage.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,966 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to disable features.
"""
import importlib
import pytest
import mock
import os
import sys
import re
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestDisable:
    """Blackbox tests for twister's --disable-* command line options."""
    # (test root, platforms, flag under test, suite-name-mismatch log
    # patterns, whether those patterns must be ABSENT from the log)
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            '--disable-suite-name-check',
            [r"Expected suite names:\[['\w+'\[,\s]*\]", r"Detected suite names:\[['\w+'\[,\s]*\]"],
            True
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86'],
            '-v',
            [r"Expected suite names:\[['(\w+)'[, ]*]+", r"Detected suite names:\[['(\w+)'[, ]*]+"],
            False
        ),
    ]
    # (test root, platforms, flag under test, expected exit code)
    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'always_warning'),
            ['qemu_x86'],
            '--disable-warnings-as-errors',
            '0'
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'always_warning'),
            ['qemu_x86'],
            '-v',
            '1'
        ),
    ]
    @classmethod
    def setup_class(cls):
        # Load the twister script as an importable module so each test can
        # execute it in-process with a mocked argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)
    @classmethod
    def teardown_class(cls):
        # Nothing to clean up; kept for symmetry with setup_class.
        pass
    @pytest.mark.parametrize(
        'test_path, test_platforms, flag, expected, expected_none',
        TESTDATA_1,
        ids=[
            'disable-suite-name-check',
            'suite-name-check'
        ],
    )
    def test_disable_suite_name_check(self, capfd, out_path, test_path, test_platforms, flag, expected, expected_none):
        """--disable-suite-name-check must suppress the expected/detected
        suite name mismatch messages; without it they must appear."""
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [flag] + \
               ['-vv'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        assert str(sys_exit.value) == '0'
        if expected_none:
            assert re.search(expected[0], err) is None, f"Not expected string in log: {expected[0]}"
            assert re.search(expected[1], err) is None, f"Not expected: {expected[1]}"
        else:
            assert re.search(expected[0], err) is not None, f"Expected string in log: {expected[0]}"
            assert re.search(expected[1], err) is not None, f"Expected string in log: {expected[1]}"
    @pytest.mark.parametrize(
        'test_path, test_platforms, flag, expected_exit_code',
        TESTDATA_2,
        ids=[
            'disable-warnings-as-errors',
            'warnings-as-errors'
        ],
    )
    def test_disable_warnings_as_errors(self, capfd, out_path, test_path, test_platforms, flag, expected_exit_code):
        """A build that always warns must fail by default and pass with
        --disable-warnings-as-errors."""
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [flag] + \
               ['-vv'] + \
               ['--build-only'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)
        assert str(sys_exit.value) == expected_exit_code, \
            f"Twister return not expected ({expected_exit_code}) exit code: ({sys_exit.value})"
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_disable.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 980 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to saving and loading a testlist.
"""
import importlib
import mock
import os
import pytest
import sys
import json
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan
class TestTestlist:
    """Blackbox tests for twister's --save-tests / --load-tests options."""
    @classmethod
    def setup_class(cls):
        # Load the twister script as an importable module so each test can
        # execute it in-process with a mocked argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)
    @classmethod
    def teardown_class(cls):
        # Nothing to clean up; kept for symmetry with setup_class.
        pass
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_save_tests(self, out_path):
        """Save the agnostic test selection with --save-tests, then reload
        it with --load-tests over a broader test root and verify only the
        saved (non-filtered) test cases end up in the test plan."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic')
        # The saved-tests file lives one level above out_path so the
        # second run does not treat it as its own output.
        saved_tests_file_path = os.path.realpath(os.path.join(out_path, '..', 'saved-tests.json'))
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
               ['--save-tests', saved_tests_file_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        # Save agnostics tests
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        clear_log_in_test()
        # Load all
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path, '-y'] + \
               ['--load-tests', saved_tests_file_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        with open(os.path.join(out_path, 'testplan.json')) as f:
            j = json.load(f)
        # Keep only test cases that were not filtered out ('reason' marks
        # a filtered case in the test plan).
        filtered_j = [
            (ts['platform'], ts['name'], tc['identifier']) \
                for ts in j['testsuites'] \
                for tc in ts['testcases'] if 'reason' not in tc
        ]
        assert len(filtered_j) == 5
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_testlist.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 614 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions related to memory footprints.
"""
import importlib
import json
import mock
import os
import pytest
import sys
import re
# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock, clear_log_in_test
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestFootprint:
    """Blackbox tests for twister's memory-footprint comparison options.

    The common pattern: run twister once to get a baseline twister.json,
    rescale its RAM figures to fabricate a delta, then run again with a
    comparison flag and check what is (or is not) reported.
    """
    # Log printed when entering delta calculations
    FOOTPRINT_LOG = 'running footprint_reports'
    # These warnings notify us that deltas were shown in log.
    # Coupled with the code under test.
    DELTA_WARNING_COMPARE = re.compile(
        r'Found [1-9]+[0-9]* footprint deltas to .*blackbox-out\.[0-9]+/twister.json as a baseline'
    )
    DELTA_WARNING_RUN = re.compile(r'Found [1-9]+[0-9]* footprint deltas to the last twister run')
    # Size report key we modify to control for deltas
    RAM_KEY = 'used_ram'
    DELTA_DETAIL = re.compile(RAM_KEY + r' \+[0-9]+, is now +[0-9]+ \+[0-9.]+%')
    @classmethod
    def setup_class(cls):
        # Load the twister script as an importable module so each test can
        # execute it in-process with a mocked argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)
    @classmethod
    def teardown_class(cls):
        # Nothing to clean up; kept for symmetry with setup_class.
        pass
    @pytest.mark.parametrize(
        'old_ram_multiplier, expect_delta_log',
        [
            (0.75, True),
            (1.25, False),
        ],
        ids=['footprint increased', 'footprint reduced']
    )
    def test_compare_report(self, caplog, out_path, old_ram_multiplier, expect_delta_log):
        """--compare-report must report deltas only when footprint grew
        relative to the (rescaled) baseline report."""
        # First run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        # Modify the older report so we can control the difference.
        # Note: if footprint tests take too long, replace first run with a prepared twister.json
        # That will increase test-to-code_under_test coupling, however.
        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)
        for ts in j['testsuites']:
            if 'reason' not in ts:
                # We assume positive RAM usage.
                ts[self.RAM_KEY] *= old_ram_multiplier
        with open(os.path.join(out_path, 'twister.json'), 'w') as f:
            f.write(json.dumps(j, indent=4))
        # The second run renames the first run's outdir to *.1; the
        # baseline twister.json lives there.
        report_path = os.path.join(
            os.path.dirname(out_path),
            f'{os.path.basename(out_path)}.1',
            'twister.json'
        )
        # Second run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--compare-report', report_path] + \
               ['--show-footprint'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        assert self.FOOTPRINT_LOG in caplog.text
        if expect_delta_log:
            assert self.RAM_KEY in caplog.text
            assert re.search(self.DELTA_WARNING_COMPARE, caplog.text), \
                'Expected footprint deltas not logged.'
        else:
            assert self.RAM_KEY not in caplog.text
            assert not re.search(self.DELTA_WARNING_COMPARE, caplog.text), \
                'Unexpected footprint deltas logged.'
    def test_footprint_from_buildlog(self, out_path):
        """--footprint-from-buildlog must yield RAM figures that differ
        from the ones calculated from binaries."""
        # First run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               [] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        # Get values
        old_values = []
        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)
        for ts in j['testsuites']:
            if 'reason' not in ts:
                assert self.RAM_KEY in ts
                old_values += [ts[self.RAM_KEY]]
        # Second run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--footprint-from-buildlog'] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        # Get values
        new_values = []
        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)
        for ts in j['testsuites']:
            if 'reason' not in ts:
                assert self.RAM_KEY in ts
                new_values += [ts[self.RAM_KEY]]
        # There can be false positives if our calculations become too accurate.
        # Turn this test into a dummy (check only exit value) in such case.
        assert sorted(old_values) != sorted(new_values), \
            'Same values whether calculating size or using buildlog.'
    @pytest.mark.parametrize(
        'old_ram_multiplier, threshold, expect_delta_log',
        [
            (0.75, 95, False),
            (0.75, 25, True),
        ],
        ids=['footprint threshold not met', 'footprint threshold met']
    )
    def test_footprint_threshold(self, caplog, out_path, old_ram_multiplier,
                                 threshold, expect_delta_log):
        """--footprint-threshold must gate delta reporting: only growth
        above the threshold percentage is logged."""
        # First run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        # Modify the older report so we can control the difference.
        # Note: if footprint tests take too long, replace first run with a prepared twister.json
        # That will increase test-to-code_under_test coupling, however.
        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)
        for ts in j['testsuites']:
            if 'reason' not in ts:
                # We assume positive RAM usage.
                ts[self.RAM_KEY] *= old_ram_multiplier
        with open(os.path.join(out_path, 'twister.json'), 'w') as f:
            f.write(json.dumps(j, indent=4))
        report_path = os.path.join(
            os.path.dirname(out_path),
            f'{os.path.basename(out_path)}.1',
            'twister.json'
        )
        # Second run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               [f'--footprint-threshold={threshold}'] + \
               ['--compare-report', report_path, '--show-footprint'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        assert self.FOOTPRINT_LOG in caplog.text
        if expect_delta_log:
            assert self.RAM_KEY in caplog.text
            assert re.search(self.DELTA_WARNING_COMPARE, caplog.text), \
                'Expected footprint deltas not logged.'
        else:
            assert self.RAM_KEY not in caplog.text
            assert not re.search(self.DELTA_WARNING_COMPARE, caplog.text), \
                'Unexpected footprint deltas logged.'
    @pytest.mark.parametrize(
        'flags, old_ram_multiplier, expect_delta_log',
        [
            ([], 0.75, False),
            (['--show-footprint'], 0.75, True),
        ],
        ids=['footprint reduced, no show', 'footprint reduced, show']
    )
    def test_show_footprint(self, caplog, out_path, flags, old_ram_multiplier, expect_delta_log):
        """--show-footprint controls whether per-suite delta details are
        printed; the summary delta warning appears in both cases."""
        # First run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        # Modify the older report so we can control the difference.
        # Note: if footprint tests take too long, replace first run with a prepared twister.json
        # That will increase test-to-code_under_test coupling, however.
        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)
        for ts in j['testsuites']:
            if 'reason' not in ts:
                # We assume positive RAM usage.
                ts[self.RAM_KEY] *= old_ram_multiplier
        with open(os.path.join(out_path, 'twister.json'), 'w') as f:
            f.write(json.dumps(j, indent=4))
        report_path = os.path.join(
            os.path.dirname(out_path),
            f'{os.path.basename(out_path)}.1',
            'twister.json'
        )
        # Second run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               flags + \
               ['--compare-report', report_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        # NOTE(review): leftover debug print; consider removing.
        print(args)
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        assert self.FOOTPRINT_LOG in caplog.text
        if expect_delta_log:
            assert self.RAM_KEY in caplog.text
            assert re.search(self.DELTA_DETAIL, caplog.text), \
                'Expected footprint delta not logged.'
            assert re.search(self.DELTA_WARNING_COMPARE, caplog.text), \
                'Expected footprint deltas not logged.'
        else:
            assert self.RAM_KEY not in caplog.text
            # NOTE(review): assertion message wording looks inverted here
            # ('Expected ... not logged' on a `not` check) — confirm.
            assert not re.search(self.DELTA_DETAIL, caplog.text), \
                'Expected footprint delta not logged.'
            # The summary warning is expected even without --show-footprint.
            assert re.search(self.DELTA_WARNING_COMPARE, caplog.text), \
                'Expected footprint deltas logged.'
    @pytest.mark.parametrize(
        'old_ram_multiplier, expect_delta_log',
        [
            (0.75, True),
            (1.25, False),
        ],
        ids=['footprint increased', 'footprint reduced']
    )
    def test_last_metrics(self, caplog, out_path, old_ram_multiplier, expect_delta_log):
        """--last-metrics must compare against the previous run in the
        same outdir and produce the same deltas as an explicit
        --compare-report against that run's twister.json."""
        # First run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        # Modify the older report so we can control the difference.
        # Note: if footprint tests take too long, replace first run with a prepared twister.json
        # That will increase test-to-code_under_test coupling, however.
        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)
        for ts in j['testsuites']:
            if 'reason' not in ts:
                # We assume positive RAM usage.
                ts[self.RAM_KEY] *= old_ram_multiplier
        with open(os.path.join(out_path, 'twister.json'), 'w') as f:
            f.write(json.dumps(j, indent=4))
        report_path = os.path.join(
            os.path.dirname(out_path),
            f'{os.path.basename(out_path)}.1',
            'twister.json'
        )
        # Second run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--last-metrics'] + \
               ['--show-footprint'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        assert self.FOOTPRINT_LOG in caplog.text
        if expect_delta_log:
            assert self.RAM_KEY in caplog.text
            assert re.search(self.DELTA_WARNING_RUN, caplog.text), \
                'Expected footprint deltas not logged.'
        else:
            assert self.RAM_KEY not in caplog.text
            assert not re.search(self.DELTA_WARNING_RUN, caplog.text), \
                'Unexpected footprint deltas logged.'
        second_logs = caplog.records
        caplog.clear()
        clear_log_in_test()
        # Third run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--compare-report', report_path] + \
               ['--show-footprint'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        # Since second run should use the same source as the third, we should compare them.
        delta_logs = [
            record.getMessage() for record in second_logs \
            if self.RAM_KEY in record.getMessage()
        ]
        assert all([log in caplog.text for log in delta_logs])
    @pytest.mark.parametrize(
        'old_ram_multiplier, expect_delta_log',
        [
            (0.75, True),
            (1.00, False),
            (1.25, True),
        ],
        ids=['footprint increased', 'footprint constant', 'footprint reduced']
    )
    def test_all_deltas(self, caplog, out_path, old_ram_multiplier, expect_delta_log):
        """--all-deltas must report both increases and reductions, and
        stay silent only when the footprint is unchanged."""
        # First run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        # Modify the older report so we can control the difference.
        # Note: if footprint tests take too long, replace first run with a prepared twister.json
        # That will increase test-to-code_under_test coupling, however.
        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)
        for ts in j['testsuites']:
            if 'reason' not in ts:
                # We assume positive RAM usage.
                ts[self.RAM_KEY] *= old_ram_multiplier
        with open(os.path.join(out_path, 'twister.json'), 'w') as f:
            f.write(json.dumps(j, indent=4))
        report_path = os.path.join(
            os.path.dirname(out_path),
            f'{os.path.basename(out_path)}.1',
            'twister.json'
        )
        # Second run
        test_platforms = ['intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--all-deltas'] + \
               ['--compare-report', report_path, '--show-footprint'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)
        assert str(sys_exit.value) == '0'
        assert self.FOOTPRINT_LOG in caplog.text
        if expect_delta_log:
            assert self.RAM_KEY in caplog.text
            assert re.search(self.DELTA_WARNING_COMPARE, caplog.text), \
                'Expected footprint deltas not logged.'
        else:
            assert self.RAM_KEY not in caplog.text
            assert not re.search(self.DELTA_WARNING_COMPARE, caplog.text), \
                'Unexpected footprint deltas logged.'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_footprint.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,422 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions - simple does-error-out or not tests
"""
import importlib
import mock
import os
import pytest
import sys
import re
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
from twisterlib.error import TwisterRuntimeError
class TestError:
    """Blackbox tests for twister's error handling.

    Verifies that a ``--test`` selector without a test root raises
    TwisterRuntimeError while valid invocations exit cleanly, and that
    ``--overflow-as-errors`` turns a RAM overflow from a SKIP into a
    build ERROR.
    """

    # Each entry: (test root passed via -T, or None to omit it;
    #              --test selector value;
    #              exception type the twister run is expected to raise).
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            os.path.join('scripts', 'tests', 'twister_blackbox', 'test_data', 'tests',
                         'dummy', 'agnostic', 'group1', 'subgroup1',
                         'dummy.agnostic.group1.subgroup1'),
            SystemExit
        ),
        (
            None,
            'dummy.agnostic.group1.subgroup1',
            TwisterRuntimeError
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            'dummy.agnostic.group1.subgroup1',
            SystemExit
        )
    ]

    # Each entry: (extra CLI switch or '' for none;
    #              regex expected in twister's stderr output).
    TESTDATA_2 = [
        (
            '',
            r'always_overflow.dummy SKIPPED \(RAM overflow\)'
        ),
        (
            '--overflow-as-errors',
            r'always_overflow.dummy ERROR Build failure \(build\)'
        )
    ]

    @classmethod
    def setup_class(cls):
        # Load the twister entry-point script as a module so each test
        # can execute it in-process under a mocked sys.argv.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'testroot, test, expected_exception',
        TESTDATA_1,
        ids=['valid', 'invalid', 'valid']
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_test(self, out_path, testroot, test, expected_exception):
        """Run twister with --test; expect SystemExit('0') when a test
        root is supplied and TwisterRuntimeError when it is not.
        """
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        args = []
        if testroot:
            args = ['-T', testroot]
        args += ['-i', '--outdir', out_path, '--test', test, '-y'] + \
                [val for pair in zip(
                    ['-p'] * len(test_platforms), test_platforms
                ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(expected_exception) as exc:
            self.loader.exec_module(self.twister_module)

        if expected_exception == SystemExit:
            # Exit code 0 means twister completed without errors.
            assert str(exc.value) == '0'

    @pytest.mark.parametrize(
        'switch, expected',
        TESTDATA_2,
        ids=[
            'overflow skip',
            'overflow error',
        ],
    )
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    def test_overflow_as_errors(self, capfd, out_path, switch, expected):
        """Build a suite that always overflows RAM; without the switch
        twister exits 0 and logs a SKIP, with --overflow-as-errors it
        exits 1 and logs a build ERROR.
        """
        path = os.path.join(TEST_DATA, 'tests', 'qemu_overflow')
        test_platforms = ['qemu_x86']
        args = ['--outdir', out_path, '-T', path, '-vv'] + \
               ['--build-only'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]
        if switch:
            args += [switch]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        # Echo captured output so it shows up in pytest's own report.
        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        if switch:
            assert str(sys_exit.value) == '1'
        else:
            assert str(sys_exit.value) == '0'
        assert re.search(expected, err)
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_error.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 866 |
```yaml
- scenarios:
- dummy.agnostic.group1.subgroup1
comment: >
test all platforms
- platforms:
- intel_adl_crb
comment: >
test intel_adl_crb
- scenarios:
- dummy.agnostic.group1.subgroup2
platforms:
- qemu_x86_64
comment: >
test qemu_x86_64
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/twister-quarantine-list.yml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 84 |
```yaml
platforms:
override_default_platforms: false
increased_platform_scope: true
levels:
- name: smoke
description: >
A plan to be used verifying basic features
adds:
- dummy.agnostic.*
- name: acceptance
description: >
More coverage
adds:
- dummy.*
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/test_config.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 71 |
```yaml
tests:
no_filter.dummy:
tags: no_filter
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/no_filter/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 15 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/no_filter/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);

/**
 * @brief Exercise the basic ztest assertion macros
 *
 * Runs each fundamental zassert variant against a trivially true
 * condition, so the test passes whenever the macros themselves work.
 */
ZTEST(a1_1_tests, test_assert)
{
	/* Pointer checks first, then boolean and equality checks. */
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_equal(1, 1, "1 was not equal to 1");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/no_filter/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 149 |
```yaml
tests:
san.valgrind:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/val/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 55 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.