text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```c
/*
*
*
* This file is based on dw1000_regs.h and dw1000_mac.c from
* path_to_url
* (d6b1414f1b4527abda7521a304baa1c648244108)
* The content was modified and restructured to meet the
* coding style and resolve namespace issues.
*
* This file is derived from material that is:
*
*
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing,
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* specific language governing permissions and limitations
*/
#ifndef ZEPHYR_INCLUDE_DW1000_REGS_H_
#define ZEPHYR_INCLUDE_DW1000_REGS_H_
/* Device ID register, includes revision info (0xDECA0130) */
#define DWT_DEV_ID_ID 0x00
#define DWT_DEV_ID_LEN 4
/* Revision */
#define DWT_DEV_ID_REV_MASK 0x0000000FUL
/* Version */
#define DWT_DEV_ID_VER_MASK 0x000000F0UL
/* The MODEL identifies the device. The DW1000 is device type 0x01 */
#define DWT_DEV_ID_MODEL_MASK 0x0000FF00UL
/* Register Identification Tag 0XDECA */
#define DWT_DEV_ID_RIDTAG_MASK 0xFFFF0000UL
/* IEEE Extended Unique Identifier (63:0) */
#define DWT_EUI_64_ID 0x01
#define DWT_EUI_64_OFFSET 0x00
#define DWT_EUI_64_LEN 8
/* PAN ID (31:16) and Short Address (15:0) */
#define DWT_PANADR_ID 0x03
#define DWT_PANADR_LEN 4
#define DWT_PANADR_SHORT_ADDR_OFFSET 0
/* Short Address */
#define DWT_PANADR_SHORT_ADDR_MASK 0x0000FFFFUL
#define DWT_PANADR_PAN_ID_OFFSET 2
/* PAN Identifier, upper 16 bits (31:16) of PANADR */
#define DWT_PANADR_PAN_ID_MASK 0xFFFF0000UL
#define DWT_REG_05_ID_RESERVED 0x05
/* System Configuration (31:0) */
#define DWT_SYS_CFG_ID 0x04
#define DWT_SYS_CFG_LEN 4
/* Access mask to SYS_CFG_ID */
#define DWT_SYS_CFG_MASK 0xF047FFFFUL
/* Frame filtering options all frames allowed */
#define DWT_SYS_CFG_FF_ALL_EN 0x000001FEUL
/* Frame Filtering Enable. This bit enables the frame filtering functionality */
#define DWT_SYS_CFG_FFE 0x00000001UL
/* Frame Filtering Behave as a Co-ordinator */
#define DWT_SYS_CFG_FFBC 0x00000002UL
/* Frame Filtering Allow Beacon frame reception */
#define DWT_SYS_CFG_FFAB 0x00000004UL
/* Frame Filtering Allow Data frame reception */
#define DWT_SYS_CFG_FFAD 0x00000008UL
/* Frame Filtering Allow Acknowledgment frame reception */
#define DWT_SYS_CFG_FFAA 0x00000010UL
/* Frame Filtering Allow MAC command frame reception */
#define DWT_SYS_CFG_FFAM 0x00000020UL
/* Frame Filtering Allow Reserved frame types */
#define DWT_SYS_CFG_FFAR 0x00000040UL
/* Frame Filtering Allow frames with frame type field of 4, (binary 100) */
#define DWT_SYS_CFG_FFA4 0x00000080UL
/* Frame Filtering Allow frames with frame type field of 5, (binary 101) */
#define DWT_SYS_CFG_FFA5 0x00000100UL
/* Host interrupt polarity */
#define DWT_SYS_CFG_HIRQ_POL 0x00000200UL
/* SPI data launch edge */
#define DWT_SYS_CFG_SPI_EDGE 0x00000400UL
/* Disable frame check error handling */
#define DWT_SYS_CFG_DIS_FCE 0x00000800UL
/* Disable Double RX Buffer */
#define DWT_SYS_CFG_DIS_DRXB 0x00001000UL
/* Disable receiver abort on PHR error */
#define DWT_SYS_CFG_DIS_PHE 0x00002000UL
/* Disable Receiver Abort on RSD error */
#define DWT_SYS_CFG_DIS_RSDE 0x00004000UL
/* initial seed value for the FCS generation and checking function */
#define DWT_SYS_CFG_FCS_INIT2F 0x00008000UL
#define DWT_SYS_CFG_PHR_MODE_SHFT 16
/* Standard Frame mode */
#define DWT_SYS_CFG_PHR_MODE_00 0x00000000UL
/* Long Frames mode */
#define DWT_SYS_CFG_PHR_MODE_11 0x00030000UL
/* Disable Smart TX Power control */
#define DWT_SYS_CFG_DIS_STXP 0x00040000UL
/* Receiver Mode 110 kbps data rate */
#define DWT_SYS_CFG_RXM110K 0x00400000UL
/* Receive Wait Timeout Enable. */
#define DWT_SYS_CFG_RXWTOE 0x10000000UL
/*
* Receiver Auto-Re-enable.
* This bit is used to cause the receiver to re-enable automatically
*/
#define DWT_SYS_CFG_RXAUTR 0x20000000UL
/* Automatic Acknowledgement Enable */
#define DWT_SYS_CFG_AUTOACK 0x40000000UL
/* Automatic Acknowledgement Pending bit control */
#define DWT_SYS_CFG_AACKPEND 0x80000000UL
/* System Time Counter (40-bit) */
#define DWT_SYS_TIME_ID 0x06
#define DWT_SYS_TIME_OFFSET 0x00
/* Note 40 bit register */
#define DWT_SYS_TIME_LEN 5
#define DWT_REG_07_ID_RESERVED 0x07
/* Transmit Frame Control */
#define DWT_TX_FCTRL_ID 0x08
/* Note 40 bit register */
#define DWT_TX_FCTRL_LEN 5
/* Bit mask to access Transmit Frame Length */
#define DWT_TX_FCTRL_TFLEN_MASK 0x0000007FUL
/* Bit mask to access Transmit Frame Length Extension */
#define DWT_TX_FCTRL_TFLE_MASK 0x00000380UL
/* Bit mask to access Frame Length field */
#define DWT_TX_FCTRL_FLE_MASK 0x000003FFUL
/* Bit mask to access Transmit Bit Rate */
#define DWT_TX_FCTRL_TXBR_MASK 0x00006000UL
/* Bit mask to access Transmit Pulse Repetition Frequency */
#define DWT_TX_FCTRL_TXPRF_MASK 0x00030000UL
/* Bit mask to access Transmit Preamble Symbol Repetitions (PSR). */
#define DWT_TX_FCTRL_TXPSR_MASK 0x000C0000UL
/* Bit mask to access Preamble Extension */
#define DWT_TX_FCTRL_PE_MASK 0x00300000UL
/* Bit mask to access Transmit Preamble Symbol Repetitions (PSR). */
#define DWT_TX_FCTRL_TXPSR_PE_MASK 0x003C0000UL
/* TX_FCTRL has fields which should always be written as zero */
#define DWT_TX_FCTRL_SAFE_MASK_32 0xFFFFE3FFUL
/* Transmit Bit Rate = 110k */
#define DWT_TX_FCTRL_TXBR_110k 0x00000000UL
/* Transmit Bit Rate = 850k */
#define DWT_TX_FCTRL_TXBR_850k 0x00002000UL
/* Transmit Bit Rate = 6.8M */
#define DWT_TX_FCTRL_TXBR_6M 0x00004000UL
/* Shift to access Data Rate field */
#define DWT_TX_FCTRL_TXBR_SHFT 13
/* Transmit Ranging enable */
#define DWT_TX_FCTRL_TR 0x00008000UL
/* Shift to access Ranging bit */
#define DWT_TX_FCTRL_TR_SHFT 15
/* Shift to access Pulse Repetition Frequency field */
#define DWT_TX_FCTRL_TXPRF_SHFT 16
/* Transmit Pulse Repetition Frequency = 4 Mhz */
#define DWT_TX_FCTRL_TXPRF_4M 0x00000000UL
/* Transmit Pulse Repetition Frequency = 16 Mhz */
#define DWT_TX_FCTRL_TXPRF_16M 0x00010000UL
/* Transmit Pulse Repetition Frequency = 64 Mhz */
#define DWT_TX_FCTRL_TXPRF_64M 0x00020000UL
/* Shift to access Preamble Symbol Repetitions field */
#define DWT_TX_FCTRL_TXPSR_SHFT 18
/*
* shift to access Preamble length Extension to allow specification
* of non-standard values
*/
#define DWT_TX_FCTRL_PE_SHFT 20
/* Bit mask to access Preamble Extension = 16 */
#define DWT_TX_FCTRL_TXPSR_PE_16 0x00000000UL
/* Bit mask to access Preamble Extension = 64 */
#define DWT_TX_FCTRL_TXPSR_PE_64 0x00040000UL
/* Bit mask to access Preamble Extension = 128 */
#define DWT_TX_FCTRL_TXPSR_PE_128 0x00140000UL
/* Bit mask to access Preamble Extension = 256 */
#define DWT_TX_FCTRL_TXPSR_PE_256 0x00240000UL
/* Bit mask to access Preamble Extension = 512 */
#define DWT_TX_FCTRL_TXPSR_PE_512 0x00340000UL
/* Bit mask to access Preamble Extension = 1024 */
#define DWT_TX_FCTRL_TXPSR_PE_1024 0x00080000UL
/* Bit mask to access Preamble Extension = 1536 */
#define DWT_TX_FCTRL_TXPSR_PE_1536 0x00180000UL
/* Bit mask to access Preamble Extension = 2048 */
#define DWT_TX_FCTRL_TXPSR_PE_2048 0x00280000UL
/* Bit mask to access Preamble Extension = 4096 */
#define DWT_TX_FCTRL_TXPSR_PE_4096 0x000C0000UL
/* Shift to access transmit buffer index offset */
#define DWT_TX_FCTRL_TXBOFFS_SHFT 22
/* Bit mask to access Transmit buffer index offset 10-bit field */
#define DWT_TX_FCTRL_TXBOFFS_MASK 0xFFC00000UL
/* Bit mask to access Inter-Frame Spacing field */
#define DWT_TX_FCTRL_IFSDELAY_MASK 0xFF00000000ULL
/* Transmit Data Buffer */
#define DWT_TX_BUFFER_ID 0x09
#define DWT_TX_BUFFER_LEN 1024
/* Delayed Send or Receive Time (40-bit) */
#define DWT_DX_TIME_ID 0x0A
#define DWT_DX_TIME_LEN 5
#define DWT_REG_0B_ID_RESERVED 0x0B
/* Receive Frame Wait Timeout Period */
#define DWT_RX_FWTO_ID 0x0C
#define DWT_RX_FWTO_OFFSET 0x00
#define DWT_RX_FWTO_LEN 2
#define DWT_RX_FWTO_MASK 0xFFFF
/* System Control Register */
#define DWT_SYS_CTRL_ID 0x0D
#define DWT_SYS_CTRL_OFFSET 0x00
#define DWT_SYS_CTRL_LEN 4
/*
 * System Control Register access mask
 * (all unused fields should always be written as zero)
 */
#define DWT_SYS_CTRL_MASK_32 0x010003CFUL
/* Suppress Auto-FCS Transmission (on this frame) */
#define DWT_SYS_CTRL_SFCST 0x00000001UL
/* Start Transmitting Now */
#define DWT_SYS_CTRL_TXSTRT 0x00000002UL
/* Transmitter Delayed Sending (initiates sending when SYS_TIME == TXD_TIME */
#define DWT_SYS_CTRL_TXDLYS 0x00000004UL
/* Cancel Suppression of auto-FCS transmission (on the current frame) */
#define DWT_SYS_CTRL_CANSFCS 0x00000008UL
/* Transceiver Off. Forces the transceiver OFF, aborting TX or RX immediately */
#define DWT_SYS_CTRL_TRXOFF 0x00000040UL
/* Wait for Response */
#define DWT_SYS_CTRL_WAIT4RESP 0x00000080UL
/* Enable Receiver Now */
#define DWT_SYS_CTRL_RXENAB 0x00000100UL
/*
 * Receiver Delayed Enable
 * (enables the receiver once SYS_TIME reaches the delayed
 * send/receive time programmed in DX_TIME)
 */
#define DWT_SYS_CTRL_RXDLYE 0x00000200UL
/*
* Host side receiver buffer pointer toggle - toggles 0/1
* host side data set pointer
*/
#define DWT_SYS_CTRL_HSRBTOGGLE 0x01000000UL
#define DWT_SYS_CTRL_HRBT DWT_SYS_CTRL_HSRBTOGGLE
#define DWT_SYS_CTRL_HRBT_OFFSET 3
/* System Event Mask Register */
#define DWT_SYS_MASK_ID 0x0E
#define DWT_SYS_MASK_LEN 4
/*
 * System Event Mask Register access mask
 * (all unused fields should always be written as zero)
 */
#define DWT_SYS_MASK_MASK_32 0x3FF7FFFEUL
/* Mask clock PLL lock event */
#define DWT_SYS_MASK_MCPLOCK 0x00000002UL
/* Mask clock PLL lock event */
#define DWT_SYS_MASK_MESYNCR 0x00000004UL
/* Mask automatic acknowledge trigger event */
#define DWT_SYS_MASK_MAAT 0x00000008UL
/* Mask transmit frame begins event */
#define DWT_SYS_MASK_MTXFRB 0x00000010UL
/* Mask transmit preamble sent event */
#define DWT_SYS_MASK_MTXPRS 0x00000020UL
/* Mask transmit PHY Header Sent event */
#define DWT_SYS_MASK_MTXPHS 0x00000040UL
/* Mask transmit frame sent event */
#define DWT_SYS_MASK_MTXFRS 0x00000080UL
/* Mask receiver preamble detected event */
#define DWT_SYS_MASK_MRXPRD 0x00000100UL
/* Mask receiver SFD detected event */
#define DWT_SYS_MASK_MRXSFDD 0x00000200UL
/* Mask LDE processing done event */
#define DWT_SYS_MASK_MLDEDONE 0x00000400UL
/* Mask receiver PHY header detect event */
#define DWT_SYS_MASK_MRXPHD 0x00000800UL
/* Mask receiver PHY header error event */
#define DWT_SYS_MASK_MRXPHE 0x00001000UL
/* Mask receiver data frame ready event */
#define DWT_SYS_MASK_MRXDFR 0x00002000UL
/* Mask receiver FCS good event */
#define DWT_SYS_MASK_MRXFCG 0x00004000UL
/* Mask receiver FCS error event */
#define DWT_SYS_MASK_MRXFCE 0x00008000UL
/* Mask receiver Reed Solomon Frame Sync Loss event */
#define DWT_SYS_MASK_MRXRFSL 0x00010000UL
/* Mask Receive Frame Wait Timeout event */
#define DWT_SYS_MASK_MRXRFTO 0x00020000UL
/* Mask leading edge detection processing error event */
#define DWT_SYS_MASK_MLDEERR 0x00040000UL
/* Mask Receiver Overrun event */
#define DWT_SYS_MASK_MRXOVRR 0x00100000UL
/* Mask Preamble detection timeout event */
#define DWT_SYS_MASK_MRXPTO 0x00200000UL
/* Mask GPIO interrupt event */
#define DWT_SYS_MASK_MGPIOIRQ 0x00400000UL
/* Mask SLEEP to INIT event */
#define DWT_SYS_MASK_MSLP2INIT 0x00800000UL
/* Mask RF PLL Losing Lock warning event */
#define DWT_SYS_MASK_MRFPLLLL 0x01000000UL
/* Mask Clock PLL Losing Lock warning event */
#define DWT_SYS_MASK_MCPLLLL 0x02000000UL
/* Mask Receive SFD timeout event */
#define DWT_SYS_MASK_MRXSFDTO 0x04000000UL
/* Mask Half Period Delay Warning event */
#define DWT_SYS_MASK_MHPDWARN 0x08000000UL
/* Mask Transmit Buffer Error event */
#define DWT_SYS_MASK_MTXBERR 0x10000000UL
/* Mask Automatic Frame Filtering rejection event */
#define DWT_SYS_MASK_MAFFREJ 0x20000000UL
/* System event Status Register */
#define DWT_SYS_STATUS_ID 0x0F
#define DWT_SYS_STATUS_OFFSET 0x00
/* Note 40 bit register */
#define DWT_SYS_STATUS_LEN 5
/*
 * System event Status Register access mask
 * (all unused fields should always be written as zero)
 */
#define DWT_SYS_STATUS_MASK_32 0xFFF7FFFFUL
/* Interrupt Request Status READ ONLY */
#define DWT_SYS_STATUS_IRQS 0x00000001UL
/* Clock PLL Lock */
#define DWT_SYS_STATUS_CPLOCK 0x00000002UL
/* External Sync Clock Reset */
#define DWT_SYS_STATUS_ESYNCR 0x00000004UL
/* Automatic Acknowledge Trigger */
#define DWT_SYS_STATUS_AAT 0x00000008UL
/* Transmit Frame Begins */
#define DWT_SYS_STATUS_TXFRB 0x00000010UL
/* Transmit Preamble Sent */
#define DWT_SYS_STATUS_TXPRS 0x00000020UL
/* Transmit PHY Header Sent */
#define DWT_SYS_STATUS_TXPHS 0x00000040UL
/*
* Transmit Frame Sent:
* This is set when the transmitter has completed the sending of a frame
*/
#define DWT_SYS_STATUS_TXFRS 0x00000080UL
/* Receiver Preamble Detected status */
#define DWT_SYS_STATUS_RXPRD 0x00000100UL
/* Receiver Start Frame Delimiter Detected. */
#define DWT_SYS_STATUS_RXSFDD 0x00000200UL
/* LDE processing done */
#define DWT_SYS_STATUS_LDEDONE 0x00000400UL
/* Receiver PHY Header Detect */
#define DWT_SYS_STATUS_RXPHD 0x00000800UL
/* Receiver PHY Header Error */
#define DWT_SYS_STATUS_RXPHE 0x00001000UL
/* Receiver Data Frame Ready */
#define DWT_SYS_STATUS_RXDFR 0x00002000UL
/* Receiver FCS Good */
#define DWT_SYS_STATUS_RXFCG 0x00004000UL
/* Receiver FCS Error */
#define DWT_SYS_STATUS_RXFCE 0x00008000UL
/* Receiver Reed Solomon Frame Sync Loss */
#define DWT_SYS_STATUS_RXRFSL 0x00010000UL
/* Receive Frame Wait Timeout */
#define DWT_SYS_STATUS_RXRFTO 0x00020000UL
/* Leading edge detection processing error */
#define DWT_SYS_STATUS_LDEERR 0x00040000UL
/* bit19 reserved */
#define DWT_SYS_STATUS_reserved 0x00080000UL
/* Receiver Overrun */
#define DWT_SYS_STATUS_RXOVRR 0x00100000UL
/* Preamble detection timeout */
#define DWT_SYS_STATUS_RXPTO 0x00200000UL
/* GPIO interrupt */
#define DWT_SYS_STATUS_GPIOIRQ 0x00400000UL
/* SLEEP to INIT */
#define DWT_SYS_STATUS_SLP2INIT 0x00800000UL
/* RF PLL Losing Lock */
#define DWT_SYS_STATUS_RFPLL_LL 0x01000000UL
/* Clock PLL Losing Lock */
#define DWT_SYS_STATUS_CLKPLL_LL 0x02000000UL
/* Receive SFD timeout */
#define DWT_SYS_STATUS_RXSFDTO 0x04000000UL
/* Half Period Delay Warning */
#define DWT_SYS_STATUS_HPDWARN 0x08000000UL
/* Transmit Buffer Error */
#define DWT_SYS_STATUS_TXBERR 0x10000000UL
/* Automatic Frame Filtering rejection */
#define DWT_SYS_STATUS_AFFREJ 0x20000000UL
/* Host Side Receive Buffer Pointer */
#define DWT_SYS_STATUS_HSRBP 0x40000000UL
/* IC side Receive Buffer Pointer READ ONLY */
#define DWT_SYS_STATUS_ICRBP 0x80000000UL
/* Receiver Reed-Solomon Correction Status */
#define DWT_SYS_STATUS_RXRSCS 0x0100000000ULL
/* Receiver Preamble Rejection */
#define DWT_SYS_STATUS_RXPREJ 0x0200000000ULL
/* Transmit power up time error */
#define DWT_SYS_STATUS_TXPUTE 0x0400000000ULL
/*
 * Transmit error bits as they appear in the high 16 bits (39:24) of
 * the 40-bit SYS_STATUS register: TXPUTE (bit 34 -> 0x0400) and
 * HPDWARN (bit 27 -> 0x0008). Compare this value against the top two
 * bytes of the register to detect a TX error condition.
 */
#define DWT_SYS_STATUS_TXERR (0x0408)
/* All RX events after a correct packet reception mask. */
#define DWT_SYS_STATUS_ALL_RX_GOOD (DWT_SYS_STATUS_RXDFR | \
DWT_SYS_STATUS_RXFCG | \
DWT_SYS_STATUS_RXPRD | \
DWT_SYS_STATUS_RXSFDD | \
DWT_SYS_STATUS_RXPHD)
/* All TX events mask. */
#define DWT_SYS_STATUS_ALL_TX (DWT_SYS_STATUS_AAT | \
DWT_SYS_STATUS_TXFRB | \
DWT_SYS_STATUS_TXPRS | \
DWT_SYS_STATUS_TXPHS | \
DWT_SYS_STATUS_TXFRS)
/* All double buffer events mask. */
#define DWT_SYS_STATUS_ALL_DBLBUFF (DWT_SYS_STATUS_RXDFR | \
DWT_SYS_STATUS_RXFCG)
/* All RX errors mask. */
#define DWT_SYS_STATUS_ALL_RX_ERR (DWT_SYS_STATUS_RXPHE | \
DWT_SYS_STATUS_RXFCE | \
DWT_SYS_STATUS_RXRFSL | \
DWT_SYS_STATUS_RXOVRR | \
DWT_SYS_STATUS_RXSFDTO | \
DWT_SYS_STATUS_AFFREJ)
/*
 * All RX error events interrupt mask. Built entirely from SYS_MASK
 * "M" bit names for consistency; DWT_SYS_MASK_MRXOVRR has the same
 * value (0x00100000UL) as DWT_SYS_STATUS_RXOVRR, so the composite
 * value is unchanged.
 */
#define DWT_SYS_MASK_ALL_RX_ERR (DWT_SYS_MASK_MRXPHE | \
				 DWT_SYS_MASK_MRXFCE | \
				 DWT_SYS_MASK_MRXRFSL | \
				 DWT_SYS_MASK_MRXOVRR | \
				 DWT_SYS_MASK_MRXSFDTO | \
				 DWT_SYS_MASK_MAFFREJ)
/*
* User defined RX timeouts
* (frame wait timeout and preamble detect timeout) mask.
*/
#define DWT_SYS_STATUS_ALL_RX_TO (DWT_SYS_STATUS_RXRFTO | \
DWT_SYS_STATUS_RXPTO)
#define DWT_SYS_MASK_ALL_RX_TO (DWT_SYS_MASK_MRXRFTO | \
DWT_SYS_MASK_MRXPTO)
/* RX Frame Information (in double buffer set) */
#define DWT_RX_FINFO_ID 0x10
#define DWT_RX_FINFO_OFFSET 0x00
#define DWT_RX_FINFO_LEN 4
/*
 * RX Frame Information Register access mask
 * (all unused fields should always be written as zero)
 */
#define DWT_RX_FINFO_MASK_32 0xFFFFFBFFUL
/* Receive Frame Length (0 to 127) */
#define DWT_RX_FINFO_RXFLEN_MASK 0x0000007FUL
/* Receive Frame Length Extension (0 to 7)<<7 */
#define DWT_RX_FINFO_RXFLE_MASK 0x00000380UL
/* Receive Frame Length Extension (0 to 1023) */
#define DWT_RX_FINFO_RXFL_MASK_1023 0x000003FFUL
/* Receive Non-Standard Preamble Length */
#define DWT_RX_FINFO_RXNSPL_MASK 0x00001800UL
/*
* RX Preamble Repetition.
* 00 = 16 symbols, 01 = 64 symbols, 10 = 1024 symbols, 11 = 4096 symbols
*/
#define DWT_RX_FINFO_RXPSR_MASK 0x000C0000UL
/* Receive Preamble Length = RXPSR+RXNSPL */
#define DWT_RX_FINFO_RXPEL_MASK 0x000C1800UL
/* Receive Preamble length = 64 */
#define DWT_RX_FINFO_RXPEL_64 0x00040000UL
/* Receive Preamble length = 128 */
#define DWT_RX_FINFO_RXPEL_128 0x00040800UL
/* Receive Preamble length = 256 */
#define DWT_RX_FINFO_RXPEL_256 0x00041000UL
/* Receive Preamble length = 512 */
#define DWT_RX_FINFO_RXPEL_512 0x00041800UL
/* Receive Preamble length = 1024 */
#define DWT_RX_FINFO_RXPEL_1024 0x00080000UL
/* Receive Preamble length = 1536 */
#define DWT_RX_FINFO_RXPEL_1536 0x00080800UL
/* Receive Preamble length = 2048 */
#define DWT_RX_FINFO_RXPEL_2048 0x00081000UL
/* Receive Preamble length = 4096 */
#define DWT_RX_FINFO_RXPEL_4096 0x000C0000UL
/* Receive Bit Rate report. This field reports the received bit rate */
#define DWT_RX_FINFO_RXBR_MASK 0x00006000UL
/* Received bit rate = 110 kbps */
#define DWT_RX_FINFO_RXBR_110k 0x00000000UL
/* Received bit rate = 850 kbps */
#define DWT_RX_FINFO_RXBR_850k 0x00002000UL
/* Received bit rate = 6.8 Mbps */
#define DWT_RX_FINFO_RXBR_6M 0x00004000UL
#define DWT_RX_FINFO_RXBR_SHIFT 13
/*
* Receiver Ranging. Ranging bit in the received PHY header
* identifying the frame as a ranging packet.
*/
#define DWT_RX_FINFO_RNG 0x00008000UL
#define DWT_RX_FINFO_RNG_SHIFT 15
/* RX Pulse Repetition Rate report */
#define DWT_RX_FINFO_RXPRF_MASK 0x00030000UL
/* PRF being employed in the receiver = 16M */
#define DWT_RX_FINFO_RXPRF_16M 0x00010000UL
/* PRF being employed in the receiver = 64M */
#define DWT_RX_FINFO_RXPRF_64M 0x00020000UL
#define DWT_RX_FINFO_RXPRF_SHIFT 16
/* Preamble Accumulation Count */
#define DWT_RX_FINFO_RXPACC_MASK 0xFFF00000UL
#define DWT_RX_FINFO_RXPACC_SHIFT 20
/* Receive Data Buffer (in double buffer set) */
#define DWT_RX_BUFFER_ID 0x11
#define DWT_RX_BUFFER_LEN 1024
/* Rx Frame Quality information (in double buffer set) */
#define DWT_RX_FQUAL_ID 0x12
/* Note 64 bit register */
#define DWT_RX_FQUAL_LEN 8
/* Standard Deviation of Noise */
#define DWT_RX_EQUAL_STD_NOISE_MASK 0x0000FFFFULL
#define DWT_RX_EQUAL_STD_NOISE_SHIFT 0
#define DWT_STD_NOISE_MASK DWT_RX_EQUAL_STD_NOISE_MASK
#define DWT_STD_NOISE_SHIFT DWT_RX_EQUAL_STD_NOISE_SHIFT
/* First Path Amplitude point 2 */
#define DWT_RX_EQUAL_FP_AMPL2_MASK 0xFFFF0000ULL
#define DWT_RX_EQUAL_FP_AMPL2_SHIFT 16
#define DWT_FP_AMPL2_MASK DWT_RX_EQUAL_FP_AMPL2_MASK
#define DWT_FP_AMPL2_SHIFT DWT_RX_EQUAL_FP_AMPL2_SHIFT
/* First Path Amplitude point 3 */
#define DWT_RX_EQUAL_PP_AMPL3_MASK 0x0000FFFF00000000ULL
#define DWT_RX_EQUAL_PP_AMPL3_SHIFT 32
#define DWT_PP_AMPL3_MASK DWT_RX_EQUAL_PP_AMPL3_MASK
#define DWT_PP_AMPL3_SHIFT DWT_RX_EQUAL_PP_AMPL3_SHIFT
/* Channel Impulse Response Max Growth */
#define DWT_RX_EQUAL_CIR_MXG_MASK 0xFFFF000000000000ULL
#define DWT_RX_EQUAL_CIR_MXG_SHIFT 48
#define DWT_CIR_MXG_MASK DWT_RX_EQUAL_CIR_MXG_MASK
#define DWT_CIR_MXG_SHIFT DWT_RX_EQUAL_CIR_MXG_SHIFT
/* Receiver Time Tracking Interval (in double buffer set) */
#define DWT_RX_TTCKI_ID 0x13
#define DWT_RX_TTCKI_LEN 4
/* Receiver Time Tracking Offset (in double buffer set) */
#define DWT_RX_TTCKO_ID 0x14
/* Note 40 bit register */
#define DWT_RX_TTCKO_LEN 5
/*
 * Receiver Time Tracking Offset access mask
 * (all unused fields should always be written as zero)
 */
#define DWT_RX_TTCKO_MASK_32 0xFF07FFFFUL
/* RX time tracking offset. This RXTOFS value is a 19-bit signed quantity */
#define DWT_RX_TTCKO_RXTOFS_MASK 0x0007FFFFUL
/* This 8-bit field reports an internal re-sampler delay value */
#define DWT_RX_TTCKO_RSMPDEL_MASK 0xFF000000UL
/*
* This 7-bit field reports the receive carrier phase adjustment
* at time the ranging timestamp is made.
*/
#define DWT_RX_TTCKO_RCPHASE_MASK 0x7F0000000000ULL
/* Receive Message Time of Arrival (in double buffer set) */
#define DWT_RX_TIME_ID 0x15
#define DWT_RX_TIME_LLEN 14
/* read only 5 bytes (the adjusted timestamp (40:0)) */
#define DWT_RX_TIME_RX_STAMP_LEN 5
#define DWT_RX_STAMP_LEN DWT_RX_TIME_RX_STAMP_LEN
/* byte 0..4 40 bit Reports the fully adjusted time of reception. */
#define DWT_RX_TIME_RX_STAMP_OFFSET 0
/* byte 5..6 16 bit First path index. */
#define DWT_RX_TIME_FP_INDEX_OFFSET 5
/* byte 7..8 16 bit First Path Amplitude point 1 */
#define DWT_RX_TIME_FP_AMPL1_OFFSET 7
/* byte 9..13 40 bit Raw Timestamp for the frame */
#define DWT_RX_TIME_FP_RAWST_OFFSET 9
#define DWT_REG_16_ID_RESERVED 0x16
/* Transmit Message Time of Sending */
#define DWT_TX_TIME_ID 0x17
#define DWT_TX_TIME_LLEN 10
/* 40-bits = 5 bytes */
#define DWT_TX_TIME_TX_STAMP_LEN 5
#define DWT_TX_STAMP_LEN DWT_TX_TIME_TX_STAMP_LEN
/* byte 0..4 40 bit Reports the fully adjusted time of transmission */
#define DWT_TX_TIME_TX_STAMP_OFFSET 0
/* byte 5..9 40 bit Raw Timestamp for the frame */
#define DWT_TX_TIME_TX_RAWST_OFFSET 5
/* 16-bit Delay from Transmit to Antenna */
#define DWT_TX_ANTD_ID 0x18
#define DWT_TX_ANTD_OFFSET 0x00
#define DWT_TX_ANTD_LEN 2
/* System State information READ ONLY */
#define DWT_SYS_STATE_ID 0x19
#define DWT_SYS_STATE_LEN 5
/* 7:0 TX _STATE Bits 3:0 */
#define DWT_TX_STATE_OFFSET 0x00
#define DWT_TX_STATE_MASK 0x07
#define DWT_TX_STATE_IDLE 0x00
#define DWT_TX_STATE_PREAMBLE 0x01
#define DWT_TX_STATE_SFD 0x02
#define DWT_TX_STATE_PHR 0x03
#define DWT_TX_STATE_SDE 0x04
#define DWT_TX_STATE_DATA 0x05
#define DWT_TX_STATE_RSP_DATE 0x06
#define DWT_TX_STATE_TAIL 0x07
#define DWT_RX_STATE_OFFSET 0x01
#define DWT_RX_STATE_IDLE 0x00
#define DWT_RX_STATE_START_ANALOG 0x01
#define DWT_RX_STATE_RX_RDY 0x04
#define DWT_RX_STATE_PREAMBLE_FOUND 0x05
#define DWT_RX_STATE_PRMBL_TIMEOUT 0x06
#define DWT_RX_STATE_SFD_FOUND 0x07
#define DWT_RX_STATE_CNFG_PHR_RX 0x08
#define DWT_RX_STATE_PHR_RX_STRT 0x09
#define DWT_RX_STATE_DATA_RATE_RDY 0x0A
#define DWT_RX_STATE_DATA_RX_SEQ 0x0C
#define DWT_RX_STATE_CNFG_DATA_RX 0x0D
#define DWT_RX_STATE_PHR_NOT_OK 0x0E
#define DWT_RX_STATE_LAST_SYMBOL 0x0F
#define DWT_RX_STATE_WAIT_RSD_DONE 0x10
#define DWT_RX_STATE_RSD_OK 0x11
#define DWT_RX_STATE_RSD_NOT_OK 0x12
#define DWT_RX_STATE_RECONFIG_110 0x13
#define DWT_RX_STATE_WAIT_110_PHR 0x14
#define DWT_PMSC_STATE_OFFSET 0x02
#define DWT_PMSC_STATE_INIT 0x00
#define DWT_PMSC_STATE_IDLE 0x01
#define DWT_PMSC_STATE_TX_WAIT 0x02
#define DWT_PMSC_STATE_RX_WAIT 0x03
#define DWT_PMSC_STATE_TX 0x04
#define DWT_PMSC_STATE_RX 0x05
/*
* Acknowledge (31:24 preamble symbol delay before auto ACK is sent) and
* response (19:0 - unit 1us) timer
*/
/* Acknowledgement Time and Response Time */
#define DWT_ACK_RESP_T_ID 0x1A
#define DWT_ACK_RESP_T_LEN 4
/* Acknowledgement Time and Response access mask */
#define DWT_ACK_RESP_T_MASK 0xFF0FFFFFUL
#define DWT_ACK_RESP_T_W4R_TIM_OFFSET 0
/* Wait-for-Response turn-around Time 20 bit field */
#define DWT_ACK_RESP_T_W4R_TIM_MASK 0x000FFFFFUL
#define DWT_W4R_TIM_MASK DWT_ACK_RESP_T_W4R_TIM_MASK
#define DWT_ACK_RESP_T_ACK_TIM_OFFSET 3
/* Auto-Acknowledgement turn-around Time */
#define DWT_ACK_RESP_T_ACK_TIM_MASK 0xFF000000UL
#define DWT_ACK_TIM_MASK DWT_ACK_RESP_T_ACK_TIM_MASK
#define DWT_REG_1B_ID_RESERVED 0x1B
#define DWT_REG_1C_ID_RESERVED 0x1C
/* Sniff Mode Configuration */
#define DWT_RX_SNIFF_ID 0x1D
#define DWT_RX_SNIFF_OFFSET 0x00
#define DWT_RX_SNIFF_LEN 4
#define DWT_RX_SNIFF_MASK 0x0000FF0FUL
/* SNIFF Mode ON time. Specified in units of PAC */
#define DWT_RX_SNIFF_SNIFF_ONT_MASK 0x0000000FUL
#define DWT_SNIFF_ONT_MASK DWT_RX_SNIFF_SNIFF_ONT_MASK
/*
 * SNIFF Mode OFF time specified in units of approximately 1 us,
 * or 128 system clock cycles.
 */
#define DWT_RX_SNIFF_SNIFF_OFFT_MASK 0x0000FF00UL
#define DWT_SNIFF_OFFT_MASK DWT_RX_SNIFF_SNIFF_OFFT_MASK
/* TX Power Control */
#define DWT_TX_POWER_ID 0x1E
#define DWT_TX_POWER_LEN 4
/*
 * Mask and shift definitions for Smart Transmit Power Control
 * (active when DIS_STXP = 0 in SYS_CFG). The TX_POWER register holds
 * four 8-bit power-setting octets at shifts 0/8/16/24; each mask
 * selects one octet.
 *
 * BOOSTNORM (bits 7:0) is the normal power setting used for frames
 * that do not qualify for a power boost.
 */
#define DWT_TX_POWER_BOOSTNORM_MASK 0x000000FFUL
#define DWT_BOOSTNORM_MASK DWT_TX_POWER_BOOSTNORM_MASK
#define DWT_TX_POWER_BOOSTNORM_SHIFT 0
/*
 * BOOSTP500 (bits 15:8) sets the power applied during transmission
 * at the 6.8 Mbps data rate for frames that are less than 0.5 ms duration
 */
#define DWT_TX_POWER_BOOSTP500_MASK 0x0000FF00UL
#define DWT_BOOSTP500_MASK DWT_TX_POWER_BOOSTP500_MASK
#define DWT_TX_POWER_BOOSTP500_SHIFT 8
/*
 * BOOSTP250 (bits 23:16) sets the power applied during transmission
 * at the 6.8 Mbps data rate for frames that are less than 0.25 ms duration
 */
#define DWT_TX_POWER_BOOSTP250_MASK 0x00FF0000UL
#define DWT_BOOSTP250_MASK DWT_TX_POWER_BOOSTP250_MASK
#define DWT_TX_POWER_BOOSTP250_SHIFT 16
/*
 * BOOSTP125 (bits 31:24) sets the power applied during transmission
 * at the 6.8 Mbps data rate for frames that are less than 0.125 ms duration
 */
#define DWT_TX_POWER_BOOSTP125_MASK 0xFF000000UL
#define DWT_BOOSTP125_MASK DWT_TX_POWER_BOOSTP125_MASK
#define DWT_TX_POWER_BOOSTP125_SHIFT 24
/*
* Mask and shift definition for Manual Transmit Power Control
* (DIS_STXP=1 in SYS_CFG)
*/
#define DWT_TX_POWER_MAN_DEFAULT 0x0E080222UL
/*
* This power setting is applied during the transmission
* of the PHY header (PHR) portion of the frame.
*/
#define DWT_TX_POWER_TXPOWPHR_MASK 0x0000FF00UL
/*
* This power setting is applied during the transmission
* of the synchronisation header (SHR) and data portions of the frame.
*/
#define DWT_TX_POWER_TXPOWSD_MASK 0x00FF0000UL
/* Channel Control */
#define DWT_CHAN_CTRL_ID 0x1F
#define DWT_CHAN_CTRL_LEN 4
/* Channel Control Register access mask */
#define DWT_CHAN_CTRL_MASK 0xFFFF00FFUL
/* Supported channels are 1, 2, 3, 4, 5, and 7. */
#define DWT_CHAN_CTRL_TX_CHAN_MASK 0x0000000FUL
/* Bits 0..3 TX channel number 0-15 selection */
#define DWT_CHAN_CTRL_TX_CHAN_SHIFT 0
#define DWT_CHAN_CTRL_RX_CHAN_MASK 0x000000F0UL
/* Bits 4..7 RX channel number 0-15 selection */
#define DWT_CHAN_CTRL_RX_CHAN_SHIFT 4
/*
* Bits 18..19 Specify (Force) RX Pulse Repetition Rate:
* 00 = 4 MHz, 01 = 16 MHz, 10 = 64MHz.
*/
#define DWT_CHAN_CTRL_RXFPRF_MASK 0x000C0000UL
#define DWT_CHAN_CTRL_RXFPRF_SHIFT 18
/*
* Specific RXFPRF configuration:
*
* Specify (Force) RX Pulse Repetition Rate:
* 00 = 4 MHz, 01 = 16 MHz, 10 = 64MHz.
*/
#define DWT_CHAN_CTRL_RXFPRF_4 0x00000000UL
/*
* Specify (Force) RX Pulse Repetition Rate:
* 00 = 4 MHz, 01 = 16 MHz, 10 = 64MHz.
*/
#define DWT_CHAN_CTRL_RXFPRF_16 0x00040000UL
/*
* Specify (Force) RX Pulse Repetition Rate:
* 00 = 4 MHz, 01 = 16 MHz, 10 = 64MHz.
*/
#define DWT_CHAN_CTRL_RXFPRF_64 0x00080000UL
/* Bits 22..26 TX Preamble Code selection, 1 to 24. */
#define DWT_CHAN_CTRL_TX_PCOD_MASK 0x07C00000UL
#define DWT_CHAN_CTRL_TX_PCOD_SHIFT 22
/* Bits 27..31 RX Preamble Code selection, 1 to 24. */
#define DWT_CHAN_CTRL_RX_PCOD_MASK 0xF8000000UL
#define DWT_CHAN_CTRL_RX_PCOD_SHIFT 27
/* Bit 17 This bit enables a non-standard DecaWave proprietary SFD sequence. */
#define DWT_CHAN_CTRL_DWSFD 0x00020000UL
#define DWT_CHAN_CTRL_DWSFD_SHIFT 17
/* Bit 20 Non-standard SFD in the transmitter */
#define DWT_CHAN_CTRL_TNSSFD 0x00100000UL
#define DWT_CHAN_CTRL_TNSSFD_SHIFT 20
/* Bit 21 Non-standard SFD in the receiver */
#define DWT_CHAN_CTRL_RNSSFD 0x00200000UL
#define DWT_CHAN_CTRL_RNSSFD_SHIFT 21
#define DWT_REG_20_ID_RESERVED 0x20
/* User-specified short/long TX/RX SFD sequences */
#define DWT_USR_SFD_ID 0x21
#define DWT_USR_SFD_LEN 41
/* Decawave non-standard SFD length for 110 kbps */
#define DWT_DW_NS_SFD_LEN_110K 64
/* Decawave non-standard SFD length for 850 kbps */
#define DWT_DW_NS_SFD_LEN_850K 16
/* Decawave non-standard SFD length for 6.8 Mbps */
#define DWT_DW_NS_SFD_LEN_6M8 8
#define DWT_REG_22_ID_RESERVED 0x22
/* Automatic Gain Control configuration */
#define DWT_AGC_CTRL_ID 0x23
#define DWT_AGC_CTRL_LEN 32
#define DWT_AGC_CFG_STS_ID DWT_AGC_CTRL_ID
#define DWT_AGC_CTRL1_OFFSET (0x02)
#define DWT_AGC_CTRL1_LEN 2
/* Access mask to AGC configuration and control register */
#define DWT_AGC_CTRL1_MASK 0x0001
/* Disable AGC Measurement. The DIS_AM bit is set by default. */
#define DWT_AGC_CTRL1_DIS_AM 0x0001
/*
* Offset from AGC_CTRL_ID in bytes.
* Please take care not to write other values to this register as doing so
* may cause the DW1000 to malfunction
*/
#define DWT_AGC_TUNE1_OFFSET (0x04)
#define DWT_AGC_TUNE1_LEN 2
/* It is a 16-bit tuning register for the AGC. */
#define DWT_AGC_TUNE1_MASK 0xFFFF
#define DWT_AGC_TUNE1_16M 0x8870
#define DWT_AGC_TUNE1_64M 0x889B
/*
* Offset from AGC_CTRL_ID in bytes.
* Please take care not to write other values to this register as doing so
* may cause the DW1000 to malfunction
*/
#define DWT_AGC_TUNE2_OFFSET (0x0C)
#define DWT_AGC_TUNE2_LEN 4
#define DWT_AGC_TUNE2_MASK 0xFFFFFFFFUL
#define DWT_AGC_TUNE2_VAL 0X2502A907UL
/*
* Offset from AGC_CTRL_ID in bytes.
* Please take care not to write other values to this register as doing so
* may cause the DW1000 to malfunction
*/
#define DWT_AGC_TUNE3_LEN 2
#define DWT_AGC_TUNE3_MASK 0xFFFF
#define DWT_AGC_TUNE3_VAL 0X0055
#define DWT_AGC_STAT1_OFFSET (0x1E)
#define DWT_AGC_STAT1_LEN 3
#define DWT_AGC_STAT1_MASK 0x0FFFFF
/* This 5-bit gain value relates to input noise power measurement. */
#define DWT_AGC_STAT1_EDG1_MASK 0x0007C0
/* This 9-bit value relates to the input noise power measurement. */
#define DWT_AGC_STAT1_EDG2_MASK 0x0FF800
/* External synchronisation control */
#define DWT_EXT_SYNC_ID 0x24
#define DWT_EXT_SYNC_LEN 12
#define DWT_EC_CTRL_OFFSET (0x00)
#define DWT_EC_CTRL_LEN 4
/*
* Sub-register 0x00 is the External clock synchronisation counter
* configuration register
*/
#define DWT_EC_CTRL_MASK 0x00000FFBUL
/* External transmit synchronisation mode enable */
#define DWT_EC_CTRL_OSTSM 0x00000001UL
/* External receive synchronisation mode enable */
#define DWT_EC_CTRL_OSRSM 0x00000002UL
/* PLL lock detect enable */
#define DWT_EC_CTRL_PLLLCK 0x04
/* External timebase reset mode enable */
#define DWT_EC_CTRL_OSTRM 0x00000800UL
/*
* Wait counter used for external transmit synchronisation and
* external timebase reset
*/
#define DWT_EC_CTRL_WAIT_MASK 0x000007F8UL
#define DWT_EC_RXTC_OFFSET (0x04)
#define DWT_EC_RXTC_LEN 4
/* External clock synchronisation counter captured on RMARKER */
#define DWT_EC_RXTC_MASK 0xFFFFFFFFUL
#define DWT_EC_GOLP (0x08)
#define DWT_EC_GOLP_LEN 4
/*
* Sub-register 0x08 is the External clock offset to first path 1 GHz counter,
* EC_GOLP
*/
#define DWT_EC_GOLP_MASK 0x0000003FUL
/*
* This register contains the 1 GHz count from the arrival of the RMARKER and
* the next edge of the external clock.
*/
#define DWT_EC_GOLP_OFFSET_EXT_MASK 0x0000003FUL
/* Read access to accumulator data (register file 0x25, read-only) */
#define DWT_ACC_MEM_ID 0x25
#define DWT_ACC_MEM_LEN 4064
/* Peripheral register bus 1 access - GPIO control */
#define DWT_GPIO_CTRL_ID 0x26
#define DWT_GPIO_CTRL_LEN 44
/* Sub-register 0x00 is the GPIO Mode Control Register */
#define DWT_GPIO_MODE_OFFSET 0x00
#define DWT_GPIO_MODE_LEN 4
#define DWT_GPIO_MODE_MASK 0x00FFFFC0UL
/* Each MSGPx field below is a 2-bit mode selector for one pin. */
/* Mode Selection for GPIO0/RXOKLED */
#define DWT_GPIO_MSGP0_MASK 0x000000C0UL
/* Mode Selection for GPIO1/SFDLED */
#define DWT_GPIO_MSGP1_MASK 0x00000300UL
/* Mode Selection for GPIO2/RXLED */
#define DWT_GPIO_MSGP2_MASK 0x00000C00UL
/* Mode Selection for GPIO3/TXLED */
#define DWT_GPIO_MSGP3_MASK 0x00003000UL
/* Mode Selection for GPIO4/EXTPA */
#define DWT_GPIO_MSGP4_MASK 0x0000C000UL
/* Mode Selection for GPIO5/EXTTXE */
#define DWT_GPIO_MSGP5_MASK 0x00030000UL
/* Mode Selection for GPIO6/EXTRXE */
#define DWT_GPIO_MSGP6_MASK 0x000C0000UL
/* Mode Selection for SYNC/GPIO7 */
#define DWT_GPIO_MSGP7_MASK 0x00300000UL
/* Mode Selection for IRQ/GPIO8 */
#define DWT_GPIO_MSGP8_MASK 0x00C00000UL
/* Ready-made MSGPx values selecting the alternate pin functions. */
/* The pin operates as the RXLED output */
#define DWT_GPIO_PIN2_RXLED 0x00000400UL
/* The pin operates as the TXLED output */
#define DWT_GPIO_PIN3_TXLED 0x00001000UL
/* The pin operates as the EXTPA output */
#define DWT_GPIO_PIN4_EXTPA 0x00004000UL
/* The pin operates as the EXTTXE output */
#define DWT_GPIO_PIN5_EXTTXE 0x00010000UL
/* The pin operates as the EXTRXE output */
#define DWT_GPIO_PIN6_EXTRXE 0x00040000UL
/* Sub-register 0x08 is the GPIO Direction Control Register */
#define DWT_GPIO_DIR_OFFSET 0x08
#define DWT_GPIO_DIR_LEN 3
#define DWT_GPIO_DIR_MASK 0x0011FFFFUL
/*
 * GPIO0 only changed if the GxM0 mask bit has a value of 1
 * for the write operation
 */
#define DWT_GxP0 0x00000001UL
/* GPIO1. (See GDP0). */
#define DWT_GxP1 0x00000002UL
/* GPIO2. (See GDP0). */
#define DWT_GxP2 0x00000004UL
/* GPIO3. (See GDP0). */
#define DWT_GxP3 0x00000008UL
/* GPIO4. (See GDP0). */
#define DWT_GxP4 0x00000100UL
/* GPIO5. (See GDP0). */
#define DWT_GxP5 0x00000200UL
/* GPIO6. (See GDP0). */
#define DWT_GxP6 0x00000400UL
/* GPIO7. (See GDP0). */
#define DWT_GxP7 0x00000800UL
/* GPIO8 */
#define DWT_GxP8 0x00010000UL
/* Mask for GPIO0 */
#define DWT_GxM0 0x00000010UL
/* Mask for GPIO1. (See GDM0). */
#define DWT_GxM1 0x00000020UL
/* Mask for GPIO2. (See GDM0). */
#define DWT_GxM2 0x00000040UL
/* Mask for GPIO3. (See GDM0). */
#define DWT_GxM3 0x00000080UL
/* Mask for GPIO4. (See GDM0). */
#define DWT_GxM4 0x00001000UL
/* Mask for GPIO5. (See GDM0). */
#define DWT_GxM5 0x00002000UL
/* Mask for GPIO6. (See GDM0). */
#define DWT_GxM6 0x00004000UL
/* Mask for GPIO7. (See GDM0). */
#define DWT_GxM7 0x00008000UL
/* Mask for GPIO8. (See GDM0). */
#define DWT_GxM8 0x00100000UL
/*
 * Direction Selection for GPIO0. 1 = input, 0 = output.
 * Only changed if the GDM0 mask bit has a value of 1 for the write operation.
 * Note: the alias targets carry the DWT_ namespace prefix; the bare
 * GxPn/GxMn names do not exist in this restructured header.
 */
#define DWT_GDP0 DWT_GxP0
/* Direction Selection for GPIO1. (See GDP0). */
#define DWT_GDP1 DWT_GxP1
/* Direction Selection for GPIO2. (See GDP0). */
#define DWT_GDP2 DWT_GxP2
/* Direction Selection for GPIO3. (See GDP0). */
#define DWT_GDP3 DWT_GxP3
/* Direction Selection for GPIO4. (See GDP0). */
#define DWT_GDP4 DWT_GxP4
/* Direction Selection for GPIO5. (See GDP0). */
#define DWT_GDP5 DWT_GxP5
/* Direction Selection for GPIO6. (See GDP0). */
#define DWT_GDP6 DWT_GxP6
/* Direction Selection for GPIO7. (See GDP0). */
#define DWT_GDP7 DWT_GxP7
/* Direction Selection for GPIO8 */
#define DWT_GDP8 DWT_GxP8
/* Mask for setting the direction of GPIO0 */
#define DWT_GDM0 DWT_GxM0
/* Mask for setting the direction of GPIO1. (See GDM0). */
#define DWT_GDM1 DWT_GxM1
/* Mask for setting the direction of GPIO2. (See GDM0). */
#define DWT_GDM2 DWT_GxM2
/* Mask for setting the direction of GPIO3. (See GDM0). */
#define DWT_GDM3 DWT_GxM3
/* Mask for setting the direction of GPIO4. (See GDM0). */
#define DWT_GDM4 DWT_GxM4
/* Mask for setting the direction of GPIO5. (See GDM0). */
#define DWT_GDM5 DWT_GxM5
/* Mask for setting the direction of GPIO6. (See GDM0). */
#define DWT_GDM6 DWT_GxM6
/* Mask for setting the direction of GPIO7. (See GDM0). */
#define DWT_GDM7 DWT_GxM7
/* Mask for setting the direction of GPIO8. (See GDM0). */
#define DWT_GDM8 DWT_GxM8
/* Sub-register 0x0C is the GPIO data output register. */
#define DWT_GPIO_DOUT_OFFSET 0x0C
#define DWT_GPIO_DOUT_LEN 3
/* Same bit layout as the direction register */
#define DWT_GPIO_DOUT_MASK DWT_GPIO_DIR_MASK
/* Sub-register 0x10 is the GPIO interrupt enable register */
#define DWT_GPIO_IRQE_OFFSET 0x10
#define DWT_GPIO_IRQE_LEN 4
#define DWT_GPIO_IRQE_MASK 0x000001FFUL
/*
 * Generic per-pin IRQ bit positions shared by all of the GPIO interrupt
 * sub-registers below (IRQE, ISEN, IMODE, IBES, ICLR, IDBE, RAW).
 */
/* IRQ bit0 */
#define DWT_GIRQx0 0x00000001UL
/* IRQ bit1 */
#define DWT_GIRQx1 0x00000002UL
/* IRQ bit2 */
#define DWT_GIRQx2 0x00000004UL
/* IRQ bit3 */
#define DWT_GIRQx3 0x00000008UL
/* IRQ bit4 */
#define DWT_GIRQx4 0x00000010UL
/* IRQ bit5 */
#define DWT_GIRQx5 0x00000020UL
/* IRQ bit6 */
#define DWT_GIRQx6 0x00000040UL
/* IRQ bit7 */
#define DWT_GIRQx7 0x00000080UL
/* IRQ bit8 */
#define DWT_GIRQx8 0x00000100UL
/* GPIO IRQ Enable for GPIO0 input. Value 1 = enable, 0 = disable.
 * The alias targets carry the DWT_ prefix; bare GIRQxN names do not
 * exist in this restructured header.
 */
#define DWT_GIRQE0 DWT_GIRQx0
#define DWT_GIRQE1 DWT_GIRQx1
#define DWT_GIRQE2 DWT_GIRQx2
#define DWT_GIRQE3 DWT_GIRQx3
#define DWT_GIRQE4 DWT_GIRQx4
#define DWT_GIRQE5 DWT_GIRQx5
#define DWT_GIRQE6 DWT_GIRQx6
#define DWT_GIRQE7 DWT_GIRQx7
#define DWT_GIRQE8 DWT_GIRQx8
/* Sub-register 0x14 is the GPIO interrupt sense selection register */
#define DWT_GPIO_ISEN_OFFSET 0x14
#define DWT_GPIO_ISEN_LEN 4
#define DWT_GPIO_ISEN_MASK DWT_GPIO_IRQE_MASK
/* GPIO IRQ Sense selection GPIO0 input.
 * Value 0 = High or Rising-Edge,
 * 1 = Low or falling-edge.
 */
#define DWT_GISEN0 DWT_GIRQx0
#define DWT_GISEN1 DWT_GIRQx1
#define DWT_GISEN2 DWT_GIRQx2
#define DWT_GISEN3 DWT_GIRQx3
#define DWT_GISEN4 DWT_GIRQx4
#define DWT_GISEN5 DWT_GIRQx5
#define DWT_GISEN6 DWT_GIRQx6
#define DWT_GISEN7 DWT_GIRQx7
#define DWT_GISEN8 DWT_GIRQx8
/* Sub-register 0x18 is the GPIO interrupt mode selection register */
#define DWT_GPIO_IMODE_OFFSET 0x18
#define DWT_GPIO_IMODE_LEN 4
#define DWT_GPIO_IMODE_MASK DWT_GPIO_IRQE_MASK
/* GPIO IRQ Mode selection for GPIO0 input.
 * Value 0 = Level sensitive interrupt.
 * Value 1 = Edge triggered interrupt
 */
#define DWT_GIMOD0 DWT_GIRQx0
#define DWT_GIMOD1 DWT_GIRQx1
#define DWT_GIMOD2 DWT_GIRQx2
#define DWT_GIMOD3 DWT_GIRQx3
#define DWT_GIMOD4 DWT_GIRQx4
#define DWT_GIMOD5 DWT_GIRQx5
#define DWT_GIMOD6 DWT_GIRQx6
#define DWT_GIMOD7 DWT_GIRQx7
#define DWT_GIMOD8 DWT_GIRQx8
/* Sub-register 0x1C is the GPIO interrupt "Both Edge" selection register */
#define DWT_GPIO_IBES_OFFSET 0x1C
#define DWT_GPIO_IBES_LEN 4
#define DWT_GPIO_IBES_MASK DWT_GPIO_IRQE_MASK
/* GPIO IRQ "Both Edge" selection for GPIO0 input.
 * Value 0 = GPIO_IMODE register selects the edge.
 * Value 1 = Both edges trigger the interrupt.
 */
#define DWT_GIBES0 DWT_GIRQx0
#define DWT_GIBES1 DWT_GIRQx1
#define DWT_GIBES2 DWT_GIRQx2
#define DWT_GIBES3 DWT_GIRQx3
#define DWT_GIBES4 DWT_GIRQx4
#define DWT_GIBES5 DWT_GIRQx5
#define DWT_GIBES6 DWT_GIRQx6
#define DWT_GIBES7 DWT_GIRQx7
#define DWT_GIBES8 DWT_GIRQx8
/* Sub-register 0x20 is the GPIO interrupt clear register */
#define DWT_GPIO_ICLR_OFFSET 0x20
#define DWT_GPIO_ICLR_LEN 4
#define DWT_GPIO_ICLR_MASK DWT_GPIO_IRQE_MASK
/* GPIO IRQ latch clear for GPIO0 input.
 * Write 1 to clear the GPIO0 interrupt latch.
 * Writing 0 has no effect. Reading returns zero
 */
#define DWT_GICLR0 DWT_GIRQx0
#define DWT_GICLR1 DWT_GIRQx1
#define DWT_GICLR2 DWT_GIRQx2
#define DWT_GICLR3 DWT_GIRQx3
#define DWT_GICLR4 DWT_GIRQx4
#define DWT_GICLR5 DWT_GIRQx5
#define DWT_GICLR6 DWT_GIRQx6
#define DWT_GICLR7 DWT_GIRQx7
#define DWT_GICLR8 DWT_GIRQx8
/* Sub-register 0x24 is the GPIO interrupt de-bounce enable register */
#define DWT_GPIO_IDBE_OFFSET 0x24
#define DWT_GPIO_IDBE_LEN 4
#define DWT_GPIO_IDBE_MASK DWT_GPIO_IRQE_MASK
/* GPIO IRQ de-bounce enable for GPIO0.
 * Value 1 = de-bounce enabled.
 * Value 0 = de-bounce disabled
 */
#define DWT_GIDBE0 DWT_GIRQx0
#define DWT_GIDBE1 DWT_GIRQx1
#define DWT_GIDBE2 DWT_GIRQx2
#define DWT_GIDBE3 DWT_GIRQx3
#define DWT_GIDBE4 DWT_GIRQx4
#define DWT_GIDBE5 DWT_GIRQx5
#define DWT_GIDBE6 DWT_GIRQx6
#define DWT_GIDBE7 DWT_GIRQx7
/* Value 1 = de-bounce enabled, 0 = de-bounce disabled */
#define DWT_GIDBE8 DWT_GIRQx8
/* Sub-register 0x28 allows the raw state of the GPIO pin to be read. */
#define DWT_GPIO_RAW_OFFSET 0x28
#define DWT_GPIO_RAW_LEN 4
#define DWT_GPIO_RAW_MASK DWT_GPIO_IRQE_MASK
/* This bit reflects the raw state of GPIO0 .. GPIO8 */
#define DWT_GRAWP0 DWT_GIRQx0
#define DWT_GRAWP1 DWT_GIRQx1
#define DWT_GRAWP2 DWT_GIRQx2
#define DWT_GRAWP3 DWT_GIRQx3
#define DWT_GRAWP4 DWT_GIRQx4
#define DWT_GRAWP5 DWT_GIRQx5
#define DWT_GRAWP6 DWT_GIRQx6
#define DWT_GRAWP7 DWT_GIRQx7
#define DWT_GRAWP8 DWT_GIRQx8
/* Digital Receiver configuration (register file 0x27) */
#define DWT_DRX_CONF_ID 0x27
#define DWT_DRX_CONF_LEN 44
/* Sub-register 0x02 is a 16-bit tuning register. */
#define DWT_DRX_TUNE0b_OFFSET (0x02)
#define DWT_DRX_TUNE0b_LEN 2
/* 7.2.40.2 Sub-Register 0x27:02 DRX_TUNE0b */
#define DWT_DRX_TUNE0b_MASK 0xFFFF
/* Tuning values per data rate; STD/NSTD = standard/non-standard SFD */
#define DWT_DRX_TUNE0b_110K_STD 0x000A
#define DWT_DRX_TUNE0b_110K_NSTD 0x0016
#define DWT_DRX_TUNE0b_850K_STD 0x0001
#define DWT_DRX_TUNE0b_850K_NSTD 0x0006
#define DWT_DRX_TUNE0b_6M8_STD 0x0001
#define DWT_DRX_TUNE0b_6M8_NSTD 0x0002
/* 7.2.40.3 Sub-Register 0x27:04 DRX_TUNE1a */
#define DWT_DRX_TUNE1a_OFFSET 0x04
#define DWT_DRX_TUNE1a_LEN 2
#define DWT_DRX_TUNE1a_MASK 0xFFFF
/* Tuning values per pulse repetition frequency (16 / 64 MHz) */
#define DWT_DRX_TUNE1a_PRF16 0x0087
#define DWT_DRX_TUNE1a_PRF64 0x008D
/* 7.2.40.4 Sub-Register 0x27:06 DRX_TUNE1b */
#define DWT_DRX_TUNE1b_OFFSET 0x06
#define DWT_DRX_TUNE1b_LEN 2
#define DWT_DRX_TUNE1b_MASK 0xFFFF
#define DWT_DRX_TUNE1b_110K 0x0064
#define DWT_DRX_TUNE1b_850K_6M8 0x0020
#define DWT_DRX_TUNE1b_6M8_PRE64 0x0010
/* 7.2.40.5 Sub-Register 0x27:08 DRX_TUNE2 */
#define DWT_DRX_TUNE2_OFFSET 0x08
#define DWT_DRX_TUNE2_LEN 4
#define DWT_DRX_TUNE2_MASK 0xFFFFFFFFUL
/* Tuning values per PRF and preamble acquisition chunk (PAC) size */
#define DWT_DRX_TUNE2_PRF16_PAC8 0x311A002DUL
#define DWT_DRX_TUNE2_PRF16_PAC16 0x331A0052UL
#define DWT_DRX_TUNE2_PRF16_PAC32 0x351A009AUL
#define DWT_DRX_TUNE2_PRF16_PAC64 0x371A011DUL
#define DWT_DRX_TUNE2_PRF64_PAC8 0x313B006BUL
#define DWT_DRX_TUNE2_PRF64_PAC16 0x333B00BEUL
#define DWT_DRX_TUNE2_PRF64_PAC32 0x353B015EUL
#define DWT_DRX_TUNE2_PRF64_PAC64 0x373B0296UL
/* WARNING: Please do NOT set DRX_SFDTOC to zero
 * (disabling SFD detection timeout) since this risks IC malfunction
 * due to prolonged receiver activity in the event of false preamble detection.
 */
/* 7.2.40.7 Sub-Register 0x27:20 DRX_SFDTOC */
#define DWT_DRX_SFDTOC_OFFSET 0x20
#define DWT_DRX_SFDTOC_LEN 2
#define DWT_DRX_SFDTOC_MASK 0xFFFF
/* 7.2.40.9 Sub-Register 0x27:24 DRX_PRETOC */
#define DWT_DRX_PRETOC_OFFSET 0x24
#define DWT_DRX_PRETOC_LEN 2
#define DWT_DRX_PRETOC_MASK 0xFFFF
/* 7.2.40.10 Sub-Register 0x27:26 DRX_TUNE4H */
#define DWT_DRX_TUNE4H_OFFSET 0x26
#define DWT_DRX_TUNE4H_LEN 2
#define DWT_DRX_TUNE4H_MASK 0xFFFF
/* Tuning values for preamble length 64 vs. 128 and longer */
#define DWT_DRX_TUNE4H_PRE64 0x0010
#define DWT_DRX_TUNE4H_PRE128PLUS 0x0028
/*
 * Offset from DRX_CONF_ID in bytes to 21-bit signed
 * RX carrier integrator value
 */
#define DWT_DRX_CARRIER_INT_OFFSET 0x28
#define DWT_DRX_CARRIER_INT_LEN 3
#define DWT_DRX_CARRIER_INT_MASK 0x001FFFFF
/* 7.2.40.11 Sub-Register 0x27:2C - RXPACC_NOSAT */
#define DWT_RPACC_NOSAT_OFFSET 0x2C
#define DWT_RPACC_NOSAT_LEN 2
#define DWT_RPACC_NOSAT_MASK 0xFFFF
/* Analog RF Configuration (register file 0x28) */
#define DWT_RF_CONF_ID 0x28
#define DWT_RF_CONF_LEN 58
/* TX enable */
#define DWT_RF_CONF_TXEN_MASK 0x00400000UL
/* RX enable */
#define DWT_RF_CONF_RXEN_MASK 0x00200000UL
/* Turn on power all LDOs */
#define DWT_RF_CONF_TXPOW_MASK 0x001F0000UL
/* Enable PLLs */
#define DWT_RF_CONF_PLLEN_MASK 0x0000E000UL
/* Enable TX blocks */
#define DWT_RF_CONF_TXBLOCKSEN_MASK 0x00001F00UL
/* Combined masks for powering up the TX chain in stages */
#define DWT_RF_CONF_TXPLLPOWEN_MASK (DWT_RF_CONF_PLLEN_MASK | \
				     DWT_RF_CONF_TXPOW_MASK)
#define DWT_RF_CONF_TXALLEN_MASK (DWT_RF_CONF_TXEN_MASK | \
				  DWT_RF_CONF_TXPOW_MASK | \
				  DWT_RF_CONF_PLLEN_MASK | \
				  DWT_RF_CONF_TXBLOCKSEN_MASK)
/* Analog RX Control Register */
#define DWT_RF_RXCTRLH_OFFSET 0x0B
#define DWT_RF_RXCTRLH_LEN 1
/* RXCTRLH value for narrow bandwidth channels */
#define DWT_RF_RXCTRLH_NBW 0xD8
/* RXCTRLH value for wide bandwidth channels */
#define DWT_RF_RXCTRLH_WBW 0xBC
/* Analog TX Control Register */
#define DWT_RF_TXCTRL_OFFSET 0x0C
#define DWT_RF_TXCTRL_LEN 4
/* Transmit mixer tuning register */
#define DWT_RF_TXCTRL_TXMTUNE_MASK 0x000001E0UL
/* Transmit mixer Q-factor tuning register */
#define DWT_RF_TXCTRL_TXTXMQ_MASK 0x00000E00UL
/* 32-bit value to program to Sub-Register 0x28:0C RF_TXCTRL */
#define DWT_RF_TXCTRL_CH1 0x00005C40UL
/* 32-bit value to program to Sub-Register 0x28:0C RF_TXCTRL */
#define DWT_RF_TXCTRL_CH2 0x00045CA0UL
/* 32-bit value to program to Sub-Register 0x28:0C RF_TXCTRL */
#define DWT_RF_TXCTRL_CH3 0x00086CC0UL
/* 32-bit value to program to Sub-Register 0x28:0C RF_TXCTRL */
#define DWT_RF_TXCTRL_CH4 0x00045C80UL
/* 32-bit value to program to Sub-Register 0x28:0C RF_TXCTRL */
#define DWT_RF_TXCTRL_CH5 0x001E3FE0UL
/* 32-bit value to program to Sub-Register 0x28:0C RF_TXCTRL */
#define DWT_RF_TXCTRL_CH7 0x001E7DE0UL
#define DWT_RF_STATUS_OFFSET 0x2C
#define DWT_REG_29_ID_RESERVED 0x29
/* Transmitter calibration block (register file 0x2A) */
#define DWT_TX_CAL_ID 0x2A
#define DWT_TX_CAL_LEN 52
/* SAR control */
#define DWT_TC_SARL_SAR_C 0
/* Due to a bug in the TX_CAL register block, it must be read
 * one byte at a time.
 */
/* Latest SAR reading for Voltage level */
#define DWT_TC_SARL_SAR_LVBAT_OFFSET 3
/* Latest SAR reading for Temperature level */
#define DWT_TC_SARL_SAR_LTEMP_OFFSET 4
/* SAR reading of Temperature level taken at last wakeup event */
#define DWT_TC_SARW_SAR_WTEMP_OFFSET 0x06
/* SAR reading of Voltage level taken at last wakeup event */
#define DWT_TC_SARW_SAR_WVBAT_OFFSET 0x07
/* Transmitter Calibration Pulse Generator Delay */
#define DWT_TC_PGDELAY_OFFSET 0x0B
#define DWT_TC_PGDELAY_LEN 1
/* Recommended value for channel 1 */
#define DWT_TC_PGDELAY_CH1 0xC9
/* Recommended value for channel 2 */
#define DWT_TC_PGDELAY_CH2 0xC2
/* Recommended value for channel 3 */
#define DWT_TC_PGDELAY_CH3 0xC5
/* Recommended value for channel 4 */
#define DWT_TC_PGDELAY_CH4 0x95
/* Recommended value for channel 5 */
#define DWT_TC_PGDELAY_CH5 0xC0
/* Recommended value for channel 7 */
#define DWT_TC_PGDELAY_CH7 0x93
/* Transmitter Calibration Pulse Generator Test */
#define DWT_TC_PGTEST_OFFSET 0x0C
#define DWT_TC_PGTEST_LEN 1
/* Normal operation */
#define DWT_TC_PGTEST_NORMAL 0x00
/* Continuous Wave (CW) Test Mode */
#define DWT_TC_PGTEST_CW 0x13
/* Frequency synthesiser control block (register file 0x2B) */
#define DWT_FS_CTRL_ID 0x2B
#define DWT_FS_CTRL_LEN 21
/*
 * Offset from FS_CTRL_ID in bytes, reserved area.
 * Please take care not to write to this area as doing so
 * may cause the DW1000 to malfunction.
 */
#define DWT_FS_RES1_OFFSET 0x00
#define DWT_FS_RES1_LEN 7
/* Frequency synthesiser PLL configuration */
#define DWT_FS_PLLCFG_OFFSET 0x07
#define DWT_FS_PLLCFG_LEN 5
/* Operating Channel 1 */
#define DWT_FS_PLLCFG_CH1 0x09000407UL
/* Operating Channel 2 */
#define DWT_FS_PLLCFG_CH2 0x08400508UL
/* Operating Channel 3 */
#define DWT_FS_PLLCFG_CH3 0x08401009UL
/* Operating Channel 4 (same as 2) */
#define DWT_FS_PLLCFG_CH4 DWT_FS_PLLCFG_CH2
/* Operating Channel 5 */
#define DWT_FS_PLLCFG_CH5 0x0800041DUL
/* Operating Channel 7 (same as 5) */
#define DWT_FS_PLLCFG_CH7 DWT_FS_PLLCFG_CH5
/* Frequency synthesiser PLL Tuning */
#define DWT_FS_PLLTUNE_OFFSET 0x0B
#define DWT_FS_PLLTUNE_LEN 1
/* Operating Channel 1 */
#define DWT_FS_PLLTUNE_CH1 0x1E
/* Operating Channel 2 */
#define DWT_FS_PLLTUNE_CH2 0x26
/* Operating Channel 3 */
#define DWT_FS_PLLTUNE_CH3 0x56
/* Operating Channel 4 (same as 2) */
#define DWT_FS_PLLTUNE_CH4 DWT_FS_PLLTUNE_CH2
/* Operating Channel 5 */
#define DWT_FS_PLLTUNE_CH5 0xBE
/* Operating Channel 7 (same as 5) */
#define DWT_FS_PLLTUNE_CH7 DWT_FS_PLLTUNE_CH5
/*
 * Offset from FS_CTRL_ID in bytes.
 * Please take care not to write to this area as doing so
 * may cause the DW1000 to malfunction.
 */
#define DWT_FS_RES2_OFFSET 0x0C
#define DWT_FS_RES2_LEN 2
/* Frequency synthesiser Crystal trim */
#define DWT_FS_XTALT_OFFSET 0x0E
#define DWT_FS_XTALT_LEN 1
/*
 * Crystal Trim.
 * Crystals may be trimmed using this register setting to tune out errors,
 * see 8.1 IC Calibration Crystal Oscillator Trim.
 */
#define DWT_FS_XTALT_MASK 0x1F
/* Mid-range trim value, used when no OTP calibration value is present */
#define DWT_FS_XTALT_MIDRANGE 0x10
/*
 * Offset from FS_CTRL_ID in bytes.
 * Please take care not to write to this area as doing so
 * may cause the DW1000 to malfunction.
 */
#define DWT_FS_RES3_OFFSET 0x0F
#define DWT_FS_RES3_LEN 6
/* Always-On register set (register file 0x2C) */
#define DWT_AON_ID 0x2C
#define DWT_AON_LEN 12
/*
 * Offset from AON_ID in bytes
 * Used to control what the DW1000 IC does as it wakes up from
 * low-power SLEEP or DEEPSLEEP states.
 */
#define DWT_AON_WCFG_OFFSET 0x00
#define DWT_AON_WCFG_LEN 2
/* Access mask to AON_WCFG register */
#define DWT_AON_WCFG_MASK 0x09CB
/* On Wake-up Run the (temperature and voltage) Analog-to-Digital Converters */
#define DWT_AON_WCFG_ONW_RADC 0x0001
/* On Wake-up turn on the Receiver */
#define DWT_AON_WCFG_ONW_RX 0x0002
/*
 * On Wake-up load the EUI from OTP memory into Register file:
 * 0x01 Extended Unique Identifier.
 */
#define DWT_AON_WCFG_ONW_LEUI 0x0008
/*
 * On Wake-up load configurations from the AON memory
 * into the host interface register set
 */
#define DWT_AON_WCFG_ONW_LDC 0x0040
/* On Wake-up load the Length64 receiver operating parameter set */
#define DWT_AON_WCFG_ONW_L64P 0x0080
/*
 * Preserve Sleep. This bit determines what the DW1000 does
 * with respect to the ARXSLP and ATXSLP sleep controls
 */
#define DWT_AON_WCFG_PRES_SLEEP 0x0100
/* On Wake-up load the LDE microcode. */
#define DWT_AON_WCFG_ONW_LLDE 0x0800
/* On Wake-up load the LDO tune value. */
#define DWT_AON_WCFG_ONW_LLDO 0x1000
/*
 * The bits in this register in general cause direct activity
 * within the AON block with respect to the stored AON memory
 */
#define DWT_AON_CTRL_OFFSET 0x02
#define DWT_AON_CTRL_LEN 1
/* Access mask to AON_CTRL register */
#define DWT_AON_CTRL_MASK 0x8F
/*
 * When this bit is set the DW1000 will copy the user configurations
 * from the AON memory to the host interface register set.
 */
#define DWT_AON_CTRL_RESTORE 0x01
/*
 * When this bit is set the DW1000 will copy the user configurations
 * from the host interface register set into the AON memory
 */
#define DWT_AON_CTRL_SAVE 0x02
/* Upload the AON block configurations to the AON */
#define DWT_AON_CTRL_UPL_CFG 0x04
/* Direct AON memory access read */
#define DWT_AON_CTRL_DCA_READ 0x08
/* Direct AON memory access enable bit */
#define DWT_AON_CTRL_DCA_ENAB 0x80
/* AON Direct Access Read Data Result */
#define DWT_AON_RDAT_OFFSET 0x03
#define DWT_AON_RDAT_LEN 1
/* AON Direct Access Address */
#define DWT_AON_ADDR_OFFSET 0x04
#define DWT_AON_ADDR_LEN 1
/* Address of low-power oscillator calibration value (lower byte) */
#define DWT_AON_ADDR_LPOSC_CAL_0 117
/* Address of low-power oscillator calibration value (upper byte) */
#define DWT_AON_ADDR_LPOSC_CAL_1 118
/* 32-bit configuration register for the always on block. */
#define DWT_AON_CFG0_OFFSET 0x06
#define DWT_AON_CFG0_LEN 4
/* This is the sleep enable configuration bit */
#define DWT_AON_CFG0_SLEEP_EN 0x00000001UL
/* Wake using WAKEUP pin */
#define DWT_AON_CFG0_WAKE_PIN 0x00000002UL
/* Wake using SPI access SPICSn */
#define DWT_AON_CFG0_WAKE_SPI 0x00000004UL
/* Wake when sleep counter elapses */
#define DWT_AON_CFG0_WAKE_CNT 0x00000008UL
/* Low power divider enable configuration */
#define DWT_AON_CFG0_LPDIV_EN 0x00000010UL
/*
 * Divider count for dividing the raw DW1000 XTAL oscillator frequency
 * to set an LP clock frequency
 */
#define DWT_AON_CFG0_LPCLKDIVA_MASK 0x0000FFE0UL
#define DWT_AON_CFG0_LPCLKDIVA_SHIFT 5
/* Sleep time. This field configures the sleep time count elapse value */
#define DWT_AON_CFG0_SLEEP_TIM 0xFFFF0000UL
#define DWT_AON_CFG0_SLEEP_SHIFT 16
#define DWT_AON_CFG0_SLEEP_TIM_OFFSET 2
#define DWT_AON_CFG1_OFFSET 0x0A
#define DWT_AON_CFG1_LEN 2
/* access mask to AON_CFG1 */
#define DWT_AON_CFG1_MASK 0x0007
/* This bit enables the sleep counter */
#define DWT_AON_CFG1_SLEEP_CEN 0x0001
/*
 * This bit needs to be set to 0 for correct operation
 * in the SLEEP state within the DW1000
 */
#define DWT_AON_CFG1_SMXX 0x0002
/*
 * This bit enables the calibration function that measures
 * the period of the ICs internal low powered oscillator.
 */
#define DWT_AON_CFG1_LPOSC_CAL 0x0004
/* One Time Programmable Memory Interface (register file 0x2D) */
#define DWT_OTP_IF_ID 0x2D
#define DWT_OTP_IF_LEN 18
/* 32-bit register. The data value to be programmed into an OTP location */
#define DWT_OTP_WDAT 0x00
#define DWT_OTP_WDAT_LEN 4
/* 16-bit register used to select the address within the OTP memory block */
#define DWT_OTP_ADDR 0x04
#define DWT_OTP_ADDR_LEN 2
/*
 * This 11-bit field specifies the address within OTP memory
 * that will be accessed read or written.
 */
#define DWT_OTP_ADDR_MASK 0x07FF
/* Used to control the operation of the OTP memory */
#define DWT_OTP_CTRL 0x06
#define DWT_OTP_CTRL_LEN 2
#define DWT_OTP_CTRL_MASK 0x8002
/* This bit forces the OTP into manual read mode */
#define DWT_OTP_CTRL_OTPRDEN 0x0001
/*
 * This bit commands a read operation from the address specified
 * in the OTP_ADDR register
 */
#define DWT_OTP_CTRL_OTPREAD 0x0002
/* This bit forces a load of LDE microcode */
#define DWT_OTP_CTRL_LDELOAD 0x8000
/*
 * Setting this bit will cause the contents of OTP_WDAT to be written
 * to OTP_ADDR.
 */
#define DWT_OTP_CTRL_OTPPROG 0x0040
/* OTP status register */
#define DWT_OTP_STAT 0x08
#define DWT_OTP_STAT_LEN 2
#define DWT_OTP_STAT_MASK 0x0003
/* OTP Programming Done */
#define DWT_OTP_STAT_OTPPRGD 0x0001
/* OTP Programming Voltage OK */
#define DWT_OTP_STAT_OTPVPOK 0x0002
/* 32-bit register. The data value read from an OTP location will appear here */
#define DWT_OTP_RDAT 0x0A
#define DWT_OTP_RDAT_LEN 4
/*
 * 32-bit register. The data value stored in the OTP SR (0x400) location
 * will appear here after power up
 */
#define DWT_OTP_SRDAT 0x0E
#define DWT_OTP_SRDAT_LEN 4
/*
 * 8-bit special function register used to select and
 * load special receiver operational parameter
 */
#define DWT_OTP_SF 0x12
#define DWT_OTP_SF_LEN 1
#define DWT_OTP_SF_MASK 0x63
/*
 * This bit when set initiates a load of the operating parameter set
 * selected by the OPS_SEL
 */
#define DWT_OTP_SF_OPS_KICK 0x01
/* This bit when set initiates a load of the LDO tune code */
#define DWT_OTP_SF_LDO_KICK 0x02
#define DWT_OTP_SF_OPS_SEL_SHFT 5
#define DWT_OTP_SF_OPS_SEL_MASK 0x60
/* Operating parameter set selection: Length64 */
#define DWT_OTP_SF_OPS_SEL_L64 0x00
/* Operating parameter set selection: Tight */
#define DWT_OTP_SF_OPS_SEL_TIGHT 0x40
/* Leading edge detection control block (register file 0x2E) */
#define DWT_LDE_IF_ID 0x2E
#define DWT_LDE_IF_LEN 0
/*
 * 16-bit status register reporting the threshold that was used
 * to find the first path
 */
#define DWT_LDE_THRESH_OFFSET 0x0000
#define DWT_LDE_THRESH_LEN 2
/* 8-bit configuration register */
#define DWT_LDE_CFG1_OFFSET 0x0806
#define DWT_LDE_CFG1_LEN 1
/* Number of Standard Deviations mask. */
#define DWT_LDE_CFG1_NSTDEV_MASK 0x1F
/* Peak Multiplier mask. */
#define DWT_LDE_CFG1_PMULT_MASK 0xE0
/*
 * Reporting the position within the accumulator that the LDE algorithm
 * has determined to contain the maximum
 */
#define DWT_LDE_PPINDX_OFFSET 0x1000
#define DWT_LDE_PPINDX_LEN 2
/*
 * Reporting the magnitude of the peak signal seen
 * in the accumulator data memory
 */
#define DWT_LDE_PPAMPL_OFFSET 0x1002
#define DWT_LDE_PPAMPL_LEN 2
/* 16-bit configuration register for setting the receive antenna delay */
#define DWT_LDE_RXANTD_OFFSET 0x1804
#define DWT_LDE_RXANTD_LEN 2
/* 16-bit LDE configuration tuning register */
#define DWT_LDE_CFG2_OFFSET 0x1806
#define DWT_LDE_CFG2_LEN 2
/*
 * 16-bit configuration register for setting
 * the replica avoidance coefficient
 */
#define DWT_LDE_REPC_OFFSET 0x2804
#define DWT_LDE_REPC_LEN 2
/* LDE replica coefficient values, one per RX preamble code */
#define DWT_LDE_REPC_PCODE_1 0x5998
#define DWT_LDE_REPC_PCODE_2 0x5998
#define DWT_LDE_REPC_PCODE_3 0x51EA
#define DWT_LDE_REPC_PCODE_4 0x428E
#define DWT_LDE_REPC_PCODE_5 0x451E
#define DWT_LDE_REPC_PCODE_6 0x2E14
#define DWT_LDE_REPC_PCODE_7 0x8000
#define DWT_LDE_REPC_PCODE_8 0x51EA
#define DWT_LDE_REPC_PCODE_9 0x28F4
#define DWT_LDE_REPC_PCODE_10 0x3332
#define DWT_LDE_REPC_PCODE_11 0x3AE0
#define DWT_LDE_REPC_PCODE_12 0x3D70
#define DWT_LDE_REPC_PCODE_13 0x3AE0
#define DWT_LDE_REPC_PCODE_14 0x35C2
#define DWT_LDE_REPC_PCODE_15 0x2B84
#define DWT_LDE_REPC_PCODE_16 0x35C2
#define DWT_LDE_REPC_PCODE_17 0x3332
#define DWT_LDE_REPC_PCODE_18 0x35C2
#define DWT_LDE_REPC_PCODE_19 0x35C2
#define DWT_LDE_REPC_PCODE_20 0x47AE
#define DWT_LDE_REPC_PCODE_21 0x3AE0
#define DWT_LDE_REPC_PCODE_22 0x3850
#define DWT_LDE_REPC_PCODE_23 0x30A2
#define DWT_LDE_REPC_PCODE_24 0x3850
/* Digital Diagnostics Interface (register file 0x2F) */
#define DWT_DIG_DIAG_ID 0x2F
#define DWT_DIG_DIAG_LEN 41
/* Event Counter Control */
#define DWT_EVC_CTRL_OFFSET 0x00
#define DWT_EVC_CTRL_LEN 4
/*
 * Access mask to Register; other bits should always be set to zero
 * to avoid any malfunction of the device.
 */
#define DWT_EVC_CTRL_MASK 0x00000003UL
/* Event Counters Enable bit */
#define DWT_EVC_EN 0x00000001UL
/* Event Counters Clear bit */
#define DWT_EVC_CLR 0x00000002UL
/* PHR Error Event Counter */
#define DWT_EVC_PHE_OFFSET 0x04
#define DWT_EVC_PHE_LEN 2
#define DWT_EVC_PHE_MASK 0x0FFF
/* Reed Solomon decoder (Frame Sync Loss) Error Event Counter */
#define DWT_EVC_RSE_OFFSET 0x06
#define DWT_EVC_RSE_LEN 2
#define DWT_EVC_RSE_MASK 0x0FFF
/*
 * The EVC_FCG field is a 12-bit counter of the frames received with
 * good CRC/FCS sequence.
 */
#define DWT_EVC_FCG_OFFSET 0x08
#define DWT_EVC_FCG_LEN 2
#define DWT_EVC_FCG_MASK 0x0FFF
/*
 * The EVC_FCE field is a 12-bit counter of the frames received with
 * bad CRC/FCS sequence.
 */
#define DWT_EVC_FCE_OFFSET 0x0A
#define DWT_EVC_FCE_LEN 2
#define DWT_EVC_FCE_MASK 0x0FFF
/*
 * The EVC_FFR field is a 12-bit counter of the frames rejected
 * by the receive frame filtering function.
 */
#define DWT_EVC_FFR_OFFSET 0x0C
#define DWT_EVC_FFR_LEN 2
#define DWT_EVC_FFR_MASK 0x0FFF
/* The EVC_OVR field is a 12-bit counter of receive overrun events */
#define DWT_EVC_OVR_OFFSET 0x0E
#define DWT_EVC_OVR_LEN 2
#define DWT_EVC_OVR_MASK 0x0FFF
/* The EVC_STO field is a 12-bit counter of SFD Timeout Error events.
 * Note: the LEN/MASK macros were previously misnamed DWT_EVC_OVR_LEN /
 * DWT_EVC_OVR_MASK, duplicating the overrun counter definitions.
 */
#define DWT_EVC_STO_OFFSET 0x10
#define DWT_EVC_STO_LEN 2
#define DWT_EVC_STO_MASK 0x0FFF
/* The EVC_PTO field is a 12-bit counter of Preamble detection Timeout events */
#define DWT_EVC_PTO_OFFSET 0x12
#define DWT_EVC_PTO_LEN 2
#define DWT_EVC_PTO_MASK 0x0FFF
/*
 * The EVC_FWTO field is a 12-bit counter of receive
 * frame wait timeout events
 */
#define DWT_EVC_FWTO_OFFSET 0x14
#define DWT_EVC_FWTO_LEN 2
#define DWT_EVC_FWTO_MASK 0x0FFF
/*
 * The EVC_TXFS field is a 12-bit counter of transmit frames sent.
 * This is incremented every time a frame is sent
 */
#define DWT_EVC_TXFS_OFFSET 0x16
#define DWT_EVC_TXFS_LEN 2
#define DWT_EVC_TXFS_MASK 0x0FFF
/* The EVC_HPW field is a 12-bit counter of Half Period Warnings. */
#define DWT_EVC_HPW_OFFSET 0x18
#define DWT_EVC_HPW_LEN 2
#define DWT_EVC_HPW_MASK 0x0FFF
/* The EVC_TPW field is a 12-bit counter of Transmitter Power-Up Warnings. */
#define DWT_EVC_TPW_OFFSET 0x1A
#define DWT_EVC_TPW_LEN 2
#define DWT_EVC_TPW_MASK 0x0FFF
/*
 * Offset from DIG_DIAG_ID in bytes,
 * Please take care not to write to this area as doing so
 * may cause the DW1000 to malfunction.
 */
#define DWT_EVC_RES1_OFFSET 0x1C
/* Digital diagnostics Test Mode Control */
#define DWT_DIAG_TMC_OFFSET 0x24
#define DWT_DIAG_TMC_LEN 2
#define DWT_DIAG_TMC_MASK 0x0010
/*
 * This test mode is provided to help support regulatory approvals
 * spectral testing. When the TX_PSTM bit is set it enables a
 * repeating transmission of the data from the TX_BUFFER
 */
#define DWT_DIAG_TMC_TX_PSTM 0x0010
/* Register files 0x30 - 0x35 are reserved */
#define DWT_REG_30_ID_RESERVED 0x30
#define DWT_REG_31_ID_RESERVED 0x31
#define DWT_REG_32_ID_RESERVED 0x32
#define DWT_REG_33_ID_RESERVED 0x33
#define DWT_REG_34_ID_RESERVED 0x34
#define DWT_REG_35_ID_RESERVED 0x35
/* Power Management System Control Block (register file 0x36) */
#define DWT_PMSC_ID 0x36
#define DWT_PMSC_LEN 48
#define DWT_PMSC_CTRL0_OFFSET 0x00
#define DWT_PMSC_CTRL0_LEN 4
/* Access mask to register PMSC_CTRL0 */
#define DWT_PMSC_CTRL0_MASK 0xF18F847FUL
/*
 * The system clock will run off the 19.2 MHz XTI clock until the PLL is
 * calibrated and locked, then it will switch over the 125 MHz PLL clock
 */
#define DWT_PMSC_CTRL0_SYSCLKS_AUTO 0x00000000UL
/* Force system clock to be the 19.2 MHz XTI clock. */
#define DWT_PMSC_CTRL0_SYSCLKS_19M 0x00000001UL
/* Force system clock to the 125 MHz PLL clock. */
#define DWT_PMSC_CTRL0_SYSCLKS_125M 0x00000002UL
/* The RX clock will be disabled until it is required for an RX operation */
#define DWT_PMSC_CTRL0_RXCLKS_AUTO 0x00000000UL
/* Force RX clock enable and sourced clock from the 19.2 MHz XTI clock */
#define DWT_PMSC_CTRL0_RXCLKS_19M 0x00000004UL
/* Force RX clock enable and sourced from the 125 MHz PLL clock */
#define DWT_PMSC_CTRL0_RXCLKS_125M 0x00000008UL
/* Force RX clock off. */
#define DWT_PMSC_CTRL0_RXCLKS_OFF 0x0000000CUL
/* The TX clock will be disabled until it is required for a TX operation */
#define DWT_PMSC_CTRL0_TXCLKS_AUTO 0x00000000UL
/* Force TX clock enable and sourced clock from the 19.2 MHz XTI clock */
#define DWT_PMSC_CTRL0_TXCLKS_19M 0x00000010UL
/* Force TX clock enable and sourced from the 125 MHz PLL clock */
#define DWT_PMSC_CTRL0_TXCLKS_125M 0x00000020UL
/* Force TX clock off */
#define DWT_PMSC_CTRL0_TXCLKS_OFF 0x00000030UL
/* Force Accumulator Clock Enable */
#define DWT_PMSC_CTRL0_FACE 0x00000040UL
/* GPIO clock enable */
#define DWT_PMSC_CTRL0_GPCE 0x00010000UL
/* GPIO reset (NOT), active low */
#define DWT_PMSC_CTRL0_GPRN 0x00020000UL
/* GPIO De-bounce Clock Enable */
#define DWT_PMSC_CTRL0_GPDCE 0x00040000UL
/* Kilohertz Clock Enable */
#define DWT_PMSC_CTRL0_KHZCLEN 0x00800000UL
/* Enable PLL2 on/off sequencing by SNIFF mode */
#define DWT_PMSC_CTRL0_PLL2_SEQ_EN 0x01000000UL
/* Byte offset of the SOFTRESET field within PMSC_CTRL0 */
#define DWT_PMSC_CTRL0_SOFTRESET_OFFSET 3
/* Assuming only 4th byte of the register is written */
#define DWT_PMSC_CTRL0_RESET_ALL 0x00
/* Assuming only 4th byte of the register is written */
#define DWT_PMSC_CTRL0_RESET_RX 0xE0
/* Assuming only 4th byte of the register is written */
#define DWT_PMSC_CTRL0_RESET_CLEAR 0xF0
#define DWT_PMSC_CTRL1_OFFSET 0x04
#define DWT_PMSC_CTRL1_LEN 4
/* Access mask to register PMSC_CTRL1 */
#define DWT_PMSC_CTRL1_MASK 0xFC02F802UL
/* Automatic transition from receive mode into the INIT state */
#define DWT_PMSC_CTRL1_ARX2INIT 0x00000002UL
/*
 * If this bit is set then the DW1000 will automatically transition
 * into SLEEP or DEEPSLEEP mode after transmission of a frame
 */
#define DWT_PMSC_CTRL1_ATXSLP 0x00000800UL
/*
 * This bit is set then the DW1000 will automatically transition
 * into SLEEP mode after a receive attempt
 */
#define DWT_PMSC_CTRL1_ARXSLP 0x00001000UL
/* Snooze Enable */
#define DWT_PMSC_CTRL1_SNOZE 0x00002000UL
/* The SNOZR bit is set to allow the snooze timer to repeat twice */
#define DWT_PMSC_CTRL1_SNOZR 0x00004000UL
/* This enables a special 1 GHz clock used for some external SYNC modes */
#define DWT_PMSC_CTRL1_PLLSYN 0x00008000UL
/* This bit enables the running of the LDE algorithm */
#define DWT_PMSC_CTRL1_LDERUNE 0x00020000UL
/* Kilohertz clock divisor */
#define DWT_PMSC_CTRL1_KHZCLKDIV_MASK 0xFC000000UL
/*
 * Writing this to PMSC CONTROL 1 register (bits 10-3) disables
 * PMSC control of analog RF subsystems
 */
#define DWT_PMSC_CTRL1_PKTSEQ_DISABLE 0x00
/*
 * Writing this to PMSC CONTROL 1 register (bits 10-3) enables
 * PMSC control of analog RF subsystems
 */
#define DWT_PMSC_CTRL1_PKTSEQ_ENABLE 0xE7
#define DWT_PMSC_RES1_OFFSET 0x08
/* PMSC Snooze Time Register */
#define DWT_PMSC_SNOZT_OFFSET 0x0C
#define DWT_PMSC_SNOZT_LEN 1
#define DWT_PMSC_RES2_OFFSET 0x10
#define DWT_PMSC_RES3_OFFSET 0x24
/* Fine grain TX sequencing control */
#define DWT_PMSC_TXFINESEQ_OFFSET 0x26
/* Writing this disables fine grain sequencing in the transmitter */
#define DWT_PMSC_TXFINESEQ_DISABLE 0x0
/* Writing this enables fine grain sequencing in the transmitter */
#define DWT_PMSC_TXFINESEQ_ENABLE 0x0B74
#define DWT_PMSC_LEDC_OFFSET 0x28
#define DWT_PMSC_LEDC_LEN 4
/* 32-bit LED control register. */
#define DWT_PMSC_LEDC_MASK 0x000001FFUL
/*
 * This field determines how long the LEDs remain lit after an event
 * that causes them to be set on.
 */
#define DWT_PMSC_LEDC_BLINK_TIM_MASK 0x000000FFUL
/* Blink Enable. When this bit is set to 1 the LED blink feature is enabled. */
#define DWT_PMSC_LEDC_BLNKEN 0x00000100UL
/*
 * Default blink time. Blink time is expressed in multiples of 14 ms.
 * The value defined here is ~225 ms.
 */
#define DWT_PMSC_LEDC_BLINK_TIME_DEF 0x10
/* Command a blink of all LEDs */
#define DWT_PMSC_LEDC_BLINK_NOW_ALL 0x000F0000UL
/* Register files 0x37 - 0x3F are reserved */
#define DWT_REG_37_ID_RESERVED 0x37
#define DWT_REG_38_ID_RESERVED 0x38
#define DWT_REG_39_ID_RESERVED 0x39
#define DWT_REG_3A_ID_RESERVED 0x3A
#define DWT_REG_3B_ID_RESERVED 0x3B
#define DWT_REG_3C_ID_RESERVED 0x3C
#define DWT_REG_3D_ID_RESERVED 0x3D
#define DWT_REG_3E_ID_RESERVED 0x3E
#define DWT_REG_3F_ID_RESERVED 0x3F
/*
 * Map the channel number to the index in the configuration arrays below.
 * Channel: na 1 2 3 4 5 na 7
 *
 * Channels 0 and 6 do not exist on the DW1000; their entries alias
 * index 0, so the channel number must be validated before indexing.
 *
 * NOTE(review): non-static const objects defined in a header have
 * external linkage in C; presumably this header is included by a single
 * translation unit only - confirm.
 */
const uint8_t dwt_ch_to_cfg[] = {0, 0, 1, 2, 3, 4, 0, 5};

/* Defaults from Table 38: Sub-Register 0x28:0C RF_TXCTRL values,
 * one entry per supported channel (indexed via dwt_ch_to_cfg).
 */
const uint32_t dwt_txctrl_defs[] = {
	DWT_RF_TXCTRL_CH1,
	DWT_RF_TXCTRL_CH2,
	DWT_RF_TXCTRL_CH3,
	DWT_RF_TXCTRL_CH4,
	DWT_RF_TXCTRL_CH5,
	DWT_RF_TXCTRL_CH7,
};

/* Defaults from Table 43: Sub-Register 0x2B:07 FS_PLLCFG values */
const uint32_t dwt_pllcfg_defs[] = {
	DWT_FS_PLLCFG_CH1,
	DWT_FS_PLLCFG_CH2,
	DWT_FS_PLLCFG_CH3,
	DWT_FS_PLLCFG_CH4,
	DWT_FS_PLLCFG_CH5,
	DWT_FS_PLLCFG_CH7
};

/* Defaults from Table 44: Sub-Register 0x2B:0B FS_PLLTUNE values */
const uint8_t dwt_plltune_defs[] = {
	DWT_FS_PLLTUNE_CH1,
	DWT_FS_PLLTUNE_CH2,
	DWT_FS_PLLTUNE_CH3,
	DWT_FS_PLLTUNE_CH4,
	DWT_FS_PLLTUNE_CH5,
	DWT_FS_PLLTUNE_CH7
};

/* Defaults from Table 37: Sub-Register 0x28:0B RF_RXCTRLH values.
 * Narrow bandwidth, except for the wide channels 4 and 7.
 */
const uint8_t dwt_rxctrlh_defs[] = {
	DWT_RF_RXCTRLH_NBW,
	DWT_RF_RXCTRLH_NBW,
	DWT_RF_RXCTRLH_NBW,
	DWT_RF_RXCTRLH_WBW,
	DWT_RF_RXCTRLH_NBW,
	DWT_RF_RXCTRLH_WBW
};

/* Defaults from Table 40: Sub-Register 0x2A:0B TC_PGDELAY */
const uint8_t dwt_pgdelay_defs[] = {
	DWT_TC_PGDELAY_CH1,
	DWT_TC_PGDELAY_CH2,
	DWT_TC_PGDELAY_CH3,
	DWT_TC_PGDELAY_CH4,
	DWT_TC_PGDELAY_CH5,
	DWT_TC_PGDELAY_CH7
};
/*
 * Defaults from Table 19: Reference values for Register file:
 * 0x1E Transmit Power Control for Smart Transmit Power Control
 * Transmit Power Control values for 16 MHz, with DIS_STXP = 0
 *
 * One entry per supported channel (indexed via dwt_ch_to_cfg).
 */
const uint32_t dwt_txpwr_stxp0_16[] = {
	0x15355575,
	0x15355575,
	0x0F2F4F6F,
	0x1F1F3F5F,
	0x0E082848,
	0x32527292
};

/*
 * Defaults from Table 19: Reference values for Register file:
 * 0x1E Transmit Power Control for Smart Transmit Power Control
 * Transmit Power Control values for 64 MHz, with DIS_STXP = 0
 */
const uint32_t dwt_txpwr_stxp0_64[] = {
	0x07274767,
	0x07274767,
	0x2B4B6B8B,
	0x3A5A7A9A,
	0x25456585,
	0x5171B1D1
};

/*
 * Default from Table 20: Reference values Register file:
 * 0x1E Transmit Power Control for Manual Transmit Power Control
 * Transmit Power Control values for 16 MHz, with DIS_STXP = 1
 *
 * All four bytes carry the same value, as manual power control uses
 * a single power setting for the whole frame.
 */
const uint32_t dwt_txpwr_stxp1_16[] = {
	0x75757575,
	0x75757575,
	0x6F6F6F6F,
	0x5F5F5F5F,
	0x48484848,
	0x92929292
};

/*
 * Default from Table 20: Reference values Register file:
 * 0x1E Transmit Power Control for Manual Transmit Power Control
 * Transmit Power Control values for 64 MHz, with DIS_STXP = 1
 */
const uint32_t dwt_txpwr_stxp1_64[] = {
	0x67676767,
	0x67676767,
	0x8B8B8B8B,
	0x9A9A9A9A,
	0x85858585,
	0xD1D1D1D1
};
/* Supported pulse repetition frequencies, used as array indices below */
enum dwt_pulse_repetition_frequency {
	DWT_PRF_16M = 0,
	DWT_PRF_64M,
	DWT_NUMOF_PRFS,
};

/* Defaults from Table 24: Sub-Register 0x23:04 AGC_TUNE1 values,
 * indexed by enum dwt_pulse_repetition_frequency.
 */
const uint16_t dwt_agc_tune1_defs[] = {
	DWT_AGC_TUNE1_16M,
	DWT_AGC_TUNE1_64M
};

/* Supported data rates, used as array indices below */
enum dwt_baud_rate {
	DWT_BR_110K = 0,
	DWT_BR_850K,
	DWT_BR_6M8,
	DWT_NUMOF_BRS,
};

/* Decawave non-standard SFD lengths, indexed by enum dwt_baud_rate */
const uint8_t dwt_ns_sfdlen[] = {
	DWT_DW_NS_SFD_LEN_110K,
	DWT_DW_NS_SFD_LEN_850K,
	DWT_DW_NS_SFD_LEN_6M8
};

/* Defaults from Table 30: Sub-Register 0x27:02 DRX_TUNE0b values,
 * indexed by [data rate][standard(0) / non-standard(1) SFD].
 */
const uint16_t dwt_tune0b_defs[DWT_NUMOF_BRS][2] = {
	{
		DWT_DRX_TUNE0b_110K_STD,
		DWT_DRX_TUNE0b_110K_NSTD
	},
	{
		DWT_DRX_TUNE0b_850K_STD,
		DWT_DRX_TUNE0b_850K_NSTD
	},
	{
		DWT_DRX_TUNE0b_6M8_STD,
		DWT_DRX_TUNE0b_6M8_NSTD
	}
};

/* Defaults from Table 31: Sub-Register 0x27:04 DRX_TUNE1a values,
 * indexed by enum dwt_pulse_repetition_frequency.
 */
const uint16_t dwt_tune1a_defs[] = {
	DWT_DRX_TUNE1a_PRF16,
	DWT_DRX_TUNE1a_PRF64
};

/* Preamble acquisition chunk sizes, used as array indices below */
enum dwt_acquisition_chunk_size {
	DWT_PAC8 = 0,
	DWT_PAC16,
	DWT_PAC32,
	DWT_PAC64,
	DWT_NUMOF_PACS,
};

/* Defaults from Table 33: Sub-Register 0x27:08 DRX_TUNE2 values,
 * indexed by [PRF][PAC size].
 */
const uint32_t dwt_tune2_defs[DWT_NUMOF_PRFS][DWT_NUMOF_PACS] = {
	{
		DWT_DRX_TUNE2_PRF16_PAC8,
		DWT_DRX_TUNE2_PRF16_PAC16,
		DWT_DRX_TUNE2_PRF16_PAC32,
		DWT_DRX_TUNE2_PRF16_PAC64
	},
	{
		DWT_DRX_TUNE2_PRF64_PAC8,
		DWT_DRX_TUNE2_PRF64_PAC16,
		DWT_DRX_TUNE2_PRF64_PAC32,
		DWT_DRX_TUNE2_PRF64_PAC64
	}
};
/*
 * Defaults from Table 51:
 * Sub-Register 0x2E:2804 LDE_REPC configurations for (850 kbps & 6.8 Mbps)
 *
 * For 110 kbps the values have to be divided by 8.
 *
 * Index 0 is an unused placeholder so the array can be indexed directly
 * by preamble code number (1-24).
 */
const uint16_t dwt_lde_repc_defs[] = {
	0,
	DWT_LDE_REPC_PCODE_1,
	DWT_LDE_REPC_PCODE_2,
	DWT_LDE_REPC_PCODE_3,
	DWT_LDE_REPC_PCODE_4,
	DWT_LDE_REPC_PCODE_5,
	DWT_LDE_REPC_PCODE_6,
	DWT_LDE_REPC_PCODE_7,
	DWT_LDE_REPC_PCODE_8,
	DWT_LDE_REPC_PCODE_9,
	DWT_LDE_REPC_PCODE_10,
	DWT_LDE_REPC_PCODE_11,
	DWT_LDE_REPC_PCODE_12,
	DWT_LDE_REPC_PCODE_13,
	DWT_LDE_REPC_PCODE_14,
	DWT_LDE_REPC_PCODE_15,
	DWT_LDE_REPC_PCODE_16,
	DWT_LDE_REPC_PCODE_17,
	DWT_LDE_REPC_PCODE_18,
	DWT_LDE_REPC_PCODE_19,
	DWT_LDE_REPC_PCODE_20,
	DWT_LDE_REPC_PCODE_21,
	DWT_LDE_REPC_PCODE_22,
	DWT_LDE_REPC_PCODE_23,
	DWT_LDE_REPC_PCODE_24
};

/* Preamble length indices for the dwt_plen_cfg table below */
enum dwt_plen_idx {
	DWT_PLEN_64 = 0,
	DWT_PLEN_128,
	DWT_PLEN_256,
	DWT_PLEN_512,
	DWT_PLEN_1024,
	DWT_PLEN_2048,
	DWT_PLEN_4096,
	DWT_NUM_OF_PLEN,
};

/*
 * Transmit Preamble Symbol Repetitions (TXPSR) and Preamble Extension (PE)
 * constants for TX_FCTRL - Transmit Frame Control register.
 * From Table 16: Preamble length selection
 * BIT(19) | BIT(18) | BIT(21) | BIT(20)
 *
 * Indexed by enum dwt_plen_idx.
 */
const uint32_t dwt_plen_cfg[] = {
	(0 | BIT(18) | 0 | 0),
	(0 | BIT(18) | 0 | BIT(20)),
	(0 | BIT(18) | BIT(21) | 0),
	(0 | BIT(18) | BIT(21) | BIT(20)),
	(BIT(19) | 0 | 0 | 0),
	(BIT(19) | 0 | BIT(21) | 0),
	(BIT(19) | BIT(18) | 0 | 0),
};
/*
* Noise Threshold Multiplier (default NTM is 13) and
* Peak Multiplier (default PMULT is 3).
*/
#define DWT_DEFAULT_LDE_CFG1 ((3 << 5) | 13)
/* From Table 50: Sub-Register 0x2E:1806 LDE_CFG2 values */
#define DWT_DEFAULT_LDE_CFG2_PRF64 0x0607
#define DWT_DEFAULT_LDE_CFG2_PRF16 0x1607
#define DWT_RX_SIG_PWR_A_CONST_PRF64 121.74
#define DWT_RX_SIG_PWR_A_CONST_PRF16 113.77
#define DWT_DEVICE_ID 0xDECA0130
#define DWT_SFDTOC_DEF 0x1041
#define DWT_OTP_LDOTUNE_ADDR 0x04
#define DWT_OTP_PARTID_ADDR 0x06
#define DWT_OTP_LOTID_ADDR 0x07
#define DWT_OTP_VBAT_ADDR 0x08
#define DWT_OTP_VTEMP_ADDR 0x09
#define DWT_OTP_XTRIM_ADDR 0x1E
#endif /* ZEPHYR_INCLUDE_DW1000_REGS_H_ */
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_dw1000_regs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 24,447 |
```unknown
menuconfig IEEE802154_DW1000
bool "Decawave DW1000 Driver support"
default y
depends on DT_HAS_DECAWAVE_DW1000_ENABLED
select SPI
if IEEE802154_DW1000
config IEEE802154_DW1000_SNIFF_ONT
int "SNIFF on time"
default 0
range 0 15
help
SNIFF on time in unit of PAC. The minimum on time is the duration
of two PACs. The SNIFF counter always adds 1 PAC unit to the on-time
count. The SNIFF_ONT value should be in range of 1-15.
Zero value disables SNIFF mode.
config IEEE802154_DW1000_SNIFF_OFFT
int "SNIFF off time"
default 16
range 1 $(UINT8_MAX)
help
SNIFF off time in unit of approximate 1 microsecond.
config IEEE802154_DW1000_INIT_PRIO
int "DW1000 initialization priority"
default 80
help
Set the initialization priority number. Do not mess with it unless
you know what you are doing. Beware that the DW1000 requires GPIO and SPI
to be ready first (and sometimes GPIO should be the very first, as SPI
might need it too). And of course it has to start before the net stack.
endif
``` | /content/code_sandbox/drivers/ieee802154/Kconfig.dw1000 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 303 |
```c
/* ieee802154_cc1200.c - TI CC1200 driver */
#define DT_DRV_COMPAT ti_cc1200
/*
*
*/
#define LOG_MODULE_NAME ieee802154_cc1200
#define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/debug/stack.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/sys/byteorder.h>
#include <string.h>
#include <zephyr/random/random.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/net/ieee802154_radio.h>
#include "ieee802154_cc1200.h"
#include "ieee802154_cc1200_rf.h"
/* ToDo: supporting 802.15.4g will require GPIO2
* used as CC1200_GPIO_SIG_RXFIFO_THR
*
* Note: GPIO3 is unused.
*/
#define CC1200_IOCFG3 CC1200_GPIO_SIG_MARC_2PIN_STATUS_0
#define CC1200_IOCFG2 CC1200_GPIO_SIG_MARC_2PIN_STATUS_1
#define CC1200_IOCFG0 CC1200_GPIO_SIG_PKT_SYNC_RXTX
/***********************
* Debugging functions *
**********************/
/* Log a human-readable description of the chip status byte (debug aid) */
static void cc1200_print_status(uint8_t status)
{
	switch (status) {
	case CC1200_STATUS_IDLE:
		LOG_DBG("Idling");
		break;
	case CC1200_STATUS_RX:
		LOG_DBG("Receiving");
		break;
	case CC1200_STATUS_TX:
		LOG_DBG("Transmitting");
		break;
	case CC1200_STATUS_FSTXON:
		LOG_DBG("FS TX on");
		break;
	case CC1200_STATUS_CALIBRATE:
		LOG_DBG("Calibrating");
		break;
	case CC1200_STATUS_SETTLING:
		LOG_DBG("Settling");
		break;
	case CC1200_STATUS_RX_FIFO_ERROR:
		LOG_DBG("RX FIFO error!");
		break;
	case CC1200_STATUS_TX_FIFO_ERROR:
		LOG_DBG("TX FIFO error!");
		break;
	default:
		/* Unknown/unlisted states are silently ignored, as before */
		break;
	}
}
/*********************
* Generic functions *
********************/
/**
 * Generic SPI register access for the CC1200.
 *
 * Builds a one- or two-byte command header (read/burst flags, plus the
 * extended-address prefix when @a extended is set) and performs the SPI
 * transaction for @a length bytes of payload.
 *
 * @param dev      CC1200 device
 * @param read     true to read from the chip, false to write
 * @param addr     register address (goes in the second command byte when
 *                 the extended register space is targeted)
 * @param data     payload buffer; may be NULL for a command-only write
 * @param length   number of payload bytes
 * @param extended true to access the extended register space
 * @param burst    true to use burst access
 *
 * @return true if the SPI transaction succeeded
 */
bool z_cc1200_access_reg(const struct device *dev, bool read, uint8_t addr,
			 void *data, size_t length, bool extended, bool burst)
{
	const struct cc1200_config *config = dev->config;
	uint8_t cmd_buf[2];
	const struct spi_buf buf[2] = {
		{
			.buf = cmd_buf,
			/* Extended access needs a second command byte */
			.len = extended ? 2 : 1,
		},
		{
			.buf = data,
			.len = length,
		}
	};
	struct spi_buf_set tx = { .buffers = buf };

	cmd_buf[0] = 0U;

	if (burst) {
		cmd_buf[0] |= CC1200_ACCESS_BURST;
	}

	if (extended) {
		cmd_buf[0] |= CC1200_REG_EXTENDED_ADDRESS;
		cmd_buf[1] = addr;
	} else {
		cmd_buf[0] |= addr;
	}

	if (read) {
		const struct spi_buf_set rx = {
			.buffers = buf,
			.count = 2
		};

		cmd_buf[0] |= CC1200_ACCESS_RD;

		/* Only the command header is transmitted; the payload
		 * buffer is filled by the full-duplex transceive.
		 */
		tx.count = 1;

		return (spi_transceive_dt(&config->bus, &tx, &rx) == 0);
	}

	/* CC1200_ACCESS_WR is 0 so no need to play with it */
	tx.count = data ? 2 : 1;

	return (spi_write_dt(&config->bus, &tx) == 0);
}
/**
 * Fill in and return the device's 8-byte MAC address.
 *
 * Bytes 0-3 are the fixed TI OUI prefix (00:12:4b:00); bytes 4-7 come
 * either from the RNG or from fixed Kconfig values.
 *
 * NOTE(review): the random path clears bit 0 and sets bit 1 of the LAST
 * byte - this looks like a locally-administered/unicast bit tweak, which
 * for an EUI-64 normally belongs in the FIRST byte; confirm intent.
 */
static inline uint8_t *get_mac(const struct device *dev)
{
	struct cc1200_context *cc1200 = dev->data;

#if defined(CONFIG_IEEE802154_CC1200_RANDOM_MAC)
	sys_rand_get(&cc1200->mac_addr[4], 4U);

	cc1200->mac_addr[7] = (cc1200->mac_addr[7] & ~0x01) | 0x02;
#else
	cc1200->mac_addr[4] = CONFIG_IEEE802154_CC1200_MAC4;
	cc1200->mac_addr[5] = CONFIG_IEEE802154_CC1200_MAC5;
	cc1200->mac_addr[6] = CONFIG_IEEE802154_CC1200_MAC6;
	cc1200->mac_addr[7] = CONFIG_IEEE802154_CC1200_MAC7;
#endif

	/* TI OUI prefix */
	cc1200->mac_addr[0] = 0x00;
	cc1200->mac_addr[1] = 0x12;
	cc1200->mac_addr[2] = 0x4b;
	cc1200->mac_addr[3] = 0x00;

	return cc1200->mac_addr;
}
/**
 * Read the chip status byte by issuing a SNOP instruction.
 *
 * @return the masked status field, or CC1200_STATUS_CHIP_NOT_READY if
 *         the SPI access itself failed
 */
static uint8_t get_status(const struct device *dev)
{
	uint8_t val;

	if (z_cc1200_access_reg(dev, true, CC1200_INS_SNOP,
				&val, 1, false, false)) {
		/* See Section 3.1.2 */
		return val & CC1200_STATUS_MASK;
	}

	/* We cannot get the status, so pessimistically report not-ready */
	return CC1200_STATUS_CHIP_NOT_READY;
}
/******************
* GPIO functions *
*****************/
/**
 * GPIO0 interrupt handler (configured as PKT_SYNC_RXTX).
 *
 * Two edges are expected per frame, in both directions:
 * - TX: the first edge records that transmission started (tx_start),
 *   the second clears the tx flag; each edge gives tx_sync so that
 *   cc1200_tx() can wait for SYNC-sent and then end-of-packet.
 * - RX: the first edge marks an incoming frame (rx set), the second
 *   marks its completion and wakes the RX thread via rx_lock.
 *
 * NOTE(review): edge semantics inferred from the tx/tx_start handshake
 * in cc1200_tx() - confirm against the datasheet's GPIO signal table.
 */
static inline void gpio0_int_handler(const struct device *port,
				     struct gpio_callback *cb, uint32_t pins)
{
	struct cc1200_context *cc1200 =
		CONTAINER_OF(cb, struct cc1200_context, rx_tx_cb);

	if (atomic_get(&cc1200->tx) == 1) {
		if (atomic_get(&cc1200->tx_start) == 0) {
			atomic_set(&cc1200->tx_start, 1);
		} else {
			atomic_set(&cc1200->tx, 0);
		}

		k_sem_give(&cc1200->tx_sync);
	} else {
		if (atomic_get(&cc1200->rx) == 1) {
			k_sem_give(&cc1200->rx_lock);
			atomic_set(&cc1200->rx, 0);
		} else {
			atomic_set(&cc1200->rx, 1);
		}
	}
}
/* Enable or disable the GPIO0 edge interrupt used for RX/TX signalling */
static void enable_gpio0_interrupt(const struct device *dev, bool enable)
{
	const struct cc1200_config *cfg = dev->config;

	if (enable) {
		gpio_pin_interrupt_configure_dt(&cfg->interrupt,
						GPIO_INT_EDGE_TO_ACTIVE);
	} else {
		gpio_pin_interrupt_configure_dt(&cfg->interrupt,
						GPIO_INT_DISABLE);
	}
}
/* Register the GPIO0 interrupt callback. Returns 0 on success, -EIO on
 * failure to add the callback.
 */
static int setup_gpio_callback(const struct device *dev)
{
	const struct cc1200_config *cfg = dev->config;
	struct cc1200_context *cc1200 = dev->data;
	int ret = 0;

	gpio_init_callback(&cc1200->rx_tx_cb, gpio0_int_handler,
			   BIT(cfg->interrupt.pin));

	if (gpio_add_callback(cfg->interrupt.port, &cc1200->rx_tx_cb) != 0) {
		ret = -EIO;
	}

	return ret;
}
/****************
* RF functions *
***************/
/* Compute the LO divider from the FS_CFG band-select field.
 * See Table 34: the divider is twice the band-select value.
 */
static uint8_t get_lo_divider(const struct device *dev)
{
	uint8_t fs_cfg = read_reg_fs_cfg(dev);

	return FSD_BANDSELECT(fs_cfg) << 1;
}
/* Program the 24-bit FREQ value, split MSB-first across the FREQ2/FREQ1/
 * FREQ0 extended registers, in a single burst write.
 */
static bool write_reg_freq(const struct device *dev, uint32_t freq)
{
	uint8_t freq_bytes[3] = {
		(uint8_t)(freq >> 16),
		(uint8_t)(freq >> 8),
		(uint8_t)freq,
	};

	return z_cc1200_access_reg(dev, false, CC1200_REG_FREQ2,
				   freq_bytes, 3, true, true);
}
/* See Section 9.12 - RF programming
*
 * The formula given in the datasheet cannot be applied directly here, as
 * the CPU limits us to unsigned 32-bit integers. Instead, "slicing" the
 * value into parts that fit within that limit is the solution applied below.
*
* The original formula being (freqoff is neglected):
* Freq = ( RF * Lo_Div * 2^16 ) / Xtal
*
* RF and Xtal are, from here, expressed in KHz.
*
* It first calculates the targeted RF with given ChanCenterFreq0, channel
* spacing and the channel number.
*
* The calculation will slice the targeted RF by multiple of 10:
* 10^n where n is in [5, 3]. The rest, below 1000, is taken at once.
* Let's take the 434000 KHz RF for instance:
* it will be "sliced" in 3 parts: 400000, 30000, 4000.
* Or the 169406 KHz RF, 4 parts: 100000, 60000, 9000, 406.
*
* This permits also to play with Xtal to keep the result big enough to avoid
* losing precision. A factor - growing as much as Xtal decrease - is then
* applied to get to the proper result. Which one is rounded to the nearest
* integer, again to get a bit better precision.
*
* In the end, this algorithm below works for all the supported bands by CC1200.
* User does not need to pass anything extra besides the nominal settings: no
* pre-computed part or else.
*/
/* Compute the 24-bit FREQ register value for the RF corresponding to
 * @a chan. See the long explanation above: the KHz value is sliced into
 * power-of-ten parts to stay within unsigned 32-bit arithmetic.
 */
static uint32_t rf_evaluate_freq_setting(const struct device *dev, uint32_t chan)
{
	struct cc1200_context *ctx = dev->data;
	uint32_t xtal = CONFIG_IEEE802154_CC1200_XOSC;
	uint32_t mult_10 = 100000U;
	uint32_t factor = 1U;
	uint32_t freq = 0U;
	uint32_t rf, lo_div;

	/* Target RF in KHz: base frequency plus the channel offset */
	rf = ctx->rf_settings->chan_center_freq0 +
	     ((chan * (uint32_t)ctx->rf_settings->channel_spacing) / 10U);

	lo_div = get_lo_divider(dev);

	LOG_DBG("Calculating freq for %u KHz RF (%u)", rf, lo_div);

	while (rf > 0) {
		uint32_t hz, freq_tmp, rst;

		/* Take the current power-of-ten slice; everything below
		 * 1000 KHz is consumed at once.
		 */
		if (rf < 1000) {
			hz = rf;
		} else {
			hz = rf / mult_10;
			hz *= mult_10;
		}

		/* Order multiply/divide to avoid 32-bit overflow */
		if (hz < 1000) {
			freq_tmp = (hz * lo_div * 65536U) / xtal;
		} else {
			freq_tmp = ((hz * lo_div) / xtal) * 65536U;
		}

		rst = freq_tmp % factor;
		freq_tmp /= factor;

		/* Round to the nearest integer using the remainder's
		 * leading digit
		 */
		if (factor > 1 && (rst/(factor/10U)) > 5) {
			freq_tmp++;
		}

		freq += freq_tmp;

		/* Move to the next (smaller) slice; xtal shrinks with it
		 * so precision is preserved, compensated through factor
		 */
		factor *= 10U;
		mult_10 /= 10U;
		xtal /= 10U;
		rf -= hz;
	}

	LOG_DBG("FREQ is 0x%06X", freq);

	return freq;
}
/**
 * Write a complete RF register set into the chip.
 *
 * Burst-writes the non-extended register space starting at SYNC3, then
 * the extended space starting at IF_MIX_CFG, and finally sets PKT_LEN to
 * its maximum (0xFF). On success the settings pointer is remembered in
 * the driver context for later frequency computation.
 *
 * @return true on success, false if any SPI write failed
 */
static bool
rf_install_settings(const struct device *dev,
		    const struct cc1200_rf_registers_set *rf_settings)
{
	struct cc1200_context *cc1200 = dev->data;

	if (!z_cc1200_access_reg(dev, false, CC1200_REG_SYNC3,
				 (void *)rf_settings->registers,
				 CC1200_RF_NON_EXT_SPACE_REGS, false, true) ||
	    !z_cc1200_access_reg(dev, false, CC1200_REG_IF_MIX_CFG,
				 (uint8_t *)rf_settings->registers
				 + CC1200_RF_NON_EXT_SPACE_REGS,
				 CC1200_RF_EXT_SPACE_REGS, true, true) ||
	    !write_reg_pkt_len(dev, 0xFF)) {
		LOG_ERR("Could not install RF settings");
		return false;
	}

	cc1200->rf_settings = rf_settings;

	return true;
}
/**
 * Calibrate the frequency synthesizer and return to RX.
 *
 * Issues SCAL and then - because SCAL shuts off the synthesizer - goes
 * through IDLE, flushes the RX FIFO and re-enters RX. The busy-waits
 * give the chip time to settle between steps.
 *
 * @return 0 on success, -EIO if any instruction strobe failed
 */
static int rf_calibrate(const struct device *dev)
{
	if (!instruct_scal(dev)) {
		LOG_ERR("Could not calibrate RF");
		return -EIO;
	}

	k_busy_wait(USEC_PER_MSEC * 5U);

	/* We need to re-enable RX as SCAL shuts off the freq synth */
	if (!instruct_sidle(dev) ||
	    !instruct_sfrx(dev) ||
	    !instruct_srx(dev)) {
		LOG_ERR("Could not switch to RX");
		return -EIO;
	}

	k_busy_wait(USEC_PER_MSEC * 10U);

	cc1200_print_status(get_status(dev));

	return 0;
}
/****************
* TX functions *
***************/
/* Burst-write @a length bytes into the TX FIFO; true on SPI success */
static inline bool write_txfifo(const struct device *dev,
				void *data, size_t length)
{
	return z_cc1200_access_reg(dev, false,
				   CC1200_REG_TXFIFO,
				   data, length, false, true);
}
/****************
* RX functions *
***************/
/* Burst-read @a length bytes from the RX FIFO; true on SPI success */
static inline bool read_rxfifo(const struct device *dev,
			       void *data, size_t length)
{
	return z_cc1200_access_reg(dev, true,
				   CC1200_REG_RXFIFO,
				   data, length, false, true);
}
/* Read the frame length byte (PHY header) from the RX FIFO.
 * Returns 0 if the SPI access failed.
 */
static inline uint8_t get_packet_length(const struct device *dev)
{
	uint8_t len;

	if (!z_cc1200_access_reg(dev, true, CC1200_REG_RXFIFO,
				 &len, 1, false, true)) {
		return 0;
	}

	return len;
}
/* Sanity check the announced frame length against the RX FIFO fill.
 * A frame must be at least 3 bytes long (the size of an ACK), and the
 * FIFO must not contain more than the announced length plus the FCS.
 */
static inline bool verify_rxfifo_validity(const struct device *dev,
					  uint8_t pkt_len)
{
	return (pkt_len >= 3) &&
	       (read_reg_num_rxbytes(dev) <= (pkt_len + CC1200_FCS_LEN));
}
/* Copy @a len bytes from the RX FIFO into @a buf and commit the length.
 * Fails if the SPI read fails or the chip reports an RX FIFO error.
 */
static inline bool read_rxfifo_content(const struct device *dev,
				       struct net_buf *buf, uint8_t len)
{
	if (!read_rxfifo(dev, buf->data, len)) {
		return false;
	}

	if (get_status(dev) == CC1200_STATUS_RX_FIFO_ERROR) {
		return false;
	}

	net_buf_add(buf, len);

	return true;
}
/**
 * Read and check the two status bytes appended to each received frame.
 *
 * Byte 0 is the RSSI; byte 1 carries the CRC_OK flag and the LQI field.
 * On success, RSSI (dBm) and LQI are stored into the packet.
 *
 * @return true if the frame's CRC is valid
 */
static inline bool verify_crc(const struct device *dev, struct net_pkt *pkt)
{
	uint8_t status[2];
	int8_t rssi;

	if (!read_rxfifo(dev, status, 2)) {
		return false;
	}

	if (!(status[1] & CC1200_FCS_CRC_OK)) {
		return false;
	}

	rssi = (int8_t) status[0];

	/* The chip's "invalid RSSI" marker maps to "undefined" upstream */
	net_pkt_set_ieee802154_rssi_dbm(
		pkt, rssi == CC1200_INVALID_RSSI ? IEEE802154_MAC_RSSI_DBM_UNDEFINED : rssi);

	net_pkt_set_ieee802154_lqi(pkt, status[1] & CC1200_FCS_LQI_MASK);

	return true;
}
/**
 * RX thread entry point.
 *
 * Blocks on rx_lock (given by the GPIO0 ISR at end-of-frame), then reads
 * the frame from the RX FIFO, validates it, and hands it to the network
 * stack. On any FIFO error or invalid frame, the FIFO is flushed and RX
 * re-entered; on other failures the allocated packet is released.
 *
 * @param p1 the CC1200 device; p2/p3 unused
 */
static void cc1200_rx(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	const struct device *dev = p1;
	struct cc1200_context *cc1200 = dev->data;
	struct net_pkt *pkt;
	uint8_t pkt_len;

	while (1) {
		pkt = NULL;

		/* Wait for the ISR to signal a completed frame */
		k_sem_take(&cc1200->rx_lock, K_FOREVER);

		if (get_status(dev) == CC1200_STATUS_RX_FIFO_ERROR) {
			LOG_ERR("Fifo error");
			goto flush;
		}

		pkt_len = get_packet_length(dev);

		if (!verify_rxfifo_validity(dev, pkt_len)) {
			LOG_ERR("Invalid frame");
			goto flush;
		}

		pkt = net_pkt_rx_alloc_with_buffer(cc1200->iface, pkt_len,
						   AF_UNSPEC, 0, K_NO_WAIT);
		if (!pkt) {
			LOG_ERR("No free pkt available");
			goto flush;
		}

		if (!read_rxfifo_content(dev, pkt->buffer, pkt_len)) {
			LOG_ERR("No content read");
			goto flush;
		}

		if (!verify_crc(dev, pkt)) {
			LOG_ERR("Bad packet CRC");
			goto out;
		}

		if (ieee802154_handle_ack(cc1200->iface, pkt) == NET_OK) {
			LOG_DBG("ACK packet handled");
			goto out;
		}

		LOG_DBG("Caught a packet (%u)", pkt_len);

		/* net_recv_data() takes ownership of pkt on success */
		if (net_recv_data(cc1200->iface, pkt) < 0) {
			LOG_DBG("Packet dropped by NET stack");
			goto out;
		}

		log_stack_usage(&cc1200->rx_thread);
		continue;
flush:
		/* Recover from FIFO errors: idle, flush RX FIFO, re-arm RX */
		LOG_DBG("Flushing RX");
		instruct_sidle(dev);
		instruct_sfrx(dev);
		instruct_srx(dev);
out:
		if (pkt) {
			net_pkt_unref(pkt);
		}
	}
}
/********************
* Radio device API *
*******************/
/* Radio API: the hardware only offers FCS generation/checking */
static enum ieee802154_hw_caps cc1200_get_capabilities(const struct device *dev)
{
	return IEEE802154_HW_FCS;
}
/* Radio API: clear channel assessment.
 * The channel is reported clear (0) only when no reception is in
 * progress and the chip reports a valid, negative carrier sense.
 * Returns -EBUSY otherwise.
 */
static int cc1200_cca(const struct device *dev)
{
	struct cc1200_context *cc1200 = dev->data;

	if (atomic_get(&cc1200->rx) == 0) {
		uint8_t rssi0 = read_reg_rssi0(dev);

		if ((rssi0 & CARRIER_SENSE_VALID) &&
		    !(rssi0 & CARRIER_SENSE)) {
			return 0;
		}
	}

	LOG_WRN("Busy");

	return -EBUSY;
}
/**
 * Radio API: tune the radio to the given channel.
 *
 * @return 0 on success, -ENOTSUP for an out-of-range channel, -EIO if a
 *         reception is in progress or the frequency write/calibration
 *         failed
 */
static int cc1200_set_channel(const struct device *dev, uint16_t channel)
{
	struct cc1200_context *cc1200 = dev->data;
	uint32_t freq;

	/* As SUN FSK provides a host of configurations with extremely different
	 * channel counts it doesn't make sense to validate (aka -EINVAL) a
	 * global upper limit on the number of supported channels on this page.
	 */
	if (channel > IEEE802154_CC1200_CHANNEL_LIMIT) {
		return -ENOTSUP;
	}

	/* Unlike usual 15.4 chips, cc1200 is closer to a bare metal radio modem
	 * and thus does not provide any means to select a channel directly, but
	 * requires instead that one calculates and configures the actual
	 * targeted frequency for the requested channel.
	 *
	 * See rf_evaluate_freq_setting() above.
	 */
	if (atomic_get(&cc1200->rx) != 0) {
		return -EIO;
	}

	freq = rf_evaluate_freq_setting(dev, channel);

	if (!write_reg_freq(dev, freq) ||
	    rf_calibrate(dev)) {
		LOG_ERR("Could not set channel %u", channel);
		return -EIO;
	}

	return 0;
}
/**
 * Radio API: set the TX output power.
 *
 * Per Section 7.1, the PA_POWER_RAMP register value is computed as
 * 2 * (dbm + 18) - 1 and must land strictly between 3 and 64; requests
 * outside that window are rejected with -EINVAL.
 *
 * @return 0 on success, -EINVAL for an out-of-range power, -EIO on a
 *         register access failure
 */
static int cc1200_set_txpower(const struct device *dev, int16_t dbm)
{
	uint8_t pa_power_ramp;

	LOG_DBG("%d dbm", dbm);

	/* See Section 7.1 */
	dbm = ((dbm + 18) * 2) - 1;
	if ((dbm <= 3) || (dbm >= 64)) {
		LOG_ERR("Unhandled value");
		return -EINVAL;
	}

	/* Read-modify-write only the power ramp field */
	pa_power_ramp = read_reg_pa_cfg1(dev) & ~PA_POWER_RAMP_MASK;
	pa_power_ramp |= ((uint8_t) dbm) & PA_POWER_RAMP_MASK;

	if (!write_reg_pa_cfg1(dev, pa_power_ramp)) {
		LOG_ERR("Could not proceed");
		return -EIO;
	}

	return 0;
}
/**
 * Radio API: transmit one frame.
 *
 * Flushes both FIFOs, loads the PHY length byte plus the frame into the
 * TX FIFO, strobes STX, and then waits on tx_sync twice: once for the
 * SYNC word going out (tx_start set by the GPIO0 ISR) and once for the
 * end of the packet. Success is judged afterwards by the tx flag being
 * cleared and the TX FIFO being empty.
 *
 * NOTE(review): the k_sem_take() timeout results are not checked here;
 * a timeout falls through to the failure check below - confirm this is
 * the intended error path.
 *
 * @return 0 on success, -ENOTSUP for unsupported TX modes, -EIO on failure
 */
static int cc1200_tx(const struct device *dev,
		     enum ieee802154_tx_mode mode,
		     struct net_pkt *pkt,
		     struct net_buf *frag)
{
	struct cc1200_context *cc1200 = dev->data;
	uint8_t *frame = frag->data;
	uint8_t len = frag->len;
	bool status = false;

	if (mode != IEEE802154_TX_MODE_DIRECT) {
		NET_ERR("TX mode %d not supported", mode);
		return -ENOTSUP;
	}

	LOG_DBG("%p (%u)", frag, len);

	/* ToDo:
	 * Supporting 802.15.4g will require to loop in pkt's frags
	 * depending on len value, this will also take more time.
	 */

	/* Get to a clean idle state with empty FIFOs, synth running */
	if (!instruct_sidle(dev) ||
	    !instruct_sfrx(dev) ||
	    !instruct_sftx(dev) ||
	    !instruct_sfstxon(dev)) {
		LOG_ERR("Cannot switch to TX mode");
		goto out;
	}

	/* PHY header (length byte) first, then the frame itself */
	if (!write_txfifo(dev, &len, CC1200_PHY_HDR_LEN) ||
	    !write_txfifo(dev, frame, len) ||
	    read_reg_num_txbytes(dev) != (len + CC1200_PHY_HDR_LEN)) {
		LOG_ERR("Cannot fill-in TX fifo");
		goto out;
	}

	atomic_set(&cc1200->tx, 1);
	atomic_set(&cc1200->tx_start, 0);

	if (!instruct_stx(dev)) {
		LOG_ERR("Cannot start transmission");
		goto out;
	}

	/* Wait for SYNC to be sent */
	k_sem_take(&cc1200->tx_sync, K_MSEC(100));
	if (atomic_get(&cc1200->tx_start) == 1) {
		/* Now wait for the packet to be fully sent */
		k_sem_take(&cc1200->tx_sync, K_MSEC(100));
	}

out:
	cc1200_print_status(get_status(dev));

	/* tx still set with bytes left in the FIFO means the frame never
	 * completed: flush the TX FIFO and report failure.
	 */
	if (atomic_get(&cc1200->tx) == 1 &&
	    read_reg_num_txbytes(dev) != 0) {
		LOG_ERR("TX Failed");

		atomic_set(&cc1200->tx_start, 0);
		instruct_sftx(dev);
		status = false;
	} else {
		status = true;
	}

	atomic_set(&cc1200->tx, 0);

	/* Get back to RX */
	instruct_srx(dev);

	return status ? 0 : -EIO;
}
/**
 * Radio API: start the radio.
 *
 * Idles the chip, flushes both FIFOs, calibrates (which also enters RX),
 * and enables the GPIO0 interrupt.
 *
 * @return 0 on success, -EIO on failure
 */
static int cc1200_start(const struct device *dev)
{
	if (!instruct_sidle(dev) ||
	    !instruct_sftx(dev) ||
	    !instruct_sfrx(dev) ||
	    rf_calibrate(dev)) {
		LOG_ERR("Could not proceed");
		return -EIO;
	}

	enable_gpio0_interrupt(dev, true);

	cc1200_print_status(get_status(dev));

	return 0;
}
/* Radio API: stop the radio. Disables the GPIO0 interrupt first, then
 * powers the chip down. Returns 0 on success, -EIO on failure.
 */
static int cc1200_stop(const struct device *dev)
{
	int ret = 0;

	enable_gpio0_interrupt(dev, false);

	if (!instruct_spwd(dev)) {
		LOG_ERR("Could not proceed");
		ret = -EIO;
	}

	return ret;
}
/* driver-allocated attribute memory - constant across all driver instances as
* this driver's channel range is configured via a global KConfig setting.
*/
IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 0, IEEE802154_CC1200_CHANNEL_LIMIT);
/* Radio API: answer channel page / channel range attribute queries from
 * the statically defined drv_attr above.
 */
static int cc1200_attr_get(const struct device *dev, enum ieee802154_attr attr,
			   struct ieee802154_attr_value *value)
{
	ARG_UNUSED(dev);

	return ieee802154_attr_get_channel_page_and_range(
		attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_NINE_SUN_PREDEFINED,
		&drv_attr.phy_supported_channels, value);
}
/******************
* Initialization *
*****************/
/**
 * Reset the chip, install the RF settings, route the GPIO signals,
 * register the interrupt callback and run an initial calibration.
 *
 * @return 0 on success, -EIO (or rf_calibrate()'s error) on failure
 */
static int power_on_and_setup(const struct device *dev)
{
	if (!instruct_sres(dev)) {
		LOG_ERR("Cannot reset");
		return -EIO;
	}

	if (!rf_install_settings(dev, &cc1200_rf_settings)) {
		return -EIO;
	}

	if (!write_reg_iocfg3(dev, CC1200_IOCFG3) ||
	    !write_reg_iocfg2(dev, CC1200_IOCFG2) ||
	    !write_reg_iocfg0(dev, CC1200_IOCFG0)) {
		LOG_ERR("Cannot configure GPIOs");
		return -EIO;
	}

	if (setup_gpio_callback(dev) != 0) {
		return -EIO;
	}

	return rf_calibrate(dev);
}
/**
 * Device init: reset driver state, check and configure the GPIO/SPI
 * resources, bring the chip up and spawn the RX thread.
 *
 * @return 0 on success, -ENODEV if a bus/port is not ready, -EIO on
 *         configuration failure
 */
static int cc1200_init(const struct device *dev)
{
	const struct cc1200_config *config = dev->config;
	struct cc1200_context *cc1200 = dev->data;

	atomic_set(&cc1200->tx, 0);
	atomic_set(&cc1200->tx_start, 0);
	atomic_set(&cc1200->rx, 0);
	k_sem_init(&cc1200->rx_lock, 0, 1);
	k_sem_init(&cc1200->tx_sync, 0, 1);

	/* Configure GPIOs */
	if (!gpio_is_ready_dt(&config->interrupt)) {
		LOG_ERR("GPIO port %s is not ready",
			config->interrupt.port->name);
		return -ENODEV;
	}

	/* Fix: the result used to be ignored - a failed pin configuration
	 * would leave the interrupt line dead while init reported success.
	 */
	if (gpio_pin_configure_dt(&config->interrupt, GPIO_INPUT) != 0) {
		LOG_ERR("Could not configure interrupt GPIO");
		return -EIO;
	}

	if (!spi_is_ready_dt(&config->bus)) {
		LOG_ERR("SPI bus %s is not ready", config->bus.bus->name);
		return -ENODEV;
	}

	LOG_DBG("GPIO and SPI configured");

	if (power_on_and_setup(dev) != 0) {
		LOG_ERR("Configuring CC1200 failed");
		return -EIO;
	}

	k_thread_create(&cc1200->rx_thread, cc1200->rx_stack,
			CONFIG_IEEE802154_CC1200_RX_STACK_SIZE,
			cc1200_rx,
			(void *)dev, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT);
	k_thread_name_set(&cc1200->rx_thread, "cc1200_rx");

	LOG_INF("CC1200 initialized");

	return 0;
}
/* L2 interface init: assign the EUI-64 link address and bind the
 * interface to the driver context.
 */
static void cc1200_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct cc1200_context *cc1200 = dev->data;
	uint8_t *mac = get_mac(dev);

	LOG_DBG("");

	net_if_set_link_addr(iface, mac, 8, NET_LINK_IEEE802154);

	cc1200->iface = iface;

	ieee802154_init(iface);
}
/* Per-instance bus/interrupt configuration from devicetree */
static const struct cc1200_config cc1200_config = {
	.bus = SPI_DT_SPEC_INST_GET(0, SPI_WORD_SET(8), 0),
	.interrupt = GPIO_DT_SPEC_INST_GET(0, int_gpios)
};

/* Mutable driver state (zero-initialized; set up in cc1200_init()) */
static struct cc1200_context cc1200_context_data;

/* IEEE 802.15.4 radio API vtable */
static const struct ieee802154_radio_api cc1200_radio_api = {
	.iface_api.init	= cc1200_iface_init,

	.get_capabilities	= cc1200_get_capabilities,
	.cca			= cc1200_cca,
	.set_channel		= cc1200_set_channel,
	.set_txpower		= cc1200_set_txpower,
	.tx			= cc1200_tx,
	.start			= cc1200_start,
	.stop			= cc1200_stop,
	.attr_get		= cc1200_attr_get,
};

/* Register the driver as an IEEE 802.15.4 network device (MTU 125) */
NET_DEVICE_DT_INST_DEFINE(0, cc1200_init, NULL, &cc1200_context_data,
			  &cc1200_config, CONFIG_IEEE802154_CC1200_INIT_PRIO,
			  &cc1200_radio_api, IEEE802154_L2,
			  NET_L2_GET_CTX_TYPE(IEEE802154_L2), 125);
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_cc1200.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,961 |
```unknown
# NXP KW41Z configuration options
menuconfig IEEE802154_KW41Z
bool "NXP KW41Z Driver support"
default y
depends on DT_HAS_NXP_KW41Z_IEEE802154_ENABLED
select REQUIRES_FULL_LIBC
if IEEE802154_KW41Z
config IEEE802154_KW41Z_INIT_PRIO
int "KW41Z initialization priority"
default 80
help
Set the initialization priority number. Do not change it unless
you know what you are doing. It has to start before the net stack.
config KW41_DBG_TRACE
bool "Simplified debug tracing of events"
help
The value depends on your debugging needs. This generates an encoded
trace of events without going to debug logging to avoid timing impact
on running code. The buffer is post analyzed via the debugger.
endif
``` | /content/code_sandbox/drivers/ieee802154/Kconfig.kw41z | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 183 |
```objective-c
/* ieee802154_uart_pipe.h - Private header for UART PIPE fake radio driver */
/*
*
*/
#ifndef ZEPHYR_DRIVERS_IEEE802154_IEEE802154_UART_PIPE_H_
#define ZEPHYR_DRIVERS_IEEE802154_IEEE802154_UART_PIPE_H_
#define UART_PIPE_RADIO_15_4_FRAME_TYPE 0xF0
/* Driver state for the UART-pipe fake 802.15.4 radio */
struct upipe_context {
	struct net_if *iface;		/* bound network interface */
	uint8_t mac_addr[8];		/* EUI-64 link address */
	bool stopped;			/* radio "off" flag */
	/** RX specific attributes */
	uint8_t uart_pipe_buf[1];	/* single-byte UART pipe RX buffer */
	bool rx;			/* presumably: frame reception in
					 * progress - confirm in the .c file
					 */
	uint8_t rx_len;			/* presumably: announced frame length */
	uint8_t rx_off;			/* presumably: bytes received so far */
	uint8_t rx_buf[IEEE802154_MAX_PHY_PACKET_SIZE];	/* frame assembly buffer */
};
#endif /* ZEPHYR_DRIVERS_IEEE802154_IEEE802154_UART_PIPE_H_ */
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_uart_pipe.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 163 |
```objective-c
/* ieee802154_cc2520_regs.h - Registers definition for TI CC2520 */
/*
*
*/
#ifndef ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC2520_REGS_H_
#define ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC2520_REGS_H_
/* Instructions (see chapter 13) */
#define CC2520_INS_SNOP (0x00)
#define CC2520_INS_IBUFLD (0x02)
#define CC2520_INS_SIBUFEX (0x03)
#define CC2520_INS_SSAMPLECCA (0x04)
#define CC2520_INS_SRES (0x0F)
#define CC2520_INS_MEMRD (0x10)
#define CC2520_INS_MEMWR (0x20)
#define CC2520_INS_RXBUF (0x30)
#define CC2520_INS_RXBUFCP (0x38)
#define CC2520_INS_RXBUFMOV (0x32)
#define CC2520_INS_TXBUF (0x3A)
#define CC2520_INS_TXBUFCP (0x3E)
#define CC2520_INS_RANDOM (0x3C)
#define CC2520_INS_SXOSCON (0x40)
#define CC2520_INS_STXCAL (0x41)
#define CC2520_INS_SRXON (0x42)
#define CC2520_INS_STXON (0x43)
#define CC2520_INS_STXONCCA (0x44)
#define CC2520_INS_SRFOFF (0x45)
#define CC2520_INS_SXOSCOFF (0x46)
#define CC2520_INS_SFLUSHRX (0x47)
#define CC2520_INS_SFLUSHTX (0x48)
#define CC2520_INS_SACK (0x49)
#define CC2520_INS_SACKPEND (0x4A)
#define CC2520_INS_SNACK (0x4B)
#define CC2520_INS_SRXMASKBITSET (0x4C)
#define CC2520_INS_SRXMASKBITCLR (0x4D)
#define CC2520_INS_RXMASKAND (0x4E)
#define CC2520_INS_RXMASKOR (0x4F)
#define CC2520_INS_MEMCP (0x50)
#define CC2520_INS_MEMCPR (0x52)
#define CC2520_INS_MEMXCP (0x54)
#define CC2520_INS_MEMXWR (0x56)
#define CC2520_INS_BCLR (0x58)
#define CC2520_INS_BSET (0x59)
#define CC2520_INS_CTR_UCTR (0x60)
#define CC2520_INS_CBCMAC (0x64)
#define CC2520_INS_UCBCMAC (0x66)
#define CC2520_INS_CCM (0x68)
#define CC2520_INS_UCCM (0x6A)
#define CC2520_INS_ECB (0x70)
#define CC2520_INS_ECBO (0x72)
#define CC2520_INS_ECBX (0x74)
#define CC2520_INS_INC (0x78)
#define CC2520_INS_ABORT (0x7F)
#define CC2520_INS_REGRD (0x80)
#define CC2520_INS_REGWR (0xC0)
/* FREG registers (see chapter 28 part 3) */
#define CC2520_FREG_FRMFILT0 (0x00)
#define FRMFILT0_FRAME_FILTER_EN BIT(0)
#define FRMFILT0_PAN_COORDINATOR BIT(1)
#define FRMFILT0_MAX_FRAME_VERSION(_mfv_) (_mfv_ << 2)
#define CC2520_FREG_FRMFILT1 (0x01)
#define FRMFILT1_ACCEPT_FT_0_BEACON BIT(3)
#define FRMFILT1_ACCEPT_FT_1_DATA BIT(4)
#define FRMFILT1_ACCEPT_FT_2_ACK BIT(5)
#define FRMFILT1_ACCEPT_FT_3_MAC_CMD BIT(6)
#define FRMFILT1_ACCEPT_ALL (FRMFILT1_ACCEPT_FT_0_BEACON | \
FRMFILT1_ACCEPT_FT_1_DATA | \
FRMFILT1_ACCEPT_FT_2_ACK | \
FRMFILT1_ACCEPT_FT_3_MAC_CMD)
#define CC2520_FREG_SRCMATCH (0x02)
#define SRCMATCH_SRC_MATCH_EN BIT(0)
#define SRCMATCH_AUTOPEND BIT(1)
#define SRCMATCH_PEND_DATAREQ_ONLY BIT(2)
#define SRCMATCH_DEFAULTS (SRCMATCH_SRC_MATCH_EN | \
SRCMATCH_AUTOPEND | \
SRCMATCH_PEND_DATAREQ_ONLY)
#define CC2520_FREG_SRCSHORTEN0 (0x04)
#define CC2520_FREG_SRCSHORTEN1 (0x05)
#define CC2520_FREG_SRCSHORTEN2 (0x06)
#define CC2520_FREG_SRCEXTEN0 (0x08)
#define CC2520_FREG_SRCEXTEN1 (0x09)
#define CC2520_FREG_SRCEXTEN2 (0x0A)
#define CC2520_FREG_FRMCTRL0 (0x0C)
#define FRMCTRL0_ENERGY_SCAN BIT(4)
#define FRMCTRL0_AUTOACK BIT(5)
#define FRMCTRL0_AUTOCRC BIT(6)
#define FRMCTRL0_APPEND_DATA_MODE BIT(7)
#define CC2520_FREG_FRMCTRL1 (0x0D)
#define FRMCTRL1_SET_RXENMASK_ON_TX BIT(0)
#define FRMCTRL1_IGNORE_TX_UNDERF BIT(1)
#define FRMCTRL1_PENDING_OR BIT(2)
#define CC2520_FREG_RXENABLE0 (0x0E)
#define CC2520_FREG_RXENABLE1 (0x0F)
#define CC2520_FREG_EXCFLAG0 (0x10)
#define EXCFLAG0_RF_IDLE BIT(0)
#define EXCFLAG0_TX_FRM_DONE BIT(1)
#define EXCFLAG0_TX_ACK_DONE BIT(2)
#define EXCFLAG0_TX_UNDERFLOW BIT(3)
#define EXCFLAG0_TX_OVERFLOW BIT(4)
#define EXCFLAG0_RX_UNDERFLOW BIT(5)
#define EXCFLAG0_RX_OVERFLOW BIT(6)
#define EXCFLAG0_RXENABLE_ZERO BIT(7)
#define EXCFLAG0_RESET_TX_FLAGS ((uint8_t) \
~(EXCFLAG0_TX_FRM_DONE | \
EXCFLAG0_TX_ACK_DONE | \
EXCFLAG0_TX_UNDERFLOW | \
EXCFLAG0_TX_OVERFLOW))
#define EXCFLAG0_RESET_RX_FLAGS ((uint8_t) \
~(EXCFLAG0_RX_UNDERFLOW | \
EXCFLAG0_RX_OVERFLOW | \
EXCFLAG0_RXENABLE_ZERO))
#define CC2520_FREG_EXCFLAG1 (0x11)
#define EXCFLAG1_RX_FRM_DONE BIT(0)
#define EXCFLAG1_RX_FRM_ACCEPTED BIT(1)
#define EXCFLAG1_SRC_MATCH_DONE BIT(2)
#define EXCFLAG1_SRC_MATCH_FOUND BIT(3)
#define EXCFLAG1_FIFOP BIT(4)
#define EXCFLAG1_SFD BIT(5)
#define EXCFLAG1_DPU_DONE_L BIT(6)
#define EXCFLAG1_DPU_DONE_H BIT(7)
#define EXCFLAG1_RESET_RX_FLAGS ((uint8_t) \
~(EXCFLAG1_RX_FRM_DONE | \
EXCFLAG1_RX_FRM_ACCEPTED | \
EXCFLAG1_FIFOP))
#define CC2520_FREG_EXCFLAG2 (0x12)
#define EXCFLAG2_MEMADDR_ERROR BIT(0)
#define EXCFLAG2_USAGE_ERROR BIT(1)
#define EXCFLAG2_OPERAND_ERROR BIT(2)
#define EXCFLAG2_SPI_ERROR BIT(3)
#define EXCFLAG2_RF_NO_LOCK BIT(4)
#define EXCFLAG2_RX_FRM_ABORTED BIT(5)
#define EXCFLAG2_RFBUFMOV_TIMEOUT BIT(6)
#define EXCFLAG2_UNUSED BIT(7)
#define CC2520_FREG_EXMASKA0 (0x13)
#define CC2520_FREG_EXMASKA1 (0x14)
#define CC2520_FREG_EXMASKA2 (0x15)
#define CC2520_FREG_EXMASKB0 (0x18)
#define CC2520_FREG_EXMASKB1 (0x19)
#define CC2520_FREG_EXMASKB2 (0x1A)
#define CC2520_FREG_EXCBINDX0 (0x1C)
#define CC2520_FREG_EXCBINDX1 (0x1D)
#define CC2520_FREG_EXCBINDY0 (0x1E)
#define CC2520_FREG_EXCBINDY1 (0x1F)
#define CC2520_FREG_GPIOCTRL0 (0x20)
#define GPIOCTRL0_CTRL0(_ctrl_) (_ctrl_ & 0x7F)
#define GPIOCTRL0_IN0 BIT(7)
#define CC2520_FREG_GPIOCTRL1 (0x21)
#define GPIOCTRL1_CTRL1(_ctrl_) (_ctrl_ & 0x7F)
#define GPIOCTRL1_IN1 BIT(7)
#define CC2520_FREG_GPIOCTRL2 (0x22)
#define GPIOCTRL2_CTRL2(_ctrl_) (_ctrl_ & 0x7F)
#define GPIOCTRL2_IN2 BIT(7)
#define CC2520_FREG_GPIOCTRL3 (0x23)
#define GPIOCTRL3_CTRL3(_ctrl_) (_ctrl_ & 0x7F)
#define GPIOCTRL3_IN3 BIT(7)
#define CC2520_FREG_GPIOCTRL4 (0x24)
#define GPIOCTRL4_CTRL4(_ctrl_) (_ctrl_ & 0x7F)
#define GPIOCTRL4_IN4 BIT(7)
#define CC2520_FREG_GPIOCTRL5 (0x25)
#define GPIOCTRL5_CTRL5(_ctrl_) (_ctrl_ & 0x7F)
#define GPIOCTRL5_IN5 BIT(7)
#define CC2520_FREG_GPIOPOLARITY (0x26)
#define GPIOPOLARITY_POLARITY0 BIT(0)
#define GPIOPOLARITY_POLARITY1 BIT(1)
#define GPIOPOLARITY_POLARITY2 BIT(2)
#define GPIOPOLARITY_POLARITY3 BIT(3)
#define GPIOPOLARITY_POLARITY4 BIT(4)
#define GPIOPOLARITY_POLARITY5 BIT(5)
#define CC2520_FREG_GPIOCTRL (0x28)
#define GPIOCTRL_GPIO_PUE0 BIT(0)
#define GPIOCTRL_GPIO_PUE1 BIT(1)
#define GPIOCTRL_GPIO_PUE2 BIT(2)
#define GPIOCTRL_GPIO_PUE3 BIT(3)
#define GPIOCTRL_GPIO_PUE4 BIT(4)
#define GPIOCTRL_GPIO_PUE5 BIT(5)
#define GPIOCTRL_GPIO_ACTRL BIT(6)
#define GPIOCTRL_SC BIT(7)
#define CC2520_FREG_DPUCON (0x2A)
#define DPUCON_RXTIMEOUT BIT(0)
#define CC2520_FREG_DPUSTAT (0x2C)
#define DPUSTAT_DPUL_ACTIVE BIT(0)
#define DPUSTAT_DPUH_ACTIVE BIT(1)
#define DPUSTAT_AUTHSTAT_L BIT(2)
#define DPUSTAT_AUTHSTAT_H BIT(3)
#define CC2520_FREG_FREQCTRL (0x2E)
#define FREQCTRL_FREQ(_f_) (_f_ & 0x7F)
#define CC2520_FREG_FREQTUNE (0x2F)
#define FREQTUNE_XOSC32M_TUNE(_xt_) (_xt_ & 0x07)
#define CC2520_FREG_TXPOWER (0x30)
#define CC2520_FREG_TXCTRL (0x31)
#define CC2520_FREG_FSMSTAT0 (0x32)
#define FSMSTAT0_CAL_RUNNING BIT(6)
#define FSMSTAT0_CAL_DONE BIT(7)
#define CC2520_FREG_FSMSTAT1 (0x33)
#define FSMSTAT1_RX_ACTIVE BIT(0)
#define FSMSTAT1_TX_ACTIVE BIT(1)
#define FSMSTAT1_LOCK_STATUS BIT(2)
#define FSMSTAT1_CCA_SAMPLED BIT(3)
#define FSMSTAT1_CCA BIT(4)
#define FSMSTAT1_SFD BIT(5)
#define FSMSTAT1_FIFOP BIT(6)
#define FSMSTAT1_FIFO BIT(7)
#define CC2520_FREG_FIFOPCTRL (0x34)
#define FIFOPCTRL_FIFOP_THR(_ft_) (_ft_ & 0x7F)
#define CC2520_FREG_FSMCTRL (0x35)
#define FSMCTRL_RX2RX_TIME_OFF BIT(0)
#define FSMCTRL_SLOTTED_ACK BIT(1)
#define CC2520_FREG_CCACTRL0 (0x36)
#define CC2520_FREG_CCACTRL1 (0x37)
#define CCACTRL1_CCA_HYST(_ch_) (_ch_ & 0x07)
#define CCACTRL1_CCA_MODE(_cm_) ((_cm_ << 3) & 0x18)
#define CC2520_FREG_RSSI (0x38)
#define RSSI_RSSI_VALID BIT(0)
#define CC2520_FREG_RSSISTAT (0x39)
#define CC2520_FREG_RXFIRST (0x3C)
#define CC2520_FREG_RXFIFOCNT (0x3E)
#define CC2520_FREG_TXFIFOCNT (0x3F)
/* SREG registers (see chapter 28 part 3) */
#define CC2520_SREG_CHIPID (0x40)
#define CC2520_SREG_VERSION (0x42)
#define CC2520_SREG_EXTCLOCK (0x44)
#define EXTCLOCK_EXT_FREQ(_ef_) (_ef_ & 0x1F)
#define EXTCLOCK_EXTCLOCK_EN BIT(5)
#define CC2520_SREG_MDMCTRL0 (0x46)
#define MDMCTRL0_TX_FILTER BIT(0)
#define MDMCTRL0_PREAMBLE_LENGTH(_pl_) ((_pl_ << 1) & 0x1E)
#define MDMCTRL0_DEMOD_AVG_MODE BIT(5)
#define MDMCTRL0_DEM_NUM_ZEROS(_dnz_) ((_dnz_ << 6) & 0xC0)
#define CC2520_SREG_MDMCTRL1 (0x47)
#define MDMCTRL1_CORR_THR(_ct_) (_ct_ & 0x1F)
#define MDMCTRL1_CORR_THR_SFD BIT(5)
#define CC2520_SREG_FREQEST (0x48)
#define CC2520_SREG_MDMTEST1 (0x49)
#define MDMTEST1_MODULATION_MODE BIT(1)
#define MDMTEST1_RFC_SNIFF_EN BIT(2)
#define CC2520_SREG_RXCTRL (0x4A)
#define CC2520_SREG_FSCTRL (0x4C)
#define CC2520_SREG_FSCAL0 (0x4E)
#define CC2520_SREG_FSCAL1 (0x4F)
#define CC2520_SREG_FSCAL2 (0x50)
#define CC2520_SREG_FSCAL3 (0x51)
#define CC2520_SREG_AGCCTRL0 (0x52)
#define CC2520_SREG_AGCCTRL1 (0x53)
#define CC2520_SREG_AGCCTRL2 (0x54)
#define CC2520_SREG_AGCCTRL3 (0x55)
#define CC2520_SREG_ADCTEST0 (0x56)
#define CC2520_SREG_ADCTEST1 (0x57)
#define CC2520_SREG_ADCTEST2 (0x58)
#define CC2520_SREG_MDMTEST0 (0x5A)
#define CC2520_SREG_DACTEST0 (0x5C)
#define CC2520_SREG_DACTEST1 (0x5D)
#define CC2520_SREG_ATEST (0x5E)
#define CC2520_SREG_DACTEST2 (0x5F)
#define CC2520_SREG_PTEST0 (0x60)
#define CC2520_SREG_PTEST1 (0x61)
#define CC2520_SREG_DPUBIST (0x7A)
#define CC2520_SREG_ACTBIST (0x7C)
#define CC2520_SREG_RAMBIST (0x7E)
/* Useful RAM addresses (see chapter 15 part 6) */
#define CC2520_MEM_SHORT_ADDR (0x03F4)
#define CC2520_MEM_PAN_ID (0x03F2)
#define CC2520_MEM_EXT_ADDR (0x03EA)
#define CC2520_MEM_SRCSHORTPENDEN2 (0x03E9)
#define CC2520_MEM_SRCSHORTPENDEN1 (0x03E8)
#define CC2520_MEM_SRCSHORTPENDEN0 (0x03E7)
#define CC2520_MEM_SRCEXTPENDEN2 (0x03E6)
#define CC2520_MEM_SRCEXTPENDEN1 (0x03E5)
#define CC2520_MEM_SRCEXTPENDEN0 (0x03E4)
#define CC2520_MEM_SRCRESINDEX (0x03E3)
#define CC2520_MEM_SRCRESMASK2 (0x03E2)
#define CC2520_MEM_SRCRESMASK1 (0x03E1)
#define CC2520_MEM_SRCRESMASK0 (0x03E0)
#define CC2520_MEM_START (0x0200)
#define CC2520_MEM_DATA (CC2520_MEM_START)
#define CC2520_MEM_KEY_POS (0x90)
#define CC2520_MEM_KEY (CC2520_MEM_START + \
CC2520_MEM_KEY_POS)
#define CC2520_MEM_NONCE_POS (0xa0)
#define CC2520_MEM_NONCE (CC2520_MEM_START + \
CC2520_MEM_NONCE_POS)
/* Default settings (see chapter 28 part 1) */
#define CC2520_TXPOWER_DEFAULT (0x32)
#define CC2520_CCACTRL0_DEFAULT (0xF8)
#define CC2520_MDMCTRL0_DEFAULT (0x85)
#define CC2520_MDMCTRL1_DEFAULT (0x14)
#define CC2520_RXCTRL_DEFAULT (0x3F)
#define CC2520_FSCTRL_DEFAULT (0x5A)
#define CC2520_FSCAL1_DEFAULT (0x2B)
#define CC2520_AGCCTRL1_DEFAULT (0x11)
#define CC2520_ADCTEST0_DEFAULT (0x10)
#define CC2520_ADCTEST1_DEFAULT (0x0E)
#define CC2520_ADCTEST2_DEFAULT (0x03)
/* Status bits (see chapter 13 part 4) */
#define CC2520_STATUS_RX_ACTIVE BIT(0)
#define CC2520_STATUS_TX_ACTIVE BIT(1)
#define CC2520_STATUS_DPU_L_ACTIVE BIT(2)
#define CC2520_STATUS_DPU_H_ACTIVE BIT(3)
#define CC2520_STATUS_EXCEPTION_CHANNEL_B BIT(4)
#define CC2520_STATUS_EXCEPTION_CHANNEL_A BIT(5)
#define CC2520_STATUS_RSSI_VALID BIT(6)
#define CC2520_STATUS_XOSC_STABLE_N_RUNNING BIT(7)
#define CC2520_FCS_CRC_OK (0x80)
#define CC2520_FCS_CORRELATION (0x7f)
#endif /* ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC2520_REGS_H_ */
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_cc2520_regs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,598 |
```c
/* ieee802154_kw41z.c - NXP KW41Z driver */
/*
*
*/
#define DT_DRV_COMPAT nxp_kw41z_ieee802154
#define LOG_MODULE_NAME ieee802154_kw41z
#define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <zephyr/net/ieee802154_radio.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/random/random.h>
#include "fsl_xcvr.h"
#if defined(CONFIG_NET_L2_OPENTHREAD)
#include <zephyr/net/openthread.h>
#endif
/*
* For non-invasive tracing of IRQ events. Sometimes the print logs
* will shift the timings around so this trace buffer can be used to
* post inspect conditions to see what sequence of events occurred.
*/
#define KW41_DBG_TRACE_WTRM 0
#define KW41_DBG_TRACE_RX 1
#define KW41_DBG_TRACE_TX 2
#define KW41_DBG_TRACE_CCA 3
#define KW41_DBG_TRACE_TMR3 0xFF
#if defined(CONFIG_KW41_DBG_TRACE)
#define KW41_DBG_TRACE_SIZE 30
struct kw41_dbg_trace {
uint8_t type;
uint32_t time;
uint32_t irqsts;
uint32_t phy_ctrl;
uint32_t seq_state;
};
struct kw41_dbg_trace kw41_dbg[KW41_DBG_TRACE_SIZE];
int kw41_dbg_idx;
#define KW_DBG_TRACE(_type, _irqsts, _phy_ctrl, _seq_state) \
do { \
kw41_dbg[kw41_dbg_idx].type = (_type); \
kw41_dbg[kw41_dbg_idx].time = \
ZLL->EVENT_TMR >> ZLL_EVENT_TMR_EVENT_TMR_SHIFT; \
kw41_dbg[kw41_dbg_idx].irqsts = (_irqsts); \
kw41_dbg[kw41_dbg_idx].phy_ctrl = (_phy_ctrl); \
kw41_dbg[kw41_dbg_idx].seq_state = (_seq_state); \
if (++kw41_dbg_idx == KW41_DBG_TRACE_SIZE) { \
kw41_dbg_idx = 0; \
} \
} while (false)
#else
#define KW_DBG_TRACE(_type, _irqsts, _phy_ctrl, _seq_state)
#endif
#define KW41Z_DEFAULT_CHANNEL 26
#define KW41Z_CCA_TIME 8
#define KW41Z_SHR_PHY_TIME 12
#define KW41Z_PER_BYTE_TIME 2
#define KW41Z_ACK_WAIT_TIME 54
#define KW41Z_PRE_RX_WAIT_TIME 1
#define KW40Z_POST_SEQ_WAIT_TIME 1
#define RADIO_0_IRQ_PRIO 0x0
#define KW41Z_FCS_LENGTH 2
#define KW41Z_PSDU_LENGTH 125
#define KW41Z_OUTPUT_POWER_MAX 4
#define KW41Z_OUTPUT_POWER_MIN (-31)
#define IEEE802154_ACK_LENGTH 5
#define BM_ZLL_IRQSTS_TMRxMSK (ZLL_IRQSTS_TMR1MSK_MASK | \
ZLL_IRQSTS_TMR2MSK_MASK | \
ZLL_IRQSTS_TMR3MSK_MASK | \
ZLL_IRQSTS_TMR4MSK_MASK)
/*
* Clear channel assessment types. Note that there is an extra one when
* bit 26 is included for "No CCA before transmit" if we are handling
* ACK frames but we will let the hardware handle that automatically.
*/
enum {
KW41Z_CCA_ED, /* Energy detect */
KW41Z_CCA_MODE1, /* Energy above threshold */
KW41Z_CCA_MODE2, /* Carrier sense only */
KW41Z_CCA_MODE3 /* Mode 1 + Mode 2 */
};
/*
* KW41Z has a sequencer that can run in any of the following states.
*/
enum {
KW41Z_STATE_IDLE,
KW41Z_STATE_RX,
KW41Z_STATE_TX,
KW41Z_STATE_CCA,
KW41Z_STATE_TXRX,
KW41Z_STATE_CCCA
};
/* Lookup table for PA_PWR register */
static const uint8_t pa_pwr_lt[] = {
1, /* -31.1 dBm: -31 */
2, 2, 2, 2, 2, 2, 2, /* -25.0 dBm: -30, -29, -28, -27, -26, -25 */
4, 4, 4, 4, 4, /* -19.0 dBm: -24, -23, -22, -21, -20, -19 */
6, 6, 6, /* -15.6 dBm: -18, -17, -16 */
8, 8, /* -13.1 dBm: -15, -14 */
10, 10, /* -11.2 dBm: -13, -12 */
12, 12, /* - 9.6 dBm: -11, -10 */
14, /* - 8.3 dBm: -9 */
16, /* - 7.2 dBm: -8 */
18, /* - 6.2 dBm: -7 */
20, /* - 5.3 dBm: -6 */
22, /* - 4.5 dBm: -5 */
24, /* - 3.8 dBm: -4 */
28, /* - 2.5 dBm: -3 */
30, /* - 1.9 dBm: -2 */
34, /* - 1.0 dBm: -1 */
40, /* + 0.3 dBm: 0 */
44, /* + 1.1 dBm: +1 */
50, /* + 2.1 dBm: +2 */
58, /* + 3.1 dBm: +3 */
62 /* + 3.5 dBm: +4 */
};
struct kw41z_context {
struct net_if *iface;
uint8_t mac_addr[8];
struct k_sem seq_sync;
atomic_t seq_retval;
uint32_t rx_warmup_time;
uint32_t tx_warmup_time;
bool frame_pending; /* FP bit state from the most recent ACK frame. */
};
static struct kw41z_context kw41z_context_data;
static inline uint8_t kw41z_get_instant_state(void)
{
return (ZLL->SEQ_STATE & ZLL_SEQ_STATE_SEQ_STATE_MASK) >>
ZLL_SEQ_STATE_SEQ_STATE_SHIFT;
}
static inline uint8_t kw41z_get_seq_state(void)
{
return (ZLL->PHY_CTRL & ZLL_PHY_CTRL_XCVSEQ_MASK) >>
ZLL_PHY_CTRL_XCVSEQ_SHIFT;
}
static inline void kw41z_set_seq_state(uint8_t state)
{
#if CONFIG_SOC_MKW40Z4
/*
* KW40Z seems to require a small delay when switching to IDLE state
* after a programmed sequence is complete.
*/
if (state == KW41Z_STATE_IDLE) {
k_busy_wait(KW40Z_POST_SEQ_WAIT_TIME);
}
#endif
ZLL->PHY_CTRL = (ZLL->PHY_CTRL & ~ZLL_PHY_CTRL_XCVSEQ_MASK) |
ZLL_PHY_CTRL_XCVSEQ(state);
}
static inline void kw41z_wait_for_idle(void)
{
uint8_t state = kw41z_get_instant_state();
while (state != KW41Z_STATE_IDLE) {
state = kw41z_get_instant_state();
}
if (state != KW41Z_STATE_IDLE) {
LOG_ERR("Error waiting for idle state");
}
}
static void kw41z_phy_abort(void)
{
unsigned int key;
key = irq_lock();
/* Mask SEQ interrupt */
ZLL->PHY_CTRL |= ZLL_PHY_CTRL_SEQMSK_MASK;
/* Disable timer trigger (for scheduled XCVSEQ) */
if (ZLL->PHY_CTRL & ZLL_PHY_CTRL_TMRTRIGEN_MASK) {
ZLL->PHY_CTRL &= ~ZLL_PHY_CTRL_TMRTRIGEN_MASK;
/* give the FSM enough time to start if it was triggered */
while ((XCVR_MISC->XCVR_CTRL &
XCVR_CTRL_XCVR_STATUS_TSM_COUNT_MASK) == 0) {
}
}
/* If XCVR is not idle, abort current SEQ */
if (ZLL->PHY_CTRL & ZLL_PHY_CTRL_XCVSEQ_MASK) {
ZLL->PHY_CTRL &= ~ZLL_PHY_CTRL_XCVSEQ_MASK;
/* wait for Sequence Idle (if not already) */
while (ZLL->SEQ_STATE & ZLL_SEQ_STATE_SEQ_STATE_MASK) {
}
}
/* Stop timers */
ZLL->PHY_CTRL &= ~(ZLL_PHY_CTRL_TMR1CMP_EN_MASK |
ZLL_PHY_CTRL_TMR2CMP_EN_MASK |
ZLL_PHY_CTRL_TMR3CMP_EN_MASK |
ZLL_PHY_CTRL_TC3TMOUT_MASK);
/*
* Clear all IRQ bits to avoid unexpected interrupts.
*
* For Coverity, this is a pointer to a register bank and the IRQSTS
* register bits get cleared when a 1 is written to them so doing a
* reg=reg may generate a warning but it is needed to clear the bits.
*/
ZLL->IRQSTS = ZLL->IRQSTS;
irq_unlock(key);
}
static void kw41z_isr_timeout_cleanup(void)
{
uint32_t irqsts;
/*
* Set the PHY sequencer back to IDLE and disable TMR3 comparator
* and timeout
*/
ZLL->PHY_CTRL &= ~(ZLL_PHY_CTRL_TMR3CMP_EN_MASK |
ZLL_PHY_CTRL_TC3TMOUT_MASK |
ZLL_PHY_CTRL_XCVSEQ_MASK);
/* Mask SEQ, RX, TX and CCA interrupts */
ZLL->PHY_CTRL |= ZLL_PHY_CTRL_CCAMSK_MASK |
ZLL_PHY_CTRL_RXMSK_MASK |
ZLL_PHY_CTRL_TXMSK_MASK |
ZLL_PHY_CTRL_SEQMSK_MASK;
while (ZLL->SEQ_STATE & ZLL_SEQ_STATE_SEQ_STATE_MASK) {
}
irqsts = ZLL->IRQSTS;
/* Mask TMR3 interrupt */
irqsts |= ZLL_IRQSTS_TMR3MSK_MASK;
ZLL->IRQSTS = irqsts;
}
static void kw41z_isr_seq_cleanup(void)
{
uint32_t irqsts;
/* Set the PHY sequencer back to IDLE */
ZLL->PHY_CTRL &= ~ZLL_PHY_CTRL_XCVSEQ_MASK;
/* Mask SEQ, RX, TX and CCA interrupts */
ZLL->PHY_CTRL |= ZLL_PHY_CTRL_CCAMSK_MASK |
ZLL_PHY_CTRL_RXMSK_MASK |
ZLL_PHY_CTRL_TXMSK_MASK |
ZLL_PHY_CTRL_SEQMSK_MASK;
while (ZLL->SEQ_STATE & ZLL_SEQ_STATE_SEQ_STATE_MASK) {
}
irqsts = ZLL->IRQSTS;
/* Mask TMR3 interrupt */
irqsts |= ZLL_IRQSTS_TMR3MSK_MASK;
/* Clear transceiver interrupts except TMRxIRQ */
irqsts &= ~(ZLL_IRQSTS_TMR1IRQ_MASK |
ZLL_IRQSTS_TMR2IRQ_MASK |
ZLL_IRQSTS_TMR3IRQ_MASK |
ZLL_IRQSTS_TMR4IRQ_MASK);
ZLL->IRQSTS = irqsts;
}
static inline void kw41z_enable_seq_irq(void)
{
ZLL->PHY_CTRL &= ~ZLL_PHY_CTRL_SEQMSK_MASK;
}
static inline void kw41z_disable_seq_irq(void)
{
ZLL->PHY_CTRL |= ZLL_PHY_CTRL_SEQMSK_MASK;
}
/*
* Set the T3CMP timer comparator. The 'timeout' value is an offset from
* now.
*/
static void kw41z_tmr3_set_timeout(uint32_t timeout)
{
uint32_t irqsts;
/* Add in the current time so that we can get the comparator to
* match appropriately to our offset time.
*/
timeout += ZLL->EVENT_TMR >> ZLL_EVENT_TMR_EVENT_TMR_SHIFT;
/* disable TMR3 compare */
ZLL->PHY_CTRL &= ~ZLL_PHY_CTRL_TMR3CMP_EN_MASK;
ZLL->T3CMP = timeout & ZLL_T3CMP_T3CMP_MASK;
/* acknowledge TMR3 IRQ */
irqsts = ZLL->IRQSTS & BM_ZLL_IRQSTS_TMRxMSK;
irqsts |= ZLL_IRQSTS_TMR3IRQ_MASK;
ZLL->IRQSTS = irqsts;
/* enable TMR3 compare and autosequence stop by TC3 match */
ZLL->PHY_CTRL |=
(ZLL_PHY_CTRL_TMR3CMP_EN_MASK | ZLL_PHY_CTRL_TC3TMOUT_MASK);
}
static void kw41z_tmr3_disable(void)
{
uint32_t irqsts;
/*
* disable TMR3 compare and disable autosequence stop by TC3
* match
*/
ZLL->PHY_CTRL &= ~(ZLL_PHY_CTRL_TMR3CMP_EN_MASK |
ZLL_PHY_CTRL_TC3TMOUT_MASK);
/* mask TMR3 interrupt (do not change other IRQ status) */
irqsts = ZLL->IRQSTS & BM_ZLL_IRQSTS_TMRxMSK;
irqsts |= ZLL_IRQSTS_TMR3MSK_MASK;
/* acknowledge TMR3 IRQ */
irqsts |= ZLL_IRQSTS_TMR3IRQ_MASK;
ZLL->IRQSTS = irqsts;
}
static enum ieee802154_hw_caps kw41z_get_capabilities(const struct device *dev)
{
return IEEE802154_HW_FCS | IEEE802154_HW_FILTER |
IEEE802154_HW_TX_RX_ACK | IEEE802154_HW_RX_TX_ACK;
}
static int kw41z_cca(const struct device *dev)
{
struct kw41z_context *kw41z = dev->data;
kw41z_phy_abort();
k_sem_init(&kw41z->seq_sync, 0, 1);
kw41z_enable_seq_irq();
ZLL->PHY_CTRL = (ZLL->PHY_CTRL & ~ZLL_PHY_CTRL_CCATYPE_MASK) |
ZLL_PHY_CTRL_CCATYPE(KW41Z_CCA_MODE1);
kw41z_set_seq_state(KW41Z_STATE_CCA);
k_sem_take(&kw41z->seq_sync, K_FOREVER);
return kw41z->seq_retval;
}
static int kw41z_set_channel(const struct device *dev, uint16_t channel)
{
if (channel < 11 || channel > 26) {
return channel < 11 ? -ENOTSUP : -EINVAL;
}
ZLL->CHANNEL_NUM0 = channel;
return 0;
}
static int kw41z_set_pan_id(const struct device *dev, uint16_t pan_id)
{
ZLL->MACSHORTADDRS0 = (ZLL->MACSHORTADDRS0 &
~ZLL_MACSHORTADDRS0_MACPANID0_MASK) |
ZLL_MACSHORTADDRS0_MACPANID0(pan_id);
return 0;
}
static int kw41z_set_short_addr(const struct device *dev, uint16_t short_addr)
{
ZLL->MACSHORTADDRS0 = (ZLL->MACSHORTADDRS0 &
~ZLL_MACSHORTADDRS0_MACSHORTADDRS0_MASK) |
ZLL_MACSHORTADDRS0_MACSHORTADDRS0(short_addr);
return 0;
}
static int kw41z_set_ieee_addr(const struct device *dev,
const uint8_t *ieee_addr)
{
uint32_t val;
memcpy(&val, ieee_addr, sizeof(val));
ZLL->MACLONGADDRS0_LSB = val;
memcpy(&val, ieee_addr + sizeof(val), sizeof(val));
ZLL->MACLONGADDRS0_MSB = val;
return 0;
}
static int kw41z_filter(const struct device *dev,
bool set,
enum ieee802154_filter_type type,
const struct ieee802154_filter *filter)
{
LOG_DBG("Applying filter %u", type);
if (!set) {
return -ENOTSUP;
}
if (type == IEEE802154_FILTER_TYPE_IEEE_ADDR) {
return kw41z_set_ieee_addr(dev, filter->ieee_addr);
} else if (type == IEEE802154_FILTER_TYPE_SHORT_ADDR) {
return kw41z_set_short_addr(dev, filter->short_addr);
} else if (type == IEEE802154_FILTER_TYPE_PAN_ID) {
return kw41z_set_pan_id(dev, filter->pan_id);
}
return -ENOTSUP;
}
static int kw41z_set_txpower(const struct device *dev, int16_t dbm)
{
if (dbm < KW41Z_OUTPUT_POWER_MIN) {
LOG_INF("TX-power %d dBm below min of %d dBm, using %d dBm",
dbm,
KW41Z_OUTPUT_POWER_MIN,
KW41Z_OUTPUT_POWER_MIN);
dbm = KW41Z_OUTPUT_POWER_MIN;
} else if (dbm > KW41Z_OUTPUT_POWER_MAX) {
LOG_INF("TX-power %d dBm above max of %d dBm, using %d dBm",
dbm,
KW41Z_OUTPUT_POWER_MAX,
KW41Z_OUTPUT_POWER_MAX);
dbm = KW41Z_OUTPUT_POWER_MAX;
}
ZLL->PA_PWR = pa_pwr_lt[dbm - KW41Z_OUTPUT_POWER_MIN];
return 0;
}
static int kw41z_start(const struct device *dev)
{
irq_enable(Radio_1_IRQn);
kw41z_set_seq_state(KW41Z_STATE_RX);
kw41z_enable_seq_irq();
return 0;
}
static int kw41z_stop(const struct device *dev)
{
irq_disable(Radio_1_IRQn);
kw41z_disable_seq_irq();
kw41z_set_seq_state(KW41Z_STATE_IDLE);
return 0;
}
static uint8_t kw41z_convert_lqi(uint8_t hw_lqi)
{
if (hw_lqi >= 220U) {
return 255;
} else {
return (hw_lqi * 51U) / 44;
}
}
static inline void kw41z_rx(struct kw41z_context *kw41z, uint8_t len)
{
struct net_pkt *pkt = NULL;
struct net_buf *buf = NULL;
uint8_t pkt_len, hw_lqi;
int rslt;
LOG_DBG("ENTRY: len: %d", len);
#if defined(CONFIG_NET_L2_OPENTHREAD)
/*
* OpenThread stack expects a receive frame to include the FCS
*/
pkt_len = len;
#else
pkt_len = len - KW41Z_FCS_LENGTH;
#endif
pkt = net_pkt_rx_alloc_with_buffer(kw41z->iface, pkt_len,
AF_UNSPEC, 0, K_NO_WAIT);
if (!pkt) {
LOG_ERR("No buf available");
goto out;
}
buf = pkt->buffer;
#if CONFIG_SOC_MKW41Z4
/* PKT_BUFFER_RX needs to be accessed aligned to 16 bits */
for (uint16_t reg_val = 0, i = 0; i < pkt_len; i++) {
if (i % 2 == 0U) {
reg_val = ZLL->PKT_BUFFER_RX[i/2U];
buf->data[i] = reg_val & 0xFF;
} else {
buf->data[i] = reg_val >> 8;
}
}
#else /* CONFIG_SOC_MKW40Z4 */
/* PKT_BUFFER needs to be accessed aligned to 32 bits */
for (uint32_t reg_val = 0, i = 0; i < pkt_len; i++) {
switch (i % 4) {
case 0:
reg_val = ZLL->PKT_BUFFER[i/4U];
buf->data[i] = reg_val & 0xFF;
break;
case 1:
buf->data[i] = (reg_val >> 8) & 0xFF;
break;
case 2:
buf->data[i] = (reg_val >> 16) & 0xFF;
break;
default:
buf->data[i] = reg_val >> 24;
}
}
#endif
net_buf_add(buf, pkt_len);
hw_lqi = (ZLL->LQI_AND_RSSI & ZLL_LQI_AND_RSSI_LQI_VALUE_MASK) >>
ZLL_LQI_AND_RSSI_LQI_VALUE_SHIFT;
net_pkt_set_ieee802154_lqi(pkt, kw41z_convert_lqi(hw_lqi));
/* ToDo: get the rssi as well and use net_pkt_set_ieee802154_rssi() */
rslt = net_recv_data(kw41z->iface, pkt);
if (rslt < 0) {
LOG_ERR("RCV Packet dropped by NET stack: %d", rslt);
goto out;
}
return;
out:
if (pkt) {
net_pkt_unref(pkt);
}
}
#define ACK_FRAME_LEN 3
#define ACK_FRAME_TYPE (2 << 0)
#define ACK_FRAME_PENDING_BIT (1 << 4)
static void handle_ack(struct kw41z_context *kw41z, uint8_t seq_number)
{
struct net_pkt *ack_pkt;
uint8_t ack_psdu[ACK_FRAME_LEN];
ack_pkt = net_pkt_rx_alloc_with_buffer(kw41z->iface, ACK_FRAME_LEN,
AF_UNSPEC, 0, K_NO_WAIT);
if (!ack_pkt) {
LOG_ERR("No free packet available.");
return;
}
/* Re-create ACK frame. */
ack_psdu[0] = kw41z_context_data.frame_pending ?
ACK_FRAME_TYPE | ACK_FRAME_PENDING_BIT : ACK_FRAME_TYPE;
ack_psdu[1] = 0;
ack_psdu[2] = seq_number;
if (net_pkt_write(ack_pkt, ack_psdu, sizeof(ack_psdu)) < 0) {
LOG_ERR("Failed to write to a packet.");
goto out;
}
/* Use some fake values for LQI and RSSI. */
(void)net_pkt_set_ieee802154_lqi(ack_pkt, 80);
(void)net_pkt_set_ieee802154_rssi_dbm(ack_pkt, -40);
net_pkt_cursor_init(ack_pkt);
if (ieee802154_handle_ack(kw41z->iface, ack_pkt) != NET_OK) {
LOG_INF("ACK packet not handled - releasing.");
}
out:
net_pkt_unref(ack_pkt);
}
static int kw41z_tx(const struct device *dev, enum ieee802154_tx_mode mode,
struct net_pkt *pkt, struct net_buf *frag)
{
struct kw41z_context *kw41z = dev->data;
uint8_t payload_len = frag->len;
uint32_t tx_timeout;
uint8_t xcvseq;
unsigned int key;
if (mode != IEEE802154_TX_MODE_DIRECT) {
NET_ERR("TX mode %d not supported", mode);
return -ENOTSUP;
}
/*
* The transmit requests are preceded by the CCA request. On
* completion of the CCA the sequencer should be in the IDLE
* state.
*/
if (kw41z_get_seq_state() != KW41Z_STATE_IDLE) {
LOG_WRN("Can't initiate new SEQ state");
return -EBUSY;
}
if (payload_len > KW41Z_PSDU_LENGTH) {
LOG_ERR("Payload too long");
return 0;
}
key = irq_lock();
/* Disable the 802.15.4 radio IRQ */
ZLL->PHY_CTRL |= ZLL_PHY_CTRL_TRCV_MSK_MASK;
kw41z_disable_seq_irq();
#if CONFIG_SOC_MKW41Z4
((uint8_t *)ZLL->PKT_BUFFER_TX)[0] = payload_len + KW41Z_FCS_LENGTH;
memcpy(((uint8_t *)ZLL->PKT_BUFFER_TX) + 1,
(void *)frag->data, payload_len);
#else /* CONFIG_SOC_MKW40Z4 */
((uint8_t *)ZLL->PKT_BUFFER)[0] = payload_len + KW41Z_FCS_LENGTH;
memcpy(((uint8_t *)ZLL->PKT_BUFFER) + 1,
(void *)frag->data, payload_len);
#endif
/* Set CCA mode */
ZLL->PHY_CTRL = (ZLL->PHY_CTRL & ~ZLL_PHY_CTRL_CCATYPE_MASK) |
ZLL_PHY_CTRL_CCATYPE(KW41Z_CCA_MODE1);
/* Clear all IRQ flags */
ZLL->IRQSTS = ZLL->IRQSTS;
/* Perform automatic reception of ACK frame, if required */
if (ieee802154_is_ar_flag_set(frag)) {
tx_timeout = kw41z->tx_warmup_time + KW41Z_SHR_PHY_TIME +
payload_len * KW41Z_PER_BYTE_TIME + 10 +
KW41Z_ACK_WAIT_TIME;
LOG_DBG("AUTOACK ENABLED: len: %d, timeout: %d, seq: %d",
payload_len, tx_timeout, frag->data[2]);
kw41z_tmr3_set_timeout(tx_timeout);
ZLL->PHY_CTRL |= ZLL_PHY_CTRL_RXACKRQD_MASK;
xcvseq = KW41Z_STATE_TXRX;
} else {
LOG_DBG("AUTOACK DISABLED: len: %d, seq: %d",
payload_len, frag->data[2]);
ZLL->PHY_CTRL &= ~ZLL_PHY_CTRL_RXACKRQD_MASK;
xcvseq = KW41Z_STATE_TX;
}
kw41z_enable_seq_irq();
/*
* PHY_CTRL is sensitive to multiple writes that can kick off
* the sequencer engine causing TX with AR request to send the
* TX frame multiple times.
*
* To minimize, ensure there is only one write to PHY_CTRL with
* TXRX sequence enable and the 802.15.4 radio IRQ.
*/
ZLL->PHY_CTRL = (ZLL->PHY_CTRL & ~ZLL_PHY_CTRL_TRCV_MSK_MASK) | xcvseq;
irq_unlock(key);
k_sem_take(&kw41z->seq_sync, K_FOREVER);
if ((kw41z->seq_retval == 0) && ieee802154_is_ar_flag_set(frag)) {
handle_ack(kw41z, frag->data[2]);
}
LOG_DBG("seq_retval: %ld", kw41z->seq_retval);
return kw41z->seq_retval;
}
static void kw41z_isr(int unused)
{
uint32_t irqsts = ZLL->IRQSTS;
uint8_t state = kw41z_get_seq_state();
uint8_t restart_rx = 1U;
uint32_t rx_len;
/*
* Variable is used in debug output to capture the state of the
* sequencer at interrupt.
*/
uint32_t seq_state = ZLL->SEQ_STATE;
LOG_DBG("ENTRY: irqsts: 0x%08X, PHY_CTRL: 0x%08X, "
"SEQ_STATE: 0x%08X, SEQ_CTRL: 0x%08X, TMR: %d, state: %d",
irqsts, (unsigned int)ZLL->PHY_CTRL,
(unsigned int)seq_state,
(unsigned int)ZLL->SEQ_CTRL_STS,
(unsigned int)(ZLL->EVENT_TMR >> ZLL_EVENT_TMR_EVENT_TMR_SHIFT),
state);
/* Clear interrupts */
ZLL->IRQSTS = irqsts;
if (irqsts & ZLL_IRQSTS_FILTERFAIL_IRQ_MASK) {
LOG_DBG("Incoming RX failed packet filtering rules: "
"CODE: 0x%08X, irqsts: 0x%08X, PHY_CTRL: 0x%08X, "
"SEQ_STATE: 0x%08X, state: %d",
(unsigned int)ZLL->FILTERFAIL_CODE,
irqsts,
(unsigned int)ZLL->PHY_CTRL,
(unsigned int)seq_state, state);
restart_rx = 0U;
} else if ((!(ZLL->PHY_CTRL & ZLL_PHY_CTRL_RX_WMRK_MSK_MASK)) &&
(irqsts & ZLL_IRQSTS_RXWTRMRKIRQ_MASK)) {
/*
* There is a bug in the KW41Z where in noisy environments
* the RX sequence can get lost. The watermark mask IRQ can
* start TMR3 to complete the rest of the read or to assert
* IRQ if the sequencer gets lost so we can reset things.
* Note that a TX from the upper layers will also reset
* things so the problem is contained a bit in normal
* operation.
*/
rx_len = (irqsts & ZLL_IRQSTS_RX_FRAME_LENGTH_MASK)
>> ZLL_IRQSTS_RX_FRAME_LENGTH_SHIFT;
KW_DBG_TRACE(KW41_DBG_TRACE_WTRM, irqsts,
(unsigned int)ZLL->PHY_CTRL, seq_state);
if (rx_len > IEEE802154_ACK_LENGTH) {
LOG_DBG("WMRK irq: seq_state: 0x%08x, rx_len: %d",
seq_state, rx_len);
/*
* Assume the RX includes an auto-ACK so set the
* timer to include the RX frame size, crc, IFS,
* and ACK length and convert to symbols.
*
* IFS is 12 symbols
*
* ACK frame is 11 bytes: 4 preamble, 1 start of
* frame, 1 frame length, 2 frame control,
* 1 sequence, 2 FCS. Times two to convert to symbols.
*/
rx_len = rx_len * 2U + 12 + 22 + 2;
kw41z_tmr3_set_timeout(rx_len);
}
restart_rx = 0U;
}
/* Sequence done IRQ */
if ((state != KW41Z_STATE_IDLE) && (irqsts & ZLL_IRQSTS_SEQIRQ_MASK)) {
/*
* PLL unlock, the autosequence has been aborted due to
* PLL unlock
*/
if (irqsts & ZLL_IRQSTS_PLL_UNLOCK_IRQ_MASK) {
LOG_ERR("PLL unlock error");
kw41z_isr_seq_cleanup();
restart_rx = 1U;
}
/*
* TMR3 timeout, the autosequence has been aborted due to
* TMR3 timeout
*/
else if ((irqsts & ZLL_IRQSTS_TMR3IRQ_MASK) &&
(!(irqsts & ZLL_IRQSTS_RXIRQ_MASK)) &&
(state != KW41Z_STATE_TX)) {
LOG_DBG("a) TMR3 timeout: irqsts: 0x%08X, "
"seq_state: 0x%08X, PHY_CTRL: 0x%08X, "
"state: %d",
irqsts, seq_state,
(unsigned int)ZLL->PHY_CTRL, state);
KW_DBG_TRACE(KW41_DBG_TRACE_TMR3, irqsts,
(unsigned int)ZLL->PHY_CTRL, seq_state);
kw41z_isr_timeout_cleanup();
restart_rx = 1U;
if (state == KW41Z_STATE_TXRX) {
/* TODO: What is the right error for no ACK? */
atomic_set(&kw41z_context_data.seq_retval,
-EBUSY);
k_sem_give(&kw41z_context_data.seq_sync);
}
} else {
kw41z_isr_seq_cleanup();
switch (state) {
case KW41Z_STATE_RX:
LOG_DBG("RX seq done: SEQ_STATE: 0x%08X",
(unsigned int)seq_state);
KW_DBG_TRACE(KW41_DBG_TRACE_RX, irqsts,
(unsigned int)ZLL->PHY_CTRL, seq_state);
kw41z_tmr3_disable();
rx_len = (ZLL->IRQSTS &
ZLL_IRQSTS_RX_FRAME_LENGTH_MASK) >>
ZLL_IRQSTS_RX_FRAME_LENGTH_SHIFT;
if (irqsts & ZLL_IRQSTS_RXIRQ_MASK) {
if (rx_len != 0U) {
kw41z_rx(&kw41z_context_data,
rx_len);
}
}
restart_rx = 1U;
break;
case KW41Z_STATE_TXRX:
LOG_DBG("TXRX seq done");
kw41z_tmr3_disable();
/* Store the frame pending bit status. */
kw41z_context_data.frame_pending =
irqsts & ZLL_IRQSTS_RX_FRM_PEND_MASK;
case KW41Z_STATE_TX:
LOG_DBG("TX seq done");
KW_DBG_TRACE(KW41_DBG_TRACE_TX, irqsts,
(unsigned int)ZLL->PHY_CTRL, seq_state);
if (irqsts & ZLL_IRQSTS_CCA_MASK) {
atomic_set(
&kw41z_context_data.seq_retval,
-EBUSY);
} else {
atomic_set(
&kw41z_context_data.seq_retval,
0);
}
k_sem_give(&kw41z_context_data.seq_sync);
restart_rx = 1U;
break;
case KW41Z_STATE_CCA:
LOG_DBG("CCA seq done");
KW_DBG_TRACE(KW41_DBG_TRACE_CCA, irqsts,
(unsigned int)ZLL->PHY_CTRL, seq_state);
if (irqsts & ZLL_IRQSTS_CCA_MASK) {
atomic_set(
&kw41z_context_data.seq_retval,
-EBUSY);
restart_rx = 1U;
} else {
atomic_set(
&kw41z_context_data.seq_retval,
0);
restart_rx = 0U;
}
k_sem_give(&kw41z_context_data.seq_sync);
break;
default:
LOG_DBG("Unhandled state: %d", state);
restart_rx = 1U;
break;
}
}
} else {
/* Timer 3 Compare Match */
if ((irqsts & ZLL_IRQSTS_TMR3IRQ_MASK) &&
(!(irqsts & ZLL_IRQSTS_TMR3MSK_MASK))) {
LOG_DBG("b) TMR3 timeout: irqsts: 0x%08X, "
"seq_state: 0x%08X, state: %d",
irqsts, seq_state, state);
kw41z_tmr3_disable();
restart_rx = 0U;
if (state != KW41Z_STATE_IDLE) {
kw41z_isr_timeout_cleanup();
restart_rx = 1U;
/* If we are not running an automated
* sequence then handle event. TMR3 can expire
* during Recv/Ack sequence where the transmit
* of the ACK is not being interrupted.
*/
}
}
}
/* Restart RX */
if (restart_rx) {
LOG_DBG("RESET RX");
kw41z_phy_abort();
kw41z_set_seq_state(KW41Z_STATE_RX);
kw41z_enable_seq_irq();
}
}
static inline uint8_t *get_mac(const struct device *dev)
{
struct kw41z_context *kw41z = dev->data;
/*
* The KW40Z has two 32-bit registers for the MAC address where
* 40 bits of the registers are factory programmed to be unique
* and the rest are to be assigned as the "company-specific" value.
* 802.15.4 defines a EUI-64 64-bit address with company specific
* being 24 or 36 bits with the unique value being 24 or 40 bits.
*
* TODO: Grab from RSIM->MAC_LSB/MAC_MSB for the unique 40 bits
* and how to allow for a OUI portion?
*/
sys_rand_get(kw41z->mac_addr, sizeof(kw41z->mac_addr));
/*
* Clear bit 0 to ensure it isn't a multicast address and set
* bit 1 to indicate address is locally administered and may
* not be globally unique.
*/
kw41z->mac_addr[0] = (kw41z->mac_addr[0] & ~0x01) | 0x02;
return kw41z->mac_addr;
}
static int kw41z_init(const struct device *dev)
{
struct kw41z_context *kw41z = dev->data;
xcvrStatus_t xcvrStatus;
xcvrStatus = XCVR_Init(ZIGBEE_MODE, DR_500KBPS);
if (xcvrStatus != gXcvrSuccess_c) {
return -EIO;
}
/* Disable all timers, enable AUTOACK, mask all interrupts */
ZLL->PHY_CTRL = ZLL_PHY_CTRL_CCATYPE(KW41Z_CCA_MODE1) |
ZLL_PHY_CTRL_CRC_MSK_MASK |
ZLL_PHY_CTRL_PLL_UNLOCK_MSK_MASK |
/*ZLL_PHY_CTRL_FILTERFAIL_MSK_MASK |*/
ZLL_PHY_CTRL_RX_WMRK_MSK_MASK |
ZLL_PHY_CTRL_CCAMSK_MASK |
ZLL_PHY_CTRL_RXMSK_MASK |
ZLL_PHY_CTRL_TXMSK_MASK |
ZLL_PHY_CTRL_CCABFRTX_MASK |
ZLL_PHY_CTRL_SEQMSK_MASK;
#if CONFIG_SOC_MKW41Z4
ZLL->PHY_CTRL |= ZLL_IRQSTS_WAKE_IRQ_MASK;
#endif
ZLL->PHY_CTRL |= ZLL_PHY_CTRL_AUTOACK_MASK;
/*
* Clear all PP IRQ bits to avoid unexpected interrupts immediately
* after init disable all timer interrupts
*/
ZLL->IRQSTS = ZLL->IRQSTS;
/* Clear HW indirect queue */
ZLL->SAM_TABLE |= ZLL_SAM_TABLE_INVALIDATE_ALL_MASK;
/* Accept FrameVersion 0 and 1 packets, reject all others */
ZLL->PHY_CTRL &= ~ZLL_PHY_CTRL_PROMISCUOUS_MASK;
ZLL->RX_FRAME_FILTER &= ~ZLL_RX_FRAME_FILTER_FRM_VER_FILTER_MASK;
ZLL->RX_FRAME_FILTER = ZLL_RX_FRAME_FILTER_FRM_VER_FILTER(3) |
ZLL_RX_FRAME_FILTER_CMD_FT_MASK |
ZLL_RX_FRAME_FILTER_DATA_FT_MASK |
ZLL_RX_FRAME_FILTER_ACK_FT_MASK |
ZLL_RX_FRAME_FILTER_BEACON_FT_MASK;
/* Set prescaler to obtain 1 symbol (16us) timebase */
ZLL->TMR_PRESCALE = 0x05;
kw41z_tmr3_disable();
/* Compute warmup times (scaled to 16us) */
kw41z->rx_warmup_time = (XCVR_TSM->END_OF_SEQ &
XCVR_TSM_END_OF_SEQ_END_OF_RX_WU_MASK) >>
XCVR_TSM_END_OF_SEQ_END_OF_RX_WU_SHIFT;
kw41z->tx_warmup_time = (XCVR_TSM->END_OF_SEQ &
XCVR_TSM_END_OF_SEQ_END_OF_TX_WU_MASK) >>
XCVR_TSM_END_OF_SEQ_END_OF_TX_WU_SHIFT;
if (kw41z->rx_warmup_time & 0x0F) {
kw41z->rx_warmup_time = 1 + (kw41z->rx_warmup_time >> 4);
} else {
kw41z->rx_warmup_time = kw41z->rx_warmup_time >> 4;
}
if (kw41z->tx_warmup_time & 0x0F) {
kw41z->tx_warmup_time = 1 + (kw41z->tx_warmup_time >> 4);
} else {
kw41z->tx_warmup_time = kw41z->tx_warmup_time >> 4;
}
/* Set CCA threshold to -75 dBm */
ZLL->CCA_LQI_CTRL &= ~ZLL_CCA_LQI_CTRL_CCA1_THRESH_MASK;
ZLL->CCA_LQI_CTRL |= ZLL_CCA_LQI_CTRL_CCA1_THRESH(0xB5);
/* Set the default power level */
kw41z_set_txpower(dev, 0);
/* Adjust ACK delay to fulfill the 802.15.4 turnaround requirements */
ZLL->ACKDELAY &= ~ZLL_ACKDELAY_ACKDELAY_MASK;
ZLL->ACKDELAY |= ZLL_ACKDELAY_ACKDELAY(-8);
/* Adjust LQI compensation */
ZLL->CCA_LQI_CTRL &= ~ZLL_CCA_LQI_CTRL_LQI_OFFSET_COMP_MASK;
ZLL->CCA_LQI_CTRL |= ZLL_CCA_LQI_CTRL_LQI_OFFSET_COMP(96);
/* Enable the RxWatermark IRQ */
ZLL->PHY_CTRL &= ~(ZLL_PHY_CTRL_RX_WMRK_MSK_MASK);
/* Set Rx watermark level */
ZLL->RX_WTR_MARK = 0;
/* Set default channel to 2405 MHZ */
kw41z_set_channel(dev, KW41Z_DEFAULT_CHANNEL);
/* Unmask Transceiver Global Interrupts */
ZLL->PHY_CTRL &= ~ZLL_PHY_CTRL_TRCV_MSK_MASK;
/* Configure Radio IRQ */
NVIC_ClearPendingIRQ(Radio_1_IRQn);
IRQ_CONNECT(Radio_1_IRQn, RADIO_0_IRQ_PRIO, kw41z_isr, 0, 0);
return 0;
}
/* Bind the KW41Z radio to a freshly created network interface:
 * set its EUI-64 link address and run generic 802.15.4 init.
 */
static void kw41z_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct kw41z_context *ctx = dev->data;

#if defined(CONFIG_KW41_DBG_TRACE)
	kw41_dbg_idx = 0;
#endif

	ctx->iface = iface;
	net_if_set_link_addr(iface, get_mac(dev), 8, NET_LINK_IEEE802154);
	ieee802154_init(iface);
}
/*
 * API implementation: configure.
 *
 * No driver-specific configuration options are supported; every request
 * is accepted as a no-op so generic stack configuration does not fail.
 * Parameters are explicitly marked unused (consistent with
 * kw41z_attr_get) to avoid compiler warnings.
 */
static int kw41z_configure(const struct device *dev,
			   enum ieee802154_config_type type,
			   const struct ieee802154_config *config)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(type);
	ARG_UNUSED(config);

	return 0;
}
/* driver-allocated attribute memory - constant across all driver instances */
IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 11, 26);
/*
 * API implementation: attr_get.
 *
 * Only answers channel page/range queries (page zero, channels 11-26 as
 * declared by drv_attr above); all other attributes are delegated to
 * the generic helper's default handling.
 */
static int kw41z_attr_get(const struct device *dev, enum ieee802154_attr attr,
			  struct ieee802154_attr_value *value)
{
	ARG_UNUSED(dev);

	return ieee802154_attr_get_channel_page_and_range(
		attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_ZERO_OQPSK_2450_BPSK_868_915,
		&drv_attr.phy_supported_channels, value);
}
/* IEEE 802.15.4 radio API callbacks implemented by the KW41Z driver. */
static const struct ieee802154_radio_api kw41z_radio_api = {
	.iface_api.init = kw41z_iface_init,
	.get_capabilities = kw41z_get_capabilities,
	.cca = kw41z_cca,
	.set_channel = kw41z_set_channel,
	.filter = kw41z_filter,
	.set_txpower = kw41z_set_txpower,
	.start = kw41z_start,
	.stop = kw41z_stop,
	.tx = kw41z_tx,
	.configure = kw41z_configure,
	.attr_get = kw41z_attr_get,
};
/* Select the L2 layer and MTU according to the enabled network stack. */
#if defined(CONFIG_NET_L2_IEEE802154)
#define L2 IEEE802154_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(IEEE802154_L2)
#define MTU KW41Z_PSDU_LENGTH
#elif defined(CONFIG_NET_L2_OPENTHREAD)
#define L2 OPENTHREAD_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(OPENTHREAD_L2)
/* OpenThread requires a minimum IPv6 MTU of 1280 bytes (RFC 8200). */
#define MTU 1280
#endif

NET_DEVICE_DT_INST_DEFINE(
	0,
	kw41z_init,				/* Initialization Function */
	NULL,					/* No PM API support */
	&kw41z_context_data,			/* Context data */
	NULL,					/* Configuration info */
	CONFIG_IEEE802154_KW41Z_INIT_PRIO,	/* Initial priority */
	&kw41z_radio_api,			/* API interface functions */
	L2,					/* L2 */
	L2_CTX_TYPE,				/* L2 context type */
	MTU);					/* MTU size */
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_kw41z.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 9,742 |
```c
/*
*
*/
#define DT_DRV_COMPAT telink_b91_zb
#include "rf.h"
#include "stimer.h"
#define LOG_MODULE_NAME ieee802154_b91
#if defined(CONFIG_IEEE802154_DRIVER_LOG_LEVEL)
#define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
#else
#define LOG_LEVEL LOG_LEVEL_NONE
#endif
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);
#include <zephyr/random/random.h>
#include <zephyr/net/ieee802154_radio.h>
#include <zephyr/irq.h>
#if defined(CONFIG_NET_L2_OPENTHREAD)
#include <zephyr/net/openthread.h>
#endif
#include <zephyr/drivers/interrupt_controller/riscv_plic.h>
#include "ieee802154_b91.h"
/* B91 data structure */
static struct b91_data data;
/* Record the PAN ID (little-endian) used by the software RX filter. */
static int b91_set_pan_id(uint16_t pan_id)
{
	uint8_t encoded[B91_PAN_ID_SIZE];

	sys_put_le16(pan_id, encoded);
	memcpy(data.filter_pan_id, encoded, sizeof(encoded));
	return 0;
}
/* Record the short address (little-endian) used by the software RX filter. */
static int b91_set_short_addr(uint16_t short_addr)
{
	uint8_t encoded[B91_SHORT_ADDRESS_SIZE];

	sys_put_le16(short_addr, encoded);
	memcpy(data.filter_short_addr, encoded, sizeof(encoded));
	return 0;
}
/*
 * Record the extended (IEEE) address used by the software RX filter.
 *
 * NOTE(review): ieee_addr is assumed to hold at least
 * B91_IEEE_ADDRESS_SIZE bytes in the byte order expected by
 * b91_run_filter() - confirm against the caller (b91_filter).
 */
static int b91_set_ieee_addr(const uint8_t *ieee_addr)
{
	memcpy(data.filter_ieee_addr, ieee_addr, B91_IEEE_ADDRESS_SIZE);
	return 0;
}
/*
 * Software address filter applied to received frames.
 *
 * Accepts a frame only if its destination PAN ID matches the configured
 * filter or the broadcast PAN ID, and its destination address (short or
 * extended, per the frame-control addressing-mode bits) matches the
 * configured filter or, for short addressing, the broadcast address.
 *
 * NOTE(review): the fixed B91_*_OFFSET layout assumes destination
 * addressing is present at those offsets; frames with any other
 * addressing mode fall into the default case and are rejected.
 */
static bool b91_run_filter(uint8_t *rx_buffer)
{
	/* Check destination PAN Id: ours or broadcast */
	if (memcmp(&rx_buffer[B91_PAN_ID_OFFSET], data.filter_pan_id,
		   B91_PAN_ID_SIZE) != 0 &&
	    memcmp(&rx_buffer[B91_PAN_ID_OFFSET], B91_BROADCAST_ADDRESS,
		   B91_PAN_ID_SIZE) != 0) {
		return false;
	}

	/* Check destination address */
	switch (rx_buffer[B91_DEST_ADDR_TYPE_OFFSET] & B91_DEST_ADDR_TYPE_MASK) {
	case B91_DEST_ADDR_TYPE_SHORT:
		/* First check if the destination is broadcast */
		/* If not broadcast, check if length and address matches */
		if (memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], B91_BROADCAST_ADDRESS,
			   B91_SHORT_ADDRESS_SIZE) != 0 &&
		    memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], data.filter_short_addr,
			   B91_SHORT_ADDRESS_SIZE) != 0) {
			return false;
		}
		break;
	case B91_DEST_ADDR_TYPE_IEEE:
		/* If not broadcast, check if length and address matches */
		if ((net_if_get_link_addr(data.iface)->len != B91_IEEE_ADDRESS_SIZE) ||
		    memcmp(&rx_buffer[B91_DEST_ADDR_OFFSET], data.filter_ieee_addr,
			   B91_IEEE_ADDRESS_SIZE) != 0) {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}
/*
 * Return the 8-byte MAC (EUI-64) address, (re)generating it into the
 * driver context first.
 *
 * With CONFIG_IEEE802154_B91_RANDOM_MAC a fresh random address is drawn
 * on every call (made locally administered and non-multicast);
 * otherwise a fixed Telink vendor prefix plus Kconfig-supplied bytes
 * is used.
 */
static inline uint8_t *b91_get_mac(const struct device *dev)
{
	struct b91_data *b91 = dev->data;

#if defined(CONFIG_IEEE802154_B91_RANDOM_MAC)
	sys_rand_get(b91->mac_addr, sizeof(b91->mac_addr));

	/*
	 * Clear bit 0 to ensure it isn't a multicast address and set
	 * bit 1 to indicate address is locally administered and may
	 * not be globally unique.
	 */
	b91->mac_addr[0] = (b91->mac_addr[0] & ~0x01) | 0x02;
#else
	/* Vendor Unique Identifier */
	b91->mac_addr[0] = 0xC4;
	b91->mac_addr[1] = 0x19;
	b91->mac_addr[2] = 0xD1;
	b91->mac_addr[3] = 0x00;

	/* Extended Unique Identifier */
	b91->mac_addr[4] = CONFIG_IEEE802154_B91_MAC4;
	b91->mac_addr[5] = CONFIG_IEEE802154_B91_MAC5;
	b91->mac_addr[6] = CONFIG_IEEE802154_B91_MAC6;
	b91->mac_addr[7] = CONFIG_IEEE802154_B91_MAC7;
#endif

	return b91->mac_addr;
}
/*
 * Map an RSSI value (dBm) onto the 0..255 LQI scale.
 *
 * Readings below B91_RSSI_TO_LQI_MIN map to 0; the linear scaling
 * result is saturated at 255.
 */
static uint8_t b91_convert_rssi_to_lqi(int8_t rssi)
{
	uint32_t scaled;

	if (rssi < B91_RSSI_TO_LQI_MIN) {
		return 0;
	}

	scaled = B91_RSSI_TO_LQI_SCALE * (rssi - B91_RSSI_TO_LQI_MIN);

	return (scaled > 0xFF) ? 0xFF : (uint8_t)scaled;
}
/*
 * Read the RSSI byte the radio appends after the received frame data
 * (at index length + B91_RSSI_OFFSET, biased by -110 dBm) and store the
 * RSSI plus the LQI derived from it into the packet metadata.
 */
static void b91_update_rssi_and_lqi(struct net_pkt *pkt)
{
	int8_t rssi;
	uint8_t lqi;

	rssi = ((signed char)(data.rx_buffer
			      [data.rx_buffer[B91_LENGTH_OFFSET] + B91_RSSI_OFFSET])) - 110;
	lqi = b91_convert_rssi_to_lqi(rssi);

	net_pkt_set_ieee802154_lqi(pkt, lqi);
	net_pkt_set_ieee802154_rssi_dbm(pkt, rssi);
}
/*
 * Fill the shared TX DMA buffer with a frame payload.
 *
 * Buffer layout (see Telink SDK Dev Handbook, AN-21010600, section
 * 21.5.2.2): bytes 0-3 hold the DMA transfer length (little-endian),
 * byte 4 holds the on-air length (payload + FCS appended by hardware),
 * and the payload starts at B91_PAYLOAD_OFFSET.
 *
 * Returns 0 on success, -EINVAL if the payload does not fit.
 */
static int b91_set_tx_payload(uint8_t *payload, uint8_t payload_len)
{
	unsigned char rf_data_len;
	unsigned int rf_tx_dma_len;

	if (payload_len > (B91_TRX_LENGTH - B91_PAYLOAD_OFFSET - IEEE802154_FCS_LENGTH)) {
		return -EINVAL;
	}

	rf_data_len = payload_len + 1;
	rf_tx_dma_len = rf_tx_packet_dma_len(rf_data_len);
	/* Little-endian encode via the byteorder helper, consistent with
	 * the sys_put_le16() usage elsewhere in this driver.
	 */
	sys_put_le32(rf_tx_dma_len, data.tx_buffer);
	data.tx_buffer[4] = payload_len + IEEE802154_FCS_LENGTH;
	memcpy(data.tx_buffer + B91_PAYLOAD_OFFSET, payload, payload_len);

	return 0;
}
/* Enable ACK handling in the RX ISR (set while a TX awaits its ACK). */
static void b91_handle_ack_en(void)
{
	data.ack_handler_en = true;
}
/* Disable ACK handling in the RX ISR (cleared once the ACK wait ends). */
static void b91_handle_ack_dis(void)
{
	data.ack_handler_en = false;
}
/*
 * Process a received acknowledge frame: wrap it in a net_pkt, attach
 * RSSI/LQI metadata, hand it to ieee802154_handle_ack(), and wake the
 * transmitter blocked on ack_wait.
 *
 * NOTE(review): ack_wait is given even when ieee802154_handle_ack()
 * does not consume the packet - presumably any received ACK ends the
 * TX wait in b91_tx(); confirm that is intentional.
 */
static void b91_handle_ack(void)
{
	struct net_pkt *ack_pkt;

	/* allocate ack packet */
	ack_pkt = net_pkt_rx_alloc_with_buffer(data.iface, B91_ACK_FRAME_LEN,
					       AF_UNSPEC, 0, K_NO_WAIT);
	if (!ack_pkt) {
		LOG_ERR("No free packet available.");
		return;
	}

	/* update packet data */
	if (net_pkt_write(ack_pkt, data.rx_buffer + B91_PAYLOAD_OFFSET,
			  B91_ACK_FRAME_LEN) < 0) {
		LOG_ERR("Failed to write to a packet.");
		goto out;
	}

	/* update RSSI and LQI */
	b91_update_rssi_and_lqi(ack_pkt);

	/* init net cursor */
	net_pkt_cursor_init(ack_pkt);

	/* handle ack */
	if (ieee802154_handle_ack(data.iface, ack_pkt) != NET_OK) {
		LOG_INF("ACK packet not handled - releasing.");
	}

	/* release ack_wait semaphore */
	k_sem_give(&data.ack_wait);

out:
	net_pkt_unref(ack_pkt);
}
/*
 * Transmit an immediate acknowledge frame carrying the given sequence
 * number. Reuses the shared TX buffer; if staging the payload fails
 * the ACK is silently skipped.
 */
static void b91_send_ack(uint8_t seq_num)
{
	uint8_t ack_buf[] = { B91_ACK_TYPE, 0, seq_num };

	if (b91_set_tx_payload(ack_buf, sizeof(ack_buf))) {
		return;
	}

	rf_set_txmode();
	delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
	rf_tx_pkt(data.tx_buffer);
}
/*
 * RX interrupt handler.
 *
 * Validates the frame in the RX DMA buffer (CRC, length), handles ACK
 * frames while a transmitter is waiting for one, applies the software
 * address filter, sends a requested ACK, and finally forwards the
 * frame to the network stack with RSSI/LQI metadata. DMA is suspended
 * for the duration of the handler and re-enabled on every exit path.
 */
static void b91_rf_rx_isr(void)
{
	/* net_recv_data() returns a negative errno on failure, so this
	 * must be a signed int: with the previous uint8_t the
	 * `status < 0` check below was always false and the packet
	 * leaked whenever the stack rejected it.
	 */
	int status;
	uint8_t length;
	uint8_t *payload;
	struct net_pkt *pkt;

	/* disable DMA and clear IRQ flag */
	dma_chn_dis(DMA1);
	rf_clr_irq_status(FLD_RF_IRQ_RX);

	/* check CRC */
	if (rf_zigbee_packet_crc_ok(data.rx_buffer)) {
		/* get payload length (FCS is kept for raw/OpenThread mode) */
		if (IS_ENABLED(CONFIG_IEEE802154_RAW_MODE) ||
		    IS_ENABLED(CONFIG_NET_L2_OPENTHREAD)) {
			length = data.rx_buffer[B91_LENGTH_OFFSET];
		} else {
			length = data.rx_buffer[B91_LENGTH_OFFSET] - B91_FCS_LENGTH;
		}

		/* check length */
		if ((length < B91_PAYLOAD_MIN) || (length > B91_PAYLOAD_MAX)) {
			LOG_ERR("Invalid length");
			goto exit;
		}

		/* get payload */
		payload = (uint8_t *)(data.rx_buffer + B91_PAYLOAD_OFFSET);

		/* handle acknowledge packet if enabled */
		if ((length == (B91_ACK_FRAME_LEN + B91_FCS_LENGTH)) &&
		    ((payload[B91_FRAME_TYPE_OFFSET] & B91_FRAME_TYPE_MASK) == B91_ACK_TYPE)) {
			if (data.ack_handler_en) {
				b91_handle_ack();
			}
			goto exit;
		}

		/* run filter (check PAN ID and destination address) */
		if (b91_run_filter(payload) == false) {
			LOG_DBG("Packet received is not addressed to me");
			goto exit;
		}

		/* send ack if requested */
		if (payload[B91_FRAME_TYPE_OFFSET] & B91_ACK_REQUEST) {
			b91_send_ack(payload[B91_DSN_OFFSET]);
		}

		/* get packet pointer from NET stack */
		pkt = net_pkt_rx_alloc_with_buffer(data.iface, length, AF_UNSPEC, 0, K_NO_WAIT);
		if (!pkt) {
			LOG_ERR("No pkt available");
			goto exit;
		}

		/* update packet data */
		if (net_pkt_write(pkt, payload, length)) {
			LOG_ERR("Failed to write to a packet.");
			net_pkt_unref(pkt);
			goto exit;
		}

		/* update RSSI and LQI parameters */
		b91_update_rssi_and_lqi(pkt);

		/* transfer data to NET stack */
		status = net_recv_data(data.iface, pkt);
		if (status < 0) {
			LOG_ERR("RCV Packet dropped by NET stack: %d", status);
			net_pkt_unref(pkt);
		}
	}

exit:
	dma_chn_en(DMA1);
}
/*
 * TX-done interrupt: clear the TX IRQ, release the transmitter waiting
 * on tx_wait, and return the radio to receive mode.
 */
static void b91_rf_tx_isr(void)
{
	/* clear irq status */
	rf_clr_irq_status(FLD_RF_IRQ_TX);

	/* release tx semaphore */
	k_sem_give(&data.tx_wait);

	/* set to rx mode */
	rf_set_rxmode();
}
/* Top-level radio IRQ dispatcher; RX is checked before TX. */
static void b91_rf_isr(void)
{
	if (rf_get_irq_status(FLD_RF_IRQ_RX)) {
		b91_rf_rx_isr();
		return;
	}

	if (rf_get_irq_status(FLD_RF_IRQ_TX)) {
		b91_rf_tx_isr();
		return;
	}

	/* Unexpected interrupt source: clear everything. */
	rf_clr_irq_status(FLD_RF_IRQ_ALL);
}
/*
 * One-time driver initialization: create the TX/ACK semaphores,
 * configure the RF core for 802.15.4 (Zigbee 250K) operation with
 * TX/RX DMA, connect and enable the radio interrupt, and mark the
 * radio as started. Always returns 0.
 */
static int b91_init(const struct device *dev)
{
	struct b91_data *b91 = dev->data;

	/* init semaphores */
	k_sem_init(&b91->tx_wait, 0, 1);
	k_sem_init(&b91->ack_wait, 0, 1);

	/* init rf module */
	rf_mode_init();
	rf_set_zigbee_250K_mode();
	rf_set_tx_dma(2, B91_TRX_LENGTH);
	rf_set_rx_dma(data.rx_buffer, 3, B91_TRX_LENGTH);
	rf_set_rxmode();

	/* init IRQs */
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), b91_rf_isr, 0, 0);
	riscv_plic_irq_enable(DT_INST_IRQN(0));
	riscv_plic_set_priority(DT_INST_IRQN(0), DT_INST_IRQ(0, priority));
	rf_set_irq_mask(FLD_RF_IRQ_RX | FLD_RF_IRQ_TX);

	/* init data variables */
	data.is_started = true;
	data.ack_handler_en = false;
	data.current_channel = 0;

	return 0;
}
/* API implementation: iface_init - attach the radio to the interface,
 * publish its EUI-64 link address, and run generic 802.15.4 init.
 */
static void b91_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct b91_data *ctx = dev->data;

	net_if_set_link_addr(iface, b91_get_mac(dev), B91_IEEE_ADDRESS_SIZE,
			     NET_LINK_IEEE802154);
	ctx->iface = iface;
	ieee802154_init(iface);
}
/*
 * API implementation: get_capabilities.
 *
 * Reports hardware FCS handling, address filtering, and TX/RX
 * acknowledgement support (ACKs are produced/consumed by this driver
 * in software - see b91_send_ack()/b91_handle_ack()).
 */
static enum ieee802154_hw_caps b91_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return IEEE802154_HW_FCS | IEEE802154_HW_FILTER |
	       IEEE802154_HW_TX_RX_ACK | IEEE802154_HW_RX_TX_ACK;
}
/*
 * API implementation: cca.
 *
 * Polls the radio RSSI for up to B91_CCA_TIME_MAX_US; reports the
 * channel clear (0) as soon as one reading falls below the configured
 * threshold, otherwise returns -EBUSY.
 */
static int b91_cca(const struct device *dev)
{
	ARG_UNUSED(dev);

	unsigned int t1 = stimer_get_tick();

	while (!clock_time_exceed(t1, B91_CCA_TIME_MAX_US)) {
		if (rf_get_rssi() < CONFIG_IEEE802154_B91_CCA_RSSI_THRESHOLD) {
			return 0;
		}
	}

	return -EBUSY;
}
/*
 * API implementation: set_channel.
 *
 * Accepts IEEE 802.15.4 channels 11-26 (2.4 GHz page); returns -EINVAL
 * above 26 and -ENOTSUP below 11. Retunes the radio (and re-enters RX
 * mode) only when the channel actually changes.
 */
static int b91_set_channel(const struct device *dev, uint16_t channel)
{
	ARG_UNUSED(dev);

	if (channel > 26) {
		return -EINVAL;
	}

	if (channel < 11) {
		return -ENOTSUP;
	}

	if (data.current_channel != channel) {
		data.current_channel = channel;
		rf_set_chn(B91_LOGIC_CHANNEL_TO_PHYSICAL(channel));
		rf_set_rxmode();
	}

	return 0;
}
/* API implementation: filter - install a PAN ID, short address or
 * extended address into the software RX filter (set-only; clearing a
 * filter is not supported).
 */
static int b91_filter(const struct device *dev,
		      bool set,
		      enum ieee802154_filter_type type,
		      const struct ieee802154_filter *filter)
{
	if (!set) {
		return -ENOTSUP;
	}

	switch (type) {
	case IEEE802154_FILTER_TYPE_IEEE_ADDR:
		return b91_set_ieee_addr(filter->ieee_addr);
	case IEEE802154_FILTER_TYPE_SHORT_ADDR:
		return b91_set_short_addr(filter->short_addr);
	case IEEE802154_FILTER_TYPE_PAN_ID:
		return b91_set_pan_id(filter->pan_id);
	default:
		return -ENOTSUP;
	}
}
/* API implementation: set_txpower - clamp the request to the supported
 * dBm range and program the matching entry of the power-level table.
 */
static int b91_set_txpower(const struct device *dev, int16_t dbm)
{
	int16_t level = dbm;

	ARG_UNUSED(dev);

	/* saturate to the supported Min/Max range */
	if (level < B91_TX_POWER_MIN) {
		level = B91_TX_POWER_MIN;
	}
	if (level > B91_TX_POWER_MAX) {
		level = B91_TX_POWER_MAX;
	}

	rf_set_power_level(b91_tx_pwr_lt[level - B91_TX_POWER_MIN]);

	return 0;
}
/*
 * API implementation: start.
 *
 * Idempotent: enters RX mode and re-enables the radio interrupt only
 * if the radio is currently stopped. Always returns 0.
 */
static int b91_start(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* check if RF is already started */
	if (!data.is_started) {
		rf_set_rxmode();
		delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
		riscv_plic_irq_enable(DT_INST_IRQN(0));
		data.is_started = true;
	}

	return 0;
}
/*
 * API implementation: stop.
 *
 * Idempotent: disables the radio interrupt and powers down the
 * transceiver only if the radio is currently started. Always returns 0.
 */
static int b91_stop(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* check if RF is already stopped */
	if (data.is_started) {
		riscv_plic_irq_disable(DT_INST_IRQN(0));
		rf_set_tx_rx_off();
		delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
		data.is_started = false;
	}

	return 0;
}
/*
 * API implementation: tx.
 *
 * Transmits one fragment in direct mode: stages the payload into the
 * shared TX buffer, switches the radio to TX, then blocks until the
 * TX-done interrupt fires (up to B91_TX_WAIT_TIME_MS; -EIO on
 * timeout). If the frame requests an ACK, additionally blocks until
 * the matching ACK arrives or B91_ACK_WAIT_TIME_MS expires, returning
 * k_sem_take()'s result (e.g. -EAGAIN on timeout).
 */
static int b91_tx(const struct device *dev,
		  enum ieee802154_tx_mode mode,
		  struct net_pkt *pkt,
		  struct net_buf *frag)
{
	ARG_UNUSED(pkt);

	int status;
	struct b91_data *b91 = dev->data;

	/* check for supported mode */
	if (mode != IEEE802154_TX_MODE_DIRECT) {
		LOG_DBG("TX mode %d not supported", mode);
		return -ENOTSUP;
	}

	/* prepare tx buffer */
	status = b91_set_tx_payload(frag->data, frag->len);
	if (status) {
		return status;
	}

	/* reset semaphores */
	k_sem_reset(&b91->tx_wait);
	k_sem_reset(&b91->ack_wait);

	/* start transmission */
	rf_set_txmode();
	delay_us(CONFIG_IEEE802154_B91_SET_TXRX_DELAY_US);
	rf_tx_pkt(data.tx_buffer);

	/* wait for tx done */
	status = k_sem_take(&b91->tx_wait, K_MSEC(B91_TX_WAIT_TIME_MS));
	if (status != 0) {
		rf_set_rxmode();
		return -EIO;
	}

	/* wait for ACK if requested */
	if (frag->data[B91_FRAME_TYPE_OFFSET] & B91_ACK_REQUEST) {
		b91_handle_ack_en();
		status = k_sem_take(&b91->ack_wait, K_MSEC(B91_ACK_WAIT_TIME_MS));
		b91_handle_ack_dis();
	}

	return status;
}
/* API implementation: ed_scan - energy detection scan is not
 * implemented by this driver; always returns -ENOTSUP.
 */
static int b91_ed_scan(const struct device *dev, uint16_t duration,
		       energy_scan_done_cb_t done_cb)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(duration);
	ARG_UNUSED(done_cb);

	/* ed_scan not supported */
	return -ENOTSUP;
}
/* API implementation: configure - no driver-specific configuration
 * options are implemented; always returns -ENOTSUP.
 */
static int b91_configure(const struct device *dev,
			 enum ieee802154_config_type type,
			 const struct ieee802154_config *config)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(type);
	ARG_UNUSED(config);

	/* configure not supported */
	return -ENOTSUP;
}
/* driver-allocated attribute memory - constant across all driver instances */
IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 11, 26);
/* API implementation: attr_get - only answers channel page/range
 * queries (page zero, channels 11-26 per drv_attr); everything else is
 * left to the generic helper's default handling.
 */
static int b91_attr_get(const struct device *dev, enum ieee802154_attr attr,
			struct ieee802154_attr_value *value)
{
	ARG_UNUSED(dev);

	return ieee802154_attr_get_channel_page_and_range(
		attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_ZERO_OQPSK_2450_BPSK_868_915,
		&drv_attr.phy_supported_channels, value);
}
/* IEEE 802.15.4 radio API callbacks implemented by this driver. */
static const struct ieee802154_radio_api b91_radio_api = {
	.iface_api.init = b91_iface_init,
	.get_capabilities = b91_get_capabilities,
	.cca = b91_cca,
	.set_channel = b91_set_channel,
	.filter = b91_filter,
	.set_txpower = b91_set_txpower,
	.start = b91_start,
	.stop = b91_stop,
	.tx = b91_tx,
	.ed_scan = b91_ed_scan,
	.configure = b91_configure,
	.attr_get = b91_attr_get,
};
/* Select the L2 layer and MTU according to the enabled network stack. */
#if defined(CONFIG_NET_L2_IEEE802154)
#define L2 IEEE802154_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(IEEE802154_L2)
#define MTU 125
#elif defined(CONFIG_NET_L2_OPENTHREAD)
#define L2 OPENTHREAD_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(OPENTHREAD_L2)
/* OpenThread requires a minimum IPv6 MTU of 1280 bytes (RFC 8200). */
#define MTU 1280
#endif

/* IEEE802154 driver registration: as a network device when an L2 is
 * available, otherwise (raw mode) as a plain device.
 */
#if defined(CONFIG_NET_L2_IEEE802154) || defined(CONFIG_NET_L2_OPENTHREAD)
NET_DEVICE_DT_INST_DEFINE(0, b91_init, NULL, &data, NULL,
			  CONFIG_IEEE802154_B91_INIT_PRIO,
			  &b91_radio_api, L2, L2_CTX_TYPE, MTU);
#else
DEVICE_DT_INST_DEFINE(0, b91_init, NULL, &data, NULL,
		      POST_KERNEL, CONFIG_IEEE802154_B91_INIT_PRIO,
		      &b91_radio_api);
#endif
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_b91.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,531 |
```unknown
# NXP MCR20A configuration options
menuconfig IEEE802154_MCR20A
bool "NXP MCR20A Driver support"
default y
depends on DT_HAS_NXP_MCR20A_ENABLED
select SPI
if IEEE802154_MCR20A
config MCR20A_IS_PART_OF_KW2XD_SIP
bool "MCR20A device is part of KW2xD SiP"
help
If this option is set, the driver does not perform a hardware
reset and the CLK_OUT frequency is not set, instead these settings
are performed during the initialization of the SoC.
choice
prompt "CLK_OUT frequency"
default MCR20A_CLK_OUT_4MHZ if MCR20A_IS_PART_OF_KW2XD_SIP
default MCR20A_CLK_OUT_DISABLED
help
Configuration of the MCR20A clock output pin.
config MCR20A_CLK_OUT_DISABLED
bool "Disabled"
config MCR20A_CLK_OUT_32MHZ
bool "32 MHz"
config MCR20A_CLK_OUT_16MHZ
bool "16 MHz"
config MCR20A_CLK_OUT_8MHZ
bool "8 MHz"
config MCR20A_CLK_OUT_4MHZ
bool "4 MHz"
config MCR20A_CLK_OUT_1MHZ
bool "1 MHz"
config MCR20A_CLK_OUT_250KHZ
bool "250 kHz"
config MCR20A_CLK_OUT_62500HZ
bool "62500 Hz"
config MCR20A_CLK_OUT_32768HZ
bool "32768 Hz"
endchoice
config IEEE802154_MCR20A_RX_STACK_SIZE
int "Driver's internal RX thread stack size"
default 800
help
This option sets the driver's stack size for its internal RX thread.
	  The default value should be sufficient, but if it proves to be
	  too small, this option makes it easy to adjust the size.
config IEEE802154_MCR20A_INIT_PRIO
int "MCR20A initialization priority"
default 80
help
Set the initialization priority number. Do not mess with it unless
	  you know what you are doing. Beware: mcr20a requires gpio and spi to
	  be ready first (and sometimes gpio should be the very first, as spi
	  might need it too). And of course it has to start before the net stack.
endif
``` | /content/code_sandbox/drivers/ieee802154/Kconfig.mcr20a | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 519 |
```unknown
# IEEE 802.15.4 driver configuration options
#
# IEEE 802.15.4 options
#
menuconfig IEEE802154
bool "IEEE 802.15.4 drivers"
depends on NETWORKING
default y if NET_L2_PHY_IEEE802154
if IEEE802154
config IEEE802154_NET_IF_NO_AUTO_START
bool "IEEE 802.15.4 interface without auto-start"
help
This option allows user to set any configuration and/or filter before
the radio becomes operational. For instance, the EUI-64 value can be
configured using net_if_set_link_addr(iface, mac, 8,
NET_LINK_IEEE802154).
When all configurations are done net_if_up() has to be invoked to
bring the interface up.
This option can be useful when using OpenThread or Zigbee. If you
have any doubt about this option leave it as default value.
if !NET_L2_PHY_IEEE802154
config IEEE802154_RAW_MODE
bool "IEEE 802.15.4 driver without the MAC stack"
select NET_RAW_MODE
help
This option enables using the drivers in a so-called "raw" mode,
i.e. without a MAC stack (the net L2 layer for 802.15.4 will not
be built). Used only for very specific cases, such as wpan_serial
and wpanusb samples.
endif # !NET_L2_PHY_IEEE802154
config IEEE802154_RDEV
bool
help
PHY is a ranging-capable device (RDEV)
config IEEE802154_VENDOR_OUI_ENABLE
bool "Support setting Vendor Organizationally Unique Identifier"
help
This option enables setting custom vendor
OUI using IEEE802154_VENDOR_OUI. After enabling,
user is obliged to set IEEE802154_VENDOR_OUI value,
as this option has no default value.
if IEEE802154_VENDOR_OUI_ENABLE
config IEEE802154_VENDOR_OUI
int "Vendor Organizationally Unique Identifier"
help
Custom vendor OUI, which makes 24 most-significant
bits of MAC address
endif # IEEE802154_VENDOR_OUI_ENABLE
source "drivers/ieee802154/Kconfig.b91"
source "drivers/ieee802154/Kconfig.cc2520"
source "drivers/ieee802154/Kconfig.kw41z"
source "drivers/ieee802154/Kconfig.mcr20a"
source "drivers/ieee802154/Kconfig.nrf5"
source "drivers/ieee802154/Kconfig.cc1200"
source "drivers/ieee802154/Kconfig.cc13xx_cc26xx"
source "drivers/ieee802154/Kconfig.rf2xx"
source "drivers/ieee802154/Kconfig.dw1000"
source "drivers/ieee802154/Kconfig.uart_pipe"
config IEEE802154_CSL_ENDPOINT
bool "Support for CSL Endpoint"
help
Make this device a CSL (coordinated sampled listening) endpoint with delayed
reception handling and CSL IE injection.
config IEEE802154_CSL_DEBUG
bool "Support for CSL debugging"
depends on IEEE802154_CSL_ENDPOINT
help
Enable support for CSL debugging by avoiding sleep state in favor of receive state.
module = IEEE802154_DRIVER
module-str = IEEE 802.15.4 driver
module-help = Sets log level for IEEE 802.15.4 Device Drivers.
source "subsys/logging/Kconfig.template.log_config"
endif # IEEE802154
``` | /content/code_sandbox/drivers/ieee802154/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 732 |
```objective-c
/*
*
*
* References are to the IEEE 802.15.4-2020 standard.
*/
#ifndef ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC13XX_CC26XX_H_
#define ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC13XX_CC26XX_H_
#include <zephyr/kernel.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ieee802154.h>
#include <zephyr/net/ieee802154_radio.h>
#include <ti/drivers/rf/RF.h>
#include <driverlib/rf_common_cmd.h>
#include <driverlib/rf_data_entry.h>
#include <driverlib/rf_ieee_cmd.h>
#include <driverlib/rf_mailbox.h>
/* For O-QPSK the physical and MAC timing symbol rates are the same, see section 12.3.3. */
#define IEEE802154_2450MHZ_OQPSK_SYMBOLS_PER_SECOND \
IEEE802154_PHY_SYMBOLS_PER_SECOND(IEEE802154_PHY_OQPSK_780_TO_2450MHZ_SYMBOL_PERIOD_NS)
/* PHY PIB attribute phyCcaMode - CCA Mode 3: Carrier sense with energy above threshold, see
* section 11.3, table 11-2 and section 10.2.8
*/
#define IEEE802154_PHY_CCA_MODE 3
#define IEEE802154_PHY_SHR_DURATION 10 /* in symbols, 8 preamble and 2 SFD, see section 12.1.2 */
#define IEEE802154_PHY_SYMBOLS_PER_OCTET 2 /* see section 12.2.1 */
/* ACK is 2 bytes for PHY header + 2 bytes MAC header + 2 bytes MAC footer */
#define IEEE802154_ACK_FRAME_OCTETS 6
/* IEEE 802.15.4-2006 MAC PIB attributes (7.4.2)
*
* The macAckWaitDuration attribute does not include aUnitBackoffPeriod for
* non-beacon enabled PANs (See IEEE 802.15.4-2006 7.5.6.4.2)
*/
#define IEEE802154_MAC_ACK_WAIT_DURATION \
(IEEE802154_PHY_A_TURNAROUND_TIME_DEFAULT + IEEE802154_PHY_SHR_DURATION + \
IEEE802154_ACK_FRAME_OCTETS * IEEE802154_PHY_SYMBOLS_PER_OCTET)
#define CC13XX_CC26XX_RAT_CYCLES_PER_SECOND 4000000
#define CC13XX_CC26XX_NUM_RX_BUF 2
/* Three additional bytes for length, RSSI and correlation values from CPE. */
#define CC13XX_CC26XX_RX_BUF_SIZE (IEEE802154_MAX_PHY_PACKET_SIZE + 3)
#define CC13XX_CC26XX_CPE0_IRQ (INT_RFC_CPE_0 - 16)
#define CC13XX_CC26XX_CPE1_IRQ (INT_RFC_CPE_1 - 16)
#define CC13XX_CC26XX_RECEIVER_SENSITIVITY -100
#define CC13XX_CC26XX_INVALID_RSSI INT8_MIN
/* Per-instance state for the CC13xx/CC26xx 2.4 GHz IEEE 802.15.4 driver. */
struct ieee802154_cc13xx_cc26xx_data {
	RF_Handle rf_handle;	/* TI RF driver handle */
	RF_Object rf_object;	/* storage backing rf_handle */

	struct net_if *iface;
	uint8_t mac[8]; /* in big endian */

	/* NOTE(review): presumably serializes the TX path - usage is in
	 * the corresponding .c file, not visible here.
	 */
	struct k_mutex tx_mutex;

	/* RX queue with its entry descriptors and data buffers */
	dataQueue_t rx_queue;
	rfc_dataEntryPointer_t rx_entry[CC13XX_CC26XX_NUM_RX_BUF];
	uint8_t rx_data[CC13XX_CC26XX_NUM_RX_BUF]
		       [CC13XX_CC26XX_RX_BUF_SIZE] __aligned(4);

	/* Radio command structures; volatile because the RF core
	 * updates them asynchronously.
	 */
	volatile rfc_CMD_FS_t cmd_fs;
	volatile rfc_CMD_IEEE_CCA_REQ_t cmd_ieee_cca_req;
	volatile rfc_CMD_IEEE_RX_t cmd_ieee_rx;
	volatile rfc_CMD_IEEE_CSMA_t cmd_ieee_csma;
	volatile rfc_CMD_IEEE_TX_t cmd_ieee_tx;
	volatile rfc_CMD_IEEE_RX_ACK_t cmd_ieee_rx_ack;
	volatile rfc_CMD_RADIO_SETUP_t cmd_radio_setup;

	volatile int16_t saved_cmdhandle;
};
#endif /* ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC13XX_CC26XX_H_ */
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_cc13xx_cc26xx.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 889 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC13XX_CC26XX_SUBG_H_
#define ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC13XX_CC26XX_SUBG_H_
#include <zephyr/kernel.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ieee802154.h>
#include <zephyr/net/ieee802154_radio.h>
#include <ti/drivers/rf/RF.h>
#include <driverlib/rf_common_cmd.h>
#include <driverlib/rf_data_entry.h>
#include <driverlib/rf_ieee_cmd.h>
#include <driverlib/rf_prop_cmd.h>
#include <driverlib/rf_mailbox.h>
#define CC13XX_CC26XX_NUM_RX_BUF \
CONFIG_IEEE802154_CC13XX_CC26XX_SUB_GHZ_NUM_RX_BUF
/* Three additional bytes for length, RSSI and status values from CPE */
#define CC13XX_CC26XX_RX_BUF_SIZE (IEEE802154_MAX_PHY_PACKET_SIZE + 3)
#define CC13XX_CC26XX_TX_BUF_SIZE (IEEE802154_PHY_SUN_FSK_PHR_LEN + IEEE802154_MAX_PHY_PACKET_SIZE)
#define CC13XX_CC26XX_INVALID_RSSI INT8_MIN
struct ieee802154_cc13xx_cc26xx_subg_data {
/* protects writable data and serializes access to the API */
struct k_sem lock;
RF_Handle rf_handle;
RF_Object rf_object;
struct net_if *iface;
uint8_t mac[8]; /* in big endian */
bool is_up;
dataQueue_t rx_queue;
rfc_dataEntryPointer_t rx_entry[CC13XX_CC26XX_NUM_RX_BUF];
uint8_t rx_data[CC13XX_CC26XX_NUM_RX_BUF][CC13XX_CC26XX_RX_BUF_SIZE];
uint8_t tx_data[CC13XX_CC26XX_TX_BUF_SIZE];
/* Common Radio Commands */
volatile rfc_CMD_FS_t cmd_fs;
/* Sub-GHz Radio Commands */
volatile rfc_CMD_PROP_RX_ADV_t cmd_prop_rx_adv;
volatile rfc_CMD_PROP_TX_ADV_t cmd_prop_tx_adv;
volatile rfc_propRxOutput_t cmd_prop_rx_adv_output;
volatile rfc_CMD_PROP_CS_t cmd_prop_cs;
RF_CmdHandle rx_cmd_handle;
};
#endif /* ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC13XX_CC26XX_SUBG_H_ */
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_cc13xx_cc26xx_subg.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 513 |
```unknown
# ATMEL AT86RF23x/212x configuration options
menuconfig IEEE802154_RF2XX
bool "ATMEL RF2XX Driver support"
default y
depends on DT_HAS_ATMEL_RF2XX_ENABLED
select SPI
select GPIO
if IEEE802154_RF2XX
config IEEE802154_RF2XX_RX_STACK_SIZE
int "Driver's internal RX thread stack size"
default 800
help
This option sets the driver's stack size for its internal RX thread.
	  The default value should be sufficient, but if it proves to be
	  too small, this option makes it easy to adjust the size.
config IEEE802154_RF2XX_INIT_PRIO
int "RF2X initialization priority"
default 80
help
Set the initialization priority number. Do not mess with it unless
	  you know what you are doing. Beware: rf2xx requires gpio and spi to
	  be ready first (and sometimes gpio should be the very first, as spi
	  might need it too). And of course it has to start before the net stack.
endif
``` | /content/code_sandbox/drivers/ieee802154/Kconfig.rf2xx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 234 |
```c
/* ieee802154_nrf5.c - nRF5 802.15.4 driver */
/*
*
*/
#define DT_DRV_COMPAT nordic_nrf_ieee802154
#define LOG_MODULE_NAME ieee802154_nrf5
#if defined(CONFIG_IEEE802154_DRIVER_LOG_LEVEL)
#define LOG_LEVEL CONFIG_IEEE802154_DRIVER_LOG_LEVEL
#else
#define LOG_LEVEL LOG_LEVEL_NONE
#endif
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/debug/stack.h>
#include <soc.h>
#if defined(CONFIG_TRUSTED_EXECUTION_NONSECURE) && defined(NRF_FICR_S)
#include <soc_secure.h>
#else
#include <hal/nrf_ficr.h>
#endif
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/debug/stack.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#if defined(CONFIG_NET_L2_OPENTHREAD)
#include <zephyr/net/openthread.h>
#include <zephyr/net/ieee802154_radio_openthread.h>
#endif
#include <zephyr/sys/byteorder.h>
#include <string.h>
#include <zephyr/random/random.h>
#include <zephyr/net/ieee802154_radio.h>
#include <zephyr/irq.h>
#include "ieee802154_nrf5.h"
#include "nrf_802154.h"
#include "nrf_802154_const.h"
#if defined(CONFIG_NRF_802154_SER_HOST)
#include "nrf_802154_serialization_error.h"
#endif
struct nrf5_802154_config {
void (*irq_config_func)(const struct device *dev);
};
static struct nrf5_802154_data nrf5_data;
#if defined(CONFIG_IEEE802154_RAW_MODE)
static const struct device *nrf5_dev;
#endif
#define DRX_SLOT_RX 0 /* Delayed reception window ID */
#define NSEC_PER_TEN_SYMBOLS (10 * IEEE802154_PHY_OQPSK_780_TO_2450MHZ_SYMBOL_PERIOD_NS)
#if defined(CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE)
#if defined(CONFIG_SOC_NRF5340_CPUAPP)
#if defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
#error "NRF_UICR->OTP is not supported to read from non-secure"
#else
#define EUI64_ADDR (NRF_UICR->OTP)
#endif /* CONFIG_TRUSTED_EXECUTION_NONSECURE */
#else
#define EUI64_ADDR (NRF_UICR->CUSTOMER)
#endif /* CONFIG_SOC_NRF5340_CPUAPP */
#endif /* CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE */
#if defined(CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE)
#define EUI64_ADDR_HIGH CONFIG_IEEE802154_NRF5_UICR_EUI64_REG
#define EUI64_ADDR_LOW (CONFIG_IEEE802154_NRF5_UICR_EUI64_REG + 1)
#else
#define EUI64_ADDR_HIGH 0
#define EUI64_ADDR_LOW 1
#endif /* CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE */
/* Convenience defines for RADIO */
#define NRF5_802154_DATA(dev) \
((struct nrf5_802154_data * const)(dev)->data)
#define NRF5_802154_CFG(dev) \
((const struct nrf5_802154_config * const)(dev)->config)
#if CONFIG_IEEE802154_VENDOR_OUI_ENABLE
#define IEEE802154_NRF5_VENDOR_OUI CONFIG_IEEE802154_VENDOR_OUI
#else
#define IEEE802154_NRF5_VENDOR_OUI (uint32_t)0xF4CE36
#endif
/*
 * Return the nRF5 radio device instance. In raw mode the pointer
 * captured at init time is used; otherwise the device is looked up
 * through the bound network interface.
 */
static inline const struct device *nrf5_get_device(void)
{
#if defined(CONFIG_IEEE802154_RAW_MODE)
	return nrf5_dev;
#else
	return net_if_get_device(nrf5_data.iface);
#endif
}
/*
 * Compose the 8-byte EUI-64 into mac (most significant byte first).
 *
 * Without CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE: the first 3 bytes
 * are the vendor OUI (Kconfig override or Nordic's default) and the
 * remainder comes from the factory device ID (FICR via secure read
 * where required, or BLE.ADDR on nRF54H). With it: the full EUI-64 is
 * read from the UICR registers programmed during production.
 */
static void nrf5_get_eui64(uint8_t *mac)
{
	uint64_t factoryAddress;
	uint32_t index = 0;

#if !defined(CONFIG_IEEE802154_NRF5_UICR_EUI64_ENABLE)
	uint32_t deviceid[2];

	/* Set the MAC Address Block Larger (MA-L) formerly called OUI. */
	mac[index++] = (IEEE802154_NRF5_VENDOR_OUI >> 16) & 0xff;
	mac[index++] = (IEEE802154_NRF5_VENDOR_OUI >> 8) & 0xff;
	mac[index++] = IEEE802154_NRF5_VENDOR_OUI & 0xff;

#if defined(NRF54H_SERIES)
	/* Can't access SICR with device id on a radio core. Use BLE.ADDR. */
	deviceid[0] = NRF_FICR->BLE.ADDR[0];
	deviceid[1] = NRF_FICR->BLE.ADDR[1];
#elif defined(CONFIG_TRUSTED_EXECUTION_NONSECURE) && defined(NRF_FICR_S)
	soc_secure_read_deviceid(deviceid);
#else
	deviceid[0] = nrf_ficr_deviceid_get(NRF_FICR, 0);
	deviceid[1] = nrf_ficr_deviceid_get(NRF_FICR, 1);
#endif

	factoryAddress = (uint64_t)deviceid[EUI64_ADDR_HIGH] << 32;
	factoryAddress |= deviceid[EUI64_ADDR_LOW];
#else
	/* Use device identifier assigned during the production. */
	factoryAddress = (uint64_t)EUI64_ADDR[EUI64_ADDR_HIGH] << 32;
	factoryAddress |= EUI64_ADDR[EUI64_ADDR_LOW];
#endif
	/* Copy the remaining (8 - index) bytes of the factory address. */
	memcpy(mac + index, &factoryAddress, sizeof(factoryAddress) - index);
}
/*
 * Dedicated RX thread: takes received frames from the driver's FIFO,
 * converts each into a net_pkt with LQI/RSSI (and optional timestamp /
 * OpenThread metadata), hands it to the network stack, and returns the
 * PSDU buffer to the nRF 802.15.4 driver on both success and failure
 * paths.
 */
static void nrf5_rx_thread(void *arg1, void *arg2, void *arg3)
{
	struct nrf5_802154_data *nrf5_radio = (struct nrf5_802154_data *)arg1;
	struct net_pkt *pkt;
	struct nrf5_802154_rx_frame *rx_frame;
	uint8_t pkt_len;
	uint8_t *psdu;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	while (1) {
		pkt = NULL;
		rx_frame = NULL;

		LOG_DBG("Waiting for frame");

		rx_frame = k_fifo_get(&nrf5_radio->rx_fifo, K_FOREVER);

		__ASSERT_NO_MSG(rx_frame->psdu);

		/* rx_mpdu contains length, psdu, fcs|lqi
		 * The last 2 bytes contain LQI or FCS, depending if
		 * automatic CRC handling is enabled or not, respectively.
		 */
		if (IS_ENABLED(CONFIG_IEEE802154_NRF5_FCS_IN_LENGTH)) {
			pkt_len = rx_frame->psdu[0];
		} else {
			pkt_len = rx_frame->psdu[0] - IEEE802154_FCS_LENGTH;
		}

#if defined(CONFIG_NET_BUF_DATA_SIZE)
		__ASSERT_NO_MSG(pkt_len <= CONFIG_NET_BUF_DATA_SIZE);
#endif

		LOG_DBG("Frame received");

		/* Block the RX thread until net_pkt is available, so that we
		 * don't drop already ACKed frame in case of temporary net_pkt
		 * scarcity. The nRF 802154 radio driver will accumulate any
		 * incoming frames until it runs out of internal buffers (and
		 * thus stops acknowledging consecutive frames).
		 */
		pkt = net_pkt_rx_alloc_with_buffer(nrf5_radio->iface, pkt_len,
						   AF_UNSPEC, 0, K_FOREVER);

		if (net_pkt_write(pkt, rx_frame->psdu + 1, pkt_len)) {
			goto drop;
		}

		net_pkt_set_ieee802154_lqi(pkt, rx_frame->lqi);
		net_pkt_set_ieee802154_rssi_dbm(pkt, rx_frame->rssi);
		net_pkt_set_ieee802154_ack_fpb(pkt, rx_frame->ack_fpb);

#if defined(CONFIG_NET_PKT_TIMESTAMP)
		net_pkt_set_timestamp_ns(pkt, rx_frame->time * NSEC_PER_USEC);
#endif

#if defined(CONFIG_NET_L2_OPENTHREAD)
		net_pkt_set_ieee802154_ack_seb(pkt, rx_frame->ack_seb);
#endif

		LOG_DBG("Caught a packet (%u) (LQI: %u)",
			pkt_len, rx_frame->lqi);

		if (net_recv_data(nrf5_radio->iface, pkt) < 0) {
			LOG_ERR("Packet dropped by NET stack");
			goto drop;
		}

		/* Return the raw PSDU buffer to the radio driver. */
		psdu = rx_frame->psdu;
		rx_frame->psdu = NULL;
		nrf_802154_buffer_free_raw(psdu);

		if (LOG_LEVEL >= LOG_LEVEL_DBG) {
			log_stack_usage(&nrf5_radio->rx_thread);
		}

		continue;

drop:
		psdu = rx_frame->psdu;
		rx_frame->psdu = NULL;
		nrf_802154_buffer_free_raw(psdu);

		net_pkt_unref(pkt);
	}
}
/* Query the nRF 802.15.4 driver once at boot and cache the resulting
 * Zephyr ieee802154_hw_caps bitmask in nrf5_data.capabilities.
 * Fixed capabilities are always advertised; CSMA, delayed TX/RX and
 * TX security are advertised only if the underlying driver reports them.
 */
static void nrf5_get_capabilities_at_boot(void)
{
	nrf_802154_capabilities_t caps = nrf_802154_capabilities_get();

	nrf5_data.capabilities =
		IEEE802154_HW_FCS |
		IEEE802154_HW_PROMISC |
		IEEE802154_HW_FILTER |
		((caps & NRF_802154_CAPABILITY_CSMA) ? IEEE802154_HW_CSMA : 0UL) |
		IEEE802154_HW_TX_RX_ACK |
		IEEE802154_HW_RX_TX_ACK |
		IEEE802154_HW_ENERGY_SCAN |
		((caps & NRF_802154_CAPABILITY_DELAYED_TX) ? IEEE802154_HW_TXTIME : 0UL) |
		((caps & NRF_802154_CAPABILITY_DELAYED_RX) ? IEEE802154_HW_RXTIME : 0UL) |
		IEEE802154_HW_SLEEP_TO_TX |
		IEEE802154_RX_ON_WHEN_IDLE |
		((caps & NRF_802154_CAPABILITY_SECURITY) ? IEEE802154_HW_TX_SEC : 0UL)
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
		| IEEE802154_OPENTHREAD_HW_MULTIPLE_CCA
#endif
		;
}
/* Radio device API */

/* Return the capability mask cached by nrf5_get_capabilities_at_boot(). */
static enum ieee802154_hw_caps nrf5_get_capabilities(const struct device *dev)
{
	return nrf5_data.capabilities;
}
/* Perform a blocking clear-channel assessment.
 *
 * Returns 0 if the channel is free, -EBUSY if it is busy or the CCA
 * procedure could not be started.
 */
static int nrf5_cca(const struct device *dev)
{
	struct nrf5_802154_data *nrf5_radio = NRF5_802154_DATA(dev);

	if (!nrf_802154_cca()) {
		LOG_DBG("CCA failed");
		return -EBUSY;
	}

	/* The nRF driver guarantees that a callback will be called once
	 * the CCA function is done, thus unlocking the semaphore.
	 */
	k_sem_take(&nrf5_radio->cca_wait, K_FOREVER);

	LOG_DBG("Channel free? %d", nrf5_radio->channel_free);

	return nrf5_radio->channel_free ? 0 : -EBUSY;
}
/* Select the radio channel; only 2.4 GHz channels 11..26 are valid.
 * Returns -ENOTSUP for channels below the supported band and -EINVAL
 * for channels above it.
 */
static int nrf5_set_channel(const struct device *dev, uint16_t channel)
{
	ARG_UNUSED(dev);

	LOG_DBG("%u", channel);

	if (channel < 11) {
		return -ENOTSUP;
	}

	if (channel > 26) {
		return -EINVAL;
	}

	nrf_802154_channel_set(channel);

	return 0;
}
/* Start an asynchronous energy-detection scan of @p duration ms.
 * @p done_cb is invoked from the driver callback when the scan finishes.
 * Returns -EALREADY if a scan is pending, -EBUSY if the radio refused.
 */
static int nrf5_energy_scan_start(const struct device *dev,
				  uint16_t duration,
				  energy_scan_done_cb_t done_cb)
{
	ARG_UNUSED(dev);

	/* Only one scan may be in flight at a time. */
	if (nrf5_data.energy_scan_done != NULL) {
		return -EALREADY;
	}

	nrf5_data.energy_scan_done = done_cb;

	/* The radio driver takes the duration in microseconds. */
	if (!nrf_802154_energy_detection(duration * 1000)) {
		nrf5_data.energy_scan_done = NULL;
		return -EBUSY;
	}

	return 0;
}
/* Program the PAN ID filter; the radio expects little-endian bytes. */
static int nrf5_set_pan_id(const struct device *dev, uint16_t pan_id)
{
	uint8_t le_bytes[2];

	ARG_UNUSED(dev);

	sys_put_le16(pan_id, le_bytes);
	nrf_802154_pan_id_set(le_bytes);

	LOG_DBG("0x%x", pan_id);

	return 0;
}
/* Program the short-address filter; the radio expects little-endian bytes. */
static int nrf5_set_short_addr(const struct device *dev, uint16_t short_addr)
{
	uint8_t le_bytes[2];

	ARG_UNUSED(dev);

	sys_put_le16(short_addr, le_bytes);
	nrf_802154_short_address_set(le_bytes);

	LOG_DBG("0x%x", short_addr);

	return 0;
}
/* Program the extended (EUI-64) address filter.
 * @p ieee_addr is passed through to the radio driver as-is
 * (little-endian byte order, logged most-significant byte first).
 */
static int nrf5_set_ieee_addr(const struct device *dev,
			      const uint8_t *ieee_addr)
{
	ARG_UNUSED(dev);

	LOG_DBG("IEEE address %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		ieee_addr[7], ieee_addr[6], ieee_addr[5], ieee_addr[4],
		ieee_addr[3], ieee_addr[2], ieee_addr[1], ieee_addr[0]);

	nrf_802154_extended_address_set(ieee_addr);

	return 0;
}
/* Dispatch an address-filter request to the matching setter.
 * Clearing filters (!set) and unknown filter types return -ENOTSUP.
 */
static int nrf5_filter(const struct device *dev, bool set,
		       enum ieee802154_filter_type type,
		       const struct ieee802154_filter *filter)
{
	LOG_DBG("Applying filter %u", type);

	if (!set) {
		return -ENOTSUP;
	}

	switch (type) {
	case IEEE802154_FILTER_TYPE_IEEE_ADDR:
		return nrf5_set_ieee_addr(dev, filter->ieee_addr);
	case IEEE802154_FILTER_TYPE_SHORT_ADDR:
		return nrf5_set_short_addr(dev, filter->short_addr);
	case IEEE802154_FILTER_TYPE_PAN_ID:
		return nrf5_set_pan_id(dev, filter->pan_id);
	default:
		return -ENOTSUP;
	}
}
/* Cache the requested TX power (dBm); it is applied to the radio on the
 * next start/transmit, not immediately.
 */
static int nrf5_set_txpower(const struct device *dev, int16_t dbm)
{
	ARG_UNUSED(dev);

	LOG_DBG("%d", dbm);

	nrf5_data.txpwr = dbm;

	return 0;
}
/* Convert the ACK frame captured by the transmit-done callback into a
 * net_pkt and pass it to ieee802154_handle_ack().
 *
 * Returns 0 on success or a negative errno; the radio driver's ACK
 * buffer is always released before returning.
 */
static int handle_ack(struct nrf5_802154_data *nrf5_radio)
{
	uint8_t ack_len;
	struct net_pkt *ack_pkt;
	int err = 0;

#if defined(CONFIG_NET_PKT_TIMESTAMP)
	if (nrf5_radio->ack_frame.time == NRF_802154_NO_TIMESTAMP) {
		/* Ack timestamp is invalid and cannot be used by the upper layer.
		 * Report the transmission as failed as if the Ack was not received at all.
		 */
		LOG_WRN("Invalid ACK timestamp.");
		err = -ENOMSG;
		goto free_nrf_ack;
	}
#endif

	/* psdu[0] is the PHR (frame length); whether it includes the 2-byte
	 * FCS is controlled by CONFIG_IEEE802154_NRF5_FCS_IN_LENGTH.
	 */
	if (IS_ENABLED(CONFIG_IEEE802154_NRF5_FCS_IN_LENGTH)) {
		ack_len = nrf5_radio->ack_frame.psdu[0];
	} else {
		ack_len = nrf5_radio->ack_frame.psdu[0] - IEEE802154_FCS_LENGTH;
	}

	ack_pkt = net_pkt_rx_alloc_with_buffer(nrf5_radio->iface, ack_len,
					       AF_UNSPEC, 0, K_NO_WAIT);
	if (!ack_pkt) {
		LOG_ERR("No free packet available.");
		err = -ENOMEM;
		goto free_nrf_ack;
	}

	/* Upper layers expect the frame to start at the MAC header, skip the
	 * PHY header (1 byte).
	 */
	if (net_pkt_write(ack_pkt, nrf5_radio->ack_frame.psdu + 1,
			  ack_len) < 0) {
		LOG_ERR("Failed to write to a packet.");
		err = -ENOMEM;
		goto free_net_ack;
	}

	net_pkt_set_ieee802154_lqi(ack_pkt, nrf5_radio->ack_frame.lqi);
	net_pkt_set_ieee802154_rssi_dbm(ack_pkt, nrf5_radio->ack_frame.rssi);

#if defined(CONFIG_NET_PKT_TIMESTAMP)
	net_pkt_set_timestamp_ns(ack_pkt, nrf5_radio->ack_frame.time * NSEC_PER_USEC);
#endif

	net_pkt_cursor_init(ack_pkt);

	if (ieee802154_handle_ack(nrf5_radio->iface, ack_pkt) != NET_OK) {
		LOG_INF("ACK packet not handled - releasing.");
	}

free_net_ack:
	net_pkt_unref(ack_pkt);

free_nrf_ack:
	nrf_802154_buffer_free_raw(nrf5_radio->ack_frame.psdu);
	nrf5_radio->ack_frame.psdu = NULL;

	return err;
}
static void nrf5_tx_started(const struct device *dev,
struct net_pkt *pkt,
struct net_buf *frag)
{
ARG_UNUSED(pkt);
if (nrf5_data.event_handler) {
nrf5_data.event_handler(dev, IEEE802154_EVENT_TX_STARTED,
(void *)frag);
}
}
/* Start an immediate transmission of @p payload, optionally preceded by
 * a single CCA. Returns true if the radio accepted the request.
 */
static bool nrf5_tx_immediate(struct net_pkt *pkt, uint8_t *payload, bool cca)
{
	nrf_802154_transmit_metadata_t metadata = {
		.frame_props = {
			.is_secured = net_pkt_ieee802154_frame_secured(pkt),
			.dynamic_data_is_set = net_pkt_ieee802154_mac_hdr_rdy(pkt),
		},
		.cca = cca,
		.tx_power = {
			/* Use the power cached by nrf5_set_txpower(). */
			.use_metadata_value = true,
			.power = nrf5_data.txpwr,
		},
	};

	return nrf_802154_transmit_raw(payload, &metadata);
}
#if NRF_802154_CSMA_CA_ENABLED
/* Start a CSMA-CA transmission of @p payload. Returns true if the radio
 * accepted the request.
 */
static bool nrf5_tx_csma_ca(struct net_pkt *pkt, uint8_t *payload)
{
	nrf_802154_transmit_csma_ca_metadata_t metadata = {
		.frame_props = {
			.is_secured = net_pkt_ieee802154_frame_secured(pkt),
			.dynamic_data_is_set = net_pkt_ieee802154_mac_hdr_rdy(pkt),
		},
		.tx_power = {
			/* Use the power cached by nrf5_set_txpower(). */
			.use_metadata_value = true,
			.power = nrf5_data.txpwr,
		},
	};

	return nrf_802154_transmit_csma_ca_raw(payload, &metadata);
}
#endif
#if defined(CONFIG_NET_PKT_TXTIME)
/* Schedule a delayed transmission (TXTIME / TXTIME_CCA / multiple-CCA
 * modes). The packet's PHR-referenced timestamp is converted to the
 * SHR-referenced timestamp expected by nrf_802154_transmit_raw_at().
 * Returns true if the delayed transmission was scheduled.
 */
static bool nrf5_tx_at(struct nrf5_802154_data *nrf5_radio, struct net_pkt *pkt,
		       uint8_t *payload, enum ieee802154_tx_mode mode)
{
	bool cca = false;
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	uint8_t max_extra_cca_attempts = 0;
#endif

	switch (mode) {
	case IEEE802154_TX_MODE_TXTIME:
		break;
	case IEEE802154_TX_MODE_TXTIME_CCA:
		cca = true;
		break;
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	case IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA:
		cca = true;
		max_extra_cca_attempts = nrf5_data.max_extra_cca_attempts;
		break;
#endif
	/* Note: a stray, unreachable `break;` used to sit here (after the
	 * per-case breaks); it has been removed as dead code.
	 */
	default:
		__ASSERT_NO_MSG(false);
		return false;
	}

	nrf_802154_transmit_at_metadata_t metadata = {
		.frame_props = {
			.is_secured = net_pkt_ieee802154_frame_secured(pkt),
			.dynamic_data_is_set = net_pkt_ieee802154_mac_hdr_rdy(pkt),
		},
		.cca = cca,
		.channel = nrf_802154_channel_get(),
		.tx_power = {
			/* Use the power cached by nrf5_set_txpower(). */
			.use_metadata_value = true,
			.power = nrf5_data.txpwr,
		},
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
		.extra_cca_attempts = max_extra_cca_attempts,
#endif
	};

	/* The timestamp points to the start of PHR but `nrf_802154_transmit_raw_at`
	 * expects a timestamp pointing to start of SHR.
	 */
	uint64_t tx_at = nrf_802154_timestamp_phr_to_shr_convert(
		net_pkt_timestamp_ns(pkt) / NSEC_PER_USEC);

	return nrf_802154_transmit_raw_at(payload, tx_at, &metadata);
}
#endif /* CONFIG_NET_PKT_TXTIME */
/* Transmit one frame and block until the radio driver reports the
 * outcome. The payload is copied into the driver's tx_psdu buffer with
 * a PHR (length) byte prepended. Returns 0 on success, or a negative
 * errno mapped from the radio driver's TX error code.
 */
static int nrf5_tx(const struct device *dev,
		   enum ieee802154_tx_mode mode,
		   struct net_pkt *pkt,
		   struct net_buf *frag)
{
	struct nrf5_802154_data *nrf5_radio = NRF5_802154_DATA(dev);
	uint8_t payload_len = frag->len;
	uint8_t *payload = frag->data;
	bool ret = true;

	if (payload_len > IEEE802154_MTU) {
		LOG_ERR("Payload too large: %d", payload_len);
		return -EMSGSIZE;
	}

	LOG_DBG("%p (%u)", payload, payload_len);

	/* PHR = MAC payload length + 2-byte FCS appended by hardware. */
	nrf5_radio->tx_psdu[0] = payload_len + IEEE802154_FCS_LENGTH;
	memcpy(nrf5_radio->tx_psdu + 1, payload, payload_len);

	/* Reset semaphore in case ACK was received after timeout */
	k_sem_reset(&nrf5_radio->tx_wait);

	switch (mode) {
	case IEEE802154_TX_MODE_DIRECT:
	case IEEE802154_TX_MODE_CCA:
		ret = nrf5_tx_immediate(pkt, nrf5_radio->tx_psdu,
					mode == IEEE802154_TX_MODE_CCA);
		break;
#if NRF_802154_CSMA_CA_ENABLED
	case IEEE802154_TX_MODE_CSMA_CA:
		ret = nrf5_tx_csma_ca(pkt, nrf5_radio->tx_psdu);
		break;
#endif
#if defined(CONFIG_NET_PKT_TXTIME)
	case IEEE802154_TX_MODE_TXTIME:
	case IEEE802154_TX_MODE_TXTIME_CCA:
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	case IEEE802154_OPENTHREAD_TX_MODE_TXTIME_MULTIPLE_CCA:
#endif
		__ASSERT_NO_MSG(pkt);
		ret = nrf5_tx_at(nrf5_radio, pkt, nrf5_radio->tx_psdu, mode);
		break;
#endif /* CONFIG_NET_PKT_TXTIME */
	default:
		NET_ERR("TX mode %d not supported", mode);
		return -ENOTSUP;
	}

	if (!ret) {
		LOG_ERR("Cannot send frame");
		return -EIO;
	}

	nrf5_tx_started(dev, pkt, frag);

	LOG_DBG("Sending frame (ch:%d, txpower:%d)",
		nrf_802154_channel_get(), nrf_802154_tx_power_get());

	/* Wait for the callback from the radio driver. */
	k_sem_take(&nrf5_radio->tx_wait, K_FOREVER);

	LOG_DBG("Result: %d", nrf5_data.tx_result);

#if defined(CONFIG_NRF_802154_ENCRYPTION)
	/*
	 * When frame encryption by the radio driver is enabled, the frame stored in
	 * the tx_psdu buffer is:
	 * 1) authenticated and encrypted in place which causes that after an unsuccessful
	 *    TX attempt, this frame must be propagated back to the upper layer for retransmission.
	 *    The upper layer must ensure that the exact same secured frame is used for
	 *    retransmission
	 * 2) frame counters are updated in place and for keeping the link frame counter up to date,
	 *    this information must be propagated back to the upper layer
	 */
	memcpy(payload, nrf5_radio->tx_psdu + 1, payload_len);
#endif

	net_pkt_set_ieee802154_frame_secured(pkt, nrf5_radio->tx_frame_is_secured);
	net_pkt_set_ieee802154_mac_hdr_rdy(pkt, nrf5_radio->tx_frame_mac_hdr_rdy);

	/* Map the radio driver result onto errno values for the caller. */
	switch (nrf5_radio->tx_result) {
	case NRF_802154_TX_ERROR_NONE:
		if (nrf5_radio->ack_frame.psdu == NULL) {
			/* No ACK was requested. */
			return 0;
		}
		/* Handle ACK packet. */
		return handle_ack(nrf5_radio);
	case NRF_802154_TX_ERROR_NO_MEM:
		return -ENOBUFS;
	case NRF_802154_TX_ERROR_BUSY_CHANNEL:
		return -EBUSY;
	case NRF_802154_TX_ERROR_INVALID_ACK:
	case NRF_802154_TX_ERROR_NO_ACK:
		return -ENOMSG;
	case NRF_802154_TX_ERROR_ABORTED:
	case NRF_802154_TX_ERROR_TIMESLOT_DENIED:
	case NRF_802154_TX_ERROR_TIMESLOT_ENDED:
	default:
		return -EIO;
	}
}
/* Return the radio driver's current time converted from us to ns. */
static net_time_t nrf5_get_time(const struct device *dev)
{
	ARG_UNUSED(dev);

	return (net_time_t)nrf_802154_time_get() * NSEC_PER_USEC;
}
/* Return the configured scheduling accuracy (us) for delayed TRX. */
static uint8_t nrf5_get_acc(const struct device *dev)
{
	ARG_UNUSED(dev);

	return CONFIG_IEEE802154_NRF5_DELAY_TRX_ACC;
}
/* Apply the cached TX power and put the radio into receive state. */
static int nrf5_start(const struct device *dev)
{
	ARG_UNUSED(dev);

	nrf_802154_tx_power_set(nrf5_data.txpwr);

	if (!nrf_802154_receive()) {
		LOG_ERR("Failed to enter receive state");
		return -EIO;
	}

	LOG_DBG("nRF5 802154 radio started (channel: %d)",
		nrf_802154_channel_get());

	return 0;
}
/* Stop the radio.
 *
 * With CSL endpoint support the radio is only put to sleep when idle;
 * if that is not possible the event handler is notified instead.
 * Without CSL support the radio is unconditionally put to sleep.
 */
static int nrf5_stop(const struct device *dev)
{
#if defined(CONFIG_IEEE802154_CSL_ENDPOINT)
	if (nrf_802154_sleep_if_idle() != NRF_802154_SLEEP_ERROR_NONE) {
		if (nrf5_data.event_handler) {
			nrf5_data.event_handler(dev, IEEE802154_EVENT_RX_OFF, NULL);
		} else {
			LOG_WRN("Transition to radio sleep cannot be handled.");
		}
		/* Short delay before reporting success anyway. */
		Z_SPIN_DELAY(1);
		return 0;
	}
#else
	ARG_UNUSED(dev);
	if (!nrf_802154_sleep()) {
		LOG_ERR("Error while stopping radio");
		return -EIO;
	}
#endif

	LOG_DBG("nRF5 802154 radio stopped");

	return 0;
}
#if defined(CONFIG_NRF_802154_CARRIER_FUNCTIONS)
/* Start continuous (unmodulated) carrier transmission for testing. */
static int nrf5_continuous_carrier(const struct device *dev)
{
	ARG_UNUSED(dev);

	nrf_802154_tx_power_set(nrf5_data.txpwr);

	if (!nrf_802154_continuous_carrier()) {
		LOG_ERR("Failed to enter continuous carrier state");
		return -EIO;
	}

	LOG_DBG("Continuous carrier wave transmission started (channel: %d)",
		nrf_802154_channel_get());

	return 0;
}
#endif
#if !defined(CONFIG_IEEE802154_NRF5_EXT_IRQ_MGMT)
/* Thin ISR wrapper forwarding the RADIO interrupt to the nRF driver. */
static void nrf5_radio_irq(const void *arg)
{
	ARG_UNUSED(arg);

	nrf_802154_radio_irq_handler();
}
#endif
/* Connect and enable the RADIO interrupt, unless IRQ management is
 * delegated externally (CONFIG_IEEE802154_NRF5_EXT_IRQ_MGMT).
 */
static void nrf5_irq_config(const struct device *dev)
{
	ARG_UNUSED(dev);

#if !defined(CONFIG_IEEE802154_NRF5_EXT_IRQ_MGMT)
	IRQ_CONNECT(DT_IRQN(DT_NODELABEL(radio)), NRF_802154_IRQ_PRIORITY, nrf5_radio_irq, NULL, 0);
	irq_enable(DT_IRQN(DT_NODELABEL(radio)));
#endif
}
/* Device init: set up synchronization primitives, initialize the nRF
 * 802.15.4 driver, cache capabilities, hook the IRQ and spawn the RX
 * thread. Always returns 0.
 */
static int nrf5_init(const struct device *dev)
{
	const struct nrf5_802154_config *nrf5_radio_cfg = NRF5_802154_CFG(dev);
	struct nrf5_802154_data *nrf5_radio = NRF5_802154_DATA(dev);

#if defined(CONFIG_IEEE802154_RAW_MODE)
	/* Cache the device pointer for nrf5_get_device() in raw mode. */
	nrf5_dev = dev;
#endif

	k_fifo_init(&nrf5_radio->rx_fifo);
	k_sem_init(&nrf5_radio->tx_wait, 0, 1);
	k_sem_init(&nrf5_radio->cca_wait, 0, 1);

	nrf_802154_init();

	nrf5_get_capabilities_at_boot();

	nrf5_radio->rx_on_when_idle = true;

	nrf5_radio_cfg->irq_config_func(dev);

	k_thread_create(&nrf5_radio->rx_thread, nrf5_radio->rx_stack,
			CONFIG_IEEE802154_NRF5_RX_STACK_SIZE,
			nrf5_rx_thread, nrf5_radio, NULL, NULL,
			K_PRIO_COOP(2), 0, K_NO_WAIT);

	k_thread_name_set(&nrf5_radio->rx_thread, "nrf5_rx");

	LOG_INF("nRF5 802154 radio initialized");

	return 0;
}
/* Interface init: bind the EUI-64 as link address and start the
 * 802.15.4 L2. The link address must be set before ieee802154_init().
 */
static void nrf5_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct nrf5_802154_data *nrf5_radio = NRF5_802154_DATA(dev);

	nrf5_get_eui64(nrf5_radio->mac);
	net_if_set_link_addr(iface, nrf5_radio->mac, sizeof(nrf5_radio->mac),
			     NET_LINK_IEEE802154);

	nrf5_radio->iface = iface;

	ieee802154_init(iface);
}
#if defined(CONFIG_NRF_802154_ENCRYPTION)
/* Replace the radio driver's MAC key store with the given key list.
 *
 * @p mac_keys is an array terminated by an entry with a NULL key_value;
 * at most NRF_802154_SECURITY_KEY_STORAGE_SIZE keys are stored. In
 * assert-enabled builds a failed store (other than "already present")
 * triggers an assertion; in release builds the store result is ignored.
 */
static void nrf5_config_mac_keys(struct ieee802154_key *mac_keys)
{
	nrf_802154_security_key_remove_all();

	for (uint8_t i = 0; mac_keys->key_value
			&& i < NRF_802154_SECURITY_KEY_STORAGE_SIZE; mac_keys++, i++) {
		nrf_802154_key_t key = {
			.value.p_cleartext_key = mac_keys->key_value,
			.id.mode = mac_keys->key_id_mode,
			.id.p_key_id = mac_keys->key_id,
			.type = NRF_802154_KEY_CLEARTEXT,
			.frame_counter = 0,
			.use_global_frame_counter = !(mac_keys->frame_counter_per_key),
		};

		__ASSERT_EVAL((void)nrf_802154_security_key_store(&key),
			nrf_802154_security_error_t err = nrf_802154_security_key_store(&key),
			err == NRF_802154_SECURITY_ERROR_NONE ||
				err == NRF_802154_SECURITY_ERROR_ALREADY_PRESENT,
			"Storing key failed, err: %d", err);
	}
}
#endif /* CONFIG_NRF_802154_ENCRYPTION */
/* Apply a runtime configuration item to the radio.
 *
 * Dispatches on @p type to the corresponding nRF 802.15.4 driver call.
 * Returns 0 on success, -EINVAL for unknown types/modes, -ENOMEM /
 * -ENOENT / -ENOTSUP for specific sub-cases as noted below.
 */
static int nrf5_configure(const struct device *dev,
			  enum ieee802154_config_type type,
			  const struct ieee802154_config *config)
{
	ARG_UNUSED(dev);

	switch (type) {
	case IEEE802154_CONFIG_AUTO_ACK_FPB:
		/* Select the source-address-match method, then toggle the
		 * automatic frame-pending-bit feature.
		 */
		if (config->auto_ack_fpb.enabled) {
			switch (config->auto_ack_fpb.mode) {
			case IEEE802154_FPB_ADDR_MATCH_THREAD:
				nrf_802154_src_addr_matching_method_set(
					NRF_802154_SRC_ADDR_MATCH_THREAD);
				break;

			case IEEE802154_FPB_ADDR_MATCH_ZIGBEE:
				nrf_802154_src_addr_matching_method_set(
					NRF_802154_SRC_ADDR_MATCH_ZIGBEE);
				break;

			default:
				return -EINVAL;
			}
		}

		nrf_802154_auto_pending_bit_set(config->auto_ack_fpb.enabled);
		break;

	case IEEE802154_CONFIG_ACK_FPB:
		/* Add/remove a single pending-bit address; a NULL address
		 * with enabled == false resets the whole list.
		 */
		if (config->ack_fpb.enabled) {
			if (!nrf_802154_pending_bit_for_addr_set(
						config->ack_fpb.addr,
						config->ack_fpb.extended)) {
				return -ENOMEM;
			}

			break;
		}

		if (config->ack_fpb.addr != NULL) {
			if (!nrf_802154_pending_bit_for_addr_clear(
						config->ack_fpb.addr,
						config->ack_fpb.extended)) {
				return -ENOENT;
			}
		} else {
			nrf_802154_pending_bit_for_addr_reset(
						config->ack_fpb.extended);
		}

		break;

	case IEEE802154_CONFIG_PAN_COORDINATOR:
		nrf_802154_pan_coord_set(config->pan_coordinator);
		break;

	case IEEE802154_CONFIG_PROMISCUOUS:
		nrf_802154_promiscuous_set(config->promiscuous);
		break;

	case IEEE802154_CONFIG_EVENT_HANDLER:
		nrf5_data.event_handler = config->event_handler;
		break;

#if defined(CONFIG_NRF_802154_ENCRYPTION)
	case IEEE802154_CONFIG_MAC_KEYS:
		nrf5_config_mac_keys(config->mac_keys);
		break;

	case IEEE802154_CONFIG_FRAME_COUNTER:
		nrf_802154_security_global_frame_counter_set(config->frame_counter);
		break;

	case IEEE802154_CONFIG_FRAME_COUNTER_IF_LARGER:
		nrf_802154_security_global_frame_counter_set_if_larger(config->frame_counter);
		break;
#endif /* CONFIG_NRF_802154_ENCRYPTION */

	case IEEE802154_CONFIG_ENH_ACK_HEADER_IE: {
		uint8_t ext_addr_le[EXTENDED_ADDRESS_SIZE];
		uint8_t short_addr_le[SHORT_ADDRESS_SIZE];
		uint8_t element_id;
		bool valid_vendor_specific_ie = false;

		if (config->ack_ie.purge_ie) {
			/* Drop all IE ACK data for both address widths. */
			nrf_802154_ack_data_remove_all(false, NRF_802154_ACK_DATA_IE);
			nrf_802154_ack_data_remove_all(true, NRF_802154_ACK_DATA_IE);
			break;
		}

		/* Broadcast short address or missing extended address are
		 * not supported for per-address IE data.
		 */
		if (config->ack_ie.short_addr == IEEE802154_BROADCAST_ADDRESS ||
		    config->ack_ie.ext_addr == NULL) {
			return -ENOTSUP;
		}

		sys_put_le16(config->ack_ie.short_addr, short_addr_le);
		sys_memcpy_swap(ext_addr_le, config->ack_ie.ext_addr, EXTENDED_ADDRESS_SIZE);

		if (config->ack_ie.header_ie == NULL || config->ack_ie.header_ie->length == 0) {
			/* Empty IE clears the entries for both addresses. */
			nrf_802154_ack_data_clear(short_addr_le, false, NRF_802154_ACK_DATA_IE);
			nrf_802154_ack_data_clear(ext_addr_le, true, NRF_802154_ACK_DATA_IE);
		} else {
			element_id = ieee802154_header_ie_get_element_id(config->ack_ie.header_ie);

#if defined(CONFIG_NET_L2_OPENTHREAD)
			uint8_t vendor_oui_le[IEEE802154_OPENTHREAD_VENDOR_OUI_LEN] =
				IEEE802154_OPENTHREAD_THREAD_IE_VENDOR_OUI;

			if (element_id == IEEE802154_HEADER_IE_ELEMENT_ID_VENDOR_SPECIFIC_IE &&
			    memcmp(config->ack_ie.header_ie->content.vendor_specific.vendor_oui,
				   vendor_oui_le, sizeof(vendor_oui_le)) == 0) {
				valid_vendor_specific_ie = true;
			}
#endif

			/* Only CSL IEs and the Thread vendor-specific IE are
			 * accepted here.
			 */
			if (element_id != IEEE802154_HEADER_IE_ELEMENT_ID_CSL_IE &&
			    !valid_vendor_specific_ie) {
				return -ENOTSUP;
			}

			nrf_802154_ack_data_set(short_addr_le, false, config->ack_ie.header_ie,
						config->ack_ie.header_ie->length +
							IEEE802154_HEADER_IE_HEADER_LENGTH,
						NRF_802154_ACK_DATA_IE);
			nrf_802154_ack_data_set(ext_addr_le, true, config->ack_ie.header_ie,
						config->ack_ie.header_ie->length +
							IEEE802154_HEADER_IE_HEADER_LENGTH,
						NRF_802154_ACK_DATA_IE);
		}
	} break;

#if defined(CONFIG_IEEE802154_CSL_ENDPOINT)
	case IEEE802154_CONFIG_EXPECTED_RX_TIME: {
#if defined(CONFIG_NRF_802154_SER_HOST)
		/* On a serialization host, skip updating the anchor time when
		 * the new expected RX time is a whole number of CSL periods
		 * away from the previous one.
		 */
		net_time_t period_ns = nrf5_data.csl_period * NSEC_PER_TEN_SYMBOLS;
		bool changed = (config->expected_rx_time - nrf5_data.csl_rx_time) % period_ns;

		nrf5_data.csl_rx_time = config->expected_rx_time;

		if (changed)
#endif /* CONFIG_NRF_802154_SER_HOST */
		{
			nrf_802154_csl_writer_anchor_time_set(
				nrf_802154_timestamp_phr_to_mhr_convert(config->expected_rx_time /
									NSEC_PER_USEC));
		}
	} break;

	case IEEE802154_CONFIG_RX_SLOT: {
		/* Note that even if the nrf_802154_receive_at function is not called in time
		 * (for example due to the call being blocked by higher priority threads) and
		 * the delayed reception window is not scheduled, the CSL phase will still be
		 * calculated as if the following reception windows were at times
		 * anchor_time + n * csl_period. The previously set
		 * anchor_time will be used for calculations.
		 */
		nrf_802154_receive_at(config->rx_slot.start / NSEC_PER_USEC,
				      config->rx_slot.duration / NSEC_PER_USEC,
				      config->rx_slot.channel, DRX_SLOT_RX);
	} break;

	case IEEE802154_CONFIG_CSL_PERIOD: {
		nrf_802154_csl_writer_period_set(config->csl_period);
#if defined(CONFIG_NRF_802154_SER_HOST)
		nrf5_data.csl_period = config->csl_period;
#endif
	} break;
#endif /* CONFIG_IEEE802154_CSL_ENDPOINT */

#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	case IEEE802154_OPENTHREAD_CONFIG_MAX_EXTRA_CCA_ATTEMPTS:
		nrf5_data.max_extra_cca_attempts =
			((const struct ieee802154_openthread_config *)config)
				->max_extra_cca_attempts;
		break;
#endif /* CONFIG_IEEE802154_NRF5_MULTIPLE_CCA */

	case IEEE802154_CONFIG_RX_ON_WHEN_IDLE:
		nrf_802154_rx_on_when_idle_set(config->rx_on_when_idle);
		nrf5_data.rx_on_when_idle = config->rx_on_when_idle;

		if (config->rx_on_when_idle == false) {
			(void)nrf_802154_sleep_if_idle();
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/* driver-allocated attribute memory - constant across all driver instances */
IEEE802154_DEFINE_PHY_SUPPORTED_CHANNELS(drv_attr, 11, 26);
/* Read a driver attribute into @p value.
 *
 * Standard channel-page/range attributes are served from the shared
 * drv_attr table; OpenThread-specific timing attributes are answered
 * with fixed values. Returns 0 on success, -ENOENT for unknown attrs.
 */
static int nrf5_attr_get(const struct device *dev,
			 enum ieee802154_attr attr,
			 struct ieee802154_attr_value *value)
{
	ARG_UNUSED(dev);

	if (ieee802154_attr_get_channel_page_and_range(
		    attr, IEEE802154_ATTR_PHY_CHANNEL_PAGE_ZERO_OQPSK_2450_BPSK_868_915,
		    &drv_attr.phy_supported_channels, value) == 0) {
		return 0;
	}

	switch ((uint32_t)attr) {
#if defined(CONFIG_IEEE802154_NRF5_MULTIPLE_CCA)
	/* TODO: t_recca and t_ccatx should be provided by the public API of the
	 * nRF 802.15.4 Radio Driver.
	 */
	case IEEE802154_OPENTHREAD_ATTR_T_RECCA:
		((struct ieee802154_openthread_attr_value *)value)->t_recca = 0;
		break;
	case IEEE802154_OPENTHREAD_ATTR_T_CCATX:
		((struct ieee802154_openthread_attr_value *)value)->t_ccatx = 20;
		break;
#endif
	default:
		return -ENOENT;
	}

	return 0;
}
/* nRF5 radio driver callbacks */
/* nRF5 radio driver callback: a frame was received.
 *
 * Stores the raw PSDU and its metadata in the first free rx_frames slot
 * and queues it for nrf5_rx_thread. Asserts if no slot is free (the
 * slot count must cover the radio driver's buffer count).
 */
void nrf_802154_received_timestamp_raw(uint8_t *data, int8_t power, uint8_t lqi, uint64_t time)
{
	for (uint32_t i = 0; i < ARRAY_SIZE(nrf5_data.rx_frames); i++) {
		if (nrf5_data.rx_frames[i].psdu != NULL) {
			continue;
		}

		nrf5_data.rx_frames[i].psdu = data;
		nrf5_data.rx_frames[i].rssi = power;
		nrf5_data.rx_frames[i].lqi = lqi;

#if defined(CONFIG_NET_PKT_TIMESTAMP)
		/* Convert the end-of-frame timestamp to a PHR timestamp. */
		nrf5_data.rx_frames[i].time =
			nrf_802154_timestamp_end_to_phr_convert(time, data[0]);
#endif

		/* Latch the ACK flags recorded by nrf_802154_tx_ack_started()
		 * for this frame, then reset them for the next one.
		 */
		nrf5_data.rx_frames[i].ack_fpb = nrf5_data.last_frame_ack_fpb;
		nrf5_data.rx_frames[i].ack_seb = nrf5_data.last_frame_ack_seb;
		nrf5_data.last_frame_ack_fpb = false;
		nrf5_data.last_frame_ack_seb = false;

		k_fifo_put(&nrf5_data.rx_fifo, &nrf5_data.rx_frames[i]);

		return;
	}

	__ASSERT(false, "Not enough rx frames allocated for 15.4 driver");
}
/* nRF5 radio driver callback: reception failed.
 *
 * Maps the driver error to an ieee802154_rx_fail_reason, resets the
 * latched ACK flags and notifies the registered event handler. A timed
 * out delayed-RX slot is handled specially for CSL endpoints.
 */
void nrf_802154_receive_failed(nrf_802154_rx_error_t error, uint32_t id)
{
	const struct device *dev = nrf5_get_device();

#if defined(CONFIG_IEEE802154_CSL_ENDPOINT)
	if (id == DRX_SLOT_RX && error == NRF_802154_RX_ERROR_DELAYED_TIMEOUT) {
		if (!nrf5_data.rx_on_when_idle) {
			/* Transition to RxOff done automatically by the driver */
			return;
		} else if (nrf5_data.event_handler) {
			/* Notify the higher layer to allow it to transition if needed */
			nrf5_data.event_handler(dev, IEEE802154_EVENT_RX_OFF, NULL);
		}
	}
#else
	ARG_UNUSED(id);
#endif

	enum ieee802154_rx_fail_reason reason;

	switch (error) {
	case NRF_802154_RX_ERROR_INVALID_FRAME:
	case NRF_802154_RX_ERROR_DELAYED_TIMEOUT:
		reason = IEEE802154_RX_FAIL_NOT_RECEIVED;
		break;

	case NRF_802154_RX_ERROR_INVALID_FCS:
		reason = IEEE802154_RX_FAIL_INVALID_FCS;
		break;

	case NRF_802154_RX_ERROR_INVALID_DEST_ADDR:
		reason = IEEE802154_RX_FAIL_ADDR_FILTERED;
		break;

	default:
		reason = IEEE802154_RX_FAIL_OTHER;
		break;
	}

	if (IS_ENABLED(CONFIG_IEEE802154_NRF5_LOG_RX_FAILURES)) {
		LOG_INF("Rx failed, error = %d", error);
	}

	nrf5_data.last_frame_ack_fpb = false;
	nrf5_data.last_frame_ack_seb = false;

	if (nrf5_data.event_handler) {
		nrf5_data.event_handler(dev, IEEE802154_EVENT_RX_FAILED, (void *)&reason);
	}
}
/* nRF5 radio driver callback: transmission of an ACK started.
 * Latch the frame-pending and security-enabled bits of that ACK so they
 * can be attached to the corresponding received frame.
 */
void nrf_802154_tx_ack_started(const uint8_t *data)
{
	nrf5_data.last_frame_ack_fpb = data[FRAME_PENDING_OFFSET] & FRAME_PENDING_BIT;
	nrf5_data.last_frame_ack_seb = data[SECURITY_ENABLED_OFFSET] & SECURITY_ENABLED_BIT;
}
/* nRF5 radio driver callback: frame transmitted successfully.
 *
 * Records the result, the frame security state and (if an ACK was
 * received) the ACK frame with its metadata, then unblocks nrf5_tx().
 */
void nrf_802154_transmitted_raw(uint8_t *frame,
				const nrf_802154_transmit_done_metadata_t *metadata)
{
	ARG_UNUSED(frame);

	nrf5_data.tx_result = NRF_802154_TX_ERROR_NONE;
	nrf5_data.tx_frame_is_secured = metadata->frame_props.is_secured;
	nrf5_data.tx_frame_mac_hdr_rdy = metadata->frame_props.dynamic_data_is_set;
	nrf5_data.ack_frame.psdu = metadata->data.transmitted.p_ack;

	if (nrf5_data.ack_frame.psdu) {
		nrf5_data.ack_frame.rssi = metadata->data.transmitted.power;
		nrf5_data.ack_frame.lqi = metadata->data.transmitted.lqi;

#if defined(CONFIG_NET_PKT_TIMESTAMP)
		if (metadata->data.transmitted.time == NRF_802154_NO_TIMESTAMP) {
			/* Ack timestamp is invalid. Keep this value to detect it when handling Ack
			 */
			nrf5_data.ack_frame.time = NRF_802154_NO_TIMESTAMP;
		} else {
			/* Convert the end-of-frame timestamp to PHR time. */
			nrf5_data.ack_frame.time = nrf_802154_timestamp_end_to_phr_convert(
				metadata->data.transmitted.time, nrf5_data.ack_frame.psdu[0]);
		}
#endif
	}

	k_sem_give(&nrf5_data.tx_wait);
}
/* nRF5 radio driver callback: transmission failed.
 * Record the error and frame security state, then unblock nrf5_tx().
 */
void nrf_802154_transmit_failed(uint8_t *frame,
				nrf_802154_tx_error_t error,
				const nrf_802154_transmit_done_metadata_t *metadata)
{
	ARG_UNUSED(frame);

	nrf5_data.tx_result = error;
	nrf5_data.tx_frame_is_secured = metadata->frame_props.is_secured;
	nrf5_data.tx_frame_mac_hdr_rdy = metadata->frame_props.dynamic_data_is_set;

	k_sem_give(&nrf5_data.tx_wait);
}
/* nRF5 radio driver callback: CCA finished; unblock nrf5_cca(). */
void nrf_802154_cca_done(bool channel_free)
{
	nrf5_data.channel_free = channel_free;

	k_sem_give(&nrf5_data.cca_wait);
}
/* nRF5 radio driver callback: CCA could not be performed.
 * Treat the channel as busy and unblock nrf5_cca().
 */
void nrf_802154_cca_failed(nrf_802154_cca_error_t error)
{
	ARG_UNUSED(error);

	nrf5_data.channel_free = false;

	k_sem_give(&nrf5_data.cca_wait);
}
void nrf_802154_energy_detected(const nrf_802154_energy_detected_t *result)
{
if (nrf5_data.energy_scan_done != NULL) {
energy_scan_done_cb_t callback = nrf5_data.energy_scan_done;
nrf5_data.energy_scan_done = NULL;
callback(nrf5_get_device(), result->ed_dbm);
}
}
/* nRF5 radio driver callback: energy detection failed.
 * Report SHRT_MAX to the pending scan callback as an error marker.
 */
void nrf_802154_energy_detection_failed(nrf_802154_ed_error_t error)
{
	energy_scan_done_cb_t cb = nrf5_data.energy_scan_done;

	if (cb == NULL) {
		return;
	}

	/* Clear first so a new scan may be started from the callback. */
	nrf5_data.energy_scan_done = NULL;
	cb(nrf5_get_device(), SHRT_MAX);
}
#if defined(CONFIG_NRF_802154_SER_HOST)
/* Serialization-host callback: a fatal serialization error occurred.
 * Assert in debug builds; k_oops() ensures the thread never continues.
 */
void nrf_802154_serialization_error(const nrf_802154_ser_err_data_t *err)
{
	__ASSERT(false, "802.15.4 serialization error: %d", err->reason);
	k_oops();
}
#endif
/* Per-instance configuration: only the IRQ hookup function. */
static const struct nrf5_802154_config nrf5_radio_cfg = {
	.irq_config_func = nrf5_irq_config,
};

/* Zephyr ieee802154 radio API vtable for this driver. */
static const struct ieee802154_radio_api nrf5_radio_api = {
	.iface_api.init = nrf5_iface_init,
	.get_capabilities = nrf5_get_capabilities,
	.cca = nrf5_cca,
	.set_channel = nrf5_set_channel,
	.filter = nrf5_filter,
	.set_txpower = nrf5_set_txpower,
	.start = nrf5_start,
	.stop = nrf5_stop,
#if defined(CONFIG_NRF_802154_CARRIER_FUNCTIONS)
	.continuous_carrier = nrf5_continuous_carrier,
#endif
	.tx = nrf5_tx,
	.ed_scan = nrf5_energy_scan_start,
	.get_time = nrf5_get_time,
	.get_sch_acc = nrf5_get_acc,
	.configure = nrf5_configure,
	.attr_get = nrf5_attr_get
};
/* Select the L2 layer, its context type and the MTU according to the
 * enabled network stack (plain 802.15.4, OpenThread, or a custom L2).
 */
#if defined(CONFIG_NET_L2_IEEE802154)
#define L2 IEEE802154_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(IEEE802154_L2)
#define MTU IEEE802154_MTU
#elif defined(CONFIG_NET_L2_OPENTHREAD)
#define L2 OPENTHREAD_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(OPENTHREAD_L2)
#define MTU 1280
#elif defined(CONFIG_NET_L2_CUSTOM_IEEE802154)
#define L2 CUSTOM_IEEE802154_L2
#define L2_CTX_TYPE NET_L2_GET_CTX_TYPE(CUSTOM_IEEE802154_L2)
#define MTU CONFIG_NET_L2_CUSTOM_IEEE802154_MTU
#endif

/* With an L2 present register a network device; otherwise (raw mode)
 * register a plain device.
 */
#if defined(CONFIG_NET_L2_PHY_IEEE802154)
NET_DEVICE_DT_INST_DEFINE(0, nrf5_init, NULL, &nrf5_data, &nrf5_radio_cfg,
			  CONFIG_IEEE802154_NRF5_INIT_PRIO, &nrf5_radio_api, L2,
			  L2_CTX_TYPE, MTU);
#else
DEVICE_DT_INST_DEFINE(0, nrf5_init, NULL, &nrf5_data, &nrf5_radio_cfg,
		      POST_KERNEL, CONFIG_IEEE802154_NRF5_INIT_PRIO,
		      &nrf5_radio_api);
#endif
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_nrf5.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,335 |
```unknown
# Telink B91 RF configuration options

menuconfig IEEE802154_TELINK_B91
	bool "Telink B91 RF driver"
	default y
	depends on DT_HAS_TELINK_B91_ZB_ENABLED

if IEEE802154_TELINK_B91

config IEEE802154_B91_INIT_PRIO
	int "Telink B91 IEEE 802.15.4 initialization priority"
	default 80
	help
	  Set the initialization priority number. Do not mess with it unless
	  you know what you are doing.

config IEEE802154_B91_SET_TXRX_DELAY_US
	int "Tx/Rx modes switching delay time (us)"
	default 120
	help
	  Delay time needed for PLL stabilization during Tx/Rx modes switching.

config IEEE802154_B91_CCA_RSSI_THRESHOLD
	int "CCA procedure RSSI threshold"
	default -50
	help
	  CCA procedure returns true if the current RSSI value is less than
	  this parameter.

config IEEE802154_B91_RANDOM_MAC
	bool "Random MAC address"
	default y
	help
	  Generate a random MAC address dynamically.

# The fixed MAC address bytes below are only consulted when the randomly
# generated MAC address is disabled.
if ! IEEE802154_B91_RANDOM_MAC

config IEEE802154_B91_MAC4
	hex "MAC Address Byte 4"
	default 0
	range 0 0xff
	help
	  This is the byte 4 of the MAC address.

config IEEE802154_B91_MAC5
	hex "MAC Address Byte 5"
	default 0
	range 0 0xff
	help
	  This is the byte 5 of the MAC address.

config IEEE802154_B91_MAC6
	hex "MAC Address Byte 6"
	default 0
	range 0 0xff
	help
	  This is the byte 6 of the MAC address.

config IEEE802154_B91_MAC7
	hex "MAC Address Byte 7"
	default 0
	range 0 0xff
	help
	  This is the byte 7 of the MAC address.

endif # ! IEEE802154_B91_RANDOM_MAC

endif # IEEE802154_TELINK_B91
``` | /content/code_sandbox/drivers/ieee802154/Kconfig.b91 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 434 |
```c
/*
*
*/
#include <zephyr/drivers/tee.h>
/* Create (and optionally register) a TEE shared-memory object.
 *
 * @param dev   TEE device the memory belongs to.
 * @param addr  Existing buffer, ignored when TEE_SHM_ALLOC is set.
 * @param align Alignment for the allocation (0 = default k_malloc).
 * @param size  Buffer size in bytes.
 * @param flags TEE_SHM_ALLOC to allocate, TEE_SHM_REGISTER to register
 *              the buffer with the driver.
 * @param shmp  Out: the created shm object.
 *
 * @return 0 on success; -EINVAL for a NULL @p shmp, -ENOMEM on
 *         allocation failure, -ENOSYS when registration is requested
 *         but unsupported, or the driver's shm_register() error.
 *
 * On failure, memory allocated here is released; a caller-provided
 * buffer is never freed.
 */
int tee_add_shm(const struct device *dev, void *addr, size_t align, size_t size,
		uint32_t flags, struct tee_shm **shmp)
{
	int rc;
	void *p = addr;
	struct tee_shm *shm;

	if (!shmp) {
		return -EINVAL;
	}

	if (flags & TEE_SHM_ALLOC) {
		if (align) {
			p = k_aligned_alloc(align, size);
		} else {
			p = k_malloc(size);
		}
	}

	/* Also rejects a NULL caller-provided buffer. */
	if (!p) {
		return -ENOMEM;
	}

	shm = k_malloc(sizeof(struct tee_shm));
	if (!shm) {
		rc = -ENOMEM;
		goto err;
	}

	shm->addr = p;
	shm->size = size;
	shm->flags = flags;
	shm->dev = dev;

	if (flags & TEE_SHM_REGISTER) {
		const struct tee_driver_api *api = (const struct tee_driver_api *)dev->api;

		if (!api->shm_register) {
			rc = -ENOSYS;
			goto err;
		}

		rc = api->shm_register(dev, shm);
		if (rc) {
			goto err;
		}
	}

	*shmp = shm;

	return 0;

err:
	/* k_free(NULL) is a no-op, so this is safe before shm exists. */
	k_free(shm);
	if (flags & TEE_SHM_ALLOC) {
		k_free(p);
	}

	return rc;
}
/* Unregister (if registered) and free a TEE shared-memory object.
 *
 * @return 0 on success, -EINVAL for a NULL @p shm, -ENOSYS or the
 *         driver's shm_unregister() error. Cleanup proceeds even when
 *         unregistration fails; the error is reported afterwards.
 */
int tee_rm_shm(const struct device *dev, struct tee_shm *shm)
{
	int rc = 0;

	if (!shm) {
		return -EINVAL;
	}

	if (shm->flags & TEE_SHM_REGISTER) {
		const struct tee_driver_api *api = (const struct tee_driver_api *)dev->api;

		if (api->shm_unregister) {
			/*
			 * We don't return immediately if callback returned error,
			 * just return this code after cleanup.
			 */
			rc = api->shm_unregister(dev, shm);
		} else {
			/*
			 * Set ENOSYS if SHM_REGISTER flag was set, but callback
			 * is not set.
			 */
			rc = -ENOSYS;
		}
	}

	/* Only memory allocated by tee_add_shm() is owned by us. */
	if (shm->flags & TEE_SHM_ALLOC) {
		k_free(shm->addr);
	}

	k_free(shm);

	return rc;
}
``` | /content/code_sandbox/drivers/tee/tee.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 509 |
```unknown
# Top-level configuration for Trusted Execution Environment drivers.
menuconfig TEE
	bool "Trusted Environment Drivers"
	help
	  Include TEE drivers in system config

if TEE
module = TEE
module-str = tee

comment "Device Drivers"

source "drivers/tee/optee/Kconfig"

endif # TEE
``` | /content/code_sandbox/drivers/tee/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 59 |
```objective-c
/* ieee802154_cc1200_regs.h - Registers definition for TI CC1200 */

/*
 *
 */

#ifndef ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC1200_REGS_H_
#define ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC1200_REGS_H_

/* Access types (see Section 3.2) */
#define CC1200_ACCESS_RD		BIT(7)
#define CC1200_ACCESS_WR		(0x00)
#define CC1200_ACCESS_BURST		BIT(6)

/* Configuration registers (see Section 12 for the details).
 * Note: all function-like field macros parenthesize their argument so
 * that expressions with lower-precedence operators (e.g. `a | b`,
 * `a == b`) mask correctly.
 */
#define CC1200_REG_IOCFG3		(0x00)
#define GPIO3_ATRAN			BIT(7)
#define GPIO3_INV			BIT(6)
#define GPIO3_CFG(_cfg_)		((_cfg_) & 0x3F)

#define CC1200_REG_IOCFG2		(0x01)
#define GPIO2_ATRAN			BIT(7)
#define GPIO2_INV			BIT(6)
#define GPIO2_CFG(_cfg_)		((_cfg_) & 0x3F)

#define CC1200_REG_IOCFG1		(0x02)
#define GPIO1_ATRAN			BIT(7)
#define GPIO1_INV			BIT(6)
#define GPIO1_CFG(_cfg_)		((_cfg_) & 0x3F)

#define CC1200_REG_IOCFG0		(0x03)
#define GPIO0_ATRAN			BIT(7)
#define GPIO0_INV			BIT(6)
#define GPIO0_CFG(_cfg_)		((_cfg_) & 0x3F)

#define CC1200_REG_SYNC3		(0x04)
#define CC1200_REG_SYNC2		(0x05)
#define CC1200_REG_SYNC1		(0x06)
#define CC1200_REG_SYNC0		(0x07)

#define CC1200_REG_SYNC_CFG1		(0x08)
#define SYNC_MODE(_cfg_)		((_cfg_) & 0xE0)
#define SYNC_THR(_cfg_)			((_cfg_) & 0x1F)

#define CC1200_REG_SYNC_CFG0		(0x09)
#define AUTO_CLEAR			BIT(5)
#define RX_CONFIG_LIMITATION		BIT(4)
#define PQT_GATING_EN			BIT(3)
#define EXT_SYNC_DETECT			BIT(2)
#define STRICT_SYNC_CHECK(_cfg_)	((_cfg_) & 0x03)

#define CC1200_REG_DEVIATION_M		(0x0A)

#define CC1200_REG_MODCFG_DEV_E		(0x0B)
#define MODEM_MODE(_cfg_)		((_cfg_) & 0xC0)
#define MOD_FORMAT(_cfg_)		((_cfg_) & 0x38)
#define DEV_E(_cfg_)			((_cfg_) & 0x07)

#define CC1200_REG_DCFILT_CFG		(0x0C)
#define DCFILT_FREEZE_COEFF		BIT(6)
#define DCFILT_BW_SETTLE(_cfg_)		((_cfg_) & 0x38)
#define DCFILT_BW(_cfg_)		((_cfg_) & 0x07)

#define CC1200_REG_PREAMBLE_CFG1	(0x0D)
#define NUM_PREAMBL(_cfg_)		((_cfg_) & 0x3C)
#define PREAMBLE_WORD(_cfg_)		((_cfg_) & 0x03)

#define CC1200_REG_PREAMBLE_CFG0	(0x0E)
#define PQT_EN				BIT(7)
#define PQT_VALID_TIMEOUT(_cfg_)	((_cfg_) & 0x70)
#define PQT(_cfg_)			((_cfg_) & 0x0F)

#define CC1200_REG_IQIC			(0x0F)
#define IQIC_EN				BIT(7)
#define IQIC_UPDATE_COEFF_EN		BIT(6)
#define IQIC_BLEN_SETTLE(_cfg_)		((_cfg_) & 0x30)
#define IQIC_BLEN(_cfg_)		((_cfg_) & 0x0C)
#define IQIC_IMGCH_LEVEL_THR(_cfg_)	((_cfg_) & 0x03)

#define CC1200_REG_CHAN_BW		(0x10)
#define ADC_CIC_DECFACT(_cfg_)		((_cfg_) & 0xC0)
#define BB_CIC_DECFAC(_cfg_)		((_cfg_) & 0x3F)
/* MDMCFG2 is only described in chapter 12; no address is known for it in the
 * primary register space (see CC1200_REG_MDMCFG2 in the extended space below):
* #define CC1200_REG_MDMCFG2 ()
* #define ASK_SHAPE(_cfg_) (_cfg_ & 0xC0)
* #define SYMBOL_MAP_CFG(_cfg_) (_cfg_ & 0x30)
* #define UPSAMPLER_P(_cfg_) (_cfg_ & 0x0E)
* #define CFM_DATA_EN BIT(0)
*/
#define CC1200_REG_MDMCFG1 (0x11)
#define CARRIER_SENSE_GATE BIT(7)
#define FIFO_EN BIT(6)
#define MANCHESTER_EN BIT(5)
#define INVERT_DATA_EN BIT(4)
#define COLLISION_DETECT_EN BIT(3)
#define DVGA_GAIN(_cfg_) (_cfg_ & 0x06)
#define SINGLE_ADC_EN BIT(0)
#define CC1200_REG_MDMCFG0 (0x12)
#define TRANSPARENT_MODE_EN BIT(6)
#define TRANSPARENT_INTFACT(_cfg_) (_cfg_ & 0x30)
#define DATA_FILTER_EN BIT(3)
#define VITERBI_EN BIT(2)
#define CC1200_REG_SYMBOL_RATE2 (0x13)
#define SRATE_E(_cfg_) (_cfg_ & 0xF0)
#define SRATE_M_19_16(_cfg_) (_cfg_ & 0x0F)
#define CC1200_REG_SYMBOL_RATE1 (0x14)
#define CC1200_REG_SYMBOL_RATE0 (0x15)
#define CC1200_REG_AGC_REF (0x16)
#define CC1200_REG_AGC_CS_THR (0x17)
#define CC1200_REG_AGC_GAIN_ADJUST (0x18)
#define CC1200_REG_AGC_CFG3 (0x19)
#define AGC_SYNC_BEHAVIOUR(_cfg_) (_cfg_ & 0xE0)
#define AGC_MIN_GAIN(_cfg_) (_cfg_ & 0x1F)
#define CC1200_REG_AGC_CFG2 (0x1A)
#define START_PREVIOUS_GAIN_EN BIT(7)
#define FE_PERFORMANCE_MODE(_cfg_) (_cfg_ & 0x60)
#define AGC_MAX_GAIN(_cfg_) (_cfg_ & 0x1F)
#define CC1200_REG_AGC_CFG1 (0x1B)
#define RSSI_STEP_THR BIT(6)
#define AGC_WIN_SIZE(_cfg_) (_cfg_ & 0x38)
#define AGC_SETTLE_WAIT(_cfg_) (_cfg_ & 0x07)
#define CC1200_REG_AGC_CFG0 (0x1C)
#define AGC_HYST_LEVEL(_cfg_) (_cfg_ & 0xC0)
#define AGC_SLEWRATE_LIMIT(_cfg_) (_cfg_ & 0x30)
#define RSSI_VALID_CNT(_cfg_) (_cfg_ & 0x0C)
#define AGC_ASK_DECAY(_cfg_) (_cfg_ & 0x03)
#define CC1200_REG_FIFO_CFG (0x1D)
#define CRC_AUTOFLUSH BIT(7)
#define FIFO_THR(_cfg_) (_cfg_ & 0x7F)
#define CC1200_REG_DEV_ADDR (0x1E)
#define CC1200_REG_SETTLING_CFG (0x1F)
#define FS_AUTOCAL(_cfg_) (_cfg_ & 0x18)
#define LOCK_TIME(_cfg_) (_cfg_ & 0x06)
#define FSREG_TIME BIT(0)
#define CC1200_REG_FS_CFG (0x20)
#define FS_LOCK_EN BIT(4)
#define FSD_BANDSELECT(_cfg_) (_cfg_ & 0x0F)
#define CC1200_REG_WOR_CFG1 (0x21)
#define WOR_RES(_cfg_) (_cfg_ & 0xC0)
#define WOR_MODE(_cfg_) (_cfg_ & 0x38)
#define EVENT1(_cfg_) (_cfg_ & 0x07)
#define CC1200_REG_WOR_CFG0 (0x22)
#define RX_DUTY_CYCLE_MODE(_cfg_) (_cfg_ & 0xC0)
#define DIV_256HZ_EN BIT(5)
#define EVENT2_CFG(_cfg_) (_cfg_ & 0x18)
#define RC_MODE(_cfg_) (_cfg_ & 0x06)
#define RC_PD BIT(0)
#define CC1200_REG_WOR_EVENT0_MSB (0x23)
#define CC1200_REG_WOR_EVENT0_LSB (0x24)
#define CC1200_REG_RXDCM_TIME (0x25)
#define CC1200_REG_PKT_CFG2 (0x26)
#define BYTE_SWAP_EN BIT(6)
#define FG_MODE_EN BIT(5)
#define CCA_MODE(_cfg_) (_cfg_ & 0x1C)
#define CCA_ALWAYS_CLEAR (0)
#define CCA_RSSI_BELOW (1 << 2)
#define CCA_NO_RX (2 << 2)
#define CCA_RSSI_BELOW_NO_RX (3 << 2)
#define CCA_RSSI_BELOW_ETSI_LBT (4 << 2)
#define PKT_FORMAT(_cfg_) (_cfg_ & 0x03)
#define PKT_FORMAT_NORMAL_MODE (0)
#define PKT_FORMAT_SYNCHRONOUS_MODE (1)
#define PKT_FORMAT_RANDOM_MODE (2)
#define PKT_FORMAT_TRANSPARENT_MODE (3)
#define CC1200_REG_PKT_CFG1 (0x27)
#define FEC_EN BIT(7)
#define WHITE_DATA BIT(6)
#define PN9_SWAP_EN BIT(5)
#define ADDR_CHECK_CFG(_cfg_) (_cfg_ & 0x18)
#define ADDR_NO_CHK (0)
#define ADDR_CHK_NO_BROADCAST (1 << 3)
#define ADDR_CHK_BROADCAST_00 (2 << 3)
#define ADDR_CHK_BROADCAST_FF (3 << 3)
#define CRC_CFG(_cfg_) (_cfg_ & 0x06)
#define CRC_NONE (0)
#define CRC_FFFF (1 << 1)
#define CRC_0000 (2 << 1)
#define CRC_1D0F (3 << 1)
#define APPEND_STATUS BIT(0)
#define CC1200_REG_PKT_CFG0 (0x28)
#define LENGTH_CONFIG(_cfg_) (_cfg_ & 0x60)
#define LENGTH_FIXED (0)
#define LENGTH_VAR_1 (1 << 5)
#define LENGTH_INFINITE (2 << 5)
#define LENGTH_VAR_2 (3 << 5)
#define PKT_BIT_LEN(_cfg_) (_cfg_ & 0x1C)
#define UART_MODE_EN BIT(1)
#define UART_SWAP_EN BIT(0)
#define CC1200_REG_RFEND_CFG1 (0x29)
#define RXOFF_MODE(_cfg_) (_cfg_ & 0x30)
#define RX_TIME(_cfg_) (_cfg_ & 0x0E)
#define RX_TIME_QUAL BIT(0)
#define CC1200_REG_RFEND_CFG0 (0x2A)
#define CAL_END_WAKE_UP_EN BIT(6)
#define TXOFF_MODE(_cfg_) (_cfg_ & 0x30)
#define TERM_ON_BAD_PACKET_EN BIT(3)
#define ANT_DIV_RX_TERM_CFG(_cfg_) (_cfg_ & 0x07)
/* Common RFEND tx/rx mode */
#define RFEND_TXRX_MODE_IDLE (0)
#define RFEND_TXRX_MODE_FSTXON (1 << 4)
#define RFEND_TXRX_MODE_TX (2 << 4)
#define RFEND_TXRX_MODE_RX (3 << 4)
#define CC1200_REG_PA_CFG1 (0x2B)
#define PA_RAMP_SHAPE_EN BIT(6)
#define PA_POWER_RAMP_MASK (0x3F)
#define CC1200_REG_PA_CFG0 (0x2C)
#define FIRST_IPL(_cfg_) (_cfg_ & 0xE0)
#define SECOND_IPL(_cfg_) (_cfg_ & 0x1C)
#define RAMP_SHAPE(_cfg_) (_cfg_ & 0x03)
#define CC1200_REG_ASK_CFG (0x2D)
#define AGC_ASK_BW(_cfg_) (_cfg_ & 0xC0)
#define ASK_DEPTH(_cfg_) (_cfg_ & 0x3F)
#define CC1200_REG_PKT_LEN (0x2E)
#define CC1200_REG_EXTENDED_ADDRESS (0x2F)
/* Command strobes */
#define CC1200_INS_SRES (0x30)
#define CC1200_INS_SFSTXON (0x31)
#define CC1200_INS_SXOFF (0x32)
#define CC1200_INS_SCAL (0x33)
#define CC1200_INS_SRX (0x34)
#define CC1200_INS_STX (0x35)
#define CC1200_INS_SIDLE (0x36)
#define CC1200_INS_SAFC (0x37)
#define CC1200_INS_SWOR (0x38)
#define CC1200_INS_SPWD (0x39)
#define CC1200_INS_SFRX (0x3A)
#define CC1200_INS_SFTX (0x3B)
#define CC1200_INS_SWORRST (0x3C)
#define CC1200_INS_SNOP (0x3D)
/* Memory access */
#define CC1200_MEM_DMA (0x3E)
#define CC1200_MEM_STD (0x3F)
/* FIFO access */
#define CC1200_REG_TXFIFO (0x3F)
#define CC1200_REG_RXFIFO (0x3F)
/* Configuration registers (Extended register space) */
#define CC1200_REG_IF_MIX_CFG (0x00)
#define CC1200_REG_FREQOFF_CFG (0x01)
#define CC1200_REG_TOC_CFG (0x02)
#define CC1200_REG_MARC_SPARE (0x03)
#define CC1200_REG_ECG_CFG (0x04)
#define CC1200_REG_MDMCFG2 (0x05)
#define CC1200_REG_EXT_CTRL (0x06)
#define CC1200_REG_RCCAL_FINE (0x07)
#define CC1200_REG_RCCAL_COARSE (0x08)
#define CC1200_REG_RCCAL_OFFSET (0x09)
#define CC1200_REG_FREQOFF1 (0x0A)
#define CC1200_REG_FREQOFF0 (0x0B)
#define CC1200_REG_FREQ2 (0x0C)
#define CC1200_REG_FREQ1 (0x0D)
#define CC1200_REG_FREQ0 (0x0E)
#define CC1200_REG_IF_ADC2 (0x0F)
#define CC1200_REG_IF_ADC1 (0x10)
#define CC1200_REG_IF_ADC0 (0x11)
#define CC1200_REG_FS_DIG1 (0x12)
#define CC1200_REG_FS_DIG0 (0x13)
#define CC1200_REG_FS_CAL3 (0x14)
#define CC1200_REG_FS_CAL2 (0x15)
#define CC1200_REG_FS_CAL1 (0x16)
#define CC1200_REG_FS_CAL0 (0x17)
#define CC1200_REG_FS_CHP (0x18)
#define CC1200_REG_FS_DIVTWO (0x19)
#define CC1200_REG_FS_DSM1 (0x1A)
#define CC1200_REG_FS_DSM0 (0x1B)
#define CC1200_REG_FS_DVC1 (0x1C)
#define CC1200_REG_FS_DVC0 (0x1D)
#define CC1200_REG_FS_LBI (0x1E)
#define CC1200_REG_FS_PFD (0x1F)
#define CC1200_REG_FS_PRE (0x20)
#define CC1200_REG_FS_REG_DIV_CML (0x21)
#define CC1200_REG_FS_SPARE (0x22)
#define CC1200_REG_FS_VCO4 (0x23)
#define CC1200_REG_FS_VCO3 (0x24)
#define CC1200_REG_FS_VCO2 (0x25)
#define CC1200_REG_FS_VCO1 (0x26)
#define CC1200_REG_FS_VCO0 (0x27)
#define CC1200_REG_GBIAS6 (0x28)
#define CC1200_REG_GBIAS5 (0x29)
#define CC1200_REG_GBIAS4 (0x2A)
#define CC1200_REG_GBIAS3 (0x2B)
#define CC1200_REG_GBIAS2 (0x2C)
#define CC1200_REG_GBIAS1 (0x2D)
#define CC1200_REG_GBIAS0 (0x2E)
#define CC1200_REG_IFAMP (0x2F)
#define CC1200_REG_LNA (0x30)
#define CC1200_REG_RXMIX (0x31)
#define CC1200_REG_XOSC5 (0x32)
#define CC1200_REG_XOSC4 (0x33)
#define CC1200_REG_XOSC3 (0x34)
#define CC1200_REG_XOSC2 (0x35)
#define CC1200_REG_XOSC1 (0x36)
#define CC1200_REG_XOSC0 (0x37)
#define CC1200_REG_ANALOG_SPARE (0x38)
#define CC1200_REG_PA_CFG3 (0x39)
/* All addresses in between are unused ones */
#define CC1200_REG_WOR_TIME1 (0x64)
#define CC1200_REG_WOR_TIME0 (0x65)
#define CC1200_REG_WOR_CAPTURE1 (0x66)
#define CC1200_REG_WOR_CAPTURE0 (0x67)
#define CC1200_REG_BIST (0x68)
#define CC1200_REG_DCFILTOFFSET_I1 (0x69)
#define CC1200_REG_DCFILTOFFSET_I0 (0x6A)
#define CC1200_REG_DCFILTOFFSET_Q1 (0x6B)
#define CC1200_REG_DCFILTOFFSET_Q0 (0x6C)
#define CC1200_REG_IQIE_I1 (0x6D)
#define CC1200_REG_IQIE_I0 (0x6E)
#define CC1200_REG_IQIE_Q1 (0x6F)
#define CC1200_REG_IQIE_Q0 (0x70)
#define CC1200_REG_RSSI1 (0x71)
#define CC1200_REG_RSSI0 (0x72)
#define RSSI(_reg_) ((_reg_ & 0x78) >> 3)
#define CARRIER_SENSE BIT(2)
#define CARRIER_SENSE_VALID BIT(1)
#define RSSI_VALID BIT(0)
#define CC1200_REG_MARCSTATE (0x73)
#define CC1200_REG_LQI_VAL (0x74)
#define CC1200_REG_PQT_SYNC_ERR (0x75)
#define CC1200_REG_DEM_STATUS (0x76)
#define CC1200_REG_FREQOFF_EST1 (0x77)
#define CC1200_REG_FREQOFF_EST0 (0x78)
#define CC1200_REG_AGC_GAIN3 (0x79)
#define CC1200_REG_AGC_GAIN2 (0x7A)
#define CC1200_REG_AGC_GAIN1 (0x7B)
#define CC1200_REG_AGC_GAIN0 (0x7C)
#define CC1200_REG_CFM_RX_DATA_OUT (0x7D)
#define CC1200_REG_CFM_RX_DATA_IN (0x7E)
#define CC1200_REG_ASK_SOFT_RX_DATA (0x7F)
#define CC1200_REG_RNDGEN (0x80)
#define CC1200_REG_MAGN2 (0x81)
#define CC1200_REG_MAGN1 (0x82)
#define CC1200_REG_MAGN0 (0x83)
#define CC1200_REG_ANG1 (0x84)
#define CC1200_REG_ANG0 (0x85)
#define CC1200_REG_CHFILT_I2 (0x86)
#define CC1200_REG_CHFILT_I1 (0x87)
#define CC1200_REG_CHFILT_I0 (0x88)
#define CC1200_REG_CHFILT_Q2 (0x89)
#define CC1200_REG_CHFILT_Q1 (0x8A)
#define CC1200_REG_CHFILT_Q0 (0x8B)
#define CC1200_REG_GPIO_STATUS (0x8C)
#define CC1200_REG_FSCAL_CTRL (0x8D)
#define CC1200_REG_PHASE_ADJUST (0x8E)
#define CC1200_REG_PARTNUMBER (0x8F)
#define CC1200_REG_PARTVERSION (0x90)
#define CC1200_REG_SERIAL_STATUS (0x91)
#define CC1200_REG_MODEM_STATUS1 (0x92)
#define CC1200_REG_MODEM_STATUS0 (0x93)
#define CC1200_REG_MARC_STATUS1 (0x94)
#define CC1200_REG_MARC_STATUS0 (0x95)
#define CC1200_REG_PA_IFAMP_TEST (0x96)
#define CC1200_REG_FSRF_TEST (0x97)
#define CC1200_REG_PRE_TEST (0x98)
#define CC1200_REG_PRE_OVR (0x99)
#define CC1200_REG_ADC_TEST (0x9A)
#define CC1200_REG_DVC_TEST (0x9B)
#define CC1200_REG_ATEST (0x9C)
#define CC1200_REG_ATEST_LVDS (0x9D)
#define CC1200_REG_ATEST_MODE (0x9E)
#define CC1200_REG_XOSC_TEST1 (0x9F)
#define CC1200_REG_XOSC_TEST0 (0xA0)
#define CC1200_REG_AES (0xA1)
#define CC1200_REG_MDM_TEST (0xA2)
/* All addresses in between are unused ones */
#define CC1200_REG_RXFIRST (0xD2)
#define CC1200_REG_TXFIRST (0xD3)
#define CC1200_REG_RXLAST (0xD4)
#define CC1200_REG_TXLAST (0xD5)
#define CC1200_REG_NUM_TXBYTES (0xD6)
#define CC1200_REG_NUM_RXBYTES (0xD7)
#define CC1200_REG_FIFO_NUM_TXBYTES (0xD8)
#define CC1200_REG_FIFO_NUM_RXBYTES (0xD9)
#define CC1200_REG_RXFIFO_PRE_BUF (0xDA)
/* All addresses in between are unused ones */
#define CC1200_REG_AES_WORKSPACE (0xE0)
/* Useful values
***************
*/
/* GPIO signals - See Section 3.4.1.1
* Note: some signals are only available on certain GPIO
* thus the number CC1200_GPIOn_(...) on some which
* tells the GPIO line where that signal is valid.
*/
#define CC1200_GPIO_SIG_RXFIFO_THR (0)
#define CC1200_GPIO_SIG_RXFIFO_THR_PKT (1)
#define CC1200_GPIO_SIG_TXFIFO_THR (2)
#define CC1200_GPIO_SIG_TXFIFO_THR_PKT (3)
#define CC1200_GPIO_SIG_RXFIFO_OVERFLOW (4)
#define CC1200_GPIO_SIG_TXFIFO_UNDERFLOW (5)
#define CC1200_GPIO_SIG_PKT_SYNC_RXTX (6)
#define CC1200_GPIO_SIG_CRC_OK (7)
#define CC1200_GPIO_SIG_SERIAL_CLK (8)
#define CC1200_GPIO_SIG_SERIAL_RX (9)
#define CC1200_GPIO_SIG_PQT_REACHED (11)
#define CC1200_GPIO_SIG_PQT_VALID (12)
#define CC1200_GPIO_SIG_RSSI_VALID (13)
#define CC1200_GPIO3_SIG_RSSI_UPDATE (14)
#define CC1200_GPIO2_SIG_RSSI_UPDATE (14)
#define CC1200_GPIO1_SIG_AGC_HOLD (14)
#define CC1200_GPIO0_SIG_AGC_UPDATE (14)
#define CC1200_GPIO3_SIG_CCA_STATUS (15)
#define CC1200_GPIO2_SIG_TXONCCA_DONE (15)
#define CC1200_GPIO1_SIG_CCA_STATUS (15)
#define CC1200_GPIO0_SIG_TXONCCA_FAILED (15)
#define CC1200_GPIO_SIG_CARRIER_SENSE_VALID (16)
#define CC1200_GPIO_SIG_CARRIER_SENSE (17)
#define CC1200_GPIO3_SIG_DSSS_CLK (18)
#define CC1200_GPIO2_SIG_DSSS_DATA0 (18)
#define CC1200_GPIO1_SIG_DSSS_CLK (18)
#define CC1200_GPIO0_SIG_DSSS_DATA1 (18)
#define CC1200_GPIO_SIG_PKT_CRC_OK (19)
#define CC1200_GPIO_SIG_MCU_WAKEUP (20)
#define CC1200_GPIO_SIG_SYNC_LOW0_HIGH1 (21)
#define CC1200_GPIO0_SIG_AES_COMMAND_ACTIVE (22)
#define CC1200_GPIO_SIG_LNA_PA_REG_PD (23)
#define CC1200_GPIO_SIG_LNA_PD (24)
#define CC1200_GPIO_SIG_PA_PD (25)
#define CC1200_GPIO_SIG_RX0TX1_CFG (26)
#define CC1200_GPIO_SIG_IMAGE_FOUND (28)
#define CC1200_GPIO_SIG_CLKEN_CFM (29)
#define CC1200_GPIO_SIG_CFM_TX_DATA_CLK (30)
#define CC1200_GPIO_SIG_RSSI_STEP_FOUND (33)
#define CC1200_GPIO3_SIG_AES_RUN (34)
#define CC1200_GPIO2_SIG_AES_RUN (34)
#define CC1200_GPIO1_SIG_RSSI_STEP_EVENT (34)
#define CC1200_GPIO0_SIG_RSSI_STEP_EVENT (34)
#define CC1200_GPIO1_SIG_LOCK (35)
#define CC1200_GPIO0_SIG_LOCK (35)
#define CC1200_GPIO_SIG_ANTENNA_SELECT (36)
#define CC1200_GPIO_SIG_MARC_2PIN_STATUS_1 (37)
#define CC1200_GPIO_SIG_MARC_2PIN_STATUS_0 (38)
#define CC1200_GPIO2_SIG_TXFIFO_OVERFLOW (39)
#define CC1200_GPIO0_SIG_RXFIFO_UNDERFLOW (39)
#define CC1200_GPIO3_SIG_MAGN_VALID (40)
#define CC1200_GPIO2_SIG_CHFILT_VALID (40)
#define CC1200_GPIO1_SIG_RCC_CAL_VALID (40)
#define CC1200_GPIO0_SIG_CHFILT_STARTUP_VALID (40)
#define CC1200_GPIO3_SIG_COLLISION_FOUND (41)
#define CC1200_GPIO2_SIG_SYNC_EVENT (41)
#define CC1200_GPIO1_SIG_COLLISION_FOUND (41)
#define CC1200_GPIO0_SIG_COLLISION_EVENT (41)
#define CC1200_GPIO_SIG_PA_RAMP_UP (42)
#define CC1200_GPIO3_SIG_CRC_FAILED (43)
#define CC1200_GPIO2_SIG_LENGTH_FAILED (43)
#define CC1200_GPIO1_SIG_ADDR_FAILED (43)
#define CC1200_GPIO0_SIG_UART_FRAMING_ERROR (43)
#define CC1200_GPIO_SIG_AGC_STABLE_GAIN (44)
#define CC1200_GPIO_SIG_AGC_UPDATE (45)
#define CC1200_GPIO3_SIG_ADC_CLOCK (46)
#define CC1200_GPIO2_SIG_ADC_Q_DATA_SAMPLE (46)
#define CC1200_GPIO1_SIG_ADC_CLOCK (46)
#define CC1200_GPIO0_SIG_ADC_I_DATA_SAMPLE (46)
#define CC1200_GPIO_SIG_HIGHZ (48)
#define CC1200_GPIO_SIG_EXT_CLOCK (49)
#define CC1200_GPIO_SIG_CHIP_RDYn (50)
#define CC1200_GPIO_SIG_HW0 (51)
#define CC1200_GPIO_SIG_CLOCK_40K (54)
#define CC1200_GPIO_SIG_WOR_EVENT0 (55)
#define CC1200_GPIO_SIG_WOR_EVENT1 (56)
#define CC1200_GPIO_SIG_WOR_EVENT2 (57)
#define CC1200_GPIO_SIG_XOSC_STABLE (59)
#define CC1200_GPIO_SIG_EXT_OSC_EN (60)
/* Chip status - See Section 3.1.2 */
#define CC1200_STATUS_CHIP_NOT_READY BIT(7)
#define CC1200_STATUS_IDLE (0x00)
#define CC1200_STATUS_RX (0x01 << 4)
#define CC1200_STATUS_TX (0x02 << 4)
#define CC1200_STATUS_FSTXON (0x03 << 4)
#define CC1200_STATUS_CALIBRATE (0x04 << 4)
#define CC1200_STATUS_SETTLING (0x05 << 4)
#define CC1200_STATUS_RX_FIFO_ERROR (0x06 << 4)
#define CC1200_STATUS_TX_FIFO_ERROR (0x07 << 4)
#define CC1200_STATUS_MASK (0x70)
/* Appended FCS - See Section 8 */
#define CC1200_FCS_LEN (2)
#define CC1200_FCS_CRC_OK BIT(7)
#define CC1200_FCS_LQI_MASK (0x7F)
/* ToDo: supporting 802.15.4g will make this header of a different size */
#define CC1200_PHY_HDR_LEN (1)
#endif /* ZEPHYR_DRIVERS_IEEE802154_IEEE802154_CC1200_REGS_H_ */
``` | /content/code_sandbox/drivers/ieee802154/ieee802154_cc1200_regs.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,895 |
```objective-c
/*
*/
#ifndef _OPTEE_MSG_H
#define _OPTEE_MSG_H
/*
* TODO: Zephyr has similar macros defined in stdint.h and called UINT32_C,
* UINT64_C etc. This should be refactored to use macro from Zephyr.
*/
#define U(v) v ## U
/*
* This file defines the OP-TEE message protocol used to communicate
* with an instance of OP-TEE running in secure world.
*/
/*****************************************************************************
* Part 1 - formatting of messages
*****************************************************************************/
#define OPTEE_MSG_ATTR_TYPE_NONE U(0x0)
#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT U(0x1)
#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT U(0x2)
#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT U(0x3)
#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT U(0x5)
#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT U(0x6)
#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT U(0x7)
#define OPTEE_MSG_ATTR_TYPE_FMEM_INPUT OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
#define OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT
#define OPTEE_MSG_ATTR_TYPE_FMEM_INOUT OPTEE_MSG_ATTR_TYPE_RMEM_INOUT
#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT U(0x9)
#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT U(0xa)
#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT U(0xb)
#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0)
/*
* Meta parameter to be absorbed by the Secure OS and not passed
* to the Trusted Application.
*
* Currently only used with OPTEE_MSG_CMD_OPEN_SESSION.
*/
#define OPTEE_MSG_ATTR_META BIT(8)
/*
* Pointer to a list of pages used to register user-defined SHM buffer.
* Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
* buf_ptr should point to the beginning of the buffer. Buffer will contain
* list of page addresses. OP-TEE core can reconstruct contiguous buffer from
* that page addresses list. Page addresses are stored as 64 bit values.
* Last entry on a page should point to the next page of buffer.
* Every entry in buffer should point to a 4k page beginning (12 least
* significant bits must be equal to zero).
*
* 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold
* page offset of user buffer.
*
* So, entries should be placed like members of this structure:
*
* struct page_data {
* uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
* uint64_t next_page_data;
* };
*
* Structure is designed to exactly fit into the page size
* OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page.
*
* The size of 4KB is chosen because this is the smallest page size for ARM
* architectures. If REE uses larger pages, it should divide them to 4KB ones.
*/
#define OPTEE_MSG_ATTR_NONCONTIG BIT(9)
/*
* Memory attributes for caching passed with temp memrefs. The actual value
* used is defined outside the message protocol with the exception of
* OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already
* defined for the memory range should be used. If optee_smc.h is used as
* bearer of this protocol OPTEE_SMC_SHM_* is used for values.
*/
#define OPTEE_MSG_ATTR_CACHE_SHIFT U(16)
#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0)
#define OPTEE_MSG_ATTR_CACHE_PREDEFINED U(0)
/*
* Same values as TEE_LOGIN_* from TEE Internal API
*/
#define OPTEE_MSG_LOGIN_PUBLIC U(0x00000000)
#define OPTEE_MSG_LOGIN_USER U(0x00000001)
#define OPTEE_MSG_LOGIN_GROUP U(0x00000002)
#define OPTEE_MSG_LOGIN_APPLICATION U(0x00000004)
#define OPTEE_MSG_LOGIN_APPLICATION_USER U(0x00000005)
#define OPTEE_MSG_LOGIN_APPLICATION_GROUP U(0x00000006)
/*
* Page size used in non-contiguous buffer entries
*/
#define OPTEE_MSG_NONCONTIG_PAGE_SIZE U(4096)
#define OPTEE_MSG_FMEM_INVALID_GLOBAL_ID 0xffffffffffffffff
#ifndef __ASSEMBLER__
/**
* struct optee_msg_param_tmem - temporary memory reference parameter
* @buf_ptr: Address of the buffer
* @size: Size of the buffer
* @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm
*
* Secure and normal world communicates pointers as physical address
* instead of the virtual address. This is because secure and normal world
* have completely independent memory mapping. Normal world can even have a
* hypervisor which need to translate the guest physical address (AKA IPA
* in ARM documentation) to a real physical address before passing the
* structure to secure world.
*/
struct optee_msg_param_tmem {
uint64_t buf_ptr;
uint64_t size;
uint64_t shm_ref;
};
/**
* struct optee_msg_param_rmem - registered memory reference parameter
* @offs: Offset into shared memory reference
* @size: Size of the buffer
* @shm_ref: Shared memory reference, pointer to a struct tee_shm
*/
struct optee_msg_param_rmem {
uint64_t offs;
uint64_t size;
uint64_t shm_ref;
};
/**
 * struct optee_msg_param_fmem - FF-A memory reference parameter
 * @offs_low: Lower bits of offset into shared memory reference
 * @offs_high: Upper bits of offset into shared memory reference
 * @internal_offs: Internal offset into the first page of shared memory
 * reference
 * @size: Size of the buffer
 * @global_id: Global identifier of the shared memory
 */
struct optee_msg_param_fmem {
uint32_t offs_low;
uint16_t offs_high;
uint16_t internal_offs;
uint64_t size;
uint64_t global_id;
};
/**
* struct optee_msg_param_value - opaque value parameter
*
* Value parameters are passed unchecked between normal and secure world.
*/
struct optee_msg_param_value {
uint64_t a;
uint64_t b;
uint64_t c;
};
/**
 * struct optee_msg_param - parameter used together with struct optee_msg_arg
 * @attr: attributes
 * @tmem: parameter by temporary memory reference
 * @rmem: parameter by registered memory reference
 * @fmem: parameter by FF-A registered memory reference
 * @value: parameter by opaque value
 *
 * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
 * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
 * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
 * OPTEE_MSG_ATTR_TYPE_RMEM_* or the alias OPTEE_MSG_ATTR_TYPE_FMEM_* indicates
 * @rmem or @fmem depending on the conduit.
 * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
 */
struct optee_msg_param {
uint64_t attr;
union {
struct optee_msg_param_tmem tmem;
struct optee_msg_param_rmem rmem;
struct optee_msg_param_fmem fmem;
struct optee_msg_param_value value;
} u;
};
/**
 * struct optee_msg_arg - call argument
 * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_*
 * @func: Trusted Application function, specific to the Trusted Application,
 * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND
 * @session: In parameter for all OPTEE_MSG_CMD_* except
 * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead
 * @cancel_id: Cancellation id, a unique value to identify this request
 * @pad: padding, unused
 * @ret: return value
 * @ret_origin: origin of the return value
 * @num_params: number of parameters supplied to the OS Command
 * @params: the parameters supplied to the OS Command
 *
 * All normal calls to Trusted OS use this struct. If cmd requires further
 * information than what these fields hold it can be passed as a parameter
 * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in the corresponding
 * attrs field). All parameters tagged as meta have to come first.
 */
struct optee_msg_arg {
uint32_t cmd;
uint32_t func;
uint32_t session;
uint32_t cancel_id;
uint32_t pad;
uint32_t ret;
uint32_t ret_origin;
uint32_t num_params;
/* num_params tells the actual number of elements in params */
struct optee_msg_param params[];
};
/**
* OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
*
* @num_params: Number of parameters embedded in the struct optee_msg_arg
*
* Returns the size of the struct optee_msg_arg together with the number
* of embedded parameters.
*/
#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
(sizeof(struct optee_msg_arg) + \
sizeof(struct optee_msg_param) * (num_params))
/*
* Defines the maximum value of @num_params that can be passed to
* OPTEE_MSG_GET_ARG_SIZE without a risk of crossing page boundary.
*/
#define OPTEE_MSG_MAX_NUM_PARAMS \
((OPTEE_MSG_NONCONTIG_PAGE_SIZE - sizeof(struct optee_msg_arg)) / \
sizeof(struct optee_msg_param))
#endif /*__ASSEMBLER__*/
/*****************************************************************************
* Part 2 - requests from normal world
*****************************************************************************/
/*
* Return the following UID if using API specified in this file without
* further extensions:
* 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
* Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
* OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
*/
#define OPTEE_MSG_UID_0 U(0x384fb3e0)
#define OPTEE_MSG_UID_1 U(0xe7f811e3)
#define OPTEE_MSG_UID_2 U(0xaf630002)
#define OPTEE_MSG_UID_3 U(0xa5d5c51b)
#define OPTEE_MSG_FUNCID_CALLS_UID U(0xFF01)
/*
* Returns 2.0 if using API specified in this file without further
* extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
* and OPTEE_MSG_REVISION_MINOR
*/
#define OPTEE_MSG_REVISION_MAJOR U(2)
#define OPTEE_MSG_REVISION_MINOR U(0)
#define OPTEE_MSG_FUNCID_CALLS_REVISION U(0xFF03)
/*
* Get UUID of Trusted OS.
*
* Used by non-secure world to figure out which Trusted OS is installed.
* Note that returned UUID is the UUID of the Trusted OS, not of the API.
*
* Returns UUID in 4 32-bit words in the same way as
* OPTEE_MSG_FUNCID_CALLS_UID described above.
*/
#define OPTEE_MSG_OS_OPTEE_UUID_0 U(0x486178e0)
#define OPTEE_MSG_OS_OPTEE_UUID_1 U(0xe7f811e3)
#define OPTEE_MSG_OS_OPTEE_UUID_2 U(0xbc5e0002)
#define OPTEE_MSG_OS_OPTEE_UUID_3 U(0xa5d5c51b)
#define OPTEE_MSG_FUNCID_GET_OS_UUID U(0x0000)
/*
* Get revision of Trusted OS.
*
* Used by non-secure world to figure out which version of the Trusted OS
* is installed. Note that the returned revision is the revision of the
* Trusted OS, not of the API.
*
* Returns revision in 2 32-bit words in the same way as
* OPTEE_MSG_CALLS_REVISION described above.
*/
#define OPTEE_MSG_FUNCID_GET_OS_REVISION U(0x0001)
/*
* Do a secure call with struct optee_msg_arg as argument
* The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
*
* OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
* The first two parameters are tagged as meta, holding two value
* parameters to pass the following information:
* param[0].u.value.a-b uuid of Trusted Application
* param[1].u.value.a-b uuid of Client
* param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_*
*
* OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command a previously opened
* session to a Trusted Application. struct optee_msg_arg::func is Trusted
* Application function, specific to the Trusted Application.
*
* OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
* Trusted Application.
*
* OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
*
* OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
* information is passed as:
* [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
* [| OPTEE_MSG_ATTR_NONCONTIG]
* [in] param[0].u.tmem.buf_ptr physical address (of first fragment)
* [in] param[0].u.tmem.size size (of first fragment)
* [in] param[0].u.tmem.shm_ref holds shared memory reference
*
* OPTEE_MSG_CMD_UNREGISTER_SHM unregisteres a previously registered shared
* memory reference. The information is passed as:
* [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
* [in] param[0].u.rmem.shm_ref holds shared memory reference
* [in] param[0].u.rmem.offs 0
* [in] param[0].u.rmem.size 0
*
* OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing
* of a driver.
*
* OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now is
* normal world unable to process asynchronous notifications. Typically
* used when the driver is shut down.
*/
#define OPTEE_MSG_CMD_OPEN_SESSION U(0)
#define OPTEE_MSG_CMD_INVOKE_COMMAND U(1)
#define OPTEE_MSG_CMD_CLOSE_SESSION U(2)
#define OPTEE_MSG_CMD_CANCEL U(3)
#define OPTEE_MSG_CMD_REGISTER_SHM U(4)
#define OPTEE_MSG_CMD_UNREGISTER_SHM U(5)
#define OPTEE_MSG_CMD_DO_BOTTOM_HALF U(6)
#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF U(7)
#define OPTEE_MSG_FUNCID_CALL_WITH_ARG U(0x0004)
#endif /* _OPTEE_MSG_H */
``` | /content/code_sandbox/drivers/tee/optee/optee_msg.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,190 |
```objective-c
/*
*/
#ifndef __OPTEE_RPC_CMD_H
#define __OPTEE_RPC_CMD_H
/*
* All RPC is done with a struct optee_msg_arg as bearer of information,
* struct optee_msg_arg::arg holds values defined by OPTEE_RPC_CMD_* below.
* Only the commands handled by the kernel driver are defined here.
*
* RPC communication with tee-supplicant is reversed compared to normal
* client communication described above. The supplicant receives requests
* and sends responses.
*/
/*
* Get time
*
* Returns number of seconds and nano seconds since the Epoch,
* 1970-01-01 00:00:00 +0000 (UTC).
*
* [out] value[0].a Number of seconds
* [out] value[0].b Number of nano seconds.
*/
#define OPTEE_RPC_CMD_GET_TIME 3
/*
* Notification from/to secure world.
*
* If secure world needs to wait for something, for instance a mutex, it
* does a notification wait request instead of spinning in secure world.
* Conversely, a synchronous notification can be sent when a secure
* world mutex with a waiting thread is unlocked.
*
* This interface can also be used to wait for an asynchronous notification
* which instead is sent via a non-secure interrupt.
*
* Waiting on notification
* [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
* [in] value[0].b notification value
*
* Sending a synchronous notification
* [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
* [in] value[0].b notification value
*/
#define OPTEE_RPC_CMD_NOTIFICATION 4
#define OPTEE_RPC_NOTIFICATION_WAIT 0
#define OPTEE_RPC_NOTIFICATION_SEND 1
/*
* Suspend execution
*
* [in] value[0].a Number of milliseconds to suspend
*/
#define OPTEE_RPC_CMD_SUSPEND 5
/*
* Allocate a piece of shared memory
*
* [in] value[0].a Type of memory one of
* OPTEE_RPC_SHM_TYPE_* below
* [in] value[0].b Requested size
* [in] value[0].c Required alignment
* [out] memref[0] Buffer
*/
#define OPTEE_RPC_CMD_SHM_ALLOC 6
/* Memory that can be shared with a non-secure user space application */
#define OPTEE_RPC_SHM_TYPE_APPL 0
/* Memory only shared with non-secure kernel */
#define OPTEE_RPC_SHM_TYPE_KERNEL 1
/*
* Free shared memory previously allocated with OPTEE_RPC_CMD_SHM_ALLOC
*
* [in] value[0].a Type of memory one of
* OPTEE_RPC_SHM_TYPE_* above
* [in] value[0].b Value of shared memory reference or cookie
*/
#define OPTEE_RPC_CMD_SHM_FREE 7
/*
* Issue master requests (read and write operations) to an I2C chip.
*
* [in] value[0].a Transfer mode (OPTEE_RPC_I2C_TRANSFER_*)
* [in] value[0].b The I2C bus (a.k.a adapter).
* 16 bit field.
* [in] value[0].c The I2C chip (a.k.a address).
* 16 bit field (either 7 or 10 bit effective).
* [in] value[1].a The I2C master control flags (ie, 10 bit address).
* 16 bit field.
* [in/out] memref[2] Buffer used for data transfers.
* [out] value[3].a Number of bytes transferred by the REE.
*/
#define OPTEE_RPC_CMD_I2C_TRANSFER 21
/* I2C master transfer modes */
#define OPTEE_RPC_I2C_TRANSFER_RD 0
#define OPTEE_RPC_I2C_TRANSFER_WR 1
/* I2C master control flags */
#define OPTEE_RPC_I2C_FLAGS_TEN_BIT BIT(0)
#endif /*__OPTEE_RPC_CMD_H*/
``` | /content/code_sandbox/drivers/tee/optee/optee_rpc_cmd.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 888 |
```unknown
config OPTEE
bool "OP-TEE driver"
depends on (ARM64 && ARMV8_A_NS && HAS_ARM_SMCCC) || ZTEST
help
	  This implements support for the OP-TEE firmware, which is loaded
	  as the BL32 image. OP-TEE is a TrustZone OS that implements hardware
	  isolation mechanisms and relies on ARM TrustZone technology.
	  The driver requests functions from OP-TEE and implements the RPC
	  mechanism needed by OP-TEE to run services. See path_to_url for
	  more information.
config OPTEE_MAX_NOTIF
int "Max number of OP-TEE notifications"
depends on OPTEE
default $(UINT8_MAX)
help
	  Sets the maximum number of notifications from OP-TEE to the Normal
	  World. OP-TEE uses this mechanism for synchronization.
``` | /content/code_sandbox/drivers/tee/optee/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 173 |
```objective-c
/*
*/
#ifndef OPTEE_SMC_H
#define OPTEE_SMC_H
#include <stdint.h>
/*
 * This file is exported by OP-TEE and is kept in sync between the secure
 * world and the normal world kernel driver. We're following ARM SMC Calling
* Convention as specified in
* path_to_url
*
* This file depends on optee_msg.h being included to expand the SMC id
* macros below.
*/
#define OPTEE_SMC_32 U(0)
#define OPTEE_SMC_64 U(0x40000000)
#define OPTEE_SMC_FAST_CALL U(0x80000000)
#define OPTEE_SMC_STD_CALL U(0)
#define OPTEE_SMC_OWNER_MASK U(0x3F)
#define OPTEE_SMC_OWNER_SHIFT U(24)
#define OPTEE_SMC_FUNC_MASK U(0xFFFF)
#define OPTEE_SMC_IS_FAST_CALL(smc_val) ((smc_val) & OPTEE_SMC_FAST_CALL)
#define OPTEE_SMC_IS_64(smc_val) ((smc_val) & OPTEE_SMC_64)
#define OPTEE_SMC_FUNC_NUM(smc_val) ((smc_val) & OPTEE_SMC_FUNC_MASK)
#define OPTEE_SMC_OWNER_NUM(smc_val) \
(((smc_val) >> OPTEE_SMC_OWNER_SHIFT) & OPTEE_SMC_OWNER_MASK)
#define OPTEE_SMC_CALL_VAL(type, calling_convention, owner, func_num) \
((type) | (calling_convention) | \
(((owner) & OPTEE_SMC_OWNER_MASK) << \
OPTEE_SMC_OWNER_SHIFT) |\
((func_num) & OPTEE_SMC_FUNC_MASK))
#define OPTEE_SMC_STD_CALL_VAL(func_num) \
OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_STD_CALL, \
OPTEE_SMC_OWNER_TRUSTED_OS, (func_num))
#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
OPTEE_SMC_OWNER_TRUSTED_OS, (func_num))
#define OPTEE_SMC_OWNER_ARCH U(0)
#define OPTEE_SMC_OWNER_CPU U(1)
#define OPTEE_SMC_OWNER_SIP U(2)
#define OPTEE_SMC_OWNER_OEM U(3)
#define OPTEE_SMC_OWNER_STANDARD U(4)
#define OPTEE_SMC_OWNER_TRUSTED_APP U(48)
#define OPTEE_SMC_OWNER_TRUSTED_OS U(50)
#define OPTEE_SMC_OWNER_TRUSTED_OS_OPTEED U(62)
#define OPTEE_SMC_OWNER_TRUSTED_OS_API U(63)
/*
* Function specified by SMC Calling convention.
*/
#define OPTEE_SMC_FUNCID_CALLS_COUNT U(0xFF00)
#define OPTEE_SMC_CALLS_COUNT \
OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
OPTEE_SMC_OWNER_TRUSTED_OS_API, \
OPTEE_SMC_FUNCID_CALLS_COUNT)
/*
* Normal cached memory (write-back), shareable for SMP systems and not
* shareable for UP systems.
*/
#define OPTEE_SMC_SHM_CACHED U(1)
/*
* a0..a7 is used as register names in the descriptions below, on arm32
* that translates to r0..r7 and on arm64 to w0..w7. In both cases it's
* 32-bit registers.
*/
/*
* Function specified by SMC Calling convention
*
* Return the following UID if using API specified in this file
* without further extensions:
* 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
* see also OPTEE_MSG_UID_* in optee_msg.h
*/
#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
#define OPTEE_SMC_CALLS_UID \
OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
OPTEE_SMC_OWNER_TRUSTED_OS_API, \
OPTEE_SMC_FUNCID_CALLS_UID)
/*
* Function specified by SMC Calling convention
*
* Returns 2.0 if using API specified in this file without further extensions.
* see also OPTEE_MSG_REVISION_* in optee_msg.h
*/
#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
#define OPTEE_SMC_CALLS_REVISION \
OPTEE_SMC_CALL_VAL(OPTEE_SMC_32, OPTEE_SMC_FAST_CALL, \
OPTEE_SMC_OWNER_TRUSTED_OS_API, \
OPTEE_SMC_FUNCID_CALLS_REVISION)
/*
* Get UUID of Trusted OS.
*
* Used by non-secure world to figure out which Trusted OS is installed.
* Note that returned UUID is the UUID of the Trusted OS, not of the API.
*
* Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID
* described above.
*/
#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
#define OPTEE_SMC_CALL_GET_OS_UUID \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
/*
* Get revision of Trusted OS.
*
* Used by non-secure world to figure out which version of the Trusted OS
* is installed. Note that the returned revision is the revision of the
* Trusted OS, not of the API.
*
* Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
* described above. May optionally return a 32-bit build identifier in a2,
* with zero meaning unspecified.
*/
#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
#define OPTEE_SMC_CALL_GET_OS_REVISION \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
/*
* Call with struct optee_msg_arg as argument
*
* When called with OPTEE_SMC_CALL_WITH_RPC_ARG or
* OPTEE_SMC_CALL_WITH_REGD_ARG in a0 there is one RPC struct optee_msg_arg
* following after the first struct optee_msg_arg. The RPC struct
* optee_msg_arg has reserved space for the number of RPC parameters as
* returned by OPTEE_SMC_EXCHANGE_CAPABILITIES.
*
* When calling these functions normal world has a few responsibilities:
* 1. It must be able to handle eventual RPCs
* 2. Non-secure interrupts should not be masked
* 3. If asynchronous notifications has been negotiated successfully, then
* the interrupt for asynchronous notifications should be unmasked
* during this call.
*
* Call register usage, OPTEE_SMC_CALL_WITH_ARG and
* OPTEE_SMC_CALL_WITH_RPC_ARG:
* a0 SMC Function ID, OPTEE_SMC_CALL_WITH_ARG or OPTEE_SMC_CALL_WITH_RPC_ARG
* a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
* a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
* a3 Cache settings, not used if physical pointer is in a predefined shared
* memory area else per OPTEE_SMC_SHM_*
* a4-6 Not used
* a7 Hypervisor Client ID register
*
* Call register usage, OPTEE_SMC_CALL_WITH_REGD_ARG:
* a0 SMC Function ID, OPTEE_SMC_CALL_WITH_REGD_ARG
* a1 Upper 32 bits of a 64-bit shared memory cookie
* a2 Lower 32 bits of a 64-bit shared memory cookie
* a3 Offset of the struct optee_msg_arg in the shared memory with the
* supplied cookie
* a4-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 Return value, OPTEE_SMC_RETURN_*
* a1-3 Not used
* a4-7 Preserved
*
* OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
* a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
* a1-3 Preserved
* a4-7 Preserved
*
* RPC return register usage:
* a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
* a1-2 RPC parameters
* a3-7 Resume information, must be preserved
*
* Possible return values:
* OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
* function.
* OPTEE_SMC_RETURN_OK Call completed, result updated in
* the previously supplied struct
* optee_msg_arg.
* OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
* try again later.
* OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct
* optee_msg_arg.
* OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
* OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
* world.
*/
#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
#define OPTEE_SMC_CALL_WITH_ARG \
OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
#define OPTEE_SMC_CALL_WITH_RPC_ARG \
OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG)
#define OPTEE_SMC_CALL_WITH_REGD_ARG \
OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG)
/*
* Get Shared Memory Config
*
* Returns the Secure/Non-secure shared memory config.
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
* a1-6 Not used
* a7 Hypervisor Client ID register
*
* Have config return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 Physical address of start of SHM
 * a2	Size of SHM
* a3 Cache settings of memory, as defined by the
* OPTEE_SMC_SHM_* values above
* a4-7 Preserved
*
* Not available register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL
* a1-3 Not used
* a4-7 Preserved
*/
#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
#define OPTEE_SMC_GET_SHM_CONFIG \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
/*
* Configures L2CC mutex
*
* Disables, enables usage of L2CC mutex. Returns or sets physical address
* of L2CC mutex.
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC_L2CC_MUTEX
* a1 OPTEE_SMC_L2CC_MUTEX_GET_ADDR Get physical address of mutex
* OPTEE_SMC_L2CC_MUTEX_SET_ADDR Set physical address of mutex
* OPTEE_SMC_L2CC_MUTEX_ENABLE Enable usage of mutex
* OPTEE_SMC_L2CC_MUTEX_DISABLE Disable usage of mutex
* a2 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, upper 32bit of a 64bit
* physical address of mutex
* a3 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, lower 32bit of a 64bit
* physical address of mutex
 * a4-6	Not used
* a7 Hypervisor Client ID register
*
* Have config return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 Preserved
* a2 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, upper 32bit of a 64bit
* physical address of mutex
* a3 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, lower 32bit of a 64bit
* physical address of mutex
 * a4-7	Preserved
*
* Error return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL Physical address not available
* OPTEE_SMC_RETURN_EBADADDR Bad supplied physical address
* OPTEE_SMC_RETURN_EBADCMD Unsupported value in a1
* a1-7 Preserved
*/
#define OPTEE_SMC_L2CC_MUTEX_GET_ADDR U(0)
#define OPTEE_SMC_L2CC_MUTEX_SET_ADDR U(1)
#define OPTEE_SMC_L2CC_MUTEX_ENABLE U(2)
#define OPTEE_SMC_L2CC_MUTEX_DISABLE U(3)
#define OPTEE_SMC_FUNCID_L2CC_MUTEX U(8)
#define OPTEE_SMC_L2CC_MUTEX \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_L2CC_MUTEX)
/*
* Exchanges capabilities between normal world and secure world
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
* a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
* a2-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
* a2 The maximum secure world notification number
* a3 Bit[7:0]: Number of parameters needed for RPC to be supplied
* as the second MSG arg struct for
* OPTEE_SMC_CALL_WITH_ARG
* Bit[31:8]: Reserved (MBZ)
 * a4-7	Preserved
*
* Error return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
* a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
* a2-7 Preserved
*/
/* Normal world works as a uniprocessor system */
#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0)
/* Secure world has reserved shared memory for normal world to use */
#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
/* Secure world can communicate via previously unregistered shared memory */
#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
/*
* Secure world supports commands "register/unregister shared memory",
* secure world accepts command buffers located in any parts of non-secure RAM
*/
#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2)
/* Secure world is built with virtualization support */
#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3)
/* Secure world supports Shared Memory with a NULL reference */
#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
/* Secure world supports asynchronous notification of normal world */
#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
/* Secure world supports pre-allocating RPC arg struct */
#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES U(9)
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
/*
* Disable and empties cache of shared memory objects
*
* Secure world can cache frequently used shared memory objects, for
* example objects used as RPC arguments. When secure world is idle this
* function returns one shared memory reference to free. To disable the
* cache and free all cached objects this function has to be called until
* it returns OPTEE_SMC_RETURN_ENOTAVAIL.
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
* a1-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 Upper 32 bits of a 64-bit Shared memory cookie
* a2 Lower 32 bits of a 64-bit Shared memory cookie
* a3-7 Preserved
*
* Cache empty return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL
* a1-7 Preserved
*
* Not idle return register usage:
* a0 OPTEE_SMC_RETURN_EBUSY
* a1-7 Preserved
*/
#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE U(10)
#define OPTEE_SMC_DISABLE_SHM_CACHE \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
/*
* Enable cache of shared memory objects
*
* Secure world can cache frequently used shared memory objects, for
* example objects used as RPC arguments. When secure world is idle this
* function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
* secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
* a1-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1-7 Preserved
*
* Not idle return register usage:
* a0 OPTEE_SMC_RETURN_EBUSY
* a1-7 Preserved
*/
#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE U(11)
#define OPTEE_SMC_ENABLE_SHM_CACHE \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
/*
* Release of secondary cores
*
* OP-TEE in secure world is in charge of the release process of secondary
 * cores. The Rich OS issues this request to ask OP-TEE to boot up the
 * secondary cores, go through the OP-TEE per-core initialization, and then
 * switch to the Non-Secure world with the Rich OS provided entry address.
* The secondary cores enter Non-Secure world in SVC mode, with Thumb, FIQ,
* IRQ and Abort bits disabled.
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC_BOOT_SECONDARY
* a1 Index of secondary core to boot
* a2 Upper 32 bits of a 64-bit Non-Secure world entry physical address
* a3 Lower 32 bits of a 64-bit Non-Secure world entry physical address
* a4-7 Not used
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1-7 Preserved
*
* Error return:
* a0 OPTEE_SMC_RETURN_EBADCMD Core index out of range
* a1-7 Preserved
*
* Not idle return register usage:
* a0 OPTEE_SMC_RETURN_EBUSY
* a1-7 Preserved
*/
#define OPTEE_SMC_FUNCID_BOOT_SECONDARY U(12)
#define OPTEE_SMC_BOOT_SECONDARY \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_BOOT_SECONDARY)
/*
* Inform OP-TEE about a new virtual machine
*
* Hypervisor issues this call during virtual machine (guest) creation.
* OP-TEE records client id of new virtual machine and prepares
* to receive requests from it. This call is available only if OP-TEE
* was built with virtualization support.
*
* Call requests usage:
* a0 SMC Function ID, OPTEE_SMC_VM_CREATED
* a1 Hypervisor Client ID of newly created virtual machine
* a2-6 Not used
* a7 Hypervisor Client ID register. Must be 0, because only hypervisor
* can issue this call
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1-7 Preserved
*
* Error return:
 * a0	OPTEE_SMC_RETURN_ENOTAVAIL	OP-TEE has no resources for
* another VM
* a1-7 Preserved
*
*/
#define OPTEE_SMC_FUNCID_VM_CREATED U(13)
#define OPTEE_SMC_VM_CREATED \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_VM_CREATED)
/*
* Inform OP-TEE about shutdown of a virtual machine
*
* Hypervisor issues this call during virtual machine (guest) destruction.
* OP-TEE will clean up all resources associated with this VM. This call is
* available only if OP-TEE was built with virtualization support.
*
* Call requests usage:
* a0 SMC Function ID, OPTEE_SMC_VM_DESTROYED
* a1 Hypervisor Client ID of virtual machine being shut down
* a2-6 Not used
* a7 Hypervisor Client ID register. Must be 0, because only hypervisor
* can issue this call
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1-7 Preserved
*
*/
#define OPTEE_SMC_FUNCID_VM_DESTROYED U(14)
#define OPTEE_SMC_VM_DESTROYED \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_VM_DESTROYED)
/*
* Query OP-TEE about number of supported threads
*
* Normal World OS or Hypervisor issues this call to find out how many
* threads OP-TEE supports. That is how many standard calls can be issued
* in parallel before OP-TEE will return OPTEE_SMC_RETURN_ETHREAD_LIMIT.
*
* Call requests usage:
* a0 SMC Function ID, OPTEE_SMC_GET_THREAD_COUNT
* a1-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 Number of threads
* a2-7 Preserved
*
* Error return:
* a0 OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Requested call is not implemented
* a1-7 Preserved
*/
#define OPTEE_SMC_FUNCID_GET_THREAD_COUNT U(15)
#define OPTEE_SMC_GET_THREAD_COUNT \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
/*
* Inform OP-TEE that normal world is able to receive asynchronous
* notifications.
*
* Call requests usage:
* a0 SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF
* a1-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1-7 Preserved
*
* Not supported return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL
* a1-7 Preserved
*/
#define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF 16
#define OPTEE_SMC_ENABLE_ASYNC_NOTIF \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF)
/*
* Retrieve a value of notifications pending since the last call of this
* function.
*
* OP-TEE keeps a record of all posted values. When an interrupt is
* received which indicates that there are posted values this function
* should be called until all pended values have been retrieved. When a
* value is retrieved, it's cleared from the record in secure world.
*
* It is expected that this function is called from an interrupt handler
* in normal world.
*
* Call requests usage:
* a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
* a1-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 value
* a2 Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is
* valid, else 0 if no values were pending
* a2 Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is
* pending, else 0.
* Bit[31:2]: MBZ
* a3-7 Preserved
*
* Not supported return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL
* a1-7 Preserved
*/
#define OPTEE_SMC_ASYNC_NOTIF_VALID BIT(0)
#define OPTEE_SMC_ASYNC_NOTIF_PENDING BIT(1)
/*
* Notification that OP-TEE expects a yielding call to do some bottom half
* work in a driver.
*/
#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF 0
#define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE 17
#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
/* See OPTEE_SMC_CALL_WITH_RPC_ARG above */
#define OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG U(18)
/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG U(19)
/*
* Resume from RPC (for example after processing a foreign interrupt)
*
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
* a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
* OPTEE_SMC_RETURN_RPC in a0
*
* Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
*
* Possible return values
* OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
* function.
 * OPTEE_SMC_RETURN_OK		Original call completed, result
 *				updated in the previously supplied
 *				struct optee_msg_arg.
* OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
* world.
* OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
* information was corrupt.
*/
#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC U(3)
#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK U(0xFFFF0000)
#define OPTEE_SMC_RETURN_RPC_PREFIX U(0xFFFF0000)
#define OPTEE_SMC_RETURN_RPC_FUNC_MASK U(0x0000FFFF)
#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
/*
* Allocate memory for RPC parameter passing. The memory is used to hold a
* struct optee_msg_arg.
*
* "Call" register usage:
* a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC
* a1 Size in bytes of required argument memory
* a2 Not used
* a3 Resume information, must be preserved
* a4-5 Not used
* a6-7 Resume information, must be preserved
*
* "Return" register usage:
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
* a1 Upper 32 bits of 64-bit physical pointer to allocated
* memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
* be allocated.
* a2 Lower 32 bits of 64-bit physical pointer to allocated
* memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
* be allocated
* a3 Preserved
* a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing
* the memory or doing an RPC
* a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing
* the memory or doing an RPC
* a6-7 Preserved
*/
#define OPTEE_SMC_RPC_FUNC_ALLOC U(0)
#define OPTEE_SMC_RETURN_RPC_ALLOC \
OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
/*
* Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
*
* "Call" register usage:
* a0 This value, OPTEE_SMC_RETURN_RPC_FREE
* a1 Upper 32 bits of 64-bit shared memory cookie belonging to this
* argument memory
* a2 Lower 32 bits of 64-bit shared memory cookie belonging to this
* argument memory
* a3-7 Resume information, must be preserved
*
* "Return" register usage:
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
* a1-2 Not used
* a3-7 Preserved
*/
#define OPTEE_SMC_RPC_FUNC_FREE U(2)
#define OPTEE_SMC_RETURN_RPC_FREE \
OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
/*
* Deliver a foreign interrupt in normal world.
*
* "Call" register usage:
* a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
* a1-7 Resume information, must be preserved
*
* "Return" register usage:
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
* a1-7 Preserved
*/
#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR U(4)
#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \
OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR)
/*
* Do an RPC request. The supplied struct optee_msg_arg tells which
* request to do and the parameters for the request. The following fields
* are used (the rest are unused):
* - cmd the Request ID
* - ret return value of the request, filled in by normal world
* - num_params number of parameters for the request
* - params the parameters
* - param_attrs attributes of the parameters
*
* "Call" register usage:
* a0 OPTEE_SMC_RETURN_RPC_CMD
* a1 Upper 32 bits of a 64-bit Shared memory cookie holding a
* struct optee_msg_arg, must be preserved, only the data should
* be updated
* a2 Lower 32 bits of a 64-bit Shared memory cookie holding a
* struct optee_msg_arg, must be preserved, only the data should
* be updated
* a3-7 Resume information, must be preserved
*
* "Return" register usage:
* a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
* a1-2 Not used
* a3-7 Preserved
*/
#define OPTEE_SMC_RPC_FUNC_CMD U(5)
#define OPTEE_SMC_RETURN_RPC_CMD \
OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
/* Returned in a0 */
#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION U(0xFFFFFFFF)
/* Returned in a0 only from Trusted OS functions */
#define OPTEE_SMC_RETURN_OK U(0x0)
#define OPTEE_SMC_RETURN_ETHREAD_LIMIT U(0x1)
#define OPTEE_SMC_RETURN_EBUSY U(0x2)
#define OPTEE_SMC_RETURN_ERESUME U(0x3)
#define OPTEE_SMC_RETURN_EBADADDR U(0x4)
#define OPTEE_SMC_RETURN_EBADCMD U(0x5)
#define OPTEE_SMC_RETURN_ENOMEM U(0x6)
#define OPTEE_SMC_RETURN_ENOTAVAIL U(0x7)
#define OPTEE_SMC_RETURN_IS_RPC(ret) \
(((ret) != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION) && \
((((ret) & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == \
OPTEE_SMC_RETURN_RPC_PREFIX)))
#endif /* OPTEE_SMC_H */
``` | /content/code_sandbox/drivers/tee/optee/optee_smc.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,879 |
```c
/*
*
*/
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/emul_fuel_gauge.h>
/* Emulator syscalls just need to exist as stubs as these are only called by tests. */
/*
 * Userspace verify handler for emul_fuel_gauge_is_battery_cutoff().
 *
 * Forwards straight to the implementation; per the file-level comment,
 * emulator syscalls are stubs invoked only from tests, so no userspace
 * pointer validation is performed on @cutoff here.
 */
static inline int z_vrfy_emul_fuel_gauge_is_battery_cutoff(const struct emul *target, bool *cutoff)
{
	return z_impl_emul_fuel_gauge_is_battery_cutoff(target, cutoff);
}
#include <zephyr/syscalls/emul_fuel_gauge_is_battery_cutoff_mrsh.c>
/*
 * Userspace verify handler for emul_fuel_gauge_set_battery_charging().
 *
 * Forwards straight to the implementation; all arguments are passed by
 * value, so there is nothing to validate beyond what the implementation
 * itself checks.
 */
static inline int z_vrfy_emul_fuel_gauge_set_battery_charging(const struct emul *target,
							      uint32_t uV, int uA)
{
	return z_impl_emul_fuel_gauge_set_battery_charging(target, uV, uA);
}
#include <zephyr/syscalls/emul_fuel_gauge_set_battery_charging_mrsh.c>
``` | /content/code_sandbox/drivers/fuel_gauge/emul_fuel_gauge_syscall_handlers.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 196 |
```unknown
#
menuconfig FUEL_GAUGE
bool "Battery fuel gauge drivers"
help
Enable battery fuel gauge driver configuration.
if FUEL_GAUGE
module = FUEL_GAUGE
module-str = fuel_gauge
source "subsys/logging/Kconfig.template.log_config"
config FUEL_GAUGE_INIT_PRIORITY
int "Battery Fuel Gauge init priority"
default 90
help
Battery fuel gauge initialization priority.
source "drivers/fuel_gauge/max17048/Kconfig"
source "drivers/fuel_gauge/sbs_gauge/Kconfig"
source "drivers/fuel_gauge/bq27z746/Kconfig"
endif # FUEL_GAUGE
``` | /content/code_sandbox/drivers/fuel_gauge/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 140 |
```c
/*
*
*/
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/fuel_gauge.h>
/*
 * Userspace verify handler for fuel_gauge_get_prop().
 *
 * Checks that the device implements get_property, copies the
 * caller-supplied value out of userspace into a kernel-side copy,
 * invokes the implementation on the copy, then copies the (possibly
 * updated) value back to userspace.
 */
static inline int z_vrfy_fuel_gauge_get_prop(const struct device *dev, fuel_gauge_prop_t prop,
					     union fuel_gauge_prop_val *val)
{
	union fuel_gauge_prop_val k_val;

	K_OOPS(K_SYSCALL_DRIVER_FUEL_GAUGE(dev, get_property));
	/* Copy-in so the implementation never dereferences an unvalidated
	 * user pointer.
	 */
	K_OOPS(k_usermode_from_copy(&k_val, val, sizeof(union fuel_gauge_prop_val)));

	int ret = z_impl_fuel_gauge_get_prop(dev, prop, &k_val);

	K_OOPS(k_usermode_to_copy(val, &k_val, sizeof(union fuel_gauge_prop_val)));

	return ret;
}
#include <zephyr/syscalls/fuel_gauge_get_prop_mrsh.c>
/*
 * Userspace verify handler for fuel_gauge_get_props().
 *
 * Copies the property-ID and value arrays in from userspace, runs the
 * implementation against the kernel-side copies, and copies only the
 * value array back out (the property IDs are inputs and are not
 * modified by the implementation).
 */
static inline int z_vrfy_fuel_gauge_get_props(const struct device *dev, fuel_gauge_prop_t *props,
					      union fuel_gauge_prop_val *vals, size_t len)
{
	/* NOTE(review): these VLAs are sized by the caller-supplied `len`
	 * and consume kernel stack before any validation runs — confirm an
	 * upper bound on `len` is enforced by callers or the syscall
	 * marshalling layer.
	 */
	union fuel_gauge_prop_val k_vals[len];
	fuel_gauge_prop_t k_props[len];

	K_OOPS(K_SYSCALL_DRIVER_FUEL_GAUGE(dev, get_property));
	K_OOPS(k_usermode_from_copy(k_vals, vals, len * sizeof(union fuel_gauge_prop_val)));
	K_OOPS(k_usermode_from_copy(k_props, props, len * sizeof(fuel_gauge_prop_t)));

	int ret = z_impl_fuel_gauge_get_props(dev, k_props, k_vals, len);

	K_OOPS(k_usermode_to_copy(vals, k_vals, len * sizeof(union fuel_gauge_prop_val)));

	return ret;
}
#include <zephyr/syscalls/fuel_gauge_get_props_mrsh.c>
/*
 * Userspace verify handler for fuel_gauge_set_prop().
 *
 * The value is passed by copy, so the only check required is that the
 * driver implements set_property; the call is then forwarded directly.
 */
static inline int z_vrfy_fuel_gauge_set_prop(const struct device *dev, fuel_gauge_prop_t prop,
					     union fuel_gauge_prop_val val)
{
	K_OOPS(K_SYSCALL_DRIVER_FUEL_GAUGE(dev, set_property));

	return z_impl_fuel_gauge_set_prop(dev, prop, val);
}
#include <zephyr/syscalls/fuel_gauge_set_prop_mrsh.c>
/*
 * Userspace verify handler for fuel_gauge_set_props().
 *
 * Copies the property-ID and value arrays in from userspace, runs the
 * implementation against the kernel-side copies, and copies the value
 * array back out so callers can observe per-property results.
 */
static inline int z_vrfy_fuel_gauge_set_props(const struct device *dev, fuel_gauge_prop_t *props,
					      union fuel_gauge_prop_val *vals, size_t len)
{
	/* NOTE(review): VLAs sized by the caller-supplied `len` consume
	 * kernel stack before any validation — confirm an upper bound is
	 * enforced elsewhere.
	 */
	union fuel_gauge_prop_val k_vals[len];
	fuel_gauge_prop_t k_props[len];

	K_OOPS(K_SYSCALL_DRIVER_FUEL_GAUGE(dev, set_property));
	K_OOPS(k_usermode_from_copy(k_vals, vals, len * sizeof(union fuel_gauge_prop_val)));
	K_OOPS(k_usermode_from_copy(k_props, props, len * sizeof(fuel_gauge_prop_t)));

	int ret = z_impl_fuel_gauge_set_props(dev, k_props, k_vals, len);

	/* We only copy back vals because props will never be modified */
	K_OOPS(k_usermode_to_copy(vals, k_vals, len * sizeof(union fuel_gauge_prop_val)));

	return ret;
}
#include <zephyr/syscalls/fuel_gauge_set_props_mrsh.c>
/*
 * Userspace verify handler for fuel_gauge_get_buffer_prop().
 *
 * Verifies the driver implements get_buffer_property and that the
 * destination region is writable by the calling thread, then forwards
 * directly to the implementation.
 */
static inline int z_vrfy_fuel_gauge_get_buffer_prop(const struct device *dev,
						    fuel_gauge_prop_t prop, void *dst,
						    size_t dst_len)
{
	K_OOPS(K_SYSCALL_DRIVER_FUEL_GAUGE(dev, get_buffer_property));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(dst, dst_len));

	return z_impl_fuel_gauge_get_buffer_prop(dev, prop, dst, dst_len);
}
#include <zephyr/syscalls/fuel_gauge_get_buffer_prop_mrsh.c>
/*
 * Userspace verify handler for fuel_gauge_battery_cutoff().
 *
 * Takes no userspace pointers, so the only check required is that the
 * driver implements battery_cutoff.
 */
static inline int z_vrfy_fuel_gauge_battery_cutoff(const struct device *dev)
{
	K_OOPS(K_SYSCALL_DRIVER_FUEL_GAUGE(dev, battery_cutoff));
	return z_impl_fuel_gauge_battery_cutoff(dev);
}
#include <zephyr/syscalls/fuel_gauge_battery_cutoff_mrsh.c>
``` | /content/code_sandbox/drivers/fuel_gauge/fuel_gauge_syscall_handlers.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 844 |
```c
/*
*
*
*/
#define DT_DRV_COMPAT sbs_sbs_gauge_new_api
#include "sbs_gauge.h"
#include <stdbool.h>
#include <stdint.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/fuel_gauge.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
LOG_MODULE_REGISTER(sbs_gauge);
/*
 * Read a 16-bit SBS register value over I2C.
 *
 * Reads two bytes starting at @reg_addr and decodes them as
 * little-endian into @val.
 *
 * Returns 0 on success, or the negative error code from the I2C
 * transfer on failure.
 */
static int sbs_cmd_reg_read(const struct device *dev, uint8_t reg_addr, uint16_t *val)
{
	const struct sbs_gauge_config *cfg = dev->config;
	uint8_t raw[2];
	int rc;

	rc = i2c_burst_read_dt(&cfg->i2c, reg_addr, raw, ARRAY_SIZE(raw));
	if (rc < 0) {
		LOG_ERR("Unable to read register");
		return rc;
	}

	*val = sys_get_le16(raw);

	return 0;
}
/*
 * Write a 16-bit value to an SBS register over I2C.
 *
 * Encodes @val as little-endian and writes both bytes starting at
 * @reg_addr.
 *
 * Returns 0 on success, or the negative error code from the I2C
 * transfer on failure.
 */
static int sbs_cmd_reg_write(const struct device *dev, uint8_t reg_addr, uint16_t val)
{
	const struct sbs_gauge_config *cfg = dev->config;
	uint8_t raw[sizeof(uint16_t)];

	sys_put_le16(val, raw);

	return i2c_burst_write_dt(&cfg->i2c, reg_addr, raw, sizeof(raw));
}
/*
 * Read a block of bytes from an SBS buffer-style register over I2C.
 *
 * Fills @buffer with @buffer_size bytes read starting at @reg_addr.
 *
 * Returns 0 on success, or the negative error code from the I2C
 * transfer on failure.
 */
static int sbs_cmd_buffer_read(const struct device *dev, uint8_t reg_addr, char *buffer,
			       const uint8_t buffer_size)
{
	const struct sbs_gauge_config *cfg = dev->config;
	int rc;

	rc = i2c_burst_read_dt(&cfg->i2c, reg_addr, buffer, buffer_size);
	if (rc < 0) {
		LOG_ERR("Unable to read register");
		return rc;
	}

	return 0;
}
/*
 * Read one runtime property from the gauge.
 *
 * Every property maps 1:1 onto a single 16-bit SBS register read.
 * Current/voltage/capacity values are scaled by 1000 before being
 * stored (presumably converting the gauge's native mA/mV/mAh units to
 * the micro-units the fuel_gauge API expects -- confirm against the
 * SBS 1.1 spec and the union field definitions).
 *
 * NOTE(review): the register value is read into an unsigned 16-bit
 * temporary, so negative (discharge) currents would not sign-extend
 * before the *1000 scaling -- TODO confirm intended behavior.
 *
 * @return 0 on success, negative I2C error, or -ENOTSUP for
 *         unsupported properties.
 */
static int sbs_gauge_get_prop(const struct device *dev, fuel_gauge_prop_t prop,
			      union fuel_gauge_prop_val *val)
{
	int rc = 0;
	uint16_t tmp_val = 0;

	switch (prop) {
	case FUEL_GAUGE_AVG_CURRENT:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_AVG_CURRENT, &tmp_val);
		val->avg_current = tmp_val * 1000;
		break;
	case FUEL_GAUGE_CYCLE_COUNT:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_CYCLE_COUNT, &tmp_val);
		val->cycle_count = tmp_val;
		break;
	case FUEL_GAUGE_CURRENT:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_CURRENT, &tmp_val);
		val->current = tmp_val * 1000;
		break;
	case FUEL_GAUGE_FULL_CHARGE_CAPACITY:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_FULL_CAPACITY, &tmp_val);
		val->full_charge_capacity = tmp_val * 1000;
		break;
	case FUEL_GAUGE_REMAINING_CAPACITY:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_REM_CAPACITY, &tmp_val);
		val->remaining_capacity = tmp_val * 1000;
		break;
	case FUEL_GAUGE_RUNTIME_TO_EMPTY:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_RUNTIME2EMPTY, &tmp_val);
		val->runtime_to_empty = tmp_val;
		break;
	case FUEL_GAUGE_RUNTIME_TO_FULL:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_AVG_TIME2FULL, &tmp_val);
		val->runtime_to_full = tmp_val;
		break;
	case FUEL_GAUGE_SBS_MFR_ACCESS:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_MANUFACTURER_ACCESS, &tmp_val);
		val->sbs_mfr_access_word = tmp_val;
		break;
	case FUEL_GAUGE_ABSOLUTE_STATE_OF_CHARGE:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_ASOC, &tmp_val);
		val->absolute_state_of_charge = tmp_val;
		break;
	case FUEL_GAUGE_RELATIVE_STATE_OF_CHARGE:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_RSOC, &tmp_val);
		val->relative_state_of_charge = tmp_val;
		break;
	case FUEL_GAUGE_TEMPERATURE:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_TEMP, &tmp_val);
		val->temperature = tmp_val;
		break;
	case FUEL_GAUGE_VOLTAGE:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_VOLTAGE, &tmp_val);
		val->voltage = tmp_val * 1000;
		break;
	case FUEL_GAUGE_SBS_MODE:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_BATTERY_MODE, &tmp_val);
		val->sbs_mode = tmp_val;
		break;
	case FUEL_GAUGE_CHARGE_CURRENT:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_CHG_CURRENT, &tmp_val);
		val->chg_current = tmp_val * 1000;
		break;
	case FUEL_GAUGE_CHARGE_VOLTAGE:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_CHG_VOLTAGE, &tmp_val);
		val->chg_voltage = tmp_val * 1000;
		break;
	case FUEL_GAUGE_STATUS:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_FLAGS, &tmp_val);
		val->fg_status = tmp_val;
		break;
	case FUEL_GAUGE_DESIGN_CAPACITY:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_NOM_CAPACITY, &tmp_val);
		val->design_cap = tmp_val;
		break;
	case FUEL_GAUGE_DESIGN_VOLTAGE:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_DESIGN_VOLTAGE, &tmp_val);
		val->design_volt = tmp_val;
		break;
	case FUEL_GAUGE_SBS_ATRATE:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_AR, &tmp_val);
		val->sbs_at_rate = tmp_val;
		break;
	case FUEL_GAUGE_SBS_ATRATE_TIME_TO_FULL:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_ARTTF, &tmp_val);
		val->sbs_at_rate_time_to_full = tmp_val;
		break;
	case FUEL_GAUGE_SBS_ATRATE_TIME_TO_EMPTY:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_ARTTE, &tmp_val);
		val->sbs_at_rate_time_to_empty = tmp_val;
		break;
	case FUEL_GAUGE_SBS_ATRATE_OK:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_AROK, &tmp_val);
		val->sbs_at_rate_ok = tmp_val;
		break;
	case FUEL_GAUGE_SBS_REMAINING_CAPACITY_ALARM:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_REM_CAPACITY_ALARM, &tmp_val);
		val->sbs_remaining_capacity_alarm = tmp_val;
		break;
	case FUEL_GAUGE_SBS_REMAINING_TIME_ALARM:
		rc = sbs_cmd_reg_read(dev, SBS_GAUGE_CMD_REM_TIME_ALARM, &tmp_val);
		val->sbs_remaining_time_alarm = tmp_val;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
static int sbs_gauge_do_battery_cutoff(const struct device *dev)
{
int rc = -ENOTSUP;
const struct sbs_gauge_config *cfg = dev->config;
if (cfg->cutoff_cfg == NULL) {
return -ENOTSUP;
}
for (int i = 0; i < cfg->cutoff_cfg->payload_size; i++) {
rc = sbs_cmd_reg_write(dev, cfg->cutoff_cfg->reg, cfg->cutoff_cfg->payload[i]);
if (rc != 0) {
return rc;
}
}
return rc;
}
/*
 * Write a settable fuel-gauge property to the gauge.
 *
 * Only the SBS-specific writable registers are supported; everything
 * else returns -ENOTSUP.
 *
 * Fix: the original assigned `val.x = tmp_val;` (an unused local that
 * was always 0) to the by-value parameter after each write.  Those
 * stores could never be observed by the caller and served no purpose;
 * both the dead stores and the unused `tmp_val` local are removed.
 *
 * @param dev  SBS gauge device
 * @param prop property to set
 * @param val  value to write (passed by value)
 *
 * @return 0 on success, negative I2C error, or -ENOTSUP.
 */
static int sbs_gauge_set_prop(const struct device *dev, fuel_gauge_prop_t prop,
			      union fuel_gauge_prop_val val)
{
	int rc = 0;

	switch (prop) {
	case FUEL_GAUGE_SBS_MFR_ACCESS:
		rc = sbs_cmd_reg_write(dev, SBS_GAUGE_CMD_MANUFACTURER_ACCESS,
				       val.sbs_mfr_access_word);
		break;
	case FUEL_GAUGE_SBS_REMAINING_CAPACITY_ALARM:
		rc = sbs_cmd_reg_write(dev, SBS_GAUGE_CMD_REM_CAPACITY_ALARM,
				       val.sbs_remaining_capacity_alarm);
		break;
	case FUEL_GAUGE_SBS_REMAINING_TIME_ALARM:
		rc = sbs_cmd_reg_write(dev, SBS_GAUGE_CMD_REM_TIME_ALARM,
				       val.sbs_remaining_time_alarm);
		break;
	case FUEL_GAUGE_SBS_MODE:
		rc = sbs_cmd_reg_write(dev, SBS_GAUGE_CMD_BATTERY_MODE, val.sbs_mode);
		break;
	case FUEL_GAUGE_SBS_ATRATE:
		rc = sbs_cmd_reg_write(dev, SBS_GAUGE_CMD_AR, val.sbs_at_rate);
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
/*
 * Read a string/buffer property (manufacturer name, device name,
 * device chemistry) into dst.
 *
 * dst_len must exactly match the size of the corresponding
 * sbs_gauge_* struct, otherwise -EINVAL is returned.
 */
static int sbs_gauge_get_buffer_prop(const struct device *dev,
				     fuel_gauge_prop_t prop_type, void *dst,
				     size_t dst_len)
{
	uint8_t reg;
	size_t expected_len;

	/* Resolve the SBS command and the struct size the caller must supply. */
	switch (prop_type) {
	case FUEL_GAUGE_MANUFACTURER_NAME:
		reg = SBS_GAUGE_CMD_MANUFACTURER_NAME;
		expected_len = sizeof(struct sbs_gauge_manufacturer_name);
		break;
	case FUEL_GAUGE_DEVICE_NAME:
		reg = SBS_GAUGE_CMD_DEVICE_NAME;
		expected_len = sizeof(struct sbs_gauge_device_name);
		break;
	case FUEL_GAUGE_DEVICE_CHEMISTRY:
		reg = SBS_GAUGE_CMD_DEVICE_CHEMISTRY;
		expected_len = sizeof(struct sbs_gauge_device_chemistry);
		break;
	default:
		return -ENOTSUP;
	}

	if (dst_len != expected_len) {
		return -EINVAL;
	}

	return sbs_cmd_buffer_read(dev, reg, (char *)dst, dst_len);
}
/**
 * @brief Initialize the SBS fuel gauge driver instance.
 *
 * The gauge itself needs no setup; only the underlying I2C bus is
 * checked for readiness.
 *
 * @return 0 for success, -ENODEV when the bus is not ready
 */
static int sbs_gauge_init(const struct device *dev)
{
	const struct sbs_gauge_config *config = dev->config;

	if (!device_is_ready(config->i2c.bus)) {
		LOG_ERR("Bus device is not ready");
		return -ENODEV;
	}

	return 0;
}
/* fuel_gauge driver API vtable for SBS 1.1 compliant gauges. */
static const struct fuel_gauge_driver_api sbs_gauge_driver_api = {
	.get_property = &sbs_gauge_get_prop,
	.set_property = &sbs_gauge_set_prop,
	.get_buffer_property = &sbs_gauge_get_buffer_prop,
	.battery_cutoff = &sbs_gauge_do_battery_cutoff,
};
/* Concatenates index to battery config to create unique cfg variable name per instance. */
#define _SBS_GAUGE_BATT_CUTOFF_CFG_VAR_NAME(index) sbs_gauge_batt_cutoff_cfg_##index

/* Declare and define the battery config struct */
#define _SBS_GAUGE_CONFIG_DEFINE(index) \
	static const struct sbs_gauge_battery_cutoff_config _SBS_GAUGE_BATT_CUTOFF_CFG_VAR_NAME( \
		index) = { \
		.reg = DT_INST_PROP(index, battery_cutoff_reg_addr), \
		.payload = DT_INST_PROP(index, battery_cutoff_payload), \
		.payload_size = DT_INST_PROP_LEN(index, battery_cutoff_payload), \
	};

/* Conditionally defined battery config based on battery cutoff support */
#define SBS_GAUGE_CONFIG_DEFINE(index) \
	COND_CODE_1(DT_INST_PROP(index, battery_cutoff_support), \
		    (_SBS_GAUGE_CONFIG_DEFINE(index)), (;))

/* Conditionally get the battery config variable name or NULL based on battery cutoff support */
#define SBS_GAUGE_GET_BATTERY_CONFIG_NAME(index) \
	COND_CODE_1(DT_INST_PROP(index, battery_cutoff_support), \
		    (&_SBS_GAUGE_BATT_CUTOFF_CFG_VAR_NAME(index)), (NULL))

/* Per-instance config definition plus device registration. */
#define SBS_GAUGE_INIT(index) \
	SBS_GAUGE_CONFIG_DEFINE(index); \
	static const struct sbs_gauge_config sbs_gauge_config_##index = { \
		.i2c = I2C_DT_SPEC_INST_GET(index), \
		.cutoff_cfg = SBS_GAUGE_GET_BATTERY_CONFIG_NAME(index)}; \
	\
	DEVICE_DT_INST_DEFINE(index, &sbs_gauge_init, NULL, NULL, &sbs_gauge_config_##index, \
			      POST_KERNEL, CONFIG_FUEL_GAUGE_INIT_PRIORITY, \
			      &sbs_gauge_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SBS_GAUGE_INIT)

/* Compile-time check that each instance's cutoff payload fits the driver maximum. */
#define CUTOFF_PAYLOAD_SIZE_ASSERT(inst) \
	BUILD_ASSERT(DT_INST_PROP_LEN_OR(inst, battery_cutoff_payload, 0) <= \
		     SBS_GAUGE_CUTOFF_PAYLOAD_MAX_SIZE);

DT_INST_FOREACH_STATUS_OKAY(CUTOFF_PAYLOAD_SIZE_ASSERT)
``` | /content/code_sandbox/drivers/fuel_gauge/sbs_gauge/sbs_gauge.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,049 |
```c
/*
*
*
* Emulator for SBS 1.1 compliant smart battery fuel gauge.
*/
#ifdef CONFIG_FUEL_GAUGE
#define DT_DRV_COMPAT sbs_sbs_gauge_new_api
#else
#define DT_DRV_COMPAT sbs_sbs_gauge
#endif /* CONFIG_FUEL_GAUGE */
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(sbs_sbs_gauge);
#include <stdbool.h>
#include <stdint.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/drivers/i2c_emul.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/drivers/emul_fuel_gauge.h>
#include <zephyr/drivers/fuel_gauge.h>
#include <zephyr/sys/util.h>
#include "sbs_gauge.h"
/** Run-time data used by the emulator */
struct sbs_gauge_emul_data {
	/* Last values written to the writable SBS registers. */
	uint16_t mfr_acc;
	uint16_t remaining_capacity_alarm;
	uint16_t remaining_time_alarm;
	uint16_t mode;
	int16_t at_rate;
	/* Whether the battery cutoff or not */
	bool is_cutoff;
	/*
	 * Counts the number of times the cutoff payload has been sent to the designated
	 * register
	 */
	uint8_t cutoff_writes;
	struct {
		/* Non-register values associated with the state of the battery */
		/* Battery terminal voltage */
		uint32_t uV;
		/* Battery terminal current - Pos is charging, Neg is discharging */
		int uA;
	} batt_state;
};

/** Static configuration for the emulator */
struct sbs_gauge_emul_cfg {
	/** I2C address of emulator */
	uint16_t addr;
	/* True when a devicetree cutoff register/payload is configured. */
	bool cutoff_support;
	/* Register that receives the cutoff payload sequence. */
	uint32_t cutoff_reg_addr;
	/* Word sequence that, written in order, triggers a cutoff. */
	uint16_t cutoff_payload[SBS_GAUGE_CUTOFF_PAYLOAD_MAX_SIZE];
};
/*
 * Track writes to the configured cutoff register and latch is_cutoff
 * once the full payload sequence has been written in order.
 *
 * Any write to a different register, or a write that does not match the
 * next expected payload word, resets the sequence detector.
 */
static void emul_sbs_gauge_maybe_do_battery_cutoff(const struct emul *target, int reg, int val)
{
	struct sbs_gauge_emul_data *data = target->data;
	const struct sbs_gauge_emul_cfg *cfg = target->cfg;

	/* Check if this is a cutoff write */
	if (cfg->cutoff_support && reg == cfg->cutoff_reg_addr) {
		__ASSERT_NO_MSG(ARRAY_SIZE(cfg->cutoff_payload) > 0);
		/*
		 * Calculate the next payload element value for a battery cutoff.
		 *
		 * We thoroughly check bounds elsewhere, so we can be confident we're not indexing
		 * past the end of the array.
		 */
		uint16_t target_payload_elem_val = cfg->cutoff_payload[data->cutoff_writes];

		if (target_payload_elem_val == val) {
			/* Matched the next expected word; advance the sequence. */
			data->cutoff_writes++;
			__ASSERT_NO_MSG(data->cutoff_writes <= ARRAY_SIZE(cfg->cutoff_payload));
		} else {
			/* Wrong payload target value, reset cutoff sequence detection. */
			data->cutoff_writes = 0;
		}

		/* Full payload received in order: the battery is now cut off. */
		if (data->cutoff_writes == ARRAY_SIZE(cfg->cutoff_payload)) {
			data->is_cutoff = true;
			data->cutoff_writes = 0;
		}
	}
	/* Not a cutoff write, reset payload counter */
	else {
		data->cutoff_writes = 0;
	}
}
/*
 * Handle a 16-bit register write from the driver under test.
 *
 * Only the writable SBS registers are accepted; anything else returns
 * -EIO.  Every accepted write is also fed to the cutoff sequence
 * detector.
 */
static int emul_sbs_gauge_reg_write(const struct emul *target, int reg, int val)
{
	struct sbs_gauge_emul_data *data = target->data;

	LOG_INF("write %x = %x", reg, val);
	switch (reg) {
	case SBS_GAUGE_CMD_MANUFACTURER_ACCESS:
		data->mfr_acc = val;
		break;
	case SBS_GAUGE_CMD_REM_CAPACITY_ALARM:
		data->remaining_capacity_alarm = val;
		break;
	case SBS_GAUGE_CMD_REM_TIME_ALARM:
		data->remaining_time_alarm = val;
		break;
	case SBS_GAUGE_CMD_BATTERY_MODE:
		data->mode = val;
		break;
	case SBS_GAUGE_CMD_AR:
		data->at_rate = val;
		break;
	default:
		LOG_INF("Unknown write %x", reg);
		return -EIO;
	}

	/*
	 * One of the above registers is always designated as a "cutoff" register, usually it's
	 * MANUFACTURER ACCESS, but not always.
	 */
	emul_sbs_gauge_maybe_do_battery_cutoff(target, reg, val);
	return 0;
}
/*
 * Handle a 16-bit register read from the driver under test.
 *
 * Writable registers return the last written value; voltage/current
 * are derived from the emulated battery state (converted from uV/uA to
 * mV/mA by the /1000); the remaining read-only registers return an
 * arbitrary stub value of 1.  Unknown registers return -EIO.
 */
static int emul_sbs_gauge_reg_read(const struct emul *target, int reg, int *val)
{
	struct sbs_gauge_emul_data *data = target->data;

	switch (reg) {
	case SBS_GAUGE_CMD_MANUFACTURER_ACCESS:
		*val = data->mfr_acc;
		break;
	case SBS_GAUGE_CMD_REM_CAPACITY_ALARM:
		*val = data->remaining_capacity_alarm;
		break;
	case SBS_GAUGE_CMD_REM_TIME_ALARM:
		*val = data->remaining_time_alarm;
		break;
	case SBS_GAUGE_CMD_BATTERY_MODE:
		*val = data->mode;
		break;
	case SBS_GAUGE_CMD_AR:
		*val = data->at_rate;
		break;
	case SBS_GAUGE_CMD_VOLTAGE:
		*val = data->batt_state.uV / 1000;
		break;
	case SBS_GAUGE_CMD_CURRENT:
		*val = data->batt_state.uA / 1000;
		break;
	case SBS_GAUGE_CMD_AVG_CURRENT:
	case SBS_GAUGE_CMD_TEMP:
	case SBS_GAUGE_CMD_ASOC:
	case SBS_GAUGE_CMD_RSOC:
	case SBS_GAUGE_CMD_FULL_CAPACITY:
	case SBS_GAUGE_CMD_REM_CAPACITY:
	case SBS_GAUGE_CMD_NOM_CAPACITY:
	case SBS_GAUGE_CMD_AVG_TIME2EMPTY:
	case SBS_GAUGE_CMD_AVG_TIME2FULL:
	case SBS_GAUGE_CMD_RUNTIME2EMPTY:
	case SBS_GAUGE_CMD_CYCLE_COUNT:
	case SBS_GAUGE_CMD_DESIGN_VOLTAGE:
	case SBS_GAUGE_CMD_CHG_CURRENT:
	case SBS_GAUGE_CMD_CHG_VOLTAGE:
	case SBS_GAUGE_CMD_FLAGS:
	case SBS_GAUGE_CMD_ARTTF:
	case SBS_GAUGE_CMD_ARTTE:
	case SBS_GAUGE_CMD_AROK:
		/* Arbitrary stub value. */
		*val = 1;
		break;
	default:
		LOG_ERR("Unknown register 0x%x read", reg);
		return -EIO;
	}
	LOG_INF("read 0x%x = 0x%x", reg, *val);
	return 0;
}
/*
 * Fill a buffer-property read with fixed emulated strings.
 *
 * The caller guarantees val points at a buffer at least as large as the
 * corresponding sbs_gauge_* struct (the transfer handler dispatches on
 * exact struct sizes).
 *
 * NOTE(review): the stored lengths use sizeof(), so they include the
 * NUL terminator; the device-name case does not clamp to
 * sizeof(dev_name->device_name) the way the chemistry case does --
 * confirm the struct fields are large enough for these literals.
 */
static int emul_sbs_gauge_buffer_read(const struct emul *target, int reg, char *val)
{
	/* Fixed identity strings reported by the emulated gauge. */
	char mfg[] = "ACME";
	char dev[] = "B123456";
	char chem[] = "LiPO";
	struct sbs_gauge_manufacturer_name *mfg_name = (struct sbs_gauge_manufacturer_name *)val;
	struct sbs_gauge_device_name *dev_name = (struct sbs_gauge_device_name *)val;
	struct sbs_gauge_device_chemistry *dev_chem = (struct sbs_gauge_device_chemistry *)val;

	switch (reg) {
	case SBS_GAUGE_CMD_MANUFACTURER_NAME:
		mfg_name->manufacturer_name_length = sizeof(mfg);
		memcpy(mfg_name->manufacturer_name, mfg, mfg_name->manufacturer_name_length);
		break;
	case SBS_GAUGE_CMD_DEVICE_NAME:
		dev_name->device_name_length = sizeof(dev);
		memcpy(dev_name->device_name, dev, dev_name->device_name_length);
		break;
	case SBS_GAUGE_CMD_DEVICE_CHEMISTRY:
		dev_chem->device_chemistry_length = MIN(sizeof(chem),
							sizeof(dev_chem->device_chemistry));
		memcpy(dev_chem->device_chemistry, chem, dev_chem->device_chemistry_length);
		break;
	default:
		LOG_ERR("Unknown register 0x%x read", reg);
		return -EIO;
	}
	return 0;
}
/*
 * I2C transfer handler for the emulated gauge.
 *
 * Supports only the SMBus "register select then read/write" shape:
 * exactly two messages where the first is a one-byte register write
 * and the second is either a 2-byte word transfer or a struct-sized
 * block read.
 *
 * Fixes vs. the original:
 *  - a word write with msgs->len != 2 only logged an error and then
 *    parsed the malformed buffer anyway; it now returns -EIO;
 *  - `val` was declared `unsigned int` but passed as `int *` to
 *    emul_sbs_gauge_reg_read() (incompatible pointer types);
 *  - the `data` local was assigned but never used and is removed.
 *
 * NOTE(review): there is no block-read case for
 * sizeof(struct sbs_gauge_device_name); confirm whether it coincides
 * with one of the handled sizes or is intentionally unsupported.
 */
static int sbs_gauge_emul_transfer_i2c(const struct emul *target, struct i2c_msg *msgs,
				       int num_msgs, int addr)
{
	/* Largely copied from emul_bmi160.c */
	int val;
	int reg;
	int rc;

	__ASSERT_NO_MSG(msgs && num_msgs);

	i2c_dump_msgs_rw(target->dev, msgs, num_msgs, addr, false);
	switch (num_msgs) {
	case 2:
		if (msgs->flags & I2C_MSG_READ) {
			LOG_ERR("Unexpected read");
			return -EIO;
		}
		if (msgs->len != 1) {
			LOG_ERR("Unexpected msg0 length %d", msgs->len);
			return -EIO;
		}
		reg = msgs->buf[0];

		/* Now process the 'read' part of the message */
		msgs++;
		if (msgs->flags & I2C_MSG_READ) {
			switch (msgs->len) {
			case 2:
				rc = emul_sbs_gauge_reg_read(target, reg, &val);
				if (rc) {
					/* Return before writing bad value to message buffer */
					return rc;
				}

				/* SBS uses SMBus, which sends data in little-endian format. */
				sys_put_le16(val, msgs->buf);
				break;
			/* buffer properties */
			case (sizeof(struct sbs_gauge_manufacturer_name)):
			case (sizeof(struct sbs_gauge_device_chemistry)):
				rc = emul_sbs_gauge_buffer_read(target, reg, (char *)msgs->buf);
				break;
			default:
				LOG_ERR("Unexpected msg1 length %d", msgs->len);
				return -EIO;
			}
		} else {
			/* We write a word (2 bytes by the SBS spec) */
			if (msgs->len != 2) {
				LOG_ERR("Unexpected msg1 length %d", msgs->len);
				/* Reject malformed writes instead of parsing garbage. */
				return -EIO;
			}

			uint16_t value = sys_get_le16(msgs->buf);

			rc = emul_sbs_gauge_reg_write(target, reg, value);
		}
		break;
	default:
		LOG_ERR("Invalid number of messages: %d", num_msgs);
		return -EIO;
	}

	return rc;
}
/*
 * Backend API: set the emulated battery terminal voltage (uV) and
 * current (uA).  Zero for either value is rejected with -EINVAL.
 */
static int emul_sbs_fuel_gauge_set_battery_charging(const struct emul *target, uint32_t uV, int uA)
{
	struct sbs_gauge_emul_data *data = target->data;

	if (uV == 0 || uA == 0) {
		return -EINVAL;
	}

	data->batt_state.uV = uV;
	data->batt_state.uA = uA;

	return 0;
}
static int emul_sbs_fuel_gauge_is_battery_cutoff(const struct emul *target, bool *cutoff)
{
struct sbs_gauge_emul_data *data = target->data;
__ASSERT_NO_MSG(cutoff != NULL);
*cutoff = data->is_cutoff;
return 0;
}
/* Test-backend API: lets tests manipulate/inspect emulated battery state. */
static const struct fuel_gauge_emul_driver_api sbs_gauge_backend_api = {
	.set_battery_charging = emul_sbs_fuel_gauge_set_battery_charging,
	.is_battery_cutoff = emul_sbs_fuel_gauge_is_battery_cutoff,
};

/* Bus-facing API: routes I2C transfers into the emulator. */
static const struct i2c_emul_api sbs_gauge_emul_api_i2c = {
	.transfer = sbs_gauge_emul_transfer_i2c,
};
/* Restore the emulator's run-time state to all-zero power-on defaults. */
static void sbs_gauge_emul_reset(const struct emul *target)
{
	memset(target->data, 0, sizeof(struct sbs_gauge_emul_data));
}
#ifdef CONFIG_ZTEST
#include <zephyr/ztest.h>

/* Add test reset handlers in when using emulators with tests */
#define SBS_GAUGE_EMUL_RESET_RULE_BEFORE(inst) \
	sbs_gauge_emul_reset(EMUL_DT_GET(DT_DRV_INST(inst)));

/* ztest rule hook: reset every emulator instance after each unit test. */
static void emul_sbs_gauge_reset_rule_after(const struct ztest_unit_test *test, void *data)
{
	ARG_UNUSED(test);
	ARG_UNUSED(data);
	DT_INST_FOREACH_STATUS_OKAY(SBS_GAUGE_EMUL_RESET_RULE_BEFORE)
}
ZTEST_RULE(emul_sbs_gauge_reset, NULL, emul_sbs_gauge_reset_rule_after);
#endif /* CONFIG_ZTEST */
/**
 * Set up a new SBS_GAUGE emulator (I2C)
 *
 * @param target Emulation information
 * @param parent Device to emulate (must use sbs_gauge driver); unused
 * @return 0 indicating success (always)
 */
static int emul_sbs_sbs_gauge_init(const struct emul *target, const struct device *parent)
{
	ARG_UNUSED(parent);

	/* Start from all-zero power-on state. */
	sbs_gauge_emul_reset(target);
	return 0;
}
/*
 * Main instantiation macro. SBS Gauge Emulator only implemented for I2C
 */
#define SBS_GAUGE_EMUL(n) \
	static struct sbs_gauge_emul_data sbs_gauge_emul_data_##n; \
	static const struct sbs_gauge_emul_cfg sbs_gauge_emul_cfg_##n = { \
		.addr = DT_INST_REG_ADDR(n), \
		.cutoff_support = DT_PROP_OR(DT_DRV_INST(n), battery_cutoff_support, false), \
		.cutoff_reg_addr = DT_PROP_OR(DT_DRV_INST(n), battery_cutoff_reg_addr, 0), \
		.cutoff_payload = DT_PROP_OR(DT_DRV_INST(n), battery_cutoff_payload, {}), \
	}; \
	EMUL_DT_INST_DEFINE(n, emul_sbs_sbs_gauge_init, &sbs_gauge_emul_data_##n, \
			    &sbs_gauge_emul_cfg_##n, &sbs_gauge_emul_api_i2c, \
			    &sbs_gauge_backend_api)

DT_INST_FOREACH_STATUS_OKAY(SBS_GAUGE_EMUL)
``` | /content/code_sandbox/drivers/fuel_gauge/sbs_gauge/emul_sbs_gauge.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,069 |
```c
/*
*
*/
#include <zephyr/arch/arm64/arm-smccc.h>
#include <zephyr/drivers/tee.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/bitarray.h>
#include <zephyr/sys/dlist.h>
#include "optee_msg.h"
#include "optee_rpc_cmd.h"
#include "optee_smc.h"
LOG_MODULE_REGISTER(optee);
#define DT_DRV_COMPAT linaro_optee_tz
/* amount of physical addresses that can be stored in one page */
#define OPTEE_NUMBER_OF_ADDR_PER_PAGE (OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(uint64_t))
/*
* TEE Implementation ID
*/
#define TEE_IMPL_ID_OPTEE 1
/*
* OP-TEE specific capabilities
*/
#define TEE_OPTEE_CAP_TZ BIT(0)
/* Register values exchanged with the secure world across one SMC/RPC round. */
struct optee_rpc_param {
	uint32_t a0;
	uint32_t a1;
	uint32_t a2;
	uint32_t a3;
	uint32_t a4;
	uint32_t a5;
	uint32_t a6;
	uint32_t a7;
};

/* Conduit function: either arm_smccc_smc or arm_smccc_hvc, chosen at init. */
typedef void (*smc_call_t)(unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3,
			   unsigned long a4, unsigned long a5, unsigned long a6, unsigned long a7,
			   struct arm_smccc_res *res);

/* Devicetree-configured conduit method ("smc" or "hvc" -- confirm). */
struct optee_driver_config {
	const char *method;
};

/* A thread blocked in optee_notif_wait() on a notification key. */
struct optee_notify {
	sys_dnode_t node;
	uint32_t key;
	struct k_sem wait;
};

/* One request queued for the user-space TEE supplicant. */
struct optee_supp_req {
	sys_dnode_t link;
	/* NOTE(review): not set anywhere in this chunk -- presumably managed
	 * by the supplicant recv/send paths elsewhere in the file.
	 */
	bool in_queue;
	uint32_t func;
	uint32_t ret;
	size_t num_params;
	struct tee_param *param;
	/* Given by the supplicant when the request has been served. */
	struct k_sem complete;
};

struct optee_supp {
	/* Serializes access to this struct */
	struct k_mutex mutex;
	int req_id;
	sys_dlist_t reqs;
	struct optee_supp_req *current;
	/* Counts requests available for the supplicant to pick up. */
	struct k_sem reqs_c;
};

/* Per-device run-time state for the OP-TEE driver. */
struct optee_driver_data {
	smc_call_t smc_call;
	/* Pending notification keys with no waiter yet. */
	sys_bitarray_t *notif_bitmap;
	/* List of optee_notify waiters, guarded by notif_lock. */
	sys_dlist_t notif;
	struct k_spinlock notif_lock;
	struct optee_supp supp;
	unsigned long sec_caps;
	/* Serializes calls into the secure world. */
	struct k_sem call_sem;
};
/* Wrapping functions so function pointer can be used */

/* SMC conduit wrapper matching smc_call_t. */
static void optee_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5, unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

/* HVC conduit wrapper matching smc_call_t. */
static void optee_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5, unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
/*
 * Convert an array of tee_param into the OP-TEE message parameter
 * layout shared with the secure world.
 *
 * Value attributes map 1:1; memref attributes are encoded as
 * registered-memory (RMEM) references with the shm reference taken
 * from tee_param.c, the size from .b and the offset from .a.
 *
 * Fix: the original re-checked tp/mtp for NULL on every loop iteration;
 * both are derived from the pointers validated below and are only ever
 * incremented, so the inner check was dead code and has been removed.
 *
 * @return 0 on success, -EINVAL on NULL arguments or an unknown
 *         attribute type.
 */
static int param_to_msg_param(const struct tee_param *param, unsigned int num_param,
			      struct optee_msg_param *msg_param)
{
	int i;
	const struct tee_param *tp = param;
	struct optee_msg_param *mtp = msg_param;

	if (!param || !msg_param) {
		return -EINVAL;
	}

	for (i = 0; i < num_param; i++, tp++, mtp++) {
		switch (tp->attr) {
		case TEE_PARAM_ATTR_TYPE_NONE:
			mtp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
			memset(&mtp->u, 0, sizeof(mtp->u));
			break;
		case TEE_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_ATTR_TYPE_VALUE_INOUT:
			mtp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + tp->attr -
				    TEE_PARAM_ATTR_TYPE_VALUE_INPUT;
			mtp->u.value.a = tp->a;
			mtp->u.value.b = tp->b;
			mtp->u.value.c = tp->c;
			break;
		case TEE_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_ATTR_TYPE_MEMREF_INOUT:
			mtp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + tp->attr -
				    TEE_PARAM_ATTR_TYPE_MEMREF_INPUT;
			mtp->u.rmem.shm_ref = tp->c;
			mtp->u.rmem.size = tp->b;
			mtp->u.rmem.offs = tp->a;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Translate one temporary-memory (TMEM) message parameter back into a
 * tee_param memref.
 *
 * The shm reference travels in .c, the size in .b and the offset of the
 * buffer pointer within the shm region in .a.  A NULL shm reference
 * yields a zeroed offset/reference.
 */
static void msg_param_to_tmp_mem(struct tee_param *p, uint32_t attr,
				 const struct optee_msg_param *mp)
{
	struct tee_shm *shm = (struct tee_shm *)mp->u.tmem.shm_ref;

	p->attr = TEE_PARAM_ATTR_TYPE_MEMREF_INPUT + attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->b = mp->u.tmem.size;

	if (!shm) {
		p->a = 0;
		p->c = 0;
		return;
	}

	/* Offset of the buffer pointer relative to the shm region's physical base. */
	p->a = mp->u.tmem.buf_ptr - k_mem_phys_addr(shm->addr);
	p->c = mp->u.tmem.shm_ref;
}
static int msg_param_to_param(struct tee_param *param, unsigned int num_param,
const struct optee_msg_param *msg_param)
{
int i;
struct tee_param *tp = param;
const struct optee_msg_param *mtp = msg_param;
if (!param || !msg_param) {
return -EINVAL;
}
for (i = 0; i < num_param; i++, tp++, mtp++) {
uint32_t attr = mtp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
if (!tp || !mtp) {
LOG_ERR("Wrong param on %d iteration", i);
return -EINVAL;
}
switch (attr) {
case OPTEE_MSG_ATTR_TYPE_NONE:
memset(tp, 0, sizeof(*tp));
tp->attr = TEE_PARAM_ATTR_TYPE_NONE;
break;
case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
tp->attr = TEE_PARAM_ATTR_TYPE_VALUE_INPUT + attr -
OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
tp->a = mtp->u.value.a;
tp->b = mtp->u.value.b;
tp->c = mtp->u.value.c;
break;
case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
tp->attr = TEE_PARAM_ATTR_TYPE_MEMREF_INPUT + attr -
OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
tp->b = mtp->u.rmem.size;
if (!mtp->u.rmem.shm_ref) {
tp->a = 0;
tp->c = 0;
} else {
tp->a = mtp->u.rmem.offs;
tp->c = mtp->u.rmem.shm_ref;
}
break;
case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
msg_param_to_tmp_mem(tp, attr, mtp);
break;
default:
return -EINVAL;
}
}
return 0;
}
/* Combine two 32-bit register values into one 64-bit value (reg0 = high half). */
static uint64_t regs_to_u64(uint32_t reg0, uint32_t reg1)
{
	uint64_t high = (uint64_t)reg0 << 32;

	return high | reg1;
}
/* Split a 64-bit value into its high (reg0) and low (reg1) 32-bit halves. */
static void u64_to_regs(uint64_t val, uint32_t *reg0, uint32_t *reg1)
{
	*reg0 = (uint32_t)(val >> 32);
	*reg1 = (uint32_t)val;
}
/* True when arg carries exactly one VALUE_INPUT parameter. */
static inline bool check_param_input(struct optee_msg_arg *arg)
{
	if (arg->num_params != 1) {
		return false;
	}

	return arg->params[0].attr == OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
}
static void *optee_construct_page_list(void *buf, uint32_t len, uint64_t *phys_buf);
/*
 * Queue one RPC request for the user-space TEE supplicant and block
 * until the supplicant has served it.
 *
 * The request is heap-allocated so the supplicant threads can hold it
 * after this function's frame would otherwise be gone; it is freed here
 * once complete.
 *
 * NOTE(review): req->in_queue is not initialized here -- presumably the
 * supplicant recv/send paths elsewhere in this file manage it; confirm.
 *
 * @return the supplicant's TEEC_* result code, or
 *         TEEC_ERROR_OUT_OF_MEMORY when the request cannot be
 *         allocated.
 */
static uint32_t optee_call_supp(const struct device *dev, uint32_t func, size_t num_params,
				struct tee_param *param)
{
	struct optee_driver_data *data = (struct optee_driver_data *)dev->data;
	struct optee_supp *supp = &data->supp;
	struct optee_supp_req *req;
	uint32_t ret;

	req = k_malloc(sizeof(*req));
	if (!req) {
		return TEEC_ERROR_OUT_OF_MEMORY;
	}

	k_sem_init(&req->complete, 0, 1);
	req->func = func;
	req->num_params = num_params;
	req->param = param;

	/* Insert the request in the request list */
	k_mutex_lock(&supp->mutex, K_FOREVER);
	sys_dlist_append(&supp->reqs, &req->link);
	k_mutex_unlock(&supp->mutex);

	/* Tell an event listener there's a new request */
	k_sem_give(&supp->reqs_c);

	/*
	 * Wait for supplicant to process and return result, once we've
	 * returned from k_sem_take(&req->c) successfully we have
	 * exclusive access again.
	 */
	k_sem_take(&req->complete, K_FOREVER);

	ret = req->ret;
	k_free(req);

	return ret;
}
/*
 * Ask the supplicant to allocate sz bytes of application shared memory
 * and register the returned buffer (address in param.c, size in
 * param.b) as a shm object.
 */
static int cmd_alloc_suppl(const struct device *dev, size_t sz, struct tee_shm **shm)
{
	uint32_t ret;
	struct tee_param param;

	param.attr = TEE_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.a = OPTEE_RPC_SHM_TYPE_APPL;
	param.b = sz;
	param.c = 0;

	ret = optee_call_supp(dev, OPTEE_RPC_CMD_SHM_ALLOC, 1, &param);
	if (ret) {
		return ret;
	}

	/* Register (not allocate): flags 0 wraps the supplicant's buffer. */
	ret = tee_add_shm(dev, (void *)param.c, 0, param.b, 0, shm);

	return ret;
}
/*
 * Ask the supplicant to free an application shared-memory buffer, then
 * drop the local shm registration.  The shm object pointer itself is
 * passed to the supplicant in param.b.
 */
static void cmd_free_suppl(const struct device *dev, struct tee_shm *shm)
{
	struct tee_param param;

	param.attr = TEE_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.a = OPTEE_RPC_SHM_TYPE_APPL;
	param.b = (uint64_t)shm;
	param.c = 0;

	optee_call_supp(dev, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
	tee_rm_shm(dev, shm);
}
/*
 * RPC: allocate shared memory on behalf of the secure world.
 *
 * Kernel-type requests allocate locally; application-type requests are
 * forwarded to the supplicant.  The buffer is described back to OP-TEE
 * as non-contiguous memory via a page list whose storage is returned
 * through *pages (freed by the caller on the next SHM op).
 */
static void handle_cmd_alloc(const struct device *dev, struct optee_msg_arg *arg,
			     void **pages)
{
	int rc;
	struct tee_shm *shm = NULL;
	void *pl;
	uint64_t pl_phys_and_offset;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!check_param_input(arg)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		/* TODO handle situation when shm was allocated statically so buffer can be reused*/
		rc = tee_add_shm(dev, NULL, 0, arg->params[0].u.value.b, TEE_SHM_ALLOC, &shm);
		break;
	case OPTEE_RPC_SHM_TYPE_APPL:
		rc = cmd_alloc_suppl(dev, arg->params[0].u.value.b, &shm);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (rc) {
		if (rc == -ENOMEM) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		} else {
			arg->ret = TEEC_ERROR_GENERIC;
		}
		return;
	}

	pl = optee_construct_page_list(shm->addr, shm->size, &pl_phys_and_offset);
	if (!pl) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	*pages = pl;
	/* Report the buffer as a non-contiguous TMEM page list. */
	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | OPTEE_MSG_ATTR_NONCONTIG;
	arg->params[0].u.tmem.buf_ptr = pl_phys_and_offset;
	arg->params[0].u.tmem.size = shm->size;
	arg->params[0].u.tmem.shm_ref = (uint64_t)shm;
	arg->ret = TEEC_SUCCESS;
	return;
out:
	tee_shm_free(dev, shm);
}
/*
 * RPC: free shared memory previously handed to the secure world.  The
 * shm object pointer arrives in value.b; value.a selects kernel vs.
 * application (supplicant) memory.
 *
 * NOTE(review): any tee_rm_shm() failure is reported as
 * TEEC_ERROR_OUT_OF_MEMORY regardless of the actual errno -- confirm
 * whether that mapping is intended.
 */
static void handle_cmd_free(const struct device *dev, struct optee_msg_arg *arg)
{
	int rc = 0;

	if (!check_param_input(arg)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		rc = tee_rm_shm(dev, (struct tee_shm *)arg->params[0].u.value.b);
		break;
	case OPTEE_RPC_SHM_TYPE_APPL:
		cmd_free_suppl(dev, (struct tee_shm *)arg->params[0].u.value.b);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (rc) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	arg->ret = TEEC_SUCCESS;
}
/*
 * RPC: report system uptime to the secure world as seconds (value.a)
 * plus nanoseconds (value.b), derived from the kernel tick counter.
 * Expects exactly one VALUE_OUTPUT parameter.
 */
static void handle_cmd_get_time(const struct device *dev, struct optee_msg_arg *arg)
{
	int64_t ticks;
	int64_t up_secs;
	int64_t up_nsecs;

	if (arg->num_params != 1 ||
	    (arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK)
	    != OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	ticks = k_uptime_ticks();
	up_secs = ticks / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
	/* Remaining sub-second ticks converted to nanoseconds. */
	up_nsecs = k_ticks_to_ns_floor64(ticks - up_secs * CONFIG_SYS_CLOCK_TICKS_PER_SEC);
	arg->params[0].u.value.a = up_secs;
	arg->params[0].u.value.b = up_nsecs;
	arg->ret = TEEC_SUCCESS;
}
/* This should be called under notif_lock */
/*
 * Look for a waiter registered on @key.  Despite the name this is not a
 * pure query: when a waiter is found its semaphore is given, waking it.
 * Returns true when a waiter existed (and was signalled).
 */
static inline bool key_is_pending(struct optee_driver_data *data, uint32_t key)
{
	struct optee_notify *iter;

	SYS_DLIST_FOR_EACH_CONTAINER(&data->notif, iter, node) {
		if (iter->key == key) {
			k_sem_give(&iter->wait);
			return true;
		}
	}

	return false;
}
/*
 * Deliver a notification for @key: wake the registered waiter if one
 * exists, otherwise latch the key in the bitmap so a future waiter
 * returns immediately.
 */
static int optee_notif_send(const struct device *dev, uint32_t key)
{
	struct optee_driver_data *data = dev->data;
	k_spinlock_key_t lock_key;

	if (key > CONFIG_OPTEE_MAX_NOTIF) {
		return -EINVAL;
	}

	lock_key = k_spin_lock(&data->notif_lock);

	bool waiter_woken = key_is_pending(data, key);

	if (!waiter_woken) {
		/* If nobody is waiting for key - set bit in the bitmap */
		sys_bitarray_set_bit(data->notif_bitmap, key);
	}

	k_spin_unlock(&data->notif_lock, lock_key);

	return 0;
}
/*
 * Block until a notification for @key arrives.
 *
 * If the key was already latched in the bitmap (SEND arrived first) the
 * wait is skipped.  Only one waiter per key is allowed; a second caller
 * gets -EBUSY.  The waiter node lives on the heap so the sender can
 * signal it safely.
 */
static int optee_notif_wait(const struct device *dev, uint32_t key)
{
	struct optee_driver_data *data = dev->data;
	struct optee_notify *waiter;
	k_spinlock_key_t lock_key;
	int was_set;
	int rc = 0;

	if (key > CONFIG_OPTEE_MAX_NOTIF) {
		return -EINVAL;
	}

	waiter = k_malloc(sizeof(*waiter));
	if (!waiter) {
		return -ENOMEM;
	}

	k_sem_init(&waiter->wait, 0, 1);
	waiter->key = key;

	lock_key = k_spin_lock(&data->notif_lock);

	/*
	 * If notif bit was set then SEND command was already received.
	 * Skipping wait.
	 */
	rc = sys_bitarray_test_and_clear_bit(data->notif_bitmap, key, &was_set);
	if (rc || was_set) {
		goto out;
	}

	/*
	 * If key is already registred, then skip.
	 */
	if (key_is_pending(data, key)) {
		rc = -EBUSY;
		goto out;
	}

	sys_dlist_append(&data->notif, &waiter->node);

	k_spin_unlock(&data->notif_lock, lock_key);

	/* Sleep until optee_notif_send() gives our semaphore. */
	k_sem_take(&waiter->wait, K_FOREVER);

	lock_key = k_spin_lock(&data->notif_lock);
	sys_dlist_remove(&waiter->node);
out:
	k_spin_unlock(&data->notif_lock, lock_key);
	k_free(waiter);

	return rc;
}
static void handle_cmd_notify(const struct device *dev, struct optee_msg_arg *arg)
{
if (!check_param_input(arg)) {
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
return;
}
switch (arg->params[0].u.value.a) {
case OPTEE_RPC_NOTIFICATION_SEND:
if (optee_notif_send(dev, arg->params[0].u.value.b)) {
goto err;
}
break;
case OPTEE_RPC_NOTIFICATION_WAIT:
if (optee_notif_wait(dev, arg->params[0].u.value.b)) {
goto err;
}
break;
default:
goto err;
}
arg->ret = TEEC_SUCCESS;
return;
err:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}
/* RPC: suspend the calling thread for value.a milliseconds. */
static void handle_cmd_wait(const struct device *dev, struct optee_msg_arg *arg)
{
	if (check_param_input(arg)) {
		k_sleep(K_MSEC(arg->params[0].u.value.a));
		arg->ret = TEEC_SUCCESS;
	} else {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
	}
}
/*
 * Clean allocated pages if needed. Some function calls requires pages
 * allocation which should be freed after processing new request.
 * It is safe to free this list when another SHM op (e,g. another alloc
 * or free) was received.
 *
 * Fix: the NULL guard around k_free() was redundant -- k_free(NULL) is
 * a documented no-op in Zephyr -- so the pointer is now freed and
 * cleared unconditionally.
 */
static void free_shm_pages(void **pages)
{
	k_free(*pages);
	*pages = NULL;
}
/*
 * RPC: forward a command the kernel cannot serve to the user-space
 * supplicant, converting parameters to tee_param and back.
 *
 * NOTE(review): with num_params == 0 this calls k_malloc(0), whose
 * result may be NULL and would be reported as OUT_OF_MEMORY -- confirm
 * whether zero-parameter supplicant commands can occur.
 */
static void handle_rpc_supp_cmd(const struct device *dev, struct optee_msg_arg *arg)
{
	struct tee_param *params;
	int ret;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	params = k_malloc(sizeof(*params) * arg->num_params);
	if (!params) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	ret = msg_param_to_param(params, arg->num_params, arg->params);
	if (ret) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		goto out;
	}

	arg->ret = optee_call_supp(dev, arg->cmd, arg->num_params, params);

	/* Copy any output values produced by the supplicant back to the message. */
	ret = param_to_msg_param(params, arg->num_params, arg->params);
	if (ret) {
		arg->ret = TEEC_ERROR_GENERIC;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
	}
out:
	k_free(params);
}
/*
 * Dispatch one OPTEE_SMC_RPC_FUNC_CMD request, whose argument structure
 * lives in the shared-memory buffer @shm.  Unrecognized commands fall
 * through to the supplicant.
 *
 * @return the a0 value for re-entering the secure world
 *         (RETURN_FROM_RPC), or TEEC_ERROR_NOT_IMPLEMENTED for the
 *         unimplemented I2C transfer command.
 */
static uint32_t handle_func_rpc_call(const struct device *dev, struct tee_shm *shm,
				     void **pages)
{
	struct optee_msg_arg *arg = shm->addr;

	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		/* Drop the page list from the previous allocation first. */
		free_shm_pages(pages);
		handle_cmd_alloc(dev, arg, pages);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_cmd_free(dev, arg);
		break;
	case OPTEE_RPC_CMD_GET_TIME:
		handle_cmd_get_time(dev, arg);
		break;
	case OPTEE_RPC_CMD_NOTIFICATION:
		handle_cmd_notify(dev, arg);
		break;
	case OPTEE_RPC_CMD_SUSPEND:
		handle_cmd_wait(dev, arg);
		break;
	case OPTEE_RPC_CMD_I2C_TRANSFER:
		/* TODO: i2c transfer case is not implemented right now */
		return TEEC_ERROR_NOT_IMPLEMENTED;
	default:
		handle_rpc_supp_cmd(dev, arg);
		break;
	}

	return OPTEE_SMC_CALL_RETURN_FROM_RPC;
}
/*
 * Service one RPC returned by the secure world and prepare the register
 * set for re-entry.
 *
 * ALLOC/FREE manage argument shared memory (physical address returned
 * in a1/a2, shm cookie in a4/a5; all zeroed on allocation failure);
 * FOREIGN_INTR simply resumes after the normal-world interrupt; CMD
 * dispatches to handle_func_rpc_call().
 */
static void handle_rpc_call(const struct device *dev, struct optee_rpc_param *param,
			    void **pages)
{
	struct tee_shm *shm = NULL;
	uint32_t res = OPTEE_SMC_CALL_RETURN_FROM_RPC;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		if (!tee_add_shm(dev, NULL, OPTEE_MSG_NONCONTIG_PAGE_SIZE,
				 param->a1,
				 TEE_SHM_ALLOC, &shm)) {
			u64_to_regs((uint64_t)k_mem_phys_addr(shm->addr), &param->a1, &param->a2);
			u64_to_regs((uint64_t)shm, &param->a4, &param->a5);
		} else {
			/* Allocation failed: report a NULL buffer and cookie. */
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = (struct tee_shm *)regs_to_u64(param->a1, param->a2);
		tee_rm_shm(dev, shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/* Foreign interrupt was raised */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = (struct tee_shm *)regs_to_u64(param->a1, param->a2);
		res = handle_func_rpc_call(dev, shm, pages);
		break;
	default:
		break;
	}

	param->a0 = res;
}
/*
 * Issue OPTEE_SMC_CALL_WITH_ARG for @arg and loop, servicing RPC requests,
 * until the secure world returns a final status. Concurrency is throttled
 * by data->call_sem (sized to the secure thread count at init).
 *
 * BUGFIX: restored mis-encoded "¶m.a1"/"¶m" sequences (mangled
 * "&param.a1"/"&param" entities) to valid C.
 *
 * @return TEEC_SUCCESS on OPTEE_SMC_RETURN_OK, TEEC_ERROR_BAD_PARAMETERS
 *         otherwise.
 */
static int optee_call(const struct device *dev, struct optee_msg_arg *arg)
{
	struct optee_driver_data *data = (struct optee_driver_data *)dev->data;
	struct optee_rpc_param param = {
		.a0 = OPTEE_SMC_CALL_WITH_ARG
	};
	void *pages = NULL;

	/* a1:a2 carry the physical address of the message buffer */
	u64_to_regs((uint64_t)k_mem_phys_addr(arg), &param.a1, &param.a2);
	k_sem_take(&data->call_sem, K_FOREVER);

	while (true) {
		struct arm_smccc_res res;

		data->smc_call(param.a0, param.a1, param.a2, param.a3,
			       param.a4, param.a5, param.a6, param.a7, &res);

		if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			/* Secure world requested a service: handle it and resume */
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			handle_rpc_call(dev, &param, &pages);
		} else {
			free_shm_pages(&pages);
			k_sem_give(&data->call_sem);
			return res.a0 == OPTEE_SMC_RETURN_OK ? TEEC_SUCCESS :
				TEEC_ERROR_BAD_PARAMETERS;
		}
	}
}
static int optee_get_version(const struct device *dev, struct tee_version_info *info)
{
if (!info) {
return -EINVAL;
}
/*
* TODO Version and capabilities should be requested from
* OP-TEE OS.
*/
info->impl_id = TEE_IMPL_ID_OPTEE;
info->impl_caps = TEE_OPTEE_CAP_TZ;
info->gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM;
return 0;
}
/*
 * Send OPTEE_MSG_CMD_CLOSE_SESSION for @session_id.
 *
 * A one-shot message buffer is taken from shared memory, filled in, handed
 * to optee_call() and always released afterwards.
 *
 * @return optee_call() result, or the tee_add_shm() error code.
 */
static int optee_close_session(const struct device *dev, uint32_t session_id)
{
	struct tee_shm *shm;
	struct optee_msg_arg *marg;
	int rc = tee_add_shm(dev, NULL, OPTEE_MSG_NONCONTIG_PAGE_SIZE,
			     OPTEE_MSG_GET_ARG_SIZE(0),
			     TEE_SHM_ALLOC, &shm);

	if (rc) {
		LOG_ERR("Unable to get shared memory, rc = %d", rc);
		return rc;
	}

	marg = shm->addr;
	marg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	marg->session = session_id;
	marg->num_params = 0;

	rc = optee_call(dev, marg);

	if (tee_rm_shm(dev, shm)) {
		LOG_ERR("Unable to free shared memory");
	}

	return rc;
}
/*
 * Open a session to the TA identified by arg->uuid (OPTEE_MSG_CMD_OPEN_SESSION).
 *
 * params[0] and params[1] are meta parameters carrying the TA UUID and the
 * client identity/login; the caller's @param entries occupy params[2..].
 *
 * BUGFIXES vs. original:
 * - params[1] was filled from arg->uuid while being sized by
 *   sizeof(arg->clnt_uuid); it must copy arg->clnt_uuid.
 * - output parameters were converted back from marg->params (the meta
 *   parameters) instead of marg->params + 2, where param_to_msg_param()
 *   placed the user parameters.
 *
 * @return 0 on success (TEE-level status is reported via arg->ret),
 *         negative errno on allocation/conversion failures.
 */
static int optee_open_session(const struct device *dev, struct tee_open_session_arg *arg,
			      unsigned int num_param, struct tee_param *param,
			      uint32_t *session_id)
{
	int rc, ret;
	struct tee_shm *shm;
	struct optee_msg_arg *marg;

	if (!arg || !session_id) {
		return -EINVAL;
	}

	rc = tee_add_shm(dev, NULL, OPTEE_MSG_NONCONTIG_PAGE_SIZE,
			 OPTEE_MSG_GET_ARG_SIZE(num_param + 2),
			 TEE_SHM_ALLOC, &shm);
	if (rc) {
		LOG_ERR("Unable to get shared memory, rc = %d", rc);
		return rc;
	}

	marg = shm->addr;
	memset(marg, 0, OPTEE_MSG_GET_ARG_SIZE(num_param + 2));

	marg->num_params = num_param + 2;
	marg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	/* Meta parameters: [0] TA UUID, [1] client UUID + login method */
	marg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META;
	marg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META;
	memcpy(&marg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	memcpy(&marg->params[1].u.value, arg->clnt_uuid, sizeof(arg->clnt_uuid));
	marg->params[1].u.value.c = arg->clnt_login;

	/* User parameters start after the two meta parameters */
	rc = param_to_msg_param(param, num_param, marg->params + 2);
	if (rc) {
		goto out;
	}

	arg->ret = optee_call(dev, marg);
	if (arg->ret) {
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		goto out;
	}

	rc = msg_param_to_param(param, num_param, marg->params + 2);
	if (rc) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/*
		 * Ret is needed here only to print an error. Param conversion error
		 * should be returned from the function.
		 */
		ret = optee_close_session(dev, marg->session);
		if (ret) {
			LOG_ERR("Unable to close session: %d", ret);
		}
		goto out;
	}

	*session_id = marg->session;
	arg->ret = marg->ret;
	arg->ret_origin = marg->ret_origin;
out:
	ret = tee_rm_shm(dev, shm);
	if (ret) {
		LOG_ERR("Unable to free shared memory");
	}

	return (rc) ? rc : ret;
}
/*
 * Ask OP-TEE to cancel a pending operation (OPTEE_MSG_CMD_CANCEL)
 * identified by @cancel_id inside session @session_id.
 *
 * @return optee_call() result, or the tee_add_shm() error code.
 */
static int optee_cancel(const struct device *dev, uint32_t session_id, uint32_t cancel_id)
{
	struct tee_shm *shm;
	struct optee_msg_arg *marg;
	int rc = tee_add_shm(dev, NULL, OPTEE_MSG_NONCONTIG_PAGE_SIZE,
			     OPTEE_MSG_GET_ARG_SIZE(0),
			     TEE_SHM_ALLOC, &shm);

	if (rc) {
		LOG_ERR("Unable to get shared memory, rc = %d", rc);
		return rc;
	}

	marg = shm->addr;
	marg->cmd = OPTEE_MSG_CMD_CANCEL;
	marg->session = session_id;
	marg->cancel_id = cancel_id;
	marg->num_params = 0;

	rc = optee_call(dev, marg);

	if (tee_rm_shm(dev, shm)) {
		LOG_ERR("Unable to free shared memory");
	}

	return rc;
}
/*
 * Invoke a function in an open TA session (OPTEE_MSG_CMD_INVOKE_COMMAND).
 *
 * Allocates a temporary message buffer, converts @param into message
 * parameters, performs the secure call and converts updated parameters
 * back. The TEE-level status is reported through arg->ret/arg->ret_origin;
 * the C return value covers transport/conversion failures only.
 *
 * @param dev       TEE device
 * @param arg       invoke arguments (func, session); also carries results
 * @param num_param number of entries in @param
 * @param param     in/out parameter array
 * @return 0 on success, -EINVAL for NULL @arg, or the shared-memory /
 *         conversion error code
 */
static int optee_invoke_func(const struct device *dev, struct tee_invoke_func_arg *arg,
unsigned int num_param, struct tee_param *param)
{
int rc, ret;
struct tee_shm *shm;
struct optee_msg_arg *marg;
if (!arg) {
return -EINVAL;
}
rc = tee_add_shm(dev, NULL, OPTEE_MSG_NONCONTIG_PAGE_SIZE,
OPTEE_MSG_GET_ARG_SIZE(num_param),
TEE_SHM_ALLOC, &shm);
if (rc) {
LOG_ERR("Unable to get shared memory, rc = %d", rc);
return rc;
}
marg = shm->addr;
memset(marg, 0, OPTEE_MSG_GET_ARG_SIZE(num_param));
marg->num_params = num_param;
marg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
marg->func = arg->func;
marg->session = arg->session;
rc = param_to_msg_param(param, num_param, marg->params);
if (rc) {
goto out;
}
/* arg->ret carries the TEE status; a nonzero value is not a C error */
arg->ret = optee_call(dev, marg);
if (arg->ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
goto out;
}
/* Copy updated output/in-out parameters back to the caller */
rc = msg_param_to_param(param, num_param, marg->params);
if (rc) {
arg->ret = TEEC_ERROR_COMMUNICATION;
arg->ret_origin = TEEC_ORIGIN_COMMS;
goto out;
}
arg->ret = marg->ret;
arg->ret_origin = marg->ret_origin;
out:
/* Always release the message buffer; keep the first error we saw */
ret = tee_rm_shm(dev, shm);
if (ret) {
LOG_ERR("Unable to free shared memory");
}
return (rc) ? rc : ret;
}
/*
 * Build an OPTEE_MSG_ATTR_NONCONTIG page list describing @buf/@len.
 *
 * Each page-size chunk of the list holds (OPTEE_NUMBER_OF_ADDR_PER_PAGE - 1)
 * physical page addresses followed by the physical address of the next
 * chunk. *phys_buf receives the physical address of the list with the
 * buffer's page offset in the 12 least significant bits.
 *
 * BUGFIX: the original linked every chunk's next_page to (pl + 1), which is
 * only correct for the first chunk; chunks are now chained relative to the
 * current index (pl + pl_idx + 1).
 *
 * @return the allocated page list (caller frees with k_free()), or NULL on
 *         allocation failure.
 */
static void *optee_construct_page_list(void *buf, uint32_t len, uint64_t *phys_buf)
{
	const size_t page_size = OPTEE_MSG_NONCONTIG_PAGE_SIZE;
	const size_t num_pages_in_pl = OPTEE_NUMBER_OF_ADDR_PER_PAGE - 1;
	uint32_t page_offset = (uintptr_t)buf & (page_size - 1);
	uint8_t *buf_page;
	uint32_t num_pages;
	uint32_t list_size;

	/* see description of OPTEE_MSG_ATTR_NONCONTIG */
	struct {
		uint64_t pages[OPTEE_NUMBER_OF_ADDR_PER_PAGE - 1];
		uint64_t next_page;
	} *pl;

	BUILD_ASSERT(sizeof(*pl) == OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	num_pages = ROUND_UP(page_offset + len, page_size) / page_size;
	list_size = DIV_ROUND_UP(num_pages, num_pages_in_pl) * page_size;
	pl = k_aligned_alloc(page_size, list_size);
	if (!pl) {
		return NULL;
	}

	memset(pl, 0, list_size);
	buf_page = (uint8_t *)ROUND_DOWN((uintptr_t)buf, page_size);

	for (uint32_t pl_idx = 0; pl_idx < list_size / page_size; pl_idx++) {
		for (uint32_t page_idx = 0; num_pages && page_idx < num_pages_in_pl; page_idx++) {
			pl[pl_idx].pages[page_idx] = k_mem_phys_addr(buf_page);
			buf_page += page_size;
			num_pages--;
		}
		if (!num_pages) {
			break;
		}
		/* Chain to the chunk that follows the current one */
		pl[pl_idx].next_page = k_mem_phys_addr(pl + pl_idx + 1);
	}

	/* 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold page offset
	 * of user buffer
	 */
	*phys_buf = k_mem_phys_addr(pl) | page_offset;

	return pl;
}
/*
 * Register buffer @shm with OP-TEE as dynamic shared memory
 * (OPTEE_MSG_CMD_REGISTER_SHM; requires CFG_CORE_DYN_SHM in OP-TEE OS).
 *
 * A temporary message buffer is allocated, a non-contiguous page list is
 * built for the payload and its physical address is passed as a TMEM
 * parameter. The shm pointer itself is used as the reference cookie.
 *
 * @return 0 on success, -ENOMEM if the page list cannot be built,
 *         -EINVAL if the secure call fails, or the tee_add_shm() error.
 */
static int optee_shm_register(const struct device *dev, struct tee_shm *shm)
{
struct tee_shm *shm_arg;
struct optee_msg_arg *msg_arg;
void *pl;
uint64_t pl_phys_and_offset;
int rc;
rc = tee_add_shm(dev, NULL, OPTEE_MSG_NONCONTIG_PAGE_SIZE, OPTEE_MSG_GET_ARG_SIZE(1),
TEE_SHM_ALLOC, &shm_arg);
if (rc) {
return rc;
}
msg_arg = shm_arg->addr;
memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
/* Physical page list; 12 LSBs of pl_phys_and_offset carry the page offset */
pl = optee_construct_page_list(shm->addr, shm->size, &pl_phys_and_offset);
if (!pl) {
rc = -ENOMEM;
goto out;
}
/* for this command op-tee os should support CFG_CORE_DYN_SHM */
msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
/* op-tee OS ignores this cmd in case when TYPE_TMEM_OUTPUT and NONCONTIG aren't set */
msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | OPTEE_MSG_ATTR_NONCONTIG;
msg_arg->num_params = 1;
msg_arg->params->u.tmem.buf_ptr = pl_phys_and_offset;
/* The shm pointer doubles as the cookie OP-TEE hands back later */
msg_arg->params->u.tmem.shm_ref = (uint64_t)shm;
msg_arg->params->u.tmem.size = shm->size;
if (optee_call(dev, msg_arg)) {
rc = -EINVAL;
}
k_free(pl);
out:
tee_rm_shm(dev, shm_arg);
return rc;
}
/*
 * Revoke a previously registered shared-memory buffer
 * (OPTEE_MSG_CMD_UNREGISTER_SHM). The shm pointer is the reference cookie
 * that was handed to the secure world at registration time.
 *
 * @return 0 on success, -EINVAL if the secure call fails, or the
 *         tee_add_shm() error code.
 */
static int optee_shm_unregister(const struct device *dev, struct tee_shm *shm)
{
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg = NULL;
	int rc = tee_add_shm(dev, NULL, OPTEE_MSG_NONCONTIG_PAGE_SIZE,
			     OPTEE_MSG_GET_ARG_SIZE(1), TEE_SHM_ALLOC, &shm_arg);

	if (rc) {
		return rc;
	}

	msg_arg = shm_arg->addr;
	memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
	msg_arg->num_params = 1;
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (uint64_t)shm;

	if (optee_call(dev, msg_arg) != 0) {
		rc = -EINVAL;
	}

	tee_rm_shm(dev, shm_arg);

	return rc;
}
/*
 * Receive the next request destined for the TEE supplicant.
 *
 * Blocks on supp->reqs_c until a request is queued. On success the request
 * becomes supp->current (only one outstanding request is supported),
 * *func/*num_params are filled in and the request's parameters are copied
 * into @param.
 *
 * @param num_params in: capacity of @param; out: number of params used
 * @return 0 on success, -EBUSY if another request is already being served,
 *         -EINVAL if @param has fewer than req->num_params entries
 */
static int optee_suppl_recv(const struct device *dev, uint32_t *func, unsigned int *num_params,
struct tee_param *param)
{
struct optee_driver_data *data = (struct optee_driver_data *)dev->data;
struct optee_supp *supp = &data->supp;
struct optee_supp_req *req = NULL;
while (true) {
k_mutex_lock(&supp->mutex, K_FOREVER);
/* Oldest pending request sits at the list head; peek under the lock */
req = (struct optee_supp_req *)sys_dlist_peek_head(&supp->reqs);
if (req) {
if (supp->current) {
LOG_ERR("Concurrent supp_recv calls are not supported");
k_mutex_unlock(&supp->mutex);
return -EBUSY;
}
if (*num_params < req->num_params) {
LOG_ERR("Not enough space for params, need at least %lu",
req->num_params);
k_mutex_unlock(&supp->mutex);
return -EINVAL;
}
/* Claim the request before dropping the lock */
supp->current = req;
sys_dlist_remove(&req->link);
}
k_mutex_unlock(&supp->mutex);
if (req) {
break;
}
/* Nothing queued yet: sleep until a requester signals reqs_c */
k_sem_take(&supp->reqs_c, K_FOREVER);
}
*func = req->func;
*num_params = req->num_params;
memcpy(param, req->param, sizeof(struct tee_param) * req->num_params);
return 0;
}
static int optee_suppl_send(const struct device *dev, unsigned int ret, unsigned int num_params,
struct tee_param *param)
{
struct optee_driver_data *data = (struct optee_driver_data *)dev->data;
struct optee_supp *supp = &data->supp;
struct optee_supp_req *req = NULL;
size_t n;
k_mutex_lock(&supp->mutex, K_FOREVER);
if (supp->current && num_params >= supp->current->num_params) {
req = supp->current;
supp->current = NULL;
} else {
LOG_ERR("Invalid number of parameters, expected %lu got %u", req->num_params,
num_params);
}
k_mutex_unlock(&supp->mutex);
if (!req) {
return -EINVAL;
}
/* Update out and in/out parameters */
for (n = 0; n < req->num_params; n++) {
struct tee_param *p = req->param + n;
switch (p->attr & TEE_PARAM_ATTR_TYPE_MASK) {
case TEE_PARAM_ATTR_TYPE_VALUE_OUTPUT:
case TEE_PARAM_ATTR_TYPE_VALUE_INOUT:
p->a = param[n].a;
p->b = param[n].b;
p->c = param[n].c;
break;
case TEE_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_PARAM_ATTR_TYPE_MEMREF_INOUT:
LOG_WRN("Memref params are not fully tested");
p->a = param[n].a;
p->b = param[n].b;
p->c = param[n].c;
break;
default:
break;
}
}
req->ret = ret;
/* Let the requesting thread continue */
k_mutex_lock(&supp->mutex, K_FOREVER);
supp->current = NULL;
k_mutex_unlock(&supp->mutex);
k_sem_give(&req->complete);
return 0;
}
/*
 * Resolve the SMC conduit ("smc" or "hvc", from devicetree) into the
 * matching call function pointer in the driver data.
 *
 * @return 0 on success, -EINVAL for any other method string.
 */
static int set_optee_method(const struct device *dev)
{
	const struct optee_driver_config *conf = dev->config;
	struct optee_driver_data *data = dev->data;

	if (strcmp(conf->method, "hvc") == 0) {
		data->smc_call = optee_smccc_hvc;
		return 0;
	}

	if (strcmp(conf->method, "smc") == 0) {
		data->smc_call = optee_smccc_smc;
		return 0;
	}

	LOG_ERR("Invalid smc_call method");
	return -EINVAL;
}
/*
 * Verify the firmware implements the OP-TEE API by comparing the
 * OPTEE_SMC_CALLS_UID response against the expected UID words.
 */
static bool optee_check_uid(const struct device *dev)
{
	struct optee_driver_data *data = (struct optee_driver_data *)dev->data;
	struct arm_smccc_res res;

	data->smc_call(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	return res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	       res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3;
}
/*
 * Query and log the OP-TEE OS revision. a0/a1 carry major/minor; a2, when
 * nonzero, carries a build identifier.
 */
static void optee_get_revision(const struct device *dev)
{
	struct optee_driver_data *data = (struct optee_driver_data *)dev->data;
	struct arm_smccc_res res = { 0 };

	data->smc_call(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a2 != 0) {
		LOG_INF("OPTEE revision %lu.%lu (%08lx)", res.a0,
			res.a1, res.a2);
	} else {
		LOG_INF("OPTEE revision %lu.%lu", res.a0, res.a1);
	}
}
/*
 * Exchange capability flags with the secure world. Reports
 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR when running on a single CPU and stores
 * the returned secure-world capabilities in @sec_caps.
 *
 * @return true on success, false when the SMC is rejected.
 */
static bool optee_exchange_caps(const struct device *dev, unsigned long *sec_caps)
{
	struct optee_driver_data *data = (struct optee_driver_data *)dev->data;
	struct arm_smccc_res res = { 0 };
	unsigned long nsec_caps = 0;

	if (!IS_ENABLED(CONFIG_SMP) || arch_num_cpus() == 1) {
		nsec_caps |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
	}

	data->smc_call(OPTEE_SMC_EXCHANGE_CAPABILITIES, nsec_caps, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != OPTEE_SMC_RETURN_OK) {
		return false;
	}

	*sec_caps = res.a1;

	return true;
}
/*
 * Query how many concurrent secure-world threads OP-TEE supports and store
 * the count in @thread_count.
 *
 * FIX: the original declared an `unsigned long` return while actually
 * returning true/false; the type is changed to bool for consistency with
 * optee_exchange_caps(). The only caller tests the result in a boolean
 * context, so this static-function change is backward compatible.
 *
 * @return true on success, false when the SMC is rejected.
 */
static bool optee_get_thread_count(const struct device *dev, unsigned long *thread_count)
{
	struct optee_driver_data *data = (struct optee_driver_data *)dev->data;
	struct arm_smccc_res res = { 0 };

	data->smc_call(OPTEE_SMC_GET_THREAD_COUNT, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != OPTEE_SMC_RETURN_OK) {
		return false;
	}

	*thread_count = res.a1;

	return true;
}
/*
 * Driver init: resolve the SMC/HVC conduit, verify the OP-TEE API UID, log
 * the OS revision, negotiate capabilities (dynamic shared memory is
 * mandatory) and size the call semaphore by the secure thread count.
 *
 * FIX: removed the stray trailing '\n' from the capabilities-exchange
 * LOG_ERR string; no other log call in this file carries one.
 *
 * @return 0 on success, -ENOTSUP/-EINVAL on probe failures.
 */
static int optee_init(const struct device *dev)
{
	struct optee_driver_data *data = dev->data;
	unsigned long thread_count;

	if (set_optee_method(dev)) {
		return -ENOTSUP;
	}

	sys_dlist_init(&data->notif);
	k_mutex_init(&data->supp.mutex);
	k_sem_init(&data->supp.reqs_c, 0, 1);
	sys_dlist_init(&data->supp.reqs);

	if (!optee_check_uid(dev)) {
		LOG_ERR("OPTEE API UID mismatch");
		return -EINVAL;
	}

	optee_get_revision(dev);

	if (!optee_exchange_caps(dev, &data->sec_caps)) {
		LOG_ERR("OPTEE capabilities exchange failed");
		return -EINVAL;
	}

	if (!(data->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)) {
		LOG_ERR("OPTEE does not support dynamic shared memory");
		return -ENOTSUP;
	}

	if (!optee_get_thread_count(dev, &thread_count)) {
		LOG_ERR("OPTEE unable to get maximum thread count");
		return -ENOTSUP;
	}

	/* Limit concurrent secure calls to the number of OP-TEE threads */
	k_sem_init(&data->call_sem, thread_count, thread_count);

	return 0;
}
/* TEE subsystem driver API: maps generic TEE operations to OP-TEE calls */
static const struct tee_driver_api optee_driver_api = {
.get_version = optee_get_version,
.open_session = optee_open_session,
.close_session = optee_close_session,
.cancel = optee_cancel,
.invoke_func = optee_invoke_func,
.shm_register = optee_shm_register,
.shm_unregister = optee_shm_unregister,
.suppl_recv = optee_suppl_recv,
.suppl_send = optee_suppl_send,
};
/*
* Bitmap of the ongoing notifications, received from OP-TEE. Maximum number is
* CONFIG_OPTEE_MAX_NOTIF. This bitmap is needed to handle case when SEND command
* was received before WAIT command from OP-TEE. In this case WAIT will not create
* locks.
*/
#define OPTEE_DT_DEVICE_INIT(inst) \
SYS_BITARRAY_DEFINE_STATIC(notif_bitmap_##inst, CONFIG_OPTEE_MAX_NOTIF); \
\
static struct optee_driver_config optee_config_##inst = { \
.method = DT_INST_PROP(inst, method) \
}; \
\
static struct optee_driver_data optee_data_##inst = { \
.notif_bitmap = ¬if_bitmap_##inst \
}; \
\
DEVICE_DT_INST_DEFINE(inst, optee_init, NULL, &optee_data_##inst, \
&optee_config_##inst, POST_KERNEL, \
CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
&optee_driver_api); \
DT_INST_FOREACH_STATUS_OKAY(OPTEE_DT_DEVICE_INIT)
``` | /content/code_sandbox/drivers/tee/optee/optee.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 9,394 |
```unknown
# Keep this Kconfig separate from the fuel_gauge/sbs_gauge Kconfig so it may be shared with the
# sensor/sbs_gauge Kconfig.
config EMUL_SBS_GAUGE
bool "Emulate an SBS 1.1 compliant smart battery fuel gauge"
default y
depends on EMUL
depends on SBS_GAUGE_NEW_API || SBS_GAUGE
help
It provides readings which follow a simple sequence, thus allowing
test code to check that things are working as expected.
``` | /content/code_sandbox/drivers/fuel_gauge/sbs_gauge/Kconfig.emul_sbs_gauge | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 115 |
```unknown
config SBS_GAUGE_NEW_API
bool "Smart Battery Fuel Gauge"
default y
depends on DT_HAS_SBS_SBS_GAUGE_NEW_API_ENABLED
select I2C
help
Enable I2C-based/SMBus-based driver for a Smart Battery Fuel Gauge.
rsource "Kconfig.emul_sbs_gauge"
``` | /content/code_sandbox/drivers/fuel_gauge/sbs_gauge/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 74 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_SENSOR_SBS_GAUGE_H_
#define ZEPHYR_DRIVERS_SENSOR_SBS_GAUGE_H_
#include <stdint.h>
#include <zephyr/drivers/i2c.h>
/** Standard Commands */
#define SBS_GAUGE_CMD_MANUFACTURER_ACCESS 0x00 /* ManufacturerAccess */
#define SBS_GAUGE_CMD_REM_CAPACITY_ALARM 0x01 /* LowCapacityAlarmThreshold */
#define SBS_GAUGE_CMD_REM_TIME_ALARM 0x02 /* RemainingTimeToEmptyThreshold */
#define SBS_GAUGE_CMD_BATTERY_MODE 0x03 /* BatteryOperatingMode */
#define SBS_GAUGE_CMD_AR 0x04 /* AtRate */
#define SBS_GAUGE_CMD_ARTTF 0x05 /* AtRateTimeToFull */
#define SBS_GAUGE_CMD_ARTTE 0x06 /* AtRateTimeToEmpty */
#define SBS_GAUGE_CMD_AROK 0x07 /* AtRateOK */
#define SBS_GAUGE_CMD_TEMP 0x08 /* Temperature */
#define SBS_GAUGE_CMD_VOLTAGE 0x09 /* Voltage */
#define SBS_GAUGE_CMD_CURRENT 0x0A /* Current */
#define SBS_GAUGE_CMD_AVG_CURRENT 0x0B /* AverageCurrent */
#define SBS_GAUGE_CMD_MAX_ERROR 0x0C /* MaxError */
#define SBS_GAUGE_CMD_RSOC 0x0D /* RelativeStateOfCharge */
#define SBS_GAUGE_CMD_ASOC 0x0E /* AbsoluteStateOfCharge */
#define SBS_GAUGE_CMD_REM_CAPACITY 0x0F /* RemainingCapacity */
#define SBS_GAUGE_CMD_FULL_CAPACITY 0x10 /* FullChargeCapacity */
#define SBS_GAUGE_CMD_RUNTIME2EMPTY 0x11 /* RunTimeToEmpty */
#define SBS_GAUGE_CMD_AVG_TIME2EMPTY 0x12 /* AverageTimeToEmpty */
#define SBS_GAUGE_CMD_AVG_TIME2FULL 0x13 /* AverageTimeToFull */
#define SBS_GAUGE_CMD_CHG_CURRENT 0x14 /* ChargeCurrent */
#define SBS_GAUGE_CMD_CHG_VOLTAGE 0x15 /* ChargeVoltage */
#define SBS_GAUGE_CMD_FLAGS 0x16 /* BatteryStatus */
#define SBS_GAUGE_CMD_CYCLE_COUNT 0x17 /* CycleCount */
#define SBS_GAUGE_CMD_NOM_CAPACITY 0x18 /* DesignCapacity */
#define SBS_GAUGE_CMD_DESIGN_VOLTAGE 0x19 /* DesignVoltage */
#define SBS_GAUGE_CMD_SPECS_INFO 0x1A /* SpecificationInfo */
#define SBS_GAUGE_CMD_MANUFACTURER_DATE 0x1B /* ManufacturerDate */
#define SBS_GAUGE_CMD_SN 0x1C /* SerialNumber */
#define SBS_GAUGE_CMD_MANUFACTURER_NAME 0x20 /* ManufacturerName */
#define SBS_GAUGE_CMD_DEVICE_NAME 0x21 /* DeviceName */
#define SBS_GAUGE_CMD_DEVICE_CHEMISTRY 0x22 /* DeviceChemistry */
#define SBS_GAUGE_CMD_MANUFACTURER_DATA 0x23 /* ManufacturerData */
#define SBS_GAUGE_DELAY 1000
/*
* Nearly all cutoff payloads are actually a singular value that must be written twice to the fuel
* gauge. For the case where it's a singular value that must only be written to the fuel gauge only
* once, retransmitting the duplicate write has no significant negative consequences.
*
* Why not devicetree: Finding the maximum length of all the battery cutoff payloads in a devicetree
* at compile-time would require labyrinthine amount of macro-batics.
*
* Why not compute at runtime: It's not worth the memory given having more than a single fuel gauge
* is rare, and most will have a payload size of 2.
*
* This is validated as a BUILD_ASSERT in the driver.
*/
#define SBS_GAUGE_CUTOFF_PAYLOAD_MAX_SIZE 2
/* Devicetree-sourced description of the battery cutoff command sequence */
struct sbs_gauge_battery_cutoff_config {
/* Size of the payload array */
size_t payload_size;
/* Array SMBus word values to write to cut off the battery */
uint32_t payload[SBS_GAUGE_CUTOFF_PAYLOAD_MAX_SIZE];
/* Register to write cutoff payload */
uint8_t reg;
};

/* Per-instance driver configuration resolved from devicetree */
struct sbs_gauge_config {
/* I2C bus and device address of the gauge */
struct i2c_dt_spec i2c;
/* Battery cutoff description; presumably unset when not configured in DT — confirm in driver */
const struct sbs_gauge_battery_cutoff_config *cutoff_cfg;
};
#endif
``` | /content/code_sandbox/drivers/fuel_gauge/sbs_gauge/sbs_gauge.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 989 |
```c
/*
*
*
* Emulator for bq27z746 fuel gauge
*/
#include <string.h>
#define DT_DRV_COMPAT ti_bq27z746
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(EMUL_BQ27Z746);
#include <zephyr/device.h>
#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/drivers/i2c_emul.h>
#include <zephyr/sys/byteorder.h>
#include "bq27z746.h"
#define BQ27Z746_MAC_DATA_LEN 32
#define BQ27Z746_MAC_OVERHEAD_LEN 4 /* 2 cmd bytes, 1 length byte, 1 checksum byte */
#define BQ27Z746_MAC_COMPLETE_LEN (BQ27Z746_MAC_DATA_LEN + BQ27Z746_MAC_OVERHEAD_LEN)
/* Mutable emulator state (one per emulated device instance) */
struct bq27z746_emul_data {
/* Last MAC command latched via a write to BQ27Z746_ALTMANUFACTURERACCESS */
uint16_t mac_cmd;
};

/** Static configuration for the emulator */
struct bq27z746_emul_cfg {
/** I2C address of emulator */
uint16_t addr;
};
/*
 * Serve a block read from the AltManufacturerAccess (ALTMAC) window for
 * the command previously latched by emul_bq27z746_write().
 *
 * Supported commands: MANUFACTURER_NAME, DEVICE_NAME, DEVICE_CHEM (string
 * payloads, copied without the NUL terminator). Any other latched command
 * fails with -EIO.
 *
 * @param target emulator instance (supplies the latched command)
 * @param buf    output buffer, filled as cmd/data/checksum/length
 * @param len    must be at least BQ27Z746_MAC_COMPLETE_LEN (36) bytes
 * @return 0 on success, -EIO on short reads or unsupported commands
 */
static int emul_bq27z746_read_altmac(const struct emul *target, uint8_t *buf, size_t len)
{
const uint8_t manufacturer_name[] = "Texas Instruments";
const uint8_t device_name[] = "BQ27Z746";
const uint8_t device_chemistry[] = "LION";
const struct bq27z746_emul_data *data = target->data;
if (len < BQ27Z746_MAC_COMPLETE_LEN) {
LOG_ERR("When reading the ALTMAC, one must read the full %u byte",
BQ27Z746_MAC_COMPLETE_LEN);
return -EIO;
}
memset(buf, 0, len);
/*
 * The data read from BQ27Z746_ALTMANUFACTURERACCESS is:
 * 0..1: The command (for verification)
 * 2..33: The data
 * 34: Checksum calculated as (uint8_t)(0xFF - (sum of all command and data bytes))
 * 35: Length including command, checksum and length (e.g. data length + 4)
 */
/* Put the command in the first two byte */
sys_put_le16(data->mac_cmd, buf);
/* Based on the command, put some data and the length into the buffer. */
/* In all of the operations, don't consider the zero-terminator. */
switch (data->mac_cmd) {
case BQ27Z746_MAC_CMD_MANUFACTURER_NAME:
memcpy(&buf[2], manufacturer_name, sizeof(manufacturer_name) - 1);
buf[35] = sizeof(manufacturer_name) - 1 + BQ27Z746_MAC_OVERHEAD_LEN;
break;
case BQ27Z746_MAC_CMD_DEVICE_NAME:
memcpy(&buf[2], device_name, sizeof(device_name) - 1);
buf[35] = sizeof(device_name) - 1 + BQ27Z746_MAC_OVERHEAD_LEN;
break;
case BQ27Z746_MAC_CMD_DEVICE_CHEM:
memcpy(&buf[2], device_chemistry, sizeof(device_chemistry) - 1);
buf[35] = sizeof(device_chemistry) - 1 + BQ27Z746_MAC_OVERHEAD_LEN;
break;
default:
LOG_ERR("ALTMAC command 0x%x is not supported", data->mac_cmd);
return -EIO;
}
/* Calculate the checksum */
/* Covers bytes 0..33, i.e. the command word plus the 32 data bytes */
uint8_t sum = 0; /* Intentionally 8 bit wide and overflowing */
for (int i = 0; i < BQ27Z746_MAC_COMPLETE_LEN - 2; i++) {
sum += buf[i];
}
buf[34] = 0xFF - sum;
return 0;
}
/*
 * Handle an I2C write to the emulator. buf[0] is the register address;
 * only the ALTMAC register is writable and latches a little-endian
 * 16-bit MAC command from the payload.
 *
 * @return 0 on success, -EIO for any other register.
 */
static int emul_bq27z746_write(const struct emul *target, uint8_t *buf, size_t len)
{
	struct bq27z746_emul_data *data = target->data;

	if (buf[0] != BQ27Z746_ALTMANUFACTURERACCESS) {
		LOG_ERR("Writing is only supported to ALTMAC currently");
		return -EIO;
	}

	data->mac_cmd = sys_get_le16(&buf[1]);

	return 0;
}
static int emul_bq27z746_reg_read(const struct emul *target, int reg, int *val)
{
switch (reg) {
case BQ27Z746_MANUFACTURERACCESS:
*val = 1;
break;
case BQ27Z746_ATRATE:
*val = -2;
break;
case BQ27Z746_ATRATETIMETOEMPTY:
*val = 1;
break;
case BQ27Z746_TEMPERATURE:
*val = 1;
break;
case BQ27Z746_VOLTAGE:
*val = 1;
break;
case BQ27Z746_BATTERYSTATUS:
*val = 1;
break;
case BQ27Z746_CURRENT:
*val = -2;
break;
case BQ27Z746_REMAININGCAPACITY:
*val = 1;
break;
case BQ27Z746_FULLCHARGECAPACITY:
*val = 1;
break;
case BQ27Z746_AVERAGECURRENT:
*val = -2;
break;
case BQ27Z746_AVERAGETIMETOEMPTY:
*val = 1;
break;
case BQ27Z746_AVERAGETIMETOFULL:
*val = 1;
break;
case BQ27Z746_MAXLOADCURRENT:
*val = 1;
break;
case BQ27Z746_MAXLOADTIMETOEMPTY:
*val = 1;
break;
case BQ27Z746_AVERAGEPOWER:
*val = 1;
break;
case BQ27Z746_BTPDISCHARGESET:
*val = 1;
break;
case BQ27Z746_BTPCHARGESET:
*val = 1;
break;
case BQ27Z746_INTERNALTEMPERATURE:
*val = 1;
break;
case BQ27Z746_CYCLECOUNT:
*val = 1;
break;
case BQ27Z746_RELATIVESTATEOFCHARGE:
*val = 1;
break;
case BQ27Z746_STATEOFHEALTH:
*val = 1;
break;
case BQ27Z746_CHARGINGVOLTAGE:
*val = 1;
break;
case BQ27Z746_CHARGINGCURRENT:
*val = 1;
break;
case BQ27Z746_TERMINATEVOLTAGE:
*val = 1;
break;
case BQ27Z746_TIMESTAMPUPPER:
*val = 1;
break;
case BQ27Z746_TIMESTAMPLOWER:
*val = 1;
break;
case BQ27Z746_QMAXCYCLES:
*val = 1;
break;
case BQ27Z746_DESIGNCAPACITY:
*val = 1;
break;
case BQ27Z746_ALTMANUFACTURERACCESS:
*val = 1;
break;
case BQ27Z746_MACDATA:
*val = 1;
break;
case BQ27Z746_MACDATASUM:
*val = 1;
break;
case BQ27Z746_MACDATALEN:
*val = 1;
break;
case BQ27Z746_VOLTHISETTHRESHOLD:
*val = 1;
break;
case BQ27Z746_VOLTHICLEARTHRESHOLD:
*val = 1;
break;
case BQ27Z746_VOLTLOSETTHRESHOLD:
*val = 1;
break;
case BQ27Z746_VOLTLOCLEARTHRESHOLD:
*val = 1;
break;
case BQ27Z746_TEMPHISETTHRESHOLD:
*val = 1;
break;
case BQ27Z746_TEMPHICLEARTHRESHOLD:
*val = 1;
break;
case BQ27Z746_TEMPLOSETTHRESHOLD:
*val = 1;
break;
case BQ27Z746_TEMPLOCLEARTHRESHOLD:
*val = 1;
break;
case BQ27Z746_INTERRUPTSTATUS:
*val = 1;
break;
case BQ27Z746_SOCDELTASETTHRESHOLD:
*val = 1;
break;
default:
LOG_ERR("Unknown register 0x%x read", reg);
return -EIO;
}
LOG_INF("read 0x%x = 0x%x", reg, *val);
return 0;
}
/*
 * Emulated register-read entry point. A 2-byte read is served from the
 * word-register table (little-endian); any other length is only valid as
 * an ALTMAC block read.
 *
 * BUGFIXES vs. original:
 * - `val` was declared `unsigned int` but passed to
 *   emul_bq27z746_reg_read(), which takes `int *` (incompatible pointer
 *   types); declared `int` now.
 * - the return code of emul_bq27z746_read_altmac() was ignored; it is
 *   propagated to the caller.
 * - the LOG_DBG format used %u for a size_t; changed to %zu.
 *
 * @return 0 on success, -EIO on unknown registers/lengths.
 */
static int emul_bq27z746_read(const struct emul *target, int reg, uint8_t *buf, size_t len)
{
	if (len == 2) {
		int val;
		int rc = emul_bq27z746_reg_read(target, reg, &val);

		if (rc) {
			return rc;
		}
		sys_put_le16(val, buf);
		return 0;
	}

	switch (reg) {
	case BQ27Z746_ALTMANUFACTURERACCESS:
		LOG_DBG("Reading %zu byte from ALTMAC", len);
		return emul_bq27z746_read_altmac(target, buf, len);
	default:
		LOG_ERR("Reading is only supported from ALTMAC currently");
		return -EIO;
	}
}
/*
 * I2C transfer hook: route emulated bus traffic to the read/write helpers.
 *
 * One message => register write; two messages => register-address write
 * followed by a data read (standard combined transaction).
 *
 * BUGFIX: the error message on the two-message path claimed the second
 * message "must be an I2C write" while the code requires (and the branch
 * proves) it must be an I2C *read*; the message is corrected.
 *
 * @return 0 on success, -EIO on malformed transactions.
 */
static int bq27z746_emul_transfer_i2c(const struct emul *target, struct i2c_msg *msgs, int num_msgs,
				      int addr)
{
	int reg;

	__ASSERT_NO_MSG(msgs && num_msgs);

	i2c_dump_msgs_rw(target->dev, msgs, num_msgs, addr, false);

	switch (num_msgs) {
	case 1:
		if (msgs->flags & I2C_MSG_READ) {
			LOG_ERR("Unexpected read");
			return -EIO;
		}
		return emul_bq27z746_write(target, msgs->buf, msgs->len);
	case 2:
		if (msgs->flags & I2C_MSG_READ) {
			LOG_ERR("Unexpected read");
			return -EIO;
		}
		if (msgs->len != 1) {
			LOG_ERR("Unexpected msg0 length %d", msgs->len);
			return -EIO;
		}
		reg = msgs->buf[0];
		/* Now process the 'read' part of the message */
		msgs++;
		if (!(msgs->flags & I2C_MSG_READ)) {
			LOG_ERR("Second message must be an I2C read");
			return -EIO;
		}
		return emul_bq27z746_read(target, reg, msgs->buf, msgs->len);
	default:
		LOG_ERR("Invalid number of messages: %d", num_msgs);
		return -EIO;
	}
}
/* I2C emulator backend: all emulated bus traffic funnels through transfer() */
static const struct i2c_emul_api bq27z746_emul_api_i2c = {
.transfer = bq27z746_emul_transfer_i2c,
};
/**
 * Set up a new emulator (I2C). No per-instance initialization is needed.
 *
 * @param target Emulation information (unused)
 * @param parent Device to emulate (unused)
 * @return 0 indicating success (always)
 */
static int emul_bq27z746_init(const struct emul *target, const struct device *parent)
{
ARG_UNUSED(target);
ARG_UNUSED(parent);
return 0;
}
/*
 * Main instantiation macro.
 *
 * Declares per-instance emulator data and config (I2C address taken from
 * the devicetree node) and registers the emulator with the emulation
 * framework via EMUL_DT_INST_DEFINE.
 */
#define BQ27Z746_EMUL(n) \
static struct bq27z746_emul_data bq27z746_emul_data_##n; \
static const struct bq27z746_emul_cfg bq27z746_emul_cfg_##n = { \
.addr = DT_INST_REG_ADDR(n), \
}; \
EMUL_DT_INST_DEFINE(n, emul_bq27z746_init, &bq27z746_emul_data_##n, \
&bq27z746_emul_cfg_##n, &bq27z746_emul_api_i2c, NULL)
DT_INST_FOREACH_STATUS_OKAY(BQ27Z746_EMUL)
``` | /content/code_sandbox/drivers/fuel_gauge/bq27z746/emul_bq27z746.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,757 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_FUELGAUGE_BQ27Z746_GAUGE_H_
#define ZEPHYR_DRIVERS_FUELGAUGE_BQ27Z746_GAUGE_H_
#include <zephyr/drivers/i2c.h>
/* Registers */
#define BQ27Z746_MANUFACTURERACCESS 0x00 /* R/W */
#define BQ27Z746_ATRATE 0x02 /* R/W, Unit: mA, Range: -32768..32767 */
#define BQ27Z746_ATRATETIMETOEMPTY 0x04 /* R/O, Unit: minutes, Range: 0..65535 */
#define BQ27Z746_TEMPERATURE 0x06 /* R/O, Unit: 0.1 K, Range: 0..32767 */
#define BQ27Z746_VOLTAGE 0x08 /* R/O, Unit: mV, Range: 0..32767 */
#define BQ27Z746_BATTERYSTATUS 0x0A /* R/O, Unit: status bits */
#define BQ27Z746_CURRENT 0x0C /* R/O, Unit: mA, Range: -32768..32767 */
#define BQ27Z746_REMAININGCAPACITY 0x10 /* R/O, Unit: mAh, Range: 0..32767 */
#define BQ27Z746_FULLCHARGECAPACITY 0x12 /* R/O, Unit: mAh, Range: 0..32767 */
#define BQ27Z746_AVERAGECURRENT 0x14 /* R/O, Unit: mA, Range: -32768..32767 */
#define BQ27Z746_AVERAGETIMETOEMPTY 0x16 /* R/O, Unit: minutes, Range: 0..65535 */
#define BQ27Z746_AVERAGETIMETOFULL 0x18 /* R/O, Unit: minutes, Range: 0..65535 */
#define BQ27Z746_MAXLOADCURRENT 0x1E /* R/O, Unit: mA, Range: 0..65535 */
#define BQ27Z746_MAXLOADTIMETOEMPTY 0x20 /* R/O, Unit: minutes, Range: 0..65535 */
#define BQ27Z746_AVERAGEPOWER 0x22 /* R/O, Unit: mW, Range: -32768..32767 */
#define BQ27Z746_BTPDISCHARGESET 0x24 /* Datasheet unclear */
#define BQ27Z746_BTPCHARGESET 0x26 /* Datasheet unclear */
#define BQ27Z746_INTERNALTEMPERATURE 0x28 /* R/O, Unit: 0.1 K, Range: 0..32767 */
#define BQ27Z746_CYCLECOUNT 0x2A /* R/O, Unit: none, Range: 0..65535 */
#define BQ27Z746_RELATIVESTATEOFCHARGE 0x2C /* R/O, Unit: percent, Range: 0..100 */
#define BQ27Z746_STATEOFHEALTH 0x2E /* R/O, Unit: percent, Range: 0..100 */
#define BQ27Z746_CHARGINGVOLTAGE 0x30 /* R/O, Unit: mV, Range: 0..32767 */
#define BQ27Z746_CHARGINGCURRENT 0x32 /* R/O, Unit: mA, Range: 0..32767 */
#define BQ27Z746_TERMINATEVOLTAGE 0x34 /* R/W, Unit: mC, Range: 0..32767 */
#define BQ27Z746_TIMESTAMPUPPER 0x36 /* R/O, Unit: seconds, Range: 0..65535 */
#define BQ27Z746_TIMESTAMPLOWER 0x38 /* R/O, Unit: seconds, Range: 0..65535 */
#define BQ27Z746_QMAXCYCLES 0x3A /* R/O, Unit: none, Range: 0..65535 */
#define BQ27Z746_DESIGNCAPACITY \
0x3C /* R/O (sealed), R/W (unsealed or factory access), Unit: mAh, Range: 0..32767 */
#define BQ27Z746_ALTMANUFACTURERACCESS 0x3E /* R/W */
#define BQ27Z746_MACDATA 0x40 /* R/O, MAC data */
#define BQ27Z746_MACDATASUM 0x60 /* R/O, Checksum over MAC command and data */
#define BQ27Z746_MACDATALEN 0x61 /* R/O, Length of the MAC data */
#define BQ27Z746_VOLTHISETTHRESHOLD 0x62 /* R/W, Unit: mV, Range: 0..5000 */
#define BQ27Z746_VOLTHICLEARTHRESHOLD 0x64 /* R/W, Unit: mV, Range: 0..5000 */
#define BQ27Z746_VOLTLOSETTHRESHOLD 0x66 /* R/W, Unit: mV, Range: 0..5000 */
#define BQ27Z746_VOLTLOCLEARTHRESHOLD 0x68 /* R/W, Unit: mV, Range: 0..5000 */
#define BQ27Z746_TEMPHISETTHRESHOLD 0x6A /* R/W, Unit: degree celsius, Range: -128..127 */
#define BQ27Z746_TEMPHICLEARTHRESHOLD 0x6B /* R/W, Unit: degree celsius, Range: -128..127 */
#define BQ27Z746_TEMPLOSETTHRESHOLD 0x6C /* R/W, Unit: degree celsius, Range: -128..127 */
#define BQ27Z746_TEMPLOCLEARTHRESHOLD 0x6D /* R/W, Unit: degree celsius, Range: -128..127 */
#define BQ27Z746_INTERRUPTSTATUS 0x6E /* R/O, Unit: status bits */
#define BQ27Z746_SOCDELTASETTHRESHOLD 0x6F /* R/W, Unit: percent, Range: 0..100 */
/* MAC commands */
#define BQ27Z746_MAC_CMD_DEVICETYPE 0x0001
#define BQ27Z746_MAC_CMD_FIRMWAREVERSION 0x0002
#define BQ27Z746_MAC_CMD_HARDWAREVERSION 0x0003
#define BQ27Z746_MAC_CMD_IFCHECKSUM 0x0004
#define BQ27Z746_MAC_CMD_STATICDFSIGNATURE 0x0005
#define BQ27Z746_MAC_CMD_CHEMID 0x0006
#define BQ27Z746_MAC_CMD_PREV_MACWRITE 0x0007
#define BQ27Z746_MAC_CMD_STATICCHEMDFSIGNATURE 0x0008
#define BQ27Z746_MAC_CMD_ALLDFSIGNATURE 0x0009
#define BQ27Z746_MAC_CMD_SHELFENABLE 0x000B
#define BQ27Z746_MAC_CMD_SHELFDISABLE 0x000C
#define BQ27Z746_MAC_CMD_SHUTDOWNMODE 0x0010
#define BQ27Z746_MAC_CMD_RESET1 0x0012
#define BQ27Z746_MAC_CMD_SHIPMODEENABLE 0x0015
#define BQ27Z746_MAC_CMD_SHIPMODEDISABLE 0x0016
#define BQ27Z746_MAC_CMD_QMAX_DAY 0x0017
#define BQ27Z746_MAC_CMD_CHARGEFETTOGGLE 0x001F
#define BQ27Z746_MAC_CMD_DISCHARGEFETTOGGLE 0x0020
#define BQ27Z746_MAC_CMD_GAUGING_IT_ENABLE 0x0021
#define BQ27Z746_MAC_CMD_FET_ENABLE 0x0022
#define BQ27Z746_MAC_CMD_LIFETIMEDATACOLLECTION 0x0023
#define BQ27Z746_MAC_CMD_LIFETIMEDATARESET 0x0028
#define BQ27Z746_MAC_CMD_CALIBRATIONMODE 0x002D
#define BQ27Z746_MAC_CMD_LIFETIMEDATAFLUSH 0x002E
#define BQ27Z746_MAC_CMD_LIFETIMEDATASPEEDUPMODE 0x002F
#define BQ27Z746_MAC_CMD_SEALDEVICE 0x0030
#define BQ27Z746_MAC_CMD_SECURITYKEYS 0x0035
#define BQ27Z746_MAC_CMD_RESET2 0x0041
#define BQ27Z746_MAC_CMD_TAMBIENTSYNC 0x0047
#define BQ27Z746_MAC_CMD_DEVICE_NAME 0x004A
#define BQ27Z746_MAC_CMD_DEVICE_CHEM 0x004B
#define BQ27Z746_MAC_CMD_MANUFACTURER_NAME 0x004C
#define BQ27Z746_MAC_CMD_MANUFACTURE_DATE 0x004D
#define BQ27Z746_MAC_CMD_SERIAL_NUMBER 0x004E
#define BQ27Z746_MAC_CMD_SAFETYALERT 0x0050
#define BQ27Z746_MAC_CMD_SAFETYSTATUS 0x0051
#define BQ27Z746_MAC_CMD_OPERATIONSTATUS 0x0054
#define BQ27Z746_MAC_CMD_CHARGINGSTATUS 0x0055
#define BQ27Z746_MAC_CMD_GAUGINGSTATUS 0x0056
#define BQ27Z746_MAC_CMD_MANUFACTURINGSTATUS 0x0057
/*
 * Lifetime data block MAC commands (sequential range 0x0060..0x006B).
 * Note: LIFETIMEDATABLOCK5 (0x0064) is intentionally not defined here.
 */
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK1 0x0060
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK2 0x0061
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK3 0x0062
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK4 0x0063
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK6 0x0065
/* Was 0x0065 (a copy/paste duplicate of block 6); blocks are numbered
 * sequentially, so block 7 is 0x0066.
 */
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK7 0x0066
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK8 0x0067
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK9 0x0068
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK10 0x0069
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK11 0x006A
#define BQ27Z746_MAC_CMD_LIFETIMEDATABLOCK12 0x006B
#define BQ27Z746_MAC_CMD_MANUFACTURERINFO 0x0070
#define BQ27Z746_MAC_CMD_DASTATUS1 0x0071
#define BQ27Z746_MAC_CMD_DASTATUS2 0x0072
#define BQ27Z746_MAC_CMD_ITSTATUS1 0x0073
#define BQ27Z746_MAC_CMD_ITSTATUS2 0x0074
#define BQ27Z746_MAC_CMD_ITSTATUS3 0x0075
#define BQ27Z746_MAC_CMD_FCC_SOH 0x0077
#define BQ27Z746_MAC_CMD_FILTERED_CAPACITY 0x0078
#define BQ27Z746_MAC_CMD_MANUFACTURERINFOB 0x007A
#define BQ27Z746_MAC_CMD_MANUFACTURERINFOC 0x007B
#define BQ27Z746_MAC_CMD_FET_CONTROL_OVERRIDE 0x0097
#define BQ27Z746_MAC_CMD_SYSTEM_RESET_ENABLE 0x00A3
#define BQ27Z746_MAC_CMD_SYSTEM_RESET 0x00A4
#define BQ27Z746_MAC_CMD_BATTSENSEOUTPUT 0x00B1
#define BQ27Z746_MAC_CMD_RATABLECELL0 0x00E0
#define BQ27Z746_MAC_CMD_ROMMODE 0x0F00
#define BQ27Z746_MAC_CMD_DATAFLASHACCESS 0x4000
#define BQ27Z746_MAC_CMD_SWITCHTOHDQ 0x7C40
#define BQ27Z746_MAC_CMD_EXITCALIBRATIONOUTPUT 0xF080
#define BQ27Z746_MAC_CMD_OUTPUTCCANDADCFORCALIBRATIO 0xF081
#define BQ27Z746_MAC_CMD_OUTPUTTEMPERATURECAL 0xF083
#define BQ27Z746_MAC_CMD_PROTECTORCALIBRATION 0xF0A0
#define BQ27Z746_MAC_CMD_PROTECTORIMAGE1 0xF0A1
#define BQ27Z746_MAC_CMD_PROTECTORIMAGE2 0xF0A2
#define BQ27Z746_MAC_CMD_PROTECTORIMAGESAVE 0xF0A3
#define BQ27Z746_MAC_CMD_PROTECTORIMAGELOCK 0xF0A4
#define BQ27Z746_MAC_CMD_PROTECTORFACTORYCONFIG 0xF0A5
/* Static per-instance configuration, initialized from devicetree */
struct bq27z746_config {
	/* I2C bus and device address of the fuel gauge */
	struct i2c_dt_spec i2c;
};
#endif /* header include guard */
``` | /content/code_sandbox/drivers/fuel_gauge/bq27z746/bq27z746.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,947 |
```unknown
#
config BQ27Z746
bool "BQ27Z746 Fuel Gauge"
default y
depends on DT_HAS_TI_BQ27Z746_ENABLED
select I2C
help
Enable I2C-based driver for BQ27Z746 Fuel Gauge.
config EMUL_BQ27Z746
bool "Emulate a BQ27Z746 fuel gauge"
default y
depends on EMUL
depends on BQ27Z746
help
It provides readings which follow a simple sequence, thus allowing
test code to check that things are working as expected.
``` | /content/code_sandbox/drivers/fuel_gauge/bq27z746/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 126 |
```c
/*
*
*/
#define DT_DRV_COMPAT ti_bq27z746
#include "bq27z746.h"
#include <zephyr/kernel.h>
#include <zephyr/drivers/fuel_gauge.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/logging/log.h>
#include <string.h>
LOG_MODULE_REGISTER(BQ27Z746);
#define BQ27Z746_MAC_DATA_LEN 32
#define BQ27Z746_MAC_OVERHEAD_LEN 4 /* 2 cmd bytes, 1 length byte, 1 checksum byte */
#define BQ27Z746_MAC_COMPLETE_LEN (BQ27Z746_MAC_DATA_LEN + BQ27Z746_MAC_OVERHEAD_LEN)
/**
 * @brief Read a 16-bit register from the gauge (little-endian on the wire).
 *
 * @param dev Fuel gauge device instance
 * @param reg Register address to read
 * @param value Output: register contents in host byte order
 * @return 0 on success, negative errno from the I2C transfer otherwise
 */
static int bq27z746_read16(const struct device *dev, uint8_t reg, uint16_t *value)
{
	const struct bq27z746_config *cfg = dev->config;
	uint8_t raw[sizeof(uint16_t)];
	int rc;

	rc = i2c_burst_read_dt(&cfg->i2c, reg, raw, sizeof(raw));
	if (rc < 0) {
		LOG_ERR("Unable to read register");
		return rc;
	}

	*value = sys_get_le16(raw);

	return 0;
}
/**
 * @brief Write a 16-bit value to a gauge register (little-endian on the wire).
 *
 * @param dev Fuel gauge device instance
 * @param reg Register address to write
 * @param value Value to store
 * @return 0 on success, negative errno from the I2C transfer otherwise
 */
static int bq27z746_write16(const struct device *dev, uint8_t reg, uint16_t value)
{
	const struct bq27z746_config *cfg = dev->config;
	uint8_t msg[3] = { reg };

	sys_put_le16(value, &msg[1]);

	return i2c_write_dt(&cfg->i2c, msg, sizeof(msg));
}
/**
 * @brief Execute a ManufacturerAccess (MAC) command and read back its response.
 *
 * Writes @p cmd to the AltManufacturerAccess register, reads the complete
 * response block, and verifies both the echoed command word and the checksum
 * before copying the payload out.
 *
 * @param dev Fuel gauge device instance
 * @param cmd MAC command word (one of BQ27Z746_MAC_CMD_*)
 * @param data Output buffer: data[0] receives the payload length, the payload
 *             itself starts at data[1], so the buffer must hold len + 1 bytes
 * @param len Number of payload bytes to copy (at most BQ27Z746_MAC_DATA_LEN)
 * @return 0 on success, -EINVAL on an oversized request, -EIO on a
 *         verification failure, otherwise a negative errno from the transfer
 */
static int bq27z746_read_mac(const struct device *dev, uint16_t cmd, uint8_t *data, int len)
{
	if (len > BQ27Z746_MAC_DATA_LEN) {
		return -EINVAL;
	}

	uint8_t buf[BQ27Z746_MAC_COMPLETE_LEN];
	const struct bq27z746_config *cfg = dev->config;
	/* Instead of MAC, ALTMAC is used as recommended in the datasheet */
	int ret = bq27z746_write16(dev, BQ27Z746_ALTMANUFACTURERACCESS, cmd);

	if (ret != 0) {
		return ret;
	}

	/*
	 * The data read from BQ27Z746_ALTMANUFACTURERACCESS is:
	 * 0..1: The command (for verification)
	 * 2..33: The data
	 * 34: Checksum calculated as (uint8_t)(0xFF - (sum of all command and data bytes))
	 * 35: Length including command, checksum and length (e.g. data length + 4)
	 */
	ret = i2c_burst_read_dt(&cfg->i2c, BQ27Z746_ALTMANUFACTURERACCESS, buf,
				BQ27Z746_MAC_COMPLETE_LEN);
	if (ret != 0) {
		return ret;
	}

	/* The first two bytes read is the command and is used for verification */
	const uint16_t read_cmd = sys_get_le16(buf);

	if (read_cmd != cmd) {
		LOG_ERR("Read command 0x%x != written command 0x%x", read_cmd, cmd);
		return -EIO;
	}

	/* Verify the checksum over the command and data bytes (buf[0..33]) */
	const uint8_t checksum_actual = buf[34];
	uint8_t sum = 0; /* Intentionally 8 bit wide and overflowing */

	for (int i = 0; i < BQ27Z746_MAC_COMPLETE_LEN - 2; i++) {
		sum += buf[i];
	}

	const uint8_t checksum_expected = 0xFF - sum;

	if (checksum_expected != checksum_actual) {
		LOG_ERR("Checksum mismatch");
		return -EIO;
	}

	/* First byte of the user buffer is the length */
	data[0] = buf[35] - BQ27Z746_MAC_OVERHEAD_LEN;
	/* Copy only the data to the user buffer (= skipping the first two command bytes) */
	memcpy(&data[1], &buf[2], len);

	return ret;
}
/**
 * @brief Read one runtime property from the gauge.
 *
 * Raw 16-bit register words are scaled before being stored into @p val
 * (x1000 for current/capacity/voltage style values, x100 for the cycle
 * count). NOTE(review): the scale factors assume the gauge reports
 * mA/mAh/mV and whole cycles — confirm against the device TRM.
 *
 * @param dev Fuel gauge device instance
 * @param prop Property selector
 * @param val Output value, valid only when 0 is returned
 * @return 0 on success, -ENOTSUP for unhandled properties, otherwise a
 *         negative errno from the I2C transfer
 */
static int bq27z746_get_prop(const struct device *dev, fuel_gauge_prop_t prop,
			     union fuel_gauge_prop_val *val)
{
	int rc = 0;
	uint16_t tmp_val = 0;

	/*
	 * Possibly negative values must be cast from uint16 to int16 first to
	 * then correctly end up in the wider datatypes of `prop`.
	 */
	switch (prop) {
	case FUEL_GAUGE_AVG_CURRENT:
		rc = bq27z746_read16(dev, BQ27Z746_AVERAGECURRENT, &tmp_val);
		val->avg_current = (int16_t)tmp_val * 1000;
		break;
	case FUEL_GAUGE_CYCLE_COUNT:
		rc = bq27z746_read16(dev, BQ27Z746_CYCLECOUNT, &tmp_val);
		val->cycle_count = tmp_val * 100;
		break;
	case FUEL_GAUGE_CURRENT:
		rc = bq27z746_read16(dev, BQ27Z746_CURRENT, &tmp_val);
		val->current = (int16_t)tmp_val * 1000;
		break;
	case FUEL_GAUGE_FULL_CHARGE_CAPACITY:
		rc = bq27z746_read16(dev, BQ27Z746_FULLCHARGECAPACITY, &tmp_val);
		val->full_charge_capacity = tmp_val * 1000;
		break;
	case FUEL_GAUGE_REMAINING_CAPACITY:
		rc = bq27z746_read16(dev, BQ27Z746_REMAININGCAPACITY, &tmp_val);
		val->remaining_capacity = tmp_val * 1000;
		break;
	case FUEL_GAUGE_RUNTIME_TO_EMPTY:
		rc = bq27z746_read16(dev, BQ27Z746_AVERAGETIMETOEMPTY, &tmp_val);
		val->runtime_to_empty = tmp_val;
		break;
	case FUEL_GAUGE_RUNTIME_TO_FULL:
		rc = bq27z746_read16(dev, BQ27Z746_AVERAGETIMETOFULL, &tmp_val);
		val->runtime_to_full = tmp_val;
		break;
	case FUEL_GAUGE_SBS_MFR_ACCESS:
		rc = bq27z746_read16(dev, BQ27Z746_MANUFACTURERACCESS, &tmp_val);
		val->sbs_mfr_access_word = tmp_val;
		break;
	case FUEL_GAUGE_RELATIVE_STATE_OF_CHARGE:
		rc = bq27z746_read16(dev, BQ27Z746_RELATIVESTATEOFCHARGE, &tmp_val);
		val->relative_state_of_charge = tmp_val;
		break;
	case FUEL_GAUGE_TEMPERATURE:
		rc = bq27z746_read16(dev, BQ27Z746_TEMPERATURE, &tmp_val);
		val->temperature = tmp_val;
		break;
	case FUEL_GAUGE_VOLTAGE:
		rc = bq27z746_read16(dev, BQ27Z746_VOLTAGE, &tmp_val);
		val->voltage = tmp_val * 1000;
		break;
	case FUEL_GAUGE_SBS_ATRATE:
		rc = bq27z746_read16(dev, BQ27Z746_ATRATE, &tmp_val);
		val->sbs_at_rate = (int16_t)tmp_val;
		break;
	case FUEL_GAUGE_SBS_ATRATE_TIME_TO_EMPTY:
		rc = bq27z746_read16(dev, BQ27Z746_ATRATETIMETOEMPTY, &tmp_val);
		val->sbs_at_rate_time_to_empty = tmp_val;
		break;
	case FUEL_GAUGE_CHARGE_VOLTAGE:
		rc = bq27z746_read16(dev, BQ27Z746_CHARGINGVOLTAGE, &tmp_val);
		val->chg_voltage = tmp_val * 1000;
		break;
	case FUEL_GAUGE_CHARGE_CURRENT:
		rc = bq27z746_read16(dev, BQ27Z746_CHARGINGCURRENT, &tmp_val);
		val->chg_current = tmp_val * 1000;
		break;
	case FUEL_GAUGE_STATUS:
		rc = bq27z746_read16(dev, BQ27Z746_BATTERYSTATUS, &tmp_val);
		val->fg_status = tmp_val;
		break;
	case FUEL_GAUGE_DESIGN_CAPACITY:
		rc = bq27z746_read16(dev, BQ27Z746_DESIGNCAPACITY, &tmp_val);
		val->design_cap = tmp_val;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
/**
 * @brief Read a string property (manufacturer/device name, chemistry).
 *
 * The destination buffer must exactly match the size of the corresponding
 * SBS string struct; its first byte receives the payload length reported
 * by the MAC response.
 *
 * @return 0 on success, -EINVAL on a buffer size mismatch, -ENOTSUP for
 *         other properties, otherwise a negative errno from the transfer
 */
static int bq27z746_get_buffer_prop(const struct device *dev,
				    fuel_gauge_prop_t property_type, void *dst,
				    size_t dst_len)
{
	uint16_t mac_cmd;
	size_t expected_len;

	switch (property_type) {
	case FUEL_GAUGE_MANUFACTURER_NAME:
		mac_cmd = BQ27Z746_MAC_CMD_MANUFACTURER_NAME;
		expected_len = sizeof(struct sbs_gauge_manufacturer_name);
		break;
	case FUEL_GAUGE_DEVICE_NAME:
		mac_cmd = BQ27Z746_MAC_CMD_DEVICE_NAME;
		expected_len = sizeof(struct sbs_gauge_device_name);
		break;
	case FUEL_GAUGE_DEVICE_CHEMISTRY:
		mac_cmd = BQ27Z746_MAC_CMD_DEVICE_CHEM;
		expected_len = sizeof(struct sbs_gauge_device_chemistry);
		break;
	default:
		return -ENOTSUP;
	}

	if (dst_len != expected_len) {
		return -EINVAL;
	}

	return bq27z746_read_mac(dev, mac_cmd, (uint8_t *)dst, dst_len - 1);
}
/**
 * @brief Write a runtime property of the fuel gauge.
 *
 * Only the SBS ManufacturerAccess word and the AtRate value are writable.
 *
 * Fix: the previous implementation also assigned a zeroed local to the
 * by-value @p val parameter after each write — a dead store with no
 * effect on the caller; those statements were removed.
 *
 * @param dev Fuel gauge device instance
 * @param prop Property selector
 * @param val Value to write
 * @return 0 on success, -ENOTSUP for unsupported properties, otherwise a
 *         negative errno from the I2C transfer
 */
static int bq27z746_set_prop(const struct device *dev, fuel_gauge_prop_t prop,
			     union fuel_gauge_prop_val val)
{
	switch (prop) {
	case FUEL_GAUGE_SBS_MFR_ACCESS:
		return bq27z746_write16(dev, BQ27Z746_MANUFACTURERACCESS,
					val.sbs_mfr_access_word);
	case FUEL_GAUGE_SBS_ATRATE:
		return bq27z746_write16(dev, BQ27Z746_ATRATE, val.sbs_at_rate);
	default:
		return -ENOTSUP;
	}
}
/**
 * @brief Driver init hook: verify the underlying I2C bus is ready.
 *
 * @return 0 on success, -ENODEV when the bus device is not ready
 */
static int bq27z746_init(const struct device *dev)
{
	const struct bq27z746_config *cfg = dev->config;

	if (!device_is_ready(cfg->i2c.bus)) {
		LOG_ERR("Bus device is not ready");
		return -ENODEV;
	}

	return 0;
}
/* Fuel gauge driver API hooks for the BQ27Z746 */
static const struct fuel_gauge_driver_api bq27z746_driver_api = {
	.get_property = &bq27z746_get_prop,
	.set_property = &bq27z746_set_prop,
	.get_buffer_property = &bq27z746_get_buffer_prop,
};
/* Per-devicetree-instance config and device definition */
#define BQ27Z746_INIT(index)                                                                       \
                                                                                                   \
	static const struct bq27z746_config bq27z746_config_##index = {                            \
		.i2c = I2C_DT_SPEC_INST_GET(index),                                                \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(index, &bq27z746_init, NULL, NULL, &bq27z746_config_##index,         \
			      POST_KERNEL, CONFIG_FUEL_GAUGE_INIT_PRIORITY, &bq27z746_driver_api);

DT_INST_FOREACH_STATUS_OKAY(BQ27Z746_INIT)
``` | /content/code_sandbox/drivers/fuel_gauge/bq27z746/bq27z746.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,592 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_SENSOR_MAX17048_MAX17048_H_
#define ZEPHYR_DRIVERS_SENSOR_MAX17048_MAX17048_H_
#include <zephyr/drivers/i2c.h>
#define REGISTER_VCELL 0x02
#define REGISTER_SOC 0x04
#define REGISTER_MODE 0x06
#define REGISTER_VERSION 0x08
#define REGISTER_HIBRT 0x0A
#define REGISTER_CONFIG 0x0C
#define REGISTER_VALRT 0x14
#define REGISTER_CRATE 0x16
#define REGISTER_VRESET 0x18
#define REGISTER_CHIP_ID 0x19
#define REGISTER_STATUS 0x1A
#define REGISTER_TABLE 0x40
#define REGISTER_COMMAND 0xFE
#define RESET_COMMAND 0x5400
#define QUICKSTART_MODE 0x4000
/* Static per-instance configuration, initialized from devicetree */
struct max17048_config {
	/* I2C bus and device address of the fuel gauge */
	struct i2c_dt_spec i2c;
};
#endif /* ZEPHYR_DRIVERS_SENSOR_MAX17048_MAX17048_H_ */
``` | /content/code_sandbox/drivers/fuel_gauge/max17048/max17048.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 229 |
```c
/* max17048.c - Driver for max17048 battery fuel gauge */
/*
*
*/
#define DT_DRV_COMPAT maxim_max17048
#include "max17048.h"
#include <zephyr/drivers/fuel_gauge.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/pm/device.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(MAX17048);
#if DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 0
#warning "MAX17048 driver enabled without any devices"
#endif
/**
 * Run-time readings cached by the driver; refreshed on every property read.
 */
struct max17048_data {
	/* Charge as percentage */
	uint8_t charge;
	/* Voltage as uV */
	uint32_t voltage;
	/* Time in minutes */
	uint16_t time_to_full;
	uint16_t time_to_empty;
	/* True if battery charging, false if discharging */
	bool charging;
};
/**
 * @brief Read one 16-bit register from the MAX17048.
 *
 * A read is a write of the register address followed by a two-byte read;
 * the device returns register contents in big-endian order.
 *
 * @param dev Fuel gauge device instance
 * @param registerId Register address to read
 * @param response Output: register contents in host byte order
 * @return 0 on success, negative errno from the I2C transfer otherwise
 */
int max17048_read_register(const struct device *dev, uint8_t registerId, uint16_t *response)
{
	const struct max17048_config *cfg = dev->config;
	uint8_t raw[sizeof(uint16_t)];
	int rc;

	rc = i2c_write_read_dt(&cfg->i2c, &registerId, sizeof(registerId), raw, sizeof(raw));
	if (rc != 0) {
		LOG_ERR("Unable to read register, error %d", rc);
		return rc;
	}

	*response = sys_get_be16(raw);

	return 0;
}
/**
* Raw value from the internal ADC
*/
int max17048_adc(const struct device *i2c_dev, uint16_t *response)
{
return max17048_read_register(i2c_dev, REGISTER_VCELL, response);
}
/**
 * @brief Read the battery voltage in microvolts.
 *
 * The VCELL register resolution is 78.125 uV per LSB (MAX17048/MAX17049
 * datasheet, Table 2, Register Summary; the MAX17048 supports one cell).
 *
 * Fix: the scaling is now done in exact integer arithmetic
 * (78.125 = 625 / 8) instead of a floating-point multiply. The result is
 * identical: the raw value is non-negative and both forms truncate toward
 * zero, and 65535 * 625 fits comfortably in 32 bits.
 *
 * @return 0 on success, negative errno from the register read otherwise
 */
int max17048_voltage(const struct device *i2c_dev, uint32_t *response)
{
	uint16_t raw_voltage;
	int rc = max17048_adc(i2c_dev, &raw_voltage);

	if (rc < 0) {
		return rc;
	}

	*response = ((uint32_t)raw_voltage * 625U) / 8U;

	return 0;
}
/**
 * @brief Read the remaining state of charge as a whole percentage.
 *
 * The SOC register reports 1%/256 per LSB (datasheet, Table 2, Register
 * Summary), so the percentage is the raw reading divided by 256.
 *
 * @return 0 on success, negative errno from the register read otherwise
 */
int max17048_percent(const struct device *i2c_dev, uint8_t *response)
{
	uint16_t raw_soc;
	int rc = max17048_read_register(i2c_dev, REGISTER_SOC, &raw_soc);

	if (rc < 0) {
		return rc;
	}

	/* raw / 256, expressed as a shift on the unsigned value */
	*response = raw_soc >> 8;

	return 0;
}
/**
 * @brief Read the charge/discharge rate.
 *
 * CRATE reports 0.208%/hr per LSB (datasheet, Table 2, Register Summary).
 * To avoid floating point, the raw value is multiplied by 208, so the
 * result is 1000 times the percentage per hour; positive means charging,
 * negative means discharging.
 *
 * Fix: max17048_read_register() takes a uint16_t pointer, but the previous
 * code passed @p response (an int16_t pointer) directly — an incompatible
 * pointer type. Read into a local and reinterpret as signed instead.
 *
 * NOTE(review): the x208 scaling can overflow int16_t for raw readings
 * above ~157 LSB; real-world rates are expected to stay small, but confirm
 * against the intended battery profile.
 *
 * @return 0 on success, negative errno from the register read otherwise
 */
int max17048_crate(const struct device *i2c_dev, int16_t *response)
{
	uint16_t raw;
	int rc = max17048_read_register(i2c_dev, REGISTER_CRATE, &raw);

	if (rc < 0) {
		return rc;
	}

	*response = (int16_t)raw * 208;

	return 0;
}
/**
 * @brief Initialize and verify the chip.
 *
 * The datasheet says the version register should read 0x001x. If not,
 * either the chip is malfunctioning or it is not a MAX17048 at all.
 *
 * Fix: check that the bus device is ready *before* issuing the first I2C
 * transaction; the previous code read the version register first.
 *
 * @return 0 on success, -ENODEV on a missing bus or failed identification,
 *         otherwise a negative errno from the register read
 */
static int max17048_init(const struct device *dev)
{
	const struct max17048_config *cfg = dev->config;
	uint16_t version;
	int rc;

	if (!device_is_ready(cfg->i2c.bus)) {
		LOG_ERR("Bus device is not ready");
		return -ENODEV;
	}

	rc = max17048_read_register(dev, REGISTER_VERSION, &version);
	if (rc < 0) {
		LOG_ERR("Cannot read from I2C");
		return rc;
	}

	/* Only the upper bits identify the part; low nibble varies */
	version = version & 0xFFF0;
	if (version != 0x10) {
		LOG_ERR("Something found at the provided I2C address, but it is not a MAX17048");
		LOG_ERR("The version registers should be 0x10 but got %x. Maybe your wiring is "
			"wrong or it is a fake chip\n",
			version);
		return -ENODEV;
	}

	return 0;
}
/**
 * @brief Copy one cached reading into the caller's property value.
 *
 * @return 0 on success, -ENOTSUP if the property is not handled
 */
static int max17048_get_single_prop_impl(const struct device *dev, fuel_gauge_prop_t prop,
					 union fuel_gauge_prop_val *val)
{
	const struct max17048_data *data = dev->data;

	switch (prop) {
	case FUEL_GAUGE_RUNTIME_TO_EMPTY:
		val->runtime_to_empty = data->time_to_empty;
		return 0;
	case FUEL_GAUGE_RUNTIME_TO_FULL:
		val->runtime_to_full = data->time_to_full;
		return 0;
	case FUEL_GAUGE_RELATIVE_STATE_OF_CHARGE:
		val->relative_state_of_charge = data->charge;
		return 0;
	case FUEL_GAUGE_VOLTAGE:
		val->voltage = data->voltage;
		return 0;
	default:
		return -ENOTSUP;
	}
}
/**
 * @brief Refresh all cached readings, then return the requested property.
 *
 * Each call performs three register reads (SOC, VCELL, CRATE), updates the
 * cached charge/voltage/charging state and time-to-full/time-to-empty
 * estimates, and finally copies the single requested property into @p val.
 *
 * @return 0 on success, -ENOTSUP for unhandled properties, otherwise a
 *         negative errno from the first failing register read
 */
static int max17048_get_prop(const struct device *dev, fuel_gauge_prop_t prop,
			     union fuel_gauge_prop_val *val)
{
	struct max17048_data *data = dev->data;
	int rc = max17048_percent(dev, &data->charge);
	int16_t crate;
	int ret;

	if (rc < 0) {
		LOG_ERR("Error while reading battery percentage");
		return rc;
	}

	rc = max17048_voltage(dev, &data->voltage);
	if (rc < 0) {
		LOG_ERR("Error while reading battery voltage");
		return rc;
	}

	/**
	 * Crate (current rate) is the current percentage of the battery charged or drained
	 * per hour
	 */
	rc = max17048_crate(dev, &crate);
	if (rc < 0) {
		LOG_ERR("Error while reading battery current rate");
		return rc;
	}

	if (crate != 0) {
		/**
		 * May take some time until the chip detects the change between discharging to
		 * charging (and vice versa) especially if your device consumes little power
		 */
		data->charging = crate > 0;

		/**
		 * In the following code, we multiply by 1000 the charge to increase the
		 * precision. If we just truncate the division without this multiplier,
		 * the precision lost is very significant when converting it into minutes
		 * (the value given is in hours)
		 *
		 * The value coming from crate is already 1000 times higher (check the
		 * function max17048_crate to see the reason) so the multiplier for the
		 * charge will be 1000000
		 */
		if (data->charging) {
			uint8_t percentage_pending = 100 - data->charge;
			uint32_t hours_pending = percentage_pending * 1000000 / crate;

			data->time_to_empty = 0;
			data->time_to_full = hours_pending * 60 / 1000;
		} else {
			/* Discharging */
			uint32_t hours_pending = data->charge * 1000000 / -crate;

			data->time_to_empty = hours_pending * 60 / 1000;
			data->time_to_full = 0;
		}
	} else {
		/**
		 * This case is to avoid a division by 0 when the charge rate is the same
		 * than consumption rate. It could also happen when the sensor is still
		 * calibrating the battery
		 */
		data->charging = false;
		data->time_to_full = 0;
		data->time_to_empty = 0;
	}

	ret = max17048_get_single_prop_impl(dev, prop, val);

	return ret;
}
/* Only property reads are supported by this driver */
static const struct fuel_gauge_driver_api max17048_driver_api = {
	.get_property = &max17048_get_prop,
};

/* Per-devicetree-instance data/config and device definition */
#define MAX17048_DEFINE(inst)                                                                      \
	static struct max17048_data max17048_data_##inst;                                          \
                                                                                                   \
	static const struct max17048_config max17048_config_##inst = {                             \
		.i2c = I2C_DT_SPEC_INST_GET(inst)};                                                \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(inst, &max17048_init, NULL, &max17048_data_##inst,                   \
			      &max17048_config_##inst, POST_KERNEL,                                \
			      CONFIG_FUEL_GAUGE_INIT_PRIORITY, &max17048_driver_api);

DT_INST_FOREACH_STATUS_OKAY(MAX17048_DEFINE)
``` | /content/code_sandbox/drivers/fuel_gauge/max17048/max17048.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,157 |
```c
/*
*
*
* Emulator for max17048 fuel gauge
*/
#define DT_DRV_COMPAT maxim_max17048
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(maxim_max17048);
#include <zephyr/device.h>
#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/drivers/i2c_emul.h>
#include <zephyr/sys/byteorder.h>
#include "max17048.h"
/* Backing value returned for REGISTER_CRATE reads; defaults to a positive
 * (charging) rate of 0x4000.
 */
static int crate_value = 0x4000;

/* Test hook: override the CRATE register contents returned by the emulator */
void emul_max17048_set_crate_status(int value)
{
	crate_value = value;
}
/** Static configuration for the emulator */
struct max17048_emul_cfg {
/** I2C address of emulator */
uint16_t addr;
};
/* The emulated registers are read-only: every write attempt fails with -EIO */
static int emul_max17048_reg_write(const struct emul *target, int reg, int val)
{
	return -EIO;
}
/**
 * @brief Return canned register contents for the emulated MAX17048.
 *
 * VERSION, SOC and VCELL are fixed values; CRATE is backed by the value
 * set through emul_max17048_set_crate_status(). Any other register read
 * is reported as an I/O error.
 */
static int emul_max17048_reg_read(const struct emul *target, int reg, int *val)
{
	int contents;

	switch (reg) {
	case REGISTER_VERSION:
		contents = 0x1000;
		break;
	case REGISTER_CRATE:
		contents = crate_value;
		break;
	case REGISTER_SOC:
		contents = 0x3525;
		break;
	case REGISTER_VCELL:
		contents = 0x4387;
		break;
	default:
		LOG_ERR("Unknown register 0x%x read", reg);
		return -EIO;
	}

	*val = contents;
	LOG_INF("read 0x%x = 0x%x", reg, *val);

	return 0;
}
/**
 * @brief I2C transfer hook for the emulated MAX17048.
 *
 * Supports only the shapes the driver uses: a two-message sequence of a
 * 1-byte register-address write followed by a 2-byte read, or a word write.
 *
 * Fixes: declare @c val as int to match emul_max17048_reg_read()'s
 * parameter type (was unsigned int, an incompatible pointer type at the
 * call site), and fail the transfer on an unexpected write length instead
 * of parsing a possibly short buffer with sys_get_le16().
 */
static int max17048_emul_transfer_i2c(const struct emul *target, struct i2c_msg *msgs,
				      int num_msgs, int addr)
{
	/* Largely copied from emul_bmi160.c */
	int val;
	int reg;
	int rc;

	__ASSERT_NO_MSG(msgs && num_msgs);

	i2c_dump_msgs_rw(target->dev, msgs, num_msgs, addr, false);
	switch (num_msgs) {
	case 2:
		if (msgs->flags & I2C_MSG_READ) {
			LOG_ERR("Unexpected read");
			return -EIO;
		}
		if (msgs->len != 1) {
			LOG_ERR("Unexpected msg0 length %d", msgs->len);
			return -EIO;
		}
		reg = msgs->buf[0];

		/* Now process the 'read' part of the message */
		msgs++;
		if (msgs->flags & I2C_MSG_READ) {
			switch (msgs->len - 1) {
			case 1:
				rc = emul_max17048_reg_read(target, reg, &val);
				if (rc) {
					/* Return before writing bad value to message buffer */
					return rc;
				}

				/* SBS uses SMBus, which sends data in little-endian format. */
				sys_put_le16(val, msgs->buf);
				break;
			default:
				LOG_ERR("Unexpected msg1 length %d", msgs->len);
				return -EIO;
			}
		} else {
			/* We write a word (2 bytes by the SBS spec) */
			if (msgs->len != 2) {
				LOG_ERR("Unexpected msg1 length %d", msgs->len);
				return -EIO;
			}

			uint16_t value = sys_get_le16(msgs->buf);

			rc = emul_max17048_reg_write(target, reg, value);
		}
		break;
	default:
		LOG_ERR("Invalid number of messages: %d", num_msgs);
		return -EIO;
	}

	return rc;
}
static const struct i2c_emul_api max17048_emul_api_i2c = {
.transfer = max17048_emul_transfer_i2c,
};
/**
 * Set up a new emulator (I2C)
 *
 * @param target Emulation information
 * @param parent Device to emulate
 * @return 0 indicating success (always)
 */
static int emul_max17048_init(const struct emul *target, const struct device *parent)
{
	ARG_UNUSED(target);
	ARG_UNUSED(parent);

	return 0;
}
/*
* Main instantiation macro.
*/
#define MAX17048_EMUL(n) \
static const struct max17048_emul_cfg max17048_emul_cfg_##n = { \
.addr = DT_INST_REG_ADDR(n), \
}; \
EMUL_DT_INST_DEFINE(n, emul_max17048_init, NULL, \
&max17048_emul_cfg_##n, &max17048_emul_api_i2c, NULL)
DT_INST_FOREACH_STATUS_OKAY(MAX17048_EMUL)
``` | /content/code_sandbox/drivers/fuel_gauge/max17048/emul_max17048.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 998 |
```unknown
config AUDIO_TLV320DAC
bool "TLV320DAC310x DAC support"
default y
depends on DT_HAS_TI_TLV320DAC_ENABLED
select I2C
depends on GPIO
help
Enable TLV320DAC support on the selected board
``` | /content/code_sandbox/drivers/audio/Kconfig.tlv320dac | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 59 |
```unknown
config AUDIO_MPXXDTYY
bool "ST Digital PDM microphone attached to I2S support"
default y
depends on DT_HAS_ST_MPXXDTYY_ENABLED
select I2S
select HAS_STLIB
help
Enable MPXXDTYY microphone support on the selected board
``` | /content/code_sandbox/drivers/audio/Kconfig.mpxxdtyy | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 63 |
```unknown
# MAX17048 Li-Ion battery fuel gauge
config MAX17048
bool "MAX17048 Li-Po fuel gauge"
default y
depends on DT_HAS_MAXIM_MAX17048_ENABLED
select I2C
help
Enable driver for the MAX17048 fuel gauge device.
config EMUL_MAX17048
bool "Emulate a MAX17048 fuel gauge"
default y
depends on EMUL
depends on MAX17048
help
It provides readings which follow a simple sequence, thus allowing
test code to check that things are working as expected.
``` | /content/code_sandbox/drivers/fuel_gauge/max17048/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 124 |
```c
/*
*
*/
#define DT_DRV_COMPAT st_mpxxdtyy
#include <zephyr/devicetree.h>
#include "mpxxdtyy.h"
#define LOG_LEVEL CONFIG_AUDIO_DMIC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(mpxxdtyy);
/* Mask selecting the even-position (left channel) PDM bits of a byte */
#define CHANNEL_MASK 0x55

/*
 * De-interleave lookup table: indexed by a byte whose odd bits have been
 * masked off, it compacts the four remaining (even-position) bits into the
 * low nibble of the result. Used by left_channel()/right_channel() below.
 */
static uint8_t ch_demux[128] = {
	0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03,
	0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03,
	0x04, 0x05, 0x04, 0x05, 0x06, 0x07, 0x06, 0x07,
	0x04, 0x05, 0x04, 0x05, 0x06, 0x07, 0x06, 0x07,
	0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03,
	0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03,
	0x04, 0x05, 0x04, 0x05, 0x06, 0x07, 0x06, 0x07,
	0x04, 0x05, 0x04, 0x05, 0x06, 0x07, 0x06, 0x07,
	0x08, 0x09, 0x08, 0x09, 0x0a, 0x0b, 0x0a, 0x0b,
	0x08, 0x09, 0x08, 0x09, 0x0a, 0x0b, 0x0a, 0x0b,
	0x0c, 0x0d, 0x0c, 0x0d, 0x0e, 0x0f, 0x0e, 0x0f,
	0x0c, 0x0d, 0x0c, 0x0d, 0x0e, 0x0f, 0x0e, 0x0f,
	0x08, 0x09, 0x08, 0x09, 0x0a, 0x0b, 0x0a, 0x0b,
	0x08, 0x09, 0x08, 0x09, 0x0a, 0x0b, 0x0a, 0x0b,
	0x0c, 0x0d, 0x0c, 0x0d, 0x0e, 0x0f, 0x0e, 0x0f,
	0x0c, 0x0d, 0x0c, 0x0d, 0x0e, 0x0f, 0x0e, 0x0f
};
/* Compact the even-position PDM bits of two input bytes into one
 * left-channel byte (low nibble from a, high nibble from b).
 */
static uint8_t left_channel(uint8_t a, uint8_t b)
{
	uint8_t lo = ch_demux[a & CHANNEL_MASK];
	uint8_t hi = ch_demux[b & CHANNEL_MASK];

	return lo | (uint8_t)(hi << 4);
}
/* Compact the odd-position PDM bits of two input bytes into one
 * right-channel byte (shift right once, then reuse the even-bit table).
 */
static uint8_t right_channel(uint8_t a, uint8_t b)
{
	uint8_t lo = ch_demux[(a >> 1) & CHANNEL_MASK];
	uint8_t hi = ch_demux[(b >> 1) & CHANNEL_MASK];

	return lo | (uint8_t)(hi << 4);
}
/**
 * @brief Configure the PDM-to-PCM software filter library.
 *
 * Picks an oversampling (decimation) factor of 64 or 128 such that the
 * resulting PDM bit clock falls within the configured min/max limits, then
 * initializes one filter instance per requested channel.
 *
 * @param dev Microphone device instance
 * @param cfg DMIC configuration (PCM rate, channel count, PDM clock limits)
 * @return the chosen decimation factor (64 or 128), or 0 when no factor
 *         yields a PDM clock within the configured limits
 */
uint16_t sw_filter_lib_init(const struct device *dev, struct dmic_cfg *cfg)
{
	struct mpxxdtyy_data *const data = dev->data;
	TPDMFilter_InitStruct *pdm_filter = &data->pdm_filter[0];
	uint16_t factor;
	uint32_t audio_freq = cfg->streams->pcm_rate;
	int i;

	/* calculate oversampling factor based on pdm clock */
	for (factor = 64U; factor <= 128U; factor += 64U) {
		uint32_t pdm_bit_clk = (audio_freq * factor *
					cfg->channel.req_num_chan);

		if (pdm_bit_clk >= cfg->io.min_pdm_clk_freq &&
		    pdm_bit_clk <= cfg->io.max_pdm_clk_freq) {
			break;
		}
	}

	/* loop ran to completion: no factor satisfied the clock limits */
	if (factor != 64U && factor != 128U) {
		return 0;
	}

	for (i = 0; i < cfg->channel.req_num_chan; i++) {
		/* init the filter lib */
		pdm_filter[i].LP_HZ = audio_freq / 2U;
		pdm_filter[i].HP_HZ = 10;
		pdm_filter[i].Fs = audio_freq;
		pdm_filter[i].Out_MicChannels = cfg->channel.req_num_chan;
		pdm_filter[i].In_MicChannels = cfg->channel.req_num_chan;
		pdm_filter[i].Decimation = factor;
		pdm_filter[i].MaxVolume = 64;

		Open_PDM_Filter_Init(&data->pdm_filter[i]);
	}

	return factor;
}
/**
 * @brief Convert one block of raw PDM samples to PCM.
 *
 * First pass reorders the PDM bytes in place (byte-swap for mono,
 * channel de-interleave for stereo), then the OpenPDM filter library is
 * run once per channel per millisecond of output.
 *
 * @param pdm_filter Array of initialized filter instances (one per channel)
 * @param pdm_block Input PDM data, modified in place during reordering
 * @param pcm_block Output PCM buffer (uint16_t samples)
 * @param pdm_size Size of @p pdm_block in bytes
 * @param pcm_size Size of @p pcm_block in bytes
 * @return 0 on success, -EINVAL on NULL arguments or an unsupported
 *         channel count / decimation factor
 */
int sw_filter_lib_run(TPDMFilter_InitStruct *pdm_filter,
		      void *pdm_block, void *pcm_block,
		      size_t pdm_size, size_t pcm_size)
{
	int i, j;
	int pdm_offset;
	uint8_t a, b;

	if (pdm_block == NULL || pcm_block == NULL || pdm_filter == NULL) {
		return -EINVAL;
	}

	for (i = 0; i < pdm_size/2; i++) {
		switch (pdm_filter[0].In_MicChannels) {
		case 1: /* MONO */
			((uint16_t *)pdm_block)[i] = HTONS(((uint16_t *)pdm_block)[i]);
			break;

		case 2: /* STEREO */
			/* NOTE(review): this inner condition is always true
			 * inside case 2 and could be removed.
			 */
			if (pdm_filter[0].In_MicChannels > 1) {
				a = ((uint8_t *)pdm_block)[2*i];
				b = ((uint8_t *)pdm_block)[2*i + 1];

				((uint8_t *)pdm_block)[2*i] = left_channel(a, b);
				((uint8_t *)pdm_block)[2*i + 1] = right_channel(a, b);
			}
			break;

		default:
			return -EINVAL;
		}
	}

	/* Fs / 1000 PCM samples are produced per channel per millisecond */
	for (j = 0; j < pcm_size / 2; j += pdm_filter[0].Fs / 1000) {
		/*
		 * The number of PDM bytes per PCM sample is the decimation factor
		 * divided by the number of bits per byte (8). We need to skip a number of
		 * PDM bytes equivalent to the number of PCM samples, times the number of
		 * channels.
		 */
		pdm_offset = j * (pdm_filter[0].Decimation / 8) * pdm_filter[0].In_MicChannels;

		for (i = 0; i < pdm_filter[0].In_MicChannels; i++) {
			switch (pdm_filter[0].Decimation) {
			case 64:
				Open_PDM_Filter_64(&((uint8_t *) pdm_block)[pdm_offset + i],
						   &((uint16_t *) pcm_block)[j + i],
						   pdm_filter->MaxVolume,
						   &pdm_filter[i]);
				break;

			case 128:
				Open_PDM_Filter_128(&((uint8_t *) pdm_block)[pdm_offset + i],
						    &((uint16_t *) pcm_block)[j + i],
						    pdm_filter->MaxVolume,
						    &pdm_filter[i]);
				break;

			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}
/* DMIC API hooks; only the I2S-backed implementation is compiled in */
static const struct _dmic_ops mpxxdtyy_driver_api = {
#if DT_ANY_INST_ON_BUS_STATUS_OKAY(i2s)
	.configure = mpxxdtyy_i2s_configure,
	.trigger = mpxxdtyy_i2s_trigger,
	.read = mpxxdtyy_i2s_read,
#endif /* DT_ANY_INST_ON_BUS_STATUS_OKAY(i2s) */
};

/* Device init: verify the bus master the microphone sits on is ready */
static int mpxxdtyy_initialize(const struct device *dev)
{
	const struct mpxxdtyy_config *config = dev->config;
	struct mpxxdtyy_data *const data = dev->data;

	if (!device_is_ready(config->comm_master)) {
		return -ENODEV;
	}

	data->state = DMIC_STATE_INITIALIZED;

	return 0;
}

/* Single-instance configuration/data and device definition */
static const struct mpxxdtyy_config mpxxdtyy_config = {
	.comm_master = DEVICE_DT_GET(DT_INST_BUS(0)),
};

static struct mpxxdtyy_data mpxxdtyy_data;

DEVICE_DT_INST_DEFINE(0, mpxxdtyy_initialize, NULL, &mpxxdtyy_data,
		      &mpxxdtyy_config, POST_KERNEL,
		      CONFIG_AUDIO_DMIC_INIT_PRIORITY, &mpxxdtyy_driver_api);
``` | /content/code_sandbox/drivers/audio/mpxxdtyy.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,082 |
```unknown
config AUDIO_DMIC_MCUX
bool "DMIC driver for MCUX"
default y
depends on DT_HAS_NXP_DMIC_ENABLED
select DMA
help
Enable support for DMIC on NXP MCUX SoC's
if AUDIO_DMIC_MCUX
config DMIC_MCUX_DMA_BUFFERS
int "Number of buffers to reserve for DMIC DMA"
default 2
range 2 16
help
This determines how many buffers the driver should allocate and
reserve for the DMA engine. The memory slab used with the DMIC
API should provide at least one more buffer than this value, since
a buffer will always be in the RX queue.
config DMIC_MCUX_QUEUE_SIZE
int "Size of DMIC buffer queue"
default 8
help
This sets the size of the RX buffer queue for the DMIC. Up to this
many buffers may be queued by the DMIC once it is triggered, before
the application must read buffers to avoid data being dropped.
endif # AUDIO_DMIC_MCUX
``` | /content/code_sandbox/drivers/audio/Kconfig.dmic_mcux | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 225 |
```unknown
config AUDIO_DMIC_NRFX_PDM
bool "nRF PDM nrfx driver"
default y
depends on DT_HAS_NORDIC_NRF_PDM_ENABLED
select NRFX_PDM
select PINCTRL
help
Enable support for nrfx PDM driver for nRF MCU series.
``` | /content/code_sandbox/drivers/audio/Kconfig.dmic_pdm_nrfx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 67 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_AUDIO_TAS6422DAC_H_
#define ZEPHYR_DRIVERS_AUDIO_TAS6422DAC_H_
#include <zephyr/sys/util_macro.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Mode Control Register */
#define MODE_CTRL_ADDR 0x00
#define MODE_CTRL_RESET BIT(7)
#define MODE_CTRL_RESET_MASK BIT(7)
#define MODE_CTRL_PBTL_CH12 BIT(4)
#define MODE_CTRL_PBTL_CH12_MASK BIT(4)
#define MODE_CTRL_CH1_LO_MODE BIT(3)
#define MODE_CTRL_CH1_LO_MODE_MASK BIT(3)
#define MODE_CTRL_CH2_LO_MODE BIT(2)
#define MODE_CTRL_CH2_LO_MODE_MASK BIT(2)
/* Miscellaneous Control 1 Register */
#define MISC_CTRL_1_ADDR 0x01
#define MISC_CTRL_1_HPF_BYPASS BIT(7)
#define MISC_CTRL_1_HPF_BYPASS_MASK BIT(7)
#define MISC_CTRL_1_OTW_CONTROL_MASK (BIT_MASK(2) << 5)
#define MISC_CTRL_1_OTW_CONTROL(val) (((val) << 5) & MISC_CTRL_1_OTW_CONTROL_MASK)
#define MISC_CTRL_1_OTW_CONTROL_140_DEGREE 0
#define MISC_CTRL_1_OTW_CONTROL_130_DEGREE 1
#define MISC_CTRL_1_OTW_CONTROL_120_DEGREE 2
#define MISC_CTRL_1_OTW_CONTROL_110_DEGREE 3
#define MISC_CTRL_1_OC_CONTROL BIT(4)
#define MISC_CTRL_1_OC_CONTROL_MASK BIT(4)
#define MISC_CTRL_1_VOLUME_RATE_MASK (BIT_MASK(2) << 2)
#define MISC_CTRL_1_VOLUME_RATE(val) (((val) << 2) & MISC_CTRL_1_VOLUME_RATE_MASK)
#define MISC_CTRL_1_VOLUME_RATE_1_STEP_EVERY_1_FSYNC 0
#define MISC_CTRL_1_VOLUME_RATE_1_STEP_EVERY_2_FSYNC 1
#define MISC_CTRL_1_VOLUME_RATE_1_STEP_EVERY_4_FSYNC 2
#define MISC_CTRL_1_VOLUME_RATE_1_STEP_EVERY_8_FSYNC 3
#define MISC_CTRL_1_GAIN_MASK BIT_MASK(2)
#define MISC_CTRL_1_GAIN(val) ((val) & MISC_CTRL_1_GAIN_MASK)
#define MISC_CTRL_1_GAIN_7_5_V_PEAK_OUTPUT 0
#define MISC_CTRL_1_GAIN_15_V_PEAK_OUTPUT 1
#define MISC_CTRL_1_GAIN_21_V_PEAK_OUTPUT 2
#define MISC_CTRL_1_GAIN_29_V_PEAK_OUTPUT 3
/*
 * Register map notes: each register below lists its I2C address (_ADDR),
 * bit-field masks (_MASK), field-builder macros (taking an unshifted field
 * value) and enumerated field values. Single-bit fields deliberately provide
 * both a flag macro and a _MASK alias with the same value.
 */
/* Miscellaneous Control 2 Register */
#define MISC_CTRL_2_ADDR 0x02
#define MISC_CTRL_2_PWM_FREQUENCY_MASK (BIT_MASK(3) << 4)
#define MISC_CTRL_2_PWM_FREQUENCY(val) (((val) << 4) & MISC_CTRL_2_PWM_FREQUENCY_MASK)
#define MISC_CTRL_2_PWM_FREQUENCY_8_FS 0
#define MISC_CTRL_2_PWM_FREQUENCY_10_FS 1
/* NOTE(review): field codes 2-4 are not named here — presumably reserved in
 * the datasheet; confirm before adding new carrier rates.
 */
#define MISC_CTRL_2_PWM_FREQUENCY_38_FS 5
#define MISC_CTRL_2_PWM_FREQUENCY_44_FS 6
#define MISC_CTRL_2_PWM_FREQUENCY_48_FS 7
#define MISC_CTRL_2_SDM_OSR BIT(2)
#define MISC_CTRL_2_SDM_OSR_MASK BIT(2)
#define MISC_CTRL_2_OUTPUT_PHASE_MASK BIT_MASK(2)
#define MISC_CTRL_2_OUTPUT_PHASE(val) ((val) & MISC_CTRL_2_OUTPUT_PHASE_MASK)
/* NOTE(review): phase code 0 is not named — likely the default phase;
 * verify against the datasheet.
 */
#define MISC_CTRL_2_OUTPUT_PHASE_210_DEGREES 1
#define MISC_CTRL_2_OUTPUT_PHASE_225_DEGREES 2
#define MISC_CTRL_2_OUTPUT_PHASE_240_DEGREES 3
/* Serial Audio-Port Control Register */
#define SAP_CTRL_ADDR 0x03
#define SAP_CTRL_INPUT_SAMPLING_RATE_MASK (BIT_MASK(2) << 6)
#define SAP_CTRL_INPUT_SAMPLING_RATE(val) (((val) << 6) & SAP_CTRL_INPUT_SAMPLING_RATE_MASK)
#define SAP_CTRL_INPUT_SAMPLING_RATE_44_1_KHZ 0
#define SAP_CTRL_INPUT_SAMPLING_RATE_48_KHZ 1
#define SAP_CTRL_INPUT_SAMPLING_RATE_96_KHZ 2
#define SAP_CTRL_TDM_SLOT_SELECT BIT(5)
#define SAP_CTRL_TDM_SLOT_SELECT_MASK BIT(5)
#define SAP_CTRL_TDM_SLOT_SIZE BIT(4)
#define SAP_CTRL_TDM_SLOT_SIZE_MASK BIT(4)
#define SAP_CTRL_TDM_SLOT_SELECT_2 BIT(3)
#define SAP_CTRL_TDM_SLOT_SELECT_2_MASK BIT(3)
#define SAP_CTRL_INPUT_FORMAT_MASK BIT_MASK(3)
#define SAP_CTRL_INPUT_FORMAT(val) ((val) & SAP_CTRL_INPUT_FORMAT_MASK)
#define SAP_CTRL_INPUT_FORMAT_24_BITS_RIGHT 0
#define SAP_CTRL_INPUT_FORMAT_20_BITS_RIGHT 1
#define SAP_CTRL_INPUT_FORMAT_18_BITS_RIGHT 2
#define SAP_CTRL_INPUT_FORMAT_16_BITS_RIGHT 3
#define SAP_CTRL_INPUT_FORMAT_I2S 4
#define SAP_CTRL_INPUT_FORMAT_LEFT 5
#define SAP_CTRL_INPUT_FORMAT_DSP 6
/* Channel State Control Register */
#define CH_STATE_CTRL_ADDR 0x04
#define CH_STATE_CTRL_CH1_STATE_CTRL_MASK (BIT_MASK(2) << 6)
#define CH_STATE_CTRL_CH1_STATE_CTRL(val) (((val) << 6) & CH_STATE_CTRL_CH1_STATE_CTRL_MASK)
#define CH_STATE_CTRL_CH2_STATE_CTRL_MASK (BIT_MASK(2) << 4)
#define CH_STATE_CTRL_CH2_STATE_CTRL(val) (((val) << 4) & CH_STATE_CTRL_CH2_STATE_CTRL_MASK)
/* Shared state codes for both channel fields above */
#define CH_STATE_CTRL_PLAY 0
#define CH_STATE_CTRL_HIZ 1
#define CH_STATE_CTRL_MUTE 2
#define CH_STATE_CTRL_DC_LOAD 3
/* Channel 1 and 2 Volume Control Registers */
#define CH1_VOLUME_CTRL_ADDR 0x05
#define CH2_VOLUME_CTRL_ADDR 0x06
/* 8-bit volume code; the driver maps 0 dB to 0xCF in 0.5 dB steps
 * (see codec_set_output_volume()) — confirm against the datasheet.
 */
#define CH_VOLUME_CTRL_VOLUME_MASK BIT_MASK(8)
#define CH_VOLUME_CTRL_VOLUME(val) ((val) & CH_VOLUME_CTRL_VOLUME_MASK)
/* DC Load Diagnostic Control 1 Register */
#define DC_LDG_CTRL_1_ADDR 0x09
#define DC_LDG_CTRL_1_ABORT BIT(7)
#define DC_LDG_CTRL_1_ABORT_MASK BIT(7)
#define DC_LDG_CTRL_1_DOUBLE_RAMP BIT(6)
#define DC_LDG_CTRL_1_DOUBLE_RAMP_MASK BIT(6)
#define DC_LDG_CTRL_1_DOUBLE_SETTLE BIT(5)
#define DC_LDG_CTRL_1_DOUBLE_SETTLE_MASK BIT(5)
#define DC_LDG_CTRL_1_LO_ENABLE BIT(1)
#define DC_LDG_CTRL_1_LO_ENABLE_MASK BIT(1)
#define DC_LDG_CTRL_1_BYPASS BIT(0)
#define DC_LDG_CTRL_1_BYPASS_MASK BIT(0)
/* DC Load Diagnostic Control 2 Register */
#define DC_LDG_CTRL_2_ADDR 0x0A
#define DC_LDG_CTRL_2_CH1_SL_MASK (BIT_MASK(4) << 4)
#define DC_LDG_CTRL_2_CH1_SL(val) (((val) << 4) & DC_LDG_CTRL_2_CH1_SL_MASK)
#define DC_LDG_CTRL_2_CH2_SL_MASK BIT_MASK(4)
#define DC_LDG_CTRL_2_CH2_SL(val) ((val) & DC_LDG_CTRL_2_CH2_SL_MASK)
/* DC Load Diagnostics Report 1 (read-only fault flags) */
#define DC_LDG_REPORT_1_ADDR 0x0C
#define DC_LDG_REPORT_1_CH1_S2G BIT(7)
#define DC_LDG_REPORT_1_CH1_S2G_MASK BIT(7)
#define DC_LDG_REPORT_1_CH1_S2P BIT(6)
#define DC_LDG_REPORT_1_CH1_S2P_MASK BIT(6)
#define DC_LDG_REPORT_1_CH1_OL BIT(5)
#define DC_LDG_REPORT_1_CH1_OL_MASK BIT(5)
#define DC_LDG_REPORT_1_CH1_SL BIT(4)
#define DC_LDG_REPORT_1_CH1_SL_MASK BIT(4)
#define DC_LDG_REPORT_1_CH2_S2G BIT(3)
#define DC_LDG_REPORT_1_CH2_S2G_MASK BIT(3)
#define DC_LDG_REPORT_1_CH2_S2P BIT(2)
#define DC_LDG_REPORT_1_CH2_S2P_MASK BIT(2)
#define DC_LDG_REPORT_1_CH2_OL BIT(1)
#define DC_LDG_REPORT_1_CH2_OL_MASK BIT(1)
#define DC_LDG_REPORT_1_CH2_SL BIT(0)
#define DC_LDG_REPORT_1_CH2_SL_MASK BIT(0)
/* DC Load Diagnostics Report 3 */
#define DC_LDG_REPORT_3_ADDR 0x0E
#define DC_LDG_REPORT_3_CH1_LO BIT(3)
#define DC_LDG_REPORT_3_CH1_LO_MASK BIT(3)
#define DC_LDG_REPORT_3_CH2_LO BIT(2)
#define DC_LDG_REPORT_3_CH2_LO_MASK BIT(2)
/* Channel Faults Register */
#define CH_FAULTS_ADDR 0x10
#define CH_FAULTS_CH1_OC BIT(7)
#define CH_FAULTS_CH1_OC_MASK BIT(7)
#define CH_FAULTS_CH2_OC BIT(6)
#define CH_FAULTS_CH2_OC_MASK BIT(6)
#define CH_FAULTS_CH1_DC BIT(3)
#define CH_FAULTS_CH1_DC_MASK BIT(3)
#define CH_FAULTS_CH2_DC BIT(2)
#define CH_FAULTS_CH2_DC_MASK BIT(2)
/* Global Faults 1 Register */
#define GLOBAL_FAULTS_1_ADDR 0x11
#define GLOBAL_FAULTS_1_INVALID_CLOCK BIT(4)
#define GLOBAL_FAULTS_1_INVALID_CLOCK_MASK BIT(4)
#define GLOBAL_FAULTS_1_PVDD_OV BIT(3)
#define GLOBAL_FAULTS_1_PVDD_OV_MASK BIT(3)
#define GLOBAL_FAULTS_1_VBAT_OV BIT(2)
#define GLOBAL_FAULTS_1_VBAT_OV_MASK BIT(2)
#define GLOBAL_FAULTS_1_PVDD_UV BIT(1)
#define GLOBAL_FAULTS_1_PVDD_UV_MASK BIT(1)
#define GLOBAL_FAULTS_1_VBAT_UV BIT(0)
#define GLOBAL_FAULTS_1_VBAT_UV_MASK BIT(0)
/* Global Faults 2 Register */
#define GLOBAL_FAULTS_2_ADDR 0x12
#define GLOBAL_FAULTS_2_OTSD BIT(4)
#define GLOBAL_FAULTS_2_OTSD_MASK BIT(4)
#define GLOBAL_FAULTS_2_CH1_OTSD BIT(3)
#define GLOBAL_FAULTS_2_CH1_OTSD_MASK BIT(3)
#define GLOBAL_FAULTS_2_CH2_OTSD BIT(2)
#define GLOBAL_FAULTS_2_CH2_OTSD_MASK BIT(2)
/* Warnings Register */
#define WARNINGS_ADDR 0x13
#define WARNINGS_VDD_POR BIT(5)
#define WARNINGS_VDD_POR_MASK BIT(5)
#define WARNINGS_OTW BIT(4)
#define WARNINGS_OTW_MASK BIT(4)
#define WARNINGS_OTW_CH1 BIT(3)
#define WARNINGS_OTW_CH1_MASK BIT(3)
#define WARNINGS_OTW_CH2 BIT(2)
#define WARNINGS_OTW_CH2_MASK BIT(2)
/* Pin Control Register (masks individual faults off the fault pin) */
#define PIN_CTRL_ADDR 0x14
#define PIN_CTRL_MASK_OC BIT(7)
#define PIN_CTRL_MASK_OC_MASK BIT(7)
#define PIN_CTRL_MASK_OTSD BIT(6)
#define PIN_CTRL_MASK_OTSD_MASK BIT(6)
#define PIN_CTRL_MASK_UV BIT(5)
#define PIN_CTRL_MASK_UV_MASK BIT(5)
#define PIN_CTRL_MASK_OV BIT(4)
#define PIN_CTRL_MASK_OV_MASK BIT(4)
#define PIN_CTRL_MASK_DC BIT(3)
#define PIN_CTRL_MASK_DC_MASK BIT(3)
#define PIN_CTRL_MASK_ILIMIT BIT(2)
#define PIN_CTRL_MASK_ILIMIT_MASK BIT(2)
#define PIN_CTRL_MASK_CLIP BIT(1)
#define PIN_CTRL_MASK_CLIP_MASK BIT(1)
#define PIN_CTRL_MASK_OTW BIT(0)
#define PIN_CTRL_MASK_OTW_MASK BIT(0)
/* Miscellaneous Control 3 Register */
#define MISC_CTRL_3_ADDR 0x21
#define MISC_CTRL_3_CLEAR_FAULT BIT(7)
#define MISC_CTRL_3_CLEAR_FAULT_MASK BIT(7)
#define MISC_CTRL_3_PBTL_CH_SEL BIT(6)
#define MISC_CTRL_3_PBTL_CH_SEL_MASK BIT(6)
#define MISC_CTRL_3_MASK_ILIMIT BIT(5)
#define MISC_CTRL_3_MASK_ILIMIT_MASK BIT(5)
#define MISC_CTRL_3_OTSD_AUTO_RECOVERY BIT(3)
#define MISC_CTRL_3_OTSD_AUTO_RECOVERY_MASK BIT(3)
/* ILIMIT Status Register */
#define ILIMIT_STATUS_ADDR 0x25
#define ILIMIT_STATUS_CH2_ILIMIT_WARN BIT(1)
#define ILIMIT_STATUS_CH2_ILIMIT_WARN_MASK BIT(1)
#define ILIMIT_STATUS_CH1_ILIMIT_WARN BIT(0)
#define ILIMIT_STATUS_CH1_ILIMIT_WARN_MASK BIT(0)
/* Miscellaneous Control 4 Register (high-pass filter corner frequency) */
#define MISC_CTRL_4_ADDR 0x26
#define MISC_CTRL_4_HPF_CORNER_MASK BIT_MASK(3)
#define MISC_CTRL_4_HPF_CORNER(val) ((val) & MISC_CTRL_4_HPF_CORNER_MASK)
#define MISC_CTRL_4_HPF_CORNER_3_7_HZ 0
#define MISC_CTRL_4_HPF_CORNER_7_4_HZ 1
#define MISC_CTRL_4_HPF_CORNER_15_HZ 2
#define MISC_CTRL_4_HPF_CORNER_30_HZ 3
#define MISC_CTRL_4_HPF_CORNER_59_HZ 4
#define MISC_CTRL_4_HPF_CORNER_118_HZ 5
#define MISC_CTRL_4_HPF_CORNER_235_HZ 6
#define MISC_CTRL_4_HPF_CORNER_463_HZ 7
/* Miscellaneous Control 5 Register */
#define MISC_CTRL_5_ADDR 0x28
#define MISC_CTRL_5_SS_BW_SEL BIT(7)
#define MISC_CTRL_5_SS_BW_SEL_MASK BIT(7)
#define MISC_CTRL_5_SS_DIV2 BIT(6)
#define MISC_CTRL_5_SS_DIV2_MASK BIT(6)
#define MISC_CTRL_5_PHASE_SEL_MSB BIT(5)
#define MISC_CTRL_5_PHASE_SEL_MSB_MASK BIT(5)
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_AUDIO_TAS6422DAC_H_ */
``` | /content/code_sandbox/drivers/audio/tas6422dac.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,261 |
```c
/*
*
*/
#define DT_DRV_COMPAT ti_tas6422dac
#include <zephyr/device.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/audio/codec.h>
#include "tas6422dac.h"
#define LOG_LEVEL CONFIG_AUDIO_CODEC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(tas6422dac);
#define TAS6422DAC_MUTE_GPIO_SUPPORT DT_ANY_INST_HAS_PROP_STATUS_OKAY(mute_gpios)
#define CODEC_OUTPUT_VOLUME_MAX (24 * 2)
#define CODEC_OUTPUT_VOLUME_MIN (-100 * 2)
/* Per-instance constant configuration taken from devicetree. */
struct codec_driver_config {
	struct i2c_dt_spec bus;        /* I2C bus and device address of the codec */
#if TAS6422DAC_MUTE_GPIO_SUPPORT
	struct gpio_dt_spec mute_gpio; /* optional hardware mute line */
#endif /* TAS6422DAC_MUTE_GPIO_SUPPORT */
};
/* Per-instance mutable state (currently none; all state lives in hardware). */
struct codec_driver_data {
};
/* Hardware channel selector for the two-channel amplifier. */
enum tas6422dac_channel_t {
	TAS6422DAC_CHANNEL_1,
	TAS6422DAC_CHANNEL_2,
	TAS6422DAC_CHANNEL_ALL,
	TAS6422DAC_CHANNEL_UNKNOWN,
};
/* Maps generic audio_channel_t values to hardware channels.
 * Left-ish channels go to channel 1, right-ish to channel 2; positions the
 * part cannot represent map to TAS6422DAC_CHANNEL_UNKNOWN.
 * NOTE(review): audio_channel_t enumerators not listed here default to 0,
 * which aliases TAS6422DAC_CHANNEL_1 — verify all enumerators are covered.
 */
static enum tas6422dac_channel_t audio_to_tas6422dac_channel[] = {
	[AUDIO_CHANNEL_FRONT_LEFT] = TAS6422DAC_CHANNEL_1,
	[AUDIO_CHANNEL_FRONT_RIGHT] = TAS6422DAC_CHANNEL_2,
	[AUDIO_CHANNEL_LFE] = TAS6422DAC_CHANNEL_UNKNOWN,
	[AUDIO_CHANNEL_FRONT_CENTER] = TAS6422DAC_CHANNEL_UNKNOWN,
	[AUDIO_CHANNEL_REAR_LEFT] = TAS6422DAC_CHANNEL_1,
	[AUDIO_CHANNEL_REAR_RIGHT] = TAS6422DAC_CHANNEL_2,
	[AUDIO_CHANNEL_REAR_CENTER] = TAS6422DAC_CHANNEL_UNKNOWN,
	[AUDIO_CHANNEL_SIDE_LEFT] = TAS6422DAC_CHANNEL_1,
	[AUDIO_CHANNEL_SIDE_RIGHT] = TAS6422DAC_CHANNEL_2,
	[AUDIO_CHANNEL_ALL] = TAS6422DAC_CHANNEL_ALL,
};
static void codec_mute_output(const struct device *dev, enum tas6422dac_channel_t channel);
static void codec_unmute_output(const struct device *dev, enum tas6422dac_channel_t channel);
static void codec_write_reg(const struct device *dev, uint8_t reg, uint8_t val);
static void codec_read_reg(const struct device *dev, uint8_t reg, uint8_t *val);
static void codec_soft_reset(const struct device *dev);
static int codec_configure_dai(const struct device *dev, audio_dai_cfg_t *cfg);
static void codec_configure_output(const struct device *dev);
static int codec_set_output_volume(const struct device *dev, enum tas6422dac_channel_t channel,
int vol);
#if (LOG_LEVEL >= LOG_LEVEL_DEBUG)
static void codec_read_all_regs(const struct device *dev);
#define CODEC_DUMP_REGS(dev) codec_read_all_regs((dev))
#else
#define CODEC_DUMP_REGS(dev)
#endif
/*
 * Device init hook: verify every bus/GPIO the codec depends on is ready.
 * No hardware access happens here; the codec is programmed in configure().
 */
static int codec_initialize(const struct device *dev)
{
	const struct codec_driver_config *const cfg = dev->config;

	if (!device_is_ready(cfg->bus.bus)) {
		LOG_ERR("I2C device not ready");
		return -ENODEV;
	}

#if TAS6422DAC_MUTE_GPIO_SUPPORT
	if (!device_is_ready(cfg->mute_gpio.port)) {
		LOG_ERR("GPIO device not ready");
		return -ENODEV;
	}
#endif /* TAS6422DAC_MUTE_GPIO_SUPPORT */

	return 0;
}
/*
 * Configure the codec: reset it, program the serial audio interface from
 * cfg->dai_cfg and apply the fixed output settings. Only I2S is supported.
 * Returns 0 on success or a negative errno from DAI configuration.
 */
static int codec_configure(const struct device *dev, struct audio_codec_cfg *cfg)
{
	if (cfg->dai_type != AUDIO_DAI_TYPE_I2S) {
		LOG_ERR("dai_type must be AUDIO_DAI_TYPE_I2S");
		return -EINVAL;
	}

	codec_soft_reset(dev);

	int ret = codec_configure_dai(dev, &cfg->dai_cfg);

	codec_configure_output(dev);

	return ret;
}
/* Start playback: unmute both channels and, at debug log level, dump the
 * whole register file for inspection.
 */
static void codec_start_output(const struct device *dev)
{
	codec_unmute_output(dev, TAS6422DAC_CHANNEL_ALL);

	CODEC_DUMP_REGS(dev);
}

/* Stop playback by muting both channels; the DAI stays configured. */
static void codec_stop_output(const struct device *dev)
{
	codec_mute_output(dev, TAS6422DAC_CHANNEL_ALL);
}
/*
 * Write @p state (one of the CH_STATE_CTRL_* codes) into the channel state
 * control register for @p channel via read-modify-write. Shared by the mute
 * and unmute paths below, which previously duplicated this switch verbatim.
 * Invalid channels are logged and the register is left untouched.
 */
static void codec_set_channel_state(const struct device *dev,
				    enum tas6422dac_channel_t channel,
				    uint8_t state)
{
	uint8_t val;

	codec_read_reg(dev, CH_STATE_CTRL_ADDR, &val);

	switch (channel) {
	case TAS6422DAC_CHANNEL_1:
		val &= ~CH_STATE_CTRL_CH1_STATE_CTRL_MASK;
		val |= CH_STATE_CTRL_CH1_STATE_CTRL(state);
		break;
	case TAS6422DAC_CHANNEL_2:
		val &= ~CH_STATE_CTRL_CH2_STATE_CTRL_MASK;
		val |= CH_STATE_CTRL_CH2_STATE_CTRL(state);
		break;
	case TAS6422DAC_CHANNEL_ALL:
		val &= ~(CH_STATE_CTRL_CH1_STATE_CTRL_MASK | CH_STATE_CTRL_CH2_STATE_CTRL_MASK);
		val |= CH_STATE_CTRL_CH1_STATE_CTRL(state) |
		       CH_STATE_CTRL_CH2_STATE_CTRL(state);
		break;
	case TAS6422DAC_CHANNEL_UNKNOWN:
	default:
		LOG_ERR("Invalid codec channel %u", channel);
		return;
	}

	codec_write_reg(dev, CH_STATE_CTRL_ADDR, val);
}

/*
 * Mute @p channel. When all channels are muted and a hardware mute line
 * is present, the mute pin is asserted as well.
 */
static void codec_mute_output(const struct device *dev, enum tas6422dac_channel_t channel)
{
#if TAS6422DAC_MUTE_GPIO_SUPPORT
	const struct codec_driver_config *const dev_cfg = dev->config;

	if (channel == TAS6422DAC_CHANNEL_ALL) {
		gpio_pin_configure_dt(&dev_cfg->mute_gpio, GPIO_OUTPUT_ACTIVE);
	}
#endif

	codec_set_channel_state(dev, channel, CH_STATE_CTRL_MUTE);
}

/*
 * Unmute @p channel. The hardware mute line (if present) is deasserted for
 * any channel, not only CHANNEL_ALL — this preserves the original behavior
 * where unmuting a single channel releases the global mute pin.
 */
static void codec_unmute_output(const struct device *dev, enum tas6422dac_channel_t channel)
{
#if TAS6422DAC_MUTE_GPIO_SUPPORT
	const struct codec_driver_config *const dev_cfg = dev->config;

	gpio_pin_configure_dt(&dev_cfg->mute_gpio, GPIO_OUTPUT_INACTIVE);
#endif

	codec_set_channel_state(dev, channel, CH_STATE_CTRL_PLAY);
}
/*
 * Set a runtime property (output volume or mute) on one logical channel.
 * @param property AUDIO_PROPERTY_OUTPUT_VOLUME or AUDIO_PROPERTY_OUTPUT_MUTE
 * @param channel  generic audio channel, translated through the lookup table
 * @param val      property value (half-dB volume or mute flag)
 * @return 0 on success, -EINVAL for unsupported property/channel
 */
static int codec_set_property(const struct device *dev, audio_property_t property,
			      audio_channel_t channel, audio_property_value_t val)
{
	enum tas6422dac_channel_t codec_channel;

	/* Bounds-check before indexing: an out-of-range channel value would
	 * otherwise read past the end of audio_to_tas6422dac_channel[].
	 */
	if ((size_t)channel >= ARRAY_SIZE(audio_to_tas6422dac_channel)) {
		LOG_ERR("Invalid channel %u", channel);
		return -EINVAL;
	}

	codec_channel = audio_to_tas6422dac_channel[channel];
	if (codec_channel == TAS6422DAC_CHANNEL_UNKNOWN) {
		LOG_ERR("Invalid channel %u", channel);
		return -EINVAL;
	}

	switch (property) {
	case AUDIO_PROPERTY_OUTPUT_VOLUME:
		return codec_set_output_volume(dev, codec_channel, val.vol);
	case AUDIO_PROPERTY_OUTPUT_MUTE:
		if (val.mute) {
			codec_mute_output(dev, codec_channel);
		} else {
			codec_unmute_output(dev, codec_channel);
		}
		return 0;
	default:
		break;
	}

	return -EINVAL;
}
/* No-op: codec_set_property() writes straight to hardware, so there is no
 * cached state to flush here.
 */
static int codec_apply_properties(const struct device *dev)
{
	/* nothing to do because there is nothing cached */
	return 0;
}
/*
 * Write one 8-bit codec register over I2C.
 * The I2C return value was previously ignored; a failed transfer is now
 * logged so bus problems are visible instead of silently dropping writes.
 */
static void codec_write_reg(const struct device *dev, uint8_t reg, uint8_t val)
{
	const struct codec_driver_config *const dev_cfg = dev->config;
	int ret;

	ret = i2c_reg_write_byte_dt(&dev_cfg->bus, reg, val);
	if (ret < 0) {
		LOG_ERR("%s WR REG:0x%02x failed (%d)", dev->name, reg, ret);
		return;
	}
	LOG_DBG("%s WR REG:0x%02x VAL:0x%02x", dev->name, reg, val);
}

/*
 * Read one 8-bit codec register over I2C into @p val.
 * On transfer failure @p val is left unmodified (possibly uninitialized in
 * the caller) and the error is logged.
 */
static void codec_read_reg(const struct device *dev, uint8_t reg, uint8_t *val)
{
	const struct codec_driver_config *const dev_cfg = dev->config;
	int ret;

	ret = i2c_reg_read_byte_dt(&dev_cfg->bus, reg, val);
	if (ret < 0) {
		LOG_ERR("%s RD REG:0x%02x failed (%d)", dev->name, reg, ret);
		return;
	}
	LOG_DBG("%s RD REG:0x%02x VAL:0x%02x", dev->name, reg, *val);
}
/* Trigger a software reset by setting the RESET bit in the mode control
 * register while preserving the other bits.
 */
static void codec_soft_reset(const struct device *dev)
{
	uint8_t mode;

	codec_read_reg(dev, MODE_CTRL_ADDR, &mode);
	codec_write_reg(dev, MODE_CTRL_ADDR, mode | MODE_CTRL_RESET);
}
/*
 * Program the serial audio port: I2S input format plus the input sampling
 * rate taken from the I2S frame clock. Returns -EINVAL for unsupported rates.
 *
 * Fix: the invalid-rate log used %zu for frame_clk_freq, which is a
 * uint32_t, not a size_t — undefined behavior / wrong output on LP64
 * targets. Use %u instead.
 */
static int codec_configure_dai(const struct device *dev, audio_dai_cfg_t *cfg)
{
	uint8_t val;

	codec_read_reg(dev, SAP_CTRL_ADDR, &val);

	/* I2S mode */
	val &= ~SAP_CTRL_INPUT_FORMAT_MASK;
	val |= SAP_CTRL_INPUT_FORMAT(SAP_CTRL_INPUT_FORMAT_I2S);

	/* Input sampling rate */
	val &= ~SAP_CTRL_INPUT_SAMPLING_RATE_MASK;
	switch (cfg->i2s.frame_clk_freq) {
	case AUDIO_PCM_RATE_44P1K:
		val |= SAP_CTRL_INPUT_SAMPLING_RATE(SAP_CTRL_INPUT_SAMPLING_RATE_44_1_KHZ);
		break;
	case AUDIO_PCM_RATE_48K:
		val |= SAP_CTRL_INPUT_SAMPLING_RATE(SAP_CTRL_INPUT_SAMPLING_RATE_48_KHZ);
		break;
	case AUDIO_PCM_RATE_96K:
		val |= SAP_CTRL_INPUT_SAMPLING_RATE(SAP_CTRL_INPUT_SAMPLING_RATE_96_KHZ);
		break;
	default:
		LOG_ERR("Invalid sampling rate %u", cfg->i2s.frame_clk_freq);
		return -EINVAL;
	}

	codec_write_reg(dev, SAP_CTRL_ADDR, val);

	return 0;
}
/*
 * Apply the fixed output-path settings after reset:
 * - overcurrent level 1 (clear the OC control field)
 * - PWM carrier at 10 fs to keep component temperature down
 */
static void codec_configure_output(const struct device *dev)
{
	uint8_t reg;

	/* Overcurrent level = 1 */
	codec_read_reg(dev, MISC_CTRL_1_ADDR, &reg);
	reg &= ~MISC_CTRL_1_OC_CONTROL_MASK;
	codec_write_reg(dev, MISC_CTRL_1_ADDR, reg);

	/*
	 * PWM frequency = 10 fs
	 * Reduce PWM frequency to prevent component overtemperature
	 */
	codec_read_reg(dev, MISC_CTRL_2_ADDR, &reg);
	reg &= ~MISC_CTRL_2_PWM_FREQUENCY_MASK;
	reg |= MISC_CTRL_2_PWM_FREQUENCY(MISC_CTRL_2_PWM_FREQUENCY_10_FS);
	codec_write_reg(dev, MISC_CTRL_2_ADDR, reg);
}
/*
 * Set the output volume of @p channel.
 * @param vol volume in half-dB steps, [-200 .. +48] (i.e. -100 dB .. +24 dB)
 * @return 0 on success, -EINVAL for out-of-range volume or bad channel
 *
 * Fix: the out-of-range log previously printed `vol >> 1` with
 * `(uint32_t)vol & 1` as the fraction; for negative volumes the arithmetic
 * shift rounds toward negative infinity, so e.g. -3 (=-1.5 dB) was logged
 * as "-2.5". Signed division/modulo prints the correct value.
 */
static int codec_set_output_volume(const struct device *dev, enum tas6422dac_channel_t channel,
				   int vol)
{
	uint8_t vol_val;

	if ((vol > CODEC_OUTPUT_VOLUME_MAX) || (vol < CODEC_OUTPUT_VOLUME_MIN)) {
		LOG_ERR("Invalid volume %d.%d dB", vol / 2, (vol % 2) ? 5 : 0);
		return -EINVAL;
	}

	/* 0xCF is the register code for 0 dB (one code per 0.5 dB step) —
	 * NOTE(review): confirm against the volume table in the datasheet.
	 */
	vol_val = vol + 0xcf;

	switch (channel) {
	case TAS6422DAC_CHANNEL_1:
		codec_write_reg(dev, CH1_VOLUME_CTRL_ADDR, CH_VOLUME_CTRL_VOLUME(vol_val));
		break;
	case TAS6422DAC_CHANNEL_2:
		codec_write_reg(dev, CH2_VOLUME_CTRL_ADDR, CH_VOLUME_CTRL_VOLUME(vol_val));
		break;
	case TAS6422DAC_CHANNEL_ALL:
		codec_write_reg(dev, CH1_VOLUME_CTRL_ADDR, CH_VOLUME_CTRL_VOLUME(vol_val));
		codec_write_reg(dev, CH2_VOLUME_CTRL_ADDR, CH_VOLUME_CTRL_VOLUME(vol_val));
		break;
	case TAS6422DAC_CHANNEL_UNKNOWN:
	default:
		LOG_ERR("Invalid codec channel %u", channel);
		return -EINVAL;
	}

	return 0;
}
#if (LOG_LEVEL >= LOG_LEVEL_DEBUG)
static void codec_read_all_regs(const struct device *dev)
{
uint8_t val;
codec_read_reg(dev, MODE_CTRL_ADDR, &val);
codec_read_reg(dev, MISC_CTRL_1_ADDR, &val);
codec_read_reg(dev, MISC_CTRL_2_ADDR, &val);
codec_read_reg(dev, SAP_CTRL_ADDR, &val);
codec_read_reg(dev, CH_STATE_CTRL_ADDR, &val);
codec_read_reg(dev, CH1_VOLUME_CTRL_ADDR, &val);
codec_read_reg(dev, CH2_VOLUME_CTRL_ADDR, &val);
codec_read_reg(dev, DC_LDG_CTRL_1_ADDR, &val);
codec_read_reg(dev, DC_LDG_CTRL_2_ADDR, &val);
codec_read_reg(dev, DC_LDG_REPORT_1_ADDR, &val);
codec_read_reg(dev, DC_LDG_REPORT_3_ADDR, &val);
codec_read_reg(dev, CH_FAULTS_ADDR, &val);
codec_read_reg(dev, GLOBAL_FAULTS_1_ADDR, &val);
codec_read_reg(dev, GLOBAL_FAULTS_2_ADDR, &val);
codec_read_reg(dev, WARNINGS_ADDR, &val);
codec_read_reg(dev, PIN_CTRL_ADDR, &val);
codec_read_reg(dev, MISC_CTRL_3_ADDR, &val);
codec_read_reg(dev, ILIMIT_STATUS_ADDR, &val);
codec_read_reg(dev, MISC_CTRL_4_ADDR, &val);
codec_read_reg(dev, MISC_CTRL_5_ADDR, &val);
}
#endif
/* Audio codec API vtable exposed to the Zephyr codec subsystem. */
static const struct audio_codec_api codec_driver_api = {
	.configure = codec_configure,
	.start_output = codec_start_output,
	.stop_output = codec_stop_output,
	.set_property = codec_set_property,
	.apply_properties = codec_apply_properties,
};

/* Optional mute GPIO config initializer; expands to nothing when no enabled
 * instance declares mute-gpios in devicetree.
 */
#if TAS6422DAC_MUTE_GPIO_SUPPORT
#define TAS6422DAC_MUTE_GPIO_INIT(n) .mute_gpio = GPIO_DT_SPEC_INST_GET(n, mute_gpios)
#else
#define TAS6422DAC_MUTE_GPIO_INIT(n)
#endif /* TAS6422DAC_MUTE_GPIO_SUPPORT */

/* Instantiates data, config and device objects for devicetree instance n. */
#define TAS6422DAC_INIT(n) \
	static struct codec_driver_data codec_device_data_##n; \
	\
	static struct codec_driver_config codec_device_config_##n = { \
		.bus = I2C_DT_SPEC_INST_GET(n), TAS6422DAC_MUTE_GPIO_INIT(n)}; \
	\
	DEVICE_DT_INST_DEFINE(n, codec_initialize, NULL, &codec_device_data_##n, \
			      &codec_device_config_##n, POST_KERNEL, \
			      CONFIG_AUDIO_CODEC_INIT_PRIORITY, &codec_driver_api);

DT_INST_FOREACH_STATUS_OKAY(TAS6422DAC_INIT)
``` | /content/code_sandbox/drivers/audio/tas6422dac.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,070 |
```c
/*
*
*/
#define DT_DRV_COMPAT st_mpxxdtyy
#include "mpxxdtyy.h"
#include <zephyr/drivers/i2s.h>
#define LOG_LEVEL CONFIG_AUDIO_DMIC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(mpxxdtyy);
#if DT_ANY_INST_ON_BUS_STATUS_OKAY(i2s)
/* Number of raw-PDM capture blocks kept in flight between I2S RX and the
 * software PDM->PCM filter.
 */
#define NUM_RX_BLOCKS 4
#define PDM_BLOCK_MAX_SIZE_BYTES 512
/* Slab backing the raw PDM blocks the I2S peripheral fills. */
K_MEM_SLAB_DEFINE(rx_pdm_i2s_mslab, PDM_BLOCK_MAX_SIZE_BYTES, NUM_RX_BLOCKS, 1);
/*
 * Read one converted PCM block: fetch a raw PDM block from the I2S master,
 * run the software PDM->PCM filter into a freshly allocated PCM block, and
 * hand the PCM block (and its size) to the caller, who must free it back to
 * the configured PCM slab. @p stream and @p timeout are currently unused.
 *
 * Fix: when the PCM allocation failed, the PDM block obtained from i2s_read()
 * was never returned to rx_pdm_i2s_mslab, leaking a slab block on every
 * allocation failure until capture starved. It is now freed on that path.
 */
int mpxxdtyy_i2s_read(const struct device *dev, uint8_t stream, void **buffer,
		      size_t *size, int32_t timeout)
{
	int ret;
	const struct mpxxdtyy_config *config = dev->config;
	struct mpxxdtyy_data *const data = dev->data;
	void *pdm_block, *pcm_block;
	size_t pdm_size;
	TPDMFilter_InitStruct *pdm_filter = &data->pdm_filter[0];

	ret = i2s_read(config->comm_master, &pdm_block, &pdm_size);
	if (ret != 0) {
		LOG_ERR("read failed (%d)", ret);
		return ret;
	}

	ret = k_mem_slab_alloc(data->pcm_mem_slab,
			       &pcm_block, K_NO_WAIT);
	if (ret < 0) {
		/* Return the PDM block to its slab before bailing out */
		k_mem_slab_free(&rx_pdm_i2s_mslab, pdm_block);
		return ret;
	}

	sw_filter_lib_run(pdm_filter, pdm_block, pcm_block, pdm_size,
			  data->pcm_mem_size);
	k_mem_slab_free(&rx_pdm_i2s_mslab, pdm_block);

	*buffer = pcm_block;
	*size = data->pcm_mem_size;

	return 0;
}
int mpxxdtyy_i2s_trigger(const struct device *dev, enum dmic_trigger cmd)
{
int ret;
const struct mpxxdtyy_config *config = dev->config;
struct mpxxdtyy_data *const data = dev->data;
enum i2s_trigger_cmd i2s_cmd;
enum dmic_state tmp_state;
switch (cmd) {
case DMIC_TRIGGER_START:
if (data->state == DMIC_STATE_CONFIGURED) {
tmp_state = DMIC_STATE_ACTIVE;
i2s_cmd = I2S_TRIGGER_START;
} else {
return 0;
}
break;
case DMIC_TRIGGER_STOP:
if (data->state == DMIC_STATE_ACTIVE) {
tmp_state = DMIC_STATE_CONFIGURED;
i2s_cmd = I2S_TRIGGER_STOP;
} else {
return 0;
}
break;
default:
return -EINVAL;
}
ret = i2s_trigger(config->comm_master, I2S_DIR_RX, i2s_cmd);
if (ret != 0) {
LOG_ERR("trigger failed with %d error", ret);
return ret;
}
data->state = tmp_state;
return 0;
}
int mpxxdtyy_i2s_configure(const struct device *dev, struct dmic_cfg *cfg)
{
int ret;
const struct mpxxdtyy_config *config = dev->config;
struct mpxxdtyy_data *const data = dev->data;
uint8_t chan_size = cfg->streams->pcm_width;
uint32_t audio_freq = cfg->streams->pcm_rate;
uint16_t factor;
/* PCM buffer size */
data->pcm_mem_slab = cfg->streams->mem_slab;
data->pcm_mem_size = cfg->streams->block_size;
/* check requested min pdm frequency */
if (cfg->io.min_pdm_clk_freq < MPXXDTYY_MIN_PDM_FREQ ||
cfg->io.min_pdm_clk_freq > cfg->io.max_pdm_clk_freq) {
return -EINVAL;
}
/* check requested max pdm frequency */
if (cfg->io.max_pdm_clk_freq > MPXXDTYY_MAX_PDM_FREQ ||
cfg->io.max_pdm_clk_freq < cfg->io.min_pdm_clk_freq) {
return -EINVAL;
}
factor = sw_filter_lib_init(dev, cfg);
if (factor == 0U) {
return -EINVAL;
}
/* configure I2S channels */
struct i2s_config i2s_cfg;
i2s_cfg.word_size = chan_size;
i2s_cfg.channels = cfg->channel.req_num_chan;
i2s_cfg.format = I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED |
I2S_FMT_BIT_CLK_INV;
i2s_cfg.options = I2S_OPT_FRAME_CLK_MASTER | I2S_OPT_BIT_CLK_MASTER;
i2s_cfg.frame_clk_freq = audio_freq * factor / chan_size;
i2s_cfg.block_size = data->pcm_mem_size * (factor / chan_size);
i2s_cfg.mem_slab = &rx_pdm_i2s_mslab;
i2s_cfg.timeout = 2000;
ret = i2s_configure(config->comm_master, I2S_DIR_RX, &i2s_cfg);
if (ret != 0) {
LOG_ERR("I2S device configuration error");
return ret;
}
data->state = DMIC_STATE_CONFIGURED;
return 0;
}
#endif /* DT_ANY_INST_ON_BUS_STATUS_OKAY(i2s) */
``` | /content/code_sandbox/drivers/audio/mpxxdtyy-i2s.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,133 |
```c
/*
*
* based on dmic_nrfx_pdm.c
*
*/
#include <zephyr/drivers/dma.h>
#include <zephyr/audio/dmic.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/drivers/clock_control.h>
#include <soc.h>
#include <fsl_dmic.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(dmic_mcux, CONFIG_AUDIO_DMIC_LOG_LEVEL);
#define DT_DRV_COMPAT nxp_dmic
/* Per-PDM-channel state: HAL channel configuration plus the DMA device and
 * channel servicing that PDM channel's FIFO.
 */
struct mcux_dmic_pdm_chan {
	dmic_channel_config_t dmic_channel_cfg;
	const struct device *dma;
	uint8_t dma_chan;
};
/* Driver runtime state. */
struct mcux_dmic_drv_data {
	struct k_mem_slab *mem_slab;                      /* app-supplied PCM block slab */
	void *dma_bufs[CONFIG_DMIC_MCUX_DMA_BUFFERS];     /* blocks currently owned by DMA */
	uint8_t active_buf_idx;                           /* index DMA is filling next */
	uint32_t block_size;                              /* bytes per PCM block (all channels) */
	DMIC_Type *base_address;                          /* DMIC peripheral registers */
	struct mcux_dmic_pdm_chan **pdm_channels;         /* indexed by hardware channel; NULL if disabled */
	uint8_t act_num_chan;                             /* number of active logical channels */
	struct k_msgq *rx_queue;                          /* filled blocks awaiting dmic_read() */
	uint32_t chan_map_lo;                             /* channel map, low word */
	uint32_t chan_map_hi;                             /* channel map, high word */
	enum dmic_state dmic_state;                       /* driver state machine */
};
/* Constant configuration (devicetree). */
struct mcux_dmic_cfg {
	const struct pinctrl_dev_config *pcfg;
	const struct device *clock_dev;
	clock_control_subsys_t clock_name;
	bool use2fs;                                      /* run DMIC in 2FS output mode */
};
/*
 * Compute the DMIC oversampling ratio for a target PCM rate given the PDM
 * bit clock. In 2FS mode the decimator output runs at twice the base rate,
 * halving the effective divisor.
 *
 * Note that the below calculation assumes the following:
 * - DMIC DIVHFCLK is set to 0x0 (divide by 1)
 * - DMIC PHY_HALF is set to 0x0 (standard sample rate)
 */
static int dmic_mcux_get_osr(uint32_t pcm_rate, uint32_t bit_clk, bool use_2fs)
{
	uint32_t divisor = 2U * pcm_rate * (use_2fs ? 1U : 2U);

	return (int)(bit_clk / divisor);
}
/* Gets hardware channel index from logical channel */
static uint8_t dmic_mcux_hw_chan(struct mcux_dmic_drv_data *drv_data,
uint8_t log_chan)
{
enum pdm_lr lr;
uint8_t hw_chan;
/* This function assigns hardware channel "n" to the left channel,
* and hardware channel "n+1" to the right channel. This choice is
* arbitrary, but must be followed throughout the driver.
*/
dmic_parse_channel_map(drv_data->chan_map_lo,
drv_data->chan_map_hi,
log_chan, &hw_chan, &lr);
if (lr == PDM_CHAN_LEFT) {
return hw_chan * 2;
} else {
return (hw_chan * 2) + 1;
}
}
/*
 * Enable or disable the DMIC channels for all active logical channels.
 * PDM channel 0 is always included in the mask, as the RM states:
 * "In order to output 8 channels of PDM Data, PDM_CLK01 must be used" —
 * keeping channel 0 enabled keeps its clock running even when its FIFO
 * data is not captured.
 */
static void dmic_mcux_activate_channels(struct mcux_dmic_drv_data *drv_data,
					bool enable)
{
	uint32_t mask = 0x1;

	for (uint8_t chan = 0; chan < drv_data->act_num_chan; chan++) {
		mask |= BIT(dmic_mcux_hw_chan(drv_data, chan));
	}

	if (!enable) {
		/* No HAL function to disable channels; clear the bits directly */
		drv_data->base_address->CHANEN &= ~mask;
		return;
	}

	DMIC_EnableChannnel(drv_data->base_address, mask);
}
/*
 * Start or stop the DMA channel behind every active PDM channel and toggle
 * the corresponding DMIC-side DMA request. Starting aborts on the first
 * failure; stopping continues through all channels and reports -EIO if any
 * stop failed.
 */
static int dmic_mcux_enable_dma(struct mcux_dmic_drv_data *drv_data, bool enable)
{
	int ret = 0;

	for (uint8_t chan = 0; chan < drv_data->act_num_chan; chan++) {
		uint8_t hw_chan = dmic_mcux_hw_chan(drv_data, chan);
		struct mcux_dmic_pdm_chan *pdm = drv_data->pdm_channels[hw_chan];

		if (enable) {
			ret = dma_start(pdm->dma, pdm->dma_chan);
			if (ret < 0) {
				LOG_ERR("Could not start DMA for HW channel %d",
					hw_chan);
				return ret;
			}
		} else if (dma_stop(pdm->dma, pdm->dma_chan)) {
			ret = -EIO;
		}

		DMIC_EnableChannelDma(drv_data->base_address,
				      (dmic_channel_t)hw_chan, enable);
	}

	return ret;
}
/* Helper to reload DMA engine for all active channels with new buffer */
static void dmic_mcux_reload_dma(struct mcux_dmic_drv_data *drv_data,
void *buffer)
{
int ret;
uint8_t hw_chan;
struct mcux_dmic_pdm_chan *pdm_channel;
uint8_t num_chan = drv_data->act_num_chan;
uint32_t dma_buf_size = drv_data->block_size / num_chan;
uint32_t src, dst;
/* This function reloads the DMA engine for all active DMA channels
* with the provided buffer. Each DMA channel will start
* at a different initial address to interleave channel data.
*/
for (uint8_t chan = 0; chan < num_chan; chan++) {
/* Parse the channel map data */
hw_chan = dmic_mcux_hw_chan(drv_data, chan);
pdm_channel = drv_data->pdm_channels[hw_chan];
src = DMIC_FifoGetAddress(drv_data->base_address, hw_chan);
dst = (uint32_t)(((uint16_t *)buffer) + chan);
ret = dma_reload(pdm_channel->dma, pdm_channel->dma_chan,
src, dst, dma_buf_size);
if (ret < 0) {
LOG_ERR("Could not reload DMIC HW channel %d", hw_chan);
return;
}
}
}
/* Helper to get next buffer index for DMA */
static uint8_t dmic_mcux_next_buf_idx(uint8_t current_idx)
{
if ((current_idx + 1) == CONFIG_DMIC_MCUX_DMA_BUFFERS) {
return 0;
}
return current_idx + 1;
}
/*
 * Stop capture: quiesce the DMIC channels and their DMA, return every block
 * still owned by the DMA engine to the slab, and drop any filled blocks the
 * application has not consumed. Leaves the driver in the CONFIGURED state.
 */
static int dmic_mcux_stop(struct mcux_dmic_drv_data *drv_data)
{
	/* Hardware first: stop channels, then the DMA servicing them */
	dmic_mcux_activate_channels(drv_data, false);
	dmic_mcux_enable_dma(drv_data, false);

	for (uint32_t i = 0; i < CONFIG_DMIC_MCUX_DMA_BUFFERS; i++) {
		k_mem_slab_free(drv_data->mem_slab, drv_data->dma_bufs[i]);
	}

	/* Purge the RX queue as well. */
	k_msgq_purge(drv_data->rx_queue);
	drv_data->dmic_state = DMIC_STATE_CONFIGURED;

	return 0;
}
/*
 * DMA completion callback (one invocation per completed block on the first
 * configured channel). Rotates the buffer ring: the just-filled block is
 * pushed to the RX queue and replaced by a freshly allocated one. On
 * allocation or queue failure the filled block is sacrificed (reused by the
 * DMA) so capture keeps running, and the driver state is flagged as ERROR.
 */
static void dmic_mcux_dma_cb(const struct device *dev, void *user_data,
			     uint32_t channel, int status)
{

	struct mcux_dmic_drv_data *drv_data = (struct mcux_dmic_drv_data *)user_data;
	int ret;
	void *done_buffer = drv_data->dma_bufs[drv_data->active_buf_idx];
	void *new_buffer;

	LOG_DBG("CB: channel is %u", channel);

	if (status < 0) {
		/* DMA has failed, free allocated blocks */
		LOG_ERR("DMA reports error");

		dmic_mcux_enable_dma(drv_data, false);
		dmic_mcux_activate_channels(drv_data, false);
		/* Free all allocated DMA buffers */
		dmic_mcux_stop(drv_data);
		drv_data->dmic_state = DMIC_STATE_ERROR;
		return;
	}

	/* Before we queue the current buffer, make sure we can allocate
	 * another one to replace it.
	 */
	ret = k_mem_slab_alloc(drv_data->mem_slab, &new_buffer, K_NO_WAIT);
	if (ret < 0) {
		/* We can't allocate a new buffer to replace the current
		 * one, so we cannot release the current buffer to the
		 * rx queue (or the DMA would starve). Therefore, we just
		 * leave the current buffer in place to be overwritten
		 * by the DMA.
		 */
		LOG_ERR("Could not allocate RX buffer. Dropping RX data");
		drv_data->dmic_state = DMIC_STATE_ERROR;
		/* Reload DMA */
		dmic_mcux_reload_dma(drv_data, done_buffer);
		/* Advance active buffer index */
		drv_data->active_buf_idx =
			dmic_mcux_next_buf_idx(drv_data->active_buf_idx);
		return;
	}

	/* DMA issues an interrupt at the completion of every block.
	 * we should put the active buffer into the rx queue for the
	 * application to read. The application is responsible for
	 * freeing this buffer once it processes it.
	 */
	ret = k_msgq_put(drv_data->rx_queue, &done_buffer, K_NO_WAIT);
	if (ret < 0) {
		/* Free the newly allocated buffer, we won't need it. */
		k_mem_slab_free(drv_data->mem_slab, new_buffer);
		/* We cannot enqueue the current buffer, so we will drop
		 * the current buffer data and leave the current buffer
		 * in place to be overwritten by the DMA
		 */
		LOG_ERR("RX queue overflow, dropping RX buffer data");
		drv_data->dmic_state = DMIC_STATE_ERROR;
		/* Reload DMA */
		dmic_mcux_reload_dma(drv_data, done_buffer);
		/* Advance active buffer index */
		drv_data->active_buf_idx =
			dmic_mcux_next_buf_idx(drv_data->active_buf_idx);
		return;
	}

	/* Previous buffer was enqueued, and new buffer is allocated.
	 * Replace pointer to previous buffer in our dma slots array,
	 * and reload DMA with next buffer.
	 */
	drv_data->dma_bufs[drv_data->active_buf_idx] = new_buffer;
	dmic_mcux_reload_dma(drv_data, new_buffer);
	/* Advance active buffer index */
	drv_data->active_buf_idx = dmic_mcux_next_buf_idx(drv_data->active_buf_idx);
}
/*
 * Build the per-channel DMA configuration: a circular chain of
 * CONFIG_DMIC_MCUX_DMA_BUFFERS block descriptors per active channel, each
 * reading the channel's FIFO into its interleaved slot of the PCM buffers.
 * Only the first configured channel installs the completion callback.
 *
 * NOTE(review): blk_cfg lives on this function's stack; this assumes
 * dma_config() copies the descriptor chain into driver-owned storage —
 * confirm against the DMA driver in use.
 */
static int dmic_mcux_setup_dma(const struct device *dev)
{
	struct mcux_dmic_drv_data *drv_data = dev->data;
	struct mcux_dmic_pdm_chan *pdm_channel;
	struct dma_block_config blk_cfg[CONFIG_DMIC_MCUX_DMA_BUFFERS] = {0};
	struct dma_config dma_cfg = {0};
	uint8_t num_chan = drv_data->act_num_chan;
	uint32_t dma_buf_size = drv_data->block_size / num_chan;
	uint8_t dma_buf_idx = 0;
	void *dma_buf = drv_data->dma_bufs[dma_buf_idx];
	uint8_t hw_chan;
	int ret = 0;

	/* Setup DMA configuration common between all channels */
	dma_cfg.user_data = drv_data;
	dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	dma_cfg.source_data_size = sizeof(uint16_t); /* Each sample is 16 bits */
	dma_cfg.dest_data_size = sizeof(uint16_t);
	dma_cfg.block_count = CONFIG_DMIC_MCUX_DMA_BUFFERS;
	dma_cfg.head_block = &blk_cfg[0];
	dma_cfg.complete_callback_en = 1; /* Callback at each block */
	dma_cfg.dma_callback = dmic_mcux_dma_cb;

	/* When multiple channels are enabled simultaneously, the DMA
	 * completion interrupt from one channel will signal that DMA data
	 * from multiple channels may be collected, provided the same
	 * amount of data was transferred. Therefore, we only enable the
	 * DMA completion callback for the first channel we setup
	 */
	for (uint8_t chan = 0; chan < num_chan; chan++) {
		/* Parse the channel map data */
		hw_chan = dmic_mcux_hw_chan(drv_data, chan);

		/* Configure blocks for hw_chan */
		for (uint32_t blk = 0; blk < CONFIG_DMIC_MCUX_DMA_BUFFERS; blk++) {
			blk_cfg[blk].source_address =
				DMIC_FifoGetAddress(drv_data->base_address, hw_chan);
			/* We interleave samples within the output buffer
			 * based on channel map. So for a channel map like so:
			 * [pdm0_l, pdm0_r, pdm1_r, pdm1_l]
			 * the resulting DMA buffer would look like:
			 * [pdm0_l_s0, pdm0_r_s0, pdm1_r_s0, pdm1_l_s0,
			 *  pdm0_l_s1, pdm0_r_s1, pdm1_r_s1, pdm1_l_s1, ...]
			 * Each sample is 16 bits wide.
			 */
			blk_cfg[blk].dest_address =
				(uint32_t)(((uint16_t *)dma_buf) + chan);
			blk_cfg[blk].dest_scatter_interval =
				num_chan * sizeof(uint16_t);
			blk_cfg[blk].dest_scatter_en = 1;
			blk_cfg[blk].source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
			blk_cfg[blk].dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
			blk_cfg[blk].block_size = dma_buf_size;
			/* Enable circular mode- when the final DMA block
			 * is exhausted, we want the DMA controller
			 * to restart with the first one.
			 */
			blk_cfg[blk].source_reload_en = 1;
			blk_cfg[blk].dest_reload_en = 1;
			if (blk < (CONFIG_DMIC_MCUX_DMA_BUFFERS - 1)) {
				blk_cfg[blk].next_block = &blk_cfg[blk + 1];
			} else {
				/* Last block: terminate the descriptor list;
				 * wrap-around comes from the reload_en flags
				 * set above.
				 */
				blk_cfg[blk].next_block = NULL;
			}
			/* Select next dma buffer in array */
			dma_buf_idx = dmic_mcux_next_buf_idx(dma_buf_idx);
			dma_buf = drv_data->dma_bufs[dma_buf_idx];
		}

		pdm_channel = drv_data->pdm_channels[hw_chan];
		/* Set configuration for hw_chan_0 */
		ret = dma_config(pdm_channel->dma, pdm_channel->dma_chan, &dma_cfg);
		if (ret < 0) {
			LOG_ERR("Could not configure DMIC channel %d", hw_chan);
			return ret;
		}
		/* First channel is configured. Do not install callbacks for
		 * other channels.
		 */
		dma_cfg.dma_callback = NULL;
	}
	return 0;
}
/* Initializes a DMIC hardware channel */
static int dmic_mcux_init_channel(const struct device *dev, uint32_t osr,
uint8_t chan, enum pdm_lr lr)
{
struct mcux_dmic_drv_data *drv_data = dev->data;
if (!drv_data->pdm_channels[chan]) {
/* Channel disabled at devicetree level */
return -EINVAL;
}
drv_data->pdm_channels[chan]->dmic_channel_cfg.osr = osr;
/* Configure channel settings */
DMIC_ConfigChannel(drv_data->base_address, (dmic_channel_t)chan,
lr == PDM_CHAN_LEFT ? kDMIC_Left : kDMIC_Right,
&drv_data->pdm_channels[chan]->dmic_channel_cfg);
/* Setup channel FIFO. We use maximum threshold to avoid triggering
* DMA too frequently
*/
DMIC_FifoChannel(drv_data->base_address, chan, 15, true, true);
/* Disable interrupts. DMA will be enabled in dmic_mcux_trigger. */
DMIC_EnableChannelInterrupt(drv_data->base_address, chan, false);
return 0;
}
/* Device init hook: applies pinctrl, brings up the DMIC core and marks
 * the driver state as initialized. Returns 0 or a negative errno from
 * pinctrl.
 */
static int mcux_dmic_init(const struct device *dev)
{
	const struct mcux_dmic_cfg *cfg = dev->config;
	struct mcux_dmic_drv_data *data = dev->data;
	int err;

	/* Route the PDM clock/data pins before touching the peripheral */
	err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	DMIC_Init(data->base_address);
	DMIC_Use2fs(data->base_address, cfg->use2fs);
#if !(defined(FSL_FEATURE_DMIC_HAS_NO_IOCFG) && FSL_FEATURE_DMIC_HAS_NO_IOCFG)
	/* Set IO to dual mode */
	DMIC_SetIOCFG(data->base_address, kDMIC_PdmDual);
#endif
	data->dmic_state = DMIC_STATE_INITIALIZED;
	return 0;
}
static int dmic_mcux_configure(const struct device *dev,
struct dmic_cfg *config)
{
const struct mcux_dmic_cfg *drv_config = dev->config;
struct mcux_dmic_drv_data *drv_data = dev->data;
struct pdm_chan_cfg *channel = &config->channel;
struct pcm_stream_cfg *stream = &config->streams[0];
enum pdm_lr lr_0 = 0, lr_1 = 0;
uint8_t hw_chan_0 = 0, hw_chan_1 = 0;
uint32_t bit_clk_rate, osr;
int ret;
if (drv_data->dmic_state == DMIC_STATE_ACTIVE) {
LOG_ERR("Cannot configure device while it is active");
return -EBUSY;
}
/* Only one active channel is supported */
if (channel->req_num_streams != 1) {
return -EINVAL;
}
/* DMIC supports up to 8 active channels. Verify user is not
* requesting more
*/
if (channel->req_num_chan > FSL_FEATURE_DMIC_CHANNEL_NUM) {
LOG_ERR("DMIC only supports 8 channels or less");
return -ENOTSUP;
}
if (stream->pcm_rate == 0 || stream->pcm_width == 0) {
if (drv_data->dmic_state == DMIC_STATE_CONFIGURED) {
DMIC_DeInit(drv_data->base_address);
drv_data->dmic_state = DMIC_STATE_UNINIT;
}
return 0;
}
/* If DMIC was deinitialized, reinit here */
if (drv_data->dmic_state == DMIC_STATE_UNINIT) {
ret = mcux_dmic_init(dev);
if (ret < 0) {
LOG_ERR("Could not reinit DMIC");
return ret;
}
}
/* Currently, we only support 16 bit samples. This is because the DMIC
* API dictates that samples should be interleaved between channels,
* IE: {C0, C1, C2, C0, C1, C2}. To achieve this we must use the
* "destination address increment" function of the LPC DMA IP. Since
* the LPC DMA IP does not support 3 byte wide transfers, we cannot
* effectively use destination address increments to interleave 24
* bit samples.
*/
if (stream->pcm_width != 16) {
LOG_ERR("Only 16 bit samples are supported");
return -ENOTSUP;
}
ret = clock_control_get_rate(drv_config->clock_dev,
drv_config->clock_name, &bit_clk_rate);
if (ret < 0) {
return ret;
}
/* Check bit clock rate versus what user requested */
if ((config->io.min_pdm_clk_freq > bit_clk_rate) ||
(config->io.max_pdm_clk_freq < bit_clk_rate)) {
return -EINVAL;
}
/* Calculate the required OSR divider based on the PCM bit clock
* rate to the DMIC.
*/
osr = dmic_mcux_get_osr(stream->pcm_rate, bit_clk_rate, drv_config->use2fs);
/* Now, parse the channel map and set up each channel we should
* make active. We parse two channels at once, that way we can
* check to make sure that the L/R channels of each PDM controller
* are adjacent.
*/
channel->act_num_chan = 0;
/* Save channel request data */
drv_data->chan_map_lo = channel->req_chan_map_lo;
drv_data->chan_map_hi = channel->req_chan_map_hi;
for (uint8_t chan = 0; chan < channel->req_num_chan; chan += 2) {
/* Get the channel map data for channel pair */
dmic_parse_channel_map(channel->req_chan_map_lo,
channel->req_chan_map_hi,
chan, &hw_chan_0, &lr_0);
if ((chan + 1) < channel->req_num_chan) {
/* Paired channel is enabled */
dmic_parse_channel_map(channel->req_chan_map_lo,
channel->req_chan_map_hi,
chan + 1, &hw_chan_1, &lr_1);
/* Verify that paired channels use same hardware index */
if ((lr_0 == lr_1) ||
(hw_chan_0 != hw_chan_1)) {
return -EINVAL;
}
}
/* Configure selected channels in DMIC */
ret = dmic_mcux_init_channel(dev, osr,
dmic_mcux_hw_chan(drv_data, chan),
lr_0);
if (ret < 0) {
return ret;
}
channel->act_num_chan++;
if ((chan + 1) < channel->req_num_chan) {
/* Paired channel is enabled */
ret = dmic_mcux_init_channel(dev, osr,
dmic_mcux_hw_chan(drv_data,
chan + 1),
lr_1);
if (ret < 0) {
return ret;
}
channel->act_num_chan++;
}
}
channel->act_chan_map_lo = channel->req_chan_map_lo;
channel->act_chan_map_hi = channel->req_chan_map_hi;
drv_data->mem_slab = stream->mem_slab;
drv_data->block_size = stream->block_size;
drv_data->act_num_chan = channel->act_num_chan;
drv_data->dmic_state = DMIC_STATE_CONFIGURED;
return 0;
}
/* Starts DMIC capture: allocates the DMA buffer ring, configures and
 * enables DMA, then activates the DMIC channels.
 *
 * On any failure every buffer already taken from the memory slab is
 * returned (the original code leaked them), so a failed start can be
 * retried without exhausting the slab.
 */
static int dmic_mcux_start(const struct device *dev)
{
	struct mcux_dmic_drv_data *drv_data = dev->data;
	uint32_t alloc_cnt;
	int ret;

	/* Allocate the initial set of buffers reserved for use by the hardware.
	 * We queue buffers so that when the DMA is operating on buffer "n",
	 * buffer "n+1" is already queued in the DMA hardware. When buffer "n"
	 * completes, we allocate another buffer and add it to the tail of the
	 * DMA descriptor chain. This approach requires the driver to allocate
	 * a minimum of two buffers
	 */
	for (alloc_cnt = 0; alloc_cnt < CONFIG_DMIC_MCUX_DMA_BUFFERS; alloc_cnt++) {
		/* Allocate buffers for DMA */
		ret = k_mem_slab_alloc(drv_data->mem_slab,
				       &drv_data->dma_bufs[alloc_cnt], K_NO_WAIT);
		if (ret < 0) {
			LOG_ERR("failed to allocate buffer");
			ret = -ENOBUFS;
			goto err_free;
		}
	}

	ret = dmic_mcux_setup_dma(dev);
	if (ret < 0) {
		goto err_free;
	}

	ret = dmic_mcux_enable_dma(drv_data, true);
	if (ret < 0) {
		goto err_free;
	}
	dmic_mcux_activate_channels(drv_data, true);
	return 0;

err_free:
	/* Return all buffers taken from the slab so a retry can succeed */
	while (alloc_cnt > 0) {
		k_mem_slab_free(drv_data->mem_slab,
				drv_data->dma_bufs[--alloc_cnt]);
	}
	return ret;
}
/* Handles DMIC trigger commands (start/stop/pause/release/reset) and
 * updates the driver state machine accordingly. Returns 0 on success,
 * -EIO when start is requested from an invalid state or fails, -EINVAL
 * on an unknown command.
 */
static int dmic_mcux_trigger(const struct device *dev,
			     enum dmic_trigger cmd)
{
	struct mcux_dmic_drv_data *data = dev->data;

	switch (cmd) {
	case DMIC_TRIGGER_START:
		if (data->dmic_state == DMIC_STATE_ACTIVE) {
			/* Already running; nothing to do */
			break;
		}
		if (data->dmic_state != DMIC_STATE_CONFIGURED) {
			LOG_ERR("Device is not configured");
			return -EIO;
		}
		if (dmic_mcux_start(dev) < 0) {
			LOG_ERR("Could not start DMIC");
			return -EIO;
		}
		data->dmic_state = DMIC_STATE_ACTIVE;
		break;
	case DMIC_TRIGGER_PAUSE:
		/* Disable active channels */
		if (data->dmic_state == DMIC_STATE_ACTIVE) {
			dmic_mcux_activate_channels(data, false);
		}
		data->dmic_state = DMIC_STATE_PAUSED;
		break;
	case DMIC_TRIGGER_RELEASE:
		/* Re-enable channels that were paused */
		if (data->dmic_state == DMIC_STATE_PAUSED) {
			dmic_mcux_activate_channels(data, true);
		}
		data->dmic_state = DMIC_STATE_ACTIVE;
		break;
	case DMIC_TRIGGER_STOP:
		if (data->dmic_state == DMIC_STATE_ACTIVE) {
			dmic_mcux_stop(data);
		}
		data->dmic_state = DMIC_STATE_CONFIGURED;
		break;
	case DMIC_TRIGGER_RESET:
		/* Reset DMIC to uninitialized state */
		DMIC_DeInit(data->base_address);
		data->dmic_state = DMIC_STATE_UNINIT;
		break;
	default:
		LOG_ERR("Invalid command: %d", cmd);
		return -EINVAL;
	}
	return 0;
}
/* Implements the DMIC read() API: blocks (up to timeout ms) for the next
 * filled buffer from the RX message queue and reports its size. The
 * stream index is unused since only one stream is supported.
 */
static int dmic_mcux_read(const struct device *dev,
			  uint8_t stream,
			  void **buffer, size_t *size, int32_t timeout)
{
	struct mcux_dmic_drv_data *data = dev->data;
	int err;

	ARG_UNUSED(stream);

	switch (data->dmic_state) {
	case DMIC_STATE_ERROR:
		LOG_ERR("Device reports an error, please reset and reconfigure it");
		return -EIO;
	case DMIC_STATE_CONFIGURED:
	case DMIC_STATE_ACTIVE:
	case DMIC_STATE_PAUSED:
		/* Valid states for reading */
		break;
	default:
		LOG_ERR("Device state is not valid for read");
		return -EIO;
	}

	err = k_msgq_get(data->rx_queue, buffer, SYS_TIMEOUT_MS(timeout));
	if (err < 0) {
		return err;
	}
	*size = data->block_size;
	LOG_DBG("read buffer = %p", *buffer);
	return 0;
}
/* DMIC API vtable binding this driver's implementations */
static const struct _dmic_ops dmic_ops = {
	.configure = dmic_mcux_configure,
	.trigger = dmic_mcux_trigger,
	.read = dmic_mcux_read,
};
/* Converts integer gainshift into 5 bit 2's complement value for GAINSHIFT reg.
 *
 * For a two's complement encoding, truncating to 5 bits is sufficient:
 * non-negative values map to 0x00..0x0F and negatives to 0x10..0x1F.
 * The previous expression produced a sign-magnitude-style encoding for
 * negative inputs (e.g. -1 became 0x11 instead of 0x1F).
 */
#define PDM_DMIC_GAINSHIFT(val) ((val) & 0x1F)
/* Defines structure for a given PDM channel node.
 *
 * Expands to a static struct mcux_dmic_pdm_chan named after the devicetree
 * node, filled from the node's DMA phandle (controller + channel cell) and
 * its tuning properties (gainshift, 2FS/4FS compensation, DC cutoff/gain).
 * saturate16bit is always set, matching the driver's 16-bit-only support.
 */
#define PDM_DMIC_CHAN_DEFINE(pdm_node)					\
	static struct mcux_dmic_pdm_chan				\
		pdm_channel_##pdm_node = {				\
		.dma = DEVICE_DT_GET(DT_DMAS_CTLR(pdm_node)),		\
		.dma_chan = DT_DMAS_CELL_BY_IDX(pdm_node, 0, channel),	\
		.dmic_channel_cfg = {					\
			.gainshft = PDM_DMIC_GAINSHIFT(DT_PROP(pdm_node, \
							gainshift)),	\
			.preac2coef = DT_ENUM_IDX(pdm_node, compensation_2fs), \
			.preac4coef = DT_ENUM_IDX(pdm_node, compensation_4fs), \
			.dc_cut_level = DT_ENUM_IDX(pdm_node, dc_cutoff), \
			.post_dc_gain_reduce = DT_PROP(pdm_node, dc_gain), \
			.sample_rate = kDMIC_PhyFullSpeed,		\
			.saturate16bit = 1U,				\
		},							\
	};
/* Defines structures for all enabled PDM channels under DMIC instance idx */
#define PDM_DMIC_CHANNELS_DEFINE(idx)					\
	DT_INST_FOREACH_CHILD_STATUS_OKAY(idx, PDM_DMIC_CHAN_DEFINE)

/* Gets pointer for a given PDM channel node.
 * The expansion ends with a trailing comma so it can be used directly as
 * an array initializer element; disabled nodes expand to NULL so that
 * array indices line up with hardware channel numbers.
 */
#define PDM_DMIC_CHAN_GET(pdm_node)					\
	COND_CODE_1(DT_NODE_HAS_STATUS(pdm_node, okay),			\
		    (&pdm_channel_##pdm_node), (NULL)),

/* Gets array of pointers to PDM channels (one entry per child node) */
#define PDM_DMIC_CHANNELS_GET(idx)					\
	DT_INST_FOREACH_CHILD(idx, PDM_DMIC_CHAN_GET)
/* Instantiates one DMIC controller:
 * - per-channel PDM structs plus a pointer table indexed by hardware
 *   channel number (NULL entries mark disabled channels)
 * - a message queue used to hand filled buffers to dmic_mcux_read()
 * - driver data/config structs and the Zephyr device object itself
 */
#define MCUX_DMIC_DEVICE(idx)						\
	PDM_DMIC_CHANNELS_DEFINE(idx);					\
	static struct mcux_dmic_pdm_chan				\
		*pdm_channels##idx[FSL_FEATURE_DMIC_CHANNEL_NUM] = {	\
			PDM_DMIC_CHANNELS_GET(idx)			\
	};								\
	K_MSGQ_DEFINE(dmic_msgq##idx, sizeof(void *),			\
		      CONFIG_DMIC_MCUX_QUEUE_SIZE, 1);			\
	static struct mcux_dmic_drv_data mcux_dmic_data##idx = {	\
		.pdm_channels = pdm_channels##idx,			\
		.base_address = (DMIC_Type *) DT_INST_REG_ADDR(idx),	\
		.dmic_state = DMIC_STATE_UNINIT,			\
		.rx_queue = &dmic_msgq##idx,				\
		.active_buf_idx = 0U,					\
	};								\
									\
	PINCTRL_DT_INST_DEFINE(idx);					\
	static struct mcux_dmic_cfg mcux_dmic_cfg##idx = {		\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx),		\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)),	\
		.clock_name = (clock_control_subsys_t)			\
			DT_INST_CLOCKS_CELL(idx, name),			\
		.use2fs = DT_INST_PROP(idx, use2fs),			\
	};								\
									\
	DEVICE_DT_INST_DEFINE(idx, mcux_dmic_init, NULL,		\
			      &mcux_dmic_data##idx, &mcux_dmic_cfg##idx, \
			      POST_KERNEL, CONFIG_AUDIO_DMIC_INIT_PRIORITY, \
			      &dmic_ops);

/* Existing SoCs only have one PDM instance. */
DT_INST_FOREACH_STATUS_OKAY(MCUX_DMIC_DEVICE)
``` | /content/code_sandbox/drivers/audio/dmic_mcux.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6,766 |
```c
/*
*
*/
#include <stdlib.h>
#include <zephyr/shell/shell.h>
#include <zephyr/audio/codec.h>
#define CODEC_START_HELP \
"Start output audio playback. Syntax:\n" \
"<device>"
#define CODEC_STOP_HELP \
"Stop output audio playback. Syntax:\n" \
"<device>"
#define CODEC_SET_PROP_HELP \
"Set a codec property. Syntax:\n" \
"<device> <property> <channel> <value>"
#define CODEC_APPLY_PROP_HELP \
"Apply any cached properties. Syntax:\n" \
"<device>"
/* Property names accepted by "codec set_prop", indexed by audio_property_t */
static const char *const codec_property_name[] = {
	[AUDIO_PROPERTY_OUTPUT_VOLUME] = "volume",
	[AUDIO_PROPERTY_OUTPUT_MUTE] = "mute",
};

/* Channel names accepted by "codec set_prop", indexed by audio_channel_t.
 * NOTE(review): if the audio_channel_t enum values are not contiguous,
 * designated initialization leaves NULL gaps in this table — the lookup
 * helper must not strcmp() against those. Confirm against the enum.
 */
static const char *const codec_channel_name[] = {
	[AUDIO_CHANNEL_FRONT_LEFT] = "front_left",
	[AUDIO_CHANNEL_FRONT_RIGHT] = "front_right",
	[AUDIO_CHANNEL_LFE] = "lfe",
	[AUDIO_CHANNEL_FRONT_CENTER] = "front_center",
	[AUDIO_CHANNEL_REAR_LEFT] = "rear_left",
	[AUDIO_CHANNEL_REAR_RIGHT] = "rear_right",
	[AUDIO_CHANNEL_REAR_CENTER] = "rear_center",
	[AUDIO_CHANNEL_SIDE_LEFT] = "side_left",
	[AUDIO_CHANNEL_SIDE_RIGHT] = "side_right",
	[AUDIO_CHANNEL_ALL] = "all",
};

/* Positional argv indices shared by all codec shell subcommands */
struct args_index {
	uint8_t device;
	uint8_t property;
	uint8_t channel;
	uint8_t value;
};

static const struct args_index args_indx = {
	.device = 1,
	.property = 2,
	.channel = 3,
	.value = 4,
};
/* Parses @name either as a plain integer (any strtol base-0 form) or as
 * an entry of the @keystack name table of length @count.
 *
 * Returns the parsed number or the table index of the matching entry,
 * -ENOTSUP when the string is neither. NULL table slots (gaps left by
 * sparse designated initializers) are skipped instead of being passed to
 * strcmp(), and an empty string is rejected rather than parsing as 0.
 */
static int parse_named_int(const char *name, const char *const keystack[], size_t count)
{
	char *endptr;
	size_t i;
	long val;

	/* Attempt to parse name as a number first */
	val = strtol(name, &endptr, 0);
	if (endptr != name && *endptr == '\0') {
		return (int)val;
	}

	/* Name is not a number, look it up */
	for (i = 0; i < count; i++) {
		if (keystack[i] != NULL && strcmp(name, keystack[i]) == 0) {
			return (int)i;
		}
	}
	return -ENOTSUP;
}
/* Handler for "codec start <device>": begins audio output playback. */
static int cmd_start(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev = device_get_binding(argv[args_indx.device]);

	if (dev == NULL) {
		shell_error(sh, "Audio Codec device not found");
		return -ENODEV;
	}
	audio_codec_start_output(dev);
	return 0;
}
/* Handler for "codec stop <device>": halts audio output playback. */
static int cmd_stop(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev = device_get_binding(argv[args_indx.device]);

	if (dev == NULL) {
		shell_error(sh, "Audio Codec device not found");
		return -ENODEV;
	}
	audio_codec_stop_output(dev);
	return 0;
}
/* Handler for "codec set_prop <device> <property> <channel> <value>".
 * Property and channel accept either names (see the tables above) or raw
 * numbers; value is parsed as a base-0 integer and range-checked.
 */
static int cmd_set_prop(const struct shell *sh, size_t argc, char *argv[])
{
	audio_property_value_t prop_val;
	const struct device *dev;
	char *tail;
	long raw;
	int prop;
	int chan;

	dev = device_get_binding(argv[args_indx.device]);
	if (dev == NULL) {
		shell_error(sh, "Audio Codec device not found");
		return -ENODEV;
	}

	prop = parse_named_int(argv[args_indx.property], codec_property_name,
			       ARRAY_SIZE(codec_property_name));
	if (prop < 0) {
		shell_error(sh, "Property '%s' unknown", argv[args_indx.property]);
		return -EINVAL;
	}

	chan = parse_named_int(argv[args_indx.channel], codec_channel_name,
			       ARRAY_SIZE(codec_channel_name));
	if (chan < 0) {
		shell_error(sh, "Channel '%s' unknown", argv[args_indx.channel]);
		return -EINVAL;
	}

	raw = strtol(argv[args_indx.value], &tail, 0);
	if (*tail != '\0' || raw > INT32_MAX || raw < INT32_MIN) {
		return -EINVAL;
	}

	if (prop == AUDIO_PROPERTY_OUTPUT_VOLUME) {
		prop_val.vol = raw;
	} else if (prop == AUDIO_PROPERTY_OUTPUT_MUTE) {
		prop_val.mute = raw;
	} else {
		return -EINVAL;
	}

	return audio_codec_set_property(dev, prop, chan, prop_val);
}
/* Handler for "codec apply_prop <device>": flushes cached properties. */
static int cmd_apply_prop(const struct shell *sh, size_t argc, char *argv[])
{
	const struct device *dev = device_get_binding(argv[args_indx.device]);

	if (dev == NULL) {
		shell_error(sh, "Audio Codec device not found");
		return -ENODEV;
	}
	return audio_codec_apply_properties(dev);
}
/* Device name autocompletion support: fills one dynamic shell entry with
 * the idx-th known device name, or NULL syntax when idx is out of range.
 */
static void device_name_get(size_t idx, struct shell_static_entry *entry)
{
	const struct device *dev = shell_device_lookup(idx, NULL);

	entry->handler = NULL;
	entry->help = NULL;
	entry->subcmd = NULL;
	entry->syntax = (dev == NULL) ? NULL : dev->name;
}
/* Dynamic subcommand list offering device names for tab-completion */
SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get);

/* Subcommand table; argument counts include the subcommand token itself */
/* clang-format off */
SHELL_STATIC_SUBCMD_SET_CREATE(sub_codec,
	SHELL_CMD_ARG(start, &dsub_device_name, CODEC_START_HELP, cmd_start,
		      2, 0),
	SHELL_CMD_ARG(stop, &dsub_device_name, CODEC_STOP_HELP, cmd_stop,
		      2, 0),
	SHELL_CMD_ARG(set_prop, &dsub_device_name, CODEC_SET_PROP_HELP, cmd_set_prop,
		      5, 0),
	SHELL_CMD_ARG(apply_prop, &dsub_device_name, CODEC_APPLY_PROP_HELP, cmd_apply_prop,
		      2, 0),
	SHELL_SUBCMD_SET_END
);
/* clang-format on */

/* Register the top-level "codec" shell command */
SHELL_CMD_REGISTER(codec, &sub_codec, "Audio Codec commands", NULL);
``` | /content/code_sandbox/drivers/audio/codec_shell.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,265 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_AUDIO_TLV320DAC310X_H_
#define ZEPHYR_DRIVERS_AUDIO_TLV320DAC310X_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Register addresses */
#define PAGE_CONTROL_ADDR 0
/* Register addresses {page, address} and fields */
#define SOFT_RESET_ADDR (struct reg_addr){0, 1}
#define SOFT_RESET_ASSERT (1)
#define NDAC_DIV_ADDR (struct reg_addr){0, 11}
#define NDAC_POWER_UP BIT(7)
#define NDAC_POWER_UP_MASK BIT(7)
#define NDAC_DIV_MASK BIT_MASK(7)
#define NDAC_DIV(val) ((val) & NDAC_DIV_MASK)
#define MDAC_DIV_ADDR (struct reg_addr){0, 12}
#define MDAC_POWER_UP BIT(7)
#define MDAC_POWER_UP_MASK BIT(7)
#define MDAC_DIV_MASK BIT_MASK(7)
#define MDAC_DIV(val) ((val) & MDAC_DIV_MASK)
#define DAC_PROC_CLK_FREQ_MAX 49152000 /* 49.152 MHz */
#define OSR_MSB_ADDR (struct reg_addr){0, 13}
#define OSR_MSB_MASK BIT_MASK(2)
#define OSR_LSB_ADDR (struct reg_addr){0, 14}
#define OSR_LSB_MASK BIT_MASK(8)
#define DAC_MOD_CLK_FREQ_MIN 2800000 /* 2.8 MHz */
#define DAC_MOD_CLK_FREQ_MAX 6200000 /* 6.2 MHz */
#define IF_CTRL1_ADDR (struct reg_addr){0, 27}
#define IF_CTRL_IFTYPE_MASK BIT_MASK(2)
#define IF_CTRL_IFTYPE_I2S 0
#define IF_CTRL_IFTYPE_DSP 1
#define IF_CTRL_IFTYPE_RJF 2
#define IF_CTRL_IFTYPE_LJF 3
#define IF_CTRL_IFTYPE(val) (((val) & IF_CTRL_IFTYPE_MASK) << 6)
#define IF_CTRL_WLEN_MASK BIT_MASK(2)
#define IF_CTRL_WLEN(val) (((val) & IF_CTRL_WLEN_MASK) << 4)
#define IF_CTRL_WLEN_16 0
#define IF_CTRL_WLEN_20 1
#define IF_CTRL_WLEN_24 2
#define IF_CTRL_WLEN_32 3
#define IF_CTRL_BCLK_OUT BIT(3)
#define IF_CTRL_WCLK_OUT BIT(2)
#define BCLK_DIV_ADDR (struct reg_addr){0, 30}
#define BCLK_DIV_POWER_UP BIT(7)
#define BCLK_DIV_POWER_UP_MASK BIT(7)
#define BCLK_DIV_MASK BIT_MASK(7)
#define BCLK_DIV(val) ((val) & MDAC_DIV_MASK)
#define OVF_FLAG_ADDR (struct reg_addr){0, 39}
#define PROC_BLK_SEL_ADDR (struct reg_addr){0, 60}
#define PROC_BLK_SEL_MASK BIT_MASK(5)
#define PROC_BLK_SEL(val) ((val) & PROC_BLK_SEL_MASK)
#define DATA_PATH_SETUP_ADDR (struct reg_addr){0, 63}
#define DAC_LR_POWERUP_DEFAULT (BIT(7) | BIT(6) | BIT(4) | BIT(2))
#define DAC_LR_POWERDN_DEFAULT (BIT(4) | BIT(2))
#define VOL_CTRL_ADDR (struct reg_addr){0, 64}
#define VOL_CTRL_UNMUTE_DEFAULT (0)
#define VOL_CTRL_MUTE_DEFAULT (BIT(3) | BIT(2))
#define L_DIG_VOL_CTRL_ADDR (struct reg_addr){0, 65}
#define DRC_CTRL1_ADDR (struct reg_addr){0, 68}
#define L_BEEP_GEN_ADDR (struct reg_addr){0, 71}
#define BEEP_GEN_EN_BEEP (BIT(7))
#define R_BEEP_GEN_ADDR (struct reg_addr){0, 72}
#define BEEP_LEN_MSB_ADDR (struct reg_addr){0, 73}
#define BEEP_LEN_MIB_ADDR (struct reg_addr){0, 74}
#define BEEP_LEN_LSB_ADDR (struct reg_addr){0, 75}
/* Page 1 registers */
#define HEADPHONE_DRV_ADDR (struct reg_addr){1, 31}
#define HEADPHONE_DRV_POWERUP (BIT(7) | BIT(6))
#define HEADPHONE_DRV_CM_MASK (BIT_MASK(2) << 3)
#define HEADPHONE_DRV_CM(val) (((val) << 3) & HEADPHONE_DRV_CM_MASK)
#define HEADPHONE_DRV_RESERVED (BIT(2))
#define HP_OUT_POP_RM_ADDR (struct reg_addr){1, 33}
#define HP_OUT_POP_RM_ENABLE (BIT(7))
#define OUTPUT_ROUTING_ADDR (struct reg_addr){1, 35}
#define OUTPUT_ROUTING_HPL (2 << 6)
#define OUTPUT_ROUTING_HPR (2 << 2)
#define HPL_ANA_VOL_CTRL_ADDR (struct reg_addr){1, 36}
#define HPR_ANA_VOL_CTRL_ADDR (struct reg_addr){1, 37}
#define HPX_ANA_VOL_ENABLE (BIT(7))
#define HPX_ANA_VOL_MASK (BIT_MASK(7))
#define HPX_ANA_VOL(val) (((val) & HPX_ANA_VOL_MASK) | \
HPX_ANA_VOL_ENABLE)
#define HPX_ANA_VOL_MAX (0)
#define HPX_ANA_VOL_DEFAULT (64)
#define HPX_ANA_VOL_MIN (127)
#define HPX_ANA_VOL_MUTE (HPX_ANA_VOL_MIN | ~HPX_ANA_VOL_ENABLE)
#define HPX_ANA_VOL_LOW_THRESH (105)
#define HPX_ANA_VOL_FLOOR (144)
#define HPL_DRV_GAIN_CTRL_ADDR (struct reg_addr){1, 40}
#define HPR_DRV_GAIN_CTRL_ADDR (struct reg_addr){1, 41}
#define HPX_DRV_UNMUTE (BIT(2))
#define HEADPHONE_DRV_CTRL_ADDR (struct reg_addr){1, 44}
#define HEADPHONE_DRV_LINEOUT (BIT(1) | BIT(2))
/* Page 3 registers */
#define TIMER_MCLK_DIV_ADDR (struct reg_addr){3, 16}
#define TIMER_MCLK_DIV_EN_EXT (BIT(7))
#define TIMER_MCLK_DIV_MASK (BIT_MASK(7))
#define TIMER_MCLK_DIV_VAL(val) ((val) & TIMER_MCLK_DIV_MASK)
struct reg_addr {
uint8_t page; /* page number */
uint8_t reg_addr; /* register address */
};
enum proc_block {
/* highest performance class with each decimation filter */
PRB_P25_DECIMATION_A = 25,
PRB_P10_DECIMATION_B = 10,
PRB_P18_DECIMATION_C = 18,
};
enum osr_multiple {
OSR_MULTIPLE_8 = 8,
OSR_MULTIPLE_4 = 4,
OSR_MULTIPLE_2 = 2,
};
enum cm_voltage {
CM_VOLTAGE_1P35 = 0,
CM_VOLTAGE_1P5 = 1,
CM_VOLTAGE_1P65 = 2,
CM_VOLTAGE_1P8 = 3,
};
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_AUDIO_TLV320DAC310X_H_ */
``` | /content/code_sandbox/drivers/audio/tlv320dac310x.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,554 |
```unknown
config AUDIO_TAS6422DAC
bool "TAS6422 audio amplifier support"
default y
depends on DT_HAS_TI_TAS6422DAC_ENABLED
select I2C
depends on GPIO
help
Enable TAS6422 support on the selected board
``` | /content/code_sandbox/drivers/audio/Kconfig.tas6422dac | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 59 |
```objective-c
/*
*
*/
#ifndef MPXXDTYY_H
#define MPXXDTYY_H
#include <zephyr/audio/dmic.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include "OpenPDMFilter.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MPXXDTYY_MIN_PDM_FREQ 1200000 /* 1.2MHz */
#define MPXXDTYY_MAX_PDM_FREQ 3250000 /* 3.25MHz */
struct mpxxdtyy_config {
const struct device *comm_master;
};
struct mpxxdtyy_data {
enum dmic_state state;
TPDMFilter_InitStruct pdm_filter[2];
size_t pcm_mem_size;
struct k_mem_slab *pcm_mem_slab;
};
uint16_t sw_filter_lib_init(const struct device *dev, struct dmic_cfg *cfg);
int sw_filter_lib_run(TPDMFilter_InitStruct *pdm_filter,
void *pdm_block, void *pcm_block,
size_t pdm_size, size_t pcm_size);
#if DT_ANY_INST_ON_BUS_STATUS_OKAY(i2s)
int mpxxdtyy_i2s_read(const struct device *dev, uint8_t stream, void **buffer,
size_t *size, int32_t timeout);
int mpxxdtyy_i2s_trigger(const struct device *dev, enum dmic_trigger cmd);
int mpxxdtyy_i2s_configure(const struct device *dev, struct dmic_cfg *cfg);
#endif /* DT_ANY_INST_ON_BUS_STATUS_OKAY(i2s) */
#ifdef __cplusplus
}
#endif
#endif /* MPXXDTYY_H */
``` | /content/code_sandbox/drivers/audio/mpxxdtyy.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 351 |
```unknown
# Audio Codec configuration options
menuconfig AUDIO
bool "Audio drivers"
help
Enable support for Audio
if AUDIO
#
# Audio Codec options
#
menuconfig AUDIO_CODEC
bool "Audio Codec Drivers"
help
Enable Audio Codec Driver Configuration
if AUDIO_CODEC
config AUDIO_CODEC_INIT_PRIORITY
int "Init priority"
default 60
help
Audio codec device driver initialization priority.
config AUDIO_CODEC_SHELL
bool "Audio Codec shell"
depends on SHELL
help
Enable the Audio Codec shell with Audio Codec related commands.
module = AUDIO_CODEC
module-str = audio codec
source "subsys/logging/Kconfig.template.log_config"
source "drivers/audio/Kconfig.tas6422dac"
source "drivers/audio/Kconfig.tlv320dac"
endif # AUDIO_CODEC
menuconfig AUDIO_DMIC
bool "Digital Microphone (Audio) Drivers"
help
Enable Digital Microphone Driver Configuration
if AUDIO_DMIC
config AUDIO_DMIC_INIT_PRIORITY
int "Init priority"
default 80
help
Audio Digital Microphone device driver initialization priority.
module = AUDIO_DMIC
module-str = audio_dmic
source "subsys/logging/Kconfig.template.log_config"
source "drivers/audio/Kconfig.mpxxdtyy"
source "drivers/audio/Kconfig.dmic_pdm_nrfx"
source "drivers/audio/Kconfig.dmic_mcux"
endif # AUDIO_DMIC
endif # AUDIO
``` | /content/code_sandbox/drivers/audio/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 295 |
```c
/*
*
*/
#define DT_DRV_COMPAT ti_tlv320dac
#include <errno.h>
#include <zephyr/sys/util.h>
#include <zephyr/device.h>
#include <zephyr/drivers/i2c.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/sys/util.h>
#include <zephyr/audio/codec.h>
#include "tlv320dac310x.h"
#define LOG_LEVEL CONFIG_AUDIO_CODEC_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(tlv320dac310x);
/* Digital output volume limits.
 * NOTE(review): the "* 2" suggests the volume is expressed in half-dB
 * steps (0 dB down to -78 dB) — confirm against the DAC3100 digital
 * volume register encoding.
 */
#define CODEC_OUTPUT_VOLUME_MAX 0
#define CODEC_OUTPUT_VOLUME_MIN (-78 * 2)

/* Static (devicetree-derived) codec configuration */
struct codec_driver_config {
	struct i2c_dt_spec bus;         /* control-port I2C bus */
	struct gpio_dt_spec reset_gpio; /* codec reset line */
};

/* Mutable driver state: caches the currently selected register page so
 * page-select writes are only issued when the page changes.
 */
struct codec_driver_data {
	struct reg_addr reg_addr_cache;
};

static struct codec_driver_config codec_device_config = {
	.bus = I2C_DT_SPEC_INST_GET(0),
	.reset_gpio = GPIO_DT_SPEC_INST_GET(0, reset_gpios),
};

static struct codec_driver_data codec_device_data;
static void codec_write_reg(const struct device *dev, struct reg_addr reg,
uint8_t val);
static void codec_read_reg(const struct device *dev, struct reg_addr reg,
uint8_t *val);
static void codec_soft_reset(const struct device *dev);
static int codec_configure_dai(const struct device *dev, audio_dai_cfg_t *cfg);
static int codec_configure_clocks(const struct device *dev,
struct audio_codec_cfg *cfg);
static int codec_configure_filters(const struct device *dev,
audio_dai_cfg_t *cfg);
static enum osr_multiple codec_get_osr_multiple(audio_dai_cfg_t *cfg);
static void codec_configure_output(const struct device *dev);
static int codec_set_output_volume(const struct device *dev, int vol);
#if (LOG_LEVEL >= LOG_LEVEL_DEBUG)
static void codec_read_all_regs(const struct device *dev);
#define CODEC_DUMP_REGS(dev) codec_read_all_regs((dev))
#else
#define CODEC_DUMP_REGS(dev)
#endif
/* Driver init hook: verifies the I2C bus and reset GPIO controller are
 * ready. No register access happens here; the codec is programmed in
 * codec_configure().
 */
static int codec_initialize(const struct device *dev)
{
	const struct codec_driver_config *const cfg = dev->config;

	if (!device_is_ready(cfg->bus.bus)) {
		LOG_ERR("I2C device not ready");
		return -ENODEV;
	}

	if (!gpio_is_ready_dt(&cfg->reset_gpio)) {
		LOG_ERR("GPIO device not ready");
		return -ENODEV;
	}

	return 0;
}
/* Configures the codec: releases reset, soft-resets the part, then
 * programs clocks, the digital audio interface and decimation filters,
 * and finally the analog output path.
 *
 * Only AUDIO_DAI_TYPE_I2S is supported. Returns 0 on success or a
 * negative errno (the original ignored a gpio_pin_configure_dt()
 * failure, which would leave the codec held in reset).
 */
static int codec_configure(const struct device *dev,
			   struct audio_codec_cfg *cfg)
{
	const struct codec_driver_config *const dev_cfg = dev->config;
	int ret;

	if (cfg->dai_type != AUDIO_DAI_TYPE_I2S) {
		LOG_ERR("dai_type must be AUDIO_DAI_TYPE_I2S");
		return -EINVAL;
	}

	/* Configure reset GPIO, and set the line to inactive, which will also
	 * de-assert the reset line and thus enable the codec.
	 */
	ret = gpio_pin_configure_dt(&dev_cfg->reset_gpio, GPIO_OUTPUT_INACTIVE);
	if (ret < 0) {
		LOG_ERR("Failed to configure reset GPIO (%d)", ret);
		return ret;
	}

	codec_soft_reset(dev);

	ret = codec_configure_clocks(dev, cfg);
	if (ret == 0) {
		ret = codec_configure_dai(dev, &cfg->dai_cfg);
	}
	if (ret == 0) {
		ret = codec_configure_filters(dev, &cfg->dai_cfg);
	}
	codec_configure_output(dev);

	return ret;
}
/* Enables playback: powers up both DAC channels, then unmutes them. */
static void codec_start_output(const struct device *dev)
{
	/* powerup DAC channels */
	codec_write_reg(dev, DATA_PATH_SETUP_ADDR, DAC_LR_POWERUP_DEFAULT);

	/* unmute DAC channels */
	codec_write_reg(dev, VOL_CTRL_ADDR, VOL_CTRL_UNMUTE_DEFAULT);

	CODEC_DUMP_REGS(dev);
}
/* Disables playback: mutes both DAC channels, then powers them down
 * (the reverse order of codec_start_output()).
 */
static void codec_stop_output(const struct device *dev)
{
	/* mute DAC channels */
	codec_write_reg(dev, VOL_CTRL_ADDR, VOL_CTRL_MUTE_DEFAULT);

	/* powerdown DAC channels */
	codec_write_reg(dev, DATA_PATH_SETUP_ADDR, DAC_LR_POWERDN_DEFAULT);
}
/* Mutes both DAC channels without changing their power state. */
static void codec_mute_output(const struct device *dev)
{
	/* mute DAC channels */
	codec_write_reg(dev, VOL_CTRL_ADDR, VOL_CTRL_MUTE_DEFAULT);
}
/* Unmutes both DAC channels without changing their power state. */
static void codec_unmute_output(const struct device *dev)
{
	/* unmute DAC channels */
	codec_write_reg(dev, VOL_CTRL_ADDR, VOL_CTRL_UNMUTE_DEFAULT);
}
/* Implements the codec set_property() API for output volume and mute.
 * Per-channel control is not supported; the channel argument must be
 * AUDIO_CHANNEL_ALL. Returns 0 or -EINVAL on unsupported input.
 */
static int codec_set_property(const struct device *dev,
			      audio_property_t property,
			      audio_channel_t channel,
			      audio_property_value_t val)
{
	/* individual channel control not currently supported */
	if (channel != AUDIO_CHANNEL_ALL) {
		LOG_ERR("channel %u invalid. must be AUDIO_CHANNEL_ALL",
			channel);
		return -EINVAL;
	}

	if (property == AUDIO_PROPERTY_OUTPUT_VOLUME) {
		return codec_set_output_volume(dev, val.vol);
	}

	if (property == AUDIO_PROPERTY_OUTPUT_MUTE) {
		if (val.mute) {
			codec_mute_output(dev);
		} else {
			codec_unmute_output(dev);
		}
		return 0;
	}

	return -EINVAL;
}
/* Implements the codec apply_properties() API; this driver writes
 * properties immediately in codec_set_property(), so there is nothing
 * cached to flush.
 */
static int codec_apply_properties(const struct device *dev)
{
	/* nothing to do because there is nothing cached */
	return 0;
}
/* Writes val to the paged register reg, issuing a page-select write
 * (register 0) first only when reg.page differs from the cached page.
 *
 * NOTE(review): the i2c_reg_write_byte_dt() return values are ignored;
 * a failed page-select would leave the page cache out of sync with the
 * device — consider propagating errors.
 */
static void codec_write_reg(const struct device *dev, struct reg_addr reg,
			    uint8_t val)
{
	struct codec_driver_data *const dev_data = dev->data;
	const struct codec_driver_config *const dev_cfg = dev->config;

	/* set page if different */
	if (dev_data->reg_addr_cache.page != reg.page) {
		i2c_reg_write_byte_dt(&dev_cfg->bus, 0, reg.page);
		dev_data->reg_addr_cache.page = reg.page;
	}

	i2c_reg_write_byte_dt(&dev_cfg->bus, reg.reg_addr, val);
	LOG_DBG("WR PG:%u REG:%02u VAL:0x%02x",
		reg.page, reg.reg_addr, val);
}
/* Reads the paged register reg into *val, issuing a page-select write
 * first only when reg.page differs from the cached page.
 *
 * NOTE(review): I2C return values are ignored here, mirroring
 * codec_write_reg(); on failure *val is left unmodified.
 */
static void codec_read_reg(const struct device *dev, struct reg_addr reg,
			   uint8_t *val)
{
	struct codec_driver_data *const dev_data = dev->data;
	const struct codec_driver_config *const dev_cfg = dev->config;

	/* set page if different */
	if (dev_data->reg_addr_cache.page != reg.page) {
		i2c_reg_write_byte_dt(&dev_cfg->bus, 0, reg.page);
		dev_data->reg_addr_cache.page = reg.page;
	}

	i2c_reg_read_byte_dt(&dev_cfg->bus, reg.reg_addr, val);
	LOG_DBG("RD PG:%u REG:%02u VAL:0x%02x",
		reg.page, reg.reg_addr, *val);
}
/* Issues a software reset to the DAC via the soft-reset register. */
static void codec_soft_reset(const struct device *dev)
{
	/* soft reset the DAC */
	codec_write_reg(dev, SOFT_RESET_ADDR, SOFT_RESET_ASSERT);
}
/* Programs the audio serial interface register: I2S format, BCLK/WCLK
 * direction (output when this codec is the clock master) and word
 * length. Returns -EINVAL for unsupported sample widths.
 */
static int codec_configure_dai(const struct device *dev, audio_dai_cfg_t *cfg)
{
	uint8_t iface = IF_CTRL_IFTYPE(IF_CTRL_IFTYPE_I2S);

	/* Drive the bit/frame clocks when configured as master */
	if (cfg->i2s.options & I2S_OPT_BIT_CLK_MASTER) {
		iface |= IF_CTRL_BCLK_OUT;
	}
	if (cfg->i2s.options & I2S_OPT_FRAME_CLK_MASTER) {
		iface |= IF_CTRL_WCLK_OUT;
	}

	switch (cfg->i2s.word_size) {
	case AUDIO_PCM_WIDTH_16_BITS:
		iface |= IF_CTRL_WLEN(IF_CTRL_WLEN_16);
		break;
	case AUDIO_PCM_WIDTH_20_BITS:
		iface |= IF_CTRL_WLEN(IF_CTRL_WLEN_20);
		break;
	case AUDIO_PCM_WIDTH_24_BITS:
		iface |= IF_CTRL_WLEN(IF_CTRL_WLEN_24);
		break;
	case AUDIO_PCM_WIDTH_32_BITS:
		iface |= IF_CTRL_WLEN(IF_CTRL_WLEN_32);
		break;
	default:
		LOG_ERR("Unsupported PCM sample bit width %u",
			cfg->i2s.word_size);
		return -EINVAL;
	}

	codec_write_reg(dev, IF_CTRL1_ADDR, iface);
	return 0;
}
/* Derives and programs the DAC clock tree (NDAC, MDAC, OSR and — when
 * the codec drives BCLK — the BCLK divider) from MCLK and the requested
 * PCM rate, then sets the ~1 MHz timer clock divider.
 *
 * Constraints honored:
 * - DAC processing clock (MCLK/NDAC) <= DAC_PROC_CLK_FREQ_MAX
 * - DAC modulator clock (frame_clk_freq * OSR) within
 *   [DAC_MOD_CLK_FREQ_MIN, DAC_MOD_CLK_FREQ_MAX]
 * - OSR is a multiple of the rate-dependent OSR multiple
 *
 * Returns 0 on success, -EINVAL when no divider combination fits.
 */
static int codec_configure_clocks(const struct device *dev,
				  struct audio_codec_cfg *cfg)
{
	int dac_clk, mod_clk = 0;
	struct i2s_config *i2s;
	int osr, osr_min, osr_max;
	enum osr_multiple osr_multiple;
	int mdac, ndac, bclk_div, mclk_div;

	i2s = &cfg->dai_cfg.i2s;
	LOG_DBG("MCLK %u Hz PCM Rate: %u Hz", cfg->mclk_freq,
		i2s->frame_clk_freq);

	if (cfg->mclk_freq <= DAC_PROC_CLK_FREQ_MAX) {
		/* use MCLK frequency as the DAC processing clock */
		ndac = 1;
	} else {
		ndac = cfg->mclk_freq / DAC_PROC_CLK_FREQ_MAX;
	}
	dac_clk = cfg->mclk_freq / ndac;

	/* determine OSR Multiple based on PCM rate */
	osr_multiple = codec_get_osr_multiple(&cfg->dai_cfg);

	/*
	 * calculate MOD clock such that it is an integer multiple of
	 * cfg->i2s.frame_clk_freq and
	 * DAC_MOD_CLK_FREQ_MIN <= MOD clock <= DAC_MOD_CLK_FREQ_MAX
	 */
	osr_min = (DAC_MOD_CLK_FREQ_MIN + i2s->frame_clk_freq - 1) /
		i2s->frame_clk_freq;
	osr_max = DAC_MOD_CLK_FREQ_MAX / i2s->frame_clk_freq;

	/* round min and max values to the required multiple.
	 * Note: osr_min was previously only divided by osr_multiple
	 * (never scaled back up), which allowed the search below to pick
	 * OSR values whose modulator clock fell under DAC_MOD_CLK_FREQ_MIN.
	 */
	osr_max = (osr_max / osr_multiple) * osr_multiple;
	osr_min = DIV_ROUND_UP(osr_min, osr_multiple) * osr_multiple;

	osr = osr_max;
	while (osr >= osr_min) {
		mod_clk = i2s->frame_clk_freq * osr;

		/* calculate mdac */
		mdac = dac_clk / mod_clk;

		/* check if mdac is an integer */
		if ((mdac * mod_clk) == dac_clk) {
			/* found suitable dividers */
			break;
		}
		osr -= osr_multiple;
	}

	/* check if suitable value was found */
	if (osr < osr_min) {
		LOG_ERR("Unable to find suitable mdac and osr values");
		return -EINVAL;
	}

	LOG_DBG("Processing freq: %u Hz Modulator freq: %u Hz",
		dac_clk, mod_clk);
	LOG_DBG("NDAC: %u MDAC: %u OSR: %u", ndac, mdac, osr);

	if (i2s->options & I2S_OPT_BIT_CLK_MASTER) {
		bclk_div = osr * mdac / (i2s->word_size * 2U); /* stereo */
		if ((bclk_div * i2s->word_size * 2) != (osr * mdac)) {
			LOG_ERR("Unable to generate BCLK %u from MCLK %u",
				i2s->frame_clk_freq * i2s->word_size * 2U,
				cfg->mclk_freq);
			return -EINVAL;
		}
		LOG_DBG("I2S Master BCLKDIV: %u", bclk_div);
		codec_write_reg(dev, BCLK_DIV_ADDR,
				BCLK_DIV_POWER_UP | BCLK_DIV(bclk_div));
	}

	/* set NDAC, then MDAC, followed by OSR */
	codec_write_reg(dev, NDAC_DIV_ADDR,
			(uint8_t)(NDAC_DIV(ndac) | NDAC_POWER_UP_MASK));
	codec_write_reg(dev, MDAC_DIV_ADDR,
			(uint8_t)(MDAC_DIV(mdac) | MDAC_POWER_UP_MASK));
	codec_write_reg(dev, OSR_MSB_ADDR, (uint8_t)((osr >> 8) & OSR_MSB_MASK));
	codec_write_reg(dev, OSR_LSB_ADDR, (uint8_t)(osr & OSR_LSB_MASK));

	if (i2s->options & I2S_OPT_BIT_CLK_MASTER) {
		codec_write_reg(dev, BCLK_DIV_ADDR,
				BCLK_DIV(bclk_div) | BCLK_DIV_POWER_UP);
	}

	/* calculate MCLK divider to get ~1MHz */
	mclk_div = DIV_ROUND_UP(cfg->mclk_freq, 1000000);
	/* setup timer clock to be MCLK divided */
	codec_write_reg(dev, TIMER_MCLK_DIV_ADDR,
			TIMER_MCLK_DIV_EN_EXT | TIMER_MCLK_DIV_VAL(mclk_div));
	LOG_DBG("Timer MCLK Divider: %u", mclk_div);

	return 0;
}
/* Select the DAC processing (decimation filter) block matching the
 * configured PCM sample rate class and program it into the codec.
 * Always returns 0.
 */
static int codec_configure_filters(const struct device *dev,
				   audio_dai_cfg_t *cfg)
{
	uint32_t rate = cfg->i2s.frame_clk_freq;
	enum proc_block blk;

	/* Higher sample rates require the lighter decimation filters. */
	if (rate >= AUDIO_PCM_RATE_192K) {
		blk = PRB_P18_DECIMATION_C;
		LOG_INF("PCM Rate: %u Filter C PRB P18 selected", rate);
	} else if (rate >= AUDIO_PCM_RATE_96K) {
		blk = PRB_P10_DECIMATION_B;
		LOG_INF("PCM Rate: %u Filter B PRB P10 selected", rate);
	} else {
		blk = PRB_P25_DECIMATION_A;
		LOG_INF("PCM Rate: %u Filter A PRB P25 selected", rate);
	}

	codec_write_reg(dev, PROC_BLK_SEL_ADDR, PROC_BLK_SEL(blk));

	return 0;
}
/* Return the OSR step size to use when searching for oversampling
 * ratios: faster PCM rates leave less modulator-clock headroom and so
 * use a smaller OSR granularity.
 */
static enum osr_multiple codec_get_osr_multiple(audio_dai_cfg_t *cfg)
{
	uint32_t rate = cfg->i2s.frame_clk_freq;
	enum osr_multiple multiple;

	if (rate >= AUDIO_PCM_RATE_192K) {
		multiple = OSR_MULTIPLE_2;
	} else if (rate >= AUDIO_PCM_RATE_96K) {
		multiple = OSR_MULTIPLE_4;
	} else {
		multiple = OSR_MULTIPLE_8;
	}

	LOG_INF("PCM Rate: %u OSR Multiple: %u", rate, multiple);

	return multiple;
}
/* One-time analog output-path setup: common-mode voltage, pop removal,
 * DAC-to-headphone routing, default analog volume, line-out mode,
 * unmute, and finally headphone driver power-up. The order of the
 * register writes follows the codec's recommended bring-up sequence —
 * do not reorder.
 */
static void codec_configure_output(const struct device *dev)
{
	uint8_t val;

	/*
	 * set common mode voltage to 1.65V (half of AVDD)
	 * AVDD is typically 3.3V
	 */
	codec_read_reg(dev, HEADPHONE_DRV_ADDR, &val);
	val &= ~HEADPHONE_DRV_CM_MASK;
	/* HEADPHONE_DRV_RESERVED: reserved bits that must stay set */
	val |= HEADPHONE_DRV_CM(CM_VOLTAGE_1P65) | HEADPHONE_DRV_RESERVED;
	codec_write_reg(dev, HEADPHONE_DRV_ADDR, val);

	/* enable pop removal on power down/up */
	codec_read_reg(dev, HP_OUT_POP_RM_ADDR, &val);
	codec_write_reg(dev, HP_OUT_POP_RM_ADDR, val | HP_OUT_POP_RM_ENABLE);

	/* route DAC output to Headphone */
	val = OUTPUT_ROUTING_HPL | OUTPUT_ROUTING_HPR;
	codec_write_reg(dev, OUTPUT_ROUTING_ADDR, val);

	/* enable volume control on Headphone out */
	codec_write_reg(dev, HPL_ANA_VOL_CTRL_ADDR,
			HPX_ANA_VOL(HPX_ANA_VOL_DEFAULT));
	codec_write_reg(dev, HPR_ANA_VOL_CTRL_ADDR,
			HPX_ANA_VOL(HPX_ANA_VOL_DEFAULT));

	/* set headphone outputs as line-out */
	codec_write_reg(dev, HEADPHONE_DRV_CTRL_ADDR, HEADPHONE_DRV_LINEOUT);

	/* unmute headphone drivers */
	codec_write_reg(dev, HPL_DRV_GAIN_CTRL_ADDR, HPX_DRV_UNMUTE);
	codec_write_reg(dev, HPR_DRV_GAIN_CTRL_ADDR, HPX_DRV_UNMUTE);

	/* power up headphone drivers (last, after routing/volume set) */
	codec_read_reg(dev, HEADPHONE_DRV_ADDR, &val);
	val |= HEADPHONE_DRV_POWERUP | HEADPHONE_DRV_RESERVED;
	codec_write_reg(dev, HEADPHONE_DRV_ADDR, val);
}
/**
 * @brief Set the headphone analog output volume.
 *
 * @param dev Codec device.
 * @param vol Volume in 0.5 dB steps (non-positive values attenuate),
 *            within [CODEC_OUTPUT_VOLUME_MIN, CODEC_OUTPUT_VOLUME_MAX].
 *
 * @return 0 on success, -EINVAL if @p vol is out of range.
 */
static int codec_set_output_volume(const struct device *dev, int vol)
{
	/* Register codes for the non-linear low end of the analog volume
	 * range. static const: read-only, no per-call stack copy.
	 */
	static const uint8_t vol_array[] = {
		107, 108, 110, 113, 116, 120, 125, 128, 132, 138, 144
	};
	uint8_t vol_val;
	size_t vol_index;

	if ((vol > CODEC_OUTPUT_VOLUME_MAX) ||
	    (vol < CODEC_OUTPUT_VOLUME_MIN)) {
		/* vol is in half-dB units. Use division/modulo rather than
		 * `vol >> 1` so negative values format correctly (right
		 * shift of a negative int is implementation-defined and
		 * rounds the wrong way for this message).
		 */
		LOG_ERR("Invalid volume %d.%d dB",
			vol / 2, (vol % 2 != 0) ? 5 : 0);
		return -EINVAL;
	}

	/* remove sign */
	vol = -vol;

	/* if volume is near floor, set minimum */
	if (vol > HPX_ANA_VOL_FLOOR) {
		vol_val = HPX_ANA_VOL_FLOOR;
	} else if (vol > HPX_ANA_VOL_LOW_THRESH) {
		/* look up the register code for the low-volume region */
		for (vol_index = 0; vol_index < ARRAY_SIZE(vol_array); vol_index++) {
			if (vol_array[vol_index] >= vol) {
				break;
			}
		}
		vol_val = HPX_ANA_VOL_LOW_THRESH + vol_index + 1;
	} else {
		/* linear region: the attenuation value maps directly */
		vol_val = (uint8_t)vol;
	}

	codec_write_reg(dev, HPL_ANA_VOL_CTRL_ADDR, HPX_ANA_VOL(vol_val));
	codec_write_reg(dev, HPR_ANA_VOL_CTRL_ADDR, HPX_ANA_VOL(vol_val));

	return 0;
}
#if (LOG_LEVEL >= LOG_LEVEL_DEBUG)
/* Debug helper: read back every codec register this driver touches.
 * The values are discarded here; presumably codec_read_reg() logs each
 * access — TODO confirm against its implementation.
 */
static void codec_read_all_regs(const struct device *dev)
{
	uint8_t val;

	codec_read_reg(dev, SOFT_RESET_ADDR, &val);
	codec_read_reg(dev, NDAC_DIV_ADDR, &val);
	codec_read_reg(dev, MDAC_DIV_ADDR, &val);
	codec_read_reg(dev, OSR_MSB_ADDR, &val);
	codec_read_reg(dev, OSR_LSB_ADDR, &val);
	codec_read_reg(dev, IF_CTRL1_ADDR, &val);
	codec_read_reg(dev, BCLK_DIV_ADDR, &val);
	codec_read_reg(dev, OVF_FLAG_ADDR, &val);
	codec_read_reg(dev, PROC_BLK_SEL_ADDR, &val);
	codec_read_reg(dev, DATA_PATH_SETUP_ADDR, &val);
	codec_read_reg(dev, VOL_CTRL_ADDR, &val);
	codec_read_reg(dev, L_DIG_VOL_CTRL_ADDR, &val);
	codec_read_reg(dev, DRC_CTRL1_ADDR, &val);
	codec_read_reg(dev, L_BEEP_GEN_ADDR, &val);
	codec_read_reg(dev, R_BEEP_GEN_ADDR, &val);
	codec_read_reg(dev, BEEP_LEN_MSB_ADDR, &val);
	codec_read_reg(dev, BEEP_LEN_MIB_ADDR, &val);
	codec_read_reg(dev, BEEP_LEN_LSB_ADDR, &val);
	codec_read_reg(dev, HEADPHONE_DRV_ADDR, &val);
	codec_read_reg(dev, HP_OUT_POP_RM_ADDR, &val);
	codec_read_reg(dev, OUTPUT_ROUTING_ADDR, &val);
	codec_read_reg(dev, HPL_ANA_VOL_CTRL_ADDR, &val);
	codec_read_reg(dev, HPR_ANA_VOL_CTRL_ADDR, &val);
	codec_read_reg(dev, HPL_DRV_GAIN_CTRL_ADDR, &val);
	codec_read_reg(dev, HPR_DRV_GAIN_CTRL_ADDR, &val);
	codec_read_reg(dev, HEADPHONE_DRV_CTRL_ADDR, &val);
	codec_read_reg(dev, TIMER_MCLK_DIV_ADDR, &val);
}
#endif
/* Audio codec API vtable; the handlers are defined earlier in this file. */
static const struct audio_codec_api codec_driver_api = {
	.configure = codec_configure,
	.start_output = codec_start_output,
	.stop_output = codec_stop_output,
	.set_property = codec_set_property,
	.apply_properties = codec_apply_properties,
};

/* Single codec instance, registered at POST_KERNEL. */
DEVICE_DT_INST_DEFINE(0, codec_initialize, NULL, &codec_device_data,
		      &codec_device_config, POST_KERNEL,
		      CONFIG_AUDIO_CODEC_INIT_PRIORITY, &codec_driver_api);
``` | /content/code_sandbox/drivers/audio/tlv320dac310x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,304 |
```c
/*
*
*/
#include <errno.h>

#include <zephyr/drivers/ptp_clock.h>
#include <zephyr/internal/syscall_handler.h>
#ifdef CONFIG_USERSPACE
/**
 * @brief Userspace verification handler for ptp_clock_get().
 *
 * Verifies that the caller may invoke the driver's get() API and may
 * write @p tm, runs the kernel implementation against a kernel-side
 * buffer, and copies the result out to user memory.
 *
 * @return 0 on success, the driver's error code if the read fails, or
 *         -EFAULT if the copy-out to user memory fails.
 */
int z_vrfy_ptp_clock_get(const struct device *dev,
			 struct net_ptp_time *tm)
{
	struct net_ptp_time ptp_time;
	int ret;

	K_OOPS(K_SYSCALL_DRIVER_PTP_CLOCK(dev, get));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(tm, sizeof(struct net_ptp_time)));

	ret = z_impl_ptp_clock_get((const struct device *)dev, &ptp_time);
	if (ret != 0) {
		/* Propagate the driver error instead of masking it as
		 * success (returning 0 here silently swallowed failures).
		 */
		return ret;
	}

	if (k_usermode_to_copy((void *)tm, &ptp_time, sizeof(ptp_time)) != 0) {
		/* Copy-out failed: report a fault, not success. */
		return -EFAULT;
	}

	return 0;
}
#include <zephyr/syscalls/ptp_clock_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/drivers/ptp_clock/ptp_clock.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 195 |
```c
/*
*
*/
#include <zephyr/audio/dmic.h>
#include <zephyr/drivers/clock_control/nrf_clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <soc.h>
#include <nrfx_pdm.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(dmic_nrfx_pdm, CONFIG_AUDIO_DMIC_LOG_LEVEL);
/* Per-instance runtime state of the nRF PDM DMIC driver. */
struct dmic_nrfx_pdm_drv_data {
	struct onoff_manager *clk_mgr;	/* manager of the required HF clock */
	struct onoff_client clk_cli;	/* client used to request that clock */
	struct k_mem_slab *mem_slab;	/* slab audio buffers come from */
	uint32_t block_size;		/* bytes per audio block */
	struct k_msgq rx_queue;		/* filled buffers awaiting read() */
	bool request_clock : 1;		/* clock must be requested before start */
	bool configured : 1;		/* nrfx PDM has been initialized */
	volatile bool active;		/* capture in progress */
	volatile bool stopping;		/* stop requested, shutdown pending */
};

/* Static, devicetree-derived configuration of one PDM instance. */
struct dmic_nrfx_pdm_drv_cfg {
	nrfx_pdm_event_handler_t event_handler;
	nrfx_pdm_config_t nrfx_def_cfg;
	const struct pinctrl_dev_config *pcfg;
	/* PDM master clock source, from the "clock-source" DT property. */
	enum clock_source {
		PCLK32M,	/* 32 MHz peripheral clock (HFINT) */
		PCLK32M_HFXO,	/* 32 MHz peripheral clock from HFXO */
		ACLK		/* audio PLL clock */
	} clk_src;
};
/* Return an audio buffer to the driver's memory slab. */
static void free_buffer(struct dmic_nrfx_pdm_drv_data *drv_data, void *buffer)
{
	k_mem_slab_free(drv_data->mem_slab, buffer);
	LOG_DBG("Freed buffer %p", buffer);
}
/* nrfx PDM event handler (invoked from the PDM ISR). Supplies fresh
 * buffers to the peripheral on request, queues released (filled) buffers
 * for dmic_nrfx_pdm_read(), and completes a pending stop by releasing
 * the HF clock request. Any unrecoverable condition (no free buffer,
 * full RX queue) triggers a stop of the capture.
 */
static void event_handler(const struct device *dev, const nrfx_pdm_evt_t *evt)
{
	struct dmic_nrfx_pdm_drv_data *drv_data = dev->data;
	int ret;
	bool stop = false;

	if (evt->buffer_requested) {
		void *buffer;
		nrfx_err_t err;

		ret = k_mem_slab_alloc(drv_data->mem_slab, &buffer, K_NO_WAIT);
		if (ret < 0) {
			LOG_ERR("Failed to allocate buffer: %d", ret);
			stop = true;
		} else {
			/* block_size / 2: the nrfx buffer length is
			 * presumably counted in 16-bit samples (only 16-bit
			 * width is accepted in configure()) — confirm against
			 * the nrfx_pdm API.
			 */
			err = nrfx_pdm_buffer_set(buffer,
						  drv_data->block_size / 2);
			if (err != NRFX_SUCCESS) {
				LOG_ERR("Failed to set buffer: 0x%08x", err);
				stop = true;
			}
		}
	}
	if (drv_data->stopping) {
		/* Shutdown path: discard released buffers and, on the first
		 * pass, drop the clock request if one was made.
		 */
		if (evt->buffer_released) {
			free_buffer(drv_data, evt->buffer_released);
		}
		if (drv_data->active) {
			drv_data->active = false;
			if (drv_data->request_clock) {
				(void)onoff_release(drv_data->clk_mgr);
			}
		}
	} else if (evt->buffer_released) {
		ret = k_msgq_put(&drv_data->rx_queue,
				 &evt->buffer_released,
				 K_NO_WAIT);
		if (ret < 0) {
			/* Consumer not keeping up: drop buffer, stop capture. */
			LOG_ERR("No room in RX queue");
			stop = true;
			free_buffer(drv_data, evt->buffer_released);
		} else {
			LOG_DBG("Queued buffer %p", evt->buffer_released);
		}
	}
	if (stop) {
		drv_data->stopping = true;
		nrfx_pdm_stop();
	}
}
/* Evaluate a candidate (PDM clock frequency, decimation ratio) pair
 * against the best match found so far. When the candidate's PCM rate is
 * closer to req_rate than *best_diff, the best_* outputs are updated and
 * true is returned; otherwise nothing changes and false is returned.
 */
static bool is_better(uint32_t freq,
		      uint8_t ratio,
		      uint32_t req_rate,
		      uint32_t *best_diff,
		      uint32_t *best_rate,
		      uint32_t *best_freq)
{
	uint32_t actual = freq / ratio;
	uint32_t delta = (actual >= req_rate) ? (actual - req_rate)
					      : (req_rate - actual);

	LOG_DBG("Freq %u, ratio %u, act_rate %u", freq, ratio, actual);

	if (delta >= *best_diff) {
		return false;
	}

	*best_diff = delta;
	*best_rate = actual;
	*best_freq = freq;

	return true;
}
/* For one decimation ratio, scan the PDM clock frequencies the hardware
 * can generate and record (via is_better()) the candidate whose PCM rate
 * is closest to the requested one; config->clock_freq is updated when a
 * better candidate is found. Returns true if this call improved on the
 * incoming *best_diff.
 */
static bool check_pdm_frequencies(const struct dmic_nrfx_pdm_drv_cfg *drv_cfg,
				  nrfx_pdm_config_t *config,
				  const struct dmic_cfg *pdm_cfg,
				  uint8_t ratio,
				  uint32_t *best_diff,
				  uint32_t *best_rate,
				  uint32_t *best_freq)
{
	uint32_t req_rate = pdm_cfg->streams[0].pcm_rate;
	bool better_found = false;

	if (IS_ENABLED(CONFIG_SOC_SERIES_NRF53X)) {
		/* nRF53: the PDM clock is a fractional division of either
		 * the audio PLL (ACLK) or the fixed 32 MHz clock.
		 */
		const uint32_t src_freq =
			(NRF_PDM_HAS_MCLKCONFIG && drv_cfg->clk_src == ACLK)
			/* The DMIC_NRFX_PDM_DEVICE() macro contains build
			 * assertions that make sure that the ACLK clock
			 * source is only used when it is available and only
			 * with the "hfclkaudio-frequency" property defined,
			 * but the default value of 0 here needs to be used
			 * to prevent compilation errors when the property is
			 * not defined (this expression will be eventually
			 * optimized away then).
			 */
			? DT_PROP_OR(DT_NODELABEL(clock), hfclkaudio_frequency,
				     0)
			: 32*1000*1000UL;
		uint32_t req_freq = req_rate * ratio;
		/* As specified in the nRF5340 PS:
		 *
		 * PDMCLKCTRL = 4096 * floor(f_pdm * 1048576 /
		 *                           (f_source + f_pdm / 2))
		 * f_actual = f_source / floor(1048576 * 4096 / PDMCLKCTRL)
		 */
		uint32_t clk_factor = (uint32_t)((req_freq * 1048576ULL) /
						 (src_freq + req_freq / 2));
		uint32_t act_freq = src_freq / (1048576 / clk_factor);

		if (act_freq >= pdm_cfg->io.min_pdm_clk_freq &&
		    act_freq <= pdm_cfg->io.max_pdm_clk_freq &&
		    is_better(act_freq, ratio, req_rate,
			      best_diff, best_rate, best_freq)) {
			config->clock_freq = clk_factor * 4096;
			better_found = true;
		}
	} else { /* -> !IS_ENABLED(CONFIG_SOC_SERIES_NRF53X)) */
		/* Other SoCs: only a fixed menu of PDM clock frequencies
		 * exists; entries are kept in ascending order (relied on by
		 * the early-break logic below).
		 */
		static const struct {
			uint32_t freq_val;
			nrf_pdm_freq_t freq_enum;
		} freqs[] = {
			{ 1000000, NRF_PDM_FREQ_1000K },
			{ 1032000, NRF_PDM_FREQ_1032K },
			{ 1067000, NRF_PDM_FREQ_1067K },
#if defined(PDM_PDMCLKCTRL_FREQ_1231K)
			{ 1231000, NRF_PDM_FREQ_1231K },
#endif
#if defined(PDM_PDMCLKCTRL_FREQ_1280K)
			{ 1280000, NRF_PDM_FREQ_1280K },
#endif
#if defined(PDM_PDMCLKCTRL_FREQ_1333K)
			{ 1333000, NRF_PDM_FREQ_1333K }
#endif
		};

		for (int i = 0; i < ARRAY_SIZE(freqs); ++i) {
			uint32_t freq_val = freqs[i].freq_val;

			if (freq_val < pdm_cfg->io.min_pdm_clk_freq) {
				continue;
			}
			if (freq_val > pdm_cfg->io.max_pdm_clk_freq) {
				break;
			}

			if (is_better(freq_val, ratio, req_rate,
				      best_diff, best_rate, best_freq)) {
				config->clock_freq = freqs[i].freq_enum;

				/* Stop if an exact rate match is found. */
				if (*best_diff == 0) {
					return true;
				}

				better_found = true;
			}

			/* Since frequencies are in ascending order, stop
			 * checking next ones for the current ratio after
			 * resulting PCM rate goes above the one requested.
			 */
			if ((freq_val / ratio) > req_rate) {
				break;
			}
		}
	}

	return better_found;
}
/* Finds clock settings that give the PCM output rate closest to that requested,
 * taking into account the hardware limitations.
 *
 * On success, config->clock_freq (and config->ratio where the hardware
 * supports ratio selection) holds the winning configuration. Returns
 * false when no candidate lies within the min/max PDM clock limits given
 * in pdm_cfg->io.
 */
static bool find_suitable_clock(const struct dmic_nrfx_pdm_drv_cfg *drv_cfg,
				nrfx_pdm_config_t *config,
				const struct dmic_cfg *pdm_cfg)
{
	uint32_t best_diff = UINT32_MAX;
	uint32_t best_rate;
	uint32_t best_freq;

#if NRF_PDM_HAS_RATIO_CONFIG
	/* Try each supported clock-to-output decimation ratio. */
	static const struct {
		uint8_t ratio_val;
		nrf_pdm_ratio_t ratio_enum;
	} ratios[] = {
		{ 64, NRF_PDM_RATIO_64X },
		{ 80, NRF_PDM_RATIO_80X }
	};

	for (int r = 0; best_diff != 0 && r < ARRAY_SIZE(ratios); ++r) {
		uint8_t ratio = ratios[r].ratio_val;

		if (check_pdm_frequencies(drv_cfg, config, pdm_cfg, ratio,
					  &best_diff, &best_rate, &best_freq)) {
			config->ratio = ratios[r].ratio_enum;

			/* Look no further if a configuration giving the exact
			 * PCM rate is found.
			 */
			if (best_diff == 0) {
				break;
			}
		}
	}
#else
	uint8_t ratio = 64;

	(void)check_pdm_frequencies(drv_cfg, config, pdm_cfg, ratio,
				    &best_diff, &best_rate, &best_freq);
#endif

	/* best_diff unchanged means no candidate passed the limits. */
	if (best_diff == UINT32_MAX) {
		return false;
	}

	LOG_INF("PDM clock frequency: %u, actual PCM rate: %u",
		best_freq, best_rate);

	return true;
}
/* Implementation of the dmic_configure() API call. Validates the
 * requested channel map, stream count and sample format, selects the
 * closest achievable PDM clock configuration, and (re)initializes the
 * nrfx PDM driver. A zero PCM rate or width deconfigures the device.
 *
 * Returns 0 on success, -EBUSY while capture is active, -EINVAL for an
 * unsupported configuration, -EIO if nrfx initialization fails.
 */
static int dmic_nrfx_pdm_configure(const struct device *dev,
				   struct dmic_cfg *config)
{
	struct dmic_nrfx_pdm_drv_data *drv_data = dev->data;
	const struct dmic_nrfx_pdm_drv_cfg *drv_cfg = dev->config;
	struct pdm_chan_cfg *channel = &config->channel;
	struct pcm_stream_cfg *stream = &config->streams[0];
	uint32_t def_map, alt_map;
	nrfx_pdm_config_t nrfx_cfg;
	nrfx_err_t err;

	if (drv_data->active) {
		LOG_ERR("Cannot configure device while it is active");
		return -EBUSY;
	}

	/*
	 * This device supports only one stream and can be configured to return
	 * 16-bit samples for two channels (Left+Right samples) or one channel
	 * (only Left samples). Left and Right samples can be optionally swapped
	 * by changing the PDM_CLK edge on which the sampling is done
	 * Provide the valid channel maps for both the above configurations
	 * (to inform the requester what is available) and check if what is
	 * requested can be actually configured.
	 */
	if (channel->req_num_chan == 1) {
		def_map = dmic_build_channel_map(0, 0, PDM_CHAN_LEFT);
		alt_map = dmic_build_channel_map(0, 0, PDM_CHAN_RIGHT);

		channel->act_num_chan = 1;
	} else {
		def_map = dmic_build_channel_map(0, 0, PDM_CHAN_LEFT)
			| dmic_build_channel_map(1, 0, PDM_CHAN_RIGHT);
		alt_map = dmic_build_channel_map(0, 0, PDM_CHAN_RIGHT)
			| dmic_build_channel_map(1, 0, PDM_CHAN_LEFT);

		channel->act_num_chan = 2;
	}

	channel->act_num_streams = 1;
	channel->act_chan_map_hi = 0;
	channel->act_chan_map_lo = def_map;

	if (channel->req_num_streams != 1 ||
	    channel->req_num_chan > 2 ||
	    channel->req_num_chan < 1 ||
	    (channel->req_chan_map_lo != def_map &&
	     channel->req_chan_map_lo != alt_map) ||
	    channel->req_chan_map_hi != channel->act_chan_map_hi) {
		LOG_ERR("Requested configuration is not supported");
		return -EINVAL;
	}

	/* If either rate or width is 0, the stream is to be disabled. */
	if (stream->pcm_rate == 0 || stream->pcm_width == 0) {
		if (drv_data->configured) {
			nrfx_pdm_uninit();
			drv_data->configured = false;
		}

		return 0;
	}

	if (stream->pcm_width != 16) {
		LOG_ERR("Only 16-bit samples are supported");
		return -EINVAL;
	}

	nrfx_cfg = drv_cfg->nrfx_def_cfg;
	nrfx_cfg.mode = channel->req_num_chan == 1
			? NRF_PDM_MODE_MONO
			: NRF_PDM_MODE_STEREO;
	/* The alternate map swaps L/R by sampling on the other clock edge. */
	nrfx_cfg.edge = channel->req_chan_map_lo == def_map
			? NRF_PDM_EDGE_LEFTFALLING
			: NRF_PDM_EDGE_LEFTRISING;
#if NRF_PDM_HAS_MCLKCONFIG
	nrfx_cfg.mclksrc = drv_cfg->clk_src == ACLK
			   ? NRF_PDM_MCLKSRC_ACLK
			   : NRF_PDM_MCLKSRC_PCLK32M;
#endif
	if (!find_suitable_clock(drv_cfg, &nrfx_cfg, config)) {
		LOG_ERR("Cannot find suitable PDM clock configuration.");
		return -EINVAL;
	}

	/* Reconfiguration requires a full uninit/init cycle. */
	if (drv_data->configured) {
		nrfx_pdm_uninit();
		drv_data->configured = false;
	}

	err = nrfx_pdm_init(&nrfx_cfg, drv_cfg->event_handler);
	if (err != NRFX_SUCCESS) {
		LOG_ERR("Failed to initialize PDM: 0x%08x", err);
		return -EIO;
	}

	drv_data->block_size = stream->block_size;
	drv_data->mem_slab = stream->mem_slab;

	/* Unless the PCLK32M source is used with the HFINT oscillator
	 * (which is always available without any additional actions),
	 * it is required to request the proper clock to be running
	 * before starting the transfer itself.
	 */
	drv_data->request_clock = (drv_cfg->clk_src != PCLK32M);
	drv_data->configured = true;

	return 0;
}
/* Start PDM reception. On failure, release the HF clock (if this
 * instance requested one) and mark the driver inactive.
 * Returns 0 on success, -EIO otherwise.
 */
static int start_transfer(struct dmic_nrfx_pdm_drv_data *drv_data)
{
	nrfx_err_t err = nrfx_pdm_start();

	if (err == NRFX_SUCCESS) {
		return 0;
	}

	LOG_ERR("Failed to start PDM: 0x%08x", err);

	/* Undo the preparations made in trigger_start(). */
	if (drv_data->request_clock) {
		(void)onoff_release(drv_data->clk_mgr);
	}
	drv_data->active = false;

	return -EIO;
}
/* Notification from the on/off clock service: the requested HF clock is
 * now running, so the deferred capture can actually be started.
 */
static void clock_started_callback(struct onoff_manager *mgr,
				   struct onoff_client *cli,
				   uint32_t state,
				   int res)
{
	struct dmic_nrfx_pdm_drv_data *drv_data =
		CONTAINER_OF(cli, struct dmic_nrfx_pdm_drv_data, clk_cli);

	/* A STOP trigger may have arrived while the clock was starting up;
	 * in that case just hand the clock back instead of starting.
	 */
	if (drv_data->active) {
		(void)start_transfer(drv_data);
	} else {
		(void)onoff_release(drv_data->clk_mgr);
	}
}
/* Begin capture. When a specific HF clock is needed, request it first
 * and let clock_started_callback() start the transfer once it is
 * running; otherwise start the transfer immediately.
 */
static int trigger_start(const struct device *dev)
{
	struct dmic_nrfx_pdm_drv_data *drv_data = dev->data;
	int err;

	drv_data->active = true;

	if (!drv_data->request_clock) {
		/* No clock prerequisite: start right away. */
		return start_transfer(drv_data);
	}

	sys_notify_init_callback(&drv_data->clk_cli.notify,
				 clock_started_callback);
	err = onoff_request(drv_data->clk_mgr, &drv_data->clk_cli);
	if (err < 0) {
		drv_data->active = false;
		LOG_ERR("Failed to request clock: %d", err);
		return -EIO;
	}

	return 0;
}
/* Implementation of the dmic_trigger() API: START/RELEASE begin a
 * capture (device must be configured), STOP/PAUSE request a graceful
 * stop of an active capture.
 */
static int dmic_nrfx_pdm_trigger(const struct device *dev,
				 enum dmic_trigger cmd)
{
	struct dmic_nrfx_pdm_drv_data *drv_data = dev->data;

	switch (cmd) {
	case DMIC_TRIGGER_PAUSE:
	case DMIC_TRIGGER_STOP:
		if (drv_data->active) {
			/* Shutdown completes in event_handler(). */
			drv_data->stopping = true;
			nrfx_pdm_stop();
		}
		break;

	case DMIC_TRIGGER_RELEASE:
	case DMIC_TRIGGER_START:
		if (!drv_data->configured) {
			LOG_ERR("Device is not configured");
			return -EIO;
		}
		if (!drv_data->active) {
			drv_data->stopping = false;
			return trigger_start(dev);
		}
		break;

	default:
		LOG_ERR("Invalid command: %d", cmd);
		return -EINVAL;
	}

	return 0;
}
/* Implementation of the dmic_read() API: fetch the next captured audio
 * block from the RX queue, waiting up to @p timeout milliseconds. The
 * caller owns the returned buffer and must free it to the memory slab.
 */
static int dmic_nrfx_pdm_read(const struct device *dev,
			      uint8_t stream,
			      void **buffer, size_t *size, int32_t timeout)
{
	struct dmic_nrfx_pdm_drv_data *drv_data = dev->data;
	int err;

	ARG_UNUSED(stream);

	if (!drv_data->configured) {
		LOG_ERR("Device is not configured");
		return -EIO;
	}

	err = k_msgq_get(&drv_data->rx_queue, buffer, SYS_TIMEOUT_MS(timeout));
	if (err == 0) {
		LOG_DBG("Released buffer %p", *buffer);
		/* All blocks share the size set at configuration time. */
		*size = drv_data->block_size;
	} else {
		LOG_ERR("No audio data to be read");
	}

	return err;
}
/* Resolve the on/off manager of the clock this instance depends on:
 * the audio PLL (HFCLKAUDIO) when the ACLK source is selected and the
 * SoC provides one, otherwise the regular HF clock.
 */
static void init_clock_manager(const struct device *dev)
{
	struct dmic_nrfx_pdm_drv_data *drv_data = dev->data;
	clock_control_subsys_t subsys;

#if NRF_CLOCK_HAS_HFCLKAUDIO
	const struct dmic_nrfx_pdm_drv_cfg *drv_cfg = dev->config;

	if (drv_cfg->clk_src == ACLK) {
		subsys = CLOCK_CONTROL_NRF_SUBSYS_HFAUDIO;
	} else
#endif
	{
		subsys = CLOCK_CONTROL_NRF_SUBSYS_HF;
	}

	drv_data->clk_mgr = z_nrf_clock_control_get_onoff(subsys);
	__ASSERT_NO_MSG(drv_data->clk_mgr != NULL);
}
/* DMIC API vtable for this driver. */
static const struct _dmic_ops dmic_ops = {
	.configure = dmic_nrfx_pdm_configure,
	.trigger = dmic_nrfx_pdm_trigger,
	.read = dmic_nrfx_pdm_read,
};
/* Devicetree node and clock-source accessors for PDM instance "idx". */
#define PDM(idx) DT_NODELABEL(pdm##idx)
#define PDM_CLK_SRC(idx) DT_STRING_TOKEN(PDM(idx), clock_source)

/* Instantiate one PDM DMIC device: init function (IRQ connection,
 * pinctrl, RX queue, clock manager), nrfx event-handler trampoline,
 * static config/data objects and the device definition. The build
 * assertions reject an ACLK clock source on configurations that cannot
 * provide it.
 */
#define PDM_NRFX_DEVICE(idx)						     \
	static void *rx_msgs##idx[DT_PROP(PDM(idx), queue_size)];	     \
	static struct dmic_nrfx_pdm_drv_data dmic_nrfx_pdm_data##idx;	     \
	static int pdm_nrfx_init##idx(const struct device *dev)		     \
	{								     \
		IRQ_CONNECT(DT_IRQN(PDM(idx)), DT_IRQ(PDM(idx), priority),   \
			    nrfx_isr, nrfx_pdm_irq_handler, 0);		     \
		const struct dmic_nrfx_pdm_drv_cfg *drv_cfg = dev->config;   \
		int err = pinctrl_apply_state(drv_cfg->pcfg,		     \
					      PINCTRL_STATE_DEFAULT);	     \
		if (err < 0) {						     \
			return err;					     \
		}							     \
		k_msgq_init(&dmic_nrfx_pdm_data##idx.rx_queue,		     \
			    (char *)rx_msgs##idx, sizeof(void *),	     \
			    ARRAY_SIZE(rx_msgs##idx));			     \
		init_clock_manager(dev);				     \
		return 0;						     \
	}								     \
	static void event_handler##idx(const nrfx_pdm_evt_t *evt)	     \
	{								     \
		event_handler(DEVICE_DT_GET(PDM(idx)), evt);		     \
	}								     \
	PINCTRL_DT_DEFINE(PDM(idx));					     \
	static const struct dmic_nrfx_pdm_drv_cfg dmic_nrfx_pdm_cfg##idx = { \
		.event_handler = event_handler##idx,			     \
		.nrfx_def_cfg =	NRFX_PDM_DEFAULT_CONFIG(0, 0),		     \
		.nrfx_def_cfg.skip_gpio_cfg = true,			     \
		.nrfx_def_cfg.skip_psel_cfg = true,			     \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(PDM(idx)),		     \
		.clk_src = PDM_CLK_SRC(idx),				     \
	};								     \
	BUILD_ASSERT(PDM_CLK_SRC(idx) != ACLK || NRF_PDM_HAS_MCLKCONFIG,     \
		"Clock source ACLK is not available.");			     \
	BUILD_ASSERT(PDM_CLK_SRC(idx) != ACLK ||			     \
		     DT_NODE_HAS_PROP(DT_NODELABEL(clock),		     \
				      hfclkaudio_frequency),		     \
		"Clock source ACLK requires the hfclkaudio-frequency "	     \
		"property to be defined in the nordic,nrf-clock node.");     \
	DEVICE_DT_DEFINE(PDM(idx), pdm_nrfx_init##idx, NULL,		     \
			 &dmic_nrfx_pdm_data##idx, &dmic_nrfx_pdm_cfg##idx,  \
			 POST_KERNEL, CONFIG_AUDIO_DMIC_INIT_PRIORITY,	     \
			 &dmic_ops);

/* Existing SoCs only have one PDM instance. */
PDM_NRFX_DEVICE(0);
``` | /content/code_sandbox/drivers/audio/dmic_nrfx_pdm.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,805 |
```unknown
config PTP_CLOCK_NXP_ENET
bool "NXP ENET PTP Clock driver"
default y
depends on DT_HAS_NXP_ENET_PTP_CLOCK_ENABLED && NET_L2_PTP
depends on ETH_NXP_ENET
help
Enable NXP ENET PTP clock support.
``` | /content/code_sandbox/drivers/ptp_clock/Kconfig.nxp_enet | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 67 |
```unknown
config PTP_CLOCK
bool "Precision Time Protocol (PTP) Clock drivers"
help
Enable options for Precision Time Protocol Clock drivers.
if PTP_CLOCK
source "drivers/ptp_clock/Kconfig.nxp_enet"
config PTP_CLOCK_INIT_PRIORITY
int "Init priority"
default 75
help
PTP Clock device driver initialization priority
endif # PTP_CLOCK
``` | /content/code_sandbox/drivers/ptp_clock/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 83 |
```unknown
# Atmel SAM flash driver config
config SOC_FLASH_SAM
bool "Atmel SAM flash driver"
default y
depends on DT_HAS_ATMEL_SAM_FLASH_CONTROLLER_ENABLED
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
select MPU_ALLOW_FLASH_WRITE if ARM_MPU
help
Enable the Atmel SAM series internal flash driver.
``` | /content/code_sandbox/drivers/flash/Kconfig.sam | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 83 |
```c
/*
*
* Based on a commit to drivers/ethernet/eth_mcux.c which was:
*
*/
#define DT_DRV_COMPAT nxp_enet_ptp_clock
#include <zephyr/drivers/ptp_clock.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/ethernet/eth_nxp_enet.h>
#include <fsl_enet.h>
/* Static configuration of one ENET PTP clock instance. */
struct ptp_clock_nxp_enet_config {
	const struct pinctrl_dev_config *pincfg;
	/* Parent ENET module device (source of the MMIO base address). */
	const struct device *module_dev;
	/* This PTP clock device itself (passed to IRQ_CONNECT). */
	const struct device *port;
	const struct device *clock_dev;
	/* Clock subsystem cookie handed to clock_control_get_rate().
	 * NOTE(review): declared as struct device * but initialized from a
	 * DT clocks cell and used as clock_control_subsys_t (void *) —
	 * consider changing the declared type.
	 */
	struct device *clock_subsys;
	void (*irq_config_func)(void);
};

/* Runtime state of one ENET PTP clock instance. */
struct ptp_clock_nxp_enet_data {
	ENET_Type *base;
	/* Accumulated frequency ratio applied via rate_adjust(). */
	double clock_ratio;
	enet_handle_t enet_handle;
	/* Serializes PTP timer access; shared with the MAC driver via
	 * nxp_enet_ptp_clock_callback().
	 */
	struct k_mutex ptp_mutex;
};
static int ptp_clock_nxp_enet_set(const struct device *dev,
struct net_ptp_time *tm)
{
struct ptp_clock_nxp_enet_data *data = dev->data;
enet_ptp_time_t enet_time;
enet_time.second = tm->second;
enet_time.nanosecond = tm->nanosecond;
ENET_Ptp1588SetTimer(data->base, &data->enet_handle, &enet_time);
return 0;
}
/* Read the current 1588 hardware time into @p tm. Always returns 0. */
static int ptp_clock_nxp_enet_get(const struct device *dev,
				  struct net_ptp_time *tm)
{
	struct ptp_clock_nxp_enet_data *data = dev->data;
	enet_ptp_time_t now;

	ENET_Ptp1588GetTimer(data->base, &data->enet_handle, &now);

	tm->second = now.second;
	tm->nanosecond = now.nanosecond;

	return 0;
}
/**
 * @brief Apply a one-shot offset of @p increment nanoseconds.
 *
 * The seconds counter is maintained in software; the period register
 * (ATPER) is shortened or lengthened for one software second to apply
 * the offset.
 *
 * @return 0 on success, -EINVAL if |increment| >= 1 s, -EBUSY if a
 *         previous adjustment has not yet been consumed.
 */
static int ptp_clock_nxp_enet_adjust(const struct device *dev,
				     int increment)
{
	struct ptp_clock_nxp_enet_data *data = dev->data;
	/* irq_lock() returns an unsigned key; use the matching type
	 * instead of the original plain int.
	 */
	unsigned int key;
	int ret;

	if ((increment <= (int32_t)(-NSEC_PER_SEC)) ||
	    (increment >= (int32_t)NSEC_PER_SEC)) {
		return -EINVAL;
	}

	key = irq_lock();

	if (data->base->ATPER != NSEC_PER_SEC) {
		/* Previous one-shot adjustment still pending. */
		ret = -EBUSY;
	} else {
		/* Seconds counter is handled by software. Change the
		 * period of one software second to adjust the clock.
		 */
		data->base->ATPER = NSEC_PER_SEC - increment;
		ret = 0;
	}

	irq_unlock(key);

	return ret;
}
/* Implementation of rate_adjust(): scale the clock frequency by @p ratio
 * using the ENET timer correction feature (a +/-1 ns correction applied
 * every `mul` timer increments). The requested ratio is accumulated into
 * data->clock_ratio so repeated adjustments compose.
 *
 * Returns 0 on success (including a no-op for near-unity ratios), or
 * -EINVAL when the accumulated ratio exceeds what the hardware can
 * correct.
 */
static int ptp_clock_nxp_enet_rate_adjust(const struct device *dev,
					  double ratio)
{
	const struct ptp_clock_nxp_enet_config *config = dev->config;
	struct ptp_clock_nxp_enet_data *data = dev->data;
	int corr;
	int32_t mul;
	double val;
	uint32_t enet_ref_pll_rate;

	(void) clock_control_get_rate(config->clock_dev, config->clock_subsys,
			&enet_ref_pll_rate);

	/* Nominal nanoseconds added to the timer per reference-clock tick. */
	int hw_inc = NSEC_PER_SEC / enet_ref_pll_rate;

	/* No change needed. */
	if ((ratio > 1.0 && ratio - 1.0 < 0.00000001) ||
	   (ratio < 1.0 && 1.0 - ratio < 0.00000001)) {
		return 0;
	}

	ratio *= data->clock_ratio;

	/* Limit possible ratio. */
	if ((ratio > 1.0 + 1.0/(2 * hw_inc)) ||
			(ratio < 1.0 - 1.0/(2 * hw_inc))) {
		return -EINVAL;
	}

	/* Save new ratio. */
	data->clock_ratio = ratio;

	/* corr is the increment used on correction ticks; val is how many
	 * ticks apart those corrections must be to realize the ratio.
	 */
	if (ratio < 1.0) {
		corr = hw_inc - 1;
		val = 1.0 / (hw_inc * (1.0 - ratio));
	} else if (ratio > 1.0) {
		corr = hw_inc + 1;
		val = 1.0 / (hw_inc * (ratio - 1.0));
	} else {
		val = 0;
		corr = hw_inc;
	}

	if (val >= INT32_MAX) {
		/* Value is too high.
		 * It is not possible to adjust the rate of the clock.
		 */
		mul = 0;
	} else {
		mul = val;
	}

	k_mutex_lock(&data->ptp_mutex, K_FOREVER);

	ENET_Ptp1588AdjustTimer(data->base, corr, mul);

	k_mutex_unlock(&data->ptp_mutex);

	return 0;
}
/* Callback invoked by the ENET MAC driver. On NXP_ENET_MODULE_RESET the
 * 1588 timer is (re)configured: PTP multicast groups, timer channel
 * (channel 3 per errata 2579) and clock source; the software rate
 * adjustment state is reset. When @p cb_data is non-NULL, the address of
 * the PTP mutex is written there so the MAC driver shares the same lock.
 */
void nxp_enet_ptp_clock_callback(const struct device *dev,
				 enum nxp_enet_callback_reason event,
				 void *cb_data)
{
	const struct ptp_clock_nxp_enet_config *config = dev->config;
	struct ptp_clock_nxp_enet_data *data = dev->data;

	if (event == NXP_ENET_MODULE_RESET) {
		enet_ptp_config_t ptp_config;
		uint32_t enet_ref_pll_rate;
		/* 01-1B-19-00-00-00: PTP primary multicast address
		 * 01-80-C2-00-00-0E: PTP peer-delay multicast address
		 */
		uint8_t ptp_multicast[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
		uint8_t ptp_peer_multicast[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x0E };

		(void) clock_control_get_rate(config->clock_dev, config->clock_subsys,
				&enet_ref_pll_rate);

		ENET_AddMulticastGroup(data->base, ptp_multicast);
		ENET_AddMulticastGroup(data->base, ptp_peer_multicast);

		/* only for ERRATA_2579 */
		ptp_config.channel = kENET_PtpTimerChannel3;
		ptp_config.ptp1588ClockSrc_Hz = enet_ref_pll_rate;
		data->clock_ratio = 1.0;

		ENET_Ptp1588SetChannelMode(data->base, kENET_PtpTimerChannel3,
				kENET_PtpChannelPulseHighonCompare, true);
		ENET_Ptp1588Configure(data->base, &data->enet_handle,
					&ptp_config);
	}

	if (cb_data != NULL) {
		/* Share the mutex with mac driver */
		*(uintptr_t *)cb_data = (uintptr_t)&data->ptp_mutex;
	}
}
/* One-time device init: resolve the parent ENET MMIO base, apply the
 * default pinctrl state, create the PTP mutex and connect the IRQ.
 */
static int ptp_clock_nxp_enet_init(const struct device *port)
{
	const struct ptp_clock_nxp_enet_config *config = port->config;
	struct ptp_clock_nxp_enet_data *data = port->data;
	int err;

	data->base = (ENET_Type *)DEVICE_MMIO_GET(config->module_dev);

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

	k_mutex_init(&data->ptp_mutex);

	config->irq_config_func();

	return 0;
}
/* 1588 timestamp interrupt: acknowledge every pending channel compare
 * event, then let the MCUX timestamp handler run (it maintains the
 * software seconds counter).
 */
static void ptp_clock_nxp_enet_isr(const struct device *dev)
{
	struct ptp_clock_nxp_enet_data *data = dev->data;
	unsigned int key = irq_lock();
	enet_ptp_timer_channel_t ch;

	/* clear channel */
	for (ch = kENET_PtpTimerChannel1; ch <= kENET_PtpTimerChannel4; ch++) {
		if (ENET_Ptp1588GetChannelStatus(data->base, ch)) {
			ENET_Ptp1588ClearChannelStatus(data->base, ch);
		}
	}

	ENET_TimeStampIRQHandler(data->base, &data->enet_handle);

	irq_unlock(key);
}
/* PTP clock API vtable for this driver. */
static const struct ptp_clock_driver_api ptp_clock_nxp_enet_api = {
	.set = ptp_clock_nxp_enet_set,
	.get = ptp_clock_nxp_enet_get,
	.adjust = ptp_clock_nxp_enet_adjust,
	.rate_adjust = ptp_clock_nxp_enet_rate_adjust,
};
/* Per-instance boilerplate: IRQ configuration function, pinctrl config,
 * static config/data objects and the device definition.
 */
#define PTP_CLOCK_NXP_ENET_INIT(n)					\
	static void nxp_enet_ptp_clock_##n##_irq_config_func(void)	\
	{								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, 0, irq),		\
				DT_INST_IRQ_BY_IDX(n, 0, priority),	\
				ptp_clock_nxp_enet_isr,			\
				DEVICE_DT_INST_GET(n),			\
				0);					\
		irq_enable(DT_INST_IRQ_BY_IDX(n, 0, irq));		\
	}								\
									\
	PINCTRL_DT_INST_DEFINE(n);					\
									\
	static const struct ptp_clock_nxp_enet_config			\
		ptp_clock_nxp_enet_##n##_config = {			\
			.module_dev = DEVICE_DT_GET(DT_INST_PARENT(n)),	\
			.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),	\
			.port = DEVICE_DT_INST_GET(n),			\
			.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
			.clock_subsys = (void *)			\
					DT_INST_CLOCKS_CELL_BY_IDX(n, 0, name), \
			.irq_config_func =				\
				nxp_enet_ptp_clock_##n##_irq_config_func, \
		};							\
									\
	static struct ptp_clock_nxp_enet_data ptp_clock_nxp_enet_##n##_data; \
									\
	DEVICE_DT_INST_DEFINE(n, &ptp_clock_nxp_enet_init, NULL,	\
				&ptp_clock_nxp_enet_##n##_data,		\
				&ptp_clock_nxp_enet_##n##_config,	\
				POST_KERNEL, CONFIG_PTP_CLOCK_INIT_PRIORITY, \
				&ptp_clock_nxp_enet_api);

DT_INST_FOREACH_STATUS_OKAY(PTP_CLOCK_NXP_ENET_INIT)
```unknown
# Nuvoton NuMaker RMC (flash) driver configuration options
config SOC_FLASH_NUMAKER_RMC
bool "Nuvoton NuMaker MCU embedded RRAM memory controller"
default y
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_NO_EXPLICIT_ERASE
select HAS_NUMAKER_RMC
depends on DT_HAS_NUVOTON_NUMAKER_RMC_ENABLED
help
This option enables the RMC driver for Nuvoton NuMaker family of
processors.
Say y if you wish to enable NuMaker RMC.
``` | /content/code_sandbox/drivers/flash/Kconfig.numaker_rmc | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 112 |
```c
/*
*
*/
#define LOG_DOMAIN flash_stm32g4
#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_DOMAIN);
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/init.h>
#include <soc.h>
#include <stm32_ll_system.h>
#include "flash_stm32.h"
#define STM32G4_SERIES_MAX_FLASH 512
#define BANK2_OFFSET (KB(STM32G4_SERIES_MAX_FLASH) / 2)
/*
* offset and len must be aligned on 8 for write,
* positive and not beyond end of flash
*/
/* Validate a flash operation range: it must lie within existing flash,
 * must not span the bank1/bank2 address discontinuity, and for writes
 * must satisfy the alignment checked by flash_stm32_valid_write().
 */
bool flash_stm32_valid_range(const struct device *dev, off_t offset,
			     uint32_t len,
			     bool write)
{
#if defined(FLASH_STM32_DBANK) && (CONFIG_FLASH_SIZE < STM32G4_SERIES_MAX_FLASH)
	/*
	 * In case of bank1/2 discontinuity, the range should not
	 * start before bank2 and end beyond bank1 at the same time.
	 * Locations beyond bank2 are caught by flash_stm32_range_exists.
	 */
	if ((offset < BANK2_OFFSET) && (offset + len > FLASH_SIZE / 2)) {
		/* was `return 0;` — use false to keep the bool contract */
		return false;
	}
#endif

	if (write && !flash_stm32_valid_write(offset, len)) {
		return false;
	}

	return flash_stm32_range_exists(dev, offset, len);
}
/* Reset and re-enable the flash data and instruction caches, if they
 * are enabled, so stale contents are not served afterwards. Presumably
 * called after erase/program operations — confirm at call sites.
 */
static inline void flush_cache(FLASH_TypeDef *regs)
{
	if (regs->ACR & FLASH_ACR_DCEN) {
		regs->ACR &= ~FLASH_ACR_DCEN;
		/* Datasheet: DCRST: Data cache reset
		 * This bit can be written only when the data cache is disabled
		 */
		regs->ACR |= FLASH_ACR_DCRST;
		regs->ACR &= ~FLASH_ACR_DCRST;
		regs->ACR |= FLASH_ACR_DCEN;
	}

	if (regs->ACR & FLASH_ACR_ICEN) {
		regs->ACR &= ~FLASH_ACR_ICEN;
		/* Datasheet: ICRST: Instruction cache reset :
		 * This bit can be written only when the instruction cache
		 * is disabled
		 */
		regs->ACR |= FLASH_ACR_ICRST;
		regs->ACR &= ~FLASH_ACR_ICRST;
		regs->ACR |= FLASH_ACR_ICEN;
	}
}
/* Program one 64-bit double word of flash at @p offset (the hardware
 * programming granularity). Requires the control register to be
 * unlocked. The target location must be erased unless the value being
 * written is all-zeros. Returns 0 on success or a negative errno.
 */
static int write_dword(const struct device *dev, off_t offset, uint64_t val)
{
	volatile uint32_t *flash = (uint32_t *)(offset + FLASH_STM32_BASE_ADDRESS);
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
#if defined(FLASH_STM32_DBANK)
	bool dcache_enabled = false;
#endif /* FLASH_STM32_DBANK */
	uint32_t tmp;
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		LOG_ERR("CR locked");
		return -EIO;
	}

	/* Check that no Flash main memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Check if this double word is erased and value isn't 0.
	 *
	 * It is allowed to write only zeros over an already written dword
	 * See 3.3.7 in reference manual.
	 */
	if ((flash[0] != 0xFFFFFFFFUL ||
	     flash[1] != 0xFFFFFFFFUL) && val != 0UL) {
		LOG_ERR("Word at offs %ld not erased", (long)offset);
		return -EIO;
	}

#if defined(FLASH_STM32_DBANK)
	/*
	 * Disable the data cache to avoid the silicon errata ES0430 Rev 7 2.2.2:
	 * "Data cache might be corrupted during Flash memory read-while-write operation"
	 */
	if (regs->ACR & FLASH_ACR_DCEN) {
		dcache_enabled = true;
		regs->ACR &= (~FLASH_ACR_DCEN);
	}
#endif /* FLASH_STM32_DBANK */

	/* Set the PG bit */
	regs->CR |= FLASH_CR_PG;

	/* Flush the register write */
	tmp = regs->CR;

	/* Perform the data write operation at the desired memory address */
	flash[0] = (uint32_t)val;
	flash[1] = (uint32_t)(val >> 32);

	/* Wait until the BSY bit is cleared */
	rc = flash_stm32_wait_flash_idle(dev);

	/* Clear the PG bit */
	regs->CR &= (~FLASH_CR_PG);

#if defined(FLASH_STM32_DBANK)
	/* Reset/enable the data cache if previously enabled */
	if (dcache_enabled) {
		regs->ACR |= FLASH_ACR_DCRST;
		regs->ACR &= (~FLASH_ACR_DCRST);
		regs->ACR |= FLASH_ACR_DCEN;
	}
#endif /* FLASH_STM32_DBANK */

	return rc;
}
/*
 * Erase the single flash page containing byte @offset.
 *
 * On dual-bank parts the physical bank is selected from @offset and the
 * current bank-swap configuration; the page number programmed into
 * CR.PNB is always bank-relative.
 */
static int erase_page(const struct device *dev, unsigned int offset)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	int rc;
	int page;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		LOG_ERR("CR locked");
		return -EIO;
	}

	/* Check that no Flash memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

#if defined(FLASH_STM32_DBANK)
	bool bank_swap;
	/* Check whether bank1/2 are swapped */
	bank_swap = (LL_SYSCFG_GetFlashBankMode() == LL_SYSCFG_BANKMODE_BANK2);

	if ((offset < (FLASH_SIZE / 2)) && !bank_swap) {
		/* The page to be erased is in bank 1 */
		regs->CR &= ~FLASH_CR_BKER_Msk;
		page = offset / FLASH_PAGE_SIZE;
		LOG_DBG("Erase page %d on bank 1", page);
	} else if ((offset >= BANK2_OFFSET) && bank_swap) {
		/* The page to be erased is in bank 1 (banks swapped) */
		regs->CR &= ~FLASH_CR_BKER_Msk;
		page = (offset - BANK2_OFFSET) / FLASH_PAGE_SIZE;
		LOG_DBG("Erase page %d on bank 1", page);
	} else if ((offset < (FLASH_SIZE / 2)) && bank_swap) {
		/* The page to be erased is in bank 2 (banks swapped) */
		regs->CR |= FLASH_CR_BKER;
		page = offset / FLASH_PAGE_SIZE;
		LOG_DBG("Erase page %d on bank 2", page);
	} else if ((offset >= BANK2_OFFSET) && !bank_swap) {
		/* The page to be erased is in bank 2 */
		regs->CR |= FLASH_CR_BKER;
		page = (offset - BANK2_OFFSET) / FLASH_PAGE_SIZE;
		LOG_DBG("Erase page %d on bank 2", page);
	} else {
		/* Offset falls into the address gap between the banks */
		LOG_ERR("Offset %d does not exist", offset);
		return -EINVAL;
	}
#else
	page = offset / FLASH_PAGE_SIZE;
	LOG_DBG("Erase page %d", page);
#endif

	/* Set the PER bit and select the page you wish to erase */
	regs->CR |= FLASH_CR_PER;
	regs->CR &= ~FLASH_CR_PNB_Msk;
	regs->CR |= (page << FLASH_CR_PNB_Pos);

	/* Set the STRT bit */
	regs->CR |= FLASH_CR_STRT;

	/* flush the register write */
	tmp = regs->CR;

	/* Wait for the BSY bit */
	rc = flash_stm32_wait_flash_idle(dev);

	/* Caches may hold stale contents after the erase; reset them */
	flush_cache(regs);

#ifdef FLASH_STM32_DBANK
	regs->CR &= ~(FLASH_CR_PER | FLASH_CR_BKER);
#else
	regs->CR &= ~(FLASH_CR_PER);
#endif

	return rc;
}
/*
 * Erase every page overlapping the byte range [offset, offset + len).
 * Stops at the first failing page and returns its error code.
 */
int flash_stm32_block_erase_loop(const struct device *dev,
				 unsigned int offset,
				 unsigned int len)
{
	const unsigned int last = offset + len - 1U;
	unsigned int addr;
	int rc = 0;

	for (addr = offset; addr <= last; addr += FLASH_PAGE_SIZE) {
		rc = erase_page(dev, addr);
		if (rc < 0) {
			break;
		}
	}

	return rc;
}
/*
 * Write @len bytes from @data to flash at @offset, one 64-bit double
 * word at a time.
 *
 * Returns 0 on success or the first error from write_dword().
 *
 * NOTE(review): like the original, this reads 8 bytes per iteration,
 * so a @len that is not a multiple of 8 reads past the end of @data —
 * confirm callers always pass dword-multiple lengths.
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	const uint8_t *src = (const uint8_t *)data;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < len; i += 8U, offset += 8U) {
		uint64_t val = 0U;

		/* Assemble the double word byte by byte (little-endian,
		 * matching the Cortex-M memory layout the previous direct
		 * uint64_t dereference produced) so that an unaligned
		 * source buffer cannot trigger a faulting 64-bit load.
		 */
		for (unsigned int b = 0U; b < 8U; b++) {
			val |= (uint64_t)src[i + b] << (8U * b);
		}

		rc = write_dword(dev, offset, val);
		if (rc < 0) {
			return rc;
		}
	}

	return rc;
}
/*
 * Read-modify-write the option byte register: clear the bits in @mask
 * and set those in @value. No-op when the masked field already holds
 * @value.
 *
 * Returns 0 on success, -EIO when the option lock is set, or the error
 * from waiting for the flash to become idle.
 */
static __unused int write_optb(const struct device *dev, uint32_t mask,
			       uint32_t value)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc;

	if (regs->CR & FLASH_CR_OPTLOCK) {
		return -EIO;
	}

	/* Nothing to do when the field already holds the requested value */
	if ((regs->OPTR & mask) == value) {
		return 0;
	}

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	regs->OPTR = (regs->OPTR & ~mask) | value;
	regs->CR |= FLASH_CR_OPTSTRT;

	/* Make sure previous write is completed. */
	barrier_dsync_fence_full();

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	return 0;
}
#if defined(CONFIG_FLASH_STM32_WRITE_PROTECT)
/*
* Remark for future development implementing Write Protection for the L4 parts:
*
* STM32L4 allows for 2 write protected memory areas, c.f. FLASH_WEP1AR, FLASH_WRP1BR
* which are defined by their start and end pages.
*
* Other STM32 parts (i.e. F4 series) uses bitmask to select sectors.
*
* To implement Write Protection for L4 one should thus add a new EX_OP like
* FLASH_STM32_EX_OP_SECTOR_WP_RANGED in stm32_flash_api_extensions.h
*/
#endif /* CONFIG_FLASH_STM32_WRITE_PROTECT */
#if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION)
/* Read the current readout-protection (RDP) level from the option bytes. */
uint8_t flash_stm32_get_rdp_level(const struct device *dev)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);

	return (regs->OPTR & FLASH_OPTR_RDP_Msk) >> FLASH_OPTR_RDP_Pos;
}
/* Program a new readout-protection (RDP) level into the option bytes.
 *
 * NOTE(review): the write_optb() return value is discarded, so a failed
 * option-byte write is silent here — confirm callers do not need it.
 */
void flash_stm32_set_rdp_level(const struct device *dev, uint8_t level)
{
	write_optb(dev, FLASH_OPTR_RDP_Msk,
		   (uint32_t)level << FLASH_OPTR_RDP_Pos);
}
#endif /* CONFIG_FLASH_STM32_READOUT_PROTECTION */
/*
 * Report the erase-page layout of the device.
 *
 * Dual-bank parts with less than the series' maximum flash size have an
 * address gap between the two banks; it is reported as one dummy "page"
 * so that page offsets keep mapping 1:1 onto device addresses.
 */
void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	ARG_UNUSED(dev);

#if defined(FLASH_STM32_DBANK) && (CONFIG_FLASH_SIZE < STM32G4_SERIES_MAX_FLASH)
#define PAGES_PER_BANK ((FLASH_SIZE / FLASH_PAGE_SIZE) / 2)
	static struct flash_pages_layout stm32g4_flash_layout[3];

	/* Computed lazily on first call (pages_count == 0 means unset) */
	if (stm32g4_flash_layout[0].pages_count == 0) {
		/* Bank1 */
		stm32g4_flash_layout[0].pages_count = PAGES_PER_BANK;
		stm32g4_flash_layout[0].pages_size = FLASH_PAGE_SIZE;
		/* Dummy page corresponding to discontinuity between bank1/2 */
		stm32g4_flash_layout[1].pages_count = 1;
		stm32g4_flash_layout[1].pages_size = BANK2_OFFSET
				- (PAGES_PER_BANK * FLASH_PAGE_SIZE);
		/* Bank2 */
		stm32g4_flash_layout[2].pages_count = PAGES_PER_BANK;
		stm32g4_flash_layout[2].pages_size = FLASH_PAGE_SIZE;
	}
#else
	static struct flash_pages_layout stm32g4_flash_layout[1];

	/* Computed lazily on first call (pages_count == 0 means unset) */
	if (stm32g4_flash_layout[0].pages_count == 0) {
		stm32g4_flash_layout[0].pages_count = FLASH_SIZE
				/ FLASH_PAGE_SIZE;
		stm32g4_flash_layout[0].pages_size = FLASH_PAGE_SIZE;
	}
#endif

	*layout = stm32g4_flash_layout;
	*layout_size = ARRAY_SIZE(stm32g4_flash_layout);
}
/* Override weak function: validate the option-byte bank configuration.
 * This driver only supports dual-bank mode on parts where the DBANK
 * option exists.
 */
int flash_stm32_check_configuration(void)
{
#if defined(FLASH_STM32_DBANK)
	if (READ_BIT(FLASH->OPTR, FLASH_STM32_DBANK) == 0U) {
		/* Single bank not supported when dualbank is possible */
		LOG_ERR("Single bank configuration not supported");
		return -ENOTSUP;
	}
#endif
	return 0;
}
``` | /content/code_sandbox/drivers/flash/flash_stm32g4x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,861 |
```unknown
config SOC_FLASH_MCUX
bool "MCUX flash shim driver"
default y
depends on DT_HAS_NXP_KINETIS_FTFA_ENABLED || \
DT_HAS_NXP_KINETIS_FTFE_ENABLED || \
DT_HAS_NXP_KINETIS_FTFL_ENABLED || \
DT_HAS_NXP_IAP_FMC55_ENABLED || \
DT_HAS_NXP_IAP_FMC553_ENABLED || \
DT_HAS_NXP_IAP_MSF1_ENABLED
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
select MPU_ALLOW_FLASH_WRITE if ARM_MPU
help
Enables the MCUX flash shim driver.
WARNING: This driver will disable the system interrupts for
the duration of the flash erase/write operations. This will
have an impact on the overall system performance - whether
this is acceptable or not will depend on the use case.
if SOC_FLASH_MCUX
config CHECK_BEFORE_READING
bool "Verify area before reading it"
default y if SOC_SERIES_LPC55XXX
help
Do a margin check flash command before reading an area.
This feature prevents erroneous/forbidden reading. Some ECC enabled
devices will crash when reading an erased or wrongly programmed area.
endif # SOC_FLASH_MCUX
if DT_HAS_NXP_IMX_FLEXSPI_ENABLED
menu "Flexspi flash driver"
config FLASH_MCUX_FLEXSPI_NOR
bool "MCUX FlexSPI NOR driver"
default y
depends on DT_HAS_NXP_IMX_FLEXSPI_NOR_ENABLED
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
select FLASH_JESD216
select MEMC
select MEMC_MCUX_FLEXSPI
config FLASH_MCUX_FLEXSPI_MX25UM51345G
bool "MCUX FlexSPI MX25UM51345G driver"
default y
depends on DT_HAS_NXP_IMX_FLEXSPI_MX25UM51345G_ENABLED
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
select MEMC
select MEMC_MCUX_FLEXSPI
config FLASH_MCUX_FLEXSPI_HYPERFLASH
bool "MCUX FlexSPI HYPERFLASH driver"
default y
depends on DT_HAS_NXP_IMX_FLEXSPI_HYPERFLASH_ENABLED
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
select MEMC
select MEMC_MCUX_FLEXSPI
endmenu
if FLASH_MCUX_FLEXSPI_MX25UM51345G
choice FLASH_MCUX_FLEXSPI_MX25UM51345G_OPI_MODE
prompt "FlexSPI MX25UM51345G OPI mode"
default FLASH_MCUX_FLEXSPI_MX25UM51345G_OPI_DTR
help
Select the MX25UM51345G octal flash operation mode(Octal I/O STR
or Octal I/O DTR).
config FLASH_MCUX_FLEXSPI_MX25UM51345G_OPI_STR
bool "STR"
config FLASH_MCUX_FLEXSPI_MX25UM51345G_OPI_DTR
bool "DTR"
endchoice
endif # FLASH_MCUX_FLEXSPI_MX25UM51345G
config FLASH_MCUX_FLEXSPI_NOR_WRITE_BUFFER
bool "MCUX FlexSPI NOR write RAM buffer"
default y
depends on (FLASH_MCUX_FLEXSPI_NOR || FLASH_MCUX_FLEXSPI_MX25UM51345G)
help
Copy the data to a RAM buffer before writing it to the flash.
This prevents faults when the data to write would be located on the
flash itself.
config FLASH_MCUX_FLEXSPI_HYPERFLASH_WRITE_BUFFER
bool "MCUX FlexSPI HYPERFLASH write RAM buffer"
default y
depends on FLASH_MCUX_FLEXSPI_HYPERFLASH
help
Copy the data to a RAM buffer before writing it to the flash.
This prevents faults when the data to write would be located on the
flash itself.
endif # DT_HAS_NXP_IMX_FLEXSPI_ENABLED
``` | /content/code_sandbox/drivers/flash/Kconfig.mcux | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 874 |
```unknown
# Kconfig Andes QSPI-NOR configuration options
#
#
#
menuconfig FLASH_ANDES_QSPI
bool "Andes FLASH driver"
default y
depends on DT_HAS_ANDESTECH_QSPI_NOR_ENABLED
select FLASH_HAS_PAGE_LAYOUT
select FLASH_JESD216
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
depends on !SPI_NOR
help
Enable driver for Andes QSPI
if FLASH_ANDES_QSPI
choice FLASH_ANDES_QSPI_SFDP
prompt "Source for Serial Flash Discoverable Parameters"
default FLASH_ANDES_QSPI_SFDP_RUNTIME
config FLASH_ANDES_QSPI_SFDP_DEVICETREE
bool "Basic Flash Parameters from devicetree node"
help
The JESD216 Basic Flash Parameters table must be provided in the
sfdp-bfp property in devicetree. The size and jedec-id properties are
also required.
config FLASH_ANDES_QSPI_SFDP_RUNTIME
bool "Read flash parameters at runtime"
help
Read all flash device characteristics from the device at runtime.
This option is the most flexible as it should provide functionality
for all supported JESD216-compatible devices.
endchoice
config FLASH_ANDES_QSPI_INIT_PRIORITY
int
default 80
help
Device driver initialization priority.
config FLASH_ANDES_QSPI_LAYOUT_PAGE_SIZE
int "Page size to use for FLASH_LAYOUT feature"
default 65536
help
When CONFIG_FLASH_PAGE_LAYOUT is used this driver will support
that API. By default the page size corresponds to the block
size (65536). Other options include the 32K-byte erase size
(32768), the sector size (4096), or any non-zero multiple of the
sector size.
endif # FLASH_ANDES_QSPI
``` | /content/code_sandbox/drivers/flash/Kconfig.andes | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 384 |
```unknown
# STM32 xSPI flash driver configuration options
DT_STM32_XSPI_1_HAS_DMA := $(dt_nodelabel_has_prop,xspi1,dmas)
DT_STM32_XSPI_2_HAS_DMA := $(dt_nodelabel_has_prop,xspi2,dmas)
config FLASH_STM32_XSPI
bool "STM32 XSPI Flash driver"
default y
depends on DT_HAS_ST_STM32_XSPI_ENABLED && DT_HAS_ST_STM32_XSPI_NOR_ENABLED
select USE_STM32_HAL_XSPI
select USE_STM32_LL_DLYB
select FLASH_HAS_DRIVER_ENABLED
select FLASH_JESD216
select FLASH_PAGE_LAYOUT
select FLASH_HAS_PAGE_LAYOUT
select DMA if $(DT_STM32_XSPI_1_HAS_DMA) || $(DT_STM32_XSPI_2_HAS_DMA)
select USE_STM32_HAL_DMA if $(DT_STM32_XSPI_1_HAS_DMA) || \
$(DT_STM32_XSPI_2_HAS_DMA)
select USE_STM32_HAL_DMA_EX if SOC_SERIES_STM32H5X && \
($(DT_STM32_XSPI_1_HAS_DMA) || \
$(DT_STM32_XSPI_2_HAS_DMA))
help
Enable XSPI-NOR support on the STM32 family of processors.
``` | /content/code_sandbox/drivers/flash/Kconfig.stm32_xspi | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 277 |
```unknown
config FLASH_AMBIQ
bool "Ambiq flash driver on MRAM or flash"
default y
depends on DT_HAS_AMBIQ_FLASH_CONTROLLER_ENABLED
select AMBIQ_HAL
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_NO_EXPLICIT_ERASE if SOC_SERIES_APOLLO4X
select FLASH_HAS_EXPLICIT_ERASE if SOC_SERIES_APOLLO3X
help
Enables Ambiq flash driver on MRAM (e.g. Apollo4x) or
flash (e.g. Apollo3x).
``` | /content/code_sandbox/drivers/flash/Kconfig.ambiq | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 119 |
```c
/*
*
*/
#define LOG_DOMAIN flash_stm32wba
#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_DOMAIN);
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <soc.h>
#include <stm32_ll_icache.h>
#include <stm32_ll_system.h>
#include "flash_stm32.h"
#define STM32_SERIES_MAX_FLASH 1024
#define ICACHE_DISABLE_TIMEOUT_VALUE 1U /* 1ms */
#define ICACHE_INVALIDATE_TIMEOUT_VALUE 1U /* 1ms */
/*
 * Disable the instruction cache; per the flow below this also starts a
 * hardware cache-invalidation procedure.
 *
 * Returns 0 on success or -ETIMEDOUT when the cache is still reported
 * enabled after ICACHE_DISABLE_TIMEOUT_VALUE ms.
 */
static int stm32_icache_disable(void)
{
	int status = 0;
	uint32_t tickstart;

	LOG_DBG("I-cache Disable");
	/* Clear BSYENDF flag first and then disable the instruction cache
	 * that starts a cache invalidation procedure
	 */
	CLEAR_BIT(ICACHE->FCR, ICACHE_FCR_CBSYENDF);

	LL_ICACHE_Disable();

	/* Get tick */
	tickstart = k_uptime_get_32();

	/* Wait for instruction cache to get disabled */
	while (LL_ICACHE_IsEnabled()) {
		if ((k_uptime_get_32() - tickstart) >
						ICACHE_DISABLE_TIMEOUT_VALUE) {
			/* New check to avoid false timeout detection in case
			 * of preemption.
			 */
			if (LL_ICACHE_IsEnabled()) {
				status = -ETIMEDOUT;
				break;
			}
		}
	}

	return status;
}
/* Re-enable the instruction cache. Callers only invoke this after the
 * invalidation procedure has completed (see the erase/write paths).
 */
static void stm32_icache_enable(void)
{
	LOG_DBG("I-cache Enable");
	LL_ICACHE_Enable();
}
/*
 * Wait for a pending i-cache invalidation to finish and clear the
 * resulting flags.
 *
 * Returns 0 when the BSYEND flag was observed, -ETIMEDOUT when the
 * invalidation did not complete within ICACHE_INVALIDATE_TIMEOUT_VALUE
 * ms, or -EIO when the cache reported an error.
 */
static int icache_wait_for_invalidate_complete(void)
{
	int status = -EIO;
	uint32_t tickstart;

	/* Check if ongoing invalidation operation */
	if (LL_ICACHE_IsActiveFlag_BUSY()) {
		/* Get tick */
		tickstart = k_uptime_get_32();

		/* Wait for end of cache invalidation */
		while (!LL_ICACHE_IsActiveFlag_BSYEND()) {
			if ((k_uptime_get_32() - tickstart) >
					ICACHE_INVALIDATE_TIMEOUT_VALUE) {
				break;
			}
		}
	}

	/* Clear any pending flags */
	if (LL_ICACHE_IsActiveFlag_BSYEND()) {
		LOG_DBG("I-cache Invalidation complete");

		LL_ICACHE_ClearFlag_BSYEND();
		status = 0;
	} else {
		LOG_ERR("I-cache Invalidation timeout");
		status = -ETIMEDOUT;
	}

	/* An error flag overrides the timeout/success status above */
	if (LL_ICACHE_IsActiveFlag_ERR()) {
		LOG_ERR("I-cache error");

		LL_ICACHE_ClearFlag_ERR();
		status = -EIO;
	}

	return status;
}
/*
 * Program one 128-bit quad word from @buff at @offset (relative to the
 * flash base address). The target quad word must be fully erased.
 *
 * Returns 0 on success, -EIO when the non-secure control register is
 * locked or the target is not erased, or the error from waiting for
 * the flash to become idle.
 */
static int write_qword(const struct device *dev, off_t offset, const uint32_t *buff)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	volatile uint32_t *flash = (uint32_t *)(offset
						+ FLASH_STM32_BASE_ADDRESS);
	uint32_t tmp;
	int rc;

	/* if the non-secure control register is locked, do not fail silently */
	if (regs->NSCR & FLASH_STM32_NSLOCK) {
		LOG_ERR("NSCR locked\n");
		return -EIO;
	}

	/* Check that no Flash main memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Check if this quad word is erased */
	if ((flash[0] != 0xFFFFFFFFUL) || (flash[1] != 0xFFFFFFFFUL) ||
	    (flash[2] != 0xFFFFFFFFUL) || (flash[3] != 0xFFFFFFFFUL)) {
		LOG_ERR("Word at offs %ld not erased", (long)offset);
		return -EIO;
	}

	/* Set the NSPG bit */
	regs->NSCR |= FLASH_STM32_NSPG;

	/* Flush the register write */
	tmp = regs->NSCR;

	/* Perform the data write operation at the desired memory address */
	flash[0] = buff[0];
	flash[1] = buff[1];
	flash[2] = buff[2];
	flash[3] = buff[3];

	/* Wait until the NSBSY bit is cleared */
	rc = flash_stm32_wait_flash_idle(dev);

	/* Clear the NSPG bit */
	regs->NSCR &= (~FLASH_STM32_NSPG);

	return rc;
}
/*
 * Erase the single flash page containing byte @offset.
 *
 * Returns 0 on success, -EIO when the non-secure control register is
 * locked, or the error from waiting for the flash to become idle.
 */
static int erase_page(const struct device *dev, unsigned int offset)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	int rc;
	int page;

	/* if the non-secure control register is locked, do not fail silently */
	if (regs->NSCR & FLASH_STM32_NSLOCK) {
		LOG_ERR("NSCR locked\n");
		return -EIO;
	}

	/* Check that no Flash memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	page = offset / FLASH_PAGE_SIZE;
	LOG_DBG("Erase page %d\n", page);

	/* Set the NSPER bit and select the page you wish to erase */
	regs->NSCR |= FLASH_STM32_NSPER;
	regs->NSCR &= ~FLASH_STM32_NSPNB_MSK;
	regs->NSCR |= (page << FLASH_STM32_NSPNB_POS);

	/* Set the NSSTRT bit */
	regs->NSCR |= FLASH_STM32_NSSTRT;

	/* flush the register write */
	tmp = regs->NSCR;

	/* Wait for the NSBSY bit */
	rc = flash_stm32_wait_flash_idle(dev);

	regs->NSCR &= ~(FLASH_STM32_NSPER);

	return rc;
}
/*
 * Erase every page overlapping the byte range [offset, offset + len),
 * with the i-cache disabled for the duration of the operation.
 *
 * Returns 0 on success, the first erase error, or (if erasing
 * succeeded) any i-cache invalidation error.
 */
int flash_stm32_block_erase_loop(const struct device *dev,
				 unsigned int offset,
				 unsigned int len)
{
	unsigned int address = offset;
	int rc = 0;
	bool icache_enabled = LL_ICACHE_IsEnabled();

	if (icache_enabled) {
		/* Disable icache, this will start the invalidation procedure.
		 * All changes(erase/write) to flash memory should happen when
		 * i-cache is disabled. A write to flash performed without
		 * disabling i-cache will set ERRF error flag in SR register.
		 */
		rc = stm32_icache_disable();
		if (rc != 0) {
			return rc;
		}
	}

	for (; address <= offset + len - 1 ; address += FLASH_PAGE_SIZE) {
		rc = erase_page(dev, address);
		if (rc < 0) {
			break;
		}
	}

	if (icache_enabled) {
		int rc2;

		/* Since i-cache was disabled, this would start the
		 * invalidation procedure, so wait for completion.
		 */
		rc2 = icache_wait_for_invalidate_complete();

		/* Bug fix: do not let the invalidation status clobber an
		 * earlier erase error; report whichever failed first
		 * (mirrors flash_stm32_write_range() in this file).
		 */
		if (!rc) {
			rc = rc2;
		}

		/* I-cache should be enabled only after the
		 * invalidation is complete.
		 */
		stm32_icache_enable();
	}

	return rc;
}
/*
 * Write @len bytes from @data to flash at @offset, 16 bytes (one quad
 * word) at a time, with the i-cache disabled for the duration.
 *
 * NOTE(review): @data is read through a uint32_t pointer, so the buffer
 * is assumed 4-byte aligned and @len a multiple of 16 — confirm against
 * the callers' alignment and size guarantees.
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	int i, rc = 0;
	bool icache_enabled = LL_ICACHE_IsEnabled();

	if (icache_enabled) {
		/* Disable icache, this will start the invalidation procedure.
		 * All changes(erase/write) to flash memory should happen when
		 * i-cache is disabled. A write to flash performed without
		 * disabling i-cache will set ERRF error flag in SR register.
		 */
		rc = stm32_icache_disable();
		if (rc != 0) {
			return rc;
		}
	}

	for (i = 0; i < len; i += 16) {
		rc = write_qword(dev, offset + i, ((const uint32_t *) data + (i>>2)));
		if (rc < 0) {
			break;
		}
	}

	if (icache_enabled) {
		int rc2;

		/* Since i-cache was disabled, this would start the
		 * invalidation procedure, so wait for completion.
		 */
		rc2 = icache_wait_for_invalidate_complete();

		/* Keep the first error: a write failure wins over an
		 * invalidation failure.
		 */
		if (!rc) {
			rc = rc2;
		}

		/* I-cache should be enabled only after the
		 * invalidation is complete.
		 */
		stm32_icache_enable();
	}

	return rc;
}
void flash_stm32_page_layout(const struct device *dev,
const struct flash_pages_layout **layout,
size_t *layout_size)
{
static struct flash_pages_layout stm32wba_flash_layout = {
.pages_count = 0,
.pages_size = 0,
};
ARG_UNUSED(dev);
if (stm32wba_flash_layout.pages_count == 0) {
stm32wba_flash_layout.pages_count = FLASH_SIZE / FLASH_PAGE_SIZE;
stm32wba_flash_layout.pages_size = FLASH_PAGE_SIZE;
}
*layout = &stm32wba_flash_layout;
*layout_size = 1;
}
``` | /content/code_sandbox/drivers/flash/flash_stm32wbax.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,970 |
```c
/*
*
*/
#define LOG_DOMAIN flash_stm32l4
#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_DOMAIN);
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/init.h>
#include <soc.h>
#include "flash_stm32.h"
#if !defined(STM32L4R5xx) && !defined(STM32L4R7xx) && !defined(STM32L4R9xx) && \
!defined(STM32L4S5xx) && !defined(STM32L4S7xx) && !defined(STM32L4S9xx) && \
!defined(STM32L4Q5xx) && !defined(STM32L4P5xx)
#define STM32L4X_PAGE_SHIFT 11
#else
#define STM32L4X_PAGE_SHIFT 12
#endif
#if defined(FLASH_OPTR_DUALBANK) || defined(FLASH_STM32_DBANK)
#define CONTROL_DCACHE
#endif
/*
 * Reset (flush) the data and instruction caches.
 *
 * Each cache must be disabled before its reset bit may be written (see
 * the datasheet notes below) and is re-enabled afterwards only if it
 * was enabled on entry. The exact write order is therefore mandatory.
 */
static inline void flush_cache(FLASH_TypeDef *regs)
{
	if (regs->ACR & FLASH_ACR_DCEN) {
		regs->ACR &= ~FLASH_ACR_DCEN;
		/* Datasheet: DCRST: Data cache reset
		 * This bit can be written only when the data cache is disabled
		 */
		regs->ACR |= FLASH_ACR_DCRST;
		regs->ACR &= ~FLASH_ACR_DCRST;
		regs->ACR |= FLASH_ACR_DCEN;
	}

	if (regs->ACR & FLASH_ACR_ICEN) {
		regs->ACR &= ~FLASH_ACR_ICEN;
		/* Datasheet: ICRST: Instruction cache reset :
		 * This bit can be written only when the instruction cache
		 * is disabled
		 */
		regs->ACR |= FLASH_ACR_ICRST;
		regs->ACR &= ~FLASH_ACR_ICRST;
		regs->ACR |= FLASH_ACR_ICEN;
	}
}
/*
* STM32L4xx devices can have up to 512 2K pages on two 256x2K pages banks
*
* STM32L4R/Sxx devices can have up to 512 4K pages on two 256x4K pages banks
*/
/* Map a byte offset to its page index (page size is a power of two). */
static unsigned int get_page(off_t offset)
{
	unsigned int page_index = offset >> STM32L4X_PAGE_SHIFT;

	return page_index;
}
/*
 * Program one 64-bit double word @val at @offset (relative to the flash
 * base address).
 *
 * Returns 0 on success, -EIO when the control register is locked or the
 * target location is not erased (and @val is non-zero), or the error
 * from waiting for the flash to become idle.
 */
static int write_dword(const struct device *dev, off_t offset, uint64_t val)
{
	volatile uint32_t *flash = (uint32_t *)(offset + FLASH_STM32_BASE_ADDRESS);
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
#ifdef CONTROL_DCACHE
	bool dcache_enabled = false;
#endif /* CONTROL_DCACHE */
	uint32_t tmp;
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check that no Flash main memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Check if this double word is erased and value isn't 0.
	 *
	 * It is allowed to write only zeros over an already written dword
	 * See 3.3.7 in reference manual.
	 */
	if ((flash[0] != 0xFFFFFFFFUL ||
	     flash[1] != 0xFFFFFFFFUL) && val != 0UL) {
		LOG_ERR("Word at offs %ld not erased", (long)offset);
		return -EIO;
	}

#ifdef CONTROL_DCACHE
	/*
	 * Disable the data cache to avoid the silicon errata 2.2.3:
	 * "Data cache might be corrupted during Flash memory read-while-write operation"
	 */
	if (regs->ACR & FLASH_ACR_DCEN) {
		dcache_enabled = true;
		regs->ACR &= (~FLASH_ACR_DCEN);
	}
#endif /* CONTROL_DCACHE */

	/* Set the PG bit */
	regs->CR |= FLASH_CR_PG;

	/* Flush the register write (dummy read-back orders the CR write
	 * before the data writes below)
	 */
	tmp = regs->CR;

	/* Perform the data write operation at the desired memory address */
	flash[0] = (uint32_t)val;
	flash[1] = (uint32_t)(val >> 32);

	/* Wait until the BSY bit is cleared */
	rc = flash_stm32_wait_flash_idle(dev);

	/* Clear the PG bit */
	regs->CR &= (~FLASH_CR_PG);

#ifdef CONTROL_DCACHE
	/* Reset/enable the data cache if previously enabled */
	if (dcache_enabled) {
		regs->ACR |= FLASH_ACR_DCRST;
		regs->ACR &= (~FLASH_ACR_DCRST);
		regs->ACR |= FLASH_ACR_DCEN;
	}
#endif /* CONTROL_DCACHE */

	return rc;
}
#define SOC_NV_FLASH_SIZE DT_REG_SIZE(DT_INST(0, soc_nv_flash))
/*
 * Erase flash page @page (absolute page index spanning both banks).
 *
 * The per-bank page count depends on the bank configuration read from
 * the option bytes; single-bank configurations that would need
 * unsupported access widths return -ENOTSUP.
 */
static int erase_page(const struct device *dev, unsigned int page)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	uint32_t tmp;
	uint16_t pages_per_bank;
	int rc;

#if !defined(FLASH_OPTR_DUALBANK) && !defined(FLASH_STM32_DBANK)
	/* Single bank device. Each page is of 2KB size */
	pages_per_bank = SOC_NV_FLASH_SIZE >> 11;
#elif defined(FLASH_OPTR_DUALBANK)
	/* L4 series (2K page size) with configurable Dual Bank (default y) */
	/* Dual Bank is only option for 1M devices */
	if ((regs->OPTR & FLASH_OPTR_DUALBANK) ||
	    (SOC_NV_FLASH_SIZE == (1024*1024))) {
		/* Dual Bank configuration (nbr pages = flash size / 2 / 2K) */
		pages_per_bank = SOC_NV_FLASH_SIZE >> 12;
	} else {
		/* Single bank configuration. This has not been validated. */
		/* Not supported for now. */
		return -ENOTSUP;
	}
#elif defined(FLASH_STM32_DBANK)
	/* L4+ series (4K page size) with configurable Dual Bank (default y) */
	if (regs->OPTR & FLASH_STM32_DBANK) {
		/* Dual Bank configuration (nbr pages = flash size / 2 / 4K) */
		pages_per_bank = SOC_NV_FLASH_SIZE >> 13;
	} else {
		/* Single bank configuration */
		/* Requires 128 bytes data read. This config is not supported */
		return -ENOTSUP;
	}
#endif

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check that no Flash memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Flush caches before the erase so no stale contents survive it */
	flush_cache(regs);

	/* Set the PER bit and select the page you wish to erase */
	regs->CR |= FLASH_CR_PER;
#ifdef FLASH_CR_BKER
	regs->CR &= ~FLASH_CR_BKER_Msk;
	/* Select bank, only for DUALBANK devices */
	if (page >= pages_per_bank) {
		/* Braces added: Zephyr coding style requires braced bodies */
		regs->CR |= FLASH_CR_BKER;
	}
#endif
	regs->CR &= ~FLASH_CR_PNB_Msk;
	/* PNB holds the bank-relative page number; the field starts at bit 3 */
	regs->CR |= ((page % pages_per_bank) << 3);

	/* Set the STRT bit */
	regs->CR |= FLASH_CR_STRT;

	/* flush the register write */
	tmp = regs->CR;

	/* Wait for the BSY bit */
	rc = flash_stm32_wait_flash_idle(dev);

	regs->CR &= ~FLASH_CR_PER;

	return rc;
}
/*
 * Erase every page overlapping the byte range [offset, offset + len).
 * Stops at the first failing page and returns its error code.
 */
int flash_stm32_block_erase_loop(const struct device *dev,
				 unsigned int offset,
				 unsigned int len)
{
	const int last_page = get_page(offset + len - 1);
	int rc = 0;

	for (int page = get_page(offset); page <= last_page; page++) {
		rc = erase_page(dev, page);
		if (rc < 0) {
			break;
		}
	}

	return rc;
}
/*
 * Write @len bytes from @data to flash at @offset, one 64-bit double
 * word at a time. Returns 0 on success or the first write error.
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	const uint64_t *src = (const uint64_t *)data;
	unsigned int pos = 0U;
	int rc = 0;

	while (pos < len) {
		/* UNALIGNED_GET tolerates a source buffer that is not
		 * 8-byte aligned.
		 */
		rc = write_dword(dev, offset + pos,
				 UNALIGNED_GET(src + (pos >> 3)));
		if (rc < 0) {
			return rc;
		}
		pos += 8U;
	}

	return rc;
}
/*
 * Read-modify-write the option byte register: clear the bits in @mask
 * and set those in @value. No-op when the masked field already holds
 * @value.
 *
 * Returns 0 on success, -EIO when the option lock is set, or the error
 * from waiting for the flash to become idle.
 */
static __unused int write_optb(const struct device *dev, uint32_t mask,
			       uint32_t value)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc;

	if (regs->CR & FLASH_CR_OPTLOCK) {
		return -EIO;
	}

	/* Nothing to do when the field already holds the requested value */
	if ((regs->OPTR & mask) == value) {
		return 0;
	}

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	regs->OPTR = (regs->OPTR & ~mask) | value;
	regs->CR |= FLASH_CR_OPTSTRT;

	/* Make sure previous write is completed. */
	barrier_dsync_fence_full();

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	return 0;
}
#if defined(CONFIG_FLASH_STM32_WRITE_PROTECT)
/*
* Remark for future development implementing Write Protection for the L4 parts:
*
* STM32L4 allows for 2 write protected memory areas, c.f. FLASH_WEP1AR, FLASH_WRP1BR
* which are defined by their start and end pages.
*
* Other STM32 parts (i.e. F4 series) uses bitmask to select sectors.
*
* To implement Write Protection for L4 one should thus add a new EX_OP like
* FLASH_STM32_EX_OP_SECTOR_WP_RANGED in stm32_flash_api_extensions.h
*/
#endif /* CONFIG_FLASH_STM32_WRITE_PROTECT */
#if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION)
/* Read the current readout-protection (RDP) level from the option bytes. */
uint8_t flash_stm32_get_rdp_level(const struct device *dev)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);

	return (regs->OPTR & FLASH_OPTR_RDP_Msk) >> FLASH_OPTR_RDP_Pos;
}
/* Program a new readout-protection (RDP) level into the option bytes.
 *
 * NOTE(review): the write_optb() return value is discarded, so a failed
 * option-byte write is silent here — confirm callers do not need it.
 */
void flash_stm32_set_rdp_level(const struct device *dev, uint8_t level)
{
	write_optb(dev, FLASH_OPTR_RDP_Msk,
		   (uint32_t)level << FLASH_OPTR_RDP_Pos);
}
#endif /* CONFIG_FLASH_STM32_READOUT_PROTECTION */
/*
 * Report the erase-page layout: a single region of uniform
 * FLASH_PAGE_SIZE pages, computed lazily on first call.
 */
void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	static struct flash_pages_layout stm32l4_flash_layout = {
		.pages_count = 0,
		.pages_size = 0,
	};

	ARG_UNUSED(dev);

	/* pages_count == 0 means the layout has not been computed yet */
	if (stm32l4_flash_layout.pages_count == 0) {
		stm32l4_flash_layout.pages_count = FLASH_SIZE / FLASH_PAGE_SIZE;
		stm32l4_flash_layout.pages_size = FLASH_PAGE_SIZE;
	}

	*layout = &stm32l4_flash_layout;
	*layout_size = 1;
}
``` | /content/code_sandbox/drivers/flash/flash_stm32l4x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,490 |
```c
/*
*
* This driver is heavily inspired from the spi_flash_w25qxxdv.c SPI NOR driver.
*
*/
#define DT_DRV_COMPAT jedec_spi_nor
#include <errno.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/init.h>
#include <string.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys_clock.h>
#include <zephyr/pm/device.h>
#include "spi_nor.h"
#include "jesd216.h"
#include "flash_priv.h"
LOG_MODULE_REGISTER(spi_nor, CONFIG_FLASH_LOG_LEVEL);
/* Device Power Management Notes
*
* These flash devices have several modes during operation:
* * When CSn is asserted (during a SPI operation) the device is
* active.
* * When CSn is deasserted the device enters a standby mode.
* * Some devices support a Deep Power-Down mode which reduces current
* to as little as 0.1% of standby.
*
* The power reduction from DPD is sufficient to warrant allowing its
* use even in cases where Zephyr's device power management is not
* available. This is selected through the SPI_NOR_IDLE_IN_DPD
* Kconfig option.
*
* When mapped to the Zephyr Device Power Management states:
* * PM_DEVICE_STATE_ACTIVE covers both active and standby modes;
* * PM_DEVICE_STATE_SUSPENDED, and PM_DEVICE_STATE_OFF all correspond to
* deep-power-down mode.
*/
#define SPI_NOR_MAX_ADDR_WIDTH 4
#define ANY_INST_HAS_TRUE_(idx, bool_prop) \
COND_CODE_1(DT_INST_PROP(idx, bool_prop), (1,), ())
#define ANY_INST_HAS_TRUE(bool_prop) \
COND_CODE_1(IS_EMPTY(DT_INST_FOREACH_STATUS_OKAY_VARGS(ANY_INST_HAS_TRUE_, bool_prop)), \
(0), (1))
#define ANY_INST_HAS_PROP_(idx, prop_name) \
COND_CODE_1(DT_INST_NODE_HAS_PROP(idx, prop_name), (1,), ())
#define ANY_INST_HAS_PROP(prop_name) \
COND_CODE_1(IS_EMPTY(DT_INST_FOREACH_STATUS_OKAY_VARGS(ANY_INST_HAS_PROP_, prop_name)), \
(0), (1))
#define ANY_INST_HAS_MXICY_MX25R_POWER_MODE ANY_INST_HAS_PROP(mxicy_mx25r_power_mode)
#define ANY_INST_HAS_DPD ANY_INST_HAS_TRUE(has_dpd)
#define ANY_INST_HAS_T_EXIT_DPD ANY_INST_HAS_PROP(t_exit_dpd)
#define ANY_INST_HAS_DPD_WAKEUP_SEQUENCE ANY_INST_HAS_PROP(dpd_wakeup_sequence)
#define ANY_INST_HAS_RESET_GPIOS ANY_INST_HAS_PROP(reset_gpios)
#define ANY_INST_HAS_WP_GPIOS ANY_INST_HAS_PROP(wp_gpios)
#define ANY_INST_HAS_HOLD_GPIOS ANY_INST_HAS_PROP(hold_gpios)
#define DEV_CFG(_dev_) ((const struct spi_nor_config * const) (_dev_)->config)
/* MXICY Related defines*/
/* MXICY Low-power/high perf mode is second bit in configuration register 2 */
#define LH_SWITCH_BIT 9
#define JEDEC_MACRONIX_ID 0xc2
#define JEDEC_MX25R_TYPE_ID 0x28
/* Build-time data associated with the device.
 *
 * Which members exist depends on Kconfig choices and on which optional
 * devicetree properties appear on at least one enabled instance (the
 * ANY_INST_HAS_* gates); the *_exist bit-flags record per-instance
 * presence of those properties.
 */
struct spi_nor_config {
	/* Devicetree SPI configuration */
	struct spi_dt_spec spi;

#if ANY_INST_HAS_RESET_GPIOS
	const struct gpio_dt_spec reset;
#endif

	/* Runtime SFDP stores no static configuration. */
#ifndef CONFIG_SPI_NOR_SFDP_RUNTIME
	/* Size of device in bytes, from size property */
	uint32_t flash_size;

#ifdef CONFIG_FLASH_PAGE_LAYOUT
	/* Flash page layout can be determined from devicetree. */
	struct flash_pages_layout layout;
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

	/* Expected JEDEC ID, from jedec-id property */
	uint8_t jedec_id[SPI_NOR_MAX_ID_LEN];

#if defined(CONFIG_SPI_NOR_SFDP_MINIMAL)
	/* Optional support for entering 32-bit address mode. */
	uint8_t enter_4byte_addr;
#endif /* CONFIG_SPI_NOR_SFDP_MINIMAL */

#if defined(CONFIG_SPI_NOR_SFDP_DEVICETREE)
	/* Length of BFP structure, in 32-bit words. */
	uint8_t bfp_len;

	/* Pointer to the BFP table as read from the device
	 * (little-endian stored words), from sfdp-bfp property
	 */
	const struct jesd216_bfp *bfp;
#endif /* CONFIG_SPI_NOR_SFDP_DEVICETREE */
#endif /* CONFIG_SPI_NOR_SFDP_RUNTIME */

	/* Optional bits in SR to be cleared on startup.
	 *
	 * This information cannot be derived from SFDP.
	 */
	uint8_t has_lock;

#if ANY_INST_HAS_WP_GPIOS
	/* The write-protect GPIO (wp-gpios) */
	const struct gpio_dt_spec wp;
#endif

#if ANY_INST_HAS_HOLD_GPIOS
	/* The hold GPIO (hold-gpios) */
	const struct gpio_dt_spec hold;
#endif

#if ANY_INST_HAS_DPD
	uint16_t t_enter_dpd; /* in milliseconds */
	uint16_t t_dpdd_ms;   /* in milliseconds */
#if ANY_INST_HAS_T_EXIT_DPD
	uint16_t t_exit_dpd;  /* in milliseconds */
#endif
#endif

#if ANY_INST_HAS_DPD_WAKEUP_SEQUENCE
	uint16_t t_crdp_ms; /* in milliseconds */
	uint16_t t_rdp_ms;  /* in milliseconds */
#endif

#if ANY_INST_HAS_MXICY_MX25R_POWER_MODE
	bool mxicy_mx25r_power_mode;
#endif

	/* exist flags for dts opt-ins */
	bool dpd_exist:1;
	bool dpd_wakeup_sequence_exist:1;
	bool mxicy_mx25r_power_mode_exist:1;
	bool reset_gpios_exist:1;
	bool requires_ulbpr_exist:1;
	bool wp_gpios_exist:1;
	bool hold_gpios_exist:1;
};
/**
 * struct spi_nor_data - Structure for defining the SPI NOR access
 * @sem: The semaphore to access to the flash
 */
struct spi_nor_data {
	struct k_sem sem;

#if ANY_INST_HAS_DPD
	/* Low 32-bits of uptime counter at which device last entered
	 * deep power-down.
	 */
	uint32_t ts_enter_dpd;
#endif

	/* Miscellaneous flags */

	/* If set addressed operations should use 32-bit rather than
	 * 24-bit addresses.
	 *
	 * This is ignored if the access parameter to a command
	 * explicitly specifies 24-bit or 32-bit addressing.
	 */
	bool flag_access_32bit: 1;

	/* Minimal SFDP stores no dynamic configuration. Runtime and
	 * devicetree store page size and erase_types; runtime also
	 * stores flash size and layout.
	 */
#ifndef CONFIG_SPI_NOR_SFDP_MINIMAL
	/* Supported erase commands/sizes (from SFDP or devicetree) */
	struct jesd216_erase_type erase_types[JESD216_NUM_ERASE_TYPES];

	/* Number of bytes per page */
	uint16_t page_size;

#ifdef CONFIG_SPI_NOR_SFDP_RUNTIME
	/* Size of flash, in bytes */
	uint32_t flash_size;

#ifdef CONFIG_FLASH_PAGE_LAYOUT
	/* Page layout determined at runtime */
	struct flash_pages_layout layout;
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
#endif /* CONFIG_SPI_NOR_SFDP_RUNTIME */
#endif /* CONFIG_SPI_NOR_SFDP_MINIMAL */
};
#ifdef CONFIG_SPI_NOR_SFDP_MINIMAL
/* The historically supported erase sizes. */
static const struct jesd216_erase_type minimal_erase_types[JESD216_NUM_ERASE_TYPES] = {
	{
		/* Block erase: 2^16 = 64 KiBy */
		.cmd = SPI_NOR_CMD_BE,
		.exp = 16,
	},
	{
		/* Sector erase: 2^12 = 4 KiBy */
		.cmd = SPI_NOR_CMD_SE,
		.exp = 12,
	},
};
#endif /* CONFIG_SPI_NOR_SFDP_MINIMAL */

/* Poll delays passed to spi_nor_wait_until_ready(). */

/* Register writes should be ready extremely quickly */
#define WAIT_READY_REGISTER K_NO_WAIT
/* Page writes range from sub-ms to 10ms */
#define WAIT_READY_WRITE K_TICKS(1)
/* Erases can range from 45ms to 240sec */
#define WAIT_READY_ERASE K_MSEC(50)
/* Forward declaration: write-protect control used by write/erase paths. */
static int spi_nor_write_protection_set(const struct device *dev,
					bool write_protect);

/* Get pointer to array of supported erase types.  Static const for
 * minimal, data for runtime and devicetree.
 */
static inline const struct jesd216_erase_type *
dev_erase_types(const struct device *dev)
{
#ifdef CONFIG_SPI_NOR_SFDP_MINIMAL
	return minimal_erase_types;
#else /* CONFIG_SPI_NOR_SFDP_MINIMAL */
	const struct spi_nor_data *data = dev->data;

	return data->erase_types;
#endif /* CONFIG_SPI_NOR_SFDP_MINIMAL */
}
/* Get the size of the flash device.  Data for runtime, constant for
 * minimal and devicetree.
 */
static inline uint32_t dev_flash_size(const struct device *dev)
{
#ifdef CONFIG_SPI_NOR_SFDP_RUNTIME
	const struct spi_nor_data *data = dev->data;

	return data->flash_size;
#else /* CONFIG_SPI_NOR_SFDP_RUNTIME */
	const struct spi_nor_config *cfg = dev->config;

	return cfg->flash_size;
#endif /* CONFIG_SPI_NOR_SFDP_RUNTIME */
}

/* Get the flash device page size.  Constant for minimal, data for
 * runtime and devicetree.
 */
static inline uint16_t dev_page_size(const struct device *dev)
{
#ifdef CONFIG_SPI_NOR_SFDP_MINIMAL
	/* Devicetree may override the historical 256-byte default. */
	return DT_INST_PROP_OR(0, page_size, 256);
#else /* CONFIG_SPI_NOR_SFDP_MINIMAL */
	const struct spi_nor_data *data = dev->data;

	return data->page_size;
#endif /* CONFIG_SPI_NOR_SFDP_MINIMAL */
}

/* Fixed flash characteristics reported through the flash API. */
static const struct flash_parameters flash_nor_parameters = {
	.write_block_size = 1,
	.erase_value = 0xff,
};
/* Capture the time at which the device entered deep power-down. */
static inline void record_entered_dpd(const struct device *const dev)
{
#if ANY_INST_HAS_DPD
	const struct spi_nor_config *const driver_config = dev->config;

	if (driver_config->dpd_exist) {
		struct spi_nor_data *const driver_data = dev->data;

		driver_data->ts_enter_dpd = k_uptime_get_32();
	}
#else
	ARG_UNUSED(dev);
#endif
}

/* Check the current time against the time DPD was entered and delay
 * until it's ok to initiate the DPD exit process.
 */
static inline void delay_until_exit_dpd_ok(const struct device *const dev)
{
#if ANY_INST_HAS_DPD
	const struct spi_nor_config *const driver_config = dev->config;

	if (driver_config->dpd_exist) {
		struct spi_nor_data *const driver_data = dev->data;
		int32_t since = (int32_t)(k_uptime_get_32() - driver_data->ts_enter_dpd);

		/* If the time is negative the 32-bit counter has wrapped,
		 * which is certainly long enough no further delay is
		 * required.  Otherwise we have to check whether it's been
		 * long enough taking into account necessary delays for
		 * entering and exiting DPD.
		 */
		if (since >= 0) {
			/* Subtract time required for DPD to be reached */
			since -= driver_config->t_enter_dpd;

			/* Subtract time required in DPD before exit */
			since -= driver_config->t_dpdd_ms;

			/* If the adjusted time is negative we have to wait
			 * until it reaches zero before we can proceed.
			 */
			if (since < 0) {
				k_sleep(K_MSEC((uint32_t)-since));
			}
		}
	}
#else
	ARG_UNUSED(dev);
#endif /* ANY_INST_HAS_DPD */
}
/* Flags for the "access" argument of spi_nor_access(). */

/* Indicates that an access command includes bytes for the address.
 * If not provided the opcode is not followed by address bytes.
 */
#define NOR_ACCESS_ADDRESSED BIT(0)

/* Indicates that addressed access uses a 24-bit address regardless of
 * spi_nor_data::flag_32bit_addr.
 */
#define NOR_ACCESS_24BIT_ADDR BIT(1)

/* Indicates that addressed access uses a 32-bit address regardless of
 * spi_nor_data::flag_32bit_addr.
 */
#define NOR_ACCESS_32BIT_ADDR BIT(2)

/* Indicates that an access command is performing a write.  If not
 * provided access is a read.
 */
#define NOR_ACCESS_WRITE BIT(7)
/*
 * @brief Send an SPI command
 *
 * @param dev Device struct
 * @param opcode The command to send
 * @param access flags that determine how the command is constructed.
 *        See NOR_ACCESS_*.
 * @param addr The address to send (ignored unless NOR_ACCESS_ADDRESSED)
 * @param data The buffer to store or read the value
 * @param length The size of the buffer
 * @return 0 on success, negative errno code otherwise
 */
static int spi_nor_access(const struct device *const dev,
			  uint8_t opcode, unsigned int access,
			  off_t addr, void *data, size_t length)
{
	const struct spi_nor_config *const driver_cfg = dev->config;
	struct spi_nor_data *const driver_data = dev->data;
	bool is_addressed = (access & NOR_ACCESS_ADDRESSED) != 0U;
	bool is_write = (access & NOR_ACCESS_WRITE) != 0U;
	/* Command phase: opcode plus up to 4 address bytes. */
	uint8_t buf[5] = { 0 };
	struct spi_buf spi_buf[2] = {
		{
			.buf = buf,
			.len = 1,
		},
		{
			.buf = data,
			.len = length
		}
	};

	buf[0] = opcode;
	if (is_addressed) {
		bool access_24bit = (access & NOR_ACCESS_24BIT_ADDR) != 0;
		bool access_32bit = (access & NOR_ACCESS_32BIT_ADDR) != 0;
		/* Explicit 32-bit wins; otherwise fall back to the
		 * device-wide addressing mode unless 24-bit is forced.
		 */
		bool use_32bit = (access_32bit
				  || (!access_24bit
				      && driver_data->flag_access_32bit));
		/* Serialize the address big-endian; a 24-bit access
		 * sends only the low three bytes.
		 */
		union {
			uint32_t u32;
			uint8_t u8[4];
		} addr32 = {
			.u32 = sys_cpu_to_be32(addr),
		};

		if (use_32bit) {
			memcpy(&buf[1], &addr32.u8[0], 4);
			spi_buf[0].len += 4;
		} else {
			memcpy(&buf[1], &addr32.u8[1], 3);
			spi_buf[0].len += 3;
		}
	}

	const struct spi_buf_set tx_set = {
		.buffers = spi_buf,
		.count = (length != 0) ? 2 : 1,
	};

	const struct spi_buf_set rx_set = {
		.buffers = spi_buf,
		.count = 2,
	};

	if (is_write) {
		return spi_write_dt(&driver_cfg->spi, &tx_set);
	}

	return spi_transceive_dt(&driver_cfg->spi, &tx_set, &rx_set);
}
/* Convenience wrappers over spi_nor_access() for the four command shapes:
 * plain read, addressed read, plain write, and addressed write.
 */
#define spi_nor_cmd_read(dev, opcode, dest, length) \
	spi_nor_access(dev, opcode, 0, 0, dest, length)
#define spi_nor_cmd_addr_read(dev, opcode, addr, dest, length) \
	spi_nor_access(dev, opcode, NOR_ACCESS_ADDRESSED, addr, dest, length)
#define spi_nor_cmd_write(dev, opcode) \
	spi_nor_access(dev, opcode, NOR_ACCESS_WRITE, 0, NULL, 0)
#define spi_nor_cmd_addr_write(dev, opcode, addr, src, length) \
	spi_nor_access(dev, opcode, NOR_ACCESS_WRITE | NOR_ACCESS_ADDRESSED, \
		       addr, (void *)src, length)
/**
 * @brief Wait until the flash is ready
 *
 * @note The device must be externally acquired before invoking this
 * function.
 *
 * This function should be invoked after every ERASE, PROGRAM, or
 * WRITE_STATUS operation before continuing.  This allows us to assume
 * that the device is ready to accept new commands at any other point
 * in the code.
 *
 * @param dev The device structure
 * @param poll_delay Duration between polls of status register
 * @return 0 on success, negative errno code otherwise
 */
static int spi_nor_wait_until_ready(const struct device *dev, k_timeout_t poll_delay)
{
	int ret;
	uint8_t reg;

	/* poll_delay is only consumed when sleeping between polls is
	 * enabled below; suppress the unused warning otherwise.
	 */
	ARG_UNUSED(poll_delay);

	while (true) {
		ret = spi_nor_cmd_read(dev, SPI_NOR_CMD_RDSR, &reg, sizeof(reg));
		/* Exit on error or no longer WIP */
		if (ret || !(reg & SPI_NOR_WIP_BIT)) {
			break;
		}
#ifdef CONFIG_SPI_NOR_SLEEP_WHILE_WAITING_UNTIL_READY
		/* Don't monopolise the CPU while waiting for ready */
		k_sleep(poll_delay);
#endif /* CONFIG_SPI_NOR_SLEEP_WHILE_WAITING_UNTIL_READY */
	}
	return ret;
}
#if defined(CONFIG_SPI_NOR_SFDP_RUNTIME) || defined(CONFIG_FLASH_JESD216_API)
/*
 * @brief Read content from the SFDP hierarchy
 *
 * @note The device must be externally acquired before invoking this
 * function.
 *
 * @param dev Device struct
 * @param addr The address to send
 * @param data The buffer to store or read the value
 * @param length The size of the buffer
 * @return 0 on success, negative errno code otherwise
 */
static int read_sfdp(const struct device *const dev,
		     off_t addr, void *data, size_t length)
{
	/* READ_SFDP requires a 24-bit address followed by a single
	 * byte for a wait state.  This is effected by using 32-bit
	 * address by shifting the 24-bit address up 8 bits.
	 */
	return spi_nor_access(dev, JESD216_CMD_READ_SFDP,
			      NOR_ACCESS_32BIT_ADDR | NOR_ACCESS_ADDRESSED,
			      addr << 8, data, length);
}
#endif /* CONFIG_SPI_NOR_SFDP_RUNTIME || CONFIG_FLASH_JESD216_API */
/* Put the device into deep power-down, when the instance supports it,
 * and record the entry time for later exit-delay computation.
 */
static int enter_dpd(const struct device *const dev)
{
	const struct spi_nor_config *cfg = dev->config;
	int rc;

	if (!cfg->dpd_exist) {
		return 0;
	}

	rc = spi_nor_cmd_write(dev, SPI_NOR_CMD_DPD);
	if (rc == 0) {
		record_entered_dpd(dev);
	}

	return rc;
}
/* Wake the device from deep power-down, honoring the required timing
 * and, where configured, the CSn-toggle wakeup sequence.
 */
static int exit_dpd(const struct device *const dev)
{
	int ret = 0;
#if ANY_INST_HAS_DPD
	const struct spi_nor_config *cfg = dev->config;

	if (cfg->dpd_exist) {
		delay_until_exit_dpd_ok(dev);

		if (cfg->dpd_wakeup_sequence_exist) {
#if ANY_INST_HAS_DPD_WAKEUP_SEQUENCE
			/* Assert CSn and wait for tCRDP.
			 *
			 * Unfortunately the SPI API doesn't allow us to
			 * control CSn so fake it by writing a known-supported
			 * single-byte command, hoping that'll hold the assert
			 * long enough.  This is highly likely, since the
			 * duration is usually less than two SPI clock cycles.
			 */
			ret = spi_nor_cmd_write(dev, SPI_NOR_CMD_RDID);

			/* Deassert CSn and wait for tRDP */
			k_sleep(K_MSEC(cfg->t_rdp_ms));
#endif /* ANY_INST_HAS_DPD_WAKEUP_SEQUENCE */
		} else {
			ret = spi_nor_cmd_write(dev, SPI_NOR_CMD_RDPD);

#if ANY_INST_HAS_T_EXIT_DPD
			if (ret == 0) {
				/* NOTE(review): dpd_exist was already checked
				 * above; this inner test looks redundant —
				 * confirm before simplifying.
				 */
				if (cfg->dpd_exist) {
					k_sleep(K_MSEC(cfg->t_exit_dpd));
				}
			}
#endif /* T_EXIT_DPD */
		}
	}
#endif /* ANY_INST_HAS_DPD */
	return ret;
}
/* Everything necessary to acquire owning access to the device.
 *
 * This means taking the lock and, if necessary, waking the device
 * from deep power-down mode.
 */
static void acquire_device(const struct device *dev)
{
	struct spi_nor_data *const data = dev->data;

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_sem_take(&data->sem, K_FOREVER);
	}

	if (IS_ENABLED(CONFIG_SPI_NOR_IDLE_IN_DPD)) {
		exit_dpd(dev);
	}
}
/* Everything necessary to release access to the device.
 *
 * This means (optionally) putting the device into deep power-down
 * mode, and releasing the lock.
 */
static void release_device(const struct device *dev)
{
	struct spi_nor_data *const data = dev->data;

	if (IS_ENABLED(CONFIG_SPI_NOR_IDLE_IN_DPD)) {
		enter_dpd(dev);
	}

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_sem_give(&data->sem);
	}
}
/**
 * @brief Read the status register.
 *
 * @note The device must be externally acquired before invoking this
 * function.
 *
 * @param dev Device struct
 *
 * @return the non-negative value of the status register, or an error code.
 */
static int spi_nor_rdsr(const struct device *dev)
{
	uint8_t status;
	int rc = spi_nor_cmd_read(dev, SPI_NOR_CMD_RDSR, &status, sizeof(status));

	return (rc == 0) ? status : rc;
}
/**
 * @brief Write the status register.
 *
 * Issues WREN, writes the new value, then waits for the device to
 * report ready.
 *
 * @note The device must be externally acquired before invoking this
 * function.
 *
 * @param dev Device struct
 * @param sr The new value of the status register
 *
 * @return 0 on success or a negative error code.
 */
static int spi_nor_wrsr(const struct device *dev,
			uint8_t sr)
{
	int rc;

	rc = spi_nor_cmd_write(dev, SPI_NOR_CMD_WREN);
	if (rc == 0) {
		rc = spi_nor_access(dev, SPI_NOR_CMD_WRSR, NOR_ACCESS_WRITE,
				    0, &sr, sizeof(sr));
	}
	if (rc == 0) {
		rc = spi_nor_wait_until_ready(dev, WAIT_READY_REGISTER);
	}

	return rc;
}
#if ANY_INST_HAS_MXICY_MX25R_POWER_MODE
/**
 * @brief Read the configuration register.
 *
 * @note The device must be externally acquired before invoking this
 * function.
 *
 * @param dev Device struct
 *
 * @return the non-negative value of the configuration register, or an
 * error code (-ENOSYS if the instance lacks the power-mode property).
 */
static int mxicy_rdcr(const struct device *dev)
{
	const struct spi_nor_config *cfg = dev->config;

	if (!cfg->mxicy_mx25r_power_mode_exist) {
		/* Previously -ENOSYS was stored in a uint16_t, which
		 * wrapped to a large positive value and defeated the
		 * callers' `ret < 0` error checks.  Return the errno
		 * directly instead.
		 */
		return -ENOSYS;
	}

	/* MX25R parts expose two configuration register bytes; read
	 * both.  Byte order within cr is host-endian — callers only
	 * test individual bits so this matches the original behavior.
	 */
	uint16_t cr = 0;
	int ret = spi_nor_cmd_read(dev, CMD_RDCR, &cr, sizeof(cr));

	if (ret < 0) {
		return ret;
	}

	return cr;
}
/**
 * @brief Write the configuration register.
 *
 * @note The device must be externally acquired before invoking this
 * function.
 *
 * @param dev Device struct
 * @param cr The new value of the configuration register
 *
 * @return 0 on success or a negative error code (-ENOSYS if the
 * instance lacks the power-mode property).
 */
static int mxicy_wrcr(const struct device *dev,
		      uint16_t cr)
{
	const struct spi_nor_config *cfg = dev->config;
	int ret = -ENOSYS;

	/* The configuration register bytes on the Macronix MX25R devices are
	 * written using the Write Status Register command where the configuration
	 * register bytes are written as two extra bytes after the status register.
	 * First read out the current status register to preserve the value.
	 */

	if (cfg->mxicy_mx25r_power_mode_exist) {
		int sr = spi_nor_rdsr(dev);

		if (sr < 0) {
			LOG_ERR("Read status register failed: %d", sr);
			return sr;
		}

		ret = spi_nor_cmd_write(dev, SPI_NOR_CMD_WREN);
		if (ret != 0) {
			return ret;
		}

		/* WRSR payload: status register followed by CR1 and CR2. */
		uint8_t data[] = {
			sr,
			cr & 0xFF,	/* Configuration register 1 */
			cr >> 8		/* Configuration register 2 */
		};

		ret = spi_nor_access(dev, SPI_NOR_CMD_WRSR, NOR_ACCESS_WRITE, 0,
				     data, sizeof(data));
		if (ret != 0) {
			return ret;
		}

		ret = spi_nor_wait_until_ready(dev, WAIT_READY_REGISTER);
	}

	return ret;
}
/* Select between the MX25R ultra-low-power and high-performance modes
 * according to the devicetree property, when the attached device is a
 * Macronix MX25R.  Failure to switch is logged but does not fail init.
 *
 * @param dev Device struct
 * @param jedec_id three-byte JEDEC ID previously read from the device
 *
 * @return 0 on success (including unsupported device), negative errno
 * code on communication failure.
 */
static int mxicy_configure(const struct device *dev, const uint8_t *jedec_id)
{
	const struct spi_nor_config *cfg = dev->config;
	int ret = -ENOSYS;

	if (cfg->mxicy_mx25r_power_mode_exist) {
		/* Low-power/high perf mode is second bit in configuration register 2 */
		int current_cr, new_cr;
		/* lh_switch enum index:
		 * 0: Ultra low power
		 * 1: High performance mode
		 */
		const bool use_high_perf = cfg->mxicy_mx25r_power_mode;

		/* Only supported on Macronix MX25R Ultra Low Power series. */
		if (jedec_id[0] != JEDEC_MACRONIX_ID || jedec_id[1] != JEDEC_MX25R_TYPE_ID) {
			LOG_WRN("L/H switch not supported for device id: %02x %02x %02x",
				jedec_id[0], jedec_id[1], jedec_id[2]);
			/* Do not return an error here because the flash still functions */
			return 0;
		}

		acquire_device(dev);

		/* Read current configuration register */
		ret = mxicy_rdcr(dev);
		if (ret < 0) {
			release_device(dev);
			return ret;
		}
		current_cr = ret;

		LOG_DBG("Use high performance mode? %d", use_high_perf);
		new_cr = current_cr;
		WRITE_BIT(new_cr, LH_SWITCH_BIT, use_high_perf);
		if (new_cr != current_cr) {
			ret = mxicy_wrcr(dev, new_cr);
		} else {
			ret = 0;
		}

		if (ret < 0) {
			/* Fixed typo in log message ("performace"). */
			LOG_ERR("Enable high performance mode failed: %d", ret);
		}

		release_device(dev);
	}

	return ret;
}
#endif /* ANY_INST_HAS_MXICY_MX25R_POWER_MODE */
/* Flash API read: bounds-check the request, then issue a single READ. */
static int spi_nor_read(const struct device *dev, off_t addr, void *dest,
			size_t size)
{
	const size_t flash_size = dev_flash_size(dev);
	int ret;

	/* Should be between 0 and flash size.  Phrased so addr + size
	 * cannot overflow for hostile size values.
	 */
	if ((addr < 0) || ((size_t)addr > flash_size)
	    || (size > flash_size - (size_t)addr)) {
		return -EINVAL;
	}

	acquire_device(dev);

	ret = spi_nor_cmd_addr_read(dev, SPI_NOR_CMD_READ, addr, dest, size);

	release_device(dev);
	return ret;
}
#if defined(CONFIG_FLASH_EX_OP_ENABLED)
/* Extended operations: currently only a soft reset (RESET_EN followed
 * by RESET_MEM) is supported; anything else is -ENOTSUP.
 */
static int flash_spi_nor_ex_op(const struct device *dev, uint16_t code,
			       const uintptr_t in, void *out)
{
	int rc = -ENOTSUP;

	ARG_UNUSED(in);
	ARG_UNUSED(out);

	acquire_device(dev);

	if (code == FLASH_EX_OP_RESET) {
		rc = spi_nor_cmd_write(dev, SPI_NOR_CMD_RESET_EN);
		if (rc == 0) {
			rc = spi_nor_cmd_write(dev, SPI_NOR_CMD_RESET_MEM);
		}
	}

	release_device(dev);
	return rc;
}
#endif
/* Flash API write: page-program the range, splitting at page
 * boundaries and re-issuing WREN before each page.
 */
static int spi_nor_write(const struct device *dev, off_t addr,
			 const void *src,
			 size_t size)
{
	const size_t flash_size = dev_flash_size(dev);
	const uint16_t page_size = dev_page_size(dev);
	int ret;

	/* Should be between 0 and flash size.  Phrased so addr + size
	 * cannot overflow for hostile size values.
	 */
	if ((addr < 0) || ((size_t)addr > flash_size)
	    || (size > flash_size - (size_t)addr)) {
		return -EINVAL;
	}

	acquire_device(dev);
	ret = spi_nor_write_protection_set(dev, false);
	if (ret == 0) {
		while (size > 0) {
			size_t to_write = size;

			/* Don't write more than a page. */
			if (to_write >= page_size) {
				to_write = page_size;
			}

			/* Don't write across a page boundary */
			if (((addr + to_write - 1U) / page_size)
			    != (addr / page_size)) {
				to_write = page_size - (addr % page_size);
			}

			/* Each page program needs a fresh write enable. */
			ret = spi_nor_cmd_write(dev, SPI_NOR_CMD_WREN);
			if (ret != 0) {
				break;
			}

			ret = spi_nor_cmd_addr_write(dev, SPI_NOR_CMD_PP, addr,
						     src, to_write);
			if (ret != 0) {
				break;
			}

			size -= to_write;
			src = (const uint8_t *)src + to_write;
			addr += to_write;

			ret = spi_nor_wait_until_ready(dev, WAIT_READY_WRITE);
			if (ret != 0) {
				break;
			}
		}
	}

	/* Re-assert write protection even if the write failed, but
	 * keep the first error as the return value.
	 */
	int ret2 = spi_nor_write_protection_set(dev, true);

	if (!ret) {
		ret = ret2;
	}

	release_device(dev);
	return ret;
}
/* Flash API erase: erase a sector-aligned range, preferring the
 * largest erase type (or chip erase) that fits at each step.
 */
static int spi_nor_erase(const struct device *dev, off_t addr, size_t size)
{
	const size_t flash_size = dev_flash_size(dev);
	int ret;

	/* erase area must be subregion of device */
	if ((addr < 0) || ((size + addr) > flash_size)) {
		return -EINVAL;
	}

	/* address must be sector-aligned */
	if (!SPI_NOR_IS_SECTOR_ALIGNED(addr)) {
		return -EINVAL;
	}

	/* size must be a multiple of sectors */
	if ((size % SPI_NOR_SECTOR_SIZE) != 0) {
		return -EINVAL;
	}

	acquire_device(dev);
	ret = spi_nor_write_protection_set(dev, false);

	while ((size > 0) && (ret == 0)) {
		/* Each erase command needs a fresh write enable. */
		ret = spi_nor_cmd_write(dev, SPI_NOR_CMD_WREN);
		if (ret) {
			break;
		}

		if (size == flash_size) {
			/* chip erase */
			ret = spi_nor_cmd_write(dev, SPI_NOR_CMD_CE);
			size -= flash_size;
		} else {
			const struct jesd216_erase_type *erase_types =
				dev_erase_types(dev);
			const struct jesd216_erase_type *bet = NULL;

			/* Pick the largest erase type that is aligned at
			 * addr and no bigger than the remaining size.
			 */
			for (uint8_t ei = 0; ei < JESD216_NUM_ERASE_TYPES; ++ei) {
				const struct jesd216_erase_type *etp =
					&erase_types[ei];

				if ((etp->exp != 0)
				    && SPI_NOR_IS_ALIGNED(addr, etp->exp)
				    && (size >= BIT(etp->exp))
				    && ((bet == NULL)
					|| (etp->exp > bet->exp))) {
					bet = etp;
				}
			}
			if (bet != NULL) {
				ret = spi_nor_cmd_addr_write(dev, bet->cmd, addr, NULL, 0);
				addr += BIT(bet->exp);
				size -= BIT(bet->exp);
			} else {
				/* Cannot happen for sector-aligned input with
				 * a 4 KiBy erase type present.
				 */
				LOG_DBG("Can't erase %zu at 0x%lx",
					size, (long)addr);
				ret = -EINVAL;
			}
		}
		if (ret != 0) {
			break;
		}

		ret = spi_nor_wait_until_ready(dev, WAIT_READY_ERASE);
	}

	/* Re-assert write protection, preserving the first error. */
	int ret2 = spi_nor_write_protection_set(dev, true);

	if (!ret) {
		ret = ret2;
	}

	release_device(dev);

	return ret;
}
/* Enable or disable write protection.
 *
 * Drives the optional WP GPIO (released before sending the command,
 * re-asserted after) and issues WRDI/WREN; for devices requiring it,
 * also sends ULBPR when unprotecting.
 *
 * @note The device must be externally acquired before invoking this
 * function.
 */
static int spi_nor_write_protection_set(const struct device *dev,
					bool write_protect)
{
	int ret;
	const struct spi_nor_config *cfg = dev->config;

#if ANY_INST_HAS_WP_GPIOS
	if (DEV_CFG(dev)->wp_gpios_exist && write_protect == false) {
		gpio_pin_set_dt(&(DEV_CFG(dev)->wp), 0);
	}
#endif

	ret = spi_nor_cmd_write(dev, (write_protect) ?
	      SPI_NOR_CMD_WRDI : SPI_NOR_CMD_WREN);

	if (cfg->requires_ulbpr_exist
	    && (ret == 0)
	    && !write_protect) {
		ret = spi_nor_cmd_write(dev, SPI_NOR_CMD_ULBPR);
	}

#if ANY_INST_HAS_WP_GPIOS
	if (DEV_CFG(dev)->wp_gpios_exist && write_protect == true) {
		gpio_pin_set_dt(&(DEV_CFG(dev)->wp), 1);
	}
#endif

	return ret;
}
#if defined(CONFIG_FLASH_JESD216_API) || defined(CONFIG_SPI_NOR_SFDP_RUNTIME)
/* Flash API sfdp_read: read_sfdp() wrapped in device acquisition. */
static int spi_nor_sfdp_read(const struct device *dev, off_t addr,
			     void *dest, size_t size)
{
	int rc;

	acquire_device(dev);
	rc = read_sfdp(dev, addr, dest, size);
	release_device(dev);

	return rc;
}
#endif /* CONFIG_FLASH_JESD216_API || CONFIG_SPI_NOR_SFDP_RUNTIME */
/* Flash API read_jedec_id: fetch SPI_NOR_MAX_ID_LEN identification
 * bytes into the caller-supplied buffer.
 */
static int spi_nor_read_jedec_id(const struct device *dev,
				 uint8_t *id)
{
	int rc;

	if (id == NULL) {
		return -EINVAL;
	}

	acquire_device(dev);
	rc = spi_nor_cmd_read(dev, SPI_NOR_CMD_RDID, id, SPI_NOR_MAX_ID_LEN);
	release_device(dev);

	return rc;
}
/* Put the device into the appropriate address mode, if supported.
 *
 * On successful return spi_nor_data::flag_access_32bit has been set
 * (cleared) if the device is configured for 4-byte (3-byte) addresses
 * for read, write, and erase commands.
 *
 * @param dev the device
 *
 * @param enter_4byte_addr the Enter 4-Byte Addressing bit set from
 * DW16 of SFDP BFP.  A value of all zeros or all ones is interpreted
 * as "not supported".
 *
 * @retval -ENOTSUP if 4-byte addressing is supported but not in a way
 * that the driver can handle.
 * @retval negative codes if the attempt was made and failed
 * @retval 0 if the device is successfully left in 24-bit mode or
 * reconfigured to 32-bit mode.
 */
static int spi_nor_set_address_mode(const struct device *dev,
				    uint8_t enter_4byte_addr)
{
	int rc = 0;

	LOG_DBG("Checking enter-4byte-addr %02x", enter_4byte_addr);

	/* Not provided: either no bits or all bits set. */
	if ((enter_4byte_addr == 0) || (enter_4byte_addr == 0xff)) {
		return 0;
	}

	/* Only command 0xB7 (Enter 4-Byte Address Mode), with or
	 * without a preceding WREN, is currently supported.
	 */
	if ((enter_4byte_addr & 0x03) == 0) {
		return -ENOTSUP;
	}

	acquire_device(dev);

	if ((enter_4byte_addr & 0x02) != 0) {
		/* Enter after WREN. */
		rc = spi_nor_cmd_write(dev, SPI_NOR_CMD_WREN);
	}

	if (rc == 0) {
		rc = spi_nor_cmd_write(dev, SPI_NOR_CMD_4BA);
	}

	if (rc == 0) {
		struct spi_nor_data *data = dev->data;

		data->flag_access_32bit = true;
	}

	release_device(dev);

	return rc;
}
#ifndef CONFIG_SPI_NOR_SFDP_MINIMAL
/* Extract device parameters (size, erase types, page size, address
 * mode) from a JESD216 Basic Flash Parameter table and store/validate
 * them against the driver state.
 *
 * @param dev the device
 * @param php parameter header describing the BFP table
 * @param bfp the BFP table content
 * @return 0 on success, negative errno code otherwise
 */
static int spi_nor_process_bfp(const struct device *dev,
			       const struct jesd216_param_header *php,
			       const struct jesd216_bfp *bfp)
{
	struct spi_nor_data *data = dev->data;
	struct jesd216_erase_type *etp = data->erase_types;
	/* BFP density is in bits; convert to bytes. */
	const size_t flash_size = jesd216_bfp_density(bfp) / 8U;

	LOG_INF("%s: %u %ciBy flash", dev->name,
		(flash_size < (1024U * 1024U)) ? (uint32_t)(flash_size >> 10)
					       : (uint32_t)(flash_size >> 20),
		(flash_size < (1024U * 1024U)) ? 'k' : 'M');

	/* Copy over the erase types, preserving their order.  (The
	 * Sector Map Parameter table references them by index.)
	 */
	memset(data->erase_types, 0, sizeof(data->erase_types));
	for (uint8_t ti = 1; ti <= ARRAY_SIZE(data->erase_types); ++ti) {
		if (jesd216_bfp_erase(bfp, ti, etp) == 0) {
			LOG_DBG("Erase %u with %02x", (uint32_t)BIT(etp->exp), etp->cmd);
		}
		++etp;
	}

	data->page_size = jesd216_bfp_page_size(php, bfp);
#ifdef CONFIG_SPI_NOR_SFDP_RUNTIME
	data->flash_size = flash_size;
#else /* CONFIG_SPI_NOR_SFDP_RUNTIME */
	/* Devicetree-provided size must agree with the device. */
	if (flash_size != dev_flash_size(dev)) {
		LOG_ERR("BFP flash size mismatch with devicetree");
		return -EINVAL;
	}
#endif /* CONFIG_SPI_NOR_SFDP_RUNTIME */

	LOG_DBG("Page size %u bytes", data->page_size);

	/* If 4-byte addressing is supported, switch to it. */
	if (jesd216_bfp_addrbytes(bfp) != JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_3B) {
		struct jesd216_bfp_dw16 dw16;
		int rc = 0;

		if (jesd216_bfp_decode_dw16(php, bfp, &dw16) == 0) {
			rc = spi_nor_set_address_mode(dev, dw16.enter_4ba);
		}

		if (rc != 0) {
			LOG_ERR("Unable to enter 4-byte mode: %d\n", rc);
			return rc;
		}
	}
	return 0;
}
/* Obtain the BFP table — by reading SFDP from the device (runtime) or
 * from devicetree (devicetree mode) — and process it.
 *
 * @param dev the device
 * @return 0 on success, negative errno code otherwise
 */
static int spi_nor_process_sfdp(const struct device *dev)
{
	int rc;

#if defined(CONFIG_SPI_NOR_SFDP_RUNTIME)
	/* For runtime we need to read the SFDP table, identify the
	 * BFP block, and process it.
	 */
	const uint8_t decl_nph = 2;
	union {
		/* We only process BFP so use one parameter block */
		uint8_t raw[JESD216_SFDP_SIZE(decl_nph)];
		struct jesd216_sfdp_header sfdp;
	} u_header;
	const struct jesd216_sfdp_header *hp = &u_header.sfdp;

	rc = spi_nor_sfdp_read(dev, 0, u_header.raw, sizeof(u_header.raw));
	if (rc != 0) {
		LOG_ERR("SFDP read failed: %d", rc);
		return rc;
	}

	uint32_t magic = jesd216_sfdp_magic(hp);

	if (magic != JESD216_SFDP_MAGIC) {
		LOG_ERR("SFDP magic %08x invalid", magic);
		return -EINVAL;
	}

	LOG_INF("%s: SFDP v %u.%u AP %x with %u PH", dev->name,
		hp->rev_major, hp->rev_minor, hp->access, 1 + hp->nph);

	/* Walk the parameter headers looking for the BFP. */
	const struct jesd216_param_header *php = hp->phdr;
	const struct jesd216_param_header *phpe = php + MIN(decl_nph, 1 + hp->nph);

	while (php != phpe) {
		uint16_t id = jesd216_param_id(php);

		LOG_INF("PH%u: %04x rev %u.%u: %u DW @ %x",
			(php - hp->phdr), id, php->rev_major, php->rev_minor,
			php->len_dw, jesd216_param_addr(php));

		if (id == JESD216_SFDP_PARAM_ID_BFP) {
			union {
				/* Cap at 20 DW; later BFP words are unused. */
				uint32_t dw[MIN(php->len_dw, 20)];
				struct jesd216_bfp bfp;
			} u_param;
			const struct jesd216_bfp *bfp = &u_param.bfp;

			rc = spi_nor_sfdp_read(dev, jesd216_param_addr(php),
					       u_param.dw, sizeof(u_param.dw));
			if (rc == 0) {
				rc = spi_nor_process_bfp(dev, php, bfp);
			}

			if (rc != 0) {
				LOG_INF("SFDP BFP failed: %d", rc);
				break;
			}
		}
		++php;
	}
#elif defined(CONFIG_SPI_NOR_SFDP_DEVICETREE)
	/* For devicetree we need to synthesize a parameter header and
	 * process the stored BFP data as if we had read it.
	 */
	const struct spi_nor_config *cfg = dev->config;
	struct jesd216_param_header bfp_hdr = {
		.len_dw = cfg->bfp_len,
	};

	rc = spi_nor_process_bfp(dev, &bfp_hdr, cfg->bfp);
#else
#error Unhandled SFDP choice
#endif

	return rc;
}
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/* Compute (runtime) or validate (devicetree) the flash page layout
 * against the configured layout page size.
 *
 * @param dev the device
 * @return 0 on success, negative errno code otherwise
 */
static int setup_pages_layout(const struct device *dev)
{
#if defined(CONFIG_SPI_NOR_SFDP_RUNTIME)
	struct spi_nor_data *data = dev->data;
	const size_t flash_size = dev_flash_size(dev);
	const uint32_t layout_page_size = CONFIG_SPI_NOR_FLASH_LAYOUT_PAGE_SIZE;
	uint8_t exp = 0;

	/* Find the smallest erase size. */
	for (size_t i = 0; i < ARRAY_SIZE(data->erase_types); ++i) {
		const struct jesd216_erase_type *etp = &data->erase_types[i];

		if ((etp->cmd != 0)
		    && ((exp == 0) || (etp->exp < exp))) {
			exp = etp->exp;
		}
	}

	if (exp == 0) {
		return -ENOTSUP;
	}

	uint32_t erase_size = BIT(exp);

	/* Error if layout page size is not a multiple of smallest
	 * erase size.
	 */
	if ((layout_page_size % erase_size) != 0) {
		LOG_ERR("layout page %u not compatible with erase size %u",
			layout_page_size, erase_size);
		return -EINVAL;
	}

	/* Warn but accept layout page sizes that leave inaccessible
	 * space.
	 */
	if ((flash_size % layout_page_size) != 0) {
		LOG_INF("layout page %u wastes space with device size %zu",
			layout_page_size, flash_size);
	}

	data->layout.pages_size = layout_page_size;
	data->layout.pages_count = flash_size / layout_page_size;
	/* pages_count/pages_size are size_t: use %zu, not %u. */
	LOG_DBG("layout %zu x %zu By pages", data->layout.pages_count, data->layout.pages_size);
#elif defined(CONFIG_SPI_NOR_SFDP_DEVICETREE)
	const struct spi_nor_config *cfg = dev->config;
	const struct flash_pages_layout *layout = &cfg->layout;
	const size_t flash_size = dev_flash_size(dev);
	size_t layout_size = layout->pages_size * layout->pages_count;

	if (flash_size != layout_size) {
		/* flash_size is size_t: use %zu, not %u. */
		LOG_ERR("device size %zu mismatch %zu * %zu By pages",
			flash_size, layout->pages_count, layout->pages_size);
		return -EINVAL;
	}
#else /* CONFIG_SPI_NOR_SFDP_RUNTIME */
#error Unhandled SFDP choice
#endif /* CONFIG_SPI_NOR_SFDP_RUNTIME */

	return 0;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
#endif /* CONFIG_SPI_NOR_SFDP_MINIMAL */
/**
 * @brief Configure the flash
 *
 * Validates the bus, resets/wakes the device, verifies the JEDEC ID,
 * clears block-protect bits, and processes SFDP/address-mode setup.
 *
 * @param dev The flash device structure
 * @return 0 on success, negative errno code otherwise
 */
static int spi_nor_configure(const struct device *dev)
{
	const struct spi_nor_config *cfg = dev->config;
	uint8_t jedec_id[SPI_NOR_MAX_ID_LEN];
	int rc;

	/* Validate bus and CS is ready */
	if (!spi_is_ready_dt(&cfg->spi)) {
		return -ENODEV;
	}

#if ANY_INST_HAS_RESET_GPIOS
	/* Pulse the optional reset GPIO before talking to the device. */
	if (cfg->reset_gpios_exist) {
		if (!gpio_is_ready_dt(&cfg->reset)) {
			LOG_ERR("Reset pin not ready");
			return -ENODEV;
		}
		if (gpio_pin_configure_dt(&cfg->reset, GPIO_OUTPUT_ACTIVE)) {
			LOG_ERR("Couldn't configure reset pin");
			return -ENODEV;
		}
		rc = gpio_pin_set_dt(&cfg->reset, 0);
		if (rc) {
			return rc;
		}
	}
#endif

	/* After a soft-reset the flash might be in DPD or busy writing/erasing.
	 * Exit DPD and wait until flash is ready.
	 */
	acquire_device(dev);

	rc = exit_dpd(dev);
	if (rc < 0) {
		LOG_ERR("Failed to exit DPD (%d)", rc);
		release_device(dev);
		return -ENODEV;
	}

	rc = spi_nor_rdsr(dev);
	if (rc > 0 && (rc & SPI_NOR_WIP_BIT)) {
		LOG_WRN("Waiting until flash is ready");
		rc = spi_nor_wait_until_ready(dev, WAIT_READY_REGISTER);
	}
	release_device(dev);
	if (rc < 0) {
		LOG_ERR("Failed to wait until flash is ready (%d)", rc);
		return -ENODEV;
	}

	/* now the spi bus is configured, we can verify SPI
	 * connectivity by reading the JEDEC ID.
	 */

	rc = spi_nor_read_jedec_id(dev, jedec_id);
	if (rc != 0) {
		LOG_ERR("JEDEC ID read failed: %d", rc);
		return -ENODEV;
	}

#ifndef CONFIG_SPI_NOR_SFDP_RUNTIME
	/* For minimal and devicetree we need to check the JEDEC ID
	 * against the one from devicetree, to ensure we didn't find a
	 * device that has different parameters.
	 */
	if (memcmp(jedec_id, cfg->jedec_id, sizeof(jedec_id)) != 0) {
		LOG_ERR("Device id %02x %02x %02x does not match config %02x %02x %02x",
			jedec_id[0], jedec_id[1], jedec_id[2],
			cfg->jedec_id[0], cfg->jedec_id[1], cfg->jedec_id[2]);
		return -EINVAL;
	}
#endif

	/* Check for block protect bits that need to be cleared.  This
	 * information cannot be determined from SFDP content, so the
	 * devicetree node property must be set correctly for any device
	 * that powers up with block protect enabled.
	 */
	if (cfg->has_lock != 0) {
		acquire_device(dev);

		rc = spi_nor_rdsr(dev);

		/* Only clear if RDSR worked and something's set. */
		if (rc > 0) {
			rc = spi_nor_wrsr(dev, rc & ~cfg->has_lock);
		}

		release_device(dev);

		if (rc != 0) {
			LOG_ERR("BP clear failed: %d\n", rc);
			return -ENODEV;
		}
	}

#ifdef CONFIG_SPI_NOR_SFDP_MINIMAL
	/* For minimal we support some overrides from specific
	 * devicetree properties.
	 */
	if (cfg->enter_4byte_addr != 0) {
		rc = spi_nor_set_address_mode(dev, cfg->enter_4byte_addr);

		if (rc != 0) {
			LOG_ERR("Unable to enter 4-byte mode: %d\n", rc);
			return -ENODEV;
		}
	}
#else /* CONFIG_SPI_NOR_SFDP_MINIMAL */
	/* For devicetree and runtime we need to process BFP data and
	 * set up or validate page layout.
	 */
	rc = spi_nor_process_sfdp(dev);
	if (rc != 0) {
		LOG_ERR("SFDP read failed: %d", rc);
		return -ENODEV;
	}

#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	rc = setup_pages_layout(dev);
	if (rc != 0) {
		LOG_ERR("layout setup failed: %d", rc);
		return -ENODEV;
	}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
#endif /* CONFIG_SPI_NOR_SFDP_MINIMAL */

#if ANY_INST_HAS_MXICY_MX25R_POWER_MODE
	if (cfg->mxicy_mx25r_power_mode_exist) {
		/* Do not fail init if setting configuration register fails */
		(void)mxicy_configure(dev, jedec_id);
	}
#endif /* ANY_INST_HAS_MXICY_MX25R_POWER_MODE */

	return 0;
}
/* Device power-management hook.
 *
 * With CONFIG_SPI_NOR_IDLE_IN_DPD the device already enters/leaves DPD
 * around every access, so suspend/resume are no-ops; otherwise they map
 * directly to enter_dpd()/exit_dpd().
 */
static int spi_nor_pm_control(const struct device *dev, enum pm_device_action action)
{
	int rc = 0;

	switch (action) {
#ifdef CONFIG_SPI_NOR_IDLE_IN_DPD
	case PM_DEVICE_ACTION_SUSPEND:
	case PM_DEVICE_ACTION_RESUME:
		break;
#else
	case PM_DEVICE_ACTION_SUSPEND:
		acquire_device(dev);
		rc = enter_dpd(dev);
		release_device(dev);
		break;
	case PM_DEVICE_ACTION_RESUME:
		acquire_device(dev);
		rc = exit_dpd(dev);
		release_device(dev);
		break;
#endif /* CONFIG_SPI_NOR_IDLE_IN_DPD */
	case PM_DEVICE_ACTION_TURN_ON:
		/* Coming out of power off */
		rc = spi_nor_configure(dev);
#ifndef CONFIG_SPI_NOR_IDLE_IN_DPD
		if (rc == 0) {
			/* Move to DPD, the correct device state
			 * for PM_DEVICE_STATE_SUSPENDED
			 */
			acquire_device(dev);
			rc = enter_dpd(dev);
			release_device(dev);
		}
#endif /* CONFIG_SPI_NOR_IDLE_IN_DPD */
		break;
	case PM_DEVICE_ACTION_TURN_OFF:
		break;
	default:
		rc = -ENOSYS;
	}

	return rc;
}
/**
 * @brief Initialize and configure the flash
 *
 * Sets up the access semaphore and optional WP/HOLD GPIOs, then defers
 * device configuration to the PM framework.
 *
 * @param dev The flash device structure
 * @return 0 on success, negative errno code otherwise
 */
static int spi_nor_init(const struct device *dev)
{
	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct spi_nor_data *const driver_data = dev->data;

		k_sem_init(&driver_data->sem, 1, K_SEM_MAX_LIMIT);
	}

#if ANY_INST_HAS_WP_GPIOS
	/* WP is configured active (protection asserted) at boot. */
	if (DEV_CFG(dev)->wp_gpios_exist) {
		if (!device_is_ready(DEV_CFG(dev)->wp.port)) {
			LOG_ERR("Write-protect pin not ready");
			return -ENODEV;
		}
		if (gpio_pin_configure_dt(&(DEV_CFG(dev)->wp), GPIO_OUTPUT_ACTIVE)) {
			LOG_ERR("Write-protect pin failed to set active");
			return -ENODEV;
		}
	}
#endif /* ANY_INST_HAS_WP_GPIOS */
#if ANY_INST_HAS_HOLD_GPIOS
	/* HOLD must be inactive for the device to accept commands. */
	if (DEV_CFG(dev)->hold_gpios_exist) {
		if (!device_is_ready(DEV_CFG(dev)->hold.port)) {
			LOG_ERR("Hold pin not ready");
			return -ENODEV;
		}
		if (gpio_pin_configure_dt(&(DEV_CFG(dev)->hold), GPIO_OUTPUT_INACTIVE)) {
			LOG_ERR("Hold pin failed to set inactive");
			return -ENODEV;
		}
	}
#endif /* ANY_INST_HAS_HOLD_GPIOS */

	return pm_device_driver_init(dev, spi_nor_pm_control);
}
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/* Flash API page_layout: report the single uniform layout entry. */
static void spi_nor_pages_layout(const struct device *dev,
				 const struct flash_pages_layout **layout,
				 size_t *layout_size)
{
	/* Data for runtime, const for devicetree and minimal. */
#ifdef CONFIG_SPI_NOR_SFDP_RUNTIME
	const struct spi_nor_data *data = dev->data;

	*layout = &data->layout;
#else /* CONFIG_SPI_NOR_SFDP_RUNTIME */
	const struct spi_nor_config *cfg = dev->config;

	*layout = &cfg->layout;
#endif /* CONFIG_SPI_NOR_SFDP_RUNTIME */
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* Flash API hook: return the constant flash parameters (write block
 * size and erase value). Identical for every instance, so the device
 * argument is unused.
 */
static const struct flash_parameters *
flash_nor_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_nor_parameters;
}
/* Flash driver API vtable; optional entries are compiled in only when
 * the corresponding Kconfig feature is enabled.
 */
static const struct flash_driver_api spi_nor_api = {
	.read = spi_nor_read,
	.write = spi_nor_write,
	.erase = spi_nor_erase,
	.get_parameters = flash_nor_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = spi_nor_pages_layout,
#endif
#if defined(CONFIG_FLASH_JESD216_API)
	.sfdp_read = spi_nor_sfdp_read,
	.read_jedec_id = spi_nor_read_jedec_id,
#endif
#if defined(CONFIG_FLASH_EX_OP_ENABLED)
	.ex_op = flash_spi_nor_ex_op,
#endif
};
/* Non-runtime page layout for instance idx: the DT "size" property is
 * in bits (hence / 8); the configured layout page size must be 4 KiB
 * aligned and divide the flash size exactly.
 */
#define PAGE_LAYOUT_GEN(idx) \
	BUILD_ASSERT(DT_INST_NODE_HAS_PROP(idx, size), \
		"jedec,spi-nor size required for non-runtime SFDP page layout"); \
	enum { \
		INST_##idx##_BYTES = (DT_INST_PROP(idx, size) / 8) \
	}; \
	BUILD_ASSERT(SPI_NOR_IS_SECTOR_ALIGNED(CONFIG_SPI_NOR_FLASH_LAYOUT_PAGE_SIZE), \
		"SPI_NOR_FLASH_LAYOUT_PAGE_SIZE must be multiple of 4096"); \
	enum { \
		LAYOUT_PAGES_##idx##_COUNT = \
			(INST_##idx##_BYTES / CONFIG_SPI_NOR_FLASH_LAYOUT_PAGE_SIZE) \
	}; \
	BUILD_ASSERT((CONFIG_SPI_NOR_FLASH_LAYOUT_PAGE_SIZE * LAYOUT_PAGES_##idx##_COUNT) == \
		INST_##idx##_BYTES, \
		"SPI_NOR_FLASH_LAYOUT_PAGE_SIZE incompatible with flash size");

/* Devicetree-provided SFDP Basic Flash Parameter (BFP) table blob. */
#define SFDP_BFP_ATTR_GEN(idx) \
	BUILD_ASSERT(DT_INST_NODE_HAS_PROP(idx, sfdp_bfp), \
		"jedec,spi-nor sfdp-bfp required for devicetree SFDP"); \
	static const __aligned(4) uint8_t bfp_##idx##_data[] = DT_INST_PROP(idx, sfdp_bfp);

/* Per-instance build asserts and static data for the non-runtime
 * (MINIMAL / DEVICETREE) SFDP modes.
 */
#define INST_ATTR_GEN(idx) \
	BUILD_ASSERT(DT_INST_NODE_HAS_PROP(idx, jedec_id), \
		"jedec,spi-nor jedec-id required for non-runtime SFDP"); \
	IF_ENABLED(CONFIG_FLASH_PAGE_LAYOUT, (PAGE_LAYOUT_GEN(idx))) \
	IF_ENABLED(CONFIG_SPI_NOR_SFDP_DEVICETREE, (SFDP_BFP_ATTR_GEN(idx)))

/* With runtime SFDP everything is discovered from the chip itself,
 * so no static attributes are emitted.
 */
#define ATTRIBUTES_DEFINE(idx) COND_CODE_1(CONFIG_SPI_NOR_SFDP_RUNTIME, EMPTY(), \
	(INST_ATTR_GEN(idx)))

/* Config-struct initializer fragment for the page layout. */
#define DEFINE_PAGE_LAYOUT(idx) \
	IF_ENABLED(CONFIG_FLASH_PAGE_LAYOUT, \
		(.layout = { \
			.pages_count = LAYOUT_PAGES_##idx##_COUNT, \
			.pages_size = CONFIG_SPI_NOR_FLASH_LAYOUT_PAGE_SIZE, \
		},))

/* Devicetree property-presence helpers. */
#define INST_HAS_LOCK(idx) DT_INST_NODE_HAS_PROP(idx, has_lock)
#define INST_HAS_WP_GPIO(idx) DT_INST_NODE_HAS_PROP(idx, wp_gpios)
#define INST_HAS_HOLD_GPIO(idx) DT_INST_NODE_HAS_PROP(idx, hold_gpios)

/* Driver only knows how to clear lock bits in SR1 (low 8 bits). */
#define LOCK_DEFINE(idx) \
	IF_ENABLED(INST_HAS_LOCK(idx), (BUILD_ASSERT(DT_INST_PROP(idx, has_lock) == \
		(DT_INST_PROP(idx, has_lock) & 0xFF), \
		"Need support for lock clear beyond SR1");))

#define INST_HAS_ENTER_4BYTE_ADDR(idx) DT_INST_NODE_HAS_PROP(idx, enter_4byte_addr)

/* 4-byte addressing entry method, only used by the MINIMAL SFDP mode. */
#define CONFIGURE_4BYTE_ADDR(idx) \
	IF_ENABLED(INST_HAS_ENTER_4BYTE_ADDR(idx), \
		(.enter_4byte_addr = DT_INST_PROP(idx, enter_4byte_addr),))

/* DPD entry/exit delays: DT values are in nanoseconds, stored in ms. */
#define INIT_T_ENTER_DPD(idx) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(idx, t_enter_dpd), \
		(.t_enter_dpd = \
			DIV_ROUND_UP(DT_INST_PROP(idx, t_enter_dpd), NSEC_PER_MSEC)),\
		(.t_enter_dpd = 0))
#if ANY_INST_HAS_T_EXIT_DPD
#define INIT_T_EXIT_DPD(idx) \
	COND_CODE_1( \
		DT_INST_NODE_HAS_PROP(idx, t_exit_dpd), \
		(.t_exit_dpd = DIV_ROUND_UP(DT_INST_PROP(idx, t_exit_dpd), NSEC_PER_MSEC)),\
		(.t_exit_dpd = 0))
#endif

/* Write-protect GPIO spec; zero-filled when the instance has none. */
#define INIT_WP_GPIOS(idx) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(idx, wp_gpios), \
		(.wp = GPIO_DT_SPEC_INST_GET(idx, wp_gpios)), \
		(.wp = {0}))
/* Hold GPIO spec; zero-filled when the instance has none.
 * Neither branch carries a trailing comma: the comma is supplied by
 * the IF_ENABLED(..., (INIT_HOLD_GPIOS(idx),)) use site, exactly as
 * for INIT_WP_GPIOS. The previous "{0}," form expanded to a double
 * comma inside the initializer list.
 */
#define INIT_HOLD_GPIOS(idx) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(idx, hold_gpios), \
		(.hold = GPIO_DT_SPEC_INST_GET(idx, hold_gpios)), \
		(.hold = {0}))
/* DPD wakeup sequence timings (t_DPDD, t_CRDP, t_RDP), converted from
 * the DT nanosecond values to milliseconds; zero when absent.
 */
#define INIT_WAKEUP_SEQ_PARAMS(idx) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(idx, dpd_wakeup_sequence), \
		(.t_dpdd_ms = DIV_ROUND_UP( \
			DT_INST_PROP_BY_IDX(idx, dpd_wakeup_sequence, 0), NSEC_PER_MSEC),\
		.t_crdp_ms = DIV_ROUND_UP( \
			DT_INST_PROP_BY_IDX(idx, dpd_wakeup_sequence, 1), NSEC_PER_MSEC),\
		.t_rdp_ms = DIV_ROUND_UP( \
			DT_INST_PROP_BY_IDX(idx, dpd_wakeup_sequence, 2), NSEC_PER_MSEC)),\
		(.t_dpdd_ms = 0, .t_crdp_ms = 0, .t_rdp_ms = 0))

/* Macronix MX25R high-performance/low-power mode selection (enum idx). */
#define INIT_MXICY_MX25R_POWER_MODE(idx) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(idx, mxicy_mx25r_power_mode), \
		(.mxicy_mx25r_power_mode = DT_INST_ENUM_IDX(idx, mxicy_mx25r_power_mode)),\
		(.mxicy_mx25r_power_mode = 0))

/* Reset GPIO spec; zero-filled when the instance has none. */
#define INIT_RESET_GPIOS(idx) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(idx, reset_gpios), \
		(.reset = GPIO_DT_SPEC_INST_GET(idx, reset_gpios)), \
		(.reset = {0}))

/* Body of the per-instance config struct for non-runtime SFDP modes.
 * Note: DT "size" is in bits, .flash_size in bytes; .bfp_len is in
 * 32-bit words (sizeof / 4).
 */
#define INST_CONFIG_STRUCT_GEN(idx) \
	DEFINE_PAGE_LAYOUT(idx) \
	.flash_size = DT_INST_PROP(idx, size) / 8, \
	.jedec_id = DT_INST_PROP(idx, jedec_id), \
	.dpd_exist = DT_INST_PROP(idx, has_dpd), \
	.dpd_wakeup_sequence_exist = DT_INST_NODE_HAS_PROP(idx, dpd_wakeup_sequence), \
	.mxicy_mx25r_power_mode_exist = DT_INST_NODE_HAS_PROP(idx, mxicy_mx25r_power_mode), \
	.reset_gpios_exist = DT_INST_NODE_HAS_PROP(idx, reset_gpios), \
	.requires_ulbpr_exist = DT_INST_PROP(idx, requires_ulbpr), \
	.wp_gpios_exist = DT_INST_NODE_HAS_PROP(idx, wp_gpios), \
	.hold_gpios_exist = DT_INST_NODE_HAS_PROP(idx, hold_gpios), \
	IF_ENABLED(INST_HAS_LOCK(idx), (.has_lock = DT_INST_PROP(idx, has_lock),)) \
	IF_ENABLED(CONFIG_SPI_NOR_SFDP_MINIMAL, (CONFIGURE_4BYTE_ADDR(idx))) \
	IF_ENABLED(CONFIG_SPI_NOR_SFDP_DEVICETREE, \
		(.bfp_len = sizeof(bfp_##idx##_data) / 4, \
		.bfp = (const struct jesd216_bfp *)bfp_##idx##_data,)) \
	IF_ENABLED(ANY_INST_HAS_DPD, (INIT_T_ENTER_DPD(idx),)) \
	IF_ENABLED(UTIL_AND(ANY_INST_HAS_DPD, ANY_INST_HAS_T_EXIT_DPD), (INIT_T_EXIT_DPD(idx),))\
	IF_ENABLED(ANY_INST_HAS_DPD_WAKEUP_SEQUENCE, (INIT_WAKEUP_SEQ_PARAMS(idx),)) \
	IF_ENABLED(ANY_INST_HAS_MXICY_MX25R_POWER_MODE, (INIT_MXICY_MX25R_POWER_MODE(idx),)) \
	IF_ENABLED(ANY_INST_HAS_RESET_GPIOS, (INIT_RESET_GPIOS(idx),)) \
	IF_ENABLED(ANY_INST_HAS_WP_GPIOS, (INIT_WP_GPIOS(idx),)) \
	IF_ENABLED(ANY_INST_HAS_HOLD_GPIOS, (INIT_HOLD_GPIOS(idx),))

/* Per-instance config: the SPI bus spec always; the rest only when
 * SFDP is not discovered at runtime.
 */
#define GENERATE_CONFIG_STRUCT(idx) \
	static const struct spi_nor_config spi_nor_##idx##_config = { \
		.spi = SPI_DT_SPEC_INST_GET(idx, SPI_WORD_SET(8), CONFIG_SPI_NOR_CS_WAIT_DELAY),\
		COND_CODE_1(CONFIG_SPI_NOR_SFDP_RUNTIME, EMPTY(), (INST_CONFIG_STRUCT_GEN(idx)))};

/* Register the instance with the power-management subsystem. */
#define ASSIGN_PM(idx) \
	PM_DEVICE_DT_INST_DEFINE(idx, spi_nor_pm_control);

/* Expand everything needed for one devicetree instance. */
#define SPI_NOR_INST(idx) \
	ASSIGN_PM(idx) \
	ATTRIBUTES_DEFINE(idx) \
	LOCK_DEFINE(idx) \
	GENERATE_CONFIG_STRUCT(idx) \
	static struct spi_nor_data spi_nor_##idx##_data; \
	DEVICE_DT_INST_DEFINE(idx, &spi_nor_init, PM_DEVICE_DT_INST_GET(idx), \
			&spi_nor_##idx##_data, &spi_nor_##idx##_config, \
			POST_KERNEL, CONFIG_SPI_NOR_INIT_PRIORITY, &spi_nor_api);

/* Instantiate the driver for every enabled jedec,spi-nor node. */
DT_INST_FOREACH_STATUS_OKAY(SPI_NOR_INST)
``` | /content/code_sandbox/drivers/flash/spi_nor.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 13,587 |
```c
/*
*
*/
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <zephyr/sys/barrier.h>
#include <soc.h>
#include "flash_stm32.h"
/* On STM32F7, reads and writes obey the same rule: a range is valid
 * whenever it lies entirely inside the flash address space.
 */
bool flash_stm32_valid_range(const struct device *dev, off_t offset,
			     uint32_t len,
			     bool write)
{
	/* No extra alignment constraint for writes on this family. */
	ARG_UNUSED(write);

	return flash_stm32_range_exists(dev, offset, len);
}
/* Flush the ART accelerator cache after an erase/program so stale
 * lines are not served; a no-op when the accelerator is disabled.
 */
static inline void flush_cache(FLASH_TypeDef *regs)
{
	if (regs->ACR & FLASH_ACR_ARTEN) {
		regs->ACR &= ~FLASH_ACR_ARTEN;
		/* Reference manual:
		 * The ART cache can be flushed only if the ART accelerator
		 * is disabled (ARTEN = 0).
		 */
		regs->ACR |= FLASH_ACR_ARTRST;
		regs->ACR &= ~FLASH_ACR_ARTRST;
		regs->ACR |= FLASH_ACR_ARTEN;
	}
}
/* Program a single byte at @p offset.
 *
 * Caller must have unlocked FLASH_CR beforehand; a locked control
 * register is reported as -EIO.
 */
static int write_byte(const struct device *dev, off_t offset, uint8_t val)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}
	/* prepare to write a single byte
	 * NOTE(review): relies on CR_PSIZE_MASK (from flash_stm32.h)
	 * clearing the PSIZE field when ANDed in - confirm its polarity.
	 */
	regs->CR = (regs->CR & CR_PSIZE_MASK) |
		   FLASH_PSIZE_BYTE | FLASH_CR_PG;
	/* flush the register write */
	barrier_dsync_fence_full();
	/* write the data: (uint8_t *)offset + base == base + offset */
	*((uint8_t *) offset + FLASH_STM32_BASE_ADDRESS) = val;
	/* flush the register write */
	barrier_dsync_fence_full();
	rc = flash_stm32_wait_flash_idle(dev);
	/* Leave programming mode regardless of the wait result. */
	regs->CR &= (~FLASH_CR_PG);
	return rc;
}
/* Erase one flash sector.
 *
 * @param sector Logical sector index as used by the page layout; on
 *               dual-bank parts it is remapped to the SNB encoding.
 */
static int erase_sector(const struct device *dev, uint32_t sector)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc;

	/* if the control register is locked, do not fail silently */
	if (regs->CR & FLASH_CR_LOCK) {
		return -EIO;
	}
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}
	/* Dual bank mode, SNB MSB selects the bank2,
	 * others select sector, so we remap sector number.
	 */
#if defined(FLASH_OPTCR_nDBANK) && FLASH_SECTOR_TOTAL == 24
#if CONFIG_FLASH_SIZE == 2048
	if (sector > 11) {
		sector += 4U;
	}
#elif CONFIG_FLASH_SIZE == 1024
	if (sector > 7) {
		sector += 8U;
	}
#endif /* CONFIG_FLASH_SIZE */
#endif /* defined(FLASH_OPTCR_nDBANK) && FLASH_SECTOR_TOTAL == 24 */
	/* Select sector-erase, set the sector number and start the
	 * operation in a single CR update.
	 */
	regs->CR = (regs->CR & ~(FLASH_CR_PSIZE | FLASH_CR_SNB)) |
		   FLASH_PSIZE_BYTE |
		   FLASH_CR_SER |
		   (sector << FLASH_CR_SNB_Pos) |
		   FLASH_CR_STRT;
	/* flush the register write */
	barrier_dsync_fence_full();
	rc = flash_stm32_wait_flash_idle(dev);
	regs->CR &= ~(FLASH_CR_SER | FLASH_CR_SNB);
	return rc;
}
int flash_stm32_block_erase_loop(const struct device *dev,
unsigned int offset,
unsigned int len)
{
struct flash_pages_info info;
uint32_t start_sector, end_sector;
uint32_t i;
int rc = 0;
rc = flash_get_page_info_by_offs(dev, offset, &info);
if (rc) {
return rc;
}
start_sector = info.index;
rc = flash_get_page_info_by_offs(dev, offset + len - 1, &info);
if (rc) {
return rc;
}
end_sector = info.index;
for (i = start_sector; i <= end_sector; i++) {
rc = erase_sector(dev, i);
if (rc < 0) {
break;
}
}
return rc;
}
/**
 * @brief Program a byte range into flash.
 *
 * @param dev Flash device
 * @param offset Start offset within the flash, in bytes
 * @param data Source buffer
 * @param len Number of bytes to program
 * @return 0 on success, negative errno from write_byte() otherwise
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	const uint8_t *src = data;
	int rc = 0;

	/* Use an unsigned index: the previous "int i" compared against
	 * the unsigned length was a signed/unsigned mismatch and broke
	 * for lengths above INT_MAX.
	 */
	for (unsigned int i = 0; i < len; i++, offset++) {
		rc = write_byte(dev, offset, src[i]);
		if (rc < 0) {
			return rc;
		}
	}

	return rc;
}
/* Update bits selected by @p mask in the option-control register.
 *
 * No-op when the masked bits already equal @p value. Returns -EIO if
 * the option bytes are locked (OPTLOCK set).
 */
static __unused int write_optb(const struct device *dev, uint32_t mask,
			       uint32_t value)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc;

	if (regs->OPTCR & FLASH_OPTCR_OPTLOCK) {
		return -EIO;
	}
	if ((regs->OPTCR & mask) == value) {
		return 0;
	}
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}
	regs->OPTCR = (regs->OPTCR & ~mask) | value;
	/* Trigger the option-byte programming sequence. */
	regs->OPTCR |= FLASH_OPTCR_OPTSTRT;
	/* Make sure previous write is completed. */
	barrier_dsync_fence_full();
	return flash_stm32_wait_flash_idle(dev);
}
#if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION)
/* Read the current readout-protection (RDP) level from FLASH_OPTCR. */
uint8_t flash_stm32_get_rdp_level(const struct device *dev)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);

	return (regs->OPTCR & FLASH_OPTCR_RDP_Msk) >> FLASH_OPTCR_RDP_Pos;
}

/* Program a new RDP level into the option bytes.
 * NOTE(review): the write_optb() result is discarded here, so a locked
 * or failed option-byte write goes unreported - confirm intent.
 */
void flash_stm32_set_rdp_level(const struct device *dev, uint8_t level)
{
	write_optb(dev, FLASH_OPTCR_RDP_Msk,
		   (uint32_t)level << FLASH_OPTCR_RDP_Pos);
}
#endif /* CONFIG_FLASH_STM32_READOUT_PROTECTION */
/* Some SoC can run in single or dual bank mode, others can't.
 * Different SoC flash layouts are specified in various reference
 * manuals, but the flash layout for a given number of sectors is
 * consistent across these manuals. The number of sectors is given
 * by the HAL as FLASH_SECTOR_TOTAL. And some SoC that with same
 * FLASH_SECTOR_TOTAL have different flash size.
 *
 * In case of 8 sectors and 24 sectors we need to differentiate
 * between two cases by using the memory size.
 * In case of 24 sectors we need to check if the SoC is running
 * in single or dual bank mode.
 */
#ifndef FLASH_SECTOR_TOTAL
#error "Unknown flash layout"
#elif FLASH_SECTOR_TOTAL == 2
static const struct flash_pages_layout stm32f7_flash_layout[] = {
	/* RM0385, table 4: STM32F750xx */
	{.pages_count = 2, .pages_size = KB(32)},
};
#elif FLASH_SECTOR_TOTAL == 4
static const struct flash_pages_layout stm32f7_flash_layout[] = {
	/* RM0431, table 4: STM32F730xx */
	{.pages_count = 4, .pages_size = KB(16)},
};
#elif FLASH_SECTOR_TOTAL == 8
/* 8-sector parts: flash size (in KiB) selects between the two maps. */
#if CONFIG_FLASH_SIZE == 512
static const struct flash_pages_layout stm32f7_flash_layout[] = {
	/* RM0431, table 3: STM32F72xxx and STM32F732xx/F733xx */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 3, .pages_size = KB(128)},
};
#elif CONFIG_FLASH_SIZE == 1024
static const struct flash_pages_layout stm32f7_flash_layout[] = {
	/* RM0385, table 3: STM32F756xx and STM32F74xxx */
	{.pages_count = 4, .pages_size = KB(32)},
	{.pages_count = 1, .pages_size = KB(128)},
	{.pages_count = 3, .pages_size = KB(256)},
};
#endif /* CONFIG_FLASH_SIZE */
#elif FLASH_SECTOR_TOTAL == 24
/* 24-sector parts: the active layout is picked at runtime from the
 * nDBANK option bit (see flash_stm32_page_layout()).
 */
static const struct flash_pages_layout stm32f7_flash_layout_single_bank[] = {
	/* RM0410, table 3: STM32F76xxx and STM32F77xxx in single bank */
	{.pages_count = 4, .pages_size = KB(32)},
	{.pages_count = 1, .pages_size = KB(128)},
	{.pages_count = 7, .pages_size = KB(256)},
};
static const struct flash_pages_layout stm32f7_flash_layout_dual_bank[] = {
	/* RM0410, table 4: STM32F76xxx and STM32F77xxx in dual bank */
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
};
#else
#error "Unknown flash layout"
#endif/* !defined(FLASH_SECTOR_TOTAL) */
/* Flash API page layout: on dual-bank capable parts, select the layout
 * matching the current nDBANK option bit at runtime.
 */
void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
#if FLASH_OPTCR_nDBANK
	/* nDBANK set -> single-bank mode (bit is active-low dual bank). */
	if (FLASH_STM32_REGS(dev)->OPTCR & FLASH_OPTCR_nDBANK) {
		*layout = stm32f7_flash_layout_single_bank;
		*layout_size = ARRAY_SIZE(stm32f7_flash_layout_single_bank);
	} else {
		*layout = stm32f7_flash_layout_dual_bank;
		*layout_size = ARRAY_SIZE(stm32f7_flash_layout_dual_bank);
	}
#else
	ARG_UNUSED(dev);
	*layout = stm32f7_flash_layout;
	*layout_size = ARRAY_SIZE(stm32f7_flash_layout);
#endif
}
``` | /content/code_sandbox/drivers/flash/flash_stm32f7x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,215 |
```unknown
# Nios-II QSPI flash controller driver; needs the Altera HAL and an
# enabled "altr,nios2-qspi-nor" devicetree node.
config SOC_FLASH_NIOS2_QSPI
	bool "Nios-II QSPI flash driver"
	default y
	depends on HAS_ALTERA_HAL
	depends on DT_HAS_ALTR_NIOS2_QSPI_NOR_ENABLED
	select FLASH_HAS_DRIVER_ENABLED
	select FLASH_HAS_EXPLICIT_ERASE
	help
	  Enables the Nios-II QSPI flash driver.
``` | /content/code_sandbox/drivers/flash/Kconfig.nios2_qspi | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 76 |
```c
/*
*
*/
#include "flash_gd32.h"
#include <zephyr/logging/log.h>
#include <zephyr/kernel.h>
#include <gd32_fmc.h>
LOG_MODULE_DECLARE(flash_gd32);
/* FMC v3 flash node and its maximum erase time (ms) from devicetree. */
#define GD32_NV_FLASH_V3_NODE DT_INST(0, gd_gd32_nv_flash_v3)
#define GD32_NV_FLASH_V3_TIMEOUT DT_PROP(GD32_NV_FLASH_V3_NODE, max_erase_time_ms)
/**
 * @brief GD32 FMC v3 flash memory layout for GD32F4xx series.
 */
#if defined(CONFIG_FLASH_PAGE_LAYOUT) && \
	defined(CONFIG_SOC_SERIES_GD32F4XX)
#if (PRE_KB(512) == SOC_NV_FLASH_SIZE)
static const struct flash_pages_layout gd32_fmc_v3_layout[] = {
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 3, .pages_size = KB(128)},
};
#elif (PRE_KB(1024) == SOC_NV_FLASH_SIZE)
static const struct flash_pages_layout gd32_fmc_v3_layout[] = {
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
};
#elif (PRE_KB(2048) == SOC_NV_FLASH_SIZE)
/* Dual-bank part: the 1 MiB pattern repeats for the second bank. */
static const struct flash_pages_layout gd32_fmc_v3_layout[] = {
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
};
#elif (PRE_KB(3072) == SOC_NV_FLASH_SIZE)
static const struct flash_pages_layout gd32_fmc_v3_layout[] = {
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
	{.pages_count = 4, .pages_size = KB(16)},
	{.pages_count = 1, .pages_size = KB(64)},
	{.pages_count = 7, .pages_size = KB(128)},
	{.pages_count = 4, .pages_size = KB(256)},
};
#else
#error "Unknown FMC layout for GD32F4xx series."
#endif
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* Status bits treated as program / erase failures respectively. */
#define gd32_fmc_v3_WRITE_ERR (FMC_STAT_PGMERR | FMC_STAT_PGSERR | FMC_STAT_WPERR)
#define gd32_fmc_v3_ERASE_ERR FMC_STAT_OPERR
/* SN bits in FMC_CTL are not continue values, use table below to map them. */
static uint8_t gd32_fmc_v3_sectors[] = {
	0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U, 9U, 10U, 11U,
	16U, 17U, 18U, 19U, 20U, 21U, 22U, 23U, 24U, 25U, 26U, 27U,
	12U, 13U, 14U, 15U
};
/* Unlock the FMC control register by writing the two magic keys. */
static inline void gd32_fmc_v3_unlock(void)
{
	FMC_KEY = UNLOCK_KEY0;
	FMC_KEY = UNLOCK_KEY1;
}

/* Re-lock the FMC control register. */
static inline void gd32_fmc_v3_lock(void)
{
	FMC_CTL |= FMC_CTL_LK;
}
/* Poll the FMC busy flag until the controller goes idle.
 *
 * @return 0 when idle, -ETIMEDOUT if still busy after the devicetree
 *         supplied maximum erase time has elapsed.
 */
static int gd32_fmc_v3_wait_idle(void)
{
	const int64_t deadline = k_uptime_get() + GD32_NV_FLASH_V3_TIMEOUT;

	for (;;) {
		if (!(FMC_STAT & FMC_STAT_BUSY)) {
			return 0;
		}
		if (k_uptime_get() > deadline) {
			return -ETIMEDOUT;
		}
	}
}
/**
 * @brief Validate a program/erase range against the flash geometry.
 *
 * For writes, offset and length must be multiples of the programming
 * word size. For erases, both ends of the range must land exactly on
 * sector boundaries of the non-uniform layout.
 *
 * @param offset Start offset in bytes
 * @param len Length in bytes
 * @param write true for a program operation, false for an erase
 * @return true if the range may be used
 */
bool flash_gd32_valid_range(off_t offset, uint32_t len, bool write)
{
	const struct flash_pages_layout *page_layout;
	uint32_t cur = 0U, next = 0U;

	/* off_t is signed: reject negative offsets explicitly, since
	 * the upper-bound comparisons alone would let them through.
	 */
	if ((offset < 0) || (offset > SOC_NV_FLASH_SIZE) ||
	    ((offset + len) > SOC_NV_FLASH_SIZE)) {
		return false;
	}

	if (write) {
		/* Check offset and len aligned to write-block-size. */
		if ((offset % sizeof(flash_prg_t)) ||
		    (len % sizeof(flash_prg_t))) {
			return false;
		}
	} else {
		/* Walk the sector map; both range ends must coincide
		 * with sector boundaries.
		 */
		for (size_t i = 0; i < ARRAY_SIZE(gd32_fmc_v3_layout); i++) {
			page_layout = &gd32_fmc_v3_layout[i];

			for (size_t j = 0; j < page_layout->pages_count; j++) {
				cur = next;
				next += page_layout->pages_size;

				/* Check bad offset. */
				if ((offset > cur) && (offset < next)) {
					return false;
				}

				/* Check bad len. */
				if (((offset + len) > cur) &&
				    ((offset + len) < next)) {
					return false;
				}

				if ((offset + len) == next) {
					return true;
				}
			}
		}
	}

	return true;
}
/* Program @p len bytes at @p offset via memory-mapped writes.
 *
 * Caller guarantees (flash_gd32_valid_range()) that offset and len are
 * multiples of sizeof(flash_prg_t).
 * NOTE(review): the -EBUSY early return leaves the FMC unlocked -
 * confirm whether re-locking is expected on that path.
 */
int flash_gd32_write_range(off_t offset, const void *data, size_t len)
{
	flash_prg_t *prg_flash = (flash_prg_t *)((uint8_t *)SOC_NV_FLASH_ADDR + offset);
	flash_prg_t *prg_data = (flash_prg_t *)data;
	int ret = 0;

	gd32_fmc_v3_unlock();
	if (FMC_STAT & FMC_STAT_BUSY) {
		return -EBUSY;
	}
	/* Enter program mode and select the programming width. */
	FMC_CTL |= FMC_CTL_PG;
	FMC_CTL &= ~FMC_CTL_PSZ;
	FMC_CTL |= CTL_PSZ(sizeof(flash_prg_t) - 1);
	/* Write one program word at a time through the flash mapping. */
	for (size_t i = 0U; i < (len / sizeof(flash_prg_t)); i++) {
		*prg_flash++ = *prg_data++;
	}
	ret = gd32_fmc_v3_wait_idle();
	if (ret < 0) {
		goto expired_out;
	}
	if (FMC_STAT & gd32_fmc_v3_WRITE_ERR) {
		ret = -EIO;
		/* Write flags back, presumably write-1-to-clear. */
		FMC_STAT |= gd32_fmc_v3_WRITE_ERR;
		LOG_ERR("FMC programming failed");
	}
expired_out:
	FMC_CTL &= ~FMC_CTL_PG;
	gd32_fmc_v3_lock();
	return ret;
}
/* Erase one sector by its hardware SN encoding (already remapped via
 * gd32_fmc_v3_sectors[]).
 * NOTE(review): the -EBUSY early return leaves the FMC unlocked -
 * confirm whether re-locking is expected on that path.
 */
static int gd32_fmc_v3_sector_erase(uint8_t sector)
{
	int ret = 0;

	gd32_fmc_v3_unlock();
	if (FMC_STAT & FMC_STAT_BUSY) {
		return -EBUSY;
	}
	/* Select sector-erase mode, set the sector number and start. */
	FMC_CTL |= FMC_CTL_SER;
	FMC_CTL &= ~FMC_CTL_SN;
	FMC_CTL |= CTL_SN(sector);
	FMC_CTL |= FMC_CTL_START;
	ret = gd32_fmc_v3_wait_idle();
	if (ret < 0) {
		goto expired_out;
	}
	if (FMC_STAT & gd32_fmc_v3_ERASE_ERR) {
		ret = -EIO;
		/* Write flags back, presumably write-1-to-clear. */
		FMC_STAT |= gd32_fmc_v3_ERASE_ERR;
		LOG_ERR("FMC sector %u erase failed", sector);
	}
expired_out:
	FMC_CTL &= ~FMC_CTL_SER;
	gd32_fmc_v3_lock();
	return ret;
}
/* Erase all sectors overlapping [offset, offset + size).
 *
 * Walks the layout table to translate byte offsets into logical sector
 * indices, mapping each through gd32_fmc_v3_sectors[] to the
 * non-contiguous SN field encoding.
 */
int flash_gd32_erase_block(off_t offset, size_t size)
{
	const struct flash_pages_layout *page_layout;
	uint32_t erase_offset = 0U;
	uint8_t counter = 0U;
	int ret = 0;

	for (size_t i = 0; i < ARRAY_SIZE(gd32_fmc_v3_layout); i++) {
		page_layout = &gd32_fmc_v3_layout[i];

		for (size_t j = 0; j < page_layout->pages_count; j++) {
			/* Skip sectors starting before the requested offset. */
			if (erase_offset < offset) {
				counter++;
				erase_offset += page_layout->pages_size;
				continue;
			}

			uint8_t sector = gd32_fmc_v3_sectors[counter++];

			ret = gd32_fmc_v3_sector_erase(sector);
			if (ret < 0) {
				return ret;
			}

			erase_offset += page_layout->pages_size;
			/* Done once the erased span covers the request. */
			if (erase_offset - offset >= size) {
				return 0;
			}
		}
	}

	return 0;
}
#ifdef CONFIG_FLASH_PAGE_LAYOUT
/* Flash API hook: expose the static, non-uniform sector layout table. */
void flash_gd32_pages_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	ARG_UNUSED(dev);

	*layout_size = ARRAY_SIZE(gd32_fmc_v3_layout);
	*layout = gd32_fmc_v3_layout;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
``` | /content/code_sandbox/drivers/flash/flash_gd32_v3.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,016 |
```unknown
# Cadence QSPI NOR controller driver and its tuning options; enabled
# automatically when a "cdns,qspi-nor" devicetree node is present.
config FLASH_CAD_QSPI_NOR
	bool "Cadence Quad SPI Flash driver"
	default y
	depends on DT_HAS_CDNS_QSPI_NOR_ENABLED
	select FLASH_HAS_PAGE_LAYOUT
	select FLASH_HAS_DRIVER_ENABLED
	select FLASH_HAS_EXPLICIT_ERASE
	help
	  Enable Cadence QSPI-NOR support.

if FLASH_CAD_QSPI_NOR

config CAD_QSPI_MICRON_N25Q_SUPPORT
	bool "Cadence Quad SPI Micron N25Q Support"
	default y
	help
	  Enable Micron N25Q Support.

config CAD_QSPI_NOR_SUBSECTOR_SIZE
	hex "Cadence Quad SPI subsector size"
	default 0x1000
	help
	  Set the Cadence Quad SPI subsector size.

config QSPI_ADDR_BYTES
	int "Access QSPI address memory size in bytes"
	default 2
	help
	  Set the address memory size in bytes when
	  accessing QSPI.

config QSPI_BYTES_PER_DEV
	int "Set QSPI to read / write how many bytes per device"
	default 256
	help
	  Set the size for a QSPI to read / write per device.

config QSPI_BYTES_PER_BLOCK
	int "Set QSPI to read / write how many bytes per block"
	default 16
	help
	  Set the size for a QSPI to read / write per block.

endif # FLASH_CAD_QSPI_NOR
``` | /content/code_sandbox/drivers/flash/Kconfig.cadence_qspi_nor | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 291 |
```c
/*
*
*/
#include <errno.h>
#include <stdint.h>
#include <stddef.h>
#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/toolchain.h>
#include <hardware/flash.h>
#include <hardware/regs/io_qspi.h>
#include <hardware/regs/pads_qspi.h>
#include <hardware/structs/ssi.h>
#include <hardware/structs/xip_ctrl.h>
#include <hardware/resets.h>
#include <pico/bootrom.h>
LOG_MODULE_REGISTER(flash_rpi_pico, CONFIG_FLASH_LOG_LEVEL);
#define DT_DRV_COMPAT raspberrypi_pico_flash_controller
/* Flash geometry: QSPI NOR pages are 256 bytes; erase granularity
 * comes from the chosen flash node's erase-block-size property.
 */
#define PAGE_SIZE 256
#define SECTOR_SIZE DT_PROP(DT_CHOSEN(zephyr_flash), erase_block_size)
#define ERASE_VALUE 0xff
#define FLASH_SIZE KB(CONFIG_FLASH_SIZE)
#define FLASH_BASE CONFIG_FLASH_BASE_ADDRESS
#define SSI_BASE_ADDRESS DT_REG_ADDR(DT_CHOSEN(zephyr_flash_controller))
static const struct flash_parameters flash_rpi_parameters = {
	.write_block_size = 1,
	.erase_value = ERASE_VALUE,
};
/**
 * Low level flash functions are based on:
 * github.com/raspberrypi/pico-bootrom/blob/master/bootrom/program_flash_generic.c
 * and
 * github.com/raspberrypi/pico-sdk/blob/master/src/rp2_common/hardware_flash/flash.c
 */
/* Serial flash command opcodes. */
#define FLASHCMD_PAGE_PROGRAM 0x02
#define FLASHCMD_READ_STATUS 0x05
#define FLASHCMD_WRITE_ENABLE 0x06
#define BOOT2_SIZE_WORDS 64
/* Override states for the QSPI chip-select pad. */
enum outover {
	OUTOVER_NORMAL = 0,
	OUTOVER_INVERT,
	OUTOVER_LOW,
	OUTOVER_HIGH
};
static ssi_hw_t *const ssi = (ssi_hw_t *)SSI_BASE_ADDRESS;
/* RAM copy of the second-stage bootloader (flash_init_boot2_copyout). */
static uint32_t boot2_copyout[BOOT2_SIZE_WORDS];
static bool boot2_copyout_valid;
/* RAM staging buffer used by flash_rpi_write(). */
static uint8_t flash_ram_buffer[PAGE_SIZE];
/* Snapshot the second-stage bootloader from the start of flash into
 * RAM so it can be re-executed once XIP is disabled; done only once.
 */
static void __no_inline_not_in_flash_func(flash_init_boot2_copyout)(void)
{
	if (!boot2_copyout_valid) {
		const uint32_t *boot2 = (const uint32_t *)FLASH_BASE;

		for (int idx = 0; idx < BOOT2_SIZE_WORDS; ++idx) {
			boot2_copyout[idx] = boot2[idx];
		}
		__compiler_memory_barrier();
		boot2_copyout_valid = true;
	}
}
/* Re-enable XIP by jumping to the RAM copy of the second-stage
 * bootloader; the +1 marks the target as Thumb code.
 */
static void __no_inline_not_in_flash_func(flash_enable_xip_via_boot2)(void)
{
	((void (*)(void))((uint32_t)boot2_copyout+1))();
}
/* Force the QSPI chip-select pad to a fixed state (or back to normal). */
void __no_inline_not_in_flash_func(flash_cs_force)(enum outover over)
{
	io_rw_32 *reg = (io_rw_32 *) (IO_QSPI_BASE + IO_QSPI_GPIO_QSPI_SS_CTRL_OFFSET);
	*reg = (*reg & ~IO_QSPI_GPIO_QSPI_SS_CTRL_OUTOVER_BITS)
		| (over << IO_QSPI_GPIO_QSPI_SS_CTRL_OUTOVER_LSB);
	/* Dummy read to force the write before returning. */
	(void) *reg;
}
/* Nonzero when the SD1 INOVER override is set; treated here as an
 * abort signal for in-flight operations (see flash_put_get()).
 */
int __no_inline_not_in_flash_func(flash_was_aborted)()
{
	return *(io_rw_32 *) (IO_QSPI_BASE + IO_QSPI_GPIO_QSPI_SD1_CTRL_OFFSET)
		& IO_QSPI_GPIO_QSPI_SD1_CTRL_INOVER_BITS;
}
/* Clock bytes into/out of the SSI FIFOs.
 *
 * Sends @p count bytes from @p tx (zeros when NULL) and captures
 * @p count bytes into @p rx (discarded when NULL) after skipping the
 * first @p rx_skip response bytes. At most 14 bytes are kept in flight
 * so the FIFOs cannot overflow. Chip-select is released on return.
 */
void __no_inline_not_in_flash_func(flash_put_get)(const uint8_t *tx, uint8_t *rx, size_t count,
		size_t rx_skip)
{
	/* Leave two entries of headroom in the 16-deep FIFOs. */
	const uint max_in_flight = 16 - 2;
	size_t tx_count = count;
	size_t rx_count = count;
	bool did_something;
	uint32_t tx_level;
	uint32_t rx_level;
	uint8_t rxbyte;

	while (tx_count || rx_skip || rx_count) {
		tx_level = ssi_hw->txflr;
		rx_level = ssi_hw->rxflr;
		did_something = false;
		if (tx_count && tx_level + rx_level < max_in_flight) {
			ssi->dr0 = (uint32_t) (tx ? *tx++ : 0);
			--tx_count;
			did_something = true;
		}
		if (rx_level) {
			rxbyte = ssi->dr0;
			did_something = true;
			if (rx_skip) {
				--rx_skip;
			} else {
				if (rx)
					*rx++ = rxbyte;
				--rx_count;
			}
		}
		/* Bail out if an abort was signalled and no progress is
		 * being made (avoids spinning forever).
		 */
		if (!did_something && __builtin_expect(flash_was_aborted(), 0)) {
			break;
		}
	}
	flash_cs_force(OUTOVER_HIGH);
}
/* Issue a one-byte command followed by a @p count byte data phase; the
 * command's own response byte is skipped (rx_skip = 1).
 */
void __no_inline_not_in_flash_func(flash_put_get_wrapper)(uint8_t cmd, const uint8_t *tx,
		uint8_t *rx, size_t count)
{
	flash_cs_force(OUTOVER_LOW);
	ssi->dr0 = cmd;
	flash_put_get(tx, rx, count, 1);
}
/* Assert chip-select and push a command byte plus a 24-bit address,
 * MSB first, into the TX FIFO.
 */
static ALWAYS_INLINE void flash_put_cmd_addr(uint8_t cmd, uint32_t addr)
{
	flash_cs_force(OUTOVER_LOW);
	addr |= cmd << 24;
	for (int i = 0; i < 4; ++i) {
		ssi->dr0 = addr >> 24;
		addr <<= 8;
	}
}
/* Program up to one page at @p addr with XIP already disabled:
 * write-enable, page-program, then poll the status register until the
 * WIP bit clears (or the operation is aborted).
 */
void __no_inline_not_in_flash_func(flash_write_partial_internal)(uint32_t addr, const uint8_t *data,
		size_t size)
{
	uint8_t status_reg;

	flash_put_get_wrapper(FLASHCMD_WRITE_ENABLE, NULL, NULL, 0);
	flash_put_cmd_addr(FLASHCMD_PAGE_PROGRAM, addr);
	/* rx_skip = 4: discard the response to cmd + 3 address bytes. */
	flash_put_get(data, NULL, size, 4);
	do {
		flash_put_get_wrapper(FLASHCMD_READ_STATUS, NULL, &status_reg, 1);
	} while (status_reg & 0x1 && !flash_was_aborted());
}
/* Program a sub-page range: look up the ROM helpers, leave XIP,
 * program, flush the XIP cache and re-enter XIP through the copied
 * second-stage bootloader. Runs from RAM.
 */
void __no_inline_not_in_flash_func(flash_write_partial)(uint32_t flash_offs, const uint8_t *data,
		size_t count)
{
	rom_connect_internal_flash_fn connect_internal_flash = (rom_connect_internal_flash_fn)
		rom_func_lookup_inline(ROM_FUNC_CONNECT_INTERNAL_FLASH);
	rom_flash_exit_xip_fn flash_exit_xip = (rom_flash_exit_xip_fn)
		rom_func_lookup_inline(ROM_FUNC_FLASH_EXIT_XIP);
	rom_flash_flush_cache_fn flash_flush_cache = (rom_flash_flush_cache_fn)
		rom_func_lookup_inline(ROM_FUNC_FLASH_FLUSH_CACHE);
	flash_init_boot2_copyout();
	__compiler_memory_barrier();
	connect_internal_flash();
	flash_exit_xip();
	flash_write_partial_internal(flash_offs, data, count);
	flash_flush_cache();
	flash_enable_xip_via_boot2();
}
/* True when [offset, offset + size) lies entirely inside the flash. */
static bool is_valid_range(off_t offset, uint32_t size)
{
	if (offset < 0) {
		return false;
	}
	return (offset + size) <= FLASH_SIZE;
}
/* Flash API read: XIP maps the flash at the configured base address,
 * so a read is a plain copy from the mapped window.
 */
static int flash_rpi_read(const struct device *dev, off_t offset, void *data, size_t size)
{
	if (size == 0) {
		return 0;
	}

	if (!is_valid_range(offset, size)) {
		LOG_ERR("Read range exceeds the flash boundaries");
		return -EINVAL;
	}

	const uint8_t *src = (const uint8_t *)(CONFIG_FLASH_BASE_ADDRESS + offset);

	memcpy(data, src, size);

	return 0;
}
/* Flash API write.
 *
 * Splits the request into an unaligned head, whole pages, and a tail.
 * Data is staged through flash_ram_buffer so the source stays readable
 * while XIP is disabled; interrupts are locked for the duration.
 */
static int flash_rpi_write(const struct device *dev, off_t offset, const void *data, size_t size)
{
	uint32_t key;
	size_t bytes_to_write;
	uint8_t *data_pointer = (uint8_t *)data;

	if (size == 0) {
		return 0;
	}

	if (!is_valid_range(offset, size)) {
		LOG_ERR("Write range exceeds the flash boundaries. Offset=%#lx, Size=%u",
			offset, size);
		return -EINVAL;
	}

	key = irq_lock();

	/* Head: partial page up to the next page boundary. */
	if ((offset & (PAGE_SIZE - 1)) > 0) {
		bytes_to_write = MIN(PAGE_SIZE - (offset & (PAGE_SIZE - 1)), size);
		memcpy(flash_ram_buffer, data_pointer, bytes_to_write);
		flash_write_partial(offset, flash_ram_buffer, bytes_to_write);
		data_pointer += bytes_to_write;
		size -= bytes_to_write;
		offset += bytes_to_write;
	}

	/* Body: full pages via the SDK page-program helper. */
	while (size >= PAGE_SIZE) {
		bytes_to_write = PAGE_SIZE;
		memcpy(flash_ram_buffer, data_pointer, bytes_to_write);
		flash_range_program(offset, flash_ram_buffer, bytes_to_write);
		data_pointer += bytes_to_write;
		size -= bytes_to_write;
		offset += bytes_to_write;
	}

	/* Tail: remaining bytes shorter than a page. */
	if (size > 0) {
		memcpy(flash_ram_buffer, data_pointer, size);
		flash_write_partial(offset, flash_ram_buffer, size);
	}

	irq_unlock(key);

	return 0;
}
/* Flash API erase: offset and size must be sector-aligned. Interrupts
 * are locked while the SDK helper performs the erase.
 */
static int flash_rpi_erase(const struct device *dev, off_t offset, size_t size)
{
	uint32_t key;

	if (size == 0) {
		return 0;
	}

	if (!is_valid_range(offset, size)) {
		LOG_ERR("Erase range exceeds the flash boundaries. Offset=%#lx, Size=%u",
			offset, size);
		return -EINVAL;
	}

	if ((offset % SECTOR_SIZE) || (size % SECTOR_SIZE)) {
		LOG_ERR("Erase range is not a multiple of the sector size. Offset=%#lx, Size=%u",
			offset, size);
		return -EINVAL;
	}

	key = irq_lock();

	flash_range_erase(offset, size);

	irq_unlock(key);

	return 0;
}
/* Flash API hook: constant parameters, identical for the instance. */
static const struct flash_parameters *flash_rpi_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_rpi_parameters;
}
#if CONFIG_FLASH_PAGE_LAYOUT
/* Uniform layout: one entry, each page equals one erase sector. */
static const struct flash_pages_layout flash_rpi_pages_layout = {
	.pages_count = FLASH_SIZE / SECTOR_SIZE,
	.pages_size = SECTOR_SIZE,
};

void flash_rpi_page_layout(const struct device *dev, const struct flash_pages_layout **layout,
		size_t *layout_size)
{
	*layout = &flash_rpi_pages_layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* Flash driver API vtable for the single RP2040 flash instance. */
static const struct flash_driver_api flash_rpi_driver_api = {
	.read = flash_rpi_read,
	.write = flash_rpi_write,
	.erase = flash_rpi_erase,
	.get_parameters = flash_rpi_get_parameters,
#ifdef CONFIG_FLASH_PAGE_LAYOUT
	.page_layout = flash_rpi_page_layout,
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
};

/* NULL init/pm/data/config: the device needs no runtime setup here. */
DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, POST_KERNEL,
		CONFIG_FLASH_INIT_PRIORITY, &flash_rpi_driver_api);
``` | /content/code_sandbox/drivers/flash/flash_rpi_pico.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,261 |
```c
/*
*
*/
#define DT_DRV_COMPAT renesas_smartbond_flash_controller
#define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash)
#define QSPIF_NODE DT_NODELABEL(qspif)
#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/sys/byteorder.h>
#include <DA1469xAB.h>
/* Erase granularity from devicetree; the program page size is fixed. */
#define FLASH_ERASE_SIZE DT_PROP(SOC_NV_FLASH_NODE, erase_block_size)
#define FLASH_PAGE_SIZE 256
struct flash_smartbond_config {
	/* Base address of the memory-mapped (QSPIF) flash region. */
	uint32_t qspif_base_address;
};
static const struct flash_parameters flash_smartbond_parameters = {
	.write_block_size = DT_PROP(SOC_NV_FLASH_NODE, write_block_size),
	.erase_value = 0xff,
};
/* True when [offset, offset + size) fits inside the flash.
 *
 * off_t is signed: reject negative offsets explicitly instead of
 * letting them slip past the upper-bound comparison alone.
 */
static bool range_is_valid(off_t offset, uint32_t size)
{
	return (offset >= 0) &&
	       ((offset + size) <= (CONFIG_FLASH_SIZE * 1024));
}
/* Narrow/wide access to the QSPIC data registers; the access width
 * presumably selects how many bytes go on the bus - see the QSPIC
 * register description to confirm.
 */
static ALWAYS_INLINE void qspic_data_write8(uint8_t data)
{
	volatile uint8_t *reg8 = (uint8_t *)&QSPIC->QSPIC_WRITEDATA_REG;

	*reg8 = data;
}

static ALWAYS_INLINE void qspic_data_write32(uint32_t data)
{
	volatile uint32_t *reg32 = (uint32_t *)&QSPIC->QSPIC_WRITEDATA_REG;

	*reg32 = data;
}

static ALWAYS_INLINE uint8_t qspic_data_read8(void)
{
	volatile uint8_t *reg8 = (uint8_t *)&QSPIC->QSPIC_READDATA_REG;

	return *reg8;
}
/* Read the flash status register (command 0x05). Runs from RAM since
 * the flash may not be executable while in manual mode.
 */
static __ramfunc uint8_t qspic_read_status(void)
{
	uint8_t status;

	QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_EN_CS_Msk;
	qspic_data_write8(0x05);
	status = qspic_data_read8();
	QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_DIS_CS_Msk;

	return status;
}
/* Spin until the flash clears its WIP (write-in-progress) status bit. */
static __ramfunc void qspic_wait_busy(void)
{
	while (qspic_read_status() & 0x01) {
		/* flash still busy */
	}
}
/* Drop the controller out of automode (memory-mapped XIP) into manual
 * single-line mode, then clock out two 0xFF bytes.
 * NOTE(review): the 0xFF bytes presumably terminate any continuous
 * read mode in the flash - confirm against the device datasheet.
 */
static __ramfunc void qspic_automode_exit(void)
{
	QSPIC->QSPIC_CTRLMODE_REG &= ~QSPIC_QSPIC_CTRLMODE_REG_QSPIC_AUTO_MD_Msk;
	QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_SET_SINGLE_Msk;
	QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_EN_CS_Msk;
	qspic_data_write8(0xff);
	qspic_data_write8(0xff);
	QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_DIS_CS_Msk;
}
/* Send write-enable (0x06) and wait until the status register shows
 * WEL (bit 1) set with WIP (bit 0) clear; the command is re-issued
 * until the latch actually sticks.
 */
static __ramfunc void qspic_write_enable(void)
{
	uint8_t status;

	do {
		QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_EN_CS_Msk;
		qspic_data_write8(0x06);
		QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_DIS_CS_Msk;
		do {
			status = qspic_read_status();
		} while (status & 0x01);
	} while (!(status & 0x02));
}
/* Queue a page-program of at most one page starting at @p address.
 *
 * Caller must have issued write-enable beforehand and must poll busy
 * afterwards.
 *
 * @return number of bytes consumed from @p data (clipped at the page
 *         boundary)
 */
static __ramfunc size_t qspic_write_page(uint32_t address, const uint8_t *data, size_t size)
{
	size_t written;

	/* Make sure we write up to page boundary */
	size = MIN(size, FLASH_PAGE_SIZE - (address & (FLASH_PAGE_SIZE - 1)));
	written = size;
	QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_EN_CS_Msk;
	/* Byte-swap so the address goes out MSB-first; the page-program
	 * opcode 0x02 is OR-ed into the first transmitted byte.
	 * NOTE(review): this assumes the top address byte is zero
	 * (device of at most 16 MiB) - confirm for the attached flash.
	 */
	address = sys_cpu_to_be32(address);
	qspic_data_write32(address | 0x02);
	while (size >= 4) {
		qspic_data_write32(*(uint32_t *) data);
		data += 4;
		size -= 4;
	}
	while (size) {
		qspic_data_write8(*data);
		data++;
		size--;
	}
	QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_DIS_CS_Msk;

	return written;
}
/* Program an arbitrary-length buffer, splitting it into page-bounded
 * chunks and waiting for each program operation to complete.
 */
static __ramfunc void qspic_write(uint32_t address, const uint8_t *data, size_t size)
{
	while (size > 0) {
		size_t chunk;

		qspic_write_enable();
		chunk = qspic_write_page(address, data, size);
		qspic_wait_busy();

		address += chunk;
		data += chunk;
		size -= chunk;
	}
}
/* Flash API read: the QSPIC maps the flash for memory-mapped access,
 * so a read is a plain copy from the mapped region.
 */
static int flash_smartbond_read(const struct device *dev, off_t offset,
				void *data, size_t size)
{
	const struct flash_smartbond_config *config = dev->config;
	const uint8_t *src;

	if (!range_is_valid(offset, size)) {
		return -EINVAL;
	}

	if (size == 0) {
		return 0;
	}

	src = (uint8_t *)(config->qspif_base_address + offset);
	memcpy(data, src, size);

	return 0;
}
/* flash_api_write callback. Exits auto mode and programs the data with
 * interrupts locked (no code may be fetched from flash meanwhile), then
 * restores the previous controller mode and flushes the cache so stale
 * read data is discarded.
 */
static __ramfunc int flash_smartbond_write(const struct device *dev,
					   off_t offset, const void *data,
					   size_t size)
{
	unsigned int key;
	uint32_t ctrlmode;

	if (!range_is_valid(offset, size)) {
		return -EINVAL;
	}
	if (!size) {
		return 0;
	}
	key = irq_lock();
	/* Save controller mode so auto (XiP) mode can be restored afterwards */
	ctrlmode = QSPIC->QSPIC_CTRLMODE_REG;
	qspic_automode_exit();
	qspic_wait_busy();
	qspic_write(offset, data, size);
	QSPIC->QSPIC_CTRLMODE_REG = ctrlmode;
	/* Flush the cache: memory-mapped reads must see the new contents */
	CACHE->CACHE_CTRL1_REG |= CACHE_CACHE_CTRL1_REG_CACHE_FLUSH_Msk;
	irq_unlock(key);
	return 0;
}
/* flash_api_erase callback. Offset and size must be multiples of the erase
 * unit. Runs with interrupts locked while the controller is out of auto mode;
 * erases one sector per loop iteration and restores mode + flushes cache
 * when done.
 */
static __ramfunc int flash_smartbond_erase(const struct device *dev, off_t offset,
					   size_t size)
{
	unsigned int key;
	uint32_t ctrlmode;
	uint32_t address;

	if (!range_is_valid(offset, size)) {
		return -EINVAL;
	}
	if ((offset % FLASH_ERASE_SIZE) != 0) {
		return -EINVAL;
	}
	if ((size % FLASH_ERASE_SIZE) != 0) {
		return -EINVAL;
	}
	if (!size) {
		return 0;
	}
	key = irq_lock();
	ctrlmode = QSPIC->QSPIC_CTRLMODE_REG;
	qspic_automode_exit();
	qspic_wait_busy();
	while (size) {
		qspic_write_enable();
		QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_EN_CS_Msk;
		/* Byte-swapped address ORed with 0x20 (Sector Erase opcode);
		 * same opcode+address packing as the page-program path —
		 * TODO confirm against the QSPIC data register byte order.
		 */
		address = sys_cpu_to_be32(offset);
		qspic_data_write32(address | 0x20);
		QSPIC->QSPIC_CTRLBUS_REG = QSPIC_QSPIC_CTRLBUS_REG_QSPIC_DIS_CS_Msk;
		qspic_wait_busy();
		offset += FLASH_ERASE_SIZE;
		size -= FLASH_ERASE_SIZE;
	}
	QSPIC->QSPIC_CTRLMODE_REG = ctrlmode;
	CACHE->CACHE_CTRL1_REG |= CACHE_CACHE_CTRL1_REG_CACHE_FLUSH_Msk;
	irq_unlock(key);
	return 0;
}
/* flash_api_get_parameters callback: expose the static flash parameters. */
static const struct flash_parameters *
flash_smartbond_get_parameters(const struct device *dev)
{
	const struct flash_parameters *params = &flash_smartbond_parameters;

	ARG_UNUSED(dev);
	return params;
}
#if CONFIG_FLASH_PAGE_LAYOUT
/* Single uniform layout: the whole flash divided into erase-block-size pages */
static const struct flash_pages_layout flash_smartbond_0_pages_layout = {
	.pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) /
		DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
	.pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
};

/* flash_api_pages_layout callback: report the one uniform page layout */
void flash_smartbond_page_layout(const struct device *dev,
				 const struct flash_pages_layout **layout,
				 size_t *layout_size)
{
	*layout = &flash_smartbond_0_pages_layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* Flash driver API vtable registered with the device instance below */
static const struct flash_driver_api flash_smartbond_driver_api = {
	.read = flash_smartbond_read,
	.write = flash_smartbond_write,
	.erase = flash_smartbond_erase,
	.get_parameters = flash_smartbond_get_parameters,
#ifdef CONFIG_FLASH_PAGE_LAYOUT
	.page_layout = flash_smartbond_page_layout,
#endif
};
/* Instance 0 config: base address of the memory-mapped QSPI flash region */
static const struct flash_smartbond_config flash_smartbond_0_config = {
	.qspif_base_address = DT_REG_ADDR(QSPIF_NODE),
};

/* No init function and no runtime data needed — configuration only */
DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, &flash_smartbond_0_config,
		      POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, &flash_smartbond_driver_api);
``` | /content/code_sandbox/drivers/flash/flash_smartbond.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,894 |
```c
/*
*
*/
#define DT_DRV_COMPAT ite_it8xxx2_flash_controller
#define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash)
#define FLASH_WRITE_BLK_SZ DT_PROP(SOC_NV_FLASH_NODE, write_block_size)
#define FLASH_ERASE_BLK_SZ DT_PROP(SOC_NV_FLASH_NODE, erase_block_size)
#include <string.h>
#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/linker/linker-defs.h>
#include <ilm.h>
#include <soc.h>
#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(flash_ite_it8xxx2);
#define FLASH_IT8XXX2_REG_BASE \
((struct smfi_it8xxx2_regs *)DT_INST_REG_ADDR(0))
struct flash_it8xxx2_dev_data {
struct k_sem sem;
};
/*
* One page program instruction allows maximum 256 bytes (a page) of data
* to be programmed.
*/
#define CHIP_FLASH_WRITE_PAGE_MAX_SIZE 256
/* Program is run directly from storage */
#define CHIP_MAPPED_STORAGE_BASE DT_REG_ADDR(DT_NODELABEL(flash0))
/* flash size */
#define CHIP_FLASH_SIZE_BYTES DT_REG_SIZE(DT_NODELABEL(flash0))
/* protect bank size */
#define CHIP_FLASH_BANK_SIZE 0x00001000
/*
* This is the block size of the ILM on the it8xxx2 chip.
* The ILM for static code cache, CPU fetch instruction from
* ILM(ILM -> CPU)instead of flash(flash -> I-Cache -> CPU) if enabled.
*/
#define IT8XXX2_ILM_BLOCK_SIZE 0x00001000
/* page program command */
#define FLASH_CMD_PAGE_WRITE 0x2
/* sector erase command (erase size is 4KB) */
#define FLASH_CMD_SECTOR_ERASE 0x20
/* command for flash write */
#define FLASH_CMD_WRITE FLASH_CMD_PAGE_WRITE
/* Write status register */
#define FLASH_CMD_WRSR 0x01
/* Write disable */
#define FLASH_CMD_WRDI 0x04
/* Write enable */
#define FLASH_CMD_WREN 0x06
/* Read status register */
#define FLASH_CMD_RS 0x05
/* Set FSCE# as high level by writing 0 to address xfff_fe00h */
#define FLASH_FSCE_HIGH_ADDRESS 0x0FFFFE00
/* Set FSCE# as low level by writing data to address xfff_fd00h */
#define FLASH_FSCE_LOW_ADDRESS 0x0FFFFD00
enum flash_status_mask {
FLASH_SR_NO_BUSY = 0,
/* Internal write operation is in progress */
FLASH_SR_BUSY = 0x01,
/* Device is memory Write enabled */
FLASH_SR_WEL = 0x02,
FLASH_SR_ALL = (FLASH_SR_BUSY | FLASH_SR_WEL),
};
enum flash_transaction_cmd {
CMD_CONTINUE,
CMD_END,
};
static const struct flash_parameters flash_it8xxx2_parameters = {
.write_block_size = FLASH_WRITE_BLK_SZ,
.erase_value = 0xff,
};
/* Reset the instruction cache tag SRAM so stale instructions cached from
 * flash are discarded after a program/erase. Must run from RAM.
 */
void __soc_ram_code ramcode_reset_i_cache(void)
{
	struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE;

	/* I-Cache tag sram reset */
	gctrl_regs->GCTRL_MCCR |= IT8XXX2_GCTRL_ICACHE_RESET;
	/* Make sure the I-Cache is reset */
	__asm__ volatile ("fence.i" ::: "memory");
	gctrl_regs->GCTRL_MCCR &= ~IT8XXX2_GCTRL_ICACHE_RESET;
	__asm__ volatile ("fence.i" ::: "memory");
}
/* Enter EC-indirect follow mode: the CPU drives the flash SPI bus directly
 * through the ECINDARx/ECINDDR registers instead of memory-mapped access.
 */
void __soc_ram_code ramcode_flash_follow_mode(void)
{
	struct smfi_it8xxx2_regs *const flash_regs = FLASH_IT8XXX2_REG_BASE;
	/*
	 * ECINDAR3-0 are EC-indirect memory address registers.
	 *
	 * Enter follow mode by writing 0xf to low nibble of ECINDAR3 register,
	 * and set high nibble as 0x4 to select internal flash.
	 */
	flash_regs->SMFI_ECINDAR3 = (EC_INDIRECT_READ_INTERNAL_FLASH |
		((FLASH_FSCE_HIGH_ADDRESS >> 24) & GENMASK(3, 0)));
	/* Set FSCE# as high level by writing 0 to address xfff_fe00h */
	flash_regs->SMFI_ECINDAR2 = (FLASH_FSCE_HIGH_ADDRESS >> 16) & GENMASK(7, 0);
	flash_regs->SMFI_ECINDAR1 = (FLASH_FSCE_HIGH_ADDRESS >> 8) & GENMASK(7, 0);
	flash_regs->SMFI_ECINDAR0 = FLASH_FSCE_HIGH_ADDRESS & GENMASK(7, 0);
	/* Writing 0 to EC-indirect memory data register */
	flash_regs->SMFI_ECINDDR = 0x00;
}
/* Leave EC-indirect follow mode, returning ECINDAR3-2 to plain internal-flash
 * selection so normal indirect reads work again.
 */
void __soc_ram_code ramcode_flash_follow_mode_exit(void)
{
	struct smfi_it8xxx2_regs *const flash_regs = FLASH_IT8XXX2_REG_BASE;

	/* Exit follow mode, and keep the setting of selecting internal flash */
	flash_regs->SMFI_ECINDAR3 = EC_INDIRECT_READ_INTERNAL_FLASH;
	flash_regs->SMFI_ECINDAR2 = 0x00;
}
/* Drive flash chip-select (FSCE#) high, ending the current SPI transaction.
 * Includes a short fixed delay before deasserting so the last byte is latched.
 */
void __soc_ram_code ramcode_flash_fsce_high(void)
{
	struct smfi_it8xxx2_regs *const flash_regs = FLASH_IT8XXX2_REG_BASE;
	struct gctrl_it8xxx2_regs *const gctrl_regs = GCTRL_IT8XXX2_REGS_BASE;

	/* FSCE# high level */
	flash_regs->SMFI_ECINDAR1 = (FLASH_FSCE_HIGH_ADDRESS >> 8) & GENMASK(7, 0);
	/*
	 * A short delay (15~30 us) before #CS be driven high to ensure
	 * last byte has been latched in.
	 *
	 * For a loop that writing 0 to WNCKR register for N times, the delay
	 * value will be: ((N-1) / 65.536 kHz) to (N / 65.536 kHz).
	 * So we perform 2 consecutive writes to WNCKR here to ensure the
	 * minimum delay is 15us.
	 */
	gctrl_regs->GCTRL_WNCKR = 0;
	gctrl_regs->GCTRL_WNCKR = 0;
	/* Writing 0 to EC-indirect memory data register */
	flash_regs->SMFI_ECINDDR = 0x00;
}
/* Clock one data byte out on the flash SPI bus (FMOSI) in follow mode. */
void __soc_ram_code ramcode_flash_write_dat(uint8_t wdata)
{
	struct smfi_it8xxx2_regs *const flash_regs = FLASH_IT8XXX2_REG_BASE;

	/* Write data to FMOSI */
	flash_regs->SMFI_ECINDDR = wdata;
}
/* Perform one SPI transaction in follow mode: assert CS (FSCE# low), shift
 * out @wlen bytes from @wbuf, shift in @rlen bytes into @rbuf, and deassert
 * CS only when @cmd_end is CMD_END (CMD_CONTINUE keeps the transaction open
 * for a follow-up data phase).
 */
void __soc_ram_code ramcode_flash_transaction(int wlen, uint8_t *wbuf, int rlen, uint8_t *rbuf,
					      enum flash_transaction_cmd cmd_end)
{
	struct smfi_it8xxx2_regs *const flash_regs = FLASH_IT8XXX2_REG_BASE;
	int i;

	/* FSCE# with low level */
	flash_regs->SMFI_ECINDAR1 = (FLASH_FSCE_LOW_ADDRESS >> 8) & GENMASK(7, 0);
	/* Write data to FMOSI */
	for (i = 0; i < wlen; i++) {
		flash_regs->SMFI_ECINDDR = wbuf[i];
	}
	/* Read data from FMISO */
	for (i = 0; i < rlen; i++) {
		rbuf[i] = flash_regs->SMFI_ECINDDR;
	}
	/* FSCE# high level if transaction done */
	if (cmd_end == CMD_END) {
		ramcode_flash_fsce_high();
	}
}
/* Poll the flash status register (RDSR) until (status & mask) == target.
 * Deliberately has no timeout — see the comment below — and ends the
 * transaction by driving CS high.
 */
void __soc_ram_code ramcode_flash_cmd_read_status(enum flash_status_mask mask,
						  enum flash_status_mask target)
{
	struct smfi_it8xxx2_regs *const flash_regs = FLASH_IT8XXX2_REG_BASE;
	uint8_t cmd_rs[] = {FLASH_CMD_RS};

	/* Send read status command */
	ramcode_flash_transaction(sizeof(cmd_rs), cmd_rs, 0, NULL, CMD_CONTINUE);
	/*
	 * We prefer no timeout here. We can always get the status
	 * we want, or wait for watchdog triggered to check
	 * e-flash's status instead of breaking loop.
	 * This will avoid fetching unknown instruction from e-flash
	 * and causing exception.
	 */
	while ((flash_regs->SMFI_ECINDDR & mask) != target) {
		/* read status and check if it is we want. */
		;
	}
	/* transaction done, drive #CS high */
	ramcode_flash_fsce_high();
}
/* Send WREN in follow mode and wait until the flash reports not-busy with the
 * write-enable latch (WEL) set.
 */
void __soc_ram_code ramcode_flash_cmd_write_enable(void)
{
	uint8_t cmd_we[] = {FLASH_CMD_WREN};

	/* enter EC-indirect follow mode */
	ramcode_flash_follow_mode();
	/* send write enable command */
	ramcode_flash_transaction(sizeof(cmd_we), cmd_we, 0, NULL, CMD_END);
	/* read status and make sure busy bit cleared and write enabled. */
	ramcode_flash_cmd_read_status(FLASH_SR_ALL, FLASH_SR_WEL);
	/* exit EC-indirect follow mode */
	ramcode_flash_follow_mode_exit();
}
/* Send WRDI in follow mode and wait until the flash reports fully idle
 * (both BUSY and WEL clear).
 */
void __soc_ram_code ramcode_flash_cmd_write_disable(void)
{
	uint8_t cmd_wd[] = {FLASH_CMD_WRDI};

	/* enter EC-indirect follow mode */
	ramcode_flash_follow_mode();
	/* send write disable command */
	ramcode_flash_transaction(sizeof(cmd_wd), cmd_wd, 0, NULL, CMD_END);
	/* make sure busy bit cleared. */
	ramcode_flash_cmd_read_status(FLASH_SR_ALL, FLASH_SR_NO_BUSY);
	/* exit EC-indirect follow mode */
	ramcode_flash_follow_mode_exit();
}
/* Verify @size bytes of memory-mapped flash at @addr: against 0xFF (erase
 * check) when @data is NULL, otherwise against the bytes in @data.
 * Returns 0 on match, -EINVAL on the first mismatch.
 */
int __soc_ram_code ramcode_flash_verify(int addr, int size, const char *data)
{
	const uint8_t *flash = (const uint8_t *)addr;
	const uint8_t *expected = (const uint8_t *)data;
	int i;

	for (i = 0; i < size; i++) {
		/* NULL data means "expect erased" (all bits set) */
		uint8_t want = (expected == NULL) ? 0xFF : expected[i];

		if (flash[i] != want) {
			return -EINVAL;
		}
	}
	return 0;
}
/* Program @wlen bytes from @wbuf at flash address @addr using page program
 * commands. Whenever the running address crosses a 256-byte page boundary,
 * the current transaction is closed, busy is polled, WREN is re-sent and a
 * fresh page-program command is issued at the new address.
 * Caller must have sent WREN first (see ramcode_flash_write()).
 */
void __soc_ram_code ramcode_flash_cmd_write(int addr, int wlen, uint8_t *wbuf)
{
	int i;
	uint8_t flash_write[] = {FLASH_CMD_WRITE, ((addr >> 16) & 0xFF),
				 ((addr >> 8) & 0xFF), (addr & 0xFF)};

	/* enter EC-indirect follow mode */
	ramcode_flash_follow_mode();
	/* send flash write command (aai word or page program) */
	ramcode_flash_transaction(sizeof(flash_write), flash_write, 0, NULL, CMD_CONTINUE);
	for (i = 0; i < wlen; i++) {
		/* send data byte */
		ramcode_flash_write_dat(wbuf[i]);
		/*
		 * we want to restart the write sequence every IDEAL_SIZE
		 * chunk worth of data.
		 */
		if (!(++addr % CHIP_FLASH_WRITE_PAGE_MAX_SIZE)) {
			uint8_t w_en[] = {FLASH_CMD_WREN};

			ramcode_flash_fsce_high();
			/* make sure busy bit cleared. */
			ramcode_flash_cmd_read_status(FLASH_SR_BUSY, FLASH_SR_NO_BUSY);
			/* send write enable command */
			ramcode_flash_transaction(sizeof(w_en), w_en, 0, NULL, CMD_END);
			/* make sure busy bit cleared and write enabled. */
			ramcode_flash_cmd_read_status(FLASH_SR_ALL, FLASH_SR_WEL);
			/* re-send write command at the new page address */
			flash_write[1] = (addr >> 16) & GENMASK(7, 0);
			flash_write[2] = (addr >> 8) & GENMASK(7, 0);
			flash_write[3] = addr & GENMASK(7, 0);
			ramcode_flash_transaction(sizeof(flash_write), flash_write,
						  0, NULL, CMD_CONTINUE);
		}
	}
	ramcode_flash_fsce_high();
	/* make sure busy bit cleared. */
	ramcode_flash_cmd_read_status(FLASH_SR_BUSY, FLASH_SR_NO_BUSY);
	/* exit EC-indirect follow mode */
	ramcode_flash_follow_mode_exit();
}
/* Full write sequence: WREN, page-program the data, then WRDI. */
void __soc_ram_code ramcode_flash_write(int addr, int wlen, const char *wbuf)
{
	ramcode_flash_cmd_write_enable();
	ramcode_flash_cmd_write(addr, wlen, (uint8_t *)wbuf);
	ramcode_flash_cmd_write_disable();
}
/* Send one erase command (@cmd, e.g. FLASH_CMD_SECTOR_ERASE) for the sector
 * containing @addr, then wait until the flash reports not-busy.
 * Caller must have sent WREN first (see ramcode_flash_erase()).
 */
void __soc_ram_code ramcode_flash_cmd_erase(int addr, int cmd)
{
	uint8_t cmd_erase[] = {cmd, ((addr >> 16) & 0xFF),
			       ((addr >> 8) & 0xFF), (addr & 0xFF)};

	/* enter EC-indirect follow mode */
	ramcode_flash_follow_mode();
	/* send erase command */
	ramcode_flash_transaction(sizeof(cmd_erase), cmd_erase, 0, NULL, CMD_END);
	/* make sure busy bit cleared. */
	ramcode_flash_cmd_read_status(FLASH_SR_BUSY, FLASH_SR_NO_BUSY);
	/* exit EC-indirect follow mode */
	ramcode_flash_follow_mode_exit();
}
/* Full erase sequence: WREN, erase the sector at @addr, then WRDI. */
void __soc_ram_code ramcode_flash_erase(int addr, int cmd)
{
	ramcode_flash_cmd_write_enable();
	ramcode_flash_cmd_erase(addr, cmd);
	ramcode_flash_cmd_write_disable();
}
/* Read data from flash */
static int __soc_ram_code flash_it8xxx2_read(const struct device *dev, off_t offset, void *data,
size_t len)
{
struct smfi_it8xxx2_regs *const flash_regs = FLASH_IT8XXX2_REG_BASE;
uint8_t *data_t = data;
int i;
for (i = 0; i < len; i++) {
flash_regs->SMFI_ECINDAR3 = EC_INDIRECT_READ_INTERNAL_FLASH;
flash_regs->SMFI_ECINDAR2 = (offset >> 16) & GENMASK(7, 0);
flash_regs->SMFI_ECINDAR1 = (offset >> 8) & GENMASK(7, 0);
flash_regs->SMFI_ECINDAR0 = (offset & GENMASK(7, 0));
/*
* Read/Write to this register will access one byte on the
* flash with the 32-bit flash address defined in ECINDAR3-0
*/
data_t[i] = flash_regs->SMFI_ECINDDR;
offset++;
}
return 0;
}
/* Write data to the flash, page by page */
static int __soc_ram_code flash_it8xxx2_write(const struct device *dev, off_t offset,
const void *src_data, size_t len)
{
struct flash_it8xxx2_dev_data *data = dev->data;
int ret = -EINVAL;
unsigned int key;
/*
* Check that the offset and length are multiples of the write
* block size.
*/
if ((offset % FLASH_WRITE_BLK_SZ) != 0) {
return -EINVAL;
}
if ((len % FLASH_WRITE_BLK_SZ) != 0) {
return -EINVAL;
}
if (!it8xxx2_is_ilm_configured()) {
return -EACCES;
}
k_sem_take(&data->sem, K_FOREVER);
/*
* CPU can't fetch instruction from flash while use
* EC-indirect follow mode to access flash, interrupts need to be
* disabled.
*/
key = irq_lock();
ramcode_flash_write(offset, len, src_data);
ramcode_reset_i_cache();
/* Get the ILM address of a flash offset. */
offset |= CHIP_MAPPED_STORAGE_BASE;
ret = ramcode_flash_verify(offset, len, src_data);
irq_unlock(key);
k_sem_give(&data->sem);
return ret;
}
/* Erase multiple blocks */
static int __soc_ram_code flash_it8xxx2_erase(const struct device *dev, off_t offset, size_t len)
{
struct flash_it8xxx2_dev_data *data = dev->data;
int v_size = len, v_addr = offset, ret = -EINVAL;
unsigned int key;
/*
* Check that the offset and length are multiples of the write
* erase block size.
*/
if ((offset % FLASH_ERASE_BLK_SZ) != 0) {
return -EINVAL;
}
if ((len % FLASH_ERASE_BLK_SZ) != 0) {
return -EINVAL;
}
if (!it8xxx2_is_ilm_configured()) {
return -EACCES;
}
k_sem_take(&data->sem, K_FOREVER);
/*
* CPU can't fetch instruction from flash while use
* EC-indirect follow mode to access flash, interrupts need to be
* disabled.
*/
key = irq_lock();
/* Always use sector erase command */
for (; len > 0; len -= FLASH_ERASE_BLK_SZ) {
ramcode_flash_erase(offset, FLASH_CMD_SECTOR_ERASE);
offset += FLASH_ERASE_BLK_SZ;
}
ramcode_reset_i_cache();
/* get the ILM address of a flash offset. */
v_addr |= CHIP_MAPPED_STORAGE_BASE;
ret = ramcode_flash_verify(v_addr, v_size, NULL);
irq_unlock(key);
k_sem_give(&data->sem);
return ret;
}
/* flash_api_get_parameters callback: expose the static flash parameters. */
static const struct flash_parameters *
flash_it8xxx2_get_parameters(const struct device *dev)
{
	const struct flash_parameters *params = &flash_it8xxx2_parameters;

	ARG_UNUSED(dev);
	return params;
}
/* Driver init: select internal flash for indirect reads, enable the
 * EC-indirect page-program cycle and create the access mutex.
 */
static int flash_it8xxx2_init(const struct device *dev)
{
	struct smfi_it8xxx2_regs *const flash_regs = FLASH_IT8XXX2_REG_BASE;
	struct flash_it8xxx2_dev_data *data = dev->data;

	/* By default, select internal flash for indirect fast read. */
	flash_regs->SMFI_ECINDAR3 = EC_INDIRECT_READ_INTERNAL_FLASH;
	/*
	 * If the embedded flash's size of this part number is larger
	 * than 256K-byte, enable the page program cycle constructed
	 * by EC-Indirect Follow Mode.
	 */
	flash_regs->SMFI_FLHCTRL6R |= IT8XXX2_SMFI_MASK_ECINDPP;
	/* Initialize mutex for flash controller */
	k_sem_init(&data->sem, 1, 1);
	return 0;
}
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/* Single uniform layout: the whole flash divided into erase-block-size pages */
static const struct flash_pages_layout dev_layout = {
	.pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) /
		DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
	.pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
};

/* flash_api_pages_layout callback: report the one uniform page layout */
static void flash_it8xxx2_pages_layout(const struct device *dev,
				       const struct flash_pages_layout **layout,
				       size_t *layout_size)
{
	*layout = &dev_layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* Flash driver API vtable registered with the device instance below */
static const struct flash_driver_api flash_it8xxx2_api = {
	.erase = flash_it8xxx2_erase,
	.write = flash_it8xxx2_write,
	.read = flash_it8xxx2_read,
	.get_parameters = flash_it8xxx2_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = flash_it8xxx2_pages_layout,
#endif
};

/* Per-instance runtime data (access mutex) */
static struct flash_it8xxx2_dev_data flash_it8xxx2_data;

/* PRE_KERNEL_1: flash must be usable before other drivers initialize */
DEVICE_DT_INST_DEFINE(0, flash_it8xxx2_init, NULL,
		      &flash_it8xxx2_data, NULL,
		      PRE_KERNEL_1,
		      CONFIG_FLASH_INIT_PRIORITY,
		      &flash_it8xxx2_api);
``` | /content/code_sandbox/drivers/flash/flash_ite_it8xxx2.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,301 |
```c
/*
*
*/
/*
* This driver defines a page as the erase_block_size.
* This driver defines a write page as defined by the flash controller
* This driver defines a section as a contiguous array of bytes
* This driver defines an area as the entire flash area
* This driver defines the write block size as the minimum write block size
*/
#define DT_DRV_COMPAT atmel_sam_flash_controller
#include <zephyr/kernel.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/sys/barrier.h>
#include <string.h>
#include <soc.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(flash_sam, CONFIG_FLASH_LOG_LEVEL);
#define SAM_FLASH_WRITE_PAGE_SIZE (512)
typedef void (*sam_flash_irq_init_fn_ptr)(void);
struct sam_flash_config {
Efc *regs;
sam_flash_irq_init_fn_ptr irq_init;
off_t area_address;
off_t area_size;
struct flash_parameters parameters;
struct flash_pages_layout *pages_layouts;
size_t pages_layouts_size;
};
struct sam_flash_erase_data {
off_t section_start;
size_t section_end;
bool succeeded;
};
struct sam_flash_data {
const struct device *dev;
struct k_spinlock lock;
struct sam_flash_erase_data erase_data;
struct k_sem ready_sem;
};
/* Sanity-check an (offset, len) pair: rejects negative offsets and pairs
 * whose sum wraps around the address space.
 */
static bool sam_flash_validate_offset_len(off_t offset, size_t len)
{
	if (offset < 0) {
		return false;
	}
	/* offset + len must not wrap; on wraparound the sum drops below len */
	return (offset + len) >= len;
}
/* True when value is a multiple of alignment. Uses a bitmask, so it is only
 * meaningful for power-of-two alignments (all callers pass powers of two).
 */
static bool sam_flash_aligned(size_t value, size_t alignment)
{
	const size_t mask = alignment - 1;

	return (value & mask) == 0U;
}
/* True when offset lies on a 512-byte write-page boundary. */
static bool sam_flash_offset_is_on_write_page_boundary(off_t offset)
{
	return sam_flash_aligned(offset, SAM_FLASH_WRITE_PAGE_SIZE);
}
/* Disable the EEFC "flash ready" (FRDY) interrupt. */
static inline void sam_flash_mask_ready_interrupt(const struct sam_flash_config *config)
{
	Efc *regs = config->regs;

	regs->EEFC_FMR &= ~EEFC_FMR_FRDY;
}
/* Enable the EEFC "flash ready" (FRDY) interrupt. */
static inline void sam_flash_unmask_ready_interrupt(const struct sam_flash_config *config)
{
	Efc *regs = config->regs;

	regs->EEFC_FMR |= EEFC_FMR_FRDY;
}
/* FRDY interrupt handler: mask the interrupt (it is level-like until the
 * next command) and wake the waiter in sam_flash_section_wait_until_ready().
 */
static void sam_flash_isr(const struct device *dev)
{
	struct sam_flash_data *data = dev->data;
	const struct sam_flash_config *config = dev->config;

	sam_flash_mask_ready_interrupt(config);
	k_sem_give(&data->ready_sem);
}
/* Block until the EEFC signals command completion (FRDY interrupt), then
 * decode the status register. Returns 0 on success, -EFAULT on timeout,
 * -EPERM on command/lock/programming errors.
 */
static int sam_flash_section_wait_until_ready(const struct device *dev)
{
	struct sam_flash_data *data = dev->data;
	const struct sam_flash_config *config = dev->config;
	Efc *regs = config->regs;
	uint32_t eefc_fsr;

	/* Drain any stale give from a previous command before unmasking */
	k_sem_reset(&data->ready_sem);
	sam_flash_unmask_ready_interrupt(config);
	if (k_sem_take(&data->ready_sem, K_MSEC(500)) < 0) {
		LOG_ERR("Command did not execute in time");
		return -EFAULT;
	}
	/* FSR register is cleared on read */
	eefc_fsr = regs->EEFC_FSR;
	if (eefc_fsr & EEFC_FSR_FCMDE) {
		LOG_ERR("Invalid command requested");
		return -EPERM;
	}
	if (eefc_fsr & EEFC_FSR_FLOCKE) {
		LOG_ERR("Tried to modify locked region");
		return -EPERM;
	}
	if (eefc_fsr & EEFC_FSR_FLERR) {
		LOG_ERR("Programming failed");
		return -EPERM;
	}
	return 0;
}
/* True when [offset, offset+len) lies entirely inside the flash area
 * (overflow of offset+len is treated as out of range). Logs on rejection.
 */
static bool sam_flash_section_is_within_area(const struct device *dev, off_t offset, size_t len)
{
	const struct sam_flash_config *config = dev->config;

	/* Signed overflow check on the section end */
	if ((offset + ((off_t)len)) < offset) {
		return false;
	}
	if ((offset >= 0) && ((offset + len) <= config->area_size)) {
		return true;
	}
	LOG_WRN("Section from 0x%x to 0x%x is not within flash area (0x0 to %x)",
		(size_t)offset, (size_t)(offset + len), (size_t)config->area_size);
	return false;
}
/* True when both offset and len are multiples of the configured write block
 * size. Logs on rejection.
 */
static bool sam_flash_section_is_aligned_with_write_block_size(const struct device *dev,
							       off_t offset, size_t len)
{
	const struct sam_flash_config *config = dev->config;

	if (sam_flash_aligned(offset, config->parameters.write_block_size) &&
	    sam_flash_aligned(len, config->parameters.write_block_size)) {
		return true;
	}
	LOG_WRN("Section from 0x%x to 0x%x is not aligned with write block size (%u)",
		(size_t)offset, (size_t)(offset + len), config->parameters.write_block_size);
	return false;
}
/* True when [offset, offset+len) starts and ends exactly on erase-page
 * boundaries of the (possibly non-uniform) page layout. The end of the flash
 * area counts as a valid end boundary.
 */
static bool sam_flash_section_is_aligned_with_pages(const struct device *dev, off_t offset,
						    size_t len)
{
	const struct sam_flash_config *config = dev->config;
	struct flash_pages_info pages_info;

	/* Get the page offset points to */
	if (flash_get_page_info_by_offs(dev, offset, &pages_info) < 0) {
		return false;
	}
	/* Validate offset points to start of page */
	if (offset != pages_info.start_offset) {
		return false;
	}
	/* Check if end of section is aligned with end of area */
	if ((offset + len) == (config->area_size)) {
		return true;
	}
	/* Get the page pointed to by end of section */
	if (flash_get_page_info_by_offs(dev, offset + len, &pages_info) < 0) {
		return false;
	}
	/* Validate offset points to start of page */
	if ((offset + len) != pages_info.start_offset) {
		return false;
	}
	return true;
}
/* flash_api_read callback: flash is memory-mapped, so the read is a memcpy
 * from the area base plus offset, performed under the driver spinlock so it
 * cannot interleave with a concurrent write/erase.
 */
static int sam_flash_read(const struct device *dev, off_t offset, void *data, size_t len)
{
	struct sam_flash_data *sam_data = dev->data;
	const struct sam_flash_config *sam_config = dev->config;
	k_spinlock_key_t key;

	if (len == 0) {
		return 0;
	}
	if (!sam_flash_validate_offset_len(offset, len)) {
		return -EINVAL;
	}
	if (!sam_flash_section_is_within_area(dev, offset, len)) {
		return -EINVAL;
	}
	key = k_spin_lock(&sam_data->lock);
	memcpy(data, (uint8_t *)(sam_config->area_address + offset), len);
	k_spin_unlock(&sam_data->lock, key);
	return 0;
}
/* Issue the EEFC write-page (WP) command for the write page containing
 * @offset, committing the latch buffer to flash, and wait for completion.
 *
 * Returns 0 on success or a negative error from
 * sam_flash_section_wait_until_ready(). The original implementation
 * discarded that status and always returned 0, so command timeouts and
 * lock/programming errors were silently swallowed; propagating it lets
 * sam_flash_write()'s existing `< 0` check actually catch failures.
 */
static int sam_flash_write_latch_buffer_to_page(const struct device *dev, off_t offset)
{
	const struct sam_flash_config *sam_config = dev->config;
	Efc *regs = sam_config->regs;
	uint32_t page = offset / SAM_FLASH_WRITE_PAGE_SIZE;

	regs->EEFC_FCR = EEFC_FCR_FCMD_WP | EEFC_FCR_FARG(page) | EEFC_FCR_FKEY_PASSWD;
	return sam_flash_section_wait_until_ready(dev);
}
/* Commit the latch buffer for the write page immediately before @offset;
 * used when a running write pointer has just crossed a page boundary.
 */
static int sam_flash_write_latch_buffer_to_previous_page(const struct device *dev, off_t offset)
{
	return sam_flash_write_latch_buffer_to_page(dev, offset - SAM_FLASH_WRITE_PAGE_SIZE);
}
/* Store one 32-bit word into the memory-mapped flash latch buffer at
 * @offset (an absolute mapped address) and fence so the store completes
 * before the next access.
 */
static void sam_flash_write_dword_to_latch_buffer(off_t offset, uint32_t dword)
{
	*((uint32_t *)offset) = dword;
	barrier_dsync_fence_full();
}
/* Stream @size 32-bit words into the latch buffer starting at @offset,
 * committing each write page as its boundary is crossed, and committing the
 * final, partially-filled page at the end.
 * NOTE(review): return values of the per-page commit calls are discarded
 * here; this function always returns 0.
 */
static int sam_flash_write_dwords_to_flash(const struct device *dev, off_t offset,
					   const uint32_t *dwords, size_t size)
{
	for (size_t i = 0; i < size; i++) {
		sam_flash_write_dword_to_latch_buffer(offset, dwords[i]);
		offset += sizeof(uint32_t);
		/* Crossed into a new page: flush the one just filled */
		if (sam_flash_offset_is_on_write_page_boundary(offset)) {
			sam_flash_write_latch_buffer_to_previous_page(dev, offset);
		}
	}
	/* Flush the trailing partial page, if any */
	if (!sam_flash_offset_is_on_write_page_boundary(offset)) {
		sam_flash_write_latch_buffer_to_page(dev, offset);
	}
	return 0;
}
/* flash_api_write callback: validates alignment and writes the data through
 * the latch buffer under the driver spinlock.
 * NOTE(review): unlike sam_flash_read(), this path does not call
 * sam_flash_section_is_within_area() — out-of-area writes are not rejected
 * here; confirm whether that is intentional.
 */
static int sam_flash_write(const struct device *dev, off_t offset, const void *data, size_t len)
{
	struct sam_flash_data *sam_data = dev->data;
	k_spinlock_key_t key;

	if (len == 0) {
		return 0;
	}
	if (!sam_flash_validate_offset_len(offset, len)) {
		return -EINVAL;
	}
	if (!sam_flash_section_is_aligned_with_write_block_size(dev, offset, len)) {
		return -EINVAL;
	}
	LOG_DBG("Writing sector from 0x%x to 0x%x", (size_t)offset, (size_t)(offset + len));
	key = k_spin_lock(&sam_data->lock);
	if (sam_flash_write_dwords_to_flash(dev, offset, data, len / sizeof(uint32_t)) < 0) {
		k_spin_unlock(&sam_data->lock, key);
		return -EAGAIN;
	}
	k_spin_unlock(&sam_data->lock, key);
	return 0;
}
/* Issue the EEFC clear-lock-bit (CLB) command for one write page and wait
 * for completion. Returns 0 or a negative error.
 */
static int sam_flash_unlock_write_page(const struct device *dev, uint16_t page_index)
{
	const struct sam_flash_config *sam_config = dev->config;
	Efc *regs = sam_config->regs;

	/* Perform unlock command of write page */
	regs->EEFC_FCR = EEFC_FCR_FCMD_CLB
		       | EEFC_FCR_FARG(page_index)
		       | EEFC_FCR_FKEY_PASSWD;
	return sam_flash_section_wait_until_ready(dev);
}
/* Unlock every write page covered by the erase page described by @info.
 * Stops and returns the first error; 0 when all pages are unlocked.
 */
static int sam_flash_unlock_page(const struct device *dev, const struct flash_pages_info *info)
{
	/* Convert the erase-page offset/size into write-page index/count */
	const uint16_t first = info->start_offset / SAM_FLASH_WRITE_PAGE_SIZE;
	const uint16_t count = info->size / SAM_FLASH_WRITE_PAGE_SIZE;

	for (uint16_t i = 0; i < count; i++) {
		int ret = sam_flash_unlock_write_page(dev, first + i);

		if (ret < 0) {
			return ret;
		}
	}
	return 0;
}
/* Erase one page using the EEFC erase-pages (EPA) command. The low bits of
 * FARG encode how many write pages to erase (0/1/2/3 => 4/8/16/32 pages,
 * i.e. 2/4/8/16 KiB) — hence the switch on info->size. Returns 0 on
 * success, -EINVAL for unsupported page sizes, or the controller error.
 */
static int sam_flash_erase_page(const struct device *dev, const struct flash_pages_info *info)
{
	const struct sam_flash_config *sam_config = dev->config;
	Efc *regs = sam_config->regs;
	uint32_t page_index;
	int ret;

	/* Convert from page offset to write page index */
	page_index = info->start_offset / SAM_FLASH_WRITE_PAGE_SIZE;
	LOG_DBG("Erasing page at 0x%x of size 0x%x", (size_t)info->start_offset, info->size);
	/* Perform erase command of page */
	switch (info->size) {
	case 0x800:
		regs->EEFC_FCR = EEFC_FCR_FCMD_EPA
			       | EEFC_FCR_FARG(page_index)
			       | EEFC_FCR_FKEY_PASSWD;
		break;
	case 0x1000:
		regs->EEFC_FCR = EEFC_FCR_FCMD_EPA
			       | EEFC_FCR_FARG(page_index | 1)
			       | EEFC_FCR_FKEY_PASSWD;
		break;
	case 0x2000:
		regs->EEFC_FCR = EEFC_FCR_FCMD_EPA
			       | EEFC_FCR_FARG(page_index | 2)
			       | EEFC_FCR_FKEY_PASSWD;
		break;
	case 0x4000:
		regs->EEFC_FCR = EEFC_FCR_FCMD_EPA
			       | EEFC_FCR_FARG(page_index | 3)
			       | EEFC_FCR_FKEY_PASSWD;
		break;
	default:
		return -EINVAL;
	}
	ret = sam_flash_section_wait_until_ready(dev);
	if (ret == 0) {
		return ret;
	}
	LOG_ERR("Failed to erase page at 0x%x of size 0x%x", (size_t)info->start_offset,
		info->size);
	return ret;
}
/* flash_page_foreach() callback used by sam_flash_erase(): skips pages
 * before the erase window, unlocks+erases pages inside it, and stops with
 * erase_data.succeeded set once the first page past the window is reached.
 * Returning false stops the iteration.
 */
static bool sam_flash_erase_foreach_page(const struct flash_pages_info *info, void *data)
{
	struct sam_flash_data *sam_data = data;
	struct sam_flash_erase_data *erase = &sam_data->erase_data;
	const struct device *dev = sam_data->dev;

	if (info->start_offset < erase->section_start) {
		/* Page precedes the erase window; keep iterating */
		return true;
	}
	if (info->start_offset >= erase->section_end) {
		/* Whole window processed; flag success and stop */
		erase->succeeded = true;
		return false;
	}
	if ((sam_flash_unlock_page(dev, info) < 0) ||
	    (sam_flash_erase_page(dev, info) < 0)) {
		/* Unlock or erase failed; stop with succeeded left false */
		return false;
	}
	return true;
}
/* flash_api_erase callback: validates page alignment, then walks the page
 * layout erasing every page in [offset, offset+size).
 * NOTE(review): the spinlock is held across the page iteration, and the
 * erase path waits on a semaphore with a timeout inside
 * sam_flash_section_wait_until_ready() — sleeping while holding a spinlock
 * is suspect in Zephyr; confirm this cannot deadlock.
 */
static int sam_flash_erase(const struct device *dev, off_t offset, size_t size)
{
	struct sam_flash_data *sam_data = dev->data;
	k_spinlock_key_t key;

	if (size == 0) {
		return 0;
	}
	if (!sam_flash_validate_offset_len(offset, size)) {
		return -EINVAL;
	}
	if (!sam_flash_section_is_aligned_with_pages(dev, offset, size)) {
		return -EINVAL;
	}
	LOG_DBG("Erasing sector from 0x%x to 0x%x", (size_t)offset, (size_t)(offset + size));
	key = k_spin_lock(&sam_data->lock);
	/* Parameterize the foreach callback through the driver data */
	sam_data->erase_data.section_start = offset;
	sam_data->erase_data.section_end = offset + size;
	sam_data->erase_data.succeeded = false;
	flash_page_foreach(dev, sam_flash_erase_foreach_page, sam_data);
	if (!sam_data->erase_data.succeeded) {
		k_spin_unlock(&sam_data->lock, key);
		return -EFAULT;
	}
	k_spin_unlock(&sam_data->lock, key);
	return 0;
}
/* flash_api_get_parameters callback: parameters live in the instance config. */
static const struct flash_parameters *sam_flash_get_parameters(const struct device *dev)
{
	const struct sam_flash_config *sam_config = dev->config;
	const struct flash_parameters *params = &sam_config->parameters;

	return params;
}
/* flash_api_pages_layout callback: report the devicetree-derived layout
 * table (may describe several non-uniform erase-block regions).
 */
static void sam_flash_api_pages_layout(const struct device *dev,
				       const struct flash_pages_layout **layout,
				       size_t *layout_size)
{
	const struct sam_flash_config *config = dev->config;

	*layout = config->pages_layouts;
	*layout_size = config->pages_layouts_size;
}
static struct flash_driver_api sam_flash_api = {
.read = sam_flash_read,
.write = sam_flash_write,
.erase = sam_flash_erase,
.get_parameters = sam_flash_get_parameters,
.page_layout = sam_flash_api_pages_layout,
};
/* Driver init: record the device pointer for the foreach callback, set up
 * the FRDY completion semaphore (initially taken), mask the FRDY interrupt
 * until a command is issued, and connect/enable the EEFC IRQ.
 */
static int sam_flash_init(const struct device *dev)
{
	struct sam_flash_data *sam_data = dev->data;
	const struct sam_flash_config *sam_config = dev->config;

	sam_data->dev = dev;
	k_sem_init(&sam_data->ready_sem, 0, 1);
	sam_flash_mask_ready_interrupt(sam_config);
	sam_config->irq_init();
	return 0;
}
/* Devicetree node of the flash device managed by this controller */
#define SAM_FLASH_DEVICE DT_INST(0, atmel_sam_flash)

/* Expand one erase-blocks entry into a flash_pages_layout initializer */
#define SAM_FLASH_PAGES_LAYOUT(node_id, prop, idx)				\
	{									\
		.pages_count = DT_PHA_BY_IDX(node_id, prop, idx, pages_count),	\
		.pages_size = DT_PHA_BY_IDX(node_id, prop, idx, pages_size),	\
	}

/* Comma-separated layout initializers, one per erase-blocks entry */
#define SAM_FLASH_PAGES_LAYOUTS \
	DT_FOREACH_PROP_ELEM_SEP(SAM_FLASH_DEVICE, erase_blocks, SAM_FLASH_PAGES_LAYOUT, (,))

/* Instantiate layouts, IRQ glue, config, data and the device for @inst */
#define SAM_FLASH_CONTROLLER(inst)						\
	struct flash_pages_layout sam_flash_pages_layouts##inst[] = {		\
		SAM_FLASH_PAGES_LAYOUTS						\
	};									\
										\
	static void sam_flash_irq_init_##inst(void)				\
	{									\
		IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority),	\
			    sam_flash_isr, DEVICE_DT_INST_GET(inst), 0);	\
		irq_enable(DT_INST_IRQN(inst));					\
										\
	}									\
										\
	static const struct sam_flash_config sam_flash_config##inst = {		\
		.regs = (Efc *)DT_INST_REG_ADDR(inst),				\
		.irq_init = sam_flash_irq_init_##inst,				\
		.area_address = DT_REG_ADDR(SAM_FLASH_DEVICE),			\
		.area_size = DT_REG_SIZE(SAM_FLASH_DEVICE),			\
		.parameters = {							\
			.write_block_size = DT_PROP(SAM_FLASH_DEVICE, write_block_size), \
			.erase_value = 0xFF,					\
		},								\
		.pages_layouts = sam_flash_pages_layouts##inst,			\
		.pages_layouts_size = ARRAY_SIZE(sam_flash_pages_layouts##inst), \
	};									\
										\
	static struct sam_flash_data sam_flash_data##inst;			\
										\
	DEVICE_DT_INST_DEFINE(inst, sam_flash_init, NULL, &sam_flash_data##inst, \
			      &sam_flash_config##inst, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY, \
			      &sam_flash_api);

SAM_FLASH_CONTROLLER(0)
``` | /content/code_sandbox/drivers/flash/flash_sam.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,760 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_FLASH_NPCX_FIU_QSPI_H_
#define ZEPHYR_DRIVERS_FLASH_NPCX_FIU_QSPI_H_
#include <zephyr/device.h>
#include "jesd216.h"
#ifdef __cplusplus
extern "C" {
#endif
/* UMA operation flags */
#define NPCX_UMA_ACCESS_WRITE BIT(0)
#define NPCX_UMA_ACCESS_READ BIT(1)
#define NPCX_UMA_ACCESS_ADDR BIT(2)
/* Valid value of Dn_NADDRB that sets the number of address bytes in a transaction */
#define NPCX_DEV_NUM_ADDR_1BYTE 1
#define NPCX_DEV_NUM_ADDR_2BYTE 2
#define NPCX_DEV_NUM_ADDR_3BYTE 3
#define NPCX_DEV_NUM_ADDR_4BYTE 4
/* UMA operation configuration for a SPI device */
/* UMA operation configuration for a SPI device */
struct npcx_uma_cfg {
	/* SPI command opcode transmitted first */
	uint8_t opcode;
	/* Transmit buffer and byte count for the data phase */
	uint8_t *tx_buf;
	size_t tx_count;
	/* Receive buffer and byte count for the read phase */
	uint8_t *rx_buf;
	size_t rx_count;
	/* Transaction address, accessible as one word or as bytes */
	union {
		uint32_t u32;
		uint8_t u8[4];
	} addr;
};
/* QSPI bus configuration for a SPI device */
struct npcx_qspi_cfg {
/* Type of Quad Enable bit in status register */
enum jesd216_dw15_qer_type qer_type;
/* Pinctrl for QSPI bus */
const struct pinctrl_dev_config *pcfg;
/* Enter four bytes address mode value */
uint8_t enter_4ba;
/* SPI read access type of Direct Read Access mode */
uint8_t rd_mode;
/* Configurations for the Quad-SPI peripherals */
int flags;
};
/**
* @brief Execute UMA transactions on qspi bus
*
* @param dev Pointer to the device structure for qspi bus controller instance.
* @param cfg Pointer to the configuration of UMA transactions.
* @param flags Flags to be used during transactions.
* @retval 0 on success, -EPERM if an UMA transaction is not permitted.
*/
int qspi_npcx_fiu_uma_transceive(const struct device *dev, struct npcx_uma_cfg *cfg,
uint32_t flags);
/**
* @brief Lock the mutex of npcx qspi bus controller and apply its configuration
*
* @param dev Pointer to the device structure for qspi bus controller instance.
* @param cfg Pointer to the configuration for the device on qspi bus.
* @param operation Qspi bus operation for the device.
*/
void qspi_npcx_fiu_mutex_lock_configure(const struct device *dev,
const struct npcx_qspi_cfg *cfg,
const uint32_t operation);
/**
* @brief Unlock the mutex of npcx qspi bus controller.
*
* @param dev Pointer to the device structure for qspi bus controller instance.
*/
void qspi_npcx_fiu_mutex_unlock(const struct device *dev);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_FLASH_NPCX_FIU_QSPI_H_ */
``` | /content/code_sandbox/drivers/flash/flash_npcx_fiu_qspi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 635 |
```unknown
# Silicon Labs Gecko flash driver config

# Enabled by default whenever the matching devicetree node is enabled.
config SOC_FLASH_GECKO
	bool "Silicon Labs Gecko flash driver"
	default y
	depends on DT_HAS_SILABS_GECKO_FLASH_CONTROLLER_ENABLED
	select FLASH_HAS_DRIVER_ENABLED
	select FLASH_HAS_PAGE_LAYOUT
	select SOC_GECKO_MSC
	select FLASH_HAS_EXPLICIT_ERASE
	# Flash writes require the MPU to permit them when an ARM MPU is in use.
	select MPU_ALLOW_FLASH_WRITE if ARM_MPU
	help
	  Enable Silicon Labs Gecko series internal flash driver.
``` | /content/code_sandbox/drivers/flash/Kconfig.gecko | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 93 |
```c
/*
*
*/
#define DT_DRV_COMPAT silabs_gecko_flash_controller
#define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash)
#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <em_msc.h>
#include <zephyr/drivers/flash.h>
#include <soc.h>
#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(flash_gecko);
/* Per-instance driver state: a binary semaphore serializing write and
 * erase access to the MSC peripheral.
 */
struct flash_gecko_data {
	struct k_sem mutex;
};

/* Fixed flash characteristics exposed through the flash API. */
static const struct flash_parameters flash_gecko_parameters = {
	.write_block_size = DT_PROP(SOC_NV_FLASH_NODE, write_block_size),
	/* Erased flash reads back as all ones. */
	.erase_value = 0xff,
};

/* Forward declarations of helpers defined later in this file. */
static bool write_range_is_valid(off_t offset, uint32_t size);
static bool read_range_is_valid(off_t offset, uint32_t size);
static int erase_flash_block(off_t offset, size_t size);
static void flash_gecko_write_protection(bool enable);
/* Read @size bytes at flash @offset into @data.
 *
 * The internal flash is memory mapped at CONFIG_FLASH_BASE_ADDRESS, so a
 * plain memory copy suffices; no locking or unlock of the MSC is needed.
 *
 * Returns 0 on success (including a zero-length read) or -EINVAL when the
 * requested range falls outside the flash.
 */
static int flash_gecko_read(const struct device *dev, off_t offset,
			    void *data,
			    size_t size)
{
	if (!read_range_is_valid(offset, size)) {
		return -EINVAL;
	}

	if (size == 0) {
		return 0;
	}

	const uint8_t *src = (uint8_t *)CONFIG_FLASH_BASE_ADDRESS + offset;

	memcpy(data, src, size);

	return 0;
}
/* Write @size bytes from @data to flash at @offset.
 *
 * Both offset and size must be word aligned (see write_range_is_valid()).
 * MSC write protection is lifted only for the duration of the operation,
 * and access is serialized with the per-device mutex.
 *
 * Returns 0 on success, -EINVAL for an invalid range, -EIO when the MSC
 * reports an error.
 */
static int flash_gecko_write(const struct device *dev, off_t offset,
			     const void *data, size_t size)
{
	struct flash_gecko_data *const dev_data = dev->data;
	MSC_Status_TypeDef msc_ret;
	void *address;
	int ret = 0;

	if (!write_range_is_valid(offset, size)) {
		return -EINVAL;
	}
	if (!size) {
		return 0;
	}
	k_sem_take(&dev_data->mutex, K_FOREVER);
	/* Unlock MSC registers, program, then relock regardless of outcome. */
	flash_gecko_write_protection(false);
	address = (uint8_t *)CONFIG_FLASH_BASE_ADDRESS + offset;
	msc_ret = MSC_WriteWord(address, data, size);
	if (msc_ret < 0) {
		ret = -EIO;
	}
	flash_gecko_write_protection(true);
	k_sem_give(&dev_data->mutex);
	return ret;
}
/* Erase @size bytes of flash starting at @offset.
 *
 * Offset and size must both be page aligned (FLASH_PAGE_SIZE). MSC write
 * protection is lifted only for the duration of the operation, and access
 * is serialized with the per-device mutex.
 *
 * Returns 0 on success, -EINVAL for an invalid or misaligned range, and
 * the result of erase_flash_block() (-EIO on MSC failure) otherwise.
 */
static int flash_gecko_erase(const struct device *dev, off_t offset,
			     size_t size)
{
	struct flash_gecko_data *const dev_data = dev->data;
	int ret;

	if (!read_range_is_valid(offset, size)) {
		return -EINVAL;
	}
	if ((offset % FLASH_PAGE_SIZE) != 0) {
		LOG_ERR("offset 0x%lx: not on a page boundary", (long)offset);
		return -EINVAL;
	}
	if ((size % FLASH_PAGE_SIZE) != 0) {
		LOG_ERR("size %zu: not multiple of a page size", size);
		return -EINVAL;
	}
	if (!size) {
		return 0;
	}
	k_sem_take(&dev_data->mutex, K_FOREVER);
	flash_gecko_write_protection(false);
	ret = erase_flash_block(offset, size);
	flash_gecko_write_protection(true);
	k_sem_give(&dev_data->mutex);
	return ret;
}
/* Toggle MSC register write protection.
 *
 * enable == true locks the MSC by writing 0 to its LOCK register;
 * enable == false writes the device's unlock key so flash write/erase
 * operations become possible.
 */
static void flash_gecko_write_protection(bool enable)
{
	if (enable) {
		/* Lock the MSC module. */
		MSC->LOCK = 0;
	} else {
		/* Unlock the MSC module. */
#if defined(MSC_LOCK_LOCKKEY_UNLOCK)
		MSC->LOCK = MSC_LOCK_LOCKKEY_UNLOCK;
#else
		MSC->LOCK = MSC_UNLOCK_CODE;
#endif
	}
}
/* A write range is valid when it lies inside the flash and both the
 * start address and the byte count are word (4-byte) aligned, matching
 * the word-granular MSC write used by flash_gecko_write().
 */
static bool write_range_is_valid(off_t offset, uint32_t size)
{
	if (!read_range_is_valid(offset, size)) {
		return false;
	}

	/* Address and length must both be multiples of the word size. */
	return ((offset % sizeof(uint32_t)) == 0) &&
	       ((size % sizeof(uint32_t)) == 0U);
}
/* A range is readable when it starts at a non-negative offset and ends at
 * or before the end of the flash (CONFIG_FLASH_SIZE is expressed in KiB).
 *
 * The explicit offset >= 0 check rejects negative offsets that the plain
 * signed comparison would accept, and the 64-bit arithmetic prevents any
 * wrap-around in offset + size.
 */
static bool read_range_is_valid(off_t offset, uint32_t size)
{
	if (offset < 0) {
		return false;
	}

	return ((uint64_t)offset + size) <= ((uint64_t)CONFIG_FLASH_SIZE * 1024U);
}
/* Erase [offset, offset + size) one page at a time via MSC_ErasePage().
 *
 * Stops at the first MSC error and returns -EIO; returns 0 when every
 * page erased successfully. Callers are responsible for validating the
 * range/alignment and unlocking the MSC beforehand.
 */
static int erase_flash_block(off_t offset, size_t size)
{
	MSC_Status_TypeDef msc_ret;
	void *address;
	int ret = 0;

	for (off_t tmp = offset; tmp < offset + size; tmp += FLASH_PAGE_SIZE) {
		address = (uint8_t *)CONFIG_FLASH_BASE_ADDRESS + tmp;
		msc_ret = MSC_ErasePage(address);
		if (msc_ret < 0) {
			ret = -EIO;
			break;
		}
	}
	return ret;
}
#if CONFIG_FLASH_PAGE_LAYOUT
/* Single uniform layout derived from the devicetree: page size is the
 * erase block size, page count is the flash region size divided by it.
 */
static const struct flash_pages_layout flash_gecko_0_pages_layout = {
	.pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) /
		       DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
	.pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
};

/* flash API page-layout hook: the whole flash is one uniform layout. */
void flash_gecko_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	*layout = &flash_gecko_0_pages_layout;
	*layout_size = 1;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* flash API hook: return the driver's constant flash parameters
 * (write block size and erase value).
 */
static const struct flash_parameters *
flash_gecko_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);
	return &flash_gecko_parameters;
}
/* Driver init: create the access mutex, initialize the MSC peripheral
 * and leave it locked until a write/erase operation needs it.
 * Always returns 0.
 */
static int flash_gecko_init(const struct device *dev)
{
	struct flash_gecko_data *const dev_data = dev->data;

	k_sem_init(&dev_data->mutex, 1, 1);
	MSC_Init();
	/* Lock the MSC module. */
	MSC->LOCK = 0;
	LOG_INF("Device %s initialized", dev->name);
	return 0;
}

/* Flash driver API vtable registered with the device below. */
static const struct flash_driver_api flash_gecko_driver_api = {
	.read = flash_gecko_read,
	.write = flash_gecko_write,
	.erase = flash_gecko_erase,
	.get_parameters = flash_gecko_get_parameters,
#ifdef CONFIG_FLASH_PAGE_LAYOUT
	.page_layout = flash_gecko_page_layout,
#endif
};

/* Single instance: runtime state plus device registration. */
static struct flash_gecko_data flash_gecko_0_data;

DEVICE_DT_INST_DEFINE(0, flash_gecko_init, NULL,
		      &flash_gecko_0_data, NULL, POST_KERNEL,
		      CONFIG_FLASH_INIT_PRIORITY, &flash_gecko_driver_api);
``` | /content/code_sandbox/drivers/flash/flash_gecko.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,338 |
```unknown
# Enabled by default whenever the matching devicetree node is enabled.
config SOC_FLASH_CC13XX_CC26XX
	bool "TI SimpleLink CC13xx/CC26xx flash controller driver"
	default y
	depends on DT_HAS_TI_CC13XX_CC26XX_FLASH_CONTROLLER_ENABLED
	select FLASH_HAS_PAGE_LAYOUT
	select FLASH_HAS_DRIVER_ENABLED
	select FLASH_HAS_EXPLICIT_ERASE
	# Flash writes require the MPU to permit them when an ARM MPU is in use.
	select MPU_ALLOW_FLASH_WRITE if ARM_MPU
	help
	  Enables TI SimpleLink CC13xx/CC26xx flash controller driver.
``` | /content/code_sandbox/drivers/flash/Kconfig.cc13xx_cc26xx | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 95 |
```unknown
# ST Microelectronics STM32 MCUs Flash driver config

# Memory-mapped (XiP) mode for external NOR flash on OSPI/QSPI/XSPI buses.
config STM32_MEMMAP
	bool "NOR Flash in MemoryMapped for XiP"
	depends on XIP && \
		(DT_HAS_ST_STM32_OSPI_NOR_ENABLED || \
		 DT_HAS_ST_STM32_QSPI_NOR_ENABLED || \
		 DT_HAS_ST_STM32_XSPI_NOR_ENABLED)
	help
	  This option enables the XIP mode for the external NOR flash
	  mounted on STM32 boards.

config SOC_FLASH_STM32
	bool "STM32 flash driver"
	depends on DT_HAS_ST_STM32_FLASH_CONTROLLER_ENABLED
	select FLASH_HAS_DRIVER_ENABLED
	default y
	select FLASH_HAS_PAGE_LAYOUT
	select FLASH_HAS_EXPLICIT_ERASE
	select MPU_ALLOW_FLASH_WRITE if ARM_MPU
	select USE_STM32_HAL_FLASH if BT_STM32WBA
	select USE_STM32_HAL_FLASH_EX if BT_STM32WBA
	help
	  Enable flash driver for STM32 series

# The options below extend the base driver with flash_ex_op() features.
if SOC_FLASH_STM32

config FLASH_STM32_WRITE_PROTECT
	bool "Extended operation for flash write protection control"
	depends on SOC_SERIES_STM32F4X
	select FLASH_HAS_EX_OP
	default n
	help
	  Enables flash extended operation for enabling/disabling flash write
	  protection.

config FLASH_STM32_WRITE_PROTECT_DISABLE_PREVENTION
	bool "Prevent from disabling flash write protection"
	depends on FLASH_STM32_WRITE_PROTECT
	default n
	help
	  If enabled, all requests to disable flash write protection will be
	  blocked.

config FLASH_STM32_READOUT_PROTECTION
	bool "Extended operation for flash readout protection control"
	depends on SOC_SERIES_STM32F4X || SOC_SERIES_STM32L4X || \
		   SOC_SERIES_STM32G4X || SOC_SERIES_STM32F7X
	select FLASH_HAS_EX_OP
	default n
	help
	  Enables flash extended operation for enabling/disabling flash readout
	  protection.

config FLASH_STM32_READOUT_PROTECTION_DISABLE_ALLOW
	bool "Allow disabling readout protection"
	depends on FLASH_STM32_READOUT_PROTECTION
	default n
	help
	  With this option enabled it will be possible to disable readout
	  protection. On STM32 devices it will trigger flash mass erase!

config FLASH_STM32_READOUT_PROTECTION_PERMANENT_ALLOW
	bool "Allow enabling readout protection permanently"
	depends on FLASH_STM32_READOUT_PROTECTION
	default n
	help
	  With this option enabled it will be possible to enable readout
	  protection permanently.

config FLASH_STM32_BLOCK_REGISTERS
	bool "Extended operation for blocking option and control registers"
	select FLASH_HAS_EX_OP
	default n
	help
	  Enables flash extended operations that can be used to disable access
	  to option and control registers until reset. Disabling access to these
	  registers improves system security, because flash content (or
	  protection settings) can't be changed even when exploit was found.

endif # SOC_FLASH_STM32
``` | /content/code_sandbox/drivers/flash/Kconfig.stm32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 636 |
```c
/*
*
*
* Part of flash simulator which interacts with the host OS
*
* When building for the native simulator, this file is built in the
* native simulator runner/host context, and not in Zephyr/embedded context.
*/
#undef _POSIX_C_SOURCE
/* Note: This is used only for interaction with the host C library, and is therefore exempt of
* coding guidelines rule A.4&5 which applies to the embedded code using embedded libraries
*/
#define _POSIX_C_SOURCE 200809L
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <nsi_tracing.h>
/*
 * Initialize the flash buffer.
 * And, if the content is to be kept on disk map it to the buffer to the file.
 *
 * flash_in_ram == true  -> plain heap buffer, nothing persisted.
 * flash_in_ram == false -> the backing file is (created and) resized to
 *                          @size and mmap'ed into *mock_flash.
 *
 * Returns -1 on failure
 *          0 on success
 */
int flash_mock_init_native(bool flash_in_ram, uint8_t **mock_flash, unsigned int size,
			   int *flash_fd, const char *flash_file_path,
			   unsigned int erase_value, bool flash_erase_at_start)
{
	struct stat f_stat;
	int rc;

	if (flash_in_ram == true) {
		*mock_flash = (uint8_t *)malloc(size);
		if (*mock_flash == NULL) {
			nsi_print_warning("Could not allocate flash in the process heap %s\n",
					  strerror(errno));
			return -1;
		}
	} else {
		*flash_fd = open(flash_file_path, O_RDWR | O_CREAT, (mode_t)0600);
		if (*flash_fd == -1) {
			nsi_print_warning("Failed to open flash device file "
					  "%s: %s\n",
					  flash_file_path, strerror(errno));
			return -1;
		}

		/* Stat before truncating so st_size still tells us whether the
		 * file was empty (i.e. freshly created) and needs erasing.
		 */
		rc = fstat(*flash_fd, &f_stat);
		if (rc) {
			nsi_print_warning("Failed to get status of flash device file "
					  "%s: %s\n",
					  flash_file_path, strerror(errno));
			return -1;
		}

		if (ftruncate(*flash_fd, size) == -1) {
			nsi_print_warning("Failed to resize flash device file "
					  "%s: %s\n",
					  flash_file_path, strerror(errno));
			return -1;
		}

		*mock_flash = mmap(NULL, size,
				   PROT_WRITE | PROT_READ, MAP_SHARED, *flash_fd, 0);
		if (*mock_flash == MAP_FAILED) {
			nsi_print_warning("Failed to mmap flash device file "
					  "%s: %s\n",
					  flash_file_path, strerror(errno));
			return -1;
		}
	}

	/* f_stat is only initialized in the file-backed path; the short-circuit
	 * on flash_in_ram guarantees it is never read otherwise.
	 */
	if ((flash_erase_at_start == true) || (flash_in_ram == true) || (f_stat.st_size == 0)) {
		/* Erase the memory unit by pulling all bits to the configured erase value */
		(void)memset(*mock_flash, erase_value, size);
	}

	return 0;
}
/*
 * Release the mock flash resources.
 *
 * RAM-backed flash: free the heap buffer.
 * File-backed flash: unmap the mapping, close the file descriptor and,
 * when requested, best-effort delete the backing file.
 */
void flash_mock_cleanup_native(bool flash_in_ram, int flash_fd, uint8_t *mock_flash,
			       unsigned int size, const char *flash_file_path,
			       bool flash_rm_at_exit)
{
	if (flash_in_ram) {
		/* free(NULL) is a harmless no-op */
		free(mock_flash);
		return;
	}

	bool mapped = (mock_flash != MAP_FAILED) && (mock_flash != NULL);

	if (mapped) {
		munmap(mock_flash, size);
	}

	if (flash_fd != -1) {
		close(flash_fd);
	}

	if (flash_rm_at_exit && (flash_file_path != NULL)) {
		/* We try to remove the file but do not error out if we can't */
		(void) remove(flash_file_path);
	}
}
``` | /content/code_sandbox/drivers/flash/flash_simulator_native.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 881 |
```c
/*
*
*/
#define DT_DRV_COMPAT st_stm32_ospi_nor
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/sys/util.h>
#include <soc.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/dt-bindings/flash_controller/ospi.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/irq.h>
#include "spi_nor.h"
#include "jesd216.h"
#include "flash_stm32_ospi.h"
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(flash_stm32_ospi, CONFIG_FLASH_LOG_LEVEL);
#define STM32_OSPI_NODE DT_INST_PARENT(0)
#define DT_OSPI_IO_PORT_PROP_OR(prop, default_value) \
COND_CODE_1(DT_NODE_HAS_PROP(STM32_OSPI_NODE, prop), \
(_CONCAT(HAL_OSPIM_, DT_STRING_TOKEN(STM32_OSPI_NODE, prop))), \
((default_value)))
#define DT_OSPI_PROP_OR(prop, default_value) \
DT_PROP_OR(STM32_OSPI_NODE, prop, default_value)
/* Get the base address of the flash from the DTS node */
#define STM32_OSPI_BASE_ADDRESS DT_INST_REG_ADDR(0)
#define STM32_OSPI_RESET_GPIO DT_INST_NODE_HAS_PROP(0, reset_gpios)
#define STM32_OSPI_DLYB_BYPASSED DT_PROP(STM32_OSPI_NODE, dlyb_bypass)
#define STM32_OSPI_USE_DMA DT_NODE_HAS_PROP(STM32_OSPI_NODE, dmas)
#if STM32_OSPI_USE_DMA
#include <zephyr/drivers/dma/dma_stm32.h>
#include <zephyr/drivers/dma.h>
#include <stm32_ll_dma.h>
#endif /* STM32_OSPI_USE_DMA */
#define STM32_OSPI_FIFO_THRESHOLD 4
#if defined(CONFIG_SOC_SERIES_STM32H5X)
/* Valid range is [0, 255] */
#define STM32_OSPI_CLOCK_PRESCALER_MIN 0U
#define STM32_OSPI_CLOCK_PRESCALER_MAX 255U
#define STM32_OSPI_CLOCK_COMPUTE(bus_freq, prescaler) ((bus_freq) / ((prescaler) + 1U))
#else
/* Valid range is [1, 256] */
#define STM32_OSPI_CLOCK_PRESCALER_MIN 1U
#define STM32_OSPI_CLOCK_PRESCALER_MAX 256U
#define STM32_OSPI_CLOCK_COMPUTE(bus_freq, prescaler) ((bus_freq) / (prescaler))
#endif
/* Max Time value during reset or erase operation */
#define STM32_OSPI_RESET_MAX_TIME 100U
#define STM32_OSPI_BULK_ERASE_MAX_TIME 460000U
#define STM32_OSPI_SECTOR_ERASE_MAX_TIME 1000U
#define STM32_OSPI_SUBSECTOR_4K_ERASE_MAX_TIME 400U
#define STM32_OSPI_WRITE_REG_MAX_TIME 40U
/* used as default value for DTS writeoc */
#define SPI_NOR_WRITEOC_NONE 0xFF
#if STM32_OSPI_USE_DMA
#if CONFIG_DMA_STM32U5
/* Lookup tables translating generic data-size indices into LL DMA
 * constants (STM32U5 GPDMA variant).
 */
static const uint32_t table_src_size[] = {
	LL_DMA_SRC_DATAWIDTH_BYTE,
	LL_DMA_SRC_DATAWIDTH_HALFWORD,
	LL_DMA_SRC_DATAWIDTH_WORD,
};

static const uint32_t table_dest_size[] = {
	LL_DMA_DEST_DATAWIDTH_BYTE,
	LL_DMA_DEST_DATAWIDTH_HALFWORD,
	LL_DMA_DEST_DATAWIDTH_WORD,
};

/* Lookup table to set dma priority from the DTS */
static const uint32_t table_priority[] = {
	LL_DMA_LOW_PRIORITY_LOW_WEIGHT,
	LL_DMA_LOW_PRIORITY_MID_WEIGHT,
	LL_DMA_LOW_PRIORITY_HIGH_WEIGHT,
	LL_DMA_HIGH_PRIORITY,
};
#else
/* Same lookup tables for the classic DMA controller variant. */
static const uint32_t table_m_size[] = {
	LL_DMA_MDATAALIGN_BYTE,
	LL_DMA_MDATAALIGN_HALFWORD,
	LL_DMA_MDATAALIGN_WORD,
};

static const uint32_t table_p_size[] = {
	LL_DMA_PDATAALIGN_BYTE,
	LL_DMA_PDATAALIGN_HALFWORD,
	LL_DMA_PDATAALIGN_WORD,
};

/* Lookup table to set dma priority from the DTS */
static const uint32_t table_priority[] = {
	DMA_PRIORITY_LOW,
	DMA_PRIORITY_MEDIUM,
	DMA_PRIORITY_HIGH,
	DMA_PRIORITY_VERY_HIGH,
};
#endif /* CONFIG_DMA_STM32U5 */

/* One DMA stream/channel used by the OSPI transfers. */
struct stream {
	DMA_TypeDef *reg;               /* DMA controller registers */
	const struct device *dev;       /* Zephyr DMA device */
	uint32_t channel;               /* channel/stream index */
	struct dma_config cfg;          /* channel configuration */
};
#endif /* STM32_OSPI_USE_DMA */

/* Signature of the per-instance IRQ configuration hook. */
typedef void (*irq_config_func_t)(const struct device *dev);
/* Build-time configuration of the OSPI flash controller instance. */
struct flash_stm32_ospi_config {
	OCTOSPI_TypeDef *regs;                  /* OSPI peripheral registers */
	const struct stm32_pclken pclken;       /* clock subsystem */
#if DT_CLOCKS_HAS_NAME(STM32_OSPI_NODE, ospi_ker)
	const struct stm32_pclken pclken_ker;   /* clock subsystem */
#endif
#if DT_CLOCKS_HAS_NAME(STM32_OSPI_NODE, ospi_mgr)
	const struct stm32_pclken pclken_mgr;   /* clock subsystem */
#endif
	irq_config_func_t irq_config;           /* IRQ setup hook */
	size_t flash_size;                      /* flash size in bytes */
	uint32_t max_frequency;                 /* max bus frequency in Hz */
	int data_mode;                          /* SPI or QSPI or OSPI */
	int data_rate;                          /* DTR or STR */
	const struct pinctrl_dev_config *pcfg;  /* pin configuration */
#if STM32_OSPI_RESET_GPIO
	const struct gpio_dt_spec reset;        /* optional flash reset GPIO */
#endif /* STM32_OSPI_RESET_GPIO */
#if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_ospi_nor), sfdp_bfp)
	uint8_t sfdp_bfp[DT_INST_PROP_LEN(0, sfdp_bfp)]; /* SFDP table from DTS */
#endif /* sfdp_bfp */
};
/* Run-time state of the OSPI flash driver instance. */
struct flash_stm32_ospi_data {
	OSPI_HandleTypeDef hospi;  /* HAL handle for the OSPI peripheral */
	struct k_sem sem;          /* serializes thread access to the bus */
	/* Completion semaphore taken after starting IT/DMA transfers;
	 * presumably given from the HAL event callbacks (not shown here).
	 */
	struct k_sem sync;
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	struct flash_pages_layout layout;
#endif
	struct jesd216_erase_type erase_types[JESD216_NUM_ERASE_TYPES];
	/* Number of bytes per page */
	uint16_t page_size;
	/* Address width in bytes */
	uint8_t address_width;
	/* Read operation dummy cycles */
	uint8_t read_dummy;
	uint32_t read_opcode;      /* opcode used for read operations */
	uint32_t write_opcode;     /* opcode used for program operations */
	enum jesd216_mode_type read_mode;
	enum jesd216_dw15_qer_type qer_type;
#if defined(CONFIG_FLASH_JESD216_API)
	/* Table to hold the jedec Read ID given by the octoFlash or the DTS */
	uint8_t jedec_id[JESD216_READ_ID_LEN];
#endif /* CONFIG_FLASH_JESD216_API */
	/* Last command status; 0 on success, set by transfer-error handling */
	int cmd_status;
#if STM32_OSPI_USE_DMA
	struct stream dma;         /* DMA stream used for data transfers */
#endif /* STM32_OSPI_USE_DMA */
};
/* Serialize thread access to the OSPI peripheral; blocks until available. */
static inline void ospi_lock_thread(const struct device *dev)
{
	struct flash_stm32_ospi_data *dev_data = dev->data;

	k_sem_take(&dev_data->sem, K_FOREVER);
}

/* Release the per-device lock taken by ospi_lock_thread(). */
static inline void ospi_unlock_thread(const struct device *dev)
{
	struct flash_stm32_ospi_data *dev_data = dev->data;

	k_sem_give(&dev_data->sem);
}
/* Issue a command with no data phase to the flash.
 *
 * Clears cmd_status before sending; returns -EIO when the HAL rejects
 * the command, otherwise the (possibly error-updated) cmd_status.
 */
static int ospi_send_cmd(const struct device *dev, OSPI_RegularCmdTypeDef *cmd)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	HAL_StatusTypeDef hal_ret;

	LOG_DBG("Instruction 0x%x", cmd->Instruction);

	dev_data->cmd_status = 0;

	hal_ret = HAL_OSPI_Command(&dev_data->hospi, cmd, HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to send OSPI instruction", hal_ret);
		return -EIO;
	}
	LOG_DBG("CCR 0x%x", dev_cfg->regs->CCR);

	return dev_data->cmd_status;
}
/* Issue @cmd then receive @size bytes into @data (DMA or interrupt mode
 * depending on STM32_OSPI_USE_DMA).
 *
 * Blocks on the sync semaphore until the transfer completes. Returns
 * -EIO on a HAL error, otherwise cmd_status.
 */
static int ospi_read_access(const struct device *dev, OSPI_RegularCmdTypeDef *cmd,
			    uint8_t *data, size_t size)
{
	struct flash_stm32_ospi_data *dev_data = dev->data;
	HAL_StatusTypeDef hal_ret;

	LOG_DBG("Instruction 0x%x", cmd->Instruction);

	cmd->NbData = size;

	dev_data->cmd_status = 0;

	hal_ret = HAL_OSPI_Command(&dev_data->hospi, cmd, HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to send OSPI instruction", hal_ret);
		return -EIO;
	}

#if STM32_OSPI_USE_DMA
	hal_ret = HAL_OSPI_Receive_DMA(&dev_data->hospi, data);
#else
	hal_ret = HAL_OSPI_Receive_IT(&dev_data->hospi, data);
#endif
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to read data", hal_ret);
		return -EIO;
	}

	/* Wait for the completion callback to release the semaphore. */
	k_sem_take(&dev_data->sync, K_FOREVER);

	return dev_data->cmd_status;
}
/* Issue @cmd then transmit @size bytes from @data (DMA or interrupt mode
 * depending on STM32_OSPI_USE_DMA).
 *
 * In OPI/STR mode only 32-bit addressing is accepted. Blocks on the sync
 * semaphore until the transfer completes. Returns -EIO on a HAL error,
 * otherwise cmd_status.
 */
static int ospi_write_access(const struct device *dev, OSPI_RegularCmdTypeDef *cmd,
			     const uint8_t *data, size_t size)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	HAL_StatusTypeDef hal_ret;

	LOG_DBG("Instruction 0x%x", cmd->Instruction);

	cmd->NbData = size;

	dev_data->cmd_status = 0;

	/* in OPI/STR the 3-byte AddressSize is not supported by the NOR flash */
	if ((dev_cfg->data_mode == OSPI_OPI_MODE) &&
	    (cmd->AddressSize != HAL_OSPI_ADDRESS_32_BITS)) {
		LOG_ERR("OSPI wr in OPI/STR mode is for 32bit address only");
		return -EIO;
	}

	hal_ret = HAL_OSPI_Command(&dev_data->hospi, cmd, HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to send OSPI instruction", hal_ret);
		return -EIO;
	}

#if STM32_OSPI_USE_DMA
	hal_ret = HAL_OSPI_Transmit_DMA(&dev_data->hospi, (uint8_t *)data);
#else
	hal_ret = HAL_OSPI_Transmit_IT(&dev_data->hospi, (uint8_t *)data);
#endif
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to write data", hal_ret);
		return -EIO;
	}

	/* Wait for the completion callback to release the semaphore. */
	k_sem_take(&dev_data->sync, K_FOREVER);

	return dev_data->cmd_status;
}
/*
 * Gives a OSPI_RegularCmdTypeDef with all parameters set
 * except Instruction, Address, DummyCycles, NbData.
 *
 * transfer_mode selects the number of lines used for instruction,
 * address and data phases (OSPI=8, QUAD=4, DUAL=2, default/SPI=1);
 * transfer_rate selects single (STR) or double (DTR) data rate.
 */
static OSPI_RegularCmdTypeDef ospi_prepare_cmd(uint8_t transfer_mode, uint8_t transfer_rate)
{
	OSPI_RegularCmdTypeDef cmd_tmp = {
		.OperationType = HAL_OSPI_OPTYPE_COMMON_CFG,
		.FlashId = HAL_OSPI_FLASH_ID_1,
		/* 16-bit instructions in octal mode, 8-bit otherwise */
		.InstructionSize = ((transfer_mode == OSPI_OPI_MODE)
				? HAL_OSPI_INSTRUCTION_16_BITS
				: HAL_OSPI_INSTRUCTION_8_BITS),
		.InstructionDtrMode = ((transfer_rate == OSPI_DTR_TRANSFER)
				? HAL_OSPI_INSTRUCTION_DTR_ENABLE
				: HAL_OSPI_INSTRUCTION_DTR_DISABLE),
		.AddressDtrMode = ((transfer_rate == OSPI_DTR_TRANSFER)
				? HAL_OSPI_ADDRESS_DTR_ENABLE
				: HAL_OSPI_ADDRESS_DTR_DISABLE),
		/* AddressSize must be set to 32bits for init and mem config phase */
		.AddressSize = HAL_OSPI_ADDRESS_32_BITS,
		.AlternateBytesMode = HAL_OSPI_ALTERNATE_BYTES_NONE,
		.DataDtrMode = ((transfer_rate == OSPI_DTR_TRANSFER)
				? HAL_OSPI_DATA_DTR_ENABLE
				: HAL_OSPI_DATA_DTR_DISABLE),
		/* DQS is only used with DTR transfers */
		.DQSMode = (transfer_rate == OSPI_DTR_TRANSFER)
				? HAL_OSPI_DQS_ENABLE
				: HAL_OSPI_DQS_DISABLE,
		.SIOOMode = HAL_OSPI_SIOO_INST_EVERY_CMD,
	};

	switch (transfer_mode) {
	case OSPI_OPI_MODE: {
		cmd_tmp.InstructionMode = HAL_OSPI_INSTRUCTION_8_LINES;
		cmd_tmp.AddressMode = HAL_OSPI_ADDRESS_8_LINES;
		cmd_tmp.DataMode = HAL_OSPI_DATA_8_LINES;
		break;
	}
	case OSPI_QUAD_MODE: {
		cmd_tmp.InstructionMode = HAL_OSPI_INSTRUCTION_4_LINES;
		cmd_tmp.AddressMode = HAL_OSPI_ADDRESS_4_LINES;
		cmd_tmp.DataMode = HAL_OSPI_DATA_4_LINES;
		break;
	}
	case OSPI_DUAL_MODE: {
		cmd_tmp.InstructionMode = HAL_OSPI_INSTRUCTION_2_LINES;
		cmd_tmp.AddressMode = HAL_OSPI_ADDRESS_2_LINES;
		cmd_tmp.DataMode = HAL_OSPI_DATA_2_LINES;
		break;
	}
	default: {
		cmd_tmp.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		cmd_tmp.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
		cmd_tmp.DataMode = HAL_OSPI_DATA_1_LINE;
		break;
	}
	}

	return cmd_tmp;
}
static uint32_t stm32_ospi_hal_address_size(const struct device *dev)
{
struct flash_stm32_ospi_data *dev_data = dev->data;
if (dev_data->address_width == 4U) {
return HAL_OSPI_ADDRESS_32_BITS;
}
return HAL_OSPI_ADDRESS_24_BITS;
}
#if defined(CONFIG_FLASH_JESD216_API)
/*
 * Read the JEDEC ID data from the octoFlash at init, or take it from the
 * DTS, and store it in the jedec_id table of flash_stm32_ospi_data.
 *
 * Returns 0 on success, -EIO on a DTS length mismatch or HAL error.
 */
static int stm32_ospi_read_jedec_id(const struct device *dev)
{
	struct flash_stm32_ospi_data *dev_data = dev->data;
#if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_ospi_nor), jedec_id)
	/* If DTS has the jedec_id property, check its length */
	if (DT_INST_PROP_LEN(0, jedec_id) != JESD216_READ_ID_LEN) {
		LOG_ERR("Read ID length is wrong (%d)", DT_INST_PROP_LEN(0, jedec_id));
		return -EIO;
	}
	/* dev_data->jedec_id is filled from the DTS property */
#else
	/* This is a SPI/STR command to issue to the octoFlash device */
	OSPI_RegularCmdTypeDef cmd = ospi_prepare_cmd(OSPI_SPI_MODE, OSPI_STR_TRANSFER);

	cmd.Instruction = JESD216_CMD_READ_ID;
	cmd.AddressSize = stm32_ospi_hal_address_size(dev);
	cmd.AddressMode = HAL_OSPI_ADDRESS_NONE;
	cmd.NbData = JESD216_READ_ID_LEN; /* 3 bytes in the READ ID */

	HAL_StatusTypeDef hal_ret;

	hal_ret = HAL_OSPI_Command(&dev_data->hospi, &cmd,
				   HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to send OSPI instruction", hal_ret);
		return -EIO;
	}

	/* Place the received data directly into the jedec_id table */
	hal_ret = HAL_OSPI_Receive(&dev_data->hospi, dev_data->jedec_id,
				   HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to read data", hal_ret);
		return -EIO;
	}
#endif /* jedec_id */
	LOG_DBG("Jedec ID = [%02x %02x %02x]",
		dev_data->jedec_id[0], dev_data->jedec_id[1], dev_data->jedec_id[2]);

	dev_data->cmd_status = 0;

	return 0;
}

/*
 * Read Serial Flash ID :
 * just gives the values received from the octoFlash or from the DTS.
 * Copies JESD216_READ_ID_LEN bytes into @id; always returns 0.
 */
static int ospi_read_jedec_id(const struct device *dev, uint8_t *id)
{
	struct flash_stm32_ospi_data *dev_data = dev->data;

	/* Take jedec Id values from the table (issued from the octoFlash) */
	memcpy(id, dev_data->jedec_id, JESD216_READ_ID_LEN);

	LOG_INF("Manuf ID = %02x Memory Type = %02x Memory Density = %02x",
		id[0], id[1], id[2]);

	return 0;
}
#endif /* CONFIG_FLASH_JESD216_API */
#if !DT_NODE_HAS_PROP(DT_INST(0, st_stm32_ospi_nor), sfdp_bfp)
/*
 * Read the Serial Flash Discovery Parameter table from the octoFlash at
 * init: performs a blocking read over the bus. Dummy cycles and address
 * size differ between OPI (20 cycles, 32-bit) and SPI (8 cycles, 24-bit).
 *
 * Returns 0 on success, -EIO on a HAL error.
 */
static int stm32_ospi_read_sfdp(const struct device *dev, off_t addr,
				void *data,
				size_t size)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;

	OSPI_RegularCmdTypeDef cmd = ospi_prepare_cmd(dev_cfg->data_mode,
						      dev_cfg->data_rate);
	if (dev_cfg->data_mode == OSPI_OPI_MODE) {
		cmd.Instruction = JESD216_OCMD_READ_SFDP;
		cmd.DummyCycles = 20U;
		cmd.AddressSize = HAL_OSPI_ADDRESS_32_BITS;
	} else {
		cmd.Instruction = JESD216_CMD_READ_SFDP;
		cmd.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		cmd.DataMode = HAL_OSPI_DATA_1_LINE;
		cmd.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
		cmd.DummyCycles = 8U;
		cmd.AddressSize = HAL_OSPI_ADDRESS_24_BITS;
	}
	cmd.Address = addr;
	cmd.NbData = size;

	HAL_StatusTypeDef hal_ret;

	hal_ret = HAL_OSPI_Command(&dev_data->hospi, &cmd, HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to send OSPI instruction", hal_ret);
		return -EIO;
	}

	hal_ret = HAL_OSPI_Receive(&dev_data->hospi, (uint8_t *)data,
				   HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (hal_ret != HAL_OK) {
		LOG_ERR("%d: Failed to read data", hal_ret);
		return -EIO;
	}

	dev_data->cmd_status = 0;

	return 0;
}
#endif /* ! sfdp_bfp */
/*
 * Read Serial Flash Discovery Parameter :
 * perform a read access over the SPI bus for the SFDP (DataMode is
 * already set), or copy it from the sfdp-bfp table in the DTS.
 *
 * Returns 0 on success, -EIO when the requested size exceeds the DTS
 * table, -EINVAL when reading from the flash fails.
 */
static int ospi_read_sfdp(const struct device *dev, off_t addr, void *data,
			  size_t size)
{
#if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_ospi_nor), sfdp_bfp)
	/* There is a sfdp-bfp property in the deviceTree : do not read the flash */
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;

	LOG_INF("Read SFDP from DTS property");
	/* If DTS has the sdfp table property, check its length */
	if (size > DT_INST_PROP_LEN(0, sfdp_bfp)) {
		LOG_ERR("SDFP bdfp length is wrong (%d)", DT_INST_PROP_LEN(0, sfdp_bfp));
		return -EIO;
	}
	/* The dev_cfg->sfdp_bfp is filled from the DTS property */
	memcpy(data, dev_cfg->sfdp_bfp + addr, size);

	return 0;
#else
	LOG_INF("Read SFDP from octoFlash");
	/* Get the SFDP from the octoFlash (no sfdp-bfp table in the DeviceTree) */
	if (stm32_ospi_read_sfdp(dev, addr, data, size) == 0) {
		/* If valid, then ignore any table from the DTS */
		return 0;
	}
	LOG_INF("Error reading SFDP from octoFlash and none in the DTS");
	return -EINVAL;
#endif /* sfdp_bfp */
}
/* Check that [addr, addr + size) lies entirely within the flash device. */
static bool ospi_address_is_valid(const struct device *dev, off_t addr,
				  size_t size)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;

	if (addr < 0) {
		return false;
	}

	/* 64-bit arithmetic avoids wrap-around in addr + size. */
	return ((uint64_t)addr + (uint64_t)size) <= dev_cfg->flash_size;
}
/* Arm HAL status-register auto-polling in interrupt mode and block until
 * the configured match occurs or @timeout_ms elapses. On timeout the
 * transaction is aborted and -EIO is returned; otherwise cmd_status.
 */
static int stm32_ospi_wait_auto_polling(struct flash_stm32_ospi_data *dev_data,
		OSPI_AutoPollingTypeDef *s_config, uint32_t timeout_ms)
{
	dev_data->cmd_status = 0;

	if (HAL_OSPI_AutoPolling_IT(&dev_data->hospi, s_config) != HAL_OK) {
		LOG_ERR("OSPI AutoPoll failed");
		return -EIO;
	}

	if (k_sem_take(&dev_data->sync, K_MSEC(timeout_ms)) != 0) {
		LOG_ERR("OSPI AutoPoll wait failed");
		HAL_OSPI_Abort(&dev_data->hospi);
		k_sem_reset(&dev_data->sync);
		return -EIO;
	}

	/* HAL_OSPI_AutoPolling_IT enables transfer error interrupt which sets
	 * cmd_status.
	 */
	return dev_data->cmd_status;
}
/*
 * This function Polls the WEL (write enable latch) bit to become to 0
 * When the Chip Erase Cycle is completed, the Write Enable Latch (WEL) bit is cleared.
 * in nor_mode SPI/OPI OSPI_SPI_MODE or OSPI_OPI_MODE
 * and nor_rate transfer STR/DTR OSPI_STR_TRANSFER or OSPI_DTR_TRANSFER
 *
 * Returns 0 when the auto-poll matched, -EIO on command failure or timeout
 * (bounded by STM32_OSPI_BULK_ERASE_MAX_TIME).
 */
static int stm32_ospi_mem_erased(const struct device *dev)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	uint8_t nor_mode = dev_cfg->data_mode;
	uint8_t nor_rate = dev_cfg->data_rate;
	OSPI_HandleTypeDef *hospi = &dev_data->hospi;

	OSPI_AutoPollingTypeDef s_config = {0};
	OSPI_RegularCmdTypeDef s_command = ospi_prepare_cmd(nor_mode, nor_rate);

	/* Configure automatic polling mode command to wait for memory ready */
	if (nor_mode == OSPI_OPI_MODE) {
		s_command.Instruction = SPI_NOR_OCMD_RDSR;
		s_command.DummyCycles = (nor_rate == OSPI_DTR_TRANSFER)
					? SPI_NOR_DUMMY_REG_OCTAL_DTR
					: SPI_NOR_DUMMY_REG_OCTAL;
	} else {
		s_command.Instruction = SPI_NOR_CMD_RDSR;
		/* force 1-line InstructionMode for any non-OSPI transfer */
		s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		s_command.AddressMode = HAL_OSPI_ADDRESS_NONE;
		/* force 1-line DataMode for any non-OSPI transfer */
		s_command.DataMode = HAL_OSPI_DATA_1_LINE;
		s_command.DummyCycles = 0;
	}
	/* DTR transfers read the status register twice */
	s_command.NbData = ((nor_rate == OSPI_DTR_TRANSFER) ? 2U : 1U);
	s_command.Address = 0U;

	/* Set the mask to 0x02 to mask all Status REG bits except WEL */
	/* Set the match to 0x00 to check if the WEL bit is Reset */
	s_config.Match = SPI_NOR_WEL_MATCH;
	s_config.Mask = SPI_NOR_WEL_MASK; /* Write Enable Latch */

	s_config.MatchMode = HAL_OSPI_MATCH_MODE_AND;
	s_config.Interval = SPI_NOR_AUTO_POLLING_INTERVAL;
	s_config.AutomaticStop = HAL_OSPI_AUTOMATIC_STOP_ENABLE;

	if (HAL_OSPI_Command(hospi, &s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI AutoPoll command (WEL) failed");
		return -EIO;
	}

	/* Start Automatic-Polling mode to wait until the memory is totally erased */
	return stm32_ospi_wait_auto_polling(dev_data,
			&s_config, STM32_OSPI_BULK_ERASE_MAX_TIME);
}
/*
 * This function Polls the WIP(Write In Progress) bit to become to 0
 * in nor_mode SPI/OPI OSPI_SPI_MODE or OSPI_OPI_MODE
 * and nor_rate transfer STR/DTR OSPI_STR_TRANSFER or OSPI_DTR_TRANSFER
 *
 * Returns 0 when the memory reports ready, -EIO on command failure or
 * timeout (bounded by HAL_OSPI_TIMEOUT_DEFAULT_VALUE).
 */
static int stm32_ospi_mem_ready(struct flash_stm32_ospi_data *dev_data, uint8_t nor_mode,
		uint8_t nor_rate)
{
	OSPI_HandleTypeDef *hospi = &dev_data->hospi;
	OSPI_AutoPollingTypeDef s_config = {0};
	OSPI_RegularCmdTypeDef s_command = ospi_prepare_cmd(nor_mode, nor_rate);

	/* Configure automatic polling mode command to wait for memory ready */
	if (nor_mode == OSPI_OPI_MODE) {
		s_command.Instruction = SPI_NOR_OCMD_RDSR;
		s_command.DummyCycles = (nor_rate == OSPI_DTR_TRANSFER)
				? SPI_NOR_DUMMY_REG_OCTAL_DTR
				: SPI_NOR_DUMMY_REG_OCTAL;
	} else {
		s_command.Instruction = SPI_NOR_CMD_RDSR;
		/* force 1-line InstructionMode for any non-OSPI transfer */
		s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		s_command.AddressMode = HAL_OSPI_ADDRESS_NONE;
		/* force 1-line DataMode for any non-OSPI transfer */
		s_command.DataMode = HAL_OSPI_DATA_1_LINE;
		s_command.DummyCycles = 0;
	}
	/* DTR transfers read the status register twice */
	s_command.NbData = ((nor_rate == OSPI_DTR_TRANSFER) ? 2U : 1U);
	s_command.Address = 0U;

	/* Set the mask to 0x01 to mask all Status REG bits except WIP */
	/* Set the match to 0x00 to check if the WIP bit is Reset */
	s_config.Match = SPI_NOR_MEM_RDY_MATCH;
	s_config.Mask = SPI_NOR_MEM_RDY_MASK; /* Write in progress */
	s_config.MatchMode = HAL_OSPI_MATCH_MODE_AND;
	s_config.Interval = SPI_NOR_AUTO_POLLING_INTERVAL;
	s_config.AutomaticStop = HAL_OSPI_AUTOMATIC_STOP_ENABLE;

	if (HAL_OSPI_Command(hospi, &s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI AutoPoll command failed");
		return -EIO;
	}

	/* Start Automatic-Polling mode to wait until the memory is ready WIP=0 */
	return stm32_ospi_wait_auto_polling(dev_data, &s_config, HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
}
/* Issue a Write Enable and auto-poll the status register until WEL is set */
static int stm32_ospi_write_enable(struct flash_stm32_ospi_data *dev_data,
				   uint8_t nor_mode, uint8_t nor_rate)
{
	OSPI_HandleTypeDef *hospi = &dev_data->hospi;
	OSPI_AutoPollingTypeDef polling_cfg = {0};
	OSPI_RegularCmdTypeDef cmd = ospi_prepare_cmd(nor_mode, nor_rate);

	/* Phase 1: send the WREN opcode (no address phase, no data phase) */
	if (nor_mode == OSPI_OPI_MODE) {
		cmd.Instruction = SPI_NOR_OCMD_WREN;
	} else {
		cmd.Instruction = SPI_NOR_CMD_WREN;
		/* Any non-OPI transfer sends the opcode on a single line */
		cmd.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
	}
	cmd.AddressMode = HAL_OSPI_ADDRESS_NONE;
	cmd.DataMode = HAL_OSPI_DATA_NONE;
	cmd.DummyCycles = 0U;

	if (HAL_OSPI_Command(hospi, &cmd, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI flash write enable cmd failed");
		return -EIO;
	}

	/* Phase 2: auto-poll the status register until WEL goes high */
	if (nor_mode == OSPI_OPI_MODE) {
		cmd.Instruction = SPI_NOR_OCMD_RDSR;
		cmd.AddressMode = HAL_OSPI_ADDRESS_8_LINES;
		cmd.DataMode = HAL_OSPI_DATA_8_LINES;
		if (nor_rate == OSPI_DTR_TRANSFER) {
			cmd.DummyCycles = SPI_NOR_DUMMY_REG_OCTAL_DTR;
		} else {
			cmd.DummyCycles = SPI_NOR_DUMMY_REG_OCTAL;
		}
	} else {
		cmd.Instruction = SPI_NOR_CMD_RDSR;
		/* Single-line RDSR without dummy cycles for non-OPI transfers */
		cmd.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		cmd.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
		cmd.DataMode = HAL_OSPI_DATA_1_LINE;
		cmd.DummyCycles = 0;
	}
	cmd.NbData = (nor_rate == OSPI_DTR_TRANSFER) ? 2U : 1U;
	cmd.Address = 0U;

	if (HAL_OSPI_Command(hospi, &cmd, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI config auto polling cmd failed");
		return -EIO;
	}

	polling_cfg.Match = SPI_NOR_WREN_MATCH;
	polling_cfg.Mask = SPI_NOR_WREN_MASK;
	polling_cfg.MatchMode = HAL_OSPI_MATCH_MODE_AND;
	polling_cfg.Interval = SPI_NOR_AUTO_POLLING_INTERVAL;
	polling_cfg.AutomaticStop = HAL_OSPI_AUTOMATIC_STOP_ENABLE;

	return stm32_ospi_wait_auto_polling(dev_data, &polling_cfg,
					    HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
}
/* Write Flash configuration register 2 with new dummy cycles */
static int stm32_ospi_write_cfg2reg_dummy(OSPI_HandleTypeDef *hospi,
					  uint8_t nor_mode, uint8_t nor_rate)
{
	uint8_t transmit_data = SPI_NOR_CR2_DUMMY_CYCLES_66MHZ;
	OSPI_RegularCmdTypeDef s_command = ospi_prepare_cmd(nor_mode, nor_rate);

	/* Initialize the writing of configuration register 2 */
	s_command.Instruction = (nor_mode == OSPI_SPI_MODE)
				? SPI_NOR_CMD_WR_CFGREG2
				: SPI_NOR_OCMD_WR_CFGREG2;
	s_command.Address = SPI_NOR_REG2_ADDR3;
	s_command.DummyCycles = 0U;
	/* 1 data byte in SPI mode; 2 bytes when the octal link runs in DTR */
	s_command.NbData = (nor_mode == OSPI_SPI_MODE) ? 1U
			   : ((nor_rate == OSPI_DTR_TRANSFER) ? 2U : 1U);

	if (HAL_OSPI_Command(hospi, &s_command,
		HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		/* Distinct message per phase (both paths used to log the same
		 * truncated "OSPI transmit " string)
		 */
		LOG_ERR("OSPI write CFGR2 command failed");
		return -EIO;
	}

	if (HAL_OSPI_Transmit(hospi, &transmit_data,
		HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI write CFGR2 transmit failed");
		return -EIO;
	}

	return 0;
}
/* Write Flash configuration register 2 to select single or octal SPI protocol */
static int stm32_ospi_write_cfg2reg_io(OSPI_HandleTypeDef *hospi,
				       uint8_t nor_mode, uint8_t nor_rate, uint8_t op_enable)
{
	OSPI_RegularCmdTypeDef cmd = ospi_prepare_cmd(nor_mode, nor_rate);

	/* Select the CFGREG2 write opcode and data length for the protocol */
	if (nor_mode == OSPI_SPI_MODE) {
		cmd.Instruction = SPI_NOR_CMD_WR_CFGREG2;
		cmd.NbData = 1U;
	} else {
		cmd.Instruction = SPI_NOR_OCMD_WR_CFGREG2;
		cmd.NbData = (nor_rate == OSPI_DTR_TRANSFER) ? 2U : 1U;
	}
	cmd.Address = SPI_NOR_REG2_ADDR1;
	cmd.DummyCycles = 0U;

	if (HAL_OSPI_Command(hospi, &cmd,
			     HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("Write Flash configuration reg2 failed");
		return -EIO;
	}

	/* Send the protocol-enable value (STR-OPI / DTR-OPI enable bit) */
	if (HAL_OSPI_Transmit(hospi, &op_enable,
			      HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("Write Flash configuration reg2 failed");
		return -EIO;
	}

	return 0;
}
/* Read Flash configuration register 2 with single or octal SPI protocol */
static int stm32_ospi_read_cfg2reg(OSPI_HandleTypeDef *hospi,
				   uint8_t nor_mode, uint8_t nor_rate, uint8_t *value)
{
	OSPI_RegularCmdTypeDef s_command = ospi_prepare_cmd(nor_mode, nor_rate);

	/* Initialize the reading of configuration register 2 */
	s_command.Instruction = (nor_mode == OSPI_SPI_MODE)
				? SPI_NOR_CMD_RD_CFGREG2
				: SPI_NOR_OCMD_RD_CFGREG2;
	s_command.Address = SPI_NOR_REG2_ADDR1;
	/* Register reads only need dummy cycles in octal mode */
	s_command.DummyCycles = (nor_mode == OSPI_SPI_MODE)
				? 0U
				: ((nor_rate == OSPI_DTR_TRANSFER)
					? SPI_NOR_DUMMY_REG_OCTAL_DTR
					: SPI_NOR_DUMMY_REG_OCTAL);
	s_command.NbData = (nor_rate == OSPI_DTR_TRANSFER) ? 2U : 1U;

	if (HAL_OSPI_Command(hospi, &s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		/* This is a read path: the old messages wrongly said "Write" */
		LOG_ERR("Read Flash configuration reg2 command failed");
		return -EIO;
	}

	if (HAL_OSPI_Receive(hospi, value, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("Read Flash configuration reg2 receive failed");
		return -EIO;
	}

	return 0;
}
/* Set the NOR Flash to desired Interface mode : SPI/OSPI and STR/DTR according to the DTS.
 * The sequence below is executed over the single-SPI/STR link (the flash
 * boots in that mode) and switches the memory, then the peripheral, to OPI.
 */
static int stm32_ospi_config_mem(const struct device *dev)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	uint8_t reg[2];

	/* Going to set the SPI mode and STR transfer rate : done */
	if ((dev_cfg->data_mode != OSPI_OPI_MODE)
		&& (dev_cfg->data_rate == OSPI_STR_TRANSFER)) {
		LOG_INF("OSPI flash config is SPI|DUAL|QUAD / STR");
		return 0;
	}

	/* Going to set the OPI mode (STR or DTR transfer rate) */
	LOG_DBG("OSPI configuring OctoSPI mode");

	/* Each configuration-register write needs a preceding Write Enable */
	if (stm32_ospi_write_enable(dev_data,
		OSPI_SPI_MODE, OSPI_STR_TRANSFER) != 0) {
		LOG_ERR("OSPI write Enable failed");
		return -EIO;
	}

	/* Write Configuration register 2 (with new dummy cycles) */
	if (stm32_ospi_write_cfg2reg_dummy(&dev_data->hospi,
		OSPI_SPI_MODE, OSPI_STR_TRANSFER) != 0) {
		LOG_ERR("OSPI write CFGR2 failed");
		return -EIO;
	}
	/* Wait for the register write to complete (WIP back to 0) */
	if (stm32_ospi_mem_ready(dev_data,
		OSPI_SPI_MODE, OSPI_STR_TRANSFER) != 0) {
		LOG_ERR("OSPI autopolling failed");
		return -EIO;
	}
	if (stm32_ospi_write_enable(dev_data,
		OSPI_SPI_MODE, OSPI_STR_TRANSFER) != 0) {
		LOG_ERR("OSPI write Enable 2 failed");
		return -EIO;
	}

	/* Write Configuration register 2 (with Octal I/O SPI protocol : choose STR or DTR) */
	uint8_t mode_enable = ((dev_cfg->data_rate == OSPI_DTR_TRANSFER)
				? SPI_NOR_CR2_DTR_OPI_EN
				: SPI_NOR_CR2_STR_OPI_EN);
	if (stm32_ospi_write_cfg2reg_io(&dev_data->hospi,
		OSPI_SPI_MODE, OSPI_STR_TRANSFER, mode_enable) != 0) {
		LOG_ERR("OSPI write CFGR2 failed");
		return -EIO;
	}

	/* Wait that the configuration is effective and check that memory is ready */
	k_busy_wait(STM32_OSPI_WRITE_REG_MAX_TIME * USEC_PER_MSEC);

	/* Reconfigure the memory type of the peripheral.
	 * NOTE(review): MemoryType is hard-coded to MACRONIX here - assumes a
	 * Macronix octal flash; confirm for other vendors.
	 */
	dev_data->hospi.Init.MemoryType = HAL_OSPI_MEMTYPE_MACRONIX;
	dev_data->hospi.Init.DelayHoldQuarterCycle = HAL_OSPI_DHQC_ENABLE;
	if (HAL_OSPI_Init(&dev_data->hospi) != HAL_OK) {
		LOG_ERR("OSPI mem type MACRONIX failed");
		return -EIO;
	}

	/* From here on the flash is in OPI mode: verify it responds */
	if (dev_cfg->data_rate == OSPI_STR_TRANSFER) {
		if (stm32_ospi_mem_ready(dev_data,
			OSPI_OPI_MODE, OSPI_STR_TRANSFER) != 0) {
			/* Check Flash busy ? */
			LOG_ERR("OSPI flash busy failed");
			return -EIO;
		}

		if (stm32_ospi_read_cfg2reg(&dev_data->hospi,
			OSPI_OPI_MODE, OSPI_STR_TRANSFER, reg) != 0) {
			/* Check the configuration has been correctly done on SPI_NOR_REG2_ADDR1 */
			LOG_ERR("OSPI flash config read failed");
			return -EIO;
		}

		LOG_INF("OSPI flash config is OPI / STR");
	}

	if (dev_cfg->data_rate == OSPI_DTR_TRANSFER) {
		if (stm32_ospi_mem_ready(dev_data,
			OSPI_OPI_MODE, OSPI_DTR_TRANSFER) != 0) {
			/* Check Flash busy ? */
			LOG_ERR("OSPI flash busy failed");
			return -EIO;
		}

		LOG_INF("OSPI flash config is OPI / DTR");
	}

	return 0;
}
/* Reset the NOR flash, either via a GPIO pulse or by sending the software
 * reset command pair in every mode the flash might currently be in
 * (SPI/OPI and STR/DTR), since its state after power-up is unknown.
 */
static int stm32_ospi_mem_reset(const struct device *dev)
{
	struct flash_stm32_ospi_data *dev_data = dev->data;

#if STM32_OSPI_RESET_GPIO
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;

	/* Generate RESETn pulse for the flash memory */
	gpio_pin_configure_dt(&dev_cfg->reset, GPIO_OUTPUT_ACTIVE);
	k_msleep(DT_INST_PROP(0, reset_gpios_duration));
	gpio_pin_set_dt(&dev_cfg->reset, 0);
#else
	/* Reset command sent successively for each mode SPI/OPI & STR/DTR */
	OSPI_RegularCmdTypeDef s_command = {
		.OperationType = HAL_OSPI_OPTYPE_COMMON_CFG,
		.FlashId = HAL_OSPI_FLASH_ID_1,
		.AddressMode = HAL_OSPI_ADDRESS_NONE,
		.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE,
		.InstructionDtrMode = HAL_OSPI_INSTRUCTION_DTR_DISABLE,
		.Instruction = SPI_NOR_CMD_RESET_EN,
		.InstructionSize = HAL_OSPI_INSTRUCTION_8_BITS,
		.AlternateBytesMode = HAL_OSPI_ALTERNATE_BYTES_NONE,
		.DataMode = HAL_OSPI_DATA_NONE,
		.DummyCycles = 0U,
		.DQSMode = HAL_OSPI_DQS_DISABLE,
		.SIOOMode = HAL_OSPI_SIOO_INST_EVERY_CMD,
	};

	/* Reset enable in SPI mode and STR transfer mode */
	if (HAL_OSPI_Command(&dev_data->hospi,
		&s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI reset enable (SPI/STR) failed");
		return -EIO;
	}

	/* Reset memory in SPI mode and STR transfer mode */
	s_command.Instruction = SPI_NOR_CMD_RESET_MEM;
	if (HAL_OSPI_Command(&dev_data->hospi,
		&s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI reset memory (SPI/STR) failed");
		return -EIO;
	}

	/* Reset enable in OPI mode and STR transfer mode:
	 * octal opcodes are 16-bit and sent on 8 lines
	 */
	s_command.InstructionMode = HAL_OSPI_INSTRUCTION_8_LINES;
	s_command.InstructionDtrMode = HAL_OSPI_INSTRUCTION_DTR_DISABLE;
	s_command.Instruction = SPI_NOR_OCMD_RESET_EN;
	s_command.InstructionSize = HAL_OSPI_INSTRUCTION_16_BITS;
	if (HAL_OSPI_Command(&dev_data->hospi,
		&s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI reset enable (OPI/STR) failed");
		return -EIO;
	}

	/* Reset memory in OPI mode and STR transfer mode */
	s_command.Instruction = SPI_NOR_OCMD_RESET_MEM;
	if (HAL_OSPI_Command(&dev_data->hospi,
		&s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI reset memory (OPI/STR) failed");
		return -EIO;
	}

	/* Reset enable in OPI mode and DTR transfer mode */
	s_command.InstructionDtrMode = HAL_OSPI_INSTRUCTION_DTR_ENABLE;
	s_command.Instruction = SPI_NOR_OCMD_RESET_EN;
	if (HAL_OSPI_Command(&dev_data->hospi,
		&s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI reset enable (OPI/DTR) failed");
		return -EIO;
	}

	/* Reset memory in OPI mode and DTR transfer mode */
	s_command.Instruction = SPI_NOR_OCMD_RESET_MEM;
	if (HAL_OSPI_Command(&dev_data->hospi,
		&s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI reset memory (OPI/DTR) failed");
		return -EIO;
	}
#endif
	/* Wait after SWreset CMD, in case SWReset occurred during erase operation */
	k_busy_wait(STM32_OSPI_RESET_MAX_TIME * USEC_PER_MSEC);

	return 0;
}
#ifdef CONFIG_STM32_MEMMAP
/* Function to configure the octoflash in MemoryMapped mode: programs both
 * the read and the write (page-program) command templates into the
 * peripheral, then enables memory-mapped access.
 */
static int stm32_ospi_set_memorymap(const struct device *dev)
{
	HAL_StatusTypeDef ret;
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	OSPI_RegularCmdTypeDef s_command = ospi_prepare_cmd(dev_cfg->data_mode, dev_cfg->data_rate);
	OSPI_MemoryMappedTypeDef s_MemMappedCfg;

	/* Configure octoflash in MemoryMapped mode */
	if ((dev_cfg->data_mode == OSPI_SPI_MODE) &&
	    (stm32_ospi_hal_address_size(dev) == HAL_OSPI_ADDRESS_24_BITS)) {
		/* SPI mode with 3-byte addressing cannot be memory-mapped here */
		LOG_ERR("OSPI_SPI_MODE in 3Bytes addressing is not supported");
		return -ENOTSUP;
	}

	/* Initialize the read command */
	s_command.OperationType = HAL_OSPI_OPTYPE_READ_CFG;
	/* DTR always uses 32-bit addresses; STR uses the configured size */
	s_command.AddressSize = (dev_cfg->data_rate == OSPI_STR_TRANSFER)
				? stm32_ospi_hal_address_size(dev)
				: HAL_OSPI_ADDRESS_32_BITS;

	/* Adapt lines based on read_mode (from SFDP) for non-OPI modes */
	if (dev_cfg->data_mode != OSPI_OPI_MODE) {
		switch (dev_data->read_mode) {
		case JESD216_MODE_112:
			s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			s_command.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
			s_command.DataMode = HAL_OSPI_DATA_2_LINES;
			break;
		case JESD216_MODE_122:
			s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			s_command.AddressMode = HAL_OSPI_ADDRESS_2_LINES;
			s_command.DataMode = HAL_OSPI_DATA_2_LINES;
			break;
		case JESD216_MODE_114:
			s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			s_command.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
			s_command.DataMode = HAL_OSPI_DATA_4_LINES;
			break;
		case JESD216_MODE_144:
			s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			s_command.AddressMode = HAL_OSPI_ADDRESS_4_LINES;
			s_command.DataMode = HAL_OSPI_DATA_4_LINES;
			break;
		default:
			/* Use lines based on data_mode set in ospi_prepare_cmd */
			break;
		}
	}

	/* Set instruction and dummy cycles parameters */
	if (dev_cfg->data_rate == OSPI_DTR_TRANSFER) {
		/* DTR transfer rate (==> Octal mode) */
		s_command.Instruction = SPI_NOR_OCMD_DTR_RD;
		s_command.DummyCycles = SPI_NOR_DUMMY_RD_OCTAL_DTR;
	} else {
		/* STR transfer rate */
		if (dev_cfg->data_mode == OSPI_OPI_MODE) {
			/* OPI and STR */
			s_command.Instruction = SPI_NOR_OCMD_RD;
			s_command.DummyCycles = SPI_NOR_DUMMY_RD_OCTAL;
		} else {
			/* use SFDP:BFP read instruction */
			s_command.Instruction = dev_data->read_opcode;
			s_command.DummyCycles = dev_data->read_dummy;
		}
	}

	ret = HAL_OSPI_Command(&dev_data->hospi, &s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (ret != HAL_OK) {
		LOG_ERR("%d: Failed to set memory map read cmd", ret);
		return -EIO;
	}

	/* Initialize the program command (re-uses the read template fields) */
	s_command.OperationType = HAL_OSPI_OPTYPE_WRITE_CFG;
	s_command.DQSMode = HAL_OSPI_DQS_DISABLE;
	s_command.Instruction = dev_data->write_opcode;
	s_command.DummyCycles = 0U;

	/* Adapt lines based on write opcode */
	switch (s_command.Instruction) {
	case SPI_NOR_CMD_PP_4B:
		__fallthrough;
	case SPI_NOR_CMD_PP:
		s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		s_command.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
		s_command.DataMode = HAL_OSPI_DATA_1_LINE;
		break;
	case SPI_NOR_CMD_PP_1_1_2:
		s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		s_command.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
		s_command.DataMode = HAL_OSPI_DATA_2_LINES;
		break;
	case SPI_NOR_CMD_PP_1_1_4_4B:
		__fallthrough;
	case SPI_NOR_CMD_PP_1_1_4:
		s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		s_command.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
		s_command.DataMode = HAL_OSPI_DATA_4_LINES;
		break;
	case SPI_NOR_CMD_PP_1_4_4_4B:
		__fallthrough;
	case SPI_NOR_CMD_PP_1_4_4:
		s_command.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
		s_command.AddressMode = HAL_OSPI_ADDRESS_4_LINES;
		s_command.DataMode = HAL_OSPI_DATA_4_LINES;
		break;
	default:
		/* Use lines based on data_mode set in ospi_prepare_cmd */
		break;
	}

	ret = HAL_OSPI_Command(&dev_data->hospi, &s_command, HAL_OSPI_TIMEOUT_DEFAULT_VALUE);
	if (ret != HAL_OK) {
		LOG_ERR("%d: Failed to set memory map write cmd", ret);
		return -EIO;
	}

	/* Enable the memory-mapping */
	s_MemMappedCfg.TimeOutActivation = HAL_OSPI_TIMEOUT_COUNTER_DISABLE;

	ret = HAL_OSPI_MemoryMapped(&dev_data->hospi, &s_MemMappedCfg);
	if (ret != HAL_OK) {
		LOG_ERR("%d: Failed to enable memory map", ret);
		return -EIO;
	}

	LOG_DBG("MemoryMap mode enabled");
	return 0;
}
/* Function to return true if the octoflash is in MemoryMapped else false */
static bool stm32_ospi_is_memorymap(const struct device *dev)
{
struct flash_stm32_ospi_data *dev_data = dev->data;
return ((READ_BIT(dev_data->hospi.Instance->CR,
OCTOSPI_CR_FMODE) == OCTOSPI_CR_FMODE) ?
true : false);
}
/* Abort any ongoing OSPI transaction (e.g. to leave memory-mapped mode) */
static int stm32_ospi_abort(const struct device *dev)
{
	struct flash_stm32_ospi_data *dev_data = dev->data;
	HAL_StatusTypeDef hal_ret = HAL_OSPI_Abort(&dev_data->hospi);

	if (hal_ret == HAL_OK) {
		return 0;
	}

	LOG_ERR("%d: OSPI abort failed", hal_ret);
	return -EIO;
}
#endif /* CONFIG_STM32_MEMMAP */
/*
 * Function to erase the flash : chip or sector with possible OSPI/SPI and STR/DTR
 * to erase the complete chip (using dedicated command) :
 *   set size >= flash size
 *   set addr = 0
 * NOTE: cannot erase in MemoryMapped mode
 */
static int flash_stm32_ospi_erase(const struct device *dev, off_t addr,
				  size_t size)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	int ret = 0;

	/* Ignore zero size erase */
	if (size == 0) {
		return 0;
	}

	/* Maximise erase size : means the complete chip */
	if (size > dev_cfg->flash_size) {
		size = dev_cfg->flash_size;
	}

	if (!ospi_address_is_valid(dev, addr, size)) {
		LOG_ERR("Error: address or size exceeds expected values: "
			"addr 0x%lx, size %zu", (long)addr, size);
		return -EINVAL;
	}

	if (((size % SPI_NOR_SECTOR_SIZE) != 0) && (size < dev_cfg->flash_size)) {
		/* size is a size_t: use %zx (the previous %x mismatched) */
		LOG_ERR("Error: wrong sector size 0x%zx", size);
		return -ENOTSUP;
	}

	ospi_lock_thread(dev);

#ifdef CONFIG_STM32_MEMMAP
	if (stm32_ospi_is_memorymap(dev)) {
		/* Abort ongoing transfer to force CS high/BUSY deasserted */
		ret = stm32_ospi_abort(dev);
		if (ret != 0) {
			LOG_ERR("Failed to abort memory-mapped access before erase");
			goto end_erase;
		}
	}
	/* Continue with Indirect Mode */
#endif /* CONFIG_STM32_MEMMAP */

	OSPI_RegularCmdTypeDef cmd_erase = {
		.OperationType = HAL_OSPI_OPTYPE_COMMON_CFG,
		.FlashId = HAL_OSPI_FLASH_ID_1,
		.AlternateBytesMode = HAL_OSPI_ALTERNATE_BYTES_NONE,
		.DataMode = HAL_OSPI_DATA_NONE,
		.DummyCycles = 0U,
		.DQSMode = HAL_OSPI_DQS_DISABLE,
		.SIOOMode = HAL_OSPI_SIOO_INST_EVERY_CMD,
	};

	if (stm32_ospi_mem_ready(dev_data,
		dev_cfg->data_mode, dev_cfg->data_rate) != 0) {
		LOG_ERR("Erase failed : flash busy");
		ret = -EBUSY;
		goto end_erase;
	}

	cmd_erase.InstructionMode = (dev_cfg->data_mode == OSPI_OPI_MODE)
				    ? HAL_OSPI_INSTRUCTION_8_LINES
				    : HAL_OSPI_INSTRUCTION_1_LINE;
	cmd_erase.InstructionDtrMode = (dev_cfg->data_rate == OSPI_DTR_TRANSFER)
				       ? HAL_OSPI_INSTRUCTION_DTR_ENABLE
				       : HAL_OSPI_INSTRUCTION_DTR_DISABLE;
	cmd_erase.InstructionSize = (dev_cfg->data_mode == OSPI_OPI_MODE)
				    ? HAL_OSPI_INSTRUCTION_16_BITS
				    : HAL_OSPI_INSTRUCTION_8_BITS;

	while ((size > 0) && (ret == 0)) {
		/* Each erase command must be preceded by a Write Enable */
		ret = stm32_ospi_write_enable(dev_data,
			dev_cfg->data_mode, dev_cfg->data_rate);
		if (ret != 0) {
			LOG_ERR("Erase failed : write enable");
			break;
		}

		if (size == dev_cfg->flash_size) {
			/* Chip erase */
			LOG_DBG("Chip Erase");

			cmd_erase.Address = 0;
			cmd_erase.Instruction = (dev_cfg->data_mode == OSPI_OPI_MODE)
						? SPI_NOR_OCMD_BULKE
						: SPI_NOR_CMD_BULKE;
			cmd_erase.AddressMode = HAL_OSPI_ADDRESS_NONE;
			/* Full chip erase (Bulk) command */
			ospi_send_cmd(dev, &cmd_erase);

			size -= dev_cfg->flash_size;
			/* Chip (Bulk) erase started, wait until WEL becomes 0 */
			ret = stm32_ospi_mem_erased(dev);
			if (ret != 0) {
				LOG_ERR("Chip Erase failed");
				break;
			}
		} else {
			/* Sector or Block erase depending on the size */
			LOG_DBG("Sector/Block Erase");

			cmd_erase.AddressMode =
				(dev_cfg->data_mode == OSPI_OPI_MODE)
				? HAL_OSPI_ADDRESS_8_LINES
				: HAL_OSPI_ADDRESS_1_LINE;
			cmd_erase.AddressDtrMode =
				(dev_cfg->data_rate == OSPI_DTR_TRANSFER)
				? HAL_OSPI_ADDRESS_DTR_ENABLE
				: HAL_OSPI_ADDRESS_DTR_DISABLE;
			cmd_erase.AddressSize = stm32_ospi_hal_address_size(dev);
			cmd_erase.Address = addr;

			/* Pick the largest JESD216 erase type aligned at addr
			 * and fitting in the remaining size; while none has
			 * matched, fall back to the default sector erase.
			 * BUGFIX: the previous code reset bet to NULL at the
			 * end of every iteration, discarding the selection so
			 * addr/size always advanced by 4 KiB even when a
			 * larger block-erase instruction had been issued.
			 */
			const struct jesd216_erase_type *erase_types =
				dev_data->erase_types;
			const struct jesd216_erase_type *bet = NULL;

			for (uint8_t ei = 0;
				ei < JESD216_NUM_ERASE_TYPES; ++ei) {
				const struct jesd216_erase_type *etp =
					&erase_types[ei];

				if ((etp->exp != 0)
				    && SPI_NOR_IS_ALIGNED(addr, etp->exp)
				    && (size >= BIT(etp->exp))
				    && ((bet == NULL)
					|| (etp->exp > bet->exp))) {
					bet = etp;
					cmd_erase.Instruction = bet->cmd;
				} else if (bet == NULL) {
					/* Use the default sector erase cmd */
					if (dev_cfg->data_mode == OSPI_OPI_MODE) {
						cmd_erase.Instruction = SPI_NOR_OCMD_SE;
					} else {
						cmd_erase.Instruction =
							(stm32_ospi_hal_address_size(dev) ==
							HAL_OSPI_ADDRESS_32_BITS)
							? SPI_NOR_CMD_SE_4B
							: SPI_NOR_CMD_SE;
					}
				}
			}
			LOG_DBG("Sector/Block Erase addr 0x%x, asize 0x%x amode 0x%x instr 0x%x",
				cmd_erase.Address, cmd_erase.AddressSize,
				cmd_erase.AddressMode, cmd_erase.Instruction);

			ospi_send_cmd(dev, &cmd_erase);

			/* Advance by the size the issued instruction erased */
			if (bet != NULL) {
				addr += BIT(bet->exp);
				size -= BIT(bet->exp);
			} else {
				addr += SPI_NOR_SECTOR_SIZE;
				size -= SPI_NOR_SECTOR_SIZE;
			}

			ret = stm32_ospi_mem_ready(dev_data, dev_cfg->data_mode,
						   dev_cfg->data_rate);
		}
	}
	/* Unconditional goto keeps the label referenced when the
	 * CONFIG_STM32_MEMMAP paths are compiled out.
	 */
	goto end_erase;

end_erase:
	ospi_unlock_thread(dev);
	return ret;
}
/* Function to read the flash with possible OSPI/SPI and STR/DTR.
 * With CONFIG_STM32_MEMMAP the flash is read through the memory-mapped
 * window with memcpy; otherwise an indirect-mode read command is issued.
 */
static int flash_stm32_ospi_read(const struct device *dev, off_t addr,
				 void *data, size_t size)
{
	int ret = 0;

	if (!ospi_address_is_valid(dev, addr, size)) {
		LOG_ERR("Error: address or size exceeds expected values: "
			"addr 0x%lx, size %zu", (long)addr, size);
		return -EINVAL;
	}

	/* Ignore zero size read */
	if (size == 0) {
		return 0;
	}

#ifdef CONFIG_STM32_MEMMAP
	/* If not MemMapped then configure it */
	if (!stm32_ospi_is_memorymap(dev)) {
		if (stm32_ospi_set_memorymap(dev) != 0) {
			LOG_ERR("READ failed: cannot enable MemoryMap");
			return -EIO;
		}
	}

	/* Now in MemMapped mode : read with memcopy */
	LOG_DBG("MemoryMapped Read offset: 0x%lx, len: %zu",
		(long)(STM32_OSPI_BASE_ADDRESS + addr),
		size);
	memcpy(data, (uint8_t *)STM32_OSPI_BASE_ADDRESS + addr, size);
#else /* CONFIG_STM32_MEMMAP */
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	OSPI_RegularCmdTypeDef cmd = ospi_prepare_cmd(dev_cfg->data_mode, dev_cfg->data_rate);

	/* In non-OPI modes the line usage follows the SFDP read mode */
	if (dev_cfg->data_mode != OSPI_OPI_MODE) {
		switch (dev_data->read_mode) {
		case JESD216_MODE_112: {
			cmd.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			cmd.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
			cmd.DataMode = HAL_OSPI_DATA_2_LINES;
			break;
		}
		case JESD216_MODE_122: {
			cmd.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			cmd.AddressMode = HAL_OSPI_ADDRESS_2_LINES;
			cmd.DataMode = HAL_OSPI_DATA_2_LINES;
			break;
		}
		case JESD216_MODE_114: {
			cmd.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			cmd.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
			cmd.DataMode = HAL_OSPI_DATA_4_LINES;
			break;
		}
		case JESD216_MODE_144: {
			cmd.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			cmd.AddressMode = HAL_OSPI_ADDRESS_4_LINES;
			cmd.DataMode = HAL_OSPI_DATA_4_LINES;
			break;
		}
		default:
			/* use the mode from ospi_prepare_cmd */
			break;
		}
	}

	/* Instruction and DummyCycles are set below */
	cmd.Address = addr; /* AddressSize is 32 bits in OctoSPI mode */
	cmd.AddressSize = stm32_ospi_hal_address_size(dev);
	/* DataSize is set by the read cmd */

	/* Configure other parameters */
	if (dev_cfg->data_rate == OSPI_DTR_TRANSFER) {
		/* DTR transfer rate (==> Octal mode) */
		cmd.Instruction = SPI_NOR_OCMD_DTR_RD;
		cmd.DummyCycles = SPI_NOR_DUMMY_RD_OCTAL_DTR;
	} else {
		/* STR transfer rate */
		if (dev_cfg->data_mode == OSPI_OPI_MODE) {
			/* OPI and STR */
			cmd.Instruction = SPI_NOR_OCMD_RD;
			cmd.DummyCycles = SPI_NOR_DUMMY_RD_OCTAL;
		} else {
			/* use SFDP:BFP read instruction */
			cmd.Instruction = dev_data->read_opcode;
			cmd.DummyCycles = dev_data->read_dummy;
			/* in SPI and STR : expecting SPI_NOR_CMD_READ_FAST_4B */
		}
	}

	LOG_DBG("OSPI: read %zu data", size);
	ospi_lock_thread(dev);

	ret = ospi_read_access(dev, &cmd, data, size);

	ospi_unlock_thread(dev);
#endif /* CONFIG_STM32_MEMMAP */

	return ret;
}
/*
 * Function to write the flash (page program) : with possible OSPI/SPI and STR/DTR
 * NOTE: writing in MemoryMapped mode is not guaranted
 */
static int flash_stm32_ospi_write(const struct device *dev, off_t addr,
				  const void *data, size_t size)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	size_t to_write;
	int ret = 0;

	if (!ospi_address_is_valid(dev, addr, size)) {
		LOG_ERR("Error: address or size exceeds expected values: "
			"addr 0x%lx, size %zu", (long)addr, size);
		return -EINVAL;
	}

	/* Ignore zero size write */
	if (size == 0) {
		return 0;
	}

	ospi_lock_thread(dev);

#ifdef CONFIG_STM32_MEMMAP
	if (stm32_ospi_is_memorymap(dev)) {
		/* Abort ongoing transfer to force CS high/BUSY deasserted */
		ret = stm32_ospi_abort(dev);
		if (ret != 0) {
			LOG_ERR("Failed to abort memory-mapped access before write");
			goto end_write;
		}
	}
	/* Continue with Indirect Mode */
#endif /* CONFIG_STM32_MEMMAP */

	/* page program for STR or DTR mode */
	OSPI_RegularCmdTypeDef cmd_pp = ospi_prepare_cmd(dev_cfg->data_mode, dev_cfg->data_rate);

	/* using 32bits address also in SPI/STR mode */
	cmd_pp.Instruction = dev_data->write_opcode;

	/* In non-OPI modes the line usage follows the SFDP program opcode */
	if (dev_cfg->data_mode != OSPI_OPI_MODE) {
		switch (cmd_pp.Instruction) {
		case SPI_NOR_CMD_PP_4B:
			__fallthrough;
		case SPI_NOR_CMD_PP: {
			cmd_pp.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			cmd_pp.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
			cmd_pp.DataMode = HAL_OSPI_DATA_1_LINE;
			break;
		}
		case SPI_NOR_CMD_PP_1_1_4_4B:
			__fallthrough;
		case SPI_NOR_CMD_PP_1_1_4: {
			cmd_pp.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			cmd_pp.AddressMode = HAL_OSPI_ADDRESS_1_LINE;
			cmd_pp.DataMode = HAL_OSPI_DATA_4_LINES;
			break;
		}
		case SPI_NOR_CMD_PP_1_4_4_4B:
			__fallthrough;
		case SPI_NOR_CMD_PP_1_4_4: {
			cmd_pp.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE;
			cmd_pp.AddressMode = HAL_OSPI_ADDRESS_4_LINES;
			cmd_pp.DataMode = HAL_OSPI_DATA_4_LINES;
			break;
		}
		default:
			/* use the mode from ospi_prepare_cmd */
			break;
		}
	}

	/* NOTE(review): Address is reassigned on every loop iteration below;
	 * this initial assignment is redundant but harmless.
	 */
	cmd_pp.Address = addr;
	cmd_pp.AddressSize = stm32_ospi_hal_address_size(dev);
	cmd_pp.DummyCycles = 0U;

	LOG_DBG("OSPI: write %zu data", size);

	ret = stm32_ospi_mem_ready(dev_data,
		dev_cfg->data_mode, dev_cfg->data_rate);
	if (ret != 0) {
		ospi_unlock_thread(dev);
		LOG_ERR("OSPI: write not ready");
		return -EIO;
	}

	while ((size > 0) && (ret == 0)) {
		to_write = size;

		/* Each page program must be preceded by a Write Enable */
		ret = stm32_ospi_write_enable(dev_data,
			dev_cfg->data_mode, dev_cfg->data_rate);
		if (ret != 0) {
			LOG_ERR("OSPI: write not enabled");
			break;
		}

		/* Don't write more than a page. */
		if (to_write >= SPI_NOR_PAGE_SIZE) {
			to_write = SPI_NOR_PAGE_SIZE;
		}

		/* Don't write across a page boundary */
		if (((addr + to_write - 1U) / SPI_NOR_PAGE_SIZE)
		    != (addr / SPI_NOR_PAGE_SIZE)) {
			to_write = SPI_NOR_PAGE_SIZE -
				   (addr % SPI_NOR_PAGE_SIZE);
		}

		cmd_pp.Address = addr;

		ret = ospi_write_access(dev, &cmd_pp, data, to_write);
		if (ret != 0) {
			LOG_ERR("OSPI: write not access");
			break;
		}

		size -= to_write;
		data = (const uint8_t *)data + to_write;
		addr += to_write;

		/* Configure automatic polling mode to wait for end of program */
		ret = stm32_ospi_mem_ready(dev_data,
			dev_cfg->data_mode, dev_cfg->data_rate);
		if (ret != 0) {
			LOG_ERR("OSPI: write PP not ready");
			break;
		}
	}
	/* Unconditional goto keeps the label referenced when the
	 * CONFIG_STM32_MEMMAP paths are compiled out.
	 */
	goto end_write;

end_write:
	ospi_unlock_thread(dev);

	return ret;
}
/* Constant flash characteristics reported through the flash API:
 * single-byte write granularity, bits erase to 1 (0xff).
 */
static const struct flash_parameters flash_stm32_ospi_parameters = {
	.write_block_size = 1,
	.erase_value = 0xff
};
/* Flash API: return the (device-independent) constant flash parameters */
static const struct flash_parameters *
flash_stm32_ospi_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_stm32_ospi_parameters;
}
/* OSPI peripheral interrupt service routine: delegate to the ST HAL */
static void flash_stm32_ospi_isr(const struct device *dev)
{
	struct flash_stm32_ospi_data *data = dev->data;

	HAL_OSPI_IRQHandler(&data->hospi);
}
#if !defined(CONFIG_SOC_SERIES_STM32H7X)
/* weak function required for HAL compilation; overridden when a real DMA
 * driver is present - this stub just reports success
 */
__weak HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma)
{
	return HAL_OK;
}
/* weak function required for HAL compilation; overridden when a real DMA
 * driver is present - this stub just reports success
 */
__weak HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma)
{
	return HAL_OK;
}
#endif /* !CONFIG_SOC_SERIES_STM32H7X */
/* This function is executed in the interrupt context */
#if STM32_OSPI_USE_DMA
static void ospi_dma_callback(const struct device *dev, void *arg,
			      uint32_t channel, int status)
{
	/* Runs in interrupt context: forward completion to the HAL DMA core */
	DMA_HandleTypeDef *hdma = arg;

	ARG_UNUSED(dev);

	if (status < 0) {
		LOG_ERR("DMA callback error with channel %d.", channel);
	}

	HAL_DMA_IRQHandler(hdma);
}
#endif
/* HAL transfer-error callback: record the failure and wake the waiter. */
void HAL_OSPI_ErrorCallback(OSPI_HandleTypeDef *hospi)
{
	struct flash_stm32_ospi_data *data =
		CONTAINER_OF(hospi, struct flash_stm32_ospi_data, hospi);

	LOG_DBG("Error cb");
	data->cmd_status = -EIO;
	k_sem_give(&data->sync);
}
/* HAL command-complete callback: wake the thread waiting on the transfer. */
void HAL_OSPI_CmdCpltCallback(OSPI_HandleTypeDef *hospi)
{
	struct flash_stm32_ospi_data *data =
		CONTAINER_OF(hospi, struct flash_stm32_ospi_data, hospi);

	LOG_DBG("Cmd Cplt cb");
	k_sem_give(&data->sync);
}
/* HAL receive-complete callback: wake the thread waiting on the transfer. */
void HAL_OSPI_RxCpltCallback(OSPI_HandleTypeDef *hospi)
{
	struct flash_stm32_ospi_data *data =
		CONTAINER_OF(hospi, struct flash_stm32_ospi_data, hospi);

	LOG_DBG("Rx Cplt cb");
	k_sem_give(&data->sync);
}
/* HAL transmit-complete callback: wake the thread waiting on the transfer. */
void HAL_OSPI_TxCpltCallback(OSPI_HandleTypeDef *hospi)
{
	struct flash_stm32_ospi_data *data =
		CONTAINER_OF(hospi, struct flash_stm32_ospi_data, hospi);

	LOG_DBG("Tx Cplt cb");
	k_sem_give(&data->sync);
}
/* HAL auto-polling status-match callback: wake the waiting thread. */
void HAL_OSPI_StatusMatchCallback(OSPI_HandleTypeDef *hospi)
{
	struct flash_stm32_ospi_data *data =
		CONTAINER_OF(hospi, struct flash_stm32_ospi_data, hospi);

	LOG_DBG("Status Match cb");
	k_sem_give(&data->sync);
}
/* HAL timeout callback: record the failure and wake the waiting thread. */
void HAL_OSPI_TimeOutCallback(OSPI_HandleTypeDef *hospi)
{
	struct flash_stm32_ospi_data *data =
		CONTAINER_OF(hospi, struct flash_stm32_ospi_data, hospi);

	LOG_DBG("Timeout cb");
	data->cmd_status = -EIO;
	k_sem_give(&data->sync);
}
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/* Flash API: expose the single uniform page region computed at init time */
static void flash_stm32_ospi_pages_layout(const struct device *dev,
					  const struct flash_pages_layout **layout,
					  size_t *layout_size)
{
	struct flash_stm32_ospi_data *data = dev->data;

	*layout_size = 1;
	*layout = &data->layout;
}
#endif
/* Flash driver API vtable registered with the Zephyr flash subsystem */
static const struct flash_driver_api flash_stm32_ospi_driver_api = {
	.read = flash_stm32_ospi_read,
	.write = flash_stm32_ospi_write,
	.erase = flash_stm32_ospi_erase,
	.get_parameters = flash_stm32_ospi_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = flash_stm32_ospi_pages_layout,
#endif
#if defined(CONFIG_FLASH_JESD216_API)
	.sfdp_read = ospi_read_sfdp,
	.read_jedec_id = ospi_read_jedec_id,
#endif /* CONFIG_FLASH_JESD216_API */
};
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/*
 * Compute the flash page layout exposed through the flash API.
 *
 * Chooses the smallest erase size advertised in the JESD216 erase types
 * and derives a uniform page layout from it; falls back to the standard
 * sector size when no erase type is available.
 */
static int setup_pages_layout(const struct device *dev)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *data = dev->data;
	const size_t flash_size = dev_cfg->flash_size;
	uint32_t layout_page_size = data->page_size;
	uint8_t value = 0;
	int rv = 0;

	/* Find the smallest erase size. */
	for (size_t i = 0; i < ARRAY_SIZE(data->erase_types); ++i) {
		const struct jesd216_erase_type *etp = &data->erase_types[i];

		if ((etp->cmd != 0)
		    && ((value == 0) || (etp->exp < value))) {
			value = etp->exp;
		}
	}

	/* BUGFIX: BIT(0) is 1, so testing BIT(value) against 0 could never
	 * detect "no erase type found"; test the exponent itself instead.
	 */
	uint32_t erase_size = (value == 0) ? SPI_NOR_SECTOR_SIZE : BIT(value);

	/* We need layout page size to be compatible with erase size */
	if ((layout_page_size % erase_size) != 0) {
		LOG_DBG("layout page %u not compatible with erase size %u",
			layout_page_size, erase_size);
		LOG_DBG("erase size will be used as layout page size");
		layout_page_size = erase_size;
	}

	/* Warn but accept layout page sizes that leave inaccessible
	 * space.
	 */
	if ((flash_size % layout_page_size) != 0) {
		LOG_DBG("layout page %u wastes space with device size %zu",
			layout_page_size, flash_size);
	}

	data->layout.pages_size = layout_page_size;
	data->layout.pages_count = flash_size / layout_page_size;
	LOG_DBG("layout %u x %u By pages", data->layout.pages_count,
		data->layout.pages_size);

	return rv;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* Read status register 1, 2 or 3 over the single-line SPI protocol */
static int stm32_ospi_read_status_register(const struct device *dev, uint8_t reg_num, uint8_t *reg)
{
	OSPI_RegularCmdTypeDef cmd = {
		.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE,
		.DataMode = HAL_OSPI_DATA_1_LINE,
	};

	/* Select the RDSR opcode matching the requested register */
	if (reg_num == 1U) {
		cmd.Instruction = SPI_NOR_CMD_RDSR;
	} else if (reg_num == 2U) {
		cmd.Instruction = SPI_NOR_CMD_RDSR2;
	} else if (reg_num == 3U) {
		cmd.Instruction = SPI_NOR_CMD_RDSR3;
	} else {
		return -EINVAL;
	}

	return ospi_read_access(dev, &cmd, reg, sizeof(*reg));
}
/*
 * Write status register 1, 2 or 3 over the single-line SPI protocol.
 *
 * Depending on the JESD216 QER type, writing SR1 or SR2 may require a
 * combined 2-byte WRSR transfer carrying both registers.
 * (BUGFIX: the '&regs[...]' address-of expressions had been corrupted into
 * the invalid token sequence "(R)s[...]" by an HTML-entity mangling of
 * "&reg"; restored here.)
 */
static int stm32_ospi_write_status_register(const struct device *dev, uint8_t reg_num, uint8_t reg)
{
	struct flash_stm32_ospi_data *data = dev->data;
	OSPI_RegularCmdTypeDef s_command = {
		.Instruction = SPI_NOR_CMD_WRSR,
		.InstructionMode = HAL_OSPI_INSTRUCTION_1_LINE,
		.DataMode = HAL_OSPI_DATA_1_LINE
	};
	size_t size;
	uint8_t regs[4] = { 0 };
	uint8_t *regs_p;
	int ret;

	if (reg_num == 1U) {
		size = 1U;
		regs[0] = reg;
		regs_p = &regs[0];
		/* 1 byte write clears SR2, write SR2 as well */
		if (data->qer_type == JESD216_DW15_QER_S2B1v1) {
			ret = stm32_ospi_read_status_register(dev, 2, &regs[1]);
			if (ret < 0) {
				return ret;
			}
			size = 2U;
		}
	} else if (reg_num == 2U) {
		s_command.Instruction = SPI_NOR_CMD_WRSR2;
		size = 1U;
		regs[1] = reg;
		regs_p = &regs[1];
		/* if SR2 write needs SR1 */
		if ((data->qer_type == JESD216_DW15_QER_VAL_S2B1v1) ||
		    (data->qer_type == JESD216_DW15_QER_VAL_S2B1v4) ||
		    (data->qer_type == JESD216_DW15_QER_VAL_S2B1v5)) {
			ret = stm32_ospi_read_status_register(dev, 1, &regs[0]);
			if (ret < 0) {
				return ret;
			}
			s_command.Instruction = SPI_NOR_CMD_WRSR;
			size = 2U;
			regs_p = &regs[0];
		}
	} else if (reg_num == 3U) {
		s_command.Instruction = SPI_NOR_CMD_WRSR3;
		size = 1U;
		regs[2] = reg;
		regs_p = &regs[2];
	} else {
		return -EINVAL;
	}

	return ospi_write_access(dev, &s_command, regs_p, size);
}
/*
 * Set the Quad Enable (QE) bit of the flash device according to the
 * JESD216 DW15 QER classification recorded in the driver data.
 *
 * Fix: the address-of expressions had been corrupted to the "registered"
 * sign ("&reg" mangled to a single (R) character); restored to `&reg`.
 *
 * @param dev OSPI flash device
 * @return 0 on success (also when no QE bit is needed or it is already set),
 *         -ENOTSUP for an unknown QER type, -EIO when the bit does not stick,
 *         or the first failing sub-operation's negative errno.
 */
static int stm32_ospi_enable_qe(const struct device *dev)
{
	struct flash_stm32_ospi_data *data = dev->data;
	uint8_t qe_reg_num;
	uint8_t qe_bit;
	uint8_t reg;
	int ret;

	/* Map the QER type to the status register and bit holding QE. */
	switch (data->qer_type) {
	case JESD216_DW15_QER_NONE:
		/* no QE bit, device detects reads based on opcode */
		return 0;
	case JESD216_DW15_QER_S1B6:
		qe_reg_num = 1U;
		qe_bit = BIT(6U);
		break;
	case JESD216_DW15_QER_S2B7:
		qe_reg_num = 2U;
		qe_bit = BIT(7U);
		break;
	case JESD216_DW15_QER_S2B1v1:
		__fallthrough;
	case JESD216_DW15_QER_S2B1v4:
		__fallthrough;
	case JESD216_DW15_QER_S2B1v5:
		__fallthrough;
	case JESD216_DW15_QER_S2B1v6:
		qe_reg_num = 2U;
		qe_bit = BIT(1U);
		break;
	default:
		return -ENOTSUP;
	}

	ret = stm32_ospi_read_status_register(dev, qe_reg_num, &reg);
	if (ret < 0) {
		return ret;
	}

	/* exit early if QE bit is already set */
	if ((reg & qe_bit) != 0U) {
		return 0;
	}

	/* Status register writes require a prior Write Enable. */
	ret = stm32_ospi_write_enable(data, OSPI_SPI_MODE, OSPI_STR_TRANSFER);
	if (ret < 0) {
		return ret;
	}

	reg |= qe_bit;

	ret = stm32_ospi_write_status_register(dev, qe_reg_num, reg);
	if (ret < 0) {
		return ret;
	}

	/* Wait until the (possibly non-volatile) register write completes. */
	ret = stm32_ospi_mem_ready(data, OSPI_SPI_MODE, OSPI_STR_TRANSFER);
	if (ret < 0) {
		return ret;
	}

	/* validate that QE bit is set */
	ret = stm32_ospi_read_status_register(dev, qe_reg_num, &reg);
	if (ret < 0) {
		return ret;
	}

	if ((reg & qe_bit) == 0U) {
		LOG_ERR("Status Register %u [0x%02x] not set", qe_reg_num, reg);
		ret = -EIO;
	}

	return ret;
}
static void spi_nor_process_bfp_addrbytes(const struct device *dev,
const uint8_t jesd216_bfp_addrbytes)
{
struct flash_stm32_ospi_data *data = dev->data;
if ((jesd216_bfp_addrbytes == JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_4B) ||
(jesd216_bfp_addrbytes == JESD216_SFDP_BFP_DW1_ADDRBYTES_VAL_3B4B)) {
data->address_width = 4U;
} else {
data->address_width = 3U;
}
}
static inline uint8_t spi_nor_convert_read_to_4b(const uint8_t opcode)
{
switch (opcode) {
case SPI_NOR_CMD_READ:
return SPI_NOR_CMD_READ_4B;
case SPI_NOR_CMD_DREAD:
return SPI_NOR_CMD_DREAD_4B;
case SPI_NOR_CMD_2READ:
return SPI_NOR_CMD_2READ_4B;
case SPI_NOR_CMD_QREAD:
return SPI_NOR_CMD_QREAD_4B;
case SPI_NOR_CMD_4READ:
return SPI_NOR_CMD_4READ_4B;
default:
/* use provided */
return opcode;
}
}
static inline uint8_t spi_nor_convert_write_to_4b(const uint8_t opcode)
{
switch (opcode) {
case SPI_NOR_CMD_PP:
return SPI_NOR_CMD_PP_4B;
case SPI_NOR_CMD_PP_1_1_4:
return SPI_NOR_CMD_PP_1_1_4_4B;
case SPI_NOR_CMD_PP_1_4_4:
return SPI_NOR_CMD_PP_1_4_4_4B;
default:
/* use provided */
return opcode;
}
}
/*
 * Process the JESD216 Basic Flash Parameter (BFP) table: record the erase
 * types, address width, read/write opcodes, dummy cycles and page size in
 * the driver data, and enable Quad mode if the bus is configured for it.
 *
 * Fix: flash_size is a size_t but was logged with "%u"; use "%zu" as the
 * rest of this function already does.
 *
 * @param dev OSPI flash device
 * @param php SFDP parameter header referencing the BFP table
 * @param bfp decoded BFP table
 * @return 0 on success, negative errno otherwise
 */
static int spi_nor_process_bfp(const struct device *dev,
			       const struct jesd216_param_header *php,
			       const struct jesd216_bfp *bfp)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *data = dev->data;
	/* must be kept in data mode order, ignore 1-1-1 (always supported) */
	const enum jesd216_mode_type supported_read_modes[] = { JESD216_MODE_112, JESD216_MODE_122,
								JESD216_MODE_114,
								JESD216_MODE_144 };
	size_t supported_read_modes_max_idx;
	struct jesd216_erase_type *etp = data->erase_types;
	size_t idx;
	/* BFP density is expressed in bits */
	const size_t flash_size = jesd216_bfp_density(bfp) / 8U;
	struct jesd216_instr read_instr = { 0 };
	struct jesd216_bfp_dw15 dw15;

	if (flash_size != dev_cfg->flash_size) {
		LOG_DBG("Unexpected flash size: %zu", flash_size);
	}

	LOG_DBG("%s: %u MiBy flash", dev->name, (uint32_t)(flash_size >> 20));

	/* Copy over the erase types, preserving their order. (The
	 * Sector Map Parameter table references them by index.)
	 */
	memset(data->erase_types, 0, sizeof(data->erase_types));
	for (idx = 1U; idx <= ARRAY_SIZE(data->erase_types); ++idx) {
		/* etp advances even on failure so indices stay aligned */
		if (jesd216_bfp_erase(bfp, idx, etp) == 0) {
			LOG_DBG("Erase %u with %02x",
				(uint32_t)BIT(etp->exp), etp->cmd);
		}
		++etp;
	}

	spi_nor_process_bfp_addrbytes(dev, jesd216_bfp_addrbytes(bfp));
	LOG_DBG("Address width: %u Bytes", data->address_width);

	/* use PP opcode based on configured data mode if nothing is set in DTS */
	if (data->write_opcode == SPI_NOR_WRITEOC_NONE) {
		switch (dev_cfg->data_mode) {
		case OSPI_OPI_MODE:
			data->write_opcode = SPI_NOR_OCMD_PAGE_PRG;
			break;
		case OSPI_QUAD_MODE:
			data->write_opcode = SPI_NOR_CMD_PP_1_4_4;
			break;
		case OSPI_DUAL_MODE:
			data->write_opcode = SPI_NOR_CMD_PP_1_1_2;
			break;
		default:
			data->write_opcode = SPI_NOR_CMD_PP;
			break;
		}
	}

	if (dev_cfg->data_mode != OSPI_OPI_MODE) {
		/* determine supported read modes, begin from the slowest */
		data->read_mode = JESD216_MODE_111;
		data->read_opcode = SPI_NOR_CMD_READ;
		data->read_dummy = 0U;

		if (dev_cfg->data_mode != OSPI_SPI_MODE) {
			if (dev_cfg->data_mode == OSPI_DUAL_MODE) {
				/* the index of JESD216_MODE_114 in supported_read_modes */
				supported_read_modes_max_idx = 2U;
			} else {
				supported_read_modes_max_idx = ARRAY_SIZE(supported_read_modes);
			}

			/* the last supported mode found wins (fastest) */
			for (idx = 0U; idx < supported_read_modes_max_idx; ++idx) {
				if (jesd216_bfp_read_support(php, bfp, supported_read_modes[idx],
							     &read_instr) < 0) {
					/* not supported */
					continue;
				}

				LOG_DBG("Supports read mode: %d, instr: 0x%X",
					supported_read_modes[idx], read_instr.instr);
				data->read_mode = supported_read_modes[idx];
				data->read_opcode = read_instr.instr;
				data->read_dummy =
					(read_instr.wait_states + read_instr.mode_clocks);
			}
		}

		/* convert 3-Byte opcodes to 4-Byte (if required) */
		if (IS_ENABLED(DT_INST_PROP(0, four_byte_opcodes))) {
			if (data->address_width != 4U) {
				LOG_DBG("4-Byte opcodes require 4-Byte address width");
				return -ENOTSUP;
			}
			data->read_opcode = spi_nor_convert_read_to_4b(data->read_opcode);
			data->write_opcode = spi_nor_convert_write_to_4b(data->write_opcode);
		}

		/* enable quad mode (if required) */
		if (dev_cfg->data_mode == OSPI_QUAD_MODE) {
			if (jesd216_bfp_decode_dw15(php, bfp, &dw15) < 0) {
				/* will use QER from DTS or default (refer to device data) */
				LOG_WRN("Unable to decode QE requirement [DW15]");
			} else {
				/* bypass DTS QER value */
				data->qer_type = dw15.qer;
			}

			LOG_DBG("QE requirement mode: %x", data->qer_type);

			if (stm32_ospi_enable_qe(dev) < 0) {
				LOG_ERR("Failed to enable QUAD mode");
				return -EIO;
			}

			LOG_DBG("QUAD mode enabled");
		}
	}

	data->page_size = jesd216_bfp_page_size(php, bfp);

	LOG_DBG("Page size %u bytes", data->page_size);
	LOG_DBG("Flash size %zu bytes", flash_size);
	LOG_DBG("Using read mode: %d, instr: 0x%X, dummy cycles: %u",
		data->read_mode, data->read_opcode, data->read_dummy);
	LOG_DBG("Using write instr: 0x%X", data->write_opcode);

	return 0;
}
/*
 * Driver init entry point.
 *
 * Sequence: validate mode/rate combination, apply pinctrl, (optionally)
 * short-circuit if already memory-mapped, set up DMA, enable clocks,
 * compute the prescaler, initialize the OSPI (and OSPIM / delay-block)
 * peripherals, reset and probe the NOR device, read the SFDP tables to
 * discover flash parameters, then set up the page layout and (optionally)
 * switch to memory-mapped operation.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int flash_stm32_ospi_init(const struct device *dev)
{
	const struct flash_stm32_ospi_config *dev_cfg = dev->config;
	struct flash_stm32_ospi_data *dev_data = dev->data;
	uint32_t ahb_clock_freq;
	uint32_t prescaler = STM32_OSPI_CLOCK_PRESCALER_MIN;
	int ret;

	/* The SPI/DTR is not a valid config of data_mode/data_rate according to the DTS */
	if ((dev_cfg->data_mode != OSPI_OPI_MODE)
		&& (dev_cfg->data_rate == OSPI_DTR_TRANSFER)) {
		/* DTR transfers are only supported together with octal (OPI) mode */
		LOG_ERR("OSPI mode SPI|DUAL|QUAD/DTR is not valid");
		return -ENOTSUP;
	}

	/* Signals configuration */
	ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("OSPI pinctrl setup failed (%d)", ret);
		return ret;
	}

	if (!device_is_ready(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE))) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

#ifdef CONFIG_STM32_MEMMAP
	/* If MemoryMapped then configure skip init */
	if (stm32_ospi_is_memorymap(dev)) {
		LOG_DBG("NOR init'd in MemMapped mode\n");
		/* Force HAL instance in correct state */
		dev_data->hospi.State = HAL_OSPI_STATE_BUSY_MEM_MAPPED;
		return 0;
	}
#endif /* CONFIG_STM32_MEMMAP */

#if STM32_OSPI_USE_DMA
	/*
	 * DMA configuration
	 * Due to use of OSPI HAL API in current driver,
	 * both HAL and Zephyr DMA drivers should be configured.
	 * The required configuration for Zephyr DMA driver should only provide
	 * the minimum information to inform the DMA slot will be in used and
	 * how to route callbacks.
	 */
	struct dma_config dma_cfg = dev_data->dma.cfg;
	/* static: the HAL keeps a pointer to this handle beyond init */
	static DMA_HandleTypeDef hdma;

	if (!device_is_ready(dev_data->dma.dev)) {
		LOG_ERR("%s device not ready", dev_data->dma.dev->name);
		return -ENODEV;
	}

	/* Proceed to the minimum Zephyr DMA driver init */
	dma_cfg.user_data = &hdma;
	/* HACK: This field is used to inform driver that it is overridden */
	dma_cfg.linked_channel = STM32_DMA_HAL_OVERRIDE;
	/* Because of the STREAM OFFSET, the DMA channel given here is from 1 - 8 */
	ret = dma_config(dev_data->dma.dev,
			 (dev_data->dma.channel + STM32_DMA_STREAM_OFFSET), &dma_cfg);
	if (ret != 0) {
		LOG_ERR("Failed to configure DMA channel %d",
			dev_data->dma.channel + STM32_DMA_STREAM_OFFSET);
		return ret;
	}

	/* Proceed to the HAL DMA driver init */
	if (dma_cfg.source_data_size != dma_cfg.dest_data_size) {
		LOG_ERR("Source and destination data sizes not aligned");
		return -EINVAL;
	}

	/* data size is a power of two; index into the HAL width tables */
	int index = find_lsb_set(dma_cfg.source_data_size) - 1;

#if CONFIG_DMA_STM32U5
	/* Fill the structure for dma init */
	hdma.Init.BlkHWRequest = DMA_BREQ_SINGLE_BURST;
	hdma.Init.SrcInc = DMA_SINC_FIXED;
	hdma.Init.DestInc = DMA_DINC_INCREMENTED;
	hdma.Init.SrcDataWidth = table_src_size[index];
	hdma.Init.DestDataWidth = table_dest_size[index];
	hdma.Init.SrcBurstLength = 4;
	hdma.Init.DestBurstLength = 4;
	hdma.Init.TransferAllocatedPort = DMA_SRC_ALLOCATED_PORT0 | DMA_DEST_ALLOCATED_PORT1;
	hdma.Init.TransferEventMode = DMA_TCEM_BLOCK_TRANSFER;
#else
	hdma.Init.PeriphDataAlignment = table_p_size[index];
	hdma.Init.MemDataAlignment = table_m_size[index];
	hdma.Init.PeriphInc = DMA_PINC_DISABLE;
	hdma.Init.MemInc = DMA_MINC_ENABLE;
#endif /* CONFIG_DMA_STM32U5 */
	hdma.Init.Mode = DMA_NORMAL;
	hdma.Init.Priority = table_priority[dma_cfg.channel_priority];
	hdma.Init.Direction = DMA_PERIPH_TO_MEMORY;
#ifdef CONFIG_DMA_STM32_V1
	/* TODO: Not tested in this configuration */
	hdma.Init.Channel = dma_cfg.dma_slot;
	hdma.Instance = __LL_DMA_GET_STREAM_INSTANCE(dev_data->dma.reg,
						     dev_data->dma.channel);
#else
	hdma.Init.Request = dma_cfg.dma_slot;
#if CONFIG_DMA_STM32U5
	hdma.Instance = LL_DMA_GET_CHANNEL_INSTANCE(dev_data->dma.reg,
						    dev_data->dma.channel);
#elif defined(CONFIG_DMAMUX_STM32)
	/*
	 * HAL expects a valid DMA channel (not DMAMUX).
	 * The channel is from 0 to 7 because of the STM32_DMA_STREAM_OFFSET in the dma_stm32 driver
	 */
	hdma.Instance = __LL_DMA_GET_CHANNEL_INSTANCE(dev_data->dma.reg,
						      dev_data->dma.channel);
#else
	hdma.Instance = __LL_DMA_GET_CHANNEL_INSTANCE(dev_data->dma.reg,
						      dev_data->dma.channel-1);
#endif /* CONFIG_DMA_STM32U5 */
#endif /* CONFIG_DMA_STM32_V1 */

	/* Initialize DMA HAL */
	__HAL_LINKDMA(&dev_data->hospi, hdma, hdma);
	if (HAL_DMA_Init(&hdma) != HAL_OK) {
		LOG_ERR("OSPI DMA Init failed");
		return -EIO;
	}
	LOG_INF("OSPI with DMA transfer");
#endif /* STM32_OSPI_USE_DMA */

	/* Clock configuration */
	if (clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
			     (clock_control_subsys_t) &dev_cfg->pclken) != 0) {
		LOG_ERR("Could not enable OSPI clock");
		return -EIO;
	}
	/* Alternate clock config for peripheral if any */
#if DT_CLOCKS_HAS_NAME(STM32_OSPI_NODE, ospi_ker)
	if (clock_control_configure(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
				    (clock_control_subsys_t) &dev_cfg->pclken_ker,
				    NULL) != 0) {
		LOG_ERR("Could not select OSPI domain clock");
		return -EIO;
	}
	if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
				   (clock_control_subsys_t) &dev_cfg->pclken_ker,
				   &ahb_clock_freq) < 0) {
		LOG_ERR("Failed call clock_control_get_rate(pclken_ker)");
		return -EIO;
	}
#else
	if (clock_control_get_rate(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
				   (clock_control_subsys_t) &dev_cfg->pclken,
				   &ahb_clock_freq) < 0) {
		LOG_ERR("Failed call clock_control_get_rate(pclken)");
		return -EIO;
	}
#endif
#if DT_CLOCKS_HAS_NAME(STM32_OSPI_NODE, ospi_mgr)
	if (clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
			     (clock_control_subsys_t) &dev_cfg->pclken_mgr) != 0) {
		LOG_ERR("Could not enable OSPI Manager clock");
		return -EIO;
	}
#endif

	/* Pick the smallest prescaler keeping the bus at or below max_frequency */
	for (; prescaler <= STM32_OSPI_CLOCK_PRESCALER_MAX; prescaler++) {
		uint32_t clk = STM32_OSPI_CLOCK_COMPUTE(ahb_clock_freq, prescaler);

		if (clk <= dev_cfg->max_frequency) {
			break;
		}
	}
	__ASSERT_NO_MSG(prescaler >= STM32_OSPI_CLOCK_PRESCALER_MIN &&
			prescaler <= STM32_OSPI_CLOCK_PRESCALER_MAX);

	/* Initialize OSPI HAL structure completely */
	dev_data->hospi.Init.FifoThreshold = 4;
	dev_data->hospi.Init.ClockPrescaler = prescaler;
#if defined(CONFIG_SOC_SERIES_STM32H5X)
	/* The stm32h5xx_hal_xspi does not reduce DEVSIZE before writing the DCR1 */
	dev_data->hospi.Init.DeviceSize = find_lsb_set(dev_cfg->flash_size) - 2;
#else
	/* Give a bit position from 0 to 31 to the HAL init for the DCR1 reg */
	dev_data->hospi.Init.DeviceSize = find_lsb_set(dev_cfg->flash_size) - 1;
#endif /* CONFIG_SOC_SERIES_STM32H5X */
	dev_data->hospi.Init.DualQuad = HAL_OSPI_DUALQUAD_DISABLE;
	dev_data->hospi.Init.ChipSelectHighTime = 2;
	dev_data->hospi.Init.FreeRunningClock = HAL_OSPI_FREERUNCLK_DISABLE;
	dev_data->hospi.Init.ClockMode = HAL_OSPI_CLOCK_MODE_0;
#if defined(OCTOSPI_DCR2_WRAPSIZE)
	dev_data->hospi.Init.WrapSize = HAL_OSPI_WRAP_NOT_SUPPORTED;
#endif /* OCTOSPI_DCR2_WRAPSIZE */
	/* STR mode else Macronix for DTR mode */
	if (dev_cfg->data_rate == OSPI_DTR_TRANSFER) {
		dev_data->hospi.Init.MemoryType = HAL_OSPI_MEMTYPE_MACRONIX;
		dev_data->hospi.Init.DelayHoldQuarterCycle = HAL_OSPI_DHQC_ENABLE;
	} else {
		dev_data->hospi.Init.MemoryType = HAL_OSPI_MEMTYPE_MICRON;
		dev_data->hospi.Init.DelayHoldQuarterCycle = HAL_OSPI_DHQC_DISABLE;
	}
	dev_data->hospi.Init.ChipSelectBoundary = 0;
#if STM32_OSPI_DLYB_BYPASSED
	dev_data->hospi.Init.DelayBlockBypass = HAL_OSPI_DELAY_BLOCK_BYPASSED;
#else
	dev_data->hospi.Init.DelayBlockBypass = HAL_OSPI_DELAY_BLOCK_USED;
#endif /* STM32_OSPI_DLYB_BYPASSED */
#if defined(OCTOSPI_DCR4_REFRESH)
	dev_data->hospi.Init.Refresh = 0;
#endif /* OCTOSPI_DCR4_REFRESH */

	if (HAL_OSPI_Init(&dev_data->hospi) != HAL_OK) {
		LOG_ERR("OSPI Init failed");
		return -EIO;
	}

	LOG_DBG("OSPI Init'd");

#if defined(OCTOSPIM)
	/* OCTOSPI I/O manager init Function */
	OSPIM_CfgTypeDef ospi_mgr_cfg = {0};

	if (dev_data->hospi.Instance == OCTOSPI1) {
		ospi_mgr_cfg.ClkPort = DT_OSPI_PROP_OR(clk_port, 1);
		ospi_mgr_cfg.DQSPort = DT_OSPI_PROP_OR(dqs_port, 1);
		ospi_mgr_cfg.NCSPort = DT_OSPI_PROP_OR(ncs_port, 1);
		ospi_mgr_cfg.IOLowPort = DT_OSPI_IO_PORT_PROP_OR(io_low_port,
								 HAL_OSPIM_IOPORT_1_LOW);
		ospi_mgr_cfg.IOHighPort = DT_OSPI_IO_PORT_PROP_OR(io_high_port,
								  HAL_OSPIM_IOPORT_1_HIGH);
	} else if (dev_data->hospi.Instance == OCTOSPI2) {
		ospi_mgr_cfg.ClkPort = DT_OSPI_PROP_OR(clk_port, 2);
		ospi_mgr_cfg.DQSPort = DT_OSPI_PROP_OR(dqs_port, 2);
		ospi_mgr_cfg.NCSPort = DT_OSPI_PROP_OR(ncs_port, 2);
		ospi_mgr_cfg.IOLowPort = DT_OSPI_IO_PORT_PROP_OR(io_low_port,
								 HAL_OSPIM_IOPORT_2_LOW);
		ospi_mgr_cfg.IOHighPort = DT_OSPI_IO_PORT_PROP_OR(io_high_port,
								  HAL_OSPIM_IOPORT_2_HIGH);
	}
#if defined(OCTOSPIM_CR_MUXEN)
	ospi_mgr_cfg.Req2AckTime = 1;
#endif /* OCTOSPIM_CR_MUXEN */
	if (HAL_OSPIM_Config(&dev_data->hospi, &ospi_mgr_cfg,
	    HAL_OSPI_TIMEOUT_DEFAULT_VALUE) != HAL_OK) {
		LOG_ERR("OSPI M config failed");
		return -EIO;
	}
#if defined(CONFIG_SOC_SERIES_STM32U5X)
	/* OCTOSPI2 delay block init Function */
	HAL_OSPI_DLYB_CfgTypeDef ospi_delay_block_cfg = {0};

	ospi_delay_block_cfg.Units = 56;
	ospi_delay_block_cfg.PhaseSel = 2;
	if (HAL_OSPI_DLYB_SetConfig(&dev_data->hospi, &ospi_delay_block_cfg) != HAL_OK) {
		LOG_ERR("OSPI DelayBlock failed");
		return -EIO;
	}
#endif /* CONFIG_SOC_SERIES_STM32U5X */
#endif /* OCTOSPIM */

#if defined(CONFIG_SOC_SERIES_STM32H5X)
	/* OCTOSPI1 delay block init Function */
	HAL_XSPI_DLYB_CfgTypeDef xspi_delay_block_cfg = {0};

	(void)HAL_XSPI_DLYB_GetClockPeriod(&dev_data->hospi, &xspi_delay_block_cfg);
	/* with DTR, set the PhaseSel/4 (empiric value from stm32Cube) */
	xspi_delay_block_cfg.PhaseSel /= 4;
	if (HAL_XSPI_DLYB_SetConfig(&dev_data->hospi, &xspi_delay_block_cfg) != HAL_OK) {
		LOG_ERR("XSPI DelayBlock failed");
		return -EIO;
	}

	LOG_DBG("Delay Block Init");
#endif /* CONFIG_SOC_SERIES_STM32H5X */

	/* Initialize semaphores */
	k_sem_init(&dev_data->sem, 1, 1);
	k_sem_init(&dev_data->sync, 0, 1);

	/* Run IRQ init */
	dev_cfg->irq_config(dev);

	/* Reset NOR flash memory : still with the SPI/STR config for the NOR */
	if (stm32_ospi_mem_reset(dev) != 0) {
		LOG_ERR("OSPI reset failed");
		return -EIO;
	}

	LOG_DBG("Reset Mem (SPI/STR)");

	/* Check if memory is ready in the SPI/STR mode */
	if (stm32_ospi_mem_ready(dev_data,
		OSPI_SPI_MODE, OSPI_STR_TRANSFER) != 0) {
		LOG_ERR("OSPI memory not ready");
		return -EIO;
	}

	LOG_DBG("Mem Ready (SPI/STR)");

#if defined(CONFIG_FLASH_JESD216_API)
	/* Process with the RDID (jedec read ID) instruction at init and fill jedec_id Table */
	ret = stm32_ospi_read_jedec_id(dev);
	if (ret != 0) {
		LOG_ERR("Read ID failed: %d", ret);
		return ret;
	}
#endif /* CONFIG_FLASH_JESD216_API */

	if (stm32_ospi_config_mem(dev) != 0) {
		LOG_ERR("OSPI mode not config'd (%u rate %u)",
			dev_cfg->data_mode, dev_cfg->data_rate);
		return -EIO;
	}

	/* Send the instruction to read the SFDP */
	const uint8_t decl_nph = 2;
	union {
		/* We only process BFP so use one parameter block */
		uint8_t raw[JESD216_SFDP_SIZE(decl_nph)];
		struct jesd216_sfdp_header sfdp;
	} u;
	const struct jesd216_sfdp_header *hp = &u.sfdp;

	ret = ospi_read_sfdp(dev, 0, u.raw, sizeof(u.raw));
	if (ret != 0) {
		LOG_ERR("SFDP read failed: %d", ret);
		return ret;
	}

	uint32_t magic = jesd216_sfdp_magic(hp);

	if (magic != JESD216_SFDP_MAGIC) {
		LOG_ERR("SFDP magic %08x invalid", magic);
		return -EINVAL;
	}

	LOG_DBG("%s: SFDP v %u.%u AP %x with %u PH", dev->name,
		hp->rev_major, hp->rev_minor, hp->access, 1 + hp->nph);

	/* Walk the parameter headers (at most decl_nph of them) */
	const struct jesd216_param_header *php = hp->phdr;
	const struct jesd216_param_header *phpe = php +
		MIN(decl_nph, 1 + hp->nph);

	while (php != phpe) {
		uint16_t id = jesd216_param_id(php);

		LOG_DBG("PH%u: %04x rev %u.%u: %u DW @ %x",
			(php - hp->phdr), id, php->rev_major, php->rev_minor,
			php->len_dw, jesd216_param_addr(php));

		if (id == JESD216_SFDP_PARAM_ID_BFP) {
			union {
				uint32_t dw[20];
				struct jesd216_bfp bfp;
			} u2;
			const struct jesd216_bfp *bfp = &u2.bfp;

			ret = ospi_read_sfdp(dev, jesd216_param_addr(php),
					     (uint8_t *)u2.dw,
					     MIN(sizeof(uint32_t) * php->len_dw, sizeof(u2.dw)));
			if (ret == 0) {
				ret = spi_nor_process_bfp(dev, php, bfp);
			}

			if (ret != 0) {
				LOG_ERR("SFDP BFP failed: %d", ret);
				break;
			}
		}
		if (id == JESD216_SFDP_PARAM_ID_4B_ADDR_INSTR) {

			if (dev_data->address_width == 4U) {
				/*
				 * Check table 4 byte address instruction table to get supported
				 * erase opcodes when running in 4 byte address mode
				 */
				union {
					uint32_t dw[2];
					struct {
						uint32_t dummy;
						uint8_t type[4];
					} types;
				} u2;
				ret = ospi_read_sfdp(dev, jesd216_param_addr(php),
					     (uint8_t *)u2.dw,
					     MIN(sizeof(uint32_t) * php->len_dw, sizeof(u2.dw)));
				if (ret != 0) {
					break;
				}
				for (uint8_t ei = 0; ei < JESD216_NUM_ERASE_TYPES; ++ei) {
					struct jesd216_erase_type *etp = &dev_data->erase_types[ei];
					const uint8_t cmd = u2.types.type[ei];
					/* 0xff means not supported */
					if (cmd == 0xff) {
						etp->exp = 0;
						etp->cmd = 0;
					} else {
						etp->cmd = cmd;
					};
				}
			}
		}
		++php;
	}

#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	ret = setup_pages_layout(dev);
	if (ret != 0) {
		LOG_ERR("layout setup failed: %d", ret);
		return -ENODEV;
	}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */

#ifdef CONFIG_STM32_MEMMAP
	/* Now configure the octo Flash in MemoryMapped (access by address) */
	ret = stm32_ospi_set_memorymap(dev);
	if (ret != 0) {
		LOG_ERR("Error (%d): setting NOR in MemoryMapped mode", ret);
		return -EINVAL;
	}
	LOG_DBG("NOR octo-flash in MemoryMapped mode at 0x%lx (0x%x bytes)",
		(long)(STM32_OSPI_BASE_ADDRESS),
		dev_cfg->flash_size);
#else
	LOG_DBG("NOR octo-flash at 0x%lx (0x%x bytes)",
		(long)(STM32_OSPI_BASE_ADDRESS),
		dev_cfg->flash_size);
#endif /* CONFIG_STM32_MEMMAP */

	return 0;
}
#if STM32_OSPI_USE_DMA
/* Raw channel-config cell from the devicetree `dmas` phandle entry. */
#define DMA_CHANNEL_CONFIG(node, dir) \
	DT_DMAS_CELL_BY_NAME(node, dir, channel_config)
/* Initializer for the driver's DMA stream bookkeeping structure. */
#define OSPI_DMA_CHANNEL_INIT(node, dir) \
	.dev = DEVICE_DT_GET(DT_DMAS_CTLR(node)), \
	.channel = DT_DMAS_CELL_BY_NAME(node, dir, channel), \
	.reg = (DMA_TypeDef *)DT_REG_ADDR( \
			DT_PHANDLE_BY_NAME(node, dmas, dir)),\
	.cfg = { \
		.dma_slot = DT_DMAS_CELL_BY_NAME(node, dir, slot), \
		.source_data_size = STM32_DMA_CONFIG_PERIPHERAL_DATA_SIZE( \
					DMA_CHANNEL_CONFIG(node, dir)), \
		.dest_data_size = STM32_DMA_CONFIG_MEMORY_DATA_SIZE( \
					DMA_CHANNEL_CONFIG(node, dir)), \
		.channel_priority = STM32_DMA_CONFIG_PRIORITY( \
					DMA_CHANNEL_CONFIG(node, dir)), \
		.dma_callback = ospi_dma_callback, \
	}, \
/* NOTE(review): the comment line above terminates the macro continuation;
 * without it the trailing backslash would splice the next #define into this
 * macro body (a blank line fills this role in the pristine upstream file).
 */
/* Expands to the `.dma` member init when a `dmas` entry exists, else empty. */
#define OSPI_DMA_CHANNEL(node, dir) \
	.dma = { \
		COND_CODE_1(DT_DMAS_HAS_NAME(node, dir), \
			(OSPI_DMA_CHANNEL_INIT(node, dir)), \
			(NULL)) \
		},
#else
#define OSPI_DMA_CHANNEL(node, dir)
#endif /* STM32_OSPI_USE_DMA */
/* Token-paste helper naming a flash module from driver and flash instance. */
#define OSPI_FLASH_MODULE(drv_id, flash_id) \
		(DT_DRV_INST(drv_id), ospi_nor_flash_##flash_id)
/* Write opcode from the DT `writeoc` property, or `default_value` if absent. */
#define DT_WRITEOC_PROP_OR(inst, default_value) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, writeoc), \
		    (_CONCAT(SPI_NOR_CMD_, DT_STRING_TOKEN(DT_DRV_INST(inst), writeoc))), \
		    ((default_value)))
/* QER type from the DT `quad-enable-requirements` property, or default. */
#define DT_QER_PROP_OR(inst, default_value) \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, quad_enable_requirements), \
		    (_CONCAT(JESD216_DW15_QER_VAL_, \
			     DT_STRING_TOKEN(DT_DRV_INST(inst), quad_enable_requirements))), \
		    ((default_value)))
static void flash_stm32_ospi_irq_config_func(const struct device *dev);
PINCTRL_DT_DEFINE(STM32_OSPI_NODE);
/* Read-only per-instance configuration, filled from devicetree. */
static const struct flash_stm32_ospi_config flash_stm32_ospi_cfg = {
	.regs = (OCTOSPI_TypeDef *)DT_REG_ADDR(STM32_OSPI_NODE),
	/* Peripheral bus clock */
	.pclken = {.bus = DT_CLOCKS_CELL_BY_NAME(STM32_OSPI_NODE, ospix, bus),
		   .enr = DT_CLOCKS_CELL_BY_NAME(STM32_OSPI_NODE, ospix, bits)},
#if DT_CLOCKS_HAS_NAME(STM32_OSPI_NODE, ospi_ker)
	/* Optional dedicated kernel clock */
	.pclken_ker = {.bus = DT_CLOCKS_CELL_BY_NAME(STM32_OSPI_NODE, ospi_ker, bus),
		       .enr = DT_CLOCKS_CELL_BY_NAME(STM32_OSPI_NODE, ospi_ker, bits)},
#endif
#if DT_CLOCKS_HAS_NAME(STM32_OSPI_NODE, ospi_mgr)
	/* Optional OCTOSPI I/O manager clock */
	.pclken_mgr = {.bus = DT_CLOCKS_CELL_BY_NAME(STM32_OSPI_NODE, ospi_mgr, bus),
		       .enr = DT_CLOCKS_CELL_BY_NAME(STM32_OSPI_NODE, ospi_mgr, bits)},
#endif
	.irq_config = flash_stm32_ospi_irq_config_func,
	/* Second reg entry of the flash child node holds the device size */
	.flash_size = DT_INST_REG_ADDR_BY_IDX(0, 1),
	.max_frequency = DT_INST_PROP(0, ospi_max_frequency),
	.data_mode = DT_INST_PROP(0, spi_bus_width), /* SPI or OPI */
	.data_rate = DT_INST_PROP(0, data_rate), /* DTR or STR */
	.pcfg = PINCTRL_DT_DEV_CONFIG_GET(STM32_OSPI_NODE),
#if STM32_OSPI_RESET_GPIO
	.reset = GPIO_DT_SPEC_INST_GET(0, reset_gpios),
#endif /* STM32_OSPI_RESET_GPIO */
#if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_ospi_nor), sfdp_bfp)
	/* Optional DT-provided BFP table overriding on-device SFDP */
	.sfdp_bfp = DT_INST_PROP(0, sfdp_bfp),
#endif /* sfdp_bfp */
};
/* Mutable per-instance state; HAL handle defaults, later refined at init. */
static struct flash_stm32_ospi_data flash_stm32_ospi_dev_data = {
	.hospi = {
		.Instance = (OCTOSPI_TypeDef *)DT_REG_ADDR(STM32_OSPI_NODE),
		.Init = {
			.FifoThreshold = STM32_OSPI_FIFO_THRESHOLD,
			.SampleShifting = (DT_PROP(STM32_OSPI_NODE, ssht_enable)
					? HAL_OSPI_SAMPLE_SHIFTING_HALFCYCLE
					: HAL_OSPI_SAMPLE_SHIFTING_NONE),
			.ChipSelectHighTime = 1,
			.ClockMode = HAL_OSPI_CLOCK_MODE_0,
			},
	},
	/* QER from DT, defaulting to status-register-1 bit 6 */
	.qer_type = DT_QER_PROP_OR(0, JESD216_DW15_QER_VAL_S1B6),
	/* Write opcode from DT; NONE means "derive from data mode at init" */
	.write_opcode = DT_WRITEOC_PROP_OR(0, SPI_NOR_WRITEOC_NONE),
	.page_size = SPI_NOR_PAGE_SIZE, /* by default, to be updated by sfdp */
#if DT_NODE_HAS_PROP(DT_INST(0, st_stm32_ospi_nor), jedec_id)
	.jedec_id = DT_INST_PROP(0, jedec_id),
#endif /* jedec_id */
	OSPI_DMA_CHANNEL(STM32_OSPI_NODE, tx_rx)
};
/* Single driver instance registration. */
DEVICE_DT_INST_DEFINE(0, &flash_stm32_ospi_init, NULL,
		      &flash_stm32_ospi_dev_data, &flash_stm32_ospi_cfg,
		      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
		      &flash_stm32_ospi_driver_api);
/* Connect and enable the OCTOSPI interrupt, routed to the HAL ISR wrapper. */
static void flash_stm32_ospi_irq_config_func(const struct device *dev)
{
	IRQ_CONNECT(DT_IRQN(STM32_OSPI_NODE), DT_IRQ(STM32_OSPI_NODE, priority),
		    flash_stm32_ospi_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_IRQN(STM32_OSPI_NODE));
}
``` | /content/code_sandbox/drivers/flash/flash_stm32_ospi.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 24,282 |
```c
/*
*
*/
#define DT_DRV_COMPAT openisa_rv32m1_ftfe
#define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash)
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <errno.h>
#include <zephyr/init.h>
#include <soc.h>
#include "flash_priv.h"
#include "fsl_common.h"
#include "fsl_flash.h"
/* Per-device state for the RV32M1 FTFE flash controller. */
struct flash_priv {
	/* MCUX flash driver configuration/handle */
	flash_config_t config;
	/*
	 * HACK: flash write protection is managed in software.
	 */
	struct k_sem write_lock;
	/* Base address of the program-flash block, from FLASH_GetProperty() */
	uint32_t pflash_block_base;
};
/* Fixed flash characteristics reported through the flash API. */
static const struct flash_parameters flash_mcux_parameters = {
	.write_block_size = FSL_FEATURE_FLASH_PFLASH_BLOCK_WRITE_UNIT_SIZE,
	.erase_value = 0xff,  /* NOR-style: erased bytes read back as 0xff */
};
/*
* Interrupt vectors could be executed from flash hence the need for locking.
* The underlying MCUX driver takes care of copying the functions to SRAM.
*
* For more information, see the application note below on Read-While-Write
* path_to_url
*
*/
/*
 * Erase `len` bytes starting at flash offset `offset`.
 *
 * Takes the software write lock, then performs the erase with interrupts
 * locked (code may execute from the same flash; the MCUX driver runs the
 * critical part from SRAM).
 *
 * @return 0 on success, -EACCES if the lock cannot be taken, -EINVAL on
 *         MCUX driver failure.
 */
static int flash_mcux_erase(const struct device *dev, off_t offset,
			    size_t len)
{
	struct flash_priv *priv = dev->data;
	unsigned int irq_key;
	status_t status;
	uint32_t dst_addr;

	if (k_sem_take(&priv->write_lock, K_FOREVER) != 0) {
		return -EACCES;
	}

	dst_addr = priv->pflash_block_base + offset;

	irq_key = irq_lock();
	status = FLASH_Erase(&priv->config, dst_addr, len, kFLASH_ApiEraseKey);
	irq_unlock(irq_key);

	k_sem_give(&priv->write_lock);

	return (status != kStatus_Success) ? -EINVAL : 0;
}
/*
 * Read `len` bytes at flash offset `offset` into `data`.
 *
 * The program flash is memory-mapped, so this is a plain copy from the
 * flash address space; no locking is needed for reads.
 *
 * @return always 0 (no bounds information is exported by the MCUX API,
 *         so no range validation is performed here).
 */
static int flash_mcux_read(const struct device *dev, off_t offset,
			   void *data, size_t len)
{
	struct flash_priv *priv = dev->data;
	const void *src;

	/*
	 * The MCUX supports different flash chips whose valid ranges are
	 * hidden below the API: until the API export these ranges, we can not
	 * do any generic validation
	 */
	src = (const void *)(uintptr_t)(priv->pflash_block_base + offset);
	memcpy(data, src, len);

	return 0;
}
/*
 * Program `len` bytes from `data` at flash offset `offset`.
 *
 * Takes the software write lock, then programs with interrupts locked
 * (read-while-write hazard: vectors may live in the same flash).
 *
 * @return 0 on success, -EACCES if the lock cannot be taken, -EINVAL on
 *         MCUX driver failure.
 */
static int flash_mcux_write(const struct device *dev, off_t offset,
			    const void *data, size_t len)
{
	struct flash_priv *priv = dev->data;
	unsigned int irq_key;
	status_t status;
	uint32_t dst_addr;

	if (k_sem_take(&priv->write_lock, K_FOREVER) != 0) {
		return -EACCES;
	}

	dst_addr = priv->pflash_block_base + offset;

	irq_key = irq_lock();
	status = FLASH_Program(&priv->config, dst_addr, (uint32_t *) data, len);
	irq_unlock(irq_key);

	k_sem_give(&priv->write_lock);

	return (status != kStatus_Success) ? -EINVAL : 0;
}
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/* Single uniform page region: whole flash divided into erase blocks. */
static const struct flash_pages_layout dev_layout = {
	.pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) /
				DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
	.pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
};
/* Report the static single-region page layout defined above. */
static void flash_mcux_pages_layout(const struct device *dev,
				    const struct flash_pages_layout **layout,
				    size_t *layout_size)
{
	ARG_UNUSED(dev);

	*layout_size = 1;
	*layout = &dev_layout;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* Return the fixed flash parameters; identical for every instance. */
static const struct flash_parameters *
flash_mcux_get_parameters(const struct device *dev)
{
	(void)dev;

	return &flash_mcux_parameters;
}
/* Driver state instance; filled in by flash_mcux_init(). */
static struct flash_priv flash_data;
/* Zephyr flash driver API vtable. */
static const struct flash_driver_api flash_mcux_api = {
	.erase = flash_mcux_erase,
	.write = flash_mcux_write,
	.read = flash_mcux_read,
	.get_parameters = flash_mcux_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = flash_mcux_pages_layout,
#endif
};
/*
 * Initialize the MCUX flash driver and cache the program-flash base address.
 *
 * Fix: the FLASH_Init() result was previously ignored until the end, so
 * FLASH_GetProperty() could be called on an uninitialized config; now init
 * failure returns early. The redundant casts around pflash_block_base
 * (already a uint32_t) are removed.
 *
 * @return 0 on success, -EIO if the MCUX flash driver fails to initialize.
 */
static int flash_mcux_init(const struct device *dev)
{
	struct flash_priv *priv = dev->data;
	uint32_t pflash_block_base;
	status_t rc;

	CLOCK_EnableClock(kCLOCK_Mscm);

	k_sem_init(&priv->write_lock, 1, 1);

	rc = FLASH_Init(&priv->config);
	if (rc != kStatus_Success) {
		return -EIO;
	}

	FLASH_GetProperty(&priv->config, kFLASH_PropertyPflashBlockBaseAddr,
			  &pflash_block_base);
	priv->pflash_block_base = pflash_block_base;

	return 0;
}
/* Single FTFE instance; no per-instance config struct is needed. */
DEVICE_DT_INST_DEFINE(0, flash_mcux_init, NULL,
			&flash_data, NULL, POST_KERNEL,
			CONFIG_FLASH_INIT_PRIORITY, &flash_mcux_api);
``` | /content/code_sandbox/drivers/flash/soc_flash_rv32m1.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,015 |
```unknown
config FLASH_NXP_S32_QSPI_NOR
bool "NXP S32 QSPI NOR driver"
default y
depends on DT_HAS_NXP_S32_QSPI_NOR_ENABLED
select MEMC
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_JESD216
select FLASH_HAS_EXPLICIT_ERASE
help
Enable the Flash driver for a NOR Serial Flash Memory device connected
to an NXP S32 QSPI bus.
if FLASH_NXP_S32_QSPI_NOR
config FLASH_NXP_S32_QSPI_NOR_SFDP_RUNTIME
bool "Read flash parameters at runtime"
help
Read flash device characteristics from the device at runtime.
This option should provide functionality for all supported
JESD216-compatible devices, with the following notes:
- Quad Enable Requirements bitfield (DW15) must be present in the SFDP
tables to configure Quad mode. Otherwise it defaults to Dual or
Single mode as supported by the device.
- Soft Reset bitfield (DW16) must be present in the SFDP tables to
automatically reset the device at initialization time.
- 0-X-X mode discovery not yet implemented by the HAL.
If not selected, the driver uses a fixed configuration assuming 256 By
page size and 4 KiBy, 32 KiBy and 64 KiBy erase instructions. The
device size and jedec-id properties must be set in devicetree node.
config FLASH_NXP_S32_QSPI_VERIFY_ERASE
bool "Verify memory after erased"
help
Verify contents of memory after erased.
config FLASH_NXP_S32_QSPI_VERIFY_WRITE
bool "Verify memory after written"
help
Verify contents of memory after written.
config FLASH_NXP_S32_QSPI_LAYOUT_PAGE_SIZE
int "Page size to use for FLASH_LAYOUT feature"
default 4096
help
When CONFIG_FLASH_PAGE_LAYOUT is used this driver will support that API.
By default the page size corresponds to the sector size (4096) for a NOR
flash memory. Other options may include the 32K-byte erase size (32768),
the block size (65536), or any non-zero multiple of the sector size.
endif # FLASH_NXP_S32_QSPI_NOR
``` | /content/code_sandbox/drivers/flash/Kconfig.nxp_s32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 482 |
```objective-c
/*
*
*/
#ifndef ZEPHYR_DRIVERS_FLASH_OSPI_STM32_H_
#define ZEPHYR_DRIVERS_FLASH_OSPI_STM32_H_
#if defined(CONFIG_SOC_SERIES_STM32H5X)
/*
 * On STM32H5 the Cube HAL exposes this peripheral through the XSPI API
 * instead of the OSPI API. Map every OSPI identifier used by the driver to
 * its XSPI equivalent so the same driver source compiles on both families.
 */
/* Structure member renames */
#define NbData DataLength
#define AddressSize AddressWidth
#define InstructionDtrMode InstructionDTRMode
#define AddressDtrMode AddressDTRMode
#define DataDtrMode DataDTRMode
#define InstructionSize InstructionWidth
#define FifoThreshold FifoThresholdByte
#define ChipSelectHighTime ChipSelectHighTimeCycle
#define FlashId IOSelect
#define Match MatchValue
#define Mask MatchMask
#define Interval IntervalTime
#define DeviceSize MemorySize
#define DualQuad MemoryMode
/* Type renames */
#define OSPI_InitTypeDef XSPI_InitTypeDef
#define OSPI_HandleTypeDef XSPI_HandleTypeDef
#define OSPI_RegularCmdTypeDef XSPI_RegularCmdTypeDef
#define OSPI_AutoPollingTypeDef XSPI_AutoPollingTypeDef
/* HAL function renames */
#define HAL_OSPI_Init HAL_XSPI_Init
#define HAL_OSPI_Command HAL_XSPI_Command
#define HAL_OSPI_Receive HAL_XSPI_Receive
#define HAL_OSPI_Receive_DMA HAL_XSPI_Receive_DMA
#define HAL_OSPI_Receive_IT HAL_XSPI_Receive_IT
#define HAL_OSPI_Transmit HAL_XSPI_Transmit
#define HAL_OSPI_Transmit_DMA HAL_XSPI_Transmit_DMA
#define HAL_OSPI_Transmit_IT HAL_XSPI_Transmit_IT
#define HAL_OSPI_AutoPolling HAL_XSPI_AutoPolling
#define HAL_OSPI_AutoPolling_IT HAL_XSPI_AutoPolling_IT
#define HAL_OSPI_IRQHandler HAL_XSPI_IRQHandler
#define HAL_OSPI_Abort HAL_XSPI_Abort
#define HAL_OSPI_ErrorCallback HAL_XSPI_ErrorCallback
#define HAL_OSPI_CmdCpltCallback HAL_XSPI_CmdCpltCallback
#define HAL_OSPI_RxCpltCallback HAL_XSPI_RxCpltCallback
#define HAL_OSPI_TxCpltCallback HAL_XSPI_TxCpltCallback
#define HAL_OSPI_StatusMatchCallback HAL_XSPI_StatusMatchCallback
#define HAL_OSPI_TimeOutCallback HAL_XSPI_TimeOutCallback
/* HAL constant renames */
#define HAL_OSPI_ADDRESS_NONE HAL_XSPI_ADDRESS_NONE
#define HAL_OSPI_ADDRESS_8_LINES HAL_XSPI_ADDRESS_8_LINES
#define HAL_OSPI_ADDRESS_4_LINES HAL_XSPI_ADDRESS_4_LINES
#define HAL_OSPI_ADDRESS_2_LINES HAL_XSPI_ADDRESS_2_LINES
#define HAL_OSPI_ADDRESS_1_LINE HAL_XSPI_ADDRESS_1_LINE
#define HAL_OSPI_ADDRESS_32_BITS HAL_XSPI_ADDRESS_32_BITS
#define HAL_OSPI_ADDRESS_24_BITS HAL_XSPI_ADDRESS_24_BITS
#define HAL_OSPI_ADDRESS_16_BITS HAL_XSPI_ADDRESS_16_BITS
#define HAL_OSPI_ADDRESS_8_BITS HAL_XSPI_ADDRESS_8_BITS
#define HAL_OSPI_ADDRESS_DTR_ENABLE HAL_XSPI_ADDRESS_DTR_ENABLE
#define HAL_OSPI_ADDRESS_DTR_DISABLE HAL_XSPI_ADDRESS_DTR_DISABLE
#define HAL_OSPI_INSTRUCTION_8_LINES HAL_XSPI_INSTRUCTION_8_LINES
#define HAL_OSPI_INSTRUCTION_4_LINES HAL_XSPI_INSTRUCTION_4_LINES
#define HAL_OSPI_INSTRUCTION_2_LINES HAL_XSPI_INSTRUCTION_2_LINES
#define HAL_OSPI_INSTRUCTION_1_LINE HAL_XSPI_INSTRUCTION_1_LINE
#define HAL_OSPI_INSTRUCTION_32_BITS HAL_XSPI_INSTRUCTION_32_BITS
#define HAL_OSPI_INSTRUCTION_16_BITS HAL_XSPI_INSTRUCTION_16_BITS
#define HAL_OSPI_INSTRUCTION_8_BITS HAL_XSPI_INSTRUCTION_8_BITS
#define HAL_OSPI_INSTRUCTION_DTR_ENABLE HAL_XSPI_INSTRUCTION_DTR_ENABLE
#define HAL_OSPI_INSTRUCTION_DTR_DISABLE HAL_XSPI_INSTRUCTION_DTR_DISABLE
#define HAL_OSPI_ALTERNATE_BYTES_NONE HAL_XSPI_ALT_BYTES_NONE
#define HAL_OSPI_DATA_NONE HAL_XSPI_DATA_NONE
#define HAL_OSPI_DATA_8_LINES HAL_XSPI_DATA_8_LINES
#define HAL_OSPI_DATA_4_LINES HAL_XSPI_DATA_4_LINES
#define HAL_OSPI_DATA_2_LINES HAL_XSPI_DATA_2_LINES
#define HAL_OSPI_DATA_1_LINE HAL_XSPI_DATA_1_LINE
#define HAL_OSPI_DATA_DTR_ENABLE HAL_XSPI_DATA_DTR_ENABLE
#define HAL_OSPI_DATA_DTR_DISABLE HAL_XSPI_DATA_DTR_DISABLE
#define HAL_OSPI_DQS_ENABLE HAL_XSPI_DQS_ENABLE
#define HAL_OSPI_DQS_DISABLE HAL_XSPI_DQS_DISABLE
#define HAL_OSPI_MATCH_MODE_AND HAL_XSPI_MATCH_MODE_AND
#define HAL_OSPI_SIOO_INST_EVERY_CMD HAL_XSPI_SIOO_INST_EVERY_CMD
#define HAL_OSPI_AUTOMATIC_STOP_ENABLE HAL_XSPI_AUTOMATIC_STOP_ENABLE
#define HAL_OSPI_OPTYPE_COMMON_CFG HAL_XSPI_OPTYPE_COMMON_CFG
#define HAL_OSPI_TIMEOUT_DEFAULT_VALUE HAL_XSPI_TIMEOUT_DEFAULT_VALUE
#define HAL_OSPI_CLOCK_MODE_0 HAL_XSPI_CLOCK_MODE_0
#define HAL_OSPI_FLASH_ID_1 HAL_XSPI_SELECT_IO_7_0
#define HAL_OSPI_DUALQUAD_DISABLE HAL_XSPI_SINGLE_MEM
#define HAL_OSPI_DUALQUAD_ENABLE HAL_XSPI_DUAL_MEM
#define HAL_OSPI_SAMPLE_SHIFTING_NONE HAL_XSPI_SAMPLE_SHIFT_NONE
#define HAL_OSPI_SAMPLE_SHIFTING_HALFCYCLE HAL_XSPI_SAMPLE_SHIFT_HALFCYCLE
#define HAL_OSPI_DELAY_BLOCK_USED HAL_XSPI_DELAY_BLOCK_ON
#define HAL_OSPI_DELAY_BLOCK_BYPASSED HAL_XSPI_DELAY_BLOCK_BYPASS
#define HAL_OSPI_MEMTYPE_MICRON HAL_XSPI_MEMTYPE_MICRON
#define HAL_OSPI_MEMTYPE_MACRONIX HAL_XSPI_MEMTYPE_MACRONIX
#define HAL_OSPI_DHQC_ENABLE HAL_XSPI_DHQC_ENABLE
#define HAL_OSPI_DHQC_DISABLE HAL_XSPI_DHQC_DISABLE
#define HAL_OSPI_WRAP_NOT_SUPPORTED HAL_XSPI_WRAP_NOT_SUPPORTED
#define HAL_OSPI_FREERUNCLK_DISABLE HAL_XSPI_FREERUNCLK_DISABLE
#endif /* CONFIG_SOC_SERIES_STM32H5X */
#endif /* ZEPHYR_DRIVERS_FLASH_OSPI_STM32_H_ */
``` | /content/code_sandbox/drivers/flash/flash_stm32_ospi.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,269 |
```c
/*
*
*/
#include <sys/types.h>
#include <zephyr/device.h>
#include <zephyr/drivers/flash/stm32_flash_api_extensions.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#ifdef CONFIG_USERSPACE
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#endif
#include <soc.h>
#include "flash_stm32.h"
LOG_MODULE_REGISTER(flash_stm32_ex_op, CONFIG_FLASH_LOG_LEVEL);
#if defined(CONFIG_FLASH_STM32_WRITE_PROTECT)
/*
 * Extended-operation handler: update and/or query flash sector write
 * protection.
 *
 * @param dev flash controller device
 * @param in  optional pointer to struct flash_stm32_ex_op_sector_wp_in
 *            carrying enable_mask/disable_mask of sectors to change;
 *            0/NULL to only query
 * @param out optional pointer to struct flash_stm32_ex_op_sector_wp_out
 *            receiving the currently protected sector mask; NULL to skip
 * @return 0 on success, negative errno otherwise
 */
int flash_stm32_ex_op_sector_wp(const struct device *dev, const uintptr_t in,
				void *out)
{
	const struct flash_stm32_ex_op_sector_wp_in *request =
		(const struct flash_stm32_ex_op_sector_wp_in *)in;
	struct flash_stm32_ex_op_sector_wp_out *result =
		(struct flash_stm32_ex_op_sector_wp_out *)out;
	uint32_t change_mask;
	int rc = 0, rc2 = 0;
#ifdef CONFIG_USERSPACE
	bool syscall_trap = z_syscall_trap();
#endif

	if (request != NULL) {
#ifdef CONFIG_USERSPACE
		struct flash_stm32_ex_op_sector_wp_in in_copy;

		/* Copy the request out of user memory before using it. */
		if (syscall_trap) {
			K_OOPS(k_usermode_from_copy(&in_copy, request,
						    sizeof(in_copy)));
			request = &in_copy;
		}
#endif
		change_mask = request->enable_mask;

		/* Honouring disable_mask can be compiled out as a
		 * hardening measure.
		 */
		if (!IS_ENABLED(
			    CONFIG_FLASH_STM32_WRITE_PROTECT_DISABLE_PREVENTION)) {
			change_mask |= request->disable_mask;
		}

		rc = flash_stm32_option_bytes_lock(dev, false);
		if (rc == 0) {
			rc = flash_stm32_update_wp_sectors(
				dev, change_mask, request->enable_mask);
		}

		/* Always re-lock option bytes; report the re-lock error
		 * only if the update itself succeeded.
		 */
		rc2 = flash_stm32_option_bytes_lock(dev, true);
		if (!rc) {
			rc = rc2;
		}
	}

	if (result != NULL) {
#ifdef CONFIG_USERSPACE
		struct flash_stm32_ex_op_sector_wp_out out_copy;

		/* Stage the answer in kernel memory, copy it out below. */
		if (syscall_trap) {
			result = &out_copy;
		}
#endif
		rc2 = flash_stm32_get_wp_sectors(dev, &result->protected_mask);
		if (!rc) {
			rc = rc2;
		}
#ifdef CONFIG_USERSPACE
		if (syscall_trap) {
			K_OOPS(k_usermode_to_copy(out, result, sizeof(out_copy)));
		}
#endif
	}

	return rc;
}
#endif /* CONFIG_FLASH_STM32_WRITE_PROTECT */
#if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION)
/*
 * Compute and apply a readout-protection (RDP) level transition.
 *
 * Transitions into level 2 (permanent) and out of level 1 are gated by
 * Kconfig options; disallowed transitions fail without touching hardware.
 *
 * @param dev flash controller device
 * @param enable true to enable readout protection, false to disable
 * @param permanent true to request the irreversible RDP level 2
 * @return 0 on success (including "already at target level"),
 *         -ENOTSUP or -EACCES when the transition is not permitted
 */
int flash_stm32_ex_op_update_rdp(const struct device *dev, bool enable,
				 bool permanent)
{
	uint8_t current_level, target_level;

	current_level = flash_stm32_get_rdp_level(dev);
	target_level = current_level;

	/*
	 * 0xAA = RDP level 0 (no protection)
	 * 0xCC = RDP level 2 (permanent protection)
	 * others = RDP level 1 (protection active)
	 */
	switch (current_level) {
	case FLASH_STM32_RDP2:
		/* Level 2 is irreversible by hardware design. */
		if (!enable || !permanent) {
			LOG_DBG("RDP level 2 is permanent and can't be changed!");
			return -ENOTSUP;
		}
		break;
	case FLASH_STM32_RDP0:
		if (enable) {
			target_level = FLASH_STM32_RDP1;
			if (permanent) {
#if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION_PERMANENT_ALLOW)
				target_level = FLASH_STM32_RDP2;
#else
				LOG_DBG("Permanent readout protection (RDP "
					"level 0 -> 2) not allowed");
				return -ENOTSUP;
#endif
			}
		}
		break;
	default: /* FLASH_STM32_RDP1 */
		if (enable && permanent) {
#if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION_PERMANENT_ALLOW)
			target_level = FLASH_STM32_RDP2;
#else
			LOG_DBG("Permanent readout protection (RDP "
				"level 1 -> 2) not allowed");
			return -ENOTSUP;
#endif
		}
		if (!enable) {
#if defined(CONFIG_FLASH_STM32_READOUT_PROTECTION_DISABLE_ALLOW)
			target_level = FLASH_STM32_RDP0;
#else
			LOG_DBG("Disabling readout protection (RDP "
				"level 1 -> 0) not allowed");
			return -EACCES;
#endif
		}
	}

	/* Update RDP level if needed */
	if (current_level != target_level) {
		LOG_INF("RDP changed from 0x%02x to 0x%02x", current_level,
			target_level);
		flash_stm32_set_rdp_level(dev, target_level);
	}

	return 0;
}
int flash_stm32_ex_op_rdp(const struct device *dev, const uintptr_t in,
void *out)
{
const struct flash_stm32_ex_op_rdp *request =
(const struct flash_stm32_ex_op_rdp *)in;
struct flash_stm32_ex_op_rdp *result =
(struct flash_stm32_ex_op_rdp *)out;
uint8_t current_level;
#ifdef CONFIG_USERSPACE
struct flash_stm32_ex_op_rdp copy;
bool syscall_trap = z_syscall_trap();
#endif
int rc = 0, rc2 = 0;
if (request != NULL) {
#ifdef CONFIG_USERSPACE
if (syscall_trap) {
K_OOPS(k_usermode_from_copy(©, request, sizeof(copy)));
request = ©
}
#endif
rc = flash_stm32_option_bytes_lock(dev, false);
if (rc == 0) {
rc = flash_stm32_ex_op_update_rdp(dev, request->enable,
request->permanent);
}
rc2 = flash_stm32_option_bytes_lock(dev, true);
if (!rc) {
rc = rc2;
}
}
if (result != NULL) {
#ifdef CONFIG_USERSPACE
if (syscall_trap) {
result = ©
}
#endif
current_level = flash_stm32_get_rdp_level(dev);
/*
* 0xAA = RDP level 0 (no protection)
* 0xCC = RDP level 2 (permanent protection)
* others = RDP level 1 (protection active)
*/
switch (current_level) {
case FLASH_STM32_RDP2:
result->enable = true;
result->permanent = true;
break;
case FLASH_STM32_RDP0:
result->enable = false;
result->permanent = false;
break;
default: /* FLASH_STM32_RDP1 */
result->enable = true;
result->permanent = false;
}
#ifdef CONFIG_USERSPACE
if (syscall_trap) {
K_OOPS(k_usermode_to_copy(out, result, sizeof(copy)));
}
#endif
}
return rc;
}
#endif /* CONFIG_FLASH_STM32_READOUT_PROTECTION */
``` | /content/code_sandbox/drivers/flash/flash_stm32_ex_op.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,542 |
```unknown
config FLASH_NRF_FORCE_ALT
bool
depends on SOC_COMPATIBLE_NRF
help
This option can be enabled to force an alternative implementation
of the flash driver.
menuconfig SOC_FLASH_NRF
bool "Nordic Semiconductor nRF flash driver"
default y
depends on DT_HAS_NORDIC_NRF51_FLASH_CONTROLLER_ENABLED || \
DT_HAS_NORDIC_NRF52_FLASH_CONTROLLER_ENABLED || \
DT_HAS_NORDIC_NRF53_FLASH_CONTROLLER_ENABLED || \
DT_HAS_NORDIC_NRF91_FLASH_CONTROLLER_ENABLED
depends on !FLASH_NRF_FORCE_ALT
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
select NRFX_NVMC
select MPU_ALLOW_FLASH_WRITE if ARM_MPU
help
Enables Nordic Semiconductor nRF flash driver.
if SOC_FLASH_NRF
choice SOC_FLASH_NRF_RADIO_SYNC_CHOICE
prompt "Nordic nRFx flash driver synchronization"
default SOC_FLASH_NRF_RADIO_SYNC_TICKER if BT_LL_SW_SPLIT
default SOC_FLASH_NRF_RADIO_SYNC_NONE
help
synchronization between flash memory driver and radio.
config SOC_FLASH_NRF_RADIO_SYNC_TICKER
bool "Nordic nRFx flash driver synchronized with radio"
depends on BT_LL_SW_SPLIT
help
Enable synchronization between flash memory driver and radio using
BLE LL controller ticker API.
config SOC_FLASH_NRF_RADIO_SYNC_NONE
bool "none"
help
disable synchronization between flash memory driver and radio.
endchoice
config SOC_FLASH_NRF_PARTIAL_ERASE
bool "Nordic nRFx flash driver partial erase"
depends on HAS_HW_NRF_NVMC_PE
help
Enable partial erase feature. Partial erase is performed in time
slices instead of blocking MCU, for the time it is needed to
complete operation over given area.
This allows interrupting flash erase between operations
to perform other task by MCU.
This feature may also be used for better syncing flash erase
operations, when compiled with SOC_FLASH_NRF_RADIO_SYNC_TICKER,
with Bluetooth.
config SOC_FLASH_NRF_PARTIAL_ERASE_MS
int "Partial erase timeout in MS"
depends on SOC_FLASH_NRF_PARTIAL_ERASE
default 3
help
This is maximum time, in ms, that NVMC will use to erase part
of Flash, before stopping to let CPU resume operation.
Minimal timeout is 2 ms; the maximum should not exceed half of
FLASH_PAGE_ERASE_MAX_TIME_US, expressed in ms.
config SOC_FLASH_NRF_TIMEOUT_MULTIPLIER
int "Multiplier for flash operation timeouts [x0.1]"
depends on !SOC_FLASH_NRF_RADIO_SYNC_NONE
default 15 if SOC_FLASH_NRF_PARTIAL_ERASE && SOC_FLASH_NRF_RADIO_SYNC_TICKER
default 10
help
This is a multiplier that will be divided by 10 that is applied
to the flash erase and write operations timeout. The base for
the multiplication would allow erasing all nRF flash pages in
blocking mode.
config SOC_FLASH_NRF_UICR
bool "Access to UICR"
depends on !TRUSTED_EXECUTION_NONSECURE
help
Enable operations on UICR. Once enabled UICR are written or read as
ordinary flash memory. Erase is possible for whole UICR at once.
config SOC_FLASH_NRF_EMULATE_ONE_BYTE_WRITE_ACCESS
bool "8-bit write block size emulation"
help
When this option is enabled writing chunks less than minimal write
block size parameter (imposed by manufacturer) is possible but operation
is more complex and requires basic user knowledge about NVMC controller.
endif # SOC_FLASH_NRF
``` | /content/code_sandbox/drivers/flash/Kconfig.nrf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 786 |
```c
/*
*
*/
#define DT_DRV_COMPAT nuvoton_numaker_rmc
#include <string.h>
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/logging/log.h>
#include "flash_priv.h"
#include <NuMicro.h>
LOG_MODULE_REGISTER(flash_numaker_rmc, CONFIG_FLASH_LOG_LEVEL);
#define SOC_NV_FLASH_NODE DT_INST(0, soc_nv_flash)
#define SOC_NV_FLASH_WRITE_BLOCK_SIZE DT_PROP_OR(SOC_NV_FLASH_NODE, write_block_size, 0x04)
/* Per-instance driver state. */
struct flash_numaker_data {
	RMC_T *rmc;			/* RMC register block base */
	struct k_sem write_lock;	/* serializes erase/write operations */
	uint32_t flash_block_base;	/* APROM base address */
};

/* Fixed flash characteristics reported through the flash API. */
static const struct flash_parameters flash_numaker_parameters = {
	.write_block_size = SOC_NV_FLASH_WRITE_BLOCK_SIZE,
	.erase_value = 0xff,
	.caps = {
		.no_explicit_erase = true,
	},
};
/* Check that [offset, offset + len) lies entirely inside the APROM. */
static bool flash_numaker_is_range_valid(off_t offset, size_t len)
{
	uint32_t aprom_size = RMC_APROM_END - RMC_APROM_BASE;

	/* Reject negative offsets and empty requests up front. */
	if (offset < 0 || len == 0) {
		return false;
	}

	/* Both the start and the length must fit the APROM window. */
	if (offset >= aprom_size) {
		return false;
	}
	if (len > aprom_size) {
		return false;
	}

	/* The tail of the request must not run past the end. */
	return (aprom_size - offset) >= len;
}
/*
 * Erase a flash memory area, one page at a time.
 *
 * Fix: the status accumulator was declared uint32_t while being assigned
 * -EIO; it is now an int so negative errno values are carried without an
 * unsigned round-trip.
 *
 * param dev Device struct
 * param offset The address's offset (must be page-aligned)
 * param len The size to erase (must be a multiple of the page size)
 * return 0 on success
 * return -EINVAL erroneous code
 */
static int flash_numaker_erase(const struct device *dev, off_t offset, size_t len)
{
	struct flash_numaker_data *dev_data = dev->data;
	int rc = 0;
	unsigned int key;
	int page_nums = len / RMC_FLASH_PAGE_SIZE;
	uint32_t addr = dev_data->flash_block_base + offset;

	/* return SUCCESS for len == 0 (required by tests/drivers/flash) */
	if (len == 0) {
		return 0;
	}

	/* Validate range */
	if (!flash_numaker_is_range_valid(offset, len)) {
		return -EINVAL;
	}

	/* check alignment and erase only by pages */
	if (((addr % RMC_FLASH_PAGE_SIZE) != 0) || ((len % RMC_FLASH_PAGE_SIZE) != 0)) {
		return -EINVAL;
	}

	/* take semaphore */
	if (k_sem_take(&dev_data->write_lock, K_NO_WAIT)) {
		return -EACCES;
	}

	SYS_UnlockReg();
	key = irq_lock();

	while (page_nums) {
		/* erase page */
		if (RMC_Erase(addr)) {
			LOG_ERR("Erase flash page failed or erase time-out");
			rc = -EIO;
			goto done;
		}
		page_nums--;
		addr += RMC_FLASH_PAGE_SIZE;
	}

done:
	SYS_LockReg();
	irq_unlock(key);

	/* release semaphore */
	k_sem_give(&dev_data->write_lock);

	return rc;
}
/*
 * Read a flash memory area via direct memory-mapped access.
 *
 * param dev Device struct
 * param offset The address's offset
 * param data Destination buffer
 * param len Number of bytes to read
 * return 0 on success, -EINVAL for an out-of-range request
 */
static int flash_numaker_read(const struct device *dev, off_t offset, void *data, size_t len)
{
	struct flash_numaker_data *dev_data = dev->data;
	const void *src = (const void *)(dev_data->flash_block_base + offset);

	/* Zero-length reads always succeed (flash driver test contract). */
	if (len == 0) {
		return 0;
	}

	if (!flash_numaker_is_range_valid(offset, len)) {
		return -EINVAL;
	}

	/* The APROM is memory mapped, so a plain copy suffices. */
	memcpy(data, src, len);

	return 0;
}
/* Program one write block (4 or 8 bytes) starting at u32_addr. */
static int32_t flash_numaker_block_write(uint32_t u32_addr, const uint8_t *pu8_data, int block_size)
{
	const uint32_t *words = (const uint32_t *)pu8_data;
	int32_t status;

	SYS_UnlockReg();
	switch (block_size) {
	case 4:
		status = RMC_Write(u32_addr, words[0]);
		break;
	case 8:
		/* Both words are always programmed; their statuses are
		 * combined so any failure is reported (no short-circuit).
		 */
		status = RMC_Write(u32_addr, words[0]);
		status |= RMC_Write(u32_addr + 4, words[1]);
		break;
	default:
		status = -1;
		break;
	}
	SYS_LockReg();

	return status;
}
/*
 * Write to the APROM in write-block-sized chunks.
 *
 * Fix: the status accumulator was declared uint32_t while being assigned
 * -EIO; it is now an int so negative errno values are carried without an
 * unsigned round-trip.
 *
 * param dev Device struct
 * param offset The address's offset (must be write-block aligned)
 * param data Source buffer
 * param len Number of bytes (must be a multiple of the write block size)
 * return 0 on success, negative errno otherwise
 */
static int flash_numaker_write(const struct device *dev, off_t offset, const void *data, size_t len)
{
	struct flash_numaker_data *dev_data = dev->data;
	int rc = 0;
	unsigned int key;
	uint32_t addr = dev_data->flash_block_base + offset;
	int block_size = flash_numaker_parameters.write_block_size;
	int blocks = len / flash_numaker_parameters.write_block_size;
	const uint8_t *pu8_data = (const uint8_t *)data;

	/* return SUCCESS for len == 0 (required by tests/drivers/flash) */
	if (len == 0) {
		return 0;
	}

	/* Validate range */
	if (!flash_numaker_is_range_valid(offset, len)) {
		return -EINVAL;
	}

	/* Validate address alignment */
	if ((addr % flash_numaker_parameters.write_block_size) != 0) {
		return -EINVAL;
	}

	/* Validate write size be multiples of the write block size */
	if ((len % block_size) != 0) {
		return -EINVAL;
	}

	/* Validate offset be multiples of the write block size */
	if ((offset % block_size) != 0) {
		return -EINVAL;
	}

	if (k_sem_take(&dev_data->write_lock, K_FOREVER)) {
		return -EACCES;
	}

	key = irq_lock();

	while (blocks) {
		if (flash_numaker_block_write(addr, pu8_data, block_size)) {
			rc = -EIO;
			goto done;
		}
		pu8_data += block_size;
		addr += block_size;
		blocks--;
	}

done:
	irq_unlock(key);

	k_sem_give(&dev_data->write_lock);

	return rc;
}
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
static const struct flash_pages_layout dev_layout = {
.pages_count = DT_REG_SIZE(SOC_NV_FLASH_NODE) /
DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
.pages_size = DT_PROP(SOC_NV_FLASH_NODE, erase_block_size),
};
/* Report the single uniform page layout derived from the devicetree. */
static void flash_numaker_pages_layout(const struct device *dev,
				       const struct flash_pages_layout **layout,
				       size_t *layout_size)
{
	*layout_size = 1;
	*layout = &dev_layout;
}
#endif /* CONFIG_FLASH_PAGE_LAYOUT */
/* Return the immutable flash characteristics of this controller. */
static const struct flash_parameters *flash_numaker_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_numaker_parameters;
}
static struct flash_numaker_data flash_data;
static const struct flash_driver_api flash_numaker_api = {
.erase = flash_numaker_erase,
.write = flash_numaker_write,
.read = flash_numaker_read,
.get_parameters = flash_numaker_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
.page_layout = flash_numaker_pages_layout,
#endif
};
static int flash_numaker_init(const struct device *dev)
{
struct flash_numaker_data *dev_data = dev->data;
k_sem_init(&dev_data->write_lock, 1, 1);
/* Enable RMC ISP function */
SYS_UnlockReg();
RMC_Open();
/* Enable APROM update. */
RMC_ENABLE_AP_UPDATE();
SYS_LockReg();
dev_data->flash_block_base = (uint32_t)RMC_APROM_BASE;
dev_data->rmc = (RMC_T *)DT_REG_ADDR(DT_NODELABEL(rmc));
return 0;
}
DEVICE_DT_INST_DEFINE(0, flash_numaker_init, NULL, &flash_data, NULL, POST_KERNEL,
CONFIG_FLASH_INIT_PRIORITY, &flash_numaker_api);
``` | /content/code_sandbox/drivers/flash/soc_flash_numaker_rmc.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,780 |
```unknown
config SOC_FLASH_ESP32
bool "Espressif ESP32 flash driver"
default y
depends on DT_HAS_ESPRESSIF_ESP32_FLASH_CONTROLLER_ENABLED
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_EXPLICIT_ERASE
help
Enable ESP32 internal flash driver.
config MPU_ALLOW_FLASH_WRITE
bool "Add MPU access to write to flash"
help
Enable this to allow MPU RWX access to flash memory
``` | /content/code_sandbox/drivers/flash/Kconfig.esp32 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 98 |
```c
/*
*
*/
#define DT_DRV_COMPAT nxp_imx_flexspi_hyperflash
#include <zephyr/kernel.h>
#include <errno.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/logging/log.h>
/*
* NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, Any external functions
* called while interacting with the flexspi MUST be relocated to SRAM or ITCM
* at runtime, so that the chip does not access the flexspi to read program
* instructions while it is being written to
*
* Additionally, no data used by this driver should be stored in flash.
*/
#if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_FLASH_LOG_LEVEL > 0)
#warning "Enabling flash driver logging and XIP mode simultaneously can cause \
read-while-write hazards. This configuration is not recommended."
#endif
LOG_MODULE_REGISTER(flexspi_hyperflash, CONFIG_FLASH_LOG_LEVEL);
#ifdef CONFIG_HAS_MCUX_CACHE
#include <fsl_cache.h>
#endif
#include <zephyr/sys/util.h>
#include "memc_mcux_flexspi.h"
#define SPI_HYPERFLASH_SECTOR_SIZE (0x40000U)
#define SPI_HYPERFLASH_PAGE_SIZE (512U)
#define HYPERFLASH_ERASE_VALUE (0xFF)
#ifdef CONFIG_FLASH_MCUX_FLEXSPI_HYPERFLASH_WRITE_BUFFER
static uint8_t hyperflash_write_buf[SPI_HYPERFLASH_PAGE_SIZE];
#endif
/* LUT sequence indices into flash_flexspi_hyperflash_lut; the values must
 * match the sequence slots used by the XIP boot configuration so driver
 * and ROM agree on the LUT layout.
 */
enum {
	/* Instructions matching with XIP layout */
	READ_DATA = 0,
	WRITE_DATA = 1,
	READ_STATUS = 2,
	WRITE_ENABLE = 4,
	ERASE_SECTOR = 6,
	PAGE_PROGRAM = 10,
	ERASE_CHIP = 12,
};
#define CUSTOM_LUT_LENGTH 64
static const uint32_t flash_flexspi_hyperflash_lut[CUSTOM_LUT_LENGTH] = {
/* Read Data */
[4 * READ_DATA] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xA0,
kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18),
[4 * READ_DATA + 1] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10,
kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04),
/* Write Data */
[4 * WRITE_DATA] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x20,
kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18),
[4 * WRITE_DATA + 1] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10,
kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x02),
/* Read Status */
[4 * READ_STATUS] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * READ_STATUS + 1] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * READ_STATUS + 2] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05),
[4 * READ_STATUS + 3] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x70),
[4 * READ_STATUS + 4] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xA0,
kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18),
[4 * READ_STATUS + 5] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10,
kFLEXSPI_Command_DUMMY_RWDS_DDR, kFLEXSPI_8PAD, 0x0B),
[4 * READ_STATUS + 6] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_READ_DDR, kFLEXSPI_8PAD, 0x04,
kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x0),
/* Write Enable */
[4 * WRITE_ENABLE] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x20,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * WRITE_ENABLE + 1] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * WRITE_ENABLE + 2] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05),
[4 * WRITE_ENABLE + 3] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * WRITE_ENABLE + 4] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x20,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * WRITE_ENABLE + 5] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x55),
[4 * WRITE_ENABLE + 6] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x02),
[4 * WRITE_ENABLE + 7] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x55),
/* Erase Sector */
[4 * ERASE_SECTOR] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * ERASE_SECTOR + 1] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * ERASE_SECTOR + 2] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05),
[4 * ERASE_SECTOR + 3] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x80),
[4 * ERASE_SECTOR + 4] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * ERASE_SECTOR + 5] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * ERASE_SECTOR + 6] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05),
[4 * ERASE_SECTOR + 7] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * ERASE_SECTOR + 8] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * ERASE_SECTOR + 9] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x55),
[4 * ERASE_SECTOR + 10] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x02),
[4 * ERASE_SECTOR + 11] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x55),
[4 * ERASE_SECTOR + 12] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18),
[4 * ERASE_SECTOR + 13] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * ERASE_SECTOR + 14] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x30,
kFLEXSPI_Command_STOP, kFLEXSPI_1PAD, 0x00),
/* program page with word program command sequence */
[4 * PAGE_PROGRAM] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x20,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * PAGE_PROGRAM + 1] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * PAGE_PROGRAM + 2] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05),
[4 * PAGE_PROGRAM + 3] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xA0),
[4 * PAGE_PROGRAM + 4] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x20,
kFLEXSPI_Command_RADDR_DDR, kFLEXSPI_8PAD, 0x18),
[4 * PAGE_PROGRAM + 5] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_CADDR_DDR, kFLEXSPI_8PAD, 0x10,
kFLEXSPI_Command_WRITE_DDR, kFLEXSPI_8PAD, 0x80),
/* Erase chip */
[4 * ERASE_CHIP] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * ERASE_CHIP + 1] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * ERASE_CHIP + 2] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05),
[4 * ERASE_CHIP + 3] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x80),
/* 1 */
[4 * ERASE_CHIP + 4] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * ERASE_CHIP + 5] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * ERASE_CHIP + 6] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05),
[4 * ERASE_CHIP + 7] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
/* 2 */
[4 * ERASE_CHIP + 8] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * ERASE_CHIP + 9] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x55),
[4 * ERASE_CHIP + 10] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x02),
[4 * ERASE_CHIP + 11] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x55),
/* 3 */
[4 * ERASE_CHIP + 12] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00),
[4 * ERASE_CHIP + 13] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0xAA),
[4 * ERASE_CHIP + 14] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x05),
[4 * ERASE_CHIP + 15] =
FLEXSPI_LUT_SEQ(kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x00,
kFLEXSPI_Command_DDR, kFLEXSPI_8PAD, 0x10),
};
struct flash_flexspi_hyperflash_config {
const struct device *controller;
};
/* Device variables used in critical sections should be in this structure */
struct flash_flexspi_hyperflash_data {
struct device controller;
flexspi_device_config_t config;
flexspi_port_t port;
struct flash_pages_layout layout;
struct flash_parameters flash_parameters;
};
/*
 * Poll the flash status word until the device reports ready.
 *
 * NOTE(review): bit 0x8000 is treated as the "device ready" flag and
 * mask 0x3200 as error flags of the HyperFlash status register —
 * confirm against the flash datasheet.
 *
 * @param dev flash device instance
 * @return 0 once ready, -EINVAL when an error flag is set, other
 *         negative errno on transfer failure
 */
static int flash_flexspi_hyperflash_wait_bus_busy(const struct device *dev)
{
	struct flash_flexspi_hyperflash_data *data = dev->data;
	flexspi_transfer_t transfer;
	int ret;
	bool is_busy;
	uint32_t read_value;

	transfer.deviceAddress = 0;
	transfer.port = data->port;
	transfer.cmdType = kFLEXSPI_Read;
	transfer.SeqNumber = 2;
	transfer.seqIndex = READ_STATUS;
	transfer.data = &read_value;
	transfer.dataSize = 2;
	do {
		ret = memc_flexspi_transfer(&data->controller, &transfer);
		if (ret != 0) {
			return ret;
		}
		is_busy = !(read_value & 0x8000);
		if (read_value & 0x3200) {
			ret = -EINVAL;
			break;
		}
	} while (is_busy);

	return ret;
}
static int flash_flexspi_hyperflash_write_enable(const struct device *dev, uint32_t address)
{
struct flash_flexspi_hyperflash_data *data = dev->data;
flexspi_transfer_t transfer;
int ret;
transfer.deviceAddress = address;
transfer.port = data->port;
transfer.cmdType = kFLEXSPI_Command;
transfer.SeqNumber = 2;
transfer.seqIndex = WRITE_ENABLE;
ret = memc_flexspi_transfer(&data->controller, &transfer);
return ret;
}
/*
 * Probe the HyperFlash with a CFI "Query" cycle, verify the "QRY"
 * signature, then return the device to normal read-array mode.
 *
 * @param dev flash device instance
 * @return 0 when the CFI signature matches, negative errno otherwise
 */
static int flash_flexspi_hyperflash_check_vendor_id(const struct device *dev)
{
	struct flash_flexspi_hyperflash_data *data = dev->data;
	/* CFI entry: write 0x98 to word address 0x555 (byte addr 0xAAA). */
	uint8_t writebuf[4] = {0x00, 0x98};
	uint32_t buffer[2];
	int ret;
	flexspi_transfer_t transfer;

	transfer.deviceAddress = (0x555 * 2);
	transfer.port = data->port;
	transfer.cmdType = kFLEXSPI_Write;
	transfer.SeqNumber = 1;
	transfer.seqIndex = WRITE_DATA;
	transfer.data = (uint32_t *)writebuf;
	transfer.dataSize = 2;
	LOG_DBG("Reading id");
	ret = memc_flexspi_transfer(&data->controller, &transfer);
	if (ret != 0) {
		LOG_ERR("failed to CFI");
		return ret;
	}

	/* Read the CFI identification string at word address 0x10. */
	transfer.deviceAddress = (0x10 * 2);
	transfer.port = data->port;
	transfer.cmdType = kFLEXSPI_Read;
	transfer.SeqNumber = 1;
	transfer.seqIndex = READ_DATA;
	transfer.data = buffer;
	transfer.dataSize = 8;
	ret = memc_flexspi_transfer(&data->controller, &transfer);
	if (ret != 0) {
		LOG_ERR("failed to read id");
		return ret;
	}
	buffer[1] &= 0xFFFF;
	/* Check that the data read out is unicode "QRY" in big-endian order */
	if ((buffer[0] != 0x52005100) || (buffer[1] != 0x5900)) {
		LOG_ERR("data read out is wrong!");
		return -EINVAL;
	}

	/* 0xF0: CFI exit / reset command, back to read-array mode. */
	writebuf[1] = 0xF0;
	transfer.deviceAddress = 0;
	transfer.port = data->port;
	transfer.cmdType = kFLEXSPI_Write;
	transfer.SeqNumber = 1;
	transfer.seqIndex = WRITE_DATA;
	transfer.data = (uint32_t *)writebuf;
	transfer.dataSize = 2;
	ret = memc_flexspi_transfer(&data->controller, &transfer);
	if (ret != 0) {
		LOG_ERR("failed to exit");
		return ret;
	}

	memc_flexspi_reset(&data->controller);

	return ret;
}
/*
 * Program up to one page of data at the given flash offset.
 *
 * Fix: the debug log used %d for a size_t argument (format/argument
 * mismatch, undefined behavior per the printf contract); it now uses
 * %zu and casts off_t explicitly for %lx.
 *
 * @param dev flash device instance
 * @param offset flash byte offset (caller keeps it within one page)
 * @param buffer source data
 * @param len number of bytes to program
 * @return 0 on success, negative errno otherwise
 */
static int flash_flexspi_hyperflash_page_program(const struct device *dev, off_t
		offset, const void *buffer, size_t len)
{
	struct flash_flexspi_hyperflash_data *data = dev->data;
	flexspi_transfer_t transfer = {
		.deviceAddress = offset,
		.port = data->port,
		.cmdType = kFLEXSPI_Write,
		.SeqNumber = 2,
		.seqIndex = PAGE_PROGRAM,
		.data = (uint32_t *)buffer,
		.dataSize = len,
	};

	LOG_DBG("Page programming %zu bytes to 0x%08lx", len, (long)offset);
	return memc_flexspi_transfer(&data->controller, &transfer);
}
/* Read through the memory-mapped AHB window of the FlexSPI. */
static int flash_flexspi_hyperflash_read(const struct device *dev, off_t offset,
		void *buffer, size_t len)
{
	struct flash_flexspi_hyperflash_data *data = dev->data;
	uint8_t *src;

	src = memc_flexspi_get_ahb_address(&data->controller, data->port,
					   offset);
	if (src == NULL) {
		return -EINVAL;
	}

	/* The flash contents are memory mapped; a plain copy suffices. */
	memcpy(buffer, src, len);

	return 0;
}
/*
 * Write to the HyperFlash, one page (or partial page) at a time.
 *
 * When executing in place from this FlexSPI, interrupts stay locked for
 * the whole operation so no instruction fetch hits the flash while it is
 * being programmed. The bus clock is lowered to 84 MHz for programming
 * and restored to 332 MHz afterwards.
 *
 * @param dev flash device instance
 * @param offset flash byte offset
 * @param buffer source data
 * @param len number of bytes to write
 * @return 0 on success, negative errno otherwise
 */
static int flash_flexspi_hyperflash_write(const struct device *dev, off_t offset,
		const void *buffer, size_t len)
{
	struct flash_flexspi_hyperflash_data *data = dev->data;
	size_t size = len;
	uint8_t *src = (uint8_t *)buffer;
	unsigned int key = 0;
	int i, j;
	int ret = -1;

	uint8_t *dst = memc_flexspi_get_ahb_address(&data->controller,
						    data->port,
						    offset);
	if (!dst) {
		return -EINVAL;
	}

	if (memc_flexspi_is_running_xip(&data->controller)) {
		/*
		 * ==== ENTER CRITICAL SECTION ====
		 * No flash access should be performed in critical section. All
		 * code and data accessed must reside in ram.
		 */
		key = irq_lock();
	}

	/* Clock FlexSPI at 84 MHZ (42MHz SCLK in DDR mode) */
	(void)memc_flexspi_update_clock(&data->controller, &data->config,
					data->port, MHZ(84));
	while (len) {
		/* Writing between two page sizes crashes the platform so we
		 * have to write the part that fits in the first page and then
		 * update the offset.
		 */
		i = MIN(SPI_HYPERFLASH_PAGE_SIZE - (offset %
			SPI_HYPERFLASH_PAGE_SIZE), len);
#ifdef CONFIG_FLASH_MCUX_FLEXSPI_HYPERFLASH_WRITE_BUFFER
		/* Stage the chunk in RAM so the source is never fetched
		 * from flash while programming is in progress.
		 */
		for (j = 0; j < i; j++) {
			hyperflash_write_buf[j] = src[j];
		}
#endif
		ret = flash_flexspi_hyperflash_write_enable(dev, offset);
		if (ret != 0) {
			LOG_ERR("failed to enable write");
			break;
		}
#ifdef CONFIG_FLASH_MCUX_FLEXSPI_HYPERFLASH_WRITE_BUFFER
		ret = flash_flexspi_hyperflash_page_program(dev, offset,
							    hyperflash_write_buf, i);
#else
		ret = flash_flexspi_hyperflash_page_program(dev, offset, src, i);
#endif
		if (ret != 0) {
			LOG_ERR("failed to write");
			break;
		}
		ret = flash_flexspi_hyperflash_wait_bus_busy(dev);
		if (ret != 0) {
			LOG_ERR("failed to wait bus busy");
			break;
		}
		/* Do software reset. */
		memc_flexspi_reset(&data->controller);
		src += i;
		offset += i;
		len -= i;
	}

	/* Clock FlexSPI at 332 MHZ (166 MHz SCLK in DDR mode) */
	(void)memc_flexspi_update_clock(&data->controller, &data->config,
					data->port, MHZ(332));

#ifdef CONFIG_HAS_MCUX_CACHE
	/* The AHB window may be cached; drop stale lines over the
	 * freshly written range.
	 */
	DCACHE_InvalidateByRange((uint32_t) dst, size);
#endif

	if (memc_flexspi_is_running_xip(&data->controller)) {
		/* ==== EXIT CRITICAL SECTION ==== */
		irq_unlock(key);
	}

	return ret;
}
/* Erase @p size bytes starting at @p offset, one sector at a time.
 *
 * Both @p offset and @p size must be sector-aligned. When executing in
 * place, IRQs are locked for the duration of the erase.
 *
 * @return 0 on success (including size == 0), negative errno otherwise.
 */
static int flash_flexspi_hyperflash_erase(const struct device *dev, off_t offset, size_t size)
{
	struct flash_flexspi_hyperflash_data *data = dev->data;
	flexspi_transfer_t transfer;
	/* Start at 0 so a zero-sector erase reports success instead of -1. */
	int ret = 0;
	int i;
	unsigned int key = 0;
	int num_sectors = size / SPI_HYPERFLASH_SECTOR_SIZE;
	uint8_t *dst = memc_flexspi_get_ahb_address(&data->controller,
						    data->port,
						    offset);

	if (!dst) {
		return -EINVAL;
	}

	if (offset % SPI_HYPERFLASH_SECTOR_SIZE) {
		LOG_ERR("Invalid offset");
		return -EINVAL;
	}

	if (size % SPI_HYPERFLASH_SECTOR_SIZE) {
		/* Previously logged "Invalid offset", misreporting which
		 * argument failed validation.
		 */
		LOG_ERR("Invalid size");
		return -EINVAL;
	}

	if (memc_flexspi_is_running_xip(&data->controller)) {
		/*
		 * ==== ENTER CRITICAL SECTION ====
		 * No flash access should be performed in critical section. All
		 * code and data accessed must reside in ram.
		 */
		key = irq_lock();
	}

	for (i = 0; i < num_sectors; i++) {
		ret = flash_flexspi_hyperflash_write_enable(dev, offset);
		if (ret != 0) {
			LOG_ERR("failed to write_enable");
			break;
		}
		LOG_DBG("Erasing sector at 0x%08lx", offset);
		transfer.deviceAddress = offset;
		transfer.port = data->port;
		transfer.cmdType = kFLEXSPI_Command;
		transfer.SeqNumber = 4;
		transfer.seqIndex = ERASE_SECTOR;
		ret = memc_flexspi_transfer(&data->controller, &transfer);
		if (ret != 0) {
			LOG_ERR("failed to erase");
			break;
		}
		/* wait bus busy */
		ret = flash_flexspi_hyperflash_wait_bus_busy(dev);
		if (ret != 0) {
			LOG_ERR("failed to wait bus busy");
			break;
		}
		/* Do software reset. */
		memc_flexspi_reset(&data->controller);
		offset += SPI_HYPERFLASH_SECTOR_SIZE;
	}

#ifdef CONFIG_HAS_MCUX_CACHE
	/* Drop cached pre-erase contents of the AHB window. */
	DCACHE_InvalidateByRange((uint32_t) dst, size);
#endif

	if (memc_flexspi_is_running_xip(&data->controller)) {
		/* ==== EXIT CRITICAL SECTION ==== */
		irq_unlock(key);
	}

	return ret;
}
static const struct flash_parameters *flash_flexspi_hyperflash_get_parameters(
const struct device *dev)
{
struct flash_flexspi_hyperflash_data *data = dev->data;
return &data->flash_parameters;
}
static void flash_flexspi_hyperflash_pages_layout(const struct device *dev,
const struct flash_pages_layout **layout,
size_t *layout_size)
{
struct flash_flexspi_hyperflash_data *data = dev->data;
*layout = &data->layout;
*layout_size = 1;
}
/* One-time driver init: copy the controller device into RAM, program the
 * FlexSPI LUT for HyperFlash, reset the controller and probe the vendor ID.
 *
 * @return 0 on success, -ENODEV/-EINVAL/-EIO on the respective failures.
 */
static int flash_flexspi_hyperflash_init(const struct device *dev)
{
const struct flash_flexspi_hyperflash_config *config = dev->config;
struct flash_flexspi_hyperflash_data *data = dev->data;
/* Since the controller variable may be used in critical sections,
 * copy the device pointer into a variable stored in RAM
 */
memcpy(&data->controller, config->controller, sizeof(struct device));
if (!device_is_ready(&data->controller)) {
LOG_ERR("Controller device not ready");
return -ENODEV;
}
/* NOTE(review): bus idle is awaited here unconditionally and again below
 * when running XIP — the second wait looks redundant; confirm upstream.
 */
memc_flexspi_wait_bus_idle(&data->controller);
if (memc_flexspi_is_running_xip(&data->controller)) {
/* Wait for bus idle before configuring */
memc_flexspi_wait_bus_idle(&data->controller);
}
if (memc_flexspi_set_device_config(&data->controller, &data->config,
(const uint32_t *)flash_flexspi_hyperflash_lut,
sizeof(flash_flexspi_hyperflash_lut) / MEMC_FLEXSPI_CMD_SIZE,
data->port)) {
LOG_ERR("Could not set device configuration");
return -EINVAL;
}
memc_flexspi_reset(&data->controller);
/* Sanity check that a HyperFlash device actually answers on the bus. */
if (flash_flexspi_hyperflash_check_vendor_id(dev)) {
LOG_ERR("Could not read vendor id");
return -EIO;
}
return 0;
}
/* Zephyr flash API hooks for the FlexSPI HyperFlash driver. */
static const struct flash_driver_api flash_flexspi_hyperflash_api = {
.read = flash_flexspi_hyperflash_read,
.write = flash_flexspi_hyperflash_write,
.erase = flash_flexspi_hyperflash_erase,
.get_parameters = flash_flexspi_hyperflash_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
.page_layout = flash_flexspi_hyperflash_pages_layout,
#endif
};
/* Paste three tokens together; used to build kFLEXSPI_* enum identifiers. */
#define CONCAT3(x, y, z) x ## y ## z
/* Map a devicetree cs-interval-unit value onto the FlexSPI enum name. */
#define CS_INTERVAL_UNIT(unit) \
CONCAT3(kFLEXSPI_CsIntervalUnit, unit, SckCycle)
/* Map a devicetree ahb-write-wait-unit value onto the FlexSPI enum name. */
#define AHB_WRITE_WAIT_UNIT(unit) \
CONCAT3(kFLEXSPI_AhbWriteWaitUnit, unit, AhbCycle)
/* Build a flexspi_device_config_t initializer from instance n's DT props.
 * flashSize is in KiB and the DT "size" property is in bits, hence /8/KB(1).
 */
#define FLASH_FLEXSPI_DEVICE_CONFIG(n) \
{ \
.flexspiRootClk = MHZ(42), \
.flashSize = DT_INST_PROP(n, size) / 8 / KB(1), \
.CSIntervalUnit = \
CS_INTERVAL_UNIT( \
DT_INST_PROP(n, cs_interval_unit)), \
.CSInterval = DT_INST_PROP(n, cs_interval), \
.CSHoldTime = DT_INST_PROP(n, cs_hold_time), \
.CSSetupTime = DT_INST_PROP(n, cs_setup_time), \
.dataValidTime = DT_INST_PROP(n, data_valid_time), \
.columnspace = DT_INST_PROP(n, column_space), \
.enableWordAddress = DT_INST_PROP(n, word_addressable), \
.AWRSeqIndex = WRITE_DATA, \
.AWRSeqNumber = 1, \
.ARDSeqIndex = READ_DATA, \
.ARDSeqNumber = 1, \
.AHBWriteWaitUnit = \
AHB_WRITE_WAIT_UNIT( \
DT_INST_PROP(n, ahb_write_wait_unit)), \
.AHBWriteWaitInterval = \
DT_INST_PROP(n, ahb_write_wait_interval), \
} \
#define FLASH_FLEXSPI_HYPERFLASH(n) \
static struct flash_flexspi_hyperflash_config \
flash_flexspi_hyperflash_config_##n = { \
.controller = DEVICE_DT_GET(DT_INST_BUS(n)), \
}; \
static struct flash_flexspi_hyperflash_data \
flash_flexspi_hyperflash_data_##n = { \
.config = FLASH_FLEXSPI_DEVICE_CONFIG(n), \
.port = DT_INST_REG_ADDR(n), \
.layout = { \
.pages_count = DT_INST_PROP(n, size) / 8 \
/ SPI_HYPERFLASH_SECTOR_SIZE, \
.pages_size = SPI_HYPERFLASH_SECTOR_SIZE, \
}, \
.flash_parameters = { \
.write_block_size = DT_INST_PROP(n, write_block_size), \
.erase_value = HYPERFLASH_ERASE_VALUE, \
}, \
}; \
\
DEVICE_DT_INST_DEFINE(n, \
flash_flexspi_hyperflash_init, \
NULL, \
&flash_flexspi_hyperflash_data_##n, \
&flash_flexspi_hyperflash_config_##n, \
POST_KERNEL, \
CONFIG_FLASH_INIT_PRIORITY, \
&flash_flexspi_hyperflash_api);
/* Instantiate the driver once per enabled devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(FLASH_FLEXSPI_HYPERFLASH)
``` | /content/code_sandbox/drivers/flash/flash_mcux_flexspi_hyperflash.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,361 |
```unknown
# Macro to find node in device tree
DT_CHOSEN_CDNS_NAND_NODE := nand
config FLASH_CDNS_NAND
bool "Cadence NAND Flash driver"
default y
depends on DT_HAS_CDNS_NAND_ENABLED
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
help
Enable Cadence NAND support.
if FLASH_CDNS_NAND
config CDNS_NAND_INTERRUPT_SUPPORT
bool "Cadence Nand Interrupt Support"
def_bool $(dt_node_has_prop,$(DT_CHOSEN_CDNS_NAND_NODE),interrupts)
help
Enable Cadence Nand Interrupt Support.
choice
prompt "Set the NAND Operating mode"
default CDNS_NAND_CDMA_MODE
help
Specify the Operating mode used by the driver.
config CDNS_NAND_CDMA_MODE
bool "Cadence Nand CDMA Operating Mode"
config CDNS_NAND_PIO_MODE
bool "Cadence Nand PIO Operating Mode"
config CDNS_NAND_GENERIC_MODE
bool "Cadence Nand Generic Operating Mode"
endchoice
config FLASH_CDNS_CDMA_PAGE_COUNT
int "Set the page count for a single transfer in the CDMA Mode"
default 10
help
Configure the page count for a single transfer in the CDMA Mode
config FLASH_CDNS_CDMA_BLOCK_COUNT
int "Set the block count for a single transfer in the CDMA Mode"
default 10
help
Configure the block count for a single transfer in the CDMA Mode
endif # FLASH_CDNS_NAND
``` | /content/code_sandbox/drivers/flash/Kconfig.cadence_nand | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 329 |
```objective-c
/*
*
*/
#ifndef __SPI_NOR_H__
#define __SPI_NOR_H__
#include <zephyr/sys/util.h>
#define SPI_NOR_MAX_ID_LEN 3
/* Status register bits */
#define SPI_NOR_WIP_BIT BIT(0) /* Write in progress */
#define SPI_NOR_WEL_BIT BIT(1) /* Write enable latch */
/* Flash opcodes */
#define SPI_NOR_CMD_WRSR 0x01 /* Write status register */
#define SPI_NOR_CMD_RDSR 0x05 /* Read status register */
#define SPI_NOR_CMD_WRSR2 0x31 /* Write status register 2 */
#define SPI_NOR_CMD_RDSR2 0x35 /* Read status register 2 */
#define SPI_NOR_CMD_RDSR3 0x15 /* Read status register 3 */
#define SPI_NOR_CMD_WRSR3 0x11 /* Write status register 3 */
#define SPI_NOR_CMD_READ 0x03 /* Read data */
#define SPI_NOR_CMD_READ_FAST 0x0B /* Read data */
#define SPI_NOR_CMD_DREAD 0x3B /* Read data (1-1-2) */
#define SPI_NOR_CMD_2READ 0xBB /* Read data (1-2-2) */
#define SPI_NOR_CMD_QREAD 0x6B /* Read data (1-1-4) */
#define SPI_NOR_CMD_4READ 0xEB /* Read data (1-4-4) */
#define SPI_NOR_CMD_WREN 0x06 /* Write enable */
#define SPI_NOR_CMD_WRDI 0x04 /* Write disable */
#define SPI_NOR_CMD_PP 0x02 /* Page program */
#define SPI_NOR_CMD_PP_1_1_2 0xA2 /* Dual Page program (1-1-2) */
#define SPI_NOR_CMD_PP_1_1_4 0x32 /* Quad Page program (1-1-4) */
#define SPI_NOR_CMD_PP_1_4_4 0x38 /* Quad Page program (1-4-4) */
#define SPI_NOR_CMD_RDCR 0x15 /* Read control register */
#define SPI_NOR_CMD_SE 0x20 /* Sector erase */
#define SPI_NOR_CMD_SE_4B 0x21 /* Sector erase 4 byte address*/
#define SPI_NOR_CMD_BE_32K 0x52 /* Block erase 32KB */
#define SPI_NOR_CMD_BE 0xD8 /* Block erase */
#define SPI_NOR_CMD_CE 0xC7 /* Chip erase */
#define SPI_NOR_CMD_RDID 0x9F /* Read JEDEC ID */
#define SPI_NOR_CMD_ULBPR 0x98 /* Global Block Protection Unlock */
#define SPI_NOR_CMD_4BA 0xB7 /* Enter 4-Byte Address Mode */
#define SPI_NOR_CMD_DPD 0xB9 /* Deep Power Down */
#define SPI_NOR_CMD_RDPD 0xAB /* Release from Deep Power Down */
#define SPI_NOR_CMD_WR_CFGREG2 0x72 /* Write config register 2 */
#define SPI_NOR_CMD_RD_CFGREG2 0x71 /* Read config register 2 */
#define SPI_NOR_CMD_RESET_EN 0x66 /* Reset Enable */
#define SPI_NOR_CMD_RESET_MEM 0x99 /* Reset Memory */
#define SPI_NOR_CMD_BULKE 0x60 /* Bulk Erase */
#define SPI_NOR_CMD_READ_4B 0x13 /* Read data 4 Byte Address */
#define SPI_NOR_CMD_READ_FAST_4B 0x0C /* Fast Read 4 Byte Address */
#define SPI_NOR_CMD_DREAD_4B 0x3C /* Read data (1-1-2) 4 Byte Address */
#define SPI_NOR_CMD_2READ_4B 0xBC /* Read data (1-2-2) 4 Byte Address */
#define SPI_NOR_CMD_QREAD_4B 0x6C /* Read data (1-1-4) 4 Byte Address */
#define SPI_NOR_CMD_4READ_4B 0xEC /* Read data (1-4-4) 4 Byte Address */
#define SPI_NOR_CMD_PP_4B 0x12 /* Page Program 4 Byte Address */
#define SPI_NOR_CMD_PP_1_1_4_4B 0x34 /* Quad Page program (1-1-4) 4 Byte Address */
#define SPI_NOR_CMD_PP_1_4_4_4B 0x3e /* Quad Page program (1-4-4) 4 Byte Address */
/* Flash octal opcodes */
#define SPI_NOR_OCMD_SE 0x21DE /* Octal Sector erase */
#define SPI_NOR_OCMD_CE 0xC738 /* Octal Chip erase */
#define SPI_NOR_OCMD_RDSR 0x05FA /* Octal Read status register */
#define SPI_NOR_OCMD_DTR_RD 0xEE11 /* Octal IO DTR read command */
#define SPI_NOR_OCMD_RD 0xEC13 /* Octal IO read command */
#define SPI_NOR_OCMD_PAGE_PRG 0x12ED /* Octal Page Prog */
#define SPI_NOR_OCMD_WREN 0x06F9 /* Octal Write enable */
#define SPI_NOR_OCMD_NOP 0x00FF /* Octal No operation */
#define SPI_NOR_OCMD_RESET_EN 0x6699 /* Octal Reset Enable */
#define SPI_NOR_OCMD_RESET_MEM 0x9966 /* Octal Reset Memory */
#define SPI_NOR_OCMD_WR_CFGREG2 0x728D /* Octal Write configuration Register2 */
#define SPI_NOR_OCMD_RD_CFGREG2 0x718E /* Octal Read configuration Register2 */
#define SPI_NOR_OCMD_BULKE 0x609F /* Octa Bulk Erase */
/* Page, sector, and block size are standard, not configurable. */
#define SPI_NOR_PAGE_SIZE 0x0100U
#define SPI_NOR_SECTOR_SIZE 0x1000U
#define SPI_NOR_BLOCK_SIZE 0x10000U
/* Flash Auto-polling values */
#define SPI_NOR_WREN_MATCH 0x02
#define SPI_NOR_WREN_MASK 0x02
#define SPI_NOR_WEL_MATCH 0x00
#define SPI_NOR_WEL_MASK 0x02
#define SPI_NOR_MEM_RDY_MATCH 0x00
#define SPI_NOR_MEM_RDY_MASK 0x01
#define SPI_NOR_AUTO_POLLING_INTERVAL 0x10
/* Flash Dummy Cycles values */
#define SPI_NOR_DUMMY_RD 8U
#define SPI_NOR_DUMMY_RD_OCTAL 6U
#define SPI_NOR_DUMMY_RD_OCTAL_DTR 6U
#define SPI_NOR_DUMMY_REG_OCTAL 4U
#define SPI_NOR_DUMMY_REG_OCTAL_DTR 5U
/* Memory registers address */
#define SPI_NOR_REG2_ADDR1 0x0000000
#define SPI_NOR_CR2_STR_OPI_EN 0x01
#define SPI_NOR_CR2_DTR_OPI_EN 0x02
#define SPI_NOR_REG2_ADDR3 0x00000300
#define SPI_NOR_CR2_DUMMY_CYCLES_66MHZ 0x07
/* Test whether offset is aligned to a given number of bits. */
#define SPI_NOR_IS_ALIGNED(_ofs, _bits) (((_ofs) & BIT_MASK(_bits)) == 0)
/* Convenience aligners for the standard 4K sector / 32K / 64K block sizes. */
#define SPI_NOR_IS_SECTOR_ALIGNED(_ofs) SPI_NOR_IS_ALIGNED(_ofs, 12)
#define SPI_NOR_IS_32K_ALIGNED(_ofs) SPI_NOR_IS_ALIGNED(_ofs, 15)
#define SPI_NOR_IS_64K_ALIGNED(_ofs) SPI_NOR_IS_ALIGNED(_ofs, 16)
/* NOTE(review): duplicates SPI_NOR_CMD_RDCR (0x15) defined above —
 * presumably kept for legacy callers; confirm before removing.
 */
#define CMD_RDCR 0x15 /* Read the configuration register. */
#endif /*__SPI_NOR_H__*/
``` | /content/code_sandbox/drivers/flash/spi_nor.h | objective-c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,829 |
```c
/*
*
*/
#define DT_DRV_COMPAT nordic_rram_controller
#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/sys/barrier.h>
#include <hal/nrf_rramc.h>
#include <zephyr/../../drivers/flash/soc_flash_nrf.h>
/* Note that it is supported to compile this driver for both secure
* and non-secure images, but non-secure images cannot call
* nrf_rramc_config_set because NRF_RRAMC_NS does not exist.
*
* Instead, when TF-M boots, it will configure RRAMC with this static
* configuration:
*
* nrf_rramc_config_t config = {
* .mode_write = true,
* .write_buff_size = WRITE_BUFFER_SIZE
* };
*
* nrf_rramc_ready_next_timeout_t params = {
* .value = CONFIG_NRF_RRAM_READYNEXT_TIMEOUT_VALUE,
* .enable = true,
* };
*
* For more details see NCSDK-26982.
*/
LOG_MODULE_REGISTER(flash_nrf_rram, CONFIG_FLASH_LOG_LEVEL);
#define RRAM DT_INST(0, soc_nv_flash)
#if defined(CONFIG_SOC_SERIES_BSIM_NRFXX)
#define RRAM_START NRF_RRAM_BASE_ADDR
#else
#define RRAM_START DT_REG_ADDR(RRAM)
#endif
#define RRAM_SIZE DT_REG_SIZE(RRAM)
#define PAGE_SIZE DT_PROP(RRAM, erase_block_size)
#define PAGE_COUNT ((RRAM_SIZE) / (PAGE_SIZE))
#define WRITE_BLOCK_SIZE_FROM_DT DT_PROP(RRAM, write_block_size)
#define ERASE_VALUE 0xFF
#ifdef CONFIG_MULTITHREADING
static struct k_sem sem_lock;
#define SYNC_INIT() k_sem_init(&sem_lock, 1, 1)
#define SYNC_LOCK() k_sem_take(&sem_lock, K_FOREVER)
#define SYNC_UNLOCK() k_sem_give(&sem_lock)
#else
#define SYNC_INIT()
#define SYNC_LOCK()
#define SYNC_UNLOCK()
#endif /* CONFIG_MULTITHREADING */
#if CONFIG_NRF_RRAM_WRITE_BUFFER_SIZE > 0
#define WRITE_BUFFER_ENABLE 1
#define WRITE_BUFFER_SIZE CONFIG_NRF_RRAM_WRITE_BUFFER_SIZE
#define WRITE_LINE_SIZE 16 /* In bytes, one line is 128 bits. */
#define WRITE_BUFFER_MAX_SIZE (WRITE_BUFFER_SIZE * WRITE_LINE_SIZE)
BUILD_ASSERT((PAGE_SIZE % (WRITE_LINE_SIZE) == 0), "erase-block-size must be a multiple of 16");
BUILD_ASSERT((WRITE_BLOCK_SIZE_FROM_DT % (WRITE_LINE_SIZE) == 0),
"if NRF_RRAM_WRITE_BUFFER_SIZE > 0, then write-block-size must be a multiple of 16");
#else
#define WRITE_BUFFER_ENABLE 0
#define WRITE_BUFFER_SIZE 0
#define WRITE_LINE_SIZE WRITE_BLOCK_SIZE_FROM_DT
#define WRITE_BUFFER_MAX_SIZE 16 /* In bytes, one line is 128 bits. */
BUILD_ASSERT((PAGE_SIZE % (WRITE_LINE_SIZE) == 0),
"erase-block-size must be a multiple of write-block-size");
#endif
#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE
#if (WRITE_BUFFER_SIZE < 2)
#define FLASH_SLOT_WRITE 500
#elif (WRITE_BUFFER_SIZE < 4)
#define FLASH_SLOT_WRITE 1000
#elif (WRITE_BUFFER_SIZE < 9)
#define FLASH_SLOT_WRITE 2000
#elif (WRITE_BUFFER_SIZE < 17)
#define FLASH_SLOT_WRITE 4000
#else
#define FLASH_SLOT_WRITE 8000 /* longest write takes 7107 us */
#endif
static int write_op(void *context); /* instance of flash_op_handler_t */
static int write_synchronously(off_t addr, const void *data, size_t len);
#endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */
/* Return true iff the range [addr, addr + len) lies entirely inside the
 * region [boundary_start, boundary_start + boundary_size).
 */
static inline bool is_within_bounds(off_t addr, size_t len, off_t boundary_start,
				    size_t boundary_size)
{
	if (addr < boundary_start) {
		return false;
	}
	if (!(addr < (boundary_start + boundary_size))) {
		return false;
	}
	return len <= (boundary_start + boundary_size - addr);
}
#if WRITE_BUFFER_ENABLE
/* Make sure any bytes still sitting in the RRAMC internal write-buffer
 * after a write of @p len bytes at absolute address @p addr actually reach
 * the array. On secure builds this triggers the COMMIT task; on non-secure
 * builds (no task access) it relies on a read-back of a written line.
 */
static void commit_changes(off_t addr, size_t len)
{
#if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
if (nrf_rramc_empty_buffer_check(NRF_RRAMC)) {
/* The internal write-buffer has been committed to RRAM and is now empty. */
return;
}
#endif
if ((len % (WRITE_BUFFER_MAX_SIZE)) == 0) {
/* Our last operation was buffer size-aligned, so we're done. */
return;
}
#if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
ARG_UNUSED(addr);
nrf_rramc_task_trigger(NRF_RRAMC, NRF_RRAMC_TASK_COMMIT_WRITEBUF);
#else
/*
 * When the commit task is unavailable we need to get creative to
 * ensure this is committed.
 *
 * According to the PS the buffer is committed when "There is a
 * read operation from a 128-bit word line in the buffer that has
 * already been written to".
 *
 * So we read the last byte that has been written to trigger this
 * commit.
 *
 * If this approach proves to be problematic, e.g. for writes to
 * write-only memory, then one would have to rely on
 * READYNEXTTIMEOUT to eventually commit the write.
 */
volatile uint8_t dummy_read = *(volatile uint8_t *)(addr + len - 1);
ARG_UNUSED(dummy_read);
#endif
/* Order the commit against any subsequent memory traffic. */
barrier_dmem_fence_full();
}
#endif
/* Copy @p len bytes of @p data — or the erase value when @p data is NULL —
 * to RRAM at absolute address @p addr. On secure builds write mode is
 * enabled around the access and disabled again afterwards; non-secure
 * builds rely on TF-M having configured RRAMC (see file header comment).
 */
static void rram_write(off_t addr, const void *data, size_t len)
{
#if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
nrf_rramc_config_t config = {.mode_write = true, .write_buff_size = WRITE_BUFFER_SIZE};
nrf_rramc_config_set(NRF_RRAMC, &config);
#endif
/* RRAM is memory mapped: writes are plain stores. */
if (data) {
memcpy((void *)addr, data, len);
} else {
memset((void *)addr, ERASE_VALUE, len);
}
barrier_dmem_fence_full(); /* Barrier following our last write. */
#if WRITE_BUFFER_ENABLE
commit_changes(addr, len);
#endif
#if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE)
config.mode_write = false;
nrf_rramc_config_set(NRF_RRAMC, &config);
#endif
}
#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE
/* Advance @p w_ctx past @p shift freshly written bytes. */
static void shift_write_context(uint32_t shift, struct flash_context *w_ctx)
{
	w_ctx->len -= shift;
	w_ctx->flash_addr += shift;

	/* A zero data_addr marks an erase-emulation request, which has no
	 * source buffer to advance.
	 */
	if (w_ctx->data_addr != 0) {
		w_ctx->data_addr += shift;
	}
}
/* Timeslice-aware write handler (instance of flash_op_handler_t).
 *
 * Writes the context's buffer in WRITE_BUFFER_MAX_SIZE chunks, checking the
 * radio-sync time limit between chunks so a long write can be suspended.
 *
 * @return FLASH_OP_DONE when everything is written, FLASH_OP_ONGOING when
 *         the timeslot expired and the (updated) context must be resumed.
 */
static int write_op(void *context)
{
struct flash_context *w_ctx = context;
size_t len;
uint32_t i = 0U;
if (w_ctx->enable_time_limit) {
nrf_flash_sync_get_timestamp_begin();
}
while (w_ctx->len > 0) {
len = (WRITE_BUFFER_MAX_SIZE < w_ctx->len) ? WRITE_BUFFER_MAX_SIZE : w_ctx->len;
rram_write(w_ctx->flash_addr, (const void *)w_ctx->data_addr, len);
/* Record progress so a resumed call continues where we stopped. */
shift_write_context(len, w_ctx);
if (w_ctx->len > 0) {
i++;
if (w_ctx->enable_time_limit) {
if (nrf_flash_sync_check_time_limit(i)) {
return FLASH_OP_ONGOING;
}
}
}
}
return FLASH_OP_DONE;
}
static int write_synchronously(off_t addr, const void *data, size_t len)
{
struct flash_context context = {
.data_addr = (uint32_t)data,
.flash_addr = addr,
.len = len,
.enable_time_limit = 1 /* enable time limit */
};
struct flash_op_desc flash_op_desc = {.handler = write_op, .context = &context};
nrf_flash_sync_set_context(FLASH_SLOT_WRITE);
return nrf_flash_sync_exe(&flash_op_desc);
}
#endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */
/* Common write/erase-emulation entry point.
 *
 * @param addr Offset relative to the start of RRAM (bounds-checked).
 * @param data Source buffer, or NULL to fill with the erase value.
 * @param len  Number of bytes; 0 is a successful no-op.
 *
 * @return 0 on success, -EINVAL on out-of-bounds, or the result of the
 *         synchronized write when radio sync is active.
 */
static int nrf_write(off_t addr, const void *data, size_t len)
{
int ret = 0;
if (!is_within_bounds(addr, len, 0, RRAM_SIZE)) {
return -EINVAL;
}
/* Convert the relative offset to an absolute mapped address. */
addr += RRAM_START;
if (!len) {
return 0;
}
LOG_DBG("Write: %p:%zu", (void *)addr, len);
SYNC_LOCK();
#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE
if (nrf_flash_sync_is_required()) {
ret = write_synchronously(addr, data, len);
} else
#endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */
{
rram_write(addr, data, len);
}
SYNC_UNLOCK();
return ret;
}
/* Flash API read: RRAM is memory mapped, so this is a bounds check plus
 * a plain copy from the mapped address.
 */
static int nrf_rram_read(const struct device *dev, off_t addr, void *data, size_t len)
{
	ARG_UNUSED(dev);

	if (!is_within_bounds(addr, len, 0, RRAM_SIZE)) {
		return -EINVAL;
	}

	memcpy(data, (void *)(RRAM_START + addr), len);

	return 0;
}
/* Flash API write: reject a NULL buffer (NULL means "erase" internally)
 * and delegate to the common write path.
 */
static int nrf_rram_write(const struct device *dev, off_t addr, const void *data, size_t len)
{
	ARG_UNUSED(dev);

	return (data == NULL) ? -EINVAL : nrf_write(addr, data, len);
}
/* Flash API erase: emulated by writing the erase value (NULL data). */
static int nrf_rram_erase(const struct device *dev, off_t addr, size_t len)
{
	ARG_UNUSED(dev);

	return nrf_write(addr, NULL, len);
}
/* Return the static flash parameters for this device.
 *
 * RRAM needs no explicit erase, so the no_explicit_erase capability is set.
 */
static const struct flash_parameters *nrf_rram_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	static const struct flash_parameters parameters = {
		.write_block_size = WRITE_LINE_SIZE,
		.erase_value = ERASE_VALUE,
		.caps = {
			.no_explicit_erase = true,
		},
	};

	/* Fixed mojibake: "&parameters" had been corrupted to "¶meters"
	 * (HTML entity for "&para"), which does not compile.
	 */
	return &parameters;
}
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
static void nrf_rram_page_layout(const struct device *dev, const struct flash_pages_layout **layout,
size_t *layout_size)
{
ARG_UNUSED(dev);
static const struct flash_pages_layout pages_layout = {
.pages_count = PAGE_COUNT,
.pages_size = PAGE_SIZE,
};
*layout = &pages_layout;
*layout_size = 1;
}
#endif
/* Zephyr flash API hooks for the nRF RRAM controller driver. */
static const struct flash_driver_api nrf_rram_api = {
.read = nrf_rram_read,
.write = nrf_rram_write,
.erase = nrf_rram_erase,
.get_parameters = nrf_rram_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
.page_layout = nrf_rram_page_layout,
#endif
};
/* One-time driver init: set up locking, radio-sync support and (on secure
 * builds) the READYNEXT timeout that bounds write-buffer commit latency.
 *
 * @return Always 0.
 */
static int nrf_rram_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	SYNC_INIT();

#ifndef CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE
	nrf_flash_sync_init();
#endif /* !CONFIG_SOC_FLASH_NRF_RADIO_SYNC_NONE */

#if !defined(CONFIG_TRUSTED_EXECUTION_NONSECURE) && CONFIG_NRF_RRAM_READYNEXT_TIMEOUT_VALUE > 0
	nrf_rramc_ready_next_timeout_t params = {
		.value = CONFIG_NRF_RRAM_READYNEXT_TIMEOUT_VALUE,
		.enable = true,
	};

	/* Fixed mojibake: "&params" had been corrupted to "¶ms"
	 * (HTML entity for "&para"), which does not compile.
	 */
	nrf_rramc_ready_next_timeout_set(NRF_RRAMC, &params);
#endif

	return 0;
}
DEVICE_DT_INST_DEFINE(0, nrf_rram_init, NULL, NULL, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY,
&nrf_rram_api);
``` | /content/code_sandbox/drivers/flash/soc_flash_nrf_rram.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,498 |
```unknown
# NUMAKER flash (FMC) driver configuration options
config SOC_FLASH_NUMAKER
bool "Nuvoton NuMaker MCU flash driver"
default y
select FLASH_HAS_PAGE_LAYOUT
select FLASH_HAS_DRIVER_ENABLED
select FLASH_HAS_EXPLICIT_ERASE
select HAS_NUMAKER_FMC
depends on DT_HAS_NUVOTON_NUMAKER_FMC_ENABLED
help
This option enables the FMC driver for Nuvoton NuMaker family of
processors.
Say y if you wish to enable NuMaker FMC.
``` | /content/code_sandbox/drivers/flash/Kconfig.numaker | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 106 |
```c
/*
*
*/
#define DT_DRV_COMPAT zephyr_sim_flash
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>
#include <zephyr/random/random.h>
#include <zephyr/stats/stats.h>
#include <string.h>
#ifdef CONFIG_ARCH_POSIX
#include "flash_simulator_native.h"
#include "cmdline.h"
#include "soc.h"
#define DEFAULT_FLASH_FILE_PATH "flash.bin"
#endif /* CONFIG_ARCH_POSIX */
/* configuration derived from DT */
#ifdef CONFIG_ARCH_POSIX
#define SOC_NV_FLASH_NODE DT_INST_CHILD(0, flash_0)
#else
#define SOC_NV_FLASH_NODE DT_INST_CHILD(0, flash_sim_0)
#endif /* CONFIG_ARCH_POSIX */
#define FLASH_SIMULATOR_BASE_OFFSET DT_REG_ADDR(SOC_NV_FLASH_NODE)
#define FLASH_SIMULATOR_ERASE_UNIT DT_PROP(SOC_NV_FLASH_NODE, erase_block_size)
#define FLASH_SIMULATOR_PROG_UNIT DT_PROP(SOC_NV_FLASH_NODE, write_block_size)
#define FLASH_SIMULATOR_FLASH_SIZE DT_REG_SIZE(SOC_NV_FLASH_NODE)
#define FLASH_SIMULATOR_ERASE_VALUE \
DT_PROP(DT_PARENT(SOC_NV_FLASH_NODE), erase_value)
#define FLASH_SIMULATOR_PAGE_COUNT (FLASH_SIMULATOR_FLASH_SIZE / \
FLASH_SIMULATOR_ERASE_UNIT)
#if (FLASH_SIMULATOR_ERASE_UNIT % FLASH_SIMULATOR_PROG_UNIT)
#error "Erase unit must be a multiple of program unit"
#endif
#define MOCK_FLASH(addr) (mock_flash + (addr) - FLASH_SIMULATOR_BASE_OFFSET)
/* maximum number of pages that can be tracked by the stats module */
#define STATS_PAGE_COUNT_THRESHOLD 256
#define STATS_SECT_EC(N, _) STATS_SECT_ENTRY32(erase_cycles_unit##N)
#define STATS_NAME_EC(N, _) STATS_NAME(flash_sim_stats, erase_cycles_unit##N)
#define STATS_SECT_DIRTYR(N, _) STATS_SECT_ENTRY32(dirty_read_unit##N)
#define STATS_NAME_DIRTYR(N, _) STATS_NAME(flash_sim_stats, dirty_read_unit##N)
#ifdef CONFIG_FLASH_SIMULATOR_STATS
/* increment a unit erase cycles counter */
#define ERASE_CYCLES_INC(U) \
do { \
if (U < STATS_PAGE_COUNT_THRESHOLD) { \
(*(&flash_sim_stats.erase_cycles_unit0 + (U)) += 1); \
} \
} while (false)
#if (CONFIG_FLASH_SIMULATOR_STAT_PAGE_COUNT > STATS_PAGE_COUNT_THRESHOLD)
/* Limitation above is caused by used LISTIFY */
/* Using FLASH_SIMULATOR_FLASH_PAGE_COUNT allows to avoid terrible */
/* error logg at the output and work with the stats module partially */
#define FLASH_SIMULATOR_FLASH_PAGE_COUNT STATS_PAGE_COUNT_THRESHOLD
#else
#define FLASH_SIMULATOR_FLASH_PAGE_COUNT CONFIG_FLASH_SIMULATOR_STAT_PAGE_COUNT
#endif
/* simulator statistics */
STATS_SECT_START(flash_sim_stats)
STATS_SECT_ENTRY32(bytes_read) /* total bytes read */
STATS_SECT_ENTRY32(bytes_written) /* total bytes written */
STATS_SECT_ENTRY32(double_writes) /* num. of writes to non-erased units */
STATS_SECT_ENTRY32(flash_read_calls) /* calls to flash_read() */
STATS_SECT_ENTRY32(flash_read_time_us) /* time spent in flash_read() */
STATS_SECT_ENTRY32(flash_write_calls) /* calls to flash_write() */
STATS_SECT_ENTRY32(flash_write_time_us) /* time spent in flash_write() */
STATS_SECT_ENTRY32(flash_erase_calls) /* calls to flash_erase() */
STATS_SECT_ENTRY32(flash_erase_time_us) /* time spent in flash_erase() */
/* -- per-unit statistics -- */
/* erase cycle count for unit */
LISTIFY(FLASH_SIMULATOR_FLASH_PAGE_COUNT, STATS_SECT_EC, ())
/* number of read operations on worn out erase units */
LISTIFY(FLASH_SIMULATOR_FLASH_PAGE_COUNT, STATS_SECT_DIRTYR, ())
STATS_SECT_END;
STATS_SECT_DECL(flash_sim_stats) flash_sim_stats;
STATS_NAME_START(flash_sim_stats)
STATS_NAME(flash_sim_stats, bytes_read)
STATS_NAME(flash_sim_stats, bytes_written)
STATS_NAME(flash_sim_stats, double_writes)
STATS_NAME(flash_sim_stats, flash_read_calls)
STATS_NAME(flash_sim_stats, flash_read_time_us)
STATS_NAME(flash_sim_stats, flash_write_calls)
STATS_NAME(flash_sim_stats, flash_write_time_us)
STATS_NAME(flash_sim_stats, flash_erase_calls)
STATS_NAME(flash_sim_stats, flash_erase_time_us)
LISTIFY(FLASH_SIMULATOR_FLASH_PAGE_COUNT, STATS_NAME_EC, ())
LISTIFY(FLASH_SIMULATOR_FLASH_PAGE_COUNT, STATS_NAME_DIRTYR, ())
STATS_NAME_END(flash_sim_stats);
/* simulator dynamic thresholds */
STATS_SECT_START(flash_sim_thresholds)
STATS_SECT_ENTRY32(max_write_calls)
STATS_SECT_ENTRY32(max_erase_calls)
STATS_SECT_ENTRY32(max_len)
STATS_SECT_END;
STATS_SECT_DECL(flash_sim_thresholds) flash_sim_thresholds;
STATS_NAME_START(flash_sim_thresholds)
STATS_NAME(flash_sim_thresholds, max_write_calls)
STATS_NAME(flash_sim_thresholds, max_erase_calls)
STATS_NAME(flash_sim_thresholds, max_len)
STATS_NAME_END(flash_sim_thresholds);
#define FLASH_SIM_STATS_INC(group__, var__) STATS_INC(group__, var__)
#define FLASH_SIM_STATS_INCN(group__, var__, n__) STATS_INCN(group__, var__, n__)
#define FLASH_SIM_STATS_INIT_AND_REG(group__, size__, name__) \
STATS_INIT_AND_REG(group__, size__, name__)
#else
#define ERASE_CYCLES_INC(U) do {} while (false)
#define FLASH_SIM_STATS_INC(group__, var__)
#define FLASH_SIM_STATS_INCN(group__, var__, n__)
#define FLASH_SIM_STATS_INIT_AND_REG(group__, size__, name__)
#endif /* CONFIG_FLASH_SIMULATOR_STATS */
#ifdef CONFIG_ARCH_POSIX
static uint8_t *mock_flash;
static int flash_fd = -1;
static const char *flash_file_path;
static bool flash_erase_at_start;
static bool flash_rm_at_exit;
static bool flash_in_ram;
#else
#if DT_NODE_HAS_PROP(DT_PARENT(SOC_NV_FLASH_NODE), memory_region)
#define FLASH_SIMULATOR_MREGION \
LINKER_DT_NODE_REGION_NAME( \
DT_PHANDLE(DT_PARENT(SOC_NV_FLASH_NODE), memory_region))
static uint8_t mock_flash[FLASH_SIMULATOR_FLASH_SIZE] Z_GENERIC_SECTION(FLASH_SIMULATOR_MREGION);
#else
static uint8_t mock_flash[FLASH_SIMULATOR_FLASH_SIZE];
#endif
#endif /* CONFIG_ARCH_POSIX */
static const struct flash_driver_api flash_sim_api;
static const struct flash_parameters flash_sim_parameters = {
.write_block_size = FLASH_SIMULATOR_PROG_UNIT,
.erase_value = FLASH_SIMULATOR_ERASE_VALUE,
.caps = {
#if !defined(CONFIG_FLASH_SIMULATOR_EXPLICIT_ERASE)
.no_explicit_erase = false,
#endif
},
};
/* Return 1 when [offset, offset + len) lies inside the simulated flash
 * window [BASE_OFFSET, BASE_OFFSET + FLASH_SIZE], 0 otherwise.
 */
static int flash_range_is_valid(const struct device *dev, off_t offset,
				size_t len)
{
	ARG_UNUSED(dev);

	if (offset < FLASH_SIMULATOR_BASE_OFFSET) {
		return 0;
	}
	if (offset + len > FLASH_SIMULATOR_FLASH_SIZE + FLASH_SIMULATOR_BASE_OFFSET) {
		return 0;
	}

	return 1;
}
/* Simulated flash read: bounds/alignment checks, stats bookkeeping, then a
 * plain copy out of the mock flash array.
 *
 * @return 0 on success, -EINVAL on invalid range or (when unaligned reads
 *         are disabled) misaligned offset/length.
 */
static int flash_sim_read(const struct device *dev, const off_t offset,
void *data,
const size_t len)
{
ARG_UNUSED(dev);
if (!flash_range_is_valid(dev, offset, len)) {
return -EINVAL;
}
if (!IS_ENABLED(CONFIG_FLASH_SIMULATOR_UNALIGNED_READ)) {
if ((offset % FLASH_SIMULATOR_PROG_UNIT) ||
(len % FLASH_SIMULATOR_PROG_UNIT)) {
return -EINVAL;
}
}
FLASH_SIM_STATS_INC(flash_sim_stats, flash_read_calls);
memcpy(data, MOCK_FLASH(offset), len);
FLASH_SIM_STATS_INCN(flash_sim_stats, bytes_read, len);
#ifdef CONFIG_FLASH_SIMULATOR_SIMULATE_TIMING
/* Emulate the latency of a real flash read. */
k_busy_wait(CONFIG_FLASH_SIMULATOR_MIN_READ_TIME_US);
FLASH_SIM_STATS_INCN(flash_sim_stats, flash_read_time_us,
CONFIG_FLASH_SIMULATOR_MIN_READ_TIME_US);
#endif
return 0;
}
/* Simulated flash write.
 *
 * Validates range and program-unit alignment, detects writes to
 * non-erased units (NOR semantics), honors the optional failure-injection
 * thresholds, then merges the data bit-by-bit so that, with explicit-erase
 * enabled, bits can only move toward the programmed state.
 *
 * @return 0 on success, -EINVAL on bad range/alignment, -EIO on a
 *         double-write when double writes are not allowed.
 */
static int flash_sim_write(const struct device *dev, const off_t offset,
const void *data, const size_t len)
{
uint8_t buf[FLASH_SIMULATOR_PROG_UNIT];
ARG_UNUSED(dev);
if (!flash_range_is_valid(dev, offset, len)) {
return -EINVAL;
}
if ((offset % FLASH_SIMULATOR_PROG_UNIT) ||
(len % FLASH_SIMULATOR_PROG_UNIT)) {
return -EINVAL;
}
FLASH_SIM_STATS_INC(flash_sim_stats, flash_write_calls);
#if defined(CONFIG_FLASH_SIMULATOR_EXPLICIT_ERASE)
/* check if any unit has been already programmed */
memset(buf, FLASH_SIMULATOR_ERASE_VALUE, sizeof(buf));
#else
memcpy(buf, MOCK_FLASH(offset), sizeof(buf));
#endif
for (uint32_t i = 0; i < len; i += FLASH_SIMULATOR_PROG_UNIT) {
if (memcmp(buf, MOCK_FLASH(offset + i), sizeof(buf))) {
FLASH_SIM_STATS_INC(flash_sim_stats, double_writes);
#if !CONFIG_FLASH_SIMULATOR_DOUBLE_WRITES
return -EIO;
#endif
}
}
#ifdef CONFIG_FLASH_SIMULATOR_STATS
/* Failure injection: silently stop writing once the configured call or
 * length threshold is crossed (simulates power loss mid-write).
 */
bool data_part_ignored = false;
if (flash_sim_thresholds.max_write_calls != 0) {
if (flash_sim_stats.flash_write_calls >
flash_sim_thresholds.max_write_calls) {
return 0;
} else if (flash_sim_stats.flash_write_calls ==
flash_sim_thresholds.max_write_calls) {
if (flash_sim_thresholds.max_len == 0) {
return 0;
}
data_part_ignored = true;
}
}
#endif
for (uint32_t i = 0; i < len; i++) {
#ifdef CONFIG_FLASH_SIMULATOR_STATS
if (data_part_ignored) {
if (i >= flash_sim_thresholds.max_len) {
return 0;
}
}
#endif /* CONFIG_FLASH_SIMULATOR_STATS */
/* only pull bits to zero */
#if defined(CONFIG_FLASH_SIMULATOR_EXPLICIT_ERASE)
#if FLASH_SIMULATOR_ERASE_VALUE == 0xFF
*(MOCK_FLASH(offset + i)) &= *((uint8_t *)data + i);
#else
*(MOCK_FLASH(offset + i)) |= *((uint8_t *)data + i);
#endif
#else
*(MOCK_FLASH(offset + i)) = *((uint8_t *)data + i);
#endif
}
FLASH_SIM_STATS_INCN(flash_sim_stats, bytes_written, len);
#ifdef CONFIG_FLASH_SIMULATOR_SIMULATE_TIMING
/* wait before returning */
k_busy_wait(CONFIG_FLASH_SIMULATOR_MIN_WRITE_TIME_US);
FLASH_SIM_STATS_INCN(flash_sim_stats, flash_write_time_us,
CONFIG_FLASH_SIMULATOR_MIN_WRITE_TIME_US);
#endif
return 0;
}
/* Reset one erase unit of the mock flash to the erase value. */
static void unit_erase(const uint32_t unit)
{
	const off_t start = FLASH_SIMULATOR_BASE_OFFSET +
			    (unit * FLASH_SIMULATOR_ERASE_UNIT);

	memset(MOCK_FLASH(start), FLASH_SIMULATOR_ERASE_VALUE,
	       FLASH_SIMULATOR_ERASE_UNIT);
}
/* Simulated flash erase: erase-unit-aligned range is reset unit by unit,
 * with per-unit erase-cycle accounting and optional failure injection.
 *
 * @return 0 on success, -EINVAL on bad range or misaligned offset/length.
 */
static int flash_sim_erase(const struct device *dev, const off_t offset,
const size_t len)
{
ARG_UNUSED(dev);
if (!flash_range_is_valid(dev, offset, len)) {
return -EINVAL;
}
/* erase operation must be aligned to the erase unit boundary */
if ((offset % FLASH_SIMULATOR_ERASE_UNIT) ||
(len % FLASH_SIMULATOR_ERASE_UNIT)) {
return -EINVAL;
}
FLASH_SIM_STATS_INC(flash_sim_stats, flash_erase_calls);
#ifdef CONFIG_FLASH_SIMULATOR_STATS
/* Failure injection: pretend success without erasing once the
 * configured erase-call threshold is reached.
 */
if ((flash_sim_thresholds.max_erase_calls != 0) &&
(flash_sim_stats.flash_erase_calls >=
flash_sim_thresholds.max_erase_calls)){
return 0;
}
#endif
/* the first unit to be erased */
uint32_t unit_start = (offset - FLASH_SIMULATOR_BASE_OFFSET) /
FLASH_SIMULATOR_ERASE_UNIT;
/* erase as many units as necessary and increase their erase counter */
for (uint32_t i = 0; i < len / FLASH_SIMULATOR_ERASE_UNIT; i++) {
ERASE_CYCLES_INC(unit_start + i);
unit_erase(unit_start + i);
}
#ifdef CONFIG_FLASH_SIMULATOR_SIMULATE_TIMING
/* wait before returning */
k_busy_wait(CONFIG_FLASH_SIMULATOR_MIN_ERASE_TIME_US);
FLASH_SIM_STATS_INCN(flash_sim_stats, flash_erase_time_us,
CONFIG_FLASH_SIMULATOR_MIN_ERASE_TIME_US);
#endif
return 0;
}
#ifdef CONFIG_FLASH_PAGE_LAYOUT
/* One uniform page region covering the entire simulated flash. */
static const struct flash_pages_layout flash_sim_pages_layout = {
	.pages_count = FLASH_SIMULATOR_PAGE_COUNT,
	.pages_size = FLASH_SIMULATOR_ERASE_UNIT,
};

/* flash_driver_api.page_layout: report the single uniform region. */
static void flash_sim_page_layout(const struct device *dev,
				  const struct flash_pages_layout **layout,
				  size_t *layout_size)
{
	/* Added for consistency with the other callbacks in this driver;
	 * dev was previously left unused without being marked.
	 */
	ARG_UNUSED(dev);

	*layout = &flash_sim_pages_layout;
	*layout_size = 1;
}
#endif
/* flash_driver_api.get_parameters: constant simulator characteristics. */
static const struct flash_parameters *
flash_sim_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_sim_parameters;
}

/* Flash driver API vtable for the simulator. */
static const struct flash_driver_api flash_sim_api = {
	.read = flash_sim_read,
	.write = flash_sim_write,
	.erase = flash_sim_erase,
	.get_parameters = flash_sim_get_parameters,
#ifdef CONFIG_FLASH_PAGE_LAYOUT
	.page_layout = flash_sim_page_layout,
#endif
};
#ifdef CONFIG_ARCH_POSIX
/* native_sim: back the mock flash with a host file (or plain host RAM
 * when --flash_in_ram is given).
 */
static int flash_mock_init(const struct device *dev)
{
	int rc;

	ARG_UNUSED(dev);

	/* No --flash=<path> given: fall back to the default backing file. */
	if (flash_in_ram == false && flash_file_path == NULL) {
		flash_file_path = DEFAULT_FLASH_FILE_PATH;
	}

	rc = flash_mock_init_native(flash_in_ram, &mock_flash, FLASH_SIMULATOR_FLASH_SIZE,
				    &flash_fd, flash_file_path, FLASH_SIMULATOR_ERASE_VALUE,
				    flash_erase_at_start);

	if (rc < 0) {
		return -EIO;
	} else {
		return 0;
	}
}
#else
#if DT_NODE_HAS_PROP(DT_PARENT(SOC_NV_FLASH_NODE), memory_region)
/* Backing storage lives in a devicetree memory region: nothing to set up. */
static int flash_mock_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	return 0;
}
#else
/* Plain static RAM buffer: start from a fully erased image. */
static int flash_mock_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	memset(mock_flash, FLASH_SIMULATOR_ERASE_VALUE, ARRAY_SIZE(mock_flash));
	return 0;
}
#endif /* DT_NODE_HAS_PROP(DT_PARENT(SOC_NV_FLASH_NODE), memory_region) */
#endif /* CONFIG_ARCH_POSIX */
/* Device init: register the stats groups, then set up backing storage. */
static int flash_init(const struct device *dev)
{
	FLASH_SIM_STATS_INIT_AND_REG(flash_sim_stats, STATS_SIZE_32, "flash_sim_stats");
	FLASH_SIM_STATS_INIT_AND_REG(flash_sim_thresholds, STATS_SIZE_32,
				     "flash_sim_thresholds");
	return flash_mock_init(dev);
}

DEVICE_DT_INST_DEFINE(0, flash_init, NULL,
		      NULL, NULL, POST_KERNEL, CONFIG_FLASH_INIT_PRIORITY,
		      &flash_sim_api);
#ifdef CONFIG_ARCH_POSIX
/* Exit hook: flush/close (and optionally remove) the flash backing file. */
static void flash_native_cleanup(void)
{
	flash_mock_cleanup_native(flash_in_ram, flash_fd, mock_flash,
				  FLASH_SIMULATOR_FLASH_SIZE, flash_file_path,
				  flash_rm_at_exit);
}

/* Register the simulator's native_sim command-line options. */
static void flash_native_options(void)
{
	static struct args_struct_t flash_options[] = {
		{ .option = "flash",
		  .name = "path",
		  .type = 's',
		  .dest = (void *)&flash_file_path,
		  .descript = "Path to binary file to be used as flash, by default \""
			DEFAULT_FLASH_FILE_PATH "\""},
		{ .is_switch = true,
		  .option = "flash_erase",
		  .type = 'b',
		  .dest = (void *)&flash_erase_at_start,
		  .descript = "Erase the flash content at startup" },
		{ .is_switch = true,
		  .option = "flash_rm",
		  .type = 'b',
		  .dest = (void *)&flash_rm_at_exit,
		  .descript = "Remove the flash file when terminating the execution" },
		{ .is_switch = true,
		  .option = "flash_in_ram",
		  .type = 'b',
		  .dest = (void *)&flash_in_ram,
		  .descript = "Instead of a file, keep the file content just in RAM. If this is "
			"set, flash, flash_erase & flash_rm are ignored. The flash content"
			" is always erased at startup" },
		ARG_TABLE_ENDMARKER
	};

	native_add_command_line_opts(flash_options);
}

/* Parse options before boot; clean up the backing file on exit. */
NATIVE_TASK(flash_native_options, PRE_BOOT_1, 1);
NATIVE_TASK(flash_native_cleanup, ON_EXIT, 1);
#endif /* CONFIG_ARCH_POSIX */
/* Extension to generic flash driver API */
/* Expose the raw backing buffer, e.g. so tests can inspect flash content. */
void *z_impl_flash_simulator_get_memory(const struct device *dev,
					size_t *mock_size)
{
	ARG_UNUSED(dev);

	*mock_size = FLASH_SIMULATOR_FLASH_SIZE;
	return mock_flash;
}

#ifdef CONFIG_USERSPACE

#include <zephyr/internal/syscall_handler.h>

/* Syscall verifier: only accept a device bound to this driver's API. */
void *z_vrfy_flash_simulator_get_memory(const struct device *dev,
					size_t *mock_size)
{
	K_OOPS(K_SYSCALL_SPECIFIC_DRIVER(dev, K_OBJ_DRIVER_FLASH, &flash_sim_api));

	return z_impl_flash_simulator_get_memory(dev, mock_size);
}

#include <zephyr/syscalls/flash_simulator_get_memory_mrsh.c>

#endif /* CONFIG_USERSPACE */
``` | /content/code_sandbox/drivers/flash/flash_simulator.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,799 |
```c
/*
*
*/
#define DT_DRV_COMPAT cdns_nand
#include "socfpga_system_manager.h"
#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/kernel.h>
/* Check if reset property is defined */
#define CDNS_NAND_RESET_SUPPORT DT_ANY_INST_HAS_PROP_STATUS_OKAY(resets)
#if CDNS_NAND_RESET_SUPPORT
#include <zephyr/drivers/reset.h>
#endif
#include "flash_cadence_nand_ll.h"
#define DEV_CFG(_dev) ((const struct flash_cadence_nand_config *)(_dev)->config)
#define DEV_DATA(_dev) ((struct flash_cadence_nand_data *const)(_dev)->data)
#define FLASH_WRITE_SIZE DT_PROP(DT_INST(0, DT_DRV_COMPAT), block_size)
#ifdef CONFIG_BOARD_INTEL_SOCFPGA_AGILEX5_SOCDK
#define DFI_CFG_OFFSET 0xFC
/* To check the DFI register setting for NAND in the System Manager */
#define DFI_SEL_CHK (SOCFPGA_SYSMGR_REG_BASE + DFI_CFG_OFFSET)
#endif
LOG_MODULE_REGISTER(flash_cdns_nand, CONFIG_FLASH_LOG_LEVEL);
/* Per-instance mutable driver state. */
struct flash_cadence_nand_data {
	DEVICE_MMIO_NAMED_RAM(nand_reg);
	DEVICE_MMIO_NAMED_RAM(sdma);
	/* device info structure */
	struct cadence_nand_params params;
	/* Mutex to prevent multiple processes from accessing the same driver api */
	struct k_mutex nand_mutex;
#if CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
	/* Semaphore to send a signal from an interrupt handler to a thread */
	struct k_sem interrupt_sem;
#endif
};

/* Per-instance constant (ROM) configuration. */
struct flash_cadence_nand_config {
	DEVICE_MMIO_NAMED_ROM(nand_reg);
	DEVICE_MMIO_NAMED_ROM(sdma);
#if CDNS_NAND_RESET_SUPPORT
	/* Reset controller device configuration for NAND*/
	const struct reset_dt_spec reset;
	/* Reset controller device configuration for Combo Phy*/
	const struct reset_dt_spec combo_phy_reset;
#endif
#if CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
	/* Hook that connects and enables the controller IRQ. */
	void (*irq_config)(void);
#endif
};
/* Constant write/erase characteristics reported to the flash API. */
static const struct flash_parameters flash_cdns_parameters = {.write_block_size = FLASH_WRITE_SIZE,
							      .erase_value = 0xFF};

#if CONFIG_FLASH_PAGE_LAYOUT
/* Filled lazily from the probed device geometry.
 * Made static: these two symbols previously leaked into the global
 * namespace, unlike every other file-scope object in this driver.
 */
static struct flash_pages_layout flash_cdns_pages_layout;

/* flash_driver_api.page_layout: report one uniform page region. */
static void flash_cdns_page_layout(const struct device *nand_dev,
				   const struct flash_pages_layout **layout,
				   size_t *layout_size)
{
	struct flash_cadence_nand_data *const nand_data = DEV_DATA(nand_dev);
	struct cadence_nand_params *nand_param = &nand_data->params;

	flash_cdns_pages_layout.pages_count = nand_param->page_count;
	flash_cdns_pages_layout.pages_size = nand_param->page_size;
	*layout = &flash_cdns_pages_layout;
	*layout_size = 1;
}
#endif
/* flash_driver_api.erase: serialized through the driver mutex. */
static int flash_cdns_nand_erase(const struct device *nand_dev, off_t offset, size_t len)
{
	struct flash_cadence_nand_data *const dev_data = DEV_DATA(nand_dev);
	int rc;

	k_mutex_lock(&dev_data->nand_mutex, K_FOREVER);
	rc = cdns_nand_erase(&dev_data->params, offset, len);
	k_mutex_unlock(&dev_data->nand_mutex);

	return rc;
}
/* flash_driver_api.write: reject NULL data, then hand off to the LL layer
 * under the driver mutex.
 */
static int flash_cdns_nand_write(const struct device *nand_dev, off_t offset, const void *data,
				 size_t len)
{
	struct flash_cadence_nand_data *const dev_data = DEV_DATA(nand_dev);
	int rc;

	if (data == NULL) {
		LOG_ERR("Invalid input parameter for NAND Flash Write!");
		return -EINVAL;
	}

	k_mutex_lock(&dev_data->nand_mutex, K_FOREVER);
	rc = cdns_nand_write(&dev_data->params, data, offset, len);
	k_mutex_unlock(&dev_data->nand_mutex);

	return rc;
}
/* flash_driver_api.read: reject NULL buffer, then read via the LL layer
 * under the driver mutex.
 */
static int flash_cdns_nand_read(const struct device *nand_dev, off_t offset, void *data, size_t len)
{
	struct flash_cadence_nand_data *const dev_data = DEV_DATA(nand_dev);
	int rc;

	if (data == NULL) {
		LOG_ERR("Invalid input parameter for NAND Flash Read!");
		return -EINVAL;
	}

	k_mutex_lock(&dev_data->nand_mutex, K_FOREVER);
	rc = cdns_nand_read(&dev_data->params, data, offset, len);
	k_mutex_unlock(&dev_data->nand_mutex);

	return rc;
}
/* flash_driver_api.get_parameters: constant device characteristics. */
static const struct flash_parameters *flash_cdns_get_parameters(const struct device *nand_dev)
{
	ARG_UNUSED(nand_dev);

	return &flash_cdns_parameters;
}

/* Flash driver API vtable for the Cadence NAND controller. */
static const struct flash_driver_api flash_cdns_nand_api = {
	.erase = flash_cdns_nand_erase,
	.write = flash_cdns_nand_write,
	.read = flash_cdns_nand_read,
	.get_parameters = flash_cdns_get_parameters,
#ifdef CONFIG_FLASH_PAGE_LAYOUT
	.page_layout = flash_cdns_page_layout,
#endif
};
#if CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
/* ISR: let the LL layer handle/ack the event, then wake the waiting thread. */
static void cdns_nand_irq_handler(const struct device *nand_dev)
{
	struct flash_cadence_nand_data *const nand_data = DEV_DATA(nand_dev);
	struct cadence_nand_params *nand_param = &nand_data->params;

	cdns_nand_irq_handler_ll(nand_param);
	/* NOTE(review): signals the semaphore inside cadence_nand_params,
	 * not the interrupt_sem member of flash_cadence_nand_data — confirm
	 * the latter is actually used anywhere.
	 */
	k_sem_give(&nand_param->interrupt_sem_t);
}
#endif
/* Map MMIO, optionally toggle resets, set up locking/IRQ, then bring up
 * the NAND controller through the LL layer.
 */
static int flash_cdns_nand_init(const struct device *nand_dev)
{
	DEVICE_MMIO_NAMED_MAP(nand_dev, nand_reg, K_MEM_CACHE_NONE);
	DEVICE_MMIO_NAMED_MAP(nand_dev, sdma, K_MEM_CACHE_NONE);
	const struct flash_cadence_nand_config *nand_config = DEV_CFG(nand_dev);
	struct flash_cadence_nand_data *const nand_data = DEV_DATA(nand_dev);
	struct cadence_nand_params *nand_param = &nand_data->params;
	int ret;

#ifdef CONFIG_BOARD_INTEL_SOCFPGA_AGILEX5_SOCDK
	uint32_t status;

	/* The System Manager DFI mux must route to NAND on this board. */
	status = sys_read32(DFI_SEL_CHK);
	if ((status & 1) != 0) {
		LOG_ERR("DFI not configured for NAND Flash controller!!!");
		return -ENODEV;
	}
#endif
#if CDNS_NAND_RESET_SUPPORT
	/* Reset Combo phy and NAND only if reset controller driver is supported */
	if ((nand_config->combo_phy_reset.dev != NULL) && (nand_config->reset.dev != NULL)) {
		if (!device_is_ready(nand_config->reset.dev)) {
			LOG_ERR("Reset controller device not ready");
			return -ENODEV;
		}

		ret = reset_line_toggle(nand_config->combo_phy_reset.dev,
					nand_config->combo_phy_reset.id);
		if (ret != 0) {
			LOG_ERR("Combo phy reset failed");
			return ret;
		}

		ret = reset_line_toggle(nand_config->reset.dev, nand_config->reset.id);
		if (ret != 0) {
			LOG_ERR("NAND reset failed");
			return ret;
		}
	}
#endif
	/* Hand the mapped register/SDMA bases to the LL layer. */
	nand_param->nand_base = DEVICE_MMIO_NAMED_GET(nand_dev, nand_reg);
	nand_param->sdma_base = DEVICE_MMIO_NAMED_GET(nand_dev, sdma);

	ret = k_mutex_init(&nand_data->nand_mutex);
	if (ret != 0) {
		LOG_ERR("Mutex creation Failed");
		return ret;
	}

#if CONFIG_CDNS_NAND_INTERRUPT_SUPPORT
	if (nand_config->irq_config == NULL) {
		LOG_ERR("Interrupt function not initialized!!");
		return -EINVAL;
	}
	nand_config->irq_config();
	ret = k_sem_init(&nand_param->interrupt_sem_t, 0, 1);
	if (ret != 0) {
		LOG_ERR("Semaphore creation Failed");
		return ret;
	}
#endif
	/* Total pages across all LUNs; consumed by the page layout callback. */
	nand_param->page_count =
		(nand_param->npages_per_block * nand_param->nblocks_per_lun * nand_param->nluns);

	/* NAND Memory Controller init */
	ret = cdns_nand_init(nand_param);
	if (ret != 0) {
		LOG_ERR("NAND initialization Failed");
		return ret;
	}

	return 0;
}
/* Initializers for the two reset lines (index 0: NAND, index 1: combo phy). */
#define CDNS_NAND_RESET_SPEC_INIT(inst)                                                            \
	.reset = RESET_DT_SPEC_INST_GET_BY_IDX(inst, 0),                                           \
	.combo_phy_reset = RESET_DT_SPEC_INST_GET_BY_IDX(inst, 1),

/* Instantiate data, config, device and (optionally) the IRQ hookup for one
 * devicetree instance.
 */
#define CREATE_FLASH_CADENCE_NAND_DEVICE(inst)                                                     \
	IF_ENABLED(CONFIG_CDNS_NAND_INTERRUPT_SUPPORT,                                             \
		   (static void cdns_nand_irq_config_##inst(void);))                               \
	struct flash_cadence_nand_data flash_cadence_nand_data_##inst = {                          \
		.params = {                                                                        \
			.datarate_mode = DT_INST_PROP(inst, data_rate_mode),                       \
		}};                                                                                \
	const struct flash_cadence_nand_config flash_cadence_nand_config_##inst = {               \
		DEVICE_MMIO_NAMED_ROM_INIT_BY_NAME(nand_reg, DT_DRV_INST(inst)),                   \
		DEVICE_MMIO_NAMED_ROM_INIT_BY_NAME(sdma, DT_DRV_INST(inst)),                       \
		IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, resets), (CDNS_NAND_RESET_SPEC_INIT(inst))) \
		IF_ENABLED(CONFIG_CDNS_NAND_INTERRUPT_SUPPORT,                                     \
			   (.irq_config = cdns_nand_irq_config_##inst,))};                         \
	DEVICE_DT_INST_DEFINE(inst, flash_cdns_nand_init, NULL, &flash_cadence_nand_data_##inst,   \
			      &flash_cadence_nand_config_##inst, POST_KERNEL,                      \
			      CONFIG_FLASH_INIT_PRIORITY, &flash_cdns_nand_api);                   \
	IF_ENABLED(CONFIG_CDNS_NAND_INTERRUPT_SUPPORT,                                             \
		   (static void cdns_nand_irq_config_##inst(void)                                  \
		    {                                                                              \
			    IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority),           \
					cdns_nand_irq_handler, DEVICE_DT_INST_GET(inst), 0);       \
			    irq_enable(DT_INST_IRQN(inst));                                        \
		    }))

DT_INST_FOREACH_STATUS_OKAY(CREATE_FLASH_CADENCE_NAND_DEVICE)
``` | /content/code_sandbox/drivers/flash/flash_cadence_nand.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,250 |
```c
/*
*
*/
#define DT_DRV_COMPAT st_stm32h7_flash_controller
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/init.h>
#include <zephyr/sys/barrier.h>
#include <soc.h>
#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
#include <stm32h7rsxx_ll_bus.h>
#include <stm32h7rsxx_ll_utils.h>
#else
#include <stm32h7xx_ll_bus.h>
#include <stm32h7xx_ll_utils.h>
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
#include "flash_stm32.h"
#include "stm32_hsem.h"
#define LOG_DOMAIN flash_stm32h7
#define LOG_LEVEL CONFIG_FLASH_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_DOMAIN);
/* Let's wait for double the max erase time to be sure that the operation is
* completed.
*/
#define STM32H7_FLASH_TIMEOUT \
(2 * DT_PROP(DT_INST(0, st_stm32_nv_flash), max_erase_time))
#define STM32H7_M4_FLASH_SIZE DT_PROP_OR(DT_INST(0, st_stm32_nv_flash), bank2_flash_size, 0)
#ifdef CONFIG_CPU_CORTEX_M4
#if STM32H7_M4_FLASH_SIZE == 0
#error Flash driver on M4 requires the DT property bank2-flash-size
#else
#define REAL_FLASH_SIZE_KB (KB(STM32H7_M4_FLASH_SIZE * 2))
#endif
#else
#define REAL_FLASH_SIZE_KB KB(LL_GetFlashSize())
#endif
#define SECTOR_PER_BANK ((REAL_FLASH_SIZE_KB / FLASH_SECTOR_SIZE) / 2)
#if defined(DUAL_BANK)
#define STM32H7_SERIES_MAX_FLASH_KB KB(2048)
#define BANK2_OFFSET (STM32H7_SERIES_MAX_FLASH_KB / 2)
/* When flash is dual bank and flash size is smaller than Max flash size of
* the serie, there is a discontinuty between bank1 and bank2.
*/
#define DISCONTINUOUS_BANKS (REAL_FLASH_SIZE_KB < STM32H7_SERIES_MAX_FLASH_KB)
#endif
/* Identifies one erase sector plus the bank control/status registers that
 * drive operations on it.
 */
struct flash_stm32_sector_t {
	int sector_index;
	int bank;
	volatile uint32_t *cr;
	volatile uint32_t *sr;
};

#if defined(CONFIG_MULTITHREADING) || defined(CONFIG_STM32H7_DUAL_CORE)
/*
 * This is named flash_stm32_sem_take instead of flash_stm32_lock (and
 * similarly for flash_stm32_sem_give) to avoid confusion with locking
 * actual flash sectors.
 */
static inline void _flash_stm32_sem_take(const struct device *dev)
{
	k_sem_take(&FLASH_STM32_PRIV(dev)->sem, K_FOREVER);
	/* Also take the inter-core hardware semaphore on dual-core parts. */
	z_stm32_hsem_lock(CFG_HW_FLASH_SEMID, HSEM_LOCK_WAIT_FOREVER);
}

static inline void _flash_stm32_sem_give(const struct device *dev)
{
	z_stm32_hsem_unlock(CFG_HW_FLASH_SEMID);
	k_sem_give(&FLASH_STM32_PRIV(dev)->sem);
}

#define flash_stm32_sem_init(dev) k_sem_init(&FLASH_STM32_PRIV(dev)->sem, 1, 1)
#define flash_stm32_sem_take(dev) _flash_stm32_sem_take(dev)
#define flash_stm32_sem_give(dev) _flash_stm32_sem_give(dev)
#else
/* Single-thread, single-core build: locking is a no-op. */
#define flash_stm32_sem_init(dev)
#define flash_stm32_sem_take(dev)
#define flash_stm32_sem_give(dev)
#endif
/* Validate that [offset, offset + len) lies inside the flash and, for
 * writes, that the start is aligned on a full flashword (256 bits).
 */
bool flash_stm32_valid_range(const struct device *dev, off_t offset,
			     uint32_t len,
			     bool write)
{
#if defined(DUAL_BANK)
	if (DISCONTINUOUS_BANKS) {
		/*
		 * In case of bank1/2 discontinuity, the range should not
		 * start before bank2 and end beyond bank1 at the same time.
		 * Locations beyond bank2 are caught by flash_stm32_range_exists
		 */
		if ((offset < BANK2_OFFSET)
		    && (offset + len > REAL_FLASH_SIZE_KB / 2)) {
			LOG_ERR("Range ovelaps flash bank discontinuity");
			return false;
		}
	}
#endif

	if (write) {
		/* Programming granularity is one flashword. */
		if ((offset % (FLASH_NB_32BITWORD_IN_FLASHWORD * 4)) != 0) {
			LOG_ERR("Write offset not aligned on flashword length. "
				"Offset: 0x%lx, flashword length: %d",
				(unsigned long) offset, FLASH_NB_32BITWORD_IN_FLASHWORD * 4);
			return false;
		}
	}
	return flash_stm32_range_exists(dev, offset, len);
}
/* Read, log and clear per-bank error flags.
 *
 * @return 0 when no hard error is pending, -EIO otherwise. Single-bit ECC
 * corrections only produce a warning.
 */
static int flash_stm32_check_status(const struct device *dev)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	/* The hardware corrects single ECC errors and detects double
	 * ECC errors. Corrected data is returned for single ECC
	 * errors, so in this case we just log a warning.
	 */
#ifdef DUAL_BANK
	uint32_t const error_bank2 = (FLASH_FLAG_ALL_ERRORS_BANK2
				      & ~FLASH_FLAG_SNECCERR_BANK2);
#endif
	uint32_t sr;

#if defined(CONFIG_SOC_SERIES_STM32H7RSX)
	uint32_t const error_bank = (FLASH_FLAG_ECC_ERRORS
				     & ~FLASH_FLAG_SNECCERR
				     & ~FLASH_FLAG_DBECCERR);
	/* Read the Interrupt status flags. */
	sr = regs->ISR;
	if (sr & (FLASH_FLAG_SNECCERR)) {
		/* Fault address is reported in flashword units. */
		uint32_t word = regs->ECCSFADDR & FLASH_ECCSFADDR_SEC_FADD;

		LOG_WRN("Bank%d ECC error at 0x%08x", 1,
			word * 4 * FLASH_NB_32BITWORD_IN_FLASHWORD);
	}
	if (sr & (FLASH_FLAG_DBECCERR)) {
		uint32_t word = regs->ECCDFADDR & FLASH_ECCDFADDR_DED_FADD;

		LOG_WRN("Bank%d ECC error at 0x%08x", 1,
			word * 4 * FLASH_NB_32BITWORD_IN_FLASHWORD);
	}
	/* Clear the ECC flags (including FA) */
	regs->ICR = FLASH_FLAG_ECC_ERRORS;
	if (sr & error_bank) {
#else
	uint32_t const error_bank1 = (FLASH_FLAG_ALL_ERRORS_BANK1
				      & ~FLASH_FLAG_SNECCERR_BANK1);
	/* Read the status flags. */
	sr = regs->SR1;
	if (sr & (FLASH_FLAG_SNECCERR_BANK1|FLASH_FLAG_DBECCERR_BANK1)) {
		uint32_t word = regs->ECC_FA1 & FLASH_ECC_FA_FAIL_ECC_ADDR;

		LOG_WRN("Bank%d ECC error at 0x%08x", 1,
			word * 4 * FLASH_NB_32BITWORD_IN_FLASHWORD);
	}
	/* Clear the flags (including FA1R) */
	regs->CCR1 = FLASH_FLAG_ALL_BANK1;
	if (sr & error_bank1) {
#endif /* CONFIG_SOC_SERIES_STM32H7RSX */
		LOG_ERR("Status Bank%d: 0x%08x", 1, sr);
		return -EIO;
	}

#ifdef DUAL_BANK
	sr = regs->SR2;
	if (sr & (FLASH_FLAG_SNECCERR_BANK1|FLASH_FLAG_DBECCERR_BANK1)) {
		uint32_t word = regs->ECC_FA2 & FLASH_ECC_FA_FAIL_ECC_ADDR;

		LOG_WRN("Bank%d ECC error at 0x%08x", 2,
			word * 4 * FLASH_NB_32BITWORD_IN_FLASHWORD);
	}
	regs->CCR2 = FLASH_FLAG_ALL_BANK2;
	if (sr & error_bank2) {
		/* Sometimes the STRBERR is seen when writing to flash
		 * from M4 (upper 128KiB) with code running from lower
		 * 896KiB. Don't know why it happens, but technical
		 * reference manual (section 4.7.4) says application can
		 * ignore this error and continue with normal write. So
		 * check and return here if the error is STRBERR and clear
		 * the error by setting CCR2 bit.
		 */
		if (sr & FLASH_FLAG_STRBERR_BANK2) {
			regs->CCR2 |= FLASH_FLAG_STRBERR_BANK2;
			return 0;
		}
		LOG_ERR("Status Bank%d: 0x%08x", 2, sr);
		return -EIO;
	}
#endif

	return 0;
}
/* Busy-wait until no flash operation is queued on any bank (QW clear),
 * with a STM32H7_FLASH_TIMEOUT ms deadline. Also reports pending errors.
 */
int flash_stm32_wait_flash_idle(const struct device *dev)
{
	int64_t timeout_time = k_uptime_get() + STM32H7_FLASH_TIMEOUT;
	int rc;

	rc = flash_stm32_check_status(dev);
	if (rc < 0) {
		return -EIO;
	}
#ifdef DUAL_BANK
	while ((FLASH_STM32_REGS(dev)->SR1 & FLASH_SR_QW)
	       || (FLASH_STM32_REGS(dev)->SR2 & FLASH_SR_QW))
#else
	while (FLASH_STM32_REGS(dev)->SR1 & FLASH_SR_QW)
#endif
	{
		if (k_uptime_get() > timeout_time) {
			LOG_ERR("Timeout! val: %d", STM32H7_FLASH_TIMEOUT);
			return -EIO;
		}
	}

	return 0;
}
/* Map a byte offset to its sector descriptor, accounting for bank
 * swapping and the bank1/bank2 address discontinuity on dual-bank parts.
 * bank == 0 in the result means the offset does not exist.
 */
static struct flash_stm32_sector_t get_sector(const struct device *dev,
					      off_t offset)
{
	struct flash_stm32_sector_t sector;
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);

#ifdef DUAL_BANK
	/* Work in absolute (in-flash) terms using the low 24 address bits. */
	off_t temp_offset = offset + (CONFIG_FLASH_BASE_ADDRESS & 0xffffff);
	bool bank_swap;
	/* Check whether bank1/2 are swapped */
	bank_swap = (READ_BIT(FLASH->OPTCR, FLASH_OPTCR_SWAP_BANK)
		     == FLASH_OPTCR_SWAP_BANK);
	sector.sector_index = offset / FLASH_SECTOR_SIZE;
	if ((temp_offset < (REAL_FLASH_SIZE_KB / 2)) && !bank_swap) {
		sector.bank = 1;
		sector.cr = &regs->CR1;
		sector.sr = &regs->SR1;
	} else if ((temp_offset >= BANK2_OFFSET) && bank_swap) {
		/* Swapped: upper addresses are physically bank1. */
		sector.sector_index -= BANK2_OFFSET / FLASH_SECTOR_SIZE;
		sector.bank = 1;
		sector.cr = &regs->CR2;
		sector.sr = &regs->SR2;
	} else if ((temp_offset < (REAL_FLASH_SIZE_KB / 2)) && bank_swap) {
		sector.bank = 2;
		sector.cr = &regs->CR1;
		sector.sr = &regs->SR1;
	} else if ((temp_offset >= BANK2_OFFSET) && !bank_swap) {
		sector.sector_index -= BANK2_OFFSET / FLASH_SECTOR_SIZE;
		sector.bank = 2;
		sector.cr = &regs->CR2;
		sector.sr = &regs->SR2;
	} else {
		/* Offset falls into the discontinuity gap: invalid. */
		sector.sector_index = 0;
		sector.bank = 0;
		sector.cr = NULL;
		sector.sr = NULL;
	}
#else
	if (offset < REAL_FLASH_SIZE_KB) {
		sector.sector_index = offset / FLASH_SECTOR_SIZE;
		sector.bank = 1;
		sector.cr = &regs->CR1;
		sector.sr = &regs->SR1;
	} else {
		sector.sector_index = 0;
		sector.bank = 0;
		sector.cr = NULL;
		sector.sr = NULL;
	}
#endif

	return sector;
}
/* Erase the single sector containing @p offset: select the sector in the
 * bank CR, start the erase, and wait for completion.
 */
static int erase_sector(const struct device *dev, int offset)
{
	int rc;
	struct flash_stm32_sector_t sector = get_sector(dev, offset);

	if (sector.bank == 0) {
		LOG_ERR("Offset %ld does not exist", (long) offset);
		return -EINVAL;
	}

	/* if the control register is locked, do not fail silently */
	if (*(sector.cr) & FLASH_CR_LOCK) {
		return -EIO;
	}

	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Select sector-erase mode and the target sector number. */
	*(sector.cr) &= ~FLASH_CR_SNB;
	*(sector.cr) |= (FLASH_CR_SER
			 | ((sector.sector_index << FLASH_CR_SNB_Pos) & FLASH_CR_SNB));
	*(sector.cr) |= FLASH_CR_START;
	/* flush the register write */
	barrier_dsync_fence_full();

	rc = flash_stm32_wait_flash_idle(dev);
	/* Deselect erase mode regardless of the outcome. */
	*(sector.cr) &= ~(FLASH_CR_SER | FLASH_CR_SNB);

	return rc;
}
/* Erase every sector overlapped by [offset, offset + len); stop at the
 * first failure and return its error code.
 */
int flash_stm32_block_erase_loop(const struct device *dev,
				 unsigned int offset,
				 unsigned int len)
{
	unsigned int addr = offset;
	int rc = 0;

	while (addr <= offset + len - 1) {
		rc = erase_sector(dev, addr);
		if (rc < 0) {
			break;
		}
		addr += FLASH_SECTOR_SIZE;
	}

	return rc;
}
/* Max time (ms) to wait for the bank write queue (QW) to drain; was a
 * magic 100 duplicated in the deadline and the log message.
 */
#define STM32H7_QW_TIMEOUT_MS 100

/* Busy-wait until the bank's write queue is empty, or time out. */
static int wait_write_queue(const struct flash_stm32_sector_t *sector)
{
	int64_t timeout_time = k_uptime_get() + STM32H7_QW_TIMEOUT_MS;

	while (*(sector->sr) & FLASH_SR_QW) {
		if (k_uptime_get() > timeout_time) {
			LOG_ERR("Timeout! val: %d", STM32H7_QW_TIMEOUT_MS);
			return -EIO;
		}
	}

	return 0;
}
/* Program @p n double-words starting at @p offset.
 *
 * The destination must be erased. Runs with PG set for the whole burst and
 * always clears PG before returning.
 *
 * @return 0 on success; -EINVAL for a nonexistent offset; -EIO when the
 * bank is locked, the target is not erased, or the hardware times out.
 */
static int write_ndwords(const struct device *dev,
			 off_t offset, const uint64_t *data,
			 uint8_t n)
{
	volatile uint64_t *flash = (uint64_t *)(offset
						+ FLASH_STM32_BASE_ADDRESS);
	int rc;
	int i;
	struct flash_stm32_sector_t sector = get_sector(dev, offset);

	if (sector.bank == 0) {
		LOG_ERR("Offset %ld does not exist", (long) offset);
		return -EINVAL;
	}

	/* if the control register is locked, do not fail silently */
	if (*(sector.cr) & FLASH_CR_LOCK) {
		return -EIO;
	}

	/* Check that no Flash main memory operation is ongoing */
	rc = flash_stm32_wait_flash_idle(dev);
	if (rc < 0) {
		return rc;
	}

	/* Check if 256 bits location is erased */
	for (i = 0; i < n; ++i) {
		if (flash[i] != 0xFFFFFFFFFFFFFFFFUL) {
			return -EIO;
		}
	}

	/* Set the PG bit */
	*(sector.cr) |= FLASH_CR_PG;

	/* Flush the register write */
	barrier_dsync_fence_full();

	/* Perform the data write operation at the desired memory address */
	for (i = 0; i < n; ++i) {
		/* Source dword may be unaligned, so take extra care when dereferencing it */
		flash[i] = UNALIGNED_GET(data + i);

		/* Flush the data write */
		barrier_dsync_fence_full();

		/* Fix: propagate a write-queue timeout instead of silently
		 * discarding the return value as the original code did.
		 */
		rc = wait_write_queue(&sector);
		if (rc < 0) {
			break;
		}
	}

	if (rc == 0) {
		/* Wait until the BSY bit is cleared */
		rc = flash_stm32_wait_flash_idle(dev);
	}

	/* Clear the PG bit */
	*(sector.cr) &= (~FLASH_CR_PG);

	return rc;
}
/* Program an arbitrary-length buffer: full flashwords first, then an
 * 0xFF-padded tail flashword for any remainder.
 */
int flash_stm32_write_range(const struct device *dev, unsigned int offset,
			    const void *data, unsigned int len)
{
	const uint8_t ndwords = FLASH_NB_32BITWORD_IN_FLASHWORD / 2;
	const uint8_t nbytes = FLASH_NB_32BITWORD_IN_FLASHWORD * 4;
	uint8_t tail[nbytes];
	int rc = 0;
	int i, j;

	/* Whole flashwords straight from the caller's buffer. */
	i = 0;
	while (i + nbytes <= len) {
		rc = write_ndwords(dev, offset,
				   (const uint64_t *) data + (i >> 3),
				   ndwords);
		if (rc < 0) {
			return rc;
		}
		i += nbytes;
		offset += nbytes;
	}

	/* Handle the remaining bytes if length is not aligned on
	 * FLASH_NB_32BITWORD_IN_FLASHWORD
	 */
	if (i < len) {
		/* Pad with the erase value so untouched bytes stay erased. */
		memset(tail, 0xff, sizeof(tail));
		for (j = 0; j < len - i; ++j) {
			tail[j] = ((uint8_t *)data)[i + j];
		}
		rc = write_ndwords(dev, offset,
				   (const uint64_t *)tail,
				   ndwords);
		if (rc < 0) {
			return rc;
		}
	}

	return rc;
}
/* Lock (enable=true) or unlock (enable=false) the bank control registers. */
static int flash_stm32h7_write_protection(const struct device *dev, bool enable)
{
	FLASH_TypeDef *regs = FLASH_STM32_REGS(dev);
	int rc = 0;

	if (enable) {
		/* Let any pending operation finish before re-locking. */
		rc = flash_stm32_wait_flash_idle(dev);
		if (rc) {
			return rc;
		}
		regs->CR1 |= FLASH_CR_LOCK;
#ifdef DUAL_BANK
		regs->CR2 |= FLASH_CR_LOCK;
#endif
		LOG_DBG("Enable write protection");
	} else {
		/* Unlock with the key sequence, only when currently locked. */
		if (regs->CR1 & FLASH_CR_LOCK) {
			regs->KEYR1 = FLASH_KEY1;
			regs->KEYR1 = FLASH_KEY2;
		}
#ifdef DUAL_BANK
		if (regs->CR2 & FLASH_CR_LOCK) {
			regs->KEYR2 = FLASH_KEY1;
			regs->KEYR2 = FLASH_KEY2;
		}
#endif
		LOG_DBG("Disable write protection");
	}

	return rc;
}
#ifdef CONFIG_CPU_CORTEX_M7
/* Invalidate the D-cache lines covering a flash range so subsequent reads
 * observe freshly programmed/erased content. No-op when the cache is off.
 */
static void flash_stm32h7_flush_caches(const struct device *dev,
				       off_t offset, size_t len)
{
	ARG_UNUSED(dev);

	if (!(SCB->CCR & SCB_CCR_DC_Msk)) {
		return; /* Cache not enabled */
	}

	SCB_InvalidateDCache_by_Addr((uint32_t *)(FLASH_STM32_BASE_ADDRESS
						  + offset), len);
}
#endif /* CONFIG_CPU_CORTEX_M7 */
/* flash_driver_api.erase: validate, unlock, erase the range sector by
 * sector, flush caches (M7), and re-lock — all under the driver lock.
 */
static int flash_stm32h7_erase(const struct device *dev, off_t offset,
			       size_t len)
{
	int rc, rc2;

#ifdef CONFIG_CPU_CORTEX_M7
	/* Flush whole sectors */
	off_t flush_offset = ROUND_DOWN(offset, FLASH_SECTOR_SIZE);
	size_t flush_len = ROUND_UP(offset + len - 1, FLASH_SECTOR_SIZE)
			   - flush_offset;
#endif /* CONFIG_CPU_CORTEX_M7 */

	if (!flash_stm32_valid_range(dev, offset, len, true)) {
		LOG_ERR("Erase range invalid. Offset: %ld, len: %zu",
			(long) offset, len);
		return -EINVAL;
	}

	if (!len) {
		return 0;
	}

	flash_stm32_sem_take(dev);

	LOG_DBG("Erase offset: %ld, len: %zu", (long) offset, len);

	rc = flash_stm32h7_write_protection(dev, false);
	if (rc) {
		goto done;
	}

	rc = flash_stm32_block_erase_loop(dev, offset, len);

#ifdef CONFIG_CPU_CORTEX_M7
	/* Flush cache on all sectors affected by the erase */
	flash_stm32h7_flush_caches(dev, flush_offset, flush_len);
#elif CONFIG_CPU_CORTEX_M4
	if (LL_AHB1_GRP1_IsEnabledClock(LL_AHB1_GRP1_PERIPH_ART)
	    && LL_ART_IsEnabled()) {
		LOG_ERR("Cortex M4: ART enabled not supported by flash driver");
	}
#endif /* CONFIG_CPU_CORTEX_M7 */
done:
	/* Always re-lock; preserve the first error if there was one. */
	rc2 = flash_stm32h7_write_protection(dev, true);
	if (!rc) {
		rc = rc2;
	}

	flash_stm32_sem_give(dev);

	return rc;
}
/* flash_driver_api.write: validate, unlock, program, re-lock — all under
 * the driver lock. The first error encountered wins.
 */
static int flash_stm32h7_write(const struct device *dev, off_t offset,
			       const void *data, size_t len)
{
	int rc, rc2;

	if (!flash_stm32_valid_range(dev, offset, len, true)) {
		LOG_ERR("Write range invalid. Offset: %ld, len: %zu",
			(long) offset, len);
		return -EINVAL;
	}
	if (!len) {
		return 0;
	}

	flash_stm32_sem_take(dev);

	LOG_DBG("Write offset: %ld, len: %zu", (long) offset, len);

	rc = flash_stm32h7_write_protection(dev, false);
	if (!rc) {
		rc = flash_stm32_write_range(dev, offset, data, len);
	}

	rc2 = flash_stm32h7_write_protection(dev, true);
	if (!rc) {
		rc = rc2;
	}

	flash_stm32_sem_give(dev);

	return rc;
}
/* flash_driver_api.read: memcpy from the memory-mapped flash with bus
 * faults masked so a double ECC error becomes an error code, not a fault.
 */
static int flash_stm32h7_read(const struct device *dev, off_t offset,
			      void *data,
			      size_t len)
{
	if (!flash_stm32_valid_range(dev, offset, len, false)) {
		LOG_ERR("Read range invalid. Offset: %ld, len: %zu",
			(long) offset, len);
		return -EINVAL;
	}

	if (!len) {
		return 0;
	}

	LOG_DBG("Read offset: %ld, len: %zu", (long) offset, len);

	/* During the read we mask bus errors and only allow NMI.
	 *
	 * If the flash has a double ECC error then there is normally
	 * a bus fault, but we want to return an error code instead.
	 */
	unsigned int irq_lock_key = irq_lock();

	__set_FAULTMASK(1);
	SCB->CCR |= SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	memcpy(data, (uint8_t *) FLASH_STM32_BASE_ADDRESS + offset, len);

	/* Restore normal fault handling before dropping the IRQ lock. */
	__set_FAULTMASK(0);
	SCB->CCR &= ~SCB_CCR_BFHFNMIGN_Msk;
	barrier_dsync_fence_full();
	barrier_isync_fence_full();
	irq_unlock(irq_lock_key);

	/* Surface any ECC error the copy may have triggered. */
	return flash_stm32_check_status(dev);
}
/* Constant write/erase characteristics reported to the flash API. */
static const struct flash_parameters flash_stm32h7_parameters = {
	.write_block_size = FLASH_STM32_WRITE_BLOCK_SIZE,
	.erase_value = 0xff,
};

/* flash_driver_api.get_parameters implementation. */
static const struct flash_parameters *
flash_stm32h7_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_stm32h7_parameters;
}
/* flash_driver_api.page_layout: built lazily on first call. On dual-bank
 * parts with a size gap, a dummy middle region models the discontinuity.
 */
void flash_stm32_page_layout(const struct device *dev,
			     const struct flash_pages_layout **layout,
			     size_t *layout_size)
{
	ARG_UNUSED(dev);

#if defined(DUAL_BANK)
	static struct flash_pages_layout stm32h7_flash_layout[3];

	if (DISCONTINUOUS_BANKS) {
		/* pages_count == 0 means "not initialized yet". */
		if (stm32h7_flash_layout[0].pages_count == 0) {
			/* Bank1 */
			stm32h7_flash_layout[0].pages_count = SECTOR_PER_BANK;
			stm32h7_flash_layout[0].pages_size = FLASH_SECTOR_SIZE;
			/*
			 * Dummy page corresponding to discontinuity
			 * between bank1/2
			 */
			stm32h7_flash_layout[1].pages_count = 1;
			stm32h7_flash_layout[1].pages_size = BANK2_OFFSET
					- (SECTOR_PER_BANK * FLASH_SECTOR_SIZE);
			/* Bank2 */
			stm32h7_flash_layout[2].pages_count = SECTOR_PER_BANK;
			stm32h7_flash_layout[2].pages_size = FLASH_SECTOR_SIZE;
		}
		*layout_size = ARRAY_SIZE(stm32h7_flash_layout);
	} else {
		/* Contiguous flash: expose it as one uniform region. */
		if (stm32h7_flash_layout[0].pages_count == 0) {
			stm32h7_flash_layout[0].pages_count =
				REAL_FLASH_SIZE_KB / FLASH_SECTOR_SIZE;
			stm32h7_flash_layout[0].pages_size = FLASH_SECTOR_SIZE;
		}
		*layout_size = 1;
	}
#else
	static struct flash_pages_layout stm32h7_flash_layout[1];

	if (stm32h7_flash_layout[0].pages_count == 0) {
		stm32h7_flash_layout[0].pages_count =
			REAL_FLASH_SIZE_KB / FLASH_SECTOR_SIZE;
		stm32h7_flash_layout[0].pages_size = FLASH_SECTOR_SIZE;
	}
	*layout_size = ARRAY_SIZE(stm32h7_flash_layout);
#endif
	*layout = stm32h7_flash_layout;
}
/* Driver private data: register block and clock gate from devicetree. */
static struct flash_stm32_priv flash_data = {
	.regs = (FLASH_TypeDef *) DT_INST_REG_ADDR(0),
	.pclken = { .bus = DT_INST_CLOCKS_CELL(0, bus),
		    .enr = DT_INST_CLOCKS_CELL(0, bits)},
};

/* Flash driver API vtable for the STM32H7 embedded flash. */
static const struct flash_driver_api flash_stm32h7_api = {
	.erase = flash_stm32h7_erase,
	.write = flash_stm32h7_write,
	.read = flash_stm32h7_read,
	.get_parameters = flash_stm32h7_get_parameters,
#ifdef CONFIG_FLASH_PAGE_LAYOUT
	.page_layout = flash_stm32_page_layout,
#endif
};
/* Device init: enable the flash interface clock, set up locking, and
 * leave the banks unlocked for operation.
 */
static int stm32h7_flash_init(const struct device *dev)
{
	struct flash_stm32_priv *p = FLASH_STM32_PRIV(dev);
	const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);

	if (!device_is_ready(clk)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	/* enable clock */
	if (clock_control_on(clk, (clock_control_subsys_t)&p->pclken) != 0) {
		LOG_ERR("Failed to enable clock");
		return -EIO;
	}

	flash_stm32_sem_init(dev);

	LOG_DBG("Flash initialized. BS: %zu",
		flash_stm32h7_parameters.write_block_size);

#if ((CONFIG_FLASH_LOG_LEVEL >= LOG_LEVEL_DBG) && CONFIG_FLASH_PAGE_LAYOUT)
	/* Dump the page layout for debugging. */
	const struct flash_pages_layout *layout;
	size_t layout_size;

	flash_stm32_page_layout(dev, &layout, &layout_size);
	for (size_t i = 0; i < layout_size; i++) {
		LOG_DBG("Block %zu: bs: %zu count: %zu", i,
			layout[i].pages_size, layout[i].pages_count);
	}
#endif

	return flash_stm32h7_write_protection(dev, false);
}

DEVICE_DT_INST_DEFINE(0, stm32h7_flash_init, NULL,
		      &flash_data, NULL, POST_KERNEL,
		      CONFIG_FLASH_INIT_PRIORITY, &flash_stm32h7_api);
``` | /content/code_sandbox/drivers/flash/flash_stm32h7x.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,843 |
```c
/*
*
*/
#include <zephyr/drivers/flash.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/pm/device.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_flash_at45, CONFIG_FLASH_LOG_LEVEL);
#define DT_DRV_COMPAT atmel_at45
/* AT45 commands used by this driver: */
/* - Continuous Array Read (Low Power Mode) */
#define CMD_READ 0x01
/* - Main Memory Byte/Page Program through Buffer 1 without Built-In Erase */
#define CMD_WRITE 0x02
/* - Read-Modify-Write */
#define CMD_MODIFY 0x58
/* - Manufacturer and Device ID Read */
#define CMD_READ_ID 0x9F
/* - Status Register Read */
#define CMD_READ_STATUS 0xD7
/* - Chip Erase */
#define CMD_CHIP_ERASE { 0xC7, 0x94, 0x80, 0x9A }
/* - Sector Erase */
#define CMD_SECTOR_ERASE 0x7C
/* - Block Erase */
#define CMD_BLOCK_ERASE 0x50
/* - Page Erase */
#define CMD_PAGE_ERASE 0x81
/* - Deep Power-Down */
#define CMD_ENTER_DPD 0xB9
/* - Resume from Deep Power-Down */
#define CMD_EXIT_DPD 0xAB
/* - Ultra-Deep Power-Down */
#define CMD_ENTER_UDPD 0x79
/* - Buffer and Page Size Configuration, "Power of 2" binary page size */
#define CMD_BINARY_PAGE_SIZE { 0x3D, 0x2A, 0x80, 0xA6 }
#define STATUS_REG_LSB_RDY_BUSY_BIT 0x80
#define STATUS_REG_LSB_PAGE_SIZE_BIT 0x01
/* Expands to "<has wp-gpios> ||" for each enabled instance; combined with
 * the trailing 0 below to form a compile-time "any instance has WP" flag.
 */
#define INST_HAS_WP_OR(inst) DT_INST_NODE_HAS_PROP(inst, wp_gpios) ||
/* Nonzero if at least one enabled instance defines wp-gpios. */
#define ANY_INST_HAS_WP_GPIOS DT_INST_FOREACH_STATUS_OKAY(INST_HAS_WP_OR) 0
/* Same pattern for reset-gpios. */
#define INST_HAS_RESET_OR(inst) DT_INST_NODE_HAS_PROP(inst, reset_gpios) ||
#define ANY_INST_HAS_RESET_GPIOS DT_INST_FOREACH_STATUS_OKAY(INST_HAS_RESET_OR) 0
/* Declares a spi_buf_set named _name that wraps the given spi_buf array. */
#define DEF_BUF_SET(_name, _buf_array) \
	const struct spi_buf_set _name = { \
		.buffers = _buf_array, \
		.count = ARRAY_SIZE(_buf_array), \
	}
/* Per-instance runtime state. */
struct spi_flash_at45_data {
	struct k_sem lock;	/* serializes access to the chip */
};
/* Per-instance, read-only configuration built from devicetree. */
struct spi_flash_at45_config {
	struct spi_dt_spec bus;	/* SPI bus and chip-select */
#if ANY_INST_HAS_RESET_GPIOS
	const struct gpio_dt_spec *reset;	/* reset pin, or NULL */
#endif
#if ANY_INST_HAS_WP_GPIOS
	const struct gpio_dt_spec *wp;	/* write protect pin, or NULL */
#endif
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	struct flash_pages_layout pages_layout;
#endif
	uint32_t chip_size;	/* total capacity in bytes */
	uint32_t sector_size;
	uint32_t sector_0a_size;	/* bytes in sector 0a; 0 if not split */
	uint16_t block_size;
	uint16_t page_size;
	bool no_chip_erase;	/* when set, Chip Erase is never issued */
	bool no_sector_erase;	/* when set, Sector Erase is never issued */
	uint16_t t_enter_dpd; /* in microseconds */
	uint16_t t_exit_dpd;  /* in microseconds */
	bool use_udpd;	/* suspend via Ultra-Deep instead of Deep Power-Down */
	uint8_t jedec_id[3];	/* expected manufacturer/device ID */
};
/* Fixed flash properties: byte-granular writes, erased bytes read 0xff. */
static const struct flash_parameters flash_at45_parameters = {
	.write_block_size = 1,
	.erase_value = 0xff,
};
/* Block until exclusive access to the flash chip is granted. */
static void acquire(const struct device *dev)
{
	struct spi_flash_at45_data *dev_data = dev->data;

	k_sem_take(&dev_data->lock, K_FOREVER);
}
/* Give back exclusive access to the flash chip. */
static void release(const struct device *dev)
{
	struct spi_flash_at45_data *dev_data = dev->data;

	k_sem_give(&dev_data->lock);
}
/*
 * Read the 3-byte JEDEC ID from the chip and compare it with the value
 * supplied in devicetree. Returns 0 on match, -EIO on SPI failure,
 * -ENODEV on mismatch.
 */
static int check_jedec_id(const struct device *dev)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err;
	uint8_t const *expected_id = cfg->jedec_id;
	uint8_t read_id[sizeof(cfg->jedec_id)];
	const uint8_t opcode = CMD_READ_ID;
	const struct spi_buf tx_buf[] = {
		{
			.buf = (void *)&opcode,
			.len = sizeof(opcode),
		}
	};
	const struct spi_buf rx_buf[] = {
		{
			/* Skip the byte clocked in while the opcode is sent. */
			.len = sizeof(opcode),
		},
		{
			.buf = read_id,
			.len = sizeof(read_id),
		}
	};
	DEF_BUF_SET(tx_buf_set, tx_buf);
	DEF_BUF_SET(rx_buf_set, rx_buf);

	err = spi_transceive_dt(&cfg->bus, &tx_buf_set, &rx_buf_set);
	if (err != 0) {
		LOG_ERR("SPI transaction failed with code: %d/%u",
			err, __LINE__);
		return -EIO;
	}

	if (memcmp(expected_id, read_id, sizeof(read_id)) != 0) {
		LOG_ERR("Wrong JEDEC ID: %02X %02X %02X, "
			"expected: %02X %02X %02X",
			read_id[0], read_id[1], read_id[2],
			expected_id[0], expected_id[1], expected_id[2]);
		return -ENODEV;
	}

	return 0;
}
/*
 * Reads 2-byte Status Register:
 * - Byte 0 to LSB
 * - Byte 1 to MSB
 * of the pointed parameter.
 *
 * The raw value is converted from little-endian wire order to CPU byte
 * order before returning. Returns 0 on success, -EIO on SPI failure.
 */
static int read_status_register(const struct device *dev, uint16_t *status)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err;
	const uint8_t opcode = CMD_READ_STATUS;
	const struct spi_buf tx_buf[] = {
		{
			.buf = (void *)&opcode,
			.len = sizeof(opcode),
		}
	};
	const struct spi_buf rx_buf[] = {
		{
			/* Skip the byte clocked in while the opcode is sent. */
			.len = sizeof(opcode),
		},
		{
			.buf = status,
			.len = sizeof(uint16_t),
		}
	};
	DEF_BUF_SET(tx_buf_set, tx_buf);
	DEF_BUF_SET(rx_buf_set, rx_buf);

	err = spi_transceive_dt(&cfg->bus, &tx_buf_set, &rx_buf_set);
	if (err != 0) {
		LOG_ERR("SPI transaction failed with code: %d/%u",
			err, __LINE__);
		return -EIO;
	}

	*status = sys_le16_to_cpu(*status);

	return 0;
}
/*
 * Poll the status register until the RDY/BUSY bit reports that the chip
 * has finished its internal operation, or until a read fails. Returns 0
 * once ready, otherwise the error code from the failed status read.
 */
static int wait_until_ready(const struct device *dev)
{
	uint16_t status;
	int err;

	for (;;) {
		err = read_status_register(dev, &status);
		if (err != 0) {
			break;
		}
		if (status & STATUS_REG_LSB_RDY_BUSY_BIT) {
			break;
		}
	}

	return err;
}
/*
 * Switch the chip to the "power of 2" binary page size if the status
 * register shows it is not configured that way already. Returns 0 on
 * success (including the nothing-to-do case), negative errno otherwise.
 */
static int configure_page_size(const struct device *dev)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err;
	uint16_t status;
	uint8_t const conf_binary_page_size[] = CMD_BINARY_PAGE_SIZE;
	const struct spi_buf tx_buf[] = {
		{
			.buf = (void *)conf_binary_page_size,
			.len = sizeof(conf_binary_page_size),
		}
	};
	DEF_BUF_SET(tx_buf_set, tx_buf);

	err = read_status_register(dev, &status);
	if (err != 0) {
		return err;
	}

	/* If the device is already configured for "power of 2" binary
	 * page size, there is nothing more to do.
	 */
	if (status & STATUS_REG_LSB_PAGE_SIZE_BIT) {
		return 0;
	}

	err = spi_write_dt(&cfg->bus, &tx_buf_set);
	if (err != 0) {
		LOG_ERR("SPI transaction failed with code: %d/%u",
			err, __LINE__);
	} else {
		err = wait_until_ready(dev);
	}

	return (err != 0) ? -EIO : 0;
}
/*
 * Check that the [addr, addr + size) range lies entirely within the
 * chip. The naive form `(addr + size) <= chip_size` can wrap around
 * when `size` is close to SIZE_MAX (addr is converted to an unsigned
 * type for the addition), wrongly accepting an out-of-range request,
 * so the comparison is rearranged to be overflow-free.
 */
static bool is_valid_request(off_t addr, size_t size, size_t chip_size)
{
	if (addr < 0) {
		return false;
	}

	/* Equivalent to addr + size <= chip_size, without wraparound. */
	return (size <= chip_size) && ((size_t)addr <= (chip_size - size));
}
/*
 * flash_api.read callback: reads `len` bytes starting at `offset` with
 * a single Continuous Array Read transfer. Returns 0 on success,
 * -ENODEV if the range falls outside the chip, -EIO on SPI failure.
 */
static int spi_flash_at45_read(const struct device *dev, off_t offset,
			       void *data, size_t len)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err;

	if (!is_valid_request(offset, len, cfg->chip_size)) {
		return -ENODEV;
	}

	/* Opcode followed by the 24-bit start address, MSB first. */
	uint8_t const op_and_addr[] = {
		CMD_READ,
		(offset >> 16) & 0xFF,
		(offset >> 8) & 0xFF,
		(offset >> 0) & 0xFF,
	};
	const struct spi_buf tx_buf[] = {
		{
			.buf = (void *)&op_and_addr,
			.len = sizeof(op_and_addr),
		}
	};
	const struct spi_buf rx_buf[] = {
		{
			/* Discard bytes clocked in during the command phase. */
			.len = sizeof(op_and_addr),
		},
		{
			.buf = data,
			.len = len,
		}
	};
	DEF_BUF_SET(tx_buf_set, tx_buf);
	DEF_BUF_SET(rx_buf_set, rx_buf);

	acquire(dev);
	err = spi_transceive_dt(&cfg->bus, &tx_buf_set, &rx_buf_set);
	release(dev);

	if (err != 0) {
		LOG_ERR("SPI transaction failed with code: %d/%u",
			err, __LINE__);
	}

	return (err != 0) ? -EIO : 0;
}
/*
 * Program up to one page at `offset` and wait for completion. Depending
 * on the Kconfig choice, either a plain write through Buffer 1 (no
 * built-in erase) or a Read-Modify-Write command is used.
 * Returns 0 on success, -EIO otherwise.
 */
static int perform_write(const struct device *dev, off_t offset,
			 const void *data, size_t len)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err;
	/* Opcode followed by the 24-bit target address, MSB first. */
	uint8_t const op_and_addr[] = {
		IS_ENABLED(CONFIG_SPI_FLASH_AT45_USE_READ_MODIFY_WRITE)
			? CMD_MODIFY
			: CMD_WRITE,
		(offset >> 16) & 0xFF,
		(offset >> 8) & 0xFF,
		(offset >> 0) & 0xFF,
	};
	const struct spi_buf tx_buf[] = {
		{
			.buf = (void *)&op_and_addr,
			.len = sizeof(op_and_addr),
		},
		{
			.buf = (void *)data,
			.len = len,
		}
	};
	DEF_BUF_SET(tx_buf_set, tx_buf);

	err = spi_write_dt(&cfg->bus, &tx_buf_set);
	if (err != 0) {
		LOG_ERR("SPI transaction failed with code: %d/%u",
			err, __LINE__);
	} else {
		err = wait_until_ready(dev);
	}

	return (err != 0) ? -EIO : 0;
}
/*
 * flash_api.write callback. The request is carved into chunks that
 * never cross a page boundary (page_size is a power of 2 - enforced at
 * init by configure_page_size), since each program command targets a
 * single page. The WP pin, if wired, is released for the duration of
 * the operation. Returns 0 on success, -ENODEV for an out-of-range
 * request, or the error of the first failing chunk.
 */
static int spi_flash_at45_write(const struct device *dev, off_t offset,
				const void *data, size_t len)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err = 0;

	if (!is_valid_request(offset, len, cfg->chip_size)) {
		return -ENODEV;
	}

	acquire(dev);

#if ANY_INST_HAS_WP_GPIOS
	if (cfg->wp) {
		gpio_pin_set_dt(cfg->wp, 0);
	}
#endif

	while (len) {
		size_t chunk_len = len;
		off_t current_page_start =
			offset - (offset & (cfg->page_size - 1));
		off_t current_page_end = current_page_start + cfg->page_size;

		/* Clamp the chunk so it ends at the page boundary. */
		if (chunk_len > (current_page_end - offset)) {
			chunk_len = (current_page_end - offset);
		}

		err = perform_write(dev, offset, data, chunk_len);
		if (err != 0) {
			break;
		}

		data = (uint8_t *)data + chunk_len;
		offset += chunk_len;
		len -= chunk_len;
	}

#if ANY_INST_HAS_WP_GPIOS
	if (cfg->wp) {
		gpio_pin_set_dt(cfg->wp, 1);
	}
#endif

	release(dev);

	return err;
}
/*
 * Issue the 4-byte Chip Erase sequence and wait until the chip reports
 * ready again. Returns 0 on success, -EIO otherwise.
 */
static int perform_chip_erase(const struct device *dev)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err;
	uint8_t const chip_erase_cmd[] = CMD_CHIP_ERASE;
	const struct spi_buf tx_buf[] = {
		{
			.buf = (void *)&chip_erase_cmd,
			.len = sizeof(chip_erase_cmd),
		}
	};
	DEF_BUF_SET(tx_buf_set, tx_buf);

	err = spi_write_dt(&cfg->bus, &tx_buf_set);
	if (err != 0) {
		LOG_ERR("SPI transaction failed with code: %d/%u",
			err, __LINE__);
	} else {
		err = wait_until_ready(dev);
	}

	return (err != 0) ? -EIO : 0;
}
/*
 * An erase unit of `entity_size` bytes can be used when the remaining
 * request covers at least one full unit and `offset` is aligned to it.
 * Alignment is tested with a mask, so `entity_size` is expected to be
 * a power of two.
 */
static bool is_erase_possible(size_t entity_size,
			      off_t offset, size_t requested_size)
{
	const bool covers_unit = (requested_size >= entity_size);
	const bool aligned = ((offset & (entity_size - 1)) == 0);

	return covers_unit && aligned;
}
/*
 * Issue a single erase command (`opcode` selects sector/block/page) for
 * the unit starting at `offset` and wait for completion.
 * Returns 0 on success, -EIO otherwise.
 */
static int perform_erase_op(const struct device *dev, uint8_t opcode,
			    off_t offset)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err;
	/* Opcode followed by the 24-bit unit address, MSB first. */
	uint8_t const op_and_addr[] = {
		opcode,
		(offset >> 16) & 0xFF,
		(offset >> 8) & 0xFF,
		(offset >> 0) & 0xFF,
	};
	const struct spi_buf tx_buf[] = {
		{
			.buf = (void *)&op_and_addr,
			.len = sizeof(op_and_addr),
		}
	};
	DEF_BUF_SET(tx_buf_set, tx_buf);

	err = spi_write_dt(&cfg->bus, &tx_buf_set);
	if (err != 0) {
		LOG_ERR("SPI transaction failed with code: %d/%u",
			err, __LINE__);
	} else {
		err = wait_until_ready(dev);
	}

	return (err != 0) ? -EIO : 0;
}
/*
 * flash_api.erase callback. The range must be page-aligned in both
 * offset and size. When the whole chip is requested and chip erase is
 * supported, one Chip Erase is issued; otherwise the range is covered
 * with the largest applicable unit at each step: sector, then block,
 * then page. The WP pin, if wired, is released for the duration of the
 * operation. Returns 0 on success, -ENODEV/-EINVAL for bad requests,
 * -EIO on transfer failure.
 */
static int spi_flash_at45_erase(const struct device *dev, off_t offset,
				size_t size)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err = 0;

	if (!is_valid_request(offset, size, cfg->chip_size)) {
		return -ENODEV;
	}

	/* Diagnose region errors before starting to erase. */
	if (((offset % cfg->page_size) != 0)
	    || ((size % cfg->page_size) != 0)) {
		return -EINVAL;
	}

	acquire(dev);

#if ANY_INST_HAS_WP_GPIOS
	if (cfg->wp) {
		gpio_pin_set_dt(cfg->wp, 0);
	}
#endif

	if (!cfg->no_chip_erase && size == cfg->chip_size) {
		err = perform_chip_erase(dev);
	} else {
		while (size) {
			size_t sector_size = cfg->sector_size;

			/* Sector 0 can be split into sub-sectors 0a and 0b
			 * of different sizes; pick the right one for this
			 * offset.
			 */
			if (cfg->sector_0a_size) {
				if (offset < cfg->sector_0a_size) {
					sector_size = cfg->sector_0a_size;
				} else if (offset < cfg->sector_size) {
					/* Sector 0b. Calculate its size. */
					sector_size -= cfg->sector_0a_size;
				}
			}

			if (!cfg->no_sector_erase &&
			    is_erase_possible(sector_size,
					      offset, size)) {
				err = perform_erase_op(dev, CMD_SECTOR_ERASE,
						       offset);
				offset += sector_size;
				size -= sector_size;
			} else if (is_erase_possible(cfg->block_size,
						     offset, size)) {
				err = perform_erase_op(dev, CMD_BLOCK_ERASE,
						       offset);
				offset += cfg->block_size;
				size -= cfg->block_size;
			} else if (is_erase_possible(cfg->page_size,
						     offset, size)) {
				err = perform_erase_op(dev, CMD_PAGE_ERASE,
						       offset);
				offset += cfg->page_size;
				size -= cfg->page_size;
			} else {
				LOG_ERR("Unsupported erase request: "
					"size %zu at 0x%lx",
					size, (long)offset);
				err = -EINVAL;
			}

			if (err != 0) {
				break;
			}
		}
	}

#if ANY_INST_HAS_WP_GPIOS
	if (cfg->wp) {
		gpio_pin_set_dt(cfg->wp, 1);
	}
#endif

	release(dev);

	return err;
}
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
/* flash_api.page_layout callback: the whole chip is described by the
 * single uniform layout computed at build time from devicetree.
 */
static void spi_flash_at45_pages_layout(const struct device *dev,
					const struct flash_pages_layout **layout,
					size_t *layout_size)
{
	const struct spi_flash_at45_config *config = dev->config;

	*layout_size = 1;
	*layout = &config->pages_layout;
}
#endif /* defined(CONFIG_FLASH_PAGE_LAYOUT) */
/*
 * Send a one-byte power-state command (enter/exit DPD or UDPD) and
 * busy-wait `delay` microseconds to let the transition complete.
 * Returns 0 on success, -EIO on SPI failure.
 */
static int power_down_op(const struct device *dev, uint8_t opcode,
			 uint32_t delay)
{
	const struct spi_flash_at45_config *cfg = dev->config;
	int err = 0;
	const struct spi_buf tx_buf[] = {
		{
			.buf = (void *)&opcode,
			.len = sizeof(opcode),
		}
	};
	DEF_BUF_SET(tx_buf_set, tx_buf);

	err = spi_write_dt(&cfg->bus, &tx_buf_set);
	if (err != 0) {
		LOG_ERR("SPI transaction failed with code: %d/%u",
			err, __LINE__);
		return -EIO;
	}

	k_busy_wait(delay);

	return 0;
}
/*
 * Driver init: verify the SPI bus is ready, configure the optional
 * reset and write protect pins, wake the chip from any power-down
 * state, confirm the JEDEC ID and switch the chip to the binary page
 * size. Returns 0 on success, negative errno otherwise.
 */
static int spi_flash_at45_init(const struct device *dev)
{
	const struct spi_flash_at45_config *dev_config = dev->config;
	int err;

	if (!spi_is_ready_dt(&dev_config->bus)) {
		LOG_ERR("SPI bus %s not ready", dev_config->bus.bus->name);
		return -ENODEV;
	}

#if ANY_INST_HAS_RESET_GPIOS
	if (dev_config->reset) {
		if (!device_is_ready(dev_config->reset->port)) {
			LOG_ERR("Reset pin not ready");
			return -ENODEV;
		}
		if (gpio_pin_configure_dt(dev_config->reset, GPIO_OUTPUT_ACTIVE)) {
			LOG_ERR("Couldn't configure reset pin");
			return -ENODEV;
		}
		/* Deassert the reset signal (logical level 0). */
		gpio_pin_set_dt(dev_config->reset, 0);
	}
#endif

#if ANY_INST_HAS_WP_GPIOS
	if (dev_config->wp) {
		if (!device_is_ready(dev_config->wp->port)) {
			LOG_ERR("Write protect pin not ready");
			return -ENODEV;
		}
		if (gpio_pin_configure_dt(dev_config->wp, GPIO_OUTPUT_ACTIVE)) {
			LOG_ERR("Couldn't configure write protect pin");
			return -ENODEV;
		}
	}
#endif

	acquire(dev);

	/* Just in case the chip was in the Deep (or Ultra-Deep) Power-Down
	 * mode, issue the command to bring it back to normal operation.
	 * Exiting from the Ultra-Deep mode requires only that the CS line
	 * is asserted for a certain time, so issuing the Resume from Deep
	 * Power-Down command will work in both cases.
	 */
	power_down_op(dev, CMD_EXIT_DPD, dev_config->t_exit_dpd);

	err = check_jedec_id(dev);
	if (err == 0) {
		err = configure_page_size(dev);
	}

	release(dev);

	return err;
}
#if defined(CONFIG_PM_DEVICE)
/*
 * PM hook: RESUME wakes the chip from (Ultra-)Deep Power-Down, SUSPEND
 * enters Deep or Ultra-Deep Power-Down as selected in devicetree
 * (use_udpd). Other actions are not supported.
 */
static int spi_flash_at45_pm_action(const struct device *dev,
				    enum pm_device_action action)
{
	const struct spi_flash_at45_config *dev_config = dev->config;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		acquire(dev);
		power_down_op(dev, CMD_EXIT_DPD, dev_config->t_exit_dpd);
		release(dev);
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		acquire(dev);
		power_down_op(dev,
			dev_config->use_udpd ? CMD_ENTER_UDPD : CMD_ENTER_DPD,
			dev_config->t_enter_dpd);
		release(dev);
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif /* defined(CONFIG_PM_DEVICE) */
/* flash_api.get_parameters callback - the parameters are identical for
 * every instance, so the device pointer is unused.
 */
static const struct flash_parameters *
flash_at45_get_parameters(const struct device *dev)
{
	ARG_UNUSED(dev);

	return &flash_at45_parameters;
}
/* Flash driver API vtable for this driver. */
static const struct flash_driver_api spi_flash_at45_api = {
	.read = spi_flash_at45_read,
	.write = spi_flash_at45_write,
	.erase = spi_flash_at45_erase,
	.get_parameters = flash_at45_get_parameters,
#if defined(CONFIG_FLASH_PAGE_LAYOUT)
	.page_layout = spi_flash_at45_pages_layout,
#endif
};
/* Nonzero if devicetree instance `idx` has a reset-gpios property. */
#define INST_HAS_RESET_GPIO(idx) \
	DT_INST_NODE_HAS_PROP(idx, reset_gpios)
/* Defines a gpio_dt_spec for the instance's reset pin, when present. */
#define INST_RESET_GPIO_SPEC(idx) \
	IF_ENABLED(INST_HAS_RESET_GPIO(idx), \
		(static const struct gpio_dt_spec reset_##idx = \
		GPIO_DT_SPEC_INST_GET(idx, reset_gpios);))
/* Nonzero if devicetree instance `idx` has a wp-gpios property. */
#define INST_HAS_WP_GPIO(idx) \
	DT_INST_NODE_HAS_PROP(idx, wp_gpios)
/* Defines a gpio_dt_spec for the instance's write protect pin, when present. */
#define INST_WP_GPIO_SPEC(idx) \
	IF_ENABLED(INST_HAS_WP_GPIO(idx), \
		(static const struct gpio_dt_spec wp_##idx = \
		GPIO_DT_SPEC_INST_GET(idx, wp_gpios);))
/*
 * Instantiates the driver for devicetree instance `idx`: computes the
 * chip geometry (size property is in bits, hence the division by 8),
 * defines the data/config structures with optional reset and WP pin
 * specs, checks that the page size divides the total size, and
 * registers the device with PM support.
 */
#define SPI_FLASH_AT45_INST(idx) \
	enum { \
		INST_##idx##_BYTES = (DT_INST_PROP(idx, size) / 8), \
		INST_##idx##_PAGES = (INST_##idx##_BYTES / \
				      DT_INST_PROP(idx, page_size)), \
	}; \
	static struct spi_flash_at45_data inst_##idx##_data = { \
		.lock = Z_SEM_INITIALIZER(inst_##idx##_data.lock, 1, 1), \
	}; \
	INST_RESET_GPIO_SPEC(idx) \
	INST_WP_GPIO_SPEC(idx) \
	static const struct spi_flash_at45_config inst_##idx##_config = { \
		.bus = SPI_DT_SPEC_INST_GET( \
			idx, SPI_OP_MODE_MASTER | SPI_TRANSFER_MSB | \
			SPI_WORD_SET(8), 0), \
		IF_ENABLED(INST_HAS_RESET_GPIO(idx), \
			(.reset = &reset_##idx,)) \
		IF_ENABLED(INST_HAS_WP_GPIO(idx), \
			(.wp = &wp_##idx,)) \
		IF_ENABLED(CONFIG_FLASH_PAGE_LAYOUT, ( \
			.pages_layout = { \
				.pages_count = INST_##idx##_PAGES, \
				.pages_size = DT_INST_PROP(idx, page_size), \
			},)) \
		.chip_size = INST_##idx##_BYTES, \
		.sector_size = DT_INST_PROP(idx, sector_size), \
		.sector_0a_size = DT_INST_PROP(idx, sector_0a_pages) \
				  * DT_INST_PROP(idx, page_size), \
		.block_size = DT_INST_PROP(idx, block_size), \
		.page_size = DT_INST_PROP(idx, page_size), \
		.no_chip_erase = DT_INST_PROP(idx, no_chip_erase), \
		.no_sector_erase = DT_INST_PROP(idx, no_sector_erase), \
		.t_enter_dpd = DIV_ROUND_UP( \
			DT_INST_PROP(idx, enter_dpd_delay), \
			NSEC_PER_USEC), \
		.t_exit_dpd = DIV_ROUND_UP( \
			DT_INST_PROP(idx, exit_dpd_delay), \
			NSEC_PER_USEC), \
		.use_udpd = DT_INST_PROP(idx, use_udpd), \
		.jedec_id = DT_INST_PROP(idx, jedec_id), \
	}; \
	IF_ENABLED(CONFIG_FLASH_PAGE_LAYOUT, ( \
		BUILD_ASSERT( \
			(INST_##idx##_PAGES * DT_INST_PROP(idx, page_size)) \
			== INST_##idx##_BYTES, \
			"Page size specified for instance " #idx " of " \
			"atmel,at45 is not compatible with its " \
			"total size");)) \
	\
	PM_DEVICE_DT_INST_DEFINE(idx, spi_flash_at45_pm_action); \
	\
	DEVICE_DT_INST_DEFINE(idx, \
		spi_flash_at45_init, PM_DEVICE_DT_INST_GET(idx), \
		&inst_##idx##_data, &inst_##idx##_config, \
		POST_KERNEL, CONFIG_SPI_FLASH_AT45_INIT_PRIORITY, \
		&spi_flash_at45_api);

/* Create a driver instance for each enabled atmel,at45 node. */
DT_INST_FOREACH_STATUS_OKAY(SPI_FLASH_AT45_INST)
``` | /content/code_sandbox/drivers/flash/spi_flash_at45.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,313 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.