repo_name
string
path
string
copies
string
size
string
content
string
license
string
maqiangddb/Android_kernel
drivers/staging/comedi/drivers/das16.c
7986
46364
/* comedi/drivers/das16.c DAS16 driver COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com> Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: das16 Description: DAS16 compatible boards Author: Sam Moore, Warren Jasper, ds, Chris Baugher, Frank Hess, Roman Fietze Devices: [Keithley Metrabyte] DAS-16 (das-16), DAS-16G (das-16g), DAS-16F (das-16f), DAS-1201 (das-1201), DAS-1202 (das-1202), DAS-1401 (das-1401), DAS-1402 (das-1402), DAS-1601 (das-1601), DAS-1602 (das-1602), [ComputerBoards] PC104-DAS16/JR (pc104-das16jr), PC104-DAS16JR/16 (pc104-das16jr/16), CIO-DAS16JR/16 (cio-das16jr/16), CIO-DAS16/JR (cio-das16/jr), CIO-DAS1401/12 (cio-das1401/12), CIO-DAS1402/12 (cio-das1402/12), CIO-DAS1402/16 (cio-das1402/16), CIO-DAS1601/12 (cio-das1601/12), CIO-DAS1602/12 (cio-das1602/12), CIO-DAS1602/16 (cio-das1602/16), CIO-DAS16/330 (cio-das16/330) Status: works Updated: 2003-10-12 A rewrite of the das16 and das1600 drivers. 
Options: [0] - base io address [1] - irq (does nothing, irq is not used anymore) [2] - dma (optional, required for comedi_command support) [3] - master clock speed in MHz (optional, 1 or 10, ignored if board can probe clock, defaults to 1) [4] - analog input range lowest voltage in microvolts (optional, only useful if your board does not have software programmable gain) [5] - analog input range highest voltage in microvolts (optional, only useful if board does not have software programmable gain) [6] - analog output range lowest voltage in microvolts (optional) [7] - analog output range highest voltage in microvolts (optional) [8] - use timer mode for DMA. Timer mode is needed e.g. for buggy DMA controllers in NS CS5530A (Geode Companion), and for 'jr' cards that lack a hardware fifo. This option is no longer needed, since timer mode is _always_ used. Passing a zero for an option is the same as leaving it unspecified. */ /* Testing and debugging help provided by Daniel Koch. Keithley Manuals: 2309.PDF (das16) 4919.PDF (das1400, 1600) 4922.PDF (das-1400) 4923.PDF (das1200, 1400, 1600) Computer boards manuals also available from their website www.measurementcomputing.com */ #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <asm/dma.h> #include "../comedidev.h" #include "8253.h" #include "8255.h" #include "comedi_fc.h" #undef DEBUG /* #define DEBUG */ #ifdef DEBUG #define DEBUG_PRINT(format, args...) \ printk(KERN_DEBUG "das16: " format, ## args) #else #define DEBUG_PRINT(format, args...) 
#endif #define DAS16_SIZE 20 /* number of ioports */ #define DAS16_DMA_SIZE 0xff00 /* size in bytes of allocated dma buffer */ /* cio-das16.pdf "das16" "das16/f" 0 a/d bits 0-3 start 12 bit 1 a/d bits 4-11 unused 2 mux read mux set 3 di 4 bit do 4 bit 4 unused ao0_lsb 5 unused ao0_msb 6 unused ao1_lsb 7 unused ao1_msb 8 status eoc uni/bip interrupt reset 9 dma, int, trig ctrl set dma, int a pacer control unused b reserved reserved cdef 8254 0123 8255 */ /* cio-das16jr.pdf "das16jr" 0 a/d bits 0-3 start 12 bit 1 a/d bits 4-11 unused 2 mux read mux set 3 di 4 bit do 4 bit 4567 unused unused 8 status eoc uni/bip interrupt reset 9 dma, int, trig ctrl set dma, int a pacer control unused b gain status gain control cdef 8254 */ /* cio-das16jr_16.pdf "das16jr_16" 0 a/d bits 0-7 start 16 bit 1 a/d bits 8-15 unused 2 mux read mux set 3 di 4 bit do 4 bit 4567 unused unused 8 status eoc uni/bip interrupt reset 9 dma, int, trig ctrl set dma, int a pacer control unused b gain status gain control cdef 8254 */ /* cio-das160x-1x.pdf "das1601/12" "das1602/12" "das1602/16" 0 a/d bits 0-3 start 12 bit 1 a/d bits 4-11 unused 2 mux read mux set 3 di 4 bit do 4 bit 4 unused ao0_lsb 5 unused ao0_msb 6 unused ao1_lsb 7 unused ao1_msb 8 status eoc uni/bip interrupt reset 9 dma, int, trig ctrl set dma, int a pacer control unused b gain status gain control cdef 8254 400 8255 404 unused conversion enable 405 unused burst enable 406 unused das1600 enable 407 status */ /* size in bytes of a sample from board */ static const int sample_size = 2; #define DAS16_TRIG 0 #define DAS16_AI_LSB 0 #define DAS16_AI_MSB 1 #define DAS16_MUX 2 #define DAS16_DIO 3 #define DAS16_AO_LSB(x) ((x) ? 6 : 4) #define DAS16_AO_MSB(x) ((x) ? 
7 : 5) #define DAS16_STATUS 8 #define BUSY (1<<7) #define UNIPOLAR (1<<6) #define DAS16_MUXBIT (1<<5) #define DAS16_INT (1<<4) #define DAS16_CONTROL 9 #define DAS16_INTE (1<<7) #define DAS16_IRQ(x) (((x) & 0x7) << 4) #define DMA_ENABLE (1<<2) #define PACING_MASK 0x3 #define INT_PACER 0x03 #define EXT_PACER 0x02 #define DAS16_SOFT 0x00 #define DAS16_PACER 0x0A #define DAS16_CTR0 (1<<1) #define DAS16_TRIG0 (1<<0) #define BURST_LEN_BITS(x) (((x) & 0xf) << 4) #define DAS16_GAIN 0x0B #define DAS16_CNTR0_DATA 0x0C #define DAS16_CNTR1_DATA 0x0D #define DAS16_CNTR2_DATA 0x0E #define DAS16_CNTR_CONTROL 0x0F #define DAS16_TERM_CNT 0x00 #define DAS16_ONE_SHOT 0x02 #define DAS16_RATE_GEN 0x04 #define DAS16_CNTR_LSB_MSB 0x30 #define DAS16_CNTR0 0x00 #define DAS16_CNTR1 0x40 #define DAS16_CNTR2 0x80 #define DAS1600_CONV 0x404 #define DAS1600_CONV_DISABLE 0x40 #define DAS1600_BURST 0x405 #define DAS1600_BURST_VAL 0x40 #define DAS1600_ENABLE 0x406 #define DAS1600_ENABLE_VAL 0x40 #define DAS1600_STATUS_B 0x407 #define DAS1600_BME 0x40 #define DAS1600_ME 0x20 #define DAS1600_CD 0x10 #define DAS1600_WS 0x02 #define DAS1600_CLK_10MHZ 0x01 static const struct comedi_lrange range_das1x01_bip = { 4, { BIP_RANGE(10), BIP_RANGE(1), BIP_RANGE(0.1), BIP_RANGE(0.01), } }; static const struct comedi_lrange range_das1x01_unip = { 4, { UNI_RANGE(10), UNI_RANGE(1), UNI_RANGE(0.1), UNI_RANGE(0.01), } }; static const struct comedi_lrange range_das1x02_bip = { 4, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), } }; static const struct comedi_lrange range_das1x02_unip = { 4, { UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), } }; static const struct comedi_lrange range_das16jr = { 9, { /* also used by 16/330 */ BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), } }; static const struct comedi_lrange range_das16jr_16 = { 8, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), 
BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), } }; static const int das16jr_gainlist[] = { 8, 0, 1, 2, 3, 4, 5, 6, 7 }; static const int das16jr_16_gainlist[] = { 0, 1, 2, 3, 4, 5, 6, 7 }; static const int das1600_gainlist[] = { 0, 1, 2, 3 }; enum { das16_pg_none = 0, das16_pg_16jr, das16_pg_16jr_16, das16_pg_1601, das16_pg_1602, }; static const int *const das16_gainlists[] = { NULL, das16jr_gainlist, das16jr_16_gainlist, das1600_gainlist, das1600_gainlist, }; static const struct comedi_lrange *const das16_ai_uni_lranges[] = { &range_unknown, &range_das16jr, &range_das16jr_16, &range_das1x01_unip, &range_das1x02_unip, }; static const struct comedi_lrange *const das16_ai_bip_lranges[] = { &range_unknown, &range_das16jr, &range_das16jr_16, &range_das1x01_bip, &range_das1x02_bip, }; struct munge_info { uint8_t byte; unsigned have_byte:1; }; static int das16_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s); static int das16_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void das16_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s, void *array, unsigned int num_bytes, unsigned int start_chan_index); static void das16_reset(struct comedi_device *dev); static irqreturn_t das16_dma_interrupt(int irq, void *d); static void das16_timer_interrupt(unsigned long arg); static void 
das16_interrupt(struct comedi_device *dev); static unsigned int das16_set_pacer(struct comedi_device *dev, unsigned int ns, int flags); static int das1600_mode_detect(struct comedi_device *dev); static unsigned int das16_suggest_transfer_size(struct comedi_device *dev, struct comedi_cmd cmd); static void reg_dump(struct comedi_device *dev); struct das16_board { const char *name; void *ai; unsigned int ai_nbits; unsigned int ai_speed; /* max conversion speed in nanosec */ unsigned int ai_pg; void *ao; unsigned int ao_nbits; void *di; void *do_; unsigned int i8255_offset; unsigned int i8254_offset; unsigned int size; unsigned int id; }; static const struct das16_board das16_boards[] = { { .name = "das-16", .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 15000, .ai_pg = das16_pg_none, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x10, .i8254_offset = 0x0c, .size = 0x14, .id = 0x00, }, { .name = "das-16g", .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 15000, .ai_pg = das16_pg_none, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x10, .i8254_offset = 0x0c, .size = 0x14, .id = 0x00, }, { .name = "das-16f", .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 8500, .ai_pg = das16_pg_none, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x10, .i8254_offset = 0x0c, .size = 0x14, .id = 0x00, }, { .name = "cio-das16", /* cio-das16.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 20000, .ai_pg = das16_pg_none, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x10, .i8254_offset = 0x0c, .size = 0x14, .id = 0x80, }, { .name = "cio-das16/f", /* das16.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 10000, .ai_pg = das16_pg_none, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x10, .i8254_offset = 0x0c, 
.size = 0x14, .id = 0x80, }, { .name = "cio-das16/jr", /* cio-das16jr.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 7692, .ai_pg = das16_pg_16jr, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0, .i8254_offset = 0x0c, .size = 0x10, .id = 0x00, }, { .name = "pc104-das16jr", /* pc104-das16jr_xx.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 3300, .ai_pg = das16_pg_16jr, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0, .i8254_offset = 0x0c, .size = 0x10, .id = 0x00, }, { .name = "cio-das16jr/16", /* cio-das16jr_16.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 16, .ai_speed = 10000, .ai_pg = das16_pg_16jr_16, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0, .i8254_offset = 0x0c, .size = 0x10, .id = 0x00, }, { .name = "pc104-das16jr/16", /* pc104-das16jr_xx.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 16, .ai_speed = 10000, .ai_pg = das16_pg_16jr_16, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0, .i8254_offset = 0x0c, .size = 0x10, .id = 0x00, }, { .name = "das-1201", /* 4924.pdf (keithley user's manual) */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 20000, .ai_pg = das16_pg_none, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x400, .i8254_offset = 0x0c, .size = 0x408, .id = 0x20, }, { .name = "das-1202", /* 4924.pdf (keithley user's manual) */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 10000, .ai_pg = das16_pg_none, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x400, .i8254_offset = 0x0c, .size = 0x408, .id = 0x20, }, { /* 4919.pdf and 4922.pdf (keithley user's manual) */ .name = "das-1401", .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 10000, .ai_pg = das16_pg_1601, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x0, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0 /* 4919.pdf says id bits are 0xe0, 4922.pdf says 0xc0 */ }, { /* 4919.pdf and 4922.pdf 
(keithley user's manual) */ .name = "das-1402", .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 10000, .ai_pg = das16_pg_1602, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x0, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0 /* 4919.pdf says id bits are 0xe0, 4922.pdf says 0xc0 */ }, { .name = "das-1601", /* 4919.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 10000, .ai_pg = das16_pg_1601, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x400, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0}, { .name = "das-1602", /* 4919.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 10000, .ai_pg = das16_pg_1602, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x400, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0}, { .name = "cio-das1401/12", /* cio-das1400_series.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 6250, .ai_pg = das16_pg_1601, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0}, { .name = "cio-das1402/12", /* cio-das1400_series.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 6250, .ai_pg = das16_pg_1602, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0}, { .name = "cio-das1402/16", /* cio-das1400_series.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 16, .ai_speed = 10000, .ai_pg = das16_pg_1602, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0}, { .name = "cio-das1601/12", /* cio-das160x-1x.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 6250, .ai_pg = das16_pg_1601, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x400, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0}, { .name = "cio-das1602/12", /* cio-das160x-1x.pdf */ .ai 
= das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 10000, .ai_pg = das16_pg_1602, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x400, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0}, { .name = "cio-das1602/16", /* cio-das160x-1x.pdf */ .ai = das16_ai_rinsn, .ai_nbits = 16, .ai_speed = 10000, .ai_pg = das16_pg_1602, .ao = das16_ao_winsn, .ao_nbits = 12, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0x400, .i8254_offset = 0x0c, .size = 0x408, .id = 0xc0}, { .name = "cio-das16/330", /* ? */ .ai = das16_ai_rinsn, .ai_nbits = 12, .ai_speed = 3030, .ai_pg = das16_pg_16jr, .ao = NULL, .di = das16_di_rbits, .do_ = das16_do_wbits, .i8255_offset = 0, .i8254_offset = 0x0c, .size = 0x14, .id = 0xf0}, #if 0 { .name = "das16/330i", /* ? */ }, { .name = "das16/jr/ctr5", /* ? */ }, { /* cio-das16_m1_16.pdf, this board is a bit quirky, no dma */ .name = "cio-das16/m1/16", }, #endif }; static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int das16_detach(struct comedi_device *dev); static struct comedi_driver driver_das16 = { .driver_name = "das16", .module = THIS_MODULE, .attach = das16_attach, .detach = das16_detach, .board_name = &das16_boards[0].name, .num_names = ARRAY_SIZE(das16_boards), .offset = sizeof(das16_boards[0]), }; #define DAS16_TIMEOUT 1000 /* Period for timer interrupt in jiffies. 
It's a function * to deal with possibility of dynamic HZ patches */ static inline int timer_period(void) { return HZ / 20; } struct das16_private_struct { unsigned int ai_unipolar; /* unipolar flag */ unsigned int ai_singleended; /* single ended flag */ unsigned int clockbase; /* master clock speed in ns */ volatile unsigned int control_state; /* dma, interrupt and trigger control bits */ volatile unsigned long adc_byte_count; /* number of bytes remaining */ /* divisor dividing master clock to get conversion frequency */ unsigned int divisor1; /* divisor dividing master clock to get conversion frequency */ unsigned int divisor2; unsigned int dma_chan; /* dma channel */ uint16_t *dma_buffer[2]; dma_addr_t dma_buffer_addr[2]; unsigned int current_buffer; volatile unsigned int dma_transfer_size; /* target number of bytes to transfer per dma shot */ /** * user-defined analog input and output ranges * defined from config options */ struct comedi_lrange *user_ai_range_table; struct comedi_lrange *user_ao_range_table; struct timer_list timer; /* for timed interrupt */ volatile short timer_running; volatile short timer_mode; /* true if using timer mode */ }; #define devpriv ((struct das16_private_struct *)(dev->private)) #define thisboard ((struct das16_board *)(dev->board_ptr)) static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0, tmp; int gain, start_chan, i; int mask; /* make sure triggers are valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; mask = TRIG_FOLLOW; /* if board supports burst mode */ if (thisboard->size > 0x400) mask |= TRIG_TIMER | TRIG_EXT; cmd->scan_begin_src &= mask; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; mask = TRIG_TIMER | TRIG_EXT; /* if board supports burst mode */ if (thisboard->size > 0x400) mask |= TRIG_NOW; cmd->convert_src &= mask; if 
(!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /** * step 2: make sure trigger sources are unique and * mutually compatible */ if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT && cmd->scan_begin_src != TRIG_FOLLOW) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW) err++; if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT) err++; /* make sure scan_begin_src and convert_src dont conflict */ if (cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_NOW) err++; if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_NOW) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_FOLLOW) { /* internal trigger */ if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } /* check against maximum frequency */ if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < thisboard->ai_speed * cmd->chanlist_len) { cmd->scan_begin_arg = thisboard->ai_speed * cmd->chanlist_len; err++; } } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } if (cmd->stop_src == TRIG_NONE) { if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { unsigned int tmp = cmd->scan_begin_arg; /* set divisors, correct timing arguments */ i8253_cascade_ns_to_timer_2div(devpriv->clockbase, &(devpriv->divisor1), &(devpriv->divisor2), 
&(cmd->scan_begin_arg), cmd->flags & TRIG_ROUND_MASK); err += (tmp != cmd->scan_begin_arg); } if (cmd->convert_src == TRIG_TIMER) { unsigned int tmp = cmd->convert_arg; /* set divisors, correct timing arguments */ i8253_cascade_ns_to_timer_2div(devpriv->clockbase, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->convert_arg), cmd->flags & TRIG_ROUND_MASK); err += (tmp != cmd->convert_arg); } if (err) return 4; /* check channel/gain list against card's limitations */ if (cmd->chanlist) { gain = CR_RANGE(cmd->chanlist[0]); start_chan = CR_CHAN(cmd->chanlist[0]); for (i = 1; i < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i]) != (start_chan + i) % s->n_chan) { comedi_error(dev, "entries in chanlist must be " "consecutive channels, " "counting upwards\n"); err++; } if (CR_RANGE(cmd->chanlist[i]) != gain) { comedi_error(dev, "entries in chanlist must all " "have the same gain\n"); err++; } } } if (err) return 5; return 0; } static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int byte; unsigned long flags; int range; if (devpriv->dma_chan == 0 || (dev->irq == 0 && devpriv->timer_mode == 0)) { comedi_error(dev, "irq (or use of 'timer mode') dma required to " "execute comedi_cmd"); return -1; } if (cmd->flags & TRIG_RT) { comedi_error(dev, "isa dma transfers cannot be performed with " "TRIG_RT, aborting"); return -1; } devpriv->adc_byte_count = cmd->stop_arg * cmd->chanlist_len * sizeof(uint16_t); /* disable conversions for das1600 mode */ if (thisboard->size > 0x400) outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV); /* set scan limits */ byte = CR_CHAN(cmd->chanlist[0]); byte |= CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]) << 4; outb(byte, dev->iobase + DAS16_MUX); /* set gain (this is also burst rate register but according to * computer boards manual, burst rate does nothing, even on * keithley cards) */ if (thisboard->ai_pg != 
das16_pg_none) { range = CR_RANGE(cmd->chanlist[0]); outb((das16_gainlists[thisboard->ai_pg])[range], dev->iobase + DAS16_GAIN); } /* set counter mode and counts */ cmd->convert_arg = das16_set_pacer(dev, cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); DEBUG_PRINT("pacer period: %d ns\n", cmd->convert_arg); /* enable counters */ byte = 0; /* Enable burst mode if appropriate. */ if (thisboard->size > 0x400) { if (cmd->convert_src == TRIG_NOW) { outb(DAS1600_BURST_VAL, dev->iobase + DAS1600_BURST); /* set burst length */ byte |= BURST_LEN_BITS(cmd->chanlist_len - 1); } else { outb(0, dev->iobase + DAS1600_BURST); } } outb(byte, dev->iobase + DAS16_PACER); /* set up dma transfer */ flags = claim_dma_lock(); disable_dma(devpriv->dma_chan); /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma_chan); devpriv->current_buffer = 0; set_dma_addr(devpriv->dma_chan, devpriv->dma_buffer_addr[devpriv->current_buffer]); /* set appropriate size of transfer */ devpriv->dma_transfer_size = das16_suggest_transfer_size(dev, *cmd); set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size); enable_dma(devpriv->dma_chan); release_dma_lock(flags); /* set up interrupt */ if (devpriv->timer_mode) { devpriv->timer_running = 1; devpriv->timer.expires = jiffies + timer_period(); add_timer(&devpriv->timer); devpriv->control_state &= ~DAS16_INTE; } else { /* clear interrupt bit */ outb(0x00, dev->iobase + DAS16_STATUS); /* enable interrupts */ devpriv->control_state |= DAS16_INTE; } devpriv->control_state |= DMA_ENABLE; devpriv->control_state &= ~PACING_MASK; if (cmd->convert_src == TRIG_EXT) devpriv->control_state |= EXT_PACER; else devpriv->control_state |= INT_PACER; outb(devpriv->control_state, dev->iobase + DAS16_CONTROL); /* Enable conversions if using das1600 mode */ if (thisboard->size > 0x400) outb(0, dev->iobase + DAS1600_CONV); return 0; } static int das16_cancel(struct comedi_device *dev, struct comedi_subdevice 
*s) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); /* disable interrupts, dma and pacer clocked conversions */ devpriv->control_state &= ~DAS16_INTE & ~PACING_MASK & ~DMA_ENABLE; outb(devpriv->control_state, dev->iobase + DAS16_CONTROL); if (devpriv->dma_chan) disable_dma(devpriv->dma_chan); /* disable SW timer */ if (devpriv->timer_mode && devpriv->timer_running) { devpriv->timer_running = 0; del_timer(&devpriv->timer); } /* disable burst mode */ if (thisboard->size > 0x400) outb(0, dev->iobase + DAS1600_BURST); spin_unlock_irqrestore(&dev->spinlock, flags); return 0; } static void das16_reset(struct comedi_device *dev) { outb(0, dev->iobase + DAS16_STATUS); outb(0, dev->iobase + DAS16_CONTROL); outb(0, dev->iobase + DAS16_PACER); outb(0, dev->iobase + DAS16_CNTR_CONTROL); } static int das16_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, n; int range; int chan; int msb, lsb; /* disable interrupts and pacing */ devpriv->control_state &= ~DAS16_INTE & ~DMA_ENABLE & ~PACING_MASK; outb(devpriv->control_state, dev->iobase + DAS16_CONTROL); /* set multiplexer */ chan = CR_CHAN(insn->chanspec); chan |= CR_CHAN(insn->chanspec) << 4; outb(chan, dev->iobase + DAS16_MUX); /* set gain */ if (thisboard->ai_pg != das16_pg_none) { range = CR_RANGE(insn->chanspec); outb((das16_gainlists[thisboard->ai_pg])[range], dev->iobase + DAS16_GAIN); } for (n = 0; n < insn->n; n++) { /* trigger conversion */ outb_p(0, dev->iobase + DAS16_TRIG); for (i = 0; i < DAS16_TIMEOUT; i++) { if (!(inb(dev->iobase + DAS16_STATUS) & BUSY)) break; } if (i == DAS16_TIMEOUT) { printk("das16: timeout\n"); return -ETIME; } msb = inb(dev->iobase + DAS16_AI_MSB); lsb = inb(dev->iobase + DAS16_AI_LSB); if (thisboard->ai_nbits == 12) data[n] = ((lsb >> 4) & 0xf) | (msb << 4); else data[n] = lsb | (msb << 8); } return n; } static int das16_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn 
*insn, unsigned int *data) { unsigned int bits; bits = inb(dev->iobase + DAS16_DIO) & 0xf; data[1] = bits; data[0] = 0; return 2; } static int das16_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int wbits; /* only set bits that have been masked */ data[0] &= 0xf; wbits = s->state; /* zero bits that have been masked */ wbits &= ~data[0]; /* set masked bits */ wbits |= data[0] & data[1]; s->state = wbits; data[1] = wbits; outb(s->state, dev->iobase + DAS16_DIO); return 2; } static int das16_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int lsb, msb; int chan; chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) { if (thisboard->ao_nbits == 12) { lsb = (data[i] << 4) & 0xff; msb = (data[i] >> 4) & 0xff; } else { lsb = data[i] & 0xff; msb = (data[i] >> 8) & 0xff; } outb(lsb, dev->iobase + DAS16_AO_LSB(chan)); outb(msb, dev->iobase + DAS16_AO_MSB(chan)); } return i; } static irqreturn_t das16_dma_interrupt(int irq, void *d) { int status; struct comedi_device *dev = d; status = inb(dev->iobase + DAS16_STATUS); if ((status & DAS16_INT) == 0) { DEBUG_PRINT("spurious interrupt\n"); return IRQ_NONE; } /* clear interrupt */ outb(0x00, dev->iobase + DAS16_STATUS); das16_interrupt(dev); return IRQ_HANDLED; } static void das16_timer_interrupt(unsigned long arg) { struct comedi_device *dev = (struct comedi_device *)arg; das16_interrupt(dev); if (devpriv->timer_running) mod_timer(&devpriv->timer, jiffies + timer_period()); } /* the pc104-das16jr (at least) has problems if the dma transfer is interrupted in the middle of transferring a 16 bit sample, so this function takes care to get an even transfer count after disabling dma channel. 
*/ static int disable_dma_on_even(struct comedi_device *dev) { int residue; int i; static const int disable_limit = 100; static const int enable_timeout = 100; disable_dma(devpriv->dma_chan); residue = get_dma_residue(devpriv->dma_chan); for (i = 0; i < disable_limit && (residue % 2); ++i) { int j; enable_dma(devpriv->dma_chan); for (j = 0; j < enable_timeout; ++j) { int new_residue; udelay(2); new_residue = get_dma_residue(devpriv->dma_chan); if (new_residue != residue) break; } disable_dma(devpriv->dma_chan); residue = get_dma_residue(devpriv->dma_chan); } if (i == disable_limit) { comedi_error(dev, "failed to get an even dma transfer, " "could be trouble."); } return residue; } static void das16_interrupt(struct comedi_device *dev) { unsigned long dma_flags, spin_flags; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async; struct comedi_cmd *cmd; int num_bytes, residue; int buffer_index; if (dev->attached == 0) { comedi_error(dev, "premature interrupt"); return; } /* initialize async here to make sure it is not NULL */ async = s->async; cmd = &async->cmd; if (devpriv->dma_chan == 0) { comedi_error(dev, "interrupt with no dma channel?"); return; } spin_lock_irqsave(&dev->spinlock, spin_flags); if ((devpriv->control_state & DMA_ENABLE) == 0) { spin_unlock_irqrestore(&dev->spinlock, spin_flags); DEBUG_PRINT("interrupt while dma disabled?\n"); return; } dma_flags = claim_dma_lock(); clear_dma_ff(devpriv->dma_chan); residue = disable_dma_on_even(dev); /* figure out how many points to read */ if (residue > devpriv->dma_transfer_size) { comedi_error(dev, "residue > transfer size!\n"); async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; num_bytes = 0; } else num_bytes = devpriv->dma_transfer_size - residue; if (cmd->stop_src == TRIG_COUNT && num_bytes >= devpriv->adc_byte_count) { num_bytes = devpriv->adc_byte_count; async->events |= COMEDI_CB_EOA; } buffer_index = devpriv->current_buffer; devpriv->current_buffer = (devpriv->current_buffer + 1) % 2; 
devpriv->adc_byte_count -= num_bytes; /* figure out how many bytes for next transfer */ if (cmd->stop_src == TRIG_COUNT && devpriv->timer_mode == 0 && devpriv->dma_transfer_size > devpriv->adc_byte_count) devpriv->dma_transfer_size = devpriv->adc_byte_count; /* re-enable dma */ if ((async->events & COMEDI_CB_EOA) == 0) { set_dma_addr(devpriv->dma_chan, devpriv->dma_buffer_addr[devpriv->current_buffer]); set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size); enable_dma(devpriv->dma_chan); /* reenable conversions for das1600 mode, (stupid hardware) */ if (thisboard->size > 0x400 && devpriv->timer_mode == 0) outb(0x00, dev->iobase + DAS1600_CONV); } release_dma_lock(dma_flags); spin_unlock_irqrestore(&dev->spinlock, spin_flags); cfc_write_array_to_buffer(s, devpriv->dma_buffer[buffer_index], num_bytes); cfc_handle_events(dev, s); } static unsigned int das16_set_pacer(struct comedi_device *dev, unsigned int ns, int rounding_flags) { i8253_cascade_ns_to_timer_2div(devpriv->clockbase, &(devpriv->divisor1), &(devpriv->divisor2), &ns, rounding_flags & TRIG_ROUND_MASK); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(dev->iobase + DAS16_CNTR0_DATA, 0, 1, devpriv->divisor1, 2); i8254_load(dev->iobase + DAS16_CNTR0_DATA, 0, 2, devpriv->divisor2, 2); return ns; } static void reg_dump(struct comedi_device *dev) { DEBUG_PRINT("********DAS1600 REGISTER DUMP********\n"); DEBUG_PRINT("DAS16_MUX: %x\n", inb(dev->iobase + DAS16_MUX)); DEBUG_PRINT("DAS16_DIO: %x\n", inb(dev->iobase + DAS16_DIO)); DEBUG_PRINT("DAS16_STATUS: %x\n", inb(dev->iobase + DAS16_STATUS)); DEBUG_PRINT("DAS16_CONTROL: %x\n", inb(dev->iobase + DAS16_CONTROL)); DEBUG_PRINT("DAS16_PACER: %x\n", inb(dev->iobase + DAS16_PACER)); DEBUG_PRINT("DAS16_GAIN: %x\n", inb(dev->iobase + DAS16_GAIN)); DEBUG_PRINT("DAS16_CNTR_CONTROL: %x\n", inb(dev->iobase + DAS16_CNTR_CONTROL)); DEBUG_PRINT("DAS1600_CONV: %x\n", inb(dev->iobase + DAS1600_CONV)); DEBUG_PRINT("DAS1600_BURST: %x\n", 
inb(dev->iobase + DAS1600_BURST)); DEBUG_PRINT("DAS1600_ENABLE: %x\n", inb(dev->iobase + DAS1600_ENABLE)); DEBUG_PRINT("DAS1600_STATUS_B: %x\n", inb(dev->iobase + DAS1600_STATUS_B)); } static int das16_probe(struct comedi_device *dev, struct comedi_devconfig *it) { int status; int diobits; /* status is available on all boards */ status = inb(dev->iobase + DAS16_STATUS); if ((status & UNIPOLAR)) devpriv->ai_unipolar = 1; else devpriv->ai_unipolar = 0; if ((status & DAS16_MUXBIT)) devpriv->ai_singleended = 1; else devpriv->ai_singleended = 0; /* diobits indicates boards */ diobits = inb(dev->iobase + DAS16_DIO) & 0xf0; printk(KERN_INFO " id bits are 0x%02x\n", diobits); if (thisboard->id != diobits) { printk(KERN_INFO " requested board's id bits are 0x%x (ignore)\n", thisboard->id); } return 0; } static int das1600_mode_detect(struct comedi_device *dev) { int status = 0; status = inb(dev->iobase + DAS1600_STATUS_B); if (status & DAS1600_CLK_10MHZ) { devpriv->clockbase = 100; printk(KERN_INFO " 10MHz pacer clock\n"); } else { devpriv->clockbase = 1000; printk(KERN_INFO " 1MHz pacer clock\n"); } reg_dump(dev); return 0; } /* * * Options list: * 0 I/O base * 1 IRQ * 2 DMA * 3 Clock speed (in MHz) */ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret; unsigned int irq; unsigned long iobase; unsigned int dma_chan; int timer_mode; unsigned long flags; struct comedi_krange *user_ai_range, *user_ao_range; iobase = it->options[0]; #if 0 irq = it->options[1]; timer_mode = it->options[8]; #endif /* always use time_mode since using irq can drop samples while * waiting for dma done interrupt (due to hardware limitations) */ irq = 0; timer_mode = 1; if (timer_mode) irq = 0; printk(KERN_INFO "comedi%d: das16:", dev->minor); /* check that clock setting is valid */ if (it->options[3]) { if (it->options[3] != 0 && it->options[3] != 1 && it->options[3] != 10) { printk ("\n Invalid option. 
Master clock must be set " "to 1 or 10 (MHz)\n"); return -EINVAL; } } ret = alloc_private(dev, sizeof(struct das16_private_struct)); if (ret < 0) return ret; if (thisboard->size < 0x400) { printk(" 0x%04lx-0x%04lx\n", iobase, iobase + thisboard->size); if (!request_region(iobase, thisboard->size, "das16")) { printk(KERN_ERR " I/O port conflict\n"); return -EIO; } } else { printk(KERN_INFO " 0x%04lx-0x%04lx 0x%04lx-0x%04lx\n", iobase, iobase + 0x0f, iobase + 0x400, iobase + 0x400 + (thisboard->size & 0x3ff)); if (!request_region(iobase, 0x10, "das16")) { printk(KERN_ERR " I/O port conflict: 0x%04lx-0x%04lx\n", iobase, iobase + 0x0f); return -EIO; } if (!request_region(iobase + 0x400, thisboard->size & 0x3ff, "das16")) { release_region(iobase, 0x10); printk(KERN_ERR " I/O port conflict: 0x%04lx-0x%04lx\n", iobase + 0x400, iobase + 0x400 + (thisboard->size & 0x3ff)); return -EIO; } } dev->iobase = iobase; /* probe id bits to make sure they are consistent */ if (das16_probe(dev, it)) { printk(KERN_ERR " id bits do not match selected board, aborting\n"); return -EINVAL; } dev->board_name = thisboard->name; /* get master clock speed */ if (thisboard->size < 0x400) { if (it->options[3]) devpriv->clockbase = 1000 / it->options[3]; else devpriv->clockbase = 1000; /* 1 MHz default */ } else { das1600_mode_detect(dev); } /* now for the irq */ if (irq > 1 && irq < 8) { ret = request_irq(irq, das16_dma_interrupt, 0, "das16", dev); if (ret < 0) return ret; dev->irq = irq; printk(KERN_INFO " ( irq = %u )", irq); } else if (irq == 0) { printk(" ( no irq )"); } else { printk(" invalid irq\n"); return -EINVAL; } /* initialize dma */ dma_chan = it->options[2]; if (dma_chan == 1 || dma_chan == 3) { /* allocate dma buffers */ int i; for (i = 0; i < 2; i++) { devpriv->dma_buffer[i] = pci_alloc_consistent( NULL, DAS16_DMA_SIZE, &devpriv->dma_buffer_addr[i]); if (devpriv->dma_buffer[i] == NULL) return -ENOMEM; } if (request_dma(dma_chan, "das16")) { printk(KERN_ERR " failed to allocate 
dma channel %i\n", dma_chan); return -EINVAL; } devpriv->dma_chan = dma_chan; flags = claim_dma_lock(); disable_dma(devpriv->dma_chan); set_dma_mode(devpriv->dma_chan, DMA_MODE_READ); release_dma_lock(flags); printk(KERN_INFO " ( dma = %u)\n", dma_chan); } else if (dma_chan == 0) { printk(KERN_INFO " ( no dma )\n"); } else { printk(KERN_ERR " invalid dma channel\n"); return -EINVAL; } /* get any user-defined input range */ if (thisboard->ai_pg == das16_pg_none && (it->options[4] || it->options[5])) { /* allocate single-range range table */ devpriv->user_ai_range_table = kmalloc(sizeof(struct comedi_lrange) + sizeof(struct comedi_krange), GFP_KERNEL); /* initialize ai range */ devpriv->user_ai_range_table->length = 1; user_ai_range = devpriv->user_ai_range_table->range; user_ai_range->min = it->options[4]; user_ai_range->max = it->options[5]; user_ai_range->flags = UNIT_volt; } /* get any user-defined output range */ if (it->options[6] || it->options[7]) { /* allocate single-range range table */ devpriv->user_ao_range_table = kmalloc(sizeof(struct comedi_lrange) + sizeof(struct comedi_krange), GFP_KERNEL); /* initialize ao range */ devpriv->user_ao_range_table->length = 1; user_ao_range = devpriv->user_ao_range_table->range; user_ao_range->min = it->options[6]; user_ao_range->max = it->options[7]; user_ao_range->flags = UNIT_volt; } if (timer_mode) { init_timer(&(devpriv->timer)); devpriv->timer.function = das16_timer_interrupt; devpriv->timer.data = (unsigned long)dev; } devpriv->timer_mode = timer_mode ? 
1 : 0; ret = alloc_subdevices(dev, 5); if (ret < 0) return ret; s = dev->subdevices + 0; dev->read_subdev = s; /* ai */ if (thisboard->ai) { s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_CMD_READ; if (devpriv->ai_singleended) { s->n_chan = 16; s->len_chanlist = 16; s->subdev_flags |= SDF_GROUND; } else { s->n_chan = 8; s->len_chanlist = 8; s->subdev_flags |= SDF_DIFF; } s->maxdata = (1 << thisboard->ai_nbits) - 1; if (devpriv->user_ai_range_table) { /* user defined ai range */ s->range_table = devpriv->user_ai_range_table; } else if (devpriv->ai_unipolar) { s->range_table = das16_ai_uni_lranges[thisboard->ai_pg]; } else { s->range_table = das16_ai_bip_lranges[thisboard->ai_pg]; } s->insn_read = thisboard->ai; s->do_cmdtest = das16_cmd_test; s->do_cmd = das16_cmd_exec; s->cancel = das16_cancel; s->munge = das16_ai_munge; } else { s->type = COMEDI_SUBD_UNUSED; } s = dev->subdevices + 1; /* ao */ if (thisboard->ao) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 2; s->maxdata = (1 << thisboard->ao_nbits) - 1; /* user defined ao range */ if (devpriv->user_ao_range_table) s->range_table = devpriv->user_ao_range_table; else s->range_table = &range_unknown; s->insn_write = thisboard->ao; } else { s->type = COMEDI_SUBD_UNUSED; } s = dev->subdevices + 2; /* di */ if (thisboard->di) { s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = thisboard->di; } else { s->type = COMEDI_SUBD_UNUSED; } s = dev->subdevices + 3; /* do */ if (thisboard->do_) { s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = thisboard->do_; /* initialize digital output lines */ outb(s->state, dev->iobase + DAS16_DIO); } else { s->type = COMEDI_SUBD_UNUSED; } s = dev->subdevices + 4; /* 8255 */ if (thisboard->i8255_offset != 0) { subdev_8255_init(dev, s, NULL, (dev->iobase + 
thisboard->i8255_offset)); } else { s->type = COMEDI_SUBD_UNUSED; } das16_reset(dev); /* set the interrupt level */ devpriv->control_state = DAS16_IRQ(dev->irq); outb(devpriv->control_state, dev->iobase + DAS16_CONTROL); /* turn on das1600 mode if available */ if (thisboard->size > 0x400) { outb(DAS1600_ENABLE_VAL, dev->iobase + DAS1600_ENABLE); outb(0, dev->iobase + DAS1600_CONV); outb(0, dev->iobase + DAS1600_BURST); } return 0; } static int das16_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: das16: remove\n", dev->minor); das16_reset(dev); if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 4); if (devpriv) { int i; for (i = 0; i < 2; i++) { if (devpriv->dma_buffer[i]) pci_free_consistent(NULL, DAS16_DMA_SIZE, devpriv->dma_buffer[i], devpriv-> dma_buffer_addr[i]); } if (devpriv->dma_chan) free_dma(devpriv->dma_chan); kfree(devpriv->user_ai_range_table); kfree(devpriv->user_ao_range_table); } if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) { if (thisboard->size < 0x400) { release_region(dev->iobase, thisboard->size); } else { release_region(dev->iobase, 0x10); release_region(dev->iobase + 0x400, thisboard->size & 0x3ff); } } return 0; } static int __init driver_das16_init_module(void) { return comedi_driver_register(&driver_das16); } static void __exit driver_das16_cleanup_module(void) { comedi_driver_unregister(&driver_das16); } module_init(driver_das16_init_module); module_exit(driver_das16_cleanup_module); /* utility function that suggests a dma transfer size in bytes */ static unsigned int das16_suggest_transfer_size(struct comedi_device *dev, struct comedi_cmd cmd) { unsigned int size; unsigned int freq; /* if we are using timer interrupt, we don't care how long it * will take to complete transfer since it will be interrupted * by timer interrupt */ if (devpriv->timer_mode) return DAS16_DMA_SIZE; /* otherwise, we are relying on dma terminal count interrupt, * so pick a reasonable size */ if (cmd.convert_src == 
TRIG_TIMER) freq = 1000000000 / cmd.convert_arg; else if (cmd.scan_begin_src == TRIG_TIMER) freq = (1000000000 / cmd.scan_begin_arg) * cmd.chanlist_len; /* return some default value */ else freq = 0xffffffff; if (cmd.flags & TRIG_WAKE_EOS) { size = sample_size * cmd.chanlist_len; } else { /* make buffer fill in no more than 1/3 second */ size = (freq / 3) * sample_size; } /* set a minimum and maximum size allowed */ if (size > DAS16_DMA_SIZE) size = DAS16_DMA_SIZE - DAS16_DMA_SIZE % sample_size; else if (size < sample_size) size = sample_size; if (cmd.stop_src == TRIG_COUNT && size > devpriv->adc_byte_count) size = devpriv->adc_byte_count; return size; } static void das16_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s, void *array, unsigned int num_bytes, unsigned int start_chan_index) { unsigned int i, num_samples = num_bytes / sizeof(short); short *data = array; for (i = 0; i < num_samples; i++) { data[i] = le16_to_cpu(data[i]); if (thisboard->ai_nbits == 12) data[i] = (data[i] >> 4) & 0xfff; } } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
googyanas/Googy-Kernel
drivers/scsi/lasi700.c
9266
4801
/* -*- mode: c; c-basic-offset: 8 -*- */ /* PARISC LASI driver for the 53c700 chip * * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com **----------------------------------------------------------------------------- ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ** **----------------------------------------------------------------------------- */ /* * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently * debugging this driver on the parisc architecture and suggesting * many improvements and bug fixes. * * Thanks also go to Linuxcare Inc. for providing several PARISC * machines for me to debug the driver on. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/blkdev.h> #include <linux/ioport.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/hardware.h> #include <asm/parisc-device.h> #include <asm/delay.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" MODULE_AUTHOR("James Bottomley"); MODULE_DESCRIPTION("lasi700 SCSI Driver"); MODULE_LICENSE("GPL"); #define LASI_700_SVERSION 0x00071 #define LASI_710_SVERSION 0x00082 #define LASI700_ID_TABLE { \ .hw_type = HPHW_FIO, \ .sversion = LASI_700_SVERSION, \ .hversion = HVERSION_ANY_ID, \ .hversion_rev = HVERSION_REV_ANY_ID, \ } #define LASI710_ID_TABLE { \ .hw_type = HPHW_FIO, \ .sversion = LASI_710_SVERSION, \ .hversion = HVERSION_ANY_ID, \ .hversion_rev = HVERSION_REV_ANY_ID, \ } #define LASI700_CLOCK 25 #define LASI710_CLOCK 40 #define LASI_SCSI_CORE_OFFSET 0x100 static struct parisc_device_id lasi700_ids[] = { LASI700_ID_TABLE, LASI710_ID_TABLE, { 0 } }; static struct scsi_host_template lasi700_template = { .name = "LASI SCSI 53c700", .proc_name = "lasi700", .this_id = 7, .module = THIS_MODULE, }; MODULE_DEVICE_TABLE(parisc, lasi700_ids); static int __init lasi700_probe(struct parisc_device *dev) { unsigned long base = dev->hpa.start + LASI_SCSI_CORE_OFFSET; struct NCR_700_Host_Parameters *hostdata; struct Scsi_Host *host; hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); if (!hostdata) { dev_printk(KERN_ERR, &dev->dev, "Failed to allocate host data\n"); return -ENOMEM; } hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); hostdata->base = ioremap_nocache(base, 0x100); hostdata->differential = 0; if (dev->id.sversion == LASI_700_SVERSION) { hostdata->clock = LASI700_CLOCK; hostdata->force_le_on_be = 1; } 
else { hostdata->clock = LASI710_CLOCK; hostdata->force_le_on_be = 0; hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; hostdata->burst_length = 8; } host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev); if (!host) goto out_kfree; host->this_id = 7; host->base = base; host->irq = dev->irq; if(request_irq(dev->irq, NCR_700_intr, IRQF_SHARED, "lasi700", host)) { printk(KERN_ERR "lasi700: request_irq failed!\n"); goto out_put_host; } dev_set_drvdata(&dev->dev, host); scsi_scan_host(host); return 0; out_put_host: scsi_host_put(host); out_kfree: iounmap(hostdata->base); kfree(hostdata); return -ENODEV; } static int __exit lasi700_driver_remove(struct parisc_device *dev) { struct Scsi_Host *host = dev_get_drvdata(&dev->dev); struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)host->hostdata[0]; scsi_remove_host(host); NCR_700_release(host); free_irq(host->irq, host); iounmap(hostdata->base); kfree(hostdata); return 0; } static struct parisc_driver lasi700_driver = { .name = "lasi_scsi", .id_table = lasi700_ids, .probe = lasi700_probe, .remove = __devexit_p(lasi700_driver_remove), }; static int __init lasi700_init(void) { return register_parisc_driver(&lasi700_driver); } static void __exit lasi700_exit(void) { unregister_parisc_driver(&lasi700_driver); } module_init(lasi700_init); module_exit(lasi700_exit);
gpl-2.0
hashem78/G-Kernel
fs/cachefiles/xattr.c
9266
6565
/* CacheFiles extended attribute management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/slab.h>
#include "internal.h"

/* Name of the user xattr under which cache state is stored on disk */
static const char cachefiles_xattr_cache[] =
	XATTR_USER_PREFIX "CacheFiles.cache";

/*
 * check the type label on an object
 * - done using xattrs
 *
 * Writes a two-character hex type label ("C3" when there is no cookie)
 * as a new xattr; if one already exists, reads it back and verifies it
 * matches.  Returns 0 on match/creation, -EIO on mismatch or malformed
 * label, or the xattr op error.
 */
int cachefiles_check_object_type(struct cachefiles_object *object)
{
	struct dentry *dentry = object->dentry;
	char type[3], xtype[3];
	int ret;

	ASSERT(dentry);
	ASSERT(dentry->d_inode);

	if (!object->fscache.cookie)
		strcpy(type, "C3");
	else
		snprintf(type, 3, "%02x", object->fscache.cookie->def->type);

	_enter("%p{%s}", object, type);

	/* attempt to install a type label directly */
	ret = vfs_setxattr(dentry, cachefiles_xattr_cache, type, 2,
			   XATTR_CREATE);
	if (ret == 0) {
		_debug("SET"); /* we succeeded */
		goto error;
	}

	if (ret != -EEXIST) {
		kerror("Can't set xattr on %*.*s [%lu] (err %d)",
		       dentry->d_name.len, dentry->d_name.len,
		       dentry->d_name.name, dentry->d_inode->i_ino,
		       -ret);
		goto error;
	}

	/* read the current type label */
	ret = vfs_getxattr(dentry, cachefiles_xattr_cache, xtype, 3);
	if (ret < 0) {
		if (ret == -ERANGE)
			goto bad_type_length;
		kerror("Can't read xattr on %*.*s [%lu] (err %d)",
		       dentry->d_name.len, dentry->d_name.len,
		       dentry->d_name.name, dentry->d_inode->i_ino,
		       -ret);
		goto error;
	}

	/* check the type is what we're expecting */
	if (ret != 2)
		goto bad_type_length;
	if (xtype[0] != type[0] || xtype[1] != type[1])
		goto bad_type;
	ret = 0;

error:
	_leave(" = %d", ret);
	return ret;

bad_type_length:
	kerror("Cache object %lu type xattr length incorrect",
	       dentry->d_inode->i_ino);
	ret = -EIO;
	goto error;

bad_type:
	xtype[2] = 0;
	kerror("Cache object %*.*s [%lu] type %s not %s",
	       dentry->d_name.len, dentry->d_name.len,
	       dentry->d_name.name, dentry->d_inode->i_ino,
	       xtype, type);
	ret = -EIO;
	goto error;
}

/*
 * set the state xattr on a cache file
 *
 * Installs the netfs's auxiliary data (type byte + aux blob, starting
 * at auxdata->type) as a brand-new xattr.  Fails with -EEXIST if one
 * is already present (XATTR_CREATE).
 */
int cachefiles_set_object_xattr(struct cachefiles_object *object,
				struct cachefiles_xattr *auxdata)
{
	struct dentry *dentry = object->dentry;
	int ret;

	ASSERT(object->fscache.cookie);
	ASSERT(dentry);

	_enter("%p,#%d", object, auxdata->len);

	/* attempt to install the cache metadata directly */
	_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);

	ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
			   &auxdata->type, auxdata->len,
			   XATTR_CREATE);
	if (ret < 0 && ret != -ENOMEM)
		cachefiles_io_error_obj(
			object,
			"Failed to set xattr with error %d", ret);

	_leave(" = %d", ret);
	return ret;
}

/*
 * update the state xattr on a cache file
 *
 * Same as cachefiles_set_object_xattr() but replaces an existing
 * label (XATTR_REPLACE), so it fails with -ENODATA if none exists.
 */
int cachefiles_update_object_xattr(struct cachefiles_object *object,
				   struct cachefiles_xattr *auxdata)
{
	struct dentry *dentry = object->dentry;
	int ret;

	ASSERT(object->fscache.cookie);
	ASSERT(dentry);

	_enter("%p,#%d", object, auxdata->len);

	/* attempt to install the cache metadata directly */
	_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);

	ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
			   &auxdata->type, auxdata->len,
			   XATTR_REPLACE);
	if (ret < 0 && ret != -ENOMEM)
		cachefiles_io_error_obj(
			object,
			"Failed to update xattr with error %d", ret);

	_leave(" = %d", ret);
	return ret;
}

/*
 * check the state xattr on a cache file
 * - return -ESTALE if the object should be deleted
 *
 * Reads the on-disk label into a temporary buffer, compares the type
 * byte, then lets the netfs's check_aux op judge the auxiliary data;
 * on FSCACHE_CHECKAUX_NEEDS_UPDATE the label is rewritten in place.
 */
int cachefiles_check_object_xattr(struct cachefiles_object *object,
				  struct cachefiles_xattr *auxdata)
{
	struct cachefiles_xattr *auxbuf;
	struct dentry *dentry = object->dentry;
	int ret;

	_enter("%p,#%d", object, auxdata->len);

	ASSERT(dentry);
	ASSERT(dentry->d_inode);

	/* room for the type byte plus up to 512 bytes of aux data
	 * (the read below is sized 512 + 1 to match) */
	auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
	if (!auxbuf) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	/* read the current type label */
	ret = vfs_getxattr(dentry, cachefiles_xattr_cache,
			   &auxbuf->type, 512 + 1);
	if (ret < 0) {
		if (ret == -ENODATA)
			goto stale; /* no attribute - power went off
				     * mid-cull? */

		if (ret == -ERANGE)
			goto bad_type_length;

		cachefiles_io_error_obj(object,
					"Can't read xattr on %lu (err %d)",
					dentry->d_inode->i_ino, -ret);
		goto error;
	}

	/* check the on-disk object */
	if (ret < 1)
		goto bad_type_length;

	if (auxbuf->type != auxdata->type)
		goto stale;

	auxbuf->len = ret;

	/* consult the netfs */
	if (object->fscache.cookie->def->check_aux) {
		enum fscache_checkaux result;
		unsigned int dlen;

		dlen = auxbuf->len - 1; /* aux data excludes the type byte */

		_debug("checkaux %s #%u",
		       object->fscache.cookie->def->name, dlen);

		result = fscache_check_aux(&object->fscache,
					   &auxbuf->data, dlen);

		switch (result) {
			/* entry okay as is */
		case FSCACHE_CHECKAUX_OKAY:
			goto okay;

			/* entry requires update */
		case FSCACHE_CHECKAUX_NEEDS_UPDATE:
			break;

			/* entry requires deletion */
		case FSCACHE_CHECKAUX_OBSOLETE:
			goto stale;

		default:
			BUG();
		}

		/* update the current label */
		ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
				   &auxdata->type, auxdata->len,
				   XATTR_REPLACE);
		if (ret < 0) {
			cachefiles_io_error_obj(object,
						"Can't update xattr on %lu"
						" (error %d)",
						dentry->d_inode->i_ino, -ret);
			goto error;
		}
	}

okay:
	ret = 0;

error:
	kfree(auxbuf);
	_leave(" = %d", ret);
	return ret;

bad_type_length:
	kerror("Cache object %lu xattr length incorrect",
	       dentry->d_inode->i_ino);
	ret = -EIO;
	goto error;

stale:
	ret = -ESTALE;
	goto error;
}

/*
 * remove the object's xattr to mark it stale
 *
 * -ENOENT/-ENODATA (already gone) are treated as success.
 */
int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
				   struct dentry *dentry)
{
	int ret;

	ret = vfs_removexattr(dentry, cachefiles_xattr_cache);
	if (ret < 0) {
		if (ret == -ENOENT || ret == -ENODATA)
			ret = 0;
		else if (ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Can't remove xattr from %lu"
					    " (error %d)",
					    dentry->d_inode->i_ino, -ret);
	}

	_leave(" = %d", ret);
	return ret;
}
gpl-2.0
AntaresOne/android_kernel_samsung_jf
drivers/staging/sbe-2t3e3/maps.c
11058
4481
/* * SBE 2T3E3 synchronous serial card driver for Linux * * Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This code is based on a driver written by SBE Inc. */ #include <linux/kernel.h> #include "2t3e3.h" const u32 cpld_reg_map[][2] = { { 0x0000, 0x0080 }, /* 0 - Port Control Register A (PCRA) */ { 0x0004, 0x0084 }, /* 1 - Port Control Register B (PCRB) */ { 0x0008, 0x0088 }, /* 2 - LCV Count Register (PLCR) */ { 0x000c, 0x008c }, /* 3 - LCV Threshold register (PLTR) */ { 0x0010, 0x0090 }, /* 4 - Payload Fill Register (PPFR) */ { 0x0200, 0x0200 }, /* 5 - Board ID / FPGA Programming Status Register */ { 0x0204, 0x0204 }, /* 6 - FPGA Version Register */ { 0x0800, 0x1000 }, /* 7 - Framer Registers Base Address */ { 0x2000, 0x2000 }, /* 8 - Serial Chip Select Register */ { 0x2004, 0x2004 }, /* 9 - Static Reset Register */ { 0x2008, 0x2008 }, /* 10 - Pulse Reset Register */ { 0x200c, 0x200c }, /* 11 - FPGA Reconfiguration Register */ { 0x2010, 0x2014 }, /* 12 - LED Register (LEDR) */ { 0x2018, 0x201c }, /* 13 - LIU Control and Status Register (PISCR) */ { 0x2020, 0x2024 }, /* 14 - Interrupt Enable Register (PIER) */ { 0x0068, 0x00e8 }, /* 15 - Port Control Register C (PCRC) */ { 0x006c, 0x00ec }, /* 16 - Port Bandwidth Start (PBWF) */ { 0x0070, 0x00f0 }, /* 17 - Port Bandwidth Stop (PBWL) */ }; const u32 cpld_val_map[][2] = { { 0x01, 0x02 }, /* LIU1 / LIU2 select for Serial Chip Select */ { 0x04, 0x08 }, /* DAC1 / DAC2 select for Serial Chip Select */ { 0x00, 0x04 }, /* LOOP1 / LOOP2 - select of loop timing source */ { 0x01, 0x02 } /* PORT1 / PORT2 - select LIU and Framer for reset */ }; const u32 t3e3_framer_reg_map[] = { 0x00, /* 0 - OPERATING_MODE */ 0x01, /* 1 - IO_CONTROL */ 0x04, /* 2 - BLOCK_INTERRUPT_ENABLE */ 0x05, /* 3 - BLOCK_INTERRUPT_STATUS */ 0x10, /* 4 - 
T3_RX_CONFIGURATION_STATUS, E3_RX_CONFIGURATION_STATUS_1 */ 0x11, /* 5 - T3_RX_STATUS, E3_RX_CONFIGURATION_STATUS_2 */ 0x12, /* 6 - T3_RX_INTERRUPT_ENABLE, E3_RX_INTERRUPT_ENABLE_1 */ 0x13, /* 7 - T3_RX_INTERRUPT_STATUS, E3_RX_INTERRUPT_ENABLE_2 */ 0x14, /* 8 - T3_RX_SYNC_DETECT_ENABLE, E3_RX_INTERRUPT_STATUS_1 */ 0x15, /* 9 - E3_RX_INTERRUPT_STATUS_2 */ 0x16, /* 10 - T3_RX_FEAC */ 0x17, /* 11 - T3_RX_FEAC_INTERRUPT_ENABLE_STATUS */ 0x18, /* 12 - T3_RX_LAPD_CONTROL, E3_RX_LAPD_CONTROL */ 0x19, /* 13 - T3_RX_LAPD_STATUS, E3_RX_LAPD_STATUS */ 0x1a, /* 14 - E3_RX_NR_BYTE, E3_RX_SERVICE_BITS */ 0x1b, /* 15 - E3_RX_GC_BYTE */ 0x30, /* 16 - T3_TX_CONFIGURATION, E3_TX_CONFIGURATION */ 0x31, /* 17 - T3_TX_FEAC_CONFIGURATION_STATUS */ 0x32, /* 18 - T3_TX_FEAC */ 0x33, /* 19 - T3_TX_LAPD_CONFIGURATION, E3_TX_LAPD_CONFIGURATION */ 0x34, /* 20 - T3_TX_LAPD_STATUS, E3_TX_LAPD_STATUS_INTERRUPT */ 0x35, /* 21 - T3_TX_MBIT_MASK, E3_TX_GC_BYTE, E3_TX_SERVICE_BITS */ 0x36, /* 22 - T3_TX_FBIT_MASK, E3_TX_MA_BYTE */ 0x37, /* 23 - T3_TX_FBIT_MASK_2, E3_TX_NR_BYTE */ 0x38, /* 24 - T3_TX_FBIT_MASK_3 */ 0x48, /* 25 - E3_TX_FA1_ERROR_MASK, E3_TX_FAS_ERROR_MASK_UPPER */ 0x49, /* 26 - E3_TX_FA2_ERROR_MASK, E3_TX_FAS_ERROR_MASK_LOWER */ 0x4a, /* 27 - E3_TX_BIP8_MASK, E3_TX_BIP4_MASK */ 0x50, /* 28 - PMON_LCV_EVENT_COUNT_MSB */ 0x51, /* 29 - PMON_LCV_EVENT_COUNT_LSB */ 0x52, /* 30 - PMON_FRAMING_BIT_ERROR_EVENT_COUNT_MSB */ 0x53, /* 31 - PMON_FRAMING_BIT_ERROR_EVENT_COUNT_LSB */ 0x54, /* 32 - PMON_PARITY_ERROR_EVENT_COUNT_MSB */ 0x55, /* 33 - PMON_PARITY_ERROR_EVENT_COUNT_LSB */ 0x56, /* 34 - PMON_FEBE_EVENT_COUNT_MSB */ 0x57, /* 35 - PMON_FEBE_EVENT_COUNT_LSB */ 0x58, /* 36 - PMON_CP_BIT_ERROR_EVENT_COUNT_MSB */ 0x59, /* 37 - PMON_CP_BIT_ERROR_EVENT_COUNT_LSB */ 0x6c, /* 38 - PMON_HOLDING_REGISTER */ 0x6d, /* 39 - ONE_SECOND_ERROR_STATUS */ 0x6e, /* 40 - LCV_ONE_SECOND_ACCUMULATOR_MSB */ 0x6f, /* 41 - LCV_ONE_SECOND_ACCUMULATOR_LSB */ 0x70, /* 42 - 
FRAME_PARITY_ERROR_ONE_SECOND_ACCUMULATOR_MSB */ 0x71, /* 43 - FRAME_PARITY_ERROR_ONE_SECOND_ACCUMULATOR_LSB */ 0x72, /* 44 - FRAME_CP_BIT_ERROR_ONE_SECOND_ACCUMULATOR_MSB */ 0x73, /* 45 - FRAME_CP_BIT_ERROR_ONE_SECOND_ACCUMULATOR_LSB */ 0x80, /* 46 - LINE_INTERFACE_DRIVE */ 0x81 /* 47 - LINE_INTERFACE_SCAN */ }; const u32 t3e3_liu_reg_map[] = { 0x00, /* REG0 */ 0x01, /* REG1 */ 0x02, /* REG2 */ 0x03, /* REG3 */ 0x04 /* REG4 */ };
gpl-2.0
mseaborn/plash-glibc
localedata/tests-mbwc/dat_iswgraph.c
51
6016
/*
 *  TEST SUITE FOR MB/WC FUNCTIONS IN C LIBRARY
 *
 *	 FILE:	dat_iswgraph.c
 *
 *	 ISW*:	int iswgraph (wint_t wc);
 */

#include "dat_isw-funcs.h"

/*
 * Expected-result table for iswgraph(), one sub-table per locale
 * (de, en_US, euc-JP) plus an end sentinel.  Each record pairs an
 * input wide character with an expected-result triple whose exact
 * field semantics are defined by the TST_ISW_* machinery in
 * dat_isw-funcs.h.  NOTE(review): records whose middle result field
 * is 1 appear to mark inputs that must NOT be classified as graphic
 * (controls, spaces, WEOF) -- confirm against the harness.  The
 * SHOJI_IS_RIGHT blocks keep an alternative set of expectations that
 * is normally compiled out.
 */
TST_ISW_LOC (GRAPH, graph) = {
  {	TST_ISW_REC (de, graph)
	{
	  {  { 0x0080 }, { 0,1,0 }  },	/* CTRL     */
	  {  { 0x009F }, { 0,1,0 }  },	/* CTRL     */
#ifdef SHOJI_IS_RIGHT
	  {  { 0x00A0 }, { 0,1,0 }  },	/* NB SPACE */
#else
	  {  { 0x00A0 }, { 0,0,0 }  },	/* NB SPACE */
#endif
	  {  { 0x00A1 }, { 0,0,0 }  },	/* UD !     */
	  {  { 0x00B0 }, { 0,0,0 }  },	/* Degree   */
	  {  { 0x00B1 }, { 0,0,0 }  },	/* +- sign  */
	  {  { 0x00B2 }, { 0,0,0 }  },	/* SUP 2    */
	  {  { 0x00B3 }, { 0,0,0 }  },	/* SUP 3    */
	  {  { 0x00B4 }, { 0,0,0 }  },	/* ACUTE    */
	  {  { 0x00B8 }, { 0,0,0 }  },	/* CEDILLA  */
	  {  { 0x00B9 }, { 0,0,0 }  },	/* SUP 1    */
	  {  { 0x00BB }, { 0,0,0 }  },	/* >>       */
	  {  { 0x00BC }, { 0,0,0 }  },	/* 1/4      */
	  {  { 0x00BD }, { 0,0,0 }  },	/* 1/2      */
	  {  { 0x00BE }, { 0,0,0 }  },	/* 3/4      */
	  {  { 0x00BF }, { 0,0,0 }  },	/* UD ?     */
	  {  { 0x00C0 }, { 0,0,0 }  },	/* A Grave  */
	  {  { 0x00D6 }, { 0,0,0 }  },	/* O dia    */
	  {  { 0x00D7 }, { 0,0,0 }  },	/* multipl. */
	  {  { 0x00D8 }, { 0,0,0 }  },	/* O stroke */
	  {  { 0x00DF }, { 0,0,0 }  },	/* small Sh */
	  {  { 0x00E0 }, { 0,0,0 }  },	/* a grave  */
	  {  { 0x00F6 }, { 0,0,0 }  },	/* o dia    */
	  {  { 0x00F7 }, { 0,0,0 }  },	/* division */
	  {  { 0x00F8 }, { 0,0,0 }  },	/* o stroke */
	  {  { 0x00FF }, { 0,0,0 }  },	/* y dia    */
	  { .is_last = 1 }		/* Last element.  */
	}
  },
  {	TST_ISW_REC (enUS, graph)
	{
	  {  { WEOF   }, { 0,1,0 }  },
	  {  { 0x0000 }, { 0,1,0 }  },
	  {  { 0x001F }, { 0,1,0 }  },
	  {  { 0x0020 }, { 0,1,0 }  },
	  {  { 0x0021 }, { 0,0,0 }  },
	  {  { 0x002F }, { 0,0,0 }  },
	  {  { 0x0030 }, { 0,0,0 }  },
	  {  { 0x0039 }, { 0,0,0 }  },
	  {  { 0x003A }, { 0,0,0 }  },
	  {  { 0x0040 }, { 0,0,0 }  },
	  {  { 0x0041 }, { 0,0,0 }  },
	  {  { 0x005A }, { 0,0,0 }  },
	  {  { 0x005B }, { 0,0,0 }  },
	  {  { 0x0060 }, { 0,0,0 }  },
	  {  { 0x0061 }, { 0,0,0 }  },
	  {  { 0x007A }, { 0,0,0 }  },
	  {  { 0x007B }, { 0,0,0 }  },
	  {  { 0x007E }, { 0,0,0 }  },
	  {  { 0x007F }, { 0,1,0 }  },
	  {  { 0x0080 }, { 0,1,0 }  },	/* 20 */
	  { .is_last = 1 }		/* Last element.  */
	}
  },
  {	TST_ISW_REC( eucJP, graph )
	{
	  {  { 0x3000 }, { 0,1,0 }  },	/* IDEO. SPACE	      */
#ifdef SHOJI_IS_RIGHT
	  {  { 0x3020 }, { 0,1,0 }  },	/* POSTAL MARK FACE   */
	  {  { 0x3029 }, { 0,1,0 }  },	/* Hangzhou NUM9      */
	  {  { 0x302F }, { 0,1,0 }  },	/* Diacritics(Hangul) */
	  {  { 0x3037 }, { 0,1,0 }  },	/* Separator Symbol   */
	  {  { 0x303F }, { 0,1,0 }  },	/* IDEO. HALF SPACE   */
#else
	  {  { 0x3020 }, { 0,0,0 }  },	/* POSTAL MARK FACE   */
	  {  { 0x3029 }, { 0,0,0 }  },	/* Hangzhou NUM9      */
	  {  { 0x302F }, { 0,0,0 }  },	/* Diacritics(Hangul) */
	  {  { 0x3037 }, { 0,0,0 }  },	/* Separator Symbol   */
	  {  { 0x303F }, { 0,0,0 }  },	/* IDEO. HALF SPACE   */
#endif
	  {  { 0x3041 }, { 0,0,0 }  },	/* HIRAGANA a	      */
#ifdef SHOJI_IS_RIGHT
	  {  { 0x3094 }, { 0,1,0 }  },	/* HIRAGANA u"	      */ /* non jis */
	  {  { 0x3099 }, { 0,1,0 }  },	/* SOUND MARK	      */
#else
	  {  { 0x3094 }, { 0,0,0 }  },	/* HIRAGANA u"	      */ /* non jis */
	  {  { 0x3099 }, { 0,0,0 }  },	/* SOUND MARK	      */
#endif
	  {  { 0x309E }, { 0,0,0 }  },	/* ITERATION MARK     */ /* 10 */
	  {  { 0x30A1 }, { 0,0,0 }  },	/* KATAKANA a	      */
#ifdef SHOJI_IS_RIGHT
	  {  { 0x30FA }, { 0,1,0 }  },	/* KATAKANA wo"       */ /* non jis */
#else
	  {  { 0x30FA }, { 0,0,0 }  },	/* KATAKANA wo"       */ /* non jis */
#endif
	  {  { 0x30FB }, { 0,0,0 }  },	/* KATAKANA MID.DOT   */
	  {  { 0x30FE }, { 0,0,0 }  },	/* KATAKANA ITERATION */
#ifdef SHOJI_IS_RIGHT
	  {  { 0x3191 }, { 0,1,0 }  },	/* KANBUN REV.MARK    */
	  {  { 0x3243 }, { 0,1,0 }  },	/* IDEO. MARK (reach) */
	  {  { 0x32CB }, { 0,1,0 }  },	/* IDEO.TEL.SYM.DEC12 */
	  {  { 0x32FE }, { 0,1,0 }  },	/* MARU KATAKANA wo   */
	  {  { 0x33FE }, { 0,1,0 }  },	/* CJK IDEO.TEL.31th  */
#else
	  {  { 0x3191 }, { 0,0,0 }  },	/* KANBUN REV.MARK    */
	  {  { 0x3243 }, { 0,0,0 }  },	/* IDEO. MARK (reach) */
	  {  { 0x32CB }, { 0,0,0 }  },	/* IDEO.TEL.SYM.DEC12 */
	  {  { 0x32FE }, { 0,0,0 }  },	/* MARU KATAKANA wo   */
	  {  { 0x33FE }, { 0,0,0 }  },	/* CJK IDEO.TEL.31th  */
#endif
	  {  { 0x4E00 }, { 0,0,0 }  },	/* CJK UNI.IDEO.      */ /* 20 */
	  {  { 0x4E05 }, { 0,0,0 }  },	/* CJK UNI.IDEO.      */
#ifdef SHOJI_IS_RIGHT
	  {  { 0x4E06 }, { 0,1,0 }  },	/* CJK UNI.IDEO.NON-J */
#else
	  {  { 0x4E06 }, { 0,0,0 }  },	/* CJK UNI.IDEO.NON-J */
#endif
	  {  { 0x4E07 }, { 0,0,0 }  },	/* CJK UNI.IDEO.      */
	  {  { 0x4FFF }, { 0,0,0 }  },	/* CJK UNI.IDEO.      */
	  {  { 0x9000 }, { 0,0,0 }  },	/* CJK UNI.IDEO.      */
	  {  { 0x9006 }, { 0,0,0 }  },	/* CJK UNI.IDEO.      */
#ifdef SHOJI_IS_RIGHT
	  {  { 0x9007 }, { 0,1,0 }  },	/* CJK UNI.IDEO.NON-J */
	  {  { 0x9FA4 }, { 0,1,0 }  },	/* CJK UNI.IDEO.NON-J */
#else
	  {  { 0x9007 }, { 0,0,0 }  },	/* CJK UNI.IDEO.NON-J */
	  {  { 0x9FA4 }, { 0,0,0 }  },	/* CJK UNI.IDEO.NON-J */
#endif
	  {  { 0x9FA5 }, { 0,0,0 }  },	/* CJK UNI.IDEO.      */
#ifdef SHOJI_IS_RIGHT
	  {  { 0xFE4F }, { 0,1,0 }  },	/* CJK Wave Low Line  */ /* 30 */
#else
	  {  { 0xFE4F }, { 0,0,0 }  },	/* CJK Wave Low Line  */ /* 30 */
#endif
	  {  { 0xFF0F }, { 0,0,0 }  },	/* FULL SLASH	      */
	  {  { 0xFF19 }, { 0,0,0 }  },	/* FULL 9	      */
	  {  { 0xFF20 }, { 0,0,0 }  },	/* FULL @	      */
	  {  { 0xFF3A }, { 0,0,0 }  },	/* FULL Z	      */
	  {  { 0xFF40 }, { 0,0,0 }  },	/* FULL GRAVE ACC.    */
	  {  { 0xFF5A }, { 0,0,0 }  },	/* FULL z	      */
	  {  { 0xFF5E }, { 0,0,0 }  },	/* FULL ~ (tilde)     */
	  {  { 0xFF61 }, { 0,0,0 }  },	/* HALF IDEO.STOP. .  */
	  {  { 0xFF65 }, { 0,0,0 }  },	/* HALF KATA MID.DOT  */
	  {  { 0xFF66 }, { 0,0,0 }  },	/* HALF KATA WO       */
	  {  { 0xFF6F }, { 0,0,0 }  },	/* HALF KATA tu       */
	  {  { 0xFF70 }, { 0,0,0 }  },	/* HALF KATA PL -     */
	  {  { 0xFF71 }, { 0,0,0 }  },	/* HALF KATA A	      */
	  {  { 0xFF9E }, { 0,0,0 }  },	/* HALF KATA MI       */
	  { .is_last = 1 }		/* Last element.  */
	}
  },
  {	TST_ISW_REC (end, graph) }
};
gpl-2.0
GunioRobot/macgdb
bfd/corefile.c
51
4318
/* Core file generic interface routines for BFD.
   Copyright 1990, 1991, 1992, 1993, 1994, 2000, 2001, 2002, 2003, 2005,
   2007 Free Software Foundation, Inc.
   Written by Cygnus Support.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* The FUNCTION/SYNOPSIS/DESCRIPTION comments below are in BFD's
   doc-generation format; keep their structure intact.  */

/*
SECTION
	Core files

SUBSECTION
	Core file functions

DESCRIPTION
	These are functions pertaining to core files.
*/

#include "sysdep.h"
#include "bfd.h"
#include "libbfd.h"

/*
FUNCTION
	bfd_core_file_failing_command

SYNOPSIS
	const char *bfd_core_file_failing_command (bfd *abfd);

DESCRIPTION
	Return a read-only string explaining which program was running
	when it failed and produced the core file @var{abfd}.

*/

const char *
bfd_core_file_failing_command (bfd *abfd)
{
  /* Only core-format BFDs can answer this; flag misuse via the BFD
     error code and return NULL.  */
  if (abfd->format != bfd_core)
    {
      bfd_set_error (bfd_error_invalid_operation);
      return NULL;
    }

  /* Dispatch to the target back end's implementation.  */
  return BFD_SEND (abfd, _core_file_failing_command, (abfd));
}

/*
FUNCTION
	bfd_core_file_failing_signal

SYNOPSIS
	int bfd_core_file_failing_signal (bfd *abfd);

DESCRIPTION
	Returns the signal number which caused the core dump which
	generated the file the BFD @var{abfd} is attached to.
*/

int
bfd_core_file_failing_signal (bfd *abfd)
{
  /* 0 (not a valid signal number) doubles as the error indicator.  */
  if (abfd->format != bfd_core)
    {
      bfd_set_error (bfd_error_invalid_operation);
      return 0;
    }

  return BFD_SEND (abfd, _core_file_failing_signal, (abfd));
}

/*
FUNCTION
	core_file_matches_executable_p

SYNOPSIS
	bfd_boolean core_file_matches_executable_p
	  (bfd *core_bfd, bfd *exec_bfd);

DESCRIPTION
	Return <<TRUE>> if the core file attached to @var{core_bfd}
	was generated by a run of the executable file attached to
	@var{exec_bfd}, <<FALSE>> otherwise.
*/

bfd_boolean
core_file_matches_executable_p (bfd *core_bfd, bfd *exec_bfd)
{
  /* Both formats must be right before delegating to the back end.  */
  if (core_bfd->format != bfd_core || exec_bfd->format != bfd_object)
    {
      bfd_set_error (bfd_error_wrong_format);
      return FALSE;
    }

  return BFD_SEND (core_bfd, _core_file_matches_executable_p,
		   (core_bfd, exec_bfd));
}

/*
FUNCTION
	generic_core_file_matches_executable_p

SYNOPSIS
	bfd_boolean generic_core_file_matches_executable_p
	  (bfd *core_bfd, bfd *exec_bfd);

DESCRIPTION
	Return TRUE if the core file attached to @var{core_bfd}
	was generated by a run of the executable file attached
	to @var{exec_bfd}.  The match is based on executable
	basenames only.

	Note: When not able to determine the core file failing
	command or the executable name, we still return TRUE even
	though we're not sure that core file and executable match.
	This is to avoid generating a false warning in situations
	where we really don't know whether they match or not.
*/

bfd_boolean
generic_core_file_matches_executable_p (bfd *core_bfd, bfd *exec_bfd)
{
  char *exec;
  char *core;
  char *last_slash;

  /* Missing inputs: report a (possibly optimistic) match -- see the
     DESCRIPTION note above.  */
  if (exec_bfd == NULL || core_bfd == NULL)
    return TRUE;

  /* The cast below is to avoid a compiler warning due to the assignment
     of the const char * returned by bfd_core_file_failing_command to a
     non-const char *.  In this case, the assignment does not lead to
     breaking the const, as we're only reading the string.  */

  core = (char *) bfd_core_file_failing_command (core_bfd);
  if (core == NULL)
    return TRUE;

  exec = bfd_get_filename (exec_bfd);
  if (exec == NULL)
    return TRUE;

  /* Reduce both paths to their basenames before comparing.
     NOTE(review): only '/' is treated as a directory separator here;
     DOS-style '\\' paths would not be stripped -- confirm whether any
     host needing that reaches this generic routine.  */
  last_slash = strrchr (core, '/');
  if (last_slash != NULL)
    core = last_slash + 1;

  last_slash = strrchr (exec, '/');
  if (last_slash != NULL)
    exec = last_slash + 1;

  return strcmp (exec, core) == 0;
}
gpl-2.0
akhilnarang/ThugLife_sprout
drivers/misc/mediatek/power/mt6582/upmu_common.c
51
535175
/* * Copyright (C) 2011-2014 MediaTek Inc. * * This program is free software: you can redistribute it and/or modify it under the terms of the * GNU General Public License version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along with this program. * If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/xlog.h> #include <linux/module.h> #include <mach/pmic_mt6323_sw.h> #include <mach/upmu_common.h> #include <mach/upmu_hw.h> //temp - will be removed void upmu_set_rg_clksq_en(kal_uint32 val) { } //------------------------------ void upmu_set_rg_vcdt_hv_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCDT_HV_EN_MASK), (kal_uint32)(PMIC_RG_VCDT_HV_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rgs_chr_ldo_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON0), (&val), (kal_uint32)(PMIC_RGS_CHR_LDO_DET_MASK), (kal_uint32)(PMIC_RGS_CHR_LDO_DET_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_pchr_automode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_AUTOMODE_MASK), (kal_uint32)(PMIC_RG_PCHR_AUTOMODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_csdac_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CSDAC_EN_MASK), (kal_uint32)(PMIC_RG_CSDAC_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chr_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(CHR_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHR_EN_MASK), (kal_uint32)(PMIC_RG_CHR_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rgs_chrdet(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON0), (&val), (kal_uint32)(PMIC_RGS_CHRDET_MASK), (kal_uint32)(PMIC_RGS_CHRDET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rgs_vcdt_lv_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON0), (&val), (kal_uint32)(PMIC_RGS_VCDT_LV_DET_MASK), (kal_uint32)(PMIC_RGS_VCDT_LV_DET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rgs_vcdt_hv_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON0), (&val), (kal_uint32)(PMIC_RGS_VCDT_HV_DET_MASK), (kal_uint32)(PMIC_RGS_VCDT_HV_DET_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vcdt_lv_vth(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCDT_LV_VTH_MASK), (kal_uint32)(PMIC_RG_VCDT_LV_VTH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcdt_hv_vth(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCDT_HV_VTH_MASK), (kal_uint32)(PMIC_RG_VCDT_HV_VTH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbat_cv_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBAT_CV_EN_MASK), (kal_uint32)(PMIC_RG_VBAT_CV_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbat_cc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBAT_CC_EN_MASK), (kal_uint32)(PMIC_RG_VBAT_CC_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_cs_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(CHR_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CS_EN_MASK), (kal_uint32)(PMIC_RG_CS_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rgs_cs_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON2), (&val), (kal_uint32)(PMIC_RGS_CS_DET_MASK), (kal_uint32)(PMIC_RGS_CS_DET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rgs_vbat_cv_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON2), (&val), (kal_uint32)(PMIC_RGS_VBAT_CV_DET_MASK), (kal_uint32)(PMIC_RGS_VBAT_CV_DET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rgs_vbat_cc_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON2), (&val), (kal_uint32)(PMIC_RGS_VBAT_CC_DET_MASK), (kal_uint32)(PMIC_RGS_VBAT_CC_DET_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vbat_cv_vth(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBAT_CV_VTH_MASK), (kal_uint32)(PMIC_RG_VBAT_CV_VTH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbat_cc_vth(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBAT_CC_VTH_MASK), (kal_uint32)(PMIC_RG_VBAT_CC_VTH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_cs_vth(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CS_VTH_MASK), (kal_uint32)(PMIC_RG_CS_VTH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_tohtc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_TOHTC_MASK), (kal_uint32)(PMIC_RG_PCHR_TOHTC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_toltc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(CHR_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_TOLTC_MASK), (kal_uint32)(PMIC_RG_PCHR_TOLTC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbat_ov_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBAT_OV_EN_MASK), (kal_uint32)(PMIC_RG_VBAT_OV_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbat_ov_vth(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBAT_OV_VTH_MASK), (kal_uint32)(PMIC_RG_VBAT_OV_VTH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbat_ov_deg(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBAT_OV_DEG_MASK), (kal_uint32)(PMIC_RG_VBAT_OV_DEG_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rgs_vbat_ov_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON6), (&val), (kal_uint32)(PMIC_RGS_VBAT_OV_DET_MASK), (kal_uint32)(PMIC_RGS_VBAT_OV_DET_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_baton_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BATON_EN_MASK), (kal_uint32)(PMIC_RG_BATON_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_baton_ht_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BATON_HT_EN_MASK), (kal_uint32)(PMIC_RG_BATON_HT_EN_SHIFT) ); pmic_unlock(); } void upmu_set_baton_tdet_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_BATON_TDET_EN_MASK), (kal_uint32)(PMIC_BATON_TDET_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_baton_ht_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(CHR_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BATON_HT_TRIM_MASK), (kal_uint32)(PMIC_RG_BATON_HT_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_baton_ht_trim_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BATON_HT_TRIM_SET_MASK), (kal_uint32)(PMIC_RG_BATON_HT_TRIM_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rgs_baton_undet(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON7), (&val), (kal_uint32)(PMIC_RGS_BATON_UNDET_MASK), (kal_uint32)(PMIC_RGS_BATON_UNDET_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_csdac_data(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CSDAC_DATA_MASK), (kal_uint32)(PMIC_RG_CSDAC_DATA_SHIFT) ); pmic_unlock(); } void upmu_set_rg_frc_csvth_usbdl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_FRC_CSVTH_USBDL_MASK), (kal_uint32)(PMIC_RG_FRC_CSVTH_USBDL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rgs_pchr_flag_out(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON10), (&val), (kal_uint32)(PMIC_RGS_PCHR_FLAG_OUT_MASK), (kal_uint32)(PMIC_RGS_PCHR_FLAG_OUT_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_pchr_flag_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_FLAG_EN_MASK), (kal_uint32)(PMIC_RG_PCHR_FLAG_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_otg_bvalid_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OTG_BVALID_EN_MASK), (kal_uint32)(PMIC_RG_OTG_BVALID_EN_SHIFT) ); pmic_unlock(); } kal_uint32 
upmu_get_rgs_otg_bvalid_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON10), (&val), (kal_uint32)(PMIC_RGS_OTG_BVALID_DET_MASK), (kal_uint32)(PMIC_RGS_OTG_BVALID_DET_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_pchr_flag_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_FLAG_SEL_MASK), (kal_uint32)(PMIC_RG_PCHR_FLAG_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_testmode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_TESTMODE_MASK), (kal_uint32)(PMIC_RG_PCHR_TESTMODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_csdac_testmode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CSDAC_TESTMODE_MASK), (kal_uint32)(PMIC_RG_CSDAC_TESTMODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_RST_MASK), (kal_uint32)(PMIC_RG_PCHR_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_ft_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_FT_CTRL_MASK), (kal_uint32)(PMIC_RG_PCHR_FT_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chrwdt_td(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHRWDT_TD_MASK), (kal_uint32)(PMIC_RG_CHRWDT_TD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chrwdt_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHRWDT_EN_MASK), (kal_uint32)(PMIC_RG_CHRWDT_EN_SHIFT) ); 
pmic_unlock(); } void upmu_set_rg_chrwdt_wr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHRWDT_WR_MASK), (kal_uint32)(PMIC_RG_CHRWDT_WR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_rv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_RV_MASK), (kal_uint32)(PMIC_RG_PCHR_RV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chrwdt_int_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHRWDT_INT_EN_MASK), (kal_uint32)(PMIC_RG_CHRWDT_INT_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chrwdt_flag_wr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHRWDT_FLAG_WR_MASK), (kal_uint32)(PMIC_RG_CHRWDT_FLAG_WR_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rgs_chrwdt_out(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON15), (&val), (kal_uint32)(PMIC_RGS_CHRWDT_OUT_MASK), (kal_uint32)(PMIC_RGS_CHRWDT_OUT_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_uvlo_vthl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_UVLO_VTHL_MASK), (kal_uint32)(PMIC_RG_UVLO_VTHL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_usbdl_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_USBDL_RST_MASK), (kal_uint32)(PMIC_RG_USBDL_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_usbdl_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_USBDL_SET_MASK), (kal_uint32)(PMIC_RG_USBDL_SET_SHIFT) ); pmic_unlock(); 
} void upmu_set_adcin_vsen_mux_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_ADCIN_VSEN_MUX_EN_MASK), (kal_uint32)(PMIC_ADCIN_VSEN_MUX_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adcin_vsen_ext_baton_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADCIN_VSEN_EXT_BATON_EN_MASK), (kal_uint32)(PMIC_RG_ADCIN_VSEN_EXT_BATON_EN_SHIFT) ); pmic_unlock(); } void upmu_set_adcin_vbat_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_ADCIN_VBAT_EN_MASK), (kal_uint32)(PMIC_ADCIN_VBAT_EN_SHIFT) ); pmic_unlock(); } void upmu_set_adcin_vsen_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_ADCIN_VSEN_EN_MASK), (kal_uint32)(PMIC_ADCIN_VSEN_EN_SHIFT) ); pmic_unlock(); } void upmu_set_adcin_vchr_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_ADCIN_VCHR_EN_MASK), (kal_uint32)(PMIC_ADCIN_VCHR_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_rsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_RSEL_MASK), (kal_uint32)(PMIC_RG_BGR_RSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_unchop_ph(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_UNCHOP_PH_MASK), (kal_uint32)(PMIC_RG_BGR_UNCHOP_PH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_unchop(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_UNCHOP_MASK), 
(kal_uint32)(PMIC_RG_BGR_UNCHOP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bc11_bb_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BC11_BB_CTRL_MASK), (kal_uint32)(PMIC_RG_BC11_BB_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bc11_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BC11_RST_MASK), (kal_uint32)(PMIC_RG_BC11_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bc11_vsrc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BC11_VSRC_EN_MASK), (kal_uint32)(PMIC_RG_BC11_VSRC_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rgs_bc11_cmp_out(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHR_CON18), (&val), (kal_uint32)(PMIC_RGS_BC11_CMP_OUT_MASK), (kal_uint32)(PMIC_RGS_BC11_CMP_OUT_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_bc11_vref_vth(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BC11_VREF_VTH_MASK), (kal_uint32)(PMIC_RG_BC11_VREF_VTH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bc11_cmp_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BC11_CMP_EN_MASK), (kal_uint32)(PMIC_RG_BC11_CMP_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bc11_ipd_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BC11_IPD_EN_MASK), (kal_uint32)(PMIC_RG_BC11_IPD_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bc11_ipu_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON19), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_BC11_IPU_EN_MASK), (kal_uint32)(PMIC_RG_BC11_IPU_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bc11_bias_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BC11_BIAS_EN_MASK), (kal_uint32)(PMIC_RG_BC11_BIAS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_csdac_stp_inc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CSDAC_STP_INC_MASK), (kal_uint32)(PMIC_RG_CSDAC_STP_INC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_csdac_stp_dec(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CSDAC_STP_DEC_MASK), (kal_uint32)(PMIC_RG_CSDAC_STP_DEC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_csdac_dly(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CSDAC_DLY_MASK), (kal_uint32)(PMIC_RG_CSDAC_DLY_SHIFT) ); pmic_unlock(); } void upmu_set_rg_csdac_stp(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CSDAC_STP_MASK), (kal_uint32)(PMIC_RG_CSDAC_STP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_low_ich_db(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON22), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LOW_ICH_DB_MASK), (kal_uint32)(PMIC_RG_LOW_ICH_DB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chrind_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON22), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHRIND_ON_MASK), (kal_uint32)(PMIC_RG_CHRIND_ON_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chrind_dimming(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON22), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_CHRIND_DIMMING_MASK), (kal_uint32)(PMIC_RG_CHRIND_DIMMING_SHIFT) ); pmic_unlock(); } void upmu_set_rg_cv_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CV_MODE_MASK), (kal_uint32)(PMIC_RG_CV_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcdt_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCDT_MODE_MASK), (kal_uint32)(PMIC_RG_VCDT_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_csdac_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CSDAC_MODE_MASK), (kal_uint32)(PMIC_RG_CSDAC_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_tracking_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TRACKING_EN_MASK), (kal_uint32)(PMIC_RG_TRACKING_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_hwcv_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_HWCV_EN_MASK), (kal_uint32)(PMIC_RG_HWCV_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ulc_det_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ULC_DET_EN_MASK), (kal_uint32)(PMIC_RG_ULC_DET_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_trim_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_TRIM_EN_MASK), (kal_uint32)(PMIC_RG_BGR_TRIM_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ichrg_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ICHRG_TRIM_MASK), 
(kal_uint32)(PMIC_RG_ICHRG_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON25), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_TRIM_MASK), (kal_uint32)(PMIC_RG_BGR_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ovp_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OVP_TRIM_MASK), (kal_uint32)(PMIC_RG_OVP_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chr_osc_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHR_OSC_TRIM_MASK), (kal_uint32)(PMIC_RG_CHR_OSC_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_qi_bgr_ext_buf_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_QI_BGR_EXT_BUF_EN_MASK), (kal_uint32)(PMIC_QI_BGR_EXT_BUF_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_test_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_TEST_EN_MASK), (kal_uint32)(PMIC_RG_BGR_TEST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_test_rstb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_TEST_RSTB_MASK), (kal_uint32)(PMIC_RG_BGR_TEST_RSTB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_dac_usbdl_max(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON28), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DAC_USBDL_MAX_MASK), (kal_uint32)(PMIC_RG_DAC_USBDL_MAX_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHR_CON29), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_RSV_MASK), 
(kal_uint32)(PMIC_RG_PCHR_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_thr_det_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_THR_DET_DIS_MASK), (kal_uint32)(PMIC_THR_DET_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_thr_tmode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_THR_TMODE_MASK), (kal_uint32)(PMIC_RG_THR_TMODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_thr_temp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_THR_TEMP_SEL_MASK), (kal_uint32)(PMIC_RG_THR_TEMP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_strup_thr_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STRUP_THR_SEL_MASK), (kal_uint32)(PMIC_RG_STRUP_THR_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_thr_hwpdn_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_THR_HWPDN_EN_MASK), (kal_uint32)(PMIC_THR_HWPDN_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_thrdet_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_THRDET_SEL_MASK), (kal_uint32)(PMIC_RG_THRDET_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_strup_iref_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STRUP_IREF_TRIM_MASK), (kal_uint32)(PMIC_RG_STRUP_IREF_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_usbdl_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_USBDL_EN_MASK), 
(kal_uint32)(PMIC_RG_USBDL_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_fchr_keydet_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_FCHR_KEYDET_EN_MASK), (kal_uint32)(PMIC_RG_FCHR_KEYDET_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_fchr_pu_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_FCHR_PU_EN_MASK), (kal_uint32)(PMIC_RG_FCHR_PU_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_en_drvsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EN_DRVSEL_MASK), (kal_uint32)(PMIC_RG_EN_DRVSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rst_drvsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RST_DRVSEL_MASK), (kal_uint32)(PMIC_RG_RST_DRVSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vref_bg(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VREF_BG_MASK), (kal_uint32)(PMIC_RG_VREF_BG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pmu_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PMU_RSV_MASK), (kal_uint32)(PMIC_RG_PMU_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_thr_test(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_THR_TEST_MASK), (kal_uint32)(PMIC_THR_TEST_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_pmu_thr_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(STRUP_CON5), (&val), (kal_uint32)(PMIC_PMU_THR_DEB_MASK), (kal_uint32)(PMIC_PMU_THR_DEB_SHIFT) ); 
pmic_unlock(); return val; } kal_uint32 upmu_get_pmu_thr_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(STRUP_CON5), (&val), (kal_uint32)(PMIC_PMU_THR_STATUS_MASK), (kal_uint32)(PMIC_PMU_THR_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_dduvlo_deb_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_DDUVLO_DEB_EN_MASK), (kal_uint32)(PMIC_DDUVLO_DEB_EN_SHIFT) ); pmic_unlock(); } void upmu_set_pwrbb_deb_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_PWRBB_DEB_EN_MASK), (kal_uint32)(PMIC_PWRBB_DEB_EN_SHIFT) ); pmic_unlock(); } void upmu_set_strup_osc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_OSC_EN_MASK), (kal_uint32)(PMIC_STRUP_OSC_EN_SHIFT) ); pmic_unlock(); } void upmu_set_strup_osc_en_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_OSC_EN_SEL_MASK), (kal_uint32)(PMIC_STRUP_OSC_EN_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_strup_ft_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_FT_CTRL_MASK), (kal_uint32)(PMIC_STRUP_FT_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_strup_pwron_force(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_PWRON_FORCE_MASK), (kal_uint32)(PMIC_STRUP_PWRON_FORCE_SHIFT) ); pmic_unlock(); } void upmu_set_bias_gen_en_force(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_BIAS_GEN_EN_FORCE_MASK), 
(kal_uint32)(PMIC_BIAS_GEN_EN_FORCE_SHIFT) ); pmic_unlock(); } void upmu_set_strup_pwron(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_PWRON_MASK), (kal_uint32)(PMIC_STRUP_PWRON_SHIFT) ); pmic_unlock(); } void upmu_set_strup_pwron_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_PWRON_SEL_MASK), (kal_uint32)(PMIC_STRUP_PWRON_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_bias_gen_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_BIAS_GEN_EN_MASK), (kal_uint32)(PMIC_BIAS_GEN_EN_SHIFT) ); pmic_unlock(); } void upmu_set_bias_gen_en_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_BIAS_GEN_EN_SEL_MASK), (kal_uint32)(PMIC_BIAS_GEN_EN_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rtc_xosc32_enb_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RTC_XOSC32_ENB_SW_MASK), (kal_uint32)(PMIC_RTC_XOSC32_ENB_SW_SHIFT) ); pmic_unlock(); } void upmu_set_rtc_xosc32_enb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RTC_XOSC32_ENB_SEL_MASK), (kal_uint32)(PMIC_RTC_XOSC32_ENB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_strup_dig_io_pg_force(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_DIG_IO_PG_FORCE_MASK), (kal_uint32)(PMIC_STRUP_DIG_IO_PG_FORCE_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_pg_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), 
(kal_uint32)(PMIC_VPROC_PG_ENB_MASK), (kal_uint32)(PMIC_VPROC_PG_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_pg_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_PG_ENB_MASK), (kal_uint32)(PMIC_VSYS_PG_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_vm_pg_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VM_PG_ENB_MASK), (kal_uint32)(PMIC_VM_PG_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_vio18_pg_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VIO18_PG_ENB_MASK), (kal_uint32)(PMIC_VIO18_PG_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_vtcxo_pg_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VTCXO_PG_ENB_MASK), (kal_uint32)(PMIC_VTCXO_PG_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_va_pg_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VA_PG_ENB_MASK), (kal_uint32)(PMIC_VA_PG_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_vio28_pg_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VIO28_PG_ENB_MASK), (kal_uint32)(PMIC_VIO28_PG_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_vgp2_pg_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VGP2_PG_ENB_MASK), (kal_uint32)(PMIC_VGP2_PG_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_pg_h2l_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_PG_H2L_EN_MASK), 
(kal_uint32)(PMIC_VPROC_PG_H2L_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_pg_h2l_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_PG_H2L_EN_MASK), (kal_uint32)(PMIC_VSYS_PG_H2L_EN_SHIFT) ); pmic_unlock(); } void upmu_set_strup_con6_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_CON6_RSV0_MASK), (kal_uint32)(PMIC_STRUP_CON6_RSV0_SHIFT) ); pmic_unlock(); } void upmu_set_clr_just_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_CLR_JUST_RST_MASK), (kal_uint32)(PMIC_CLR_JUST_RST_SHIFT) ); pmic_unlock(); } void upmu_set_uvlo_l2h_deb_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_UVLO_L2H_DEB_EN_MASK), (kal_uint32)(PMIC_UVLO_L2H_DEB_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_just_pwrkey_rst(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(STRUP_CON8), (&val), (kal_uint32)(PMIC_JUST_PWRKEY_RST_MASK), (kal_uint32)(PMIC_JUST_PWRKEY_RST_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_osc_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(STRUP_CON8), (&val), (kal_uint32)(PMIC_QI_OSC_EN_MASK), (kal_uint32)(PMIC_QI_OSC_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_strup_ext_pmic_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_EXT_PMIC_EN_MASK), (kal_uint32)(PMIC_STRUP_EXT_PMIC_EN_SHIFT) ); pmic_unlock(); } void upmu_set_strup_ext_pmic_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON9), (kal_uint32)(val), 
(kal_uint32)(PMIC_STRUP_EXT_PMIC_SEL_MASK), (kal_uint32)(PMIC_STRUP_EXT_PMIC_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_strup_con8_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_CON8_RSV0_MASK), (kal_uint32)(PMIC_STRUP_CON8_RSV0_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_ext_pmic_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(STRUP_CON9), (&val), (kal_uint32)(PMIC_QI_EXT_PMIC_EN_MASK), (kal_uint32)(PMIC_QI_EXT_PMIC_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_strup_auxadc_start_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_AUXADC_START_SW_MASK), (kal_uint32)(PMIC_STRUP_AUXADC_START_SW_SHIFT) ); pmic_unlock(); } void upmu_set_strup_auxadc_rstb_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_AUXADC_RSTB_SW_MASK), (kal_uint32)(PMIC_STRUP_AUXADC_RSTB_SW_SHIFT) ); pmic_unlock(); } void upmu_set_strup_auxadc_start_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_AUXADC_START_SEL_MASK), (kal_uint32)(PMIC_STRUP_AUXADC_START_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_strup_auxadc_rstb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_AUXADC_RSTB_SEL_MASK), (kal_uint32)(PMIC_STRUP_AUXADC_RSTB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_strup_pwroff_seq_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_PWROFF_SEQ_EN_MASK), (kal_uint32)(PMIC_STRUP_PWROFF_SEQ_EN_SHIFT) ); pmic_unlock(); } void 
upmu_set_strup_pwroff_preoff_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(STRUP_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_STRUP_PWROFF_PREOFF_EN_MASK), (kal_uint32)(PMIC_STRUP_PWROFF_PREOFF_EN_SHIFT) ); pmic_unlock(); } void upmu_set_spk_en_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_EN_L_MASK), (kal_uint32)(PMIC_SPK_EN_L_SHIFT) ); pmic_unlock(); } void upmu_set_spkmode_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_SPKMODE_L_MASK), (kal_uint32)(PMIC_SPKMODE_L_SHIFT) ); pmic_unlock(); } void upmu_set_spk_trim_en_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TRIM_EN_L_MASK), (kal_uint32)(PMIC_SPK_TRIM_EN_L_SHIFT) ); pmic_unlock(); } void upmu_set_spk_oc_shdn_dl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_OC_SHDN_DL_MASK), (kal_uint32)(PMIC_SPK_OC_SHDN_DL_SHIFT) ); pmic_unlock(); } void upmu_set_spk_ther_shdn_l_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_THER_SHDN_L_EN_MASK), (kal_uint32)(PMIC_SPK_THER_SHDN_L_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_gainl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_GAINL_MASK), (kal_uint32)(PMIC_RG_SPK_GAINL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_da_spk_offset_l(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(SPK_CON1), (&val), (kal_uint32)(PMIC_DA_SPK_OFFSET_L_MASK), (kal_uint32)(PMIC_DA_SPK_OFFSET_L_SHIFT) ); pmic_unlock(); return val; } 
kal_uint32 upmu_get_da_spk_lead_dglh_l(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(SPK_CON1), (&val), (kal_uint32)(PMIC_DA_SPK_LEAD_DGLH_L_MASK), (kal_uint32)(PMIC_DA_SPK_LEAD_DGLH_L_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ni_spk_lead_l(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(SPK_CON1), (&val), (kal_uint32)(PMIC_NI_SPK_LEAD_L_MASK), (kal_uint32)(PMIC_NI_SPK_LEAD_L_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_spk_offset_l_ov(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(SPK_CON1), (&val), (kal_uint32)(PMIC_SPK_OFFSET_L_OV_MASK), (kal_uint32)(PMIC_SPK_OFFSET_L_OV_SHIFT) ); pmic_unlock(); return val; } void upmu_set_spk_offset_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_OFFSET_L_SW_MASK), (kal_uint32)(PMIC_SPK_OFFSET_L_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spk_lead_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_LEAD_L_SW_MASK), (kal_uint32)(PMIC_SPK_LEAD_L_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spk_offset_l_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_OFFSET_L_MODE_MASK), (kal_uint32)(PMIC_SPK_OFFSET_L_MODE_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_spk_trim_done_l(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(SPK_CON1), (&val), (kal_uint32)(PMIC_SPK_TRIM_DONE_L_MASK), (kal_uint32)(PMIC_SPK_TRIM_DONE_L_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_spk_intg_rst_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_SPK_INTG_RST_L_MASK), (kal_uint32)(PMIC_RG_SPK_INTG_RST_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_force_en_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_FORCE_EN_L_MASK), (kal_uint32)(PMIC_RG_SPK_FORCE_EN_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_slew_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_SLEW_L_MASK), (kal_uint32)(PMIC_RG_SPK_SLEW_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spkab_obias_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPKAB_OBIAS_L_MASK), (kal_uint32)(PMIC_RG_SPKAB_OBIAS_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spkrcv_en_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPKRCV_EN_L_MASK), (kal_uint32)(PMIC_RG_SPKRCV_EN_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_drc_en_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_DRC_EN_L_MASK), (kal_uint32)(PMIC_RG_SPK_DRC_EN_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_test_en_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_TEST_EN_L_MASK), (kal_uint32)(PMIC_RG_SPK_TEST_EN_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spkab_oc_en_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPKAB_OC_EN_L_MASK), (kal_uint32)(PMIC_RG_SPKAB_OC_EN_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_oc_en_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON2), 
(kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_OC_EN_L_MASK), (kal_uint32)(PMIC_RG_SPK_OC_EN_L_SHIFT) ); pmic_unlock(); } void upmu_set_spk_trim_wnd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TRIM_WND_MASK), (kal_uint32)(PMIC_SPK_TRIM_WND_SHIFT) ); pmic_unlock(); } void upmu_set_spk_trim_thd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TRIM_THD_MASK), (kal_uint32)(PMIC_SPK_TRIM_THD_SHIFT) ); pmic_unlock(); } void upmu_set_spk_oc_wnd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_OC_WND_MASK), (kal_uint32)(PMIC_SPK_OC_WND_SHIFT) ); pmic_unlock(); } void upmu_set_spk_oc_thd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_OC_THD_MASK), (kal_uint32)(PMIC_SPK_OC_THD_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_spk_d_oc_l_deg(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(SPK_CON6), (&val), (kal_uint32)(PMIC_SPK_D_OC_L_DEG_MASK), (kal_uint32)(PMIC_SPK_D_OC_L_DEG_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_spk_ab_oc_l_deg(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(SPK_CON6), (&val), (kal_uint32)(PMIC_SPK_AB_OC_L_DEG_MASK), (kal_uint32)(PMIC_SPK_AB_OC_L_DEG_SHIFT) ); pmic_unlock(); return val; } void upmu_set_spk_td1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TD1_MASK), (kal_uint32)(PMIC_SPK_TD1_SHIFT) ); pmic_unlock(); } void upmu_set_spk_td2(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON7), (kal_uint32)(val), 
(kal_uint32)(PMIC_SPK_TD2_MASK), (kal_uint32)(PMIC_SPK_TD2_SHIFT) ); pmic_unlock(); } void upmu_set_spk_td3(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TD3_MASK), (kal_uint32)(PMIC_SPK_TD3_SHIFT) ); pmic_unlock(); } void upmu_set_spk_trim_div(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TRIM_DIV_MASK), (kal_uint32)(PMIC_SPK_TRIM_DIV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_btl_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BTL_SET_MASK), (kal_uint32)(PMIC_RG_BTL_SET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_ibias_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_IBIAS_SEL_MASK), (kal_uint32)(PMIC_RG_SPK_IBIAS_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_ccode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_CCODE_MASK), (kal_uint32)(PMIC_RG_SPK_CCODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_en_view_vcm(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_EN_VIEW_VCM_MASK), (kal_uint32)(PMIC_RG_SPK_EN_VIEW_VCM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_en_view_clk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_EN_VIEW_CLK_MASK), (kal_uint32)(PMIC_RG_SPK_EN_VIEW_CLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_vcm_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_VCM_SEL_MASK), 
(kal_uint32)(PMIC_RG_SPK_VCM_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_vcm_ibsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_VCM_IBSEL_MASK), (kal_uint32)(PMIC_RG_SPK_VCM_IBSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_fbrc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_FBRC_EN_MASK), (kal_uint32)(PMIC_RG_SPK_FBRC_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spkab_ovdrv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPKAB_OVDRV_MASK), (kal_uint32)(PMIC_RG_SPKAB_OVDRV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_octh_d(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_OCTH_D_MASK), (kal_uint32)(PMIC_RG_SPK_OCTH_D_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_RSV_MASK), (kal_uint32)(PMIC_RG_SPK_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spkpga_gain(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPKPGA_GAIN_MASK), (kal_uint32)(PMIC_RG_SPKPGA_GAIN_SHIFT) ); pmic_unlock(); } void upmu_set_spk_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_RSV0_MASK), (kal_uint32)(PMIC_SPK_RSV0_SHIFT) ); pmic_unlock(); } void upmu_set_spk_vcm_fast_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_VCM_FAST_EN_MASK), (kal_uint32)(PMIC_SPK_VCM_FAST_EN_SHIFT) 
); pmic_unlock(); } void upmu_set_spk_test_mode0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TEST_MODE0_MASK), (kal_uint32)(PMIC_SPK_TEST_MODE0_SHIFT) ); pmic_unlock(); } void upmu_set_spk_test_mode1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TEST_MODE1_MASK), (kal_uint32)(PMIC_SPK_TEST_MODE1_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_isense_refsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_ISENSE_REFSEL_MASK), (kal_uint32)(PMIC_RG_SPK_ISENSE_REFSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_isense_gainsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_ISENSE_GAINSEL_MASK), (kal_uint32)(PMIC_RG_SPK_ISENSE_GAINSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isense_pd_reset(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISENSE_PD_RESET_MASK), (kal_uint32)(PMIC_RG_ISENSE_PD_RESET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_isense_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_ISENSE_EN_MASK), (kal_uint32)(PMIC_RG_SPK_ISENSE_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_isense_test_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_ISENSE_TEST_EN_MASK), (kal_uint32)(PMIC_RG_SPK_ISENSE_TEST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_spk_td_wait(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON11), (kal_uint32)(val), 
(kal_uint32)(PMIC_SPK_TD_WAIT_MASK), (kal_uint32)(PMIC_SPK_TD_WAIT_SHIFT) ); pmic_unlock(); } void upmu_set_spk_td_done(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TD_DONE_MASK), (kal_uint32)(PMIC_SPK_TD_DONE_SHIFT) ); pmic_unlock(); } void upmu_set_spk_en_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_EN_MODE_MASK), (kal_uint32)(PMIC_SPK_EN_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_spk_vcm_fast_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_VCM_FAST_SW_MASK), (kal_uint32)(PMIC_SPK_VCM_FAST_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spk_rst_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_RST_L_SW_MASK), (kal_uint32)(PMIC_SPK_RST_L_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spkmode_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_SPKMODE_L_SW_MASK), (kal_uint32)(PMIC_SPKMODE_L_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spk_depop_en_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_DEPOP_EN_L_SW_MASK), (kal_uint32)(PMIC_SPK_DEPOP_EN_L_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spk_en_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_EN_L_SW_MASK), (kal_uint32)(PMIC_SPK_EN_L_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spk_outstg_en_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), 
(kal_uint32)(PMIC_SPK_OUTSTG_EN_L_SW_MASK), (kal_uint32)(PMIC_SPK_OUTSTG_EN_L_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spk_trim_en_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TRIM_EN_L_SW_MASK), (kal_uint32)(PMIC_SPK_TRIM_EN_L_SW_SHIFT) ); pmic_unlock(); } void upmu_set_spk_trim_stop_l_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SPK_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_SPK_TRIM_STOP_L_SW_MASK), (kal_uint32)(PMIC_SPK_TRIM_STOP_L_SW_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_cid(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CID), (&val), (kal_uint32)(PMIC_CID_MASK), (kal_uint32)(PMIC_CID_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_clksq_en_aud(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CLKSQ_EN_AUD_MASK), (kal_uint32)(PMIC_RG_CLKSQ_EN_AUD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_clksq_en_aux(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CLKSQ_EN_AUX_MASK), (kal_uint32)(PMIC_RG_CLKSQ_EN_AUX_SHIFT) ); pmic_unlock(); } void upmu_set_rg_clksq_en_fqr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CLKSQ_EN_FQR_MASK), (kal_uint32)(PMIC_RG_CLKSQ_EN_FQR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_strup_75k_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STRUP_75K_CK_PDN_MASK), (kal_uint32)(PMIC_RG_STRUP_75K_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_strup_32k_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STRUP_32K_CK_PDN_MASK), (kal_uint32)(PMIC_RG_STRUP_32K_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtc_75k_div4_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RTC_75K_DIV4_CK_PDN_MASK), (kal_uint32)(PMIC_RG_RTC_75K_DIV4_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtc_75k_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RTC_75K_CK_PDN_MASK), (kal_uint32)(PMIC_RG_RTC_75K_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtc_32k_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RTC_32K_CK_PDN_MASK), (kal_uint32)(PMIC_RG_RTC_32K_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_32k_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_32K_CK_PDN_MASK), (kal_uint32)(PMIC_RG_PCHR_32K_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ldostb_1m_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LDOSTB_1M_CK_PDN_MASK), (kal_uint32)(PMIC_RG_LDOSTB_1M_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_intrp_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INTRP_CK_PDN_MASK), (kal_uint32)(PMIC_RG_INTRP_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_drv_32k_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DRV_32K_CK_PDN_MASK), (kal_uint32)(PMIC_RG_DRV_32K_CK_PDN_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_buck_1m_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BUCK_1M_CK_PDN_MASK), (kal_uint32)(PMIC_RG_BUCK_1M_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_buck_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BUCK_CK_PDN_MASK), (kal_uint32)(PMIC_RG_BUCK_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_buck_ana_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BUCK_ANA_CK_PDN_MASK), (kal_uint32)(PMIC_RG_BUCK_ANA_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_buck32k_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BUCK32K_PDN_MASK), (kal_uint32)(PMIC_RG_BUCK32K_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_strup_6m_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STRUP_6M_PDN_MASK), (kal_uint32)(PMIC_RG_STRUP_6M_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_pwm_div_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_PWM_DIV_PDN_MASK), (kal_uint32)(PMIC_RG_SPK_PWM_DIV_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_div_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_DIV_PDN_MASK), (kal_uint32)(PMIC_RG_SPK_DIV_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_CK_PDN_MASK), 
(kal_uint32)(PMIC_RG_SPK_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pwmoc_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PWMOC_CK_PDN_MASK), (kal_uint32)(PMIC_RG_PWMOC_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_fqmtr_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_FQMTR_PDN_MASK), (kal_uint32)(PMIC_RG_FQMTR_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_drv_2m_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DRV_2M_CK_PDN_MASK), (kal_uint32)(PMIC_RG_DRV_2M_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_drv_1m_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DRV_1M_CK_PDN_MASK), (kal_uint32)(PMIC_RG_DRV_1M_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud_26m_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD_26M_PDN_MASK), (kal_uint32)(PMIC_RG_AUD_26M_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_accdet_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ACCDET_CK_PDN_MASK), (kal_uint32)(PMIC_RG_ACCDET_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtc_mclk_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RTC_MCLK_PDN_MASK), (kal_uint32)(PMIC_RG_RTC_MCLK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smps_ck_div_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_SMPS_CK_DIV_PDN_MASK), (kal_uint32)(PMIC_RG_SMPS_CK_DIV_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_CK_PDN_MASK), (kal_uint32)(PMIC_RG_EFUSE_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtc32k_1v8_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RTC32K_1V8_PDN_MASK), (kal_uint32)(PMIC_RG_RTC32K_1V8_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_clksq_en_aux_md(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CLKSQ_EN_AUX_MD_MASK), (kal_uint32)(PMIC_RG_CLKSQ_EN_AUX_MD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_sdm_ck_wake_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_SDM_CK_WAKE_PDN_MASK), (kal_uint32)(PMIC_RG_AUXADC_SDM_CK_WAKE_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink0_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK0_CK_PDN_MASK), (kal_uint32)(PMIC_RG_ISINK0_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink1_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK1_CK_PDN_MASK), (kal_uint32)(PMIC_RG_ISINK1_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink2_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK2_CK_PDN_MASK), (kal_uint32)(PMIC_RG_ISINK2_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink3_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; 
pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK3_CK_PDN_MASK), (kal_uint32)(PMIC_RG_ISINK3_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_sdm_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_SDM_CK_PDN_MASK), (kal_uint32)(PMIC_RG_AUXADC_SDM_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_ctl_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_CTL_CK_PDN_MASK), (kal_uint32)(PMIC_RG_AUXADC_CTL_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_32k_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_32K_CK_PDN_MASK), (kal_uint32)(PMIC_RG_AUXADC_32K_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud26m_div4_ck_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKPDN2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD26M_DIV4_CK_PDN_MASK), (kal_uint32)(PMIC_RG_AUD26M_DIV4_CK_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_man_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_MAN_RST_MASK), (kal_uint32)(PMIC_RG_EFUSE_MAN_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_RST_MASK), (kal_uint32)(PMIC_RG_AUXADC_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audio_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDIO_RST_MASK), (kal_uint32)(PMIC_RG_AUDIO_RST_SHIFT) ); 
pmic_unlock(); } void upmu_set_rg_accdet_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ACCDET_RST_MASK), (kal_uint32)(PMIC_RG_ACCDET_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_RST_MASK), (kal_uint32)(PMIC_RG_SPK_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_driver_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DRIVER_RST_MASK), (kal_uint32)(PMIC_RG_DRIVER_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtc_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RTC_RST_MASK), (kal_uint32)(PMIC_RG_RTC_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_fqmtr_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_FQMTR_RST_MASK), (kal_uint32)(PMIC_RG_FQMTR_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_top_rst_con_rsv_15_9(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TOP_RST_CON_RSV_15_9_MASK), (kal_uint32)(PMIC_RG_TOP_RST_CON_RSV_15_9_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ap_rst_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AP_RST_DIS_MASK), (kal_uint32)(PMIC_RG_AP_RST_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_sysrstb_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SYSRSTB_EN_MASK), (kal_uint32)(PMIC_RG_SYSRSTB_EN_SHIFT) ); 
pmic_unlock(); } void upmu_set_rg_strup_man_rst_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STRUP_MAN_RST_EN_MASK), (kal_uint32)(PMIC_RG_STRUP_MAN_RST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_newldo_rstb_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_NEWLDO_RSTB_EN_MASK), (kal_uint32)(PMIC_RG_NEWLDO_RSTB_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rst_part_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RST_PART_SEL_MASK), (kal_uint32)(PMIC_RG_RST_PART_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_homekey_rst_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_HOMEKEY_RST_EN_MASK), (kal_uint32)(PMIC_RG_HOMEKEY_RST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pwrkey_rst_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PWRKEY_RST_EN_MASK), (kal_uint32)(PMIC_RG_PWRKEY_RST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pwrrst_tmr_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PWRRST_TMR_DIS_MASK), (kal_uint32)(PMIC_RG_PWRRST_TMR_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pwrkey_rst_td(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_RST_MISC), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PWRKEY_RST_TD_MASK), (kal_uint32)(PMIC_RG_PWRKEY_RST_TD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_srclken_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_SRCLKEN_EN_MASK), (kal_uint32)(PMIC_RG_SRCLKEN_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_osc_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OSC_SEL_MASK), (kal_uint32)(PMIC_RG_OSC_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_sdm_sel_hw_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_SDM_SEL_HW_MODE_MASK), (kal_uint32)(PMIC_RG_AUXADC_SDM_SEL_HW_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_srclken_hw_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SRCLKEN_HW_MODE_MASK), (kal_uint32)(PMIC_RG_SRCLKEN_HW_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_osc_hw_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OSC_HW_MODE_MASK), (kal_uint32)(PMIC_RG_OSC_HW_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_osc_hw_src_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OSC_HW_SRC_SEL_MASK), (kal_uint32)(PMIC_RG_OSC_HW_SRC_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_sdm_ck_hw_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_SDM_CK_HW_MODE_MASK), (kal_uint32)(PMIC_RG_AUXADC_SDM_CK_HW_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smps_autoff_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMPS_AUTOFF_DIS_MASK), (kal_uint32)(PMIC_RG_SMPS_AUTOFF_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_buck_1m_autoff_dis(kal_uint32 val) { kal_uint32 
ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BUCK_1M_AUTOFF_DIS_MASK), (kal_uint32)(PMIC_RG_BUCK_1M_AUTOFF_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_buck_ana_autoff_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BUCK_ANA_AUTOFF_DIS_MASK), (kal_uint32)(PMIC_RG_BUCK_ANA_AUTOFF_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_regck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_REGCK_SEL_MASK), (kal_uint32)(PMIC_RG_REGCK_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_pwm_div_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_PWM_DIV_SEL_MASK), (kal_uint32)(PMIC_RG_SPK_PWM_DIV_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_div_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_DIV_SEL_MASK), (kal_uint32)(PMIC_RG_SPK_DIV_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_fqmtr_cksel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_FQMTR_CKSEL_MASK), (kal_uint32)(PMIC_RG_FQMTR_CKSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_accdet_cksel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ACCDET_CKSEL_MASK), (kal_uint32)(PMIC_RG_ACCDET_CKSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink0_ck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK0_CK_SEL_MASK), (kal_uint32)(PMIC_RG_ISINK0_CK_SEL_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_isink1_ck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK1_CK_SEL_MASK), (kal_uint32)(PMIC_RG_ISINK1_CK_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink2_ck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK2_CK_SEL_MASK), (kal_uint32)(PMIC_RG_ISINK2_CK_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink3_ck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK3_CK_SEL_MASK), (kal_uint32)(PMIC_RG_ISINK3_CK_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_sdm_ck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_SDM_CK_SEL_MASK), (kal_uint32)(PMIC_RG_AUXADC_SDM_CK_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audio_ck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKCON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDIO_CK_SEL_MASK), (kal_uint32)(PMIC_RG_AUDIO_CK_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtc32k_tst_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RTC32K_TST_DIS_MASK), (kal_uint32)(PMIC_RG_RTC32K_TST_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_tst_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_TST_DIS_MASK), (kal_uint32)(PMIC_RG_SPK_TST_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smps_tst_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMPS_TST_DIS_MASK), 
(kal_uint32)(PMIC_RG_SMPS_TST_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pmu75k_tst_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PMU75K_TST_DIS_MASK), (kal_uint32)(PMIC_RG_PMU75K_TST_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud26m_tst_dis(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD26M_TST_DIS_MASK), (kal_uint32)(PMIC_RG_AUD26M_TST_DIS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spk_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPK_TSTSEL_MASK), (kal_uint32)(PMIC_RG_SPK_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smps_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMPS_TSTSEL_MASK), (kal_uint32)(PMIC_RG_SMPS_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtc32k_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RTC32K_TSTSEL_MASK), (kal_uint32)(PMIC_RG_RTC32K_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pmu75k_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PMU75K_TSTSEL_MASK), (kal_uint32)(PMIC_RG_PMU75K_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud26m_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD26M_TSTSEL_MASK), (kal_uint32)(PMIC_RG_AUD26M_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rtcdet_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_RTCDET_TSTSEL_MASK), (kal_uint32)(PMIC_RG_RTCDET_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pwmoc_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PWMOC_TSTSEL_MASK), (kal_uint32)(PMIC_RG_PWMOC_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ldostb_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LDOSTB_TSTSEL_MASK), (kal_uint32)(PMIC_RG_LDOSTB_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK_TSTSEL_MASK), (kal_uint32)(PMIC_RG_ISINK_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_fqmtr_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_FQMTR_TSTSEL_MASK), (kal_uint32)(PMIC_RG_FQMTR_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_classd_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CLASSD_TSTSEL_MASK), (kal_uint32)(PMIC_RG_CLASSD_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_sdm_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_SDM_TSTSEL_MASK), (kal_uint32)(PMIC_RG_AUXADC_SDM_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud26m_div4_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD26M_DIV4_TSTSEL_MASK), (kal_uint32)(PMIC_RG_AUD26M_DIV4_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audif_tstsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); 
ret=pmic_config_interface( (kal_uint32)(TOP_CKTST1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDIF_TSTSEL_MASK), (kal_uint32)(PMIC_RG_AUDIF_TSTSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_test_ck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_TEST_CK_SEL_MASK), (kal_uint32)(PMIC_RG_BGR_TEST_CK_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pchr_test_ck_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PCHR_TEST_CK_SEL_MASK), (kal_uint32)(PMIC_RG_PCHR_TEST_CK_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_strup_75k_26m_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STRUP_75K_26M_SEL_MASK), (kal_uint32)(PMIC_RG_STRUP_75K_26M_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_bgr_testmode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_BGR_TESTMODE_MASK), (kal_uint32)(PMIC_RG_BGR_TESTMODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_top_cktst2_rsv_15_8(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TOP_CKTST2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TOP_CKTST2_RSV_15_8_MASK), (kal_uint32)(PMIC_RG_TOP_CKTST2_RSV_15_8_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_test_out(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(TEST_OUT), (&val), (kal_uint32)(PMIC_TEST_OUT_MASK), (kal_uint32)(PMIC_TEST_OUT_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_mon_flag_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_MON_FLAG_SEL_MASK), (kal_uint32)(PMIC_RG_MON_FLAG_SEL_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_mon_grp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_MON_GRP_SEL_MASK), (kal_uint32)(PMIC_RG_MON_GRP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_test_driver(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TEST_DRIVER_MASK), (kal_uint32)(PMIC_RG_TEST_DRIVER_SHIFT) ); pmic_unlock(); } void upmu_set_rg_test_classd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TEST_CLASSD_MASK), (kal_uint32)(PMIC_RG_TEST_CLASSD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_test_aud(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TEST_AUD_MASK), (kal_uint32)(PMIC_RG_TEST_AUD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_test_auxadc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TEST_AUXADC_MASK), (kal_uint32)(PMIC_RG_TEST_AUXADC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_nandtree_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_NANDTREE_MODE_MASK), (kal_uint32)(PMIC_RG_NANDTREE_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_MODE_MASK), (kal_uint32)(PMIC_RG_EFUSE_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_test_strup(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TEST_STRUP_MASK), (kal_uint32)(PMIC_RG_TEST_STRUP_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_test_spk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TEST_SPK_MASK), (kal_uint32)(PMIC_RG_TEST_SPK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_test_spk_pwm(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TEST_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TEST_SPK_PWM_MASK), (kal_uint32)(PMIC_RG_TEST_SPK_PWM_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_en_status_vproc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VPROC_MASK), (kal_uint32)(PMIC_EN_STATUS_VPROC_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vsys(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VSYS_MASK), (kal_uint32)(PMIC_EN_STATUS_VSYS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vpa(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VPA_MASK), (kal_uint32)(PMIC_EN_STATUS_VPA_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vrtc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VRTC_MASK), (kal_uint32)(PMIC_EN_STATUS_VRTC_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_va(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VA_MASK), (kal_uint32)(PMIC_EN_STATUS_VA_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vcama(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VCAMA_MASK), 
(kal_uint32)(PMIC_EN_STATUS_VCAMA_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vcamd(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VCAMD_MASK), (kal_uint32)(PMIC_EN_STATUS_VCAMD_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vcam_af(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VCAM_AF_MASK), (kal_uint32)(PMIC_EN_STATUS_VCAM_AF_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vcam_io(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VCAM_IO_MASK), (kal_uint32)(PMIC_EN_STATUS_VCAM_IO_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vcn28(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VCN28_MASK), (kal_uint32)(PMIC_EN_STATUS_VCN28_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vcn33(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VCN33_MASK), (kal_uint32)(PMIC_EN_STATUS_VCN33_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vcn_1v8(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VCN_1V8_MASK), (kal_uint32)(PMIC_EN_STATUS_VCN_1V8_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vemc_3v3(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VEMC_3V3_MASK), (kal_uint32)(PMIC_EN_STATUS_VEMC_3V3_SHIFT) ); pmic_unlock(); return val; } kal_uint32 
upmu_get_en_status_vgp1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VGP1_MASK), (kal_uint32)(PMIC_EN_STATUS_VGP1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vgp2(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VGP2_MASK), (kal_uint32)(PMIC_EN_STATUS_VGP2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vgp3(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS0), (&val), (kal_uint32)(PMIC_EN_STATUS_VGP3_MASK), (kal_uint32)(PMIC_EN_STATUS_VGP3_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vibr(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VIBR_MASK), (kal_uint32)(PMIC_EN_STATUS_VIBR_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vio18(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VIO18_MASK), (kal_uint32)(PMIC_EN_STATUS_VIO18_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vio28(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VIO28_MASK), (kal_uint32)(PMIC_EN_STATUS_VIO28_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vm(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VM_MASK), (kal_uint32)(PMIC_EN_STATUS_VM_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vmc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), 
(kal_uint32)(PMIC_EN_STATUS_VMC_MASK), (kal_uint32)(PMIC_EN_STATUS_VMC_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vmch(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VMCH_MASK), (kal_uint32)(PMIC_EN_STATUS_VMCH_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vrf18(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VRF18_MASK), (kal_uint32)(PMIC_EN_STATUS_VRF18_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vsim1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VSIM1_MASK), (kal_uint32)(PMIC_EN_STATUS_VSIM1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vsim2(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VSIM2_MASK), (kal_uint32)(PMIC_EN_STATUS_VSIM2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vtcxo(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VTCXO_MASK), (kal_uint32)(PMIC_EN_STATUS_VTCXO_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_en_status_vusb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EN_STATUS1), (&val), (kal_uint32)(PMIC_EN_STATUS_VUSB_MASK), (kal_uint32)(PMIC_EN_STATUS_VUSB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vproc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VPROC_MASK), (kal_uint32)(PMIC_OC_STATUS_VPROC_SHIFT) ); pmic_unlock(); return val; } kal_uint32 
upmu_get_oc_status_vsys(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VSYS_MASK), (kal_uint32)(PMIC_OC_STATUS_VSYS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vpa(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VPA_MASK), (kal_uint32)(PMIC_OC_STATUS_VPA_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_va(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VA_MASK), (kal_uint32)(PMIC_OC_STATUS_VA_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vcama(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VCAMA_MASK), (kal_uint32)(PMIC_OC_STATUS_VCAMA_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vcamd(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VCAMD_MASK), (kal_uint32)(PMIC_OC_STATUS_VCAMD_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vcam_af(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VCAM_AF_MASK), (kal_uint32)(PMIC_OC_STATUS_VCAM_AF_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vcam_io(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VCAM_IO_MASK), (kal_uint32)(PMIC_OC_STATUS_VCAM_IO_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vcn28(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), 
(kal_uint32)(PMIC_OC_STATUS_VCN28_MASK), (kal_uint32)(PMIC_OC_STATUS_VCN28_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vcn33(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VCN33_MASK), (kal_uint32)(PMIC_OC_STATUS_VCN33_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vcn_1v8(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VCN_1V8_MASK), (kal_uint32)(PMIC_OC_STATUS_VCN_1V8_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vemc_3v3(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VEMC_3V3_MASK), (kal_uint32)(PMIC_OC_STATUS_VEMC_3V3_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vgp1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VGP1_MASK), (kal_uint32)(PMIC_OC_STATUS_VGP1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vgp2(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VGP2_MASK), (kal_uint32)(PMIC_OC_STATUS_VGP2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vgp3(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS0), (&val), (kal_uint32)(PMIC_OC_STATUS_VGP3_MASK), (kal_uint32)(PMIC_OC_STATUS_VGP3_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vibr(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VIBR_MASK), (kal_uint32)(PMIC_OC_STATUS_VIBR_SHIFT) ); pmic_unlock(); return val; } kal_uint32 
upmu_get_oc_status_vio18(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VIO18_MASK), (kal_uint32)(PMIC_OC_STATUS_VIO18_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vio28(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VIO28_MASK), (kal_uint32)(PMIC_OC_STATUS_VIO28_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vm(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VM_MASK), (kal_uint32)(PMIC_OC_STATUS_VM_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vmc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VMC_MASK), (kal_uint32)(PMIC_OC_STATUS_VMC_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vmch(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VMCH_MASK), (kal_uint32)(PMIC_OC_STATUS_VMCH_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vrf18(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VRF18_MASK), (kal_uint32)(PMIC_OC_STATUS_VRF18_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vsim1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VSIM1_MASK), (kal_uint32)(PMIC_OC_STATUS_VSIM1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vsim2(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), 
(kal_uint32)(PMIC_OC_STATUS_VSIM2_MASK), (kal_uint32)(PMIC_OC_STATUS_VSIM2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vtcxo(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VTCXO_MASK), (kal_uint32)(PMIC_OC_STATUS_VTCXO_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_oc_status_vusb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_OC_STATUS_VUSB_MASK), (kal_uint32)(PMIC_OC_STATUS_VUSB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ni_spk_oc_det_d_l(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_NI_SPK_OC_DET_D_L_MASK), (kal_uint32)(PMIC_NI_SPK_OC_DET_D_L_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ni_spk_oc_det_ab_l(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(OCSTATUS1), (&val), (kal_uint32)(PMIC_NI_SPK_OC_DET_AB_L_MASK), (kal_uint32)(PMIC_NI_SPK_OC_DET_AB_L_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_vproc_pg_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(PGSTATUS), (&val), (kal_uint32)(PMIC_VPROC_PG_DEB_MASK), (kal_uint32)(PMIC_VPROC_PG_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_vsys_pg_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(PGSTATUS), (&val), (kal_uint32)(PMIC_VSYS_PG_DEB_MASK), (kal_uint32)(PMIC_VSYS_PG_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_vm_pg_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(PGSTATUS), (&val), (kal_uint32)(PMIC_VM_PG_DEB_MASK), (kal_uint32)(PMIC_VM_PG_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_vio18_pg_deb(void) { kal_uint32 ret=0; 
kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(PGSTATUS), (&val), (kal_uint32)(PMIC_VIO18_PG_DEB_MASK), (kal_uint32)(PMIC_VIO18_PG_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_vtcxo_pg_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(PGSTATUS), (&val), (kal_uint32)(PMIC_VTCXO_PG_DEB_MASK), (kal_uint32)(PMIC_VTCXO_PG_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_va_pg_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(PGSTATUS), (&val), (kal_uint32)(PMIC_VA_PG_DEB_MASK), (kal_uint32)(PMIC_VA_PG_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_vio28_pg_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(PGSTATUS), (&val), (kal_uint32)(PMIC_VIO28_PG_DEB_MASK), (kal_uint32)(PMIC_VIO28_PG_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_vgp2_pg_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(PGSTATUS), (&val), (kal_uint32)(PMIC_VGP2_PG_DEB_MASK), (kal_uint32)(PMIC_VGP2_PG_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_pmu_test_mode_scan(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHRSTATUS), (&val), (kal_uint32)(PMIC_PMU_TEST_MODE_SCAN_MASK), (kal_uint32)(PMIC_PMU_TEST_MODE_SCAN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_pwrkey_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHRSTATUS), (&val), (kal_uint32)(PMIC_PWRKEY_DEB_MASK), (kal_uint32)(PMIC_PWRKEY_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_fchrkey_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHRSTATUS), (&val), (kal_uint32)(PMIC_FCHRKEY_DEB_MASK), (kal_uint32)(PMIC_FCHRKEY_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 
upmu_get_vbat_ov(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHRSTATUS), (&val), (kal_uint32)(PMIC_VBAT_OV_MASK), (kal_uint32)(PMIC_VBAT_OV_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_pchr_chrdet(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHRSTATUS), (&val), (kal_uint32)(PMIC_PCHR_CHRDET_MASK), (kal_uint32)(PMIC_PCHR_CHRDET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ro_baton_undet(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHRSTATUS), (&val), (kal_uint32)(PMIC_RO_BATON_UNDET_MASK), (kal_uint32)(PMIC_RO_BATON_UNDET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rtc_xtal_det_done(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHRSTATUS), (&val), (kal_uint32)(PMIC_RTC_XTAL_DET_DONE_MASK), (kal_uint32)(PMIC_RTC_XTAL_DET_DONE_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_xosc32_enb_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(CHRSTATUS), (&val), (kal_uint32)(PMIC_XOSC32_ENB_DET_MASK), (kal_uint32)(PMIC_XOSC32_ENB_DET_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rtc_xtal_det_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(CHRSTATUS), (kal_uint32)(val), (kal_uint32)(PMIC_RTC_XTAL_DET_RSV_MASK), (kal_uint32)(PMIC_RTC_XTAL_DET_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_simap_tdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SIMAP_TDSEL_MASK), (kal_uint32)(PMIC_RG_SIMAP_TDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud_tdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD_TDSEL_MASK), 
(kal_uint32)(PMIC_RG_AUD_TDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spi_tdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPI_TDSEL_MASK), (kal_uint32)(PMIC_RG_SPI_TDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pmu_tdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PMU_TDSEL_MASK), (kal_uint32)(PMIC_RG_PMU_TDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_simls_tdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(TDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SIMLS_TDSEL_MASK), (kal_uint32)(PMIC_RG_SIMLS_TDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_simap_rdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SIMAP_RDSEL_MASK), (kal_uint32)(PMIC_RG_SIMAP_RDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud_rdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD_RDSEL_MASK), (kal_uint32)(PMIC_RG_AUD_RDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_spi_rdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPI_RDSEL_MASK), (kal_uint32)(PMIC_RG_SPI_RDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pmu_rdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PMU_RDSEL_MASK), (kal_uint32)(PMIC_RG_PMU_RDSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_simls_rdsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RDSEL_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SIMLS_RDSEL_MASK), (kal_uint32)(PMIC_RG_SIMLS_RDSEL_SHIFT) 
); pmic_unlock(); } void upmu_set_rg_smt_sysrstb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SYSRSTB_MASK), (kal_uint32)(PMIC_RG_SMT_SYSRSTB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_int(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_INT_MASK), (kal_uint32)(PMIC_RG_SMT_INT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_srclken(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SRCLKEN_MASK), (kal_uint32)(PMIC_RG_SMT_SRCLKEN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_rtc_32k1v8(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_RTC_32K1V8_MASK), (kal_uint32)(PMIC_RG_SMT_RTC_32K1V8_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_spi_clk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SPI_CLK_MASK), (kal_uint32)(PMIC_RG_SMT_SPI_CLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_spi_csn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SPI_CSN_MASK), (kal_uint32)(PMIC_RG_SMT_SPI_CSN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_spi_mosi(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SPI_MOSI_MASK), (kal_uint32)(PMIC_RG_SMT_SPI_MOSI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_spi_miso(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SPI_MISO_MASK), (kal_uint32)(PMIC_RG_SMT_SPI_MISO_SHIFT) ); 
pmic_unlock(); } void upmu_set_rg_smt_aud_clk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_AUD_CLK_MASK), (kal_uint32)(PMIC_RG_SMT_AUD_CLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_aud_mosi(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_AUD_MOSI_MASK), (kal_uint32)(PMIC_RG_SMT_AUD_MOSI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_aud_miso(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_AUD_MISO_MASK), (kal_uint32)(PMIC_RG_SMT_AUD_MISO_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_sim1_ap_sclk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SIM1_AP_SCLK_MASK), (kal_uint32)(PMIC_RG_SMT_SIM1_AP_SCLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_sim1_ap_srst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SIM1_AP_SRST_MASK), (kal_uint32)(PMIC_RG_SMT_SIM1_AP_SRST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_simls1_sclk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SIMLS1_SCLK_MASK), (kal_uint32)(PMIC_RG_SMT_SIMLS1_SCLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_simls1_srst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SIMLS1_SRST_MASK), (kal_uint32)(PMIC_RG_SMT_SIMLS1_SRST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_sim2_ap_sclk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON4), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_SMT_SIM2_AP_SCLK_MASK), (kal_uint32)(PMIC_RG_SMT_SIM2_AP_SCLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_sim2_ap_srst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SIM2_AP_SRST_MASK), (kal_uint32)(PMIC_RG_SMT_SIM2_AP_SRST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_simls2_sclk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SIMLS2_SCLK_MASK), (kal_uint32)(PMIC_RG_SMT_SIMLS2_SCLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smt_simls2_srst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SMT_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMT_SIMLS2_SRST_MASK), (kal_uint32)(PMIC_RG_SMT_SIMLS2_SRST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_int(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_INT_MASK), (kal_uint32)(PMIC_RG_OCTL_INT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_srclken(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SRCLKEN_MASK), (kal_uint32)(PMIC_RG_OCTL_SRCLKEN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_rtc_32k1v8(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_RTC_32K1V8_MASK), (kal_uint32)(PMIC_RG_OCTL_RTC_32K1V8_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_spi_clk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SPI_CLK_MASK), (kal_uint32)(PMIC_RG_OCTL_SPI_CLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_spi_csn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(DRV_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SPI_CSN_MASK), (kal_uint32)(PMIC_RG_OCTL_SPI_CSN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_spi_mosi(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SPI_MOSI_MASK), (kal_uint32)(PMIC_RG_OCTL_SPI_MOSI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_spi_miso(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SPI_MISO_MASK), (kal_uint32)(PMIC_RG_OCTL_SPI_MISO_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_aud_clk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_AUD_CLK_MASK), (kal_uint32)(PMIC_RG_OCTL_AUD_CLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_aud_mosi(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_AUD_MOSI_MASK), (kal_uint32)(PMIC_RG_OCTL_AUD_MOSI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_aud_miso(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_AUD_MISO_MASK), (kal_uint32)(PMIC_RG_OCTL_AUD_MISO_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_sim1_ap_sclk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SIM1_AP_SCLK_MASK), (kal_uint32)(PMIC_RG_OCTL_SIM1_AP_SCLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_sim1_ap_srst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SIM1_AP_SRST_MASK), (kal_uint32)(PMIC_RG_OCTL_SIM1_AP_SRST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_simls1_sclk(kal_uint32 val) { 
kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SIMLS1_SCLK_MASK), (kal_uint32)(PMIC_RG_OCTL_SIMLS1_SCLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_simls1_srst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SIMLS1_SRST_MASK), (kal_uint32)(PMIC_RG_OCTL_SIMLS1_SRST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_sim2_ap_sclk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SIM2_AP_SCLK_MASK), (kal_uint32)(PMIC_RG_OCTL_SIM2_AP_SCLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_sim2_ap_srst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SIM2_AP_SRST_MASK), (kal_uint32)(PMIC_RG_OCTL_SIM2_AP_SRST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_simls2_sclk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SIMLS2_SCLK_MASK), (kal_uint32)(PMIC_RG_OCTL_SIMLS2_SCLK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_octl_simls2_srst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DRV_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OCTL_SIMLS2_SRST_MASK), (kal_uint32)(PMIC_RG_OCTL_SIMLS2_SRST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_simls1_sclk_conf(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SIMLS1_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SIMLS1_SCLK_CONF_MASK), (kal_uint32)(PMIC_RG_SIMLS1_SCLK_CONF_SHIFT) ); pmic_unlock(); } void upmu_set_rg_simls1_srst_conf(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SIMLS1_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SIMLS1_SRST_CONF_MASK), 
(kal_uint32)(PMIC_RG_SIMLS1_SRST_CONF_SHIFT) ); pmic_unlock(); } void upmu_set_rg_simls2_sclk_conf(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SIMLS2_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SIMLS2_SCLK_CONF_MASK), (kal_uint32)(PMIC_RG_SIMLS2_SCLK_CONF_SHIFT) ); pmic_unlock(); } void upmu_set_rg_simls2_srst_conf(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(SIMLS2_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SIMLS2_SRST_CONF_MASK), (kal_uint32)(PMIC_RG_SIMLS2_SRST_CONF_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_spkl_ab(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_SPKL_AB_MASK), (kal_uint32)(PMIC_RG_INT_EN_SPKL_AB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_spkl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_SPKL_MASK), (kal_uint32)(PMIC_RG_INT_EN_SPKL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_bat_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_BAT_L_MASK), (kal_uint32)(PMIC_RG_INT_EN_BAT_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_bat_h(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_BAT_H_MASK), (kal_uint32)(PMIC_RG_INT_EN_BAT_H_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_watchdog(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_WATCHDOG_MASK), (kal_uint32)(PMIC_RG_INT_EN_WATCHDOG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_pwrkey(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), 
(kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_PWRKEY_MASK), (kal_uint32)(PMIC_RG_INT_EN_PWRKEY_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_thr_l(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_THR_L_MASK), (kal_uint32)(PMIC_RG_INT_EN_THR_L_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_thr_h(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_THR_H_MASK), (kal_uint32)(PMIC_RG_INT_EN_THR_H_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_vbaton_undet(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_VBATON_UNDET_MASK), (kal_uint32)(PMIC_RG_INT_EN_VBATON_UNDET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_bvalid_det(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_BVALID_DET_MASK), (kal_uint32)(PMIC_RG_INT_EN_BVALID_DET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_chrdet(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_CHRDET_MASK), (kal_uint32)(PMIC_RG_INT_EN_CHRDET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_ov(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_OV_MASK), (kal_uint32)(PMIC_RG_INT_EN_OV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_ldo(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_LDO_MASK), (kal_uint32)(PMIC_RG_INT_EN_LDO_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_fchrkey(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); 
ret=pmic_config_interface( (kal_uint32)(INT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_FCHRKEY_MASK), (kal_uint32)(PMIC_RG_INT_EN_FCHRKEY_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_accdet(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_ACCDET_MASK), (kal_uint32)(PMIC_RG_INT_EN_ACCDET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_audio(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_AUDIO_MASK), (kal_uint32)(PMIC_RG_INT_EN_AUDIO_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_rtc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_RTC_MASK), (kal_uint32)(PMIC_RG_INT_EN_RTC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_vproc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_VPROC_MASK), (kal_uint32)(PMIC_RG_INT_EN_VPROC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_vsys(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_VSYS_MASK), (kal_uint32)(PMIC_RG_INT_EN_VSYS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_int_en_vpa(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_INT_EN_VPA_MASK), (kal_uint32)(PMIC_RG_INT_EN_VPA_SHIFT) ); pmic_unlock(); } void upmu_set_polarity(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_MISC_CON), (kal_uint32)(val), (kal_uint32)(PMIC_POLARITY_MASK), (kal_uint32)(PMIC_POLARITY_SHIFT) ); pmic_unlock(); } void upmu_set_polarity_vbaton_undet(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); 
ret=pmic_config_interface( (kal_uint32)(INT_MISC_CON), (kal_uint32)(val), (kal_uint32)(PMIC_POLARITY_VBATON_UNDET_MASK), (kal_uint32)(PMIC_POLARITY_VBATON_UNDET_SHIFT) ); pmic_unlock(); } void upmu_set_polarity_bvalid_det(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_MISC_CON), (kal_uint32)(val), (kal_uint32)(PMIC_POLARITY_BVALID_DET_MASK), (kal_uint32)(PMIC_POLARITY_BVALID_DET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_fchrkey_int_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_MISC_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_FCHRKEY_INT_SEL_MASK), (kal_uint32)(PMIC_RG_FCHRKEY_INT_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_pwrkey_int_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_MISC_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_PWRKEY_INT_SEL_MASK), (kal_uint32)(PMIC_RG_PWRKEY_INT_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_ivgen_ext_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(INT_MISC_CON), (kal_uint32)(val), (kal_uint32)(PMIC_IVGEN_EXT_EN_MASK), (kal_uint32)(PMIC_IVGEN_EXT_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rg_int_status_spkl_ab(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_SPKL_AB_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_SPKL_AB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_spkl(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_SPKL_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_SPKL_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_bat_l(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_BAT_L_MASK), 
(kal_uint32)(PMIC_RG_INT_STATUS_BAT_L_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_bat_h(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_BAT_H_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_BAT_H_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_watchdog(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_WATCHDOG_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_WATCHDOG_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_pwrkey(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_PWRKEY_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_PWRKEY_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_thr_l(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_THR_L_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_THR_L_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_thr_h(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_THR_H_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_THR_H_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_vbaton_undet(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_VBATON_UNDET_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_VBATON_UNDET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_bvalid_det(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_BVALID_DET_MASK), 
(kal_uint32)(PMIC_RG_INT_STATUS_BVALID_DET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_chrdet(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_CHRDET_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_CHRDET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_ov(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS0), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_OV_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_OV_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_ldo(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS1), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_LDO_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_LDO_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_fchrkey(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS1), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_FCHRKEY_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_FCHRKEY_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_accdet(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS1), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_ACCDET_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_ACCDET_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_audio(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS1), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_AUDIO_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_AUDIO_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_rtc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS1), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_RTC_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_RTC_SHIFT) ); 
pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_vproc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS1), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_VPROC_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_VPROC_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_vsys(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS1), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_VSYS_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_VSYS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_int_status_vpa(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(INT_STATUS1), (&val), (kal_uint32)(PMIC_RG_INT_STATUS_VPA_MASK), (kal_uint32)(PMIC_RG_INT_STATUS_VPA_SHIFT) ); pmic_unlock(); return val; } void upmu_set_oc_gear_bvalid_det(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_GEAR_0), (kal_uint32)(val), (kal_uint32)(PMIC_OC_GEAR_BVALID_DET_MASK), (kal_uint32)(PMIC_OC_GEAR_BVALID_DET_SHIFT) ); pmic_unlock(); } void upmu_set_oc_gear_vbaton_undet(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_GEAR_1), (kal_uint32)(val), (kal_uint32)(PMIC_OC_GEAR_VBATON_UNDET_MASK), (kal_uint32)(PMIC_OC_GEAR_VBATON_UNDET_SHIFT) ); pmic_unlock(); } void upmu_set_oc_gear_ldo(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_GEAR_2), (kal_uint32)(val), (kal_uint32)(PMIC_OC_GEAR_LDO_MASK), (kal_uint32)(PMIC_OC_GEAR_LDO_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_oc_thd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_CTL_VPROC), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_OC_THD_MASK), (kal_uint32)(PMIC_VPROC_OC_THD_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_oc_wnd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(OC_CTL_VPROC), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_OC_WND_MASK), (kal_uint32)(PMIC_VPROC_OC_WND_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_deg_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_CTL_VPROC), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_DEG_EN_MASK), (kal_uint32)(PMIC_VPROC_DEG_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_oc_thd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_CTL_VSYS), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_OC_THD_MASK), (kal_uint32)(PMIC_VSYS_OC_THD_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_oc_wnd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_CTL_VSYS), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_OC_WND_MASK), (kal_uint32)(PMIC_VSYS_OC_WND_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_deg_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_CTL_VSYS), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_DEG_EN_MASK), (kal_uint32)(PMIC_VSYS_DEG_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_oc_thd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_CTL_VPA), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_OC_THD_MASK), (kal_uint32)(PMIC_VPA_OC_THD_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_oc_wnd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_CTL_VPA), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_OC_WND_MASK), (kal_uint32)(PMIC_VPA_OC_WND_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_deg_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(OC_CTL_VPA), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_DEG_EN_MASK), (kal_uint32)(PMIC_VPA_DEG_EN_SHIFT) ); pmic_unlock(); } void upmu_set_fqmtr_tcksel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(FQMTR_CON0), (kal_uint32)(val), 
(kal_uint32)(PMIC_FQMTR_TCKSEL_MASK), (kal_uint32)(PMIC_FQMTR_TCKSEL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_fqmtr_busy(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(FQMTR_CON0), (&val), (kal_uint32)(PMIC_FQMTR_BUSY_MASK), (kal_uint32)(PMIC_FQMTR_BUSY_SHIFT) ); pmic_unlock(); return val; } void upmu_set_fqmtr_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(FQMTR_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_FQMTR_EN_MASK), (kal_uint32)(PMIC_FQMTR_EN_SHIFT) ); pmic_unlock(); } void upmu_set_fqmtr_winset(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(FQMTR_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_FQMTR_WINSET_MASK), (kal_uint32)(PMIC_FQMTR_WINSET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_fqmtr_data(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(FQMTR_CON2), (&val), (kal_uint32)(PMIC_FQMTR_DATA_MASK), (kal_uint32)(PMIC_FQMTR_DATA_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_spi_con(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RG_SPI_CON), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SPI_CON_MASK), (kal_uint32)(PMIC_RG_SPI_CON_SHIFT) ); pmic_unlock(); } void upmu_set_dew_dio_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_DIO_EN), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_DIO_EN_MASK), (kal_uint32)(PMIC_DEW_DIO_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_dew_read_test(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DEW_READ_TEST), (&val), (kal_uint32)(PMIC_DEW_READ_TEST_MASK), (kal_uint32)(PMIC_DEW_READ_TEST_SHIFT) ); pmic_unlock(); return val; } void upmu_set_dew_write_test(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_WRITE_TEST), (kal_uint32)(val), 
(kal_uint32)(PMIC_DEW_WRITE_TEST_MASK), (kal_uint32)(PMIC_DEW_WRITE_TEST_SHIFT) ); pmic_unlock(); } void upmu_set_dew_crc_swrst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_CRC_SWRST), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_CRC_SWRST_MASK), (kal_uint32)(PMIC_DEW_CRC_SWRST_SHIFT) ); pmic_unlock(); } void upmu_set_dew_crc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_CRC_EN), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_CRC_EN_MASK), (kal_uint32)(PMIC_DEW_CRC_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_dew_crc_val(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DEW_CRC_VAL), (&val), (kal_uint32)(PMIC_DEW_CRC_VAL_MASK), (kal_uint32)(PMIC_DEW_CRC_VAL_SHIFT) ); pmic_unlock(); return val; } void upmu_set_dew_dbg_mon_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_DBG_MON_SEL), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_DBG_MON_SEL_MASK), (kal_uint32)(PMIC_DEW_DBG_MON_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_dew_cipher_key_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_CIPHER_KEY_SEL), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_CIPHER_KEY_SEL_MASK), (kal_uint32)(PMIC_DEW_CIPHER_KEY_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_dew_cipher_iv_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_CIPHER_IV_SEL), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_CIPHER_IV_SEL_MASK), (kal_uint32)(PMIC_DEW_CIPHER_IV_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_dew_cipher_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_CIPHER_EN), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_CIPHER_EN_MASK), (kal_uint32)(PMIC_DEW_CIPHER_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_dew_cipher_rdy(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); 
ret=pmic_read_interface( (kal_uint32)(DEW_CIPHER_RDY), (&val), (kal_uint32)(PMIC_DEW_CIPHER_RDY_MASK), (kal_uint32)(PMIC_DEW_CIPHER_RDY_SHIFT) ); pmic_unlock(); return val; } void upmu_set_dew_cipher_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_CIPHER_MODE), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_CIPHER_MODE_MASK), (kal_uint32)(PMIC_DEW_CIPHER_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_dew_cipher_swrst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_CIPHER_SWRST), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_CIPHER_SWRST_MASK), (kal_uint32)(PMIC_DEW_CIPHER_SWRST_SHIFT) ); pmic_unlock(); } void upmu_set_dew_rddmy_no(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_RDDMY_NO), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_RDDMY_NO_MASK), (kal_uint32)(PMIC_DEW_RDDMY_NO_SHIFT) ); pmic_unlock(); } void upmu_set_dew_rdata_dly_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DEW_RDATA_DLY_SEL), (kal_uint32)(val), (kal_uint32)(PMIC_DEW_RDATA_DLY_SEL_MASK), (kal_uint32)(PMIC_DEW_RDATA_DLY_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_smps_testmode_b(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SMPS_TESTMODE_B_MASK), (kal_uint32)(PMIC_RG_SMPS_TESTMODE_B_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vproc_dig_mon(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(BUCK_CON1), (&val), (kal_uint32)(PMIC_QI_VPROC_DIG_MON_MASK), (kal_uint32)(PMIC_QI_VPROC_DIG_MON_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vsys_dig_mon(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(BUCK_CON1), (&val), (kal_uint32)(PMIC_QI_VSYS_DIG_MON_MASK), (kal_uint32)(PMIC_QI_VSYS_DIG_MON_SHIFT) ); pmic_unlock(); return val; 
} void upmu_set_vsleep_src0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_VSLEEP_SRC0_MASK), (kal_uint32)(PMIC_VSLEEP_SRC0_SHIFT) ); pmic_unlock(); } void upmu_set_vsleep_src1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_VSLEEP_SRC1_MASK), (kal_uint32)(PMIC_VSLEEP_SRC1_SHIFT) ); pmic_unlock(); } void upmu_set_r2r_src0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_R2R_SRC0_MASK), (kal_uint32)(PMIC_R2R_SRC0_SHIFT) ); pmic_unlock(); } void upmu_set_r2r_src1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_R2R_SRC1_MASK), (kal_uint32)(PMIC_R2R_SRC1_SHIFT) ); pmic_unlock(); } void upmu_set_buck_osc_sel_src0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_BUCK_OSC_SEL_SRC0_MASK), (kal_uint32)(PMIC_BUCK_OSC_SEL_SRC0_SHIFT) ); pmic_unlock(); } void upmu_set_srclken_dly_src1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_SRCLKEN_DLY_SRC1_MASK), (kal_uint32)(PMIC_SRCLKEN_DLY_SRC1_SHIFT) ); pmic_unlock(); } void upmu_set_buck_con5_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_BUCK_CON5_RSV0_MASK), (kal_uint32)(PMIC_BUCK_CON5_RSV0_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_triml(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_TRIML_MASK), (kal_uint32)(PMIC_RG_VPROC_TRIML_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_trimh(kal_uint32 
val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_TRIMH_MASK), (kal_uint32)(PMIC_RG_VPROC_TRIMH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_csm(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_CSM_MASK), (kal_uint32)(PMIC_RG_VPROC_CSM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_zxos_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_ZXOS_TRIM_MASK), (kal_uint32)(PMIC_RG_VPROC_ZXOS_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_rzsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_RZSEL_MASK), (kal_uint32)(PMIC_RG_VPROC_RZSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_cc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_CC_MASK), (kal_uint32)(PMIC_RG_VPROC_CC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_csr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_CSR_MASK), (kal_uint32)(PMIC_RG_VPROC_CSR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_csl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_CSL_MASK), (kal_uint32)(PMIC_RG_VPROC_CSL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_zx_os(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_ZX_OS_MASK), (kal_uint32)(PMIC_RG_VPROC_ZX_OS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_avp_os(kal_uint32 val) { 
kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_AVP_OS_MASK), (kal_uint32)(PMIC_RG_VPROC_AVP_OS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_avp_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_AVP_EN_MASK), (kal_uint32)(PMIC_RG_VPROC_AVP_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_modeset(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_MODESET_MASK), (kal_uint32)(PMIC_RG_VPROC_MODESET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VPROC_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_slp(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_SLP_MASK), (kal_uint32)(PMIC_RG_VPROC_SLP_SHIFT) ); pmic_unlock(); } void upmu_set_qi_vproc_vsleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_QI_VPROC_VSLEEP_MASK), (kal_uint32)(PMIC_QI_VPROC_VSLEEP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vproc_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPROC_RSV_MASK), (kal_uint32)(PMIC_RG_VPROC_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_en_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_EN_CTRL_MASK), (kal_uint32)(PMIC_VPROC_EN_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_vosel_ctrl(kal_uint32 
val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_VOSEL_CTRL_MASK), (kal_uint32)(PMIC_VPROC_VOSEL_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_dlc_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_DLC_CTRL_MASK), (kal_uint32)(PMIC_VPROC_DLC_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_burst_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_BURST_CTRL_MASK), (kal_uint32)(PMIC_VPROC_BURST_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_EN_MASK), (kal_uint32)(PMIC_VPROC_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vproc_stb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON7), (&val), (kal_uint32)(PMIC_QI_VPROC_STB_MASK), (kal_uint32)(PMIC_QI_VPROC_STB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vproc_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON7), (&val), (kal_uint32)(PMIC_QI_VPROC_EN_MASK), (kal_uint32)(PMIC_QI_VPROC_EN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vproc_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON7), (&val), (kal_uint32)(PMIC_QI_VPROC_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VPROC_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vproc_sfchg_frate(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_SFCHG_FRATE_MASK), (kal_uint32)(PMIC_VPROC_SFCHG_FRATE_SHIFT) ); pmic_unlock(); } 
void upmu_set_vproc_sfchg_fen(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_SFCHG_FEN_MASK), (kal_uint32)(PMIC_VPROC_SFCHG_FEN_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_sfchg_rrate(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_SFCHG_RRATE_MASK), (kal_uint32)(PMIC_VPROC_SFCHG_RRATE_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_sfchg_ren(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_SFCHG_REN_MASK), (kal_uint32)(PMIC_VPROC_SFCHG_REN_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_VOSEL_MASK), (kal_uint32)(PMIC_VPROC_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_vosel_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_VOSEL_ON_MASK), (kal_uint32)(PMIC_VPROC_VOSEL_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_vosel_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_VOSEL_SLEEP_MASK), (kal_uint32)(PMIC_VPROC_VOSEL_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_vproc_vosel(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON12), (&val), (kal_uint32)(PMIC_NI_VPROC_VOSEL_MASK), (kal_uint32)(PMIC_NI_VPROC_VOSEL_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vproc_burst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_BURST_MASK), 
(kal_uint32)(PMIC_VPROC_BURST_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_burst_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_BURST_ON_MASK), (kal_uint32)(PMIC_VPROC_BURST_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_burst_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_BURST_SLEEP_MASK), (kal_uint32)(PMIC_VPROC_BURST_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vproc_burst(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON13), (&val), (kal_uint32)(PMIC_QI_VPROC_BURST_MASK), (kal_uint32)(PMIC_QI_VPROC_BURST_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vproc_dlc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_DLC_MASK), (kal_uint32)(PMIC_VPROC_DLC_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_dlc_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_DLC_ON_MASK), (kal_uint32)(PMIC_VPROC_DLC_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_dlc_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_DLC_SLEEP_MASK), (kal_uint32)(PMIC_VPROC_DLC_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vproc_dlc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON14), (&val), (kal_uint32)(PMIC_QI_VPROC_DLC_MASK), (kal_uint32)(PMIC_QI_VPROC_DLC_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vproc_dlc_n(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON15), (kal_uint32)(val), 
(kal_uint32)(PMIC_VPROC_DLC_N_MASK), (kal_uint32)(PMIC_VPROC_DLC_N_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_dlc_n_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_DLC_N_ON_MASK), (kal_uint32)(PMIC_VPROC_DLC_N_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_dlc_n_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_DLC_N_SLEEP_MASK), (kal_uint32)(PMIC_VPROC_DLC_N_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vproc_dlc_n(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON15), (&val), (kal_uint32)(PMIC_QI_VPROC_DLC_N_MASK), (kal_uint32)(PMIC_QI_VPROC_DLC_N_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vproc_transtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_TRANSTD_MASK), (kal_uint32)(PMIC_VPROC_TRANSTD_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_vosel_trans_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_VOSEL_TRANS_EN_MASK), (kal_uint32)(PMIC_VPROC_VOSEL_TRANS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_vosel_trans_once(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_VOSEL_TRANS_ONCE_MASK), (kal_uint32)(PMIC_VPROC_VOSEL_TRANS_ONCE_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_vproc_vosel_trans(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON18), (&val), (kal_uint32)(PMIC_NI_VPROC_VOSEL_TRANS_MASK), (kal_uint32)(PMIC_NI_VPROC_VOSEL_TRANS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vproc_vsleep_en(kal_uint32 val) { 
kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_VSLEEP_EN_MASK), (kal_uint32)(PMIC_VPROC_VSLEEP_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_r2r_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_R2R_PDN_MASK), (kal_uint32)(PMIC_VPROC_R2R_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_vproc_vsleep_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPROC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPROC_VSLEEP_SEL_MASK), (kal_uint32)(PMIC_VPROC_VSLEEP_SEL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_vproc_r2r_pdn(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON18), (&val), (kal_uint32)(PMIC_NI_VPROC_R2R_PDN_MASK), (kal_uint32)(PMIC_NI_VPROC_R2R_PDN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ni_vproc_vsleep_sel(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPROC_CON18), (&val), (kal_uint32)(PMIC_NI_VPROC_VSLEEP_SEL_MASK), (kal_uint32)(PMIC_NI_VPROC_VSLEEP_SEL_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vsys_triml(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_TRIML_MASK), (kal_uint32)(PMIC_RG_VSYS_TRIML_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_trimh(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_TRIMH_MASK), (kal_uint32)(PMIC_RG_VSYS_TRIMH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_csm(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_CSM_MASK), (kal_uint32)(PMIC_RG_VSYS_CSM_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_vsys_zxos_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_ZXOS_TRIM_MASK), (kal_uint32)(PMIC_RG_VSYS_ZXOS_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_rzsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_RZSEL_MASK), (kal_uint32)(PMIC_RG_VSYS_RZSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_cc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_CC_MASK), (kal_uint32)(PMIC_RG_VSYS_CC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_csr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_CSR_MASK), (kal_uint32)(PMIC_RG_VSYS_CSR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_csl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_CSL_MASK), (kal_uint32)(PMIC_RG_VSYS_CSL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_zx_os(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_ZX_OS_MASK), (kal_uint32)(PMIC_RG_VSYS_ZX_OS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_avp_os(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_AVP_OS_MASK), (kal_uint32)(PMIC_RG_VSYS_AVP_OS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_avp_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_AVP_EN_MASK), (kal_uint32)(PMIC_RG_VSYS_AVP_EN_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_vsys_modeset(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_MODESET_MASK), (kal_uint32)(PMIC_RG_VSYS_MODESET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VSYS_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_slp(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_SLP_MASK), (kal_uint32)(PMIC_RG_VSYS_SLP_SHIFT) ); pmic_unlock(); } void upmu_set_qi_vsys_vsleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_QI_VSYS_VSLEEP_MASK), (kal_uint32)(PMIC_QI_VSYS_VSLEEP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsys_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYS_RSV_MASK), (kal_uint32)(PMIC_RG_VSYS_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_en_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_EN_CTRL_MASK), (kal_uint32)(PMIC_VSYS_EN_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_vosel_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_VOSEL_CTRL_MASK), (kal_uint32)(PMIC_VSYS_VOSEL_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_dlc_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_DLC_CTRL_MASK), (kal_uint32)(PMIC_VSYS_DLC_CTRL_SHIFT) ); pmic_unlock(); } void 
upmu_set_vsys_burst_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_BURST_CTRL_MASK), (kal_uint32)(PMIC_VSYS_BURST_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_EN_MASK), (kal_uint32)(PMIC_VSYS_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vsys_stb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON7), (&val), (kal_uint32)(PMIC_QI_VSYS_STB_MASK), (kal_uint32)(PMIC_QI_VSYS_STB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vsys_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON7), (&val), (kal_uint32)(PMIC_QI_VSYS_EN_MASK), (kal_uint32)(PMIC_QI_VSYS_EN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vsys_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON7), (&val), (kal_uint32)(PMIC_QI_VSYS_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VSYS_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vsys_sfchg_frate(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_SFCHG_FRATE_MASK), (kal_uint32)(PMIC_VSYS_SFCHG_FRATE_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_sfchg_fen(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_SFCHG_FEN_MASK), (kal_uint32)(PMIC_VSYS_SFCHG_FEN_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_sfchg_rrate(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_SFCHG_RRATE_MASK), (kal_uint32)(PMIC_VSYS_SFCHG_RRATE_SHIFT) ); 
pmic_unlock(); } void upmu_set_vsys_sfchg_ren(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_SFCHG_REN_MASK), (kal_uint32)(PMIC_VSYS_SFCHG_REN_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_VOSEL_MASK), (kal_uint32)(PMIC_VSYS_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_vosel_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_VOSEL_ON_MASK), (kal_uint32)(PMIC_VSYS_VOSEL_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_vosel_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_VOSEL_SLEEP_MASK), (kal_uint32)(PMIC_VSYS_VOSEL_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_vsys_vosel(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON12), (&val), (kal_uint32)(PMIC_NI_VSYS_VOSEL_MASK), (kal_uint32)(PMIC_NI_VSYS_VOSEL_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vsys_burst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_BURST_MASK), (kal_uint32)(PMIC_VSYS_BURST_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_burst_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_BURST_ON_MASK), (kal_uint32)(PMIC_VSYS_BURST_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_burst_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_BURST_SLEEP_MASK), (kal_uint32)(PMIC_VSYS_BURST_SLEEP_SHIFT) ); 
pmic_unlock(); } kal_uint32 upmu_get_qi_vsys_burst(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON13), (&val), (kal_uint32)(PMIC_QI_VSYS_BURST_MASK), (kal_uint32)(PMIC_QI_VSYS_BURST_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vsys_dlc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_DLC_MASK), (kal_uint32)(PMIC_VSYS_DLC_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_dlc_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_DLC_ON_MASK), (kal_uint32)(PMIC_VSYS_DLC_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_dlc_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_DLC_SLEEP_MASK), (kal_uint32)(PMIC_VSYS_DLC_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vsys_dlc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON14), (&val), (kal_uint32)(PMIC_QI_VSYS_DLC_MASK), (kal_uint32)(PMIC_QI_VSYS_DLC_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vsys_dlc_n(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_DLC_N_MASK), (kal_uint32)(PMIC_VSYS_DLC_N_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_dlc_n_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_DLC_N_ON_MASK), (kal_uint32)(PMIC_VSYS_DLC_N_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_dlc_n_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_DLC_N_SLEEP_MASK), (kal_uint32)(PMIC_VSYS_DLC_N_SLEEP_SHIFT) ); pmic_unlock(); } 
kal_uint32 upmu_get_qi_vsys_dlc_n(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON15), (&val), (kal_uint32)(PMIC_QI_VSYS_DLC_N_MASK), (kal_uint32)(PMIC_QI_VSYS_DLC_N_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vsys_transtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_TRANSTD_MASK), (kal_uint32)(PMIC_VSYS_TRANSTD_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_vosel_trans_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_VOSEL_TRANS_EN_MASK), (kal_uint32)(PMIC_VSYS_VOSEL_TRANS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_vosel_trans_once(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_VOSEL_TRANS_ONCE_MASK), (kal_uint32)(PMIC_VSYS_VOSEL_TRANS_ONCE_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_vsys_vosel_trans(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON18), (&val), (kal_uint32)(PMIC_NI_VSYS_VOSEL_TRANS_MASK), (kal_uint32)(PMIC_NI_VSYS_VOSEL_TRANS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vsys_vsleep_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_VSLEEP_EN_MASK), (kal_uint32)(PMIC_VSYS_VSLEEP_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_r2r_pdn(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VSYS_R2R_PDN_MASK), (kal_uint32)(PMIC_VSYS_R2R_PDN_SHIFT) ); pmic_unlock(); } void upmu_set_vsys_vsleep_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VSYS_CON18), (kal_uint32)(val), 
(kal_uint32)(PMIC_VSYS_VSLEEP_SEL_MASK), (kal_uint32)(PMIC_VSYS_VSLEEP_SEL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_vsys_r2r_pdn(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON18), (&val), (kal_uint32)(PMIC_NI_VSYS_R2R_PDN_MASK), (kal_uint32)(PMIC_NI_VSYS_R2R_PDN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ni_vsys_vsleep_sel(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VSYS_CON18), (&val), (kal_uint32)(PMIC_NI_VSYS_VSLEEP_SEL_MASK), (kal_uint32)(PMIC_NI_VSYS_VSLEEP_SEL_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vpa_triml(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_TRIML_MASK), (kal_uint32)(PMIC_RG_VPA_TRIML_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_trimh(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_TRIMH_MASK), (kal_uint32)(PMIC_RG_VPA_TRIMH_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_rzsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_RZSEL_MASK), (kal_uint32)(PMIC_RG_VPA_RZSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_cc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_CC_MASK), (kal_uint32)(PMIC_RG_VPA_CC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_csr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_CSR_MASK), (kal_uint32)(PMIC_RG_VPA_CSR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_csl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON1), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_VPA_CSL_MASK), (kal_uint32)(PMIC_RG_VPA_CSL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_slew_nmos(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_SLEW_NMOS_MASK), (kal_uint32)(PMIC_RG_VPA_SLEW_NMOS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_slew(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_SLEW_MASK), (kal_uint32)(PMIC_RG_VPA_SLEW_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_zx_os(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_ZX_OS_MASK), (kal_uint32)(PMIC_RG_VPA_ZX_OS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_modeset(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_MODESET_MASK), (kal_uint32)(PMIC_RG_VPA_MODESET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VPA_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_csmir(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_CSMIR_MASK), (kal_uint32)(PMIC_RG_VPA_CSMIR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_vbat_del(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_VBAT_DEL_MASK), (kal_uint32)(PMIC_RG_VPA_VBAT_DEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_slp(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_SLP_MASK), 
(kal_uint32)(PMIC_RG_VPA_SLP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_gpu_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_GPU_EN_MASK), (kal_uint32)(PMIC_RG_VPA_GPU_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpa_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPA_RSV_MASK), (kal_uint32)(PMIC_RG_VPA_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_en_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_EN_CTRL_MASK), (kal_uint32)(PMIC_VPA_EN_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_vosel_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_CTRL_MASK), (kal_uint32)(PMIC_VPA_VOSEL_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_dlc_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_DLC_CTRL_MASK), (kal_uint32)(PMIC_VPA_DLC_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_burst_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_BURST_CTRL_MASK), (kal_uint32)(PMIC_VPA_BURST_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_EN_MASK), (kal_uint32)(PMIC_VPA_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vpa_stb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPA_CON7), (&val), (kal_uint32)(PMIC_QI_VPA_STB_MASK), (kal_uint32)(PMIC_QI_VPA_STB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 
upmu_get_qi_vpa_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPA_CON7), (&val), (kal_uint32)(PMIC_QI_VPA_EN_MASK), (kal_uint32)(PMIC_QI_VPA_EN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vpa_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPA_CON7), (&val), (kal_uint32)(PMIC_QI_VPA_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VPA_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vpa_sfchg_frate(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_SFCHG_FRATE_MASK), (kal_uint32)(PMIC_VPA_SFCHG_FRATE_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_sfchg_fen(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_SFCHG_FEN_MASK), (kal_uint32)(PMIC_VPA_SFCHG_FEN_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_sfchg_rrate(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_SFCHG_RRATE_MASK), (kal_uint32)(PMIC_VPA_SFCHG_RRATE_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_sfchg_ren(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_SFCHG_REN_MASK), (kal_uint32)(PMIC_VPA_SFCHG_REN_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_MASK), (kal_uint32)(PMIC_VPA_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_vosel_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_ON_MASK), (kal_uint32)(PMIC_VPA_VOSEL_ON_SHIFT) ); pmic_unlock(); } void 
upmu_set_vpa_vosel_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_SLEEP_MASK), (kal_uint32)(PMIC_VPA_VOSEL_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_vpa_vosel(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPA_CON12), (&val), (kal_uint32)(PMIC_NI_VPA_VOSEL_MASK), (kal_uint32)(PMIC_NI_VPA_VOSEL_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vpa_dlc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_DLC_MASK), (kal_uint32)(PMIC_VPA_DLC_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_dlc_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_DLC_ON_MASK), (kal_uint32)(PMIC_VPA_DLC_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_dlc_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_DLC_SLEEP_MASK), (kal_uint32)(PMIC_VPA_DLC_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vpa_dlc(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPA_CON14), (&val), (kal_uint32)(PMIC_QI_VPA_DLC_MASK), (kal_uint32)(PMIC_QI_VPA_DLC_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vpa_bursth(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_BURSTH_MASK), (kal_uint32)(PMIC_VPA_BURSTH_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_bursth_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_BURSTH_ON_MASK), (kal_uint32)(PMIC_VPA_BURSTH_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_bursth_sleep(kal_uint32 val) { 
kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_BURSTH_SLEEP_MASK), (kal_uint32)(PMIC_VPA_BURSTH_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vpa_bursth(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPA_CON16), (&val), (kal_uint32)(PMIC_QI_VPA_BURSTH_MASK), (kal_uint32)(PMIC_QI_VPA_BURSTH_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vpa_burstl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_BURSTL_MASK), (kal_uint32)(PMIC_VPA_BURSTL_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_burstl_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_BURSTL_ON_MASK), (kal_uint32)(PMIC_VPA_BURSTL_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_burstl_sleep(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_BURSTL_SLEEP_MASK), (kal_uint32)(PMIC_VPA_BURSTL_SLEEP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vpa_burstl(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPA_CON17), (&val), (kal_uint32)(PMIC_QI_VPA_BURSTL_MASK), (kal_uint32)(PMIC_QI_VPA_BURSTL_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vpa_transtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_TRANSTD_MASK), (kal_uint32)(PMIC_VPA_TRANSTD_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_vosel_trans_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_TRANS_EN_MASK), (kal_uint32)(PMIC_VPA_VOSEL_TRANS_EN_SHIFT) ); pmic_unlock(); } void 
upmu_set_vpa_vosel_trans_once(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_TRANS_ONCE_MASK), (kal_uint32)(PMIC_VPA_VOSEL_TRANS_ONCE_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_vpa_dvs_bw(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(VPA_CON18), (&val), (kal_uint32)(PMIC_NI_VPA_DVS_BW_MASK), (kal_uint32)(PMIC_NI_VPA_DVS_BW_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vpa_dlc_map_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_DLC_MAP_EN_MASK), (kal_uint32)(PMIC_VPA_DLC_MAP_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_vosel_dlc001(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_DLC001_MASK), (kal_uint32)(PMIC_VPA_VOSEL_DLC001_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_vosel_dlc011(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_DLC011_MASK), (kal_uint32)(PMIC_VPA_VOSEL_DLC011_SHIFT) ); pmic_unlock(); } void upmu_set_vpa_vosel_dlc111(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(VPA_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_VPA_VOSEL_DLC111_MASK), (kal_uint32)(PMIC_VPA_VOSEL_DLC111_SHIFT) ); pmic_unlock(); } void upmu_set_k_rst_done(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_K_RST_DONE_MASK), (kal_uint32)(PMIC_K_RST_DONE_SHIFT) ); pmic_unlock(); } void upmu_set_k_map_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_K_MAP_SEL_MASK), (kal_uint32)(PMIC_K_MAP_SEL_SHIFT) ); 
pmic_unlock(); } void upmu_set_k_once_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_K_ONCE_EN_MASK), (kal_uint32)(PMIC_K_ONCE_EN_SHIFT) ); pmic_unlock(); } void upmu_set_k_once(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_K_ONCE_MASK), (kal_uint32)(PMIC_K_ONCE_SHIFT) ); pmic_unlock(); } void upmu_set_k_start_manual(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_K_START_MANUAL_MASK), (kal_uint32)(PMIC_K_START_MANUAL_SHIFT) ); pmic_unlock(); } void upmu_set_k_src_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_K_SRC_SEL_MASK), (kal_uint32)(PMIC_K_SRC_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_k_auto_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_K_AUTO_EN_MASK), (kal_uint32)(PMIC_K_AUTO_EN_SHIFT) ); pmic_unlock(); } void upmu_set_k_inv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_K_INV_MASK), (kal_uint32)(PMIC_K_INV_SHIFT) ); pmic_unlock(); } void upmu_set_k_control_smps(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(BUCK_K_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_K_CONTROL_SMPS_MASK), (kal_uint32)(PMIC_K_CONTROL_SMPS_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_k_result(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(BUCK_K_CON2), (&val), (kal_uint32)(PMIC_K_RESULT_MASK), (kal_uint32)(PMIC_K_RESULT_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_k_done(void) { kal_uint32 ret=0; kal_uint32 val=0; 
pmic_lock(); ret=pmic_read_interface( (kal_uint32)(BUCK_K_CON2), (&val), (kal_uint32)(PMIC_K_DONE_MASK), (kal_uint32)(PMIC_K_DONE_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_k_control(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(BUCK_K_CON2), (&val), (kal_uint32)(PMIC_K_CONTROL_MASK), (kal_uint32)(PMIC_K_CONTROL_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_smps_osc_cal(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(BUCK_K_CON2), (&val), (kal_uint32)(PMIC_QI_SMPS_OSC_CAL_MASK), (kal_uint32)(PMIC_QI_SMPS_OSC_CAL_SHIFT) ); pmic_unlock(); return val; } void upmu_set_isink_ch0_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH0_MODE_MASK), (kal_uint32)(PMIC_ISINK_CH0_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_isink0_rsv1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK0_RSV1_MASK), (kal_uint32)(PMIC_ISINK0_RSV1_SHIFT) ); pmic_unlock(); } void upmu_set_isink_dim0_duty(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_DIM0_DUTY_MASK), (kal_uint32)(PMIC_ISINK_DIM0_DUTY_SHIFT) ); pmic_unlock(); } void upmu_set_isink0_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK0_RSV0_MASK), (kal_uint32)(PMIC_ISINK0_RSV0_SHIFT) ); pmic_unlock(); } void upmu_set_isink_dim0_fsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_DIM0_FSEL_MASK), (kal_uint32)(PMIC_ISINK_DIM0_FSEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_sfstr0_en(kal_uint32 val) { kal_uint32 ret=0; 
pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_SFSTR0_EN_MASK), (kal_uint32)(PMIC_ISINK_SFSTR0_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_sfstr0_tc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_SFSTR0_TC_MASK), (kal_uint32)(PMIC_ISINK_SFSTR0_TC_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch0_step(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH0_STEP_MASK), (kal_uint32)(PMIC_ISINK_CH0_STEP_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath0_toff_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH0_TOFF_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH0_TOFF_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath0_ton_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH0_TON_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH0_TON_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath0_trf_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK0_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH0_TRF_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH0_TRF_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch1_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH1_MODE_MASK), (kal_uint32)(PMIC_ISINK_CH1_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_isink1_rsv1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK1_RSV1_MASK), (kal_uint32)(PMIC_ISINK1_RSV1_SHIFT) ); 
pmic_unlock(); } void upmu_set_isink_dim1_duty(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_DIM1_DUTY_MASK), (kal_uint32)(PMIC_ISINK_DIM1_DUTY_SHIFT) ); pmic_unlock(); } void upmu_set_isink1_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK1_RSV0_MASK), (kal_uint32)(PMIC_ISINK1_RSV0_SHIFT) ); pmic_unlock(); } void upmu_set_isink_dim1_fsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_DIM1_FSEL_MASK), (kal_uint32)(PMIC_ISINK_DIM1_FSEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_sfstr1_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_SFSTR1_EN_MASK), (kal_uint32)(PMIC_ISINK_SFSTR1_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_sfstr1_tc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_SFSTR1_TC_MASK), (kal_uint32)(PMIC_ISINK_SFSTR1_TC_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch1_step(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH1_STEP_MASK), (kal_uint32)(PMIC_ISINK_CH1_STEP_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath1_toff_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH1_TOFF_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH1_TOFF_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath1_ton_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON3), (kal_uint32)(val), 
(kal_uint32)(PMIC_ISINK_BREATH1_TON_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH1_TON_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath1_trf_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK1_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH1_TRF_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH1_TRF_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch2_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH2_MODE_MASK), (kal_uint32)(PMIC_ISINK_CH2_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_isink2_rsv1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK2_RSV1_MASK), (kal_uint32)(PMIC_ISINK2_RSV1_SHIFT) ); pmic_unlock(); } void upmu_set_isink_dim2_duty(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_DIM2_DUTY_MASK), (kal_uint32)(PMIC_ISINK_DIM2_DUTY_SHIFT) ); pmic_unlock(); } void upmu_set_isink2_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK2_RSV0_MASK), (kal_uint32)(PMIC_ISINK2_RSV0_SHIFT) ); pmic_unlock(); } void upmu_set_isink_dim2_fsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_DIM2_FSEL_MASK), (kal_uint32)(PMIC_ISINK_DIM2_FSEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_sfstr2_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_SFSTR2_EN_MASK), (kal_uint32)(PMIC_ISINK_SFSTR2_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_sfstr2_tc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(ISINK2_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_SFSTR2_TC_MASK), (kal_uint32)(PMIC_ISINK_SFSTR2_TC_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch2_step(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH2_STEP_MASK), (kal_uint32)(PMIC_ISINK_CH2_STEP_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath2_toff_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH2_TOFF_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH2_TOFF_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath2_ton_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH2_TON_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH2_TON_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath2_trf_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK2_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH2_TRF_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH2_TRF_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch3_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH3_MODE_MASK), (kal_uint32)(PMIC_ISINK_CH3_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_isink3_rsv1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK3_RSV1_MASK), (kal_uint32)(PMIC_ISINK3_RSV1_SHIFT) ); pmic_unlock(); } void upmu_set_isink_dim3_duty(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_DIM3_DUTY_MASK), (kal_uint32)(PMIC_ISINK_DIM3_DUTY_SHIFT) ); pmic_unlock(); } void 
upmu_set_isink3_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK3_RSV0_MASK), (kal_uint32)(PMIC_ISINK3_RSV0_SHIFT) ); pmic_unlock(); } void upmu_set_isink_dim3_fsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_DIM3_FSEL_MASK), (kal_uint32)(PMIC_ISINK_DIM3_FSEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_sfstr3_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_SFSTR3_EN_MASK), (kal_uint32)(PMIC_ISINK_SFSTR3_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_sfstr3_tc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_SFSTR3_TC_MASK), (kal_uint32)(PMIC_ISINK_SFSTR3_TC_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch3_step(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH3_STEP_MASK), (kal_uint32)(PMIC_ISINK_CH3_STEP_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath3_toff_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH3_TOFF_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH3_TOFF_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath3_ton_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_BREATH3_TON_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH3_TON_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_isink_breath3_trf_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK3_CON3), (kal_uint32)(val), 
(kal_uint32)(PMIC_ISINK_BREATH3_TRF_SEL_MASK), (kal_uint32)(PMIC_ISINK_BREATH3_TRF_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isinks_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_ANA0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINKS_RSV_MASK), (kal_uint32)(PMIC_RG_ISINKS_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink3_double_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_ANA0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK3_DOUBLE_EN_MASK), (kal_uint32)(PMIC_RG_ISINK3_DOUBLE_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink2_double_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_ANA0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK2_DOUBLE_EN_MASK), (kal_uint32)(PMIC_RG_ISINK2_DOUBLE_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink1_double_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_ANA0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK1_DOUBLE_EN_MASK), (kal_uint32)(PMIC_RG_ISINK1_DOUBLE_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_isink0_double_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_ANA0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ISINK0_DOUBLE_EN_MASK), (kal_uint32)(PMIC_RG_ISINK0_DOUBLE_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_trim_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_ANA0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TRIM_SEL_MASK), (kal_uint32)(PMIC_RG_TRIM_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_trim_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_ANA0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_TRIM_EN_MASK), (kal_uint32)(PMIC_RG_TRIM_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_ni_isink3_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); 
ret=pmic_read_interface( (kal_uint32)(ISINK_ANA1), (&val), (kal_uint32)(PMIC_NI_ISINK3_STATUS_MASK), (kal_uint32)(PMIC_NI_ISINK3_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ni_isink2_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ISINK_ANA1), (&val), (kal_uint32)(PMIC_NI_ISINK2_STATUS_MASK), (kal_uint32)(PMIC_NI_ISINK2_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ni_isink1_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ISINK_ANA1), (&val), (kal_uint32)(PMIC_NI_ISINK1_STATUS_MASK), (kal_uint32)(PMIC_NI_ISINK1_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_ni_isink0_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ISINK_ANA1), (&val), (kal_uint32)(PMIC_NI_ISINK0_STATUS_MASK), (kal_uint32)(PMIC_NI_ISINK0_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_isink_phase0_dly_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_PHASE_DLY), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_PHASE0_DLY_EN_MASK), (kal_uint32)(PMIC_ISINK_PHASE0_DLY_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_phase1_dly_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_PHASE_DLY), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_PHASE1_DLY_EN_MASK), (kal_uint32)(PMIC_ISINK_PHASE1_DLY_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_phase2_dly_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_PHASE_DLY), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_PHASE2_DLY_EN_MASK), (kal_uint32)(PMIC_ISINK_PHASE2_DLY_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_phase3_dly_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_PHASE_DLY), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_PHASE3_DLY_EN_MASK), 
(kal_uint32)(PMIC_ISINK_PHASE3_DLY_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_phase_dly_tc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_PHASE_DLY), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_PHASE_DLY_TC_MASK), (kal_uint32)(PMIC_ISINK_PHASE_DLY_TC_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch0_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_EN_CTRL), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH0_EN_MASK), (kal_uint32)(PMIC_ISINK_CH0_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch1_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_EN_CTRL), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH1_EN_MASK), (kal_uint32)(PMIC_ISINK_CH1_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch2_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_EN_CTRL), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH2_EN_MASK), (kal_uint32)(PMIC_ISINK_CH2_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_ch3_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_EN_CTRL), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CH3_EN_MASK), (kal_uint32)(PMIC_ISINK_CH3_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_chop0_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_EN_CTRL), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CHOP0_EN_MASK), (kal_uint32)(PMIC_ISINK_CHOP0_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_chop1_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_EN_CTRL), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CHOP1_EN_MASK), (kal_uint32)(PMIC_ISINK_CHOP1_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_chop2_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_EN_CTRL), (kal_uint32)(val), 
(kal_uint32)(PMIC_ISINK_CHOP2_EN_MASK), (kal_uint32)(PMIC_ISINK_CHOP2_EN_SHIFT) ); pmic_unlock(); } void upmu_set_isink_chop3_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ISINK_EN_CTRL), (kal_uint32)(val), (kal_uint32)(PMIC_ISINK_CHOP3_EN_MASK), (kal_uint32)(PMIC_ISINK_CHOP3_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_analdorsv1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ANALDORSV1_MASK), (kal_uint32)(PMIC_RG_ANALDORSV1_SHIFT) ); pmic_unlock(); } void upmu_set_vtcxo_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_VTCXO_LP_SEL_MASK), (kal_uint32)(PMIC_VTCXO_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vtcxo_lp_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_VTCXO_LP_SET_MASK), (kal_uint32)(PMIC_VTCXO_LP_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vtcxo_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON1), (&val), (kal_uint32)(PMIC_QI_VTCXO_MODE_MASK), (kal_uint32)(PMIC_QI_VTCXO_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vtcxo_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VTCXO_STBTD_MASK), (kal_uint32)(PMIC_RG_VTCXO_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vtcxo_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VTCXO_EN_MASK), (kal_uint32)(PMIC_RG_VTCXO_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vtcxo_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON1), (kal_uint32)(val), 
(kal_uint32)(PMIC_VTCXO_ON_CTRL_MASK), (kal_uint32)(PMIC_VTCXO_ON_CTRL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vtcxo_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON1), (&val), (kal_uint32)(PMIC_QI_VTCXO_EN_MASK), (kal_uint32)(PMIC_QI_VTCXO_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_va_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_VA_LP_SEL_MASK), (kal_uint32)(PMIC_VA_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_va_lp_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_VA_LP_SET_MASK), (kal_uint32)(PMIC_VA_LP_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_va_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON2), (&val), (kal_uint32)(PMIC_QI_VA_MODE_MASK), (kal_uint32)(PMIC_QI_VA_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_va_sense_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VA_SENSE_SEL_MASK), (kal_uint32)(PMIC_RG_VA_SENSE_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_va_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VA_STBTD_MASK), (kal_uint32)(PMIC_RG_VA_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_va_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VA_EN_MASK), (kal_uint32)(PMIC_RG_VA_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_va_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON2), (&val), (kal_uint32)(PMIC_QI_VA_EN_MASK), 
(kal_uint32)(PMIC_QI_VA_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_analdorsv2(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ANALDORSV2_MASK), (kal_uint32)(PMIC_RG_ANALDORSV2_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_STBTD_MASK), (kal_uint32)(PMIC_RG_VCAMA_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_EN_MASK), (kal_uint32)(PMIC_RG_VCAMA_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VCAMA_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_va_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VA_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VA_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vtcxo_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VTCXO_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VTCXO_BIST_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcama_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON5), (&val), (kal_uint32)(PMIC_QI_VCAMA_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VCAMA_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_va_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON5), (&val), 
(kal_uint32)(PMIC_QI_VA_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VA_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vtcxo_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON5), (&val), (kal_uint32)(PMIC_QI_VTCXO_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VTCXO_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_analdorsv3(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ANALDORSV3_MASK), (kal_uint32)(PMIC_RG_ANALDORSV3_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vtcxo_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VTCXO_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VTCXO_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vtcxo_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VTCXO_OCFB_MASK), (kal_uint32)(PMIC_RG_VTCXO_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vtcxo_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VTCXO_CAL_MASK), (kal_uint32)(PMIC_RG_VTCXO_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_va_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VA_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VA_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_va_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VA_OCFB_MASK), (kal_uint32)(PMIC_RG_VA_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_va_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(ANALDO_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VA_VOSEL_MASK), (kal_uint32)(PMIC_RG_VA_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_va_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VA_CAL_MASK), (kal_uint32)(PMIC_RG_VA_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_fbsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_FBSEL_MASK), (kal_uint32)(PMIC_RG_VCAMA_FBSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VCAMA_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_OCFB_MASK), (kal_uint32)(PMIC_RG_VCAMA_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VCAMA_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_VOSEL_MASK), (kal_uint32)(PMIC_RG_VCAMA_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_vcama_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_VCAMA_ON_CTRL_MASK), (kal_uint32)(PMIC_VCAMA_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcama_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(ANALDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMA_CAL_MASK), (kal_uint32)(PMIC_RG_VCAMA_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_reserve_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_RG_RESERVE_STB_SEL_MASK), (kal_uint32)(PMIC_RG_RESERVE_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aldo_reserve(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ALDO_RESERVE_MASK), (kal_uint32)(PMIC_RG_ALDO_RESERVE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn33_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_VOSEL_MASK), (kal_uint32)(PMIC_RG_VCN33_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn33_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VCN33_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vcn33_on_ctrl_bt(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_VCN33_ON_CTRL_BT_MASK), (kal_uint32)(PMIC_VCN33_ON_CTRL_BT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn33_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_OCFB_MASK), (kal_uint32)(PMIC_RG_VCN33_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn33_en_bt(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_EN_BT_MASK), (kal_uint32)(PMIC_RG_VCN33_EN_BT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn33_cal(kal_uint32 val) { kal_uint32 ret=0; 
pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_CAL_MASK), (kal_uint32)(PMIC_RG_VCN33_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn33_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_STBTD_MASK), (kal_uint32)(PMIC_RG_VCN33_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn33_en_wifi(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_EN_WIFI_MASK), (kal_uint32)(PMIC_RG_VCN33_EN_WIFI_SHIFT) ); pmic_unlock(); } void upmu_set_vcn33_on_ctrl_wifi(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_VCN33_ON_CTRL_WIFI_MASK), (kal_uint32)(PMIC_VCN33_ON_CTRL_WIFI_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcn33_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON17), (&val), (kal_uint32)(PMIC_QI_VCN33_EN_MASK), (kal_uint32)(PMIC_QI_VCN33_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vcn28_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN28_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VCN28_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn28_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN28_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VCN28_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn28_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN28_OCFB_MASK), (kal_uint32)(PMIC_RG_VCN28_OCFB_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_vcn28_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN28_VOSEL_MASK), (kal_uint32)(PMIC_RG_VCN28_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn28_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN28_CAL_MASK), (kal_uint32)(PMIC_RG_VCN28_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn28_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN28_STBTD_MASK), (kal_uint32)(PMIC_RG_VCN28_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn28_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN28_EN_MASK), (kal_uint32)(PMIC_RG_VCN28_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vcn28_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_VCN28_ON_CTRL_MASK), (kal_uint32)(PMIC_VCN28_ON_CTRL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcn28_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON19), (&val), (kal_uint32)(PMIC_QI_VCN28_EN_MASK), (kal_uint32)(PMIC_QI_VCN28_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vcn28_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_VCN28_LP_SEL_MASK), (kal_uint32)(PMIC_VCN28_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vcn28_lp_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_VCN28_LP_SET_MASK), (kal_uint32)(PMIC_VCN28_LP_SET_SHIFT) ); pmic_unlock(); } kal_uint32 
upmu_get_qi_vcn28_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON20), (&val), (kal_uint32)(PMIC_QI_VCN28_MODE_MASK), (kal_uint32)(PMIC_QI_VCN28_MODE_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vcn28_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON20), (&val), (kal_uint32)(PMIC_QI_VCN28_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VCN28_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vcn33_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_VCN33_LP_SEL_MASK), (kal_uint32)(PMIC_VCN33_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vcn33_lp_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_VCN33_LP_SET_MASK), (kal_uint32)(PMIC_VCN33_LP_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcn33_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON21), (&val), (kal_uint32)(PMIC_QI_VCN33_MODE_MASK), (kal_uint32)(PMIC_QI_VCN33_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vcn33_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_EN_MASK), (kal_uint32)(PMIC_RG_VCN33_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn33_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ANALDO_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN33_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VCN33_BIST_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcn33_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ANALDO_CON21), (&val), (kal_uint32)(PMIC_QI_VCN33_OC_STATUS_MASK), 
(kal_uint32)(PMIC_QI_VCN33_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vio28_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_VIO28_LP_SEL_MASK), (kal_uint32)(PMIC_VIO28_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vio28_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_VIO28_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VIO28_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vio28_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON0), (&val), (kal_uint32)(PMIC_QI_VIO28_MODE_MASK), (kal_uint32)(PMIC_QI_VIO28_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vio28_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO28_STBTD_MASK), (kal_uint32)(PMIC_RG_VIO28_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_vio28_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_VIO28_EN_MASK), (kal_uint32)(PMIC_VIO28_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vio28_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON0), (&val), (kal_uint32)(PMIC_QI_VIO28_EN_MASK), (kal_uint32)(PMIC_QI_VIO28_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vusb_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_VUSB_LP_SEL_MASK), (kal_uint32)(PMIC_VUSB_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vusb_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON2), (kal_uint32)(val), 
(kal_uint32)(PMIC_VUSB_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VUSB_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vusb_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON2), (&val), (kal_uint32)(PMIC_QI_VUSB_MODE_MASK), (kal_uint32)(PMIC_QI_VUSB_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vusb_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VUSB_STBTD_MASK), (kal_uint32)(PMIC_RG_VUSB_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vusb_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VUSB_EN_MASK), (kal_uint32)(PMIC_RG_VUSB_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vusb_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON2), (&val), (kal_uint32)(PMIC_QI_VUSB_EN_MASK), (kal_uint32)(PMIC_QI_VUSB_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vmc_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_VMC_LP_SEL_MASK), (kal_uint32)(PMIC_VMC_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vmc_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_VMC_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VMC_LP_MODE_SET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STB_SEL_MASK), (kal_uint32)(PMIC_RG_STB_SEL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vmc_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON3), (&val), 
(kal_uint32)(PMIC_QI_VMC_MODE_MASK), (kal_uint32)(PMIC_QI_VMC_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vmc_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_STBTD_MASK), (kal_uint32)(PMIC_RG_VMC_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_EN_MASK), (kal_uint32)(PMIC_RG_VMC_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_int_dis_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_INT_DIS_SEL_MASK), (kal_uint32)(PMIC_RG_VMC_INT_DIS_SEL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vmc_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON3), (&val), (kal_uint32)(PMIC_QI_VMC_EN_MASK), (kal_uint32)(PMIC_QI_VMC_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vmch_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VMCH_LP_SEL_MASK), (kal_uint32)(PMIC_VMCH_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vmch_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_VMCH_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VMCH_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vmch_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON5), (&val), (kal_uint32)(PMIC_QI_VMCH_MODE_MASK), (kal_uint32)(PMIC_QI_VMCH_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vmch_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON5), 
(kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_STBTD_MASK), (kal_uint32)(PMIC_RG_VMCH_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_EN_MASK), (kal_uint32)(PMIC_RG_VMCH_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vmch_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON5), (&val), (kal_uint32)(PMIC_QI_VMCH_EN_MASK), (kal_uint32)(PMIC_QI_VMCH_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vemc_3v3_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_VEMC_3V3_LP_SEL_MASK), (kal_uint32)(PMIC_VEMC_3V3_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vemc_3v3_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_VEMC_3V3_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VEMC_3V3_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vemc_3v3_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON6), (&val), (kal_uint32)(PMIC_QI_VEMC_3V3_MODE_MASK), (kal_uint32)(PMIC_QI_VEMC_3V3_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vemc_3v3_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_STBTD_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vemc_3v3_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_EN_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vemc_3v3_en(void) { kal_uint32 ret=0; kal_uint32 val=0; 
pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON6), (&val), (kal_uint32)(PMIC_QI_VEMC_3V3_EN_MASK), (kal_uint32)(PMIC_QI_VEMC_3V3_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_vgp1_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VGP1_LP_SEL_MASK), (kal_uint32)(PMIC_VGP1_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vgp1_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_VGP1_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VGP1_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vgp1_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON7), (&val), (kal_uint32)(PMIC_QI_VGP1_MODE_MASK), (kal_uint32)(PMIC_QI_VGP1_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vgp1_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP1_STBTD_MASK), (kal_uint32)(PMIC_RG_VGP1_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp1_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP1_EN_MASK), (kal_uint32)(PMIC_RG_VGP1_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vgp2_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VGP2_LP_SEL_MASK), (kal_uint32)(PMIC_VGP2_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vgp2_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_VGP2_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VGP2_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vgp2_mode(void) { kal_uint32 ret=0; kal_uint32 
val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON8), (&val), (kal_uint32)(PMIC_QI_VGP2_MODE_MASK), (kal_uint32)(PMIC_QI_VGP2_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vgp2_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP2_STBTD_MASK), (kal_uint32)(PMIC_RG_VGP2_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp2_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP2_EN_MASK), (kal_uint32)(PMIC_RG_VGP2_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vgp3_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_VGP3_LP_SEL_MASK), (kal_uint32)(PMIC_VGP3_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vgp3_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_VGP3_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VGP3_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vgp3_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON9), (&val), (kal_uint32)(PMIC_QI_VGP3_MODE_MASK), (kal_uint32)(PMIC_QI_VGP3_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vgp3_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP3_STBTD_MASK), (kal_uint32)(PMIC_RG_VGP3_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp3_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP3_EN_MASK), (kal_uint32)(PMIC_RG_VGP3_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn_1v8_ndis_en(kal_uint32 val) { kal_uint32 ret=0; 
pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN_1V8_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VCN_1V8_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vcn_1v8_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_VCN_1V8_ON_CTRL_MASK), (kal_uint32)(PMIC_VCN_1V8_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn_1v8_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN_1V8_OCFB_MASK), (kal_uint32)(PMIC_RG_VCN_1V8_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn_1v8_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN_1V8_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VCN_1V8_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn_1v8_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN_1V8_CAL_MASK), (kal_uint32)(PMIC_RG_VCN_1V8_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_vcn_1v8_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_VCN_1V8_LP_SEL_MASK), (kal_uint32)(PMIC_VCN_1V8_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vcn_1v8_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_VCN_1V8_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VCN_1V8_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcn_1v8_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON11), (&val), (kal_uint32)(PMIC_QI_VCN_1V8_MODE_MASK), (kal_uint32)(PMIC_QI_VCN_1V8_MODE_SHIFT) ); pmic_unlock(); 
return val; } void upmu_set_rg_vcn_1v8_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN_1V8_STBTD_MASK), (kal_uint32)(PMIC_RG_VCN_1V8_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn_1v8_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN_1V8_EN_MASK), (kal_uint32)(PMIC_RG_VCN_1V8_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcn_1v8_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON11), (&val), (kal_uint32)(PMIC_QI_VCN_1V8_EN_MASK), (kal_uint32)(PMIC_QI_VCN_1V8_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_stb_sim1_sio(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STB_SIM1_SIO_MASK), (kal_uint32)(PMIC_RG_STB_SIM1_SIO_SHIFT) ); pmic_unlock(); } void upmu_set_re_digldorsv1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_RE_DIGLDORSV1_MASK), (kal_uint32)(PMIC_RE_DIGLDORSV1_SHIFT) ); pmic_unlock(); } void upmu_set_vsim1_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_VSIM1_LP_SEL_MASK), (kal_uint32)(PMIC_VSIM1_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vsim1_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_VSIM1_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VSIM1_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vsim1_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON13), (&val), (kal_uint32)(PMIC_QI_VSIM1_MODE_MASK), 
(kal_uint32)(PMIC_QI_VSIM1_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vsim1_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM1_STBTD_MASK), (kal_uint32)(PMIC_RG_VSIM1_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim1_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM1_EN_MASK), (kal_uint32)(PMIC_RG_VSIM1_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vsim2_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VSIM2_LP_SEL_MASK), (kal_uint32)(PMIC_VSIM2_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vsim2_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VSIM2_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VSIM2_LP_MODE_SET_SHIFT) ); pmic_unlock(); } void upmu_set_vsim2_ther_shdn_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_VSIM2_THER_SHDN_EN_MASK), (kal_uint32)(PMIC_VSIM2_THER_SHDN_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vsim2_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON14), (&val), (kal_uint32)(PMIC_QI_VSIM2_MODE_MASK), (kal_uint32)(PMIC_QI_VSIM2_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vsim2_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM2_STBTD_MASK), (kal_uint32)(PMIC_RG_VSIM2_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim2_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON14), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_VSIM2_EN_MASK), (kal_uint32)(PMIC_RG_VSIM2_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vrtc_force_on(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VRTC_FORCE_ON_MASK), (kal_uint32)(PMIC_RG_VRTC_FORCE_ON_SHIFT) ); pmic_unlock(); } void upmu_set_vrtc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_VRTC_EN_MASK), (kal_uint32)(PMIC_VRTC_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vrtc_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON15), (&val), (kal_uint32)(PMIC_QI_VRTC_EN_MASK), (kal_uint32)(PMIC_QI_VRTC_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vemc_3v3_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VMCH_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VMC_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vusb_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VUSB_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VUSB_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio28_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON16), 
(kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO28_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VIO28_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vrtc_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VRTC_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VRTC_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim2_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM2_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VSIM2_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim1_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM1_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VSIM1_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vibr_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VIBR_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp3_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP3_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VGP3_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp2_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP2_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VGP2_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp1_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP1_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VGP1_BIST_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vemc_3v3_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; 
pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON18), (&val), (kal_uint32)(PMIC_QI_VEMC_3V3_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VEMC_3V3_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vmch_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON18), (&val), (kal_uint32)(PMIC_QI_VMCH_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VMCH_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vmc_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON18), (&val), (kal_uint32)(PMIC_QI_VMC_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VMC_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vusb_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON18), (&val), (kal_uint32)(PMIC_QI_VUSB_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VUSB_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vio28_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON18), (&val), (kal_uint32)(PMIC_QI_VIO28_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VIO28_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vsim2_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON19), (&val), (kal_uint32)(PMIC_QI_VSIM2_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VSIM2_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vsim1_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON19), (&val), (kal_uint32)(PMIC_QI_VSIM1_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VSIM1_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vibr_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON19), 
(&val), (kal_uint32)(PMIC_QI_VIBR_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VIBR_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vgp3_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON19), (&val), (kal_uint32)(PMIC_QI_VGP3_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VGP3_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vgp2_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON19), (&val), (kal_uint32)(PMIC_QI_VGP2_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VGP2_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vgp1_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON19), (&val), (kal_uint32)(PMIC_QI_VGP1_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VGP1_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_stb_sim2_sio(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_RG_STB_SIM2_SIO_MASK), (kal_uint32)(PMIC_RG_STB_SIM2_SIO_SHIFT) ); pmic_unlock(); } void upmu_set_re_digldorsv2(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_RE_DIGLDORSV2_MASK), (kal_uint32)(PMIC_RE_DIGLDORSV2_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio28_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO28_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VIO28_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio28_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO28_OCFB_MASK), (kal_uint32)(PMIC_RG_VIO28_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio28_cal(kal_uint32 
val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO28_CAL_MASK), (kal_uint32)(PMIC_RG_VIO28_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vusb_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VUSB_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VUSB_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vusb_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VUSB_OCFB_MASK), (kal_uint32)(PMIC_RG_VUSB_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vusb_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VUSB_CAL_MASK), (kal_uint32)(PMIC_RG_VUSB_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VMC_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vmc_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_VMC_ON_CTRL_MASK), (kal_uint32)(PMIC_VMC_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_OCFB_MASK), (kal_uint32)(PMIC_RG_VMC_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_VOSEL_MASK), (kal_uint32)(PMIC_RG_VMC_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_stb_sel(kal_uint32 val) { kal_uint32 
ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VMC_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_stb_sel_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_STB_SEL_CAL_MASK), (kal_uint32)(PMIC_RG_VMC_STB_SEL_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmc_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMC_CAL_MASK), (kal_uint32)(PMIC_RG_VMC_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VMCH_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vmch_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_VMCH_ON_CTRL_MASK), (kal_uint32)(PMIC_VMCH_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_OCFB_MASK), (kal_uint32)(PMIC_RG_VMCH_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_db_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_DB_EN_MASK), (kal_uint32)(PMIC_RG_VMCH_DB_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VMCH_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_vosel(kal_uint32 val) { 
kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_VOSEL_MASK), (kal_uint32)(PMIC_RG_VMCH_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_stb_sel_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_STB_SEL_CAL_MASK), (kal_uint32)(PMIC_RG_VMCH_STB_SEL_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vmch_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VMCH_CAL_MASK), (kal_uint32)(PMIC_RG_VMCH_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vemc_3v3_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vemc_3v3_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_VEMC_3V3_ON_CTRL_MASK), (kal_uint32)(PMIC_VEMC_3V3_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vemc_3v3_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_OCFB_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vemc_3v3_dl_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_DL_EN_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_DL_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vemc_3v3_db_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_DB_EN_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_DB_EN_SHIFT) 
); pmic_unlock(); } void upmu_set_rg_vemc_3v3_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vemc_3v3_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_VOSEL_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vemc_3v3_stb_sel_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_STB_SEL_CAL_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_STB_SEL_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vemc_3v3_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VEMC_3V3_CAL_MASK), (kal_uint32)(PMIC_RG_VEMC_3V3_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp1_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON28), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP1_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VGP1_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp1_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON28), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP1_OCFB_MASK), (kal_uint32)(PMIC_RG_VGP1_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp1_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON28), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP1_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VGP1_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp1_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON28), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_VGP1_VOSEL_MASK), (kal_uint32)(PMIC_RG_VGP1_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp1_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON28), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP1_CAL_MASK), (kal_uint32)(PMIC_RG_VGP1_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp2_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON29), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP2_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VGP2_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp2_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON29), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP2_OCFB_MASK), (kal_uint32)(PMIC_RG_VGP2_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp2_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON29), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP2_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VGP2_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp2_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON29), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP2_VOSEL_MASK), (kal_uint32)(PMIC_RG_VGP2_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp2_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON29), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP2_CAL_MASK), (kal_uint32)(PMIC_RG_VGP2_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp3_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON30), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP3_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VGP3_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp3_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON30), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_VGP3_OCFB_MASK), (kal_uint32)(PMIC_RG_VGP3_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp3_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON30), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP3_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VGP3_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp3_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON30), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP3_VOSEL_MASK), (kal_uint32)(PMIC_RG_VGP3_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vgp3_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON30), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VGP3_CAL_MASK), (kal_uint32)(PMIC_RG_VGP3_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_vcam_af_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON31), (kal_uint32)(val), (kal_uint32)(PMIC_VCAM_AF_LP_SEL_MASK), (kal_uint32)(PMIC_VCAM_AF_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vcam_af_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON31), (kal_uint32)(val), (kal_uint32)(PMIC_VCAM_AF_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VCAM_AF_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcam_af_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON31), (&val), (kal_uint32)(PMIC_QI_VCAM_AF_MODE_MASK), (kal_uint32)(PMIC_QI_VCAM_AF_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vcam_af_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON31), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_AF_STBTD_MASK), (kal_uint32)(PMIC_RG_VCAM_AF_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_af_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(DIGLDO_CON31), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_AF_EN_MASK), (kal_uint32)(PMIC_RG_VCAM_AF_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_af_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON32), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_AF_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VCAM_AF_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_af_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON32), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_AF_OCFB_MASK), (kal_uint32)(PMIC_RG_VCAM_AF_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_vcam_af_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON32), (kal_uint32)(val), (kal_uint32)(PMIC_VCAM_AF_ON_CTRL_MASK), (kal_uint32)(PMIC_VCAM_AF_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_af_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON32), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_AF_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VCAM_AF_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_af_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON32), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_AF_VOSEL_MASK), (kal_uint32)(PMIC_RG_VCAM_AF_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_af_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON32), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_AF_CAL_MASK), (kal_uint32)(PMIC_RG_VCAM_AF_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_re_digldorsv3(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON33), (kal_uint32)(val), (kal_uint32)(PMIC_RE_DIGLDORSV3_MASK), (kal_uint32)(PMIC_RE_DIGLDORSV3_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim1_ndis_en(kal_uint32 val) { 
kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON34), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM1_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VSIM1_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim1_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON34), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM1_OCFB_MASK), (kal_uint32)(PMIC_RG_VSIM1_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim1_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON34), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM1_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VSIM1_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim1_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON34), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM1_VOSEL_MASK), (kal_uint32)(PMIC_RG_VSIM1_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim1_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON34), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM1_CAL_MASK), (kal_uint32)(PMIC_RG_VSIM1_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim2_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON35), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM2_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VSIM2_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim2_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON35), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM2_OCFB_MASK), (kal_uint32)(PMIC_RG_VSIM2_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim2_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON35), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM2_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VSIM2_STB_SEL_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_vsim2_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON35), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM2_VOSEL_MASK), (kal_uint32)(PMIC_RG_VSIM2_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsim2_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON35), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSIM2_CAL_MASK), (kal_uint32)(PMIC_RG_VSIM2_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vsysldo_reserve(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON36), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VSYSLDO_RESERVE_MASK), (kal_uint32)(PMIC_RG_VSYSLDO_RESERVE_SHIFT) ); pmic_unlock(); } void upmu_set_vibr_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON39), (kal_uint32)(val), (kal_uint32)(PMIC_VIBR_LP_SEL_MASK), (kal_uint32)(PMIC_VIBR_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vibr_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON39), (kal_uint32)(val), (kal_uint32)(PMIC_VIBR_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VIBR_LP_MODE_SET_SHIFT) ); pmic_unlock(); } void upmu_set_vibr_ther_shen_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON39), (kal_uint32)(val), (kal_uint32)(PMIC_VIBR_THER_SHEN_EN_MASK), (kal_uint32)(PMIC_VIBR_THER_SHEN_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vibr_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON39), (&val), (kal_uint32)(PMIC_QI_VIBR_MODE_MASK), (kal_uint32)(PMIC_QI_VIBR_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vibr_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON39), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_STBTD_MASK), 
(kal_uint32)(PMIC_RG_VIBR_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vibr_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON39), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_EN_MASK), (kal_uint32)(PMIC_RG_VIBR_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vibr_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON40), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VIBR_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vibr_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON40), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_OCFB_MASK), (kal_uint32)(PMIC_RG_VIBR_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vibr_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON40), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VIBR_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vibr_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON40), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_VOSEL_MASK), (kal_uint32)(PMIC_RG_VIBR_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vibr_stb_sel_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON40), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_STB_SEL_CAL_MASK), (kal_uint32)(PMIC_RG_VIBR_STB_SEL_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vibr_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON40), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIBR_CAL_MASK), (kal_uint32)(PMIC_RG_VIBR_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_digldo_rsv1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON41), (kal_uint32)(val), (kal_uint32)(PMIC_DIGLDO_RSV1_MASK), 
(kal_uint32)(PMIC_DIGLDO_RSV1_SHIFT) ); pmic_unlock(); } void upmu_set_digldo_rsv0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON41), (kal_uint32)(val), (kal_uint32)(PMIC_DIGLDO_RSV0_MASK), (kal_uint32)(PMIC_DIGLDO_RSV0_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ldo_ft(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON41), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LDO_FT_MASK), (kal_uint32)(PMIC_RG_LDO_FT_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcam_io_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON42), (&val), (kal_uint32)(PMIC_QI_VCAM_IO_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VCAM_IO_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vcamd_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON42), (&val), (kal_uint32)(PMIC_QI_VCAMD_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VCAMD_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vcn_1v8_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON42), (&val), (kal_uint32)(PMIC_QI_VCN_1V8_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VCN_1V8_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vio18_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON42), (&val), (kal_uint32)(PMIC_QI_VIO18_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VIO18_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vrf18_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON42), (&val), (kal_uint32)(PMIC_QI_VRF18_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VRF18_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vm_oc_status(void) { kal_uint32 ret=0; 
kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON42), (&val), (kal_uint32)(PMIC_QI_VM_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VM_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_qi_vcam_af_oc_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON42), (&val), (kal_uint32)(PMIC_QI_VCAM_AF_OC_STATUS_MASK), (kal_uint32)(PMIC_QI_VCAM_AF_OC_STATUS_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vcam_af_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON43), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_AF_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VCAM_AF_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcamd_io_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON43), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_IO_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VCAMD_IO_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcn_1v8_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON43), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCN_1V8_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VCN_1V8_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcamd_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON43), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VCAMD_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio18_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON43), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO18_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VIO18_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vm_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON43), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VM_BIST_EN_MASK), 
(kal_uint32)(PMIC_RG_VM_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vrf18_bist_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON43), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VRF18_BIST_EN_MASK), (kal_uint32)(PMIC_RG_VRF18_BIST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vibr_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON44), (kal_uint32)(val), (kal_uint32)(PMIC_VIBR_ON_CTRL_MASK), (kal_uint32)(PMIC_VIBR_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vsim2_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON44), (kal_uint32)(val), (kal_uint32)(PMIC_VSIM2_ON_CTRL_MASK), (kal_uint32)(PMIC_VSIM2_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vsim1_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON44), (kal_uint32)(val), (kal_uint32)(PMIC_VSIM1_ON_CTRL_MASK), (kal_uint32)(PMIC_VSIM1_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vgp3_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON44), (kal_uint32)(val), (kal_uint32)(PMIC_VGP3_ON_CTRL_MASK), (kal_uint32)(PMIC_VGP3_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vgp2_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON44), (kal_uint32)(val), (kal_uint32)(PMIC_VGP2_ON_CTRL_MASK), (kal_uint32)(PMIC_VGP2_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vgp1_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON44), (kal_uint32)(val), (kal_uint32)(PMIC_VGP1_ON_CTRL_MASK), (kal_uint32)(PMIC_VGP1_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_vrf18_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON45), (kal_uint32)(val), (kal_uint32)(PMIC_VRF18_LP_SEL_MASK), 
(kal_uint32)(PMIC_VRF18_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vrf18_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON45), (kal_uint32)(val), (kal_uint32)(PMIC_VRF18_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VRF18_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vrf18_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON45), (&val), (kal_uint32)(PMIC_QI_VRF18_MODE_MASK), (kal_uint32)(PMIC_QI_VRF18_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vrf18_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON45), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VRF18_STBTD_MASK), (kal_uint32)(PMIC_RG_VRF18_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vrf18_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON45), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VRF18_EN_MASK), (kal_uint32)(PMIC_RG_VRF18_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vrf18_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON46), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VRF18_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VRF18_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vrf18_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON46), (kal_uint32)(val), (kal_uint32)(PMIC_VRF18_ON_CTRL_MASK), (kal_uint32)(PMIC_VRF18_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vrf18_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON46), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VRF18_OCFB_MASK), (kal_uint32)(PMIC_RG_VRF18_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vrf18_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON46), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_VRF18_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VRF18_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vrf18_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON46), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VRF18_CAL_MASK), (kal_uint32)(PMIC_RG_VRF18_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_vm_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON47), (kal_uint32)(val), (kal_uint32)(PMIC_VM_LP_SEL_MASK), (kal_uint32)(PMIC_VM_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vm_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON47), (kal_uint32)(val), (kal_uint32)(PMIC_VM_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VM_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vm_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON47), (&val), (kal_uint32)(PMIC_QI_VM_MODE_MASK), (kal_uint32)(PMIC_QI_VM_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vm_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON47), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VM_STBTD_MASK), (kal_uint32)(PMIC_RG_VM_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vm_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON47), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VM_EN_MASK), (kal_uint32)(PMIC_RG_VM_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vm_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON47), (&val), (kal_uint32)(PMIC_QI_VM_EN_MASK), (kal_uint32)(PMIC_QI_VM_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vm_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON48), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_VM_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VM_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vm_plcur_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON48), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VM_PLCUR_EN_MASK), (kal_uint32)(PMIC_RG_VM_PLCUR_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vm_plcur_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON48), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VM_PLCUR_CAL_MASK), (kal_uint32)(PMIC_RG_VM_PLCUR_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vm_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON48), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VM_VOSEL_MASK), (kal_uint32)(PMIC_RG_VM_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vm_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON48), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VM_OCFB_MASK), (kal_uint32)(PMIC_RG_VM_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vm_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON48), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VM_CAL_MASK), (kal_uint32)(PMIC_RG_VM_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_vio18_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON49), (kal_uint32)(val), (kal_uint32)(PMIC_VIO18_LP_SEL_MASK), (kal_uint32)(PMIC_VIO18_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vio18_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON49), (kal_uint32)(val), (kal_uint32)(PMIC_VIO18_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VIO18_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vio18_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON49), (&val), 
(kal_uint32)(PMIC_QI_VIO18_MODE_MASK), (kal_uint32)(PMIC_QI_VIO18_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vio18_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON49), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO18_STBTD_MASK), (kal_uint32)(PMIC_RG_VIO18_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio18_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON49), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO18_EN_MASK), (kal_uint32)(PMIC_RG_VIO18_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vio18_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON49), (&val), (kal_uint32)(PMIC_QI_VIO18_EN_MASK), (kal_uint32)(PMIC_QI_VIO18_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vio18_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON50), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO18_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VIO18_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vio18_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON50), (kal_uint32)(val), (kal_uint32)(PMIC_VIO18_ON_CTRL_MASK), (kal_uint32)(PMIC_VIO18_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio18_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON50), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO18_OCFB_MASK), (kal_uint32)(PMIC_RG_VIO18_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio18_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON50), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO18_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VIO18_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vio18_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(DIGLDO_CON50), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VIO18_CAL_MASK), (kal_uint32)(PMIC_RG_VIO18_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_vcamd_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON51), (kal_uint32)(val), (kal_uint32)(PMIC_VCAMD_LP_SEL_MASK), (kal_uint32)(PMIC_VCAMD_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vcamd_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON51), (kal_uint32)(val), (kal_uint32)(PMIC_VCAMD_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VCAMD_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcamd_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON51), (&val), (kal_uint32)(PMIC_QI_VCAMD_MODE_MASK), (kal_uint32)(PMIC_QI_VCAMD_MODE_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vcamd_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON51), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_STBTD_MASK), (kal_uint32)(PMIC_RG_VCAMD_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcamd_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON51), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_EN_MASK), (kal_uint32)(PMIC_RG_VCAMD_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcamd_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON51), (&val), (kal_uint32)(PMIC_QI_VCAMD_EN_MASK), (kal_uint32)(PMIC_QI_VCAMD_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vcamd_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON52), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VCAMD_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vcamd_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); 
ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON52), (kal_uint32)(val), (kal_uint32)(PMIC_VCAMD_ON_CTRL_MASK), (kal_uint32)(PMIC_VCAMD_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcamd_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON52), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_OCFB_MASK), (kal_uint32)(PMIC_RG_VCAMD_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcamd_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON52), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VCAMD_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcamd_vosel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON52), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_VOSEL_MASK), (kal_uint32)(PMIC_RG_VCAMD_VOSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcamd_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON52), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAMD_CAL_MASK), (kal_uint32)(PMIC_RG_VCAMD_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_vcam_io_lp_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON53), (kal_uint32)(val), (kal_uint32)(PMIC_VCAM_IO_LP_SEL_MASK), (kal_uint32)(PMIC_VCAM_IO_LP_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_vcam_io_lp_mode_set(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON53), (kal_uint32)(val), (kal_uint32)(PMIC_VCAM_IO_LP_MODE_SET_MASK), (kal_uint32)(PMIC_VCAM_IO_LP_MODE_SET_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcam_io_mode(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON53), (&val), (kal_uint32)(PMIC_QI_VCAM_IO_MODE_MASK), (kal_uint32)(PMIC_QI_VCAM_IO_MODE_SHIFT) ); pmic_unlock(); return val; } void 
upmu_set_rg_vcam_io_stbtd(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON53), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_IO_STBTD_MASK), (kal_uint32)(PMIC_RG_VCAM_IO_STBTD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_io_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON53), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_IO_EN_MASK), (kal_uint32)(PMIC_RG_VCAM_IO_EN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_qi_vcam_io_en(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(DIGLDO_CON53), (&val), (kal_uint32)(PMIC_QI_VCAM_IO_EN_MASK), (kal_uint32)(PMIC_QI_VCAM_IO_EN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_vcam_io_ndis_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON54), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_IO_NDIS_EN_MASK), (kal_uint32)(PMIC_RG_VCAM_IO_NDIS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_vcam_io_on_ctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON54), (kal_uint32)(val), (kal_uint32)(PMIC_VCAM_IO_ON_CTRL_MASK), (kal_uint32)(PMIC_VCAM_IO_ON_CTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_io_ocfb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON54), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_IO_OCFB_MASK), (kal_uint32)(PMIC_RG_VCAM_IO_OCFB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_io_stb_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON54), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_IO_STB_SEL_MASK), (kal_uint32)(PMIC_RG_VCAM_IO_STB_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vcam_io_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(DIGLDO_CON54), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VCAM_IO_CAL_MASK), 
(kal_uint32)(PMIC_RG_VCAM_IO_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_addr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_ADDR_MASK), (kal_uint32)(PMIC_RG_EFUSE_ADDR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_prog(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_PROG_MASK), (kal_uint32)(PMIC_RG_EFUSE_PROG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_EN_MASK), (kal_uint32)(PMIC_RG_EFUSE_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_pkey(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_PKEY_MASK), (kal_uint32)(PMIC_RG_EFUSE_PKEY_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_rd_trig(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_RD_TRIG_MASK), (kal_uint32)(PMIC_RG_EFUSE_RD_TRIG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_prog_src(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_PROG_SRC_MASK), (kal_uint32)(PMIC_RG_EFUSE_PROG_SRC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_skip_efuse_out(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SKIP_EFUSE_OUT_MASK), (kal_uint32)(PMIC_RG_SKIP_EFUSE_OUT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_rd_rdy_bypass(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON5), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_RD_RDY_BYPASS_MASK), (kal_uint32)(PMIC_RG_RD_RDY_BYPASS_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rg_efuse_rd_ack(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_CON6), (&val), (kal_uint32)(PMIC_RG_EFUSE_RD_ACK_MASK), (kal_uint32)(PMIC_RG_EFUSE_RD_ACK_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_busy(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_CON6), (&val), (kal_uint32)(PMIC_RG_EFUSE_BUSY_MASK), (kal_uint32)(PMIC_RG_EFUSE_BUSY_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_efuse_val_0_15(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_0_15), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_0_15_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_0_15_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_16_31(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_16_31), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_16_31_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_16_31_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_32_47(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_32_47), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_32_47_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_32_47_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_48_63(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_48_63), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_48_63_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_48_63_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_64_79(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_64_79), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_64_79_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_64_79_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_efuse_val_80_95(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_80_95), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_80_95_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_80_95_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_96_111(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_96_111), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_96_111_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_96_111_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_112_127(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_112_127), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_112_127_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_112_127_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_128_143(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_128_143), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_128_143_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_128_143_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_144_159(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_144_159), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_144_159_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_144_159_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_160_175(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_160_175), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_160_175_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_160_175_SHIFT) ); pmic_unlock(); } void upmu_set_rg_efuse_val_176_191(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_VAL_176_191), (kal_uint32)(val), (kal_uint32)(PMIC_RG_EFUSE_VAL_176_191_MASK), (kal_uint32)(PMIC_RG_EFUSE_VAL_176_191_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rg_efuse_dout_0_15(void) { kal_uint32 ret=0; kal_uint32 val=0; 
pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_0_15), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_0_15_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_0_15_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_16_31(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_16_31), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_16_31_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_16_31_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_32_47(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_32_47), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_32_47_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_32_47_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_48_63(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_48_63), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_48_63_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_48_63_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_64_79(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_64_79), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_64_79_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_64_79_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_80_95(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_80_95), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_80_95_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_80_95_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_96_111(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_96_111), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_96_111_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_96_111_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_112_127(void) { kal_uint32 ret=0; kal_uint32 val=0; 
pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_112_127), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_112_127_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_112_127_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_128_143(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_128_143), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_128_143_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_128_143_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_144_159(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_144_159), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_144_159_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_144_159_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_160_175(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_160_175), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_160_175_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_160_175_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_efuse_dout_176_191(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(EFUSE_DOUT_176_191), (&val), (kal_uint32)(PMIC_RG_EFUSE_DOUT_176_191_MASK), (kal_uint32)(PMIC_RG_EFUSE_DOUT_176_191_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_otp_pa(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OTP_PA_MASK), (kal_uint32)(PMIC_RG_OTP_PA_SHIFT) ); pmic_unlock(); } void upmu_set_rg_otp_pdin(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OTP_PDIN_MASK), (kal_uint32)(PMIC_RG_OTP_PDIN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_otp_ptm(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(EFUSE_CON9), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_OTP_PTM_MASK), (kal_uint32)(PMIC_RG_OTP_PTM_SHIFT) ); pmic_unlock(); } void upmu_set_mix_eosc32_opt(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_EOSC32_OPT_MASK), (kal_uint32)(PMIC_MIX_EOSC32_OPT_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_mix_xosc32_stp_cpdtb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(RTC_MIX_CON0), (&val), (kal_uint32)(PMIC_MIX_XOSC32_STP_CPDTB_MASK), (kal_uint32)(PMIC_MIX_XOSC32_STP_CPDTB_SHIFT) ); pmic_unlock(); return val; } void upmu_set_mix_xosc32_stp_pwdb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_XOSC32_STP_PWDB_MASK), (kal_uint32)(PMIC_MIX_XOSC32_STP_PWDB_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_mix_xosc32_stp_lpdtb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(RTC_MIX_CON0), (&val), (kal_uint32)(PMIC_MIX_XOSC32_STP_LPDTB_MASK), (kal_uint32)(PMIC_MIX_XOSC32_STP_LPDTB_SHIFT) ); pmic_unlock(); return val; } void upmu_set_mix_xosc32_stp_lpden(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_XOSC32_STP_LPDEN_MASK), (kal_uint32)(PMIC_MIX_XOSC32_STP_LPDEN_SHIFT) ); pmic_unlock(); } void upmu_set_mix_xosc32_stp_lpdrst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_XOSC32_STP_LPDRST_MASK), (kal_uint32)(PMIC_MIX_XOSC32_STP_LPDRST_SHIFT) ); pmic_unlock(); } void upmu_set_mix_xosc32_stp_cali(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_XOSC32_STP_CALI_MASK), (kal_uint32)(PMIC_MIX_XOSC32_STP_CALI_SHIFT) ); pmic_unlock(); } void 
upmu_set_stmp_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_STMP_MODE_MASK), (kal_uint32)(PMIC_STMP_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_mix_eosc32_stp_chop_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_EOSC32_STP_CHOP_EN_MASK), (kal_uint32)(PMIC_MIX_EOSC32_STP_CHOP_EN_SHIFT) ); pmic_unlock(); } void upmu_set_mix_dcxo_stp_lvsh_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_DCXO_STP_LVSH_EN_MASK), (kal_uint32)(PMIC_MIX_DCXO_STP_LVSH_EN_SHIFT) ); pmic_unlock(); } void upmu_set_mix_pmu_stp_ddlo_vrtc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_PMU_STP_DDLO_VRTC_MASK), (kal_uint32)(PMIC_MIX_PMU_STP_DDLO_VRTC_SHIFT) ); pmic_unlock(); } void upmu_set_mix_pmu_stp_ddlo_vrtc_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_PMU_STP_DDLO_VRTC_EN_MASK), (kal_uint32)(PMIC_MIX_PMU_STP_DDLO_VRTC_EN_SHIFT) ); pmic_unlock(); } void upmu_set_mix_rtc_stp_xosc32_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_RTC_STP_XOSC32_ENB_MASK), (kal_uint32)(PMIC_MIX_RTC_STP_XOSC32_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_mix_dcxo_stp_test_deglitch_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_DCXO_STP_TEST_DEGLITCH_MODE_MASK), (kal_uint32)(PMIC_MIX_DCXO_STP_TEST_DEGLITCH_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_mix_eosc32_stp_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); 
ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_EOSC32_STP_RSV_MASK), (kal_uint32)(PMIC_MIX_EOSC32_STP_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_mix_eosc32_vct_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_EOSC32_VCT_EN_MASK), (kal_uint32)(PMIC_MIX_EOSC32_VCT_EN_SHIFT) ); pmic_unlock(); } void upmu_set_mix_stp_bbwakeup(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_STP_BBWAKEUP_MASK), (kal_uint32)(PMIC_MIX_STP_BBWAKEUP_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_mix_stp_rtc_ddlo(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(RTC_MIX_CON1), (&val), (kal_uint32)(PMIC_MIX_STP_RTC_DDLO_MASK), (kal_uint32)(PMIC_MIX_STP_RTC_DDLO_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_mix_rtc_xosc32_enb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(RTC_MIX_CON1), (&val), (kal_uint32)(PMIC_MIX_RTC_XOSC32_ENB_MASK), (kal_uint32)(PMIC_MIX_RTC_XOSC32_ENB_SHIFT) ); pmic_unlock(); return val; } void upmu_set_mix_efuse_xosc32_enb_opt(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(RTC_MIX_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_MIX_EFUSE_XOSC32_ENB_OPT_MASK), (kal_uint32)(PMIC_MIX_EFUSE_XOSC32_ENB_OPT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vcfg(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VCFG_MASK), (kal_uint32)(PMIC_RG_AUDULL_VCFG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vupg(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VUPG_MASK), 
(kal_uint32)(PMIC_RG_AUDULL_VUPG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vpwdb_pga(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VPWDB_PGA_MASK), (kal_uint32)(PMIC_RG_AUDULL_VPWDB_PGA_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vpwdb_adc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VPWDB_ADC_MASK), (kal_uint32)(PMIC_RG_AUDULL_VPWDB_ADC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vadc_denb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VADC_DENB_MASK), (kal_uint32)(PMIC_RG_AUDULL_VADC_DENB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vadc_dvref_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VADC_DVREF_CAL_MASK), (kal_uint32)(PMIC_RG_AUDULL_VADC_DVREF_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vref24_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VREF24_EN_MASK), (kal_uint32)(PMIC_RG_AUDULL_VREF24_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vcm14_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VCM14_EN_MASK), (kal_uint32)(PMIC_RG_AUDULL_VCM14_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vcmsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VCMSEL_MASK), (kal_uint32)(PMIC_RG_AUDULL_VCMSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_chs_en(kal_uint32 val) { kal_uint32 ret=0; 
pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_CHS_EN_MASK), (kal_uint32)(PMIC_RG_AUDULL_CHS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audull_vcali(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULL_VCALI_MASK), (kal_uint32)(PMIC_RG_AUDULL_VCALI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vcfg(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VCFG_MASK), (kal_uint32)(PMIC_RG_AUDULR_VCFG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vupg(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VUPG_MASK), (kal_uint32)(PMIC_RG_AUDULR_VUPG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vpwdb_pga(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VPWDB_PGA_MASK), (kal_uint32)(PMIC_RG_AUDULR_VPWDB_PGA_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vpwdb_adc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VPWDB_ADC_MASK), (kal_uint32)(PMIC_RG_AUDULR_VPWDB_ADC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vadc_denb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VADC_DENB_MASK), (kal_uint32)(PMIC_RG_AUDULR_VADC_DENB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vadc_dvref_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VADC_DVREF_CAL_MASK), 
(kal_uint32)(PMIC_RG_AUDULR_VADC_DVREF_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vref24_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VREF24_EN_MASK), (kal_uint32)(PMIC_RG_AUDULR_VREF24_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vcm14_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VCM14_EN_MASK), (kal_uint32)(PMIC_RG_AUDULR_VCM14_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vcmsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VCMSEL_MASK), (kal_uint32)(PMIC_RG_AUDULR_VCMSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_chs_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_CHS_EN_MASK), (kal_uint32)(PMIC_RG_AUDULR_CHS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audulr_vcali(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDULR_VCALI_MASK), (kal_uint32)(PMIC_RG_AUDULR_VCALI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud_igbias_cali(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD_IGBIAS_CALI_MASK), (kal_uint32)(PMIC_RG_AUD_IGBIAS_CALI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aud_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUD_RSV_MASK), (kal_uint32)(PMIC_RG_AUD_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_amuter(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON4), 
(kal_uint32)(val), (kal_uint32)(PMIC_RG_AMUTER_MASK), (kal_uint32)(PMIC_RG_AMUTER_SHIFT) ); pmic_unlock(); } void upmu_set_rg_amutel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AMUTEL_MASK), (kal_uint32)(PMIC_RG_AMUTEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adacl_pwdb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADACL_PWDB_MASK), (kal_uint32)(PMIC_RG_ADACL_PWDB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adacr_pwdb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADACR_PWDB_MASK), (kal_uint32)(PMIC_RG_ADACR_PWDB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_abias_pwdb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ABIAS_PWDB_MASK), (kal_uint32)(PMIC_RG_ABIAS_PWDB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aoutl_pwdb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AOUTL_PWDB_MASK), (kal_uint32)(PMIC_RG_AOUTL_PWDB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_aoutr_pwdb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AOUTR_PWDB_MASK), (kal_uint32)(PMIC_RG_AOUTR_PWDB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_acali(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ACALI_MASK), (kal_uint32)(PMIC_RG_ACALI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_apgr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_APGR_MASK), 
(kal_uint32)(PMIC_RG_APGR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_apgl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_APGL_MASK), (kal_uint32)(PMIC_RG_APGL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_abuf_bias(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ABUF_BIAS_MASK), (kal_uint32)(PMIC_RG_ABUF_BIAS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_abuf_inshort(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ABUF_INSHORT_MASK), (kal_uint32)(PMIC_RG_ABUF_INSHORT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ahfmode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AHFMODE_MASK), (kal_uint32)(PMIC_RG_AHFMODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adacck_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADACCK_EN_MASK), (kal_uint32)(PMIC_RG_ADACCK_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_dacref(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DACREF_MASK), (kal_uint32)(PMIC_RG_DACREF_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adepopx_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADEPOPX_EN_MASK), (kal_uint32)(PMIC_RG_ADEPOPX_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adepopx(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADEPOPX_MASK), (kal_uint32)(PMIC_RG_ADEPOPX_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_depop_vcm_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DEPOP_VCM_EN_MASK), (kal_uint32)(PMIC_RG_DEPOP_VCM_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_depop_vcmsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DEPOP_VCMSEL_MASK), (kal_uint32)(PMIC_RG_DEPOP_VCMSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_depop_cursel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DEPOP_CURSEL_MASK), (kal_uint32)(PMIC_RG_DEPOP_CURSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_chargeoption_depop(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CHARGEOPTION_DEPOP_MASK), (kal_uint32)(PMIC_RG_CHARGEOPTION_DEPOP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_avcmgen_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AVCMGEN_EN_MASK), (kal_uint32)(PMIC_RG_AVCMGEN_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auddl_vref24_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDDL_VREF24_EN_MASK), (kal_uint32)(PMIC_RG_AUDDL_VREF24_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_abirsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ABIRSV_MASK), (kal_uint32)(PMIC_RG_ABIRSV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbuf_float(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBUF_FLOAT_MASK), 
(kal_uint32)(PMIC_RG_VBUF_FLOAT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vdpg(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VDPG_MASK), (kal_uint32)(PMIC_RG_VDPG_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbuf_pwdb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBUF_PWDB_MASK), (kal_uint32)(PMIC_RG_VBUF_PWDB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbuf_bias(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBUF_BIAS_MASK), (kal_uint32)(PMIC_RG_VBUF_BIAS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vdepop(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VDEPOP_MASK), (kal_uint32)(PMIC_RG_VDEPOP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_v2spk(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_V2SPK_MASK), (kal_uint32)(PMIC_RG_V2SPK_SHIFT) ); pmic_unlock(); } void upmu_set_rg_hsoutstbenh(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_RG_HSOUTSTBENH_MASK), (kal_uint32)(PMIC_RG_HSOUTSTBENH_SHIFT) ); pmic_unlock(); } void upmu_set_audtop_con8_rsv_0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_AUDTOP_CON8_RSV_0_MASK), (kal_uint32)(PMIC_AUDTOP_CON8_RSV_0_SHIFT) ); pmic_unlock(); } void upmu_set_rg_clksq_monen(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CLKSQ_MONEN_MASK), (kal_uint32)(PMIC_RG_CLKSQ_MONEN_SHIFT) ); 
pmic_unlock(); } void upmu_set_rg_auddigmicen(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDDIGMICEN_MASK), (kal_uint32)(PMIC_RG_AUDDIGMICEN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audpwdbmicbias(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDPWDBMICBIAS_MASK), (kal_uint32)(PMIC_RG_AUDPWDBMICBIAS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auddigmicpduty(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDDIGMICPDUTY_MASK), (kal_uint32)(PMIC_RG_AUDDIGMICPDUTY_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auddigmicnduty(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDDIGMICNDUTY_MASK), (kal_uint32)(PMIC_RG_AUDDIGMICNDUTY_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auddigmicbias(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDDIGMICBIAS_MASK), (kal_uint32)(PMIC_RG_AUDDIGMICBIAS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audmicbiasvref(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDMICBIASVREF_MASK), (kal_uint32)(PMIC_RG_AUDMICBIASVREF_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audsparevmic(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDSPAREVMIC_MASK), (kal_uint32)(PMIC_RG_AUDSPAREVMIC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbirx_zcd_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON9), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_VBIRX_ZCD_EN_MASK), (kal_uint32)(PMIC_RG_VBIRX_ZCD_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbirx_zcd_cali(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBIRX_ZCD_CALI_MASK), (kal_uint32)(PMIC_RG_VBIRX_ZCD_CALI_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbirx_zcd_hys_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUDTOP_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBIRX_ZCD_HYS_ENB_MASK), (kal_uint32)(PMIC_RG_VBIRX_ZCD_HYS_ENB_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rg_vbirx_zcd_status(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUDTOP_CON9), (&val), (kal_uint32)(PMIC_RG_VBIRX_ZCD_STATUS_MASK), (kal_uint32)(PMIC_RG_VBIRX_ZCD_STATUS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_batsns(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC0), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_BATSNS_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_BATSNS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_batsns(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC0), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_BATSNS_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_BATSNS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_isense(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC1), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_ISENSE_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_ISENSE_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_isense(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC1), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_ISENSE_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_ISENSE_SHIFT) ); pmic_unlock(); return val; 
} kal_uint32 upmu_get_rg_adc_out_vcdt(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC2), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_VCDT_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_VCDT_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_vcdt(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC2), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_VCDT_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_VCDT_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_baton1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC3), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_BATON1_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_BATON1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_baton1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC3), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_BATON1_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_BATON1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_thr_sense1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC4), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_THR_SENSE1_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_THR_SENSE1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_thr_sense1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC4), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_THR_SENSE1_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_THR_SENSE1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_thr_sense2(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC5), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_THR_SENSE2_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_THR_SENSE2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_thr_sense2(void) { kal_uint32 
ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC5), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_THR_SENSE2_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_THR_SENSE2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_baton2(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC6), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_BATON2_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_BATON2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_baton2(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC6), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_BATON2_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_BATON2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_ch5(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC7), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_CH5_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_CH5_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_ch5(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC7), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_CH5_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_CH5_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_wakeup_pchr(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC8), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_WAKEUP_PCHR_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_WAKEUP_PCHR_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_wakeup_pchr(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC8), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_WAKEUP_PCHR_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_WAKEUP_PCHR_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_wakeup_swchr(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( 
(kal_uint32)(AUXADC_ADC9), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_WAKEUP_SWCHR_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_WAKEUP_SWCHR_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_wakeup_swchr(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC9), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_WAKEUP_SWCHR_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_WAKEUP_SWCHR_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_lbat(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC10), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_LBAT_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_LBAT_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_lbat(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC10), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_LBAT_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_LBAT_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_ch6(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC11), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_CH6_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_CH6_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_ch6(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC11), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_CH6_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_CH6_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_gps(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC12), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_GPS_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_GPS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_gps(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC13), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_GPS_MASK), 
(kal_uint32)(PMIC_RG_ADC_OUT_GPS_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_gps_lsb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC14), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_GPS_LSB_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_GPS_LSB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_md(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC15), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_MD_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_MD_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_md_lsb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC16), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_MD_LSB_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_MD_LSB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_md(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC16), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_MD_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_MD_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_int(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC17), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_INT_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_INT_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_rdy_int(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC17), (&val), (kal_uint32)(PMIC_RG_ADC_RDY_INT_MASK), (kal_uint32)(PMIC_RG_ADC_RDY_INT_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_rsv1(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC18), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_RSV1_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_RSV1_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_rsv2(void) { kal_uint32 
ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC19), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_RSV2_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_RSV2_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_out_rsv3(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_ADC20), (&val), (kal_uint32)(PMIC_RG_ADC_OUT_RSV3_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_RSV3_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_sw_gain_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_RSV1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SW_GAIN_TRIM_MASK), (kal_uint32)(PMIC_RG_SW_GAIN_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_sw_offset_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_RSV2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SW_OFFSET_TRIM_MASK), (kal_uint32)(PMIC_RG_SW_OFFSET_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_pwdb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_PWDB_MASK), (kal_uint32)(PMIC_RG_ADC_PWDB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_pwdb_swctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_PWDB_SWCTRL_MASK), (kal_uint32)(PMIC_RG_ADC_PWDB_SWCTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_cali_rate(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_CALI_RATE_MASK), (kal_uint32)(PMIC_RG_ADC_CALI_RATE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_cali_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_CALI_EN_MASK), (kal_uint32)(PMIC_RG_ADC_CALI_EN_SHIFT) ); 
pmic_unlock(); } void upmu_set_rg_adc_cali_force(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_CALI_FORCE_MASK), (kal_uint32)(PMIC_RG_ADC_CALI_FORCE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_autorst_range(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_AUTORST_RANGE_MASK), (kal_uint32)(PMIC_RG_ADC_AUTORST_RANGE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_autorst_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_AUTORST_EN_MASK), (kal_uint32)(PMIC_RG_ADC_AUTORST_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_latch_edge(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_LATCH_EDGE_MASK), (kal_uint32)(PMIC_RG_ADC_LATCH_EDGE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_filter_order(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_FILTER_ORDER_MASK), (kal_uint32)(PMIC_RG_ADC_FILTER_ORDER_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_swctrl_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_SWCTRL_EN_MASK), (kal_uint32)(PMIC_RG_ADC_SWCTRL_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adcin_vsen_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADCIN_VSEN_EN_MASK), (kal_uint32)(PMIC_RG_ADCIN_VSEN_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adcin_vsen_mux_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON1), 
(kal_uint32)(val), (kal_uint32)(PMIC_RG_ADCIN_VSEN_MUX_EN_MASK), (kal_uint32)(PMIC_RG_ADCIN_VSEN_MUX_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adcin_vbat_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADCIN_VBAT_EN_MASK), (kal_uint32)(PMIC_RG_ADCIN_VBAT_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adcin_chr_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADCIN_CHR_EN_MASK), (kal_uint32)(PMIC_RG_ADCIN_CHR_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_chsel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_CHSEL_MASK), (kal_uint32)(PMIC_RG_AUXADC_CHSEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_debt_max(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_DEBT_MAX_MASK), (kal_uint32)(PMIC_RG_LBAT_DEBT_MAX_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_debt_min(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_DEBT_MIN_MASK), (kal_uint32)(PMIC_RG_LBAT_DEBT_MIN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_det_prd_15_0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_DET_PRD_15_0_MASK), (kal_uint32)(PMIC_RG_LBAT_DET_PRD_15_0_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_det_prd_19_16(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_DET_PRD_19_16_MASK), (kal_uint32)(PMIC_RG_LBAT_DET_PRD_19_16_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_volt_max(kal_uint32 val) { 
kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_VOLT_MAX_MASK), (kal_uint32)(PMIC_RG_LBAT_VOLT_MAX_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_irq_en_max(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_IRQ_EN_MAX_MASK), (kal_uint32)(PMIC_RG_LBAT_IRQ_EN_MAX_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_en_max(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_EN_MAX_MASK), (kal_uint32)(PMIC_RG_LBAT_EN_MAX_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rg_lbat_max_irq_b(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_CON5), (&val), (kal_uint32)(PMIC_RG_LBAT_MAX_IRQ_B_MASK), (kal_uint32)(PMIC_RG_LBAT_MAX_IRQ_B_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_lbat_volt_min(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_VOLT_MIN_MASK), (kal_uint32)(PMIC_RG_LBAT_VOLT_MIN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_irq_en_min(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_IRQ_EN_MIN_MASK), (kal_uint32)(PMIC_RG_LBAT_IRQ_EN_MIN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_lbat_en_min(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_RG_LBAT_EN_MIN_MASK), (kal_uint32)(PMIC_RG_LBAT_EN_MIN_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_rg_lbat_min_irq_b(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_CON6), (&val), (kal_uint32)(PMIC_RG_LBAT_MIN_IRQ_B_MASK), 
(kal_uint32)(PMIC_RG_LBAT_MIN_IRQ_B_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_lbat_debounce_count_max(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_CON7), (&val), (kal_uint32)(PMIC_RG_LBAT_DEBOUNCE_COUNT_MAX_MASK), (kal_uint32)(PMIC_RG_LBAT_DEBOUNCE_COUNT_MAX_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_lbat_debounce_count_min(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_CON8), (&val), (kal_uint32)(PMIC_RG_LBAT_DEBOUNCE_COUNT_MIN_MASK), (kal_uint32)(PMIC_RG_LBAT_DEBOUNCE_COUNT_MIN_SHIFT) ); pmic_unlock(); return val; } void upmu_set_rg_data_reuse_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DATA_REUSE_SEL_MASK), (kal_uint32)(PMIC_RG_DATA_REUSE_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_auxadc_bist_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUXADC_BIST_ENB_MASK), (kal_uint32)(PMIC_RG_AUXADC_BIST_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_osr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OSR_MASK), (kal_uint32)(PMIC_RG_OSR_SHIFT) ); pmic_unlock(); } void upmu_set_rg_osr_gps(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_RG_OSR_GPS_MASK), (kal_uint32)(PMIC_RG_OSR_GPS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_trim_ch7_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TRIM_CH7_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_TRIM_CH7_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_trim_ch6_sel(kal_uint32 val) { kal_uint32 
ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TRIM_CH6_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_TRIM_CH6_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_trim_ch5_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TRIM_CH5_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_TRIM_CH5_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_trim_ch4_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TRIM_CH4_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_TRIM_CH4_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_trim_ch3_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TRIM_CH3_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_TRIM_CH3_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_trim_ch2_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TRIM_CH2_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_TRIM_CH2_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_trim_ch0_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TRIM_CH0_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_TRIM_CH0_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbuf_calen(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBUF_CALEN_MASK), (kal_uint32)(PMIC_RG_VBUF_CALEN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbuf_exten(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBUF_EXTEN_MASK), 
(kal_uint32)(PMIC_RG_VBUF_EXTEN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbuf_byp(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBUF_BYP_MASK), (kal_uint32)(PMIC_RG_VBUF_BYP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vbuf_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VBUF_EN_MASK), (kal_uint32)(PMIC_RG_VBUF_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_source_lbat_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_RG_SOURCE_LBAT_SEL_MASK), (kal_uint32)(PMIC_RG_SOURCE_LBAT_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_efuse_gain_ch0_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_EFUSE_GAIN_CH0_TRIM_MASK), (kal_uint32)(PMIC_EFUSE_GAIN_CH0_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_efuse_offset_ch0_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON13), (kal_uint32)(val), (kal_uint32)(PMIC_EFUSE_OFFSET_CH0_TRIM_MASK), (kal_uint32)(PMIC_EFUSE_OFFSET_CH0_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_efuse_gain_ch4_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON14), (kal_uint32)(val), (kal_uint32)(PMIC_EFUSE_GAIN_CH4_TRIM_MASK), (kal_uint32)(PMIC_EFUSE_GAIN_CH4_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_efuse_offset_ch4_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_EFUSE_OFFSET_CH4_TRIM_MASK), (kal_uint32)(PMIC_EFUSE_OFFSET_CH4_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_efuse_gain_ch7_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(AUXADC_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_EFUSE_GAIN_CH7_TRIM_MASK), (kal_uint32)(PMIC_EFUSE_GAIN_CH7_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_efuse_offset_ch7_trim(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON17), (kal_uint32)(val), (kal_uint32)(PMIC_EFUSE_OFFSET_CH7_TRIM_MASK), (kal_uint32)(PMIC_EFUSE_OFFSET_CH7_TRIM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_ibias(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_IBIAS_MASK), (kal_uint32)(PMIC_RG_ADC_IBIAS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_rst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_RST_MASK), (kal_uint32)(PMIC_RG_ADC_RST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_lp_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_LP_EN_MASK), (kal_uint32)(PMIC_RG_ADC_LP_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_input_short(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_INPUT_SHORT_MASK), (kal_uint32)(PMIC_RG_ADC_INPUT_SHORT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_chopper_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_CHOPPER_EN_MASK), (kal_uint32)(PMIC_RG_ADC_CHOPPER_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vpwdb_adc(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VPWDB_ADC_MASK), (kal_uint32)(PMIC_RG_VPWDB_ADC_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vref18_en(kal_uint32 val) { kal_uint32 ret=0; 
pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VREF18_EN_MASK), (kal_uint32)(PMIC_RG_VREF18_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_chs_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_CHS_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_CHS_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_dvref_cal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_DVREF_CAL_MASK), (kal_uint32)(PMIC_RG_ADC_DVREF_CAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_denb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON18), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_DENB_MASK), (kal_uint32)(PMIC_RG_ADC_DENB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_sleep_mode_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_SLEEP_MODE_EN_MASK), (kal_uint32)(PMIC_RG_ADC_SLEEP_MODE_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_gps_status(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_GPS_STATUS_MASK), (kal_uint32)(PMIC_RG_ADC_GPS_STATUS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_rsv_bit(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_RSV_BIT_MASK), (kal_uint32)(PMIC_RG_ADC_RSV_BIT_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_test_mode_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TEST_MODE_EN_MASK), (kal_uint32)(PMIC_RG_ADC_TEST_MODE_EN_SHIFT) ); pmic_unlock(); } void 
upmu_set_rg_adc_test_out_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TEST_OUT_SEL_MASK), (kal_uint32)(PMIC_RG_ADC_TEST_OUT_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_deci_bypass_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DECI_BYPASS_EN_MASK), (kal_uint32)(PMIC_RG_DECI_BYPASS_EN_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_clk_aon(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_CLK_AON_MASK), (kal_uint32)(PMIC_RG_ADC_CLK_AON_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_deci_force(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_DECI_FORCE_MASK), (kal_uint32)(PMIC_RG_ADC_DECI_FORCE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_deci_gdly(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON19), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_DECI_GDLY_MASK), (kal_uint32)(PMIC_RG_ADC_DECI_GDLY_SHIFT) ); pmic_unlock(); } void upmu_set_rg_md_rqst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON20), (kal_uint32)(val), (kal_uint32)(PMIC_RG_MD_RQST_MASK), (kal_uint32)(PMIC_RG_MD_RQST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_gps_rqst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON21), (kal_uint32)(val), (kal_uint32)(PMIC_RG_GPS_RQST_MASK), (kal_uint32)(PMIC_RG_GPS_RQST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ap_rqst_list(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON22), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AP_RQST_LIST_MASK), 
(kal_uint32)(PMIC_RG_AP_RQST_LIST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ap_rqst(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON22), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AP_RQST_MASK), (kal_uint32)(PMIC_RG_AP_RQST_SHIFT) ); pmic_unlock(); } void upmu_set_rg_ap_rqst_list_rsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON23), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AP_RQST_LIST_RSV_MASK), (kal_uint32)(PMIC_RG_AP_RQST_LIST_RSV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_out_trim_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_OUT_TRIM_ENB_MASK), (kal_uint32)(PMIC_RG_ADC_OUT_TRIM_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_trim_comp(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_TRIM_COMP_MASK), (kal_uint32)(PMIC_RG_ADC_TRIM_COMP_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_2s_comp_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_2S_COMP_ENB_MASK), (kal_uint32)(PMIC_RG_ADC_2S_COMP_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_cic_out_raw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_CIC_OUT_RAW_MASK), (kal_uint32)(PMIC_RG_CIC_OUT_RAW_SHIFT) ); pmic_unlock(); } void upmu_set_rg_data_skip_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DATA_SKIP_ENB_MASK), (kal_uint32)(PMIC_RG_DATA_SKIP_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_data_skip_num(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(AUXADC_CON24), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DATA_SKIP_NUM_MASK), (kal_uint32)(PMIC_RG_DATA_SKIP_NUM_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_rev(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON25), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_REV_MASK), (kal_uint32)(PMIC_RG_ADC_REV_SHIFT) ); pmic_unlock(); } void upmu_set_rg_deci_gdly_sel_mode(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DECI_GDLY_SEL_MODE_MASK), (kal_uint32)(PMIC_RG_DECI_GDLY_SEL_MODE_SHIFT) ); pmic_unlock(); } void upmu_set_rg_deci_gdly_vref18_selb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_DECI_GDLY_VREF18_SELB_MASK), (kal_uint32)(PMIC_RG_DECI_GDLY_VREF18_SELB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_rsv1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_RSV1_MASK), (kal_uint32)(PMIC_RG_ADC_RSV1_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vref18_enb(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON26), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VREF18_ENB_MASK), (kal_uint32)(PMIC_RG_VREF18_ENB_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_md_status(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_MD_STATUS_MASK), (kal_uint32)(PMIC_RG_ADC_MD_STATUS_SHIFT) ); pmic_unlock(); } void upmu_set_rg_adc_rsv2(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_ADC_RSV2_MASK), (kal_uint32)(PMIC_RG_ADC_RSV2_SHIFT) ); pmic_unlock(); } void upmu_set_rg_vref18_enb_md(kal_uint32 val) { kal_uint32 
ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(AUXADC_CON27), (kal_uint32)(val), (kal_uint32)(PMIC_RG_VREF18_ENB_MD_MASK), (kal_uint32)(PMIC_RG_VREF18_ENB_MD_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audaccdetvthcal(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDACCDETVTHCAL_MASK), (kal_uint32)(PMIC_RG_AUDACCDETVTHCAL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audaccdetswctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDACCDETSWCTRL_MASK), (kal_uint32)(PMIC_RG_AUDACCDETSWCTRL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audaccdettvdet(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDACCDETTVDET_MASK), (kal_uint32)(PMIC_RG_AUDACCDETTVDET_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audaccdetvin1pulllow(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_RG_AUDACCDETVIN1PULLLOW_MASK), (kal_uint32)(PMIC_RG_AUDACCDETVIN1PULLLOW_SHIFT) ); pmic_unlock(); } void upmu_set_audaccdetauxadcswctrl(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_AUDACCDETAUXADCSWCTRL_MASK), (kal_uint32)(PMIC_AUDACCDETAUXADCSWCTRL_SHIFT) ); pmic_unlock(); } void upmu_set_audaccdetauxadcswctrl_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON0), (kal_uint32)(val), (kal_uint32)(PMIC_AUDACCDETAUXADCSWCTRL_SEL_MASK), (kal_uint32)(PMIC_AUDACCDETAUXADCSWCTRL_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_rg_audaccdetrsv(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON0), (kal_uint32)(val), 
(kal_uint32)(PMIC_RG_AUDACCDETRSV_MASK), (kal_uint32)(PMIC_RG_AUDACCDETRSV_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_EN_MASK), (kal_uint32)(PMIC_ACCDET_EN_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_seq_init(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON1), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_SEQ_INIT_MASK), (kal_uint32)(PMIC_ACCDET_SEQ_INIT_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_cmp_pwm_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_CMP_PWM_EN_MASK), (kal_uint32)(PMIC_ACCDET_CMP_PWM_EN_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_vth_pwm_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_VTH_PWM_EN_MASK), (kal_uint32)(PMIC_ACCDET_VTH_PWM_EN_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_mbias_pwm_en(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_MBIAS_PWM_EN_MASK), (kal_uint32)(PMIC_ACCDET_MBIAS_PWM_EN_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_cmp_pwm_idle(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_CMP_PWM_IDLE_MASK), (kal_uint32)(PMIC_ACCDET_CMP_PWM_IDLE_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_vth_pwm_idle(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_VTH_PWM_IDLE_MASK), (kal_uint32)(PMIC_ACCDET_VTH_PWM_IDLE_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_mbias_pwm_idle(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); 
ret=pmic_config_interface( (kal_uint32)(ACCDET_CON2), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_MBIAS_PWM_IDLE_MASK), (kal_uint32)(PMIC_ACCDET_MBIAS_PWM_IDLE_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_pwm_width(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON3), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_PWM_WIDTH_MASK), (kal_uint32)(PMIC_ACCDET_PWM_WIDTH_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_pwm_thresh(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON4), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_PWM_THRESH_MASK), (kal_uint32)(PMIC_ACCDET_PWM_THRESH_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_rise_delay(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_RISE_DELAY_MASK), (kal_uint32)(PMIC_ACCDET_RISE_DELAY_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_fall_delay(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON5), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_FALL_DELAY_MASK), (kal_uint32)(PMIC_ACCDET_FALL_DELAY_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_debounce0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON6), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_DEBOUNCE0_MASK), (kal_uint32)(PMIC_ACCDET_DEBOUNCE0_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_debounce1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON7), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_DEBOUNCE1_MASK), (kal_uint32)(PMIC_ACCDET_DEBOUNCE1_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_debounce2(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON8), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_DEBOUNCE2_MASK), (kal_uint32)(PMIC_ACCDET_DEBOUNCE2_SHIFT) ); pmic_unlock(); } void 
upmu_set_accdet_debounce3(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON9), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_DEBOUNCE3_MASK), (kal_uint32)(PMIC_ACCDET_DEBOUNCE3_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_ival_cur_in(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_IVAL_CUR_IN_MASK), (kal_uint32)(PMIC_ACCDET_IVAL_CUR_IN_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_ival_sam_in(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_IVAL_SAM_IN_MASK), (kal_uint32)(PMIC_ACCDET_IVAL_SAM_IN_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_ival_mem_in(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_IVAL_MEM_IN_MASK), (kal_uint32)(PMIC_ACCDET_IVAL_MEM_IN_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_ival_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON10), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_IVAL_SEL_MASK), (kal_uint32)(PMIC_ACCDET_IVAL_SEL_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_accdet_irq(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON11), (&val), (kal_uint32)(PMIC_ACCDET_IRQ_MASK), (kal_uint32)(PMIC_ACCDET_IRQ_SHIFT) ); pmic_unlock(); return val; } void upmu_set_accdet_irq_clr(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON11), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_IRQ_CLR_MASK), (kal_uint32)(PMIC_ACCDET_IRQ_CLR_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_test_mode0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), 
(kal_uint32)(PMIC_ACCDET_TEST_MODE0_MASK), (kal_uint32)(PMIC_ACCDET_TEST_MODE0_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_test_mode1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_TEST_MODE1_MASK), (kal_uint32)(PMIC_ACCDET_TEST_MODE1_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_test_mode2(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_TEST_MODE2_MASK), (kal_uint32)(PMIC_ACCDET_TEST_MODE2_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_test_mode3(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_TEST_MODE3_MASK), (kal_uint32)(PMIC_ACCDET_TEST_MODE3_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_test_mode4(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_TEST_MODE4_MASK), (kal_uint32)(PMIC_ACCDET_TEST_MODE4_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_test_mode5(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_TEST_MODE5_MASK), (kal_uint32)(PMIC_ACCDET_TEST_MODE5_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_pwm_sel(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_PWM_SEL_MASK), (kal_uint32)(PMIC_ACCDET_PWM_SEL_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_in_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_IN_SW_MASK), (kal_uint32)(PMIC_ACCDET_IN_SW_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_cmp_en_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); 
ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_CMP_EN_SW_MASK), (kal_uint32)(PMIC_ACCDET_CMP_EN_SW_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_vth_en_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_VTH_EN_SW_MASK), (kal_uint32)(PMIC_ACCDET_VTH_EN_SW_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_mbias_en_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_MBIAS_EN_SW_MASK), (kal_uint32)(PMIC_ACCDET_MBIAS_EN_SW_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_pwm_en_sw(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON12), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_PWM_EN_SW_MASK), (kal_uint32)(PMIC_ACCDET_PWM_EN_SW_SHIFT) ); pmic_unlock(); } kal_uint32 upmu_get_accdet_in(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_ACCDET_IN_MASK), (kal_uint32)(PMIC_ACCDET_IN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_accdet_cur_in(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_ACCDET_CUR_IN_MASK), (kal_uint32)(PMIC_ACCDET_CUR_IN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_accdet_sam_in(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_ACCDET_SAM_IN_MASK), (kal_uint32)(PMIC_ACCDET_SAM_IN_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_accdet_mem_in(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_ACCDET_MEM_IN_MASK), (kal_uint32)(PMIC_ACCDET_MEM_IN_SHIFT) ); pmic_unlock(); return val; } 
kal_uint32 upmu_get_accdet_state(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_ACCDET_STATE_MASK), (kal_uint32)(PMIC_ACCDET_STATE_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_accdet_mbias_clk(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_ACCDET_MBIAS_CLK_MASK), (kal_uint32)(PMIC_ACCDET_MBIAS_CLK_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_accdet_vth_clk(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_ACCDET_VTH_CLK_MASK), (kal_uint32)(PMIC_ACCDET_VTH_CLK_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_accdet_cmp_clk(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_ACCDET_CMP_CLK_MASK), (kal_uint32)(PMIC_ACCDET_CMP_CLK_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_da_audaccdetauxadcswctrl(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON13), (&val), (kal_uint32)(PMIC_DA_AUDACCDETAUXADCSWCTRL_MASK), (kal_uint32)(PMIC_DA_AUDACCDETAUXADCSWCTRL_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_accdet_cur_deb(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(ACCDET_CON14), (&val), (kal_uint32)(PMIC_ACCDET_CUR_DEB_MASK), (kal_uint32)(PMIC_ACCDET_CUR_DEB_SHIFT) ); pmic_unlock(); return val; } kal_uint32 upmu_get_rg_adc_deci_gdly(void) { kal_uint32 ret=0; kal_uint32 val=0; pmic_lock(); ret=pmic_read_interface( (kal_uint32)(AUXADC_CON19), (&val), (kal_uint32)(PMIC_RG_ADC_DECI_GDLY_MASK), (kal_uint32)(PMIC_RG_ADC_DECI_GDLY_SHIFT) ); pmic_unlock(); return val; } void upmu_set_accdet_rsv_con0(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( 
(kal_uint32)(ACCDET_CON15), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_RSV_CON0_MASK), (kal_uint32)(PMIC_ACCDET_RSV_CON0_SHIFT) ); pmic_unlock(); } void upmu_set_accdet_rsv_con1(kal_uint32 val) { kal_uint32 ret=0; pmic_lock(); ret=pmic_config_interface( (kal_uint32)(ACCDET_CON16), (kal_uint32)(val), (kal_uint32)(PMIC_ACCDET_RSV_CON1_MASK), (kal_uint32)(PMIC_ACCDET_RSV_CON1_SHIFT) ); pmic_unlock(); } //export some api for WMT use EXPORT_SYMBOL(upmu_set_vcn33_on_ctrl_bt); EXPORT_SYMBOL(upmu_set_vcn28_on_ctrl); EXPORT_SYMBOL(upmu_set_vcn_1v8_lp_mode_set); EXPORT_SYMBOL(upmu_set_vcn33_on_ctrl_wifi); EXPORT_SYMBOL(upmu_set_rg_vcn33_vosel);
gpl-2.0
SK4G/android_kernel_samsung_sidekick4g
drivers/video/matrox/matroxfb_crtc2.c
563
20373
/*
 *
 * Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200, G400 and G450.
 *
 * (c) 1998-2002 Petr Vandrovec <vandrove@vc.cvut.cz>
 *
 * Portions Copyright (c) 2001 Matrox Graphics Inc.
 *
 * Version: 1.65 2002/08/14
 *
 */

#include "matroxfb_maven.h"
#include "matroxfb_crtc2.h"
#include "matroxfb_misc.h"
#include "matroxfb_DAC1064.h"
#include <linux/matroxfb.h>
#include <linux/uaccess.h>

/* **************************************************** */

/* Video memory reserved for the CRTC2 (dualhead) framebuffer, in KiB. */
static int mem = 8192;

module_param(mem, int, 0);
MODULE_PARM_DESC(mem, "Memory size reserved for dualhead (default=8MB)");

/* **************************************************** */

/*
 * Set one palette entry for the secondary (CRTC2) head.  Only the first
 * 16 entries are stored; the packed pixel value is cached in m2info->cmap
 * in the layout described by the current var bitfields.  Returns 1 for
 * out-of-range regno, 0 on success.
 */
static int matroxfb_dh_setcolreg(unsigned regno, unsigned red, unsigned green,
		unsigned blue, unsigned transp, struct fb_info* info) {
	u_int32_t col;
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	if (regno >= 16)
		return 1;
	if (m2info->fbcon.var.grayscale) {
		/* gray = 0.30*R + 0.59*G + 0.11*B */
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
	}
	/* Scale 16-bit color components down to the widths of the bitfields. */
	red = CNVT_TOHW(red, m2info->fbcon.var.red.length);
	green = CNVT_TOHW(green, m2info->fbcon.var.green.length);
	blue = CNVT_TOHW(blue, m2info->fbcon.var.blue.length);
	transp = CNVT_TOHW(transp, m2info->fbcon.var.transp.length);
	col = (red << m2info->fbcon.var.red.offset) |
	      (green << m2info->fbcon.var.green.offset) |
	      (blue << m2info->fbcon.var.blue.offset) |
	      (transp << m2info->fbcon.var.transp.offset);
	switch (m2info->fbcon.var.bits_per_pixel) {
		case 16:
			/* replicate the 16-bit value into both halves of the word */
			m2info->cmap[regno] = col | (col << 16);
			break;
		case 32:
			m2info->cmap[regno] = col;
			break;
	}
	return 0;
#undef m2info
}

/*
 * Program the CRTC2 timing/control registers (0x3C10..0x3C4C) for the
 * given mode (15/16/32 bpp) and framebuffer start offset `pos`.
 * Mutates `mt` (halves vertical timings for interlace, rounds HTotal).
 * NOTE(review): register offsets and control bits follow the Matrox
 * CRTC2 register layout; the inline comments below are from the
 * original author and document individual bits.
 */
static void matroxfb_dh_restore(struct matroxfb_dh_fb_info* m2info,
		struct my_timming* mt, int mode, unsigned int pos) {
	u_int32_t tmp;
	u_int32_t datactl;
	struct matrox_fb_info *minfo = m2info->primary_dev;
	/* pixel-depth field of the CRTC2 control word */
	switch (mode) {
		case 15:
			tmp = 0x00200000;
			break;
		case 16:
			tmp = 0x00400000;
			break;
		/* case 32: */
		default:
			tmp = 0x00800000;
			break;
	}
	tmp |= 0x00000001;	/* enable CRTC2 */
	datactl = 0;
	if (minfo->outputs[1].src == MATROXFB_SRC_CRTC2) {
		if (minfo->devflags.g450dac) {
			tmp |= 0x00000006; /* source from secondary pixel PLL */
			/* no vidrst when in monitor mode */
			if (minfo->outputs[1].mode != MATROXFB_OUTPUT_MODE_MONITOR) {
				tmp |= 0xC0001000; /* Enable H/V vidrst */
			}
		} else {
			tmp |= 0x00000002; /* source from VDOCLK */
			tmp |= 0xC0000000; /* enable vvidrst & hvidrst */
			/* MGA TVO is our clock source */
		}
	} else if (minfo->outputs[0].src == MATROXFB_SRC_CRTC2) {
		tmp |= 0x00000004; /* source from pixclock */
		/* PIXPLL is our clock source */
	}
	if (minfo->outputs[0].src == MATROXFB_SRC_CRTC2) {
		tmp |= 0x00100000; /* connect CRTC2 to DAC */
	}
	if (mt->interlaced) {
		tmp |= 0x02000000; /* interlaced, second field is bigger, as G450 apparently ignores it */
		/* per-field vertical timings are half of the frame timings */
		mt->VDisplay >>= 1;
		mt->VSyncStart >>= 1;
		mt->VSyncEnd >>= 1;
		mt->VTotal >>= 1;
	}
	if ((mt->HTotal & 7) == 2) {
		datactl |= 0x00000010;
		mt->HTotal &= ~7;
	}
	tmp |= 0x10000000; /* 0x10000000 is VIDRST polarity */
	/* horizontal and vertical timing registers */
	mga_outl(0x3C14, ((mt->HDisplay - 8) << 16) | (mt->HTotal - 8));
	mga_outl(0x3C18, ((mt->HSyncEnd - 8) << 16) | (mt->HSyncStart - 8));
	mga_outl(0x3C1C, ((mt->VDisplay - 1) << 16) | (mt->VTotal - 1));
	mga_outl(0x3C20, ((mt->VSyncEnd - 1) << 16) | (mt->VSyncStart - 1));
	mga_outl(0x3C24, ((mt->VSyncStart) << 16) | (mt->HSyncStart)); /* preload */
	{
		u_int32_t linelen = m2info->fbcon.var.xres_virtual * (m2info->fbcon.var.bits_per_pixel >> 3);
		if (tmp & 0x02000000) {
			/* field #0 is smaller, so... */
			mga_outl(0x3C2C, pos);			/* field #1 vmemory start */
			mga_outl(0x3C28, pos + linelen);	/* field #0 vmemory start */
			linelen <<= 1;
			m2info->interlaced = 1;
		} else {
			mga_outl(0x3C28, pos);			/* vmemory start */
			m2info->interlaced = 0;
		}
		mga_outl(0x3C40, linelen);
	}
	mga_outl(0x3C4C, datactl);	/* data control */
	if (tmp & 0x02000000) {
		int i;
		/* enable without the interlace bit first, then wait for the
		 * beam counter (0x3C48) to wrap twice before turning
		 * interlace on */
		mga_outl(0x3C10, tmp & ~0x02000000);
		for (i = 0; i < 2; i++) {
			unsigned int nl;
			unsigned int lastl = 0;
			while ((nl = mga_inl(0x3C48) & 0xFFF) >= lastl) {
				lastl = nl;
			}
		}
	}
	mga_outl(0x3C10, tmp);
	minfo->hw.crtc2.ctl = tmp;	/* cache the control word */
	tmp = mt->VDisplay << 16;	/* line compare */
	if (mt->sync & FB_SYNC_HOR_HIGH_ACT)
		tmp |= 0x00000100;
	if (mt->sync & FB_SYNC_VERT_HIGH_ACT)
		tmp |= 0x00000200;
	mga_outl(0x3C44, tmp);
}

/* Turn CRTC2 off and route CRTC1 back to DAC1, caching the control word. */
static void matroxfb_dh_disable(struct matroxfb_dh_fb_info* m2info) {
	struct matrox_fb_info *minfo = m2info->primary_dev;
	mga_outl(0x3C10, 0x00000004);	/* disable CRTC2, CRTC1->DAC1, PLL as clock source */
	minfo->hw.crtc2.ctl = 0x00000004;
}

/*
 * Pan the CRTC2 display: recompute the framebuffer start address from the
 * requested x/y offsets and write it to the vmemory-start register(s)
 * (both field registers when interlaced).
 */
static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
		struct fb_var_screeninfo* var) {
	unsigned int pos;
	unsigned int linelen;
	unsigned int pixelsize;
	/* NOTE(review): minfo is required here by the mga_outl() access
	 * macros even though it is not referenced by name below. */
	struct matrox_fb_info *minfo = m2info->primary_dev;
	m2info->fbcon.var.xoffset = var->xoffset;
	m2info->fbcon.var.yoffset = var->yoffset;
	pixelsize = m2info->fbcon.var.bits_per_pixel >> 3;
	linelen = m2info->fbcon.var.xres_virtual * pixelsize;
	pos = m2info->fbcon.var.yoffset * linelen + m2info->fbcon.var.xoffset * pixelsize;
	pos += m2info->video.offbase;
	if (m2info->interlaced) {
		mga_outl(0x3C2C, pos);
		mga_outl(0x3C28, pos + linelen);
	} else {
		mga_outl(0x3C28, pos);
	}
}

/*
 * Validate and normalize a requested video mode for CRTC2.  Clamps
 * virtual/offset geometry, checks it fits in usable video RAM, forces
 * 8-pixel alignment of horizontal timings, and fills in the RGBA
 * bitfield layout.  On success writes the visual type, colormap length
 * and effective mode (15/16/32) through the out parameters; returns
 * 0 or -EINVAL.
 */
static int matroxfb_dh_decode_var(struct matroxfb_dh_fb_info* m2info,
		struct fb_var_screeninfo* var, int *visual, int *video_cmap_len,
		int *mode) {
	unsigned int mask;
	unsigned int memlen;
	unsigned int vramlen;
	/* scanline alignment mask: 32 pixels at 16bpp, 16 pixels at 32bpp */
	switch (var->bits_per_pixel) {
		case 16:
			mask = 0x1F;
			break;
		case 32:
			mask = 0x0F;
			break;
		default:
			return -EINVAL;
	}
	vramlen = m2info->video.len_usable;
	if (var->yres_virtual < var->yres)
		var->yres_virtual = var->yres;
	if (var->xres_virtual < var->xres)
		var->xres_virtual = var->xres;
	var->xres_virtual = (var->xres_virtual + mask) & ~mask;
	if (var->yres_virtual > 32767)
		return -EINVAL;
	memlen = var->xres_virtual * var->yres_virtual * (var->bits_per_pixel >> 3);
	if (memlen > vramlen)
		return -EINVAL;
	if (var->xoffset + var->xres > var->xres_virtual)
		var->xoffset = var->xres_virtual - var->xres;
	if (var->yoffset + var->yres > var->yres_virtual)
		var->yoffset = var->yres_virtual - var->yres;
	/* CRTC2 horizontal timings must be multiples of 8 pixels */
	var->xres &= ~7;
	var->left_margin &= ~7;
	var->right_margin &= ~7;
	var->hsync_len &= ~7;
	*mode = var->bits_per_pixel;
	if (var->bits_per_pixel == 16) {
		if (var->green.length == 5) {
			/* ARGB1555 */
			var->red.offset = 10;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 5;
			var->blue.offset = 0;
			var->blue.length = 5;
			var->transp.offset = 15;
			var->transp.length = 1;
			*mode = 15;
		} else {
			/* RGB565 */
			var->red.offset = 11;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 6;
			var->blue.offset = 0;
			var->blue.length = 5;
			var->transp.offset = 0;
			var->transp.length = 0;
		}
	} else {
		/* ARGB8888 */
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
	}
	*visual = FB_VISUAL_TRUECOLOR;
	*video_cmap_len = 16;
	return 0;
}

/*
 * fb_open hook for the secondary head: delegates to the primary head's
 * open so the underlying device is referenced; refuses if the primary
 * device is marked dead.
 */
static int matroxfb_dh_open(struct fb_info* info, int user) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	struct matrox_fb_info *minfo = m2info->primary_dev;
	if (minfo) {
		int err;
		if (minfo->dead) {
			return -ENXIO;
		}
		err = minfo->fbops.fb_open(&minfo->fbcon, user);
		if (err) {
			return err;
		}
	}
	return 0;
#undef m2info
}

/*
 * fb_release hook for the secondary head: forwards the release to the
 * primary head.  (Definition is truncated at the end of this chunk.)
 */
static int matroxfb_dh_release(struct fb_info* info, int user) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	int err = 0;
	struct matrox_fb_info *minfo = m2info->primary_dev;
	if (minfo) {
		err = minfo->fbops.fb_release(&minfo->fbcon, user);
	}
	return err;
#undef
m2info } /* * This function is called before the register_framebuffer so * no locking is needed. */ static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) { struct fb_fix_screeninfo *fix = &m2info->fbcon.fix; strcpy(fix->id, "MATROX DH"); fix->smem_start = m2info->video.base; fix->smem_len = m2info->video.len_usable; fix->ypanstep = 1; fix->ywrapstep = 0; fix->xpanstep = 8; /* TBD */ fix->mmio_start = m2info->mmio.base; fix->mmio_len = m2info->mmio.len; fix->accel = 0; /* no accel... */ } static int matroxfb_dh_check_var(struct fb_var_screeninfo* var, struct fb_info* info) { #define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon)) int visual; int cmap_len; int mode; return matroxfb_dh_decode_var(m2info, var, &visual, &cmap_len, &mode); #undef m2info } static int matroxfb_dh_set_par(struct fb_info* info) { #define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon)) int visual; int cmap_len; int mode; int err; struct fb_var_screeninfo* var = &info->var; struct matrox_fb_info *minfo = m2info->primary_dev; if ((err = matroxfb_dh_decode_var(m2info, var, &visual, &cmap_len, &mode)) != 0) return err; /* cmap */ { m2info->fbcon.screen_base = vaddr_va(m2info->video.vbase); m2info->fbcon.fix.visual = visual; m2info->fbcon.fix.type = FB_TYPE_PACKED_PIXELS; m2info->fbcon.fix.type_aux = 0; m2info->fbcon.fix.line_length = (var->xres_virtual * var->bits_per_pixel) >> 3; } { struct my_timming mt; unsigned int pos; int out; int cnt; matroxfb_var2my(&m2info->fbcon.var, &mt); mt.crtc = MATROXFB_SRC_CRTC2; /* CRTC2 delay */ mt.delay = 34; pos = (m2info->fbcon.var.yoffset * m2info->fbcon.var.xres_virtual + m2info->fbcon.var.xoffset) * m2info->fbcon.var.bits_per_pixel >> 3; pos += m2info->video.offbase; cnt = 0; down_read(&minfo->altout.lock); for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) { if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2) { cnt++; if (minfo->outputs[out].output->compute) { 
minfo->outputs[out].output->compute(minfo->outputs[out].data, &mt); } } } minfo->crtc2.pixclock = mt.pixclock; minfo->crtc2.mnp = mt.mnp; up_read(&minfo->altout.lock); if (cnt) { matroxfb_dh_restore(m2info, &mt, mode, pos); } else { matroxfb_dh_disable(m2info); } DAC1064_global_init(minfo); DAC1064_global_restore(minfo); down_read(&minfo->altout.lock); for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) { if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2 && minfo->outputs[out].output->program) { minfo->outputs[out].output->program(minfo->outputs[out].data); } } for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) { if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2 && minfo->outputs[out].output->start) { minfo->outputs[out].output->start(minfo->outputs[out].data); } } up_read(&minfo->altout.lock); } m2info->initialized = 1; return 0; #undef m2info } static int matroxfb_dh_pan_display(struct fb_var_screeninfo* var, struct fb_info* info) { #define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon)) matroxfb_dh_pan_var(m2info, var); return 0; #undef m2info } static int matroxfb_dh_get_vblank(const struct matroxfb_dh_fb_info* m2info, struct fb_vblank* vblank) { struct matrox_fb_info *minfo = m2info->primary_dev; matroxfb_enable_irq(minfo, 0); memset(vblank, 0, sizeof(*vblank)); vblank->flags = FB_VBLANK_HAVE_VCOUNT | FB_VBLANK_HAVE_VBLANK; /* mask out reserved bits + field number (odd/even) */ vblank->vcount = mga_inl(0x3C48) & 0x000007FF; /* compatibility stuff */ if (vblank->vcount >= m2info->fbcon.var.yres) vblank->flags |= FB_VBLANK_VBLANKING; if (test_bit(0, &minfo->irq_flags)) { vblank->flags |= FB_VBLANK_HAVE_COUNT; /* Only one writer, aligned int value... 
it should work without lock and without atomic_t */ vblank->count = minfo->crtc2.vsync.cnt; } return 0; } static int matroxfb_dh_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { #define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon)) struct matrox_fb_info *minfo = m2info->primary_dev; DBG(__func__) switch (cmd) { case FBIOGET_VBLANK: { struct fb_vblank vblank; int err; err = matroxfb_dh_get_vblank(m2info, &vblank); if (err) return err; if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank))) return -EFAULT; return 0; } case FBIO_WAITFORVSYNC: { u_int32_t crt; if (get_user(crt, (u_int32_t __user *)arg)) return -EFAULT; if (crt != 0) return -ENODEV; return matroxfb_wait_for_sync(minfo, 1); } case MATROXFB_SET_OUTPUT_MODE: case MATROXFB_GET_OUTPUT_MODE: case MATROXFB_GET_ALL_OUTPUTS: { return minfo->fbcon.fbops->fb_ioctl(&minfo->fbcon, cmd, arg); } case MATROXFB_SET_OUTPUT_CONNECTION: { u_int32_t tmp; int out; int changes; if (get_user(tmp, (u_int32_t __user *)arg)) return -EFAULT; for (out = 0; out < 32; out++) { if (tmp & (1 << out)) { if (out >= MATROXFB_MAX_OUTPUTS) return -ENXIO; if (!minfo->outputs[out].output) return -ENXIO; switch (minfo->outputs[out].src) { case MATROXFB_SRC_NONE: case MATROXFB_SRC_CRTC2: break; default: return -EBUSY; } } } if (minfo->devflags.panellink) { if (tmp & MATROXFB_OUTPUT_CONN_DFP) return -EINVAL; if ((minfo->outputs[2].src == MATROXFB_SRC_CRTC1) && tmp) return -EBUSY; } changes = 0; for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) { if (tmp & (1 << out)) { if (minfo->outputs[out].src != MATROXFB_SRC_CRTC2) { changes = 1; minfo->outputs[out].src = MATROXFB_SRC_CRTC2; } } else if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2) { changes = 1; minfo->outputs[out].src = MATROXFB_SRC_NONE; } } if (!changes) return 0; matroxfb_dh_set_par(info); return 0; } case MATROXFB_GET_OUTPUT_CONNECTION: { u_int32_t conn = 0; int out; for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) { if 
(minfo->outputs[out].src == MATROXFB_SRC_CRTC2) { conn |= 1 << out; } } if (put_user(conn, (u_int32_t __user *)arg)) return -EFAULT; return 0; } case MATROXFB_GET_AVAILABLE_OUTPUTS: { u_int32_t tmp = 0; int out; for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) { if (minfo->outputs[out].output) { switch (minfo->outputs[out].src) { case MATROXFB_SRC_NONE: case MATROXFB_SRC_CRTC2: tmp |= 1 << out; break; } } } if (minfo->devflags.panellink) { tmp &= ~MATROXFB_OUTPUT_CONN_DFP; if (minfo->outputs[2].src == MATROXFB_SRC_CRTC1) { tmp = 0; } } if (put_user(tmp, (u_int32_t __user *)arg)) return -EFAULT; return 0; } } return -ENOTTY; #undef m2info } static int matroxfb_dh_blank(int blank, struct fb_info* info) { #define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon)) switch (blank) { case 1: case 2: case 3: case 4: default:; } /* do something... */ return 0; #undef m2info } static struct fb_ops matroxfb_dh_ops = { .owner = THIS_MODULE, .fb_open = matroxfb_dh_open, .fb_release = matroxfb_dh_release, .fb_check_var = matroxfb_dh_check_var, .fb_set_par = matroxfb_dh_set_par, .fb_setcolreg = matroxfb_dh_setcolreg, .fb_pan_display =matroxfb_dh_pan_display, .fb_blank = matroxfb_dh_blank, .fb_ioctl = matroxfb_dh_ioctl, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static struct fb_var_screeninfo matroxfb_dh_defined = { 640,480,640,480,/* W,H, virtual W,H */ 0,0, /* offset */ 32, /* depth */ 0, /* gray */ {0,0,0}, /* R */ {0,0,0}, /* G */ {0,0,0}, /* B */ {0,0,0}, /* alpha */ 0, /* nonstd */ FB_ACTIVATE_NOW, -1,-1, /* display size */ 0, /* accel flags */ 39721L,48L,16L,33L,10L, 96L,2,0, /* no sync info */ FB_VMODE_NONINTERLACED, 0, {0,0,0,0,0} }; static int matroxfb_dh_regit(const struct matrox_fb_info *minfo, struct matroxfb_dh_fb_info *m2info) { #define minfo (m2info->primary_dev) void* oldcrtc2; m2info->fbcon.fbops = &matroxfb_dh_ops; m2info->fbcon.flags = FBINFO_FLAG_DEFAULT; m2info->fbcon.flags |= FBINFO_HWACCEL_XPAN | 
FBINFO_HWACCEL_YPAN; m2info->fbcon.pseudo_palette = m2info->cmap; fb_alloc_cmap(&m2info->fbcon.cmap, 256, 1); if (mem < 64) mem *= 1024; if (mem < 64*1024) mem *= 1024; mem &= ~0x00000FFF; /* PAGE_MASK? */ if (minfo->video.len_usable + mem <= minfo->video.len) m2info->video.offbase = minfo->video.len - mem; else if (minfo->video.len < mem) { return -ENOMEM; } else { /* check yres on first head... */ m2info->video.borrowed = mem; minfo->video.len_usable -= mem; m2info->video.offbase = minfo->video.len_usable; } m2info->video.base = minfo->video.base + m2info->video.offbase; m2info->video.len = m2info->video.len_usable = m2info->video.len_maximum = mem; m2info->video.vbase.vaddr = vaddr_va(minfo->video.vbase) + m2info->video.offbase; m2info->mmio.base = minfo->mmio.base; m2info->mmio.vbase = minfo->mmio.vbase; m2info->mmio.len = minfo->mmio.len; matroxfb_dh_init_fix(m2info); if (register_framebuffer(&m2info->fbcon)) { return -ENXIO; } if (!m2info->initialized) fb_set_var(&m2info->fbcon, &matroxfb_dh_defined); down_write(&minfo->crtc2.lock); oldcrtc2 = minfo->crtc2.info; minfo->crtc2.info = m2info; up_write(&minfo->crtc2.lock); if (oldcrtc2) { printk(KERN_ERR "matroxfb_crtc2: Internal consistency check failed: crtc2 already present: %p\n", oldcrtc2); } return 0; #undef minfo } /* ************************** */ static int matroxfb_dh_registerfb(struct matroxfb_dh_fb_info* m2info) { #define minfo (m2info->primary_dev) if (matroxfb_dh_regit(minfo, m2info)) { printk(KERN_ERR "matroxfb_crtc2: secondary head failed to register\n"); return -1; } printk(KERN_INFO "matroxfb_crtc2: secondary head of fb%u was registered as fb%u\n", minfo->fbcon.node, m2info->fbcon.node); m2info->fbcon_registered = 1; return 0; #undef minfo } static void matroxfb_dh_deregisterfb(struct matroxfb_dh_fb_info* m2info) { #define minfo (m2info->primary_dev) if (m2info->fbcon_registered) { int id; struct matroxfb_dh_fb_info* crtc2; down_write(&minfo->crtc2.lock); crtc2 = minfo->crtc2.info; if (crtc2 == 
m2info) minfo->crtc2.info = NULL; up_write(&minfo->crtc2.lock); if (crtc2 != m2info) { printk(KERN_ERR "matroxfb_crtc2: Internal consistency check failed: crtc2 mismatch at unload: %p != %p\n", crtc2, m2info); printk(KERN_ERR "matroxfb_crtc2: Expect kernel crash after module unload.\n"); return; } id = m2info->fbcon.node; unregister_framebuffer(&m2info->fbcon); /* return memory back to primary head */ minfo->video.len_usable += m2info->video.borrowed; printk(KERN_INFO "matroxfb_crtc2: fb%u unregistered\n", id); m2info->fbcon_registered = 0; } #undef minfo } static void* matroxfb_crtc2_probe(struct matrox_fb_info* minfo) { struct matroxfb_dh_fb_info* m2info; /* hardware is CRTC2 incapable... */ if (!minfo->devflags.crtc2) return NULL; m2info = kzalloc(sizeof(*m2info), GFP_KERNEL); if (!m2info) { printk(KERN_ERR "matroxfb_crtc2: Not enough memory for CRTC2 control structs\n"); return NULL; } m2info->primary_dev = minfo; if (matroxfb_dh_registerfb(m2info)) { kfree(m2info); printk(KERN_ERR "matroxfb_crtc2: CRTC2 framebuffer failed to register\n"); return NULL; } return m2info; } static void matroxfb_crtc2_remove(struct matrox_fb_info* minfo, void* crtc2) { matroxfb_dh_deregisterfb(crtc2); kfree(crtc2); } static struct matroxfb_driver crtc2 = { .name = "Matrox G400 CRTC2", .probe = matroxfb_crtc2_probe, .remove = matroxfb_crtc2_remove }; static int matroxfb_crtc2_init(void) { if (fb_get_options("matrox_crtc2fb", NULL)) return -ENODEV; matroxfb_register_driver(&crtc2); return 0; } static void matroxfb_crtc2_exit(void) { matroxfb_unregister_driver(&crtc2); } MODULE_AUTHOR("(c) 1999-2002 Petr Vandrovec <vandrove@vc.cvut.cz>"); MODULE_DESCRIPTION("Matrox G400 CRTC2 driver"); MODULE_LICENSE("GPL"); module_init(matroxfb_crtc2_init); module_exit(matroxfb_crtc2_exit); /* we do not have __setup() yet */
gpl-2.0
EviGL/Telegram
TMessagesProj/jni/opus/ogg/bitwise.c
563
23408
/******************************************************************** * * * THIS FILE IS PART OF THE Ogg CONTAINER SOURCE CODE. * * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS * * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE * * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. * * * * THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2010 * * by the Xiph.Org Foundation http://www.xiph.org/ * * * ******************************************************************** function: packing variable sized words into an octet stream last mod: $Id: bitwise.c 18051 2011-08-04 17:56:39Z giles $ ********************************************************************/ /* We're 'LSb' endian; if we write a word but read individual bits, then we'll read the lsb first */ #include <string.h> #include <stdlib.h> #include <limits.h> #include <ogg/ogg.h> #define BUFFER_INCREMENT 256 static const unsigned long mask[]= {0x00000000,0x00000001,0x00000003,0x00000007,0x0000000f, 0x0000001f,0x0000003f,0x0000007f,0x000000ff,0x000001ff, 0x000003ff,0x000007ff,0x00000fff,0x00001fff,0x00003fff, 0x00007fff,0x0000ffff,0x0001ffff,0x0003ffff,0x0007ffff, 0x000fffff,0x001fffff,0x003fffff,0x007fffff,0x00ffffff, 0x01ffffff,0x03ffffff,0x07ffffff,0x0fffffff,0x1fffffff, 0x3fffffff,0x7fffffff,0xffffffff }; static const unsigned int mask8B[]= {0x00,0x80,0xc0,0xe0,0xf0,0xf8,0xfc,0xfe,0xff}; void oggpack_writeinit(oggpack_buffer *b){ memset(b,0,sizeof(*b)); b->ptr=b->buffer=_ogg_malloc(BUFFER_INCREMENT); b->buffer[0]='\0'; b->storage=BUFFER_INCREMENT; } void oggpackB_writeinit(oggpack_buffer *b){ oggpack_writeinit(b); } int oggpack_writecheck(oggpack_buffer *b){ if(!b->ptr || !b->storage)return -1; return 0; } int oggpackB_writecheck(oggpack_buffer *b){ return oggpack_writecheck(b); } void oggpack_writetrunc(oggpack_buffer *b,long bits){ long bytes=bits>>3; if(b->ptr){ bits-=bytes*8; b->ptr=b->buffer+bytes; b->endbit=bits; b->endbyte=bytes; *b->ptr&=mask[bits]; } } void 
oggpackB_writetrunc(oggpack_buffer *b,long bits){ long bytes=bits>>3; if(b->ptr){ bits-=bytes*8; b->ptr=b->buffer+bytes; b->endbit=bits; b->endbyte=bytes; *b->ptr&=mask8B[bits]; } } /* Takes only up to 32 bits. */ void oggpack_write(oggpack_buffer *b,unsigned long value,int bits){ if(bits<0 || bits>32) goto err; if(b->endbyte>=b->storage-4){ void *ret; if(!b->ptr)return; if(b->storage>LONG_MAX-BUFFER_INCREMENT) goto err; ret=_ogg_realloc(b->buffer,b->storage+BUFFER_INCREMENT); if(!ret) goto err; b->buffer=ret; b->storage+=BUFFER_INCREMENT; b->ptr=b->buffer+b->endbyte; } value&=mask[bits]; bits+=b->endbit; b->ptr[0]|=value<<b->endbit; if(bits>=8){ b->ptr[1]=(unsigned char)(value>>(8-b->endbit)); if(bits>=16){ b->ptr[2]=(unsigned char)(value>>(16-b->endbit)); if(bits>=24){ b->ptr[3]=(unsigned char)(value>>(24-b->endbit)); if(bits>=32){ if(b->endbit) b->ptr[4]=(unsigned char)(value>>(32-b->endbit)); else b->ptr[4]=0; } } } } b->endbyte+=bits/8; b->ptr+=bits/8; b->endbit=bits&7; return; err: oggpack_writeclear(b); } /* Takes only up to 32 bits. 
*/ void oggpackB_write(oggpack_buffer *b,unsigned long value,int bits){ if(bits<0 || bits>32) goto err; if(b->endbyte>=b->storage-4){ void *ret; if(!b->ptr)return; if(b->storage>LONG_MAX-BUFFER_INCREMENT) goto err; ret=_ogg_realloc(b->buffer,b->storage+BUFFER_INCREMENT); if(!ret) goto err; b->buffer=ret; b->storage+=BUFFER_INCREMENT; b->ptr=b->buffer+b->endbyte; } value=(value&mask[bits])<<(32-bits); bits+=b->endbit; b->ptr[0]|=value>>(24+b->endbit); if(bits>=8){ b->ptr[1]=(unsigned char)(value>>(16+b->endbit)); if(bits>=16){ b->ptr[2]=(unsigned char)(value>>(8+b->endbit)); if(bits>=24){ b->ptr[3]=(unsigned char)(value>>(b->endbit)); if(bits>=32){ if(b->endbit) b->ptr[4]=(unsigned char)(value<<(8-b->endbit)); else b->ptr[4]=0; } } } } b->endbyte+=bits/8; b->ptr+=bits/8; b->endbit=bits&7; return; err: oggpack_writeclear(b); } void oggpack_writealign(oggpack_buffer *b){ int bits=8-b->endbit; if(bits<8) oggpack_write(b,0,bits); } void oggpackB_writealign(oggpack_buffer *b){ int bits=8-b->endbit; if(bits<8) oggpackB_write(b,0,bits); } static void oggpack_writecopy_helper(oggpack_buffer *b, void *source, long bits, void (*w)(oggpack_buffer *, unsigned long, int), int msb){ unsigned char *ptr=(unsigned char *)source; long bytes=bits/8; bits-=bytes*8; if(b->endbit){ int i; /* unaligned copy. Do it the hard way. 
*/ for(i=0;i<bytes;i++) w(b,(unsigned long)(ptr[i]),8); }else{ /* aligned block copy */ if(b->endbyte+bytes+1>=b->storage){ void *ret; if(!b->ptr) goto err; if(b->endbyte+bytes+BUFFER_INCREMENT>b->storage) goto err; b->storage=b->endbyte+bytes+BUFFER_INCREMENT; ret=_ogg_realloc(b->buffer,b->storage); if(!ret) goto err; b->buffer=ret; b->ptr=b->buffer+b->endbyte; } memmove(b->ptr,source,bytes); b->ptr+=bytes; b->endbyte+=bytes; *b->ptr=0; } if(bits){ if(msb) w(b,(unsigned long)(ptr[bytes]>>(8-bits)),bits); else w(b,(unsigned long)(ptr[bytes]),bits); } return; err: oggpack_writeclear(b); } void oggpack_writecopy(oggpack_buffer *b,void *source,long bits){ oggpack_writecopy_helper(b,source,bits,oggpack_write,0); } void oggpackB_writecopy(oggpack_buffer *b,void *source,long bits){ oggpack_writecopy_helper(b,source,bits,oggpackB_write,1); } void oggpack_reset(oggpack_buffer *b){ if(!b->ptr)return; b->ptr=b->buffer; b->buffer[0]=0; b->endbit=b->endbyte=0; } void oggpackB_reset(oggpack_buffer *b){ oggpack_reset(b); } void oggpack_writeclear(oggpack_buffer *b){ if(b->buffer)_ogg_free(b->buffer); memset(b,0,sizeof(*b)); } void oggpackB_writeclear(oggpack_buffer *b){ oggpack_writeclear(b); } void oggpack_readinit(oggpack_buffer *b,unsigned char *buf,int bytes){ memset(b,0,sizeof(*b)); b->buffer=b->ptr=buf; b->storage=bytes; } void oggpackB_readinit(oggpack_buffer *b,unsigned char *buf,int bytes){ oggpack_readinit(b,buf,bytes); } /* Read in bits without advancing the bitptr; bits <= 32 */ long oggpack_look(oggpack_buffer *b,int bits){ unsigned long ret; unsigned long m; if(bits<0 || bits>32) return -1; m=mask[bits]; bits+=b->endbit; if(b->endbyte >= b->storage-4){ /* not the main path */ if(b->endbyte > b->storage-((bits+7)>>3)) return -1; /* special case to avoid reading b->ptr[0], which might be past the end of the buffer; also skips some useless accounting */ else if(!bits)return(0L); } ret=b->ptr[0]>>b->endbit; if(bits>8){ ret|=b->ptr[1]<<(8-b->endbit); if(bits>16){ 
ret|=b->ptr[2]<<(16-b->endbit); if(bits>24){ ret|=b->ptr[3]<<(24-b->endbit); if(bits>32 && b->endbit) ret|=b->ptr[4]<<(32-b->endbit); } } } return(m&ret); } /* Read in bits without advancing the bitptr; bits <= 32 */ long oggpackB_look(oggpack_buffer *b,int bits){ unsigned long ret; int m=32-bits; if(m<0 || m>32) return -1; bits+=b->endbit; if(b->endbyte >= b->storage-4){ /* not the main path */ if(b->endbyte > b->storage-((bits+7)>>3)) return -1; /* special case to avoid reading b->ptr[0], which might be past the end of the buffer; also skips some useless accounting */ else if(!bits)return(0L); } ret=b->ptr[0]<<(24+b->endbit); if(bits>8){ ret|=b->ptr[1]<<(16+b->endbit); if(bits>16){ ret|=b->ptr[2]<<(8+b->endbit); if(bits>24){ ret|=b->ptr[3]<<(b->endbit); if(bits>32 && b->endbit) ret|=b->ptr[4]>>(8-b->endbit); } } } return ((ret&0xffffffff)>>(m>>1))>>((m+1)>>1); } long oggpack_look1(oggpack_buffer *b){ if(b->endbyte>=b->storage)return(-1); return((b->ptr[0]>>b->endbit)&1); } long oggpackB_look1(oggpack_buffer *b){ if(b->endbyte>=b->storage)return(-1); return((b->ptr[0]>>(7-b->endbit))&1); } void oggpack_adv(oggpack_buffer *b,int bits){ bits+=b->endbit; if(b->endbyte > b->storage-((bits+7)>>3)) goto overflow; b->ptr+=bits/8; b->endbyte+=bits/8; b->endbit=bits&7; return; overflow: b->ptr=NULL; b->endbyte=b->storage; b->endbit=1; } void oggpackB_adv(oggpack_buffer *b,int bits){ oggpack_adv(b,bits); } void oggpack_adv1(oggpack_buffer *b){ if(++(b->endbit)>7){ b->endbit=0; b->ptr++; b->endbyte++; } } void oggpackB_adv1(oggpack_buffer *b){ oggpack_adv1(b); } /* bits <= 32 */ long oggpack_read(oggpack_buffer *b,int bits){ long ret; unsigned long m; if(bits<0 || bits>32) goto err; m=mask[bits]; bits+=b->endbit; if(b->endbyte >= b->storage-4){ /* not the main path */ if(b->endbyte > b->storage-((bits+7)>>3)) goto overflow; /* special case to avoid reading b->ptr[0], which might be past the end of the buffer; also skips some useless accounting */ else if(!bits)return(0L); } 
ret=b->ptr[0]>>b->endbit; if(bits>8){ ret|=b->ptr[1]<<(8-b->endbit); if(bits>16){ ret|=b->ptr[2]<<(16-b->endbit); if(bits>24){ ret|=b->ptr[3]<<(24-b->endbit); if(bits>32 && b->endbit){ ret|=b->ptr[4]<<(32-b->endbit); } } } } ret&=m; b->ptr+=bits/8; b->endbyte+=bits/8; b->endbit=bits&7; return ret; overflow: err: b->ptr=NULL; b->endbyte=b->storage; b->endbit=1; return -1L; } /* bits <= 32 */ long oggpackB_read(oggpack_buffer *b,int bits){ long ret; long m=32-bits; if(m<0 || m>32) goto err; bits+=b->endbit; if(b->endbyte+4>=b->storage){ /* not the main path */ if(b->endbyte > b->storage-((bits+7)>>3)) goto overflow; /* special case to avoid reading b->ptr[0], which might be past the end of the buffer; also skips some useless accounting */ else if(!bits)return(0L); } ret=b->ptr[0]<<(24+b->endbit); if(bits>8){ ret|=b->ptr[1]<<(16+b->endbit); if(bits>16){ ret|=b->ptr[2]<<(8+b->endbit); if(bits>24){ ret|=b->ptr[3]<<(b->endbit); if(bits>32 && b->endbit) ret|=b->ptr[4]>>(8-b->endbit); } } } ret=((ret&0xffffffffUL)>>(m>>1))>>((m+1)>>1); b->ptr+=bits/8; b->endbyte+=bits/8; b->endbit=bits&7; return ret; overflow: err: b->ptr=NULL; b->endbyte=b->storage; b->endbit=1; return -1L; } long oggpack_read1(oggpack_buffer *b){ long ret; if(b->endbyte >= b->storage) goto overflow; ret=(b->ptr[0]>>b->endbit)&1; b->endbit++; if(b->endbit>7){ b->endbit=0; b->ptr++; b->endbyte++; } return ret; overflow: b->ptr=NULL; b->endbyte=b->storage; b->endbit=1; return -1L; } long oggpackB_read1(oggpack_buffer *b){ long ret; if(b->endbyte >= b->storage) goto overflow; ret=(b->ptr[0]>>(7-b->endbit))&1; b->endbit++; if(b->endbit>7){ b->endbit=0; b->ptr++; b->endbyte++; } return ret; overflow: b->ptr=NULL; b->endbyte=b->storage; b->endbit=1; return -1L; } long oggpack_bytes(oggpack_buffer *b){ return(b->endbyte+(b->endbit+7)/8); } long oggpack_bits(oggpack_buffer *b){ return(b->endbyte*8+b->endbit); } long oggpackB_bytes(oggpack_buffer *b){ return oggpack_bytes(b); } long oggpackB_bits(oggpack_buffer 
*b){ return oggpack_bits(b); } unsigned char *oggpack_get_buffer(oggpack_buffer *b){ return(b->buffer); } unsigned char *oggpackB_get_buffer(oggpack_buffer *b){ return oggpack_get_buffer(b); } /* Self test of the bitwise routines; everything else is based on them, so they damned well better be solid. */ #ifdef _V_SELFTEST #include <stdio.h> static int ilog(unsigned int v){ int ret=0; while(v){ ret++; v>>=1; } return(ret); } oggpack_buffer o; oggpack_buffer r; void report(char *in){ fprintf(stderr,"%s",in); exit(1); } void cliptest(unsigned long *b,int vals,int bits,int *comp,int compsize){ long bytes,i; unsigned char *buffer; oggpack_reset(&o); for(i=0;i<vals;i++) oggpack_write(&o,b[i],bits?bits:ilog(b[i])); buffer=oggpack_get_buffer(&o); bytes=oggpack_bytes(&o); if(bytes!=compsize)report("wrong number of bytes!\n"); for(i=0;i<bytes;i++)if(buffer[i]!=comp[i]){ for(i=0;i<bytes;i++)fprintf(stderr,"%x %x\n",(int)buffer[i],(int)comp[i]); report("wrote incorrect value!\n"); } oggpack_readinit(&r,buffer,bytes); for(i=0;i<vals;i++){ int tbit=bits?bits:ilog(b[i]); if(oggpack_look(&r,tbit)==-1) report("out of data!\n"); if(oggpack_look(&r,tbit)!=(b[i]&mask[tbit])) report("looked at incorrect value!\n"); if(tbit==1) if(oggpack_look1(&r)!=(b[i]&mask[tbit])) report("looked at single bit incorrect value!\n"); if(tbit==1){ if(oggpack_read1(&r)!=(b[i]&mask[tbit])) report("read incorrect single bit value!\n"); }else{ if(oggpack_read(&r,tbit)!=(b[i]&mask[tbit])) report("read incorrect value!\n"); } } if(oggpack_bytes(&r)!=bytes)report("leftover bytes after read!\n"); } void cliptestB(unsigned long *b,int vals,int bits,int *comp,int compsize){ long bytes,i; unsigned char *buffer; oggpackB_reset(&o); for(i=0;i<vals;i++) oggpackB_write(&o,b[i],bits?bits:ilog(b[i])); buffer=oggpackB_get_buffer(&o); bytes=oggpackB_bytes(&o); if(bytes!=compsize)report("wrong number of bytes!\n"); for(i=0;i<bytes;i++)if(buffer[i]!=comp[i]){ for(i=0;i<bytes;i++)fprintf(stderr,"%x 
%x\n",(int)buffer[i],(int)comp[i]); report("wrote incorrect value!\n"); } oggpackB_readinit(&r,buffer,bytes); for(i=0;i<vals;i++){ int tbit=bits?bits:ilog(b[i]); if(oggpackB_look(&r,tbit)==-1) report("out of data!\n"); if(oggpackB_look(&r,tbit)!=(b[i]&mask[tbit])) report("looked at incorrect value!\n"); if(tbit==1) if(oggpackB_look1(&r)!=(b[i]&mask[tbit])) report("looked at single bit incorrect value!\n"); if(tbit==1){ if(oggpackB_read1(&r)!=(b[i]&mask[tbit])) report("read incorrect single bit value!\n"); }else{ if(oggpackB_read(&r,tbit)!=(b[i]&mask[tbit])) report("read incorrect value!\n"); } } if(oggpackB_bytes(&r)!=bytes)report("leftover bytes after read!\n"); } int main(void){ unsigned char *buffer; long bytes,i; static unsigned long testbuffer1[]= {18,12,103948,4325,543,76,432,52,3,65,4,56,32,42,34,21,1,23,32,546,456,7, 567,56,8,8,55,3,52,342,341,4,265,7,67,86,2199,21,7,1,5,1,4}; int test1size=43; static unsigned long testbuffer2[]= {216531625L,1237861823,56732452,131,3212421,12325343,34547562,12313212, 1233432,534,5,346435231,14436467,7869299,76326614,167548585, 85525151,0,12321,1,349528352}; int test2size=21; static unsigned long testbuffer3[]= {1,0,14,0,1,0,12,0,1,0,0,0,1,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,1,1,1,1,1,0,0,1, 0,1,30,1,1,1,0,0,1,0,0,0,12,0,11,0,1,0,0,1}; int test3size=56; static unsigned long large[]= {2136531625L,2137861823,56732452,131,3212421,12325343,34547562,12313212, 1233432,534,5,2146435231,14436467,7869299,76326614,167548585, 85525151,0,12321,1,2146528352}; int onesize=33; static int one[33]={146,25,44,151,195,15,153,176,233,131,196,65,85,172,47,40, 34,242,223,136,35,222,211,86,171,50,225,135,214,75,172, 223,4}; static int oneB[33]={150,101,131,33,203,15,204,216,105,193,156,65,84,85,222, 8,139,145,227,126,34,55,244,171,85,100,39,195,173,18, 245,251,128}; int twosize=6; static int two[6]={61,255,255,251,231,29}; static int twoB[6]={247,63,255,253,249,120}; int threesize=54; static int 
three[54]={169,2,232,252,91,132,156,36,89,13,123,176,144,32,254, 142,224,85,59,121,144,79,124,23,67,90,90,216,79,23,83, 58,135,196,61,55,129,183,54,101,100,170,37,127,126,10, 100,52,4,14,18,86,77,1}; static int threeB[54]={206,128,42,153,57,8,183,251,13,89,36,30,32,144,183, 130,59,240,121,59,85,223,19,228,180,134,33,107,74,98, 233,253,196,135,63,2,110,114,50,155,90,127,37,170,104, 200,20,254,4,58,106,176,144,0}; int foursize=38; static int four[38]={18,6,163,252,97,194,104,131,32,1,7,82,137,42,129,11,72, 132,60,220,112,8,196,109,64,179,86,9,137,195,208,122,169, 28,2,133,0,1}; static int fourB[38]={36,48,102,83,243,24,52,7,4,35,132,10,145,21,2,93,2,41, 1,219,184,16,33,184,54,149,170,132,18,30,29,98,229,67, 129,10,4,32}; int fivesize=45; static int five[45]={169,2,126,139,144,172,30,4,80,72,240,59,130,218,73,62, 241,24,210,44,4,20,0,248,116,49,135,100,110,130,181,169, 84,75,159,2,1,0,132,192,8,0,0,18,22}; static int fiveB[45]={1,84,145,111,245,100,128,8,56,36,40,71,126,78,213,226, 124,105,12,0,133,128,0,162,233,242,67,152,77,205,77, 172,150,169,129,79,128,0,6,4,32,0,27,9,0}; int sixsize=7; static int six[7]={17,177,170,242,169,19,148}; static int sixB[7]={136,141,85,79,149,200,41}; /* Test read/write together */ /* Later we test against pregenerated bitstreams */ oggpack_writeinit(&o); fprintf(stderr,"\nSmall preclipped packing (LSb): "); cliptest(testbuffer1,test1size,0,one,onesize); fprintf(stderr,"ok."); fprintf(stderr,"\nNull bit call (LSb): "); cliptest(testbuffer3,test3size,0,two,twosize); fprintf(stderr,"ok."); fprintf(stderr,"\nLarge preclipped packing (LSb): "); cliptest(testbuffer2,test2size,0,three,threesize); fprintf(stderr,"ok."); fprintf(stderr,"\n32 bit preclipped packing (LSb): "); oggpack_reset(&o); for(i=0;i<test2size;i++) oggpack_write(&o,large[i],32); buffer=oggpack_get_buffer(&o); bytes=oggpack_bytes(&o); oggpack_readinit(&r,buffer,bytes); for(i=0;i<test2size;i++){ if(oggpack_look(&r,32)==-1)report("out of data. 
failed!"); if(oggpack_look(&r,32)!=large[i]){ fprintf(stderr,"%ld != %ld (%lx!=%lx):",oggpack_look(&r,32),large[i], oggpack_look(&r,32),large[i]); report("read incorrect value!\n"); } oggpack_adv(&r,32); } if(oggpack_bytes(&r)!=bytes)report("leftover bytes after read!\n"); fprintf(stderr,"ok."); fprintf(stderr,"\nSmall unclipped packing (LSb): "); cliptest(testbuffer1,test1size,7,four,foursize); fprintf(stderr,"ok."); fprintf(stderr,"\nLarge unclipped packing (LSb): "); cliptest(testbuffer2,test2size,17,five,fivesize); fprintf(stderr,"ok."); fprintf(stderr,"\nSingle bit unclipped packing (LSb): "); cliptest(testbuffer3,test3size,1,six,sixsize); fprintf(stderr,"ok."); fprintf(stderr,"\nTesting read past end (LSb): "); oggpack_readinit(&r,(unsigned char *)"\0\0\0\0\0\0\0\0",8); for(i=0;i<64;i++){ if(oggpack_read(&r,1)!=0){ fprintf(stderr,"failed; got -1 prematurely.\n"); exit(1); } } if(oggpack_look(&r,1)!=-1 || oggpack_read(&r,1)!=-1){ fprintf(stderr,"failed; read past end without -1.\n"); exit(1); } oggpack_readinit(&r,(unsigned char *)"\0\0\0\0\0\0\0\0",8); if(oggpack_read(&r,30)!=0 || oggpack_read(&r,16)!=0){ fprintf(stderr,"failed 2; got -1 prematurely.\n"); exit(1); } if(oggpack_look(&r,18)!=0 || oggpack_look(&r,18)!=0){ fprintf(stderr,"failed 3; got -1 prematurely.\n"); exit(1); } if(oggpack_look(&r,19)!=-1 || oggpack_look(&r,19)!=-1){ fprintf(stderr,"failed; read past end without -1.\n"); exit(1); } if(oggpack_look(&r,32)!=-1 || oggpack_look(&r,32)!=-1){ fprintf(stderr,"failed; read past end without -1.\n"); exit(1); } oggpack_writeclear(&o); fprintf(stderr,"ok.\n"); /********** lazy, cut-n-paste retest with MSb packing ***********/ /* Test read/write together */ /* Later we test against pregenerated bitstreams */ oggpackB_writeinit(&o); fprintf(stderr,"\nSmall preclipped packing (MSb): "); cliptestB(testbuffer1,test1size,0,oneB,onesize); fprintf(stderr,"ok."); fprintf(stderr,"\nNull bit call (MSb): "); cliptestB(testbuffer3,test3size,0,twoB,twosize); 
fprintf(stderr,"ok."); fprintf(stderr,"\nLarge preclipped packing (MSb): "); cliptestB(testbuffer2,test2size,0,threeB,threesize); fprintf(stderr,"ok."); fprintf(stderr,"\n32 bit preclipped packing (MSb): "); oggpackB_reset(&o); for(i=0;i<test2size;i++) oggpackB_write(&o,large[i],32); buffer=oggpackB_get_buffer(&o); bytes=oggpackB_bytes(&o); oggpackB_readinit(&r,buffer,bytes); for(i=0;i<test2size;i++){ if(oggpackB_look(&r,32)==-1)report("out of data. failed!"); if(oggpackB_look(&r,32)!=large[i]){ fprintf(stderr,"%ld != %ld (%lx!=%lx):",oggpackB_look(&r,32),large[i], oggpackB_look(&r,32),large[i]); report("read incorrect value!\n"); } oggpackB_adv(&r,32); } if(oggpackB_bytes(&r)!=bytes)report("leftover bytes after read!\n"); fprintf(stderr,"ok."); fprintf(stderr,"\nSmall unclipped packing (MSb): "); cliptestB(testbuffer1,test1size,7,fourB,foursize); fprintf(stderr,"ok."); fprintf(stderr,"\nLarge unclipped packing (MSb): "); cliptestB(testbuffer2,test2size,17,fiveB,fivesize); fprintf(stderr,"ok."); fprintf(stderr,"\nSingle bit unclipped packing (MSb): "); cliptestB(testbuffer3,test3size,1,sixB,sixsize); fprintf(stderr,"ok."); fprintf(stderr,"\nTesting read past end (MSb): "); oggpackB_readinit(&r,(unsigned char *)"\0\0\0\0\0\0\0\0",8); for(i=0;i<64;i++){ if(oggpackB_read(&r,1)!=0){ fprintf(stderr,"failed; got -1 prematurely.\n"); exit(1); } } if(oggpackB_look(&r,1)!=-1 || oggpackB_read(&r,1)!=-1){ fprintf(stderr,"failed; read past end without -1.\n"); exit(1); } oggpackB_readinit(&r,(unsigned char *)"\0\0\0\0\0\0\0\0",8); if(oggpackB_read(&r,30)!=0 || oggpackB_read(&r,16)!=0){ fprintf(stderr,"failed 2; got -1 prematurely.\n"); exit(1); } if(oggpackB_look(&r,18)!=0 || oggpackB_look(&r,18)!=0){ fprintf(stderr,"failed 3; got -1 prematurely.\n"); exit(1); } if(oggpackB_look(&r,19)!=-1 || oggpackB_look(&r,19)!=-1){ fprintf(stderr,"failed; read past end without -1.\n"); exit(1); } if(oggpackB_look(&r,32)!=-1 || oggpackB_look(&r,32)!=-1){ fprintf(stderr,"failed; read past 
end without -1.\n"); exit(1); } oggpackB_writeclear(&o); fprintf(stderr,"ok.\n\n"); return(0); } #endif /* _V_SELFTEST */ #undef BUFFER_INCREMENT
gpl-2.0
cleaton/liquid_kernel
drivers/mtd/devices/sst25l.c
563
11847
/* * sst25l.c * * Driver for SST25L SPI Flash chips * * Copyright © 2009 Bluewater Systems Ltd * Author: Andre Renaud <andre@bluewatersys.com> * Author: Ryan Mallon <ryan@bluewatersys.com> * * Based on m25p80.c * * This code is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> /* Erases can take up to 3 seconds! */ #define MAX_READY_WAIT_JIFFIES msecs_to_jiffies(3000) #define SST25L_CMD_WRSR 0x01 /* Write status register */ #define SST25L_CMD_WRDI 0x04 /* Write disable */ #define SST25L_CMD_RDSR 0x05 /* Read status register */ #define SST25L_CMD_WREN 0x06 /* Write enable */ #define SST25L_CMD_READ 0x03 /* High speed read */ #define SST25L_CMD_EWSR 0x50 /* Enable write status register */ #define SST25L_CMD_SECTOR_ERASE 0x20 /* Erase sector */ #define SST25L_CMD_READ_ID 0x90 /* Read device ID */ #define SST25L_CMD_AAI_PROGRAM 0xaf /* Auto address increment */ #define SST25L_STATUS_BUSY (1 << 0) /* Chip is busy */ #define SST25L_STATUS_WREN (1 << 1) /* Write enabled */ #define SST25L_STATUS_BP0 (1 << 2) /* Block protection 0 */ #define SST25L_STATUS_BP1 (1 << 3) /* Block protection 1 */ struct sst25l_flash { struct spi_device *spi; struct mutex lock; struct mtd_info mtd; int partitioned; }; struct flash_info { const char *name; uint16_t device_id; unsigned page_size; unsigned nr_pages; unsigned erase_size; }; #define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd) static struct flash_info __initdata sst25l_flash_info[] = { {"sst25lf020a", 0xbf43, 256, 1024, 4096}, {"sst25lf040a", 0xbf44, 256, 2048, 4096}, }; static int sst25l_status(struct sst25l_flash *flash, int 
*status) { unsigned char command, response; int err; command = SST25L_CMD_RDSR; err = spi_write_then_read(flash->spi, &command, 1, &response, 1); if (err < 0) return err; *status = response; return 0; } static int sst25l_write_enable(struct sst25l_flash *flash, int enable) { unsigned char command[2]; int status, err; command[0] = enable ? SST25L_CMD_WREN : SST25L_CMD_WRDI; err = spi_write(flash->spi, command, 1); if (err) return err; command[0] = SST25L_CMD_EWSR; err = spi_write(flash->spi, command, 1); if (err) return err; command[0] = SST25L_CMD_WRSR; command[1] = enable ? 0 : SST25L_STATUS_BP0 | SST25L_STATUS_BP1; err = spi_write(flash->spi, command, 2); if (err) return err; if (enable) { err = sst25l_status(flash, &status); if (err) return err; if (!(status & SST25L_STATUS_WREN)) return -EROFS; } return 0; } static int sst25l_wait_till_ready(struct sst25l_flash *flash) { unsigned long deadline; int status, err; deadline = jiffies + MAX_READY_WAIT_JIFFIES; do { err = sst25l_status(flash, &status); if (err) return err; if (!(status & SST25L_STATUS_BUSY)) return 0; cond_resched(); } while (!time_after_eq(jiffies, deadline)); return -ETIMEDOUT; } static int sst25l_erase_sector(struct sst25l_flash *flash, uint32_t offset) { unsigned char command[4]; int err; err = sst25l_write_enable(flash, 1); if (err) return err; command[0] = SST25L_CMD_SECTOR_ERASE; command[1] = offset >> 16; command[2] = offset >> 8; command[3] = offset; err = spi_write(flash->spi, command, 4); if (err) return err; err = sst25l_wait_till_ready(flash); if (err) return err; return sst25l_write_enable(flash, 0); } static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr) { struct sst25l_flash *flash = to_sst25l_flash(mtd); uint32_t addr, end; int err; /* Sanity checks */ if (instr->addr + instr->len > flash->mtd.size) return -EINVAL; if ((uint32_t)instr->len % mtd->erasesize) return -EINVAL; if ((uint32_t)instr->addr % mtd->erasesize) return -EINVAL; addr = instr->addr; end = addr + 
instr->len; mutex_lock(&flash->lock); err = sst25l_wait_till_ready(flash); if (err) { mutex_unlock(&flash->lock); return err; } while (addr < end) { err = sst25l_erase_sector(flash, addr); if (err) { mutex_unlock(&flash->lock); instr->state = MTD_ERASE_FAILED; dev_err(&flash->spi->dev, "Erase failed\n"); return err; } addr += mtd->erasesize; } mutex_unlock(&flash->lock); instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; } static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, unsigned char *buf) { struct sst25l_flash *flash = to_sst25l_flash(mtd); struct spi_transfer transfer[2]; struct spi_message message; unsigned char command[4]; int ret; /* Sanity checking */ if (len == 0) return 0; if (from + len > flash->mtd.size) return -EINVAL; if (retlen) *retlen = 0; spi_message_init(&message); memset(&transfer, 0, sizeof(transfer)); command[0] = SST25L_CMD_READ; command[1] = from >> 16; command[2] = from >> 8; command[3] = from; transfer[0].tx_buf = command; transfer[0].len = sizeof(command); spi_message_add_tail(&transfer[0], &message); transfer[1].rx_buf = buf; transfer[1].len = len; spi_message_add_tail(&transfer[1], &message); mutex_lock(&flash->lock); /* Wait for previous write/erase to complete */ ret = sst25l_wait_till_ready(flash); if (ret) { mutex_unlock(&flash->lock); return ret; } spi_sync(flash->spi, &message); if (retlen && message.actual_length > sizeof(command)) *retlen += message.actual_length - sizeof(command); mutex_unlock(&flash->lock); return 0; } static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const unsigned char *buf) { struct sst25l_flash *flash = to_sst25l_flash(mtd); int i, j, ret, bytes, copied = 0; unsigned char command[5]; /* Sanity checks */ if (!len) return 0; if (to + len > flash->mtd.size) return -EINVAL; if ((uint32_t)to % mtd->writesize) return -EINVAL; mutex_lock(&flash->lock); ret = sst25l_write_enable(flash, 1); if (ret) goto out; for (i = 0; i < 
len; i += mtd->writesize) { ret = sst25l_wait_till_ready(flash); if (ret) goto out; /* Write the first byte of the page */ command[0] = SST25L_CMD_AAI_PROGRAM; command[1] = (to + i) >> 16; command[2] = (to + i) >> 8; command[3] = (to + i); command[4] = buf[i]; ret = spi_write(flash->spi, command, 5); if (ret < 0) goto out; copied++; /* * Write the remaining bytes using auto address * increment mode */ bytes = min_t(uint32_t, mtd->writesize, len - i); for (j = 1; j < bytes; j++, copied++) { ret = sst25l_wait_till_ready(flash); if (ret) goto out; command[1] = buf[i + j]; ret = spi_write(flash->spi, command, 2); if (ret) goto out; } } out: ret = sst25l_write_enable(flash, 0); if (retlen) *retlen = copied; mutex_unlock(&flash->lock); return ret; } static struct flash_info *__init sst25l_match_device(struct spi_device *spi) { struct flash_info *flash_info = NULL; unsigned char command[4], response; int i, err; uint16_t id; command[0] = SST25L_CMD_READ_ID; command[1] = 0; command[2] = 0; command[3] = 0; err = spi_write_then_read(spi, command, sizeof(command), &response, 1); if (err < 0) { dev_err(&spi->dev, "error reading device id msb\n"); return NULL; } id = response << 8; command[0] = SST25L_CMD_READ_ID; command[1] = 0; command[2] = 0; command[3] = 1; err = spi_write_then_read(spi, command, sizeof(command), &response, 1); if (err < 0) { dev_err(&spi->dev, "error reading device id lsb\n"); return NULL; } id |= response; for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++) if (sst25l_flash_info[i].device_id == id) flash_info = &sst25l_flash_info[i]; if (!flash_info) dev_err(&spi->dev, "unknown id %.4x\n", id); return flash_info; } static int __init sst25l_probe(struct spi_device *spi) { struct flash_info *flash_info; struct sst25l_flash *flash; struct flash_platform_data *data; int ret, i; flash_info = sst25l_match_device(spi); if (!flash_info) return -ENODEV; flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL); if (!flash) return -ENOMEM; flash->spi = spi; 
mutex_init(&flash->lock); dev_set_drvdata(&spi->dev, flash); data = spi->dev.platform_data; if (data && data->name) flash->mtd.name = data->name; else flash->mtd.name = dev_name(&spi->dev); flash->mtd.type = MTD_NORFLASH; flash->mtd.flags = MTD_CAP_NORFLASH; flash->mtd.erasesize = flash_info->erase_size; flash->mtd.writesize = flash_info->page_size; flash->mtd.size = flash_info->page_size * flash_info->nr_pages; flash->mtd.erase = sst25l_erase; flash->mtd.read = sst25l_read; flash->mtd.write = sst25l_write; dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, (long long)flash->mtd.size >> 10); DEBUG(MTD_DEBUG_LEVEL2, "mtd .name = %s, .size = 0x%llx (%lldMiB) " ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", flash->mtd.name, (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), flash->mtd.erasesize, flash->mtd.erasesize / 1024, flash->mtd.numeraseregions); if (flash->mtd.numeraseregions) for (i = 0; i < flash->mtd.numeraseregions; i++) DEBUG(MTD_DEBUG_LEVEL2, "mtd.eraseregions[%d] = { .offset = 0x%llx, " ".erasesize = 0x%.8x (%uKiB), " ".numblocks = %d }\n", i, (long long)flash->mtd.eraseregions[i].offset, flash->mtd.eraseregions[i].erasesize, flash->mtd.eraseregions[i].erasesize / 1024, flash->mtd.eraseregions[i].numblocks); if (mtd_has_partitions()) { struct mtd_partition *parts = NULL; int nr_parts = 0; if (mtd_has_cmdlinepart()) { static const char *part_probes[] = {"cmdlinepart", NULL}; nr_parts = parse_mtd_partitions(&flash->mtd, part_probes, &parts, 0); } if (nr_parts <= 0 && data && data->parts) { parts = data->parts; nr_parts = data->nr_parts; } if (nr_parts > 0) { for (i = 0; i < nr_parts; i++) { DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " "{.name = %s, .offset = 0x%llx, " ".size = 0x%llx (%lldKiB) }\n", i, parts[i].name, (long long)parts[i].offset, (long long)parts[i].size, (long long)(parts[i].size >> 10)); } flash->partitioned = 1; return add_mtd_partitions(&flash->mtd, parts, nr_parts); } } else if (data->nr_parts) { 
dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", data->nr_parts, data->name); } ret = add_mtd_device(&flash->mtd); if (ret == 1) { kfree(flash); dev_set_drvdata(&spi->dev, NULL); return -ENODEV; } return 0; } static int __exit sst25l_remove(struct spi_device *spi) { struct sst25l_flash *flash = dev_get_drvdata(&spi->dev); int ret; if (mtd_has_partitions() && flash->partitioned) ret = del_mtd_partitions(&flash->mtd); else ret = del_mtd_device(&flash->mtd); if (ret == 0) kfree(flash); return ret; } static struct spi_driver sst25l_driver = { .driver = { .name = "sst25l", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = sst25l_probe, .remove = __exit_p(sst25l_remove), }; static int __init sst25l_init(void) { return spi_register_driver(&sst25l_driver); } static void __exit sst25l_exit(void) { spi_unregister_driver(&sst25l_driver); } module_init(sst25l_init); module_exit(sst25l_exit); MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips"); MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, " "Ryan Mallon <ryan@bluewatersys.com>"); MODULE_LICENSE("GPL");
gpl-2.0
unusual-thoughts/linux-xps13
sound/pci/ymfpci/ymfpci_main.c
1075
72902
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of YMF724/740/744/754 chips * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/firmware.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/io.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/tlv.h> #include "ymfpci.h" #include <sound/asoundef.h> #include <sound/mpu401.h> #include <asm/byteorder.h> /* * common I/O routines */ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip); static inline u8 snd_ymfpci_readb(struct snd_ymfpci *chip, u32 offset) { return readb(chip->reg_area_virt + offset); } static inline void snd_ymfpci_writeb(struct snd_ymfpci *chip, u32 offset, u8 val) { writeb(val, chip->reg_area_virt + offset); } static inline u16 snd_ymfpci_readw(struct snd_ymfpci *chip, u32 offset) { return readw(chip->reg_area_virt + offset); } static inline void snd_ymfpci_writew(struct snd_ymfpci *chip, u32 offset, u16 val) { writew(val, chip->reg_area_virt + offset); } static inline u32 snd_ymfpci_readl(struct snd_ymfpci *chip, u32 offset) { return readl(chip->reg_area_virt + offset); } static inline void 
snd_ymfpci_writel(struct snd_ymfpci *chip, u32 offset, u32 val) { writel(val, chip->reg_area_virt + offset); } static int snd_ymfpci_codec_ready(struct snd_ymfpci *chip, int secondary) { unsigned long end_time; u32 reg = secondary ? YDSXGR_SECSTATUSADR : YDSXGR_PRISTATUSADR; end_time = jiffies + msecs_to_jiffies(750); do { if ((snd_ymfpci_readw(chip, reg) & 0x8000) == 0) return 0; schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); dev_err(chip->card->dev, "codec_ready: codec %i is not ready [0x%x]\n", secondary, snd_ymfpci_readw(chip, reg)); return -EBUSY; } static void snd_ymfpci_codec_write(struct snd_ac97 *ac97, u16 reg, u16 val) { struct snd_ymfpci *chip = ac97->private_data; u32 cmd; snd_ymfpci_codec_ready(chip, 0); cmd = ((YDSXG_AC97WRITECMD | reg) << 16) | val; snd_ymfpci_writel(chip, YDSXGR_AC97CMDDATA, cmd); } static u16 snd_ymfpci_codec_read(struct snd_ac97 *ac97, u16 reg) { struct snd_ymfpci *chip = ac97->private_data; if (snd_ymfpci_codec_ready(chip, 0)) return ~0; snd_ymfpci_writew(chip, YDSXGR_AC97CMDADR, YDSXG_AC97READCMD | reg); if (snd_ymfpci_codec_ready(chip, 0)) return ~0; if (chip->device_id == PCI_DEVICE_ID_YAMAHA_744 && chip->rev < 2) { int i; for (i = 0; i < 600; i++) snd_ymfpci_readw(chip, YDSXGR_PRISTATUSDATA); } return snd_ymfpci_readw(chip, YDSXGR_PRISTATUSDATA); } /* * Misc routines */ static u32 snd_ymfpci_calc_delta(u32 rate) { switch (rate) { case 8000: return 0x02aaab00; case 11025: return 0x03accd00; case 16000: return 0x05555500; case 22050: return 0x07599a00; case 32000: return 0x0aaaab00; case 44100: return 0x0eb33300; default: return ((rate << 16) / 375) << 5; } } static u32 def_rate[8] = { 100, 2000, 8000, 11025, 16000, 22050, 32000, 48000 }; static u32 snd_ymfpci_calc_lpfK(u32 rate) { u32 i; static u32 val[8] = { 0x00570000, 0x06AA0000, 0x18B20000, 0x20930000, 0x2B9A0000, 0x35A10000, 0x3EAA0000, 0x40000000 }; if (rate == 44100) return 0x40000000; /* FIXME: What's the right value? 
*/ for (i = 0; i < 8; i++) if (rate <= def_rate[i]) return val[i]; return val[0]; } static u32 snd_ymfpci_calc_lpfQ(u32 rate) { u32 i; static u32 val[8] = { 0x35280000, 0x34A70000, 0x32020000, 0x31770000, 0x31390000, 0x31C90000, 0x33D00000, 0x40000000 }; if (rate == 44100) return 0x370A0000; for (i = 0; i < 8; i++) if (rate <= def_rate[i]) return val[i]; return val[0]; } /* * Hardware start management */ static void snd_ymfpci_hw_start(struct snd_ymfpci *chip) { unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); if (chip->start_count++ > 0) goto __end; snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) | 3); chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT) & 1; __end: spin_unlock_irqrestore(&chip->reg_lock, flags); } static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip) { unsigned long flags; long timeout = 1000; spin_lock_irqsave(&chip->reg_lock, flags); if (--chip->start_count > 0) goto __end; snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) & ~3); while (timeout-- > 0) { if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0) break; } if (atomic_read(&chip->interrupt_sleep_count)) { atomic_set(&chip->interrupt_sleep_count, 0); wake_up(&chip->interrupt_sleep); } __end: spin_unlock_irqrestore(&chip->reg_lock, flags); } /* * Playback voice management */ static int voice_alloc(struct snd_ymfpci *chip, enum snd_ymfpci_voice_type type, int pair, struct snd_ymfpci_voice **rvoice) { struct snd_ymfpci_voice *voice, *voice2; int idx; *rvoice = NULL; for (idx = 0; idx < YDSXG_PLAYBACK_VOICES; idx += pair ? 2 : 1) { voice = &chip->voices[idx]; voice2 = pair ? 
&chip->voices[idx+1] : NULL; if (voice->use || (voice2 && voice2->use)) continue; voice->use = 1; if (voice2) voice2->use = 1; switch (type) { case YMFPCI_PCM: voice->pcm = 1; if (voice2) voice2->pcm = 1; break; case YMFPCI_SYNTH: voice->synth = 1; break; case YMFPCI_MIDI: voice->midi = 1; break; } snd_ymfpci_hw_start(chip); if (voice2) snd_ymfpci_hw_start(chip); *rvoice = voice; return 0; } return -ENOMEM; } static int snd_ymfpci_voice_alloc(struct snd_ymfpci *chip, enum snd_ymfpci_voice_type type, int pair, struct snd_ymfpci_voice **rvoice) { unsigned long flags; int result; if (snd_BUG_ON(!rvoice)) return -EINVAL; if (snd_BUG_ON(pair && type != YMFPCI_PCM)) return -EINVAL; spin_lock_irqsave(&chip->voice_lock, flags); for (;;) { result = voice_alloc(chip, type, pair, rvoice); if (result == 0 || type != YMFPCI_PCM) break; /* TODO: synth/midi voice deallocation */ break; } spin_unlock_irqrestore(&chip->voice_lock, flags); return result; } static int snd_ymfpci_voice_free(struct snd_ymfpci *chip, struct snd_ymfpci_voice *pvoice) { unsigned long flags; if (snd_BUG_ON(!pvoice)) return -EINVAL; snd_ymfpci_hw_stop(chip); spin_lock_irqsave(&chip->voice_lock, flags); if (pvoice->number == chip->src441_used) { chip->src441_used = -1; pvoice->ypcm->use_441_slot = 0; } pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = 0; pvoice->ypcm = NULL; pvoice->interrupt = NULL; spin_unlock_irqrestore(&chip->voice_lock, flags); return 0; } /* * PCM part */ static void snd_ymfpci_pcm_interrupt(struct snd_ymfpci *chip, struct snd_ymfpci_voice *voice) { struct snd_ymfpci_pcm *ypcm; u32 pos, delta; if ((ypcm = voice->ypcm) == NULL) return; if (ypcm->substream == NULL) return; spin_lock(&chip->reg_lock); if (ypcm->running) { pos = le32_to_cpu(voice->bank[chip->active_bank].start); if (pos < ypcm->last_pos) delta = pos + (ypcm->buffer_size - ypcm->last_pos); else delta = pos - ypcm->last_pos; ypcm->period_pos += delta; ypcm->last_pos = pos; if (ypcm->period_pos >= ypcm->period_size) 
{ /* dev_dbg(chip->card->dev, "done - active_bank = 0x%x, start = 0x%x\n", chip->active_bank, voice->bank[chip->active_bank].start); */ ypcm->period_pos %= ypcm->period_size; spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(ypcm->substream); spin_lock(&chip->reg_lock); } if (unlikely(ypcm->update_pcm_vol)) { unsigned int subs = ypcm->substream->number; unsigned int next_bank = 1 - chip->active_bank; struct snd_ymfpci_playback_bank *bank; u32 volume; bank = &voice->bank[next_bank]; volume = cpu_to_le32(chip->pcm_mixer[subs].left << 15); bank->left_gain_end = volume; if (ypcm->output_rear) bank->eff2_gain_end = volume; if (ypcm->voices[1]) bank = &ypcm->voices[1]->bank[next_bank]; volume = cpu_to_le32(chip->pcm_mixer[subs].right << 15); bank->right_gain_end = volume; if (ypcm->output_rear) bank->eff3_gain_end = volume; ypcm->update_pcm_vol--; } } spin_unlock(&chip->reg_lock); } static void snd_ymfpci_pcm_capture_interrupt(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_ymfpci *chip = ypcm->chip; u32 pos, delta; spin_lock(&chip->reg_lock); if (ypcm->running) { pos = le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift; if (pos < ypcm->last_pos) delta = pos + (ypcm->buffer_size - ypcm->last_pos); else delta = pos - ypcm->last_pos; ypcm->period_pos += delta; ypcm->last_pos = pos; if (ypcm->period_pos >= ypcm->period_size) { ypcm->period_pos %= ypcm->period_size; /* dev_dbg(chip->card->dev, "done - active_bank = 0x%x, start = 0x%x\n", chip->active_bank, voice->bank[chip->active_bank].start); */ spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(substream); spin_lock(&chip->reg_lock); } } spin_unlock(&chip->reg_lock); } static int snd_ymfpci_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_ymfpci_pcm *ypcm = 
substream->runtime->private_data; struct snd_kcontrol *kctl = NULL; int result = 0; spin_lock(&chip->reg_lock); if (ypcm->voices[0] == NULL) { result = -EINVAL; goto __unlock; } switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: chip->ctrl_playback[ypcm->voices[0]->number + 1] = cpu_to_le32(ypcm->voices[0]->bank_addr); if (ypcm->voices[1] != NULL && !ypcm->use_441_slot) chip->ctrl_playback[ypcm->voices[1]->number + 1] = cpu_to_le32(ypcm->voices[1]->bank_addr); ypcm->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: if (substream->pcm == chip->pcm && !ypcm->use_441_slot) { kctl = chip->pcm_mixer[substream->number].ctl; kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; } /* fall through */ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: chip->ctrl_playback[ypcm->voices[0]->number + 1] = 0; if (ypcm->voices[1] != NULL && !ypcm->use_441_slot) chip->ctrl_playback[ypcm->voices[1]->number + 1] = 0; ypcm->running = 0; break; default: result = -EINVAL; break; } __unlock: spin_unlock(&chip->reg_lock); if (kctl) snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id); return result; } static int snd_ymfpci_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data; int result = 0; u32 tmp; spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: tmp = snd_ymfpci_readl(chip, YDSXGR_MAPOFREC) | (1 << ypcm->capture_bank_number); snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, tmp); ypcm->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: tmp = snd_ymfpci_readl(chip, YDSXGR_MAPOFREC) & ~(1 << ypcm->capture_bank_number); snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, tmp); ypcm->running = 0; break; default: result = -EINVAL; 
break; } spin_unlock(&chip->reg_lock); return result; } static int snd_ymfpci_pcm_voice_alloc(struct snd_ymfpci_pcm *ypcm, int voices) { int err; if (ypcm->voices[1] != NULL && voices < 2) { snd_ymfpci_voice_free(ypcm->chip, ypcm->voices[1]); ypcm->voices[1] = NULL; } if (voices == 1 && ypcm->voices[0] != NULL) return 0; /* already allocated */ if (voices == 2 && ypcm->voices[0] != NULL && ypcm->voices[1] != NULL) return 0; /* already allocated */ if (voices > 1) { if (ypcm->voices[0] != NULL && ypcm->voices[1] == NULL) { snd_ymfpci_voice_free(ypcm->chip, ypcm->voices[0]); ypcm->voices[0] = NULL; } } err = snd_ymfpci_voice_alloc(ypcm->chip, YMFPCI_PCM, voices > 1, &ypcm->voices[0]); if (err < 0) return err; ypcm->voices[0]->ypcm = ypcm; ypcm->voices[0]->interrupt = snd_ymfpci_pcm_interrupt; if (voices > 1) { ypcm->voices[1] = &ypcm->chip->voices[ypcm->voices[0]->number + 1]; ypcm->voices[1]->ypcm = ypcm; } return 0; } static void snd_ymfpci_pcm_init_voice(struct snd_ymfpci_pcm *ypcm, unsigned int voiceidx, struct snd_pcm_runtime *runtime, int has_pcm_volume) { struct snd_ymfpci_voice *voice = ypcm->voices[voiceidx]; u32 format; u32 delta = snd_ymfpci_calc_delta(runtime->rate); u32 lpfQ = snd_ymfpci_calc_lpfQ(runtime->rate); u32 lpfK = snd_ymfpci_calc_lpfK(runtime->rate); struct snd_ymfpci_playback_bank *bank; unsigned int nbank; u32 vol_left, vol_right; u8 use_left, use_right; unsigned long flags; if (snd_BUG_ON(!voice)) return; if (runtime->channels == 1) { use_left = 1; use_right = 1; } else { use_left = (voiceidx & 1) == 0; use_right = !use_left; } if (has_pcm_volume) { vol_left = cpu_to_le32(ypcm->chip->pcm_mixer [ypcm->substream->number].left << 15); vol_right = cpu_to_le32(ypcm->chip->pcm_mixer [ypcm->substream->number].right << 15); } else { vol_left = cpu_to_le32(0x40000000); vol_right = cpu_to_le32(0x40000000); } spin_lock_irqsave(&ypcm->chip->voice_lock, flags); format = runtime->channels == 2 ? 
0x00010000 : 0; if (snd_pcm_format_width(runtime->format) == 8) format |= 0x80000000; else if (ypcm->chip->device_id == PCI_DEVICE_ID_YAMAHA_754 && runtime->rate == 44100 && runtime->channels == 2 && voiceidx == 0 && (ypcm->chip->src441_used == -1 || ypcm->chip->src441_used == voice->number)) { ypcm->chip->src441_used = voice->number; ypcm->use_441_slot = 1; format |= 0x10000000; } if (ypcm->chip->src441_used == voice->number && (format & 0x10000000) == 0) { ypcm->chip->src441_used = -1; ypcm->use_441_slot = 0; } if (runtime->channels == 2 && (voiceidx & 1) != 0) format |= 1; spin_unlock_irqrestore(&ypcm->chip->voice_lock, flags); for (nbank = 0; nbank < 2; nbank++) { bank = &voice->bank[nbank]; memset(bank, 0, sizeof(*bank)); bank->format = cpu_to_le32(format); bank->base = cpu_to_le32(runtime->dma_addr); bank->loop_end = cpu_to_le32(ypcm->buffer_size); bank->lpfQ = cpu_to_le32(lpfQ); bank->delta = bank->delta_end = cpu_to_le32(delta); bank->lpfK = bank->lpfK_end = cpu_to_le32(lpfK); bank->eg_gain = bank->eg_gain_end = cpu_to_le32(0x40000000); if (ypcm->output_front) { if (use_left) { bank->left_gain = bank->left_gain_end = vol_left; } if (use_right) { bank->right_gain = bank->right_gain_end = vol_right; } } if (ypcm->output_rear) { if (!ypcm->swap_rear) { if (use_left) { bank->eff2_gain = bank->eff2_gain_end = vol_left; } if (use_right) { bank->eff3_gain = bank->eff3_gain_end = vol_right; } } else { /* The SPDIF out channels seem to be swapped, so we have * to swap them here, too. The rear analog out channels * will be wrong, but otherwise AC3 would not work. 
*/ if (use_left) { bank->eff3_gain = bank->eff3_gain_end = vol_left; } if (use_right) { bank->eff2_gain = bank->eff2_gain_end = vol_right; } } } } } static int snd_ymfpci_ac3_init(struct snd_ymfpci *chip) { if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 4096, &chip->ac3_tmp_base) < 0) return -ENOMEM; chip->bank_effect[3][0]->base = chip->bank_effect[3][1]->base = cpu_to_le32(chip->ac3_tmp_base.addr); chip->bank_effect[3][0]->loop_end = chip->bank_effect[3][1]->loop_end = cpu_to_le32(1024); chip->bank_effect[4][0]->base = chip->bank_effect[4][1]->base = cpu_to_le32(chip->ac3_tmp_base.addr + 2048); chip->bank_effect[4][0]->loop_end = chip->bank_effect[4][1]->loop_end = cpu_to_le32(1024); spin_lock_irq(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, snd_ymfpci_readl(chip, YDSXGR_MAPOFEFFECT) | 3 << 3); spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_ymfpci_ac3_done(struct snd_ymfpci *chip) { spin_lock_irq(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, snd_ymfpci_readl(chip, YDSXGR_MAPOFEFFECT) & ~(3 << 3)); spin_unlock_irq(&chip->reg_lock); // snd_ymfpci_irq_wait(chip); if (chip->ac3_tmp_base.area) { snd_dma_free_pages(&chip->ac3_tmp_base); chip->ac3_tmp_base.area = NULL; } return 0; } static int snd_ymfpci_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; int err; if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; if ((err = snd_ymfpci_pcm_voice_alloc(ypcm, params_channels(hw_params))) < 0) return err; return 0; } static int snd_ymfpci_playback_hw_free(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; if (runtime->private_data == NULL) return 0; ypcm = runtime->private_data; 
/* wait, until the PCI operations are not finished */
	snd_ymfpci_irq_wait(chip);
	snd_pcm_lib_free_pages(substream);
	/* Free voices in reverse order of allocation; NULL them so a repeated
	 * hw_free is harmless. */
	if (ypcm->voices[1]) {
		snd_ymfpci_voice_free(chip, ypcm->voices[1]);
		ypcm->voices[1] = NULL;
	}
	if (ypcm->voices[0]) {
		snd_ymfpci_voice_free(chip, ypcm->voices[0]);
		ypcm->voices[0] = NULL;
	}
	return 0;
}

/*
 * prepare callback for playback: latch period/buffer geometry into the
 * driver's per-stream state, program each allocated voice, and (for the
 * primary PCM device, non-44.1 kHz slot path) re-activate the matching
 * per-voice "PCM Playback Volume" mixer control.
 */
static int snd_ymfpci_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;
	struct snd_kcontrol *kctl;
	unsigned int nvoice;

	ypcm->period_size = runtime->period_size;
	ypcm->buffer_size = runtime->buffer_size;
	ypcm->period_pos = 0;
	ypcm->last_pos = 0;
	for (nvoice = 0; nvoice < runtime->channels; nvoice++)
		snd_ymfpci_pcm_init_voice(ypcm, nvoice, runtime,
					  substream->pcm == chip->pcm);
	if (substream->pcm == chip->pcm && !ypcm->use_441_slot) {
		kctl = chip->pcm_mixer[substream->number].ctl;
		kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id);
	}
	return 0;
}

/* hw_params callback for capture: only the DMA buffer is needed. */
static int snd_ymfpci_capture_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}

/* hw_free callback for capture: drain pending hardware work, then free. */
static int snd_ymfpci_capture_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);

	/* wait, until the PCI operations are not finished */
	snd_ymfpci_irq_wait(chip);
	return snd_pcm_lib_free_pages(substream);
}

/*
 * prepare callback for capture: compute the hardware slot rate and format
 * word from the runtime parameters, program the REC or ADC register pair
 * (depending on which capture bank this stream owns), and initialize both
 * hardware capture banks to point at the DMA buffer.
 * ypcm->shift ends up as log2(bytes per frame): +1 for stereo, +1 for 16-bit.
 */
static int snd_ymfpci_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;
	struct snd_ymfpci_capture_bank * bank;
	int nbank;
	u32 rate, format;

	ypcm->period_size = runtime->period_size;
	ypcm->buffer_size = runtime->buffer_size;
	ypcm->period_pos = 0;
	ypcm->last_pos = 0;
	ypcm->shift = 0;
	/* 4096/48000-based fixed-point ratio; hardware counts from rate-1. */
	rate = ((48000 * 4096) / runtime->rate) - 1;
	format = 0;
	if (runtime->channels == 2) {
		format |= 2;
		ypcm->shift++;
	}
	if (snd_pcm_format_width(runtime->format) == 8)
		format |= 1;
	else
		ypcm->shift++;
	switch (ypcm->capture_bank_number) {
	case 0:
		snd_ymfpci_writel(chip, YDSXGR_RECFORMAT, format);
		snd_ymfpci_writel(chip, YDSXGR_RECSLOTSR, rate);
		break;
	case 1:
		snd_ymfpci_writel(chip, YDSXGR_ADCFORMAT, format);
		snd_ymfpci_writel(chip, YDSXGR_ADCSLOTSR, rate);
		break;
	}
	for (nbank = 0; nbank < 2; nbank++) {
		bank = chip->bank_capture[ypcm->capture_bank_number][nbank];
		bank->base = cpu_to_le32(runtime->dma_addr);
		bank->loop_end = cpu_to_le32(ypcm->buffer_size << ypcm->shift);
		bank->start = 0;
		bank->num_of_loops = 0;
	}
	return 0;
}

/*
 * pointer callback for playback: report the hardware position of voice 0
 * from the currently active bank; 0 when the stream is not running.
 */
static snd_pcm_uframes_t snd_ymfpci_playback_pointer(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;
	struct snd_ymfpci_voice *voice = ypcm->voices[0];

	if (!(ypcm->running && voice))
		return 0;
	return le32_to_cpu(voice->bank[chip->active_bank].start);
}

/*
 * pointer callback for capture: the bank reports a byte offset, so shift
 * down by ypcm->shift (log2 bytes per frame) to get frames.
 */
static snd_pcm_uframes_t snd_ymfpci_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;

	if (!ypcm->running)
		return 0;
	return le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift;
}

/*
 * Wait (bounded, up to 4 x 50 ms) for the chip to go idle, sleeping on
 * chip->interrupt_sleep which the IRQ handler wakes.  If YDSXGR_MODE bits
 * 0..1 are already clear the chip is idle and the iteration is skipped.
 */
static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
{
	wait_queue_t wait;
	int loops = 4;

	while (loops-- > 0) {
		if ((snd_ymfpci_readl(chip, YDSXGR_MODE) & 3) == 0)
			continue;
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&chip->interrupt_sleep, &wait);
		atomic_inc(&chip->interrupt_sleep_count);
		schedule_timeout_uninterruptible(msecs_to_jiffies(50));
		remove_wait_queue(&chip->interrupt_sleep, &wait);
	}
}

static
irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
{
	/*
	 * Primary interrupt handler: on a frame interrupt (status bit 31),
	 * latch which hardware bank is active, dispatch per-voice and
	 * per-capture-stream callbacks under voice_lock, acknowledge the
	 * interrupt, and wake anyone sleeping in snd_ymfpci_irq_wait().
	 * Also services the chip timer and the MPU401 UART if present.
	 */
	struct snd_ymfpci *chip = dev_id;
	u32 status, nvoice, mode;
	struct snd_ymfpci_voice *voice;

	status = snd_ymfpci_readl(chip, YDSXGR_STATUS);
	if (status & 0x80000000) {
		chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT) & 1;
		spin_lock(&chip->voice_lock);
		for (nvoice = 0; nvoice < YDSXG_PLAYBACK_VOICES; nvoice++) {
			voice = &chip->voices[nvoice];
			if (voice->interrupt)
				voice->interrupt(chip, voice);
		}
		for (nvoice = 0; nvoice < YDSXG_CAPTURE_VOICES; nvoice++) {
			if (chip->capture_substream[nvoice])
				snd_ymfpci_pcm_capture_interrupt(chip->capture_substream[nvoice]);
		}
#if 0
		for (nvoice = 0; nvoice < YDSXG_EFFECT_VOICES; nvoice++) {
			if (chip->effect_substream[nvoice])
				snd_ymfpci_pcm_effect_interrupt(chip->effect_substream[nvoice]);
		}
#endif
		spin_unlock(&chip->voice_lock);
		spin_lock(&chip->reg_lock);
		/* ack frame interrupt, then set mode bit 1 (presumably kicks
		 * the next DSP frame — TODO confirm against YMF datasheet) */
		snd_ymfpci_writel(chip, YDSXGR_STATUS, 0x80000000);
		mode = snd_ymfpci_readl(chip, YDSXGR_MODE) | 2;
		snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
		spin_unlock(&chip->reg_lock);

		if (atomic_read(&chip->interrupt_sleep_count)) {
			atomic_set(&chip->interrupt_sleep_count, 0);
			wake_up(&chip->interrupt_sleep);
		}
	}

	/* Timer interrupt (INTFLAG bit 0); always write back to acknowledge. */
	status = snd_ymfpci_readw(chip, YDSXGR_INTFLAG);
	if (status & 1) {
		if (chip->timer)
			snd_timer_interrupt(chip->timer, chip->timer_ticks);
	}
	snd_ymfpci_writew(chip, YDSXGR_INTFLAG, status);

	if (chip->rawmidi)
		snd_mpu401_uart_interrupt(irq, chip->rawmidi->private_data);
	return IRQ_HANDLED;
}

/* Hardware capability description advertised for playback streams. */
static struct snd_pcm_hardware snd_ymfpci_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		8000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	256 * 1024, /* FIXME: enough? */
	.period_bytes_min =	64,
	.period_bytes_max =	256 * 1024, /* FIXME: enough? */
	.periods_min =		3,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* Hardware capability description advertised for capture streams. */
static struct snd_pcm_hardware snd_ymfpci_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		8000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	256 * 1024, /* FIXME: enough? */
	.period_bytes_min =	64,
	.period_bytes_max =	256 * 1024, /* FIXME: enough? */
	.periods_min =		3,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* runtime->private_free hook: releases the per-stream snd_ymfpci_pcm. */
static void snd_ymfpci_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
	kfree(runtime->private_data);
}

/*
 * Common open path for all playback stream flavours: install the hardware
 * caps, constrain the period time and sample rate, and allocate the
 * per-stream snd_ymfpci_pcm state (freed via private_free).
 */
static int snd_ymfpci_playback_open_1(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	runtime->hw = snd_ymfpci_playback;
	/* FIXME? True value is 256/48 = 5.33333 ms */
	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5334, UINT_MAX);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_noresample(runtime, 48000);
	if (err < 0)
		return err;

	ypcm = kzalloc(sizeof(*ypcm), GFP_KERNEL);
	if (ypcm == NULL)
		return -ENOMEM;
	ypcm->chip = chip;
	ypcm->type = PLAYBACK_VOICE;
	ypcm->substream = substream;
	runtime->private_data = ypcm;
	runtime->private_free = snd_ymfpci_pcm_free_substream;
	return 0;
}

/* call with spinlock held */
static void ymfpci_open_extension(struct snd_ymfpci *chip)
{
	if (! chip->rear_opened) {
		if (!
chip->spdif_opened)
			/* set AC3 */
			snd_ymfpci_writel(chip, YDSXGR_MODE,
					  snd_ymfpci_readl(chip, YDSXGR_MODE) | (1 << 30));
		/* enable second codec (4CHEN) */
		snd_ymfpci_writew(chip, YDSXGR_SECCONFIG,
				  (snd_ymfpci_readw(chip, YDSXGR_SECCONFIG) & ~0x0330) | 0x0010);
	}
}

/* call with spinlock held; mirror image of ymfpci_open_extension() */
static void ymfpci_close_extension(struct snd_ymfpci *chip)
{
	if (! chip->rear_opened) {
		if (! chip->spdif_opened)
			snd_ymfpci_writel(chip, YDSXGR_MODE,
					  snd_ymfpci_readl(chip, YDSXGR_MODE) & ~(1 << 30));
		snd_ymfpci_writew(chip, YDSXGR_SECCONFIG,
				  (snd_ymfpci_readw(chip, YDSXGR_SECCONFIG) & ~0x0330) & ~0x0010);
	}
}

/*
 * open callback for the front PCM: front output always on; rear output is
 * mirrored from front only when "4ch Duplication" mode is enabled, in which
 * case the rear extension is opened and refcounted.
 */
static int snd_ymfpci_playback_open(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
		return err;
	ypcm = runtime->private_data;
	ypcm->output_front = 1;
	ypcm->output_rear = chip->mode_dup4ch ? 1 : 0;
	ypcm->swap_rear = 0;
	spin_lock_irq(&chip->reg_lock);
	if (ypcm->output_rear) {
		ymfpci_open_extension(chip);
		chip->rear_opened++;
	}
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/*
 * open callback for the S/PDIF PCM: routes to rear/S/PDIF only (no front),
 * enables the S/PDIF output, loads the current channel-status bits, and
 * activates the inactive "PCM Stream" IEC958 control.
 */
static int snd_ymfpci_playback_spdif_open(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
		return err;
	ypcm = runtime->private_data;
	ypcm->output_front = 0;
	ypcm->output_rear = 1;
	ypcm->swap_rear = 1;
	spin_lock_irq(&chip->reg_lock);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL,
			  snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) | 2);
	ymfpci_open_extension(chip);
	chip->spdif_pcm_bits = chip->spdif_bits;
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_pcm_bits);
	chip->spdif_opened++;
	spin_unlock_irq(&chip->reg_lock);

	chip->spdif_pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
		       SNDRV_CTL_EVENT_MASK_INFO, &chip->spdif_pcm_ctl->id);
	return 0;
}

/*
 * open callback for the dedicated rear (4ch) PCM: rear output without the
 * channel swap used by the S/PDIF path.
 */
static int snd_ymfpci_playback_4ch_open(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
		return err;
	ypcm = runtime->private_data;
	ypcm->output_front = 0;
	ypcm->output_rear = 1;
	ypcm->swap_rear = 0;
	spin_lock_irq(&chip->reg_lock);
	ymfpci_open_extension(chip);
	chip->rear_opened++;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/*
 * Common capture open: capture_bank_number (0 = REC, 1 = ADC/AC97) selects
 * which hardware bank this stream owns; the substream is registered so the
 * IRQ handler can dispatch to it, and the chip is started.
 */
static int snd_ymfpci_capture_open(struct snd_pcm_substream *substream,
				   u32 capture_bank_number)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm;
	int err;

	runtime->hw = snd_ymfpci_capture;
	/* FIXME? True value is 256/48 = 5.33333 ms */
	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5334, UINT_MAX);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_noresample(runtime, 48000);
	if (err < 0)
		return err;

	ypcm = kzalloc(sizeof(*ypcm), GFP_KERNEL);
	if (ypcm == NULL)
		return -ENOMEM;
	ypcm->chip = chip;
	ypcm->type = capture_bank_number + CAPTURE_REC;
	ypcm->substream = substream;
	ypcm->capture_bank_number = capture_bank_number;
	chip->capture_substream[capture_bank_number] = substream;
	runtime->private_data = ypcm;
	runtime->private_free = snd_ymfpci_pcm_free_substream;
	snd_ymfpci_hw_start(chip);
	return 0;
}

static int snd_ymfpci_capture_rec_open(struct snd_pcm_substream *substream)
{
	return snd_ymfpci_capture_open(substream, 0);
}

static int snd_ymfpci_capture_ac97_open(struct snd_pcm_substream *substream)
{
	return snd_ymfpci_capture_open(substream, 1);
}

/* Shared tail for all playback close paths (currently a no-op). */
static int snd_ymfpci_playback_close_1(struct snd_pcm_substream *substream)
{
	return 0;
}

static int snd_ymfpci_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip =
snd_pcm_substream_chip(substream);
	struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data;

	/* Drop the rear refcount taken at open (if any) and possibly power
	 * down the rear extension. */
	spin_lock_irq(&chip->reg_lock);
	if (ypcm->output_rear && chip->rear_opened > 0) {
		chip->rear_opened--;
		ymfpci_close_extension(chip);
	}
	spin_unlock_irq(&chip->reg_lock);
	return snd_ymfpci_playback_close_1(substream);
}

/*
 * close callback for the S/PDIF PCM: disable the S/PDIF PCM output, restore
 * the default channel-status bits, and deactivate the "PCM Stream" control.
 */
static int snd_ymfpci_playback_spdif_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);

	spin_lock_irq(&chip->reg_lock);
	chip->spdif_opened = 0;
	ymfpci_close_extension(chip);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL,
			  snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & ~2);
	snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits);
	spin_unlock_irq(&chip->reg_lock);
	chip->spdif_pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
		       SNDRV_CTL_EVENT_MASK_INFO, &chip->spdif_pcm_ctl->id);
	return snd_ymfpci_playback_close_1(substream);
}

/* close callback for the rear (4ch) PCM: unconditionally drops the rear
 * refcount it took at open. */
static int snd_ymfpci_playback_4ch_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);

	spin_lock_irq(&chip->reg_lock);
	if (chip->rear_opened > 0) {
		chip->rear_opened--;
		ymfpci_close_extension(chip);
	}
	spin_unlock_irq(&chip->reg_lock);
	return snd_ymfpci_playback_close_1(substream);
}

/* close callback for capture: unregister from the IRQ dispatch table and
 * release the hardware start refcount. */
static int snd_ymfpci_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_ymfpci *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_ymfpci_pcm *ypcm = runtime->private_data;

	if (ypcm != NULL) {
		chip->capture_substream[ypcm->capture_bank_number] = NULL;
		snd_ymfpci_hw_stop(chip);
	}
	return 0;
}

static struct snd_pcm_ops snd_ymfpci_playback_ops = {
	.open =			snd_ymfpci_playback_open,
	.close =		snd_ymfpci_playback_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =		snd_ymfpci_playback_pointer,
};

static struct snd_pcm_ops snd_ymfpci_capture_rec_ops = {
	.open =			snd_ymfpci_capture_rec_open,
	.close =		snd_ymfpci_capture_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_capture_hw_params,
	.hw_free =		snd_ymfpci_capture_hw_free,
	.prepare =		snd_ymfpci_capture_prepare,
	.trigger =		snd_ymfpci_capture_trigger,
	.pointer =		snd_ymfpci_capture_pointer,
};

/*
 * Create the primary PCM device: 32 playback substreams (one per mixer
 * slot), 1 capture substream, with DMA buffers preallocated and a standard
 * stereo channel map attached to the playback direction.
 */
int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device)
{
	struct snd_pcm *pcm;
	int err;

	if ((err = snd_pcm_new(chip->card, "YMFPCI", device, 32, 1, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ymfpci_capture_rec_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI");
	chip->pcm = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci), 64*1024, 256*1024);

	return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				      snd_pcm_std_chmaps, 2, 0, NULL);
}

static struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = {
	.open =			snd_ymfpci_capture_ac97_open,
	.close =		snd_ymfpci_capture_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_capture_hw_params,
	.hw_free =		snd_ymfpci_capture_hw_free,
	.prepare =		snd_ymfpci_capture_prepare,
	.trigger =		snd_ymfpci_capture_trigger,
	.pointer =		snd_ymfpci_capture_pointer,
};

/*
 * Create the secondary capture-only PCM device (AC'97 / direct recording,
 * depending on chip model).
 */
int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device)
{
	struct snd_pcm *pcm;
	int err;

	if ((err = snd_pcm_new(chip->card, "YMFPCI - PCM2", device, 0, 1, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ymfpci_capture_ac97_ops);

	/* global setup */
	pcm->info_flags = 0;
	sprintf(pcm->name, "YMFPCI - %s",
		chip->device_id == PCI_DEVICE_ID_YAMAHA_754 ?
"Direct Recording" : "AC'97");
	chip->pcm2 = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci), 64*1024, 256*1024);

	return 0;
}

static struct snd_pcm_ops snd_ymfpci_playback_spdif_ops = {
	.open =			snd_ymfpci_playback_spdif_open,
	.close =		snd_ymfpci_playback_spdif_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =		snd_ymfpci_playback_pointer,
};

/* Create the IEC958 (S/PDIF) playback-only PCM device. */
int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device)
{
	struct snd_pcm *pcm;
	int err;

	if ((err = snd_pcm_new(chip->card, "YMFPCI - IEC958", device, 1, 0, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_spdif_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI - IEC958");
	chip->pcm_spdif = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci), 64*1024, 256*1024);

	return 0;
}

static struct snd_pcm_ops snd_ymfpci_playback_4ch_ops = {
	.open =			snd_ymfpci_playback_4ch_open,
	.close =		snd_ymfpci_playback_4ch_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_ymfpci_playback_hw_params,
	.hw_free =		snd_ymfpci_playback_hw_free,
	.prepare =		snd_ymfpci_playback_prepare,
	.trigger =		snd_ymfpci_playback_trigger,
	.pointer =		snd_ymfpci_playback_pointer,
};

/* Channel map for the rear PCM: rear-left/rear-right instead of FL/FR. */
static const struct snd_pcm_chmap_elem surround_map[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ }
};

/* Create the rear-channel (4ch) playback-only PCM device. */
int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device)
{
	struct snd_pcm *pcm;
	int err;

	if ((err = snd_pcm_new(chip->card, "YMFPCI - Rear", device, 1, 0, &pcm)) < 0)
		return err;
	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_4ch_ops);

	/* global setup */
	pcm->info_flags = 0;
	strcpy(pcm->name, "YMFPCI - Rear PCM");
	chip->pcm_4ch = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci), 64*1024, 256*1024);

	return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				      surround_map, 2, 0, NULL);
}

static int snd_ymfpci_spdif_default_info(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* Report the default IEC958 channel-status bytes (sample rate fixed to
 * 48 kHz in byte 3). */
static int snd_ymfpci_spdif_default_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = (chip->spdif_bits >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (chip->spdif_bits >> 8) & 0xff;
	ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS_48000;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/*
 * Update the default channel-status bytes; the hardware register is only
 * rewritten when S/PDIF output is enabled and no dedicated S/PDIF PCM
 * device exists (otherwise the PCM stream control owns the register).
 */
static int snd_ymfpci_spdif_default_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = ((ucontrol->value.iec958.status[0] & 0x3e) << 0) |
	      (ucontrol->value.iec958.status[1] << 8);
	spin_lock_irq(&chip->reg_lock);
	change = chip->spdif_bits != val;
	chip->spdif_bits = val;
	if ((snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & 1) && chip->pcm_spdif == NULL)
		snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_default =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info =		snd_ymfpci_spdif_default_info,
	.get =		snd_ymfpci_spdif_default_get,
	.put =		snd_ymfpci_spdif_default_put
};

static int snd_ymfpci_spdif_mask_info(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_ymfpci_spdif_mask_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip =
snd_kcontrol_chip(kcontrol);

	/* Only these channel-status bits are writable through this driver. */
	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = 0x3e;
	ucontrol->value.iec958.status[1] = 0xff;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_mask =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
	.info =		snd_ymfpci_spdif_mask_info,
	.get =		snd_ymfpci_spdif_mask_get,
};

static int snd_ymfpci_spdif_stream_info(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* Per-stream IEC958 status (active only while the S/PDIF PCM is open). */
static int snd_ymfpci_spdif_stream_get(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&chip->reg_lock);
	ucontrol->value.iec958.status[0] = (chip->spdif_pcm_bits >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (chip->spdif_pcm_bits >> 8) & 0xff;
	ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS_48000;
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* Write the stream status; register is updated only while the S/PDIF PCM
 * output (control bit 1) is enabled. */
static int snd_ymfpci_spdif_stream_put(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = ((ucontrol->value.iec958.status[0] & 0x3e) << 0) |
	      (ucontrol->value.iec958.status[1] << 8);
	spin_lock_irq(&chip->reg_lock);
	change = chip->spdif_pcm_bits != val;
	chip->spdif_pcm_bits = val;
	if ((snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & 2))
		snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_pcm_bits);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_ymfpci_spdif_stream =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
	.info =		snd_ymfpci_spdif_stream_info,
	.get =		snd_ymfpci_spdif_stream_get,
	.put =		snd_ymfpci_spdif_stream_put
};

static int snd_ymfpci_drec_source_info(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_info *info)
{
	static const char *const texts[3] = {"AC'97", "IEC958", "ZV Port"};

	return snd_ctl_enum_info(info, 1, 3, texts);
}

/*
 * Direct-recording source selector (YMF754 only): GLOBALCTRL bit 8 selects
 * non-AC'97 capture, bit 9 distinguishes ZV Port from IEC958.
 */
static int snd_ymfpci_drec_source_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *value)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	u16 reg;

	spin_lock_irq(&chip->reg_lock);
	reg = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL);
	spin_unlock_irq(&chip->reg_lock);
	if (!(reg & 0x100))
		value->value.enumerated.item[0] = 0;
	else
		value->value.enumerated.item[0] = 1 + ((reg & 0x200) != 0);
	return 0;
}

static int snd_ymfpci_drec_source_put(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *value)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	u16 reg, old_reg;

	spin_lock_irq(&chip->reg_lock);
	old_reg = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL);
	if (value->value.enumerated.item[0] == 0)
		reg = old_reg & ~0x100;
	else
		reg = (old_reg & ~0x300) | 0x100 |
		      ((value->value.enumerated.item[0] == 2) << 9);
	snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, reg);
	spin_unlock_irq(&chip->reg_lock);
	return reg != old_reg;
}

static struct snd_kcontrol_new snd_ymfpci_drec_source = {
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"Direct Recording Source",
	.info =		snd_ymfpci_drec_source_info,
	.get =		snd_ymfpci_drec_source_get,
	.put =		snd_ymfpci_drec_source_put
};

/*
 *  Mixer controls
 */

/* Single-bit switch control; private_value packs register | (shift << 16). */
#define YMFPCI_SINGLE(xname, xindex, reg, shift) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_ymfpci_info_single, \
  .get = snd_ymfpci_get_single, .put = snd_ymfpci_put_single, \
  .private_value = ((reg) | ((shift) << 16)) }

#define snd_ymfpci_info_single snd_ctl_boolean_mono_info

static int snd_ymfpci_get_single(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xffff;
	unsigned int shift =
(kcontrol->private_value >> 16) & 0xff;
	unsigned int mask = 1;

	/* Whitelist the registers this generic accessor may touch. */
	switch (reg) {
	case YDSXGR_SPDIFOUTCTRL: break;
	case YDSXGR_SPDIFINCTRL: break;
	default: return -EINVAL;
	}
	ucontrol->value.integer.value[0] =
		(snd_ymfpci_readl(chip, reg) >> shift) & mask;
	return 0;
}

/* Read-modify-write a single bit in a whitelisted register; returns 1 when
 * the stored value actually changed. */
static int snd_ymfpci_put_single(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xffff;
	unsigned int shift = (kcontrol->private_value >> 16) & 0xff;
	unsigned int mask = 1;
	int change;
	unsigned int val, oval;

	switch (reg) {
	case YDSXGR_SPDIFOUTCTRL: break;
	case YDSXGR_SPDIFINCTRL: break;
	default: return -EINVAL;
	}
	val = (ucontrol->value.integer.value[0] & mask);
	val <<= shift;
	spin_lock_irq(&chip->reg_lock);
	oval = snd_ymfpci_readl(chip, reg);
	val = (oval & ~(mask << shift)) | val;
	change = val != oval;
	snd_ymfpci_writel(chip, reg, val);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

static const DECLARE_TLV_DB_LINEAR(db_scale_native, TLV_DB_GAIN_MUTE, 0);

/* Stereo 14-bit volume control on a native volume register (left in bits
 * 0..13, right in bits 16..29); private_value is the register offset. */
#define YMFPCI_DOUBLE(xname, xindex, reg) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
  .info = snd_ymfpci_info_double, \
  .get = snd_ymfpci_get_double, .put = snd_ymfpci_put_double, \
  .private_value = reg, \
  .tlv = { .p = db_scale_native } }

static int snd_ymfpci_info_double(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	unsigned int reg = kcontrol->private_value;

	/* Volume registers live in the 0x80..0xbf window only. */
	if (reg < 0x80 || reg >= 0xc0)
		return -EINVAL;
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 16383;
	return 0;
}

static int snd_ymfpci_get_double(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int reg = kcontrol->private_value;
	unsigned int shift_left = 0, shift_right = 16, mask = 16383;
	unsigned int val;

	if (reg < 0x80 || reg >= 0xc0)
		return -EINVAL;
	spin_lock_irq(&chip->reg_lock);
	val = snd_ymfpci_readl(chip, reg);
	spin_unlock_irq(&chip->reg_lock);
	ucontrol->value.integer.value[0] = (val >> shift_left) & mask;
	ucontrol->value.integer.value[1] = (val >> shift_right) & mask;
	return 0;
}

static int snd_ymfpci_put_double(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int reg = kcontrol->private_value;
	unsigned int shift_left = 0, shift_right = 16, mask = 16383;
	int change;
	unsigned int val1, val2, oval;

	if (reg < 0x80 || reg >= 0xc0)
		return -EINVAL;
	val1 = ucontrol->value.integer.value[0] & mask;
	val2 = ucontrol->value.integer.value[1] & mask;
	val1 <<= shift_left;
	val2 <<= shift_right;
	spin_lock_irq(&chip->reg_lock);
	oval = snd_ymfpci_readl(chip, reg);
	val1 = (oval & ~((mask << shift_left) | (mask << shift_right))) | val1 | val2;
	change = val1 != oval;
	snd_ymfpci_writel(chip, reg, val1);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

/* put handler for "Wave Playback Volume": mirrors the same stereo value
 * into both the native DAC and the 44.1 kHz buffer output volume. */
static int snd_ymfpci_put_nativedacvol(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int reg = YDSXGR_NATIVEDACOUTVOL;
	unsigned int reg2 = YDSXGR_BUF441OUTVOL;
	int change;
	unsigned int value, oval;

	value = ucontrol->value.integer.value[0] & 0x3fff;
	value |= (ucontrol->value.integer.value[1] & 0x3fff) << 16;
	spin_lock_irq(&chip->reg_lock);
	oval = snd_ymfpci_readl(chip, reg);
	change = value != oval;
	snd_ymfpci_writel(chip, reg, value);
	snd_ymfpci_writel(chip, reg2, value);
	spin_unlock_irq(&chip->reg_lock);
	return change;
}

/*
 * 4ch duplication
 */
#define snd_ymfpci_info_dup4ch snd_ctl_boolean_mono_info

static int snd_ymfpci_get_dup4ch(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = chip->mode_dup4ch;
	return 0;
}

static int
snd_ymfpci_put_dup4ch(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_value *ucontrol)
{
	/* Toggle front→rear duplication; takes effect at the next stream
	 * open (see snd_ymfpci_playback_open()). */
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int change;

	change = (ucontrol->value.integer.value[0] != chip->mode_dup4ch);
	if (change)
		chip->mode_dup4ch = !!ucontrol->value.integer.value[0];
	return change;
}

static struct snd_kcontrol_new snd_ymfpci_dup4ch = {
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"4ch Duplication",
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.info =		snd_ymfpci_info_dup4ch,
	.get =		snd_ymfpci_get_dup4ch,
	.put =		snd_ymfpci_put_dup4ch,
};

/* Static mixer-control table registered in snd_ymfpci_mixer(). */
static struct snd_kcontrol_new snd_ymfpci_controls[] = {
{
	/* Custom put so the 44.1 kHz path tracks the native DAC volume. */
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"Wave Playback Volume",
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ,
	.info =		snd_ymfpci_info_double,
	.get =		snd_ymfpci_get_double,
	.put =		snd_ymfpci_put_nativedacvol,
	.private_value = YDSXGR_NATIVEDACOUTVOL,
	.tlv = { .p = db_scale_native },
},
YMFPCI_DOUBLE("Wave Capture Volume", 0, YDSXGR_NATIVEDACLOOPVOL),
YMFPCI_DOUBLE("Digital Capture Volume", 0, YDSXGR_NATIVEDACINVOL),
YMFPCI_DOUBLE("Digital Capture Volume", 1, YDSXGR_NATIVEADCINVOL),
YMFPCI_DOUBLE("ADC Playback Volume", 0, YDSXGR_PRIADCOUTVOL),
YMFPCI_DOUBLE("ADC Capture Volume", 0, YDSXGR_PRIADCLOOPVOL),
YMFPCI_DOUBLE("ADC Playback Volume", 1, YDSXGR_SECADCOUTVOL),
YMFPCI_DOUBLE("ADC Capture Volume", 1, YDSXGR_SECADCLOOPVOL),
YMFPCI_DOUBLE("FM Legacy Playback Volume", 0, YDSXGR_LEGACYOUTVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ", PLAYBACK,VOLUME), 0, YDSXGR_ZVOUTVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("", CAPTURE,VOLUME), 0, YDSXGR_ZVLOOPVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ",PLAYBACK,VOLUME), 1, YDSXGR_SPDIFOUTVOL),
YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,VOLUME), 1, YDSXGR_SPDIFLOOPVOL),
YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), 0, YDSXGR_SPDIFOUTCTRL, 0),
YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, YDSXGR_SPDIFINCTRL, 0),
YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("Loop",NONE,NONE), 0, YDSXGR_SPDIFINCTRL, 4),
};


/*
 * GPIO
 */

/*
 * Read the current state of a GPIO pin: temporarily switch the pin to
 * input (drop its function-enable bit, set its direction bit, level-type
 * mode), re-enable it, and sample YDSXGR_GPIOINSTATUS.  The sequencing
 * follows the hardware's function-enable handshake — TODO confirm against
 * the YMF datasheet.
 */
static int snd_ymfpci_get_gpio_out(struct snd_ymfpci *chip, int pin)
{
	u16 reg, mode;
	unsigned long flags;

	spin_lock_irqsave(&chip->reg_lock, flags);
	reg = snd_ymfpci_readw(chip, YDSXGR_GPIOFUNCENABLE);
	reg &= ~(1 << (pin + 8));
	reg |= (1 << pin);
	snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg);
	/* set the level mode for input line */
	mode = snd_ymfpci_readw(chip, YDSXGR_GPIOTYPECONFIG);
	mode &= ~(3 << (pin * 2));
	snd_ymfpci_writew(chip, YDSXGR_GPIOTYPECONFIG, mode);
	snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg | (1 << (pin + 8)));
	mode = snd_ymfpci_readw(chip, YDSXGR_GPIOINSTATUS);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return (mode >> pin) & 1;
}

/* Drive a GPIO pin: switch it to output and write the level. Returns 0. */
static int snd_ymfpci_set_gpio_out(struct snd_ymfpci *chip, int pin, int enable)
{
	u16 reg;
	unsigned long flags;

	spin_lock_irqsave(&chip->reg_lock, flags);
	reg = snd_ymfpci_readw(chip, YDSXGR_GPIOFUNCENABLE);
	reg &= ~(1 << pin);
	reg &= ~(1 << (pin + 8));
	snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg);
	snd_ymfpci_writew(chip, YDSXGR_GPIOOUTCTRL, enable << pin);
	snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg | (1 << (pin + 8)));
	spin_unlock_irqrestore(&chip->reg_lock, flags);

	return 0;
}

#define snd_ymfpci_gpio_sw_info snd_ctl_boolean_mono_info

static int snd_ymfpci_gpio_sw_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int pin = (int)kcontrol->private_value;

	ucontrol->value.integer.value[0] = snd_ymfpci_get_gpio_out(chip, pin);
	return 0;
}

static int snd_ymfpci_gpio_sw_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	int pin = (int)kcontrol->private_value;

	if (snd_ymfpci_get_gpio_out(chip, pin) != ucontrol->value.integer.value[0]) {
		snd_ymfpci_set_gpio_out(chip, pin, !!ucontrol->value.integer.value[0]);
		/* Read back so userspace sees the value the pin settled on. */
		ucontrol->value.integer.value[0] =
snd_ymfpci_get_gpio_out(chip, pin);
		return 1;
	}
	return 0;
}

/* GPIO pin 2 toggles rear-jack vs. line-in sharing on some boards. */
static struct snd_kcontrol_new snd_ymfpci_rear_shared = {
	.name =		"Shared Rear/Line-In Switch",
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.info =		snd_ymfpci_gpio_sw_info,
	.get =		snd_ymfpci_gpio_sw_get,
	.put =		snd_ymfpci_gpio_sw_put,
	.private_value = 2,
};

/*
 * PCM voice volume
 */

static int snd_ymfpci_pcm_vol_info(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 0x8000;
	return 0;
}

/* Per-substream volume; the subdevice index selects the mixer slot. */
static int snd_ymfpci_pcm_vol_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int subs = kcontrol->id.subdevice;

	ucontrol->value.integer.value[0] = chip->pcm_mixer[subs].left;
	ucontrol->value.integer.value[1] = chip->pcm_mixer[subs].right;
	return 0;
}

/*
 * Store the new per-substream volume (clamped to 0x8000) and, when the
 * substream is live on a non-44.1 slot, flag its voices so the update
 * routine pushes the new gain on the next two hardware banks.
 */
static int snd_ymfpci_pcm_vol_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol);
	unsigned int subs = kcontrol->id.subdevice;
	struct snd_pcm_substream *substream;
	unsigned long flags;

	if (ucontrol->value.integer.value[0] != chip->pcm_mixer[subs].left ||
	    ucontrol->value.integer.value[1] != chip->pcm_mixer[subs].right) {
		chip->pcm_mixer[subs].left = ucontrol->value.integer.value[0];
		chip->pcm_mixer[subs].right = ucontrol->value.integer.value[1];
		if (chip->pcm_mixer[subs].left > 0x8000)
			chip->pcm_mixer[subs].left = 0x8000;
		if (chip->pcm_mixer[subs].right > 0x8000)
			chip->pcm_mixer[subs].right = 0x8000;

		substream = (struct snd_pcm_substream *)kcontrol->private_value;
		spin_lock_irqsave(&chip->voice_lock, flags);
		if (substream->runtime && substream->runtime->private_data) {
			struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data;
			if (!ypcm->use_441_slot)
				ypcm->update_pcm_vol = 2;
		}
		spin_unlock_irqrestore(&chip->voice_lock, flags);
		return 1;
	}
	return 0;
}

/* Template for the 32 per-substream volume controls; starts INACTIVE and
 * is activated from snd_ymfpci_playback_prepare(). */
static struct snd_kcontrol_new
kctl->id.device = chip->pcm_spdif->device; if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip))) < 0) return err; kctl->id.device = chip->pcm_spdif->device; chip->spdif_pcm_ctl = kctl; /* direct recording source */ if (chip->device_id == PCI_DEVICE_ID_YAMAHA_754 && (err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_drec_source, chip))) < 0) return err; /* * shared rear/line-in */ if (rear_switch) { if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_rear_shared, chip))) < 0) return err; } /* per-voice volume */ substream = chip->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; for (idx = 0; idx < 32; ++idx) { kctl = snd_ctl_new1(&snd_ymfpci_pcm_volume, chip); if (!kctl) return -ENOMEM; kctl->id.device = chip->pcm->device; kctl->id.subdevice = idx; kctl->private_value = (unsigned long)substream; if ((err = snd_ctl_add(chip->card, kctl)) < 0) return err; chip->pcm_mixer[idx].left = 0x8000; chip->pcm_mixer[idx].right = 0x8000; chip->pcm_mixer[idx].ctl = kctl; substream = substream->next; } return 0; } /* * timer */ static int snd_ymfpci_timer_start(struct snd_timer *timer) { struct snd_ymfpci *chip; unsigned long flags; unsigned int count; chip = snd_timer_chip(timer); spin_lock_irqsave(&chip->reg_lock, flags); if (timer->sticks > 1) { chip->timer_ticks = timer->sticks; count = timer->sticks - 1; } else { /* * Divisor 1 is not allowed; fake it by using divisor 2 and * counting two ticks for each interrupt. 
*/ chip->timer_ticks = 2; count = 2 - 1; } snd_ymfpci_writew(chip, YDSXGR_TIMERCOUNT, count); snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x03); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_ymfpci_timer_stop(struct snd_timer *timer) { struct snd_ymfpci *chip; unsigned long flags; chip = snd_timer_chip(timer); spin_lock_irqsave(&chip->reg_lock, flags); snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x00); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_ymfpci_timer_precise_resolution(struct snd_timer *timer, unsigned long *num, unsigned long *den) { *num = 1; *den = 96000; return 0; } static struct snd_timer_hardware snd_ymfpci_timer_hw = { .flags = SNDRV_TIMER_HW_AUTO, .resolution = 10417, /* 1 / 96 kHz = 10.41666...us */ .ticks = 0x10000, .start = snd_ymfpci_timer_start, .stop = snd_ymfpci_timer_stop, .precise_resolution = snd_ymfpci_timer_precise_resolution, }; int snd_ymfpci_timer(struct snd_ymfpci *chip, int device) { struct snd_timer *timer = NULL; struct snd_timer_id tid; int err; tid.dev_class = SNDRV_TIMER_CLASS_CARD; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = chip->card->number; tid.device = device; tid.subdevice = 0; if ((err = snd_timer_new(chip->card, "YMFPCI", &tid, &timer)) >= 0) { strcpy(timer->name, "YMFPCI timer"); timer->private_data = chip; timer->hw = snd_ymfpci_timer_hw; } chip->timer = timer; return err; } /* * proc interface */ static void snd_ymfpci_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ymfpci *chip = entry->private_data; int i; snd_iprintf(buffer, "YMFPCI\n\n"); for (i = 0; i <= YDSXGR_WORKBASE; i += 4) snd_iprintf(buffer, "%04x: %04x\n", i, snd_ymfpci_readl(chip, i)); } static int snd_ymfpci_proc_init(struct snd_card *card, struct snd_ymfpci *chip) { struct snd_info_entry *entry; if (! 
snd_card_proc_new(card, "ymfpci", &entry)) snd_info_set_text_ops(entry, chip, snd_ymfpci_proc_read); return 0; } /* * initialization routines */ static void snd_ymfpci_aclink_reset(struct pci_dev * pci) { u8 cmd; pci_read_config_byte(pci, PCIR_DSXG_CTRL, &cmd); #if 0 // force to reset if (cmd & 0x03) { #endif pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd & 0xfc); pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd | 0x03); pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd & 0xfc); pci_write_config_word(pci, PCIR_DSXG_PWRCTRL1, 0); pci_write_config_word(pci, PCIR_DSXG_PWRCTRL2, 0); #if 0 } #endif } static void snd_ymfpci_enable_dsp(struct snd_ymfpci *chip) { snd_ymfpci_writel(chip, YDSXGR_CONFIG, 0x00000001); } static void snd_ymfpci_disable_dsp(struct snd_ymfpci *chip) { u32 val; int timeout = 1000; val = snd_ymfpci_readl(chip, YDSXGR_CONFIG); if (val) snd_ymfpci_writel(chip, YDSXGR_CONFIG, 0x00000000); while (timeout-- > 0) { val = snd_ymfpci_readl(chip, YDSXGR_STATUS); if ((val & 0x00000002) == 0) break; } } static int snd_ymfpci_request_firmware(struct snd_ymfpci *chip) { int err, is_1e; const char *name; err = request_firmware(&chip->dsp_microcode, "yamaha/ds1_dsp.fw", &chip->pci->dev); if (err >= 0) { if (chip->dsp_microcode->size != YDSXG_DSPLENGTH) { dev_err(chip->card->dev, "DSP microcode has wrong size\n"); err = -EINVAL; } } if (err < 0) return err; is_1e = chip->device_id == PCI_DEVICE_ID_YAMAHA_724F || chip->device_id == PCI_DEVICE_ID_YAMAHA_740C || chip->device_id == PCI_DEVICE_ID_YAMAHA_744 || chip->device_id == PCI_DEVICE_ID_YAMAHA_754; name = is_1e ? 
"yamaha/ds1e_ctrl.fw" : "yamaha/ds1_ctrl.fw"; err = request_firmware(&chip->controller_microcode, name, &chip->pci->dev); if (err >= 0) { if (chip->controller_microcode->size != YDSXG_CTRLLENGTH) { dev_err(chip->card->dev, "controller microcode has wrong size\n"); err = -EINVAL; } } if (err < 0) return err; return 0; } MODULE_FIRMWARE("yamaha/ds1_dsp.fw"); MODULE_FIRMWARE("yamaha/ds1_ctrl.fw"); MODULE_FIRMWARE("yamaha/ds1e_ctrl.fw"); static void snd_ymfpci_download_image(struct snd_ymfpci *chip) { int i; u16 ctrl; const __le32 *inst; snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0x00000000); snd_ymfpci_disable_dsp(chip); snd_ymfpci_writel(chip, YDSXGR_MODE, 0x00010000); snd_ymfpci_writel(chip, YDSXGR_MODE, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, 0x00000000); ctrl = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL); snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, ctrl & ~0x0007); /* setup DSP instruction code */ inst = (const __le32 *)chip->dsp_microcode->data; for (i = 0; i < YDSXG_DSPLENGTH / 4; i++) snd_ymfpci_writel(chip, YDSXGR_DSPINSTRAM + (i << 2), le32_to_cpu(inst[i])); /* setup control instruction code */ inst = (const __le32 *)chip->controller_microcode->data; for (i = 0; i < YDSXG_CTRLLENGTH / 4; i++) snd_ymfpci_writel(chip, YDSXGR_CTRLINSTRAM + (i << 2), le32_to_cpu(inst[i])); snd_ymfpci_enable_dsp(chip); } static int snd_ymfpci_memalloc(struct snd_ymfpci *chip) { long size, playback_ctrl_size; int voice, bank, reg; u8 *ptr; dma_addr_t ptr_addr; playback_ctrl_size = 4 + 4 * YDSXG_PLAYBACK_VOICES; chip->bank_size_playback = snd_ymfpci_readl(chip, YDSXGR_PLAYCTRLSIZE) << 2; chip->bank_size_capture = snd_ymfpci_readl(chip, YDSXGR_RECCTRLSIZE) << 2; chip->bank_size_effect = snd_ymfpci_readl(chip, YDSXGR_EFFCTRLSIZE) << 2; 
chip->work_size = YDSXG_DEFAULT_WORK_SIZE; size = ALIGN(playback_ctrl_size, 0x100) + ALIGN(chip->bank_size_playback * 2 * YDSXG_PLAYBACK_VOICES, 0x100) + ALIGN(chip->bank_size_capture * 2 * YDSXG_CAPTURE_VOICES, 0x100) + ALIGN(chip->bank_size_effect * 2 * YDSXG_EFFECT_VOICES, 0x100) + chip->work_size; /* work_ptr must be aligned to 256 bytes, but it's already covered with the kernel page allocation mechanism */ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), size, &chip->work_ptr) < 0) return -ENOMEM; ptr = chip->work_ptr.area; ptr_addr = chip->work_ptr.addr; memset(ptr, 0, size); /* for sure */ chip->bank_base_playback = ptr; chip->bank_base_playback_addr = ptr_addr; chip->ctrl_playback = (u32 *)ptr; chip->ctrl_playback[0] = cpu_to_le32(YDSXG_PLAYBACK_VOICES); ptr += ALIGN(playback_ctrl_size, 0x100); ptr_addr += ALIGN(playback_ctrl_size, 0x100); for (voice = 0; voice < YDSXG_PLAYBACK_VOICES; voice++) { chip->voices[voice].number = voice; chip->voices[voice].bank = (struct snd_ymfpci_playback_bank *)ptr; chip->voices[voice].bank_addr = ptr_addr; for (bank = 0; bank < 2; bank++) { chip->bank_playback[voice][bank] = (struct snd_ymfpci_playback_bank *)ptr; ptr += chip->bank_size_playback; ptr_addr += chip->bank_size_playback; } } ptr = (char *)ALIGN((unsigned long)ptr, 0x100); ptr_addr = ALIGN(ptr_addr, 0x100); chip->bank_base_capture = ptr; chip->bank_base_capture_addr = ptr_addr; for (voice = 0; voice < YDSXG_CAPTURE_VOICES; voice++) for (bank = 0; bank < 2; bank++) { chip->bank_capture[voice][bank] = (struct snd_ymfpci_capture_bank *)ptr; ptr += chip->bank_size_capture; ptr_addr += chip->bank_size_capture; } ptr = (char *)ALIGN((unsigned long)ptr, 0x100); ptr_addr = ALIGN(ptr_addr, 0x100); chip->bank_base_effect = ptr; chip->bank_base_effect_addr = ptr_addr; for (voice = 0; voice < YDSXG_EFFECT_VOICES; voice++) for (bank = 0; bank < 2; bank++) { chip->bank_effect[voice][bank] = (struct snd_ymfpci_effect_bank *)ptr; ptr += 
chip->bank_size_effect; ptr_addr += chip->bank_size_effect; } ptr = (char *)ALIGN((unsigned long)ptr, 0x100); ptr_addr = ALIGN(ptr_addr, 0x100); chip->work_base = ptr; chip->work_base_addr = ptr_addr; snd_BUG_ON(ptr + chip->work_size != chip->work_ptr.area + chip->work_ptr.bytes); snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, chip->bank_base_playback_addr); snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, chip->bank_base_capture_addr); snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, chip->bank_base_effect_addr); snd_ymfpci_writel(chip, YDSXGR_WORKBASE, chip->work_base_addr); snd_ymfpci_writel(chip, YDSXGR_WORKSIZE, chip->work_size >> 2); /* S/PDIF output initialization */ chip->spdif_bits = chip->spdif_pcm_bits = SNDRV_PCM_DEFAULT_CON_SPDIF & 0xffff; snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL, 0); snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits); /* S/PDIF input initialization */ snd_ymfpci_writew(chip, YDSXGR_SPDIFINCTRL, 0); /* digital mixer setup */ for (reg = 0x80; reg < 0xc0; reg += 4) snd_ymfpci_writel(chip, reg, 0); snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_ZVOUTVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_SPDIFOUTVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_NATIVEADCINVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_NATIVEDACINVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_PRIADCLOOPVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0x3fff3fff); return 0; } static int snd_ymfpci_free(struct snd_ymfpci *chip) { u16 ctrl; if (snd_BUG_ON(!chip)) return -EINVAL; if (chip->res_reg_area) { /* don't touch busy hardware */ snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0); snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0); snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0); snd_ymfpci_writel(chip, YDSXGR_STATUS, ~0); snd_ymfpci_disable_dsp(chip); snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0); snd_ymfpci_writel(chip, 
YDSXGR_RECCTRLBASE, 0); snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, 0); snd_ymfpci_writel(chip, YDSXGR_WORKBASE, 0); snd_ymfpci_writel(chip, YDSXGR_WORKSIZE, 0); ctrl = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL); snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, ctrl & ~0x0007); } snd_ymfpci_ac3_done(chip); /* Set PCI device to D3 state */ #if 0 /* FIXME: temporarily disabled, otherwise we cannot fire up * the chip again unless reboot. ACPI bug? */ pci_set_power_state(chip->pci, PCI_D3hot); #endif #ifdef CONFIG_PM_SLEEP kfree(chip->saved_regs); #endif if (chip->irq >= 0) free_irq(chip->irq, chip); release_and_free_resource(chip->mpu_res); release_and_free_resource(chip->fm_res); snd_ymfpci_free_gameport(chip); iounmap(chip->reg_area_virt); if (chip->work_ptr.area) snd_dma_free_pages(&chip->work_ptr); release_and_free_resource(chip->res_reg_area); pci_write_config_word(chip->pci, 0x40, chip->old_legacy_ctrl); pci_disable_device(chip->pci); release_firmware(chip->dsp_microcode); release_firmware(chip->controller_microcode); kfree(chip); return 0; } static int snd_ymfpci_dev_free(struct snd_device *device) { struct snd_ymfpci *chip = device->device_data; return snd_ymfpci_free(chip); } #ifdef CONFIG_PM_SLEEP static int saved_regs_index[] = { /* spdif */ YDSXGR_SPDIFOUTCTRL, YDSXGR_SPDIFOUTSTATUS, YDSXGR_SPDIFINCTRL, /* volumes */ YDSXGR_PRIADCLOOPVOL, YDSXGR_NATIVEDACINVOL, YDSXGR_NATIVEDACOUTVOL, YDSXGR_BUF441OUTVOL, YDSXGR_NATIVEADCINVOL, YDSXGR_SPDIFLOOPVOL, YDSXGR_SPDIFOUTVOL, YDSXGR_ZVOUTVOL, YDSXGR_LEGACYOUTVOL, /* address bases */ YDSXGR_PLAYCTRLBASE, YDSXGR_RECCTRLBASE, YDSXGR_EFFCTRLBASE, YDSXGR_WORKBASE, /* capture set up */ YDSXGR_MAPOFREC, YDSXGR_RECFORMAT, YDSXGR_RECSLOTSR, YDSXGR_ADCFORMAT, YDSXGR_ADCSLOTSR, }; #define YDSXGR_NUM_SAVED_REGS ARRAY_SIZE(saved_regs_index) static int snd_ymfpci_suspend(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); struct snd_ymfpci *chip = card->private_data; unsigned int i; snd_power_change_state(card, 
SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_pcm_suspend_all(chip->pcm2); snd_pcm_suspend_all(chip->pcm_spdif); snd_pcm_suspend_all(chip->pcm_4ch); snd_ac97_suspend(chip->ac97); for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++) chip->saved_regs[i] = snd_ymfpci_readl(chip, saved_regs_index[i]); chip->saved_ydsxgr_mode = snd_ymfpci_readl(chip, YDSXGR_MODE); pci_read_config_word(chip->pci, PCIR_DSXG_LEGACY, &chip->saved_dsxg_legacy); pci_read_config_word(chip->pci, PCIR_DSXG_ELEGACY, &chip->saved_dsxg_elegacy); snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0); snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0); snd_ymfpci_disable_dsp(chip); return 0; } static int snd_ymfpci_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_ymfpci *chip = card->private_data; unsigned int i; snd_ymfpci_aclink_reset(pci); snd_ymfpci_codec_ready(chip, 0); snd_ymfpci_download_image(chip); udelay(100); for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++) snd_ymfpci_writel(chip, saved_regs_index[i], chip->saved_regs[i]); snd_ac97_resume(chip->ac97); pci_write_config_word(chip->pci, PCIR_DSXG_LEGACY, chip->saved_dsxg_legacy); pci_write_config_word(chip->pci, PCIR_DSXG_ELEGACY, chip->saved_dsxg_elegacy); /* start hw again */ if (chip->start_count > 0) { spin_lock_irq(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_MODE, chip->saved_ydsxgr_mode); chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT); spin_unlock_irq(&chip->reg_lock); } snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } SIMPLE_DEV_PM_OPS(snd_ymfpci_pm, snd_ymfpci_suspend, snd_ymfpci_resume); #endif /* CONFIG_PM_SLEEP */ int snd_ymfpci_create(struct snd_card *card, struct pci_dev *pci, unsigned short old_legacy_ctrl, struct snd_ymfpci **rchip) { struct snd_ymfpci *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_ymfpci_dev_free, }; *rchip = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return 
err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->old_legacy_ctrl = old_legacy_ctrl; spin_lock_init(&chip->reg_lock); spin_lock_init(&chip->voice_lock); init_waitqueue_head(&chip->interrupt_sleep); atomic_set(&chip->interrupt_sleep_count, 0); chip->card = card; chip->pci = pci; chip->irq = -1; chip->device_id = pci->device; chip->rev = pci->revision; chip->reg_area_phys = pci_resource_start(pci, 0); chip->reg_area_virt = ioremap_nocache(chip->reg_area_phys, 0x8000); pci_set_master(pci); chip->src441_used = -1; if ((chip->res_reg_area = request_mem_region(chip->reg_area_phys, 0x8000, "YMFPCI")) == NULL) { dev_err(chip->card->dev, "unable to grab memory region 0x%lx-0x%lx\n", chip->reg_area_phys, chip->reg_area_phys + 0x8000 - 1); snd_ymfpci_free(chip); return -EBUSY; } if (request_irq(pci->irq, snd_ymfpci_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { dev_err(chip->card->dev, "unable to grab IRQ %d\n", pci->irq); snd_ymfpci_free(chip); return -EBUSY; } chip->irq = pci->irq; snd_ymfpci_aclink_reset(pci); if (snd_ymfpci_codec_ready(chip, 0) < 0) { snd_ymfpci_free(chip); return -EIO; } err = snd_ymfpci_request_firmware(chip); if (err < 0) { dev_err(chip->card->dev, "firmware request failed: %d\n", err); snd_ymfpci_free(chip); return err; } snd_ymfpci_download_image(chip); udelay(100); /* seems we need a delay after downloading image.. */ if (snd_ymfpci_memalloc(chip) < 0) { snd_ymfpci_free(chip); return -EIO; } if ((err = snd_ymfpci_ac3_init(chip)) < 0) { snd_ymfpci_free(chip); return err; } #ifdef CONFIG_PM_SLEEP chip->saved_regs = kmalloc(YDSXGR_NUM_SAVED_REGS * sizeof(u32), GFP_KERNEL); if (chip->saved_regs == NULL) { snd_ymfpci_free(chip); return -ENOMEM; } #endif if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_ymfpci_free(chip); return err; } snd_ymfpci_proc_init(card, chip); *rchip = chip; return 0; }
gpl-2.0
Aayushya/kernel_motorola_msm8226
drivers/md/persistent-data/dm-btree-spine.c
1331
5115
/* * Copyright (C) 2011 Red Hat, Inc. * * This file is released under the GPL. */ #include "dm-btree-internal.h" #include "dm-transaction-manager.h" #include <linux/device-mapper.h> #define DM_MSG_PREFIX "btree spine" /*----------------------------------------------------------------*/ #define BTREE_CSUM_XOR 121107 static int node_check(struct dm_block_validator *v, struct dm_block *b, size_t block_size); static void node_prepare_for_write(struct dm_block_validator *v, struct dm_block *b, size_t block_size) { struct btree_node *n = dm_block_data(b); struct node_header *h = &n->header; h->blocknr = cpu_to_le64(dm_block_location(b)); h->csum = cpu_to_le32(dm_bm_checksum(&h->flags, block_size - sizeof(__le32), BTREE_CSUM_XOR)); BUG_ON(node_check(v, b, 4096)); } static int node_check(struct dm_block_validator *v, struct dm_block *b, size_t block_size) { struct btree_node *n = dm_block_data(b); struct node_header *h = &n->header; size_t value_size; __le32 csum_disk; uint32_t flags; if (dm_block_location(b) != le64_to_cpu(h->blocknr)) { DMERR("node_check failed blocknr %llu wanted %llu", le64_to_cpu(h->blocknr), dm_block_location(b)); return -ENOTBLK; } csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags, block_size - sizeof(__le32), BTREE_CSUM_XOR)); if (csum_disk != h->csum) { DMERR("node_check failed csum %u wanted %u", le32_to_cpu(csum_disk), le32_to_cpu(h->csum)); return -EILSEQ; } value_size = le32_to_cpu(h->value_size); if (sizeof(struct node_header) + (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) { DMERR("node_check failed: max_entries too large"); return -EILSEQ; } if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) { DMERR("node_check failed, too many entries"); return -EILSEQ; } /* * The node must be either INTERNAL or LEAF. 
*/ flags = le32_to_cpu(h->flags); if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) { DMERR("node_check failed, node is neither INTERNAL or LEAF"); return -EILSEQ; } return 0; } struct dm_block_validator btree_node_validator = { .name = "btree_node", .prepare_for_write = node_prepare_for_write, .check = node_check }; /*----------------------------------------------------------------*/ static int bn_read_lock(struct dm_btree_info *info, dm_block_t b, struct dm_block **result) { return dm_tm_read_lock(info->tm, b, &btree_node_validator, result); } static int bn_shadow(struct dm_btree_info *info, dm_block_t orig, struct dm_btree_value_type *vt, struct dm_block **result) { int r, inc; r = dm_tm_shadow_block(info->tm, orig, &btree_node_validator, result, &inc); if (!r && inc) inc_children(info->tm, dm_block_data(*result), vt); return r; } int new_block(struct dm_btree_info *info, struct dm_block **result) { return dm_tm_new_block(info->tm, &btree_node_validator, result); } int unlock_block(struct dm_btree_info *info, struct dm_block *b) { return dm_tm_unlock(info->tm, b); } /*----------------------------------------------------------------*/ void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info) { s->info = info; s->count = 0; s->nodes[0] = NULL; s->nodes[1] = NULL; } int exit_ro_spine(struct ro_spine *s) { int r = 0, i; for (i = 0; i < s->count; i++) { int r2 = unlock_block(s->info, s->nodes[i]); if (r2 < 0) r = r2; } return r; } int ro_step(struct ro_spine *s, dm_block_t new_child) { int r; if (s->count == 2) { r = unlock_block(s->info, s->nodes[0]); if (r < 0) return r; s->nodes[0] = s->nodes[1]; s->count--; } r = bn_read_lock(s->info, new_child, s->nodes + s->count); if (!r) s->count++; return r; } struct btree_node *ro_node(struct ro_spine *s) { struct dm_block *block; BUG_ON(!s->count); block = s->nodes[s->count - 1]; return dm_block_data(block); } /*----------------------------------------------------------------*/ void init_shadow_spine(struct 
shadow_spine *s, struct dm_btree_info *info) { s->info = info; s->count = 0; } int exit_shadow_spine(struct shadow_spine *s) { int r = 0, i; for (i = 0; i < s->count; i++) { int r2 = unlock_block(s->info, s->nodes[i]); if (r2 < 0) r = r2; } return r; } int shadow_step(struct shadow_spine *s, dm_block_t b, struct dm_btree_value_type *vt) { int r; if (s->count == 2) { r = unlock_block(s->info, s->nodes[0]); if (r < 0) return r; s->nodes[0] = s->nodes[1]; s->count--; } r = bn_shadow(s->info, b, vt, s->nodes + s->count); if (!r) { if (!s->count) s->root = dm_block_location(s->nodes[0]); s->count++; } return r; } struct dm_block *shadow_current(struct shadow_spine *s) { BUG_ON(!s->count); return s->nodes[s->count - 1]; } struct dm_block *shadow_parent(struct shadow_spine *s) { BUG_ON(s->count != 2); return s->count == 2 ? s->nodes[0] : NULL; } int shadow_has_parent(struct shadow_spine *s) { return s->count >= 2; } int shadow_root(struct shadow_spine *s) { return s->root; }
gpl-2.0
amitbagaria/samsung-kernel-latona
drivers/watchdog/wm8350_wdt.c
4147
6980
/* * Watchdog driver for the wm8350 * * Copyright (C) 2007, 2008 Wolfson Microelectronics <linux@wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/platform_device.h> #include <linux/watchdog.h> #include <linux/uaccess.h> #include <linux/mfd/wm8350/core.h> static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static unsigned long wm8350_wdt_users; static struct miscdevice wm8350_wdt_miscdev; static int wm8350_wdt_expect_close; static DEFINE_MUTEX(wdt_mutex); static struct { int time; /* Seconds */ u16 val; /* To be set in WM8350_SYSTEM_CONTROL_2 */ } wm8350_wdt_cfgs[] = { { 1, 0x02 }, { 2, 0x04 }, { 4, 0x05 }, }; static struct wm8350 *get_wm8350(void) { return dev_get_drvdata(wm8350_wdt_miscdev.parent); } static int wm8350_wdt_set_timeout(struct wm8350 *wm8350, u16 value) { int ret; u16 reg; mutex_lock(&wdt_mutex); wm8350_reg_unlock(wm8350); reg = wm8350_reg_read(wm8350, WM8350_SYSTEM_CONTROL_2); reg &= ~WM8350_WDOG_TO_MASK; reg |= value; ret = wm8350_reg_write(wm8350, WM8350_SYSTEM_CONTROL_2, reg); wm8350_reg_lock(wm8350); mutex_unlock(&wdt_mutex); return ret; } static int wm8350_wdt_start(struct wm8350 *wm8350) { int ret; u16 reg; mutex_lock(&wdt_mutex); wm8350_reg_unlock(wm8350); reg = wm8350_reg_read(wm8350, WM8350_SYSTEM_CONTROL_2); reg &= ~WM8350_WDOG_MODE_MASK; reg |= 0x20; ret = wm8350_reg_write(wm8350, WM8350_SYSTEM_CONTROL_2, reg); wm8350_reg_lock(wm8350); mutex_unlock(&wdt_mutex); return ret; } static int wm8350_wdt_stop(struct wm8350 *wm8350) { int ret; u16 reg; mutex_lock(&wdt_mutex); 
wm8350_reg_unlock(wm8350); reg = wm8350_reg_read(wm8350, WM8350_SYSTEM_CONTROL_2); reg &= ~WM8350_WDOG_MODE_MASK; ret = wm8350_reg_write(wm8350, WM8350_SYSTEM_CONTROL_2, reg); wm8350_reg_lock(wm8350); mutex_unlock(&wdt_mutex); return ret; } static int wm8350_wdt_kick(struct wm8350 *wm8350) { int ret; u16 reg; mutex_lock(&wdt_mutex); reg = wm8350_reg_read(wm8350, WM8350_SYSTEM_CONTROL_2); ret = wm8350_reg_write(wm8350, WM8350_SYSTEM_CONTROL_2, reg); mutex_unlock(&wdt_mutex); return ret; } static int wm8350_wdt_open(struct inode *inode, struct file *file) { struct wm8350 *wm8350 = get_wm8350(); int ret; if (!wm8350) return -ENODEV; if (test_and_set_bit(0, &wm8350_wdt_users)) return -EBUSY; ret = wm8350_wdt_start(wm8350); if (ret != 0) return ret; return nonseekable_open(inode, file); } static int wm8350_wdt_release(struct inode *inode, struct file *file) { struct wm8350 *wm8350 = get_wm8350(); if (wm8350_wdt_expect_close) wm8350_wdt_stop(wm8350); else { dev_warn(wm8350->dev, "Watchdog device closed uncleanly\n"); wm8350_wdt_kick(wm8350); } clear_bit(0, &wm8350_wdt_users); return 0; } static ssize_t wm8350_wdt_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct wm8350 *wm8350 = get_wm8350(); size_t i; if (count) { wm8350_wdt_kick(wm8350); if (!nowayout) { /* In case it was set long ago */ wm8350_wdt_expect_close = 0; /* scan to see whether or not we got the magic character */ for (i = 0; i != count; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') wm8350_wdt_expect_close = 42; } } } return count; } static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "WM8350 Watchdog", }; static long wm8350_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct wm8350 *wm8350 = get_wm8350(); int ret = -ENOTTY, time, i; void __user *argp = (void __user *)arg; int __user *p = argp; u16 reg; switch (cmd) { case WDIOC_GETSUPPORT: ret = 
copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: ret = put_user(0, p); break; case WDIOC_SETOPTIONS: { int options; if (get_user(options, p)) return -EFAULT; ret = -EINVAL; /* Setting both simultaneously means at least one must fail */ if (options == WDIOS_DISABLECARD) ret = wm8350_wdt_start(wm8350); if (options == WDIOS_ENABLECARD) ret = wm8350_wdt_stop(wm8350); break; } case WDIOC_KEEPALIVE: ret = wm8350_wdt_kick(wm8350); break; case WDIOC_SETTIMEOUT: ret = get_user(time, p); if (ret) break; if (time == 0) { if (nowayout) ret = -EINVAL; else wm8350_wdt_stop(wm8350); break; } for (i = 0; i < ARRAY_SIZE(wm8350_wdt_cfgs); i++) if (wm8350_wdt_cfgs[i].time == time) break; if (i == ARRAY_SIZE(wm8350_wdt_cfgs)) ret = -EINVAL; else ret = wm8350_wdt_set_timeout(wm8350, wm8350_wdt_cfgs[i].val); break; case WDIOC_GETTIMEOUT: reg = wm8350_reg_read(wm8350, WM8350_SYSTEM_CONTROL_2); reg &= WM8350_WDOG_TO_MASK; for (i = 0; i < ARRAY_SIZE(wm8350_wdt_cfgs); i++) if (wm8350_wdt_cfgs[i].val == reg) break; if (i == ARRAY_SIZE(wm8350_wdt_cfgs)) { dev_warn(wm8350->dev, "Unknown watchdog configuration: %x\n", reg); ret = -EINVAL; } else ret = put_user(wm8350_wdt_cfgs[i].time, p); } return ret; } static const struct file_operations wm8350_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = wm8350_wdt_write, .unlocked_ioctl = wm8350_wdt_ioctl, .open = wm8350_wdt_open, .release = wm8350_wdt_release, }; static struct miscdevice wm8350_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wm8350_wdt_fops, }; static int __devinit wm8350_wdt_probe(struct platform_device *pdev) { struct wm8350 *wm8350 = platform_get_drvdata(pdev); if (!wm8350) { pr_err("No driver data supplied\n"); return -ENODEV; } /* Default to 4s timeout */ wm8350_wdt_set_timeout(wm8350, 0x05); wm8350_wdt_miscdev.parent = &pdev->dev; return misc_register(&wm8350_wdt_miscdev); } static int __devexit wm8350_wdt_remove(struct 
platform_device *pdev) { misc_deregister(&wm8350_wdt_miscdev); return 0; } static struct platform_driver wm8350_wdt_driver = { .probe = wm8350_wdt_probe, .remove = __devexit_p(wm8350_wdt_remove), .driver = { .name = "wm8350-wdt", }, }; static int __init wm8350_wdt_init(void) { return platform_driver_register(&wm8350_wdt_driver); } module_init(wm8350_wdt_init); static void __exit wm8350_wdt_exit(void) { platform_driver_unregister(&wm8350_wdt_driver); } module_exit(wm8350_wdt_exit); MODULE_AUTHOR("Mark Brown"); MODULE_DESCRIPTION("WM8350 Watchdog"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm8350-wdt");
gpl-2.0
syhost/android_kernel_pantech_ef62l
arch/tile/kernel/single_step.c
4403
22014
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * A code-rewriter that enables instruction single-stepping. * Derived from iLib's single-stepping code. */ #ifndef __tilegx__ /* Hardware support for single step unavailable. */ /* These functions are only used on the TILE platform */ #include <linux/slab.h> #include <linux/thread_info.h> #include <linux/uaccess.h> #include <linux/mman.h> #include <linux/types.h> #include <linux/err.h> #include <asm/cacheflush.h> #include <asm/unaligned.h> #include <arch/abi.h> #include <arch/opcode.h> #define signExtend17(val) sign_extend((val), 17) #define TILE_X1_MASK (0xffffffffULL << 31) int unaligned_printk; static int __init setup_unaligned_printk(char *str) { long val; if (strict_strtol(str, 0, &val) != 0) return 0; unaligned_printk = val; pr_info("Printk for each unaligned data accesses is %s\n", unaligned_printk ? 
"enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);

/* Running count of unaligned fixups performed; readable via procfs. */
unsigned int unaligned_fixup_count;

/* Classification of the memory operation encoded in a bundle, if any. */
enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};

/* Return a copy of bundle 'n' with the X1 branch offset replaced by 'offset'. */
static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
	tile_bundle_bits result;

	/* mask out the old offset */
	tile_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

/*
 * Rewrite the X1 slot of 'n' to a register move "dest = src",
 * encoded as "or dest, src, zero".
 */
static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits result;
	tile_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	result |= op;
	return result;
}

/* Rewrite the X1 slot of 'n' to a no-op (move zero to zero). */
static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

/* Rewrite the X1 slot of 'n' to "addi dest, src, imm". */
static inline tile_bundle_bits addi_X1(
	tile_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |= (create_SrcA_X1(src) |
	      create_Dest_X1(dest) |
	      create_Imm8_X1(imm) |
	      create_S_X1(0) |
	      create_Opcode_X1(IMM_0_OPCODE_X1) |
	      create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}

/*
 * Handle an unaligned load or store found in the bundle being
 * single-stepped: emulate the access here with copy_{from,to}_user and
 * rewrite the bundle's memory op (to a prefetch for Y2, an addi for the
 * postincrement forms, or a nop otherwise) so the rewritten bundle can
 * run without faulting.  Returns the (possibly rewritten) bundle, or
 * zero if a signal was raised and no bundle should be executed.
 */
static tile_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tile_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op, int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

	/*
	 * Return SIGBUS with the unaligned address, if requested.
	 * Note that we return SIGBUS even for completely invalid addresses
	 * as long as they are in fact unaligned; this matches what the
	 * tilepro hardware would be doing, if it could provide us with the
	 * actual bad address in an SPR, which it doesn't.
	 */
	if (unaligned_fixup == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		/*
		 * NOTE(review): the rest of this file spells the type
		 * tile_bundle_bits; confirm tilepro_bundle_bits is a
		 * compatible alias in the headers.
		 */
		return (tilepro_bundle_bits) 0;
	}

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			/* Defer the register write until after the step. */
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		trace_unhandled_signal("segfault", regs,
				       (unsigned long)addr, SIGSEGV);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	ti->step_state = NULL;
}

/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a condition branch, then the target is
 * reset to one past the next instruction. If the instruction
 * sets the lr, then that is noted. If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * single_step_state->  We use data segment values because the
 * stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits __user *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */

	/* Reference bundle patterns used when building the step buffer. */
	asm(
	" .pushsection .rodata.single_step\n"
	" .align 8\n"
	" .globl __single_step_ill_insn\n"
	"__single_step_ill_insn:\n"
	" ill\n"
	" .globl __single_step_addli_insn\n"
	"__single_step_addli_insn:\n"
	" { nop; addli r0, zero, 0 }\n"
	" .globl __single_step_auli_insn\n"
	"__single_step_auli_insn:\n"
	" { nop; auli r0, r0, 0 }\n"
	" .globl __single_step_j_insn\n"
	"__single_step_j_insn:\n"
	" j .\n"
	" .popsection\n"
	);

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate a page of writable, executable memory */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		buffer = (void __user *) vm_mmap(NULL, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#else

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>

/* Per-cpu copy of the pc we last armed single-step at. */
static DEFINE_PER_CPU(unsigned long, ss_saved_pc);

/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}

/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return back.
 */
void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
	/* Nothing */
}

#endif /* !__tilegx__ */
gpl-2.0
RJDTWO/android_kernel_oneplus_msm8974
arch/alpha/kernel/traps.c
4403
27638
/*
 * arch/alpha/kernel/traps.c
 *
 * (C) Copyright 1994 Linus Torvalds
 */

/*
 * This file initializes the trap entry points
 */

#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/ratelimit.h>

#include <asm/gentrap.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <asm/sysinfo.h>
#include <asm/hwrpb.h>
#include <asm/mmu_context.h>
#include <asm/special_insns.h>

#include "proto.h"

/* Work-around for some SRMs which mishandle opDEC faults.  */

static int opDEC_fix;

/*
 * Probe, at boot, whether this SRM delivers opDEC faults with the
 * architecturally-required PC (one past the faulting insn) or the buggy
 * PC of the insn itself; record the needed adjustment in opDEC_fix.
 */
static void __cpuinit
opDEC_check(void)
{
	__asm__ __volatile__ (
	/* Load the address of... */
	" br $16, 1f\n"
	/* A stub instruction fault handler.  Just add 4 to the
	   pc and continue.  */
	" ldq $16, 8($sp)\n"
	" addq $16, 4, $16\n"
	" stq $16, 8($sp)\n"
	" call_pal %[rti]\n"
	/* Install the instruction fault handler.  */
	"1: lda $17, 3\n"
	" call_pal %[wrent]\n"
	/* With that in place, the fault from the round-to-minf fp
	   insn will arrive either at the "lda 4" insn (bad) or one
	   past that (good).  This places the correct fixup in %0.  */
	" lda %[fix], 0\n"
	" cvttq/svm $f31,$f31\n"
	" lda %[fix], 4"
	: [fix] "=r" (opDEC_fix)
	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");

	if (opDEC_fix)
		printk("opDEC fixup enabled.\n");
}

/* Dump the full integer register state; r9_15, if non-NULL, supplies
   the callee-saved registers not present in struct pt_regs.  */
void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
	printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
	       regs->pc, regs->r26, regs->ps, print_tainted());
	print_symbol("pc is at %s\n", regs->pc);
	print_symbol("ra is at %s\n", regs->r26 );
	printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
	       regs->r0, regs->r1, regs->r2);
	printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
	       regs->r3, regs->r4, regs->r5);
	printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
	       regs->r6, regs->r7, regs->r8);

	if (r9_15) {
		printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
		       r9_15[9], r9_15[10], r9_15[11]);
		printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
		       r9_15[12], r9_15[13], r9_15[14]);
		printk("s6 = %016lx\n", r9_15[15]);
	}

	printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
	       regs->r16, regs->r17, regs->r18);
	printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
	       regs->r19, regs->r20, regs->r21);
	printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
	       regs->r22, regs->r23, regs->r24);
	printk("t11= %016lx pv = %016lx at = %016lx\n",
	       regs->r25, regs->r27, regs->r28);
	printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
#if 0
__halt();
#endif
}

#if 0
static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
#endif

/* Dump the bundles around pc: six before, the faulting one (marked <...>),
   and one after.  Stops quietly on an unreadable word.  */
static void
dik_show_code(unsigned int *pc)
{
	long i;

	printk("Code:");
	for (i = -6; i < 2; i++) {
		unsigned int insn;
		if (__get_user(insn, (unsigned int __user *)pc + i))
			break;
		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
	}
	printk("\n");
}

/* Scan up the stack page printing anything that looks like a kernel
   text address.  At most ~40 entries are printed before truncating
   with "...". */
static void
dik_show_trace(unsigned long *sp)
{
	long i = 0;
	printk("Trace:\n");
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		if (tmp < (unsigned long) &_stext)
			continue;
		if (tmp >= (unsigned long) &_etext)
			continue;
		printk("[<%lx>]", tmp);
		print_symbol(" %s", tmp);
		printk("\n");
		/*
		 * Bug fix: the counter was never advanced, so the
		 * truncation guard below could never fire and a stack
		 * full of text-range values printed without bound.
		 */
		i++;
		if (i > 40) {
			printk(" ...");
			break;
		}
	}
	printk("\n");
}

static int kstack_depth_to_print = 24;

void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL);" prints the
	 * back trace for this cpu.
	 */
	if(sp==NULL)
		sp=(unsigned long*)&sp;

	stack = sp;
	for(i=0; i < kstack_depth_to_print; i++) {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i % 4) == 0))
			printk("\n ");
		printk("%016lx ", *stack++);
	}
	printk("\n");
	dik_show_trace(sp);
}

void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

/* Dump registers, stack and code, then kill the task — but only when
   the fault happened in kernel mode (PS bit 3 set means user mode). */
void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE);
	dik_show_trace((unsigned long *)(regs+1));
	dik_show_code((unsigned int *)regs->pc);

	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}

#ifndef CONFIG_MATHEMU
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
long (*alpha_fp_emul) (unsigned long pc) = (void *)dummy_emul;
#else
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif

/* Arithmetic trap entry point: try software completion first, then
   deliver SIGFPE with the code the emulator chose. */
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
	    struct pt_regs *regs)
{
	long si_code = FPE_FLTINV;
siginfo_t info;

	if (summary & 1) {
		/* Software-completion summary bit is set, so try to
		   emulate the instruction.  If the processor supports
		   precise exceptions, we don't have to search.  */
		if (!amask(AMASK_PRECISE_TRAP))
			si_code = alpha_fp_emul(regs->pc - 4);
		else
			si_code = alpha_fp_emul_imprecise(regs, write_mask);
		if (si_code == 0)
			return;
	}
	die_if_kernel("Arithmetic fault", regs, 0, NULL);

	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *) regs->pc;
	send_sig_info(SIGFPE, &info, current);
}

/*
 * Instruction-fault trap entry point.  'type' selects breakpoint (0),
 * bugcheck (1), gentrap (2), FEN fault (3), opDEC (4) or illoc (5);
 * each is translated into the appropriate signal for the task.
 */
asmlinkage void
do_entIF(unsigned long type, struct pt_regs *regs)
{
	siginfo_t info;
	int signo, code;

	if ((regs->ps & ~IPL_MAX) == 0) {
		if (type == 1) {
			const unsigned int *data
			  = (const unsigned int *) regs->pc;
			printk("Kernel bug at %s:%d\n",
			       (const char *)(data[1] | (long)data[2] << 32),
			       data[0]);
		}
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
			      regs, type, NULL);
	}

	switch (type) {
	      case 0: /* breakpoint */
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_BRKPT;
		info.si_trapno = 0;
		info.si_addr = (void __user *) regs->pc;

		if (ptrace_cancel_bpt(current)) {
			regs->pc -= 4;	/* make pc point to former bpt */
		}

		send_sig_info(SIGTRAP, &info, current);
		return;

	      case 1: /* bugcheck */
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = __SI_FAULT;
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = 0;
		send_sig_info(SIGTRAP, &info, current);
		return;

	      case 2: /* gentrap */
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = regs->r16;
		switch ((long) regs->r16) {
		case GEN_INTOVF:
			signo = SIGFPE;
			code = FPE_INTOVF;
			break;
		case GEN_INTDIV:
			signo = SIGFPE;
			code = FPE_INTDIV;
			break;
		case GEN_FLTOVF:
			signo = SIGFPE;
			code = FPE_FLTOVF;
			break;
		case GEN_FLTDIV:
			signo = SIGFPE;
			code = FPE_FLTDIV;
			break;
		case GEN_FLTUND:
			signo = SIGFPE;
			code = FPE_FLTUND;
			break;
		case GEN_FLTINV:
			signo = SIGFPE;
			code = FPE_FLTINV;
			break;
		case GEN_FLTINE:
			signo = SIGFPE;
			code = FPE_FLTRES;
			break;
		case GEN_ROPRAND:
			signo = SIGFPE;
			code = __SI_FAULT;
			break;

		case GEN_DECOVF:
		case GEN_DECDIV:
		case GEN_DECINV:
		case GEN_ASSERTERR:
		case GEN_NULPTRERR:
		case GEN_STKOVF:
		case GEN_STRLENERR:
		case GEN_SUBSTRERR:
		case GEN_RANGERR:
		case GEN_SUBRNG:
		case GEN_SUBRNG1:
		case GEN_SUBRNG2:
		case GEN_SUBRNG3:
		case GEN_SUBRNG4:
		case GEN_SUBRNG5:
		case GEN_SUBRNG6:
		case GEN_SUBRNG7:
		default:
			signo = SIGTRAP;
			code = __SI_FAULT;
			break;
		}

		info.si_signo = signo;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (void __user *) regs->pc;
		send_sig_info(signo, &info, current);
		return;

	      case 4: /* opDEC */
		if (implver() == IMPLVER_EV4) {
			long si_code;

			/* Some versions of SRM do not handle
			   the opDEC properly - they return the PC of the
			   opDEC fault, not the instruction after as the
			   Alpha architecture requires.  Here we fix it up.
			   We do this by intentionally causing an opDEC
			   fault during the boot sequence and testing if
			   we get the correct PC.  If not, we set a flag
			   to correct it every time through.  */
			regs->pc += opDEC_fix;

			/* EV4 does not implement anything except normal
			   rounding.  Everything else will come here as
			   an illegal instruction.  Emulate them.  */
			si_code = alpha_fp_emul(regs->pc - 4);
			if (si_code == 0)
				return;
			if (si_code > 0) {
				info.si_signo = SIGFPE;
				info.si_errno = 0;
				info.si_code = si_code;
				info.si_addr = (void __user *) regs->pc;
				send_sig_info(SIGFPE, &info, current);
				return;
			}
		}
		break;

	      case 3: /* FEN fault */
		/* Irritating users can call PAL_clrfen to disable the
		   FPU for the process.  The kernel will then trap in
		   do_switch_stack and undo_switch_stack when we try
		   to save and restore the FP registers.

		   Given that GCC by default generates code that uses the
		   FP registers, PAL_clrfen is not useful except for DoS
		   attacks.  So turn the bleeding FPU back on and be done
		   with it.  */
		current_thread_info()->pcb.flags |= 1;
		__reload_thread(&current_thread_info()->pcb);
		return;

	      case 5: /* illoc */
	      default: /* unexpected instruction-fault type */
		      ;
	}

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *) regs->pc;
	send_sig_info(SIGILL, &info, current);
}

/* There is an ifdef in the PALcode in MILO that enables a
   "kernel debugging entry point" as an unprivileged call_pal.

   We don't want to have anything to do with it, but unfortunately
   several versions of MILO included in distributions have it enabled,
   and if we don't put something on the entry point we'll oops.  */

asmlinkage void
do_entDbg(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Instruction fault", regs, 0, NULL);

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *) regs->pc;
	force_sig_info(SIGILL, &info, current);
}


/*
 * entUna has a different register layout to be reasonably simple. It
 * needs access to all the integer registers (the kernel doesn't use
 * fp-regs), and it needs to have them in order for simpler access.
 *
 * Due to the non-standard register layout (and because we don't want
 * to handle floating-point regs), user-mode unaligned accesses are
 * handled separately by do_entUnaUser below.
 *
 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
 * on a gp-register unaligned load/store, something is _very_ wrong
 * in the kernel anyway..
 */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.  */
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ?
(r)+19 : (r)])


/*
 * Kernel-mode unaligned access fixup.  Emulates the faulting load or
 * store with ldq_u/stq_u extract/insert sequences; on a nested fault
 * it forwards to the exception table fixup, or dumps state and kills
 * the task if no fixup exists.
 */
asmlinkage void
do_entUna(void * va, unsigned long opcode, unsigned long reg,
	  struct allregs *regs)
{
	long error, tmp1, tmp2, tmp3, tmp4;
	unsigned long pc = regs->pc - 4;
	unsigned long *_regs = regs->regs;
	const struct exception_table_entry *fixup;

	unaligned[0].count++;
	unaligned[0].va = (unsigned long) va;
	unaligned[0].pc = pc;

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1: ldq_u %1,0(%3)\n"
		"2: ldq_u %2,1(%3)\n"
		" extwl %1,%3,%1\n"
		" extwh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda %1,3b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1: ldq_u %1,0(%3)\n"
		"2: ldq_u %2,3(%3)\n"
		" extll %1,%3,%1\n"
		" extlh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda %1,3b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		/* sign-extend the 32-bit result, as ldl would */
		una_reg(reg) = (int)(tmp1|tmp2);
		return;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1: ldq_u %1,0(%3)\n"
		"2: ldq_u %2,7(%3)\n"
		" extql %1,%3,%1\n"
		" extqh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda %1,3b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1: ldq_u %2,1(%5)\n"
		"2: ldq_u %1,0(%5)\n"
		" inswh %6,%5,%4\n"
		" inswl %6,%5,%3\n"
		" mskwh %2,%5,%2\n"
		" mskwl %1,%5,%1\n"
		" or %2,%4,%2\n"
		" or %1,%3,%1\n"
		"3: stq_u %2,1(%5)\n"
		"4: stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda %2,5b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %1,5b-2b(%0)\n"
		" .long 3b - .\n"
		" lda $31,5b-3b(%0)\n"
		" .long 4b - .\n"
		" lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1: ldq_u %2,3(%5)\n"
		"2: ldq_u %1,0(%5)\n"
		" inslh %6,%5,%4\n"
		" insll %6,%5,%3\n"
		" msklh %2,%5,%2\n"
		" mskll %1,%5,%1\n"
		" or %2,%4,%2\n"
		" or %1,%3,%1\n"
		"3: stq_u %2,3(%5)\n"
		"4: stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda %2,5b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %1,5b-2b(%0)\n"
		" .long 3b - .\n"
		" lda $31,5b-3b(%0)\n"
		" .long 4b - .\n"
		" lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1: ldq_u %2,7(%5)\n"
		"2: ldq_u %1,0(%5)\n"
		" insqh %6,%5,%4\n"
		" insql %6,%5,%3\n"
		" mskqh %2,%5,%2\n"
		" mskql %1,%5,%1\n"
		" or %2,%4,%2\n"
		" or %1,%3,%1\n"
		"3: stq_u %2,7(%5)\n"
		"4: stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n\t"
		" .long 1b - .\n"
		" lda %2,5b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %1,5b-2b(%0)\n"
		" .long 3b - .\n"
		" lda $31,5b-3b(%0)\n"
		" .long 4b - .\n"
		" lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;
	}

	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
		pc, va, opcode, reg);
	do_exit(SIGSEGV);

got_exception:
	/* Ok, we caught the exception, but we don't want it.  Is there
	   someone to pass it along to?  */
	if ((fixup = search_exception_tables(pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(una_reg, fixup, pc);

		printk("Forwarding unaligned exception at %lx (%lx)\n",
		       pc, newpc);

		regs->pc = newpc;
		return;
	}

	/*
	 * Yikes!  No one to forward the exception to.
	 * Since the registers are in a weird format, dump them ourselves.
	 */

	printk("%s(%d): unhandled unaligned exception\n",
	       current->comm, task_pid_nr(current));

	printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
	       pc, una_reg(26), regs->ps);
	printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
	       una_reg(0), una_reg(1), una_reg(2));
	printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
	       una_reg(3), una_reg(4), una_reg(5));
	printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
	       una_reg(6), una_reg(7), una_reg(8));
	printk("r9 = %016lx r10= %016lx r11= %016lx\n",
	       una_reg(9), una_reg(10), una_reg(11));
	printk("r12= %016lx r13= %016lx r14= %016lx\n",
	       una_reg(12), una_reg(13), una_reg(14));
	printk("r15= %016lx\n", una_reg(15));
	printk("r16= %016lx r17= %016lx r18= %016lx\n",
	       una_reg(16), una_reg(17), una_reg(18));
	printk("r19= %016lx r20= %016lx r21= %016lx\n",
	       una_reg(19), una_reg(20), una_reg(21));
	printk("r22= %016lx r23= %016lx r24= %016lx\n",
	       una_reg(22), una_reg(23), una_reg(24));
	printk("r25= %016lx r27= %016lx r28= %016lx\n",
	       una_reg(25), una_reg(27), una_reg(28));
	printk("gp = %016lx sp = %p\n", regs->gp, regs+1);

	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(regs+1));

	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}

/*
 * Convert an s-floating point value in memory format to the
 * corresponding value in register format.  The exponent
 * needs to be remapped to preserve non-finite values
 * (infinities, not-a-numbers, denormals).
 */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long frac = (s_mem >> 0) & 0x7fffff;
	unsigned long sign = (s_mem >> 31) & 0x1;
	unsigned long exp_msb = (s_mem >> 30) & 0x1;
	unsigned long exp_low = (s_mem >> 23) & 0x7f;
	unsigned long exp;

	exp = (exp_msb << 10) | exp_low;	/* common case */

	/* Remap the exponent so infinities/NaNs/denormals survive the
	   widening from the 8-bit memory exponent to the 11-bit one. */
	if (exp_msb) {
		if (exp_low == 0x7f) {
			exp = 0x7ff;
		}
	} else {
		if (exp_low == 0x00) {
			exp = 0x000;
		} else {
			exp |= (0x7 << 7);
		}
	}
	return (sign << 63) | (exp << 52) | (frac << 29);
}

/*
 * Convert an s-floating point value in register format to the
 * corresponding value in memory format.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
}

/*
 * Handle user-level unaligned fault.  Handling user-level unaligned
 * faults is *extremely* slow and produces nasty messages.  A user
 * program *should* fix unaligned faults ASAP.
 *
 * Notice that we have (almost) the regular kernel stack layout here,
 * so finding the appropriate registers is a little more difficult
 * than in the kernel case.
 *
 * Finally, we handle regular integer load/stores only.  In
 * particular, load-linked/store-conditionally and floating point
 * load/stores are not supported.  The former make no sense with
 * unaligned faults (they are guaranteed to fail) and I don't think
 * the latter will occur in any decent program.
 *
 * Sigh. We *do* have to handle some FP operations, because GCC will
 * use them as temporary storage for integer memory to memory copies.
 * However, we need to deal with stt/ldt and sts/lds only.
 */

#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27 /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */

#define R(x)	((size_t) &((struct pt_regs *)0)->x)

/* Byte offset of each integer register within the trap frame; the
   negative entries are the callee-saved r9..r15 stored just below it. */
static int unauser_reg_offsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	/* r9 ... r15 are stored in front of regs.  */
	-56, -48, -40, -32, -24, -16, -8,
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp),
	0, 0
};

#undef R

/*
 * User-mode unaligned fixup: honours the per-thread UAC flags, then
 * emulates the access, writing integer results into the trap frame
 * via unauser_reg_offsets and FP results via alpha_write_fp_reg.
 */
asmlinkage void
do_entUnaUser(void __user * va, unsigned long opcode,
	      unsigned long reg, struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long fake_reg, *reg_addr = &fake_reg;
	siginfo_t info;
	long error;

	/* Check the UAC bits to decide what the user wants us to do
	   with the unaligned access.  */

	if (!test_thread_flag (TIF_UAC_NOPRINT)) {
		if (__ratelimit(&ratelimit)) {
			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
			       current->comm, task_pid_nr(current),
			       regs->pc - 4, va, opcode, reg);
		}
	}
	if (test_thread_flag (TIF_UAC_SIGBUS))
		goto give_sigbus;
	/* Not sure why you'd want to use this, but... */
	if (test_thread_flag (TIF_UAC_NOFIX))
		return;

	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that
	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
	if (!__access_ok((unsigned long)va, 0, USER_DS))
		goto give_sigsegv;

	++unaligned[1].count;
	unaligned[1].va = (unsigned long)va;
	unaligned[1].pc = regs->pc - 4;

	if ((1L << opcode) & OP_INT_MASK) {
		/* it's an integer load/store */
		if (reg < 30) {
			reg_addr = (unsigned long *)
			  ((char *)regs + unauser_reg_offsets[reg]);
		} else if (reg == 30) {
			/* usp in PAL regs */
			fake_reg = rdusp();
		} else {
			/* zero "register" */
			fake_reg = 0;
		}
	}

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1: ldq_u %1,0(%3)\n"
		"2: ldq_u %2,1(%3)\n"
		" extwl %1,%3,%1\n"
		" extwh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda %1,3b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	case 0x22: /* lds */
		__asm__ __volatile__(
		"1: ldq_u %1,0(%3)\n"
		"2: ldq_u %2,3(%3)\n"
		" extll %1,%3,%1\n"
		" extlh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda %1,3b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
		return;

	case 0x23: /* ldt */
		__asm__ __volatile__(
		"1: ldq_u %1,0(%3)\n"
		"2: ldq_u %2,7(%3)\n"
		" extql %1,%3,%1\n"
		" extqh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda %1,3b-1b(%0)\n"
		" .long 2b - .\n"
		" lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, tmp1|tmp2);
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1: ldq_u %1,0(%3)\n"
		"2: ldq_u %2,3(%3)\n"
		" extll %1,%3,%1\n"
		" extlh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		" .long 1b - .\n"
		" lda 
%1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; *reg_addr = (int)(tmp1|tmp2); break; case 0x29: /* ldq */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,7(%3)\n" " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; *reg_addr = tmp1|tmp2; break; /* Note that the store sequences do not indicate that they change memory because it _should_ be affecting nothing in this context. (Otherwise we have other, much larger, problems.) */ case 0x0d: /* stw */ __asm__ __volatile__( "1: ldq_u %2,1(%5)\n" "2: ldq_u %1,0(%5)\n" " inswh %6,%5,%4\n" " inswl %6,%5,%3\n" " mskwh %2,%5,%2\n" " mskwl %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,1(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if (error) goto give_sigsegv; return; case 0x26: /* sts */ fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg)); /* FALLTHRU */ case 0x2c: /* stl */ __asm__ __volatile__( "1: ldq_u %2,3(%5)\n" "2: ldq_u %1,0(%5)\n" " inslh %6,%5,%4\n" " insll %6,%5,%3\n" " msklh %2,%5,%2\n" " mskll %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,3(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if 
(error) goto give_sigsegv; return; case 0x27: /* stt */ fake_reg = alpha_read_fp_reg(reg); /* FALLTHRU */ case 0x2d: /* stq */ __asm__ __volatile__( "1: ldq_u %2,7(%5)\n" "2: ldq_u %1,0(%5)\n" " insqh %6,%5,%4\n" " insql %6,%5,%3\n" " mskqh %2,%5,%2\n" " mskql %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,7(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n\t" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if (error) goto give_sigsegv; return; default: /* What instruction were you trying to use, exactly? */ goto give_sigbus; } /* Only integer loads should get here; everyone else returns early. */ if (reg == 30) wrusp(fake_reg); return; give_sigsegv: regs->pc -= 4; /* make pc point to faulting insn */ info.si_signo = SIGSEGV; info.si_errno = 0; /* We need to replicate some of the logic in mm/fault.c, since we don't have access to the fault code in the exception handling return path. */ if (!__access_ok((unsigned long)va, 0, USER_DS)) info.si_code = SEGV_ACCERR; else { struct mm_struct *mm = current->mm; down_read(&mm->mmap_sem); if (find_vma(mm, (unsigned long)va)) info.si_code = SEGV_ACCERR; else info.si_code = SEGV_MAPERR; up_read(&mm->mmap_sem); } info.si_addr = va; send_sig_info(SIGSEGV, &info, current); return; give_sigbus: regs->pc -= 4; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = va; send_sig_info(SIGBUS, &info, current); return; } void __cpuinit trap_init(void) { /* Tell PAL-code what global pointer we want in the kernel. */ register unsigned long gptr __asm__("$29"); wrkgp(gptr); /* Hack for Multia (UDB) and JENSEN: some of their SRMs have a bug in the handling of the opDEC fault. Fix it up if so. 
*/ if (implver() == IMPLVER_EV4) opDEC_check(); wrent(entArith, 1); wrent(entMM, 2); wrent(entIF, 3); wrent(entUna, 4); wrent(entSys, 5); wrent(entDbg, 6); }
gpl-2.0
shazzl/TW_i9205_JB
arch/arm/mach-ux500/board-mop500.c
4659
21898
/* * Copyright (C) 2008-2009 ST-Ericsson * * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/amba/bus.h> #include <linux/amba/pl022.h> #include <linux/amba/serial.h> #include <linux/spi/spi.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/regulator/ab8500.h> #include <linux/mfd/tc3589x.h> #include <linux/mfd/tps6105x.h> #include <linux/mfd/abx500/ab8500-gpio.h> #include <linux/leds-lp5521.h> #include <linux/input.h> #include <linux/smsc911x.h> #include <linux/gpio_keys.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/leds.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/hardware/gic.h> #include <plat/i2c.h> #include <plat/ste_dma40.h> #include <plat/pincfg.h> #include <plat/gpio-nomadik.h> #include <mach/hardware.h> #include <mach/setup.h> #include <mach/devices.h> #include <mach/irqs.h> #include "pins-db8500.h" #include "ste-dma40-db8500.h" #include "devices-db8500.h" #include "board-mop500.h" #include "board-mop500-regulators.h" static struct gpio_led snowball_led_array[] = { { .name = "user_led", .default_trigger = "none", .gpio = 142, }, }; static struct gpio_led_platform_data snowball_led_data = { .leds = snowball_led_array, .num_leds = ARRAY_SIZE(snowball_led_array), }; static struct platform_device snowball_led_dev = { .name = "leds-gpio", .dev = { .platform_data = &snowball_led_data, }, }; static struct ab8500_gpio_platform_data ab8500_gpio_pdata = { .gpio_base = MOP500_AB8500_PIN_GPIO(1), .irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE, /* config_reg is the initial configuration of ab8500 
pins. * The pins can be configured as GPIO or alt functions based * on value present in GpioSel1 to GpioSel6 and AlternatFunction * register. This is the array of 7 configuration settings. * One has to compile time decide these settings. Below is the * explanation of these setting * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured * as GPIO then this register selectes the alternate fucntions */ .config_reg = {0x00, 0x1E, 0x80, 0x01, 0x7A, 0x00, 0x00}, }; static struct gpio_keys_button snowball_key_array[] = { { .gpio = 32, .type = EV_KEY, .code = KEY_1, .desc = "userpb", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, { .gpio = 151, .type = EV_KEY, .code = KEY_2, .desc = "extkb1", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, { .gpio = 152, .type = EV_KEY, .code = KEY_3, .desc = "extkb2", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, { .gpio = 161, .type = EV_KEY, .code = KEY_4, .desc = "extkb3", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, { .gpio = 162, .type = EV_KEY, .code = KEY_5, .desc = "extkb4", .active_low = 1, .debounce_interval = 50, .wakeup = 1, }, }; static struct gpio_keys_platform_data snowball_key_data = { .buttons = snowball_key_array, .nbuttons = ARRAY_SIZE(snowball_key_array), }; static struct platform_device snowball_key_dev = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &snowball_key_data, } }; static struct smsc911x_platform_config snowball_sbnet_cfg = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, .flags = SMSC911X_USE_16BIT | 
SMSC911X_FORCE_INTERNAL_PHY, .shift = 1, }; static struct resource sbnet_res[] = { { .name = "smsc911x-memory", .start = (0x5000 << 16), .end = (0x5000 << 16) + 0xffff, .flags = IORESOURCE_MEM, }, { .start = NOMADIK_GPIO_TO_IRQ(140), .end = NOMADIK_GPIO_TO_IRQ(140), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; static struct platform_device snowball_sbnet_dev = { .name = "smsc911x", .num_resources = ARRAY_SIZE(sbnet_res), .resource = sbnet_res, .dev = { .platform_data = &snowball_sbnet_cfg, }, }; static struct ab8500_platform_data ab8500_platdata = { .irq_base = MOP500_AB8500_IRQ_BASE, .regulator_reg_init = ab8500_regulator_reg_init, .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init), .regulator = ab8500_regulators, .num_regulator = ARRAY_SIZE(ab8500_regulators), .gpio = &ab8500_gpio_pdata, }; static struct resource ab8500_resources[] = { [0] = { .start = IRQ_DB8500_AB8500, .end = IRQ_DB8500_AB8500, .flags = IORESOURCE_IRQ } }; struct platform_device ab8500_device = { .name = "ab8500-i2c", .id = 0, .dev = { .platform_data = &ab8500_platdata, }, .num_resources = 1, .resource = ab8500_resources, }; /* * TPS61052 */ static struct tps6105x_platform_data mop500_tps61052_data = { .mode = TPS6105X_MODE_VOLTAGE, .regulator_data = &tps61052_regulator, }; /* * TC35892 */ static void mop500_tc35892_init(struct tc3589x *tc3589x, unsigned int base) { struct device *parent = NULL; #if 0 /* FIXME: Is the sdi actually part of tc3589x? 
*/ parent = tc3589x->dev; #endif mop500_sdi_tc35892_init(parent); } static struct tc3589x_gpio_platform_data mop500_tc35892_gpio_data = { .gpio_base = MOP500_EGPIO(0), .setup = mop500_tc35892_init, }; static struct tc3589x_platform_data mop500_tc35892_data = { .block = TC3589x_BLOCK_GPIO, .gpio = &mop500_tc35892_gpio_data, .irq_base = MOP500_EGPIO_IRQ_BASE, }; static struct lp5521_led_config lp5521_pri_led[] = { [0] = { .chan_nr = 0, .led_current = 0x2f, .max_current = 0x5f, }, [1] = { .chan_nr = 1, .led_current = 0x2f, .max_current = 0x5f, }, [2] = { .chan_nr = 2, .led_current = 0x2f, .max_current = 0x5f, }, }; static struct lp5521_platform_data __initdata lp5521_pri_data = { .label = "lp5521_pri", .led_config = &lp5521_pri_led[0], .num_channels = 3, .clock_mode = LP5521_CLOCK_EXT, }; static struct lp5521_led_config lp5521_sec_led[] = { [0] = { .chan_nr = 0, .led_current = 0x2f, .max_current = 0x5f, }, [1] = { .chan_nr = 1, .led_current = 0x2f, .max_current = 0x5f, }, [2] = { .chan_nr = 2, .led_current = 0x2f, .max_current = 0x5f, }, }; static struct lp5521_platform_data __initdata lp5521_sec_data = { .label = "lp5521_sec", .led_config = &lp5521_sec_led[0], .num_channels = 3, .clock_mode = LP5521_CLOCK_EXT, }; static struct i2c_board_info __initdata mop500_i2c0_devices[] = { { I2C_BOARD_INFO("tc3589x", 0x42), .irq = NOMADIK_GPIO_TO_IRQ(217), .platform_data = &mop500_tc35892_data, }, /* I2C0 devices only available prior to HREFv60 */ { I2C_BOARD_INFO("tps61052", 0x33), .platform_data = &mop500_tps61052_data, }, }; #define NUM_PRE_V60_I2C0_DEVICES 1 static struct i2c_board_info __initdata mop500_i2c2_devices[] = { { /* lp5521 LED driver, 1st device */ I2C_BOARD_INFO("lp5521", 0x33), .platform_data = &lp5521_pri_data, }, { /* lp5521 LED driver, 2st device */ I2C_BOARD_INFO("lp5521", 0x34), .platform_data = &lp5521_sec_data, }, { /* Light sensor Rohm BH1780GLI */ I2C_BOARD_INFO("bh1780", 0x29), }, }; #define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, t_out, _sm) 
\ static struct nmk_i2c_controller u8500_i2c##id##_data = { \ /* \ * slave data setup time, which is \ * 250 ns,100ns,10ns which is 14,6,2 \ * respectively for a 48 Mhz \ * i2c clock \ */ \ .slsu = _slsu, \ /* Tx FIFO threshold */ \ .tft = _tft, \ /* Rx FIFO threshold */ \ .rft = _rft, \ /* std. mode operation */ \ .clk_freq = clk, \ /* Slave response timeout(ms) */\ .timeout = t_out, \ .sm = _sm, \ } /* * The board uses 4 i2c controllers, initialize all of * them with slave data setup time of 250 ns, * Tx & Rx FIFO threshold values as 8 and standard * mode of operation */ U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); static void __init mop500_i2c_init(struct device *parent) { db8500_add_i2c0(parent, &u8500_i2c0_data); db8500_add_i2c1(parent, &u8500_i2c1_data); db8500_add_i2c2(parent, &u8500_i2c2_data); db8500_add_i2c3(parent, &u8500_i2c3_data); } static struct gpio_keys_button mop500_gpio_keys[] = { { .desc = "SFH7741 Proximity Sensor", .type = EV_SW, .code = SW_FRONT_PROXIMITY, .active_low = 0, .can_disable = 1, } }; static struct regulator *prox_regulator; static int mop500_prox_activate(struct device *dev); static void mop500_prox_deactivate(struct device *dev); static struct gpio_keys_platform_data mop500_gpio_keys_data = { .buttons = mop500_gpio_keys, .nbuttons = ARRAY_SIZE(mop500_gpio_keys), .enable = mop500_prox_activate, .disable = mop500_prox_deactivate, }; static struct platform_device mop500_gpio_keys_device = { .name = "gpio-keys", .id = 0, .dev = { .platform_data = &mop500_gpio_keys_data, }, }; static int mop500_prox_activate(struct device *dev) { prox_regulator = regulator_get(&mop500_gpio_keys_device.dev, "vcc"); if (IS_ERR(prox_regulator)) { dev_err(&mop500_gpio_keys_device.dev, "no regulator\n"); return 
PTR_ERR(prox_regulator); } regulator_enable(prox_regulator); return 0; } static void mop500_prox_deactivate(struct device *dev) { regulator_disable(prox_regulator); regulator_put(prox_regulator); } /* add any platform devices here - TODO */ static struct platform_device *mop500_platform_devs[] __initdata = { &mop500_gpio_keys_device, &ab8500_device, }; #ifdef CONFIG_STE_DMA40 static struct stedma40_chan_cfg ssp0_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB8500_DMA_DEV8_SSP0_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg ssp0_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB8500_DMA_DEV8_SSP0_TX, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; #endif static struct pl022_ssp_controller ssp0_plat = { .bus_id = 0, #ifdef CONFIG_STE_DMA40 .enable_dma = 1, .dma_filter = stedma40_filter, .dma_rx_param = &ssp0_dma_cfg_rx, .dma_tx_param = &ssp0_dma_cfg_tx, #else .enable_dma = 0, #endif /* on this platform, gpio 31,142,144,214 & * 224 are connected as chip selects */ .num_chipselect = 5, }; static void __init mop500_spi_init(struct device *parent) { db8500_add_ssp0(parent, &ssp0_plat); } #ifdef CONFIG_STE_DMA40 static struct stedma40_chan_cfg uart0_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB8500_DMA_DEV13_UART0_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg uart0_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB8500_DMA_DEV13_UART0_TX, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, 
}; static struct stedma40_chan_cfg uart1_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB8500_DMA_DEV12_UART1_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg uart1_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB8500_DMA_DEV12_UART1_TX, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg uart2_dma_cfg_rx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_PERIPH_TO_MEM, .src_dev_type = DB8500_DMA_DEV11_UART2_RX, .dst_dev_type = STEDMA40_DEV_DST_MEMORY, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; static struct stedma40_chan_cfg uart2_dma_cfg_tx = { .mode = STEDMA40_MODE_LOGICAL, .dir = STEDMA40_MEM_TO_PERIPH, .src_dev_type = STEDMA40_DEV_SRC_MEMORY, .dst_dev_type = DB8500_DMA_DEV11_UART2_TX, .src_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.data_width = STEDMA40_BYTE_WIDTH, }; #endif static pin_cfg_t mop500_pins_uart0[] = { GPIO0_U0_CTSn | PIN_INPUT_PULLUP, GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, GPIO2_U0_RXD | PIN_INPUT_PULLUP, GPIO3_U0_TXD | PIN_OUTPUT_HIGH, }; #define PRCC_K_SOFTRST_SET 0x18 #define PRCC_K_SOFTRST_CLEAR 0x1C static void ux500_uart0_reset(void) { void __iomem *prcc_rst_set, *prcc_rst_clr; prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + PRCC_K_SOFTRST_SET); prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + PRCC_K_SOFTRST_CLEAR); /* Activate soft reset PRCC_K_SOFTRST_CLEAR */ writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr); udelay(1); /* Release soft reset PRCC_K_SOFTRST_SET */ writel((readl(prcc_rst_set) | 0x1), prcc_rst_set); udelay(1); } static void ux500_uart0_init(void) { int ret; ret = nmk_config_pins(mop500_pins_uart0, ARRAY_SIZE(mop500_pins_uart0)); 
if (ret < 0) pr_err("pl011: uart pins_enable failed\n"); } static void ux500_uart0_exit(void) { int ret; ret = nmk_config_pins_sleep(mop500_pins_uart0, ARRAY_SIZE(mop500_pins_uart0)); if (ret < 0) pr_err("pl011: uart pins_disable failed\n"); } static struct amba_pl011_data uart0_plat = { #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &uart0_dma_cfg_rx, .dma_tx_param = &uart0_dma_cfg_tx, #endif .init = ux500_uart0_init, .exit = ux500_uart0_exit, .reset = ux500_uart0_reset, }; static struct amba_pl011_data uart1_plat = { #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &uart1_dma_cfg_rx, .dma_tx_param = &uart1_dma_cfg_tx, #endif }; static struct amba_pl011_data uart2_plat = { #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &uart2_dma_cfg_rx, .dma_tx_param = &uart2_dma_cfg_tx, #endif }; static void __init mop500_uart_init(struct device *parent) { db8500_add_uart0(parent, &uart0_plat); db8500_add_uart1(parent, &uart1_plat); db8500_add_uart2(parent, &uart2_plat); } static struct platform_device *snowball_platform_devs[] __initdata = { &snowball_led_dev, &snowball_key_dev, &snowball_sbnet_dev, &ab8500_device, }; static void __init mop500_init_machine(void) { struct device *parent = NULL; int i2c0_devs; int i; mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR; parent = u8500_init_devices(); mop500_pins_init(); /* FIXME: parent of ab8500 should be prcmu */ for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); mop500_i2c_init(parent); mop500_sdi_init(parent); mop500_spi_init(parent); mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); /* This board has full regulator constraints */ regulator_has_full_constraints(); } static void 
__init snowball_init_machine(void) { struct device *parent = NULL; int i2c0_devs; int i; parent = u8500_init_devices(); snowball_pins_init(); for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) snowball_platform_devs[i]->dev.parent = parent; platform_add_devices(snowball_platform_devs, ARRAY_SIZE(snowball_platform_devs)); mop500_i2c_init(parent); snowball_sdi_init(parent); mop500_spi_init(parent); mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); /* This board has full regulator constraints */ regulator_has_full_constraints(); } static void __init hrefv60_init_machine(void) { struct device *parent = NULL; int i2c0_devs; int i; /* * The HREFv60 board removed a GPIO expander and routed * all these GPIO pins to the internal GPIO controller * instead. */ mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO; parent = u8500_init_devices(); hrefv60_pins_init(); for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); mop500_i2c_init(parent); hrefv60_sdi_init(parent); mop500_spi_init(parent); mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); /* This board has full regulator constraints */ regulator_has_full_constraints(); } MACHINE_START(U8500, "ST-Ericsson MOP500 platform") /* Maintainer: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> */ .atag_offset = 0x100, .map_io = u8500_map_io, .init_irq = ux500_init_irq, /* we re-use nomadik timer here */ .timer = &ux500_timer, .handle_irq = gic_handle_irq, .init_machine = mop500_init_machine, MACHINE_END MACHINE_START(HREFV60, "ST-Ericsson U8500 
Platform HREFv60+") .atag_offset = 0x100, .map_io = u8500_map_io, .init_irq = ux500_init_irq, .timer = &ux500_timer, .handle_irq = gic_handle_irq, .init_machine = hrefv60_init_machine, MACHINE_END MACHINE_START(SNOWBALL, "Calao Systems Snowball platform") .atag_offset = 0x100, .map_io = u8500_map_io, .init_irq = ux500_init_irq, /* we re-use nomadik timer here */ .timer = &ux500_timer, .handle_irq = gic_handle_irq, .init_machine = snowball_init_machine, MACHINE_END #ifdef CONFIG_MACH_UX500_DT struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat), OF_DEV_AUXDATA("arm,pl011", 0x80121000, "uart1", &uart1_plat), OF_DEV_AUXDATA("arm,pl011", 0x80007000, "uart2", &uart2_plat), OF_DEV_AUXDATA("arm,pl022", 0x80002000, "ssp0", &ssp0_plat), {}, }; static const struct of_device_id u8500_soc_node[] = { /* only create devices below soc node */ { .compatible = "stericsson,db8500", }, { }, }; static void __init u8500_init_machine(void) { struct device *parent = NULL; int i2c0_devs; int i; parent = u8500_init_devices(); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) snowball_platform_devs[i]->dev.parent = parent; /* automatically probe child nodes of db8500 device */ of_platform_populate(NULL, u8500_soc_node, u8500_auxdata_lookup, parent); if (of_machine_is_compatible("st-ericsson,mop500")) { mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR; mop500_pins_init(); platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); mop500_sdi_init(parent); } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { snowball_pins_init(); platform_add_devices(snowball_platform_devs, ARRAY_SIZE(snowball_platform_devs)); snowball_sdi_init(parent); } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { /* * The HREFv60 board removed a GPIO expander 
and routed * all these GPIO pins to the internal GPIO controller * instead. */ mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO; i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; hrefv60_pins_init(); platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); hrefv60_sdi_init(parent); } mop500_i2c_init(parent); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); /* This board has full regulator constraints */ regulator_has_full_constraints(); } static const char * u8500_dt_board_compat[] = { "calaosystems,snowball-a9500", "st-ericsson,hrefv60+", "st-ericsson,u8500", "st-ericsson,mop500", NULL, }; DT_MACHINE_START(U8500_DT, "ST-Ericsson U8500 platform (Device Tree Support)") .map_io = u8500_map_io, .init_irq = ux500_init_irq, /* we re-use nomadik timer here */ .timer = &ux500_timer, .handle_irq = gic_handle_irq, .init_machine = u8500_init_machine, .dt_compat = u8500_dt_board_compat, MACHINE_END #endif
gpl-2.0
championswimmer/android_kernel_sony_seagull
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
5171
8396
/* exynos_drm_fbdev.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * Authors: * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * Seung-Woo Kim <sw0312.kim@samsung.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "drmP.h" #include "drm_crtc.h" #include "drm_fb_helper.h" #include "drm_crtc_helper.h" #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_gem.h" #define MAX_CONNECTOR 4 #define PREFERRED_BPP 32 #define to_exynos_fbdev(x) container_of(x, struct exynos_drm_fbdev,\ drm_fb_helper) struct exynos_drm_fbdev { struct drm_fb_helper drm_fb_helper; struct exynos_drm_gem_obj *exynos_gem_obj; }; static struct fb_ops exynos_drm_fb_ops = { .owner = THIS_MODULE, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, .fb_pan_display = drm_fb_helper_pan_display, .fb_setcmap = drm_fb_helper_setcmap, }; static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, struct drm_framebuffer *fb) { struct fb_info *fbi = helper->fbdev; struct drm_device *dev = helper->dev; struct exynos_drm_gem_buf *buffer; unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); unsigned long offset; DRM_DEBUG_KMS("%s\n", __FILE__); drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); /* RGB formats use only one buffer */ buffer = exynos_drm_fb_buffer(fb, 0); if (!buffer) { DRM_LOG_KMS("buffer is null.\n"); return -EFAULT; } offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); offset += fbi->var.yoffset * fb->pitches[0]; dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; fbi->screen_base = buffer->kvaddr + offset; fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset); fbi->screen_size = size; fbi->fix.smem_len = size; return 0; } static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper); struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_device *dev = helper->dev; struct fb_info *fbi; struct drm_mode_fb_cmd2 
mode_cmd = { 0 }; struct platform_device *pdev = dev->platformdev; unsigned long size; int ret; DRM_DEBUG_KMS("%s\n", __FILE__); DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", sizes->surface_width, sizes->surface_height, sizes->surface_bpp); mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); mutex_lock(&dev->struct_mutex); fbi = framebuffer_alloc(0, &pdev->dev); if (!fbi) { DRM_ERROR("failed to allocate fb info.\n"); ret = -ENOMEM; goto out; } size = mode_cmd.pitches[0] * mode_cmd.height; /* 0 means to allocate physically continuous memory */ exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); if (IS_ERR(exynos_gem_obj)) { ret = PTR_ERR(exynos_gem_obj); goto out; } exynos_fbdev->exynos_gem_obj = exynos_gem_obj; helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem_obj->base); if (IS_ERR_OR_NULL(helper->fb)) { DRM_ERROR("failed to create drm framebuffer.\n"); ret = PTR_ERR(helper->fb); goto out; } helper->fbdev = fbi; fbi->par = helper; fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &exynos_drm_fb_ops; ret = fb_alloc_cmap(&fbi->cmap, 256, 0); if (ret) { DRM_ERROR("failed to allocate cmap.\n"); goto out; } ret = exynos_drm_fbdev_update(helper, helper->fb); if (ret < 0) { fb_dealloc_cmap(&fbi->cmap); goto out; } /* * if failed, all resources allocated above would be released by * drm_mode_config_cleanup() when drm_load() had been called prior * to any specific driver such as fimd or hdmi driver. */ out: mutex_unlock(&dev->struct_mutex); return ret; } static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { int ret = 0; DRM_DEBUG_KMS("%s\n", __FILE__); /* * with !helper->fb, it means that this funcion is called first time * and after that, the helper->fb would be used as clone mode. 
*/ if (!helper->fb) { ret = exynos_drm_fbdev_create(helper, sizes); if (ret < 0) { DRM_ERROR("failed to create fbdev.\n"); return ret; } /* * fb_helper expects a value more than 1 if succeed * because register_framebuffer() should be called. */ ret = 1; } return ret; } static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { .fb_probe = exynos_drm_fbdev_probe, }; int exynos_drm_fbdev_init(struct drm_device *dev) { struct exynos_drm_fbdev *fbdev; struct exynos_drm_private *private = dev->dev_private; struct drm_fb_helper *helper; unsigned int num_crtc; int ret; DRM_DEBUG_KMS("%s\n", __FILE__); if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) return 0; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); if (!fbdev) { DRM_ERROR("failed to allocate drm fbdev.\n"); return -ENOMEM; } private->fb_helper = helper = &fbdev->drm_fb_helper; helper->funcs = &exynos_drm_fb_helper_funcs; num_crtc = dev->mode_config.num_crtc; ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR); if (ret < 0) { DRM_ERROR("failed to initialize drm fb helper.\n"); goto err_init; } ret = drm_fb_helper_single_add_all_connectors(helper); if (ret < 0) { DRM_ERROR("failed to register drm_fb_helper_connector.\n"); goto err_setup; } ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); if (ret < 0) { DRM_ERROR("failed to set up hw configuration.\n"); goto err_setup; } return 0; err_setup: drm_fb_helper_fini(helper); err_init: private->fb_helper = NULL; kfree(fbdev); return ret; } static void exynos_drm_fbdev_destroy(struct drm_device *dev, struct drm_fb_helper *fb_helper) { struct drm_framebuffer *fb; /* release drm framebuffer and real buffer */ if (fb_helper->fb && fb_helper->fb->funcs) { fb = fb_helper->fb; if (fb && fb->funcs->destroy) fb->funcs->destroy(fb); } /* release linux framebuffer */ if (fb_helper->fbdev) { struct fb_info *info; int ret; info = fb_helper->fbdev; ret = unregister_framebuffer(info); if (ret < 0) DRM_DEBUG_KMS("failed 
unregister_framebuffer()\n"); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } drm_fb_helper_fini(fb_helper); } void exynos_drm_fbdev_fini(struct drm_device *dev) { struct exynos_drm_private *private = dev->dev_private; struct exynos_drm_fbdev *fbdev; if (!private || !private->fb_helper) return; fbdev = to_exynos_fbdev(private->fb_helper); if (fbdev->exynos_gem_obj) exynos_drm_gem_destroy(fbdev->exynos_gem_obj); exynos_drm_fbdev_destroy(dev, private->fb_helper); kfree(fbdev); private->fb_helper = NULL; } void exynos_drm_fbdev_restore_mode(struct drm_device *dev) { struct exynos_drm_private *private = dev->dev_private; if (!private || !private->fb_helper) return; drm_fb_helper_restore_fbdev_mode(private->fb_helper); }
gpl-2.0
J-Team/android_kernel_samsung_u8500
fs/hfs/bfind.c
8499
4663
/* * linux/fs/hfs/bfind.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Search routines for btrees */ #include <linux/slab.h> #include "btree.h" int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) { void *ptr; fd->tree = tree; fd->bnode = NULL; ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); if (!ptr) return -ENOMEM; fd->search_key = ptr; fd->key = ptr + tree->max_key_len + 2; dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); mutex_lock(&tree->tree_lock); return 0; } void hfs_find_exit(struct hfs_find_data *fd) { hfs_bnode_put(fd->bnode); kfree(fd->search_key); dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0)); mutex_unlock(&fd->tree->tree_lock); fd->tree = NULL; } /* Find the record in bnode that best matches key (not greater than...)*/ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd) { int cmpval; u16 off, len, keylen; int rec; int b, e; int res; b = 0; e = bnode->num_recs - 1; res = -ENOENT; do { rec = (e + b) / 2; len = hfs_brec_lenoff(bnode, rec, &off); keylen = hfs_brec_keylen(bnode, rec); if (keylen == 0) { res = -EINVAL; goto fail; } hfs_bnode_read(bnode, fd->key, off, keylen); cmpval = bnode->tree->keycmp(fd->key, fd->search_key); if (!cmpval) { e = rec; res = 0; goto done; } if (cmpval < 0) b = rec + 1; else e = rec - 1; } while (b <= e); if (rec != e && e >= 0) { len = hfs_brec_lenoff(bnode, e, &off); keylen = hfs_brec_keylen(bnode, e); if (keylen == 0) { res = -EINVAL; goto fail; } hfs_bnode_read(bnode, fd->key, off, keylen); } done: fd->record = e; fd->keyoffset = off; fd->keylength = keylen; fd->entryoffset = off + keylen; fd->entrylength = len - keylen; fail: return res; } /* Traverse a B*Tree from the root to a leaf finding best fit to key */ /* Return allocated copy of node found, set recnum to best record */ int hfs_brec_find(struct hfs_find_data *fd) { 
struct hfs_btree *tree; struct hfs_bnode *bnode; u32 nidx, parent; __be32 data; int height, res; tree = fd->tree; if (fd->bnode) hfs_bnode_put(fd->bnode); fd->bnode = NULL; nidx = tree->root; if (!nidx) return -ENOENT; height = tree->depth; res = 0; parent = 0; for (;;) { bnode = hfs_bnode_find(tree, nidx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; break; } if (bnode->height != height) goto invalid; if (bnode->type != (--height ? HFS_NODE_INDEX : HFS_NODE_LEAF)) goto invalid; bnode->parent = parent; res = __hfs_brec_find(bnode, fd); if (!height) break; if (fd->record < 0) goto release; parent = nidx; hfs_bnode_read(bnode, &data, fd->entryoffset, 4); nidx = be32_to_cpu(data); hfs_bnode_put(bnode); } fd->bnode = bnode; return res; invalid: printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n", height, bnode->height, bnode->type, nidx, parent); res = -EIO; release: hfs_bnode_put(bnode); return res; } int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len) { int res; res = hfs_brec_find(fd); if (res) return res; if (fd->entrylength > rec_len) return -EINVAL; hfs_bnode_read(fd->bnode, rec, fd->entryoffset, fd->entrylength); return 0; } int hfs_brec_goto(struct hfs_find_data *fd, int cnt) { struct hfs_btree *tree; struct hfs_bnode *bnode; int idx, res = 0; u16 off, len, keylen; bnode = fd->bnode; tree = bnode->tree; if (cnt < 0) { cnt = -cnt; while (cnt > fd->record) { cnt -= fd->record + 1; fd->record = bnode->num_recs - 1; idx = bnode->prev; if (!idx) { res = -ENOENT; goto out; } hfs_bnode_put(bnode); bnode = hfs_bnode_find(tree, idx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; goto out; } } fd->record -= cnt; } else { while (cnt >= bnode->num_recs - fd->record) { cnt -= bnode->num_recs - fd->record; fd->record = 0; idx = bnode->next; if (!idx) { res = -ENOENT; goto out; } hfs_bnode_put(bnode); bnode = hfs_bnode_find(tree, idx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; goto out; } } fd->record += cnt; 
} len = hfs_brec_lenoff(bnode, fd->record, &off); keylen = hfs_brec_keylen(bnode, fd->record); if (keylen == 0) { res = -EINVAL; goto out; } fd->keyoffset = off; fd->keylength = keylen; fd->entryoffset = off + keylen; fd->entrylength = len - keylen; hfs_bnode_read(bnode, fd->key, off, keylen); out: fd->bnode = bnode; return res; }
gpl-2.0
thanhphat11/android_kernel_pantech_msm8974
net/ax25/ax25_std_timer.c
9267
4311
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) * Copyright (C) Frederic Rible F1OAT (frible@teaser.fr) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> void ax25_std_heartbeat_expiry(ax25_cb *ax25) { struct sock *sk = ax25->sk; if (sk) bh_lock_sock(sk); switch (ax25->state) { case AX25_STATE_0: /* Magic here: If we listen() and a new link dies before it is accepted() it isn't 'dead' so doesn't get removed. */ if (!sk || sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { if (sk) { sock_hold(sk); ax25_destroy_socket(ax25); bh_unlock_sock(sk); sock_put(sk); } else ax25_destroy_socket(ax25); return; } break; case AX25_STATE_3: case AX25_STATE_4: /* * Check the state of the receive buffer. 
*/ if (sk != NULL) { if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) && (ax25->condition & AX25_COND_OWN_RX_BUSY)) { ax25->condition &= ~AX25_COND_OWN_RX_BUSY; ax25->condition &= ~AX25_COND_ACK_PENDING; ax25_send_control(ax25, AX25_RR, AX25_POLLOFF, AX25_RESPONSE); break; } } } if (sk) bh_unlock_sock(sk); ax25_start_heartbeat(ax25); } void ax25_std_t2timer_expiry(ax25_cb *ax25) { if (ax25->condition & AX25_COND_ACK_PENDING) { ax25->condition &= ~AX25_COND_ACK_PENDING; ax25_std_timeout_response(ax25); } } void ax25_std_t3timer_expiry(ax25_cb *ax25) { ax25->n2count = 0; ax25_std_transmit_enquiry(ax25); ax25->state = AX25_STATE_4; } void ax25_std_idletimer_expiry(ax25_cb *ax25) { ax25_clear_queues(ax25); ax25->n2count = 0; ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25->state = AX25_STATE_2; ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); ax25_stop_t2timer(ax25); ax25_stop_t3timer(ax25); if (ax25->sk != NULL) { bh_lock_sock(ax25->sk); ax25->sk->sk_state = TCP_CLOSE; ax25->sk->sk_err = 0; ax25->sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(ax25->sk, SOCK_DEAD)) { ax25->sk->sk_state_change(ax25->sk); sock_set_flag(ax25->sk, SOCK_DEAD); } bh_unlock_sock(ax25->sk); } } void ax25_std_t1timer_expiry(ax25_cb *ax25) { switch (ax25->state) { case AX25_STATE_1: if (ax25->n2count == ax25->n2) { if (ax25->modulus == AX25_MODULUS) { ax25_disconnect(ax25, ETIMEDOUT); return; } else { ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; ax25->n2count = 0; ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND); } } else { ax25->n2count++; if (ax25->modulus == AX25_MODULUS) ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND); else ax25_send_control(ax25, AX25_SABME, AX25_POLLON, AX25_COMMAND); } break; case AX25_STATE_2: if (ax25->n2count == ax25->n2) { ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_disconnect(ax25, ETIMEDOUT); return; } else { ax25->n2count++; 
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); } break; case AX25_STATE_3: ax25->n2count = 1; ax25_std_transmit_enquiry(ax25); ax25->state = AX25_STATE_4; break; case AX25_STATE_4: if (ax25->n2count == ax25->n2) { ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE); ax25_disconnect(ax25, ETIMEDOUT); return; } else { ax25->n2count++; ax25_std_transmit_enquiry(ax25); } break; } ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); }
gpl-2.0
fergy/optimus-l3_e400_kernel
drivers/video/via/via_aux_edid.c
9779
2399
/* * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * generic EDID driver */ #include <linux/slab.h> #include <linux/fb.h> #include "via_aux.h" #include "../edid.h" static const char *name = "EDID"; static void query_edid(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; unsigned char edid[EDID_LENGTH]; bool valid = false; if (spec) { fb_destroy_modedb(spec->modedb); } else { spec = kmalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return; } spec->version = spec->revision = 0; if (via_aux_read(drv, 0x00, edid, EDID_LENGTH)) { fb_edid_to_monspecs(edid, spec); valid = spec->version || spec->revision; } if (!valid) { kfree(spec); spec = NULL; } else printk(KERN_DEBUG "EDID: %s %s\n", spec->manufacturer, spec->monitor); drv->data = spec; } static const struct fb_videomode *get_preferred_mode(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; int i; if (!spec || !spec->modedb || !(spec->misc & FB_MISC_1ST_DETAIL)) return NULL; for (i = 0; i < spec->modedb_len; i++) { if (spec->modedb[i].flag & FB_MODE_IS_FIRST && spec->modedb[i].flag & FB_MODE_IS_DETAILED) return &spec->modedb[i]; } return NULL; } static void cleanup(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; if (spec) 
fb_destroy_modedb(spec->modedb); } void via_aux_edid_probe(struct via_aux_bus *bus) { struct via_aux_drv drv = { .bus = bus, .addr = 0x50, .name = name, .cleanup = cleanup, .get_preferred_mode = get_preferred_mode}; query_edid(&drv); /* as EDID devices can be connected/disconnected just add the driver */ via_aux_add(&drv); }
gpl-2.0
jshafer817/cm11_kernel_hp_tenderloin34
drivers/video/via/via_aux_edid.c
9779
2399
/* * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * generic EDID driver */ #include <linux/slab.h> #include <linux/fb.h> #include "via_aux.h" #include "../edid.h" static const char *name = "EDID"; static void query_edid(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; unsigned char edid[EDID_LENGTH]; bool valid = false; if (spec) { fb_destroy_modedb(spec->modedb); } else { spec = kmalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return; } spec->version = spec->revision = 0; if (via_aux_read(drv, 0x00, edid, EDID_LENGTH)) { fb_edid_to_monspecs(edid, spec); valid = spec->version || spec->revision; } if (!valid) { kfree(spec); spec = NULL; } else printk(KERN_DEBUG "EDID: %s %s\n", spec->manufacturer, spec->monitor); drv->data = spec; } static const struct fb_videomode *get_preferred_mode(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; int i; if (!spec || !spec->modedb || !(spec->misc & FB_MISC_1ST_DETAIL)) return NULL; for (i = 0; i < spec->modedb_len; i++) { if (spec->modedb[i].flag & FB_MODE_IS_FIRST && spec->modedb[i].flag & FB_MODE_IS_DETAILED) return &spec->modedb[i]; } return NULL; } static void cleanup(struct via_aux_drv *drv) { struct fb_monspecs *spec = drv->data; if (spec) 
fb_destroy_modedb(spec->modedb); } void via_aux_edid_probe(struct via_aux_bus *bus) { struct via_aux_drv drv = { .bus = bus, .addr = 0x50, .name = name, .cleanup = cleanup, .get_preferred_mode = get_preferred_mode}; query_edid(&drv); /* as EDID devices can be connected/disconnected just add the driver */ via_aux_add(&drv); }
gpl-2.0
DooMLoRD/semc-kernel-msm7x30-dev
arch/blackfin/mach-common/cache-c.c
11059
1798
/* * Blackfin cache control code (simpler control-style functions) * * Copyright 2004-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/init.h> #include <asm/blackfin.h> #include <asm/cplbinit.h> /* Invalidate the Entire Data cache by * clearing DMC[1:0] bits */ void blackfin_invalidate_entire_dcache(void) { u32 dmem = bfin_read_DMEM_CONTROL(); bfin_write_DMEM_CONTROL(dmem & ~0xc); SSYNC(); bfin_write_DMEM_CONTROL(dmem); SSYNC(); } /* Invalidate the Entire Instruction cache by * clearing IMC bit */ void blackfin_invalidate_entire_icache(void) { u32 imem = bfin_read_IMEM_CONTROL(); bfin_write_IMEM_CONTROL(imem & ~0x4); SSYNC(); bfin_write_IMEM_CONTROL(imem); SSYNC(); } #if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE) static void bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr, unsigned long cplb_data, unsigned long mem_control, unsigned long mem_mask) { int i; for (i = 0; i < MAX_CPLBS; i++) { bfin_write32(cplb_addr + i * 4, cplb_tbl[i].addr); bfin_write32(cplb_data + i * 4, cplb_tbl[i].data); } _enable_cplb(mem_control, mem_mask); } #ifdef CONFIG_BFIN_ICACHE void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl) { bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL, (IMC | ENICPLB)); } #endif #ifdef CONFIG_BFIN_DCACHE void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl) { /* * Anomaly notes: * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL * register, so that the port preferences for DAG0 and DAG1 are set * to port B */ bfin_cache_init(dcplb_tbl, DCPLB_ADDR0, DCPLB_DATA0, DMEM_CONTROL, (DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0))); } #endif #endif
gpl-2.0
NoelMacwan/Kernel-Nicki-15.1.C.2.9
drivers/media/video/sn9c102/sn9c102_tas5110c1b.c
12851
4411
/*************************************************************************** * Plug-in for TAS5110C1B image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2004-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int tas5110c1b_init(struct sn9c102_device* cam) { int err = 0; err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x44, 0x01}, {0x00, 0x10}, {0x00, 0x11}, {0x0a, 0x14}, {0x60, 0x17}, {0x06, 0x18}, {0xfb, 0x19}); err += sn9c102_i2c_write(cam, 0xc0, 0x80); return err; } static int tas5110c1b_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_GAIN: err += sn9c102_i2c_write(cam, 0x20, 0xf6 - ctrl->value); break; default: return -EINVAL; } return err ? 
-EIO : 0; } static int tas5110c1b_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 69, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 9; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); /* Don't change ! */ err += sn9c102_write_reg(cam, 0x14, 0x1a); err += sn9c102_write_reg(cam, 0x0a, 0x1b); err += sn9c102_write_reg(cam, sn9c102_pread_reg(cam, 0x19), 0x19); return err; } static int tas5110c1b_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int err = 0; if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) err += sn9c102_write_reg(cam, 0x2b, 0x19); else err += sn9c102_write_reg(cam, 0xfb, 0x19); return err; } static const struct sn9c102_sensor tas5110c1b = { .name = "TAS5110C1B", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102, .sysfs_ops = SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_3WIRES, .init = &tas5110c1b_init, .qctrl = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = 0xf6, .step = 0x01, .default_value = 0x40, .flags = 0, }, }, .set_ctrl = &tas5110c1b_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 352, .height = 288, }, .defrect = { .left = 0, .top = 0, .width = 352, .height = 288, }, }, .set_crop = &tas5110c1b_set_crop, .pix_format = { .width = 352, .height = 288, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, }, .set_pix_format = &tas5110c1b_set_pix_format }; int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam) { const struct usb_device_id tas5110c1b_id_table[] = { { USB_DEVICE(0x0c45, 0x6001), }, { USB_DEVICE(0x0c45, 0x6005), }, { USB_DEVICE(0x0c45, 0x60ab), }, { } }; /* Sensor detection is based on USB pid/vid */ if (!sn9c102_match_id(cam, tas5110c1b_id_table)) return 
-ENODEV; sn9c102_attach_sensor(cam, &tas5110c1b); return 0; }
gpl-2.0
thicklizard/ge-patches
drivers/usb/gadget/u_data_hsic.c
52
27238
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/termios.h> #include <linux/netdevice.h> #include <linux/debugfs.h> #include <linux/bitops.h> #include <linux/termios.h> #include <mach/usb_bridge.h> #include <mach/usb_gadget_xport.h> static unsigned int no_data_ports; static const char *data_bridge_names[] = { "dun_data_hsic0", "rmnet_data_hsic0" }; #define DATA_BRIDGE_NAME_MAX_LEN 20 #define GHSIC_DATA_RMNET_RX_Q_SIZE 50 #define GHSIC_DATA_RMNET_TX_Q_SIZE 300 #define GHSIC_DATA_SERIAL_RX_Q_SIZE 10 #define GHSIC_DATA_SERIAL_TX_Q_SIZE 20 #define GHSIC_DATA_RX_REQ_SIZE 2048 #define GHSIC_DATA_TX_INTR_THRESHOLD 20 static unsigned int ghsic_data_rmnet_tx_q_size = GHSIC_DATA_RMNET_TX_Q_SIZE; module_param(ghsic_data_rmnet_tx_q_size, uint, S_IRUGO | S_IWUSR); static unsigned int ghsic_data_rmnet_rx_q_size = GHSIC_DATA_RMNET_RX_Q_SIZE; module_param(ghsic_data_rmnet_rx_q_size, uint, S_IRUGO | S_IWUSR); static unsigned int ghsic_data_serial_tx_q_size = GHSIC_DATA_SERIAL_TX_Q_SIZE; module_param(ghsic_data_serial_tx_q_size, uint, S_IRUGO | S_IWUSR); static unsigned int ghsic_data_serial_rx_q_size = GHSIC_DATA_SERIAL_RX_Q_SIZE; module_param(ghsic_data_serial_rx_q_size, uint, S_IRUGO | S_IWUSR); static unsigned int ghsic_data_rx_req_size = GHSIC_DATA_RX_REQ_SIZE; module_param(ghsic_data_rx_req_size, uint, S_IRUGO | S_IWUSR); unsigned int ghsic_data_tx_intr_thld = 
GHSIC_DATA_TX_INTR_THRESHOLD; module_param(ghsic_data_tx_intr_thld, uint, S_IRUGO | S_IWUSR); #define GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD 500 #define GHSIC_DATA_FLOW_CTRL_DISABLE 300 #define GHSIC_DATA_FLOW_CTRL_SUPPORT 1 #define GHSIC_DATA_PENDLIMIT_WITH_BRIDGE 500 static unsigned int ghsic_data_fctrl_support = GHSIC_DATA_FLOW_CTRL_SUPPORT; module_param(ghsic_data_fctrl_support, uint, S_IRUGO | S_IWUSR); static unsigned int ghsic_data_fctrl_en_thld = GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD; module_param(ghsic_data_fctrl_en_thld, uint, S_IRUGO | S_IWUSR); static unsigned int ghsic_data_fctrl_dis_thld = GHSIC_DATA_FLOW_CTRL_DISABLE; module_param(ghsic_data_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); static unsigned int ghsic_data_pend_limit_with_bridge = GHSIC_DATA_PENDLIMIT_WITH_BRIDGE; module_param(ghsic_data_pend_limit_with_bridge, uint, S_IRUGO | S_IWUSR); #define CH_OPENED 0 #define CH_READY 1 struct gdata_port { unsigned port_num; atomic_t connected; struct usb_ep *in; struct usb_ep *out; enum gadget_type gtype; unsigned int tx_q_size; struct list_head tx_idle; struct sk_buff_head tx_skb_q; spinlock_t tx_lock; unsigned int rx_q_size; struct list_head rx_idle; struct sk_buff_head rx_skb_q; spinlock_t rx_lock; struct workqueue_struct *wq; struct work_struct connect_w; struct work_struct disconnect_w; struct work_struct write_tomdm_w; struct work_struct write_tohost_w; struct bridge brdg; unsigned long bridge_sts; unsigned int n_tx_req_queued; unsigned long to_modem; unsigned long to_host; unsigned int rx_throttled_cnt; unsigned int rx_unthrottled_cnt; unsigned int tx_throttled_cnt; unsigned int tx_unthrottled_cnt; unsigned int tomodem_drp_cnt; unsigned int unthrottled_pnd_skbs; }; static struct { struct gdata_port *port; struct platform_driver pdrv; } gdata_ports[NUM_PORTS]; static unsigned int get_timestamp(void); static void dbg_timestamp(char *, struct sk_buff *); static void ghsic_data_start_rx(struct gdata_port *port); static void ghsic_data_free_requests(struct 
usb_ep *ep, struct list_head *head) { struct usb_request *req; while (!list_empty(head)) { req = list_entry(head->next, struct usb_request, list); list_del(&req->list); usb_ep_free_request(ep, req); } } static int ghsic_data_alloc_requests(struct usb_ep *ep, struct list_head *head, int num, void (*cb)(struct usb_ep *ep, struct usb_request *), gfp_t flags) { int i; struct usb_request *req; pr_debug("%s: ep:%s head:%p num:%d cb:%p", __func__, ep->name, head, num, cb); for (i = 0; i < num; i++) { req = usb_ep_alloc_request(ep, flags); if (!req) { pr_debug("%s: req allocated:%d\n", __func__, i); return list_empty(head) ? -ENOMEM : 0; } req->complete = cb; list_add(&req->list, head); } return 0; } static void ghsic_data_unthrottle_tx(void *ctx) { struct gdata_port *port = ctx; unsigned long flags; if (!port || !atomic_read(&port->connected)) return; spin_lock_irqsave(&port->rx_lock, flags); port->tx_unthrottled_cnt++; spin_unlock_irqrestore(&port->rx_lock, flags); queue_work(port->wq, &port->write_tomdm_w); pr_debug("%s: port num =%d unthrottled\n", __func__, port->port_num); } static void ghsic_data_write_tohost(struct work_struct *w) { unsigned long flags; struct sk_buff *skb; int ret; struct usb_request *req; struct usb_ep *ep; struct gdata_port *port; struct timestamp_info *info; port = container_of(w, struct gdata_port, write_tohost_w); if (!port) return; spin_lock_irqsave(&port->tx_lock, flags); ep = port->in; if (!ep) { spin_unlock_irqrestore(&port->tx_lock, flags); return; } while (!list_empty(&port->tx_idle)) { skb = __skb_dequeue(&port->tx_skb_q); if (!skb) break; req = list_first_entry(&port->tx_idle, struct usb_request, list); req->context = skb; req->buf = skb->data; req->length = skb->len; port->n_tx_req_queued++; if (port->n_tx_req_queued == ghsic_data_tx_intr_thld) { req->no_interrupt = 0; port->n_tx_req_queued = 0; } else { req->no_interrupt = 1; } req->zero = 1; list_del(&req->list); info = (struct timestamp_info *)skb->cb; info->tx_queued = 
get_timestamp(); spin_unlock_irqrestore(&port->tx_lock, flags); ret = usb_ep_queue(ep, req, GFP_KERNEL); spin_lock_irqsave(&port->tx_lock, flags); if (ret) { pr_err("%s: usb epIn failed\n", __func__); list_add(&req->list, &port->tx_idle); dev_kfree_skb_any(skb); break; } port->to_host++; if (ghsic_data_fctrl_support && port->tx_skb_q.qlen <= ghsic_data_fctrl_dis_thld && test_and_clear_bit(RX_THROTTLED, &port->brdg.flags)) { port->rx_unthrottled_cnt++; port->unthrottled_pnd_skbs = port->tx_skb_q.qlen; pr_debug_ratelimited("%s: disable flow ctrl:" " tx skbq len: %u\n", __func__, port->tx_skb_q.qlen); data_bridge_unthrottle_rx(port->brdg.ch_id); } } spin_unlock_irqrestore(&port->tx_lock, flags); } static int ghsic_data_receive(void *p, void *data, size_t len) { struct gdata_port *port = p; unsigned long flags; struct sk_buff *skb = data; if (!port || !atomic_read(&port->connected)) { dev_kfree_skb_any(skb); return -ENOTCONN; } pr_debug("%s: p:%p#%d skb_len:%d\n", __func__, port, port->port_num, skb->len); spin_lock_irqsave(&port->tx_lock, flags); __skb_queue_tail(&port->tx_skb_q, skb); if (ghsic_data_fctrl_support && port->tx_skb_q.qlen >= ghsic_data_fctrl_en_thld) { set_bit(RX_THROTTLED, &port->brdg.flags); port->rx_throttled_cnt++; pr_debug_ratelimited("%s: flow ctrl enabled: tx skbq len: %u\n", __func__, port->tx_skb_q.qlen); spin_unlock_irqrestore(&port->tx_lock, flags); queue_work(port->wq, &port->write_tohost_w); return -EBUSY; } spin_unlock_irqrestore(&port->tx_lock, flags); queue_work(port->wq, &port->write_tohost_w); return 0; } static void ghsic_data_write_tomdm(struct work_struct *w) { struct gdata_port *port; struct sk_buff *skb; struct timestamp_info *info; unsigned long flags; int ret; port = container_of(w, struct gdata_port, write_tomdm_w); if (!port || !atomic_read(&port->connected)) return; spin_lock_irqsave(&port->rx_lock, flags); if (test_bit(TX_THROTTLED, &port->brdg.flags)) { spin_unlock_irqrestore(&port->rx_lock, flags); goto start_rx; } while 
((skb = __skb_dequeue(&port->rx_skb_q))) { pr_debug("%s: port:%p tom:%lu pno:%d\n", __func__, port, port->to_modem, port->port_num); info = (struct timestamp_info *)skb->cb; info->rx_done_sent = get_timestamp(); spin_unlock_irqrestore(&port->rx_lock, flags); ret = data_bridge_write(port->brdg.ch_id, skb); spin_lock_irqsave(&port->rx_lock, flags); if (ret < 0) { if (ret == -EBUSY) { port->tx_throttled_cnt++; break; } pr_err_ratelimited("%s: write error:%d\n", __func__, ret); port->tomodem_drp_cnt++; dev_kfree_skb_any(skb); break; } port->to_modem++; } spin_unlock_irqrestore(&port->rx_lock, flags); start_rx: ghsic_data_start_rx(port); } static void ghsic_data_epin_complete(struct usb_ep *ep, struct usb_request *req) { struct gdata_port *port = ep->driver_data; struct sk_buff *skb = req->context; int status = req->status; switch (status) { case 0: dbg_timestamp("DL", skb); break; case -ECONNRESET: case -ESHUTDOWN: dev_kfree_skb_any(skb); req->buf = 0; usb_ep_free_request(ep, req); return; default: pr_err("%s: data tx ep error %d\n", __func__, status); break; } dev_kfree_skb_any(skb); spin_lock(&port->tx_lock); list_add_tail(&req->list, &port->tx_idle); spin_unlock(&port->tx_lock); queue_work(port->wq, &port->write_tohost_w); } static void ghsic_data_epout_complete(struct usb_ep *ep, struct usb_request *req) { struct gdata_port *port = ep->driver_data; struct sk_buff *skb = req->context; struct timestamp_info *info = (struct timestamp_info *)skb->cb; int status = req->status; int queue = 0; switch (status) { case 0: skb_put(skb, req->actual); queue = 1; break; case -ECONNRESET: case -ESHUTDOWN: dev_kfree_skb_any(skb); req->buf = 0; usb_ep_free_request(ep, req); return; default: pr_err_ratelimited("%s: %s response error %d, %d/%d\n", __func__, ep->name, status, req->actual, req->length); dev_kfree_skb_any(skb); break; } spin_lock(&port->rx_lock); if (queue) { info->rx_done = get_timestamp(); __skb_queue_tail(&port->rx_skb_q, skb); list_add_tail(&req->list, 
&port->rx_idle); queue_work(port->wq, &port->write_tomdm_w); } spin_unlock(&port->rx_lock); } static void ghsic_data_start_rx(struct gdata_port *port) { struct usb_request *req; struct usb_ep *ep; unsigned long flags; int ret; struct sk_buff *skb; struct timestamp_info *info; unsigned int created; pr_debug("%s: port:%p\n", __func__, port); if (!port) return; spin_lock_irqsave(&port->rx_lock, flags); ep = port->out; if (!ep) { spin_unlock_irqrestore(&port->rx_lock, flags); return; } while (atomic_read(&port->connected) && !list_empty(&port->rx_idle)) { if (port->rx_skb_q.qlen > ghsic_data_pend_limit_with_bridge) break; req = list_first_entry(&port->rx_idle, struct usb_request, list); created = get_timestamp(); skb = alloc_skb(ghsic_data_rx_req_size, GFP_ATOMIC); if (!skb) break; info = (struct timestamp_info *)skb->cb; info->created = created; list_del(&req->list); req->buf = skb->data; req->length = ghsic_data_rx_req_size; req->context = skb; info->rx_queued = get_timestamp(); spin_unlock_irqrestore(&port->rx_lock, flags); ret = usb_ep_queue(ep, req, GFP_KERNEL); spin_lock_irqsave(&port->rx_lock, flags); if (ret) { dev_kfree_skb_any(skb); pr_err_ratelimited("%s: rx queue failed\n", __func__); if (atomic_read(&port->connected)) list_add(&req->list, &port->rx_idle); else usb_ep_free_request(ep, req); break; } } spin_unlock_irqrestore(&port->rx_lock, flags); } static void ghsic_data_start_io(struct gdata_port *port) { unsigned long flags; struct usb_ep *ep; int ret; pr_debug("%s: port:%p\n", __func__, port); if (!port) return; spin_lock_irqsave(&port->rx_lock, flags); ep = port->out; if (!ep) { spin_unlock_irqrestore(&port->rx_lock, flags); return; } ret = ghsic_data_alloc_requests(ep, &port->rx_idle, port->rx_q_size, ghsic_data_epout_complete, GFP_ATOMIC); if (ret) { pr_err("%s: rx req allocation failed\n", __func__); spin_unlock_irqrestore(&port->rx_lock, flags); return; } spin_unlock_irqrestore(&port->rx_lock, flags); spin_lock_irqsave(&port->tx_lock, flags); ep = 
port->in; if (!ep) { spin_unlock_irqrestore(&port->tx_lock, flags); return; } ret = ghsic_data_alloc_requests(ep, &port->tx_idle, port->tx_q_size, ghsic_data_epin_complete, GFP_ATOMIC); if (ret) { pr_err("%s: tx req allocation failed\n", __func__); ghsic_data_free_requests(ep, &port->rx_idle); spin_unlock_irqrestore(&port->tx_lock, flags); return; } spin_unlock_irqrestore(&port->tx_lock, flags); ghsic_data_start_rx(port); } static void ghsic_data_connect_w(struct work_struct *w) { struct gdata_port *port = container_of(w, struct gdata_port, connect_w); int ret; if (!port || !atomic_read(&port->connected) || !test_bit(CH_READY, &port->bridge_sts)) return; pr_debug("%s: port:%p\n", __func__, port); ret = data_bridge_open(&port->brdg); if (ret) { pr_err("%s: unable open bridge ch:%d err:%d\n", __func__, port->brdg.ch_id, ret); return; } set_bit(CH_OPENED, &port->bridge_sts); ghsic_data_start_io(port); } static void ghsic_data_disconnect_w(struct work_struct *w) { struct gdata_port *port = container_of(w, struct gdata_port, disconnect_w); if (!test_bit(CH_OPENED, &port->bridge_sts)) return; data_bridge_close(port->brdg.ch_id); clear_bit(CH_OPENED, &port->bridge_sts); } static void ghsic_data_free_buffers(struct gdata_port *port) { struct sk_buff *skb; unsigned long flags; if (!port) return; spin_lock_irqsave(&port->tx_lock, flags); if (!port->in) { spin_unlock_irqrestore(&port->tx_lock, flags); return; } ghsic_data_free_requests(port->in, &port->tx_idle); while ((skb = __skb_dequeue(&port->tx_skb_q))) dev_kfree_skb_any(skb); spin_unlock_irqrestore(&port->tx_lock, flags); spin_lock_irqsave(&port->rx_lock, flags); if (!port->out) { spin_unlock_irqrestore(&port->rx_lock, flags); return; } ghsic_data_free_requests(port->out, &port->rx_idle); while ((skb = __skb_dequeue(&port->rx_skb_q))) dev_kfree_skb_any(skb); spin_unlock_irqrestore(&port->rx_lock, flags); } static int ghsic_data_probe(struct platform_device *pdev) { struct gdata_port *port; pr_debug("%s: name:%s 
no_data_ports= %d\n", __func__, pdev->name, no_data_ports); if (pdev->id >= no_data_ports) { pr_err("%s: invalid port: %d\n", __func__, pdev->id); return -EINVAL; } port = gdata_ports[pdev->id].port; set_bit(CH_READY, &port->bridge_sts); if (atomic_read(&port->connected)) queue_work(port->wq, &port->connect_w); return 0; } static int ghsic_data_remove(struct platform_device *pdev) { struct gdata_port *port; struct usb_ep *ep_in; struct usb_ep *ep_out; pr_debug("%s: name:%s\n", __func__, pdev->name); if (pdev->id >= no_data_ports) { pr_err("%s: invalid port: %d\n", __func__, pdev->id); return -EINVAL; } port = gdata_ports[pdev->id].port; ep_in = port->in; if (ep_in) usb_ep_fifo_flush(ep_in); ep_out = port->out; if (ep_out) usb_ep_fifo_flush(ep_out); ghsic_data_free_buffers(port); data_bridge_close(port->brdg.ch_id); clear_bit(CH_READY, &port->bridge_sts); clear_bit(CH_OPENED, &port->bridge_sts); return 0; } static void ghsic_data_port_free(int portno) { struct gdata_port *port = gdata_ports[portno].port; struct platform_driver *pdrv = &gdata_ports[portno].pdrv; destroy_workqueue(port->wq); kfree(port); if (pdrv) platform_driver_unregister(pdrv); } static int ghsic_data_port_alloc(unsigned port_num, enum gadget_type gtype) { struct gdata_port *port; struct platform_driver *pdrv; port = kzalloc(sizeof(struct gdata_port), GFP_KERNEL); if (!port) return -ENOMEM; port->wq = create_singlethread_workqueue(data_bridge_names[port_num]); if (!port->wq) { pr_err("%s: Unable to create workqueue:%s\n", __func__, data_bridge_names[port_num]); kfree(port); return -ENOMEM; } port->port_num = port_num; spin_lock_init(&port->rx_lock); spin_lock_init(&port->tx_lock); INIT_WORK(&port->connect_w, ghsic_data_connect_w); INIT_WORK(&port->disconnect_w, ghsic_data_disconnect_w); INIT_WORK(&port->write_tohost_w, ghsic_data_write_tohost); INIT_WORK(&port->write_tomdm_w, ghsic_data_write_tomdm); INIT_LIST_HEAD(&port->tx_idle); INIT_LIST_HEAD(&port->rx_idle); 
skb_queue_head_init(&port->tx_skb_q); skb_queue_head_init(&port->rx_skb_q); port->gtype = gtype; port->brdg.ch_id = port_num; port->brdg.ctx = port; port->brdg.ops.send_pkt = ghsic_data_receive; port->brdg.ops.unthrottle_tx = ghsic_data_unthrottle_tx; gdata_ports[port_num].port = port; pdrv = &gdata_ports[port_num].pdrv; pdrv->probe = ghsic_data_probe; pdrv->remove = ghsic_data_remove; pdrv->driver.name = data_bridge_names[port_num]; pdrv->driver.owner = THIS_MODULE; platform_driver_register(pdrv); pr_debug("%s: port:%p portno:%d\n", __func__, port, port_num); return 0; } void ghsic_data_disconnect(void *gptr, int port_num) { struct gdata_port *port; unsigned long flags; pr_debug("%s: port#%d\n", __func__, port_num); port = gdata_ports[port_num].port; if (port_num > no_data_ports) { pr_err("%s: invalid portno#%d\n", __func__, port_num); return; } if (!gptr || !port) { pr_err("%s: port is null\n", __func__); return; } ghsic_data_free_buffers(port); if (port->in) { usb_ep_disable(port->in); port->in->driver_data = NULL; } if (port->out) { usb_ep_disable(port->out); port->out->driver_data = NULL; } atomic_set(&port->connected, 0); spin_lock_irqsave(&port->tx_lock, flags); port->in = NULL; port->n_tx_req_queued = 0; clear_bit(RX_THROTTLED, &port->brdg.flags); spin_unlock_irqrestore(&port->tx_lock, flags); spin_lock_irqsave(&port->rx_lock, flags); port->out = NULL; clear_bit(TX_THROTTLED, &port->brdg.flags); spin_unlock_irqrestore(&port->rx_lock, flags); queue_work(port->wq, &port->disconnect_w); } int ghsic_data_connect(void *gptr, int port_num) { struct gdata_port *port; struct gserial *gser; struct grmnet *gr; unsigned long flags; int ret = 0; pr_debug("%s: port#%d\n", __func__, port_num); port = gdata_ports[port_num].port; if (port_num > no_data_ports) { pr_err("%s: invalid portno#%d\n", __func__, port_num); return -ENODEV; } if (!gptr || !port) { pr_err("%s: port is null\n", __func__); return -ENODEV; } if (port->gtype == USB_GADGET_SERIAL) { gser = gptr; 
spin_lock_irqsave(&port->tx_lock, flags); port->in = gser->in; spin_unlock_irqrestore(&port->tx_lock, flags); spin_lock_irqsave(&port->rx_lock, flags); port->out = gser->out; spin_unlock_irqrestore(&port->rx_lock, flags); port->tx_q_size = ghsic_data_serial_tx_q_size; port->rx_q_size = ghsic_data_serial_rx_q_size; gser->in->driver_data = port; gser->out->driver_data = port; } else { gr = gptr; spin_lock_irqsave(&port->tx_lock, flags); port->in = gr->in; spin_unlock_irqrestore(&port->tx_lock, flags); spin_lock_irqsave(&port->rx_lock, flags); port->out = gr->out; spin_unlock_irqrestore(&port->rx_lock, flags); port->tx_q_size = ghsic_data_rmnet_tx_q_size; port->rx_q_size = ghsic_data_rmnet_rx_q_size; gr->in->driver_data = port; gr->out->driver_data = port; } ret = usb_ep_enable(port->in); if (ret) { pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", __func__, port->in); goto fail; } ret = usb_ep_enable(port->out); if (ret) { pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", __func__, port->out); usb_ep_disable(port->in); goto fail; } atomic_set(&port->connected, 1); spin_lock_irqsave(&port->tx_lock, flags); port->to_host = 0; port->rx_throttled_cnt = 0; port->rx_unthrottled_cnt = 0; port->unthrottled_pnd_skbs = 0; spin_unlock_irqrestore(&port->tx_lock, flags); spin_lock_irqsave(&port->rx_lock, flags); port->to_modem = 0; port->tomodem_drp_cnt = 0; port->tx_throttled_cnt = 0; port->tx_unthrottled_cnt = 0; spin_unlock_irqrestore(&port->rx_lock, flags); queue_work(port->wq, &port->connect_w); fail: return ret; } #if defined(CONFIG_DEBUG_FS) #define DEBUG_BUF_SIZE 1024 static unsigned int record_timestamp; module_param(record_timestamp, uint, S_IRUGO | S_IWUSR); static struct timestamp_buf dbg_data = { .idx = 0, .lck = __RW_LOCK_UNLOCKED(lck) }; static unsigned int get_timestamp(void) { struct timeval tval; unsigned int stamp; if (!record_timestamp) return 0; do_gettimeofday(&tval); stamp = tval.tv_sec & 0xFFF; stamp = stamp * 1000000 + tval.tv_usec; return stamp; } 
static void dbg_inc(unsigned *idx) { *idx = (*idx + 1) & (DBG_DATA_MAX-1); } static void dbg_timestamp(char *event, struct sk_buff * skb) { unsigned long flags; struct timestamp_info *info = (struct timestamp_info *)skb->cb; if (!record_timestamp) return; write_lock_irqsave(&dbg_data.lck, flags); scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG, "%p %u[%s] %u %u %u %u %u %u\n", skb, skb->len, event, info->created, info->rx_queued, info->rx_done, info->rx_done_sent, info->tx_queued, get_timestamp()); dbg_inc(&dbg_data.idx); write_unlock_irqrestore(&dbg_data.lck, flags); } static ssize_t show_timestamp(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { unsigned long flags; unsigned i; unsigned j = 0; char *buf; int ret = 0; if (!record_timestamp) return 0; buf = kzalloc(sizeof(char) * 4 * DEBUG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; read_lock_irqsave(&dbg_data.lck, flags); i = dbg_data.idx; for (dbg_inc(&i); i != dbg_data.idx; dbg_inc(&i)) { if (!strnlen(dbg_data.buf[i], DBG_DATA_MSG)) continue; j += scnprintf(buf + j, (4 * DEBUG_BUF_SIZE) - j, "%s\n", dbg_data.buf[i]); } read_unlock_irqrestore(&dbg_data.lck, flags); ret = simple_read_from_buffer(ubuf, count, ppos, buf, j); kfree(buf); return ret; } const struct file_operations gdata_timestamp_ops = { .read = show_timestamp, }; static ssize_t ghsic_data_read_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct gdata_port *port; struct platform_driver *pdrv; char *buf; unsigned long flags; int ret; int i; int temp = 0; buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < no_data_ports; i++) { port = gdata_ports[i].port; if (!port) continue; pdrv = &gdata_ports[i].pdrv; spin_lock_irqsave(&port->rx_lock, flags); temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, "\nName: %s\n" "#PORT:%d port#: %p\n" "data_ch_open: %d\n" "data_ch_ready: %d\n" "\n******UL INFO*****\n\n" "dpkts_to_modem: %lu\n" "tomodem_drp_cnt: %u\n" 
"rx_buf_len: %u\n" "tx thld cnt %u\n" "tx unthld cnt %u\n" "TX_THROTTLED %d\n", pdrv->driver.name, i, port, test_bit(CH_OPENED, &port->bridge_sts), test_bit(CH_READY, &port->bridge_sts), port->to_modem, port->tomodem_drp_cnt, port->rx_skb_q.qlen, port->tx_throttled_cnt, port->tx_unthrottled_cnt, test_bit(TX_THROTTLED, &port->brdg.flags)); spin_unlock_irqrestore(&port->rx_lock, flags); spin_lock_irqsave(&port->tx_lock, flags); temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, "\n******DL INFO******\n\n" "dpkts_to_usbhost: %lu\n" "tx_buf_len: %u\n" "rx thld cnt %u\n" "rx unthld cnt %u\n" "uthld pnd skbs %u\n" "RX_THROTTLED %d\n", port->to_host, port->tx_skb_q.qlen, port->rx_throttled_cnt, port->rx_unthrottled_cnt, port->unthrottled_pnd_skbs, test_bit(RX_THROTTLED, &port->brdg.flags)); spin_unlock_irqrestore(&port->tx_lock, flags); } ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); kfree(buf); return ret; } static ssize_t ghsic_data_reset_stats(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct gdata_port *port; int i; unsigned long flags; for (i = 0; i < no_data_ports; i++) { port = gdata_ports[i].port; if (!port) continue; spin_lock_irqsave(&port->rx_lock, flags); port->to_modem = 0; port->tomodem_drp_cnt = 0; port->tx_throttled_cnt = 0; port->tx_unthrottled_cnt = 0; spin_unlock_irqrestore(&port->rx_lock, flags); spin_lock_irqsave(&port->tx_lock, flags); port->to_host = 0; port->rx_throttled_cnt = 0; port->rx_unthrottled_cnt = 0; port->unthrottled_pnd_skbs = 0; spin_unlock_irqrestore(&port->tx_lock, flags); } return count; } const struct file_operations ghsic_stats_ops = { .read = ghsic_data_read_stats, .write = ghsic_data_reset_stats, }; static struct dentry *gdata_dent; static struct dentry *gdata_dfile_stats; static struct dentry *gdata_dfile_tstamp; static void ghsic_data_debugfs_init(void) { gdata_dent = debugfs_create_dir("ghsic_data_xport", 0); if (IS_ERR(gdata_dent)) return; gdata_dfile_stats = 
debugfs_create_file("status", 0444, gdata_dent, 0, &ghsic_stats_ops); if (!gdata_dfile_stats || IS_ERR(gdata_dfile_stats)) { debugfs_remove(gdata_dent); return; } gdata_dfile_tstamp = debugfs_create_file("timestamp", 0644, gdata_dent, 0, &gdata_timestamp_ops); if (!gdata_dfile_tstamp || IS_ERR(gdata_dfile_tstamp)) debugfs_remove(gdata_dent); } static void ghsic_data_debugfs_exit(void) { debugfs_remove(gdata_dfile_stats); debugfs_remove(gdata_dfile_tstamp); debugfs_remove(gdata_dent); } #else static void ghsic_data_debugfs_init(void) { } static void ghsic_data_debugfs_exit(void) { } static void dbg_timestamp(char *event, struct sk_buff * skb) { return; } static unsigned int get_timestamp(void) { return 0; } #endif int ghsic_data_setup(unsigned num_ports, enum gadget_type gtype) { int first_port_id = no_data_ports; int total_num_ports = num_ports + no_data_ports; int ret = 0; int i; if (!num_ports || total_num_ports > NUM_PORTS) { pr_err("%s: Invalid num of ports count:%d\n", __func__, num_ports); return -EINVAL; } pr_debug("%s: count: %d\n", __func__, num_ports); for (i = first_port_id; i < (num_ports + first_port_id); i++) { no_data_ports++; ret = ghsic_data_port_alloc(i, gtype); if (ret) { no_data_ports--; pr_err("%s: Unable to alloc port:%d\n", __func__, i); goto free_ports; } } return first_port_id; free_ports: for (i = first_port_id; i < no_data_ports; i++) ghsic_data_port_free(i); no_data_ports = first_port_id; return ret; } static int __init ghsic_data_init(void) { ghsic_data_debugfs_init(); return 0; } module_init(ghsic_data_init); static void __exit ghsic_data_exit(void) { ghsic_data_debugfs_exit(); } module_exit(ghsic_data_exit); MODULE_DESCRIPTION("hsic data xport driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
MoKee/android_kernel_samsung_msm8930-common
drivers/battery/smb358_charger.c
52
29481
/* * smb358_charger.c * Samsung SMB358 Charger Driver * * Copyright (C) 2012 Samsung Electronics * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define DEBUG #if defined(CONFIG_SEC_PRODUCT_8930) #include <linux/battery/sec_charger_8930.h> #else #include <linux/battery/sec_charger.h> #endif #include <linux/debugfs.h> #include <linux/seq_file.h> static int smb358_i2c_write(struct i2c_client *client, int reg, u8 *buf) { int ret; ret = i2c_smbus_write_i2c_block_data(client, reg, 1, buf); if (ret < 0) dev_err(&client->dev, "%s: Error(%d)\n", __func__, ret); return ret; } static int smb358_i2c_read(struct i2c_client *client, int reg, u8 *buf) { int ret; ret = i2c_smbus_read_i2c_block_data(client, reg, 1, buf); if (ret < 0) dev_err(&client->dev, "%s: Error(%d)\n", __func__, ret); return ret; } /*static void smb358_i2c_write_array(struct i2c_client *client, u8 *buf, int size) { int i; for (i = 0; i < size; i += 3) smb358_i2c_write(client, (u8) (*(buf + i)), (buf + i) + 1); }*/ static int smb358_update_reg(struct i2c_client *client, int reg, u8 data) { int ret; u8 r_data = 0; u8 w_data = 0; u8 o_data = data; ret = smb358_i2c_read(client, reg, &r_data); if (ret < 0) { dev_err(&client->dev, "%s: error - read(%d)\n", __func__, ret); goto error; } w_data = r_data | data; ret = smb358_i2c_write(client, reg, &w_data); if (ret < 0) { dev_err(&client->dev, "%s: error - write(%d)\n", __func__, ret); goto error; } ret = smb358_i2c_read(client, reg, &data); if (ret < 0) { dev_err(&client->dev, "%s: error - read(%d)\n", __func__, ret); goto error; } dev_dbg(&client->dev, "%s: reg(0x%02x) 0x%02x : 0x%02x -> 0x%02x -> 0x%02x\n", __func__, reg, o_data, r_data, w_data, data); error: return ret; } static int smb358_clear_reg(struct i2c_client *client, int reg, u8 data) { int ret; u8 r_data = 0; u8 w_data = 0; u8 o_data = data; ret = 
smb358_i2c_read(client, reg, &r_data); if (ret < 0) { dev_err(&client->dev, "%s: error - read(%d)\n", __func__, ret); goto error; } w_data = r_data & (~data); ret = smb358_i2c_write(client, reg, &w_data); if (ret < 0) { dev_err(&client->dev, "%s: error - write(%d)\n", __func__, ret); goto error; } ret = smb358_i2c_read(client, reg, &data); if (ret < 0) { dev_err(&client->dev, "%s: error - read(%d)\n", __func__, ret); goto error; } dev_dbg(&client->dev, "%s: reg(0x%02x)- 0x%02x : 0x%02x -> 0x%02x -> 0x%02x\n", __func__, reg, o_data, r_data, w_data, data); error: return ret; } static int smb358_volatile_writes(struct i2c_client *client, u8 value) { int ret = 0; if (value == SMB358_ENABLE_WRITE) { ret = smb358_update_reg(client, SMB358_COMMAND_A, 0x80); if (ret < 0) { dev_err(&client->dev, "%s: error(%d)\n", __func__, ret); goto error; } dev_dbg(&client->dev, "%s: ENABLED\n", __func__); } else { ret = smb358_clear_reg(client, SMB358_COMMAND_A, 0x80); if (ret < 0) { dev_err(&client->dev, "%s: error(%d)\n", __func__, ret); goto error; } dev_dbg(&client->dev, "%s: DISABLED\n", __func__); } error: return ret; } static void smb358_set_command(struct i2c_client *client, int reg, u8 datum) { int val; u8 after_data; if (smb358_i2c_write(client, reg, &datum) < 0) dev_err(&client->dev, "%s : error!\n", __func__); msleep(20); val = smb358_i2c_read(client, reg, &after_data); if (val >= 0) dev_info(&client->dev, "%s : reg(0x%02x) 0x%02x => 0x%02x\n", __func__, reg, datum, after_data); else dev_err(&client->dev, "%s : error!\n", __func__); } #if 0 static void smb358_test_read(struct i2c_client *client) { u8 data = 0; u32 addr = 0; for (addr = 0; addr <= 0x0f; addr++) { smb358_i2c_read(client, addr, &data); dev_dbg(&client->dev, "%s : smb358 addr : 0x%02x data : 0x%02x\n", __func__, addr, data); } for (addr = 0x30; addr <= 0x3f; addr++) { smb358_i2c_read(client, addr, &data); dev_dbg(&client->dev, "%s : smb358 addr : 0x%02x data : 0x%02x\n", __func__, addr, data); } } #endif static 
void smb358_read_regs(struct i2c_client *client, char *str) { u8 data = 0; u32 addr = 0; for (addr = 0; addr <= 0x0f; addr++) { smb358_i2c_read(client, addr, &data); sprintf(str+strlen(str), "0x%x, ", data); } /* "#" considered as new line in application */ sprintf(str+strlen(str), "#"); for (addr = 0x30; addr <= 0x3f; addr++) { smb358_i2c_read(client, addr, &data); sprintf(str+strlen(str), "0x%x, ", data); } } static int smb358_get_charging_status(struct i2c_client *client) { int status = POWER_SUPPLY_STATUS_UNKNOWN; u8 data_a = 0; u8 data_b = 0; u8 data_c = 0; u8 data_d = 0; u8 data_e = 0; /* need delay to update charger status */ msleep(500); /*smb358_test_read(client);*/ smb358_i2c_read(client, SMB358_STATUS_A, &data_a); dev_dbg(&client->dev, "%s : charger status A(0x%02x)\n", __func__, data_a); smb358_i2c_read(client, SMB358_STATUS_B, &data_b); dev_dbg(&client->dev, "%s : charger status B(0x%02x)\n", __func__, data_b); smb358_i2c_read(client, SMB358_STATUS_C, &data_c); dev_dbg(&client->dev, "%s : charger status C(0x%02x)\n", __func__, data_c); smb358_i2c_read(client, SMB358_STATUS_D, &data_d); dev_dbg(&client->dev, "%s : charger status D(0x%02x)\n", __func__, data_d); smb358_i2c_read(client, SMB358_STATUS_E, &data_e); dev_dbg(&client->dev, "%s : charger status E(0x%02x)\n", __func__, data_e); /* At least one charge cycle terminated, * Charge current < Termination Current */ if (data_c & 0x20) { /* top-off by full charging */ status = POWER_SUPPLY_STATUS_FULL; goto charging_status_end; } /* Is enabled ? 
*/ if (data_c & 0x01) { /* check for 0x06 : no charging (0b00) */ /* not charging */ if (!(data_c & 0x06)) { status = POWER_SUPPLY_STATUS_NOT_CHARGING; goto charging_status_end; } else { status = POWER_SUPPLY_STATUS_CHARGING; goto charging_status_end; } } else status = POWER_SUPPLY_STATUS_DISCHARGING; charging_status_end: return (int)status; } static int smb358_get_charging_health(struct i2c_client *client) { int health = POWER_SUPPLY_HEALTH_GOOD; u8 data_a = 0; u8 data_b = 0; u8 data_c = 0; u8 data_d = 0; u8 data_e = 0; smb358_i2c_read(client, SMB358_STATUS_A, &data_a); dev_dbg(&client->dev, "%s : charger status A(0x%02x)\n", __func__, data_a); smb358_i2c_read(client, SMB358_STATUS_B, &data_b); dev_dbg(&client->dev, "%s : charger status B(0x%02x)\n", __func__, data_b); smb358_i2c_read(client, SMB358_STATUS_C, &data_c); dev_dbg(&client->dev, "%s : charger status C(0x%02x)\n", __func__, data_c); smb358_i2c_read(client, SMB358_STATUS_D, &data_d); dev_dbg(&client->dev, "%s : charger status D(0x%02x)\n", __func__, data_d); smb358_i2c_read(client, SMB358_STATUS_E, &data_e); dev_dbg(&client->dev, "%s : charger status E(0x%02x)\n", __func__, data_e); smb358_i2c_read(client, SMB358_INTERRUPT_STATUS_E, &data_e); dev_dbg(&client->dev, "%s : charger interrupt status E(0x%02x)\n", __func__, data_e); if (data_e & 0x01) health = POWER_SUPPLY_HEALTH_UNDERVOLTAGE; else if (data_e & 0x04) health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; return (int)health; } /*static void smb358_allow_volatile_writes(struct i2c_client *client) { int val, reg; u8 data; reg = SMB358_COMMAND_A; val = smb358_i2c_read(client, reg, &data); if ((val >= 0) && !(data & 0x80)) { dev_dbg(&client->dev, "%s : reg(0x%02x): 0x%02x", __func__, reg, data); data |= (0x1 << 7); if (smb358_i2c_write(client, reg, &data) < 0) dev_err(&client->dev, "%s : error!\n", __func__); val = smb358_i2c_read(client, reg, &data); if (val >= 0) { data = (u8) data; dev_dbg(&client->dev, " => 0x%02x\n", data); } } }*/ static u8 
smb358_get_float_voltage_data( int float_voltage) { u8 data; if (float_voltage < 3500) data = 0; else if(float_voltage <= 4340) data = (float_voltage - 3500) / 20; else if(float_voltage == 4350) data = 43; /* (4340 -3500)/20 + 1 */ else if(float_voltage <= 4500) data = (float_voltage - 3500) / 20 + 1; else data = 51; return data; } static u8 smb358_get_input_current_limit_data( struct sec_charger_info *charger, int input_current) { u8 data; if (input_current <= 300) data = 0x00; else if (input_current <= 500) data = 0x01; else if (input_current <= 700) data = 0x02; else if (input_current <= 1000) data = 0x03; else if (input_current <= 1200) data = 0x04; else if (input_current <= 1500) data = 0x05; else if (input_current <= 1800) data = 0x06; else if (input_current <= 2000) data = 0x07; else data = 0x07; /* set input current limit as maximum */ return (data << 4); } static u8 smb358_get_term_current_limit_data( int termination_current) { u8 data; if (termination_current <= 30) data = 0x00; else if (termination_current <= 40) data = 0x01; else if (termination_current <= 60) data = 0x02; else if (termination_current <= 80) data = 0x03; else if (termination_current <= 100) data = 0x04; else if (termination_current <= 125) data = 0x05; else if (termination_current <= 150) data = 0x06; else if (termination_current <= 200) data = 0x07; else data = 0x07; /* set input current limit as maximum */ return data; } static u8 smb358_get_fast_charging_current_data( int fast_charging_current) { u8 data; if (fast_charging_current <= 200) data = 0x00; else if (fast_charging_current <= 450) data = 0x01; else if (fast_charging_current <= 600) data = 0x02; else if (fast_charging_current <= 900) data = 0x03; else if (fast_charging_current <= 1300) data = 0x04; else if (fast_charging_current <= 1500) data = 0x05; else if (fast_charging_current <= 1800) data = 0x06; else if (fast_charging_current <= 2000) data = 0x07; else data = 0x07; /* set input current limit as maximum */ return data 
<< 5; } static void smb358_enter_suspend(struct i2c_client *client) { u8 data = 0; pr_info("%s: ENTER SUSPEND\n", __func__); smb358_set_command(client, SMB358_COMMAND_A, 0x80); smb358_set_command(client, SMB358_PIN_ENABLE_CONTROL, 0x18); data = (data | 0x4); smb358_set_command(client, SMB358_COMMAND_A, data); } static void smb358_charger_function_control( struct i2c_client *client) { struct sec_charger_info *charger = i2c_get_clientdata(client); union power_supply_propval val; int full_check_type, status; u8 data, chgcurrent; union power_supply_propval input_value; psy_do_property("battery", get, POWER_SUPPLY_PROP_HEALTH, input_value); charger->health = input_value.intval; psy_do_property("battery", get, POWER_SUPPLY_PROP_STATUS, input_value); status = input_value.intval; if (charger->charging_current < 0) { dev_dbg(&client->dev, "%s : OTG is activated. Ignore command!\n", __func__); return; } smb358_volatile_writes(client, SMB358_ENABLE_WRITE); if (charger->health == POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) { pr_info("[SMB358] Unspec_failure, charger suspend\n"); smb358_enter_suspend(client); } else if (charger->cable_type == POWER_SUPPLY_TYPE_BATTERY) { #if defined(CONFIG_MACH_BISCOTTO) if (charger->pdata->check_vbus_status()) { /* Charger, Input current Disabled */ smb358_set_command(client, SMB358_COMMAND_A, 0xc4); dev_dbg(&client->dev, "%s : no input current!\n", __func__); } else smb358_set_command(client, SMB358_COMMAND_A, 0xc0); #else /* Charger Disabled */ smb358_set_command(client, SMB358_COMMAND_A, 0xc0); #endif pr_info("[SMB358] Set the registers to the default configuration\n"); /* Set the registers to the default configuration */ smb358_set_command(client, SMB358_CHARGE_CURRENT, 0xFE); smb358_set_command(client, SMB358_INPUT_CURRENTLIMIT, 0x74); smb358_set_command(client, SMB358_VARIOUS_FUNCTIONS, 0xD7); data = 0x00; data |= smb358_get_float_voltage_data(charger->pdata->chg_float_voltage); smb358_set_command(client, SMB358_FLOAT_VOLTAGE, data); /* Disable 
Automatic Recharge */ smb358_set_command(client, SMB358_CHARGE_CONTROL, 0x84); smb358_set_command(client, SMB358_STAT_TIMERS_CONTROL, 0x0F); smb358_set_command(client, SMB358_PIN_ENABLE_CONTROL, 0x09); smb358_set_command(client, SMB358_THERM_CONTROL_A, 0xF0); smb358_set_command(client, SMB358_SYSOK_USB30_SELECTION, 0x09); smb358_set_command(client, SMB358_OTHER_CONTROL_A, 0x00); smb358_set_command(client, SMB358_OTG_TLIM_THERM_CONTROL, 0xF6); smb358_set_command(client, SMB358_LIMIT_CELL_TEMPERATURE_MONITOR, 0xA5); smb358_set_command(client, SMB358_FAULT_INTERRUPT, 0x00); smb358_set_command(client, SMB358_STATUS_INTERRUPT, 0x00); } else { psy_do_property("battery", get, POWER_SUPPLY_PROP_CHARGE_NOW, val); if (val.intval == SEC_BATTERY_CHARGING_1ST) full_check_type = charger->pdata->full_check_type; else full_check_type = charger->pdata->full_check_type_2nd; smb358_i2c_read(client, SMB358_COMMAND_A, &data); if ((data & 0x02) && (status != POWER_SUPPLY_STATUS_FULL)) { chgcurrent = 0; smb358_i2c_read(client, SMB358_CHARGE_CURRENT, &chgcurrent); chgcurrent &= 0xE0; /* get fast charging current */ if (chgcurrent == smb358_get_fast_charging_current_data( charger->charging_current)) { pr_info("[SMB358] Skip the Same charging current setting\n"); #if defined(CONFIG_MACH_LT02_ATT) smb358_set_command(client, SMB358_THERM_CONTROL_A, 0xB0); #endif goto control_skip; } } /* [STEP - 1] ================================================ * Volatile write permission(bit 7) - allow(1) * Charging Enable(bit 1) - Disabled(0, default) * STAT Output(bit 0) - Enabled(0) */ #if defined(CONFIG_MACH_LT02) smb358_set_command(client, SMB358_COMMAND_A, 0xC2); #else smb358_set_command(client, SMB358_COMMAND_A, 0xC0); #endif /* [STEP - 2] ================================================ * USB 5/1(9/1.5) Mode(bit 1) - USB1/USB1.5(0), USB5/USB9(1) * USB/HC Mode(bit 0) - USB5/1 or USB9/1.5 Mode(0) * High-Current Mode(1) */ switch (charger->cable_type) { case POWER_SUPPLY_TYPE_MAINS: case 
POWER_SUPPLY_TYPE_MISC: case POWER_SUPPLY_TYPE_USB_CDP: case POWER_SUPPLY_TYPE_UARTOFF: /* High-current mode */ data = 0x03; break; case POWER_SUPPLY_TYPE_USB: case POWER_SUPPLY_TYPE_USB_DCP: case POWER_SUPPLY_TYPE_USB_ACA: /* USB5 */ data = 0x02; break; default: /* USB1 */ data = 0x00; break; } smb358_set_command(client, SMB358_COMMAND_B, data); /* [STEP 3] Charge Current(0x00) =============================== * Set pre-charge current(bit 4:3) - 450mA(11) * Set fast charge current(bit 7:5) * Set termination current(bit 2:0) */ dev_info(&client->dev, "%s : fast charging current (%dmA)\n", __func__, charger->charging_current); data = 0x18; data |= smb358_get_fast_charging_current_data( charger->charging_current); switch (full_check_type) { case SEC_BATTERY_FULLCHARGED_CHGGPIO: case SEC_BATTERY_FULLCHARGED_CHGINT: case SEC_BATTERY_FULLCHARGED_CHGPSY: if (val.intval == SEC_BATTERY_CHARGING_1ST) { dev_info(&client->dev, "%s : termination current (%dmA)\n", __func__, charger->pdata->charging_current[ charger->cable_type].full_check_current_1st); data |= smb358_get_term_current_limit_data( charger->pdata->charging_current[ charger->cable_type].full_check_current_1st); } else { dev_info(&client->dev, "%s : termination current (%dmA)\n", __func__, charger->pdata->charging_current[ charger->cable_type].full_check_current_2nd); data |= smb358_get_term_current_limit_data( charger->pdata->charging_current[ charger->cable_type].full_check_current_2nd); } break; } smb358_set_command(client, SMB358_CHARGE_CURRENT, data); /* [STEP - 4] ================================================= * Enable(EN) Pin Control(bit 6) - i2c(0), Pin(1) * Pin control(bit 5) - active high(0), active low(1) * USB5/1/HC input State(bit3) - Dual-state input(1) * USB Input Pre-bias(bit 0) - Enable(1) */ data = 0x09; if (charger->pdata->chg_gpio_en) data |= 0x40; if (charger->pdata->chg_polarity_en) data |= 0x20; smb358_set_command(client, SMB358_PIN_ENABLE_CONTROL, data); /* [STEP - 5] 
=============================================== */ dev_info(&client->dev, "%s : input current (%dmA)\n", __func__, charger->pdata->charging_current [charger->cable_type].input_current_limit); /* Input current limit */ data = 0x00; data |= smb358_get_input_current_limit_data( charger, charger->pdata->charging_current [charger->cable_type].input_current_limit); smb358_set_command(client, SMB358_INPUT_CURRENTLIMIT, data); /* [STEP - 6] ================================================= * Input to System FET(bit 7) - Controlled by Register(1) * Max System voltage(bit 5) - Vflt + 0.1v(0) * AICL(bit 4) - Enalbed(1) * VCHG Function(bit 0) - Enabled(1) */ if (charger->pdata->chg_functions_setting & SEC_CHARGER_NO_GRADUAL_CHARGING_CURRENT) /* disable AICL */ smb358_set_command(client, SMB358_VARIOUS_FUNCTIONS, 0x81); else { /* disable AICL */ smb358_set_command(client, SMB358_VARIOUS_FUNCTIONS, 0x81); /* enable AICL */ smb358_set_command(client, SMB358_VARIOUS_FUNCTIONS, 0x95); } /* [STEP - 7] ================================================= * Pre-charged to Fast-charge Voltage Threshold(Bit 7:6) - 2.3V * Float Voltage(bit 5:0) */ dev_dbg(&client->dev, "%s : float voltage (%dmV)\n", __func__, charger->pdata->chg_float_voltage); data = 0x00; data |= smb358_get_float_voltage_data( charger->pdata->chg_float_voltage); smb358_set_command(client, SMB358_FLOAT_VOLTAGE, data); /* [STEP - 8] ================================================= * Charge control * Automatic Recharge disable(bit 7), * Current Termination disable(bit 6), * BMD disable(bit 5:4), * INOK Output Configuration : Push-pull(bit 3) * APSD disable */ #if defined(CONFIG_MACH_LT02) data = 0xC0; #else data = 0xC1; #endif switch (full_check_type) { case SEC_BATTERY_FULLCHARGED_CHGGPIO: case SEC_BATTERY_FULLCHARGED_CHGINT: case SEC_BATTERY_FULLCHARGED_CHGPSY: /* Enable Current Termination */ data &= 0xB1; break; } smb358_set_command(client, SMB358_CHARGE_CONTROL, data); /* [STEP - 9] 
================================================= * STAT active low(bit 7), * Complete charge Timeout(bit 3:2) - Disabled(11) * Pre-charge Timeout(bit 1:0) - Disable(11) */ smb358_set_command(client, SMB358_STAT_TIMERS_CONTROL, 0x1F); #if defined(CONFIG_MACH_LT02) /* [STEP - 10] ================================================= * Mininum System Voltage(bit 6) - 3.15v(1) * Therm monitor(bit 4) - Disabled(1) * Soft Cold/Hot Temp Limit Behavior(bit 3:2, bit 1:0) - * Charger Current + Float voltage Compensation(11) */ smb358_set_command(client, SMB358_THERM_CONTROL_A, 0xB0); #else /* [STEP - 10] ================================================= * Mininum System Voltage(bit 6) - 3.75v(1) * Therm monitor(bit 4) - Disabled(1) * Soft Cold/Hot Temp Limit Behavior(bit 3:2, bit 1:0) - * Charger Current + Float voltage Compensation(11) */ smb358_set_command(client, SMB358_THERM_CONTROL_A, 0xF0); #endif /* [STEP - 11] ================================================ * OTG/ID Pin Control(bit 7:6) - RID Disabled, OTG I2c(00) * Minimum System Voltage(bit 4) - 3.75V(1) * Low-Battery/SYSOK Voltage threshold(bit 3:0) - 2.5V(0001) * if this bit is disabled, * input current for system will be disabled */ smb358_set_command(client, SMB358_OTHER_CONTROL_A, 0x11); /* [STEP - 12] ================================================ * Charge Current Compensation(bit 7:6) - 200mA(00) * Digital Thermal Regulation Threshold(bit 5:4) - 130c * OTG current Limit at USBIN(Bit 3:2) - 900mA(11) * OTG Battery UVLO Threshold(Bit 1:0) - 3.3V(11) */ smb358_set_command(client, SMB358_OTG_TLIM_THERM_CONTROL, 0x3F); /* [STEP - 13] ================================================ * Hard/Soft Limit Cell temp monitor */ smb358_set_command(client, SMB358_LIMIT_CELL_TEMPERATURE_MONITOR, 0x01); /* [STEP - 14] ================================================ * FAULT interrupt - Disabled */ smb358_set_command(client, SMB358_FAULT_INTERRUPT, 0x00); /* [STEP - 15] ================================================ * 
STATUS ingerrupt - Clear */ smb358_set_command(client, SMB358_STATUS_INTERRUPT, 0x00); /* [STEP - 16] ================================================ * Volatile write permission(bit 7) - allowed(1) * Charging Enable(bit 1) - Enabled(1) * STAT Output(bit 0) - Enabled(0) */ #if !defined(CONFIG_MACH_LT02) smb358_set_command(client, SMB358_COMMAND_A, 0xC2); #endif } control_skip: smb358_volatile_writes(client, SMB358_DISABLE_WRITE); } static void smb358_charger_otg_control( struct i2c_client *client) { struct sec_charger_info *charger = i2c_get_clientdata(client); smb358_volatile_writes(client, SMB358_ENABLE_WRITE); if (charger->cable_type == POWER_SUPPLY_TYPE_BATTERY) { /* Charger Disabled */ smb358_clear_reg(client, SMB358_COMMAND_A, 0x02); } else { /* Change "OTG output current limit" to 250mA */ smb358_clear_reg(client, SMB358_OTG_TLIM_THERM_CONTROL, 0x0C); /* OTG Enalbed*/ smb358_update_reg(client, SMB358_COMMAND_A, 0x10); smb358_set_command(client, SMB358_COMMAND_B, 0x00); /* Change "OTG output current limit" to 750mA */ smb358_update_reg(client, SMB358_OTG_TLIM_THERM_CONTROL, 0x80); } smb358_volatile_writes(client, SMB358_DISABLE_WRITE); } static void smb358_set_charging_current( struct i2c_client *client, int charging_current) { u8 data; smb358_volatile_writes(client, SMB358_ENABLE_WRITE); smb358_i2c_read(client, SMB358_CHARGE_CURRENT, &data); data &= 0x1f; data |= smb358_get_fast_charging_current_data(charging_current); smb358_set_command(client, SMB358_CHARGE_CURRENT, data); smb358_volatile_writes(client, SMB358_DISABLE_WRITE); } static void smb358_set_charging_input_current_limit( struct i2c_client *client, int input_current_limit) { struct sec_charger_info *charger = i2c_get_clientdata(client); u8 data; smb358_volatile_writes(client, SMB358_ENABLE_WRITE); /* Input current limit */ data = 0; data = smb358_get_input_current_limit_data( charger, input_current_limit); smb358_set_command(client, SMB358_INPUT_CURRENTLIMIT, data); smb358_volatile_writes(client, 
SMB358_DISABLE_WRITE); } void smb358_charger_shutdown(struct i2c_client *client) { pr_info("%s: smb358 Charging Disabled\n", __func__); smb358_volatile_writes(client, SMB358_ENABLE_WRITE); smb358_set_command(client, SMB358_THERM_CONTROL_A, 0xF0); smb358_set_command(client, SMB358_COMMAND_A, 0x80); smb358_volatile_writes(client, SMB358_DISABLE_WRITE); } static int smb358_debugfs_show(struct seq_file *s, void *data) { struct sec_charger_info *charger = s->private; u8 reg; u8 reg_data; seq_printf(s, "SMB CHARGER IC :\n"); seq_printf(s, "==================\n"); for (reg = 0x00; reg <= 0x0E; reg++) { smb358_i2c_read(charger->client, reg, &reg_data); seq_printf(s, "0x%02x:\t0x%02x\n", reg, reg_data); } for (reg = 0x30; reg <= 0x3F; reg++) { smb358_i2c_read(charger->client, reg, &reg_data); seq_printf(s, "0x%02x:\t0x%02x\n", reg, reg_data); } seq_printf(s, "\n"); return 0; } static int smb358_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, smb358_debugfs_show, inode->i_private); } static const struct file_operations smb358_debugfs_fops = { .open = smb358_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; bool sec_hal_chg_init(struct i2c_client *client) { struct sec_charger_info *charger = i2c_get_clientdata(client); dev_info(&client->dev, "%s: SMB358 Charger init(Start)!!\n", __func__); /*smb358_test_read(client);*/ (void) debugfs_create_file("smb358_regs", S_IRUGO, NULL, (void *)charger, &smb358_debugfs_fops); return true; } bool sec_hal_chg_suspend(struct i2c_client *client) { return true; } bool sec_hal_chg_resume(struct i2c_client *client) { return true; } bool sec_hal_chg_get_property(struct i2c_client *client, enum power_supply_property psp, union power_supply_propval *val) { struct sec_charger_info *charger = i2c_get_clientdata(client); u8 data; switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = smb358_get_charging_status(client); break; case POWER_SUPPLY_PROP_CHARGE_TYPE: if 
(charger->is_charging) val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; else val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE; break; case POWER_SUPPLY_PROP_HEALTH: val->intval = smb358_get_charging_health(client); break; /* calculated input current limit value */ case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: /* charging current */ if (charger->charging_current) { smb358_i2c_read(client, SMB358_STATUS_B, &data); if (data & 0x20) switch (data & 0x07) { case 0: val->intval = 100; break; case 1: val->intval = 200; break; case 2: val->intval = 450; break; case 3: val->intval = 600; break; case 4: val->intval = 900; break; case 5: val->intval = 1300; break; case 6: val->intval = 1500; break; case 7: val->intval = 1800; break; } else switch ((data & 0x18) >> 3) { case 0: val->intval = 100; break; case 1: val->intval = 150; break; case 2: val->intval = 200; break; case 3: val->intval = 250; break; } } else val->intval = 0; dev_dbg(&client->dev, "%s : set-current(%dmA), current now(%dmA)\n", __func__, charger->charging_current, val->intval); break; case POWER_SUPPLY_PROP_CHARGE_COUNTER: smb358_i2c_read(client, SMB358_INTERRUPT_STATUS_F, &data); if (data & 0x01) val->intval = 1; else val->intval = 0; break; default: return false; } return true; } bool sec_hal_chg_set_property(struct i2c_client *client, enum power_supply_property psp, const union power_supply_propval *val) { struct sec_charger_info *charger = i2c_get_clientdata(client); switch (psp) { /* val->intval : type */ case POWER_SUPPLY_PROP_ONLINE: if (charger->charging_current < 0) smb358_charger_otg_control(client); else if (charger->charging_current > 0) smb358_charger_function_control(client); else { smb358_charger_function_control(client); smb358_charger_otg_control(client); } /*smb358_test_read(client);*/ break; case POWER_SUPPLY_PROP_CURRENT_MAX: /* input current limit set */ /* calculated input current limit value */ case POWER_SUPPLY_PROP_CURRENT_NOW: 
smb358_set_charging_input_current_limit(client, val->intval); break; /* val->intval : charging current */ case POWER_SUPPLY_PROP_CURRENT_AVG: smb358_set_charging_current(client, val->intval); break; default: return false; } return true; } ssize_t sec_hal_chg_show_attrs(struct device *dev, const ptrdiff_t offset, char *buf) { struct power_supply *psy = dev_get_drvdata(dev); struct sec_charger_info *chg = container_of(psy, struct sec_charger_info, psy_chg); int i = 0; char *str = NULL; switch (offset) { case CHG_DATA: i += scnprintf(buf + i, PAGE_SIZE - i, "%x\n", chg->reg_data); break; case CHG_REGS: str = kzalloc(sizeof(char)*1024, GFP_KERNEL); if (!str) return -ENOMEM; smb358_read_regs(chg->client, str); i += scnprintf(buf + i, PAGE_SIZE - i, "%s\n", str); kfree(str); break; default: i = -EINVAL; break; } return i; } ssize_t sec_hal_chg_store_attrs(struct device *dev, const ptrdiff_t offset, const char *buf, size_t count) { struct power_supply *psy = dev_get_drvdata(dev); struct sec_charger_info *chg = container_of(psy, struct sec_charger_info, psy_chg); int ret = 0; int x = 0; u8 data = 0; switch (offset) { case CHG_REG: if (sscanf(buf, "%x\n", &x) == 1) { chg->reg_addr = x; smb358_i2c_read(chg->client, chg->reg_addr, &data); chg->reg_data = data; dev_dbg(dev, "%s: (read) addr = 0x%x, data = 0x%x\n", __func__, chg->reg_addr, chg->reg_data); ret = count; } break; case CHG_DATA: if (sscanf(buf, "%x\n", &x) == 1) { data = (u8)x; dev_dbg(dev, "%s: (write) addr = 0x%x, data = 0x%x\n", __func__, chg->reg_addr, data); smb358_i2c_write(chg->client, chg->reg_addr, &data); ret = count; } break; default: ret = -EINVAL; break; } return ret; }
gpl-2.0
wangxiaofei6485/linux-2.6.32-fl2440
drivers/mtd/mtdoops.c
564
11417
/* * MTD Oops/Panic logger * * Copyright (C) 2007 Nokia Corporation. All rights reserved. * * Author: Richard Purdie <rpurdie@openedhand.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/console.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/mtd/mtd.h> #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00 #define OOPS_PAGE_SIZE 4096 static struct mtdoops_context { int mtd_index; struct work_struct work_erase; struct work_struct work_write; struct mtd_info *mtd; int oops_pages; int nextpage; int nextcount; char *name; void *oops_buf; /* writecount and disabling ready are spin lock protected */ spinlock_t writecount_lock; int ready; int writecount; } oops_cxt; static void mtdoops_erase_callback(struct erase_info *done) { wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; wake_up(wait_q); } static int mtdoops_erase_block(struct mtd_info *mtd, int offset) { struct erase_info erase; DECLARE_WAITQUEUE(wait, current); wait_queue_head_t wait_q; int ret; init_waitqueue_head(&wait_q); erase.mtd = mtd; erase.callback = mtdoops_erase_callback; erase.addr = offset; erase.len = mtd->erasesize; erase.priv = (u_long)&wait_q; set_current_state(TASK_INTERRUPTIBLE); 
add_wait_queue(&wait_q, &wait); ret = mtd->erase(mtd, &erase); if (ret) { set_current_state(TASK_RUNNING); remove_wait_queue(&wait_q, &wait); printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] " "on \"%s\" failed\n", (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name); return ret; } schedule(); /* Wait for erase to finish. */ remove_wait_queue(&wait_q, &wait); return 0; } static void mtdoops_inc_counter(struct mtdoops_context *cxt) { struct mtd_info *mtd = cxt->mtd; size_t retlen; u32 count; int ret; cxt->nextpage++; if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; cxt->nextcount++; if (cxt->nextcount == 0xffffffff) cxt->nextcount = 0; ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) { printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, retlen, ret); schedule_work(&cxt->work_erase); return; } /* See if we need to erase the next block */ if (count != 0xffffffff) { schedule_work(&cxt->work_erase); return; } printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n", cxt->nextpage, cxt->nextcount); cxt->ready = 1; } /* Scheduled work - when we can't proceed without erasing a block */ static void mtdoops_workfunc_erase(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_erase); struct mtd_info *mtd = cxt->mtd; int i = 0, j, ret, mod; /* We were unregistered */ if (!mtd) return; mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize; if (mod != 0) { cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; } while (mtd->block_isbad) { ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); if (!ret) break; if (ret < 0) { printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n"); return; } badblock: printk(KERN_WARNING "mtdoops: Bad block at %08x\n", 
cxt->nextpage * OOPS_PAGE_SIZE); i++; cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) { printk(KERN_ERR "mtdoops: All blocks bad!\n"); return; } } for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); if (ret >= 0) { printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); cxt->ready = 1; return; } if (mtd->block_markbad && (ret == -EIO)) { ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); if (ret < 0) { printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n"); return; } } goto badblock; } static void mtdoops_write(struct mtdoops_context *cxt, int panic) { struct mtd_info *mtd = cxt->mtd; size_t retlen; int ret; if (cxt->writecount < OOPS_PAGE_SIZE) memset(cxt->oops_buf + cxt->writecount, 0xff, OOPS_PAGE_SIZE - cxt->writecount); if (panic) ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); else ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); cxt->writecount = 0; if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); mtdoops_inc_counter(cxt); } static void mtdoops_workfunc_write(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_write); mtdoops_write(cxt, 0); } static void find_next_position(struct mtdoops_context *cxt) { struct mtd_info *mtd = cxt->mtd; int ret, page, maxpos = 0; u32 count[2], maxcount = 0xffffffff; size_t retlen; for (page = 0; page < cxt->oops_pages; page++) { ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) { printk(KERN_ERR "mtdoops: Read failure at 
%d (%td of 8 read)" ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret); continue; } if (count[1] != MTDOOPS_KERNMSG_MAGIC) continue; if (count[0] == 0xffffffff) continue; if (maxcount == 0xffffffff) { maxcount = count[0]; maxpos = page; } else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) { maxcount = count[0]; maxpos = page; } else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) { maxcount = count[0]; maxpos = page; } else if ((count[0] > maxcount) && (count[0] > 0xc0000000) && (maxcount > 0x80000000)) { maxcount = count[0]; maxpos = page; } } if (maxcount == 0xffffffff) { cxt->nextpage = 0; cxt->nextcount = 1; schedule_work(&cxt->work_erase); return; } cxt->nextpage = maxpos; cxt->nextcount = maxcount; mtdoops_inc_counter(cxt); } static void mtdoops_notify_add(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; if (cxt->name && !strcmp(mtd->name, cxt->name)) cxt->mtd_index = mtd->index; if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) return; if (mtd->size < (mtd->erasesize * 2)) { printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n", mtd->index); return; } if (mtd->erasesize < OOPS_PAGE_SIZE) { printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n", mtd->index); return; } cxt->mtd = mtd; if (mtd->size > INT_MAX) cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE; else cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE; find_next_position(cxt); printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index); } static void mtdoops_notify_remove(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) return; cxt->mtd = NULL; flush_scheduled_work(); } static void mtdoops_console_sync(void) { struct mtdoops_context *cxt = &oops_cxt; struct mtd_info *mtd = cxt->mtd; unsigned long flags; if (!cxt->ready || !mtd || cxt->writecount == 0) return; /* * Once ready is 0 and we've held the lock no further writes to the * buffer will happen */ 
spin_lock_irqsave(&cxt->writecount_lock, flags); if (!cxt->ready) { spin_unlock_irqrestore(&cxt->writecount_lock, flags); return; } cxt->ready = 0; spin_unlock_irqrestore(&cxt->writecount_lock, flags); if (mtd->panic_write && in_interrupt()) /* Interrupt context, we're going to panic so try and log */ mtdoops_write(cxt, 1); else schedule_work(&cxt->work_write); } static void mtdoops_console_write(struct console *co, const char *s, unsigned int count) { struct mtdoops_context *cxt = co->data; struct mtd_info *mtd = cxt->mtd; unsigned long flags; if (!oops_in_progress) { mtdoops_console_sync(); return; } if (!cxt->ready || !mtd) return; /* Locking on writecount ensures sequential writes to the buffer */ spin_lock_irqsave(&cxt->writecount_lock, flags); /* Check ready status didn't change whilst waiting for the lock */ if (!cxt->ready) { spin_unlock_irqrestore(&cxt->writecount_lock, flags); return; } if (cxt->writecount == 0) { u32 *stamp = cxt->oops_buf; *stamp++ = cxt->nextcount; *stamp = MTDOOPS_KERNMSG_MAGIC; cxt->writecount = 8; } if ((count + cxt->writecount) > OOPS_PAGE_SIZE) count = OOPS_PAGE_SIZE - cxt->writecount; memcpy(cxt->oops_buf + cxt->writecount, s, count); cxt->writecount += count; spin_unlock_irqrestore(&cxt->writecount_lock, flags); if (cxt->writecount == OOPS_PAGE_SIZE) mtdoops_console_sync(); } static int __init mtdoops_console_setup(struct console *co, char *options) { struct mtdoops_context *cxt = co->data; if (cxt->mtd_index != -1 || cxt->name) return -EBUSY; if (options) { cxt->name = kstrdup(options, GFP_KERNEL); return 0; } if (co->index == -1) return -EINVAL; cxt->mtd_index = co->index; return 0; } static struct mtd_notifier mtdoops_notifier = { .add = mtdoops_notify_add, .remove = mtdoops_notify_remove, }; static struct console mtdoops_console = { .name = "ttyMTD", .write = mtdoops_console_write, .setup = mtdoops_console_setup, .unblank = mtdoops_console_sync, .index = -1, .data = &oops_cxt, }; static int __init mtdoops_console_init(void) 
{ struct mtdoops_context *cxt = &oops_cxt; cxt->mtd_index = -1; cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); spin_lock_init(&cxt->writecount_lock); if (!cxt->oops_buf) { printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n"); return -ENOMEM; } INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); register_console(&mtdoops_console); register_mtd_user(&mtdoops_notifier); return 0; } static void __exit mtdoops_console_exit(void) { struct mtdoops_context *cxt = &oops_cxt; unregister_mtd_user(&mtdoops_notifier); unregister_console(&mtdoops_console); kfree(cxt->name); vfree(cxt->oops_buf); } subsys_initcall(mtdoops_console_init); module_exit(mtdoops_console_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>"); MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
gpl-2.0
qnhoang81/Kernel_POC
drivers/isdn/hisax/isdnl2.c
564
42986
/* $Id: isdnl2.c,v 2.30.2.4 2004/02/11 13:21:34 keil Exp $ * * Author Karsten Keil * based on the teles driver from Jan den Ouden * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For changes and modifications please read * Documentation/isdn/HiSax.cert * * Thanks to Jan den Ouden * Fritz Elfert * */ #include <linux/init.h> #include "hisax.h" #include "isdnl2.h" const char *l2_revision = "$Revision: 2.30.2.4 $"; static void l2m_debug(struct FsmInst *fi, char *fmt, ...); static struct Fsm l2fsm; enum { ST_L2_1, ST_L2_2, ST_L2_3, ST_L2_4, ST_L2_5, ST_L2_6, ST_L2_7, ST_L2_8, }; #define L2_STATE_COUNT (ST_L2_8+1) static char *strL2State[] = { "ST_L2_1", "ST_L2_2", "ST_L2_3", "ST_L2_4", "ST_L2_5", "ST_L2_6", "ST_L2_7", "ST_L2_8", }; enum { EV_L2_UI, EV_L2_SABME, EV_L2_DISC, EV_L2_DM, EV_L2_UA, EV_L2_FRMR, EV_L2_SUPER, EV_L2_I, EV_L2_DL_DATA, EV_L2_ACK_PULL, EV_L2_DL_UNIT_DATA, EV_L2_DL_ESTABLISH_REQ, EV_L2_DL_RELEASE_REQ, EV_L2_MDL_ASSIGN, EV_L2_MDL_REMOVE, EV_L2_MDL_ERROR, EV_L1_DEACTIVATE, EV_L2_T200, EV_L2_T203, EV_L2_SET_OWN_BUSY, EV_L2_CLEAR_OWN_BUSY, EV_L2_FRAME_ERROR, }; #define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1) static char *strL2Event[] = { "EV_L2_UI", "EV_L2_SABME", "EV_L2_DISC", "EV_L2_DM", "EV_L2_UA", "EV_L2_FRMR", "EV_L2_SUPER", "EV_L2_I", "EV_L2_DL_DATA", "EV_L2_ACK_PULL", "EV_L2_DL_UNIT_DATA", "EV_L2_DL_ESTABLISH_REQ", "EV_L2_DL_RELEASE_REQ", "EV_L2_MDL_ASSIGN", "EV_L2_MDL_REMOVE", "EV_L2_MDL_ERROR", "EV_L1_DEACTIVATE", "EV_L2_T200", "EV_L2_T203", "EV_L2_SET_OWN_BUSY", "EV_L2_CLEAR_OWN_BUSY", "EV_L2_FRAME_ERROR", }; static int l2addrsize(struct Layer2 *l2); static void set_peer_busy(struct Layer2 *l2) { test_and_set_bit(FLG_PEER_BUSY, &l2->flag); if (!skb_queue_empty(&l2->i_queue) || !skb_queue_empty(&l2->ui_queue)) test_and_set_bit(FLG_L2BLOCK, &l2->flag); } static void clear_peer_busy(struct Layer2 *l2) { if 
(test_and_clear_bit(FLG_PEER_BUSY, &l2->flag)) test_and_clear_bit(FLG_L2BLOCK, &l2->flag); } static void InitWin(struct Layer2 *l2) { int i; for (i = 0; i < MAX_WINDOW; i++) l2->windowar[i] = NULL; } static int freewin1(struct Layer2 *l2) { int i, cnt = 0; for (i = 0; i < MAX_WINDOW; i++) { if (l2->windowar[i]) { cnt++; dev_kfree_skb(l2->windowar[i]); l2->windowar[i] = NULL; } } return cnt; } static inline void freewin(struct PStack *st) { freewin1(&st->l2); } static void ReleaseWin(struct Layer2 *l2) { int cnt; if((cnt = freewin1(l2))) printk(KERN_WARNING "isdl2 freed %d skbuffs in release\n", cnt); } static inline unsigned int cansend(struct PStack *st) { unsigned int p1; if(test_bit(FLG_MOD128, &st->l2.flag)) p1 = (st->l2.vs - st->l2.va) % 128; else p1 = (st->l2.vs - st->l2.va) % 8; return ((p1 < st->l2.window) && !test_bit(FLG_PEER_BUSY, &st->l2.flag)); } static inline void clear_exception(struct Layer2 *l2) { test_and_clear_bit(FLG_ACK_PEND, &l2->flag); test_and_clear_bit(FLG_REJEXC, &l2->flag); test_and_clear_bit(FLG_OWN_BUSY, &l2->flag); clear_peer_busy(l2); } static inline int l2headersize(struct Layer2 *l2, int ui) { return (((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) + (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1)); } inline int l2addrsize(struct Layer2 *l2) { return (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1); } static int sethdraddr(struct Layer2 *l2, u_char * header, int rsp) { u_char *ptr = header; int crbit = rsp; if (test_bit(FLG_LAPD, &l2->flag)) { *ptr++ = (l2->sap << 2) | (rsp ? 
2 : 0); *ptr++ = (l2->tei << 1) | 1; return (2); } else { if (test_bit(FLG_ORIG, &l2->flag)) crbit = !crbit; if (crbit) *ptr++ = 1; else *ptr++ = 3; return (1); } } static inline void enqueue_super(struct PStack *st, struct sk_buff *skb) { if (test_bit(FLG_LAPB, &st->l2.flag)) st->l1.bcs->tx_cnt += skb->len; st->l2.l2l1(st, PH_DATA | REQUEST, skb); } #define enqueue_ui(a, b) enqueue_super(a, b) static inline int IsUI(u_char * data) { return ((data[0] & 0xef) == UI); } static inline int IsUA(u_char * data) { return ((data[0] & 0xef) == UA); } static inline int IsDM(u_char * data) { return ((data[0] & 0xef) == DM); } static inline int IsDISC(u_char * data) { return ((data[0] & 0xef) == DISC); } static inline int IsSFrame(u_char * data, struct PStack *st) { register u_char d = *data; if (!test_bit(FLG_MOD128, &st->l2.flag)) d &= 0xf; return(((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c)); } static inline int IsSABME(u_char * data, struct PStack *st) { u_char d = data[0] & ~0x10; return (test_bit(FLG_MOD128, &st->l2.flag) ? d == SABME : d == SABM); } static inline int IsREJ(u_char * data, struct PStack *st) { return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == REJ : (data[0] & 0xf) == REJ); } static inline int IsFRMR(u_char * data) { return ((data[0] & 0xef) == FRMR); } static inline int IsRNR(u_char * data, struct PStack *st) { return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == RNR : (data[0] & 0xf) == RNR); } static int iframe_error(struct PStack *st, struct sk_buff *skb) { int i = l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1); int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &st->l2.flag)) rsp = !rsp; if (rsp) return 'L'; if (skb->len < i) return 'N'; if ((skb->len - i) > st->l2.maxlen) return 'O'; return 0; } static int super_error(struct PStack *st, struct sk_buff *skb) { if (skb->len != l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 
2 : 1)) return 'N'; return 0; } static int unnum_error(struct PStack *st, struct sk_buff *skb, int wantrsp) { int rsp = (*skb->data & 0x2) >> 1; if (test_bit(FLG_ORIG, &st->l2.flag)) rsp = !rsp; if (rsp != wantrsp) return 'L'; if (skb->len != l2addrsize(&st->l2) + 1) return 'N'; return 0; } static int UI_error(struct PStack *st, struct sk_buff *skb) { int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &st->l2.flag)) rsp = !rsp; if (rsp) return 'L'; if (skb->len > st->l2.maxlen + l2addrsize(&st->l2) + 1) return 'O'; return 0; } static int FRMR_error(struct PStack *st, struct sk_buff *skb) { int headers = l2addrsize(&st->l2) + 1; u_char *datap = skb->data + headers; int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &st->l2.flag)) rsp = !rsp; if (!rsp) return 'L'; if (test_bit(FLG_MOD128, &st->l2.flag)) { if (skb->len < headers + 5) return 'N'; else l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x %2x %2x", datap[0], datap[1], datap[2], datap[3], datap[4]); } else { if (skb->len < headers + 3) return 'N'; else l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x", datap[0], datap[1], datap[2]); } return 0; } static unsigned int legalnr(struct PStack *st, unsigned int nr) { struct Layer2 *l2 = &st->l2; if(test_bit(FLG_MOD128, &l2->flag)) return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128); else return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8); } static void setva(struct PStack *st, unsigned int nr) { struct Layer2 *l2 = &st->l2; int len; u_long flags; spin_lock_irqsave(&l2->lock, flags); while (l2->va != nr) { (l2->va)++; if(test_bit(FLG_MOD128, &l2->flag)) l2->va %= 128; else l2->va %= 8; len = l2->windowar[l2->sow]->len; if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type) len = -1; dev_kfree_skb(l2->windowar[l2->sow]); l2->windowar[l2->sow] = NULL; l2->sow = (l2->sow + 1) % l2->window; spin_unlock_irqrestore(&l2->lock, flags); if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >=0)) lli_writewakeup(st, len); spin_lock_irqsave(&l2->lock, flags); } 
spin_unlock_irqrestore(&l2->lock, flags); } static void send_uframe(struct PStack *st, u_char cmd, u_char cr) { struct sk_buff *skb; u_char tmp[MAX_HEADER_LEN]; int i; i = sethdraddr(&st->l2, tmp, cr); tmp[i++] = cmd; if (!(skb = alloc_skb(i, GFP_ATOMIC))) { printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n"); return; } memcpy(skb_put(skb, i), tmp, i); enqueue_super(st, skb); } static inline u_char get_PollFlag(struct PStack * st, struct sk_buff * skb) { return (skb->data[l2addrsize(&(st->l2))] & 0x10); } static inline u_char get_PollFlagFree(struct PStack *st, struct sk_buff *skb) { u_char PF; PF = get_PollFlag(st, skb); dev_kfree_skb(skb); return (PF); } static inline void start_t200(struct PStack *st, int i) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i); test_and_set_bit(FLG_T200_RUN, &st->l2.flag); } static inline void restart_t200(struct PStack *st, int i) { FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i); test_and_set_bit(FLG_T200_RUN, &st->l2.flag); } static inline void stop_t200(struct PStack *st, int i) { if(test_and_clear_bit(FLG_T200_RUN, &st->l2.flag)) FsmDelTimer(&st->l2.t200, i); } static inline void st5_dl_release_l2l3(struct PStack *st) { int pr; if(test_and_clear_bit(FLG_PEND_REL, &st->l2.flag)) pr = DL_RELEASE | CONFIRM; else pr = DL_RELEASE | INDICATION; st->l2.l2l3(st, pr, NULL); } static inline void lapb_dl_release_l2l3(struct PStack *st, int f) { if (test_bit(FLG_LAPB, &st->l2.flag)) st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL); st->l2.l2l3(st, DL_RELEASE | f, NULL); } static void establishlink(struct FsmInst *fi) { struct PStack *st = fi->userdata; u_char cmd; clear_exception(&st->l2); st->l2.rc = 0; cmd = (test_bit(FLG_MOD128, &st->l2.flag) ? 
SABME : SABM) | 0x10; send_uframe(st, cmd, CMD); FsmDelTimer(&st->l2.t203, 1); restart_t200(st, 1); test_and_clear_bit(FLG_PEND_REL, &st->l2.flag); freewin(st); FsmChangeState(fi, ST_L2_5); } static void l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct PStack *st = fi->userdata; if (get_PollFlagFree(st, skb)) st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'C'); else st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'D'); } static void l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct PStack *st = fi->userdata; if (get_PollFlagFree(st, skb)) st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B'); else { st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } } static void l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct PStack *st = fi->userdata; if (get_PollFlagFree(st, skb)) st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B'); else { st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E'); } establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } static void l2_go_st3(struct FsmInst *fi, int event, void *arg) { FsmChangeState(fi, ST_L2_3); } static void l2_mdl_assign(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; FsmChangeState(fi, ST_L2_3); st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL); } static void l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&st->l2.ui_queue, skb); FsmChangeState(fi, ST_L2_2); st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL); } static void l2_queue_ui(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&st->l2.ui_queue, skb); } static void tx_ui(struct PStack *st) { struct sk_buff *skb; u_char header[MAX_HEADER_LEN]; int i; i = 
sethdraddr(&(st->l2), header, CMD); header[i++] = UI; while ((skb = skb_dequeue(&st->l2.ui_queue))) { memcpy(skb_push(skb, i), header, i); enqueue_ui(st, skb); } } static void l2_send_ui(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&st->l2.ui_queue, skb); tx_ui(st); } static void l2_got_ui(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_pull(skb, l2headersize(&st->l2, 1)); st->l2.l2l3(st, DL_UNIT_DATA | INDICATION, skb); /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * in states 1-3 for broadcast */ } static void l2_establish(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; establishlink(fi); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } static void l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); test_and_clear_bit(FLG_PEND_REL, &st->l2.flag); } static void l2_l3_reestablish(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); establishlink(fi); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } static void l2_release(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL); } static void l2_pend_rel(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; test_and_set_bit(FLG_PEND_REL, &st->l2.flag); } static void l2_disconnect(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); freewin(st); FsmChangeState(fi, ST_L2_6); st->l2.rc = 0; send_uframe(st, DISC | 0x10, CMD); FsmDelTimer(&st->l2.t203, 1); restart_t200(st, 2); } static void l2_start_multi(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; send_uframe(st, UA | 
get_PollFlagFree(st, skb), RSP); clear_exception(&st->l2); st->l2.vs = 0; st->l2.va = 0; st->l2.vr = 0; st->l2.sow = 0; FsmChangeState(fi, ST_L2_7); FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3); st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL); } static void l2_send_UA(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; send_uframe(st, UA | get_PollFlagFree(st, skb), RSP); } static void l2_send_DM(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; send_uframe(st, DM | get_PollFlagFree(st, skb), RSP); } static void l2_restart_multi(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; int est = 0, state; state = fi->state; send_uframe(st, UA | get_PollFlagFree(st, skb), RSP); st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'F'); if (st->l2.vs != st->l2.va) { skb_queue_purge(&st->l2.i_queue); est = 1; } clear_exception(&st->l2); st->l2.vs = 0; st->l2.va = 0; st->l2.vr = 0; st->l2.sow = 0; FsmChangeState(fi, ST_L2_7); stop_t200(st, 3); FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3); if (est) st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL); if ((ST_L2_7==state) || (ST_L2_8 == state)) if (!skb_queue_empty(&st->l2.i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } static void l2_stop_multi(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; FsmChangeState(fi, ST_L2_4); FsmDelTimer(&st->l2.t203, 3); stop_t200(st, 4); send_uframe(st, UA | get_PollFlagFree(st, skb), RSP); skb_queue_purge(&st->l2.i_queue); freewin(st); lapb_dl_release_l2l3(st, INDICATION); } static void l2_connected(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; int pr=-1; if (!get_PollFlag(st, skb)) { l2_mdl_error_ua(fi, event, arg); return; } dev_kfree_skb(skb); if 
(test_and_clear_bit(FLG_PEND_REL, &st->l2.flag)) l2_disconnect(fi, event, arg); if (test_and_clear_bit(FLG_L3_INIT, &st->l2.flag)) { pr = DL_ESTABLISH | CONFIRM; } else if (st->l2.vs != st->l2.va) { skb_queue_purge(&st->l2.i_queue); pr = DL_ESTABLISH | INDICATION; } stop_t200(st, 5); st->l2.vr = 0; st->l2.vs = 0; st->l2.va = 0; st->l2.sow = 0; FsmChangeState(fi, ST_L2_7); FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 4); if (pr != -1) st->l2.l2l3(st, pr, NULL); if (!skb_queue_empty(&st->l2.i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } static void l2_released(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (!get_PollFlag(st, skb)) { l2_mdl_error_ua(fi, event, arg); return; } dev_kfree_skb(skb); stop_t200(st, 6); lapb_dl_release_l2l3(st, CONFIRM); FsmChangeState(fi, ST_L2_4); } static void l2_reestablish(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (!get_PollFlagFree(st, skb)) { establishlink(fi); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } } static void l2_st5_dm_release(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (get_PollFlagFree(st, skb)) { stop_t200(st, 7); if (!test_bit(FLG_L3_INIT, &st->l2.flag)) skb_queue_purge(&st->l2.i_queue); if (test_bit(FLG_LAPB, &st->l2.flag)) st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL); st5_dl_release_l2l3(st); FsmChangeState(fi, ST_L2_4); } } static void l2_st6_dm_release(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (get_PollFlagFree(st, skb)) { stop_t200(st, 8); lapb_dl_release_l2l3(st, CONFIRM); FsmChangeState(fi, ST_L2_4); } } static inline void enquiry_cr(struct PStack *st, u_char typ, u_char cr, u_char pf) { struct sk_buff *skb; struct Layer2 *l2; u_char tmp[MAX_HEADER_LEN]; int i; l2 = &st->l2; i = sethdraddr(l2, tmp, cr); if 
(test_bit(FLG_MOD128, &l2->flag)) { tmp[i++] = typ; tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0); } else tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0); if (!(skb = alloc_skb(i, GFP_ATOMIC))) { printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n"); return; } memcpy(skb_put(skb, i), tmp, i); enqueue_super(st, skb); } static inline void enquiry_response(struct PStack *st) { if (test_bit(FLG_OWN_BUSY, &st->l2.flag)) enquiry_cr(st, RNR, RSP, 1); else enquiry_cr(st, RR, RSP, 1); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); } static inline void transmit_enquiry(struct PStack *st) { if (test_bit(FLG_OWN_BUSY, &st->l2.flag)) enquiry_cr(st, RNR, CMD, 1); else enquiry_cr(st, RR, CMD, 1); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); start_t200(st, 9); } static void nrerrorrecovery(struct FsmInst *fi) { struct PStack *st = fi->userdata; st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'J'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } static void invoke_retransmission(struct PStack *st, unsigned int nr) { struct Layer2 *l2 = &st->l2; u_int p1; u_long flags; spin_lock_irqsave(&l2->lock, flags); if (l2->vs != nr) { while (l2->vs != nr) { (l2->vs)--; if(test_bit(FLG_MOD128, &l2->flag)) { l2->vs %= 128; p1 = (l2->vs - l2->va) % 128; } else { l2->vs %= 8; p1 = (l2->vs - l2->va) % 8; } p1 = (p1 + l2->sow) % l2->window; if (test_bit(FLG_LAPB, &l2->flag)) st->l1.bcs->tx_cnt += l2->windowar[p1]->len + l2headersize(l2, 0); skb_queue_head(&l2->i_queue, l2->windowar[p1]); l2->windowar[p1] = NULL; } spin_unlock_irqrestore(&l2->lock, flags); st->l2.l2l1(st, PH_PULL | REQUEST, NULL); return; } spin_unlock_irqrestore(&l2->lock, flags); } static void l2_st7_got_super(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; int PollFlag, rsp, typ = RR; unsigned int nr; struct Layer2 *l2 = &st->l2; rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; skb_pull(skb, l2addrsize(l2)); if 
(IsRNR(skb->data, st)) { set_peer_busy(l2); typ = RNR; } else clear_peer_busy(l2); if (IsREJ(skb->data, st)) typ = REJ; if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = (skb->data[1] & 0x1) == 0x1; nr = skb->data[1] >> 1; } else { PollFlag = (skb->data[0] & 0x10); nr = (skb->data[0] >> 5) & 0x7; } dev_kfree_skb(skb); if (PollFlag) { if (rsp) st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'A'); else enquiry_response(st); } if (legalnr(st, nr)) { if (typ == REJ) { setva(st, nr); invoke_retransmission(st, nr); stop_t200(st, 10); if (FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 6)) l2m_debug(&st->l2.l2m, "Restart T203 ST7 REJ"); } else if ((nr == l2->vs) && (typ == RR)) { setva(st, nr); stop_t200(st, 11); FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 7); } else if ((l2->va != nr) || (typ == RNR)) { setva(st, nr); if(typ != RR) FsmDelTimer(&st->l2.t203, 9); restart_t200(st, 12); } if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } else nrerrorrecovery(fi); } static void l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (test_bit(FLG_LAPB, &st->l2.flag)) st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0); if (!test_bit(FLG_L3_INIT, &st->l2.flag)) skb_queue_tail(&st->l2.i_queue, skb); else dev_kfree_skb(skb); } static void l2_feed_i_pull(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (test_bit(FLG_LAPB, &st->l2.flag)) st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0); skb_queue_tail(&st->l2.i_queue, skb); st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } static void l2_feed_iqueue(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; if (test_bit(FLG_LAPB, &st->l2.flag)) st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0); skb_queue_tail(&st->l2.i_queue, skb); } static void l2_got_iframe(struct 
FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; struct Layer2 *l2 = &(st->l2); int PollFlag, ns, i; unsigned int nr; i = l2addrsize(l2); if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = ((skb->data[i + 1] & 0x1) == 0x1); ns = skb->data[i] >> 1; nr = (skb->data[i + 1] >> 1) & 0x7f; } else { PollFlag = (skb->data[i] & 0x10); ns = (skb->data[i] >> 1) & 0x7; nr = (skb->data[i] >> 5) & 0x7; } if (test_bit(FLG_OWN_BUSY, &l2->flag)) { dev_kfree_skb(skb); if(PollFlag) enquiry_response(st); } else if (l2->vr == ns) { (l2->vr)++; if(test_bit(FLG_MOD128, &l2->flag)) l2->vr %= 128; else l2->vr %= 8; test_and_clear_bit(FLG_REJEXC, &l2->flag); if (PollFlag) enquiry_response(st); else test_and_set_bit(FLG_ACK_PEND, &l2->flag); skb_pull(skb, l2headersize(l2, 0)); st->l2.l2l3(st, DL_DATA | INDICATION, skb); } else { /* n(s)!=v(r) */ dev_kfree_skb(skb); if (test_and_set_bit(FLG_REJEXC, &l2->flag)) { if (PollFlag) enquiry_response(st); } else { enquiry_cr(st, REJ, RSP, PollFlag); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } } if (legalnr(st, nr)) { if (!test_bit(FLG_PEER_BUSY, &st->l2.flag) && (fi->state == ST_L2_7)) { if (nr == st->l2.vs) { stop_t200(st, 13); FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 7); } else if (nr != st->l2.va) restart_t200(st, 14); } setva(st, nr); } else { nrerrorrecovery(fi); return; } if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag)) enquiry_cr(st, RR, RSP, 0); } static void l2_got_tei(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; st->l2.tei = (long) arg; if (fi->state == ST_L2_3) { establishlink(fi); test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } else FsmChangeState(fi, ST_L2_4); if (!skb_queue_empty(&st->l2.ui_queue)) tx_ui(st); } static void l2_st5_tout_200(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if 
(test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); } else if (st->l2.rc == st->l2.N200) { FsmChangeState(fi, ST_L2_4); test_and_clear_bit(FLG_T200_RUN, &st->l2.flag); skb_queue_purge(&st->l2.i_queue); st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'G'); if (test_bit(FLG_LAPB, &st->l2.flag)) st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL); st5_dl_release_l2l3(st); } else { st->l2.rc++; FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); send_uframe(st, (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM) | 0x10, CMD); } } static void l2_st6_tout_200(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); } else if (st->l2.rc == st->l2.N200) { FsmChangeState(fi, ST_L2_4); test_and_clear_bit(FLG_T200_RUN, &st->l2.flag); st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'H'); lapb_dl_release_l2l3(st, CONFIRM); } else { st->l2.rc++; FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); send_uframe(st, DISC | 0x10, CMD); } } static void l2_st7_tout_200(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); return; } test_and_clear_bit(FLG_T200_RUN, &st->l2.flag); st->l2.rc = 0; FsmChangeState(fi, ST_L2_8); transmit_enquiry(st); st->l2.rc++; } static void l2_st8_tout_200(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9); return; } test_and_clear_bit(FLG_T200_RUN, &st->l2.flag); if (st->l2.rc == st->l2.N200) { st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'I'); 
establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } else { transmit_enquiry(st); st->l2.rc++; } } static void l2_st7_tout_203(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if (test_bit(FLG_LAPD, &st->l2.flag) && test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) { FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 9); return; } FsmChangeState(fi, ST_L2_8); transmit_enquiry(st); st->l2.rc = 0; } static void l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb, *oskb; struct Layer2 *l2 = &st->l2; u_char header[MAX_HEADER_LEN]; int i; int unsigned p1; u_long flags; if (!cansend(st)) return; skb = skb_dequeue(&l2->i_queue); if (!skb) return; spin_lock_irqsave(&l2->lock, flags); if(test_bit(FLG_MOD128, &l2->flag)) p1 = (l2->vs - l2->va) % 128; else p1 = (l2->vs - l2->va) % 8; p1 = (p1 + l2->sow) % l2->window; if (l2->windowar[p1]) { printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n", p1); dev_kfree_skb(l2->windowar[p1]); } l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC); i = sethdraddr(&st->l2, header, CMD); if (test_bit(FLG_MOD128, &l2->flag)) { header[i++] = l2->vs << 1; header[i++] = l2->vr << 1; l2->vs = (l2->vs + 1) % 128; } else { header[i++] = (l2->vr << 5) | (l2->vs << 1); l2->vs = (l2->vs + 1) % 8; } spin_unlock_irqrestore(&l2->lock, flags); p1 = skb->data - skb->head; if (p1 >= i) memcpy(skb_push(skb, i), header, i); else { printk(KERN_WARNING "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); oskb = skb; skb = alloc_skb(oskb->len + i, GFP_ATOMIC); memcpy(skb_put(skb, i), header, i); skb_copy_from_linear_data(oskb, skb_put(skb, oskb->len), oskb->len); dev_kfree_skb(oskb); } st->l2.l2l1(st, PH_PULL | INDICATION, skb); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { FsmDelTimer(&st->l2.t203, 13); FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11); } if 
(!skb_queue_empty(&l2->i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } static void l2_st8_got_super(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; int PollFlag, rsp, rnr = 0; unsigned int nr; struct Layer2 *l2 = &st->l2; rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; skb_pull(skb, l2addrsize(l2)); if (IsRNR(skb->data, st)) { set_peer_busy(l2); rnr = 1; } else clear_peer_busy(l2); if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = (skb->data[1] & 0x1) == 0x1; nr = skb->data[1] >> 1; } else { PollFlag = (skb->data[0] & 0x10); nr = (skb->data[0] >> 5) & 0x7; } dev_kfree_skb(skb); if (rsp && PollFlag) { if (legalnr(st, nr)) { if (rnr) { restart_t200(st, 15); } else { stop_t200(st, 16); FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 5); setva(st, nr); } invoke_retransmission(st, nr); FsmChangeState(fi, ST_L2_7); if (!skb_queue_empty(&l2->i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } else nrerrorrecovery(fi); } else { if (!rsp && PollFlag) enquiry_response(st); if (legalnr(st, nr)) { setva(st, nr); } else nrerrorrecovery(fi); } } static void l2_got_FRMR(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; struct sk_buff *skb = arg; skb_pull(skb, l2addrsize(&st->l2) + 1); if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */ (IsUA(skb->data) && (fi->state == ST_L2_7))) { st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'K'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } dev_kfree_skb(skb); } static void l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.ui_queue); st->l2.tei = -1; FsmChangeState(fi, ST_L2_1); } static void l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.ui_queue); st->l2.tei = -1; st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL); 
FsmChangeState(fi, ST_L2_1); } static void l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); freewin(st); st->l2.tei = -1; stop_t200(st, 17); st5_dl_release_l2l3(st); FsmChangeState(fi, ST_L2_1); } static void l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.ui_queue); st->l2.tei = -1; stop_t200(st, 18); st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL); FsmChangeState(fi, ST_L2_1); } static void l2_tei_remove(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); freewin(st); st->l2.tei = -1; stop_t200(st, 17); FsmDelTimer(&st->l2.t203, 19); st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL); FsmChangeState(fi, ST_L2_1); } static void l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag)) st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL); } static void l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); freewin(st); stop_t200(st, 19); st5_dl_release_l2l3(st); FsmChangeState(fi, ST_L2_4); } static void l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.ui_queue); stop_t200(st, 20); st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL); FsmChangeState(fi, ST_L2_4); } static void l2_persistent_da(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); freewin(st); stop_t200(st, 19); FsmDelTimer(&st->l2.t203, 19); st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL); FsmChangeState(fi, 
ST_L2_4); } static void l2_set_own_busy(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if(!test_and_set_bit(FLG_OWN_BUSY, &st->l2.flag)) { enquiry_cr(st, RNR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); } } static void l2_clear_own_busy(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; if(!test_and_clear_bit(FLG_OWN_BUSY, &st->l2.flag)) { enquiry_cr(st, RR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); } } static void l2_frame_error(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; st->ma.layer(st, MDL_ERROR | INDICATION, arg); } static void l2_frame_error_reest(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; st->ma.layer(st, MDL_ERROR | INDICATION, arg); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &st->l2.flag); } static struct FsmNode L2FnList[] __initdata = { {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign}, {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3}, {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish}, {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3}, {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish}, {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish}, {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release}, {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel}, {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect}, {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect}, {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest}, {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull}, {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue}, {ST_L2_1, EV_L2_DL_UNIT_DATA, l2_queue_ui_assign}, {ST_L2_2, EV_L2_DL_UNIT_DATA, l2_queue_ui}, {ST_L2_3, EV_L2_DL_UNIT_DATA, l2_queue_ui}, {ST_L2_4, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_5, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_6, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_7, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_8, EV_L2_DL_UNIT_DATA, l2_send_ui}, {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_3, 
EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove}, {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove}, {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove}, {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove}, {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove}, {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove}, {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove}, {ST_L2_4, EV_L2_SABME, l2_start_multi}, {ST_L2_5, EV_L2_SABME, l2_send_UA}, {ST_L2_6, EV_L2_SABME, l2_send_DM}, {ST_L2_7, EV_L2_SABME, l2_restart_multi}, {ST_L2_8, EV_L2_SABME, l2_restart_multi}, {ST_L2_4, EV_L2_DISC, l2_send_DM}, {ST_L2_5, EV_L2_DISC, l2_send_DM}, {ST_L2_6, EV_L2_DISC, l2_send_UA}, {ST_L2_7, EV_L2_DISC, l2_stop_multi}, {ST_L2_8, EV_L2_DISC, l2_stop_multi}, {ST_L2_4, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_5, EV_L2_UA, l2_connected}, {ST_L2_6, EV_L2_UA, l2_released}, {ST_L2_7, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_8, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_4, EV_L2_DM, l2_reestablish}, {ST_L2_5, EV_L2_DM, l2_st5_dm_release}, {ST_L2_6, EV_L2_DM, l2_st6_dm_release}, {ST_L2_7, EV_L2_DM, l2_mdl_error_dm}, {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm}, {ST_L2_1, EV_L2_UI, l2_got_ui}, {ST_L2_2, EV_L2_UI, l2_got_ui}, {ST_L2_3, EV_L2_UI, l2_got_ui}, {ST_L2_4, EV_L2_UI, l2_got_ui}, {ST_L2_5, EV_L2_UI, l2_got_ui}, {ST_L2_6, EV_L2_UI, l2_got_ui}, {ST_L2_7, EV_L2_UI, l2_got_ui}, {ST_L2_8, EV_L2_UI, l2_got_ui}, {ST_L2_7, EV_L2_FRMR, l2_got_FRMR}, {ST_L2_8, EV_L2_FRMR, l2_got_FRMR}, {ST_L2_7, EV_L2_SUPER, l2_st7_got_super}, {ST_L2_8, EV_L2_SUPER, l2_st8_got_super}, {ST_L2_7, EV_L2_I, l2_got_iframe}, {ST_L2_8, EV_L2_I, l2_got_iframe}, {ST_L2_5, EV_L2_T200, l2_st5_tout_200}, {ST_L2_6, EV_L2_T200, l2_st6_tout_200}, {ST_L2_7, EV_L2_T200, l2_st7_tout_200}, {ST_L2_8, EV_L2_T200, l2_st8_tout_200}, {ST_L2_7, EV_L2_T203, l2_st7_tout_203}, {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue}, {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy}, {ST_L2_8, 
EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy}, {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove}, {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove}, {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da}, {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da}, {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da}, {ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da}, }; static void isdnl2_l1l2(struct PStack *st, int pr, void *arg) { struct sk_buff *skb = arg; u_char *datap; int ret = 1, len; int c = 0; switch (pr) { case (PH_DATA | INDICATION): datap = skb->data; len = l2addrsize(&st->l2); if (skb->len > len) datap += len; else { FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'N'); dev_kfree_skb(skb); return; } if (!(*datap & 1)) { /* I-Frame */ if(!(c = iframe_error(st, skb))) ret = FsmEvent(&st->l2.l2m, EV_L2_I, skb); } else if (IsSFrame(datap, st)) { /* S-Frame */ if(!(c = super_error(st, skb))) ret = FsmEvent(&st->l2.l2m, EV_L2_SUPER, skb); } else if (IsUI(datap)) { if(!(c = UI_error(st, skb))) ret = FsmEvent(&st->l2.l2m, EV_L2_UI, skb); } else if (IsSABME(datap, st)) { if(!(c = unnum_error(st, skb, CMD))) ret = FsmEvent(&st->l2.l2m, EV_L2_SABME, skb); } else if (IsUA(datap)) { if(!(c = unnum_error(st, skb, RSP))) ret = FsmEvent(&st->l2.l2m, EV_L2_UA, skb); } else if (IsDISC(datap)) { if(!(c = unnum_error(st, skb, CMD))) ret = FsmEvent(&st->l2.l2m, EV_L2_DISC, skb); } else if (IsDM(datap)) { if(!(c = unnum_error(st, skb, RSP))) ret = FsmEvent(&st->l2.l2m, EV_L2_DM, skb); } else if (IsFRMR(datap)) { if(!(c = FRMR_error(st,skb))) ret = FsmEvent(&st->l2.l2m, EV_L2_FRMR, skb); } else { FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'L'); 
dev_kfree_skb(skb); ret = 0; } if(c) { dev_kfree_skb(skb); FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *)(long)c); ret = 0; } if (ret) dev_kfree_skb(skb); break; case (PH_PULL | CONFIRM): FsmEvent(&st->l2.l2m, EV_L2_ACK_PULL, arg); break; case (PH_PAUSE | INDICATION): test_and_set_bit(FLG_DCHAN_BUSY, &st->l2.flag); break; case (PH_PAUSE | CONFIRM): test_and_clear_bit(FLG_DCHAN_BUSY, &st->l2.flag); break; case (PH_ACTIVATE | CONFIRM): case (PH_ACTIVATE | INDICATION): test_and_set_bit(FLG_L1_ACTIV, &st->l2.flag); if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag)) FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg); break; case (PH_DEACTIVATE | INDICATION): case (PH_DEACTIVATE | CONFIRM): test_and_clear_bit(FLG_L1_ACTIV, &st->l2.flag); FsmEvent(&st->l2.l2m, EV_L1_DEACTIVATE, arg); break; default: l2m_debug(&st->l2.l2m, "l2 unknown pr %04x", pr); break; } } static void isdnl2_l3l2(struct PStack *st, int pr, void *arg) { switch (pr) { case (DL_DATA | REQUEST): if (FsmEvent(&st->l2.l2m, EV_L2_DL_DATA, arg)) { dev_kfree_skb((struct sk_buff *) arg); } break; case (DL_UNIT_DATA | REQUEST): if (FsmEvent(&st->l2.l2m, EV_L2_DL_UNIT_DATA, arg)) { dev_kfree_skb((struct sk_buff *) arg); } break; case (DL_ESTABLISH | REQUEST): if (test_bit(FLG_L1_ACTIV, &st->l2.flag)) { if (test_bit(FLG_LAPD, &st->l2.flag) || test_bit(FLG_ORIG, &st->l2.flag)) { FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg); } } else { if (test_bit(FLG_LAPD, &st->l2.flag) || test_bit(FLG_ORIG, &st->l2.flag)) { test_and_set_bit(FLG_ESTAB_PEND, &st->l2.flag); } st->l2.l2l1(st, PH_ACTIVATE, NULL); } break; case (DL_RELEASE | REQUEST): if (test_bit(FLG_LAPB, &st->l2.flag)) { st->l2.l2l1(st, PH_DEACTIVATE, NULL); } FsmEvent(&st->l2.l2m, EV_L2_DL_RELEASE_REQ, arg); break; case (MDL_ASSIGN | REQUEST): FsmEvent(&st->l2.l2m, EV_L2_MDL_ASSIGN, arg); break; case (MDL_REMOVE | REQUEST): FsmEvent(&st->l2.l2m, EV_L2_MDL_REMOVE, arg); break; case (MDL_ERROR | RESPONSE): FsmEvent(&st->l2.l2m, EV_L2_MDL_ERROR, arg); 
break; } } void releasestack_isdnl2(struct PStack *st) { FsmDelTimer(&st->l2.t200, 21); FsmDelTimer(&st->l2.t203, 16); skb_queue_purge(&st->l2.i_queue); skb_queue_purge(&st->l2.ui_queue); ReleaseWin(&st->l2); } static void l2m_debug(struct FsmInst *fi, char *fmt, ...) { va_list args; struct PStack *st = fi->userdata; va_start(args, fmt); VHiSax_putstatus(st->l1.hardware, st->l2.debug_id, fmt, args); va_end(args); } void setstack_isdnl2(struct PStack *st, char *debug_id) { spin_lock_init(&st->l2.lock); st->l1.l1l2 = isdnl2_l1l2; st->l3.l3l2 = isdnl2_l3l2; skb_queue_head_init(&st->l2.i_queue); skb_queue_head_init(&st->l2.ui_queue); InitWin(&st->l2); st->l2.debug = 0; st->l2.l2m.fsm = &l2fsm; if (test_bit(FLG_LAPB, &st->l2.flag)) st->l2.l2m.state = ST_L2_4; else st->l2.l2m.state = ST_L2_1; st->l2.l2m.debug = 0; st->l2.l2m.userdata = st; st->l2.l2m.userint = 0; st->l2.l2m.printdebug = l2m_debug; strcpy(st->l2.debug_id, debug_id); FsmInitTimer(&st->l2.l2m, &st->l2.t200); FsmInitTimer(&st->l2.l2m, &st->l2.t203); } static void transl2_l3l2(struct PStack *st, int pr, void *arg) { switch (pr) { case (DL_DATA | REQUEST): case (DL_UNIT_DATA | REQUEST): st->l2.l2l1(st, PH_DATA | REQUEST, arg); break; case (DL_ESTABLISH | REQUEST): st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL); break; case (DL_RELEASE | REQUEST): st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL); break; } } void setstack_transl2(struct PStack *st) { st->l3.l3l2 = transl2_l3l2; } void releasestack_transl2(struct PStack *st) { } int __init Isdnl2New(void) { l2fsm.state_count = L2_STATE_COUNT; l2fsm.event_count = L2_EVENT_COUNT; l2fsm.strEvent = strL2Event; l2fsm.strState = strL2State; return FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); } void Isdnl2Free(void) { FsmFree(&l2fsm); }
gpl-2.0
jyunyen/Nexus7_Kernal
drivers/media/video/vino.c
564
101089
/* * Driver for the VINO (Video In No Out) system found in SGI Indys. * * This file is subject to the terms and conditions of the GNU General Public * License version 2 as published by the Free Software Foundation. * * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi> * * Based on the previous version of the driver for 2.4 kernels by: * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org> * * v4l2_device/v4l2_subdev conversion by: * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl> * * Note: this conversion is untested! Please contact the linux-media * mailinglist if you can test this, together with the test results. */ /* * TODO: * - remove "mark pages reserved-hacks" from memory allocation code * and implement fault() * - check decimation, calculating and reporting image size when * using decimation * - implement read(), user mode buffers and overlay (?) */ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/time.h> #include <linux/kmod.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <linux/mutex.h> #include <asm/paccess.h> #include <asm/io.h> #include <asm/sgi/ip22.h> #include <asm/sgi/mc.h> #include "vino.h" #include "saa7191.h" #include "indycam.h" /* Uncomment the following line to get lots and lots of (mostly useless) * debug info. * Note that the debug output also slows down the driver significantly */ // #define VINO_DEBUG // #define VINO_DEBUG_INT #define VINO_MODULE_VERSION "0.0.7" MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver"); MODULE_VERSION(VINO_MODULE_VERSION); MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>"); MODULE_LICENSE("GPL"); #ifdef VINO_DEBUG #define dprintk(x...) printk("VINO: " x); #else #define dprintk(x...) 
#endif #define VINO_NO_CHANNEL 0 #define VINO_CHANNEL_A 1 #define VINO_CHANNEL_B 2 #define VINO_PAL_WIDTH 768 #define VINO_PAL_HEIGHT 576 #define VINO_NTSC_WIDTH 640 #define VINO_NTSC_HEIGHT 480 #define VINO_MIN_WIDTH 32 #define VINO_MIN_HEIGHT 32 #define VINO_CLIPPING_START_ODD_D1 1 #define VINO_CLIPPING_START_ODD_PAL 15 #define VINO_CLIPPING_START_ODD_NTSC 12 #define VINO_CLIPPING_START_EVEN_D1 2 #define VINO_CLIPPING_START_EVEN_PAL 15 #define VINO_CLIPPING_START_EVEN_NTSC 12 #define VINO_INPUT_CHANNEL_COUNT 3 /* the number is the index for vino_inputs */ #define VINO_INPUT_NONE -1 #define VINO_INPUT_COMPOSITE 0 #define VINO_INPUT_SVIDEO 1 #define VINO_INPUT_D1 2 #define VINO_PAGE_RATIO (PAGE_SIZE / VINO_PAGE_SIZE) #define VINO_FIFO_THRESHOLD_DEFAULT 16 #define VINO_FRAMEBUFFER_SIZE ((VINO_PAL_WIDTH \ * VINO_PAL_HEIGHT * 4 \ + 3 * PAGE_SIZE) & ~(PAGE_SIZE - 1)) #define VINO_FRAMEBUFFER_COUNT_MAX 8 #define VINO_FRAMEBUFFER_UNUSED 0 #define VINO_FRAMEBUFFER_IN_USE 1 #define VINO_FRAMEBUFFER_READY 2 #define VINO_QUEUE_ERROR -1 #define VINO_QUEUE_MAGIC 0x20050125 #define VINO_MEMORY_NONE 0 #define VINO_MEMORY_MMAP 1 #define VINO_MEMORY_USERPTR 2 #define VINO_DUMMY_DESC_COUNT 4 #define VINO_DESC_FETCH_DELAY 5 /* microseconds */ #define VINO_MAX_FRAME_SKIP_COUNT 128 /* the number is the index for vino_data_formats */ #define VINO_DATA_FMT_NONE -1 #define VINO_DATA_FMT_GREY 0 #define VINO_DATA_FMT_RGB332 1 #define VINO_DATA_FMT_RGB32 2 #define VINO_DATA_FMT_YUV 3 #define VINO_DATA_FMT_COUNT 4 /* the number is the index for vino_data_norms */ #define VINO_DATA_NORM_NONE -1 #define VINO_DATA_NORM_NTSC 0 #define VINO_DATA_NORM_PAL 1 #define VINO_DATA_NORM_SECAM 2 #define VINO_DATA_NORM_D1 3 #define VINO_DATA_NORM_COUNT 4 /* I2C controller flags */ #define SGI_I2C_FORCE_IDLE (0 << 0) #define SGI_I2C_NOT_IDLE (1 << 0) #define SGI_I2C_WRITE (0 << 1) #define SGI_I2C_READ (1 << 1) #define SGI_I2C_RELEASE_BUS (0 << 2) #define SGI_I2C_HOLD_BUS (1 << 2) #define SGI_I2C_XFER_DONE 
(0 << 4) #define SGI_I2C_XFER_BUSY (1 << 4) #define SGI_I2C_ACK (0 << 5) #define SGI_I2C_NACK (1 << 5) #define SGI_I2C_BUS_OK (0 << 7) #define SGI_I2C_BUS_ERR (1 << 7) /* Internal data structure definitions */ struct vino_input { char *name; v4l2_std_id std; }; struct vino_clipping { unsigned int left, right, top, bottom; }; struct vino_data_format { /* the description */ char *description; /* bytes per pixel */ unsigned int bpp; /* V4L2 fourcc code */ __u32 pixelformat; /* V4L2 colorspace (duh!) */ enum v4l2_colorspace colorspace; }; struct vino_data_norm { char *description; unsigned int width, height; struct vino_clipping odd; struct vino_clipping even; v4l2_std_id std; unsigned int fps_min, fps_max; __u32 framelines; }; struct vino_descriptor_table { /* the number of PAGE_SIZE sized pages in the buffer */ unsigned int page_count; /* virtual (kmalloc'd) pointers to the actual data * (in PAGE_SIZE chunks, used with mmap streaming) */ unsigned long *virtual; /* cpu address for the VINO descriptor table * (contains DMA addresses, VINO_PAGE_SIZE chunks) */ unsigned long *dma_cpu; /* dma address for the VINO descriptor table * (contains DMA addresses, VINO_PAGE_SIZE chunks) */ dma_addr_t dma; }; struct vino_framebuffer { /* identifier nubmer */ unsigned int id; /* the length of the whole buffer */ unsigned int size; /* the length of actual data in buffer */ unsigned int data_size; /* the data format */ unsigned int data_format; /* the state of buffer data */ unsigned int state; /* is the buffer mapped in user space? 
*/ unsigned int map_count; /* memory offset for mmap() */ unsigned int offset; /* frame counter */ unsigned int frame_counter; /* timestamp (written when image capture finishes) */ struct timeval timestamp; struct vino_descriptor_table desc_table; spinlock_t state_lock; }; struct vino_framebuffer_fifo { unsigned int length; unsigned int used; unsigned int head; unsigned int tail; unsigned int data[VINO_FRAMEBUFFER_COUNT_MAX]; }; struct vino_framebuffer_queue { unsigned int magic; /* VINO_MEMORY_NONE, VINO_MEMORY_MMAP or VINO_MEMORY_USERPTR */ unsigned int type; unsigned int length; /* data field of in and out contain index numbers for buffer */ struct vino_framebuffer_fifo in; struct vino_framebuffer_fifo out; struct vino_framebuffer *buffer[VINO_FRAMEBUFFER_COUNT_MAX]; spinlock_t queue_lock; struct mutex queue_mutex; wait_queue_head_t frame_wait_queue; }; struct vino_interrupt_data { struct timeval timestamp; unsigned int frame_counter; unsigned int skip_count; unsigned int skip; }; struct vino_channel_settings { unsigned int channel; int input; unsigned int data_format; unsigned int data_norm; struct vino_clipping clipping; unsigned int decimation; unsigned int line_size; unsigned int alpha; unsigned int fps; unsigned int framert_reg; unsigned int fifo_threshold; struct vino_framebuffer_queue fb_queue; /* number of the current field */ unsigned int field; /* read in progress */ int reading; /* streaming is active */ int streaming; /* the driver is currently processing the queue */ int capturing; struct mutex mutex; spinlock_t capture_lock; unsigned int users; struct vino_interrupt_data int_data; /* V4L support */ struct video_device *vdev; }; struct vino_settings { struct v4l2_device v4l2_dev; struct vino_channel_settings a; struct vino_channel_settings b; /* the channel which owns this client: * VINO_NO_CHANNEL, VINO_CHANNEL_A or VINO_CHANNEL_B */ unsigned int decoder_owner; struct v4l2_subdev *decoder; unsigned int camera_owner; struct v4l2_subdev *camera; /* a 
lock for vino register access */ spinlock_t vino_lock; /* a lock for channel input changes */ spinlock_t input_lock; unsigned long dummy_page; struct vino_descriptor_table dummy_desc_table; }; /* Module parameters */ /* * Using vino_pixel_conversion the ABGR32-format pixels supplied * by the VINO chip can be converted to more common formats * like RGBA32 (or probably RGB24 in the future). This way we * can give out data that can be specified correctly with * the V4L2-definitions. * * The pixel format is specified as RGBA32 when no conversion * is used. * * Note that this only affects the 32-bit bit depth. * * Use non-zero value to enable conversion. */ static int vino_pixel_conversion; module_param_named(pixelconv, vino_pixel_conversion, int, 0); MODULE_PARM_DESC(pixelconv, "enable pixel conversion (non-zero value enables)"); /* Internal data structures */ static struct sgi_vino *vino; static struct vino_settings *vino_drvdata; #define camera_call(o, f, args...) \ v4l2_subdev_call(vino_drvdata->camera, o, f, ##args) #define decoder_call(o, f, args...) 
\ v4l2_subdev_call(vino_drvdata->decoder, o, f, ##args) static const char *vino_driver_name = "vino"; static const char *vino_driver_description = "SGI VINO"; static const char *vino_bus_name = "GIO64 bus"; static const char *vino_vdev_name_a = "SGI VINO Channel A"; static const char *vino_vdev_name_b = "SGI VINO Channel B"; static void vino_capture_tasklet(unsigned long channel); DECLARE_TASKLET(vino_tasklet_a, vino_capture_tasklet, VINO_CHANNEL_A); DECLARE_TASKLET(vino_tasklet_b, vino_capture_tasklet, VINO_CHANNEL_B); static const struct vino_input vino_inputs[] = { { .name = "Composite", .std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM, }, { .name = "S-Video", .std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM, }, { .name = "D1/IndyCam", .std = V4L2_STD_NTSC, } }; static const struct vino_data_format vino_data_formats[] = { { .description = "8-bit greyscale", .bpp = 1, .pixelformat = V4L2_PIX_FMT_GREY, .colorspace = V4L2_COLORSPACE_SMPTE170M, }, { .description = "8-bit dithered RGB 3-3-2", .bpp = 1, .pixelformat = V4L2_PIX_FMT_RGB332, .colorspace = V4L2_COLORSPACE_SRGB, }, { .description = "32-bit RGB", .bpp = 4, .pixelformat = V4L2_PIX_FMT_RGB32, .colorspace = V4L2_COLORSPACE_SRGB, }, { .description = "YUV 4:2:2", .bpp = 2, .pixelformat = V4L2_PIX_FMT_YUYV, // XXX: swapped? 
.colorspace = V4L2_COLORSPACE_SMPTE170M, } }; static const struct vino_data_norm vino_data_norms[] = { { .description = "NTSC", .std = V4L2_STD_NTSC, .fps_min = 6, .fps_max = 30, .framelines = 525, .width = VINO_NTSC_WIDTH, .height = VINO_NTSC_HEIGHT, .odd = { .top = VINO_CLIPPING_START_ODD_NTSC, .left = 0, .bottom = VINO_CLIPPING_START_ODD_NTSC + VINO_NTSC_HEIGHT / 2 - 1, .right = VINO_NTSC_WIDTH, }, .even = { .top = VINO_CLIPPING_START_EVEN_NTSC, .left = 0, .bottom = VINO_CLIPPING_START_EVEN_NTSC + VINO_NTSC_HEIGHT / 2 - 1, .right = VINO_NTSC_WIDTH, }, }, { .description = "PAL", .std = V4L2_STD_PAL, .fps_min = 5, .fps_max = 25, .framelines = 625, .width = VINO_PAL_WIDTH, .height = VINO_PAL_HEIGHT, .odd = { .top = VINO_CLIPPING_START_ODD_PAL, .left = 0, .bottom = VINO_CLIPPING_START_ODD_PAL + VINO_PAL_HEIGHT / 2 - 1, .right = VINO_PAL_WIDTH, }, .even = { .top = VINO_CLIPPING_START_EVEN_PAL, .left = 0, .bottom = VINO_CLIPPING_START_EVEN_PAL + VINO_PAL_HEIGHT / 2 - 1, .right = VINO_PAL_WIDTH, }, }, { .description = "SECAM", .std = V4L2_STD_SECAM, .fps_min = 5, .fps_max = 25, .framelines = 625, .width = VINO_PAL_WIDTH, .height = VINO_PAL_HEIGHT, .odd = { .top = VINO_CLIPPING_START_ODD_PAL, .left = 0, .bottom = VINO_CLIPPING_START_ODD_PAL + VINO_PAL_HEIGHT / 2 - 1, .right = VINO_PAL_WIDTH, }, .even = { .top = VINO_CLIPPING_START_EVEN_PAL, .left = 0, .bottom = VINO_CLIPPING_START_EVEN_PAL + VINO_PAL_HEIGHT / 2 - 1, .right = VINO_PAL_WIDTH, }, }, { .description = "NTSC/D1", .std = V4L2_STD_NTSC, .fps_min = 6, .fps_max = 30, .framelines = 525, .width = VINO_NTSC_WIDTH, .height = VINO_NTSC_HEIGHT, .odd = { .top = VINO_CLIPPING_START_ODD_D1, .left = 0, .bottom = VINO_CLIPPING_START_ODD_D1 + VINO_NTSC_HEIGHT / 2 - 1, .right = VINO_NTSC_WIDTH, }, .even = { .top = VINO_CLIPPING_START_EVEN_D1, .left = 0, .bottom = VINO_CLIPPING_START_EVEN_D1 + VINO_NTSC_HEIGHT / 2 - 1, .right = VINO_NTSC_WIDTH, }, } }; #define VINO_INDYCAM_V4L2_CONTROL_COUNT 9 struct v4l2_queryctrl 
vino_indycam_v4l2_controls[] = { { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Automatic Gain Control", .minimum = 0, .maximum = 1, .step = 1, .default_value = INDYCAM_AGC_DEFAULT, }, { .id = V4L2_CID_AUTO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Automatic White Balance", .minimum = 0, .maximum = 1, .step = 1, .default_value = INDYCAM_AWB_DEFAULT, }, { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = INDYCAM_GAIN_MIN, .maximum = INDYCAM_GAIN_MAX, .step = 1, .default_value = INDYCAM_GAIN_DEFAULT, }, { .id = INDYCAM_CONTROL_RED_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Red Saturation", .minimum = INDYCAM_RED_SATURATION_MIN, .maximum = INDYCAM_RED_SATURATION_MAX, .step = 1, .default_value = INDYCAM_RED_SATURATION_DEFAULT, }, { .id = INDYCAM_CONTROL_BLUE_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Blue Saturation", .minimum = INDYCAM_BLUE_SATURATION_MIN, .maximum = INDYCAM_BLUE_SATURATION_MAX, .step = 1, .default_value = INDYCAM_BLUE_SATURATION_DEFAULT, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Red Balance", .minimum = INDYCAM_RED_BALANCE_MIN, .maximum = INDYCAM_RED_BALANCE_MAX, .step = 1, .default_value = INDYCAM_RED_BALANCE_DEFAULT, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Blue Balance", .minimum = INDYCAM_BLUE_BALANCE_MIN, .maximum = INDYCAM_BLUE_BALANCE_MAX, .step = 1, .default_value = INDYCAM_BLUE_BALANCE_DEFAULT, }, { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Shutter Control", .minimum = INDYCAM_SHUTTER_MIN, .maximum = INDYCAM_SHUTTER_MAX, .step = 1, .default_value = INDYCAM_SHUTTER_DEFAULT, }, { .id = V4L2_CID_GAMMA, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gamma", .minimum = INDYCAM_GAMMA_MIN, .maximum = INDYCAM_GAMMA_MAX, .step = 1, .default_value = INDYCAM_GAMMA_DEFAULT, } }; #define VINO_SAA7191_V4L2_CONTROL_COUNT 9 struct v4l2_queryctrl vino_saa7191_v4l2_controls[] = { { .id = 
V4L2_CID_HUE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Hue", .minimum = SAA7191_HUE_MIN, .maximum = SAA7191_HUE_MAX, .step = 1, .default_value = SAA7191_HUE_DEFAULT, }, { .id = SAA7191_CONTROL_BANDPASS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Luminance Bandpass", .minimum = SAA7191_BANDPASS_MIN, .maximum = SAA7191_BANDPASS_MAX, .step = 1, .default_value = SAA7191_BANDPASS_DEFAULT, }, { .id = SAA7191_CONTROL_BANDPASS_WEIGHT, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Luminance Bandpass Weight", .minimum = SAA7191_BANDPASS_WEIGHT_MIN, .maximum = SAA7191_BANDPASS_WEIGHT_MAX, .step = 1, .default_value = SAA7191_BANDPASS_WEIGHT_DEFAULT, }, { .id = SAA7191_CONTROL_CORING, .type = V4L2_CTRL_TYPE_INTEGER, .name = "HF Luminance Coring", .minimum = SAA7191_CORING_MIN, .maximum = SAA7191_CORING_MAX, .step = 1, .default_value = SAA7191_CORING_DEFAULT, }, { .id = SAA7191_CONTROL_FORCE_COLOUR, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Force Colour", .minimum = SAA7191_FORCE_COLOUR_MIN, .maximum = SAA7191_FORCE_COLOUR_MAX, .step = 1, .default_value = SAA7191_FORCE_COLOUR_DEFAULT, }, { .id = SAA7191_CONTROL_CHROMA_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Chrominance Gain Control", .minimum = SAA7191_CHROMA_GAIN_MIN, .maximum = SAA7191_CHROMA_GAIN_MAX, .step = 1, .default_value = SAA7191_CHROMA_GAIN_DEFAULT, }, { .id = SAA7191_CONTROL_VTRC, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "VTR Time Constant", .minimum = SAA7191_VTRC_MIN, .maximum = SAA7191_VTRC_MAX, .step = 1, .default_value = SAA7191_VTRC_DEFAULT, }, { .id = SAA7191_CONTROL_LUMA_DELAY, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Luminance Delay Compensation", .minimum = SAA7191_LUMA_DELAY_MIN, .maximum = SAA7191_LUMA_DELAY_MAX, .step = 1, .default_value = SAA7191_LUMA_DELAY_DEFAULT, }, { .id = SAA7191_CONTROL_VNR, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Vertical Noise Reduction", .minimum = SAA7191_VNR_MIN, .maximum = SAA7191_VNR_MAX, .step = 1, .default_value = SAA7191_VNR_DEFAULT, } }; /* VINO framebuffer/DMA 
descriptor management */ static void vino_free_buffer_with_count(struct vino_framebuffer *fb, unsigned int count) { unsigned int i; dprintk("vino_free_buffer_with_count(): count = %d\n", count); for (i = 0; i < count; i++) { ClearPageReserved(virt_to_page((void *)fb->desc_table.virtual[i])); dma_unmap_single(NULL, fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i], PAGE_SIZE, DMA_FROM_DEVICE); free_page(fb->desc_table.virtual[i]); } dma_free_coherent(NULL, VINO_PAGE_RATIO * (fb->desc_table.page_count + 4) * sizeof(dma_addr_t), (void *)fb->desc_table.dma_cpu, fb->desc_table.dma); kfree(fb->desc_table.virtual); memset(fb, 0, sizeof(struct vino_framebuffer)); } static void vino_free_buffer(struct vino_framebuffer *fb) { vino_free_buffer_with_count(fb, fb->desc_table.page_count); } static int vino_allocate_buffer(struct vino_framebuffer *fb, unsigned int size) { unsigned int count, i, j; int ret = 0; dprintk("vino_allocate_buffer():\n"); if (size < 1) return -EINVAL; memset(fb, 0, sizeof(struct vino_framebuffer)); count = ((size / PAGE_SIZE) + 4) & ~3; dprintk("vino_allocate_buffer(): size = %d, count = %d\n", size, count); /* allocate memory for table with virtual (page) addresses */ fb->desc_table.virtual = (unsigned long *) kmalloc(count * sizeof(unsigned long), GFP_KERNEL); if (!fb->desc_table.virtual) return -ENOMEM; /* allocate memory for table with dma addresses * (has space for four extra descriptors) */ fb->desc_table.dma_cpu = dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) * sizeof(dma_addr_t), &fb->desc_table.dma, GFP_KERNEL | GFP_DMA); if (!fb->desc_table.dma_cpu) { ret = -ENOMEM; goto out_free_virtual; } /* allocate pages for the buffer and acquire the according * dma addresses */ for (i = 0; i < count; i++) { dma_addr_t dma_data_addr; fb->desc_table.virtual[i] = get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!fb->desc_table.virtual[i]) { ret = -ENOBUFS; break; } dma_data_addr = dma_map_single(NULL, (void *)fb->desc_table.virtual[i], PAGE_SIZE, 
DMA_FROM_DEVICE); for (j = 0; j < VINO_PAGE_RATIO; j++) { fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] = dma_data_addr + VINO_PAGE_SIZE * j; } SetPageReserved(virt_to_page((void *)fb->desc_table.virtual[i])); } /* page_count needs to be set anyway, because the descriptor table has * been allocated according to this number */ fb->desc_table.page_count = count; if (ret) { /* the descriptor with index i doesn't contain * a valid address yet */ vino_free_buffer_with_count(fb, i); return ret; } //fb->size = size; fb->size = count * PAGE_SIZE; fb->data_format = VINO_DATA_FMT_NONE; /* set the dma stop-bit for the last (count+1)th descriptor */ fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP; return 0; out_free_virtual: kfree(fb->desc_table.virtual); return ret; } #if 0 /* user buffers not fully implemented yet */ static int vino_prepare_user_buffer(struct vino_framebuffer *fb, void *user, unsigned int size) { unsigned int count, i, j; int ret = 0; dprintk("vino_prepare_user_buffer():\n"); if (size < 1) return -EINVAL; memset(fb, 0, sizeof(struct vino_framebuffer)); count = ((size / PAGE_SIZE)) & ~3; dprintk("vino_prepare_user_buffer(): size = %d, count = %d\n", size, count); /* allocate memory for table with virtual (page) addresses */ fb->desc_table.virtual = (unsigned long *) kmalloc(count * sizeof(unsigned long), GFP_KERNEL); if (!fb->desc_table.virtual) return -ENOMEM; /* allocate memory for table with dma addresses * (has space for four extra descriptors) */ fb->desc_table.dma_cpu = dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) * sizeof(dma_addr_t), &fb->desc_table.dma, GFP_KERNEL | GFP_DMA); if (!fb->desc_table.dma_cpu) { ret = -ENOMEM; goto out_free_virtual; } /* allocate pages for the buffer and acquire the according * dma addresses */ for (i = 0; i < count; i++) { dma_addr_t dma_data_addr; fb->desc_table.virtual[i] = get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!fb->desc_table.virtual[i]) { ret = -ENOBUFS; break; } dma_data_addr = 
dma_map_single(NULL, (void *)fb->desc_table.virtual[i], PAGE_SIZE, DMA_FROM_DEVICE); for (j = 0; j < VINO_PAGE_RATIO; j++) { fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] = dma_data_addr + VINO_PAGE_SIZE * j; } SetPageReserved(virt_to_page((void *)fb->desc_table.virtual[i])); } /* page_count needs to be set anyway, because the descriptor table has * been allocated according to this number */ fb->desc_table.page_count = count; if (ret) { /* the descriptor with index i doesn't contain * a valid address yet */ vino_free_buffer_with_count(fb, i); return ret; } //fb->size = size; fb->size = count * PAGE_SIZE; /* set the dma stop-bit for the last (count+1)th descriptor */ fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP; return 0; out_free_virtual: kfree(fb->desc_table.virtual); return ret; } #endif static void vino_sync_buffer(struct vino_framebuffer *fb) { int i; dprintk("vino_sync_buffer():\n"); for (i = 0; i < fb->desc_table.page_count; i++) dma_sync_single_for_cpu(NULL, fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i], PAGE_SIZE, DMA_FROM_DEVICE); } /* Framebuffer fifo functions (need to be locked externally) */ static inline void vino_fifo_init(struct vino_framebuffer_fifo *f, unsigned int length) { f->length = 0; f->used = 0; f->head = 0; f->tail = 0; if (length > VINO_FRAMEBUFFER_COUNT_MAX) length = VINO_FRAMEBUFFER_COUNT_MAX; f->length = length; } /* returns true/false */ static inline int vino_fifo_has_id(struct vino_framebuffer_fifo *f, unsigned int id) { unsigned int i; for (i = f->head; i == (f->tail - 1); i = (i + 1) % f->length) { if (f->data[i] == id) return 1; } return 0; } #if 0 /* returns true/false */ static inline int vino_fifo_full(struct vino_framebuffer_fifo *f) { return (f->used == f->length); } #endif static inline unsigned int vino_fifo_get_used(struct vino_framebuffer_fifo *f) { return f->used; } static int vino_fifo_enqueue(struct vino_framebuffer_fifo *f, unsigned int id) { if (id >= f->length) { return VINO_QUEUE_ERROR; } if 
(vino_fifo_has_id(f, id)) { return VINO_QUEUE_ERROR; } if (f->used < f->length) { f->data[f->tail] = id; f->tail = (f->tail + 1) % f->length; f->used++; } else { return VINO_QUEUE_ERROR; } return 0; } static int vino_fifo_peek(struct vino_framebuffer_fifo *f, unsigned int *id) { if (f->used > 0) { *id = f->data[f->head]; } else { return VINO_QUEUE_ERROR; } return 0; } static int vino_fifo_dequeue(struct vino_framebuffer_fifo *f, unsigned int *id) { if (f->used > 0) { *id = f->data[f->head]; f->head = (f->head + 1) % f->length; f->used--; } else { return VINO_QUEUE_ERROR; } return 0; } /* Framebuffer queue functions */ /* execute with queue_lock locked */ static void vino_queue_free_with_count(struct vino_framebuffer_queue *q, unsigned int length) { unsigned int i; q->length = 0; memset(&q->in, 0, sizeof(struct vino_framebuffer_fifo)); memset(&q->out, 0, sizeof(struct vino_framebuffer_fifo)); for (i = 0; i < length; i++) { dprintk("vino_queue_free_with_count(): freeing buffer %d\n", i); vino_free_buffer(q->buffer[i]); kfree(q->buffer[i]); } q->type = VINO_MEMORY_NONE; q->magic = 0; } static void vino_queue_free(struct vino_framebuffer_queue *q) { dprintk("vino_queue_free():\n"); if (q->magic != VINO_QUEUE_MAGIC) return; if (q->type != VINO_MEMORY_MMAP) return; mutex_lock(&q->queue_mutex); vino_queue_free_with_count(q, q->length); mutex_unlock(&q->queue_mutex); } static int vino_queue_init(struct vino_framebuffer_queue *q, unsigned int *length) { unsigned int i; int ret = 0; dprintk("vino_queue_init(): length = %d\n", *length); if (q->magic == VINO_QUEUE_MAGIC) { dprintk("vino_queue_init(): queue already initialized!\n"); return -EINVAL; } if (q->type != VINO_MEMORY_NONE) { dprintk("vino_queue_init(): queue already initialized!\n"); return -EINVAL; } if (*length < 1) return -EINVAL; mutex_lock(&q->queue_mutex); if (*length > VINO_FRAMEBUFFER_COUNT_MAX) *length = VINO_FRAMEBUFFER_COUNT_MAX; q->length = 0; for (i = 0; i < *length; i++) { dprintk("vino_queue_init(): 
allocating buffer %d\n", i); q->buffer[i] = kmalloc(sizeof(struct vino_framebuffer), GFP_KERNEL); if (!q->buffer[i]) { dprintk("vino_queue_init(): kmalloc() failed\n"); ret = -ENOMEM; break; } ret = vino_allocate_buffer(q->buffer[i], VINO_FRAMEBUFFER_SIZE); if (ret) { kfree(q->buffer[i]); dprintk("vino_queue_init(): " "vino_allocate_buffer() failed\n"); break; } q->buffer[i]->id = i; if (i > 0) { q->buffer[i]->offset = q->buffer[i - 1]->offset + q->buffer[i - 1]->size; } else { q->buffer[i]->offset = 0; } spin_lock_init(&q->buffer[i]->state_lock); dprintk("vino_queue_init(): buffer = %d, offset = %d, " "size = %d\n", i, q->buffer[i]->offset, q->buffer[i]->size); } if (ret) { vino_queue_free_with_count(q, i); *length = 0; } else { q->length = *length; vino_fifo_init(&q->in, q->length); vino_fifo_init(&q->out, q->length); q->type = VINO_MEMORY_MMAP; q->magic = VINO_QUEUE_MAGIC; } mutex_unlock(&q->queue_mutex); return ret; } static struct vino_framebuffer *vino_queue_add(struct vino_framebuffer_queue *q, unsigned int id) { struct vino_framebuffer *ret = NULL; unsigned int total; unsigned long flags; dprintk("vino_queue_add(): id = %d\n", id); if (q->magic != VINO_QUEUE_MAGIC) { return ret; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) goto out; if (id >= q->length) goto out; /* not needed?: if (vino_fifo_full(&q->out)) { goto out; }*/ /* check that outgoing queue isn't already full * (or that it won't become full) */ total = vino_fifo_get_used(&q->in) + vino_fifo_get_used(&q->out); if (total >= q->length) goto out; if (vino_fifo_enqueue(&q->in, id)) goto out; ret = q->buffer[id]; out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } static struct vino_framebuffer *vino_queue_transfer(struct vino_framebuffer_queue *q) { struct vino_framebuffer *ret = NULL; struct vino_framebuffer *fb; int id; unsigned long flags; dprintk("vino_queue_transfer():\n"); if (q->magic != VINO_QUEUE_MAGIC) { return ret; } spin_lock_irqsave(&q->queue_lock, flags); 
if (q->length == 0) goto out; // now this actually removes an entry from the incoming queue if (vino_fifo_dequeue(&q->in, &id)) { goto out; } dprintk("vino_queue_transfer(): id = %d\n", id); fb = q->buffer[id]; // we have already checked that the outgoing queue is not full, but... if (vino_fifo_enqueue(&q->out, id)) { printk(KERN_ERR "vino_queue_transfer(): " "outgoing queue is full, this shouldn't happen!\n"); goto out; } ret = fb; out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } /* returns true/false */ static int vino_queue_incoming_contains(struct vino_framebuffer_queue *q, unsigned int id) { int ret = 0; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return ret; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) goto out; ret = vino_fifo_has_id(&q->in, id); out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } /* returns true/false */ static int vino_queue_outgoing_contains(struct vino_framebuffer_queue *q, unsigned int id) { int ret = 0; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return ret; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) goto out; ret = vino_fifo_has_id(&q->out, id); out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } static int vino_queue_get_incoming(struct vino_framebuffer_queue *q, unsigned int *used) { int ret = 0; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return VINO_QUEUE_ERROR; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) { ret = VINO_QUEUE_ERROR; goto out; } *used = vino_fifo_get_used(&q->in); out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } static int vino_queue_get_outgoing(struct vino_framebuffer_queue *q, unsigned int *used) { int ret = 0; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return VINO_QUEUE_ERROR; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) { ret = VINO_QUEUE_ERROR; goto out; } *used = vino_fifo_get_used(&q->out); out: 
spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } #if 0 static int vino_queue_get_total(struct vino_framebuffer_queue *q, unsigned int *total) { int ret = 0; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return VINO_QUEUE_ERROR; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) { ret = VINO_QUEUE_ERROR; goto out; } *total = vino_fifo_get_used(&q->in) + vino_fifo_get_used(&q->out); out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } #endif static struct vino_framebuffer *vino_queue_peek(struct vino_framebuffer_queue *q, unsigned int *id) { struct vino_framebuffer *ret = NULL; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return ret; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) goto out; if (vino_fifo_peek(&q->in, id)) { goto out; } ret = q->buffer[*id]; out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } static struct vino_framebuffer *vino_queue_remove(struct vino_framebuffer_queue *q, unsigned int *id) { struct vino_framebuffer *ret = NULL; unsigned long flags; dprintk("vino_queue_remove():\n"); if (q->magic != VINO_QUEUE_MAGIC) { return ret; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) goto out; if (vino_fifo_dequeue(&q->out, id)) { goto out; } dprintk("vino_queue_remove(): id = %d\n", *id); ret = q->buffer[*id]; out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } static struct vino_framebuffer *vino_queue_get_buffer(struct vino_framebuffer_queue *q, unsigned int id) { struct vino_framebuffer *ret = NULL; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return ret; } spin_lock_irqsave(&q->queue_lock, flags); if (q->length == 0) goto out; if (id >= q->length) goto out; ret = q->buffer[id]; out: spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } static unsigned int vino_queue_get_length(struct vino_framebuffer_queue *q) { unsigned int length = 0; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return length; } 
spin_lock_irqsave(&q->queue_lock, flags); length = q->length; spin_unlock_irqrestore(&q->queue_lock, flags); return length; } static int vino_queue_has_mapped_buffers(struct vino_framebuffer_queue *q) { unsigned int i; int ret = 0; unsigned long flags; if (q->magic != VINO_QUEUE_MAGIC) { return ret; } spin_lock_irqsave(&q->queue_lock, flags); for (i = 0; i < q->length; i++) { if (q->buffer[i]->map_count > 0) { ret = 1; break; } } spin_unlock_irqrestore(&q->queue_lock, flags); return ret; } /* VINO functions */ /* execute with input_lock locked */ static void vino_update_line_size(struct vino_channel_settings *vcs) { unsigned int w = vcs->clipping.right - vcs->clipping.left; unsigned int d = vcs->decimation; unsigned int bpp = vino_data_formats[vcs->data_format].bpp; unsigned int lsize; dprintk("update_line_size(): before: w = %d, d = %d, " "line_size = %d\n", w, d, vcs->line_size); /* line size must be multiple of 8 bytes */ lsize = (bpp * (w / d)) & ~7; w = (lsize / bpp) * d; vcs->clipping.right = vcs->clipping.left + w; vcs->line_size = lsize; dprintk("update_line_size(): after: w = %d, d = %d, " "line_size = %d\n", w, d, vcs->line_size); } /* execute with input_lock locked */ static void vino_set_clipping(struct vino_channel_settings *vcs, unsigned int x, unsigned int y, unsigned int w, unsigned int h) { unsigned int maxwidth, maxheight; unsigned int d; maxwidth = vino_data_norms[vcs->data_norm].width; maxheight = vino_data_norms[vcs->data_norm].height; d = vcs->decimation; y &= ~1; /* odd/even fields */ if (x > maxwidth) { x = 0; } if (y > maxheight) { y = 0; } if (((w / d) < VINO_MIN_WIDTH) || ((h / d) < VINO_MIN_HEIGHT)) { w = VINO_MIN_WIDTH * d; h = VINO_MIN_HEIGHT * d; } if ((x + w) > maxwidth) { w = maxwidth - x; if ((w / d) < VINO_MIN_WIDTH) x = maxwidth - VINO_MIN_WIDTH * d; } if ((y + h) > maxheight) { h = maxheight - y; if ((h / d) < VINO_MIN_HEIGHT) y = maxheight - VINO_MIN_HEIGHT * d; } vcs->clipping.left = x; vcs->clipping.top = y; 
vcs->clipping.right = x + w; vcs->clipping.bottom = y + h; vino_update_line_size(vcs); dprintk("clipping %d, %d, %d, %d / %d - %d\n", vcs->clipping.left, vcs->clipping.top, vcs->clipping.right, vcs->clipping.bottom, vcs->decimation, vcs->line_size); } /* execute with input_lock locked */ static inline void vino_set_default_clipping(struct vino_channel_settings *vcs) { vino_set_clipping(vcs, 0, 0, vino_data_norms[vcs->data_norm].width, vino_data_norms[vcs->data_norm].height); } /* execute with input_lock locked */ static void vino_set_scaling(struct vino_channel_settings *vcs, unsigned int w, unsigned int h) { unsigned int x, y, curw, curh, d; x = vcs->clipping.left; y = vcs->clipping.top; curw = vcs->clipping.right - vcs->clipping.left; curh = vcs->clipping.bottom - vcs->clipping.top; d = max(curw / w, curh / h); dprintk("scaling w: %d, h: %d, curw: %d, curh: %d, d: %d\n", w, h, curw, curh, d); if (d < 1) { d = 1; } else if (d > 8) { d = 8; } vcs->decimation = d; vino_set_clipping(vcs, x, y, w * d, h * d); dprintk("scaling %d, %d, %d, %d / %d - %d\n", vcs->clipping.left, vcs->clipping.top, vcs->clipping.right, vcs->clipping.bottom, vcs->decimation, vcs->line_size); } /* execute with input_lock locked */ static inline void vino_set_default_scaling(struct vino_channel_settings *vcs) { vino_set_scaling(vcs, vcs->clipping.right - vcs->clipping.left, vcs->clipping.bottom - vcs->clipping.top); } /* execute with input_lock locked */ static void vino_set_framerate(struct vino_channel_settings *vcs, unsigned int fps) { unsigned int mask; switch (vcs->data_norm) { case VINO_DATA_NORM_NTSC: case VINO_DATA_NORM_D1: fps = (unsigned int)(fps / 6) * 6; // FIXME: round! 
if (fps < vino_data_norms[vcs->data_norm].fps_min) fps = vino_data_norms[vcs->data_norm].fps_min; if (fps > vino_data_norms[vcs->data_norm].fps_max) fps = vino_data_norms[vcs->data_norm].fps_max; switch (fps) { case 6: mask = 0x003; break; case 12: mask = 0x0c3; break; case 18: mask = 0x333; break; case 24: mask = 0x3ff; break; case 30: mask = 0xfff; break; default: mask = VINO_FRAMERT_FULL; } vcs->framert_reg = VINO_FRAMERT_RT(mask); break; case VINO_DATA_NORM_PAL: case VINO_DATA_NORM_SECAM: fps = (unsigned int)(fps / 5) * 5; // FIXME: round! if (fps < vino_data_norms[vcs->data_norm].fps_min) fps = vino_data_norms[vcs->data_norm].fps_min; if (fps > vino_data_norms[vcs->data_norm].fps_max) fps = vino_data_norms[vcs->data_norm].fps_max; switch (fps) { case 5: mask = 0x003; break; case 10: mask = 0x0c3; break; case 15: mask = 0x333; break; case 20: mask = 0x0ff; break; case 25: mask = 0x3ff; break; default: mask = VINO_FRAMERT_FULL; } vcs->framert_reg = VINO_FRAMERT_RT(mask) | VINO_FRAMERT_PAL; break; } vcs->fps = fps; } /* execute with input_lock locked */ static inline void vino_set_default_framerate(struct vino_channel_settings *vcs) { vino_set_framerate(vcs, vino_data_norms[vcs->data_norm].fps_max); } /* VINO I2C bus functions */ struct i2c_algo_sgi_data { void *data; /* private data for lowlevel routines */ unsigned (*getctrl)(void *data); void (*setctrl)(void *data, unsigned val); unsigned (*rdata)(void *data); void (*wdata)(void *data, unsigned val); int xfer_timeout; int ack_timeout; }; static int wait_xfer_done(struct i2c_algo_sgi_data *adap) { int i; for (i = 0; i < adap->xfer_timeout; i++) { if ((adap->getctrl(adap->data) & SGI_I2C_XFER_BUSY) == 0) return 0; udelay(1); } return -ETIMEDOUT; } static int wait_ack(struct i2c_algo_sgi_data *adap) { int i; if (wait_xfer_done(adap)) return -ETIMEDOUT; for (i = 0; i < adap->ack_timeout; i++) { if ((adap->getctrl(adap->data) & SGI_I2C_NACK) == 0) return 0; udelay(1); } return -ETIMEDOUT; } static int 
force_idle(struct i2c_algo_sgi_data *adap)
{
	int i;

	/* Request idle state and poll until the controller reports it. */
	adap->setctrl(adap->data, SGI_I2C_FORCE_IDLE);
	for (i = 0; i < adap->xfer_timeout; i++) {
		if ((adap->getctrl(adap->data) & SGI_I2C_NOT_IDLE) == 0)
			goto out;
		udelay(1);
	}
	return -ETIMEDOUT;
out:
	if (adap->getctrl(adap->data) & SGI_I2C_BUS_ERR)
		return -EIO;
	return 0;
}

/* Send the 7-bit chip address with the R/W bit (rd != 0 selects read).
 * Returns 0 on ACK, -EIO on bus or ACK failure. */
static int do_address(struct i2c_algo_sgi_data *adap, unsigned int addr,
		      int rd)
{
	if (rd)
		adap->setctrl(adap->data, SGI_I2C_NOT_IDLE);
	/* Check if bus is idle, eventually force it to do so */
	if (adap->getctrl(adap->data) & SGI_I2C_NOT_IDLE)
		if (force_idle(adap))
			return -EIO;
	/* Write out the i2c chip address and specify operation */
	adap->setctrl(adap->data,
		      SGI_I2C_HOLD_BUS | SGI_I2C_WRITE | SGI_I2C_NOT_IDLE);
	if (rd)
		addr |= 1;	/* low bit of the address byte selects read */
	adap->wdata(adap->data, addr);
	if (wait_ack(adap))
		return -EIO;
	return 0;
}

/* Read len bytes from the already-addressed slave into buf. */
static int i2c_read(struct i2c_algo_sgi_data *adap, unsigned char *buf,
		    unsigned int len)
{
	int i;

	adap->setctrl(adap->data,
		      SGI_I2C_HOLD_BUS | SGI_I2C_READ | SGI_I2C_NOT_IDLE);
	for (i = 0; i < len; i++) {
		if (wait_xfer_done(adap))
			return -EIO;
		buf[i] = adap->rdata(adap->data);
	}
	/* release the bus and return to idle after the last byte */
	adap->setctrl(adap->data, SGI_I2C_RELEASE_BUS | SGI_I2C_FORCE_IDLE);

	return 0;
}

/* Write len bytes from buf; each byte must be ACKed by the slave. */
static int i2c_write(struct i2c_algo_sgi_data *adap, unsigned char *buf,
		     unsigned int len)
{
	int i;

	/* We are already in write state */
	for (i = 0; i < len; i++) {
		adap->wdata(adap->data, buf[i]);
		if (wait_ack(adap))
			return -EIO;
	}

	return 0;
}

/* i2c_algorithm.master_xfer: run a sequence of i2c_msg transactions.
 * Returns the number of messages processed, or a negative error code. */
static int sgi_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
		    int num)
{
	struct i2c_algo_sgi_data *adap = i2c_adap->algo_data;
	struct i2c_msg *p;
	int i, err = 0;

	for (i = 0; !err && i < num; i++) {
		p = &msgs[i];
		err = do_address(adap, p->addr, p->flags & I2C_M_RD);
		if (err || !p->len)
			continue;
		if (p->flags & I2C_M_RD)
			err = i2c_read(adap, p->buf, p->len);
		else
			err = i2c_write(adap, p->buf, p->len);
	}

	return (err < 0) ? err : i;
}

static u32 sgi_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm sgi_algo = {
	.master_xfer = sgi_xfer,
	.functionality = sgi_func,
};

/* Low-level accessors mapping the generic SGI I2C algorithm onto the
 * memory-mapped VINO I2C control/data registers. */
static unsigned i2c_vino_getctrl(void *data)
{
	return vino->i2c_control;
}

static void i2c_vino_setctrl(void *data, unsigned val)
{
	vino->i2c_control = val;
}

static unsigned i2c_vino_rdata(void *data)
{
	return vino->i2c_data;
}

static void i2c_vino_wdata(void *data, unsigned val)
{
	vino->i2c_data = val;
}

static struct i2c_algo_sgi_data i2c_sgi_vino_data = {
	.getctrl = &i2c_vino_getctrl,
	.setctrl = &i2c_vino_setctrl,
	.rdata = &i2c_vino_rdata,
	.wdata = &i2c_vino_wdata,
	.xfer_timeout = 200,
	.ack_timeout = 1000,
};

static struct i2c_adapter vino_i2c_adapter = {
	.name = "VINO I2C bus",
	.algo = &sgi_algo,
	.algo_data = &i2c_sgi_vino_data,
	.owner = THIS_MODULE,
};

/*
 * Prepare VINO for DMA transfer...
 * (execute only with vino_lock and input_lock locked)
 */
static int vino_dma_setup(struct vino_channel_settings *vcs,
			  struct vino_framebuffer *fb)
{
	u32 ctrl, intr;
	struct sgi_vino_channel *ch;
	const struct vino_data_norm *norm;

	dprintk("vino_dma_setup():\n");

	vcs->field = 0;
	fb->frame_counter = 0;

	ch = (vcs->channel == VINO_CHANNEL_A) ?
&vino->a : &vino->b;
	norm = &vino_data_norms[vcs->data_norm];

	ch->page_index = 0;
	ch->line_count = 0;

	/* VINO line size register is set 8 bytes less than actual */
	ch->line_size = vcs->line_size - 8;

	/* let VINO know where to transfer data */
	ch->start_desc_tbl = fb->desc_table.dma;
	ch->next_4_desc = fb->desc_table.dma;

	/* give vino time to fetch the first four descriptors, 5 usec
	 * should be more than enough time */
	udelay(VINO_DESC_FETCH_DELAY);

	dprintk("vino_dma_setup(): start desc = %08x, next 4 desc = %08x\n",
		ch->start_desc_tbl, ch->next_4_desc);

	/* set the alpha register */
	ch->alpha = vcs->alpha;

	/* set clipping registers (clipping.top/bottom are frame lines,
	 * the hardware counts per-field lines, hence the division by 2) */
	ch->clip_start = VINO_CLIP_ODD(norm->odd.top +
				       vcs->clipping.top / 2) |
		VINO_CLIP_EVEN(norm->even.top +
			       vcs->clipping.top / 2) |
		VINO_CLIP_X(vcs->clipping.left);
	ch->clip_end = VINO_CLIP_ODD(norm->odd.top +
				     vcs->clipping.bottom / 2 - 1) |
		VINO_CLIP_EVEN(norm->even.top +
			       vcs->clipping.bottom / 2 - 1) |
		VINO_CLIP_X(vcs->clipping.right);

	/* set the size of actual content in the buffer (DECIMATION !) */
	fb->data_size = ((vcs->clipping.right - vcs->clipping.left) /
			 vcs->decimation) *
		((vcs->clipping.bottom - vcs->clipping.top) /
		 vcs->decimation) *
		vino_data_formats[vcs->data_format].bpp;

	ch->frame_rate = vcs->framert_reg;

	ctrl = vino->control;
	intr = vino->intr_status;

	if (vcs->channel == VINO_CHANNEL_A) {
		/* All interrupt conditions for this channel was cleared
		 * so clear the interrupt status register and enable
		 * interrupts */
		intr &= ~VINO_INTSTAT_A;
		ctrl |= VINO_CTRL_A_INT;

		/* enable synchronization */
		ctrl |= VINO_CTRL_A_SYNC_ENBL;

		/* enable frame assembly */
		ctrl |= VINO_CTRL_A_INTERLEAVE_ENBL;

		/* set decimation used */
		if (vcs->decimation < 2)
			ctrl &= ~VINO_CTRL_A_DEC_ENBL;
		else {
			ctrl |= VINO_CTRL_A_DEC_ENBL;
			ctrl &= ~VINO_CTRL_A_DEC_SCALE_MASK;
			ctrl |= (vcs->decimation - 1) <<
				VINO_CTRL_A_DEC_SCALE_SHIFT;
		}

		/* select input interface */
		if (vcs->input == VINO_INPUT_D1)
			ctrl |= VINO_CTRL_A_SELECT;
		else
			ctrl &= ~VINO_CTRL_A_SELECT;

		/* palette */
		ctrl &= ~(VINO_CTRL_A_LUMA_ONLY | VINO_CTRL_A_RGB |
			  VINO_CTRL_A_DITHER);
	} else {
		/* same programming for channel B */
		intr &= ~VINO_INTSTAT_B;
		ctrl |= VINO_CTRL_B_INT;

		ctrl |= VINO_CTRL_B_SYNC_ENBL;
		ctrl |= VINO_CTRL_B_INTERLEAVE_ENBL;

		if (vcs->decimation < 2)
			ctrl &= ~VINO_CTRL_B_DEC_ENBL;
		else {
			ctrl |= VINO_CTRL_B_DEC_ENBL;
			ctrl &= ~VINO_CTRL_B_DEC_SCALE_MASK;
			ctrl |= (vcs->decimation - 1) <<
				VINO_CTRL_B_DEC_SCALE_SHIFT;
		}
		if (vcs->input == VINO_INPUT_D1)
			ctrl |= VINO_CTRL_B_SELECT;
		else
			ctrl &= ~VINO_CTRL_B_SELECT;

		ctrl &= ~(VINO_CTRL_B_LUMA_ONLY | VINO_CTRL_B_RGB |
			  VINO_CTRL_B_DITHER);
	}

	/* set palette */
	fb->data_format = vcs->data_format;

	switch (vcs->data_format) {
	case VINO_DATA_FMT_GREY:
		ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
			VINO_CTRL_A_LUMA_ONLY : VINO_CTRL_B_LUMA_ONLY;
		break;
	case VINO_DATA_FMT_RGB32:
		ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
			VINO_CTRL_A_RGB : VINO_CTRL_B_RGB;
		break;
	case VINO_DATA_FMT_YUV:
		/* nothing needs to be done */
		break;
	case VINO_DATA_FMT_RGB332:
		ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
			VINO_CTRL_A_RGB | VINO_CTRL_A_DITHER :
			VINO_CTRL_B_RGB | VINO_CTRL_B_DITHER;
		break;
	}

	/* write back: interrupt status first, then control */
	vino->intr_status = intr;
	vino->control = ctrl;

	return 0;
}

/* Enable DMA for the given channel.
 * (execute only with vino_lock locked) */
static inline void vino_dma_start(struct vino_channel_settings *vcs)
{
	u32 ctrl = vino->control;

	dprintk("vino_dma_start():\n");
	ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
		VINO_CTRL_A_DMA_ENBL : VINO_CTRL_B_DMA_ENBL;
	vino->control = ctrl;
}

/* Disable DMA and interrupts for the given channel.
 * (execute only with vino_lock locked) */
static inline void vino_dma_stop(struct vino_channel_settings *vcs)
{
	u32 ctrl = vino->control;

	ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
		~VINO_CTRL_A_DMA_ENBL : ~VINO_CTRL_B_DMA_ENBL;
	ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
		~VINO_CTRL_A_INT : ~VINO_CTRL_B_INT;
	vino->control = ctrl;
	dprintk("vino_dma_stop():\n");
}

/*
 * Load dummy page to descriptor registers. This prevents generating of
 * spurious interrupts.
 * (execute only with vino_lock locked)
 */
static void vino_clear_interrupt(struct vino_channel_settings *vcs)
{
	struct sgi_vino_channel *ch;

	ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;

	ch->page_index = 0;
	ch->line_count = 0;

	ch->start_desc_tbl = vino_drvdata->dummy_desc_table.dma;
	ch->next_4_desc = vino_drvdata->dummy_desc_table.dma;

	/* let the hardware fetch the dummy descriptors */
	udelay(VINO_DESC_FETCH_DELAY);
	dprintk("channel %c clear interrupt condition\n",
		(vcs->channel == VINO_CHANNEL_A) ?
'A':'B');
}

/* Mark fb busy and start a DMA capture into it.
 * Returns -EBUSY if the buffer is already being filled. */
static int vino_capture(struct vino_channel_settings *vcs,
			struct vino_framebuffer *fb)
{
	int err = 0;
	unsigned long flags, flags2;

	spin_lock_irqsave(&fb->state_lock, flags);

	if (fb->state == VINO_FRAMEBUFFER_IN_USE)
		err = -EBUSY;
	fb->state = VINO_FRAMEBUFFER_IN_USE;

	spin_unlock_irqrestore(&fb->state_lock, flags);

	if (err)
		return err;

	spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
	spin_lock_irqsave(&vino_drvdata->input_lock, flags2);

	vino_dma_setup(vcs, fb);
	vino_dma_start(vcs);

	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags2);
	spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);

	return err;
}

/* Queue the buffer with the given index for capture.
 * Returns the framebuffer, or NULL if the queue is full. */
static struct vino_framebuffer *vino_capture_enqueue(struct
						     vino_channel_settings
						     *vcs,
						     unsigned int index)
{
	struct vino_framebuffer *fb;
	unsigned long flags;

	dprintk("vino_capture_enqueue():\n");

	spin_lock_irqsave(&vcs->capture_lock, flags);

	fb = vino_queue_add(&vcs->fb_queue, index);
	if (fb == NULL) {
		dprintk("vino_capture_enqueue(): vino_queue_add() failed, "
			"queue full?\n");
		goto out;
	}
out:
	spin_unlock_irqrestore(&vcs->capture_lock, flags);

	return fb;
}

/* Start capture into the next queued buffer.  With start != 0 this
 * begins a new capture session; with start == 0 it continues an
 * ongoing one (and is a no-op if capturing was stopped meanwhile). */
static int vino_capture_next(struct vino_channel_settings *vcs, int start)
{
	struct vino_framebuffer *fb;
	unsigned int incoming, id;
	int err = 0;
	unsigned long flags;

	dprintk("vino_capture_next():\n");

	spin_lock_irqsave(&vcs->capture_lock, flags);

	if (start) {
		/* start capture only if capture isn't in progress already */
		if (vcs->capturing) {
			spin_unlock_irqrestore(&vcs->capture_lock, flags);
			return 0;
		}
	} else {
		/* capture next frame:
		 * stop capture if capturing is not set */
		if (!vcs->capturing) {
			spin_unlock_irqrestore(&vcs->capture_lock, flags);
			return 0;
		}
	}

	err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
	if (err) {
		dprintk("vino_capture_next(): vino_queue_get_incoming() "
			"failed\n");
		err = -EINVAL;
		goto out;
	}
	if (incoming == 0) {
		dprintk("vino_capture_next(): no buffers available\n");
		goto out;
	}

	fb = vino_queue_peek(&vcs->fb_queue, &id);
	if (fb == NULL) {
		dprintk("vino_capture_next(): vino_queue_peek() failed\n");
		err = -EINVAL;
		goto out;
	}

	if (start) {
		vcs->capturing = 1;
	}

	spin_unlock_irqrestore(&vcs->capture_lock, flags);

	err = vino_capture(vcs, fb);

	return err;

out:
	/* any failure path stops the capture session */
	vcs->capturing = 0;
	spin_unlock_irqrestore(&vcs->capture_lock, flags);

	return err;
}

/* Read the capturing flag under capture_lock. */
static inline int vino_is_capturing(struct vino_channel_settings *vcs)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&vcs->capture_lock, flags);
	ret = vcs->capturing;
	spin_unlock_irqrestore(&vcs->capture_lock, flags);

	return ret;
}

/* waits until a frame is captured */
static int vino_wait_for_frame(struct vino_channel_settings *vcs)
{
	wait_queue_t wait;
	int err = 0;

	dprintk("vino_wait_for_frame():\n");

	init_waitqueue_entry(&wait, current);
	/* add ourselves into wait queue */
	add_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);

	/* to ensure that schedule_timeout will return immediately
	 * if VINO interrupt was triggered meanwhile */
	schedule_timeout_interruptible(msecs_to_jiffies(100));

	if (signal_pending(current))
		err = -EINTR;

	remove_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);

	dprintk("vino_wait_for_frame(): waiting for frame %s\n",
		err ?
"failed" : "ok"); return err; } /* the function assumes that PAGE_SIZE % 4 == 0 */ static void vino_convert_to_rgba(struct vino_framebuffer *fb) { unsigned char *pageptr; unsigned int page, i; unsigned char a; for (page = 0; page < fb->desc_table.page_count; page++) { pageptr = (unsigned char *)fb->desc_table.virtual[page]; for (i = 0; i < PAGE_SIZE; i += 4) { a = pageptr[0]; pageptr[0] = pageptr[3]; pageptr[1] = pageptr[2]; pageptr[2] = pageptr[1]; pageptr[3] = a; pageptr += 4; } } } /* checks if the buffer is in correct state and syncs data */ static int vino_check_buffer(struct vino_channel_settings *vcs, struct vino_framebuffer *fb) { int err = 0; unsigned long flags; dprintk("vino_check_buffer():\n"); spin_lock_irqsave(&fb->state_lock, flags); switch (fb->state) { case VINO_FRAMEBUFFER_IN_USE: err = -EIO; break; case VINO_FRAMEBUFFER_READY: vino_sync_buffer(fb); fb->state = VINO_FRAMEBUFFER_UNUSED; break; default: err = -EINVAL; } spin_unlock_irqrestore(&fb->state_lock, flags); if (!err) { if (vino_pixel_conversion && (fb->data_format == VINO_DATA_FMT_RGB32)) { vino_convert_to_rgba(fb); } } else if (err && (err != -EINVAL)) { dprintk("vino_check_buffer(): buffer not ready\n"); spin_lock_irqsave(&vino_drvdata->vino_lock, flags); vino_dma_stop(vcs); vino_clear_interrupt(vcs); spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags); } return err; } /* forcefully terminates capture */ static void vino_capture_stop(struct vino_channel_settings *vcs) { unsigned int incoming = 0, outgoing = 0, id; unsigned long flags, flags2; dprintk("vino_capture_stop():\n"); spin_lock_irqsave(&vcs->capture_lock, flags); /* unset capturing to stop queue processing */ vcs->capturing = 0; spin_lock_irqsave(&vino_drvdata->vino_lock, flags2); vino_dma_stop(vcs); vino_clear_interrupt(vcs); spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags2); /* remove all items from the queue */ if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) { dprintk("vino_capture_stop(): " 
"vino_queue_get_incoming() failed\n"); goto out; } while (incoming > 0) { vino_queue_transfer(&vcs->fb_queue); if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) { dprintk("vino_capture_stop(): " "vino_queue_get_incoming() failed\n"); goto out; } } if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) { dprintk("vino_capture_stop(): " "vino_queue_get_outgoing() failed\n"); goto out; } while (outgoing > 0) { vino_queue_remove(&vcs->fb_queue, &id); if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) { dprintk("vino_capture_stop(): " "vino_queue_get_outgoing() failed\n"); goto out; } } out: spin_unlock_irqrestore(&vcs->capture_lock, flags); } #if 0 static int vino_capture_failed(struct vino_channel_settings *vcs) { struct vino_framebuffer *fb; unsigned long flags; unsigned int i; int ret; dprintk("vino_capture_failed():\n"); spin_lock_irqsave(&vino_drvdata->vino_lock, flags); vino_dma_stop(vcs); vino_clear_interrupt(vcs); spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags); ret = vino_queue_get_incoming(&vcs->fb_queue, &i); if (ret == VINO_QUEUE_ERROR) { dprintk("vino_queue_get_incoming() failed\n"); return -EINVAL; } if (i == 0) { /* no buffers to process */ return 0; } fb = vino_queue_peek(&vcs->fb_queue, &i); if (fb == NULL) { dprintk("vino_queue_peek() failed\n"); return -EINVAL; } spin_lock_irqsave(&fb->state_lock, flags); if (fb->state == VINO_FRAMEBUFFER_IN_USE) { fb->state = VINO_FRAMEBUFFER_UNUSED; vino_queue_transfer(&vcs->fb_queue); vino_queue_remove(&vcs->fb_queue, &i); /* we should actually discard the newest frame, * but who cares ... 
*/ } spin_unlock_irqrestore(&fb->state_lock, flags); return 0; } #endif static void vino_skip_frame(struct vino_channel_settings *vcs) { struct vino_framebuffer *fb; unsigned long flags; unsigned int id; spin_lock_irqsave(&vcs->capture_lock, flags); fb = vino_queue_peek(&vcs->fb_queue, &id); if (!fb) { spin_unlock_irqrestore(&vcs->capture_lock, flags); dprintk("vino_skip_frame(): vino_queue_peek() failed!\n"); return; } spin_unlock_irqrestore(&vcs->capture_lock, flags); spin_lock_irqsave(&fb->state_lock, flags); fb->state = VINO_FRAMEBUFFER_UNUSED; spin_unlock_irqrestore(&fb->state_lock, flags); vino_capture_next(vcs, 0); } static void vino_frame_done(struct vino_channel_settings *vcs) { struct vino_framebuffer *fb; unsigned long flags; spin_lock_irqsave(&vcs->capture_lock, flags); fb = vino_queue_transfer(&vcs->fb_queue); if (!fb) { spin_unlock_irqrestore(&vcs->capture_lock, flags); dprintk("vino_frame_done(): vino_queue_transfer() failed!\n"); return; } spin_unlock_irqrestore(&vcs->capture_lock, flags); fb->frame_counter = vcs->int_data.frame_counter; memcpy(&fb->timestamp, &vcs->int_data.timestamp, sizeof(struct timeval)); spin_lock_irqsave(&fb->state_lock, flags); if (fb->state == VINO_FRAMEBUFFER_IN_USE) fb->state = VINO_FRAMEBUFFER_READY; spin_unlock_irqrestore(&fb->state_lock, flags); wake_up(&vcs->fb_queue.frame_wait_queue); vino_capture_next(vcs, 0); } static void vino_capture_tasklet(unsigned long channel) { struct vino_channel_settings *vcs; vcs = (channel == VINO_CHANNEL_A) ? 
&vino_drvdata->a : &vino_drvdata->b;

	if (vcs->int_data.skip)
		vcs->int_data.skip_count++;

	/* give up skipping after VINO_MAX_FRAME_SKIP_COUNT tries and
	 * deliver the frame anyway */
	if (vcs->int_data.skip && (vcs->int_data.skip_count <=
				   VINO_MAX_FRAME_SKIP_COUNT)) {
		vino_skip_frame(vcs);
	} else {
		vcs->int_data.skip_count = 0;
		vino_frame_done(vcs);
	}
}

/* Top-half interrupt handler shared by both channels.  A frame is
 * complete after two end-of-field interrupts (odd + even field);
 * error interrupts stop DMA and mark the frame to be skipped. */
static irqreturn_t vino_interrupt(int irq, void *dev_id)
{
	u32 ctrl, intr;
	unsigned int fc_a, fc_b;
	int handled_a = 0, skip_a = 0, done_a = 0;
	int handled_b = 0, skip_b = 0, done_b = 0;

#ifdef VINO_DEBUG_INT
	int loop = 0;
	unsigned int line_count = vino->a.line_count,
		page_index = vino->a.page_index,
		field_counter = vino->a.field_counter,
		start_desc_tbl = vino->a.start_desc_tbl,
		next_4_desc = vino->a.next_4_desc;
	unsigned int line_count_2,
		page_index_2,
		field_counter_2,
		start_desc_tbl_2,
		next_4_desc_2;
#endif

	spin_lock(&vino_drvdata->vino_lock);

	while ((intr = vino->intr_status)) {
		fc_a = vino->a.field_counter >> 1;
		fc_b = vino->b.field_counter >> 1;

		/* handle error-interrupts in some special way ?
		 * --> skips frames */

		if (intr & VINO_INTSTAT_A) {
			if (intr & VINO_INTSTAT_A_EOF) {
				vino_drvdata->a.field++;
				if (vino_drvdata->a.field > 1) {
					/* both fields captured:
					 * frame complete */
					vino_dma_stop(&vino_drvdata->a);
					vino_clear_interrupt(&vino_drvdata->a);
					vino_drvdata->a.field = 0;
					done_a = 1;
				} else {
					if (vino->a.page_index
					    != vino_drvdata->a.line_size) {
						vino->a.line_count = 0;
						vino->a.page_index =
							vino_drvdata->
							a.line_size;
						vino->a.next_4_desc =
							vino->a.start_desc_tbl;
					}
				}
				dprintk("channel A end-of-field "
					"interrupt: %04x\n", intr);
			} else {
				vino_dma_stop(&vino_drvdata->a);
				vino_clear_interrupt(&vino_drvdata->a);
				vino_drvdata->a.field = 0;
				skip_a = 1;
				dprintk("channel A error interrupt: %04x\n",
					intr);
			}

#ifdef VINO_DEBUG_INT
			line_count_2 = vino->a.line_count;
			page_index_2 = vino->a.page_index;
			field_counter_2 = vino->a.field_counter;
			start_desc_tbl_2 = vino->a.start_desc_tbl;
			next_4_desc_2 = vino->a.next_4_desc;

			printk("intr = %04x, loop = %d, field = %d\n",
			       intr, loop, vino_drvdata->a.field);
			printk("1- line count = %04d, page index = %04d, "
			       "start = %08x, next = %08x\n"
			       " fieldc = %d, framec = %d\n",
			       line_count, page_index, start_desc_tbl,
			       next_4_desc, field_counter, fc_a);
			printk("12-line count = %04d, page index = %04d, "
			       " start = %08x, next = %08x\n",
			       line_count_2, page_index_2, start_desc_tbl_2,
			       next_4_desc_2);

			if (done_a)
				printk("\n");
#endif
		}

		if (intr & VINO_INTSTAT_B) {
			if (intr & VINO_INTSTAT_B_EOF) {
				vino_drvdata->b.field++;
				if (vino_drvdata->b.field > 1) {
					vino_dma_stop(&vino_drvdata->b);
					vino_clear_interrupt(&vino_drvdata->b);
					vino_drvdata->b.field = 0;
					done_b = 1;
				}
				dprintk("channel B end-of-field "
					"interrupt: %04x\n", intr);
			} else {
				vino_dma_stop(&vino_drvdata->b);
				vino_clear_interrupt(&vino_drvdata->b);
				vino_drvdata->b.field = 0;
				skip_b = 1;
				dprintk("channel B error interrupt: %04x\n",
					intr);
			}
		}

		/* Always remember to clear interrupt status.
		 * Disable VINO interrupts while we do this. */
		ctrl = vino->control;
		vino->control = ctrl & ~(VINO_CTRL_A_INT | VINO_CTRL_B_INT);
		vino->intr_status = ~intr;
		vino->control = ctrl;

		spin_unlock(&vino_drvdata->vino_lock);

		/* the lock is dropped while scheduling the bottom halves */
		if ((!handled_a) && (done_a || skip_a)) {
			if (!skip_a) {
				do_gettimeofday(&vino_drvdata->
						a.int_data.timestamp);
				vino_drvdata->a.int_data.frame_counter = fc_a;
			}
			vino_drvdata->a.int_data.skip = skip_a;

			dprintk("channel A %s, interrupt: %d\n",
				skip_a ? "skipping frame" : "frame done",
				intr);
			tasklet_hi_schedule(&vino_tasklet_a);
			handled_a = 1;
		}

		if ((!handled_b) && (done_b || skip_b)) {
			if (!skip_b) {
				do_gettimeofday(&vino_drvdata->
						b.int_data.timestamp);
				vino_drvdata->b.int_data.frame_counter = fc_b;
			}
			vino_drvdata->b.int_data.skip = skip_b;
			dprintk("channel B %s, interrupt: %d\n",
				skip_b ? "skipping frame" : "frame done",
				intr);
			tasklet_hi_schedule(&vino_tasklet_b);
			handled_b = 1;
		}

#ifdef VINO_DEBUG_INT
		loop++;
#endif
		spin_lock(&vino_drvdata->vino_lock);
	}

	spin_unlock(&vino_drvdata->vino_lock);

	return IRQ_HANDLED;
}

/* VINO video input management */

/* Map a VINO input id onto the SAA7191 decoder routing value;
 * returns -1 for inputs the decoder does not serve. */
static int vino_get_saa7191_input(int input)
{
	switch (input) {
	case VINO_INPUT_COMPOSITE:
		return SAA7191_INPUT_COMPOSITE;
	case VINO_INPUT_SVIDEO:
		return SAA7191_INPUT_SVIDEO;
	default:
		printk(KERN_ERR "VINO: vino_get_saa7191_input(): "
		       "invalid input!\n");
		return -1;
	}
}

/* Does this channel currently own the device feeding its input?
 * execute with input_lock locked */
static int vino_is_input_owner(struct vino_channel_settings *vcs)
{
	switch(vcs->input) {
	case VINO_INPUT_COMPOSITE:
	case VINO_INPUT_SVIDEO:
		return vino_drvdata->decoder_owner == vcs->channel;
	case VINO_INPUT_D1:
		return vino_drvdata->camera_owner == vcs->channel;
	default:
		return 0;
	}
}

/* Grab an input for the channel at open time: prefer an unowned D1
 * camera, then an unowned SAA7191 decoder, else share the other
 * channel's input.  Returns -ENODEV when nothing is available. */
static int vino_acquire_input(struct vino_channel_settings *vcs)
{
	unsigned long flags;
	int ret = 0;

	dprintk("vino_acquire_input():\n");

	spin_lock_irqsave(&vino_drvdata->input_lock, flags);

	/* First try D1 and then SAA7191 */
	if (vino_drvdata->camera
	    && (vino_drvdata->camera_owner == VINO_NO_CHANNEL)) {
		vino_drvdata->camera_owner = vcs->channel;
		vcs->input = VINO_INPUT_D1;
		vcs->data_norm = VINO_DATA_NORM_D1;
	} else if (vino_drvdata->decoder
		   && (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
		int input;
		int data_norm;
		v4l2_std_id norm;

		input = VINO_INPUT_COMPOSITE;

		ret = decoder_call(video, s_routing,
				   vino_get_saa7191_input(input), 0, 0);
		if (ret) {
			ret = -EINVAL;
			goto out;
		}

		spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);

		/* Don't hold spinlocks while auto-detecting norm
		 * as it may take a while...
		 */
		ret = decoder_call(video, querystd, &norm);
		if (!ret) {
			/* find the matching norm table entry;
			 * fall back to PAL when none matches */
			for (data_norm = 0; data_norm < 3; data_norm++) {
				if (vino_data_norms[data_norm].std & norm)
					break;
			}
			if (data_norm == 3)
				data_norm = VINO_DATA_NORM_PAL;
			ret = decoder_call(core, s_std, norm);
		}

		spin_lock_irqsave(&vino_drvdata->input_lock, flags);

		if (ret) {
			ret = -EINVAL;
			goto out;
		}

		vino_drvdata->decoder_owner = vcs->channel;

		vcs->input = input;
		vcs->data_norm = data_norm;
	} else {
		/* both devices owned: share the other channel's input */
		vcs->input = (vcs->channel == VINO_CHANNEL_A) ?
			vino_drvdata->b.input : vino_drvdata->a.input;
		vcs->data_norm = (vcs->channel == VINO_CHANNEL_A) ?
			vino_drvdata->b.data_norm : vino_drvdata->a.data_norm;
	}

	if (vcs->input == VINO_INPUT_NONE) {
		ret = -ENODEV;
		goto out;
	}

	vino_set_default_clipping(vcs);
	vino_set_default_scaling(vcs);
	vino_set_default_framerate(vcs);

	dprintk("vino_acquire_input(): %s\n", vino_inputs[vcs->input].name);

out:
	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);

	return ret;
}

/* Switch the channel to a new input, acquiring decoder/camera
 * ownership (or sharing it with the other channel) as needed and
 * transferring/releasing the ownership of the old input.
 * Returns -EBUSY when the wanted device is owned by the other channel
 * on a different input. */
static int vino_set_input(struct vino_channel_settings *vcs, int input)
{
	struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
		&vino_drvdata->b : &vino_drvdata->a;
	unsigned long flags;
	int ret = 0;

	dprintk("vino_set_input():\n");

	spin_lock_irqsave(&vino_drvdata->input_lock, flags);

	if (vcs->input == input)
		goto out;

	switch (input) {
	case VINO_INPUT_COMPOSITE:
	case VINO_INPUT_SVIDEO:
		if (!vino_drvdata->decoder) {
			ret = -EINVAL;
			goto out;
		}

		if (vino_drvdata->decoder_owner == VINO_NO_CHANNEL) {
			vino_drvdata->decoder_owner = vcs->channel;
		}

		if (vino_drvdata->decoder_owner == vcs->channel) {
			int data_norm;
			v4l2_std_id norm;

			ret = decoder_call(video, s_routing,
					   vino_get_saa7191_input(input),
					   0, 0);
			if (ret) {
				vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
				ret = -EINVAL;
				goto out;
			}

			spin_unlock_irqrestore(&vino_drvdata->input_lock,
					       flags);

			/* Don't hold spinlocks while auto-detecting norm
			 * as it may take a while...
			 */
			ret = decoder_call(video, querystd, &norm);
			if (!ret) {
				for (data_norm = 0; data_norm < 3;
				     data_norm++) {
					if (vino_data_norms[data_norm].std
					    & norm)
						break;
				}
				if (data_norm == 3)
					data_norm = VINO_DATA_NORM_PAL;
				ret = decoder_call(core, s_std, norm);
			}

			spin_lock_irqsave(&vino_drvdata->input_lock, flags);

			if (ret) {
				vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
				ret = -EINVAL;
				goto out;
			}

			vcs->input = input;
			vcs->data_norm = data_norm;
		} else {
			/* decoder owned by the other channel: only the
			 * very same input can be shared */
			if (input != vcs2->input) {
				ret = -EBUSY;
				goto out;
			}

			vcs->input = input;
			vcs->data_norm = vcs2->data_norm;
		}

		if (vino_drvdata->camera_owner == vcs->channel) {
			/* Transfer the ownership or release the input */
			if (vcs2->input == VINO_INPUT_D1) {
				vino_drvdata->camera_owner = vcs2->channel;
			} else {
				vino_drvdata->camera_owner = VINO_NO_CHANNEL;
			}
		}
		break;
	case VINO_INPUT_D1:
		if (!vino_drvdata->camera) {
			ret = -EINVAL;
			goto out;
		}

		if (vino_drvdata->camera_owner == VINO_NO_CHANNEL)
			vino_drvdata->camera_owner = vcs->channel;

		if (vino_drvdata->decoder_owner == vcs->channel) {
			/* Transfer the ownership or release the input */
			if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
			    (vcs2->input == VINO_INPUT_SVIDEO)) {
				vino_drvdata->decoder_owner = vcs2->channel;
			} else {
				vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
			}
		}

		vcs->input = input;
		vcs->data_norm = VINO_DATA_NORM_D1;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	vino_set_default_clipping(vcs);
	vino_set_default_scaling(vcs);
	vino_set_default_framerate(vcs);

	dprintk("vino_set_input(): %s\n", vino_inputs[vcs->input].name);

out:
	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);

	return ret;
}

/* Drop this channel's input at close time, handing the owned device
 * over to the other channel if it uses the same source. */
static void vino_release_input(struct vino_channel_settings *vcs)
{
	struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
		&vino_drvdata->b : &vino_drvdata->a;
	unsigned long flags;

	dprintk("vino_release_input():\n");

	spin_lock_irqsave(&vino_drvdata->input_lock, flags);

	/* Release ownership of the channel
	 * and if the other channel takes input from
	 * the same source, transfer the ownership */
	if (vino_drvdata->camera_owner == vcs->channel) {
		if (vcs2->input == VINO_INPUT_D1) {
			vino_drvdata->camera_owner = vcs2->channel;
		} else {
			vino_drvdata->camera_owner = VINO_NO_CHANNEL;
		}
	} else if (vino_drvdata->decoder_owner == vcs->channel) {
		if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
		    (vcs2->input == VINO_INPUT_SVIDEO)) {
			vino_drvdata->decoder_owner = vcs2->channel;
		} else {
			vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
		}
	}
	vcs->input = VINO_INPUT_NONE;

	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
}

/* Change the channel's data norm, programming the decoder for
 * COMPOSITE/SVIDEO inputs.  Temporarily drops input_lock (via *flags)
 * around the decoder call.
 * execute with input_lock locked */
static int vino_set_data_norm(struct vino_channel_settings *vcs,
			      unsigned int data_norm,
			      unsigned long *flags)
{
	int err = 0;

	if (data_norm == vcs->data_norm)
		return 0;

	switch (vcs->input) {
	case VINO_INPUT_D1:
		/* only one "norm" supported */
		if (data_norm != VINO_DATA_NORM_D1)
			return -EINVAL;
		break;
	case VINO_INPUT_COMPOSITE:
	case VINO_INPUT_SVIDEO: {
		v4l2_std_id norm;

		if ((data_norm != VINO_DATA_NORM_PAL)
		    && (data_norm != VINO_DATA_NORM_NTSC)
		    && (data_norm != VINO_DATA_NORM_SECAM))
			return -EINVAL;

		spin_unlock_irqrestore(&vino_drvdata->input_lock, *flags);

		/* Don't hold spinlocks while setting norm
		 * as it may take a while...
		 */
		norm = vino_data_norms[data_norm].std;
		err = decoder_call(core, s_std, norm);

		spin_lock_irqsave(&vino_drvdata->input_lock, *flags);

		if (err)
			goto out;

		vcs->data_norm = data_norm;

		/* norm change invalidates clipping/scaling/framerate */
		vino_set_default_clipping(vcs);
		vino_set_default_scaling(vcs);
		vino_set_default_framerate(vcs);
		break;
	}
	default:
		return -EINVAL;
	}

out:
	return err;
}

/* V4L2 helper functions */

/* Look up the driver's format index for a V4L2 fourcc;
 * returns VINO_DATA_FMT_NONE when unsupported. */
static int vino_find_data_format(__u32 pixelformat)
{
	int i;

	for (i = 0; i < VINO_DATA_FMT_COUNT; i++) {
		if (vino_data_formats[i].pixelformat == pixelformat)
			return i;
	}

	return VINO_DATA_FMT_NONE;
}

/* Map a V4L2 input enumeration index onto a VINO input id; the
 * numbering depends on which of decoder/camera are present. */
static int vino_int_enum_input(struct vino_channel_settings *vcs, __u32 index)
{
	int input = VINO_INPUT_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
	if (vino_drvdata->decoder && vino_drvdata->camera) {
		switch (index) {
		case 0:
			input = VINO_INPUT_COMPOSITE;
			break;
		case 1:
			input = VINO_INPUT_SVIDEO;
			break;
		case 2:
			input = VINO_INPUT_D1;
			break;
		}
	} else if (vino_drvdata->decoder) {
		switch (index) {
		case 0:
			input = VINO_INPUT_COMPOSITE;
			break;
		case 1:
			input = VINO_INPUT_SVIDEO;
			break;
		}
	} else if (vino_drvdata->camera) {
		switch (index) {
		case 0:
			input = VINO_INPUT_D1;
			break;
		}
	}
	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);

	return input;
}

/* Inverse of vino_int_enum_input(): current input -> enum index.
 * execute with input_lock locked */
static __u32 vino_find_input_index(struct vino_channel_settings *vcs)
{
	__u32 index = 0;
	// FIXME: detect when no inputs available

	if (vino_drvdata->decoder && vino_drvdata->camera) {
		switch (vcs->input) {
		case VINO_INPUT_COMPOSITE:
			index = 0;
			break;
		case VINO_INPUT_SVIDEO:
			index = 1;
			break;
		case VINO_INPUT_D1:
			index = 2;
			break;
		}
	} else if (vino_drvdata->decoder) {
		switch (vcs->input) {
		case VINO_INPUT_COMPOSITE:
			index = 0;
			break;
		case VINO_INPUT_SVIDEO:
			index = 1;
			break;
		}
	} else if (vino_drvdata->camera) {
		switch (vcs->input) {
		case VINO_INPUT_D1:
			index = 0;
			break;
		}
	}

	return index;
}

/* V4L2 ioctls */

static int vino_querycap(struct file *file, void *__fh,
			 struct v4l2_capability *cap)
{
	memset(cap, 0, sizeof(struct v4l2_capability));

	strcpy(cap->driver, vino_driver_name);
	strcpy(cap->card, vino_driver_description);
	strcpy(cap->bus_info, vino_bus_name);
	cap->capabilities =
		V4L2_CAP_VIDEO_CAPTURE |
		V4L2_CAP_STREAMING;
	// V4L2_CAP_OVERLAY, V4L2_CAP_READWRITE
	return 0;
}

static int vino_enum_input(struct file *file, void *__fh,
			   struct v4l2_input *i)
{
	struct vino_channel_settings *vcs = video_drvdata(file);
	__u32 index = i->index;
	int input;

	dprintk("requested index = %d\n", index);

	input = vino_int_enum_input(vcs, index);
	if (input == VINO_INPUT_NONE)
		return -EINVAL;

	i->type = V4L2_INPUT_TYPE_CAMERA;
	i->std = vino_inputs[input].std;
	strcpy(i->name, vino_inputs[input].name);

	/* only the decoder inputs can report signal status */
	if (input == VINO_INPUT_COMPOSITE || input == VINO_INPUT_SVIDEO)
		decoder_call(video, g_input_status, &i->status);

	return 0;
}

static int vino_g_input(struct file *file, void *__fh,
			unsigned int *i)
{
	struct vino_channel_settings *vcs = video_drvdata(file);
	__u32 index;
	int input;
	unsigned long flags;

	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
	input = vcs->input;
	index = vino_find_input_index(vcs);
	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);

	dprintk("input = %d\n", input);

	if (input == VINO_INPUT_NONE) {
		return -EINVAL;
	}

	*i = index;

	return 0;
}

static int vino_s_input(struct file *file, void *__fh,
			unsigned int i)
{
	struct vino_channel_settings *vcs = video_drvdata(file);
	int input;

	dprintk("requested input = %d\n", i);

	input = vino_int_enum_input(vcs, i);
	if (input == VINO_INPUT_NONE)
		return -EINVAL;

	return vino_set_input(vcs, input);
}

static int vino_querystd(struct file *file, void *__fh,
			 v4l2_std_id *std)
{
	struct vino_channel_settings *vcs = video_drvdata(file);
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&vino_drvdata->input_lock, flags);

	switch (vcs->input) {
	case VINO_INPUT_D1:
		/* D1 has a fixed standard; no probing possible */
		*std = vino_inputs[vcs->input].std;
		break;
	case VINO_INPUT_COMPOSITE:
	case VINO_INPUT_SVIDEO: {
		decoder_call(video, querystd, std);
		break;
	}
	default:
		err = -EINVAL;
	}

	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);

	return err;
}

static int vino_g_std(struct file *file, void *__fh,
		      v4l2_std_id *std)
{
	struct vino_channel_settings *vcs = video_drvdata(file);
	unsigned long flags;

	spin_lock_irqsave(&vino_drvdata->input_lock, flags);

	*std = vino_data_norms[vcs->data_norm].std;
	dprintk("current standard = %d\n", vcs->data_norm);

	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);

	return 0;
}

static int vino_s_std(struct file *file, void *__fh,
		      v4l2_std_id *std)
{
	struct vino_channel_settings *vcs = video_drvdata(file);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vino_drvdata->input_lock, flags);

	/* only the channel owning the input may change its standard */
	if (!vino_is_input_owner(vcs)) {
		ret = -EBUSY;
		goto out;
	}

	/* check if the standard is valid for the current input */
	if ((*std) & vino_inputs[vcs->input].std) {
		dprintk("standard accepted\n");

		/* change the video norm for SAA7191
		 * and accept NTSC for D1 (do nothing) */

		if (vcs->input == VINO_INPUT_D1)
			goto out;

		if ((*std) & V4L2_STD_PAL) {
			ret = vino_set_data_norm(vcs, VINO_DATA_NORM_PAL,
						 &flags);
		} else if ((*std) & V4L2_STD_NTSC) {
			ret = vino_set_data_norm(vcs, VINO_DATA_NORM_NTSC,
						 &flags);
		} else if ((*std) & V4L2_STD_SECAM) {
			ret = vino_set_data_norm(vcs, VINO_DATA_NORM_SECAM,
						 &flags);
		} else {
			ret = -EINVAL;
		}

		if (ret) {
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);

	return ret;
}

static int vino_enum_fmt_vid_cap(struct file *file, void *__fh,
				 struct v4l2_fmtdesc *fd)
{
	dprintk("format index = %d\n", fd->index);

	if (fd->index >= VINO_DATA_FMT_COUNT)
		return -EINVAL;
	dprintk("format name = %s\n",
		vino_data_formats[fd->index].description);

	fd->pixelformat = vino_data_formats[fd->index].pixelformat;
	strcpy(fd->description, vino_data_formats[fd->index].description);
	return 0;
}

/* TRY_FMT: run the format negotiation on a copy of the channel
 * settings so nothing is committed. */
static int vino_try_fmt_vid_cap(struct file *file, void *__fh,
				struct v4l2_format *f)
{
	struct vino_channel_settings *vcs = video_drvdata(file);
	struct vino_channel_settings tempvcs;
unsigned long flags; struct v4l2_pix_format *pf = &f->fmt.pix; dprintk("requested: w = %d, h = %d\n", pf->width, pf->height); spin_lock_irqsave(&vino_drvdata->input_lock, flags); memcpy(&tempvcs, vcs, sizeof(struct vino_channel_settings)); spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); tempvcs.data_format = vino_find_data_format(pf->pixelformat); if (tempvcs.data_format == VINO_DATA_FMT_NONE) { tempvcs.data_format = VINO_DATA_FMT_GREY; pf->pixelformat = vino_data_formats[tempvcs.data_format]. pixelformat; } /* data format must be set before clipping/scaling */ vino_set_scaling(&tempvcs, pf->width, pf->height); dprintk("data format = %s\n", vino_data_formats[tempvcs.data_format].description); pf->width = (tempvcs.clipping.right - tempvcs.clipping.left) / tempvcs.decimation; pf->height = (tempvcs.clipping.bottom - tempvcs.clipping.top) / tempvcs.decimation; pf->field = V4L2_FIELD_INTERLACED; pf->bytesperline = tempvcs.line_size; pf->sizeimage = tempvcs.line_size * (tempvcs.clipping.bottom - tempvcs.clipping.top) / tempvcs.decimation; pf->colorspace = vino_data_formats[tempvcs.data_format].colorspace; pf->priv = 0; return 0; } static int vino_g_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_format *f) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long flags; struct v4l2_pix_format *pf = &f->fmt.pix; spin_lock_irqsave(&vino_drvdata->input_lock, flags); pf->width = (vcs->clipping.right - vcs->clipping.left) / vcs->decimation; pf->height = (vcs->clipping.bottom - vcs->clipping.top) / vcs->decimation; pf->pixelformat = vino_data_formats[vcs->data_format].pixelformat; pf->field = V4L2_FIELD_INTERLACED; pf->bytesperline = vcs->line_size; pf->sizeimage = vcs->line_size * (vcs->clipping.bottom - vcs->clipping.top) / vcs->decimation; pf->colorspace = vino_data_formats[vcs->data_format].colorspace; pf->priv = 0; spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); return 0; } static int vino_s_fmt_vid_cap(struct file *file, void 
*__fh, struct v4l2_format *f) { struct vino_channel_settings *vcs = video_drvdata(file); int data_format; unsigned long flags; struct v4l2_pix_format *pf = &f->fmt.pix; spin_lock_irqsave(&vino_drvdata->input_lock, flags); data_format = vino_find_data_format(pf->pixelformat); if (data_format == VINO_DATA_FMT_NONE) { vcs->data_format = VINO_DATA_FMT_GREY; pf->pixelformat = vino_data_formats[vcs->data_format]. pixelformat; } else { vcs->data_format = data_format; } /* data format must be set before clipping/scaling */ vino_set_scaling(vcs, pf->width, pf->height); dprintk("data format = %s\n", vino_data_formats[vcs->data_format].description); pf->width = vcs->clipping.right - vcs->clipping.left; pf->height = vcs->clipping.bottom - vcs->clipping.top; pf->field = V4L2_FIELD_INTERLACED; pf->bytesperline = vcs->line_size; pf->sizeimage = vcs->line_size * (vcs->clipping.bottom - vcs->clipping.top) / vcs->decimation; pf->colorspace = vino_data_formats[vcs->data_format].colorspace; pf->priv = 0; spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); return 0; } static int vino_cropcap(struct file *file, void *__fh, struct v4l2_cropcap *ccap) { struct vino_channel_settings *vcs = video_drvdata(file); const struct vino_data_norm *norm; unsigned long flags; switch (ccap->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: spin_lock_irqsave(&vino_drvdata->input_lock, flags); norm = &vino_data_norms[vcs->data_norm]; spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); ccap->bounds.left = 0; ccap->bounds.top = 0; ccap->bounds.width = norm->width; ccap->bounds.height = norm->height; memcpy(&ccap->defrect, &ccap->bounds, sizeof(struct v4l2_rect)); ccap->pixelaspect.numerator = 1; ccap->pixelaspect.denominator = 1; break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: default: return -EINVAL; } return 0; } static int vino_g_crop(struct file *file, void *__fh, struct v4l2_crop *c) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long flags; switch (c->type) { case 
V4L2_BUF_TYPE_VIDEO_CAPTURE: spin_lock_irqsave(&vino_drvdata->input_lock, flags); c->c.left = vcs->clipping.left; c->c.top = vcs->clipping.top; c->c.width = vcs->clipping.right - vcs->clipping.left; c->c.height = vcs->clipping.bottom - vcs->clipping.top; spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: default: return -EINVAL; } return 0; } static int vino_s_crop(struct file *file, void *__fh, struct v4l2_crop *c) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long flags; switch (c->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: spin_lock_irqsave(&vino_drvdata->input_lock, flags); vino_set_clipping(vcs, c->c.left, c->c.top, c->c.width, c->c.height); spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: default: return -EINVAL; } return 0; } static int vino_g_parm(struct file *file, void *__fh, struct v4l2_streamparm *sp) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long flags; struct v4l2_captureparm *cp = &sp->parm.capture; cp->capability = V4L2_CAP_TIMEPERFRAME; cp->timeperframe.numerator = 1; spin_lock_irqsave(&vino_drvdata->input_lock, flags); cp->timeperframe.denominator = vcs->fps; spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); /* TODO: cp->readbuffers = xxx; */ return 0; } static int vino_s_parm(struct file *file, void *__fh, struct v4l2_streamparm *sp) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long flags; struct v4l2_captureparm *cp = &sp->parm.capture; spin_lock_irqsave(&vino_drvdata->input_lock, flags); if ((cp->timeperframe.numerator == 0) || (cp->timeperframe.denominator == 0)) { /* reset framerate */ vino_set_default_framerate(vcs); } else { vino_set_framerate(vcs, cp->timeperframe.denominator / cp->timeperframe.numerator); } spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); return 0; } static int vino_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffers *rb) { 
struct vino_channel_settings *vcs = video_drvdata(file); if (vcs->reading) return -EBUSY; /* TODO: check queue type */ if (rb->memory != V4L2_MEMORY_MMAP) { dprintk("type not mmap\n"); return -EINVAL; } dprintk("count = %d\n", rb->count); if (rb->count > 0) { if (vino_is_capturing(vcs)) { dprintk("busy, capturing\n"); return -EBUSY; } if (vino_queue_has_mapped_buffers(&vcs->fb_queue)) { dprintk("busy, buffers still mapped\n"); return -EBUSY; } else { vcs->streaming = 0; vino_queue_free(&vcs->fb_queue); vino_queue_init(&vcs->fb_queue, &rb->count); } } else { vcs->streaming = 0; vino_capture_stop(vcs); vino_queue_free(&vcs->fb_queue); } return 0; } static void vino_v4l2_get_buffer_status(struct vino_channel_settings *vcs, struct vino_framebuffer *fb, struct v4l2_buffer *b) { if (vino_queue_outgoing_contains(&vcs->fb_queue, fb->id)) { b->flags &= ~V4L2_BUF_FLAG_QUEUED; b->flags |= V4L2_BUF_FLAG_DONE; } else if (vino_queue_incoming_contains(&vcs->fb_queue, fb->id)) { b->flags &= ~V4L2_BUF_FLAG_DONE; b->flags |= V4L2_BUF_FLAG_QUEUED; } else { b->flags &= ~(V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_QUEUED); } b->flags &= ~(V4L2_BUF_FLAG_TIMECODE); if (fb->map_count > 0) b->flags |= V4L2_BUF_FLAG_MAPPED; b->index = fb->id; b->memory = (vcs->fb_queue.type == VINO_MEMORY_MMAP) ? V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR; b->m.offset = fb->offset; b->bytesused = fb->data_size; b->length = fb->size; b->field = V4L2_FIELD_INTERLACED; b->sequence = fb->frame_counter; memcpy(&b->timestamp, &fb->timestamp, sizeof(struct timeval)); // b->input ? 
dprintk("buffer %d: length = %d, bytesused = %d, offset = %d\n", fb->id, fb->size, fb->data_size, fb->offset); } static int vino_querybuf(struct file *file, void *__fh, struct v4l2_buffer *b) { struct vino_channel_settings *vcs = video_drvdata(file); struct vino_framebuffer *fb; if (vcs->reading) return -EBUSY; /* TODO: check queue type */ if (b->index >= vino_queue_get_length(&vcs->fb_queue)) { dprintk("invalid index = %d\n", b->index); return -EINVAL; } fb = vino_queue_get_buffer(&vcs->fb_queue, b->index); if (fb == NULL) { dprintk("vino_queue_get_buffer() failed"); return -EINVAL; } vino_v4l2_get_buffer_status(vcs, fb, b); return 0; } static int vino_qbuf(struct file *file, void *__fh, struct v4l2_buffer *b) { struct vino_channel_settings *vcs = video_drvdata(file); struct vino_framebuffer *fb; int ret; if (vcs->reading) return -EBUSY; /* TODO: check queue type */ if (b->memory != V4L2_MEMORY_MMAP) { dprintk("type not mmap\n"); return -EINVAL; } fb = vino_capture_enqueue(vcs, b->index); if (fb == NULL) return -EINVAL; vino_v4l2_get_buffer_status(vcs, fb, b); if (vcs->streaming) { ret = vino_capture_next(vcs, 1); if (ret) return ret; } return 0; } static int vino_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *b) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned int nonblocking = file->f_flags & O_NONBLOCK; struct vino_framebuffer *fb; unsigned int incoming, outgoing; int err; if (vcs->reading) return -EBUSY; /* TODO: check queue type */ err = vino_queue_get_incoming(&vcs->fb_queue, &incoming); if (err) { dprintk("vino_queue_get_incoming() failed\n"); return -EINVAL; } err = vino_queue_get_outgoing(&vcs->fb_queue, &outgoing); if (err) { dprintk("vino_queue_get_outgoing() failed\n"); return -EINVAL; } dprintk("incoming = %d, outgoing = %d\n", incoming, outgoing); if (outgoing == 0) { if (incoming == 0) { dprintk("no incoming or outgoing buffers\n"); return -EINVAL; } if (nonblocking) { dprintk("non-blocking I/O was selected and " "there 
are no buffers to dequeue\n"); return -EAGAIN; } err = vino_wait_for_frame(vcs); if (err) { err = vino_wait_for_frame(vcs); if (err) { /* interrupted or no frames captured because of * frame skipping */ /* vino_capture_failed(vcs); */ return -EIO; } } } fb = vino_queue_remove(&vcs->fb_queue, &b->index); if (fb == NULL) { dprintk("vino_queue_remove() failed\n"); return -EINVAL; } err = vino_check_buffer(vcs, fb); vino_v4l2_get_buffer_status(vcs, fb, b); if (err) return -EIO; return 0; } static int vino_streamon(struct file *file, void *__fh, enum v4l2_buf_type i) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned int incoming; int ret; if (vcs->reading) return -EBUSY; if (vcs->streaming) return 0; // TODO: check queue type if (vino_queue_get_length(&vcs->fb_queue) < 1) { dprintk("no buffers allocated\n"); return -EINVAL; } ret = vino_queue_get_incoming(&vcs->fb_queue, &incoming); if (ret) { dprintk("vino_queue_get_incoming() failed\n"); return -EINVAL; } vcs->streaming = 1; if (incoming > 0) { ret = vino_capture_next(vcs, 1); if (ret) { vcs->streaming = 0; dprintk("couldn't start capture\n"); return -EINVAL; } } return 0; } static int vino_streamoff(struct file *file, void *__fh, enum v4l2_buf_type i) { struct vino_channel_settings *vcs = video_drvdata(file); if (vcs->reading) return -EBUSY; if (!vcs->streaming) return 0; vcs->streaming = 0; vino_capture_stop(vcs); return 0; } static int vino_queryctrl(struct file *file, void *__fh, struct v4l2_queryctrl *queryctrl) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long flags; int i; int err = 0; spin_lock_irqsave(&vino_drvdata->input_lock, flags); switch (vcs->input) { case VINO_INPUT_D1: for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) { if (vino_indycam_v4l2_controls[i].id == queryctrl->id) { memcpy(queryctrl, &vino_indycam_v4l2_controls[i], sizeof(struct v4l2_queryctrl)); queryctrl->reserved[0] = 0; goto found; } } err = -EINVAL; break; case VINO_INPUT_COMPOSITE: case 
VINO_INPUT_SVIDEO: for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) { if (vino_saa7191_v4l2_controls[i].id == queryctrl->id) { memcpy(queryctrl, &vino_saa7191_v4l2_controls[i], sizeof(struct v4l2_queryctrl)); queryctrl->reserved[0] = 0; goto found; } } err = -EINVAL; break; default: err = -EINVAL; } found: spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); return err; } static int vino_g_ctrl(struct file *file, void *__fh, struct v4l2_control *control) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long flags; int i; int err = 0; spin_lock_irqsave(&vino_drvdata->input_lock, flags); switch (vcs->input) { case VINO_INPUT_D1: { err = -EINVAL; for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) { if (vino_indycam_v4l2_controls[i].id == control->id) { err = 0; break; } } if (err) goto out; err = camera_call(core, g_ctrl, control); if (err) err = -EINVAL; break; } case VINO_INPUT_COMPOSITE: case VINO_INPUT_SVIDEO: { err = -EINVAL; for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) { if (vino_saa7191_v4l2_controls[i].id == control->id) { err = 0; break; } } if (err) goto out; err = decoder_call(core, g_ctrl, control); if (err) err = -EINVAL; break; } default: err = -EINVAL; } out: spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); return err; } static int vino_s_ctrl(struct file *file, void *__fh, struct v4l2_control *control) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long flags; int i; int err = 0; spin_lock_irqsave(&vino_drvdata->input_lock, flags); if (!vino_is_input_owner(vcs)) { err = -EBUSY; goto out; } switch (vcs->input) { case VINO_INPUT_D1: { err = -EINVAL; for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) { if (vino_indycam_v4l2_controls[i].id == control->id) { err = 0; break; } } if (err) goto out; if (control->value < vino_indycam_v4l2_controls[i].minimum || control->value > vino_indycam_v4l2_controls[i].maximum) { err = -ERANGE; goto out; } err = camera_call(core, s_ctrl, control); if 
(err) err = -EINVAL; break; } case VINO_INPUT_COMPOSITE: case VINO_INPUT_SVIDEO: { err = -EINVAL; for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) { if (vino_saa7191_v4l2_controls[i].id == control->id) { err = 0; break; } } if (err) goto out; if (control->value < vino_saa7191_v4l2_controls[i].minimum || control->value > vino_saa7191_v4l2_controls[i].maximum) { err = -ERANGE; goto out; } err = decoder_call(core, s_ctrl, control); if (err) err = -EINVAL; break; } default: err = -EINVAL; } out: spin_unlock_irqrestore(&vino_drvdata->input_lock, flags); return err; } /* File operations */ static int vino_open(struct file *file) { struct vino_channel_settings *vcs = video_drvdata(file); int ret = 0; dprintk("open(): channel = %c\n", (vcs->channel == VINO_CHANNEL_A) ? 'A' : 'B'); mutex_lock(&vcs->mutex); if (vcs->users) { dprintk("open(): driver busy\n"); ret = -EBUSY; goto out; } ret = vino_acquire_input(vcs); if (ret) { dprintk("open(): vino_acquire_input() failed\n"); goto out; } vcs->users++; out: mutex_unlock(&vcs->mutex); dprintk("open(): %s!\n", ret ? 
"failed" : "complete"); return ret; } static int vino_close(struct file *file) { struct vino_channel_settings *vcs = video_drvdata(file); dprintk("close():\n"); mutex_lock(&vcs->mutex); vcs->users--; if (!vcs->users) { vino_release_input(vcs); /* stop DMA and free buffers */ vino_capture_stop(vcs); vino_queue_free(&vcs->fb_queue); } mutex_unlock(&vcs->mutex); return 0; } static void vino_vm_open(struct vm_area_struct *vma) { struct vino_framebuffer *fb = vma->vm_private_data; fb->map_count++; dprintk("vino_vm_open(): count = %d\n", fb->map_count); } static void vino_vm_close(struct vm_area_struct *vma) { struct vino_framebuffer *fb = vma->vm_private_data; fb->map_count--; dprintk("vino_vm_close(): count = %d\n", fb->map_count); } static const struct vm_operations_struct vino_vm_ops = { .open = vino_vm_open, .close = vino_vm_close, }; static int vino_mmap(struct file *file, struct vm_area_struct *vma) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned long start = vma->vm_start; unsigned long size = vma->vm_end - vma->vm_start; unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; struct vino_framebuffer *fb = NULL; unsigned int i, length; int ret = 0; dprintk("mmap():\n"); // TODO: reject mmap if already mapped if (mutex_lock_interruptible(&vcs->mutex)) return -EINTR; if (vcs->reading) { ret = -EBUSY; goto out; } // TODO: check queue type if (!(vma->vm_flags & VM_WRITE)) { dprintk("mmap(): app bug: PROT_WRITE please\n"); ret = -EINVAL; goto out; } if (!(vma->vm_flags & VM_SHARED)) { dprintk("mmap(): app bug: MAP_SHARED please\n"); ret = -EINVAL; goto out; } /* find the correct buffer using offset */ length = vino_queue_get_length(&vcs->fb_queue); if (length == 0) { dprintk("mmap(): queue not initialized\n"); ret = -EINVAL; goto out; } for (i = 0; i < length; i++) { fb = vino_queue_get_buffer(&vcs->fb_queue, i); if (fb == NULL) { dprintk("mmap(): vino_queue_get_buffer() failed\n"); ret = -EINVAL; goto out; } if (fb->offset == offset) goto found; } 
dprintk("mmap(): invalid offset = %lu\n", offset); ret = -EINVAL; goto out; found: dprintk("mmap(): buffer = %d\n", i); if (size > (fb->desc_table.page_count * PAGE_SIZE)) { dprintk("mmap(): failed: size = %lu > %lu\n", size, fb->desc_table.page_count * PAGE_SIZE); ret = -EINVAL; goto out; } for (i = 0; i < fb->desc_table.page_count; i++) { unsigned long pfn = virt_to_phys((void *)fb->desc_table.virtual[i]) >> PAGE_SHIFT; if (size < PAGE_SIZE) break; // protection was: PAGE_READONLY if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, vma->vm_page_prot)) { dprintk("mmap(): remap_pfn_range() failed\n"); ret = -EAGAIN; goto out; } start += PAGE_SIZE; size -= PAGE_SIZE; } fb->map_count = 1; vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; vma->vm_flags &= ~VM_IO; vma->vm_private_data = fb; vma->vm_file = file; vma->vm_ops = &vino_vm_ops; out: mutex_unlock(&vcs->mutex); return ret; } static unsigned int vino_poll(struct file *file, poll_table *pt) { struct vino_channel_settings *vcs = video_drvdata(file); unsigned int outgoing; unsigned int ret = 0; // lock mutex (?) // TODO: this has to be corrected for different read modes dprintk("poll():\n"); if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) { dprintk("poll(): vino_queue_get_outgoing() failed\n"); ret = POLLERR; goto error; } if (outgoing > 0) goto over; poll_wait(file, &vcs->fb_queue.frame_wait_queue, pt); if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) { dprintk("poll(): vino_queue_get_outgoing() failed\n"); ret = POLLERR; goto error; } over: dprintk("poll(): data %savailable\n", (outgoing > 0) ? 
"" : "not "); if (outgoing > 0) ret = POLLIN | POLLRDNORM; error: return ret; } static long vino_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct vino_channel_settings *vcs = video_drvdata(file); long ret; if (mutex_lock_interruptible(&vcs->mutex)) return -EINTR; ret = video_ioctl2(file, cmd, arg); mutex_unlock(&vcs->mutex); return ret; } /* Initialization and cleanup */ /* __initdata */ static int vino_init_stage; const struct v4l2_ioctl_ops vino_ioctl_ops = { .vidioc_enum_fmt_vid_cap = vino_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vino_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vino_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vino_try_fmt_vid_cap, .vidioc_querycap = vino_querycap, .vidioc_enum_input = vino_enum_input, .vidioc_g_input = vino_g_input, .vidioc_s_input = vino_s_input, .vidioc_g_std = vino_g_std, .vidioc_s_std = vino_s_std, .vidioc_querystd = vino_querystd, .vidioc_cropcap = vino_cropcap, .vidioc_s_crop = vino_s_crop, .vidioc_g_crop = vino_g_crop, .vidioc_s_parm = vino_s_parm, .vidioc_g_parm = vino_g_parm, .vidioc_reqbufs = vino_reqbufs, .vidioc_querybuf = vino_querybuf, .vidioc_qbuf = vino_qbuf, .vidioc_dqbuf = vino_dqbuf, .vidioc_streamon = vino_streamon, .vidioc_streamoff = vino_streamoff, .vidioc_queryctrl = vino_queryctrl, .vidioc_g_ctrl = vino_g_ctrl, .vidioc_s_ctrl = vino_s_ctrl, }; static const struct v4l2_file_operations vino_fops = { .owner = THIS_MODULE, .open = vino_open, .release = vino_close, .unlocked_ioctl = vino_ioctl, .mmap = vino_mmap, .poll = vino_poll, }; static struct video_device vdev_template = { .name = "NOT SET", .fops = &vino_fops, .ioctl_ops = &vino_ioctl_ops, .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM, }; static void vino_module_cleanup(int stage) { switch(stage) { case 11: video_unregister_device(vino_drvdata->b.vdev); vino_drvdata->b.vdev = NULL; case 10: video_unregister_device(vino_drvdata->a.vdev); vino_drvdata->a.vdev = NULL; case 9: i2c_del_adapter(&vino_i2c_adapter); case 8: 
free_irq(SGI_VINO_IRQ, NULL); case 7: if (vino_drvdata->b.vdev) { video_device_release(vino_drvdata->b.vdev); vino_drvdata->b.vdev = NULL; } case 6: if (vino_drvdata->a.vdev) { video_device_release(vino_drvdata->a.vdev); vino_drvdata->a.vdev = NULL; } case 5: /* all entries in dma_cpu dummy table have the same address */ dma_unmap_single(NULL, vino_drvdata->dummy_desc_table.dma_cpu[0], PAGE_SIZE, DMA_FROM_DEVICE); dma_free_coherent(NULL, VINO_DUMMY_DESC_COUNT * sizeof(dma_addr_t), (void *)vino_drvdata-> dummy_desc_table.dma_cpu, vino_drvdata->dummy_desc_table.dma); case 4: free_page(vino_drvdata->dummy_page); case 3: v4l2_device_unregister(&vino_drvdata->v4l2_dev); case 2: kfree(vino_drvdata); case 1: iounmap(vino); case 0: break; default: dprintk("vino_module_cleanup(): invalid cleanup stage = %d\n", stage); } } static int vino_probe(void) { unsigned long rev_id; if (ip22_is_fullhouse()) { printk(KERN_ERR "VINO doesn't exist in IP22 Fullhouse\n"); return -ENODEV; } if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) { printk(KERN_ERR "VINO is not found (EISA BUS not present)\n"); return -ENODEV; } vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino)); if (!vino) { printk(KERN_ERR "VINO: ioremap() failed\n"); return -EIO; } vino_init_stage++; if (get_dbe(rev_id, &(vino->rev_id))) { printk(KERN_ERR "Failed to read VINO revision register\n"); vino_module_cleanup(vino_init_stage); return -ENODEV; } if (VINO_ID_VALUE(rev_id) != VINO_CHIP_ID) { printk(KERN_ERR "Unknown VINO chip ID (Rev/ID: 0x%02lx)\n", rev_id); vino_module_cleanup(vino_init_stage); return -ENODEV; } printk(KERN_INFO "VINO revision %ld found\n", VINO_REV_NUM(rev_id)); return 0; } static int vino_init(void) { dma_addr_t dma_dummy_address; int err; int i; vino_drvdata = kzalloc(sizeof(struct vino_settings), GFP_KERNEL); if (!vino_drvdata) { vino_module_cleanup(vino_init_stage); return -ENOMEM; } vino_init_stage++; strlcpy(vino_drvdata->v4l2_dev.name, "vino", 
sizeof(vino_drvdata->v4l2_dev.name)); err = v4l2_device_register(NULL, &vino_drvdata->v4l2_dev); if (err) return err; vino_init_stage++; /* create a dummy dma descriptor */ vino_drvdata->dummy_page = get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!vino_drvdata->dummy_page) { vino_module_cleanup(vino_init_stage); return -ENOMEM; } vino_init_stage++; // TODO: use page_count in dummy_desc_table vino_drvdata->dummy_desc_table.dma_cpu = dma_alloc_coherent(NULL, VINO_DUMMY_DESC_COUNT * sizeof(dma_addr_t), &vino_drvdata->dummy_desc_table.dma, GFP_KERNEL | GFP_DMA); if (!vino_drvdata->dummy_desc_table.dma_cpu) { vino_module_cleanup(vino_init_stage); return -ENOMEM; } vino_init_stage++; dma_dummy_address = dma_map_single(NULL, (void *)vino_drvdata->dummy_page, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < VINO_DUMMY_DESC_COUNT; i++) { vino_drvdata->dummy_desc_table.dma_cpu[i] = dma_dummy_address; } /* initialize VINO */ vino->control = 0; vino->a.next_4_desc = vino_drvdata->dummy_desc_table.dma; vino->b.next_4_desc = vino_drvdata->dummy_desc_table.dma; udelay(VINO_DESC_FETCH_DELAY); vino->intr_status = 0; vino->a.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT; vino->b.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT; return 0; } static int vino_init_channel_settings(struct vino_channel_settings *vcs, unsigned int channel, const char *name) { vcs->channel = channel; vcs->input = VINO_INPUT_NONE; vcs->alpha = 0; vcs->users = 0; vcs->data_format = VINO_DATA_FMT_GREY; vcs->data_norm = VINO_DATA_NORM_NTSC; vcs->decimation = 1; vino_set_default_clipping(vcs); vino_set_default_framerate(vcs); vcs->capturing = 0; mutex_init(&vcs->mutex); spin_lock_init(&vcs->capture_lock); mutex_init(&vcs->fb_queue.queue_mutex); spin_lock_init(&vcs->fb_queue.queue_lock); init_waitqueue_head(&vcs->fb_queue.frame_wait_queue); vcs->vdev = video_device_alloc(); if (!vcs->vdev) { vino_module_cleanup(vino_init_stage); return -ENOMEM; } vino_init_stage++; memcpy(vcs->vdev, &vdev_template, sizeof(struct video_device)); 
strcpy(vcs->vdev->name, name); vcs->vdev->release = video_device_release; vcs->vdev->v4l2_dev = &vino_drvdata->v4l2_dev; video_set_drvdata(vcs->vdev, vcs); return 0; } static int __init vino_module_init(void) { int ret; printk(KERN_INFO "SGI VINO driver version %s\n", VINO_MODULE_VERSION); ret = vino_probe(); if (ret) return ret; ret = vino_init(); if (ret) return ret; /* initialize data structures */ spin_lock_init(&vino_drvdata->vino_lock); spin_lock_init(&vino_drvdata->input_lock); ret = vino_init_channel_settings(&vino_drvdata->a, VINO_CHANNEL_A, vino_vdev_name_a); if (ret) return ret; ret = vino_init_channel_settings(&vino_drvdata->b, VINO_CHANNEL_B, vino_vdev_name_b); if (ret) return ret; /* initialize hardware and register V4L devices */ ret = request_irq(SGI_VINO_IRQ, vino_interrupt, 0, vino_driver_description, NULL); if (ret) { printk(KERN_ERR "VINO: requesting IRQ %02d failed\n", SGI_VINO_IRQ); vino_module_cleanup(vino_init_stage); return -EAGAIN; } vino_init_stage++; ret = i2c_add_adapter(&vino_i2c_adapter); if (ret) { printk(KERN_ERR "VINO I2C bus registration failed\n"); vino_module_cleanup(vino_init_stage); return ret; } i2c_set_adapdata(&vino_i2c_adapter, &vino_drvdata->v4l2_dev); vino_init_stage++; ret = video_register_device(vino_drvdata->a.vdev, VFL_TYPE_GRABBER, -1); if (ret < 0) { printk(KERN_ERR "VINO channel A Video4Linux-device " "registration failed\n"); vino_module_cleanup(vino_init_stage); return -EINVAL; } vino_init_stage++; ret = video_register_device(vino_drvdata->b.vdev, VFL_TYPE_GRABBER, -1); if (ret < 0) { printk(KERN_ERR "VINO channel B Video4Linux-device " "registration failed\n"); vino_module_cleanup(vino_init_stage); return -EINVAL; } vino_init_stage++; vino_drvdata->decoder = v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, "saa7191", 0, I2C_ADDRS(0x45)); vino_drvdata->camera = v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, "indycam", 0, I2C_ADDRS(0x2b)); dprintk("init complete!\n"); return 0; } 
static void __exit vino_module_exit(void) { dprintk("exiting, stage = %d ...\n", vino_init_stage); vino_module_cleanup(vino_init_stage); dprintk("cleanup complete, exit!\n"); } module_init(vino_module_init); module_exit(vino_module_exit);
gpl-2.0
boyan3010/ShooterU_Kernel_3.2.X
drivers/media/video/m5mols/m5mols_capture.c
564
5306
/* * The Capture code for Fujitsu M-5MOLS ISP * * Copyright (C) 2011 Samsung Electronics Co., Ltd. * Author: HeungJun Kim <riverful.kim@samsung.com> * * Copyright (C) 2009 Samsung Electronics Co., Ltd. * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/m5mols.h> #include "m5mols.h" #include "m5mols_reg.h" static int m5mols_capture_error_handler(struct m5mols_info *info, int timeout) { int ret; /* Disable all interrupts and clear relevant interrupt staus bits */ ret = m5mols_write(&info->sd, SYSTEM_INT_ENABLE, info->interrupt & ~(REG_INT_CAPTURE)); if (ret) return ret; if (timeout == 0) return -ETIMEDOUT; return 0; } /** * m5mols_read_rational - I2C read of a rational number * * Read numerator and denominator from registers @addr_num and @addr_den * respectively and return the division result in @val. */ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num, u32 addr_den, u32 *val) { u32 num, den; int ret = m5mols_read_u32(sd, addr_num, &num); if (!ret) ret = m5mols_read_u32(sd, addr_den, &den); if (ret) return ret; *val = den == 0 ? 0 : num / den; return ret; } /** * m5mols_capture_info - Gather captured image information * * For now it gathers only EXIF information and file size. 
*/ static int m5mols_capture_info(struct m5mols_info *info) { struct m5mols_exif *exif = &info->cap.exif; struct v4l2_subdev *sd = &info->sd; int ret; ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU, EXIF_INFO_EXPTIME_DE, &exif->exposure_time); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE, &exif->shutter_speed); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE, &exif->aperture); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE, &exif->brightness); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE, &exif->exposure_bias); if (ret) return ret; ret = m5mols_read_u16(sd, EXIF_INFO_ISO, &exif->iso_speed); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_FLASH, &exif->flash); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_SDR, &exif->sdr); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_QVAL, &exif->qval); if (ret) return ret; if (!ret) ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main); if (!ret) ret = m5mols_read_u32(sd, CAPC_THUMB_SIZE, &info->cap.thumb); if (!ret) info->cap.total = info->cap.main + info->cap.thumb; return ret; } int m5mols_start_capture(struct m5mols_info *info) { struct v4l2_subdev *sd = &info->sd; u8 resolution = info->resolution; int timeout; int ret; /* * Preparing capture. 
Setting control & interrupt before entering * capture mode * * 1) change to MONITOR mode for operating control & interrupt * 2) set controls (considering v4l2_control value & lock 3A) * 3) set interrupt * 4) change to CAPTURE mode */ ret = m5mols_mode(info, REG_MONITOR); if (!ret) ret = m5mols_sync_controls(info); if (!ret) ret = m5mols_lock_3a(info, true); if (!ret) ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE); if (!ret) ret = m5mols_mode(info, REG_CAPTURE); if (!ret) { /* Wait for capture interrupt, after changing capture mode */ timeout = wait_event_interruptible_timeout(info->irq_waitq, test_bit(ST_CAPT_IRQ, &info->flags), msecs_to_jiffies(2000)); if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) ret = m5mols_capture_error_handler(info, timeout); } if (!ret) ret = m5mols_lock_3a(info, false); if (ret) return ret; /* * Starting capture. Setting capture frame count and resolution and * the format(available format: JPEG, Bayer RAW, YUV). * * 1) select single or multi(enable to 25), format, size * 2) set interrupt * 3) start capture(for main image, now) * 4) get information * 5) notify file size to v4l2 device(e.g, to s5p-fimc v4l2 device) */ ret = m5mols_write(sd, CAPC_SEL_FRAME, 1); if (!ret) ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG); if (!ret) ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution); if (!ret) ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE); if (!ret) ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN); if (!ret) { /* Wait for the capture completion interrupt */ timeout = wait_event_interruptible_timeout(info->irq_waitq, test_bit(ST_CAPT_IRQ, &info->flags), msecs_to_jiffies(2000)); if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) { ret = m5mols_capture_info(info); if (!ret) v4l2_subdev_notify(sd, 0, &info->cap.total); } } return m5mols_capture_error_handler(info, timeout); }
gpl-2.0
slayher/android_kernel_omap
drivers/isdn/capi/capiutil.c
564
30022
/* $Id: capiutil.c,v 1.13.6.4 2001/09/23 22:24:33 kai Exp $ * * CAPI 2.0 convert capi message to capi message struct * * From CAPI 2.0 Development Kit AVM 1995 (msg.c) * Rewritten for Linux 1996 by Carsten Paeth <calle@calle.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/isdn/capiutil.h> /* from CAPI2.0 DDK AVM Berlin GmbH */ #ifndef CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON char *capi_info2str(u16 reason) { return ".."; } #else char *capi_info2str(u16 reason) { switch (reason) { /*-- informative values (corresponding message was processed) -----*/ case 0x0001: return "NCPI not supported by current protocol, NCPI ignored"; case 0x0002: return "Flags not supported by current protocol, flags ignored"; case 0x0003: return "Alert already sent by another application"; /*-- error information concerning CAPI_REGISTER -----*/ case 0x1001: return "Too many applications"; case 0x1002: return "Logical block size too small, must be at least 128 Bytes"; case 0x1003: return "Buffer exceeds 64 kByte"; case 0x1004: return "Message buffer size too small, must be at least 1024 Bytes"; case 0x1005: return "Max. 
number of logical connections not supported"; case 0x1006: return "Reserved"; case 0x1007: return "The message could not be accepted because of an internal busy condition"; case 0x1008: return "OS resource error (no memory ?)"; case 0x1009: return "CAPI not installed"; case 0x100A: return "Controller does not support external equipment"; case 0x100B: return "Controller does only support external equipment"; /*-- error information concerning message exchange functions -----*/ case 0x1101: return "Illegal application number"; case 0x1102: return "Illegal command or subcommand or message length less than 12 bytes"; case 0x1103: return "The message could not be accepted because of a queue full condition !! The error code does not imply that CAPI cannot receive messages directed to another controller, PLCI or NCCI"; case 0x1104: return "Queue is empty"; case 0x1105: return "Queue overflow, a message was lost !! This indicates a configuration error. The only recovery from this error is to perform a CAPI_RELEASE"; case 0x1106: return "Unknown notification parameter"; case 0x1107: return "The Message could not be accepted because of an internal busy condition"; case 0x1108: return "OS Resource error (no memory ?)"; case 0x1109: return "CAPI not installed"; case 0x110A: return "Controller does not support external equipment"; case 0x110B: return "Controller does only support external equipment"; /*-- error information concerning resource / coding problems -----*/ case 0x2001: return "Message not supported in current state"; case 0x2002: return "Illegal Controller / PLCI / NCCI"; case 0x2003: return "Out of PLCI"; case 0x2004: return "Out of NCCI"; case 0x2005: return "Out of LISTEN"; case 0x2006: return "Out of FAX resources (protocol T.30)"; case 0x2007: return "Illegal message parameter coding"; /*-- error information concerning requested services -----*/ case 0x3001: return "B1 protocol not supported"; case 0x3002: return "B2 protocol not supported"; case 0x3003: return 
"B3 protocol not supported"; case 0x3004: return "B1 protocol parameter not supported"; case 0x3005: return "B2 protocol parameter not supported"; case 0x3006: return "B3 protocol parameter not supported"; case 0x3007: return "B protocol combination not supported"; case 0x3008: return "NCPI not supported"; case 0x3009: return "CIP Value unknown"; case 0x300A: return "Flags not supported (reserved bits)"; case 0x300B: return "Facility not supported"; case 0x300C: return "Data length not supported by current protocol"; case 0x300D: return "Reset procedure not supported by current protocol"; /*-- informations about the clearing of a physical connection -----*/ case 0x3301: return "Protocol error layer 1 (broken line or B-channel removed by signalling protocol)"; case 0x3302: return "Protocol error layer 2"; case 0x3303: return "Protocol error layer 3"; case 0x3304: return "Another application got that call"; /*-- T.30 specific reasons -----*/ case 0x3311: return "Connecting not successful (remote station is no FAX G3 machine)"; case 0x3312: return "Connecting not successful (training error)"; case 0x3313: return "Disconnected before transfer (remote station does not support transfer mode, e.g. resolution)"; case 0x3314: return "Disconnected during transfer (remote abort)"; case 0x3315: return "Disconnected during transfer (remote procedure error, e.g. unsuccessful repetition of T.30 commands)"; case 0x3316: return "Disconnected during transfer (local tx data underrun)"; case 0x3317: return "Disconnected during transfer (local rx data overflow)"; case 0x3318: return "Disconnected during transfer (local abort)"; case 0x3319: return "Illegal parameter coding (e.g. 
SFF coding error)"; /*-- disconnect causes from the network according to ETS 300 102-1/Q.931 -----*/ case 0x3481: return "Unallocated (unassigned) number"; case 0x3482: return "No route to specified transit network"; case 0x3483: return "No route to destination"; case 0x3486: return "Channel unacceptable"; case 0x3487: return "Call awarded and being delivered in an established channel"; case 0x3490: return "Normal call clearing"; case 0x3491: return "User busy"; case 0x3492: return "No user responding"; case 0x3493: return "No answer from user (user alerted)"; case 0x3495: return "Call rejected"; case 0x3496: return "Number changed"; case 0x349A: return "Non-selected user clearing"; case 0x349B: return "Destination out of order"; case 0x349C: return "Invalid number format"; case 0x349D: return "Facility rejected"; case 0x349E: return "Response to STATUS ENQUIRY"; case 0x349F: return "Normal, unspecified"; case 0x34A2: return "No circuit / channel available"; case 0x34A6: return "Network out of order"; case 0x34A9: return "Temporary failure"; case 0x34AA: return "Switching equipment congestion"; case 0x34AB: return "Access information discarded"; case 0x34AC: return "Requested circuit / channel not available"; case 0x34AF: return "Resources unavailable, unspecified"; case 0x34B1: return "Quality of service unavailable"; case 0x34B2: return "Requested facility not subscribed"; case 0x34B9: return "Bearer capability not authorized"; case 0x34BA: return "Bearer capability not presently available"; case 0x34BF: return "Service or option not available, unspecified"; case 0x34C1: return "Bearer capability not implemented"; case 0x34C2: return "Channel type not implemented"; case 0x34C5: return "Requested facility not implemented"; case 0x34C6: return "Only restricted digital information bearer capability is available"; case 0x34CF: return "Service or option not implemented, unspecified"; case 0x34D1: return "Invalid call reference value"; case 0x34D2: return "Identified 
channel does not exist"; case 0x34D3: return "A suspended call exists, but this call identity does not"; case 0x34D4: return "Call identity in use"; case 0x34D5: return "No call suspended"; case 0x34D6: return "Call having the requested call identity has been cleared"; case 0x34D8: return "Incompatible destination"; case 0x34DB: return "Invalid transit network selection"; case 0x34DF: return "Invalid message, unspecified"; case 0x34E0: return "Mandatory information element is missing"; case 0x34E1: return "Message type non-existent or not implemented"; case 0x34E2: return "Message not compatible with call state or message type non-existent or not implemented"; case 0x34E3: return "Information element non-existent or not implemented"; case 0x34E4: return "Invalid information element contents"; case 0x34E5: return "Message not compatible with call state"; case 0x34E6: return "Recovery on timer expiry"; case 0x34EF: return "Protocol error, unspecified"; case 0x34FF: return "Interworking, unspecified"; default: return "No additional information"; } } #endif typedef struct { int typ; size_t off; } _cdef; #define _CBYTE 1 #define _CWORD 2 #define _CDWORD 3 #define _CSTRUCT 4 #define _CMSTRUCT 5 #define _CEND 6 static _cdef cdef[] = { /*00 */ {_CEND}, /*01 */ {_CEND}, /*02 */ {_CEND}, /*03 */ {_CDWORD, offsetof(_cmsg, adr.adrController)}, /*04 */ {_CMSTRUCT, offsetof(_cmsg, AdditionalInfo)}, /*05 */ {_CSTRUCT, offsetof(_cmsg, B1configuration)}, /*06 */ {_CWORD, offsetof(_cmsg, B1protocol)}, /*07 */ {_CSTRUCT, offsetof(_cmsg, B2configuration)}, /*08 */ {_CWORD, offsetof(_cmsg, B2protocol)}, /*09 */ {_CSTRUCT, offsetof(_cmsg, B3configuration)}, /*0a */ {_CWORD, offsetof(_cmsg, B3protocol)}, /*0b */ {_CSTRUCT, offsetof(_cmsg, BC)}, /*0c */ {_CSTRUCT, offsetof(_cmsg, BChannelinformation)}, /*0d */ {_CMSTRUCT, offsetof(_cmsg, BProtocol)}, /*0e */ {_CSTRUCT, offsetof(_cmsg, CalledPartyNumber)}, /*0f */ {_CSTRUCT, offsetof(_cmsg, CalledPartySubaddress)}, /*10 */ {_CSTRUCT, 
offsetof(_cmsg, CallingPartyNumber)}, /*11 */ {_CSTRUCT, offsetof(_cmsg, CallingPartySubaddress)}, /*12 */ {_CDWORD, offsetof(_cmsg, CIPmask)}, /*13 */ {_CDWORD, offsetof(_cmsg, CIPmask2)}, /*14 */ {_CWORD, offsetof(_cmsg, CIPValue)}, /*15 */ {_CDWORD, offsetof(_cmsg, Class)}, /*16 */ {_CSTRUCT, offsetof(_cmsg, ConnectedNumber)}, /*17 */ {_CSTRUCT, offsetof(_cmsg, ConnectedSubaddress)}, /*18 */ {_CDWORD, offsetof(_cmsg, Data)}, /*19 */ {_CWORD, offsetof(_cmsg, DataHandle)}, /*1a */ {_CWORD, offsetof(_cmsg, DataLength)}, /*1b */ {_CSTRUCT, offsetof(_cmsg, FacilityConfirmationParameter)}, /*1c */ {_CSTRUCT, offsetof(_cmsg, Facilitydataarray)}, /*1d */ {_CSTRUCT, offsetof(_cmsg, FacilityIndicationParameter)}, /*1e */ {_CSTRUCT, offsetof(_cmsg, FacilityRequestParameter)}, /*1f */ {_CWORD, offsetof(_cmsg, FacilitySelector)}, /*20 */ {_CWORD, offsetof(_cmsg, Flags)}, /*21 */ {_CDWORD, offsetof(_cmsg, Function)}, /*22 */ {_CSTRUCT, offsetof(_cmsg, HLC)}, /*23 */ {_CWORD, offsetof(_cmsg, Info)}, /*24 */ {_CSTRUCT, offsetof(_cmsg, InfoElement)}, /*25 */ {_CDWORD, offsetof(_cmsg, InfoMask)}, /*26 */ {_CWORD, offsetof(_cmsg, InfoNumber)}, /*27 */ {_CSTRUCT, offsetof(_cmsg, Keypadfacility)}, /*28 */ {_CSTRUCT, offsetof(_cmsg, LLC)}, /*29 */ {_CSTRUCT, offsetof(_cmsg, ManuData)}, /*2a */ {_CDWORD, offsetof(_cmsg, ManuID)}, /*2b */ {_CSTRUCT, offsetof(_cmsg, NCPI)}, /*2c */ {_CWORD, offsetof(_cmsg, Reason)}, /*2d */ {_CWORD, offsetof(_cmsg, Reason_B3)}, /*2e */ {_CWORD, offsetof(_cmsg, Reject)}, /*2f */ {_CSTRUCT, offsetof(_cmsg, Useruserdata)} }; static unsigned char *cpars[] = { /* ALERT_REQ */ [0x01] = "\x03\x04\x0c\x27\x2f\x1c\x01\x01", /* CONNECT_REQ */ [0x02] = "\x03\x14\x0e\x10\x0f\x11\x0d\x06\x08\x0a\x05\x07\x09\x01\x0b\x28\x22\x04\x0c\x27\x2f\x1c\x01\x01", /* DISCONNECT_REQ */ [0x04] = "\x03\x04\x0c\x27\x2f\x1c\x01\x01", /* LISTEN_REQ */ [0x05] = "\x03\x25\x12\x13\x10\x11\x01", /* INFO_REQ */ [0x08] = "\x03\x0e\x04\x0c\x27\x2f\x1c\x01\x01", /* FACILITY_REQ */ [0x09] = 
"\x03\x1f\x1e\x01", /* SELECT_B_PROTOCOL_REQ */ [0x0a] = "\x03\x0d\x06\x08\x0a\x05\x07\x09\x01\x01", /* CONNECT_B3_REQ */ [0x0b] = "\x03\x2b\x01", /* DISCONNECT_B3_REQ */ [0x0d] = "\x03\x2b\x01", /* DATA_B3_REQ */ [0x0f] = "\x03\x18\x1a\x19\x20\x01", /* RESET_B3_REQ */ [0x10] = "\x03\x2b\x01", /* ALERT_CONF */ [0x13] = "\x03\x23\x01", /* CONNECT_CONF */ [0x14] = "\x03\x23\x01", /* DISCONNECT_CONF */ [0x16] = "\x03\x23\x01", /* LISTEN_CONF */ [0x17] = "\x03\x23\x01", /* MANUFACTURER_REQ */ [0x18] = "\x03\x2a\x15\x21\x29\x01", /* INFO_CONF */ [0x1a] = "\x03\x23\x01", /* FACILITY_CONF */ [0x1b] = "\x03\x23\x1f\x1b\x01", /* SELECT_B_PROTOCOL_CONF */ [0x1c] = "\x03\x23\x01", /* CONNECT_B3_CONF */ [0x1d] = "\x03\x23\x01", /* DISCONNECT_B3_CONF */ [0x1f] = "\x03\x23\x01", /* DATA_B3_CONF */ [0x21] = "\x03\x19\x23\x01", /* RESET_B3_CONF */ [0x22] = "\x03\x23\x01", /* CONNECT_IND */ [0x26] = "\x03\x14\x0e\x10\x0f\x11\x0b\x28\x22\x04\x0c\x27\x2f\x1c\x01\x01", /* CONNECT_ACTIVE_IND */ [0x27] = "\x03\x16\x17\x28\x01", /* DISCONNECT_IND */ [0x28] = "\x03\x2c\x01", /* MANUFACTURER_CONF */ [0x2a] = "\x03\x2a\x15\x21\x29\x01", /* INFO_IND */ [0x2c] = "\x03\x26\x24\x01", /* FACILITY_IND */ [0x2d] = "\x03\x1f\x1d\x01", /* CONNECT_B3_IND */ [0x2f] = "\x03\x2b\x01", /* CONNECT_B3_ACTIVE_IND */ [0x30] = "\x03\x2b\x01", /* DISCONNECT_B3_IND */ [0x31] = "\x03\x2d\x2b\x01", /* DATA_B3_IND */ [0x33] = "\x03\x18\x1a\x19\x20\x01", /* RESET_B3_IND */ [0x34] = "\x03\x2b\x01", /* CONNECT_B3_T90_ACTIVE_IND */ [0x35] = "\x03\x2b\x01", /* CONNECT_RESP */ [0x38] = "\x03\x2e\x0d\x06\x08\x0a\x05\x07\x09\x01\x16\x17\x28\x04\x0c\x27\x2f\x1c\x01\x01", /* CONNECT_ACTIVE_RESP */ [0x39] = "\x03\x01", /* DISCONNECT_RESP */ [0x3a] = "\x03\x01", /* MANUFACTURER_IND */ [0x3c] = "\x03\x2a\x15\x21\x29\x01", /* INFO_RESP */ [0x3e] = "\x03\x01", /* FACILITY_RESP */ [0x3f] = "\x03\x1f\x01", /* CONNECT_B3_RESP */ [0x41] = "\x03\x2e\x2b\x01", /* CONNECT_B3_ACTIVE_RESP */ [0x42] = "\x03\x01", /* DISCONNECT_B3_RESP */ 
[0x43] = "\x03\x01", /* DATA_B3_RESP */ [0x45] = "\x03\x19\x01", /* RESET_B3_RESP */ [0x46] = "\x03\x01", /* CONNECT_B3_T90_ACTIVE_RESP */ [0x47] = "\x03\x01", /* MANUFACTURER_RESP */ [0x4e] = "\x03\x2a\x15\x21\x29\x01", }; /*-------------------------------------------------------*/ #define byteTLcpy(x,y) *(u8 *)(x)=*(u8 *)(y); #define wordTLcpy(x,y) *(u16 *)(x)=*(u16 *)(y); #define dwordTLcpy(x,y) memcpy(x,y,4); #define structTLcpy(x,y,l) memcpy (x,y,l) #define structTLcpyovl(x,y,l) memmove (x,y,l) #define byteTRcpy(x,y) *(u8 *)(y)=*(u8 *)(x); #define wordTRcpy(x,y) *(u16 *)(y)=*(u16 *)(x); #define dwordTRcpy(x,y) memcpy(y,x,4); #define structTRcpy(x,y,l) memcpy (y,x,l) #define structTRcpyovl(x,y,l) memmove (y,x,l) /*-------------------------------------------------------*/ static unsigned command_2_index(unsigned c, unsigned sc) { if (c & 0x80) c = 0x9 + (c & 0x0f); else if (c <= 0x0f); else if (c == 0x41) c = 0x9 + 0x1; else if (c == 0xff) c = 0x00; return (sc & 3) * (0x9 + 0x9) + c; } /*-------------------------------------------------------*/ #define TYP (cdef[cmsg->par[cmsg->p]].typ) #define OFF (((u8 *)cmsg)+cdef[cmsg->par[cmsg->p]].off) static void jumpcstruct(_cmsg * cmsg) { unsigned layer; for (cmsg->p++, layer = 1; layer;) { /* $$$$$ assert (cmsg->p); */ cmsg->p++; switch (TYP) { case _CMSTRUCT: layer++; break; case _CEND: layer--; break; } } } /*-------------------------------------------------------*/ static void pars_2_message(_cmsg * cmsg) { for (; TYP != _CEND; cmsg->p++) { switch (TYP) { case _CBYTE: byteTLcpy(cmsg->m + cmsg->l, OFF); cmsg->l++; break; case _CWORD: wordTLcpy(cmsg->m + cmsg->l, OFF); cmsg->l += 2; break; case _CDWORD: dwordTLcpy(cmsg->m + cmsg->l, OFF); cmsg->l += 4; break; case _CSTRUCT: if (*(u8 **) OFF == NULL) { *(cmsg->m + cmsg->l) = '\0'; cmsg->l++; } else if (**(_cstruct *) OFF != 0xff) { structTLcpy(cmsg->m + cmsg->l, *(_cstruct *) OFF, 1 + **(_cstruct *) OFF); cmsg->l += 1 + **(_cstruct *) OFF; } else { _cstruct s = 
*(_cstruct *) OFF; structTLcpy(cmsg->m + cmsg->l, s, 3 + *(u16 *) (s + 1)); cmsg->l += 3 + *(u16 *) (s + 1); } break; case _CMSTRUCT: /*----- Metastruktur 0 -----*/ if (*(_cmstruct *) OFF == CAPI_DEFAULT) { *(cmsg->m + cmsg->l) = '\0'; cmsg->l++; jumpcstruct(cmsg); } /*----- Metastruktur wird composed -----*/ else { unsigned _l = cmsg->l; unsigned _ls; cmsg->l++; cmsg->p++; pars_2_message(cmsg); _ls = cmsg->l - _l - 1; if (_ls < 255) (cmsg->m + _l)[0] = (u8) _ls; else { structTLcpyovl(cmsg->m + _l + 3, cmsg->m + _l + 1, _ls); (cmsg->m + _l)[0] = 0xff; wordTLcpy(cmsg->m + _l + 1, &_ls); } } break; } } } /** * capi_cmsg2message() - assemble CAPI 2.0 message from _cmsg structure * @cmsg: _cmsg structure * @msg: buffer for assembled message * * Return value: 0 for success */ unsigned capi_cmsg2message(_cmsg * cmsg, u8 * msg) { cmsg->m = msg; cmsg->l = 8; cmsg->p = 0; cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; pars_2_message(cmsg); wordTLcpy(msg + 0, &cmsg->l); byteTLcpy(cmsg->m + 4, &cmsg->Command); byteTLcpy(cmsg->m + 5, &cmsg->Subcommand); wordTLcpy(cmsg->m + 2, &cmsg->ApplId); wordTLcpy(cmsg->m + 6, &cmsg->Messagenumber); return 0; } /*-------------------------------------------------------*/ static void message_2_pars(_cmsg * cmsg) { for (; TYP != _CEND; cmsg->p++) { switch (TYP) { case _CBYTE: byteTRcpy(cmsg->m + cmsg->l, OFF); cmsg->l++; break; case _CWORD: wordTRcpy(cmsg->m + cmsg->l, OFF); cmsg->l += 2; break; case _CDWORD: dwordTRcpy(cmsg->m + cmsg->l, OFF); cmsg->l += 4; break; case _CSTRUCT: *(u8 **) OFF = cmsg->m + cmsg->l; if (cmsg->m[cmsg->l] != 0xff) cmsg->l += 1 + cmsg->m[cmsg->l]; else cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1); break; case _CMSTRUCT: /*----- Metastruktur 0 -----*/ if (cmsg->m[cmsg->l] == '\0') { *(_cmstruct *) OFF = CAPI_DEFAULT; cmsg->l++; jumpcstruct(cmsg); } else { unsigned _l = cmsg->l; *(_cmstruct *) OFF = CAPI_COMPOSE; cmsg->l = (cmsg->m + _l)[0] == 255 ? 
cmsg->l + 3 : cmsg->l + 1; cmsg->p++; message_2_pars(cmsg); } break; } } } /** * capi_message2cmsg() - disassemble CAPI 2.0 message into _cmsg structure * @cmsg: _cmsg structure * @msg: buffer for assembled message * * Return value: 0 for success */ unsigned capi_message2cmsg(_cmsg * cmsg, u8 * msg) { memset(cmsg, 0, sizeof(_cmsg)); cmsg->m = msg; cmsg->l = 8; cmsg->p = 0; byteTRcpy(cmsg->m + 4, &cmsg->Command); byteTRcpy(cmsg->m + 5, &cmsg->Subcommand); cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; message_2_pars(cmsg); wordTRcpy(msg + 0, &cmsg->l); wordTRcpy(cmsg->m + 2, &cmsg->ApplId); wordTRcpy(cmsg->m + 6, &cmsg->Messagenumber); return 0; } /** * capi_cmsg_header() - initialize header part of _cmsg structure * @cmsg: _cmsg structure * @_ApplId: ApplID field value * @_Command: Command field value * @_Subcommand: Subcommand field value * @_Messagenumber: Message Number field value * @_Controller: Controller/PLCI/NCCI field value * * Return value: 0 for success */ unsigned capi_cmsg_header(_cmsg * cmsg, u16 _ApplId, u8 _Command, u8 _Subcommand, u16 _Messagenumber, u32 _Controller) { memset(cmsg, 0, sizeof(_cmsg)); cmsg->ApplId = _ApplId; cmsg->Command = _Command; cmsg->Subcommand = _Subcommand; cmsg->Messagenumber = _Messagenumber; cmsg->adr.adrController = _Controller; return 0; } /*-------------------------------------------------------*/ static char *mnames[] = { [0x01] = "ALERT_REQ", [0x02] = "CONNECT_REQ", [0x04] = "DISCONNECT_REQ", [0x05] = "LISTEN_REQ", [0x08] = "INFO_REQ", [0x09] = "FACILITY_REQ", [0x0a] = "SELECT_B_PROTOCOL_REQ", [0x0b] = "CONNECT_B3_REQ", [0x0d] = "DISCONNECT_B3_REQ", [0x0f] = "DATA_B3_REQ", [0x10] = "RESET_B3_REQ", [0x13] = "ALERT_CONF", [0x14] = "CONNECT_CONF", [0x16] = "DISCONNECT_CONF", [0x17] = "LISTEN_CONF", [0x18] = "MANUFACTURER_REQ", [0x1a] = "INFO_CONF", [0x1b] = "FACILITY_CONF", [0x1c] = "SELECT_B_PROTOCOL_CONF", [0x1d] = "CONNECT_B3_CONF", [0x1f] = "DISCONNECT_B3_CONF", [0x21] = "DATA_B3_CONF", [0x22] 
= "RESET_B3_CONF", [0x26] = "CONNECT_IND", [0x27] = "CONNECT_ACTIVE_IND", [0x28] = "DISCONNECT_IND", [0x2a] = "MANUFACTURER_CONF", [0x2c] = "INFO_IND", [0x2d] = "FACILITY_IND", [0x2f] = "CONNECT_B3_IND", [0x30] = "CONNECT_B3_ACTIVE_IND", [0x31] = "DISCONNECT_B3_IND", [0x33] = "DATA_B3_IND", [0x34] = "RESET_B3_IND", [0x35] = "CONNECT_B3_T90_ACTIVE_IND", [0x38] = "CONNECT_RESP", [0x39] = "CONNECT_ACTIVE_RESP", [0x3a] = "DISCONNECT_RESP", [0x3c] = "MANUFACTURER_IND", [0x3e] = "INFO_RESP", [0x3f] = "FACILITY_RESP", [0x41] = "CONNECT_B3_RESP", [0x42] = "CONNECT_B3_ACTIVE_RESP", [0x43] = "DISCONNECT_B3_RESP", [0x45] = "DATA_B3_RESP", [0x46] = "RESET_B3_RESP", [0x47] = "CONNECT_B3_T90_ACTIVE_RESP", [0x4e] = "MANUFACTURER_RESP" }; /** * capi_cmd2str() - convert CAPI 2.0 command/subcommand number to name * @cmd: command number * @subcmd: subcommand number * * Return value: static string, NULL if command/subcommand unknown */ char *capi_cmd2str(u8 cmd, u8 subcmd) { return mnames[command_2_index(cmd, subcmd)]; } /*-------------------------------------------------------*/ #ifdef CONFIG_CAPI_TRACE /*-------------------------------------------------------*/ static char *pnames[] = { /*00 */ NULL, /*01 */ NULL, /*02 */ NULL, /*03 */ "Controller/PLCI/NCCI", /*04 */ "AdditionalInfo", /*05 */ "B1configuration", /*06 */ "B1protocol", /*07 */ "B2configuration", /*08 */ "B2protocol", /*09 */ "B3configuration", /*0a */ "B3protocol", /*0b */ "BC", /*0c */ "BChannelinformation", /*0d */ "BProtocol", /*0e */ "CalledPartyNumber", /*0f */ "CalledPartySubaddress", /*10 */ "CallingPartyNumber", /*11 */ "CallingPartySubaddress", /*12 */ "CIPmask", /*13 */ "CIPmask2", /*14 */ "CIPValue", /*15 */ "Class", /*16 */ "ConnectedNumber", /*17 */ "ConnectedSubaddress", /*18 */ "Data32", /*19 */ "DataHandle", /*1a */ "DataLength", /*1b */ "FacilityConfirmationParameter", /*1c */ "Facilitydataarray", /*1d */ "FacilityIndicationParameter", /*1e */ "FacilityRequestParameter", /*1f */ "FacilitySelector", 
/*20 */ "Flags", /*21 */ "Function", /*22 */ "HLC", /*23 */ "Info", /*24 */ "InfoElement", /*25 */ "InfoMask", /*26 */ "InfoNumber", /*27 */ "Keypadfacility", /*28 */ "LLC", /*29 */ "ManuData", /*2a */ "ManuID", /*2b */ "NCPI", /*2c */ "Reason", /*2d */ "Reason_B3", /*2e */ "Reject", /*2f */ "Useruserdata" }; #include <stdarg.h> /*-------------------------------------------------------*/ static _cdebbuf *bufprint(_cdebbuf *cdb, char *fmt,...) { va_list f; size_t n,r; if (!cdb) return NULL; va_start(f, fmt); r = cdb->size - cdb->pos; n = vsnprintf(cdb->p, r, fmt, f); va_end(f); if (n >= r) { /* truncated, need bigger buffer */ size_t ns = 2 * cdb->size; u_char *nb; while ((ns - cdb->pos) <= n) ns *= 2; nb = kmalloc(ns, GFP_ATOMIC); if (!nb) { cdebbuf_free(cdb); return NULL; } memcpy(nb, cdb->buf, cdb->pos); kfree(cdb->buf); nb[cdb->pos] = 0; cdb->buf = nb; cdb->p = cdb->buf + cdb->pos; cdb->size = ns; va_start(f, fmt); r = cdb->size - cdb->pos; n = vsnprintf(cdb->p, r, fmt, f); va_end(f); } cdb->p += n; cdb->pos += n; return cdb; } static _cdebbuf *printstructlen(_cdebbuf *cdb, u8 * m, unsigned len) { unsigned hex = 0; if (!cdb) return NULL; for (; len; len--, m++) if (isalnum(*m) || *m == ' ') { if (hex) cdb = bufprint(cdb, ">"); cdb = bufprint(cdb, "%c", *m); hex = 0; } else { if (!hex) cdb = bufprint(cdb, "<%02x", *m); else cdb = bufprint(cdb, " %02x", *m); hex = 1; } if (hex) cdb = bufprint(cdb, ">"); return cdb; } static _cdebbuf *printstruct(_cdebbuf *cdb, u8 * m) { unsigned len; if (m[0] != 0xff) { len = m[0]; m += 1; } else { len = ((u16 *) (m + 1))[0]; m += 3; } cdb = printstructlen(cdb, m, len); return cdb; } /*-------------------------------------------------------*/ #define NAME (pnames[cmsg->par[cmsg->p]]) static _cdebbuf *protocol_message_2_pars(_cdebbuf *cdb, _cmsg *cmsg, int level) { for (; TYP != _CEND; cmsg->p++) { int slen = 29 + 3 - level; int i; if (!cdb) return NULL; cdb = bufprint(cdb, " "); for (i = 0; i < level - 1; i++) cdb = bufprint(cdb, 
" "); switch (TYP) { case _CBYTE: cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l)); cmsg->l++; break; case _CWORD: cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l)); cmsg->l += 2; break; case _CDWORD: cdb = bufprint(cdb, "%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l)); cmsg->l += 4; break; case _CSTRUCT: cdb = bufprint(cdb, "%-*s = ", slen, NAME); if (cmsg->m[cmsg->l] == '\0') cdb = bufprint(cdb, "default"); else cdb = printstruct(cdb, cmsg->m + cmsg->l); cdb = bufprint(cdb, "\n"); if (cmsg->m[cmsg->l] != 0xff) cmsg->l += 1 + cmsg->m[cmsg->l]; else cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1); break; case _CMSTRUCT: /*----- Metastruktur 0 -----*/ if (cmsg->m[cmsg->l] == '\0') { cdb = bufprint(cdb, "%-*s = default\n", slen, NAME); cmsg->l++; jumpcstruct(cmsg); } else { char *name = NAME; unsigned _l = cmsg->l; cdb = bufprint(cdb, "%-*s\n", slen, name); cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1; cmsg->p++; cdb = protocol_message_2_pars(cdb, cmsg, level + 1); } break; } } return cdb; } /*-------------------------------------------------------*/ static _cdebbuf *g_debbuf; static u_long g_debbuf_lock; static _cmsg *g_cmsg; static _cdebbuf *cdebbuf_alloc(void) { _cdebbuf *cdb; if (likely(!test_and_set_bit(1, &g_debbuf_lock))) { cdb = g_debbuf; goto init; } else cdb = kmalloc(sizeof(_cdebbuf), GFP_ATOMIC); if (!cdb) return NULL; cdb->buf = kmalloc(CDEBUG_SIZE, GFP_ATOMIC); if (!cdb->buf) { kfree(cdb); return NULL; } cdb->size = CDEBUG_SIZE; init: cdb->buf[0] = 0; cdb->p = cdb->buf; cdb->pos = 0; return cdb; } /** * cdebbuf_free() - free CAPI debug buffer * @cdb: buffer to free */ void cdebbuf_free(_cdebbuf *cdb) { if (likely(cdb == g_debbuf)) { test_and_clear_bit(1, &g_debbuf_lock); return; } if (likely(cdb)) kfree(cdb->buf); kfree(cdb); } /** * capi_message2str() - format CAPI 2.0 message for printing * @msg: CAPI 2.0 message * * Allocates a CAPI debug buffer and fills it with a 
printable representation * of the CAPI 2.0 message in @msg. * Return value: allocated debug buffer, NULL on error * The returned buffer should be freed by a call to cdebbuf_free() after use. */ _cdebbuf *capi_message2str(u8 * msg) { _cdebbuf *cdb; _cmsg *cmsg; cdb = cdebbuf_alloc(); if (unlikely(!cdb)) return NULL; if (likely(cdb == g_debbuf)) cmsg = g_cmsg; else cmsg = kmalloc(sizeof(_cmsg), GFP_ATOMIC); if (unlikely(!cmsg)) { cdebbuf_free(cdb); return NULL; } cmsg->m = msg; cmsg->l = 8; cmsg->p = 0; byteTRcpy(cmsg->m + 4, &cmsg->Command); byteTRcpy(cmsg->m + 5, &cmsg->Subcommand); cmsg->par = cpars[command_2_index(cmsg->Command, cmsg->Subcommand)]; cdb = bufprint(cdb, "%-26s ID=%03d #0x%04x LEN=%04d\n", mnames[command_2_index(cmsg->Command, cmsg->Subcommand)], ((unsigned short *) msg)[1], ((unsigned short *) msg)[3], ((unsigned short *) msg)[0]); cdb = protocol_message_2_pars(cdb, cmsg, 1); if (unlikely(cmsg != g_cmsg)) kfree(cmsg); return cdb; } /** * capi_cmsg2str() - format _cmsg structure for printing * @cmsg: _cmsg structure * * Allocates a CAPI debug buffer and fills it with a printable representation * of the CAPI 2.0 message stored in @cmsg by a previous call to * capi_cmsg2message() or capi_message2cmsg(). * Return value: allocated debug buffer, NULL on error * The returned buffer should be freed by a call to cdebbuf_free() after use. 
*/ _cdebbuf *capi_cmsg2str(_cmsg * cmsg) { _cdebbuf *cdb; if (!cmsg->m) return NULL; /* no message */ cdb = cdebbuf_alloc(); if (!cdb) return NULL; cmsg->l = 8; cmsg->p = 0; cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n", mnames[command_2_index(cmsg->Command, cmsg->Subcommand)], ((u16 *) cmsg->m)[1], ((u16 *) cmsg->m)[3], ((u16 *) cmsg->m)[0]); cdb = protocol_message_2_pars(cdb, cmsg, 1); return cdb; } int __init cdebug_init(void) { g_cmsg= kmalloc(sizeof(_cmsg), GFP_KERNEL); if (!g_cmsg) return -ENOMEM; g_debbuf = kmalloc(sizeof(_cdebbuf), GFP_KERNEL); if (!g_debbuf) { kfree(g_cmsg); return -ENOMEM; } g_debbuf->buf = kmalloc(CDEBUG_GSIZE, GFP_KERNEL); if (!g_debbuf->buf) { kfree(g_cmsg); kfree(g_debbuf); return -ENOMEM; } g_debbuf->size = CDEBUG_GSIZE; g_debbuf->buf[0] = 0; g_debbuf->p = g_debbuf->buf; g_debbuf->pos = 0; return 0; } void __exit cdebug_exit(void) { if (g_debbuf) kfree(g_debbuf->buf); kfree(g_debbuf); kfree(g_cmsg); } #else /* !CONFIG_CAPI_TRACE */ static _cdebbuf g_debbuf = {"CONFIG_CAPI_TRACE not enabled", NULL, 0, 0}; _cdebbuf *capi_message2str(u8 * msg) { return &g_debbuf; } _cdebbuf *capi_cmsg2str(_cmsg * cmsg) { return &g_debbuf; } void cdebbuf_free(_cdebbuf *cdb) { } int __init cdebug_init(void) { return 0; } void __exit cdebug_exit(void) { } #endif EXPORT_SYMBOL(cdebbuf_free); EXPORT_SYMBOL(capi_cmsg2message); EXPORT_SYMBOL(capi_message2cmsg); EXPORT_SYMBOL(capi_cmsg_header); EXPORT_SYMBOL(capi_cmd2str); EXPORT_SYMBOL(capi_cmsg2str); EXPORT_SYMBOL(capi_message2str); EXPORT_SYMBOL(capi_info2str);
gpl-2.0
Nothing-Dev/android_kernel_lge_jagnm_lp
drivers/usb/misc/adutux.c
820
24644
/* * adutux - driver for ADU devices from Ontrak Control Systems * This is an experimental driver. Use at your own risk. * This driver is not supported by Ontrak Control Systems. * * Copyright (c) 2003 John Homppi (SCO, leave this notice here) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * derived from the Lego USB Tower driver 0.56: * Copyright (c) 2003 David Glance <davidgsf@sourceforge.net> * 2001 Juergen Stuber <stuber@loria.fr> * that was derived from USB Skeleton driver - 0.5 * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com) * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/mutex.h> #include <asm/uaccess.h> #ifdef CONFIG_USB_DEBUG static int debug = 5; #else static int debug = 1; #endif /* Use our own dbg macro */ #undef dbg #define dbg(lvl, format, arg...) 
\ do { \ if (debug >= lvl) \ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \ } while (0) /* Version Information */ #define DRIVER_VERSION "v0.0.13" #define DRIVER_AUTHOR "John Homppi" #define DRIVER_DESC "adutux (see www.ontrak.net)" /* Module parameters */ module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); /* Define these values to match your device */ #define ADU_VENDOR_ID 0x0a07 #define ADU_PRODUCT_ID 0x0064 /* table of devices that work with this driver */ static const struct usb_device_id device_table[] = { { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+100) }, /* ADU200 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+108) }, /* ADU208 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+118) }, /* ADU218 */ { }/* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, device_table); #ifdef CONFIG_USB_DYNAMIC_MINORS #define ADU_MINOR_BASE 0 #else #define ADU_MINOR_BASE 67 #endif /* we can have up to this number of device plugged in at once */ #define MAX_DEVICES 16 #define COMMAND_TIMEOUT (2*HZ) /* 60 second timeout for a command */ /* * The locking scheme is a vanilla 3-lock: * adu_device.buflock: A spinlock, covers what IRQs touch. * adutux_mutex: A Static lock to cover open_count. It would also cover * any globals, but we don't have them in 2.6. * adu_device.mtx: A mutex to hold across sleepers like copy_from_user. * It covers all of adu_device, except the open_count * and what .buflock covers. 
*/ /* Structure to hold all of our device specific stuff */ struct adu_device { struct mutex mtx; struct usb_device* udev; /* save off the usb device pointer */ struct usb_interface* interface; unsigned int minor; /* the starting minor number for this device */ char serial_number[8]; int open_count; /* number of times this port has been opened */ char* read_buffer_primary; int read_buffer_length; char* read_buffer_secondary; int secondary_head; int secondary_tail; spinlock_t buflock; wait_queue_head_t read_wait; wait_queue_head_t write_wait; char* interrupt_in_buffer; struct usb_endpoint_descriptor* interrupt_in_endpoint; struct urb* interrupt_in_urb; int read_urb_finished; char* interrupt_out_buffer; struct usb_endpoint_descriptor* interrupt_out_endpoint; struct urb* interrupt_out_urb; int out_urb_finished; }; static DEFINE_MUTEX(adutux_mutex); static struct usb_driver adu_driver; static void adu_debug_data(int level, const char *function, int size, const unsigned char *data) { int i; if (debug < level) return; printk(KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size); for (i = 0; i < size; ++i) printk("%.2x ", data[i]); printk("\n"); } /** * adu_abort_transfers * aborts transfers and frees associated data structures */ static void adu_abort_transfers(struct adu_device *dev) { unsigned long flags; dbg(2," %s : enter", __func__); if (dev->udev == NULL) { dbg(1," %s : udev is null", __func__); goto exit; } /* shutdown transfer */ /* XXX Anchor these instead */ spin_lock_irqsave(&dev->buflock, flags); if (!dev->read_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); usb_kill_urb(dev->interrupt_in_urb); } else spin_unlock_irqrestore(&dev->buflock, flags); spin_lock_irqsave(&dev->buflock, flags); if (!dev->out_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); usb_kill_urb(dev->interrupt_out_urb); } else spin_unlock_irqrestore(&dev->buflock, flags); exit: dbg(2," %s : leave", __func__); } static void adu_delete(struct adu_device 
*dev) { dbg(2, "%s enter", __func__); /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); kfree(dev->read_buffer_primary); kfree(dev->read_buffer_secondary); kfree(dev->interrupt_in_buffer); kfree(dev->interrupt_out_buffer); kfree(dev); dbg(2, "%s : leave", __func__); } static void adu_interrupt_in_callback(struct urb *urb) { struct adu_device *dev = urb->context; int status = urb->status; dbg(4," %s : enter, status %d", __func__, status); adu_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); spin_lock(&dev->buflock); if (status != 0) { if ((status != -ENOENT) && (status != -ECONNRESET) && (status != -ESHUTDOWN)) { dbg(1," %s : nonzero status received: %d", __func__, status); } goto exit; } if (urb->actual_length > 0 && dev->interrupt_in_buffer[0] != 0x00) { if (dev->read_buffer_length < (4 * usb_endpoint_maxp(dev->interrupt_in_endpoint)) - (urb->actual_length)) { memcpy (dev->read_buffer_primary + dev->read_buffer_length, dev->interrupt_in_buffer, urb->actual_length); dev->read_buffer_length += urb->actual_length; dbg(2," %s reading %d ", __func__, urb->actual_length); } else { dbg(1," %s : read_buffer overflow", __func__); } } exit: dev->read_urb_finished = 1; spin_unlock(&dev->buflock); /* always wake up so we recover from errors */ wake_up_interruptible(&dev->read_wait); adu_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); dbg(4," %s : leave, status %d", __func__, status); } static void adu_interrupt_out_callback(struct urb *urb) { struct adu_device *dev = urb->context; int status = urb->status; dbg(4," %s : enter, status %d", __func__, status); adu_debug_data(5,__func__, urb->actual_length, urb->transfer_buffer); if (status != 0) { if ((status != -ENOENT) && (status != -ECONNRESET)) { dbg(1, " %s :nonzero status received: %d", __func__, status); } goto exit; } spin_lock(&dev->buflock); dev->out_urb_finished = 1; wake_up(&dev->write_wait); spin_unlock(&dev->buflock); exit: 
adu_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); dbg(4," %s : leave, status %d", __func__, status); } static int adu_open(struct inode *inode, struct file *file) { struct adu_device *dev = NULL; struct usb_interface *interface; int subminor; int retval; dbg(2,"%s : enter", __func__); subminor = iminor(inode); if ((retval = mutex_lock_interruptible(&adutux_mutex))) { dbg(2, "%s : mutex lock failed", __func__); goto exit_no_lock; } interface = usb_find_interface(&adu_driver, subminor); if (!interface) { printk(KERN_ERR "adutux: %s - error, can't find device for " "minor %d\n", __func__, subminor); retval = -ENODEV; goto exit_no_device; } dev = usb_get_intfdata(interface); if (!dev || !dev->udev) { retval = -ENODEV; goto exit_no_device; } /* check that nobody else is using the device */ if (dev->open_count) { retval = -EBUSY; goto exit_no_device; } ++dev->open_count; dbg(2,"%s : open count %d", __func__, dev->open_count); /* save device in the file's private structure */ file->private_data = dev; /* initialize in direction */ dev->read_buffer_length = 0; /* fixup first read by having urb waiting for it */ usb_fill_int_urb(dev->interrupt_in_urb,dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); dev->read_urb_finished = 0; if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL)) dev->read_urb_finished = 1; /* we ignore failure */ /* end of fixup for first read */ /* initialize out direction */ dev->out_urb_finished = 1; retval = 0; exit_no_device: mutex_unlock(&adutux_mutex); exit_no_lock: dbg(2,"%s : leave, return value %d ", __func__, retval); return retval; } static void adu_release_internal(struct adu_device *dev) { dbg(2," %s : enter", __func__); /* decrement our usage count for the device */ --dev->open_count; dbg(2," %s : open count %d", __func__, dev->open_count); if 
(dev->open_count <= 0) { adu_abort_transfers(dev); dev->open_count = 0; } dbg(2," %s : leave", __func__); } static int adu_release(struct inode *inode, struct file *file) { struct adu_device *dev; int retval = 0; dbg(2," %s : enter", __func__); if (file == NULL) { dbg(1," %s : file is NULL", __func__); retval = -ENODEV; goto exit; } dev = file->private_data; if (dev == NULL) { dbg(1," %s : object is NULL", __func__); retval = -ENODEV; goto exit; } mutex_lock(&adutux_mutex); /* not interruptible */ if (dev->open_count <= 0) { dbg(1," %s : device not opened", __func__); retval = -ENODEV; goto unlock; } adu_release_internal(dev); if (dev->udev == NULL) { /* the device was unplugged before the file was released */ if (!dev->open_count) /* ... and we're the last user */ adu_delete(dev); } unlock: mutex_unlock(&adutux_mutex); exit: dbg(2," %s : leave, return value %d", __func__, retval); return retval; } static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, loff_t *ppos) { struct adu_device *dev; size_t bytes_read = 0; size_t bytes_to_read = count; int i; int retval = 0; int timeout = 0; int should_submit = 0; unsigned long flags; DECLARE_WAITQUEUE(wait, current); dbg(2," %s : enter, count = %Zd, file=%p", __func__, count, file); dev = file->private_data; dbg(2," %s : dev=%p", __func__, dev); if (mutex_lock_interruptible(&dev->mtx)) return -ERESTARTSYS; /* verify that the device wasn't unplugged */ if (dev->udev == NULL) { retval = -ENODEV; printk(KERN_ERR "adutux: No device or device unplugged %d\n", retval); goto exit; } /* verify that some data was requested */ if (count == 0) { dbg(1," %s : read request of 0 bytes", __func__); goto exit; } timeout = COMMAND_TIMEOUT; dbg(2," %s : about to start looping", __func__); while (bytes_to_read) { int data_in_secondary = dev->secondary_tail - dev->secondary_head; dbg(2," %s : while, data_in_secondary=%d, status=%d", __func__, data_in_secondary, dev->interrupt_in_urb->status); if (data_in_secondary) { /* 
drain secondary buffer */ int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary; i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount); if (i) { retval = -EFAULT; goto exit; } dev->secondary_head += (amount - i); bytes_read += (amount - i); bytes_to_read -= (amount - i); if (i) { retval = bytes_read ? bytes_read : -EFAULT; goto exit; } } else { /* we check the primary buffer */ spin_lock_irqsave (&dev->buflock, flags); if (dev->read_buffer_length) { /* we secure access to the primary */ char *tmp; dbg(2," %s : swap, read_buffer_length = %d", __func__, dev->read_buffer_length); tmp = dev->read_buffer_secondary; dev->read_buffer_secondary = dev->read_buffer_primary; dev->read_buffer_primary = tmp; dev->secondary_head = 0; dev->secondary_tail = dev->read_buffer_length; dev->read_buffer_length = 0; spin_unlock_irqrestore(&dev->buflock, flags); /* we have a free buffer so use it */ should_submit = 1; } else { /* even the primary was empty - we may need to do IO */ if (!dev->read_urb_finished) { /* somebody is doing IO */ spin_unlock_irqrestore(&dev->buflock, flags); dbg(2," %s : submitted already", __func__); } else { /* we must initiate input */ dbg(2," %s : initiate input", __func__); dev->read_urb_finished = 0; spin_unlock_irqrestore(&dev->buflock, flags); usb_fill_int_urb(dev->interrupt_in_urb,dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL); if (retval) { dev->read_urb_finished = 1; if (retval == -ENOMEM) { retval = bytes_read ? 
bytes_read : -ENOMEM; } dbg(2," %s : submit failed", __func__); goto exit; } } /* we wait for I/O to complete */ set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&dev->read_wait, &wait); spin_lock_irqsave(&dev->buflock, flags); if (!dev->read_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); timeout = schedule_timeout(COMMAND_TIMEOUT); } else { spin_unlock_irqrestore(&dev->buflock, flags); set_current_state(TASK_RUNNING); } remove_wait_queue(&dev->read_wait, &wait); if (timeout <= 0) { dbg(2," %s : timeout", __func__); retval = bytes_read ? bytes_read : -ETIMEDOUT; goto exit; } if (signal_pending(current)) { dbg(2," %s : signal pending", __func__); retval = bytes_read ? bytes_read : -EINTR; goto exit; } } } } retval = bytes_read; /* if the primary buffer is empty then use it */ spin_lock_irqsave(&dev->buflock, flags); if (should_submit && dev->read_urb_finished) { dev->read_urb_finished = 0; spin_unlock_irqrestore(&dev->buflock, flags); usb_fill_int_urb(dev->interrupt_in_urb,dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL) != 0) dev->read_urb_finished = 1; /* we ignore failure */ } else { spin_unlock_irqrestore(&dev->buflock, flags); } exit: /* unlock the device */ mutex_unlock(&dev->mtx); dbg(2," %s : leave, return value %d", __func__, retval); return retval; } static ssize_t adu_write(struct file *file, const __user char *buffer, size_t count, loff_t *ppos) { DECLARE_WAITQUEUE(waita, current); struct adu_device *dev; size_t bytes_written = 0; size_t bytes_to_write; size_t buffer_size; unsigned long flags; int retval; dbg(2," %s : enter, count = %Zd", __func__, count); dev = file->private_data; retval = mutex_lock_interruptible(&dev->mtx); if (retval) goto exit_nolock; /* verify that the device wasn't unplugged */ if 
(dev->udev == NULL) { retval = -ENODEV; printk(KERN_ERR "adutux: No device or device unplugged %d\n", retval); goto exit; } /* verify that we actually have some data to write */ if (count == 0) { dbg(1," %s : write request of 0 bytes", __func__); goto exit; } while (count > 0) { add_wait_queue(&dev->write_wait, &waita); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&dev->buflock, flags); if (!dev->out_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); mutex_unlock(&dev->mtx); if (signal_pending(current)) { dbg(1," %s : interrupted", __func__); set_current_state(TASK_RUNNING); retval = -EINTR; goto exit_onqueue; } if (schedule_timeout(COMMAND_TIMEOUT) == 0) { dbg(1, "%s - command timed out.", __func__); retval = -ETIMEDOUT; goto exit_onqueue; } remove_wait_queue(&dev->write_wait, &waita); retval = mutex_lock_interruptible(&dev->mtx); if (retval) { retval = bytes_written ? bytes_written : retval; goto exit_nolock; } dbg(4," %s : in progress, count = %Zd", __func__, count); } else { spin_unlock_irqrestore(&dev->buflock, flags); set_current_state(TASK_RUNNING); remove_wait_queue(&dev->write_wait, &waita); dbg(4," %s : sending, count = %Zd", __func__, count); /* write the data into interrupt_out_buffer from userspace */ buffer_size = usb_endpoint_maxp(dev->interrupt_out_endpoint); bytes_to_write = count > buffer_size ? 
buffer_size : count; dbg(4," %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd", __func__, buffer_size, count, bytes_to_write); if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) { retval = -EFAULT; goto exit; } /* send off the urb */ usb_fill_int_urb( dev->interrupt_out_urb, dev->udev, usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress), dev->interrupt_out_buffer, bytes_to_write, adu_interrupt_out_callback, dev, dev->interrupt_out_endpoint->bInterval); dev->interrupt_out_urb->actual_length = bytes_to_write; dev->out_urb_finished = 0; retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL); if (retval < 0) { dev->out_urb_finished = 1; dev_err(&dev->udev->dev, "Couldn't submit " "interrupt_out_urb %d\n", retval); goto exit; } buffer += bytes_to_write; count -= bytes_to_write; bytes_written += bytes_to_write; } } mutex_unlock(&dev->mtx); return bytes_written; exit: mutex_unlock(&dev->mtx); exit_nolock: dbg(2," %s : leave, return value %d", __func__, retval); return retval; exit_onqueue: remove_wait_queue(&dev->write_wait, &waita); return retval; } /* file operations needed when we register this driver */ static const struct file_operations adu_fops = { .owner = THIS_MODULE, .read = adu_read, .write = adu_write, .open = adu_open, .release = adu_release, .llseek = noop_llseek, }; /* * usb class driver info in order to get a minor number from the usb core, * and to have the device registered with devfs and the driver core */ static struct usb_class_driver adu_class = { .name = "usb/adutux%d", .fops = &adu_fops, .minor_base = ADU_MINOR_BASE, }; /** * adu_probe * * Called by the usb core when a new device is connected that it thinks * this driver might be interested in. 
*/ static int adu_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct adu_device *dev = NULL; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int retval = -ENODEV; int in_end_size; int out_end_size; int i; dbg(2," %s : enter", __func__); if (udev == NULL) { dev_err(&interface->dev, "udev is NULL.\n"); goto exit; } /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL); if (dev == NULL) { dev_err(&interface->dev, "Out of memory\n"); retval = -ENOMEM; goto exit; } mutex_init(&dev->mtx); spin_lock_init(&dev->buflock); dev->udev = udev; init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); iface_desc = &interface->altsetting[0]; /* set up the endpoint information */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) dev->interrupt_in_endpoint = endpoint; if (usb_endpoint_is_int_out(endpoint)) dev->interrupt_out_endpoint = endpoint; } if (dev->interrupt_in_endpoint == NULL) { dev_err(&interface->dev, "interrupt in endpoint not found\n"); goto error; } if (dev->interrupt_out_endpoint == NULL) { dev_err(&interface->dev, "interrupt out endpoint not found\n"); goto error; } in_end_size = usb_endpoint_maxp(dev->interrupt_in_endpoint); out_end_size = usb_endpoint_maxp(dev->interrupt_out_endpoint); dev->read_buffer_primary = kmalloc((4 * in_end_size), GFP_KERNEL); if (!dev->read_buffer_primary) { dev_err(&interface->dev, "Couldn't allocate read_buffer_primary\n"); retval = -ENOMEM; goto error; } /* debug code prime the buffer */ memset(dev->read_buffer_primary, 'a', in_end_size); memset(dev->read_buffer_primary + in_end_size, 'b', in_end_size); memset(dev->read_buffer_primary + (2 * in_end_size), 'c', in_end_size); memset(dev->read_buffer_primary + (3 * in_end_size), 'd', in_end_size); 
dev->read_buffer_secondary = kmalloc((4 * in_end_size), GFP_KERNEL); if (!dev->read_buffer_secondary) { dev_err(&interface->dev, "Couldn't allocate read_buffer_secondary\n"); retval = -ENOMEM; goto error; } /* debug code prime the buffer */ memset(dev->read_buffer_secondary, 'e', in_end_size); memset(dev->read_buffer_secondary + in_end_size, 'f', in_end_size); memset(dev->read_buffer_secondary + (2 * in_end_size), 'g', in_end_size); memset(dev->read_buffer_secondary + (3 * in_end_size), 'h', in_end_size); dev->interrupt_in_buffer = kmalloc(in_end_size, GFP_KERNEL); if (!dev->interrupt_in_buffer) { dev_err(&interface->dev, "Couldn't allocate interrupt_in_buffer\n"); goto error; } /* debug code prime the buffer */ memset(dev->interrupt_in_buffer, 'i', in_end_size); dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_in_urb) { dev_err(&interface->dev, "Couldn't allocate interrupt_in_urb\n"); goto error; } dev->interrupt_out_buffer = kmalloc(out_end_size, GFP_KERNEL); if (!dev->interrupt_out_buffer) { dev_err(&interface->dev, "Couldn't allocate interrupt_out_buffer\n"); goto error; } dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_out_urb) { dev_err(&interface->dev, "Couldn't allocate interrupt_out_urb\n"); goto error; } if (!usb_string(udev, udev->descriptor.iSerialNumber, dev->serial_number, sizeof(dev->serial_number))) { dev_err(&interface->dev, "Could not retrieve serial number\n"); goto error; } dbg(2," %s : serial_number=%s", __func__, dev->serial_number); /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); retval = usb_register_dev(interface, &adu_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); usb_set_intfdata(interface, NULL); goto error; } dev->minor = interface->minor; /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "ADU%d %s now 
attached to /dev/usb/adutux%d\n", le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, (dev->minor - ADU_MINOR_BASE)); exit: dbg(2," %s : leave, return value %p (dev)", __func__, dev); return retval; error: adu_delete(dev); return retval; } /** * adu_disconnect * * Called by the usb core when the device is removed from the system. */ static void adu_disconnect(struct usb_interface *interface) { struct adu_device *dev; int minor; dbg(2," %s : enter", __func__); dev = usb_get_intfdata(interface); mutex_lock(&dev->mtx); /* not interruptible */ dev->udev = NULL; /* poison */ minor = dev->minor; usb_deregister_dev(interface, &adu_class); mutex_unlock(&dev->mtx); mutex_lock(&adutux_mutex); usb_set_intfdata(interface, NULL); /* if the device is not opened, then we clean up right now */ dbg(2," %s : open count %d", __func__, dev->open_count); if (!dev->open_count) adu_delete(dev); mutex_unlock(&adutux_mutex); dev_info(&interface->dev, "ADU device adutux%d now disconnected\n", (minor - ADU_MINOR_BASE)); dbg(2," %s : leave", __func__); } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver adu_driver = { .name = "adutux", .probe = adu_probe, .disconnect = adu_disconnect, .id_table = device_table, }; module_usb_driver(adu_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
paladin74/linux
drivers/usb/host/whci/debug.c
1332
5495
/*
 * Wireless Host Controller (WHC) debug.
 *
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

/*
 * Per-controller debugfs state: one dentry per read-only file created
 * under the WUSB PAL's debugfs directory by whc_dbg_init().
 */
struct whc_dbg {
	struct dentry *di_f;	/* "di":  device information buffer dump */
	struct dentry *asl_f;	/* "asl": asynchronous schedule list dump */
	struct dentry *pzl_f;	/* "pzl": periodic zone list dump */
};

/*
 * Dump one qset (queue set) to the seq_file: its queue head fields,
 * every hardware TD slot, and the list of software TDs with the URB
 * each belongs to.
 *
 * A qset whose list_node is the last entry of the controller's
 * async_list is printed as "(dummy)" — presumably the terminating
 * placeholder qset; confirm against the ASL management code.
 */
static void qset_print(struct seq_file *s, struct whc_qset *qset)
{
	/* Endpoint type decode for bits [7:5] of qh.info1. */
	static const char *qh_type[] = {
		"ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
	struct whc_std *std;
	struct urb *urb = NULL;
	int i;

	seq_printf(s, "qset %08x", (u32)qset->qset_dma);
	if (&qset->list_node == qset->whc->async_list.prev) {
		seq_printf(s, " (dummy)\n");
	} else {
		/* info1 packs ep number, direction, type and max packet size. */
		seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
			   qset->qh.info1 & 0x0f,
			   (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
			   qh_type[(qset->qh.info1 >> 5) & 0x7],
			   (qset->qh.info1 >> 16) & 0xffff);
	}
	seq_printf(s, " -> %08x\n", (u32)qset->qh.link);
	seq_printf(s, " info: %08x %08x %08x\n",
		   qset->qh.info1, qset->qh.info2, qset->qh.info3);
	seq_printf(s, " sts: %04x errs: %d curwin: %08x\n",
		   qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
	seq_printf(s, " TD: sts: %08x opts: %08x\n",
		   qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);

	/* 'S'/'E' columns mark the current start/end indices of the TD ring. */
	for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
		seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
			   i == qset->td_start ? 'S' : ' ',
			   i == qset->td_end ? 'E' : ' ',
			   i, qset->qtd[i].status, qset->qtd[i].options,
			   (u32)qset->qtd[i].page_list_ptr);
	}
	seq_printf(s, " ntds: %d\n", qset->ntds);

	/*
	 * Walk the software TDs; print the owning URB once per run of
	 * sTDs that share it (urb caches the last one printed).
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (urb != std->urb) {
			urb = std->urb;
			seq_printf(s, " urb %p transferred: %d bytes\n", urb,
				   urb->actual_length);
		}
		/*
		 * With a page list, show the first page-list buffer pointer;
		 * otherwise the direct DMA address.
		 */
		if (std->qtd)
			seq_printf(s, " sTD[%td]: %zu bytes @ %08x\n",
				   std->qtd - &qset->qtd[0],
				   std->len, std->num_pointers ?
				   (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
		else
			seq_printf(s, " sTD[-]: %zd bytes @ %08x\n",
				   std->len, std->num_pointers ?
				   (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
	}
}

/*
 * seq_file show callback for the "di" file: dump every device
 * information buffer entry (availability bitmap plus decoded
 * secure/disable flags, key index and device address).
 */
static int di_print(struct seq_file *s, void *p)
{
	struct whc *whc = s->private;
	int d;

	for (d = 0; d < whc->n_devices; d++) {
		struct di_buf_entry *di = &whc->di_buf[d];

		seq_printf(s, "DI[%d]\n", d);
		seq_printf(s, " availability: %*pb\n",
			   UWB_NUM_MAS, (unsigned long *)di->availability_info);
		seq_printf(s, " %c%c key idx: %d dev addr: %d\n",
			   (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
			   (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
			   (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
			   (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
	}
	return 0;
}

/*
 * seq_file show callback for "asl": print every qset on the
 * asynchronous schedule list.
 *
 * NOTE(review): the list is walked without any locking visible here;
 * presumably acceptable for a debug-only reader — confirm against the
 * qset list locking rules in whcd.h.
 */
static int asl_print(struct seq_file *s, void *p)
{
	struct whc *whc = s->private;
	struct whc_qset *qset;

	list_for_each_entry(qset, &whc->async_list, list_node) {
		qset_print(s, qset);
	}

	return 0;
}

/*
 * seq_file show callback for "pzl": print every qset in each of the
 * five periodic-list buckets, labelled by period index.
 */
static int pzl_print(struct seq_file *s, void *p)
{
	struct whc *whc = s->private;
	struct whc_qset *qset;
	int period;

	for (period = 0; period < 5; period++) {
		seq_printf(s, "Period %d\n", period);
		list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
			qset_print(s, qset);
		}
	}
	return 0;
}

/* open callbacks: bind each file to its show function via single_open(). */
static int di_open(struct inode *inode, struct file *file)
{
	return single_open(file, di_print, inode->i_private);
}

static int asl_open(struct inode *inode, struct file *file)
{
	return single_open(file, asl_print, inode->i_private);
}

static int pzl_open(struct inode *inode, struct file *file)
{
	return single_open(file, pzl_print, inode->i_private);
}

static const struct file_operations di_fops = {
	.open    = di_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
	.owner   = THIS_MODULE,
};

static const struct file_operations asl_fops = {
	.open    = asl_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
	.owner   = THIS_MODULE,
};

static const struct file_operations pzl_fops = {
	.open    = pzl_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
	.owner   = THIS_MODULE,
};

/*
 * Create the debugfs files for this controller.
 *
 * Fails silently (debug output simply unavailable) when the PAL has no
 * debugfs directory or the whc_dbg allocation fails; callers do not
 * depend on the files existing.
 */
void whc_dbg_init(struct whc *whc)
{
	if (whc->wusbhc.pal.debugfs_dir == NULL)
		return;

	whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL);
	if (whc->dbg == NULL)
		return;

	whc->dbg->di_f = debugfs_create_file("di", 0444,
					     whc->wusbhc.pal.debugfs_dir, whc,
					     &di_fops);
	whc->dbg->asl_f = debugfs_create_file("asl", 0444,
					      whc->wusbhc.pal.debugfs_dir, whc,
					      &asl_fops);
	whc->dbg->pzl_f = debugfs_create_file("pzl", 0444,
					      whc->wusbhc.pal.debugfs_dir, whc,
					      &pzl_fops);
}

/*
 * Remove the debugfs files and free the whc_dbg state.  Safe to call
 * when whc_dbg_init() bailed out early (whc->dbg == NULL).
 */
void whc_dbg_clean_up(struct whc *whc)
{
	if (whc->dbg) {
		debugfs_remove(whc->dbg->pzl_f);
		debugfs_remove(whc->dbg->asl_f);
		debugfs_remove(whc->dbg->di_f);
		kfree(whc->dbg);
	}
}
gpl-2.0
CyanogenMod/android_kernel_samsung_smdk4210
arch/x86/mm/memblock.c
2868
8652
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

/*
 * Check for already reserved areas.
 *
 * Trim the candidate range [*addrp, *addrp + *sizep) against every
 * memblock reserved region: the range is shrunk to end before a region
 * that begins inside it, or its start is bumped (aligned up) past a
 * region that covers its start.  Each adjustment restarts the scan
 * (goto again) because the new range may collide with a region already
 * examined.
 *
 * Returns true and updates *addrp/*sizep if the range was changed;
 * returns false with *sizep = 0 if the range is entirely inside a
 * reserved region; returns false leaving the range untouched if it is
 * already free of collisions.
 */
bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
	struct memblock_region *r;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	bool changed = false;

again:
	last = addr + size;
	for_each_memblock(reserved, r) {
		/* A reserved region starts inside the range: cut the tail. */
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
			changed = true;
			goto again;
		}
		/* A reserved region covers the start: move past it. */
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
			size = last - addr;
			changed = true;
			goto again;
		}
		/* Range fully inside a reserved region: nothing usable. */
		if (last <= (r->base + r->size) && addr >= r->base) {
			*sizep = 0;
			return false;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}

/*
 * Find next free range after start, and size is returned in *sizep.
 *
 * Walks the memblock memory map, carves out already reserved pieces
 * with memblock_x86_check_reserved_size(), and returns the base of the
 * first non-empty free range (its size in *sizep), or MEMBLOCK_ERROR
 * if none exists.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		u64 ei_start = r->base;
		u64 ei_last = ei_start + r->size;
		u64 addr;

		addr = round_up(ei_start, align);
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		*sizep = ei_last - addr;
		/* Iterate until the range no longer overlaps a reservation. */
		while (memblock_x86_check_reserved_size(&addr, sizep, align))
			;

		if (*sizep)
			return addr;
	}

	return MEMBLOCK_ERROR;
}

/*
 * Allocate a zeroed scratch array of 'count' struct range entries from
 * memblock (below current_limit).  Panics on failure — this runs at
 * early boot where there is no recovery.
 */
static __init struct range *find_range_array(int count)
{
	u64 end, size, mem;
	struct range *range;

	size = sizeof(struct range) * count;
	end = memblock.current_limit;

	mem = memblock_find_in_range(0, end, size, sizeof(struct range));
	if (mem == MEMBLOCK_ERROR)
		panic("can not find more space for range array");

	/*
	 * This range is temporary, so don't reserve it; it will not be
	 * overlapped because we will not allocate a new buffer before
	 * we discard this one.
	 */
	range = __va(mem);
	memset(range, 0, size);

	return range;
}

/*
 * Subtract every memblock reserved region (rounded out to page
 * boundaries) from the range array.
 *
 * The reserved-region array itself is temporarily freed first so it is
 * not subtracted as a reservation, and re-reserved afterwards; keep
 * that free/reserve pairing intact around the loop.
 */
static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out region array itself at first */
	memblock_free_reserved_regions();

	memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);

	for_each_memblock(reserved, r) {
		memblock_dbg(" [%010llx-%010llx]\n",
			     (u64)r->base, (u64)r->base + r->size - 1);
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		subtract_range(range, az, final_start, final_end);
	}

	/* Put region array back ? */
	memblock_reserve_reserved_regions();
}

/* Accumulator for count_work_fn(). */
struct count_data {
	int nr;
};

/* work_with_active_regions() callback: count one region per call. */
static int __init count_work_fn(unsigned long start_pfn,
				unsigned long end_pfn, void *datax)
{
	struct count_data *data = datax;

	data->nr++;
	return 0;
}

/* Number of early_node_map[] entries belonging to 'nodeid'. */
static int __init count_early_node_map(int nodeid)
{
	struct count_data data;

	data.nr = 0;
	work_with_active_regions(nodeid, count_work_fn, &data);

	return data.nr;
}

/*
 * Build a sorted array of the free (unreserved) PFN ranges of a node,
 * clipped to [start_pfn, end_pfn).  The array is handed back through
 * *rangep; the count is the return value.  Ownership of the array
 * stays with the caller (it lives in unreserved memblock scratch
 * space — see find_range_array()).
 */
int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	int count;
	struct range *range;
	int nr_range;

	/* x2: each subtraction can split an existing range in two. */
	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

	range = find_range_array(count);
	nr_range = 0;

	/*
	 * Use early_node_map[] and memblock.reserved.region to get range
	 * array at first.
	 */
	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
	subtract_range(range, count, 0, start_pfn);
	subtract_range(range, count, end_pfn, -1ULL);

	memblock_x86_subtract_reserved(range, count);
	nr_range = clean_sort_range(range, count);

	*rangep = range;
	return nr_range;
}

/*
 * Convenience wrapper: free ranges of a whole node.  On 32-bit the
 * upper bound is max_low_pfn (lowmem only); otherwise unbounded.
 */
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
	unsigned long end_pfn = -1UL;

#ifdef CONFIG_X86_32
	end_pfn = max_low_pfn;
#endif
	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
}

/*
 * Sum the bytes of memblock memory inside [addr, limit), optionally
 * (get_free) excluding reserved regions.  Memory regions are rounded
 * inward to whole pages; reserved regions are rounded outward, so the
 * result is conservative.  Returns the total in bytes.
 */
static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
	int i, count;
	struct range *range;
	int nr_range;
	u64 final_start, final_end;
	u64 free_size;
	struct memblock_region *r;

	count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;

	range = find_range_array(count);
	nr_range = 0;

	addr = PFN_UP(addr);
	limit = PFN_DOWN(limit);

	/* Collect page-aligned memory ranges intersecting [addr, limit). */
	for_each_memblock(memory, r) {
		final_start = PFN_UP(r->base);
		final_end = PFN_DOWN(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		nr_range = add_range(range, count, nr_range,
				     final_start, final_end);
	}
	subtract_range(range, count, 0, addr);
	subtract_range(range, count, limit, -1ULL);

	/* Subtract memblock.reserved.region in range ? */
	if (!get_free)
		goto sort_and_count_them;
	for_each_memblock(reserved, r) {
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		subtract_range(range, count, final_start, final_end);
	}

sort_and_count_them:
	nr_range = clean_sort_range(range, count);

	free_size = 0;
	for (i = 0; i < nr_range; i++)
		free_size += range[i].end - range[i].start;

	return free_size << PAGE_SHIFT;
}

/* Bytes of free (unreserved) memory in [addr, limit). */
u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, true);
}

/* Bytes of memory (reserved or not) in [addr, limit). */
u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, false);
}

/*
 * Reserve [start, end) with memblock.  Empty ranges are ignored;
 * inverted ranges trigger a one-shot warning and are ignored.  'name'
 * is only used for the debug trace.
 */
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);

	memblock_reserve(start, end - start);
}

/*
 * Free [start, end) back to memblock; same empty/inverted-range
 * handling as memblock_x86_reserve_range().
 */
void __init memblock_x86_free_range(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);

	memblock_free(start, end - start);
}

/*
 * Need to call this function after memblock_x86_register_active_regions,
 * so early_node_map[] is filled already.
 *
 * Tries a node-local allocation first; falls back to a plain memblock
 * search over [start, end).  Returns MEMBLOCK_ERROR if both fail.
 */
u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
{
	u64 addr;
	addr = find_memory_core_early(nid, size, align, start, end);
	if (addr != MEMBLOCK_ERROR)
		return addr;

	/* Fallback, should already have start end within node range */
	return memblock_find_in_range(start, end, size, align);
}

/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 * The entry is rounded inward to page boundaries and clipped to the
 * node's [start_pfn, last_pfn) window.  Returns 1 if a usable region
 * remains, 0 otherwise.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
						    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 *
 * Computed as the span length minus the RAM pages found by walking the
 * memblock memory map within the span.
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
						    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;

	return end - start - ((u64)ram << PAGE_SHIFT);
}
gpl-2.0
roalex/sgs3-kernel
arch/powerpc/platforms/85xx/mpc8536_ds.c
3892
3423
/*
 * MPC8536 DS Board Setup
 *
 * Copyright 2008 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>

#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>

/*
 * Initialise the board's MPIC interrupt controller from the
 * device-tree "open-pic" node.  Missing node or unmappable register
 * space is logged and the function bails out; a failed mpic_alloc()
 * is fatal (BUG_ON) since the machine cannot take interrupts without it.
 */
void __init mpc8536_ds_pic_init(void)
{
	struct mpic *mpic;
	struct resource r;
	struct device_node *np;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np == NULL) {
		printk(KERN_ERR "Could not find open-pic node\n");
		return;
	}

	if (of_address_to_resource(np, 0, &r)) {
		printk(KERN_ERR "Failed to map mpic register space\n");
		of_node_put(np);
		return;
	}

	mpic = mpic_alloc(np, r.start,
			  MPIC_PRIMARY | MPIC_WANTS_RESET |
			  MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
			  0, 256, " OpenPIC ");
	BUG_ON(mpic == NULL);
	of_node_put(np);

	mpic_init(mpic);
}

/*
 * Setup the architecture
 *
 * Registers the PCI/PCIe host bridges found in the device tree and,
 * when RAM extends past the smallest inbound DMA window, enables
 * swiotlb bounce buffering for PCI DMA.
 */
static void __init mpc8536_ds_setup_arch(void)
{
#ifdef CONFIG_PCI
	struct device_node *np;
	struct pci_controller *hose;
#endif
	/* Tightest upper bound of all bridges' inbound DMA windows. */
	dma_addr_t max = 0xffffffff;

	if (ppc_md.progress)
		ppc_md.progress("mpc8536_ds_setup_arch()", 0);

#ifdef CONFIG_PCI
	for_each_node_by_type(np, "pci") {
		if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
		    of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
			struct resource rsrc;
			of_address_to_resource(np, 0, &rsrc);
			/*
			 * Register offset 0x8000 marks the primary bridge
			 * (second arg of fsl_add_bridge) — presumably per
			 * the MPC8536 memory map; confirm against the
			 * reference manual.
			 */
			if ((rsrc.start & 0xfffff) == 0x8000)
				fsl_add_bridge(np, 1);
			else
				fsl_add_bridge(np, 0);

			hose = pci_find_hose_for_OF_device(np);
			max = min(max, hose->dma_window_base_cur +
					hose->dma_window_size);
		}
	}
#endif

#ifdef CONFIG_SWIOTLB
	/* RAM above the DMA window needs bounce buffers. */
	if (memblock_end_of_DRAM() > max) {
		ppc_swiotlb_enable = 1;
		set_pci_dma_ops(&swiotlb_dma_ops);
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
	}
#endif

	printk("MPC8536 DS board from Freescale Semiconductor\n");
}

/* Device-tree buses/devices to probe as platform devices. */
static struct of_device_id __initdata mpc8536_ds_ids[] = {
	{ .type = "soc", },
	{ .compatible = "soc", },
	{ .compatible = "simple-bus", },
	{ .compatible = "gianfar", },
	{},
};

/* Publish matching device-tree nodes on the platform bus. */
static int __init mpc8536_ds_publish_devices(void)
{
	return of_platform_bus_probe(NULL, mpc8536_ds_ids, NULL);
}
machine_device_initcall(mpc8536_ds, mpc8536_ds_publish_devices);

machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);

/*
 * Called very early, device-tree isn't unflattened
 */
static int __init mpc8536_ds_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	/* Claim the machine only for the MPC8536DS compatible string. */
	return of_flat_dt_is_compatible(root, "fsl,mpc8536ds");
}

define_machine(mpc8536_ds) {
	.name			= "MPC8536 DS",
	.probe			= mpc8536_ds_probe,
	.setup_arch		= mpc8536_ds_setup_arch,
	.init_IRQ		= mpc8536_ds_pic_init,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
#endif
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
};
gpl-2.0
EuphoriaOS/android_kernel_samsung_exynos5410
arch/arm/mach-mmp/time.c
4660
4881
/* * linux/arch/arm/mach-mmp/time.c * * Support for clocksource and clockevents * * Copyright (C) 2008 Marvell International Ltd. * All rights reserved. * * 2008-04-11: Jason Chagas <Jason.chagas@marvell.com> * 2008-10-08: Bin Yang <bin.yang@marvell.com> * * The timers module actually includes three timers, each timer with up to * three match comparators. Timer #0 is used here in free-running mode as * the clock source, and match comparator #1 used as clock event device. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/clockchips.h> #include <linux/io.h> #include <linux/irq.h> #include <asm/sched_clock.h> #include <mach/addr-map.h> #include <mach/regs-timers.h> #include <mach/regs-apbc.h> #include <mach/irqs.h> #include <mach/cputype.h> #include <asm/mach/time.h> #include "clock.h" #define TIMERS_VIRT_BASE TIMERS1_VIRT_BASE #define MAX_DELTA (0xfffffffe) #define MIN_DELTA (16) /* * FIXME: the timer needs some delay to stablize the counter capture */ static inline uint32_t timer_read(void) { int delay = 100; __raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(1)); while (delay--) cpu_relax(); return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1)); } static u32 notrace mmp_read_sched_clock(void) { return timer_read(); } static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *c = dev_id; /* * Clear pending interrupt status. */ __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); /* * Disable timer 0. */ __raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER); c->event_handler(c); return IRQ_HANDLED; } static int timer_set_next_event(unsigned long delta, struct clock_event_device *dev) { unsigned long flags; local_irq_save(flags); /* * Disable timer 0. 
*/ __raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER); /* * Clear and enable timer match 0 interrupt. */ __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_IER(0)); /* * Setup new clockevent timer value. */ __raw_writel(delta - 1, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0)); /* * Enable timer 0. */ __raw_writel(0x03, TIMERS_VIRT_BASE + TMR_CER); local_irq_restore(flags); return 0; } static void timer_set_mode(enum clock_event_mode mode, struct clock_event_device *dev) { unsigned long flags; local_irq_save(flags); switch (mode) { case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: /* disable the matching interrupt */ __raw_writel(0x00, TIMERS_VIRT_BASE + TMR_IER(0)); break; case CLOCK_EVT_MODE_RESUME: case CLOCK_EVT_MODE_PERIODIC: break; } local_irq_restore(flags); } static struct clock_event_device ckevt = { .name = "clockevent", .features = CLOCK_EVT_FEAT_ONESHOT, .shift = 32, .rating = 200, .set_next_event = timer_set_next_event, .set_mode = timer_set_mode, }; static cycle_t clksrc_read(struct clocksource *cs) { return timer_read(); } static struct clocksource cksrc = { .name = "clocksource", .rating = 200, .read = clksrc_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static void __init timer_config(void) { uint32_t ccr = __raw_readl(TIMERS_VIRT_BASE + TMR_CCR); __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_CER); /* disable */ ccr &= (cpu_is_mmp2()) ? 
(TMR_CCR_CS_0(0) | TMR_CCR_CS_1(0)) : (TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3)); __raw_writel(ccr, TIMERS_VIRT_BASE + TMR_CCR); /* set timer 0 to periodic mode, and timer 1 to free-running mode */ __raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CMR); __raw_writel(0x1, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* periodic */ __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(0)); /* clear status */ __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0)); __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(1)); /* free-running */ __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(1)); /* clear status */ __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(1)); /* enable timer 1 counter */ __raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CER); } static struct irqaction timer_irq = { .name = "timer", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = timer_interrupt, .dev_id = &ckevt, }; void __init timer_init(int irq) { timer_config(); setup_sched_clock(mmp_read_sched_clock, 32, CLOCK_TICK_RATE); ckevt.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, ckevt.shift); ckevt.max_delta_ns = clockevent_delta2ns(MAX_DELTA, &ckevt); ckevt.min_delta_ns = clockevent_delta2ns(MIN_DELTA, &ckevt); ckevt.cpumask = cpumask_of(0); setup_irq(irq, &timer_irq); clocksource_register_hz(&cksrc, CLOCK_TICK_RATE); clockevents_register_device(&ckevt); }
gpl-2.0
Tegra4/android_kernel_hp_phobos-old
arch/x86/kernel/asm-offsets.c
4916
2155
/* * Generate definitions needed by assembly language modules. * This code generates raw asm output which is post-processed to extract * and format the required data. */ #define COMPILE_OFFSETS #include <linux/crypto.h> #include <linux/sched.h> #include <linux/stddef.h> #include <linux/hardirq.h> #include <linux/suspend.h> #include <linux/kbuild.h> #include <asm/processor.h> #include <asm/thread_info.h> #include <asm/sigframe.h> #include <asm/bootparam.h> #include <asm/suspend.h> #ifdef CONFIG_XEN #include <xen/interface/xen.h> #endif #ifdef CONFIG_X86_32 # include "asm-offsets_32.c" #else # include "asm-offsets_64.c" #endif void common(void) { BLANK(); OFFSET(TI_flags, thread_info, flags); OFFSET(TI_status, thread_info, status); OFFSET(TI_addr_limit, thread_info, addr_limit); OFFSET(TI_preempt_count, thread_info, preempt_count); BLANK(); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); BLANK(); OFFSET(pbe_address, pbe, address); OFFSET(pbe_orig_address, pbe, orig_address); OFFSET(pbe_next, pbe, next); #ifdef CONFIG_PARAVIRT BLANK(); OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); OFFSET(PV_CPU_iret, pv_cpu_ops, iret); OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); #endif #ifdef CONFIG_XEN BLANK(); OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); #endif BLANK(); OFFSET(BP_scratch, boot_params, scratch); OFFSET(BP_loadflags, boot_params, hdr.loadflags); OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); OFFSET(BP_version, boot_params, hdr.version); OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); 
OFFSET(BP_pref_address, boot_params, hdr.pref_address); OFFSET(BP_code32_start, boot_params, hdr.code32_start); }
gpl-2.0
SlimForce/kernel_lge_hammerhead
drivers/misc/fsa9480.c
4916
13017
/* * fsa9480.c - FSA9480 micro USB switch device driver * * Copyright (C) 2010 Samsung Electronics * Minkyu Kang <mk7.kang@samsung.com> * Wonguk Jeong <wonguk.jeong@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/platform_data/fsa9480.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/pm_runtime.h> /* FSA9480 I2C registers */ #define FSA9480_REG_DEVID 0x01 #define FSA9480_REG_CTRL 0x02 #define FSA9480_REG_INT1 0x03 #define FSA9480_REG_INT2 0x04 #define FSA9480_REG_INT1_MASK 0x05 #define FSA9480_REG_INT2_MASK 0x06 #define FSA9480_REG_ADC 0x07 #define FSA9480_REG_TIMING1 0x08 #define FSA9480_REG_TIMING2 0x09 #define FSA9480_REG_DEV_T1 0x0a #define FSA9480_REG_DEV_T2 0x0b #define FSA9480_REG_BTN1 0x0c #define FSA9480_REG_BTN2 0x0d #define FSA9480_REG_CK 0x0e #define FSA9480_REG_CK_INT1 0x0f #define FSA9480_REG_CK_INT2 0x10 #define FSA9480_REG_CK_INTMASK1 0x11 #define FSA9480_REG_CK_INTMASK2 0x12 #define FSA9480_REG_MANSW1 0x13 #define FSA9480_REG_MANSW2 0x14 /* Control */ #define CON_SWITCH_OPEN (1 << 4) #define CON_RAW_DATA (1 << 3) #define CON_MANUAL_SW (1 << 2) #define CON_WAIT (1 << 1) #define CON_INT_MASK (1 << 0) #define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \ CON_MANUAL_SW | CON_WAIT) /* Device Type 1 */ #define DEV_USB_OTG (1 << 7) #define DEV_DEDICATED_CHG (1 << 6) #define DEV_USB_CHG (1 << 5) #define DEV_CAR_KIT (1 << 4) #define DEV_UART (1 << 3) #define DEV_USB (1 << 2) #define DEV_AUDIO_2 (1 << 1) #define DEV_AUDIO_1 (1 << 0) #define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB) #define DEV_T1_UART_MASK (DEV_UART) #define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG) /* Device 
Type 2 */ #define DEV_AV (1 << 6) #define DEV_TTY (1 << 5) #define DEV_PPD (1 << 4) #define DEV_JIG_UART_OFF (1 << 3) #define DEV_JIG_UART_ON (1 << 2) #define DEV_JIG_USB_OFF (1 << 1) #define DEV_JIG_USB_ON (1 << 0) #define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON) #define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON) #define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \ DEV_JIG_UART_OFF | DEV_JIG_UART_ON) /* * Manual Switch * D- [7:5] / D+ [4:2] * 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO */ #define SW_VAUDIO ((4 << 5) | (4 << 2)) #define SW_UART ((3 << 5) | (3 << 2)) #define SW_AUDIO ((2 << 5) | (2 << 2)) #define SW_DHOST ((1 << 5) | (1 << 2)) #define SW_AUTO ((0 << 5) | (0 << 2)) /* Interrupt 1 */ #define INT_DETACH (1 << 1) #define INT_ATTACH (1 << 0) struct fsa9480_usbsw { struct i2c_client *client; struct fsa9480_platform_data *pdata; int dev1; int dev2; int mansw; }; static struct fsa9480_usbsw *chip; static int fsa9480_write_reg(struct i2c_client *client, int reg, int value) { int ret; ret = i2c_smbus_write_byte_data(client, reg, value); if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static int fsa9480_read_reg(struct i2c_client *client, int reg) { int ret; ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static int fsa9480_read_irq(struct i2c_client *client, int *value) { int ret; ret = i2c_smbus_read_i2c_block_data(client, FSA9480_REG_INT1, 2, (u8 *)value); *value &= 0xffff; if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static void fsa9480_set_switch(const char *buf) { struct fsa9480_usbsw *usbsw = chip; struct i2c_client *client = usbsw->client; unsigned int value; unsigned int path = 0; value = fsa9480_read_reg(client, FSA9480_REG_CTRL); if (!strncmp(buf, "VAUDIO", 6)) { path = SW_VAUDIO; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "UART", 4)) { path = 
SW_UART; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "AUDIO", 5)) { path = SW_AUDIO; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "DHOST", 5)) { path = SW_DHOST; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "AUTO", 4)) { path = SW_AUTO; value |= CON_MANUAL_SW; } else { printk(KERN_ERR "Wrong command\n"); return; } usbsw->mansw = path; fsa9480_write_reg(client, FSA9480_REG_MANSW1, path); fsa9480_write_reg(client, FSA9480_REG_CTRL, value); } static ssize_t fsa9480_get_switch(char *buf) { struct fsa9480_usbsw *usbsw = chip; struct i2c_client *client = usbsw->client; unsigned int value; value = fsa9480_read_reg(client, FSA9480_REG_MANSW1); if (value == SW_VAUDIO) return sprintf(buf, "VAUDIO\n"); else if (value == SW_UART) return sprintf(buf, "UART\n"); else if (value == SW_AUDIO) return sprintf(buf, "AUDIO\n"); else if (value == SW_DHOST) return sprintf(buf, "DHOST\n"); else if (value == SW_AUTO) return sprintf(buf, "AUTO\n"); else return sprintf(buf, "%x", value); } static ssize_t fsa9480_show_device(struct device *dev, struct device_attribute *attr, char *buf) { struct fsa9480_usbsw *usbsw = dev_get_drvdata(dev); struct i2c_client *client = usbsw->client; int dev1, dev2; dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); if (!dev1 && !dev2) return sprintf(buf, "NONE\n"); /* USB */ if (dev1 & DEV_T1_USB_MASK || dev2 & DEV_T2_USB_MASK) return sprintf(buf, "USB\n"); /* UART */ if (dev1 & DEV_T1_UART_MASK || dev2 & DEV_T2_UART_MASK) return sprintf(buf, "UART\n"); /* CHARGER */ if (dev1 & DEV_T1_CHARGER_MASK) return sprintf(buf, "CHARGER\n"); /* JIG */ if (dev2 & DEV_T2_JIG_MASK) return sprintf(buf, "JIG\n"); return sprintf(buf, "UNKNOWN\n"); } static ssize_t fsa9480_show_manualsw(struct device *dev, struct device_attribute *attr, char *buf) { return fsa9480_get_switch(buf); } static ssize_t fsa9480_set_manualsw(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { 
fsa9480_set_switch(buf); return count; } static DEVICE_ATTR(device, S_IRUGO, fsa9480_show_device, NULL); static DEVICE_ATTR(switch, S_IRUGO | S_IWUSR, fsa9480_show_manualsw, fsa9480_set_manualsw); static struct attribute *fsa9480_attributes[] = { &dev_attr_device.attr, &dev_attr_switch.attr, NULL }; static const struct attribute_group fsa9480_group = { .attrs = fsa9480_attributes, }; static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw, int intr) { int val1, val2, ctrl; struct fsa9480_platform_data *pdata = usbsw->pdata; struct i2c_client *client = usbsw->client; val1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); val2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); ctrl = fsa9480_read_reg(client, FSA9480_REG_CTRL); dev_info(&client->dev, "intr: 0x%x, dev1: 0x%x, dev2: 0x%x\n", intr, val1, val2); if (!intr) goto out; if (intr & INT_ATTACH) { /* Attached */ /* USB */ if (val1 & DEV_T1_USB_MASK || val2 & DEV_T2_USB_MASK) { if (pdata->usb_cb) pdata->usb_cb(FSA9480_ATTACHED); if (usbsw->mansw) { fsa9480_write_reg(client, FSA9480_REG_MANSW1, usbsw->mansw); } } /* UART */ if (val1 & DEV_T1_UART_MASK || val2 & DEV_T2_UART_MASK) { if (pdata->uart_cb) pdata->uart_cb(FSA9480_ATTACHED); if (!(ctrl & CON_MANUAL_SW)) { fsa9480_write_reg(client, FSA9480_REG_MANSW1, SW_UART); } } /* CHARGER */ if (val1 & DEV_T1_CHARGER_MASK) { if (pdata->charger_cb) pdata->charger_cb(FSA9480_ATTACHED); } /* JIG */ if (val2 & DEV_T2_JIG_MASK) { if (pdata->jig_cb) pdata->jig_cb(FSA9480_ATTACHED); } } else if (intr & INT_DETACH) { /* Detached */ /* USB */ if (usbsw->dev1 & DEV_T1_USB_MASK || usbsw->dev2 & DEV_T2_USB_MASK) { if (pdata->usb_cb) pdata->usb_cb(FSA9480_DETACHED); } /* UART */ if (usbsw->dev1 & DEV_T1_UART_MASK || usbsw->dev2 & DEV_T2_UART_MASK) { if (pdata->uart_cb) pdata->uart_cb(FSA9480_DETACHED); } /* CHARGER */ if (usbsw->dev1 & DEV_T1_CHARGER_MASK) { if (pdata->charger_cb) pdata->charger_cb(FSA9480_DETACHED); } /* JIG */ if (usbsw->dev2 & DEV_T2_JIG_MASK) { if (pdata->jig_cb) 
pdata->jig_cb(FSA9480_DETACHED); } } usbsw->dev1 = val1; usbsw->dev2 = val2; out: ctrl &= ~CON_INT_MASK; fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl); } static irqreturn_t fsa9480_irq_handler(int irq, void *data) { struct fsa9480_usbsw *usbsw = data; struct i2c_client *client = usbsw->client; int intr; /* clear interrupt */ fsa9480_read_irq(client, &intr); /* device detection */ fsa9480_detect_dev(usbsw, intr); return IRQ_HANDLED; } static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw) { struct fsa9480_platform_data *pdata = usbsw->pdata; struct i2c_client *client = usbsw->client; int ret; int intr; unsigned int ctrl = CON_MASK; /* clear interrupt */ fsa9480_read_irq(client, &intr); /* unmask interrupt (attach/detach only) */ fsa9480_write_reg(client, FSA9480_REG_INT1_MASK, 0xfc); fsa9480_write_reg(client, FSA9480_REG_INT2_MASK, 0x1f); usbsw->mansw = fsa9480_read_reg(client, FSA9480_REG_MANSW1); if (usbsw->mansw) ctrl &= ~CON_MANUAL_SW; /* Manual Switching Mode */ fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl); if (pdata && pdata->cfg_gpio) pdata->cfg_gpio(); if (client->irq) { ret = request_threaded_irq(client->irq, NULL, fsa9480_irq_handler, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "fsa9480 micro USB", usbsw); if (ret) { dev_err(&client->dev, "failed to reqeust IRQ\n"); return ret; } if (pdata) device_init_wakeup(&client->dev, pdata->wakeup); } return 0; } static int __devinit fsa9480_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct fsa9480_usbsw *usbsw; int ret = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; usbsw = kzalloc(sizeof(struct fsa9480_usbsw), GFP_KERNEL); if (!usbsw) { dev_err(&client->dev, "failed to allocate driver data\n"); return -ENOMEM; } usbsw->client = client; usbsw->pdata = client->dev.platform_data; chip = usbsw; i2c_set_clientdata(client, usbsw); ret = fsa9480_irq_init(usbsw); if (ret) goto fail1; ret = 
sysfs_create_group(&client->dev.kobj, &fsa9480_group); if (ret) { dev_err(&client->dev, "failed to create fsa9480 attribute group\n"); goto fail2; } /* ADC Detect Time: 500ms */ fsa9480_write_reg(client, FSA9480_REG_TIMING1, 0x6); if (chip->pdata->reset_cb) chip->pdata->reset_cb(); /* device detection */ fsa9480_detect_dev(usbsw, INT_ATTACH); pm_runtime_set_active(&client->dev); return 0; fail2: if (client->irq) free_irq(client->irq, usbsw); fail1: kfree(usbsw); return ret; } static int __devexit fsa9480_remove(struct i2c_client *client) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); if (client->irq) free_irq(client->irq, usbsw); sysfs_remove_group(&client->dev.kobj, &fsa9480_group); device_init_wakeup(&client->dev, 0); kfree(usbsw); return 0; } #ifdef CONFIG_PM static int fsa9480_suspend(struct i2c_client *client, pm_message_t state) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); struct fsa9480_platform_data *pdata = usbsw->pdata; if (device_may_wakeup(&client->dev) && client->irq) enable_irq_wake(client->irq); if (pdata->usb_power) pdata->usb_power(0); return 0; } static int fsa9480_resume(struct i2c_client *client) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); int dev1, dev2; if (device_may_wakeup(&client->dev) && client->irq) disable_irq_wake(client->irq); /* * Clear Pending interrupt. Note that detect_dev does what * the interrupt handler does. So, we don't miss pending and * we reenable interrupt if there is one. */ fsa9480_read_reg(client, FSA9480_REG_INT1); fsa9480_read_reg(client, FSA9480_REG_INT2); dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); /* device detection */ fsa9480_detect_dev(usbsw, (dev1 || dev2) ? 
INT_ATTACH : INT_DETACH); return 0; } #else #define fsa9480_suspend NULL #define fsa9480_resume NULL #endif /* CONFIG_PM */ static const struct i2c_device_id fsa9480_id[] = { {"fsa9480", 0}, {} }; MODULE_DEVICE_TABLE(i2c, fsa9480_id); static struct i2c_driver fsa9480_i2c_driver = { .driver = { .name = "fsa9480", }, .probe = fsa9480_probe, .remove = __devexit_p(fsa9480_remove), .resume = fsa9480_resume, .suspend = fsa9480_suspend, .id_table = fsa9480_id, }; module_i2c_driver(fsa9480_i2c_driver); MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>"); MODULE_DESCRIPTION("FSA9480 USB Switch driver"); MODULE_LICENSE("GPL");
gpl-2.0
zodex/lge-kernel-clean
drivers/net/wan/sealevel.c
7476
8108
/* * Sealevel Systems 4021 driver. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * (c) Copyright 1999, 2001 Alan Cox * (c) Copyright 2001 Red Hat Inc. * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> #include <linux/hdlc.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/slab.h> #include <net/arp.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include "z85230.h" struct slvl_device { struct z8530_channel *chan; int channel; }; struct slvl_board { struct slvl_device dev[2]; struct z8530_dev board; int iobase; }; /* * Network driver support routines */ static inline struct slvl_device* dev_to_chan(struct net_device *dev) { return (struct slvl_device *)dev_to_hdlc(dev)->priv; } /* * Frame receive. Simple for our card as we do HDLC and there * is no funny garbage involved */ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) { /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ skb_trim(skb, skb->len - 2); skb->protocol = hdlc_type_trans(skb, c->netdevice); skb_reset_mac_header(skb); skb->dev = c->netdevice; netif_rx(skb); } /* * We've been placed in the UP state */ static int sealevel_open(struct net_device *d) { struct slvl_device *slvl = dev_to_chan(d); int err = -1; int unit = slvl->channel; /* * Link layer up. 
*/ switch (unit) { case 0: err = z8530_sync_dma_open(d, slvl->chan); break; case 1: err = z8530_sync_open(d, slvl->chan); break; } if (err) return err; err = hdlc_open(d); if (err) { switch (unit) { case 0: z8530_sync_dma_close(d, slvl->chan); break; case 1: z8530_sync_close(d, slvl->chan); break; } return err; } slvl->chan->rx_function = sealevel_input; /* * Go go go */ netif_start_queue(d); return 0; } static int sealevel_close(struct net_device *d) { struct slvl_device *slvl = dev_to_chan(d); int unit = slvl->channel; /* * Discard new frames */ slvl->chan->rx_function = z8530_null_rx; hdlc_close(d); netif_stop_queue(d); switch (unit) { case 0: z8530_sync_dma_close(d, slvl->chan); break; case 1: z8530_sync_close(d, slvl->chan); break; } return 0; } static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) { /* struct slvl_device *slvl=dev_to_chan(d); z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ return hdlc_ioctl(d, ifr, cmd); } /* * Passed network frames, fire them downwind. 
*/ static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) { return z8530_queue_xmit(dev_to_chan(d)->chan, skb); } static int sealevel_attach(struct net_device *dev, unsigned short encoding, unsigned short parity) { if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) return 0; return -EINVAL; } static const struct net_device_ops sealevel_ops = { .ndo_open = sealevel_open, .ndo_stop = sealevel_close, .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = sealevel_ioctl, }; static int slvl_setup(struct slvl_device *sv, int iobase, int irq) { struct net_device *dev = alloc_hdlcdev(sv); if (!dev) return -1; dev_to_hdlc(dev)->attach = sealevel_attach; dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; dev->netdev_ops = &sealevel_ops; dev->base_addr = iobase; dev->irq = irq; if (register_hdlc_device(dev)) { pr_err("unable to register HDLC device\n"); free_netdev(dev); return -1; } sv->chan->netdevice = dev; return 0; } /* * Allocate and setup Sealevel board. */ static __init struct slvl_board *slvl_init(int iobase, int irq, int txdma, int rxdma, int slow) { struct z8530_dev *dev; struct slvl_board *b; /* * Get the needed I/O space */ if (!request_region(iobase, 8, "Sealevel 4021")) { pr_warn("I/O 0x%X already in use\n", iobase); return NULL; } b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); if (!b) goto err_kzalloc; b->dev[0].chan = &b->board.chanA; b->dev[0].channel = 0; b->dev[1].chan = &b->board.chanB; b->dev[1].channel = 1; dev = &b->board; /* * Stuff in the I/O addressing */ dev->active = 0; b->iobase = iobase; /* * Select 8530 delays for the old board */ if (slow) iobase |= Z8530_PORT_SLEEP; dev->chanA.ctrlio = iobase + 1; dev->chanA.dataio = iobase; dev->chanB.ctrlio = iobase + 3; dev->chanB.dataio = iobase + 2; dev->chanA.irqs = &z8530_nop; dev->chanB.irqs = &z8530_nop; /* * Assert DTR enable DMA */ outb(3 | (1 << 7), b->iobase + 4); /* We want a fast IRQ for this device. 
Actually we'd like an even faster IRQ ;) - This is one driver RtLinux is made for */ if (request_irq(irq, z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev) < 0) { pr_warn("IRQ %d already in use\n", irq); goto err_request_irq; } dev->irq = irq; dev->chanA.private = &b->dev[0]; dev->chanB.private = &b->dev[1]; dev->chanA.dev = dev; dev->chanB.dev = dev; dev->chanA.txdma = 3; dev->chanA.rxdma = 1; if (request_dma(dev->chanA.txdma, "SeaLevel (TX)")) goto err_dma_tx; if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)")) goto err_dma_rx; disable_irq(irq); /* * Begin normal initialise */ if (z8530_init(dev) != 0) { pr_err("Z8530 series device not found\n"); enable_irq(irq); goto free_hw; } if (dev->type == Z85C30) { z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); } else { z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); } /* * Now we can take the IRQ */ enable_irq(irq); if (slvl_setup(&b->dev[0], iobase, irq)) goto free_hw; if (slvl_setup(&b->dev[1], iobase, irq)) goto free_netdev0; z8530_describe(dev, "I/O", iobase); dev->active = 1; return b; free_netdev0: unregister_hdlc_device(b->dev[0].chan->netdevice); free_netdev(b->dev[0].chan->netdevice); free_hw: free_dma(dev->chanA.rxdma); err_dma_rx: free_dma(dev->chanA.txdma); err_dma_tx: free_irq(irq, dev); err_request_irq: kfree(b); err_kzalloc: release_region(iobase, 8); return NULL; } static void __exit slvl_shutdown(struct slvl_board *b) { int u; z8530_shutdown(&b->board); for (u = 0; u < 2; u++) { struct net_device *d = b->dev[u].chan->netdevice; unregister_hdlc_device(d); free_netdev(d); } free_irq(b->board.irq, &b->board); free_dma(b->board.chanA.rxdma); free_dma(b->board.chanA.txdma); /* DMA off on the card, drop DTR */ outb(0, b->iobase); release_region(b->iobase, 8); kfree(b); } static int io=0x238; static int txdma=1; static int rxdma=3; static int irq=5; static bool slow=false; 
module_param(io, int, 0); MODULE_PARM_DESC(io, "The I/O base of the Sealevel card"); module_param(txdma, int, 0); MODULE_PARM_DESC(txdma, "Transmit DMA channel"); module_param(rxdma, int, 0); MODULE_PARM_DESC(rxdma, "Receive DMA channel"); module_param(irq, int, 0); MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card"); module_param(slow, bool, 0); MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012"); MODULE_AUTHOR("Alan Cox"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021"); static struct slvl_board *slvl_unit; static int __init slvl_init_module(void) { slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); return slvl_unit ? 0 : -ENODEV; } static void __exit slvl_cleanup_module(void) { if (slvl_unit) slvl_shutdown(slvl_unit); } module_init(slvl_init_module); module_exit(slvl_cleanup_module);
gpl-2.0
LeMaker/linux-sunxi
drivers/input/mouse/vsxxxaa.c
7988
15074
/* * Driver for DEC VSXXX-AA mouse (hockey-puck mouse, ball or two rollers) * DEC VSXXX-GA mouse (rectangular mouse, with ball) * DEC VSXXX-AB tablet (digitizer with hair cross or stylus) * * Copyright (C) 2003-2004 by Jan-Benedict Glaw <jbglaw@lug-owl.de> * * The packet format was initially taken from a patch to GPM which is (C) 2001 * by Karsten Merker <merker@linuxtag.org> * and Maciej W. Rozycki <macro@ds2.pg.gda.pl> * Later on, I had access to the device's documentation (referenced below). */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * Building an adaptor to DE9 / DB25 RS232 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * DISCLAIMER: Use this description AT YOUR OWN RISK! I'll not pay for * anything if you break your mouse, your computer or whatever! * * In theory, this mouse is a simple RS232 device. In practice, it has got * a quite uncommon plug and the requirement to additionally get a power * supply at +5V and -12V. * * If you look at the socket/jack (_not_ at the plug), we use this pin * numbering: * _______ * / 7 6 5 \ * | 4 --- 3 | * \ 2 1 / * ------- * * DEC socket DE9 DB25 Note * 1 (GND) 5 7 - * 2 (RxD) 2 3 - * 3 (TxD) 3 2 - * 4 (-12V) - - Somewhere from the PSU. At ATX, it's * the thin blue wire at pin 12 of the * ATX power connector. Only required for * VSXXX-AA/-GA mice. 
* 5 (+5V) - - PSU (red wires of ATX power connector * on pin 4, 6, 19 or 20) or HDD power * connector (also red wire). * 6 (+12V) - - HDD power connector, yellow wire. Only * required for VSXXX-AB digitizer. * 7 (dev. avail.) - - The mouse shorts this one to pin 1. * This way, the host computer can detect * the mouse. To use it with the adaptor, * simply don't connect this pin. * * So to get a working adaptor, you need to connect the mouse with three * wires to a RS232 port and two or three additional wires for +5V, +12V and * -12V to the PSU. * * Flow specification for the link is 4800, 8o1. * * The mice and tablet are described in "VCB02 Video Subsystem - Technical * Manual", DEC EK-104AA-TM-001. You'll find it at MANX, a search engine * specific for DEC documentation. Try * http://www.vt100.net/manx/details?pn=EK-104AA-TM-001;id=21;cp=1 */ #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "Driver for DEC VSXXX-AA and -GA mice and VSXXX-AB tablet" MODULE_AUTHOR("Jan-Benedict Glaw <jbglaw@lug-owl.de>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #undef VSXXXAA_DEBUG #ifdef VSXXXAA_DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) 
do {} while (0) #endif #define VSXXXAA_INTRO_MASK 0x80 #define VSXXXAA_INTRO_HEAD 0x80 #define IS_HDR_BYTE(x) \ (((x) & VSXXXAA_INTRO_MASK) == VSXXXAA_INTRO_HEAD) #define VSXXXAA_PACKET_MASK 0xe0 #define VSXXXAA_PACKET_REL 0x80 #define VSXXXAA_PACKET_ABS 0xc0 #define VSXXXAA_PACKET_POR 0xa0 #define MATCH_PACKET_TYPE(data, type) \ (((data) & VSXXXAA_PACKET_MASK) == (type)) struct vsxxxaa { struct input_dev *dev; struct serio *serio; #define BUFLEN 15 /* At least 5 is needed for a full tablet packet */ unsigned char buf[BUFLEN]; unsigned char count; unsigned char version; unsigned char country; unsigned char type; char name[64]; char phys[32]; }; static void vsxxxaa_drop_bytes(struct vsxxxaa *mouse, int num) { if (num >= mouse->count) { mouse->count = 0; } else { memmove(mouse->buf, mouse->buf + num - 1, BUFLEN - num); mouse->count -= num; } } static void vsxxxaa_queue_byte(struct vsxxxaa *mouse, unsigned char byte) { if (mouse->count == BUFLEN) { printk(KERN_ERR "%s on %s: Dropping a byte of full buffer.\n", mouse->name, mouse->phys); vsxxxaa_drop_bytes(mouse, 1); } DBG(KERN_INFO "Queueing byte 0x%02x\n", byte); mouse->buf[mouse->count++] = byte; } static void vsxxxaa_detection_done(struct vsxxxaa *mouse) { switch (mouse->type) { case 0x02: strlcpy(mouse->name, "DEC VSXXX-AA/-GA mouse", sizeof(mouse->name)); break; case 0x04: strlcpy(mouse->name, "DEC VSXXX-AB digitizer", sizeof(mouse->name)); break; default: snprintf(mouse->name, sizeof(mouse->name), "unknown DEC pointer device (type = 0x%02x)", mouse->type); break; } printk(KERN_INFO "Found %s version 0x%02x from country 0x%02x on port %s\n", mouse->name, mouse->version, mouse->country, mouse->phys); } /* * Returns number of bytes to be dropped, 0 if packet is okay. 
*/ static int vsxxxaa_check_packet(struct vsxxxaa *mouse, int packet_len) { int i; /* First byte must be a header byte */ if (!IS_HDR_BYTE(mouse->buf[0])) { DBG("vsck: len=%d, 1st=0x%02x\n", packet_len, mouse->buf[0]); return 1; } /* Check all following bytes */ for (i = 1; i < packet_len; i++) { if (IS_HDR_BYTE(mouse->buf[i])) { printk(KERN_ERR "Need to drop %d bytes of a broken packet.\n", i - 1); DBG(KERN_INFO "check: len=%d, b[%d]=0x%02x\n", packet_len, i, mouse->buf[i]); return i - 1; } } return 0; } static inline int vsxxxaa_smells_like_packet(struct vsxxxaa *mouse, unsigned char type, size_t len) { return mouse->count >= len && MATCH_PACKET_TYPE(mouse->buf[0], type); } static void vsxxxaa_handle_REL_packet(struct vsxxxaa *mouse) { struct input_dev *dev = mouse->dev; unsigned char *buf = mouse->buf; int left, middle, right; int dx, dy; /* * Check for normal stream packets. This is three bytes, * with the first byte's 3 MSB set to 100. * * [0]: 1 0 0 SignX SignY Left Middle Right * [1]: 0 dx dx dx dx dx dx dx * [2]: 0 dy dy dy dy dy dy dy */ /* * Low 7 bit of byte 1 are abs(dx), bit 7 is * 0, bit 4 of byte 0 is direction. */ dx = buf[1] & 0x7f; dx *= ((buf[0] >> 4) & 0x01) ? 1 : -1; /* * Low 7 bit of byte 2 are abs(dy), bit 7 is * 0, bit 3 of byte 0 is direction. */ dy = buf[2] & 0x7f; dy *= ((buf[0] >> 3) & 0x01) ? -1 : 1; /* * Get button state. It's the low three bits * (for three buttons) of byte 0. */ left = buf[0] & 0x04; middle = buf[0] & 0x02; right = buf[0] & 0x01; vsxxxaa_drop_bytes(mouse, 3); DBG(KERN_INFO "%s on %s: dx=%d, dy=%d, buttons=%s%s%s\n", mouse->name, mouse->phys, dx, dy, left ? "L" : "l", middle ? "M" : "m", right ? "R" : "r"); /* * Report what we've found so far... 
 */
	input_report_key(dev, BTN_LEFT, left);
	input_report_key(dev, BTN_MIDDLE, middle);
	input_report_key(dev, BTN_RIGHT, right);
	/* relative packets come from a mouse: the pen is never touching */
	input_report_key(dev, BTN_TOUCH, 0);
	input_report_rel(dev, REL_X, dx);
	input_report_rel(dev, REL_Y, dy);
	input_sync(dev);
}

/* Decode a 5-byte absolute-position (tablet) packet and report it. */
static void vsxxxaa_handle_ABS_packet(struct vsxxxaa *mouse)
{
	struct input_dev *dev = mouse->dev;
	unsigned char *buf = mouse->buf;
	int left, middle, right, touch;
	int x, y;

	/*
	 * Tablet position / button packet
	 *
	 * [0]: 1  1  0   B4   B3  B2  B1  Pr
	 * [1]: 0  0  X5  X4   X3  X2  X1  X0
	 * [2]: 0  0  X11 X10  X9  X8  X7  X6
	 * [3]: 0  0  Y5  Y4   Y3  Y2  Y1  Y0
	 * [4]: 0  0  Y11 Y10  Y9  Y8  Y7  Y6
	 */

	/*
	 * Get X/Y position. Y axis needs to be inverted since VSXXX-AB
	 * counts down->top while monitor counts top->bottom.
	 */
	x = ((buf[2] & 0x3f) << 6) | (buf[1] & 0x3f);
	y = ((buf[4] & 0x3f) << 6) | (buf[3] & 0x3f);
	y = 1023 - y;

	/*
	 * Get button state. It's bits <4..1> of byte 0.
	 */
	left = buf[0] & 0x02;
	middle = buf[0] & 0x04;
	right = buf[0] & 0x08;
	touch = buf[0] & 0x10;	/* pen/puck pressed onto the tablet */

	vsxxxaa_drop_bytes(mouse, 5);

	DBG(KERN_INFO "%s on %s: x=%d, y=%d, buttons=%s%s%s%s\n",
	    mouse->name, mouse->phys, x, y,
	    left ? "L" : "l", middle ? "M" : "m",
	    right ? "R" : "r", touch ? "T" : "t");

	/*
	 * Report what we've found so far...
	 */
	input_report_key(dev, BTN_LEFT, left);
	input_report_key(dev, BTN_MIDDLE, middle);
	input_report_key(dev, BTN_RIGHT, right);
	input_report_key(dev, BTN_TOUCH, touch);
	input_report_abs(dev, ABS_X, x);
	input_report_abs(dev, ABS_Y, y);
	input_sync(dev);
}

/* Decode a 4-byte power-on-reset packet: device ID and self-test result. */
static void vsxxxaa_handle_POR_packet(struct vsxxxaa *mouse)
{
	struct input_dev *dev = mouse->dev;
	unsigned char *buf = mouse->buf;
	int left, middle, right;
	unsigned char error;

	/*
	 * Check for Power-On-Reset packets. These are sent out
	 * after plugging the mouse in, or when explicitly
	 * requested by sending 'T'.
	 *
	 * [0]: 1  0  1  0  R3    R2      R1     R0
	 * [1]: 0  M2 M1 M0 D3    D2      D1     D0
	 * [2]: 0  E6 E5 E4 E3    E2      E1     E0
	 * [3]: 0  0  0  0  0     Left    Middle Right
	 *
	 * M: manufacturer location code
	 * R: revision code
	 * E: Error code.
If it's in the range of 0x00..0x1f, only some
 *    minor problem occurred. Errors >= 0x20 are considered bad
 *    and the device may not work properly...
 * D: <0010> == mouse, <0100> == tablet
 */
	mouse->version = buf[0] & 0x0f;
	mouse->country = (buf[1] >> 4) & 0x07;
	mouse->type = buf[1] & 0x0f;
	error = buf[2] & 0x7f;

	/*
	 * Get button state. It's the low three bits
	 * (for three buttons) of byte 0. Maybe even the bit <3>
	 * has some meaning if a tablet is attached.
	 */
	left = buf[0] & 0x04;
	middle = buf[0] & 0x02;
	right = buf[0] & 0x01;

	vsxxxaa_drop_bytes(mouse, 4);
	vsxxxaa_detection_done(mouse);

	if (error <= 0x1f) {
		/* No (serious) error. Report buttons */
		input_report_key(dev, BTN_LEFT, left);
		input_report_key(dev, BTN_MIDDLE, middle);
		input_report_key(dev, BTN_RIGHT, right);
		input_report_key(dev, BTN_TOUCH, 0);
		input_sync(dev);

		if (error != 0)
			printk(KERN_INFO "Your %s on %s reports error=0x%02x\n",
				mouse->name, mouse->phys, error);
	}

	/*
	 * If the mouse was hot-plugged, we need to force differential mode
	 * now... However, give it a second to recover from it's reset.
	 */
	printk(KERN_NOTICE
		"%s on %s: Forcing standard packet format, "
		"incremental streaming mode and 72 samples/sec\n",
		mouse->name, mouse->phys);
	serio_write(mouse->serio, 'S');	/* Standard format */
	mdelay(50);
	serio_write(mouse->serio, 'R');	/* Incremental */
	mdelay(50);
	serio_write(mouse->serio, 'L');	/* 72 samples/sec */
}

/* Consume as many complete packets from the byte queue as possible. */
static void vsxxxaa_parse_buffer(struct vsxxxaa *mouse)
{
	unsigned char *buf = mouse->buf;
	int stray_bytes;

	/*
	 * Parse buffer to death...
	 */
	do {
		/*
		 * Out of sync? Throw away what we don't understand. Each
		 * packet starts with a byte whose bit 7 is set. Unhandled
		 * packets (ie. which we don't know about or simply b0rk3d
		 * data...) will get shifted out of the buffer after some
		 * activity on the mouse.
		 */
		while (mouse->count > 0 && !IS_HDR_BYTE(buf[0])) {
			printk(KERN_ERR "%s on %s: Dropping a byte to regain "
				"sync with mouse data stream...\n",
				mouse->name, mouse->phys);
			vsxxxaa_drop_bytes(mouse, 1);
		}

		/*
		 * Check for packets we know about.
		 */

		if (vsxxxaa_smells_like_packet(mouse, VSXXXAA_PACKET_REL, 3)) {
			/* Check for broken packet */
			stray_bytes = vsxxxaa_check_packet(mouse, 3);
			if (!stray_bytes)
				vsxxxaa_handle_REL_packet(mouse);

		} else if (vsxxxaa_smells_like_packet(mouse,
						      VSXXXAA_PACKET_ABS, 5)) {
			/* Check for broken packet */
			stray_bytes = vsxxxaa_check_packet(mouse, 5);
			if (!stray_bytes)
				vsxxxaa_handle_ABS_packet(mouse);

		} else if (vsxxxaa_smells_like_packet(mouse,
						      VSXXXAA_PACKET_POR, 4)) {
			/* Check for broken packet */
			stray_bytes = vsxxxaa_check_packet(mouse, 4);
			if (!stray_bytes)
				vsxxxaa_handle_POR_packet(mouse);

		} else {
			break; /* No REL, ABS or POR packet found */
		}

		/* broken packet: shift the garbage out and try again */
		if (stray_bytes > 0) {
			printk(KERN_ERR "Dropping %d bytes now...\n",
				stray_bytes);
			vsxxxaa_drop_bytes(mouse, stray_bytes);
		}

	} while (1);
}

/* serio rx callback: queue the byte and (re)parse the buffer. */
static irqreturn_t vsxxxaa_interrupt(struct serio *serio,
				     unsigned char data, unsigned int flags)
{
	struct vsxxxaa *mouse = serio_get_drvdata(serio);

	vsxxxaa_queue_byte(mouse, data);
	vsxxxaa_parse_buffer(mouse);

	return IRQ_HANDLED;
}

/* Tear down the serio connection and free the device. */
static void vsxxxaa_disconnect(struct serio *serio)
{
	struct vsxxxaa *mouse = serio_get_drvdata(serio);

	serio_close(serio);
	serio_set_drvdata(serio, NULL);
	input_unregister_device(mouse->dev);
	kfree(mouse);
}

/* Bind to a newly-detected serio port and register the input device. */
static int vsxxxaa_connect(struct serio *serio, struct serio_driver *drv)
{
	struct vsxxxaa *mouse;
	struct input_dev *input_dev;
	int err = -ENOMEM;

	mouse = kzalloc(sizeof(struct vsxxxaa), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!mouse || !input_dev)
		goto fail1;

	mouse->dev = input_dev;
	mouse->serio = serio;
	/* provisional name until the POR packet identifies the device */
	strlcat(mouse->name, "DEC VSXXX-AA/-GA mouse or VSXXX-AB digitizer",
		sizeof(mouse->name));
	snprintf(mouse->phys, sizeof(mouse->phys), "%s/input0", serio->phys);

	input_dev->name = mouse->name;
input_dev->phys = mouse->phys;
	input_dev->id.bustype = BUS_RS232;
	input_dev->dev.parent = &serio->dev;

	/* Announce capabilities: buttons, relative (mouse), absolute (tablet) */
	__set_bit(EV_KEY, input_dev->evbit);		/* We have buttons */
	__set_bit(EV_REL, input_dev->evbit);
	__set_bit(EV_ABS, input_dev->evbit);
	__set_bit(BTN_LEFT, input_dev->keybit);		/* We have 3 buttons */
	__set_bit(BTN_MIDDLE, input_dev->keybit);
	__set_bit(BTN_RIGHT, input_dev->keybit);
	__set_bit(BTN_TOUCH, input_dev->keybit);	/* ...and Tablet */
	__set_bit(REL_X, input_dev->relbit);
	__set_bit(REL_Y, input_dev->relbit);
	input_set_abs_params(input_dev, ABS_X, 0, 1023, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 0, 1023, 0, 0);

	serio_set_drvdata(serio, mouse);

	err = serio_open(serio, drv);
	if (err)
		goto fail2;

	/*
	 * Request selftest. Standard packet format and differential
	 * mode will be requested after the device ID'ed successfully.
	 */
	serio_write(serio, 'T'); /* Test */

	err = input_register_device(input_dev);
	if (err)
		goto fail3;

	return 0;

	/* unwind in reverse order of acquisition */
 fail3:	serio_close(serio);
 fail2:	serio_set_drvdata(serio, NULL);
 fail1:	input_free_device(input_dev);
	kfree(mouse);
	return err;
}

static struct serio_device_id vsxxaa_serio_ids[] = {
	{
		.type	= SERIO_RS232,
		.proto	= SERIO_VSXXXAA,
		.id	= SERIO_ANY,
		.extra	= SERIO_ANY,
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(serio, vsxxaa_serio_ids);

static struct serio_driver vsxxxaa_drv = {
	.driver		= {
		.name	= "vsxxxaa",
	},
	.description	= DRIVER_DESC,
	.id_table	= vsxxaa_serio_ids,
	.connect	= vsxxxaa_connect,
	.interrupt	= vsxxxaa_interrupt,
	.disconnect	= vsxxxaa_disconnect,
};

static int __init vsxxxaa_init(void)
{
	return serio_register_driver(&vsxxxaa_drv);
}

static void __exit vsxxxaa_exit(void)
{
	serio_unregister_driver(&vsxxxaa_drv);
}

module_init(vsxxxaa_init);
module_exit(vsxxxaa_exit);
gpl-2.0
keyser84/android_kernel_motorola_msm8226
crypto/tea.c
10036
7253
/*
 * Cryptographic API.
 *
 * TEA, XTEA, and XETA crypto alogrithms
 *
 * The TEA and Xtended TEA algorithms were developed by David Wheeler
 * and Roger Needham at the Computer Laboratory of Cambridge University.
 *
 * Due to the order of evaluation in XTEA many people have incorrectly
 * implemented it. XETA (XTEA in the wrong order), exists for
 * compatibility with these implementations.
 *
 * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>

#define TEA_KEY_SIZE		16
#define TEA_BLOCK_SIZE		8
#define TEA_ROUNDS		32
#define TEA_DELTA		0x9e3779b9

#define XTEA_KEY_SIZE		16
#define XTEA_BLOCK_SIZE		8
#define XTEA_ROUNDS		32
#define XTEA_DELTA		0x9e3779b9

/* 128-bit key stored as four host-endian 32-bit words */
struct tea_ctx {
	u32 KEY[4];
};

struct xtea_ctx {
	u32 KEY[4];
};

/* Load the 128-bit little-endian key into host-endian words. */
static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;

	ctx->KEY[0] = le32_to_cpu(key[0]);
	ctx->KEY[1] = le32_to_cpu(key[1]);
	ctx->KEY[2] = le32_to_cpu(key[2]);
	ctx->KEY[3] = le32_to_cpu(key[3]);

	return 0;
}

/* Encrypt one 64-bit block in place of dst with TEA_ROUNDS TEA rounds. */
static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, n, sum = 0;
	u32 k0, k1, k2, k3;
	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
	const __le32 *in = (const __le32 *)src;
	__le32 *out = (__le32 *)dst;

	y = le32_to_cpu(in[0]);
	z = le32_to_cpu(in[1]);

	k0 = ctx->KEY[0];
	k1 = ctx->KEY[1];
	k2 = ctx->KEY[2];
	k3 = ctx->KEY[3];

	n = TEA_ROUNDS;

	while (n-- > 0) {
		sum += TEA_DELTA;
		y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
		z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
	}

	out[0] = cpu_to_le32(y);
	out[1] = cpu_to_le32(z);
}
/* Decrypt one 64-bit TEA block (inverse of tea_encrypt). */
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, n, sum;
	u32 k0, k1, k2, k3;
	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
	const __le32 *in = (const __le32 *)src;
	__le32 *out = (__le32 *)dst;

	y = le32_to_cpu(in[0]);
	z = le32_to_cpu(in[1]);

	k0 = ctx->KEY[0];
	k1 = ctx->KEY[1];
	k2 = ctx->KEY[2];
	k3 = ctx->KEY[3];

	/* start from delta * 32 (== delta << 5) and unwind to zero */
	sum = TEA_DELTA << 5;

	n = TEA_ROUNDS;

	while (n-- > 0) {
		z -= ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
		y -= ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
		sum -= TEA_DELTA;
	}

	out[0] = cpu_to_le32(y);
	out[1] = cpu_to_le32(z);
}

/* Load the 128-bit little-endian XTEA key into host-endian words. */
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;

	ctx->KEY[0] = le32_to_cpu(key[0]);
	ctx->KEY[1] = le32_to_cpu(key[1]);
	ctx->KEY[2] = le32_to_cpu(key[2]);
	ctx->KEY[3] = le32_to_cpu(key[3]);

	return 0;
}

/* Encrypt one 64-bit block with XTEA_ROUNDS XTEA rounds. */
static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, sum = 0;
	u32 limit = XTEA_DELTA * XTEA_ROUNDS;
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
	const __le32 *in = (const __le32 *)src;
	__le32 *out = (__le32 *)dst;

	y = le32_to_cpu(in[0]);
	z = le32_to_cpu(in[1]);

	while (sum != limit) {
		y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
		sum += XTEA_DELTA;
		z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
	}

	out[0] = cpu_to_le32(y);
	out[1] = cpu_to_le32(z);
}

/* Decrypt one 64-bit XTEA block (inverse of xtea_encrypt). */
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, sum;
	/* NOTE: tea_ctx and xtea_ctx share an identical layout, so this
	 * longstanding upstream quirk of using tea_ctx here is harmless. */
	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
	const __le32 *in = (const __le32 *)src;
	__le32 *out = (__le32 *)dst;

	y = le32_to_cpu(in[0]);
	z = le32_to_cpu(in[1]);

	sum = XTEA_DELTA * XTEA_ROUNDS;

	while (sum) {
		z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]);
		sum -= XTEA_DELTA;
		y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
	}

	out[0] = cpu_to_le32(y);
	out[1] = cpu_to_le32(z);
}

/* Encrypt one 64-bit block with XETA (XTEA with the common mis-ordering). */
static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, sum = 0;
	u32
limit = XTEA_DELTA * XTEA_ROUNDS;
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
	const __le32 *in = (const __le32 *)src;
	__le32 *out = (__le32 *)dst;

	y = le32_to_cpu(in[0]);
	z = le32_to_cpu(in[1]);

	while (sum != limit) {
		y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
		sum += XTEA_DELTA;
		z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
	}

	out[0] = cpu_to_le32(y);
	out[1] = cpu_to_le32(z);
}

/* Decrypt one 64-bit XETA block (inverse of xeta_encrypt). */
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, sum;
	/* NOTE: tea_ctx and xtea_ctx share an identical layout, so this
	 * longstanding upstream quirk of using tea_ctx here is harmless. */
	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
	const __le32 *in = (const __le32 *)src;
	__le32 *out = (__le32 *)dst;

	y = le32_to_cpu(in[0]);
	z = le32_to_cpu(in[1]);

	sum = XTEA_DELTA * XTEA_ROUNDS;

	while (sum) {
		z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3];
		sum -= XTEA_DELTA;
		y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
	}

	out[0] = cpu_to_le32(y);
	out[1] = cpu_to_le32(z);
}

static struct crypto_alg tea_alg = {
	.cra_name		=	"tea",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	TEA_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof (struct tea_ctx),
	.cra_alignmask		=	3,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(tea_alg.cra_list),
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	TEA_KEY_SIZE,
	.cia_max_keysize	=	TEA_KEY_SIZE,
	.cia_setkey		= 	tea_setkey,
	.cia_encrypt		=	tea_encrypt,
	.cia_decrypt		=	tea_decrypt } }
};

static struct crypto_alg xtea_alg = {
	.cra_name		=	"xtea",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	XTEA_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof (struct xtea_ctx),
	.cra_alignmask		=	3,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(xtea_alg.cra_list),
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	XTEA_KEY_SIZE,
	.cia_max_keysize	=	XTEA_KEY_SIZE,
	.cia_setkey		= 	xtea_setkey,
	.cia_encrypt		=	xtea_encrypt,
	.cia_decrypt		=	xtea_decrypt } }
};

static struct crypto_alg xeta_alg = {
	.cra_name		=	"xeta",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	XTEA_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof (struct xtea_ctx),
	.cra_alignmask		=	3,
	.cra_module		=	THIS_MODULE,
	.cra_list		=
LIST_HEAD_INIT(xeta_alg.cra_list),
	/*
	 * BUG FIX above: the list head was initialized as
	 * LIST_HEAD_INIT(xtea_alg.cra_list), i.e. pointing into xtea_alg
	 * (copy-paste error). A list head must reference itself.
	 */
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	XTEA_KEY_SIZE,
	.cia_max_keysize	=	XTEA_KEY_SIZE,
	.cia_setkey		= 	xtea_setkey,
	.cia_encrypt		=	xeta_encrypt,
	.cia_decrypt		=	xeta_decrypt } }
};

/* Register all three ciphers; unwind already-registered ones on failure. */
static int __init tea_mod_init(void)
{
	int ret = 0;

	ret = crypto_register_alg(&tea_alg);
	if (ret < 0)
		goto out;

	ret = crypto_register_alg(&xtea_alg);
	if (ret < 0) {
		crypto_unregister_alg(&tea_alg);
		goto out;
	}

	ret = crypto_register_alg(&xeta_alg);
	if (ret < 0) {
		crypto_unregister_alg(&tea_alg);
		crypto_unregister_alg(&xtea_alg);
		goto out;
	}

out:
	return ret;
}

static void __exit tea_mod_fini(void)
{
	crypto_unregister_alg(&tea_alg);
	crypto_unregister_alg(&xtea_alg);
	crypto_unregister_alg(&xeta_alg);
}

MODULE_ALIAS("xtea");
MODULE_ALIAS("xeta");

module_init(tea_mod_init);
module_exit(tea_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TEA, XTEA & XETA Cryptographic Algorithms");
gpl-2.0
cubieboard/CC-A80-kernel-source
drivers/input/joystick/turbografx.c
12084
8111
/* * Copyright (c) 1998-2001 Vojtech Pavlik * * Based on the work of: * Steffen Schwenke */ /* * TurboGraFX parallel port interface driver for Linux. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/parport.h> #include <linux/input.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/slab.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("TurboGraFX parallel port interface driver"); MODULE_LICENSE("GPL"); #define TGFX_MAX_PORTS 3 #define TGFX_MAX_DEVICES 7 struct tgfx_config { int args[TGFX_MAX_DEVICES + 1]; unsigned int nargs; }; static struct tgfx_config tgfx_cfg[TGFX_MAX_PORTS] __initdata; module_param_array_named(map, tgfx_cfg[0].args, int, &tgfx_cfg[0].nargs, 0); MODULE_PARM_DESC(map, "Describes first set of devices (<parport#>,<js1>,<js2>,..<js7>"); module_param_array_named(map2, tgfx_cfg[1].args, int, &tgfx_cfg[1].nargs, 0); MODULE_PARM_DESC(map2, "Describes second set of devices"); module_param_array_named(map3, tgfx_cfg[2].args, int, &tgfx_cfg[2].nargs, 0); MODULE_PARM_DESC(map3, "Describes third set of 
devices");

#define TGFX_REFRESH_TIME	HZ/100	/* 10 ms */

/* status-register bits (axes + trigger) */
#define TGFX_TRIGGER		0x08
#define TGFX_UP			0x10
#define TGFX_DOWN		0x20
#define TGFX_LEFT		0x40
#define TGFX_RIGHT		0x80

/* control-register bits (extra buttons) */
#define TGFX_THUMB		0x02
#define TGFX_THUMB2		0x04
#define TGFX_TOP		0x01
#define TGFX_TOP2		0x08

/* Button codes reported, in the order they are enabled per pad */
static int tgfx_buttons[] = { BTN_TRIGGER, BTN_THUMB, BTN_THUMB2, BTN_TOP, BTN_TOP2 };

/* Per-parallel-port state; each port can drive up to 7 pads. */
static struct tgfx {
	struct pardevice *pd;
	struct timer_list timer;
	struct input_dev *dev[TGFX_MAX_DEVICES];
	char name[TGFX_MAX_DEVICES][64];
	char phys[TGFX_MAX_DEVICES][32];
	int sticks;	/* bitmask of configured pads */
	int used;	/* open count; timer runs while > 0 */
	struct mutex sem;
} *tgfx_base[TGFX_MAX_PORTS];

/*
 * tgfx_timer() reads and analyzes TurboGraFX joystick data.
 */

static void tgfx_timer(unsigned long private)
{
	struct tgfx *tgfx = (void *) private;
	struct input_dev *dev;
	int data1, data2, i;

	for (i = 0; i < 7; i++)
		if (tgfx->sticks & (1 << i)) {
			dev = tgfx->dev[i];

			/* select pad i, then sample status + control lines */
			parport_write_data(tgfx->pd->port, ~(1 << i));
			data1 = parport_read_status(tgfx->pd->port) ^ 0x7f;
			data2 = parport_read_control(tgfx->pd->port) ^ 0x04;	/* CAVEAT parport */

			input_report_abs(dev, ABS_X, !!(data1 & TGFX_RIGHT) - !!(data1 & TGFX_LEFT));
			input_report_abs(dev, ABS_Y, !!(data1 & TGFX_DOWN ) - !!(data1 & TGFX_UP  ));

			input_report_key(dev, BTN_TRIGGER, (data1 & TGFX_TRIGGER));
			input_report_key(dev, BTN_THUMB,   (data2 & TGFX_THUMB  ));
			input_report_key(dev, BTN_THUMB2,  (data2 & TGFX_THUMB2 ));
			input_report_key(dev, BTN_TOP,     (data2 & TGFX_TOP    ));
			input_report_key(dev, BTN_TOP2,    (data2 & TGFX_TOP2   ));

			input_sync(dev);
		}

	mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);
}

/* First open on any pad: claim the port and start the polling timer. */
static int tgfx_open(struct input_dev *dev)
{
	struct tgfx *tgfx = input_get_drvdata(dev);
	int err;

	err = mutex_lock_interruptible(&tgfx->sem);
	if (err)
		return err;

	if (!tgfx->used++) {
		parport_claim(tgfx->pd);
		parport_write_control(tgfx->pd->port, 0x04);
		mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);
	}

	mutex_unlock(&tgfx->sem);
	return 0;
}

/* Last close: stop polling and release the parallel port. */
static void tgfx_close(struct input_dev *dev)
{
	struct tgfx *tgfx = input_get_drvdata(dev);
mutex_lock(&tgfx->sem); if (!--tgfx->used) { del_timer_sync(&tgfx->timer); parport_write_control(tgfx->pd->port, 0x00); parport_release(tgfx->pd); } mutex_unlock(&tgfx->sem); } /* * tgfx_probe() probes for tg gamepads. */ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs) { struct tgfx *tgfx; struct input_dev *input_dev; struct parport *pp; struct pardevice *pd; int i, j; int err; pp = parport_find_number(parport); if (!pp) { printk(KERN_ERR "turbografx.c: no such parport\n"); err = -EINVAL; goto err_out; } pd = parport_register_device(pp, "turbografx", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); if (!pd) { printk(KERN_ERR "turbografx.c: parport busy already - lp.o loaded?\n"); err = -EBUSY; goto err_put_pp; } tgfx = kzalloc(sizeof(struct tgfx), GFP_KERNEL); if (!tgfx) { printk(KERN_ERR "turbografx.c: Not enough memory\n"); err = -ENOMEM; goto err_unreg_pardev; } mutex_init(&tgfx->sem); tgfx->pd = pd; init_timer(&tgfx->timer); tgfx->timer.data = (long) tgfx; tgfx->timer.function = tgfx_timer; for (i = 0; i < n_devs; i++) { if (n_buttons[i] < 1) continue; if (n_buttons[i] > 6) { printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]); err = -EINVAL; goto err_unreg_devs; } tgfx->dev[i] = input_dev = input_allocate_device(); if (!input_dev) { printk(KERN_ERR "turbografx.c: Not enough memory for input device\n"); err = -ENOMEM; goto err_unreg_devs; } tgfx->sticks |= (1 << i); snprintf(tgfx->name[i], sizeof(tgfx->name[i]), "TurboGraFX %d-button Multisystem joystick", n_buttons[i]); snprintf(tgfx->phys[i], sizeof(tgfx->phys[i]), "%s/input%d", tgfx->pd->port->name, i); input_dev->name = tgfx->name[i]; input_dev->phys = tgfx->phys[i]; input_dev->id.bustype = BUS_PARPORT; input_dev->id.vendor = 0x0003; input_dev->id.product = n_buttons[i]; input_dev->id.version = 0x0100; input_set_drvdata(input_dev, tgfx); input_dev->open = tgfx_open; input_dev->close = tgfx_close; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); 
input_set_abs_params(input_dev, ABS_X, -1, 1, 0, 0);
		input_set_abs_params(input_dev, ABS_Y, -1, 1, 0, 0);

		/* enable only as many buttons as configured for this pad */
		for (j = 0; j < n_buttons[i]; j++)
			set_bit(tgfx_buttons[j], input_dev->keybit);

		err = input_register_device(tgfx->dev[i]);
		if (err)
			goto err_free_dev;
	}

	if (!tgfx->sticks) {
		printk(KERN_ERR "turbografx.c: No valid devices specified\n");
		err = -EINVAL;
		goto err_free_tgfx;
	}

	parport_put_port(pp);
	return tgfx;

	/* unwind in reverse order of acquisition */
 err_free_dev:
	input_free_device(tgfx->dev[i]);
 err_unreg_devs:
	while (--i >= 0)
		if (tgfx->dev[i])
			input_unregister_device(tgfx->dev[i]);
 err_free_tgfx:
	kfree(tgfx);
 err_unreg_pardev:
	parport_unregister_device(pd);
 err_put_pp:
	parport_put_port(pp);
 err_out:
	return ERR_PTR(err);
}

/* Unregister all pads on one port and free the port state. */
static void tgfx_remove(struct tgfx *tgfx)
{
	int i;

	for (i = 0; i < TGFX_MAX_DEVICES; i++)
		if (tgfx->dev[i])
			input_unregister_device(tgfx->dev[i]);
	parport_unregister_device(tgfx->pd);
	kfree(tgfx);
}

/* Probe every configured port; fail if any configured port fails. */
static int __init tgfx_init(void)
{
	int i;
	int have_dev = 0;
	int err = 0;

	for (i = 0; i < TGFX_MAX_PORTS; i++) {
		if (tgfx_cfg[i].nargs == 0 || tgfx_cfg[i].args[0] < 0)
			continue;

		if (tgfx_cfg[i].nargs < 2) {
			printk(KERN_ERR "turbografx.c: at least one joystick must be specified\n");
			err = -EINVAL;
			break;
		}

		tgfx_base[i] = tgfx_probe(tgfx_cfg[i].args[0],
					  tgfx_cfg[i].args + 1,
					  tgfx_cfg[i].nargs - 1);
		if (IS_ERR(tgfx_base[i])) {
			err = PTR_ERR(tgfx_base[i]);
			break;
		}

		have_dev = 1;
	}

	if (err) {
		while (--i >= 0)
			if (tgfx_base[i])
				tgfx_remove(tgfx_base[i]);
		return err;
	}

	return have_dev ? 0 : -ENODEV;
}

static void __exit tgfx_exit(void)
{
	int i;

	for (i = 0; i < TGFX_MAX_PORTS; i++)
		if (tgfx_base[i])
			tgfx_remove(tgfx_base[i]);
}

module_init(tgfx_init);
module_exit(tgfx_exit);
gpl-2.0
michael1900/falcon_stock
arch/arm/mach-msm/subsystem_restart.c
53
32493
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/io.h> #include <linux/kthread.h> #include <linux/time.h> #include <linux/wakelock.h> #include <linux/suspend.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/idr.h> #include <linux/debugfs.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/of_gpio.h> #include <asm/current.h> #include <mach/socinfo.h> #include <mach/subsystem_notif.h> #include <mach/subsystem_restart.h> #include "smd_private.h" static int enable_debug; module_param(enable_debug, int, S_IRUGO | S_IWUSR); /** * enum p_subsys_state - state of a subsystem (private) * @SUBSYS_NORMAL: subsystem is operating normally * @SUBSYS_CRASHED: subsystem has crashed and hasn't been shutdown * @SUBSYS_RESTARTING: subsystem has been shutdown and is now restarting * * The 'private' side of the subsytem state used to determine where in the * restart process the subsystem is. */ enum p_subsys_state { SUBSYS_NORMAL, SUBSYS_CRASHED, SUBSYS_RESTARTING, }; /** * enum subsys_state - state of a subsystem (public) * @SUBSYS_OFFLINE: subsystem is offline * @SUBSYS_ONLINE: subsystem is online * * The 'public' side of the subsytem state, exposed to userspace. 
 */
enum subsys_state {
	SUBSYS_OFFLINE,
	SUBSYS_ONLINE,
};

/* userspace-visible names for enum subsys_state (sysfs "state") */
static const char * const subsys_states[] = {
	[SUBSYS_OFFLINE] = "OFFLINE",
	[SUBSYS_ONLINE] = "ONLINE",
};

/* userspace-visible names for restart levels (sysfs "restart_level") */
static const char * const restart_levels[] = {
	[RESET_SOC] = "SYSTEM",
	[RESET_SUBSYS_COUPLED] = "RELATED",
	[RESET_IGNORE] = "IGNORE",
};

/**
 * struct subsys_tracking - track state of a subsystem or restart order
 * @p_state: private state of subsystem/order
 * @state: public state of subsystem/order
 * @s_lock: protects p_state
 * @lock: protects subsystem/order callbacks and state
 *
 * Tracks the state of a subsystem or a set of subsystems (restart order).
 * Doing this avoids the need to grab each subsystem's lock and update
 * each subsystems state when restarting an order.
 */
struct subsys_tracking {
	enum p_subsys_state p_state;
	spinlock_t s_lock;
	enum subsys_state state;
	struct mutex lock;
};

/**
 * struct subsys_soc_restart_order - subsystem restart order
 * @subsystem_list: names of subsystems in this restart order
 * @count: number of subsystems in order
 * @track: state tracking and locking
 * @subsys_ptrs: pointers to subsystems in this restart order
 */
struct subsys_soc_restart_order {
	const char * const *subsystem_list;
	int count;

	struct subsys_tracking track;
	struct subsys_device *subsys_ptrs[];	/* flexible array member */
};

/* One entry in the crash-history list walked by do_epoch_check() */
struct restart_log {
	struct timeval time;
	struct subsys_device *dev;
	struct list_head list;
};

/**
 * struct subsys_device - subsystem device
 * @desc: subsystem descriptor
 * @wake_lock: prevents suspend during subsystem_restart()
 * @wlname: name of @wake_lock
 * @work: context for subsystem_restart_wq_func() for this device
 * @track: state tracking and locking
 * @notify: subsys notify handle
 * @dev: device
 * @owner: module that provides @desc
 * @count: reference count of subsystem_get()/subsystem_put()
 * @id: ida
 * @restart_level: restart level (0 - panic, 1 - related, 2 - independent, etc.)
* @restart_order: order of other devices this devices restarts with
 * @dentry: debugfs directory for this device
 * @do_ramdump_on_put: ramdump on subsystem_put() if true
 * @err_ready: completion variable to record error ready from subsystem
 * @crashed: indicates if subsystem has crashed
 */
struct subsys_device {
	struct subsys_desc *desc;
	struct wake_lock wake_lock;
	char wlname[64];
	struct work_struct work;
	struct subsys_tracking track;

	void *notify;
	struct device dev;
	struct module *owner;
	int count;
	int id;
	int restart_level;
	struct subsys_soc_restart_order *restart_order;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
#endif
	bool do_ramdump_on_put;
	struct miscdevice misc_dev;
	char miscdevice_name[32];
	struct completion err_ready;
	bool crashed;
};

static struct subsys_device *to_subsys(struct device *d)
{
	return container_of(d, struct subsys_device, dev);
}

/* sysfs: expose the subsystem's name */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->name);
}

/* sysfs: expose ONLINE/OFFLINE state */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	enum subsys_state state = to_subsys(dev)->track.state;
	return snprintf(buf, PAGE_SIZE, "%s\n", subsys_states[state]);
}

/* sysfs: expose the current restart level as a string */
static ssize_t restart_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int level = to_subsys(dev)->restart_level;
	return snprintf(buf, PAGE_SIZE, "%s\n", restart_levels[level]);
}

/* sysfs: set the restart level by (case-insensitive) level name */
static ssize_t restart_level_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct subsys_device *subsys = to_subsys(dev);
	int i;
	const char *p;

	/* strip a trailing newline from the sysfs write, if any */
	p = memchr(buf, '\n', count);
	if (p)
		count = p - buf;

	for (i = 0; i < ARRAY_SIZE(restart_levels); i++)
		if (!strncasecmp(buf, restart_levels[i], count)) {
			subsys->restart_level = i;
			return count;
		}
	return -EPERM;
}

int subsys_get_restart_level(struct subsys_device *dev)
{
	return dev->restart_level;
}
EXPORT_SYMBOL(subsys_get_restart_level);

static void
subsys_set_state(struct subsys_device *subsys,
			     enum subsys_state state)
{
	unsigned long flags;

	spin_lock_irqsave(&subsys->track.s_lock, flags);
	if (subsys->track.state != state) {
		subsys->track.state = state;
		spin_unlock_irqrestore(&subsys->track.s_lock, flags);
		/* wake pollers of the sysfs "state" attribute */
		sysfs_notify(&subsys->dev.kobj, NULL, "state");
		return;
	}
	spin_unlock_irqrestore(&subsys->track.s_lock, flags);
}

/**
 * subsytem_default_online() - Mark a subsystem as online by default
 * @dev: subsystem to mark as online
 *
 * Marks a subsystem as "online" without increasing the reference count
 * on the subsystem. This is typically used by subsystems that are already
 * online when the kernel boots up.
 */
void subsys_default_online(struct subsys_device *dev)
{
	subsys_set_state(dev, SUBSYS_ONLINE);
}
EXPORT_SYMBOL(subsys_default_online);

static struct device_attribute subsys_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(state),
	__ATTR(restart_level, 0644, restart_level_show, restart_level_store),
	__ATTR_NULL,
};

static struct bus_type subsys_bus_type = {
	.name		= "msm_subsys",
	.dev_attrs	= subsys_attrs,
};

static DEFINE_IDA(subsys_ida);

static int enable_ramdumps;
module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR);

struct workqueue_struct *ssr_wq;

static LIST_HEAD(restart_log_list);
static DEFINE_MUTEX(soc_order_reg_lock);
static DEFINE_MUTEX(restart_log_mutex);

/* SOC specific restart orders go here */
#define DEFINE_SINGLE_RESTART_ORDER(name, order)		\
	static struct subsys_soc_restart_order __##name = {	\
		.subsystem_list = order,			\
		.count = ARRAY_SIZE(order),			\
		.subsys_ptrs = {[ARRAY_SIZE(order)] = NULL}	\
	};							\
	static struct subsys_soc_restart_order *name[] = {	\
		&__##name,					\
	}

/* MSM 8x60 restart ordering info */
static const char * const _order_8x60_all[] = {
	"external_modem",  "modem", "adsp"
};
DEFINE_SINGLE_RESTART_ORDER(orders_8x60_all, _order_8x60_all);

static const char * const _order_8x60_modems[] = {"external_modem", "modem"};
DEFINE_SINGLE_RESTART_ORDER(orders_8x60_modems, _order_8x60_modems);

/*SGLTE
restart ordering info*/
static const char * const order_8960_sglte[] = {"external_modem",
						"modem"};

static struct subsys_soc_restart_order restart_orders_8960_fusion_sglte = {
	.subsystem_list = order_8960_sglte,
	.count = ARRAY_SIZE(order_8960_sglte),
	.subsys_ptrs = {[ARRAY_SIZE(order_8960_sglte)] = NULL}
	};

static struct subsys_soc_restart_order *restart_orders_8960_sglte[] = {
	&restart_orders_8960_fusion_sglte,
	};

/* These will be assigned to one of the sets above after
 * runtime SoC identification.
 */
static struct subsys_soc_restart_order **restart_orders;
static int n_restart_orders;

/* Find the restart order (if any) listing this device and record the
 * device pointer in that order's slot; returns the order or NULL. */
static struct subsys_soc_restart_order *
update_restart_order(struct subsys_device *dev)
{
	int i, j;
	struct subsys_soc_restart_order *order;
	const char *name = dev->desc->name;
	int len = SUBSYS_NAME_MAX_LENGTH;

	mutex_lock(&soc_order_reg_lock);
	for (j = 0; j < n_restart_orders; j++) {
		order = restart_orders[j];
		for (i = 0; i < order->count; i++) {
			if (!strncmp(order->subsystem_list[i], name, len)) {
				order->subsys_ptrs[i] = dev;
				goto found;
			}
		}
	}
	order = NULL;
found:
	mutex_unlock(&soc_order_reg_lock);

	return order;
}

static int modem_restarts;
module_param(modem_restarts, int, 0644);

static int max_restarts;
module_param(max_restarts, int, 0644);

static long max_history_time = 3600;
module_param(max_history_time, long, 0644);

/* Record this crash and blow up if too many crashes occurred within the
 * sliding max_history_time window (disabled when max_restarts == 0). */
static void do_epoch_check(struct subsys_device *dev)
{
	int n = 0;
	struct timeval *time_first = NULL, *curr_time;
	struct restart_log *r_log, *temp;
	static int max_restarts_check;
	static long max_history_time_check;

	mutex_lock(&restart_log_mutex);

	/* snapshot the tunables for a consistent check */
	max_restarts_check = max_restarts;
	max_history_time_check = max_history_time;

	/* Check if epoch checking is enabled */
	if (!max_restarts_check)
		goto out;

	r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
	if (!r_log)
		goto out;
	r_log->dev = dev;
	do_gettimeofday(&r_log->time);
	curr_time = &r_log->time;
	INIT_LIST_HEAD(&r_log->list);

	list_add_tail(&r_log->list, &restart_log_list);

	list_for_each_entry_safe(r_log, temp,
&restart_log_list, list) { if ((curr_time->tv_sec - r_log->time.tv_sec) > max_history_time_check) { pr_debug("Deleted node with restart_time = %ld\n", r_log->time.tv_sec); list_del(&r_log->list); kfree(r_log); continue; } if (!n) { time_first = &r_log->time; pr_debug("Time_first: %ld\n", time_first->tv_sec); } n++; pr_debug("Restart_time: %ld\n", r_log->time.tv_sec); } if (time_first && n >= max_restarts_check) { if ((curr_time->tv_sec - time_first->tv_sec) < max_history_time_check) PR_BUG("Subsystems have crashed %d times in less than " "%ld seconds!", max_restarts_check, max_history_time_check); } out: mutex_unlock(&restart_log_mutex); } static void for_each_subsys_device(struct subsys_device **list, unsigned count, void *data, void (*fn)(struct subsys_device *, void *)) { while (count--) { struct subsys_device *dev = *list++; if (!dev) continue; fn(dev, data); } } static void notify_each_subsys_device(struct subsys_device **list, unsigned count, enum subsys_notif_type notif, void *data) { while (count--) { enum subsys_notif_type type = (enum subsys_notif_type)type; struct subsys_device *dev = *list++; if (!dev) continue; subsys_notif_queue_notification(dev->notify, notif, data); } } static int wait_for_err_ready(struct subsys_device *subsys) { int ret; if (!subsys->desc->err_ready_irq || enable_debug == 1) return 0; ret = wait_for_completion_timeout(&subsys->err_ready, msecs_to_jiffies(10000)); if (!ret) { pr_err("[%s]: Error ready timed out\n", subsys->desc->name); return -ETIMEDOUT; } return 0; } static void subsystem_shutdown(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; pr_info("[%p]: Shutting down %s\n", current, name); if (dev->desc->shutdown(dev->desc) < 0) PR_BUG("subsys-restart: [%p]: Failed to shutdown %s!", current, name); subsys_set_state(dev, SUBSYS_OFFLINE); } static void subsystem_ramdump(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; if (dev->desc->ramdump) if 
(dev->desc->ramdump(enable_ramdumps, dev->desc) < 0) pr_warn("%s[%p]: Ramdump failed.\n", name, current); dev->do_ramdump_on_put = false; } static void subsystem_powerup(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; int ret; pr_info("[%p]: Powering up %s\n", current, name); init_completion(&dev->err_ready); if (dev->desc->powerup(dev->desc) < 0) PR_BUG("[%p]: Powerup error: %s!", current, name); ret = wait_for_err_ready(dev); if (ret) PR_BUG("[%p]: Timed out waiting for error ready: %s!", current, name); subsys_set_state(dev, SUBSYS_ONLINE); } static int __find_subsys(struct device *dev, void *data) { struct subsys_device *subsys = to_subsys(dev); return !strcmp(subsys->desc->name, data); } static struct subsys_device *find_subsys(const char *str) { struct device *dev; if (!str) return NULL; dev = bus_find_device(&subsys_bus_type, NULL, (void *)str, __find_subsys); return dev ? to_subsys(dev) : NULL; } static int subsys_start(struct subsys_device *subsys) { int ret; init_completion(&subsys->err_ready); ret = subsys->desc->start(subsys->desc); if (ret) return ret; if (subsys->desc->is_not_loadable) { subsys_set_state(subsys, SUBSYS_ONLINE); return 0; } ret = wait_for_err_ready(subsys); if (ret) /* pil-boot succeeded but we need to shutdown * the device because error ready timed out. */ subsys->desc->stop(subsys->desc); else subsys_set_state(subsys, SUBSYS_ONLINE); return ret; } static void subsys_stop(struct subsys_device *subsys) { subsys->desc->stop(subsys->desc); subsys_set_state(subsys, SUBSYS_OFFLINE); } static struct subsys_tracking *subsys_get_track(struct subsys_device *subsys) { struct subsys_soc_restart_order *order = subsys->restart_order; if (order) return &order->track; else return &subsys->track; } /** * subsytem_get() - Boot a subsystem * @name: pointer to a string containing the name of the subsystem to boot * * This function returns a pointer if it succeeds. If an error occurs an * ERR_PTR is returned. 
* * If this feature is disable, the value %NULL will be returned. */ void *subsystem_get(const char *name) { struct subsys_device *subsys; struct subsys_device *subsys_d; int ret; void *retval; struct subsys_tracking *track; if (!name) return NULL; subsys = retval = find_subsys(name); if (!subsys) return ERR_PTR(-ENODEV); if (!try_module_get(subsys->owner)) { retval = ERR_PTR(-ENODEV); goto err_module; } subsys_d = subsystem_get(subsys->desc->depends_on); if (IS_ERR(subsys_d)) { retval = subsys_d; goto err_depends; } track = subsys_get_track(subsys); mutex_lock(&track->lock); if (!subsys->count) { ret = subsys_start(subsys); if (ret) { retval = ERR_PTR(ret); goto err_start; } } subsys->count++; mutex_unlock(&track->lock); return retval; err_start: mutex_unlock(&track->lock); subsystem_put(subsys_d); err_depends: module_put(subsys->owner); err_module: put_device(&subsys->dev); return retval; } EXPORT_SYMBOL(subsystem_get); /** * subsystem_put() - Shutdown a subsystem * @peripheral_handle: pointer from a previous call to subsystem_get() * * This doesn't imply that a subsystem is shutdown until all callers of * subsystem_get() have called subsystem_put(). 
*/ void subsystem_put(void *subsystem) { struct subsys_device *subsys_d, *subsys = subsystem; struct subsys_tracking *track; if (IS_ERR_OR_NULL(subsys)) return; track = subsys_get_track(subsys); mutex_lock(&track->lock); if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n", subsys->desc->name, __func__)) goto err_out; if (!--subsys->count) { subsys_stop(subsys); if (subsys->do_ramdump_on_put) subsystem_ramdump(subsys, NULL); } mutex_unlock(&track->lock); subsys_d = find_subsys(subsys->desc->depends_on); if (subsys_d) { subsystem_put(subsys_d); put_device(&subsys_d->dev); } module_put(subsys->owner); put_device(&subsys->dev); return; err_out: mutex_unlock(&track->lock); } EXPORT_SYMBOL(subsystem_put); static void subsystem_restart_wq_func(struct work_struct *work) { struct subsys_device *dev = container_of(work, struct subsys_device, work); struct subsys_device **list; struct subsys_desc *desc = dev->desc; struct subsys_soc_restart_order *order = dev->restart_order; struct subsys_tracking *track; unsigned count; unsigned long flags; /* * It's OK to not take the registration lock at this point. * This is because the subsystem list inside the relevant * restart order is not being traversed. */ if (order) { list = order->subsys_ptrs; count = order->count; track = &order->track; } else { list = &dev; count = 1; track = &dev->track; } mutex_lock(&track->lock); do_epoch_check(dev); /* * It's necessary to take the registration lock because the subsystem * list in the SoC restart order will be traversed and it shouldn't be * changed until _this_ restart sequence completes. 
*/ mutex_lock(&soc_order_reg_lock); pr_debug("[%p]: Starting restart sequence for %s\n", current, desc->name); if (!strncmp(desc->name, "modem", SUBSYS_NAME_MAX_LENGTH)) modem_restarts++; notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL); for_each_subsys_device(list, count, NULL, subsystem_shutdown); notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL); notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION, &enable_ramdumps); spin_lock_irqsave(&track->s_lock, flags); track->p_state = SUBSYS_RESTARTING; spin_unlock_irqrestore(&track->s_lock, flags); /* Collect ram dumps for all subsystems in order here */ for_each_subsys_device(list, count, NULL, subsystem_ramdump); notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL); for_each_subsys_device(list, count, NULL, subsystem_powerup); notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL); pr_info("[%p]: Restart sequence for %s completed.\n", current, desc->name); mutex_unlock(&soc_order_reg_lock); mutex_unlock(&track->lock); spin_lock_irqsave(&track->s_lock, flags); track->p_state = SUBSYS_NORMAL; wake_unlock(&dev->wake_lock); spin_unlock_irqrestore(&track->s_lock, flags); } static void __subsystem_restart_dev(struct subsys_device *dev) { struct subsys_desc *desc = dev->desc; const char *name = dev->desc->name; struct subsys_tracking *track; unsigned long flags; pr_debug("Restarting %s [level=%s]!\n", desc->name, restart_levels[dev->restart_level]); track = subsys_get_track(dev); /* * Allow drivers to call subsystem_restart{_dev}() as many times as * they want up until the point where the subsystem is shutdown. 
*/ spin_lock_irqsave(&track->s_lock, flags); if (track->p_state != SUBSYS_CRASHED) { if (dev->track.state == SUBSYS_ONLINE && track->p_state != SUBSYS_RESTARTING) { track->p_state = SUBSYS_CRASHED; wake_lock(&dev->wake_lock); queue_work(ssr_wq, &dev->work); } else { PR_BUG("Subsystem %s crashed during SSR!", name); } } spin_unlock_irqrestore(&track->s_lock, flags); } int subsystem_restart_dev(struct subsys_device *dev) { const char *name; if (!get_device(&dev->dev)) return -ENODEV; if (!try_module_get(dev->owner)) { put_device(&dev->dev); return -ENODEV; } name = dev->desc->name; /* * If a system reboot/shutdown is underway, ignore subsystem errors. * However, print a message so that we know that a subsystem behaved * unexpectedly here. */ if (system_state == SYSTEM_RESTART || system_state == SYSTEM_POWER_OFF) { pr_err("%s crashed during a system poweroff/shutdown.\n", name); return -EBUSY; } pr_info("Restart sequence requested for %s, restart_level = %s.\n", name, restart_levels[dev->restart_level]); switch (dev->restart_level) { case RESET_SUBSYS_COUPLED: __subsystem_restart_dev(dev); break; case RESET_SOC: PR_BUG("subsys-restart: Resetting the SoC - %s crashed.", name); break; case RESET_IGNORE: default: pr_err("subsys-restart: no action taken for %s\n", name); break; } module_put(dev->owner); put_device(&dev->dev); return 0; } EXPORT_SYMBOL(subsystem_restart_dev); int subsystem_restart(const char *name) { int ret; struct subsys_device *dev = find_subsys(name); if (!dev) return -ENODEV; ret = subsystem_restart_dev(dev); put_device(&dev->dev); return ret; } EXPORT_SYMBOL(subsystem_restart); int subsystem_crashed(const char *name) { struct subsys_device *dev = find_subsys(name); struct subsys_tracking *track; if (!dev) return -ENODEV; if (!get_device(&dev->dev)) return -ENODEV; track = subsys_get_track(dev); mutex_lock(&track->lock); dev->do_ramdump_on_put = true; /* * TODO: Make this work with multiple consumers where one is calling * subsystem_restart() and 
another is calling this function. To do * so would require updating private state, etc. */ mutex_unlock(&track->lock); put_device(&dev->dev); return 0; } EXPORT_SYMBOL(subsystem_crashed); void subsys_set_crash_status(struct subsys_device *dev, bool crashed) { dev->crashed = true; } bool subsys_get_crash_status(struct subsys_device *dev) { return dev->crashed; } #ifdef CONFIG_DEBUG_FS static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { int r; char buf[40]; struct subsys_device *subsys = filp->private_data; r = snprintf(buf, sizeof(buf), "%d\n", subsys->count); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t subsys_debugfs_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct subsys_device *subsys = filp->private_data; char buf[10]; char *cmp; cnt = min(cnt, sizeof(buf) - 1); if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = '\0'; cmp = strstrip(buf); if (!strcmp(cmp, "restart")) { if (subsystem_restart_dev(subsys)) return -EIO; } else if (!strcmp(cmp, "get")) { if (subsystem_get(subsys->desc->name)) return -EIO; } else if (!strcmp(cmp, "put")) { subsystem_put(subsys); } else { return -EINVAL; } return cnt; } static const struct file_operations subsys_debugfs_fops = { .open = simple_open, .read = subsys_debugfs_read, .write = subsys_debugfs_write, }; static struct dentry *subsys_base_dir; static int __init subsys_debugfs_init(void) { subsys_base_dir = debugfs_create_dir("msm_subsys", NULL); return !subsys_base_dir ? -ENOMEM : 0; } static void subsys_debugfs_exit(void) { debugfs_remove_recursive(subsys_base_dir); } static int subsys_debugfs_add(struct subsys_device *subsys) { if (!subsys_base_dir) return -ENOMEM; subsys->dentry = debugfs_create_file(subsys->desc->name, S_IRUGO | S_IWUSR, subsys_base_dir, subsys, &subsys_debugfs_fops); return !subsys->dentry ? 
-ENOMEM : 0; } static void subsys_debugfs_remove(struct subsys_device *subsys) { debugfs_remove(subsys->dentry); } #else static int __init subsys_debugfs_init(void) { return 0; }; static void subsys_debugfs_exit(void) { } static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; } static void subsys_debugfs_remove(struct subsys_device *subsys) { } #endif static int subsys_device_open(struct inode *inode, struct file *file) { void *retval; struct subsys_device *subsys_dev = container_of(file->private_data, struct subsys_device, misc_dev); if (!file->private_data) return -EINVAL; retval = subsystem_get(subsys_dev->desc->name); if (IS_ERR(retval)) return PTR_ERR(retval); return 0; } static int subsys_device_close(struct inode *inode, struct file *file) { struct subsys_device *subsys_dev = container_of(file->private_data, struct subsys_device, misc_dev); if (!file->private_data) return -EINVAL; subsystem_put(subsys_dev); return 0; } static const struct file_operations subsys_device_fops = { .owner = THIS_MODULE, .open = subsys_device_open, .release = subsys_device_close, }; static void subsys_device_release(struct device *dev) { struct subsys_device *subsys = to_subsys(dev); wake_lock_destroy(&subsys->wake_lock); mutex_destroy(&subsys->track.lock); ida_simple_remove(&subsys_ida, subsys->id); kfree(subsys); } static irqreturn_t subsys_err_ready_intr_handler(int irq, void *subsys) { struct subsys_device *subsys_dev = subsys; dev_info(subsys_dev->desc->dev, "Subsystem error monitoring/handling services are up\n"); if (subsys_dev->desc->is_not_loadable) return IRQ_HANDLED; complete(&subsys_dev->err_ready); return IRQ_HANDLED; } static int subsys_misc_device_add(struct subsys_device *subsys_dev) { int ret; memset(subsys_dev->miscdevice_name, 0, ARRAY_SIZE(subsys_dev->miscdevice_name)); snprintf(subsys_dev->miscdevice_name, ARRAY_SIZE(subsys_dev->miscdevice_name), "subsys_%s", subsys_dev->desc->name); subsys_dev->misc_dev.minor = MISC_DYNAMIC_MINOR; 
subsys_dev->misc_dev.name = subsys_dev->miscdevice_name; subsys_dev->misc_dev.fops = &subsys_device_fops; subsys_dev->misc_dev.parent = &subsys_dev->dev; ret = misc_register(&subsys_dev->misc_dev); if (ret) { pr_err("%s: misc_register() failed for %s (%d)", __func__, subsys_dev->miscdevice_name, ret); } return ret; } static void subsys_misc_device_remove(struct subsys_device *subsys_dev) { misc_deregister(&subsys_dev->misc_dev); } static int __get_gpio(struct subsys_desc *desc, const char *prop, int *gpio) { struct device_node *dnode = desc->dev->of_node; int ret = -ENOENT; if (of_find_property(dnode, prop, NULL)) { *gpio = of_get_named_gpio(dnode, prop, 0); ret = *gpio < 0 ? *gpio : 0; } return ret; } static int __get_irq(struct subsys_desc *desc, const char *prop, unsigned int *irq) { int ret, gpio, irql; ret = __get_gpio(desc, prop, &gpio); if (ret) return ret; irql = gpio_to_irq(gpio); if (irql == -ENOENT) irql = -ENXIO; if (irql < 0) { pr_err("[%s]: Error getting IRQ \"%s\"\n", desc->name, prop); return irql; } else { *irq = irql; } return 0; } static int subsys_parse_devicetree(struct subsys_desc *desc) { int ret; struct platform_device *pdev = container_of(desc->dev, struct platform_device, dev); ret = __get_irq(desc, "qcom,gpio-err-fatal", &desc->err_fatal_irq); if (ret && ret != -ENOENT) return ret; ret = __get_irq(desc, "qcom,gpio-err-ready", &desc->err_ready_irq); if (ret && ret != -ENOENT) return ret; ret = __get_irq(desc, "qcom,gpio-stop-ack", &desc->stop_ack_irq); if (ret && ret != -ENOENT) return ret; ret = __get_gpio(desc, "qcom,gpio-force-stop", &desc->force_stop_gpio); if (ret && ret != -ENOENT) return ret; desc->wdog_bite_irq = platform_get_irq(pdev, 0); if (desc->wdog_bite_irq < 0) return desc->wdog_bite_irq; return 0; } static int subsys_setup_irqs(struct subsys_device *subsys) { struct subsys_desc *desc = subsys->desc; int ret; if (desc->err_fatal_irq && desc->err_fatal_handler) { ret = devm_request_irq(desc->dev, desc->err_fatal_irq, 
desc->err_fatal_handler, IRQF_TRIGGER_RISING, desc->name, desc); if (ret < 0) { dev_err(desc->dev, "[%s]: Unable to register error fatal IRQ handler!: %d\n", desc->name, ret); return ret; } } if (desc->stop_ack_irq && desc->stop_ack_handler) { ret = devm_request_irq(desc->dev, desc->stop_ack_irq, desc->stop_ack_handler, IRQF_TRIGGER_RISING, desc->name, desc); if (ret < 0) { dev_err(desc->dev, "[%s]: Unable to register stop ack handler!: %d\n", desc->name, ret); return ret; } } if (desc->wdog_bite_irq && desc->wdog_bite_handler) { ret = devm_request_irq(desc->dev, desc->wdog_bite_irq, desc->wdog_bite_handler, IRQF_TRIGGER_RISING, desc->name, desc); if (ret < 0) { dev_err(desc->dev, "[%s]: Unable to register wdog bite handler!: %d\n", desc->name, ret); return ret; } } if (desc->err_ready_irq) { ret = devm_request_irq(desc->dev, desc->err_ready_irq, subsys_err_ready_intr_handler, IRQF_TRIGGER_RISING, "error_ready_interrupt", subsys); if (ret < 0) { dev_err(desc->dev, "[%s]: Unable to register err ready handler\n", desc->name); return ret; } } return 0; } struct subsys_device *subsys_register(struct subsys_desc *desc) { struct subsys_device *subsys; int ret; subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); if (!subsys) return ERR_PTR(-ENOMEM); subsys->desc = desc; subsys->owner = desc->owner; subsys->dev.parent = desc->dev; subsys->dev.bus = &subsys_bus_type; subsys->dev.release = subsys_device_release; subsys->notify = subsys_notif_add_subsys(desc->name); subsys->restart_order = update_restart_order(subsys); ret = subsys_parse_devicetree(desc); if (ret) goto err_dtree; snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name); wake_lock_init(&subsys->wake_lock, WAKE_LOCK_SUSPEND, subsys->wlname); INIT_WORK(&subsys->work, subsystem_restart_wq_func); spin_lock_init(&subsys->track.s_lock); subsys->id = ida_simple_get(&subsys_ida, 0, 0, GFP_KERNEL); if (subsys->id < 0) { ret = subsys->id; goto err_ida; } dev_set_name(&subsys->dev, "subsys%d", subsys->id); 
mutex_init(&subsys->track.lock); ret = subsys_debugfs_add(subsys); if (ret) goto err_debugfs; ret = device_register(&subsys->dev); if (ret) { device_unregister(&subsys->dev); goto err_register; } ret = subsys_misc_device_add(subsys); if (ret) { put_device(&subsys->dev); goto err_register; } ret = subsys_setup_irqs(subsys); if (ret < 0) goto err_misc_device; return subsys; err_misc_device: subsys_misc_device_remove(subsys); err_register: subsys_debugfs_remove(subsys); err_debugfs: mutex_destroy(&subsys->track.lock); ida_simple_remove(&subsys_ida, subsys->id); err_ida: wake_lock_destroy(&subsys->wake_lock); err_dtree: kfree(subsys); return ERR_PTR(ret); } EXPORT_SYMBOL(subsys_register); void subsys_unregister(struct subsys_device *subsys) { if (IS_ERR_OR_NULL(subsys)) return; if (get_device(&subsys->dev)) { mutex_lock(&subsys->track.lock); WARN_ON(subsys->count); device_unregister(&subsys->dev); mutex_unlock(&subsys->track.lock); subsys_debugfs_remove(subsys); subsys_misc_device_remove(subsys); put_device(&subsys->dev); } } EXPORT_SYMBOL(subsys_unregister); static int subsys_panic(struct device *dev, void *data) { struct subsys_device *subsys = to_subsys(dev); if (subsys->desc->crash_shutdown) subsys->desc->crash_shutdown(subsys->desc); return 0; } static int ssr_panic_handler(struct notifier_block *this, unsigned long event, void *ptr) { bus_for_each_dev(&subsys_bus_type, NULL, NULL, subsys_panic); return NOTIFY_DONE; } static struct notifier_block panic_nb = { .notifier_call = ssr_panic_handler, }; static int __init ssr_init_soc_restart_orders(void) { int i; atomic_notifier_chain_register(&panic_notifier_list, &panic_nb); if (cpu_is_msm8x60()) { for (i = 0; i < ARRAY_SIZE(orders_8x60_all); i++) { mutex_init(&orders_8x60_all[i]->track.lock); spin_lock_init(&orders_8x60_all[i]->track.s_lock); } for (i = 0; i < ARRAY_SIZE(orders_8x60_modems); i++) { mutex_init(&orders_8x60_modems[i]->track.lock); spin_lock_init(&orders_8x60_modems[i]->track.s_lock); } restart_orders = 
orders_8x60_all; n_restart_orders = ARRAY_SIZE(orders_8x60_all); } if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) { restart_orders = restart_orders_8960_sglte; n_restart_orders = ARRAY_SIZE(restart_orders_8960_sglte); } for (i = 0; i < n_restart_orders; i++) { mutex_init(&restart_orders[i]->track.lock); spin_lock_init(&restart_orders[i]->track.s_lock); } return 0; } static int __init subsys_restart_init(void) { int ret; ssr_wq = alloc_workqueue("ssr_wq", WQ_CPU_INTENSIVE, 0); BUG_ON(!ssr_wq); ret = bus_register(&subsys_bus_type); if (ret) goto err_bus; ret = subsys_debugfs_init(); if (ret) goto err_debugfs; ret = ssr_init_soc_restart_orders(); if (ret) goto err_soc; return 0; err_soc: subsys_debugfs_exit(); err_debugfs: bus_unregister(&subsys_bus_type); err_bus: destroy_workqueue(ssr_wq); return ret; } arch_initcall(subsys_restart_init); MODULE_DESCRIPTION("Subsystem Restart Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Arc-Team/android_kernel_htc_glacier
drivers/input/touchscreen/synaptics_3200.c
53
79103
/* drivers/input/touchscreen/synaptics_3200.c - Synaptics 3200 serious touch panel driver * * Copyright (C) 2011 HTC Corporation. * * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/delay.h> #include <linux/earlysuspend.h> #include <linux/hrtimer.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/synaptics_i2c_rmi.h> #include <linux/slab.h> #include <linux/rmi.h> #include <mach/board.h> #include <mach/msm_hsusb.h> #include <asm/gpio.h> #include <linux/input/mt.h> #define SYN_I2C_RETRY_TIMES 10 #define SYN_WIRELESS_DEBUG /* #define SYN_CABLE_CONTROL */ #define SYN_CALIBRATION_CONTROL /* #define SYN_FILTER_CONTROL */ /* #define SYN_FLASH_PROGRAMMING_LOG */ /* #define SYN_DISABLE_CONFIG_UPDATE */ #define FAKE_EVENT struct synaptics_ts_data { uint16_t addr; struct i2c_client *client; struct input_dev *input_dev; struct workqueue_struct *syn_wq; struct function_t *address_table; int use_irq; int gpio_irq; int gpio_reset; struct hrtimer timer; struct work_struct work; uint16_t max[2]; uint32_t flags; uint8_t num_function; uint8_t finger_support; uint16_t finger_pressed; int (*power)(int on); struct early_suspend early_suspend; int pre_finger_data[11][4]; uint8_t debug_log_level; uint32_t raw_base; uint32_t raw_ref; uint64_t timestamp; uint16_t *filter_level; uint8_t *reduce_report_level; unsigned long single_tap_timeout; int16_t *report_data; uint8_t *temp_report_data; uint8_t grip_suppression; uint8_t grip_b_suppression; uint16_t 
tap_suppression; uint8_t ambiguous_state; uint8_t diag_command; uint8_t cable_support; uint8_t cable_config; uint8_t key_number; uint16_t key_postion_x[4]; uint16_t key_postion_y; uint8_t intr_bit; uint8_t finger_count; uint8_t page_select; uint8_t config_table[SYN_CONFIG_SIZE]; uint8_t x_channel; uint8_t y_channel; uint8_t *config; uint32_t config_version; uint16_t package_id; uint32_t packrat_number; int layout[4]; uint8_t htc_event; atomic_t data_ready; uint8_t relaxation; uint8_t irq_enabled; uint8_t large_obj_check; uint8_t default_large_obj; uint16_t tw_vendor; uint16_t tw_pin_mask; uint8_t support_htc_event; uint8_t mfg_flag; uint8_t first_pressed; }; #ifdef CONFIG_HAS_EARLYSUSPEND static void synaptics_ts_early_suspend(struct early_suspend *h); static void synaptics_ts_late_resume(struct early_suspend *h); #endif static DECLARE_WAIT_QUEUE_HEAD(syn_data_ready_wq); static DEFINE_MUTEX(syn_mutex); static struct synaptics_ts_data *gl_ts; static uint16_t syn_panel_version; static int i2c_syn_write_byte_data(struct i2c_client *client, uint16_t addr, uint8_t value); static int syn_pdt_scan(struct synaptics_ts_data *ts, int num_page); static int synaptics_init_panel(struct synaptics_ts_data *ts); static irqreturn_t synaptics_irq_thread(int irq, void *ptr); static void syn_page_select(struct i2c_client *client, uint8_t page) { struct synaptics_ts_data *ts = i2c_get_clientdata(client); if (page ^ ts->page_select) { i2c_smbus_write_byte_data(client, 0xFF, page); ts->page_select = page; /* printk(KERN_INFO "TOUCH: Page Select: %s: %d\n", __func__, ts->page_select); */ } } static int i2c_syn_read(struct i2c_client *client, uint16_t addr, uint8_t *data, uint16_t length) { uint8_t retry, buf; struct i2c_msg msg[] = { { .addr = client->addr, .flags = 0, .len = 1, .buf = &buf, }, { .addr = client->addr, .flags = I2C_M_RD, .len = length, .buf = data, } }; buf = addr & 0xFF; mutex_lock(&syn_mutex); syn_page_select(client, addr >> 8); for (retry = 0; retry < 
SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(client->adapter, msg, 2) == 2) break; msleep(10); } mutex_unlock(&syn_mutex); if (retry == SYN_I2C_RETRY_TIMES) { printk(KERN_INFO "[TP] i2c_read retry over %d\n", SYN_I2C_RETRY_TIMES); return -EIO; } return 0; } static int i2c_syn_write(struct i2c_client *client, uint16_t addr, uint8_t *data, uint16_t length) { uint8_t retry; uint8_t buf[length + 1]; struct i2c_msg msg[] = { { .addr = client->addr, .flags = 0, .len = length + 1, .buf = buf, } }; mutex_lock(&syn_mutex); syn_page_select(client, addr >> 8); buf[0] = addr & 0xFF; memcpy(&buf[1], &data[0], length); for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(client->adapter, msg, 1) == 1) break; mdelay(10); } mutex_unlock(&syn_mutex); if (retry == SYN_I2C_RETRY_TIMES) { printk(KERN_ERR "[TP] i2c_write retry over %d\n", SYN_I2C_RETRY_TIMES); return -EIO; } return 0; } int i2c_rmi_read(uint16_t addr, uint8_t *data, uint16_t length) { uint8_t retry, buf; struct synaptics_ts_data *ts = gl_ts; struct i2c_msg msg[] = { { .addr = ts->client->addr, .flags = 0, .len = 1, .buf = &buf, }, { .addr = ts->client->addr, .flags = I2C_M_RD, .len = length, .buf = data, } }; buf = addr & 0xFF; mutex_lock(&syn_mutex); syn_page_select(ts->client, addr >> 8); for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(ts->client->adapter, msg, 2) == 2) break; msleep(10); } mutex_unlock(&syn_mutex); if (retry == SYN_I2C_RETRY_TIMES) { printk(KERN_INFO "[TP] i2c_read retry over %d\n", SYN_I2C_RETRY_TIMES); return -EIO; } return 0; } EXPORT_SYMBOL(i2c_rmi_read); int i2c_rmi_write(uint16_t addr, uint8_t *data, uint16_t length) { uint8_t retry; uint8_t buf[length + 1]; struct synaptics_ts_data *ts = gl_ts; struct i2c_msg msg[] = { { .addr = ts->client->addr, .flags = 0, .len = length + 1, .buf = buf, } }; mutex_lock(&syn_mutex); syn_page_select(ts->client, addr >> 8); buf[0] = addr & 0xFF; memcpy(&buf[1], &data[0], length); for (retry = 0; retry < 
SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(ts->client->adapter, msg, 1) == 1) break; mdelay(10); } mutex_unlock(&syn_mutex); if (retry == SYN_I2C_RETRY_TIMES) { printk(KERN_ERR "[TP] i2c_write retry over %d\n", SYN_I2C_RETRY_TIMES); return -EIO; } return 0; } EXPORT_SYMBOL(i2c_rmi_write); static int i2c_syn_write_byte_data(struct i2c_client *client, uint16_t addr, uint8_t value) { return i2c_syn_write(client, addr, &value, 1); } static int i2c_syn_error_handler(struct synaptics_ts_data *ts, uint8_t reset, char *reason, const char *fun_name) { int ret; if (reason && fun_name) printk(KERN_ERR "[TP]TOUCH_ERR: I2C Error: %s:%s, reset = %d\n", fun_name, reason, reset); else printk(KERN_INFO "[TP] %s: rason and fun_name can't be null\n", __func__); if (reset) { if (ts->power) { ret = ts->power(0); if (ret < 0) printk(KERN_ERR "[TP] TOUCH_ERR: synaptics i2c error handler power off failed\n"); msleep(10); ret = ts->power(1); if (ret < 0) printk(KERN_ERR "[TP] TOUCH_ERR: synaptics i2c error handler power on failed\n"); } else { /* ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x01, COMMAND_BASE), 0x01); if (ret < 0) printk(KERN_INFO "[TP] TOUCH_ERR: synaptics i2c error handler SW reset failed\n"); else printk(KERN_INFO "[TP] synaptics i2c error handler: reset chip by reset command\n"); msleep(250); */ } ret = synaptics_init_panel(ts); if (ret < 0) printk(KERN_ERR "[TP] TOUCH_ERR: synaptics i2c error handler init panel failed\n"); if (!ts->use_irq) { hrtimer_cancel(&ts->timer); hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL); } } return -EIO; } static int get_address_base(struct synaptics_ts_data *ts, uint8_t command, uint8_t type) { uint8_t i; for (i = 0; i < ts->num_function; i++) { if (ts->address_table[i].function_type == command) { switch (type) { case QUERY_BASE: return ts->address_table[i].query_base; case COMMAND_BASE: return ts->address_table[i].command_base; case CONTROL_BASE: return ts->address_table[i].control_base; case 
DATA_BASE:
				return ts->address_table[i].data_base;
			case INTR_SOURCE:
				return ts->address_table[i].interrupt_source;
			case FUNCTION:
				return 1;
			}
		}
	}
	if (type == FUNCTION)
		return 0;
	else
		return -1;
}

/* Build a mask of `number` consecutive set bits shifted left by `offset`. */
static int get_int_mask(uint8_t number, uint8_t offset)
{
	uint8_t i, mask = 0;

	for (i = 0; i < number; i++)
		mask |= BIT(i);
	return mask << offset;
}

/*
 * Fletcher-style checksum over `len` 16-bit words starting at 0xFFFF/0xFFFF.
 * Returns the two running sums packed into one 32-bit value (sum2 in the
 * high half).  Used to stamp/verify the F34 configuration image.
 */
static uint32_t syn_crc(uint16_t *data, uint16_t len)
{
	uint32_t sum1, sum2;

	sum1 = sum2 = 0xFFFF;
	while (len--) {
		sum1 += *data++;
		sum2 += sum1;
		sum1 = (sum1 & 0xFFFF) + (sum1 >> 16);
		sum2 = (sum2 & 0xFFFF) + (sum2 >> 16);
		/* printk("Data: %x, Sum1: %x, Sum2: %x\n", *data, sum1, sum2); */
	}
	return sum1 | (sum2 << 16);
}

/*
 * Poll the attention GPIO `attr` (up to 5 x 20 ms) for the flash-command
 * interrupt, then verify the F34 flash status byte reads back 0x80.
 * Returns 0 on success, SYN_PROCESS_ERR on timeout or bad status, or the
 * value of i2c_syn_error_handler() on an I2C failure.
 */
static int wait_flash_interrupt(struct synaptics_ts_data *ts, int attr)
{
	uint8_t data = 0;
	int i, ret;

	for (i = 0; i < 5; i++) {
#ifdef SYN_FLASH_PROGRAMMING_LOG
		printk(KERN_INFO "[TP] ATT: %d\n", gpio_get_value(attr));
#endif
		if (!gpio_get_value(attr)) {
			/* ATT low: read F01 interrupt status, bit 0 = flash */
			ret = i2c_syn_read(ts->client,
				get_address_base(ts, 0x01, DATA_BASE) + 1, &data, 1);
			if (ret < 0)
				return i2c_syn_error_handler(ts, 0, "r:1", __func__);
			if ((data & 0x01) == 0x01) {
#ifdef SYN_FLASH_PROGRAMMING_LOG
				printk(KERN_INFO "[TP] ATT: %d, status: %x\n",
					gpio_get_value(attr), data);
#endif
				break;
			}
		}
		msleep(20);
	}

	if (i == 5 && syn_panel_version == 0) {
		/* old panels: clear interrupt status once more instead of failing */
		ret = i2c_syn_read(ts->client,
			get_address_base(ts, 0x01, DATA_BASE) + 1, &data, 1);
		if (ret < 0)
			return i2c_syn_error_handler(ts, 0, "r:2", __func__);
	} else if (i == 5) {
		printk(KERN_INFO "[TP] wait_flash_interrupt: interrupt over time!\n");
		return SYN_PROCESS_ERR;
	}

	ret = i2c_syn_read(ts->client,
		get_address_base(ts, 0x34, DATA_BASE) + 18, &data, 1);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "r:3", __func__);
	/* check = 0x80 */
	if (data != 0x80) {
		printk(KERN_INFO "[TP] wait_flash_interrupt: block config fail!\n");
		return SYN_PROCESS_ERR;
	}
	return 0;
}

/*
 * Put the F34 flash controller into programming mode: write the bootloader
 * ID (read from F34 query) into the block-data area, then issue the
 * "enable flash programming" command (0x0F) and wait for completion.
 */
static int enable_flash_programming(struct synaptics_ts_data *ts, int attr)
{
	int ret;
	uint8_t data[2];

	/* timing need to fine tune, no interrupt low */
	ret = i2c_syn_read(ts->client, get_address_base(ts, 0x34, QUERY_BASE), data, 2);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "r:1", __func__);
	/* printk("%s: data: %x, %x\n", __func__, data[0], data[1]); */
	ret = i2c_syn_write(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 2, data, 2);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "w:2", __func__);
	ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 18, 0x0F);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "w:3", __func__);
	ret = wait_flash_interrupt(ts, attr);
	if (ret < 0)
		return ret;
	return 0;
}

/*
 * Read configuration block 0x1F back from flash (command 0x05) and compare
 * the CRC stored in it against `config_crc`.
 * Returns 0 when the CRCs match, 1 when they differ, negative on error.
 * NOTE: with SYN_FLASH_PROGRAMMING_LOG defined the #ifdef pair turns the
 * body into a loop over all 0x20 blocks — the braces deliberately span the
 * preprocessor conditionals.
 */
static int crc_comparison(struct synaptics_ts_data *ts, uint32_t config_crc, int attr)
{
	int ret;
	uint8_t data[17];
	uint32_t flash_crc;
#ifdef SYN_FLASH_PROGRAMMING_LOG
	uint8_t i, j;

	for (i = 0; i < 0x20; i++) {
		data[0] = i;
		data[1] = 0x00;
#else
	data[0] = 0x1F;
	data[1] = 0x00;
#endif
	ret = i2c_syn_write(ts->client, get_address_base(ts, 0x34, DATA_BASE), data, 2);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "w:1", __func__);
	ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 18, 0x05);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "w:2", __func__);
	ret = wait_flash_interrupt(ts, attr);
	if (ret < 0)
		return ret;
	ret = i2c_syn_read(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 2, data, 17);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "r:3", __func__);
	memcpy(&flash_crc, &data[12], 4);
#ifdef SYN_FLASH_PROGRAMMING_LOG
	printk(KERN_INFO "[TP] config_crc = %X, flash_crc = %X\n", config_crc, flash_crc);
	for (j = 0; j < 0x11; j++)
		printk(KERN_INFO " %d:%X ", j, data[j]);
	printk(KERN_INFO "\n");
	}
#endif
	if (flash_crc == config_crc)
		return 0;
	else
		return 1;
}

/*
 * Write the whole configuration image (`config`, 0x20 blocks of 16 bytes)
 * into flash: erase config area (0x07), then for each block write the block
 * number, 16 data bytes and the "write config block" command (0x06) in one
 * transfer, waiting for the flash interrupt after every block.
 */
static int program_config(struct synaptics_ts_data *ts, uint8_t *config, int attr)
{
	int ret;
	uint8_t data[19];
	uint16_t i;

	ret = i2c_syn_read(ts->client, get_address_base(ts, 0x34, QUERY_BASE), data, 2);
	/* printk("%s: data: %x, %x\n", __func__, data[0], data[1]); */
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "r:1", __func__);
	ret = i2c_syn_write(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 2, data, 2);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "w:2", __func__);
	/* printk("ATT: %d\n", gpio_get_value(attr)); */
	ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 18, 0x07);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "w:3", __func__);
	ret = wait_flash_interrupt(ts, attr);
	if (ret < 0)
		return ret;

	for (i = 0; i < 0x20; i++) {
		data[0] = i & 0xFF;
		data[1] = (i & 0xFF00) >> 8;
		memcpy(&data[2], &config[16 * i], 16);
		data[18] = 0x06;
		ret = i2c_syn_write(ts->client, get_address_base(ts, 0x34, DATA_BASE), data, 19);
		if (ret < 0)
			return i2c_syn_error_handler(ts, 0, "w:4", __func__);
		ret = wait_flash_interrupt(ts, attr);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/*
 * Leave flash programming mode by issuing an F01 device reset, then poll
 * (up to 25 x 20 ms) until the "flash programming" status bit (0x40)
 * clears.  On success returns the caller-supplied `status` so callers can
 * chain their own result through this cleanup.
 */
static int disable_flash_programming(struct synaptics_ts_data *ts, int status)
{
	int ret;
	uint8_t data = 0, i;

	ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x01, COMMAND_BASE), 0x01);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "w:1", __func__);

	for (i = 0; i < 25; i++) {
		ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, DATA_BASE), &data, 1);
		if (ret < 0)
			return i2c_syn_error_handler(ts, 0, "r:2", __func__);
		if ((data & 0x40) == 0)
			break;
		else
			msleep(20);
	}

	if (i == 25) {
		printk(KERN_INFO "[TP] Disable flash programming fail! F01_data: %X\n", data);
		return SYN_PROCESS_ERR;
	} else {
		printk(KERN_INFO "[TP] Disable flash programming success! F01_data: %X\n", data);
		return status;
	}
}

/*
 * Update the configuration area in flash if needed:
 *  1. stamp ts->config with its checksum,
 *  2. enter programming mode (unless the tw-pin path already did),
 *  3. if the stored config version matches, compare CRCs and skip the
 *     reflash when they are equal,
 *  4. otherwise program the config, retrying up to 3 times.
 * Always leaves programming mode via disable_flash_programming().
 */
static int syn_config_update(struct synaptics_ts_data *ts, int attr)
{
	uint8_t retry;
	uint32_t crc_checksum;
	int ret;

	crc_checksum = syn_crc((uint16_t *)ts->config, SYN_CONFIG_SIZE / 2 - 2);
	memcpy(&ts->config[SYN_CONFIG_SIZE - 4], &crc_checksum, 4);
	printk(KERN_INFO "[TP] CRC = %X\n", syn_crc((uint16_t *)ts->config, SYN_CONFIG_SIZE / 2 - 2));

	if (ts->tw_pin_mask == 0) {
		ret = enable_flash_programming(ts, attr);
		if (ret < 0) {
			printk(KERN_INFO "[TP] syn_config_update: Enable flash programming fail!\n");
			return disable_flash_programming(ts, ret);
		}
		ret = syn_pdt_scan(ts, SYN_BL_PAGE);
		if (ret < 0) {
			printk(KERN_INFO "[TP] syn_config_update: pdt scan failed\n");
			return disable_flash_programming(ts, ret);
		}
	}

	if ((ts->config != NULL && (ts->config[0] << 24 | ts->config[1] << 16 |
		ts->config[2] << 8 | ts->config[3]) == ts->config_version)) {
		ret = crc_comparison(ts, crc_checksum, attr);
		if (ret < 0) {
			printk(KERN_INFO "[TP] syn_config_update: CRC comparison fail!\n");
			return disable_flash_programming(ts, ret);
		} else if (ret == 0)
			return disable_flash_programming(ts, 1);
	}

	for (retry = 0; retry < 3; retry++) {
		ret = program_config(ts, ts->config, attr);
		if (ret < 0) {
#ifdef SYN_FLASH_PROGRAMMING_LOG
			printk(KERN_INFO "[TP] syn_config_update: Program config fail %d!\n", retry + 1);
#endif
			continue;
		}
		ret = disable_flash_programming(ts, 0);
		if (ret == 0)
			break;
		else
			printk(KERN_INFO "[TP] syn_config_update: Disable flash programming fail %d\n", retry + 1);
	}

	if (retry == 3) {
		printk(KERN_INFO "[TP] syn_config_update: Program config fail 3 times\n");
		return ret;
	}
	return 0;
}

/*
 * Query the touch-window vendor ID through the bootloader: enter
 * programming mode, write ts->tw_pin_mask into the block-data area, issue
 * command 0x08, and read the resulting vendor word into ts->tw_vendor.
 * NOTE(review): the i2c_syn_write/read calls in the middle section ignore
 * their return values — confirm this is intentional best-effort behaviour.
 */
static int syn_get_tw_vendor(struct synaptics_ts_data *ts, int attr)
{
	uint8_t data[2] = {0};
	int ret;

	ret = enable_flash_programming(ts, attr);
	if (ret < 0) {
		printk(KERN_INFO "[TP] Enable flash programming fail!\n");
		return disable_flash_programming(ts, -1);
	}
	ret = syn_pdt_scan(ts, SYN_BL_PAGE);
	if (ret < 0) {
		printk(KERN_INFO "[TP] syn_config_update: pdt scan failed\n");
		return disable_flash_programming(ts, ret);
	}

	memcpy(&data, &ts->tw_pin_mask, sizeof(ts->tw_pin_mask));
	printk("[TP] tw mask = %X %X , %X\n", data[0], data[1], ts->tw_pin_mask);
	i2c_syn_write(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 2, data, 2);
	i2c_syn_write(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 4, data, 2);
	i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 18, 0x08);

	if (wait_flash_interrupt(ts, attr) < 0)
		return disable_flash_programming(ts, -1);

	i2c_syn_read(ts->client, get_address_base(ts, 0x34, DATA_BASE) + 6, data, 2);
	ts->tw_vendor = (data[1] << 8) | data[0];
	printk("[TP] tw vendor= %x %x\n", data[1], data[0]);
	return 0;
}

/*
 * Allocate and configure the input device: capability bits, hardware keys,
 * and the multi-touch axis ranges taken from ts->layout[].  Registers the
 * device and returns input_register_device()'s result (0 on success,
 * -ENOMEM if allocation failed).
 */
static int synaptics_input_register(struct synaptics_ts_data *ts)
{
	int ret;

	ts->input_dev = input_allocate_device();
	if (ts->input_dev == NULL) {
		ret = -ENOMEM;
		printk(KERN_ERR "[TP] TOUCH_ERR: %s: Failed to allocate input device\n", __func__);
		return ret;
	}
	ts->input_dev->name = "synaptics-rmi-touchscreen";
	set_bit(EV_SYN, ts->input_dev->evbit);
	set_bit(EV_KEY, ts->input_dev->evbit);
	set_bit(EV_ABS, ts->input_dev->evbit);
	set_bit(KEY_BACK, ts->input_dev->keybit);
	set_bit(KEY_HOME, ts->input_dev->keybit);
	set_bit(KEY_MENU, ts->input_dev->keybit);
	set_bit(KEY_SEARCH, ts->input_dev->keybit);
	set_bit(KEY_APP_SWITCH, ts->input_dev->keybit);
	printk(KERN_INFO "[TP] input_set_abs_params: mix_x %d, max_x %d, min_y %d, max_y %d\n",
		ts->layout[0], ts->layout[1], ts->layout[2], ts->layout[3]);

	if (ts->htc_event == SYN_AND_REPORT_TYPE_B) {
		/* protocol B: slots managed by the input core */
		input_mt_init_slots(ts->input_dev, ts->finger_support);
	} else {
		ts->input_dev->mtsize = ts->finger_support;
		input_set_abs_params(ts->input_dev, ABS_MT_TRACKING_ID, 0, ts->finger_support - 1, 0, 0);
	}
	input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, ts->layout[0], ts->layout[1], 0, 0);
	input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, ts->layout[2], ts->layout[3], 0, 0);
	input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255,
		0, 0);
	input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 30, 0, 0);
	input_set_abs_params(ts->input_dev, ABS_MT_PRESSURE, 0, 30, 0, 0);
	/* HTC-specific packed axes: amplitude = z<<16|w, position = up<<31|x<<16|y */
	input_set_abs_params(ts->input_dev, ABS_MT_AMPLITUDE, 0, ((255 << 16) | 15), 0, 0);
	input_set_abs_params(ts->input_dev, ABS_MT_POSITION, 0,
		((1 << 31) | (ts->layout[1] << 16) | ts->layout[3]), 0, 0);

	return input_register_device(ts->input_dev);
}

/* sysfs "vendor": report panel firmware version, touch-window ID and packrat. */
static ssize_t touch_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	char fw_version[2];
	struct synaptics_ts_data *ts;

	ts = gl_ts;
	memcpy(fw_version, &syn_panel_version, 2);
	ret = sprintf(buf, "synaptics-%d_%c.%c", ts->package_id, fw_version[1], fw_version[0]);
	if (ts->tw_pin_mask != 0)
		ret += sprintf(buf + ret, "_twID-%x", ts->tw_vendor);
	else
		ret += sprintf(buf + ret, "\n");
	ret += sprintf(buf + ret, "_PR: %d\n", ts->packrat_number);
	return ret;
}

static DEVICE_ATTR(vendor, S_IRUGO, touch_vendor_show, NULL);

/* sysfs "gpio": report the current level of the attention/interrupt GPIO. */
static ssize_t gpio_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int ret = 0;
	struct synaptics_ts_data *ts;

	ts = gl_ts;
	ret = gpio_get_value(ts->gpio_irq);
	printk(KERN_DEBUG "[TP] GPIO_TP_INT_N=%d\n", ret);
	sprintf(buf, "GPIO_TP_INT_N=%d\n", ret);
	ret = strlen(buf) + 1;
	return ret;
}

static DEVICE_ATTR(gpio, S_IRUGO, gpio_show, NULL);

/* register address used by the register_show/register_store debug attribute */
static uint16_t syn_reg_addr;

/* sysfs "register" read side: dump one byte at syn_reg_addr. */
static ssize_t register_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int ret = 0;
	uint8_t data = 0;
	struct synaptics_ts_data *ts;

	ts = gl_ts;
	ret = i2c_syn_read(ts->client, syn_reg_addr, &data, 1);
	if (ret < 0) {
		i2c_syn_error_handler(ts, 0, "r", __func__);
		ret += sprintf(buf, "addr: 0x , data: 0x \n");
	} else {
		ret += sprintf(buf, "addr: 0x%X, data: 0x%X\n", syn_reg_addr, data);
	}
	return ret;
}

/*
 * sysfs "register" write side.  Accepts "r:AAA\n" to latch an address or
 * "w:AAA:DDD\n" to latch an address and write one byte to it (hex fields).
 */
static ssize_t register_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	int ret = 0;
	struct synaptics_ts_data *ts;
	char buf_tmp[4];
	uint8_t write_da;
	unsigned long addr;

	ts = gl_ts;
	memset(buf_tmp, 0x0, sizeof(buf_tmp));
	if ((buf[0] == 'r' || buf[0] == 'w') && buf[1] == ':' &&
	    (buf[5] == ':' || buf[5] == '\n')) {
		memcpy(buf_tmp, buf + 2, 3);
		ret = strict_strtoul(buf_tmp, 16, &addr);
		syn_reg_addr = addr;
		printk(KERN_DEBUG "[TP] %s: set syn_reg_addr is: 0x%X\n", __func__, syn_reg_addr);
		if (buf[0] == 'w' && buf[5] == ':' && buf[9] == '\n') {
			memcpy(buf_tmp, buf + 6, 3);
			ret = strict_strtoul(buf_tmp, 16, &addr);
			write_da = addr;
			printk(KERN_DEBUG "[TP] write addr: 0x%X, data: 0x%X\n", syn_reg_addr, write_da);
			ret = i2c_syn_write_byte_data(ts->client, syn_reg_addr, write_da);
			if (ret < 0) {
				i2c_syn_error_handler(ts, 0, "w", __func__);
			}
		}
	}
	return count;
}

static DEVICE_ATTR(register, (S_IWUSR|S_IRUGO), register_show, register_store);

/* sysfs "debug_level": get/set ts->debug_log_level (single digit 0-9). */
static ssize_t debug_level_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct synaptics_ts_data *ts = gl_ts;

	return sprintf(buf, "%d\n", ts->debug_log_level);
}

static ssize_t debug_level_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct synaptics_ts_data *ts = gl_ts;

	if (buf[0] >= '0' && buf[0] <= '9' && buf[1] == '\n')
		ts->debug_log_level = buf[0] - '0';
	return count;
}

static DEVICE_ATTR(debug_level, (S_IWUSR|S_IRUGO), debug_level_show, debug_level_store);

/*
 * sysfs "diag" read side: trigger an F54 report (ts->diag_command), wait for
 * the interrupt handler to fill ts->report_data, then print it as an
 * x_channel-by-y_channel matrix (package 3202 stores it transposed).
 */
static ssize_t syn_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct synaptics_ts_data *ts = gl_ts;
	size_t count = 0;
	uint16_t i, j;
	int ret;

	ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, DATA_BASE), ts->diag_command);
	if (ret < 0) {
		i2c_syn_error_handler(ts, 0, "w:1", __func__);
		count += sprintf(buf, "[TP] TOUCH_ERR: %s: i2c write fail(%d)\n", __func__, ret);
		return count;
	}

	atomic_set(&ts->data_ready, 0);

	ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, COMMAND_BASE), 0x01);
	if (ret < 0) {
		atomic_set(&ts->data_ready, 1);
		i2c_syn_error_handler(ts, 0, "w:2", __func__);
		count += sprintf(buf, "[TP] TOUCH_ERR: %s: i2c write fail(%d)\n", __func__, ret);
		return count;
	}

	wait_event_interruptible_timeout(syn_data_ready_wq, atomic_read(&ts->data_ready), 50);

	for (i = 0; i < ts->y_channel; i++) {
		for (j = 0; j < ts->x_channel; j++) {
			if (ts->package_id == 3202)
				count += sprintf(buf + count, "%5d", ts->report_data[i + j * ts->y_channel]);
			else
				count += sprintf(buf + count, "%5d", ts->report_data[i * ts->x_channel + j]);
		}
		count += sprintf(buf + count, "\n");
	}
	return count;
}

/* sysfs "diag" write side: '1' selects report type 2, '2' selects type 3. */
static ssize_t syn_diag_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct synaptics_ts_data *ts;

	ts = gl_ts;
	if (buf[0] == '1')
		ts->diag_command = 2;
	else if (buf[0] == '2')
		ts->diag_command = 3;
	return count;
}

static DEVICE_ATTR(diag, (S_IWUSR|S_IRUGO), syn_diag_show, syn_diag_store);

/*
 * sysfs "unlock": writing 2 after the first touch confirms calibration —
 * disables fast relaxation, forces an F54 update, issues a rezero, and
 * restores the default large-object suppression value.
 */
static ssize_t syn_unlock_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct synaptics_ts_data *ts;
	int unlock = -1;
	int ret;
	uint8_t data = 0;

	ts = gl_ts;
	if (buf[0] >= '0' && buf[0] <= '9' && buf[1] == '\n')
		unlock = buf[0] - '0';
	printk(KERN_INFO "[TP] Touch: unlock change to %d\n", unlock);

	if (unlock == 2 && ts->first_pressed && ts->pre_finger_data[0][0] < 2) {
		ts->pre_finger_data[0][0] = 2;
#ifdef SYN_CALIBRATION_CONTROL
		ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, 0x0);
		if (ret < 0)
			return i2c_syn_error_handler(ts, 0, "w:1", __func__);
		ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, COMMAND_BASE), 0x04);
		if (ret < 0)
			return i2c_syn_error_handler(ts, 0, "w:2", __func__);
		/*printk(KERN_INFO "[TP] %s: Touch Calibration Confirmed\n", __func__);*/
		ret = i2c_syn_read(ts->client, get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, &data, 1);
		if (ret < 0)
			return i2c_syn_error_handler(ts, 0, "r:1", __func__);
		printk(KERN_INFO "[TP] %s: Touch Calibration Confirmed, fast relaxation: %x\n", __func__, data);
		ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x11, COMMAND_BASE), 0x01);
		if (ret < 0)
			return i2c_syn_error_handler(ts, 0, "w:3", __func__);
		printk(KERN_INFO "[TP] %s: Touch Calibration Confirmed, rezero\n", __func__);
#endif
		if (ts->large_obj_check) {
			ret = i2c_syn_read(ts->client, get_address_base(ts, 0x11, CONTROL_BASE) + 0x29, &data, 1);
			if (ret < 0)
				return i2c_syn_error_handler(ts, 0, "r:2", __func__);
			printk(KERN_INFO "[TP] %s: large obj suppression: %x\n", __func__, data);
			ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x11, CONTROL_BASE) + 0x29, ts->default_large_obj);
			if (ret < 0)
				return i2c_syn_error_handler(ts, 0, "w:4", __func__);
			printk(KERN_INFO "[TP] %s: unlock confirmed. set large obj suppression: %x\n", __func__, ts->default_large_obj);
		}
	}
	return count;
}

static DEVICE_ATTR(unlock, (S_IWUSR|S_IRUGO), NULL, syn_unlock_store);

/*
 * sysfs "config" read side: walk the function descriptor table page by page,
 * work out each function's control-register block size from the next
 * command (or query) base on the same page, read the blocks into
 * ts->config_table and hex-dump them.
 */
static ssize_t syn_config_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct synaptics_ts_data *ts = gl_ts;
	uint16_t i, length = 0;
	uint8_t j, temp_func_cmd = 0, temp_func_query = 0, size = 0;
	size_t count = 0;
	int ret;

	printk(KERN_INFO "[TP] ts->num_function: %d\n", ts->num_function);
	for (i = 0; i < SYN_MAX_PAGE; i++) {
		for (j = 0; j < ts->num_function; j++) {
			if (((ts->address_table[j].control_base >> 8) & 0xFF) == i) {
				temp_func_query = 0;
				for (temp_func_cmd = j; temp_func_cmd < ts->num_function; temp_func_cmd++) {
					uint16_t max_addr = (i << 8) | 0xFF;
					uint16_t min_addr = (i << 8) | 0;
					if ((ts->address_table[temp_func_cmd].command_base > min_addr) &&
					    (ts->address_table[temp_func_cmd].command_base <= max_addr))
						break;
					if ((ts->address_table[temp_func_cmd].query_base > min_addr) &&
					    (ts->address_table[temp_func_cmd].query_base <= max_addr) &&
					    temp_func_query == 0)
						temp_func_query = temp_func_cmd;
				}
				if (temp_func_cmd != ts->num_function) {
					size = ts->address_table[temp_func_cmd].command_base -
						ts->address_table[j].control_base;
					printk("[TP] page%d has command function, function: %X\n",
						i, ts->address_table[temp_func_cmd].function_type);
				} else {
					size = ts->address_table[temp_func_query].query_base -
						ts->address_table[j].control_base;
					printk("[TP] page%d has no command function, use query function, function: %X\n",
						i, ts->address_table[temp_func_query].function_type);
				}
				ret = i2c_syn_read(ts->client, ts->address_table[j].control_base,
					&ts->config_table[length], size);
				if (ret < 0) {
					i2c_syn_error_handler(ts, 0, "w", __func__);
					count += sprintf(buf, "[TP] TOUCH_ERR: %s: i2c write fail(%d)\n", __func__, ret);
					return count;
				}
				length += size;
				printk(KERN_INFO "[TP] Size: %x, Length: %x\n", size, length);
				break;
			}
		}
	}
	printk(KERN_INFO "");
	for (i = 0; i < length; i++) {
		printk("%2.2X ", ts->config_table[i]);
		if ((i % 16) == 15)
			printk("\n");
	}
	for (i = 0; i < length; i++) {
		count += sprintf(buf + count, "%2.2X ", ts->config_table[i]);
		if ((i % 16) == (16 - 1))
			count += sprintf(buf + count, "\n");
	}
	count += sprintf(buf + count, "\n");
	return count;
}

/* sysfs "config" write side: only logs per-page control-block lengths. */
static ssize_t syn_config_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct synaptics_ts_data *ts = gl_ts;
	uint8_t i, j, k = 0, length = 0;

	printk(KERN_INFO "[TP] ts->num_function: %d\n", ts->num_function);
	for (i = 0; i < SYN_MAX_PAGE; i++) {
		for (j = 0; j < ts->num_function; j++) {
			if (((ts->address_table[j].control_base >> 8) & 0xFF) == i) {
				for (k = j; k < ts->num_function; k++)
					if (ts->address_table[k].command_base != 0)
						break;
				length += ts->address_table[k].command_base -
					ts->address_table[j].control_base;
				printk(KERN_INFO "[%d]Length: %x\n", i, length);
				break;
			}
		}
	}
	return count;
}

static DEVICE_ATTR(config, (S_IWUSR|S_IRUGO), syn_config_show, syn_config_store);

/* sysfs "layout" read side: print the four min/max axis values. */
static ssize_t syn_layout_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct synaptics_ts_data *ts = gl_ts;
	uint8_t i;
	size_t count = 0;

	for (i = 0; i < 4; i++)
		count += sprintf(buf + count, "%d ", ts->layout[i]);
	count += sprintf(buf + count, "\n");
	return count;
}

static ssize_t syn_layout_store(struct device *dev, struct
	device_attribute *attr, const char *buf, size_t count)
{
	/*
	 * sysfs "layout" write side: parse four comma-separated decimal
	 * values, install them as the new axis layout, then re-register the
	 * input device so the new ranges take effect.
	 */
	struct synaptics_ts_data *ts = gl_ts;
	char buf_tmp[5];
	int i = 0, j = 0, k = 0, ret;
	unsigned long value;
	int layout[4] = {0};

	for (i = 0; i < 20; i++) {
		if (buf[i] == ',' || buf[i] == '\n') {
			memset(buf_tmp, 0x0, sizeof(buf_tmp));
			if (i - j <= 5)
				memcpy(buf_tmp, buf + j, i - j);
			else {
				printk(KERN_INFO "buffer size is over 5 char\n");
				return count;
			}
			j = i + 1;
			if (k < 4) {
				ret = strict_strtol(buf_tmp, 10, &value);
				layout[k++] = value;
			}
		}
	}
	if (k == 4) {
		memcpy(ts->layout, layout, sizeof(layout));
		printk(KERN_INFO "[TP] %d, %d, %d, %d\n",
			ts->layout[0], ts->layout[1], ts->layout[2], ts->layout[3]);
		input_unregister_device(ts->input_dev);
		synaptics_input_register(ts);
	} else
		printk(KERN_INFO "[TP] ERR@%d, %d, %d, %d\n",
			ts->layout[0], ts->layout[1], ts->layout[2], ts->layout[3]);
	return count;
}

static DEVICE_ATTR(layout, (S_IWUSR|S_IRUGO), syn_layout_show, syn_layout_store);

/* sysfs "pdt": dump the discovered RMI function descriptor table. */
static ssize_t syn_pdt_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct synaptics_ts_data *ts = gl_ts;
	uint8_t i;
	size_t count = 0;

	for (i = 0; i < ts->num_function; i++) {
		count += sprintf(buf + count,
			"Funtion: %2X, Query: %3X, Command: %3X, "
			"Control: %3X, Data: %3X, INTR: %2X\n",
			ts->address_table[i].function_type, ts->address_table[i].query_base,
			ts->address_table[i].command_base, ts->address_table[i].control_base,
			ts->address_table[i].data_base, ts->address_table[i].interrupt_source);
	}
	return count;
}

static DEVICE_ATTR(pdt, S_IRUGO, syn_pdt_show, NULL);

/* sysfs "htc_event": get/set the MT reporting protocol selector. */
static ssize_t syn_htc_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct synaptics_ts_data *ts = gl_ts;

	return sprintf(buf, "%d\n", ts->htc_event);
}

static ssize_t syn_htc_event_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct synaptics_ts_data *ts = gl_ts;

	if (buf[0] >= '0' && buf[0] <= '9' && buf[1] == '\n')
		ts->htc_event = buf[0] - '0';
	return count;
}

static DEVICE_ATTR(htc_event, (S_IWUSR|S_IRUGO), syn_htc_event_show, syn_htc_event_store);

#ifdef SYN_WIRELESS_DEBUG
/* sysfs "enabled": report whether the touch IRQ is currently requested. */
static ssize_t syn_int_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct synaptics_ts_data *ts = gl_ts;
	size_t count = 0;

	count += sprintf(buf + count, "%d ", ts->irq_enabled);
	count += sprintf(buf + count, "\n");
	return count;
}

/* sysfs "enabled" write side: "1" requests the IRQ, "0" releases it. */
static ssize_t syn_int_status_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct synaptics_ts_data *ts = gl_ts;
	int value, ret = 0;

	if (sysfs_streq(buf, "0"))
		value = false;
	else if (sysfs_streq(buf, "1"))
		value = true;
	else
		return -EINVAL;

	if (value) {
		ret = request_threaded_irq(ts->client->irq, NULL, synaptics_irq_thread,
			IRQF_TRIGGER_LOW | IRQF_ONESHOT, ts->client->name, ts);
		if (ret == 0) {
			ts->irq_enabled = 1;
			ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, CONTROL_BASE) + 1,
				&ts->intr_bit, 1);
			printk(KERN_INFO "%s: interrupt enable: %x\n", __func__, ts->intr_bit);
			if (ret)
				free_irq(ts->client->irq, ts);
		}
	} else {
		disable_irq(ts->client->irq);
		free_irq(ts->client->irq, ts);
		ts->irq_enabled = 0;
	}
	return count;
}

static DEVICE_ATTR(enabled, (S_IWUSR|S_IRUGO), syn_int_status_show, syn_int_status_store);

/* sysfs "reset": writing '1' pulses the reset GPIO low for 10 ms. */
static ssize_t syn_reset(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct synaptics_ts_data *ts = gl_ts;

	if (buf[0] == '1' && ts->gpio_reset) {
		gpio_direction_output(ts->gpio_reset, 0);
		msleep(10);
		gpio_direction_output(ts->gpio_reset, 1);
		printk(KERN_INFO "[TP] synaptics touch chip reseted.\n");
	}
	return count;
}

/* NOTE(review): literal 0 used where a NULL show callback is expected. */
static DEVICE_ATTR(reset, (S_IWUSR), 0, syn_reset);

#ifdef FAKE_EVENT
/* Fake-swipe parameters: start/end coordinates, per-step deltas, period. */
static int X_fake_S;
static int Y_fake_S;
static int X_fake_E;
static int Y_fake_E;
static int dx_fake;
static int dy_fake;
static unsigned long report_time;

/*
 * hrtimer callback: emit one synthetic touch point per tick, stepping from
 * (X_fake_S, Y_fake_S) by (dx_fake, dy_fake) until the end coordinate is
 * passed, then report the finger lift.  Re-arms itself via hrtimer_start.
 */
static enum hrtimer_restart synaptics_ts_timer_fake_event_func(struct hrtimer *timer)
{
	struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer);
	static int i;
	static int X_tmp;
	static int Y_tmp;

	if (!i) {
		X_tmp = X_fake_S;
		Y_tmp = Y_fake_S;
		i++;
	}
	if ((dx_fake > 0 ? X_tmp <= X_fake_E : dx_fake ? X_tmp >= X_fake_E : 0) ||
	    (dy_fake > 0 ? Y_tmp <= Y_fake_E : dy_fake ? Y_tmp >= Y_fake_E : 0)) {
		if (ts->htc_event == SYN_AND_REPORT_TYPE_A) {
			input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 0);
			input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 10);
			input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 10);
			input_report_abs(ts->input_dev, ABS_MT_PRESSURE, 5);
			input_report_abs(ts->input_dev, ABS_MT_POSITION_X, X_tmp);
			input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, Y_tmp);
			input_mt_sync(ts->input_dev);
		} else if (ts->htc_event == SYN_AND_REPORT_TYPE_B) {
			input_mt_slot(ts->input_dev, 0);
			input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 1);
			input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 10);
			input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 10);
			input_report_abs(ts->input_dev, ABS_MT_PRESSURE, 5);
			input_report_abs(ts->input_dev, ABS_MT_POSITION_X, X_tmp);
			input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, Y_tmp);
		}
		input_sync(ts->input_dev);
		X_tmp += dx_fake;
		Y_tmp += dy_fake;
		hrtimer_start(&ts->timer, ktime_set(0, report_time), HRTIMER_MODE_REL);
	} else {
		if (ts->htc_event == SYN_AND_REPORT_TYPE_A) {
			input_mt_sync(ts->input_dev);
		} else if (ts->htc_event == SYN_AND_REPORT_TYPE_B) {
			input_mt_slot(ts->input_dev, 0);
			input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0);
		}
		input_sync(ts->input_dev);
		i = 0;
		printk(KERN_INFO "[TP]End of fake event\n");
	}
	return HRTIMER_NORESTART;
}

/*
 * sysfs "fake_event" read side: lazily initialise the hrtimer, print the
 * current parameters and kick off a fake swipe after one second.
 */
static ssize_t syn_fake_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct synaptics_ts_data *ts = gl_ts;
	static uint8_t i;
	size_t count = 0;

	if (!i) {
		hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ts->timer.function = synaptics_ts_timer_fake_event_func;
		printk(KERN_INFO "hrtimer_init\n");
		i++;
	}
	count += sprintf(buf + count, "%d,%d,%d,%d,%d,%d,%lu\n",
		X_fake_S, Y_fake_S, X_fake_E, Y_fake_E, dx_fake, dy_fake, report_time / 1000000);
	if (dx_fake && dy_fake)
		count += sprintf(buf + count, "dx_fake or dy_fake should one value need to be zero\n");
	/* NOTE(review): "dx_fake || dx_fake" looks like a typo for
	 * "dx_fake || dy_fake" — confirm before changing. */
	else if (dx_fake || dx_fake)
		hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	return count;
}

/*
 * sysfs "fake_event" write side: parse up to seven comma-separated decimal
 * fields into the fake-swipe parameters (last field is the period in ms).
 */
static ssize_t syn_fake_event_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	char buf_tmp[5];
	int i = 0, j = 0, k = 0, ret;
	long value;

	while (1) {
		if (buf[i] == ',' || buf[i] == '\n') {
			memset(buf_tmp, 0x0, sizeof(buf_tmp));
			if (i - j <= 5)
				memcpy(buf_tmp, buf + j, i - j);
			else {
				printk(KERN_INFO "buffer size is over 5 char\n");
				return count;
			}
			j = i + 1;
			ret = strict_strtol(buf_tmp, 10, &value);
			switch (k) {
			case 0: X_fake_S = value; break;
			case 1: Y_fake_S = value; break;
			case 2: X_fake_E = value; break;
			case 3: Y_fake_E = value; break;
			case 4: dx_fake = value; break;
			case 5: dy_fake = value; break;
			case 6: report_time = value * 1000000;
				/* fallthrough (no break in original) */
			default: break;
			}
			k++;
		}
		if (buf[i] == '\n')
			break;
		i++;
	}
	return count;
}

static DEVICE_ATTR(fake_event, (S_IWUSR|S_IRUGO), syn_fake_event_show, syn_fake_event_store);
#endif
#endif

static struct kobject *android_touch_kobj;

/*
 * Create the /sys/android_touch kobject and all attribute files; in
 * wireless-debug builds also request and export the ATTN/reset GPIOs.
 * Returns 0 on success or a negative errno.
 */
static int synaptics_touch_sysfs_init(void)
{
	int ret;
#ifdef SYN_WIRELESS_DEBUG
	struct synaptics_ts_data *ts = gl_ts;
#endif

	android_touch_kobj = kobject_create_and_add("android_touch", NULL);
	if (android_touch_kobj == NULL) {
		printk(KERN_ERR "[TP] TOUCH_ERR: %s: subsystem_register failed\n", __func__);
		ret = -ENOMEM;
		return ret;
	}
	syn_reg_addr = 0;
	if (sysfs_create_file(android_touch_kobj, &dev_attr_vendor.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_gpio.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_debug_level.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_register.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_unlock.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_config.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_layout.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_pdt.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_htc_event.attr) ||
	    sysfs_create_file(android_touch_kobj, &dev_attr_reset.attr)
#ifdef FAKE_EVENT
	    || sysfs_create_file(android_touch_kobj, &dev_attr_fake_event.attr)
#endif
#ifdef SYN_WIRELESS_DEBUG
	    || sysfs_create_file(android_touch_kobj, &dev_attr_enabled.attr)
#endif
	    )
		return -ENOMEM;
	/* "diag" only exists when the panel exposes F54 */
	if (get_address_base(gl_ts, 0x54, FUNCTION))
		if (sysfs_create_file(android_touch_kobj, &dev_attr_diag.attr))
			return -ENOMEM;
#ifdef SYN_WIRELESS_DEBUG
	ret = gpio_request(ts->gpio_irq, "synaptics_attn");
	if (ret) {
		printk(KERN_ERR "[TP]%s: Failed to obtain touchpad IRQ %d. Code: %d.",
			__func__, ts->gpio_irq, ret);
		return ret;
	}
	if (ts->gpio_reset) {
		ret = gpio_request(ts->gpio_reset, "synaptics_reset");
		if (ret)
			printk(KERN_INFO "[TP]%s: Failed to obtain reset pin: %d. Code: %d.",
				__func__, ts->gpio_reset, ret);
	}
	ret = gpio_export(ts->gpio_irq, true);
	if (ret) {
		printk(KERN_ERR "[TP]%s: Failed to " "export ATTN gpio!\n", __func__);
		ret = 0;
	} else {
		ret = gpio_export_link(&(ts->input_dev->dev), "attn", ts->gpio_irq);
		if (ret) {
			printk(KERN_ERR "[TP]%s: Failed to " "symlink ATTN gpio!\n", __func__);
			ret = 0;
		} else {
			printk(KERN_INFO "[TP]%s: Exported GPIO %d.", __func__, ts->gpio_irq);
		}
	}
#endif
	return 0;
}

/* Remove every attribute created by synaptics_touch_sysfs_init(). */
static void synaptics_touch_sysfs_remove(void)
{
	sysfs_remove_file(android_touch_kobj, &dev_attr_vendor.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_gpio.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_debug_level.attr);
	if (get_address_base(gl_ts, 0x54, FUNCTION))
		sysfs_remove_file(android_touch_kobj, &dev_attr_diag.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_register.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_unlock.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_config.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_layout.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_pdt.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_htc_event.attr);
	sysfs_remove_file(android_touch_kobj, &dev_attr_reset.attr);
#ifdef SYN_WIRELESS_DEBUG
	sysfs_remove_file(android_touch_kobj, &dev_attr_enabled.attr);
#endif
	kobject_del(android_touch_kobj);
}

/*
 * Put the panel into the "configured" state (F01 control bit 0x80) and, in
 * manufacturing mode, zero fast relaxation and force an F54 update.
 */
static int synaptics_init_panel(struct synaptics_ts_data *ts)
{
	int ret = 0;

	/* i2c_syn_write_byte_data(ts->client,
		get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, ts->relaxation);
	i2c_syn_write_byte_data(ts->client,
		get_address_base(ts, 0x54, COMMAND_BASE), 0x04);*/

	/* Configured */
	ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x01, CONTROL_BASE), 0x80);
	if (ret < 0)
		return i2c_syn_error_handler(ts, 0, "w:1", __func__);
	if (ts->mfg_flag == 1) {
		ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, 0);
		if (ret < 0)
			i2c_syn_error_handler(ts, 1, "fast relaxation", __func__);
		ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, COMMAND_BASE), 0x04);
		if (ret < 0)
			i2c_syn_error_handler(ts, 1, "force update", __func__);
		printk("[TP] %s set fast relaxation to 0\n", __func__);
	}
	return ret;
}

/*
 * Main touch reporting path, called from the IRQ thread: read the F11 data
 * block, decode per-finger state/coordinates, apply grip/tap filtering,
 * and emit input events in the configured protocol (type A/B/HTC).
 */
static void synaptics_ts_finger_func(struct synaptics_ts_data *ts)
{
	int ret;
	uint8_t buf[((ts->finger_support * 21 + 3) / 4)];
	uint8_t data = 0;

	memset(buf, 0x0, sizeof(buf));
	ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, DATA_BASE) + 2, buf, sizeof(buf));
	if (ret < 0) {
		i2c_syn_error_handler(ts, 0, "r:1", __func__);
	} else {
		int finger_data[ts->finger_support][4];
		int base = (ts->finger_support + 3) / 4;
		uint8_t i, j;
		uint16_t finger_press_changed = 0, finger_release_changed = 0, finger_pressed = 0;

		ts->finger_count = 0;
		if (ts->debug_log_level & 0x1) {
			printk(KERN_INFO "[TP] Touch:");
			for (i = 0; i < sizeof(buf); i++)
				printk(" %2x", buf[i]);
			printk("\n");
		}
		/* 2 status bits per finger: non-zero means present */
		for (i = 0; i < ts->finger_support; i++) {
			if (buf[(i / 4)] >> ((i * 2) % 8) & 0x03) {
				finger_pressed |= BIT(i);
				ts->finger_count++;
			}
#ifdef SYN_FILTER_CONTROL
			else if ((ts->grip_suppression |
				ts->grip_b_suppression) & BIT(i)) {
				ts->grip_suppression &= ~BIT(i);
				ts->grip_b_suppression &= ~BIT(i);
			}
#endif
		}

		/* derive press/release change masks from the previous state */
		if (ts->finger_pressed != finger_pressed &&
		    (ts->pre_finger_data[0][0] < 2 ||
		     ts->htc_event == SYN_AND_REPORT_TYPE_B /* || ts->filter_level[0]*/)) {
			finger_press_changed = ts->finger_pressed ^ finger_pressed;
			finger_release_changed = finger_press_changed & ts->finger_pressed;
			finger_press_changed &= finger_pressed;
			ts->finger_pressed = finger_pressed;
		}

		/* protocol B: report released slots immediately */
		if (ts->htc_event == SYN_AND_REPORT_TYPE_B && finger_release_changed) {
			for (i = 0; i < ts->finger_support; i++) {
				if (finger_release_changed & BIT(i)) {
					input_mt_slot(ts->input_dev, i);
					input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0);
					ts->tap_suppression &= ~BIT(i);
				}
			}
		}

		/* all fingers lifted: emit the leave event and reset filters */
		if (finger_pressed == 0
		/*|| ((ts->grip_suppression | ts->grip_b_suppression) == finger_pressed
			&& finger_release_changed)*/) {
			if (ts->htc_event == SYN_AND_REPORT_TYPE_A) {
				/*input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0); */
				if (ts->support_htc_event) {
					input_report_abs(ts->input_dev, ABS_MT_AMPLITUDE, 0);
					input_report_abs(ts->input_dev, ABS_MT_POSITION, 1 << 31);
				}
				input_mt_sync(ts->input_dev);
			} else if (ts->htc_event == SYN_AND_REPORT_TYPE_HTC) {
				input_report_abs(ts->input_dev, ABS_MT_AMPLITUDE, 0);
				input_report_abs(ts->input_dev, ABS_MT_POSITION, 1 << 31);
			}
#ifdef SYN_FILTER_CONTROL
			if (ts->filter_level[0])
				ts->ambiguous_state = 0;
			ts->grip_b_suppression = 0;
#endif
			if (ts->reduce_report_level[0])
				ts->tap_suppression = 0;
			if (ts->debug_log_level & 0x2)
				printk(KERN_INFO "[TP] Finger leave\n");
		}

		if (ts->pre_finger_data[0][0] < 2 || finger_pressed) {
			for (i = 0; i < ts->finger_support; i++) {
				uint32_t flip_flag = SYNAPTICS_FLIP_X;
				if ((finger_pressed | finger_release_changed) & BIT(i)) {
					/* decode 12-bit X/Y: high 8 bits + shared nibble byte */
					uint8_t pos_mask = 0x0f;
					for (j = 0; j < 2; j++) {
						finger_data[i][j] = (buf[base + 2] & pos_mask) >> (j * 4) |
							(uint16_t)buf[base + j] << 4;
						if (ts->flags & flip_flag)
							finger_data[i][j] = ts->max[j] - finger_data[i][j];
						flip_flag <<= 1;
						pos_mask <<= 4;
					}
					/* [2] = width (wx + wy nibbles), [3] = pressure Z */
					finger_data[i][2] = (buf[base + 3] >> 4 & 0x0F) + (buf[base + 3] & 0x0F);
					finger_data[i][3] = buf[base + 4];
					if (ts->flags & SYNAPTICS_SWAP_XY)
						swap(finger_data[i][0], finger_data[i][1]);

					if ((finger_release_changed & BIT(i)) && ts->pre_finger_data[0][0] < 2) {
						if (!ts->first_pressed) {
							if (ts->finger_count == 0)
								ts->first_pressed = 1;
							printk(KERN_INFO "[TP] E%d@%d, %d\n",
								i + 1, finger_data[i][0], finger_data[i][1]);
						}
#ifdef SYN_CALIBRATION_CONTROL
						/* count large first-finger jumps as calibration hints */
						if (i == 0 && !ts->pre_finger_data[0][0] &&
						    (abs(ts->pre_finger_data[1][0] - finger_data[0][0]) > 100 ||
						     abs(ts->pre_finger_data[1][1] - finger_data[0][1]) > 100)) {
							ts->pre_finger_data[0][1]++;
						} else
							ts->pre_finger_data[0][1] = 0;
						/* printk("ts->pre_finger_data[0][1] = %d", ts->pre_finger_data[0][1]); */
#endif
					}
#ifdef SYN_FILTER_CONTROL
					/* strongly asymmetric wx/wy suggests a gripping palm */
					if (abs((buf[base + 3] >> 4 & 0x0F) - (buf[base + 3] & 0x0F)) >= 10)
						ts->grip_b_suppression |= BIT(i);
					if (ts->filter_level[0] &&
					    ((finger_press_changed | ts->grip_suppression) & BIT(i))) {
						if ((finger_data[i][0] < (ts->filter_level[0] + ts->ambiguous_state * 20) ||
						     finger_data[i][0] > (ts->filter_level[3] - ts->ambiguous_state * 20)) &&
						    !(ts->grip_suppression & BIT(i))) {
							ts->grip_suppression |= BIT(i);
						} else if ((finger_data[i][0] < (ts->filter_level[1] + ts->ambiguous_state * 20) ||
							    finger_data[i][0] > (ts->filter_level[2] - ts->ambiguous_state * 20)) &&
							   (ts->grip_suppression & BIT(i)))
							ts->grip_suppression |= BIT(i);
						else if (finger_data[i][0] > (ts->filter_level[1] + ts->ambiguous_state * 20) &&
							 finger_data[i][0] < (ts->filter_level[2] - ts->ambiguous_state * 20)) {
							ts->grip_suppression &= ~BIT(i);
						}
					}
					if ((ts->grip_suppression | ts->grip_b_suppression) & BIT(i)) {
						finger_pressed &= ~BIT(i);
					} else
#endif
					/* protocol-B tap jitter filter */
					if (ts->htc_event == SYN_AND_REPORT_TYPE_B && ts->reduce_report_level[0]) {
						if (ts->tap_suppression & BIT(i) && finger_pressed & BIT(i)) {
							int dx, dy = 0;
							dx = abs(ts->pre_finger_data[i + 1][2] - finger_data[i][0]);
							dy = abs(ts->pre_finger_data[i + 1][3] - finger_data[i][1]);
							if (dx > ts->reduce_report_level[TAP_DX_OUTER] ||
							    dy > ts->reduce_report_level[TAP_DY_OUTER]) {
								ts->tap_suppression &= ~BIT(i);
							} else if (ts->reduce_report_level[TAP_TIMEOUT] &&
								   (ts->tap_suppression & BIT(0)) &&
								   time_after(jiffies, ts->single_tap_timeout) &&
								   (dx > ts->reduce_report_level[TAP_DX_INTER] ||
								    dy > ts->reduce_report_level[TAP_DY_INTER])) {
								ts->tap_suppression &= ~BIT(0);
							} else {
								finger_pressed &= ~BIT(i);
								if (ts->debug_log_level & 0x2)
									printk(KERN_INFO "[TP] Filtered Finger %d=> X:%d, Y:%d w:%d, z:%d\n",
										i + 1, finger_data[i][0], finger_data[i][1],
										finger_data[i][2], finger_data[i][3]);
							}
						}
					}

					if ((finger_pressed & BIT(i)) == BIT(i)) {
						/* consume this finger's bit, then report it */
						finger_pressed &= ~BIT(i);
						if (ts->htc_event == SYN_AND_REPORT_TYPE_A) {
							if (ts->support_htc_event) {
								input_report_abs(ts->input_dev, ABS_MT_AMPLITUDE,
									finger_data[i][3] << 16 | finger_data[i][2]);
								input_report_abs(ts->input_dev, ABS_MT_POSITION,
									(finger_pressed == 0) << 31 |
									finger_data[i][0] << 16 | finger_data[i][1]);
							}
							input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, i);
							input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, finger_data[i][3]);
							input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, finger_data[i][2]);
							input_report_abs(ts->input_dev, ABS_MT_PRESSURE, finger_data[i][2]);
							input_report_abs(ts->input_dev, ABS_MT_POSITION_X, finger_data[i][0]);
							input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, finger_data[i][1]);
							input_mt_sync(ts->input_dev);
						} else if (ts->htc_event == SYN_AND_REPORT_TYPE_B) {
							if (ts->support_htc_event) {
								input_report_abs(ts->input_dev, ABS_MT_AMPLITUDE,
									finger_data[i][3] << 16 | finger_data[i][2]);
								input_report_abs(ts->input_dev, ABS_MT_POSITION,
									(finger_pressed == 0) << 31 |
									finger_data[i][0] << 16 | finger_data[i][1]);
							}
							input_mt_slot(ts->input_dev, i);
							input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 1);
							input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, finger_data[i][3]);
							input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, finger_data[i][2]);
							input_report_abs(ts->input_dev, ABS_MT_PRESSURE, finger_data[i][2]);
							input_report_abs(ts->input_dev, ABS_MT_POSITION_X, finger_data[i][0]);
							input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, finger_data[i][1]);
						} else if (ts->htc_event == SYN_AND_REPORT_TYPE_HTC) {
							input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, i);
							input_report_abs(ts->input_dev, ABS_MT_AMPLITUDE,
								finger_data[i][3] << 16 | finger_data[i][2]);
							input_report_abs(ts->input_dev, ABS_MT_POSITION,
								(finger_pressed == 0) << 31 |
								finger_data[i][0] << 16 | finger_data[i][1]);
						}

						if (ts->pre_finger_data[0][0] < 2) {
							if (finger_press_changed & BIT(i)) {
								ts->pre_finger_data[i + 1][0] = finger_data[i][0];
								ts->pre_finger_data[i + 1][1] = finger_data[i][1];
								if (!ts->first_pressed)
									printk(KERN_INFO "[TP] S%d@%d, %d\n",
										i + 1, finger_data[i][0], finger_data[i][1]);
#ifdef SYN_CALIBRATION_CONTROL
								if (ts->finger_count == ts->finger_support) {
									ret = i2c_syn_write_byte_data(ts->client,
										get_address_base(ts, 0x11, COMMAND_BASE), 0x01);
									if (ret < 0)
										i2c_syn_error_handler(ts, 0, "w:Rezero_1", __func__);
									printk(KERN_INFO "[TP] %s: Touch Calibration Confirmed, rezero\n", __func__);
								} else if (!ts->pre_finger_data[0][0] && ts->finger_count > 1)
									ts->pre_finger_data[0][0] = 1;
#endif
							}
						}
						if (ts->htc_event == SYN_AND_REPORT_TYPE_B &&
						    ts->reduce_report_level[TAP_DX_OUTER]) {
							if (finger_press_changed & BIT(i)) {
								ts->tap_suppression &= ~BIT(i);
								ts->tap_suppression |= BIT(i);
								ts->pre_finger_data[i + 1][2] = finger_data[i][0];
								ts->pre_finger_data[i + 1][3] = finger_data[i][1];
								if (ts->reduce_report_level[TAP_TIMEOUT] &&
								    (ts->tap_suppression & BIT(0)))
									ts->single_tap_timeout = jiffies +
										msecs_to_jiffies(ts->reduce_report_level[TAP_TIMEOUT]);
							}
						}
						if (ts->debug_log_level & 0x2)
							printk(KERN_INFO "[TP] Finger %d=> X:%d, Y:%d w:%d, z:%d\n",
								i + 1, finger_data[i][0], finger_data[i][1],
								finger_data[i][2], finger_data[i][3]);
					}
#ifdef SYN_CALIBRATION_CONTROL
					if ((finger_release_changed & BIT(i)) && ts->pre_finger_data[0][0] == 1) {
						ret = i2c_syn_write_byte_data(ts->client,
							get_address_base(ts, 0x11, COMMAND_BASE), 0x01);
						if (ret < 0)
							i2c_syn_error_handler(ts, 0, "w:Rezero_2", __func__);
						printk(KERN_INFO "[TP] %s: Touch Calibration Confirmed, rezero\n", __func__);
					}
					/* enough calibration hints: lock in calibration (state 2) */
					if (!ts->finger_count && !ts->pre_finger_data[0][0] &&
					    ts->pre_finger_data[0][1] > 3) {
						ts->pre_finger_data[0][0] = 2;
						ts->pre_finger_data[0][1] = 0;
						ret = i2c_syn_write_byte_data(ts->client,
							get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, 0x0);
						if (ret < 0)
							i2c_syn_error_handler(ts, 0, "w:Disable Fast Relax", __func__);
						ret = i2c_syn_write_byte_data(ts->client,
							get_address_base(ts, 0x54, COMMAND_BASE), 0x04);
						if (ret < 0)
							i2c_syn_error_handler(ts, 0, "w:TCHTHR", __func__);
						/*printk(KERN_INFO "[TP] %s: Touch Calibration Confirmed\n", __func__);*/
						ret = i2c_syn_read(ts->client,
							get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, &data, 1);
						if (ret < 0)
							i2c_syn_error_handler(ts, 0, "r:fast relaxation", __func__);
						printk(KERN_INFO "[TP] %s: Touch Calibration Confirmed, fast relaxation: %x\n",
							__func__, data);
						if (ts->large_obj_check) {
							ret = i2c_syn_write_byte_data(ts->client,
								get_address_base(ts, 0x11, CONTROL_BASE) + 0x29,
								ts->default_large_obj);
							if (ret < 0)
								i2c_syn_error_handler(ts, 0, "w:large obj supression", __func__);
							printk(KERN_INFO "[TP] %s: Touch Calibration Confirmed, set large obj suppression: %x\n",
								__func__, ts->default_large_obj);
						}
					} else
#endif
					if (!ts->finger_count)
						ts->pre_finger_data[0][0] = 0;
				}
				base += 5;
			}
#ifdef SYN_FILTER_CONTROL
			/* recount suppressed fingers to scale the edge-filter margins */
			if (ts->filter_level[0] && ts->grip_suppression) {
				ts->ambiguous_state = 0;
				for (i = 0; i < ts->finger_support; i++)
					if (ts->grip_suppression & BIT(i))
						ts->ambiguous_state++;
			}
			if (ts->debug_log_level & 0x4)
				printk(KERN_INFO "[TP] ts->grip_suppression: %x, ts->ambiguous_state: %x\n",
					ts->grip_suppression, ts->ambiguous_state);
#endif
		}
	}
	input_sync(ts->input_dev);
}

static void synaptics_ts_report_func(struct synaptics_ts_data *ts)
{
	int ret;
	uint8_t data[2] = {0};

	ret =
i2c_syn_write(ts->client, get_address_base(ts, 0x54, DATA_BASE) + 1, &data[0], 2);
	if (ret < 0)
		i2c_syn_error_handler(ts, 0, "w:1", __func__);
	else {
		/* Read the raw report frame (ts->x_channel * ts->y_channel * 2
		 * bytes) into the staging buffer first, so report_data is only
		 * overwritten with a complete, successful read. */
		ret = i2c_syn_read(ts->client,
			get_address_base(ts, 0x54, DATA_BASE) + 3, ts->temp_report_data,
			ts->x_channel * ts->y_channel * 2);
		if (ret >= 0)
			memcpy(&ts->report_data[0], &ts->temp_report_data[0],
				ts->x_channel * ts->y_channel * 2);
		else {
			/* On read failure publish zeroed data rather than stale data. */
			memset(&ts->report_data[0], 0x0, sizeof(ts->report_data));
			i2c_syn_error_handler(ts, 0, "r:2", __func__);
		}
	}
	/* Publish the result and wake anyone blocked on syn_data_ready_wq. */
	atomic_set(&ts->data_ready, 1);
	wake_up(&syn_data_ready_wq);
}

/*
 * Handle a device-status (function 0x01) interrupt: read and log the
 * low nibble of the device status register.  A status of 1 triggers a
 * reset of the cached register page and a panel re-initialization.
 * NOTE(review): status code 1 presumably means "device reset occurred" --
 * confirm against the Synaptics RMI4 spec.
 */
static void synaptics_ts_status_func(struct synaptics_ts_data *ts)
{
	int ret;
	uint8_t data = 0;

	ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, DATA_BASE), &data, 1);
	if (ret < 0) {
		i2c_syn_error_handler(ts, 0, "r", __func__);
	} else {
		data &= 0x0F;
		printk(KERN_INFO "[TP] Device Status = %x\n", data);
		if (data == 1) {
			/* Page-select cache is shared driver state; guard it. */
			mutex_lock(&syn_mutex);
			ts->page_select = 0;
			mutex_unlock(&syn_mutex);
			printk(KERN_INFO "[TP] TOUCH: Page Select: %s: %d\n", __func__, ts->page_select);
			ret = synaptics_init_panel(ts);
			if (ret < 0)
				printk(KERN_INFO "[TP]%s: synaptics_init_panel fail\n", __func__);
		}
	}
}

/*
 * Polling-mode work item: read the interrupt status byte (function 0x01
 * data + 1) and dispatch to the handler for each asserted source
 * (0x11 finger, 0x01 status, 0x54 report).
 */
static void synaptics_ts_work_func(struct work_struct *work)
{
	struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work);
	int ret;
	uint8_t buf = 0;

	ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, DATA_BASE) + 1, &buf, 1);
	if (ret < 0) {
		i2c_syn_error_handler(ts, 0, "r", __func__);
	} else {
		if (buf & get_address_base(ts, 0x11, INTR_SOURCE))
			synaptics_ts_finger_func(ts);
		if (buf & get_address_base(ts, 0x01, INTR_SOURCE))
			synaptics_ts_status_func(ts);
		if (buf & get_address_base(ts, 0x54, INTR_SOURCE))
			synaptics_ts_report_func(ts);
	}
}

/*
 * Threaded IRQ handler: identical dispatch to synaptics_ts_work_func,
 * but runs directly from the interrupt thread instead of a workqueue.
 */
static irqreturn_t synaptics_irq_thread(int irq, void *ptr)
{
	struct synaptics_ts_data *ts = ptr;
	int ret;
	uint8_t buf = 0;

	ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, DATA_BASE) + 1, &buf, 1);
	if (ret < 0) {
		i2c_syn_error_handler(ts, 0, "r",
__func__); } else { if (buf & get_address_base(ts, 0x11, INTR_SOURCE)) synaptics_ts_finger_func(ts); if (buf & get_address_base(ts, 0x01, INTR_SOURCE)) synaptics_ts_status_func(ts); if (buf & get_address_base(ts, 0x54, INTR_SOURCE)) synaptics_ts_report_func(ts); } return IRQ_HANDLED; } static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer) { struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer); queue_work(ts->syn_wq, &ts->work); hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL); return HRTIMER_NORESTART; } #ifdef SYN_CABLE_CONTROL static void cable_tp_status_handler_func(int connect_status) { struct synaptics_ts_data *ts = gl_ts; uint8_t data; int ret; printk(KERN_INFO "[TP] Touch: cable change to %d\n", connect_status); if (connect_status) connect_status = 1; ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, CONTROL_BASE), &data, 1); if (ret < 0) { i2c_syn_error_handler(ts, 0, "r:1", __func__); } else { ts->cable_config = (data & 0xDF) | (connect_status << 5); printk(KERN_INFO "[TP] %s: ts->cable_config: %x\n", __func__, ts->cable_config); ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x01, CONTROL_BASE), ts->cable_config); if (ret < 0) { i2c_syn_error_handler(ts, 0, "w:2", __func__); } } } static struct t_usb_status_notifier cable_status_handler = { .name = "usb_tp_connected", .func = cable_tp_status_handler_func, }; #endif static int syn_pdt_scan(struct synaptics_ts_data *ts, int num_page) { uint8_t intr_count = 0, data[6] = {0}, num_function[SYN_MAX_PAGE] = {0}; uint16_t i, j, k = 0; int ret = 0; ts->num_function = 0; for (i = 0; i < num_page; i++) { for (j = (0xEE | (i << 8)); j >= (0xBE | (i << 8)); j -= 6) { ret = i2c_syn_read(ts->client, j, data, 1); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r1", __func__); if (data[0] == 0) break; else num_function[i]++; } ts->num_function += num_function[i]; } if (ts->address_table == NULL) { ts->address_table = 
kzalloc(sizeof(struct function_t) * ts->num_function, GFP_KERNEL); if (ts->address_table == NULL) { printk(KERN_INFO "[TP] syn_pdt_scan: memory allocate fail\n"); return -ENOMEM; } printk(KERN_INFO "[TP] syn_pdt_scan: memory allocate success. ptr: %p\n", ts->address_table); } printk(KERN_INFO "[TP] synaptics: %d function supported\n", ts->num_function); for (i = 0; i < num_page; i++) { for (j = 0; j < num_function[i]; j++) { ret = i2c_syn_read(ts->client, i << 8 | (0xE9 - 6*j), data, 6); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:2", __func__); ts->address_table[j + k].query_base = i << 8 | data[0]; ts->address_table[j + k].command_base = i << 8 | data[1]; ts->address_table[j + k].control_base = i << 8 | data[2]; ts->address_table[j + k].data_base = i << 8 | data[3]; if (data[4] & 0x07) { ts->address_table[j + k].interrupt_source = get_int_mask(data[4] & 0x07, intr_count); intr_count += (data[4] & 0x07); } ts->address_table[j + k].function_type = data[5]; printk(KERN_INFO "Query: %2.2X, Command: %4.4X, Control: %2X, Data: %2X, INTR: %2X, Funtion: %2X\n", ts->address_table[j + k].query_base , ts->address_table[j + k].command_base, ts->address_table[j + k].control_base, ts->address_table[j + k].data_base, ts->address_table[j + k].interrupt_source, ts->address_table[j + k].function_type); } k += num_function[i]; } return ts->num_function; } static int syn_get_version(struct synaptics_ts_data *ts) { uint8_t data[4] = {0}; int ret = 0; ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, QUERY_BASE) + 17, data, 4); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:1", __func__); ts->package_id = data[1] << 8 | data[0]; printk(KERN_INFO "[TP] %s: package_id: %d\n", __func__, ts->package_id); ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, QUERY_BASE) + 16, data, 3); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:2", __func__); syn_panel_version = data[0] << 8 | data[2]; printk(KERN_INFO "[TP] %s: panel_version: %x\n", __func__, 
syn_panel_version); ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, QUERY_BASE) + 18, data, 3); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:3", __func__); ts->packrat_number = data[2] << 16 | data[1] << 8 | data[0]; printk(KERN_INFO "[TP] %s: packrat_number: %d\n", __func__, ts->packrat_number); ret = i2c_syn_read(ts->client, get_address_base(ts, 0x34, CONTROL_BASE), data, 4); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:4", __func__); ts->config_version = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; printk(KERN_INFO "[TP] %s: config version: %x\n", __func__, ts->config_version); return 0; } static int syn_get_information(struct synaptics_ts_data *ts) { uint8_t data[4] = {0}, i, num_channel, *buf; int ret = 0; /* ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, QUERY_BASE) + 2, data, 2); if (ret < 0) return ret; syn_panel_version = data[0] << 8 | data[1]; printk(KERN_INFO "%s: panel_version: %x\n", __func__, syn_panel_version); ret = i2c_syn_read(ts->client, get_address_base(ts, 0x34, CONTROL_BASE), data, 4); if (ret < 0) return ret; ts->config_version = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; printk(KERN_INFO "%s: config version: %x\n", __func__, ts->config_version); */ ret = i2c_syn_read(ts->client, get_address_base(ts, 0x11, QUERY_BASE) + 1, data, 1); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:1", __func__); if ((data[0] & 0x07) == 5) ts->finger_support = 10; else if ((data[0] & 0x07) < 5) ts->finger_support = (data[0] & 0x07) + 1; else { printk(KERN_ERR "[TP] %s: number of fingers not define: %x\n", __func__, data[0] & 0x07); return SYN_PROCESS_ERR; } printk(KERN_INFO "[TP] %s: finger_support: %d\n", __func__, ts->finger_support); ret = i2c_syn_read(ts->client, get_address_base(ts, 0x11, CONTROL_BASE) + 6, data, 4); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:2", __func__); ts->max[0] = data[0] | data[1] << 8; ts->max[1] = data[2] | data[3] << 8; printk(KERN_INFO "[TP] %s: max_x: %d, 
max_y: %d\n", __func__, ts->max[0], ts->max[1]); if (get_address_base(ts, 0x54, FUNCTION)) { ret = i2c_syn_read(ts->client, get_address_base(ts, 0x54, QUERY_BASE), data, 2); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:3", __func__); ts->y_channel = data[0]; ts->x_channel = data[1]; num_channel = ts->y_channel + ts->x_channel; buf = kzalloc(num_channel + 1, GFP_KERNEL); ret = i2c_syn_read(ts->client, get_address_base(ts, 0x54, CONTROL_BASE) + 17, buf, num_channel + 1); if (ret < 0) { kfree(buf); return i2c_syn_error_handler(ts, 0, "r:4", __func__); } for (i = 1; i < num_channel + 1; i++) { if (buf[i] == 0xFF) { if (i <= num_channel - ts->x_channel) ts->y_channel--; else ts->x_channel--; } } if (buf[0] & 0x01) swap(ts->y_channel, ts->x_channel); printk(KERN_INFO "[TP] %s: X: %d, Y: %d\n", __func__, ts->x_channel, ts->y_channel); kfree(buf); ts->temp_report_data = kzalloc(2 * ts->x_channel * ts->y_channel, GFP_KERNEL); ts->report_data = kzalloc(2 * ts->x_channel * ts->y_channel, GFP_KERNEL); if(ts->temp_report_data == NULL || ts->report_data == NULL) return -ENOMEM; ret = i2c_syn_read(ts->client, get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, &ts->relaxation, 1); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:5", __func__); printk(KERN_INFO "[TP] %s: ts->relaxation: %d\n", __func__, ts->relaxation); } if (ts->large_obj_check) { ret = i2c_syn_read(ts->client, get_address_base(ts, 0x11, CONTROL_BASE) + 0x29, &ts->default_large_obj, 1); if (ret < 0) return i2c_syn_error_handler(ts, 0, "r:6", __func__); printk(KERN_INFO "[TP] %s: ts->default_large_obj: %x\n", __func__, ts->default_large_obj); ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x11, CONTROL_BASE) + 0x29, ts->default_large_obj & 0x7F); if (ret < 0) return i2c_syn_error_handler(ts, 0, "w:1", __func__); printk(KERN_INFO "[TP] %s: set large obj suppression register to: %x\n", __func__, ts->default_large_obj & 0x7F); } return 0; } static int synaptics_ts_probe( struct i2c_client 
*client, const struct i2c_device_id *id) { struct synaptics_ts_data *ts; uint8_t i; int ret = 0; struct synaptics_i2c_rmi_platform_data *pdata; uint8_t data = 0; printk(KERN_INFO "[TP] %s: enter", __func__); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { printk(KERN_ERR "[TP] TOUCH_ERR: synaptics_ts_probe: need I2C_FUNC_I2C\n"); ret = -ENODEV; goto err_check_functionality_failed; } ts = kzalloc(sizeof(*ts), GFP_KERNEL); if (ts == NULL) { ret = -ENOMEM; goto err_alloc_data_failed; } ts->client = client; i2c_set_clientdata(client, ts); pdata = client->dev.platform_data; if (pdata == NULL) { printk(KERN_ERR "[TP] pdata is NULL\n"); goto err_get_platform_data_fail; } ret = i2c_syn_read(ts->client, 0x00EE, &data, 1); if (ret < 0) { printk(KERN_INFO "[TP] No Synaptics chip\n"); goto err_detect_failed; } for (i = 0; i < 10; i++) { ret = i2c_syn_read(ts->client, SYN_F01DATA_BASEADDR, &data, 1); if (ret < 0) { i2c_syn_error_handler(ts, 0, "read device status failed!", __func__); goto err_detect_failed; } if (data & 0x44) { msleep(20); #ifdef SYN_FLASH_PROGRAMMING_LOG printk(KERN_INFO "[TP] synaptics probe: F01_data: %x touch controller stay in bootloader mode!\n", data); #endif } else if (data & 0x40) { printk(KERN_ERR "[TP] TOUCH_ERR: synaptics probe: F01_data: %x touch controller stay in bootloader mode!\n", data); goto err_detect_failed; } else break; } if (i == 10) { uint8_t num = 0; printk(KERN_ERR "[TP] synaptics probe: touch controller doesn't enter UI mode! 
F01_data: %x\n", data); if (syn_pdt_scan(ts, SYN_BL_PAGE) < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: PDT scan fail\n"); goto err_init_failed; } if (pdata) { while (pdata->default_config != 1) { if (pdata->default_config == 0) { printk(KERN_ERR "[TP] TOUCH_ERR: touch controller stays in bootloader mode " "and recovery method doesn't enable\n"); goto err_init_failed; } pdata++; num++; } ts->config = pdata->config; ret = syn_config_update(ts, pdata->gpio_irq); if (ret < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: syn_config_update fail\n"); goto err_init_failed; } else if (ret == 0) printk(KERN_INFO "[TP] syn_config_update success\n"); else printk(KERN_INFO "[TP] Warning: syn_config_update: the same " "config version and CRC but touch controller always stay in bootloader mode\n"); pdata = pdata - num; } if (ts->address_table != NULL) { kfree(ts->address_table); ts->address_table = NULL; } } if (syn_pdt_scan(ts, SYN_MAX_PAGE) < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: PDT scan fail\n"); goto err_init_failed; } if (syn_get_version(ts) < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: syn_get_version fail\n"); goto err_init_failed; } if (pdata) { while (pdata->version > syn_panel_version) { printk(KERN_INFO "[TP] synaptics_ts_probe: old tp detected, " "panel version = %x\n", syn_panel_version); pdata++; } while (pdata->packrat_number && pdata->packrat_number > ts->packrat_number) { pdata++; } if (pdata->tw_pin_mask) { ts->tw_pin_mask = pdata->tw_pin_mask; ret = syn_get_tw_vendor(ts, pdata->gpio_irq); if (ret < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: syn_get_tw_vendor fail\n"); goto err_init_failed; } } while (pdata->sensor_id > 0 && pdata->sensor_id != (SENSOR_ID_CHECKING_EN | ts->tw_vendor)) { pdata++; } printk(KERN_INFO "[TP] synaptics_ts_probe: pdata->version = %x, pdata->packrat_number = %d," " pdata->sensor_id = %x\n", pdata->version, pdata->packrat_number, pdata->sensor_id); ts->power = pdata->power; ts->flags = pdata->flags; ts->htc_event = pdata->report_type; ts->filter_level = 
pdata->filter_level; ts->reduce_report_level = pdata->reduce_report_level; ts->gpio_irq = pdata->gpio_irq; ts->gpio_reset = pdata->gpio_reset; ts->large_obj_check = pdata->large_obj_check; ts->support_htc_event = pdata->support_htc_event; ts->mfg_flag = pdata->mfg_flag; #ifdef SYN_CABLE_CONTROL ts->cable_support = pdata->cable_support; /* Reserve */ #endif ts->config = pdata->config; } /* if (pdata->abs_x_max == 0 && pdata->abs_y_max == 0) { ts->layout[0] = ts->layout[2] = 0; ts->layout[1] = ts->max[0]; ts->layout[3] = ts->max[1]; } else { ts->layout[0] = pdata->abs_x_min; ts->layout[1] = pdata->abs_x_max; ts->layout[2] = pdata->abs_y_min; ts->layout[3] = pdata->abs_y_max; } if (get_address_base(ts, 0x19, FUNCTION)) { i2c_syn_read(ts->client, get_address_base(ts, 0x19, QUERY_BASE) + 1, &ts->key_number, 1); for (i = 0; i < ts->key_number; i++) { ts->key_postion_x[i] = (ts->layout[1] - ts->layout[0]) * (i * 2 + 1) / (ts->key_number * 2) + ts->layout[0]; printk(KERN_INFO "ts->key_postion_x[%d]: %d\n", i, ts->key_postion_x[i]); } ts->key_postion_y = ts->layout[2] + (21 * (ts->layout[3] - ts->layout[2]) / 20); printk(KERN_INFO "ts->key_postion_y: %d\n", ts->key_postion_y); } }*/ #ifndef SYN_DISABLE_CONFIG_UPDATE ret = syn_config_update(ts, pdata->gpio_irq); if (ret < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: syn_config_update fail\n"); goto err_init_failed; } else if (ret == 0) printk(KERN_INFO "[TP] syn_config_update success\n"); else printk(KERN_INFO "[TP] syn_config_update: the same config version and CRC\n"); #else if (pdata->tw_pin_mask) { ret = disable_flash_programming(ts, 0); if (ret < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: disable_flash_programming fail\n"); goto err_init_failed; } } #endif if (syn_pdt_scan(ts, SYN_MAX_PAGE) < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: PDT scan fail\n"); goto err_init_failed; } #ifndef SYN_DISABLE_CONFIG_UPDATE if (pdata->customer_register[CUS_REG_BASE]) { ret = i2c_syn_write(ts->client, pdata->customer_register[CUS_REG_BASE], 
&pdata->customer_register[CUS_BALLISTICS_CTRL], CUS_REG_SIZE - 1); printk(KERN_INFO "[TP] Loads customer register\n"); } #endif if (syn_get_information(ts) < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: syn_get_information fail\n"); goto err_syn_get_info_failed; } if (pdata->abs_x_max == 0 && pdata->abs_y_max == 0) { ts->layout[0] = ts->layout[2] = 0; ts->layout[1] = ts->max[0]; ts->layout[3] = ts->max[1]; } else { ts->layout[0] = pdata->abs_x_min; ts->layout[1] = pdata->abs_x_max; ts->layout[2] = pdata->abs_y_min; ts->layout[3] = pdata->abs_y_max; } if (get_address_base(ts, 0x19, FUNCTION)) { ret = i2c_syn_read(ts->client, get_address_base(ts, 0x19, QUERY_BASE) + 1, &ts->key_number, 1); if (ret < 0) { i2c_syn_error_handler(ts, 0, "F19 Query fail", __func__); goto err_F19_query_failed; } for (i = 0; i < ts->key_number; i++) { ts->key_postion_x[i] = (ts->layout[1] - ts->layout[0]) * (i * 2 + 1) / (ts->key_number * 2) + ts->layout[0]; printk(KERN_INFO "[TP] ts->key_postion_x[%d]: %d\n", i, ts->key_postion_x[i]); } ts->key_postion_y = ts->layout[2] + (21 * (ts->layout[3] - ts->layout[2]) / 20); printk(KERN_INFO "[TP] ts->key_postion_y: %d\n", ts->key_postion_y); } ret = synaptics_init_panel(ts); if (ret < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: synaptics_init_panel fail\n"); goto err_init_panel_failed; } init_waitqueue_head(&syn_data_ready_wq); ret = synaptics_input_register(ts); if (ret) { printk(KERN_ERR "[TP] TOUCH_ERR: synaptics_ts_probe: " "Unable to register %s input device\n", ts->input_dev->name); goto err_input_register_device_failed; } gl_ts = ts; ts->irq_enabled = 0; if (client->irq) { ts->use_irq = 1; ret = request_threaded_irq(client->irq, NULL, synaptics_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, ts); if (ret == 0) { ts->irq_enabled = 1; ret = i2c_syn_read(ts->client, get_address_base(ts, 0x01, CONTROL_BASE) + 1, &ts->intr_bit, 1); if (ret < 0) { free_irq(client->irq, ts); i2c_syn_error_handler(ts, 0, "get interrupt bit failed", __func__); goto 
err_get_intr_bit_failed; } printk(KERN_INFO "[TP] %s: interrupt enable: %x\n", __func__, ts->intr_bit); } else { dev_err(&client->dev, "[TP] TOUCH_ERR: request_irq failed\n"); ts->use_irq = 0; } } if (!ts->use_irq) { ts->syn_wq = create_singlethread_workqueue("synaptics_wq"); if (!ts->syn_wq) goto err_create_wq_failed; INIT_WORK(&ts->work, synaptics_ts_work_func); hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ts->timer.function = synaptics_ts_timer_func; hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL); } #ifdef CONFIG_HAS_EARLYSUSPEND ts->early_suspend.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING - 1; ts->early_suspend.suspend = synaptics_ts_early_suspend; ts->early_suspend.resume = synaptics_ts_late_resume; register_early_suspend(&ts->early_suspend); #endif #ifdef SYN_CABLE_CONTROL if (ts->cable_support) { usb_register_notifier(&cable_status_handler); /* reserve for new version */ ret = i2c_syn_read(ts->client, get_address_base(ts, 0x11, CONTROL_BASE), &ts->cable_config, 1); if (ret < 0) { printk(KERN_ERR "[TP] TOUCH_ERR: get cable config failed\n"); goto err_get_cable_config_failed; } if (usb_get_connect_type()) cable_tp_status_handler_func(1); printk(KERN_INFO "[TP] %s: ts->cable_config: %x\n", __func__, ts->cable_config); } #endif synaptics_touch_sysfs_init(); #ifdef SYN_WIRELESS_DEBUG if (rmi_char_dev_register()) printk(KERN_ERR "[TP] %s: error register char device", __func__); #endif printk(KERN_INFO "[TP] synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? 
"interrupt" : "polling"); return 0; #ifdef SYN_CABLE_CONTROL err_get_cable_config_failed: if (ts->use_irq) free_irq(client->irq, ts); else destroy_workqueue(ts->syn_wq); #endif err_create_wq_failed: err_get_intr_bit_failed: err_input_register_device_failed: input_free_device(ts->input_dev); err_init_panel_failed: err_F19_query_failed: err_syn_get_info_failed: if(ts->report_data != NULL) kfree(ts->report_data); if(ts->temp_report_data != NULL) kfree(ts->temp_report_data); err_init_failed: if(ts->address_table != NULL) kfree(ts->address_table); err_detect_failed: err_get_platform_data_fail: kfree(ts); err_alloc_data_failed: err_check_functionality_failed: return ret; } static int synaptics_ts_remove(struct i2c_client *client) { struct synaptics_ts_data *ts = i2c_get_clientdata(client); unregister_early_suspend(&ts->early_suspend); if (ts->use_irq) free_irq(client->irq, ts); else { hrtimer_cancel(&ts->timer); if (ts->syn_wq) destroy_workqueue(ts->syn_wq); } input_unregister_device(ts->input_dev); synaptics_touch_sysfs_remove(); if(ts->report_data != NULL) kfree(ts->report_data); if(ts->temp_report_data != NULL) kfree(ts->temp_report_data); if(ts->address_table != NULL) kfree(ts->address_table); kfree(ts); return 0; } static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg) { int ret; struct synaptics_ts_data *ts = i2c_get_clientdata(client); if (ts->use_irq) { disable_irq(client->irq); ts->irq_enabled = 0; } else { hrtimer_cancel(&ts->timer); ret = cancel_work_sync(&ts->work); } ts->pre_finger_data[0][0] = 0; ts->pre_finger_data[0][1] = 0; ts->first_pressed = 0; #ifdef SYN_CALIBRATION_CONTROL if (ts->mfg_flag != 1) { ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, ts->relaxation); if (ret < 0) i2c_syn_error_handler(ts, 1, "fast relaxation", __func__); ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, COMMAND_BASE), 0x04); if (ret < 0) i2c_syn_error_handler(ts, 1, "force update", 
__func__); printk("[TP] touch suspend, fast relasxation: %x\n", ts->relaxation); } #endif if (ts->large_obj_check) { ret = i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x11, CONTROL_BASE) + 0x29, ts->default_large_obj & 0x7F); if (ret < 0) i2c_syn_error_handler(ts, 1, "large obj suppression", __func__); printk("[TP] touch suspend, set large obj suppression: %x\n", ts->default_large_obj & 0x7F); } if (ts->power) ts->power(0); else { ret = i2c_syn_write_byte_data(client, get_address_base(ts, 0x01, CONTROL_BASE), 0x01); /* sleep */ if (ret < 0) i2c_syn_error_handler(ts, 1, "sleep", __func__); } return 0; } static int synaptics_ts_resume(struct i2c_client *client) { int ret; struct synaptics_ts_data *ts = i2c_get_clientdata(client); printk(KERN_INFO "%s: enter\n", __func__); if (ts->power) { ts->power(1); msleep(100); #ifdef SYN_CABLE_CONTROL if (ts->cable_support) { if (usb_get_connect_type()) cable_tp_status_handler_func(1); printk(KERN_INFO "%s: ts->cable_config: %x\n", __func__, ts->cable_config); } #endif } else { ret = i2c_syn_write_byte_data(client, get_address_base(ts, 0x01, CONTROL_BASE), 0x00); /* wake */ if (ret < 0) i2c_syn_error_handler(ts, 1, "wake up", __func__); } /* i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, ts->relaxation); i2c_syn_write_byte_data(ts->client, get_address_base(ts, 0x54, COMMAND_BASE), 0x04); printk("[%x]%d, [%x]", get_address_base(ts, 0x54, CONTROL_BASE) + 0x10, ts->relaxation, get_address_base(ts, 0x54, COMMAND_BASE)); */ ret = synaptics_init_panel(ts); if (ret < 0) printk(KERN_ERR "[TP]TOUCH_ERR: synaptics_ts_resume: synaptics init panel failed\n"); if (ts->htc_event == SYN_AND_REPORT_TYPE_A) { if (ts->support_htc_event) { input_report_abs(ts->input_dev, ABS_MT_AMPLITUDE, 0); input_report_abs(ts->input_dev, ABS_MT_POSITION, 1 << 31); input_sync(ts->input_dev); } input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0); input_sync(ts->input_dev); } else if (ts->htc_event == 
SYN_AND_REPORT_TYPE_HTC) {
		input_report_abs(ts->input_dev, ABS_MT_AMPLITUDE, 0);
		input_report_abs(ts->input_dev, ABS_MT_POSITION, 1 << 31);
	}
	/* Re-arm the event source: IRQ mode re-enables the interrupt line,
	 * polling mode restarts the 1-second hrtimer. */
	if (ts->use_irq) {
		enable_irq(client->irq);
		ts->irq_enabled = 1;
	} else
		hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	return 0;
}

#ifdef CONFIG_HAS_EARLYSUSPEND
/* Early-suspend hook: recover the driver data and forward to the
 * regular suspend path. */
static void synaptics_ts_early_suspend(struct early_suspend *h)
{
	struct synaptics_ts_data *ts;
	ts = container_of(h, struct synaptics_ts_data, early_suspend);
	synaptics_ts_suspend(ts->client, PMSG_SUSPEND);
}

/* Late-resume hook: forward to the regular resume path. */
static void synaptics_ts_late_resume(struct early_suspend *h)
{
	struct synaptics_ts_data *ts;
	ts = container_of(h, struct synaptics_ts_data, early_suspend);
	synaptics_ts_resume(ts->client);
}
#endif

/* i2c device id table: matches the SYNAPTICS_3200_NAME device. */
static const struct i2c_device_id synaptics_ts_id[] = {
	{ SYNAPTICS_3200_NAME, 0 },
	{ }
};

static struct i2c_driver synaptics_ts_driver = {
	.probe = synaptics_ts_probe,
	.remove = synaptics_ts_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
	/* Without early-suspend support, suspend/resume go through the
	 * standard i2c PM callbacks instead. */
	.suspend = synaptics_ts_suspend,
	.resume = synaptics_ts_resume,
#endif
	.id_table = synaptics_ts_id,
	.driver = {
		.name = SYNAPTICS_3200_NAME,
	},
};

/* Module entry point: register the i2c driver. */
static int __devinit synaptics_ts_init(void)
{
	return i2c_add_driver(&synaptics_ts_driver);
}

/* Module exit point: unregister the i2c driver. */
static void __exit synaptics_ts_exit(void)
{
	i2c_del_driver(&synaptics_ts_driver);
}

module_init(synaptics_ts_init);
module_exit(synaptics_ts_exit);

MODULE_DESCRIPTION("Synaptics Touchscreen Driver");
MODULE_LICENSE("GPL");
gpl-2.0
tsg-/mplayer
libmpdemux/parse_es.c
53
4337
/* * MPEG-ES video parser * * This file is part of MPlayer. * * MPlayer is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * MPlayer is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with MPlayer; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include "config.h" #include "mp_msg.h" #include "help_mp.h" #include "stream/stream.h" #include "demuxer.h" #include "parse_es.h" //static unsigned char videobuffer[MAX_VIDEO_PACKET_SIZE]; unsigned char* videobuffer=NULL; int videobuf_len=0; int next_nal = -1; ///! 
/* legacy variable, 4 if stream is synced, 0 if not */
int videobuf_code_len=0;

/* Upper bound (bytes) scanned for a start code before giving up. */
#define MAX_SYNCLEN (10 * 1024 * 1024)

// sync video stream, and returns next packet code
/*
 * Scan the demux stream until an MPEG start-code prefix (0x00 0x00 0x01)
 * is found, cache the following code byte in next_nal, and return
 * 0x100|code.  Returns 0 on EOF or after MAX_SYNCLEN bytes without a
 * match, resetting the sync state (next_nal = -1, videobuf_code_len = 0).
 * If already synced (videobuf_code_len != 0) the cached code is returned
 * without touching the stream.
 */
int sync_video_packet(demux_stream_t *ds){
 if (!videobuf_code_len) {
  int skipped=0;
  /* demux_pattern_3() advances the stream looking for the 0x000001
   * prefix; 'skipped' reports how many bytes were discarded. */
  if (!demux_pattern_3(ds, NULL, MAX_SYNCLEN, &skipped, 0x100)) {
   if (skipped == MAX_SYNCLEN)
    mp_msg(MSGT_DEMUXER, MSGL_ERR, "parse_es: could not sync video stream!\n");
   goto eof_out;
  }
  /* The byte following the prefix identifies the packet/start code. */
  next_nal = demux_getc(ds);
  if (next_nal < 0)
   goto eof_out;
  videobuf_code_len = 4;
  if(skipped)
   mp_dbg(MSGT_PARSEES,MSGL_DBG2,"videobuf: %d bytes skipped (next: 0x1%02X)\n",skipped,next_nal);
 }
 return 0x100|next_nal;
eof_out:
 /* EOF or sync failure: forget any cached start code. */
 next_nal = -1;
 videobuf_code_len = 0;
 return 0;
}

// return: packet length
/*
 * Append the pending start code plus its payload (up to the next
 * 0x000001 prefix) to the global videobuffer.  Returns the number of
 * bytes added for this packet (0 if the buffer has fewer than 5 bytes
 * free), and caches the following packet's code in next_nal.
 */
int read_video_packet(demux_stream_t *ds){
 int packet_start;
 int res, read;
 /* Need room for the 4 start-code bytes plus at least one payload byte. */
 if (VIDEOBUFFER_SIZE - videobuf_len < 5)
  return 0;
 // SYNC STREAM
 // if(!sync_video_packet(ds)) return 0; // cannot sync (EOF)
 // COPY STARTCODE:
 packet_start=videobuf_len;
 videobuffer[videobuf_len+0]=0;
 videobuffer[videobuf_len+1]=0;
 videobuffer[videobuf_len+2]=1;
 videobuffer[videobuf_len+3]=next_nal;
 videobuf_len+=4;
 // READ PACKET:
 res = demux_pattern_3(ds, &videobuffer[videobuf_len], VIDEOBUFFER_SIZE - videobuf_len, &read, 0x100);
 videobuf_len += read;
 if (!res)
  goto eof_out;
 /* Trim the 3 matched bytes -- presumably the next start code's 0x000001
  * prefix that demux_pattern_3 copied into the buffer. */
 videobuf_len-=3;
 mp_dbg(MSGT_PARSEES,MSGL_DBG2,"videobuf: packet 0x1%02X len=%d (total=%d)\n",videobuffer[packet_start+3],videobuf_len-packet_start,videobuf_len);
 // Save next packet code:
 next_nal = demux_getc(ds);
 if (next_nal < 0)
  goto eof_out;
 videobuf_code_len=4;
 return videobuf_len-packet_start;
eof_out:
 /* EOF: invalidate the cached code but still report what was buffered. */
 next_nal = -1;
 videobuf_code_len = 0;
 return videobuf_len - packet_start;
}

// return: next packet code
/*
 * Skip the current packet's payload by forcing a resync; returns the
 * following packet's code (0x100|code) or 0 on EOF.
 */
int skip_video_packet(demux_stream_t *ds){
 // SYNC STREAM
 // if(!sync_video_packet(ds)) return 0; // cannot sync (EOF)
 videobuf_code_len=0; // force resync
 // SYNC AGAIN:
 return sync_video_packet(ds);
}

/* stripped down version of a52_syncinfo() from liba52 * copyright belongs to Michel
/* (continued) Lespinasse <walken@zoy.org> and Aaron Holtzman <aholtzma@ess.engr.uvic.ca> */

/*
 * Parse an A/52 (AC-3) sync frame header.
 *
 * buf   points at (at least) the first 6 bytes of a candidate frame.
 * srate receives the sample rate in Hz on success (halved for reduced-
 *       sample-rate streams, bsid 9..11).
 *
 * Returns the total frame size in bytes, or 0 if buf does not start with
 * a valid A/52 sync header (bad syncword, bsid >= 12, bad frmsizecod, or
 * reserved fscod).
 */
int mp_a52_framesize(uint8_t * buf, int *srate)
{
    /* Bitrate table in kbit/s, indexed by frmsizecod >> 1.  static const
     * so the tables are not rebuilt on the stack at every call. */
    static const int rate[] = { 32, 40, 48, 56, 64, 80, 96, 112, 128,
                                160, 192, 224, 256, 320, 384, 448,
                                512, 576, 640 };
    /* Sample-rate right-shift, indexed by bsid (buf[5] >> 3). */
    static const uint8_t halfrate[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 };
    int frmsizecod, bitrate, half;

    if ((buf[0] != 0x0b) || (buf[1] != 0x77))   /* syncword */
        return 0;

    if (buf[5] >= 0x60)                         /* bsid >= 12 */
        return 0;
    half = halfrate[buf[5] >> 3];

    frmsizecod = buf[4] & 63;
    if (frmsizecod >= 38)
        return 0;
    bitrate = rate[frmsizecod >> 1];

    switch (buf[4] & 0xc0) {                    /* fscod */
    case 0:     /* 48 kHz */
        *srate = 48000 >> half;
        return 4 * bitrate;
    case 0x40:  /* 44.1 kHz: odd frmsizecod adds one padding word */
        *srate = 44100 >> half;
        return 2 * (320 * bitrate / 147 + (frmsizecod & 1));
    case 0x80:  /* 32 kHz */
        *srate = 32000 >> half;
        return 6 * bitrate;
    }
    return 0;   /* fscod == 3: reserved */
}
gpl-2.0
jmw7912/wat-0016-kernel-2.6.37
drivers/media/IR/ir-rc5-decoder.c
53
5024
/* ir-rc5-decoder.c - handle RC5(x) IR Pulse/Space protocol
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * This code handles 14 bits RC5 protocols and 20 bits RC5x protocols.
 * There are other variants that use a different number of bits.
 * This is currently unsupported.
 * It considers a carrier of 36 kHz, with a total of 14/20 bits, where
 * the first two bits are start bits, and a third one is a filing bit
 */

#include "ir-core-priv.h"

#define RC5_NBITS		14	/* plain RC5 frame length */
#define RC5X_NBITS		20	/* extended RC5x frame length */
#define CHECK_RC5X_NBITS	8	/* after this many bits, look for the RC5x gap */
#define RC5_UNIT		888888 /* ns */
#define RC5_BIT_START		(1 * RC5_UNIT)
#define RC5_BIT_END		(1 * RC5_UNIT)
#define RC5X_SPACE		(4 * RC5_UNIT)	/* extra space that marks an RC5x frame */

/* Decoder state machine, driven one pulse/space event at a time. */
enum rc5_state {
	STATE_INACTIVE,		/* waiting for the first pulse */
	STATE_BIT_START,	/* expecting the first half of a bit */
	STATE_BIT_END,		/* expecting the mid-bit transition */
	STATE_CHECK_RC5X,	/* decide RC5 vs RC5x from the long space */
	STATE_FINISHED,		/* all wanted bits collected; emit scancode */
};

/**
 * ir_rc5_decode() - Decode one RC-5 pulse or space
 * @input_dev:	the struct input_dev descriptor of the device
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This function returns -EINVAL if the pulse violates the state machine
 *
 * Each event is consumed in RC5_UNIT-sized pieces (decrease_duration() +
 * "goto again"), so one long pulse/space can advance several states.
 * Decoded bits accumulate MSB-first in data->bits; a scancode is reported
 * via ir_keydown() once data->wanted_bits bits have been seen.
 */
static int ir_rc5_decode(struct input_dev *input_dev, struct ir_raw_event ev)
{
	struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
	struct rc5_dec *data = &ir_dev->raw->rc5;
	u8 toggle;
	u32 scancode;

	/* Ignore events entirely when RC5 decoding is not enabled. */
	if (!(ir_dev->raw->enabled_protocols & IR_TYPE_RC5))
		return 0;

	/* Non-timing events only matter if they reset the state machine. */
	if (!is_timing_event(ev)) {
		if (ev.reset)
			data->state = STATE_INACTIVE;
		return 0;
	}

	/* Anything shorter than half a unit cannot be a valid RC5 element. */
	if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
		goto out;

again:
	IR_dprintk(2, "RC5(x) decode started at state %i (%uus %s)\n",
		   data->state, TO_US(ev.duration), TO_STR(ev.pulse));

	/* The event may have been fully consumed by a previous iteration. */
	if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
		return 0;

	switch (data->state) {

	case STATE_INACTIVE:
		if (!ev.pulse)
			break;

		data->state = STATE_BIT_START;
		data->count = 1;
		/* We just need enough bits to get to STATE_CHECK_RC5X */
		data->wanted_bits = RC5X_NBITS;
		decrease_duration(&ev, RC5_BIT_START);
		goto again;

	case STATE_BIT_START:
		if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2))
			break;

		/* Manchester coding: a space in the first half means 1. */
		data->bits <<= 1;
		if (!ev.pulse)
			data->bits |= 1;
		data->count++;
		data->state = STATE_BIT_END;
		return 0;

	case STATE_BIT_END:
		/* The second half of a bit must be a level transition. */
		if (!is_transition(&ev, &ir_dev->raw->prev_ev))
			break;

		if (data->count == data->wanted_bits)
			data->state = STATE_FINISHED;
		else if (data->count == CHECK_RC5X_NBITS)
			data->state = STATE_CHECK_RC5X;
		else
			data->state = STATE_BIT_START;

		decrease_duration(&ev, RC5_BIT_END);
		goto again;

	case STATE_CHECK_RC5X:
		/* A 4-unit space after 8 bits distinguishes RC5x from RC5. */
		if (!ev.pulse &&
		    geq_margin(ev.duration, RC5X_SPACE, RC5_UNIT / 2)) {
			/* RC5X */
			data->wanted_bits = RC5X_NBITS;
			decrease_duration(&ev, RC5X_SPACE);
		} else {
			/* RC5 */
			data->wanted_bits = RC5_NBITS;
		}
		data->state = STATE_BIT_START;
		goto again;

	case STATE_FINISHED:
		if (ev.pulse)
			break;

		if (data->wanted_bits == RC5X_NBITS) {
			/* RC5X */
			u8 xdata, command, system;
			xdata    = (data->bits & 0x0003F) >> 0;
			command  = (data->bits & 0x00FC0) >> 6;
			system   = (data->bits & 0x1F000) >> 12;
			toggle   = (data->bits & 0x20000) ? 1 : 0;
			/* Inverted 2nd start bit extends the command range. */
			command += (data->bits & 0x01000) ? 0 : 0x40;
			scancode = system << 16 | command << 8 | xdata;

			IR_dprintk(1, "RC5X scancode 0x%06x (toggle: %u)\n",
				   scancode, toggle);

		} else {
			/* RC5 */
			u8 command, system;
			command  = (data->bits & 0x0003F) >> 0;
			system   = (data->bits & 0x007C0) >> 6;
			toggle   = (data->bits & 0x00800) ? 1 : 0;
			command += (data->bits & 0x01000) ? 0 : 0x40;
			scancode = system << 8 | command;

			IR_dprintk(1, "RC5 scancode 0x%04x (toggle: %u)\n",
				   scancode, toggle);
		}

		ir_keydown(input_dev, scancode, toggle);
		data->state = STATE_INACTIVE;
		return 0;
	}

out:
	IR_dprintk(1, "RC5(x) decode failed at state %i (%uus %s)\n",
		   data->state, TO_US(ev.duration), TO_STR(ev.pulse));
	data->state = STATE_INACTIVE;
	return -EINVAL;
}

static struct ir_raw_handler rc5_handler = {
	.protocols	= IR_TYPE_RC5,
	.decode		= ir_rc5_decode,
};

/* Register the RC5 decoder with the raw IR core at module load. */
static int __init ir_rc5_decode_init(void)
{
	ir_raw_handler_register(&rc5_handler);

	printk(KERN_INFO "IR RC5(x) protocol handler initialized\n");
	return 0;
}

static void __exit ir_rc5_decode_exit(void)
{
	ir_raw_handler_unregister(&rc5_handler);
}

module_init(ir_rc5_decode_init);
module_exit(ir_rc5_decode_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("RC5(x) IR protocol decoder");
gpl-2.0
andygross/omap_dmm_tiler
drivers/gpu/drm/nouveau/core/core/handle.c
53
6057
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/handle.h>
#include <core/client.h>

/* Log helper: prints "parent-name:handle-name" before the message (parent
 * name is ~0 when the handle has no parent). */
#define hprintk(h,l,f,a...) do {                                          \
	struct nouveau_client *c = nouveau_client((h)->object);           \
	struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
	nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a);          \
} while(0)

/*
 * Increment the use count of a handle's object, then recursively init all
 * child handles.  On failure, already-inited children are rolled back (in
 * reverse order) and the object's use count is dropped again.
 * Returns 0 on success or a negative error code.
 */
int
nouveau_handle_init(struct nouveau_handle *handle)
{
	struct nouveau_handle *item;
	int ret;

	hprintk(handle, TRACE, "init running\n");
	ret = nouveau_object_inc(handle->object);
	if (ret)
		return ret;

	hprintk(handle, TRACE, "init children\n");
	list_for_each_entry(item, &handle->tree, head) {
		ret = nouveau_handle_init(item);
		if (ret)
			goto fail;
	}

	hprintk(handle, TRACE, "init completed\n");
	return 0;
fail:
	hprintk(handle, ERROR, "init failed with %d\n", ret);
	/* Undo the children inited before the failing one, newest first. */
	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
		nouveau_handle_fini(item, false);
	}

	nouveau_object_dec(handle->object, false);
	return ret;
}

/*
 * Fini (or suspend, when @suspend is true) all child handles and then the
 * handle's own object.  During suspend a child failure aborts and re-inits
 * the already-finied children; during a plain fini errors are ignored.
 * Returns 0 on success or a negative error code (suspend path only).
 */
int
nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
{
	static char *name[2] = { "fini", "suspend" };
	struct nouveau_handle *item;
	int ret;

	hprintk(handle, TRACE, "%s children\n", name[suspend]);
	list_for_each_entry(item, &handle->tree, head) {
		ret = nouveau_handle_fini(item, suspend);
		if (ret && suspend)
			goto fail;
	}

	hprintk(handle, TRACE, "%s running\n", name[suspend]);
	if (handle->object) {
		ret = nouveau_object_dec(handle->object, suspend);
		if (ret && suspend)
			goto fail;
	}

	hprintk(handle, TRACE, "%s completed\n", name[suspend]);
	return 0;
fail:
	hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
	/* Suspend failed part-way: try to restart what was already stopped. */
	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
		int rret = nouveau_handle_init(item);
		if (rret)
			hprintk(handle, FATAL, "failed to restart, %d\n", rret);
	}

	return ret;
}

/*
 * Create a handle named @_handle for @object, register it in the nearest
 * enclosing namedb, optionally attach it to @parent (object_attach hook,
 * whose return value is kept in handle->priv for later detach), and link
 * it under the parent handle @_parent in the client's handle tree.
 * On success *phandle points to the new handle; returns 0 or -errno.
 */
int
nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
		      struct nouveau_object *object,
		      struct nouveau_handle **phandle)
{
	struct nouveau_object *namedb;
	struct nouveau_handle *handle;
	int ret;

	/* Walk up to the closest object that carries a namedb. */
	namedb = parent;
	while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
		namedb = namedb->parent;

	handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&handle->head);
	INIT_LIST_HEAD(&handle->tree);
	handle->name = _handle;
	handle->priv = ~0;	/* ~0 == "not attached to parent" */

	ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle);
	if (ret) {
		kfree(handle);
		return ret;
	}

	if (nv_parent(parent)->object_attach) {
		ret = nv_parent(parent)->object_attach(parent, object, _handle);
		if (ret < 0) {
			nouveau_handle_destroy(handle);
			return ret;
		}

		handle->priv = ret;
	}

	if (object != namedb) {
		/* Link under the parent handle found in the client's namedb. */
		while (!nv_iclass(namedb, NV_CLIENT_CLASS))
			namedb = namedb->parent;

		handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent);
		if (handle->parent) {
			list_add(&handle->head, &handle->parent->tree);
			nouveau_namedb_put(handle->parent);
		}
	}

	hprintk(handle, TRACE, "created\n");
	return 0;
}

/*
 * Recursively destroy a handle's children, detach it from its parent
 * (when it was attached, i.e. priv != ~0), remove it from the namedb and
 * free it.
 */
void
nouveau_handle_destroy(struct nouveau_handle *handle)
{
	struct nouveau_handle *item, *temp;

	hprintk(handle, TRACE, "destroy running\n");
	list_for_each_entry_safe(item, temp, &handle->tree, head) {
		nouveau_handle_destroy(item);
	}
	list_del(&handle->head);

	if (handle->priv != ~0) {
		struct nouveau_object *parent = handle->parent->object;
		nv_parent(parent)->object_detach(parent, handle->priv);
	}

	hprintk(handle, TRACE, "destroy completed\n");
	nouveau_namedb_remove(handle);
	kfree(handle);
}

/*
 * Look up @name in the nearest namedb above @parent and return a new
 * reference to the named object (NULL when not found).  Caller owns the
 * returned reference.
 */
struct nouveau_object *
nouveau_handle_ref(struct nouveau_object *parent, u32 name)
{
	struct nouveau_object *object = NULL;
	struct nouveau_handle *handle;

	while (!nv_iclass(parent, NV_NAMEDB_CLASS))
		parent = parent->parent;

	handle = nouveau_namedb_get(nv_namedb(parent), name);
	if (handle) {
		nouveau_object_ref(handle->object, &object);
		nouveau_namedb_put(handle);
	}

	return object;
}

/* Find a handle by object class within @engctx's namedb (NULL if none). */
struct nouveau_handle *
nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass)
{
	struct nouveau_namedb *namedb;
	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
		return nouveau_namedb_get_class(namedb, oclass);
	return NULL;
}

/* Find a handle by virtual instance address within @engctx's namedb. */
struct nouveau_handle *
nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst)
{
	struct nouveau_namedb *namedb;
	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
		return nouveau_namedb_get_vinst(namedb, vinst);
	return NULL;
}

/* Find a handle by channel instance id within @engctx's namedb. */
struct nouveau_handle *
nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst)
{
	struct nouveau_namedb *namedb;
	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
		return nouveau_namedb_get_cinst(namedb, cinst);
	return NULL;
}

/* Release a handle obtained from one of the get_* helpers (NULL-safe). */
void
nouveau_handle_put(struct nouveau_handle *handle)
{
	if (handle)
		nouveau_namedb_put(handle);
}
gpl-2.0
junmuzi/linux
drivers/infiniband/ulp/ipoib/ipoib_ib.c
53
32373
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
/* Module knob: >0 enables data-path tracing via ipoib_dbg_data(). */
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

/*
 * Allocate an ipoib_ah wrapper around a new IB address handle.
 * On ib_create_ah() failure the wrapper is freed and the ERR_PTR is
 * returned (cast through struct ipoib_ah *), so callers must IS_ERR-check.
 */
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = ib_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

/*
 * kref release function: the AH is not destroyed here but queued on
 * priv->dead_ahs; the reaper (__ipoib_reap_ah) frees it once no send
 * referencing it can still be outstanding.
 */
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Undo the DMA mapping of one UD receive buffer. */
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}

/*
 * Post receive ring entry @id to the QP.  On failure the buffer is
 * unmapped and freed, and the ring slot is cleared.
 */
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id   = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];


	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

/*
 * Allocate and DMA-map an skb for receive ring slot @id.
 * Returns the skb (also stored in the ring) or NULL on failure.
 */
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header. So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

/* Fill the whole receive ring and post every buffer to the QP. */
static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

/*
 * Handle one receive completion: validate it, hand the skb up the stack
 * (after stripping GRH and IPoIB encapsulation) and repost the ring slot.
 * Packets replicated back to us by the HCA (our own multicasts) are dropped.
 */
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb  = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/* Save the mapping before the slot is reused by the new skb. */
	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
			likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

/*
 * DMA-map an skb for transmit: linear head (if any) into mapping[0],
 * then each page fragment.  On partial failure everything already
 * mapped is unwound.  Returns 0 or -EIO.
 */
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						 skb_frag_page(frag),
						 frag->page_offset, skb_frag_size(frag),
						 DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	/* Unwind frags [0, i) — mapping index is shifted when there is a head. */
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

/* Undo ipoib_dma_map_tx() for a completed or failed send. */
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

/*
 * As the result of a completion error the QP Can be transferred to SQE states.
 * The function checks if the (send)QP is in SQE state and
 * moves it back to RTS state, that in order to have it functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);

	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* currently support only in SQE->RTS transition*/
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}

/*
 * Handle one send completion: unmap and free the skb, advance tx_tail,
 * wake the queue when the ring drains to half full, and on a hard error
 * schedule the SQE->RTS recovery work above.
 */
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work) {
			ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
				   __func__, priv->qp->qp_num);
			return;
		}

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;
		queue_work(priv->wq, &qp_work->work);
	}
}

/*
 * Drain up to MAX_SEND_CQE send completions; returns nonzero when the CQ
 * may still hold more (caller loops).
 */
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

/*
 * NAPI poll: process up to @budget receive-side completions (UD and CM),
 * then re-arm the CQ and retry if completions raced with the re-arm.
 */
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done  = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

/* Receive CQ event handler: just kick NAPI. */
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	napi_schedule(&priv->napi);
}

/*
 * Drain the send CQ under the TX lock; if the queue is still stopped,
 * re-arm the poll timer to try again shortly.
 */
static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

/* Send CQ event handler: defer draining to the poll timer. */
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}

/*
 * Build and post one send WR.  @head/@hlen non-NULL selects LSO
 * (IB_WR_LSO with the TCP/IP header passed separately); otherwise a
 * plain IB_WR_SEND.  Returns ib_post_send()'s result.
 */
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	struct sk_buff *skb = tx_req->skb;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id	= wr_id;
	priv->tx_wr.remote_qpn	= qpn;
	priv->tx_wr.ah		= address;

	if (head) {
		priv->tx_wr.mss		= skb_shinfo(skb)->gso_size;
		priv->tx_wr.header	= head;
		priv->tx_wr.hlen	= hlen;
		priv->tx_wr.wr.opcode	= IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode	= IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

/*
 * Transmit one skb to (@address, @qpn): validate length / GSO header,
 * linearize if it has more fragments than the QP supports, DMA-map,
 * record it in the TX ring and post it.  Stops the netif queue when the
 * ring fills; undoes everything and drops the skb on post failure.
 */
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;
	unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address->ah, qpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		netif_trans_update(dev);

		address->last_send = priv->tx_head;
		++priv->tx_head;
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}

/*
 * Free dead AHs whose last send has completed (tx_tail has caught up
 * with ah->last_send); called with no locks held, takes priv->lock.
 */
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

/* Periodic AH reaper; reschedules itself unless IPOIB_STOP_REAPER is set. */
void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

/* Cancel the pending reap work and run one synchronous reap pass. */
static void ipoib_flush_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(priv->wq);
	ipoib_reap_ah(&priv->ah_reap_task.work);
}

/* Permanently stop the AH reaper (sets the stop bit first). */
static void ipoib_stop_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	ipoib_flush_ah(dev);
}

/* poll_timer callback: drain the send CQ. */
static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

/*
 * Bring the IB side of the interface up: check P_Key presence, init the
 * QP, post receives, open the CM side, start the AH reaper and enable
 * NAPI.  Returns 0 on success, -1 on failure (after ipoib_ib_dev_stop).
 */
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
		return -1;
	}

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		goto dev_stop;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		goto dev_stop;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(priv->wq, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
dev_stop:
	/* NAPI must be enabled so ipoib_ib_dev_stop() can disable it. */
	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);
	ipoib_ib_dev_stop(dev);
	return -1;
}

/* Set/clear IPOIB_PKEY_ASSIGNED based on whether our pkey is in the table. */
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!(priv->pkey & 0x7fff) ||
	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
			 &priv->pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

/* Mark the device operationally up and start the multicast join thread. */
int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

/* Mark the device down, flush multicast state and cached paths. */
int ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

/* Count receive ring slots that still hold a posted buffer. */
static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

/*
 * Drain both CQs during shutdown, converting successful receive
 * completions to flush errors so nothing is passed up the stack.
 */
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

/*
 * Stop the IB side: disable NAPI, stop CM, move the QP to ERROR, wait up
 * to 5 s for all WRs to flush (force-freeing them on timeout), then reset
 * the QP and flush dead AHs.  Always returns 0.
 */
int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize in
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	ipoib_flush_ah(dev);

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

/*
 * One-time IB setup for the device: transport init, TX poll timer, and —
 * if the interface is already administratively up — open it.
 */
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n",
		       ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) dev);

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * Takes whatever value which is in pkey index 0 and updates priv->pkey
 * returns 0 if the pkey value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
	int result;
	u16 prev_pkey;

	prev_pkey = priv->pkey;
	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
			   priv->port, result);
		return result;
	}

	/* Force the full-membership bit. */
	priv->pkey |= 0x8000;

	if (prev_pkey != priv->pkey) {
		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
			  prev_pkey, priv->pkey);
		/*
		 * Update the pkey in the broadcast address, while making sure to set
		 * the full membership bit, so that we join the right broadcast group.
		 */
		priv->dev->broadcast[8] = priv->pkey >> 8;
		priv->dev->broadcast[9] = priv->pkey & 0xff;
		return 0;
	}

	return 1;
}
/*
 * returns 0 if pkey value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
	u16 old_index = priv->pkey_index;

	priv->pkey_index = 0;
	ipoib_pkey_dev_check_presence(priv->dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
	    (old_index == priv->pkey_index))
		return 1;
	return 0;
}

/*
 * returns true if the device address of the ipoib interface has changed and the
 * new address is a valid one (i.e in the gid table), return false otherwise.
*/ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) { union ib_gid search_gid; union ib_gid gid0; union ib_gid *netdev_gid; int err; u16 index; u8 port; bool ret = false; netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4); if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) return false; netif_addr_lock_bh(priv->dev); /* The subnet prefix may have changed, update it now so we won't have * to do it later */ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix; netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix; search_gid.global.subnet_prefix = gid0.global.subnet_prefix; search_gid.global.interface_id = priv->local_gid.global.interface_id; netif_addr_unlock_bh(priv->dev); err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, priv->dev, &port, &index); netif_addr_lock_bh(priv->dev); if (search_gid.global.interface_id != priv->local_gid.global.interface_id) /* There was a change while we were looking up the gid, bail * here and let the next work sort this out */ goto out; /* The next section of code needs some background: * Per IB spec the port GUID can't change if the HCA is powered on. * port GUID is the basis for GID at index 0 which is the basis for * the default device address of a ipoib interface. * * so it seems the flow should be: * if user_changed_dev_addr && gid in gid tbl * set bit dev_addr_set * return true * else * return false * * The issue is that there are devices that don't follow the spec, * they change the port GUID when the HCA is powered, so in order * not to break userspace applications, We need to check if the * user wanted to control the device address and we assume that * if he sets the device address back to be based on GID index 0, * he no longer wishs to control it. 
* * If the user doesn't control the the device address, * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means * the port GUID has changed and GID at index 0 has changed * so we need to change priv->local_gid and priv->dev->dev_addr * to reflect the new GID. */ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { if (!err && port == priv->port) { set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); if (index == 0) clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags); else set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags); ret = true; } else { ret = false; } } else { if (!err && port == priv->port) { ret = true; } else { if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) { memcpy(&priv->local_gid, &gid0, sizeof(priv->local_gid)); memcpy(priv->dev->dev_addr + 4, &gid0, sizeof(priv->local_gid)); ret = true; } } } out: netif_addr_unlock_bh(priv->dev); return ret; } static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, enum ipoib_flush_level level, int nesting) { struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; int result; down_read_nested(&priv->vlan_rwsem, nesting); /* * Flush any child interfaces too -- they might be up even if * the parent is down. */ list_for_each_entry(cpriv, &priv->child_intfs, list) __ipoib_ib_dev_flush(cpriv, level, nesting + 1); up_read(&priv->vlan_rwsem); if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && level != IPOIB_FLUSH_HEAVY) { /* Make sure the dev_addr is set even if not flushing */ if (level == IPOIB_FLUSH_LIGHT) ipoib_dev_addr_changed_valid(priv); ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); return; } if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { /* interface is down. update pkey and leave. 
*/ if (level == IPOIB_FLUSH_HEAVY) { if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) update_parent_pkey(priv); else update_child_pkey(priv); } else if (level == IPOIB_FLUSH_LIGHT) ipoib_dev_addr_changed_valid(priv); ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); return; } if (level == IPOIB_FLUSH_HEAVY) { /* child devices chase their origin pkey value, while non-child * (parent) devices should always takes what present in pkey index 0 */ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { result = update_child_pkey(priv); if (result) { /* restart QP only if P_Key index is changed */ ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); return; } } else { result = update_parent_pkey(priv); /* restart QP only if P_Key value changed */ if (result) { ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n"); return; } } } if (level == IPOIB_FLUSH_LIGHT) { ipoib_mark_paths_invalid(dev); ipoib_mcast_dev_flush(dev); ipoib_flush_ah(dev); } if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_down(dev); if (level == IPOIB_FLUSH_HEAVY) { if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) ipoib_ib_dev_stop(dev); if (ipoib_ib_dev_open(dev) != 0) return; if (netif_queue_stopped(dev)) netif_start_queue(dev); } /* * The device could have been brought down between the start and when * we get here, don't bring it back up if it's not configured up */ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_up(dev); if (ipoib_dev_addr_changed_valid(priv)) ipoib_mcast_restart_task(&priv->restart_task); } } void ipoib_ib_dev_flush_light(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_light); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0); } void ipoib_ib_dev_flush_normal(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_normal); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0); } void 
ipoib_ib_dev_flush_heavy(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_heavy); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); } void ipoib_ib_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "cleaning up ib_dev\n"); /* * We must make sure there are no more (path) completions * that may wish to touch priv fields that are no longer valid */ ipoib_flush_paths(dev); ipoib_mcast_stop_thread(dev); ipoib_mcast_dev_flush(dev); /* * All of our ah references aren't free until after * ipoib_mcast_dev_flush(), ipoib_flush_paths, and * the neighbor garbage collection is stopped and reaped. * That should all be done now, so make a final ah flush. */ ipoib_stop_ah(dev); ipoib_transport_dev_cleanup(dev); }
gpl-2.0
skalk/linux
drivers/tty/serial/serial-tegra.c
53
44791
// SPDX-License-Identifier: GPL-2.0 /* * serial_tegra.c * * High-speed serial driver for NVIDIA Tegra SoCs * * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved. * * Author: Laxman Dewangan <ldewangan@nvidia.com> */ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/err.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pagemap.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/termios.h> #include <linux/tty.h> #include <linux/tty_flip.h> #define TEGRA_UART_TYPE "TEGRA_UART" #define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE) #define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3) #define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096 #define TEGRA_UART_LSR_TXFIFO_FULL 0x100 #define TEGRA_UART_IER_EORD 0x20 #define TEGRA_UART_MCR_RTS_EN 0x40 #define TEGRA_UART_MCR_CTS_EN 0x20 #define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \ UART_LSR_PE | UART_LSR_FE) #define TEGRA_UART_IRDA_CSR 0x08 #define TEGRA_UART_SIR_ENABLED 0x80 #define TEGRA_UART_TX_PIO 1 #define TEGRA_UART_TX_DMA 2 #define TEGRA_UART_MIN_DMA 16 #define TEGRA_UART_FIFO_SIZE 32 /* * Tx fifo trigger level setting in tegra uart is in * reverse way then conventional uart. 
*/ #define TEGRA_UART_TX_TRIG_16B 0x00 #define TEGRA_UART_TX_TRIG_8B 0x10 #define TEGRA_UART_TX_TRIG_4B 0x20 #define TEGRA_UART_TX_TRIG_1B 0x30 #define TEGRA_UART_MAXIMUM 8 /* Default UART setting when started: 115200 no parity, stop, 8 data bits */ #define TEGRA_UART_DEFAULT_BAUD 115200 #define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8 /* Tx transfer mode */ #define TEGRA_TX_PIO 1 #define TEGRA_TX_DMA 2 #define TEGRA_UART_FCR_IIR_FIFO_EN 0x40 /** * tegra_uart_chip_data: SOC specific data. * * @tx_fifo_full_status: Status flag available for checking tx fifo full. * @allow_txfifo_reset_fifo_mode: allow_tx fifo reset with fifo mode or not. * Tegra30 does not allow this. * @support_clk_src_div: Clock source support the clock divider. */ struct tegra_uart_chip_data { bool tx_fifo_full_status; bool allow_txfifo_reset_fifo_mode; bool support_clk_src_div; bool fifo_mode_enable_status; int uart_max_port; int max_dma_burst_bytes; int error_tolerance_low_range; int error_tolerance_high_range; }; struct tegra_baud_tolerance { u32 lower_range_baud; u32 upper_range_baud; s32 tolerance; }; struct tegra_uart_port { struct uart_port uport; const struct tegra_uart_chip_data *cdata; struct clk *uart_clk; struct reset_control *rst; unsigned int current_baud; /* Register shadow */ unsigned long fcr_shadow; unsigned long mcr_shadow; unsigned long lcr_shadow; unsigned long ier_shadow; bool rts_active; int tx_in_progress; unsigned int tx_bytes; bool enable_modem_interrupt; bool rx_timeout; int rx_in_progress; int symb_bit; struct dma_chan *rx_dma_chan; struct dma_chan *tx_dma_chan; dma_addr_t rx_dma_buf_phys; dma_addr_t tx_dma_buf_phys; unsigned char *rx_dma_buf_virt; unsigned char *tx_dma_buf_virt; struct dma_async_tx_descriptor *tx_dma_desc; struct dma_async_tx_descriptor *rx_dma_desc; dma_cookie_t tx_cookie; dma_cookie_t rx_cookie; unsigned int tx_bytes_requested; unsigned int rx_bytes_requested; struct tegra_baud_tolerance *baud_tolerance; int n_adjustable_baud_rates; int required_rate; 
int configured_rate; bool use_rx_pio; bool use_tx_pio; bool rx_dma_active; }; static void tegra_uart_start_next_tx(struct tegra_uart_port *tup); static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup); static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup, bool dma_to_memory); static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup, unsigned long reg) { return readl(tup->uport.membase + (reg << tup->uport.regshift)); } static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val, unsigned long reg) { writel(val, tup->uport.membase + (reg << tup->uport.regshift)); } static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u) { return container_of(u, struct tegra_uart_port, uport); } static unsigned int tegra_uart_get_mctrl(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); /* * RI - Ring detector is active * CD/DCD/CAR - Carrier detect is always active. For some reason * linux has different names for carrier detect. * DSR - Data Set ready is active as the hardware doesn't support it. * Don't know if the linux support this yet? * CTS - Clear to send. Always set to active, as the hardware handles * CTS automatically. 
*/ if (tup->enable_modem_interrupt) return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS; return TIOCM_CTS; } static void set_rts(struct tegra_uart_port *tup, bool active) { unsigned long mcr; mcr = tup->mcr_shadow; if (active) mcr |= TEGRA_UART_MCR_RTS_EN; else mcr &= ~TEGRA_UART_MCR_RTS_EN; if (mcr != tup->mcr_shadow) { tegra_uart_write(tup, mcr, UART_MCR); tup->mcr_shadow = mcr; } } static void set_dtr(struct tegra_uart_port *tup, bool active) { unsigned long mcr; mcr = tup->mcr_shadow; if (active) mcr |= UART_MCR_DTR; else mcr &= ~UART_MCR_DTR; if (mcr != tup->mcr_shadow) { tegra_uart_write(tup, mcr, UART_MCR); tup->mcr_shadow = mcr; } } static void set_loopbk(struct tegra_uart_port *tup, bool active) { unsigned long mcr = tup->mcr_shadow; if (active) mcr |= UART_MCR_LOOP; else mcr &= ~UART_MCR_LOOP; if (mcr != tup->mcr_shadow) { tegra_uart_write(tup, mcr, UART_MCR); tup->mcr_shadow = mcr; } } static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl) { struct tegra_uart_port *tup = to_tegra_uport(u); int enable; tup->rts_active = !!(mctrl & TIOCM_RTS); set_rts(tup, tup->rts_active); enable = !!(mctrl & TIOCM_DTR); set_dtr(tup, enable); enable = !!(mctrl & TIOCM_LOOP); set_loopbk(tup, enable); } static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl) { struct tegra_uart_port *tup = to_tegra_uport(u); unsigned long lcr; lcr = tup->lcr_shadow; if (break_ctl) lcr |= UART_LCR_SBC; else lcr &= ~UART_LCR_SBC; tegra_uart_write(tup, lcr, UART_LCR); tup->lcr_shadow = lcr; } /** * tegra_uart_wait_cycle_time: Wait for N UART clock periods * * @tup: Tegra serial port data structure. * @cycles: Number of clock periods to wait. * * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART * clock speed is 16X the current baud rate. */ static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup, unsigned int cycles) { if (tup->current_baud) udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16)); } /* Wait for a symbol-time. 
*/ static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup, unsigned int syms) { if (tup->current_baud) udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000, tup->current_baud)); } static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup) { unsigned long iir; unsigned int tmout = 100; do { iir = tegra_uart_read(tup, UART_IIR); if (iir & TEGRA_UART_FCR_IIR_FIFO_EN) return 0; udelay(1); } while (--tmout); return -ETIMEDOUT; } static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits) { unsigned long fcr = tup->fcr_shadow; unsigned int lsr, tmout = 10000; if (tup->rts_active) set_rts(tup, false); if (tup->cdata->allow_txfifo_reset_fifo_mode) { fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); tegra_uart_write(tup, fcr, UART_FCR); } else { fcr &= ~UART_FCR_ENABLE_FIFO; tegra_uart_write(tup, fcr, UART_FCR); udelay(60); fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); tegra_uart_write(tup, fcr, UART_FCR); fcr |= UART_FCR_ENABLE_FIFO; tegra_uart_write(tup, fcr, UART_FCR); if (tup->cdata->fifo_mode_enable_status) tegra_uart_wait_fifo_mode_enabled(tup); } /* Dummy read to ensure the write is posted */ tegra_uart_read(tup, UART_SCR); /* * For all tegra devices (up to t210), there is a hardware issue that * requires software to wait for 32 UART clock periods for the flush * to propagate, otherwise data could be lost. 
*/ tegra_uart_wait_cycle_time(tup, 32); do { lsr = tegra_uart_read(tup, UART_LSR); if ((lsr | UART_LSR_TEMT) && !(lsr & UART_LSR_DR)) break; udelay(1); } while (--tmout); if (tup->rts_active) set_rts(tup, true); } static long tegra_get_tolerance_rate(struct tegra_uart_port *tup, unsigned int baud, long rate) { int i; for (i = 0; i < tup->n_adjustable_baud_rates; ++i) { if (baud >= tup->baud_tolerance[i].lower_range_baud && baud <= tup->baud_tolerance[i].upper_range_baud) return (rate + (rate * tup->baud_tolerance[i].tolerance) / 10000); } return rate; } static int tegra_check_rate_in_range(struct tegra_uart_port *tup) { long diff; diff = ((long)(tup->configured_rate - tup->required_rate) * 10000) / tup->required_rate; if (diff < (tup->cdata->error_tolerance_low_range * 100) || diff > (tup->cdata->error_tolerance_high_range * 100)) { dev_err(tup->uport.dev, "configured baud rate is out of range by %ld", diff); return -EIO; } return 0; } static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud) { unsigned long rate; unsigned int divisor; unsigned long lcr; unsigned long flags; int ret; if (tup->current_baud == baud) return 0; if (tup->cdata->support_clk_src_div) { rate = baud * 16; tup->required_rate = rate; if (tup->n_adjustable_baud_rates) rate = tegra_get_tolerance_rate(tup, baud, rate); ret = clk_set_rate(tup->uart_clk, rate); if (ret < 0) { dev_err(tup->uport.dev, "clk_set_rate() failed for rate %lu\n", rate); return ret; } tup->configured_rate = clk_get_rate(tup->uart_clk); divisor = 1; ret = tegra_check_rate_in_range(tup); if (ret < 0) return ret; } else { rate = clk_get_rate(tup->uart_clk); divisor = DIV_ROUND_CLOSEST(rate, baud * 16); } spin_lock_irqsave(&tup->uport.lock, flags); lcr = tup->lcr_shadow; lcr |= UART_LCR_DLAB; tegra_uart_write(tup, lcr, UART_LCR); tegra_uart_write(tup, divisor & 0xFF, UART_TX); tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER); lcr &= ~UART_LCR_DLAB; tegra_uart_write(tup, lcr, UART_LCR); /* Dummy read 
to ensure the write is posted */ tegra_uart_read(tup, UART_SCR); spin_unlock_irqrestore(&tup->uport.lock, flags); tup->current_baud = baud; /* wait two character intervals at new rate */ tegra_uart_wait_sym_time(tup, 2); return 0; } static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup, unsigned long lsr) { char flag = TTY_NORMAL; if (unlikely(lsr & TEGRA_UART_LSR_ANY)) { if (lsr & UART_LSR_OE) { /* Overrrun error */ flag = TTY_OVERRUN; tup->uport.icount.overrun++; dev_err(tup->uport.dev, "Got overrun errors\n"); } else if (lsr & UART_LSR_PE) { /* Parity error */ flag = TTY_PARITY; tup->uport.icount.parity++; dev_err(tup->uport.dev, "Got Parity errors\n"); } else if (lsr & UART_LSR_FE) { flag = TTY_FRAME; tup->uport.icount.frame++; dev_err(tup->uport.dev, "Got frame errors\n"); } else if (lsr & UART_LSR_BI) { /* * Break error * If FIFO read error without any data, reset Rx FIFO */ if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE)) tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR); if (tup->uport.ignore_status_mask & UART_LSR_BI) return TTY_BREAK; flag = TTY_BREAK; tup->uport.icount.brk++; dev_dbg(tup->uport.dev, "Got Break\n"); } uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag); } return flag; } static int tegra_uart_request_port(struct uart_port *u) { return 0; } static void tegra_uart_release_port(struct uart_port *u) { /* Nothing to do here */ } static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes) { struct circ_buf *xmit = &tup->uport.state->xmit; int i; for (i = 0; i < max_bytes; i++) { BUG_ON(uart_circ_empty(xmit)); if (tup->cdata->tx_fifo_full_status) { unsigned long lsr = tegra_uart_read(tup, UART_LSR); if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL)) break; } tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); tup->uport.icount.tx++; } } static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup, unsigned int bytes) { if (bytes > TEGRA_UART_MIN_DMA) bytes = 
TEGRA_UART_MIN_DMA; tup->tx_in_progress = TEGRA_UART_TX_PIO; tup->tx_bytes = bytes; tup->ier_shadow |= UART_IER_THRI; tegra_uart_write(tup, tup->ier_shadow, UART_IER); } static void tegra_uart_tx_dma_complete(void *args) { struct tegra_uart_port *tup = args; struct circ_buf *xmit = &tup->uport.state->xmit; struct dma_tx_state state; unsigned long flags; unsigned int count; dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); spin_lock_irqsave(&tup->uport.lock, flags); xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); tup->tx_in_progress = 0; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&tup->uport); tegra_uart_start_next_tx(tup); spin_unlock_irqrestore(&tup->uport.lock, flags); } static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup, unsigned long count) { struct circ_buf *xmit = &tup->uport.state->xmit; dma_addr_t tx_phys_addr; tup->tx_bytes = count & ~(0xF); tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail; dma_sync_single_for_device(tup->uport.dev, tx_phys_addr, tup->tx_bytes, DMA_TO_DEVICE); tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan, tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!tup->tx_dma_desc) { dev_err(tup->uport.dev, "Not able to get desc for Tx\n"); return -EIO; } tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete; tup->tx_dma_desc->callback_param = tup; tup->tx_in_progress = TEGRA_UART_TX_DMA; tup->tx_bytes_requested = tup->tx_bytes; tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc); dma_async_issue_pending(tup->tx_dma_chan); return 0; } static void tegra_uart_start_next_tx(struct tegra_uart_port *tup) { unsigned long tail; unsigned long count; struct circ_buf *xmit = &tup->uport.state->xmit; if (!tup->current_baud) return; tail = (unsigned long)&xmit->buf[xmit->tail]; count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); if (!count) return; if (tup->use_tx_pio 
|| count < TEGRA_UART_MIN_DMA) tegra_uart_start_pio_tx(tup, count); else if (BYTES_TO_ALIGN(tail) > 0) tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail)); else tegra_uart_start_tx_dma(tup, count); } /* Called by serial core driver with u->lock taken. */ static void tegra_uart_start_tx(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); struct circ_buf *xmit = &u->state->xmit; if (!uart_circ_empty(xmit) && !tup->tx_in_progress) tegra_uart_start_next_tx(tup); } static unsigned int tegra_uart_tx_empty(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); unsigned int ret = 0; unsigned long flags; spin_lock_irqsave(&u->lock, flags); if (!tup->tx_in_progress) { unsigned long lsr = tegra_uart_read(tup, UART_LSR); if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS) ret = TIOCSER_TEMT; } spin_unlock_irqrestore(&u->lock, flags); return ret; } static void tegra_uart_stop_tx(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); struct circ_buf *xmit = &tup->uport.state->xmit; struct dma_tx_state state; unsigned int count; if (tup->tx_in_progress != TEGRA_UART_TX_DMA) return; dmaengine_terminate_all(tup->tx_dma_chan); dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); tup->tx_in_progress = 0; } static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup) { struct circ_buf *xmit = &tup->uport.state->xmit; tegra_uart_fill_tx_fifo(tup, tup->tx_bytes); tup->tx_in_progress = 0; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&tup->uport); tegra_uart_start_next_tx(tup); } static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, struct tty_port *tty) { do { char flag = TTY_NORMAL; unsigned long lsr = 0; unsigned char ch; lsr = tegra_uart_read(tup, UART_LSR); if (!(lsr & UART_LSR_DR)) break; flag = tegra_uart_decode_rx_error(tup, lsr); if (flag != 
TTY_NORMAL) continue; ch = (unsigned char) tegra_uart_read(tup, UART_RX); tup->uport.icount.rx++; if (!uart_handle_sysrq_char(&tup->uport, ch) && tty) tty_insert_flip_char(tty, ch, flag); if (tup->uport.ignore_status_mask & UART_LSR_DR) continue; } while (1); } static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, struct tty_port *tty, unsigned int count) { int copied; /* If count is zero, then there is no data to be copied */ if (!count) return; tup->uport.icount.rx += count; if (!tty) { dev_err(tup->uport.dev, "No tty port\n"); return; } if (tup->uport.ignore_status_mask & UART_LSR_DR) return; dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys, count, DMA_FROM_DEVICE); copied = tty_insert_flip_string(tty, ((unsigned char *)(tup->rx_dma_buf_virt)), count); if (copied != count) { WARN_ON(1); dev_err(tup->uport.dev, "RxData copy to tty layer failed\n"); } dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys, count, DMA_TO_DEVICE); } static void do_handle_rx_pio(struct tegra_uart_port *tup) { struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); struct tty_port *port = &tup->uport.state->port; tegra_uart_handle_rx_pio(tup, port); if (tty) { tty_flip_buffer_push(port); tty_kref_put(tty); } } static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup, unsigned int residue) { struct tty_port *port = &tup->uport.state->port; unsigned int count; async_tx_ack(tup->rx_dma_desc); count = tup->rx_bytes_requested - residue; /* If we are here, DMA is stopped */ tegra_uart_copy_rx_to_tty(tup, port, count); do_handle_rx_pio(tup); } static void tegra_uart_rx_dma_complete(void *args) { struct tegra_uart_port *tup = args; struct uart_port *u = &tup->uport; unsigned long flags; struct dma_tx_state state; enum dma_status status; spin_lock_irqsave(&u->lock, flags); status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); if (status == DMA_IN_PROGRESS) { dev_dbg(tup->uport.dev, "RX DMA is in progress\n"); goto done; } 
/* Deactivate flow control to stop sender */ if (tup->rts_active) set_rts(tup, false); tup->rx_dma_active = false; tegra_uart_rx_buffer_push(tup, 0); tegra_uart_start_rx_dma(tup); /* Activate flow control to start transfer */ if (tup->rts_active) set_rts(tup, true); done: spin_unlock_irqrestore(&u->lock, flags); } static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup) { struct dma_tx_state state; if (!tup->rx_dma_active) { do_handle_rx_pio(tup); return; } dmaengine_terminate_all(tup->rx_dma_chan); dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); tegra_uart_rx_buffer_push(tup, state.residue); tup->rx_dma_active = false; } static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup) { /* Deactivate flow control to stop sender */ if (tup->rts_active) set_rts(tup, false); tegra_uart_terminate_rx_dma(tup); if (tup->rts_active) set_rts(tup, true); } static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup) { unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE; if (tup->rx_dma_active) return 0; tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan, tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!tup->rx_dma_desc) { dev_err(tup->uport.dev, "Not able to get desc for Rx\n"); return -EIO; } tup->rx_dma_active = true; tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete; tup->rx_dma_desc->callback_param = tup; tup->rx_bytes_requested = count; tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc); dma_async_issue_pending(tup->rx_dma_chan); return 0; } static void tegra_uart_handle_modem_signal_change(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); unsigned long msr; msr = tegra_uart_read(tup, UART_MSR); if (!(msr & UART_MSR_ANY_DELTA)) return; if (msr & UART_MSR_TERI) tup->uport.icount.rng++; if (msr & UART_MSR_DDSR) tup->uport.icount.dsr++; /* We may only get DDCD when HW init and reset */ if (msr & UART_MSR_DDCD) uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD); /* Will 
start/stop_tx accordingly */ if (msr & UART_MSR_DCTS) uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS); } static irqreturn_t tegra_uart_isr(int irq, void *data) { struct tegra_uart_port *tup = data; struct uart_port *u = &tup->uport; unsigned long iir; unsigned long ier; bool is_rx_start = false; bool is_rx_int = false; unsigned long flags; spin_lock_irqsave(&u->lock, flags); while (1) { iir = tegra_uart_read(tup, UART_IIR); if (iir & UART_IIR_NO_INT) { if (!tup->use_rx_pio && is_rx_int) { tegra_uart_handle_rx_dma(tup); if (tup->rx_in_progress) { ier = tup->ier_shadow; ier |= (UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD | UART_IER_RDI); tup->ier_shadow = ier; tegra_uart_write(tup, ier, UART_IER); } } else if (is_rx_start) { tegra_uart_start_rx_dma(tup); } spin_unlock_irqrestore(&u->lock, flags); return IRQ_HANDLED; } switch ((iir >> 1) & 0x7) { case 0: /* Modem signal change interrupt */ tegra_uart_handle_modem_signal_change(u); break; case 1: /* Transmit interrupt only triggered when using PIO */ tup->ier_shadow &= ~UART_IER_THRI; tegra_uart_write(tup, tup->ier_shadow, UART_IER); tegra_uart_handle_tx_pio(tup); break; case 4: /* End of data */ case 6: /* Rx timeout */ if (!tup->use_rx_pio) { is_rx_int = tup->rx_in_progress; /* Disable Rx interrupts */ ier = tup->ier_shadow; ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD); tup->ier_shadow = ier; tegra_uart_write(tup, ier, UART_IER); break; } /* Fall through */ case 2: /* Receive */ if (!tup->use_rx_pio) { is_rx_start = tup->rx_in_progress; tup->ier_shadow &= ~UART_IER_RDI; tegra_uart_write(tup, tup->ier_shadow, UART_IER); } else { do_handle_rx_pio(tup); } break; case 3: /* Receive error */ tegra_uart_decode_rx_error(tup, tegra_uart_read(tup, UART_LSR)); break; case 5: /* break nothing to handle */ case 7: /* break nothing to handle */ break; } } } static void tegra_uart_stop_rx(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); struct tty_port *port 
= &tup->uport.state->port; unsigned long ier; if (tup->rts_active) set_rts(tup, false); if (!tup->rx_in_progress) return; tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */ ier = tup->ier_shadow; ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD); tup->ier_shadow = ier; tegra_uart_write(tup, ier, UART_IER); tup->rx_in_progress = 0; if (!tup->use_rx_pio) tegra_uart_terminate_rx_dma(tup); else tegra_uart_handle_rx_pio(tup, port); } static void tegra_uart_hw_deinit(struct tegra_uart_port *tup) { unsigned long flags; unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud); unsigned long fifo_empty_time = tup->uport.fifosize * char_time; unsigned long wait_time; unsigned long lsr; unsigned long msr; unsigned long mcr; /* Disable interrupts */ tegra_uart_write(tup, 0, UART_IER); lsr = tegra_uart_read(tup, UART_LSR); if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) { msr = tegra_uart_read(tup, UART_MSR); mcr = tegra_uart_read(tup, UART_MCR); if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS)) dev_err(tup->uport.dev, "Tx Fifo not empty, CTS disabled, waiting\n"); /* Wait for Tx fifo to be empty */ while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) { wait_time = min(fifo_empty_time, 100lu); udelay(wait_time); fifo_empty_time -= wait_time; if (!fifo_empty_time) { msr = tegra_uart_read(tup, UART_MSR); mcr = tegra_uart_read(tup, UART_MCR); if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS)) dev_err(tup->uport.dev, "Slave not ready\n"); break; } lsr = tegra_uart_read(tup, UART_LSR); } } spin_lock_irqsave(&tup->uport.lock, flags); /* Reset the Rx and Tx FIFOs */ tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR); tup->current_baud = 0; spin_unlock_irqrestore(&tup->uport.lock, flags); tup->rx_in_progress = 0; tup->tx_in_progress = 0; if (!tup->use_rx_pio) tegra_uart_dma_channel_free(tup, true); if (!tup->use_tx_pio) tegra_uart_dma_channel_free(tup, false); clk_disable_unprepare(tup->uart_clk); } 
static int tegra_uart_hw_init(struct tegra_uart_port *tup) { int ret; tup->fcr_shadow = 0; tup->mcr_shadow = 0; tup->lcr_shadow = 0; tup->ier_shadow = 0; tup->current_baud = 0; clk_prepare_enable(tup->uart_clk); /* Reset the UART controller to clear all previous status.*/ reset_control_assert(tup->rst); udelay(10); reset_control_deassert(tup->rst); tup->rx_in_progress = 0; tup->tx_in_progress = 0; /* * Set the trigger level * * For PIO mode: * * For receive, this will interrupt the CPU after that many number of * bytes are received, for the remaining bytes the receive timeout * interrupt is received. Rx high watermark is set to 4. * * For transmit, if the trasnmit interrupt is enabled, this will * interrupt the CPU when the number of entries in the FIFO reaches the * low watermark. Tx low watermark is set to 16 bytes. * * For DMA mode: * * Set the Tx trigger to 16. This should match the DMA burst size that * programmed in the DMA registers. */ tup->fcr_shadow = UART_FCR_ENABLE_FIFO; if (tup->use_rx_pio) { tup->fcr_shadow |= UART_FCR_R_TRIG_11; } else { if (tup->cdata->max_dma_burst_bytes == 8) tup->fcr_shadow |= UART_FCR_R_TRIG_10; else tup->fcr_shadow |= UART_FCR_R_TRIG_01; } tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B; tegra_uart_write(tup, tup->fcr_shadow, UART_FCR); /* Dummy read to ensure the write is posted */ tegra_uart_read(tup, UART_SCR); if (tup->cdata->fifo_mode_enable_status) { ret = tegra_uart_wait_fifo_mode_enabled(tup); dev_err(tup->uport.dev, "FIFO mode not enabled\n"); if (ret < 0) return ret; } else { /* * For all tegra devices (up to t210), there is a hardware * issue that requires software to wait for 3 UART clock * periods after enabling the TX fifo, otherwise data could * be lost. 
*/ tegra_uart_wait_cycle_time(tup, 3); } /* * Initialize the UART with default configuration * (115200, N, 8, 1) so that the receive DMA buffer may be * enqueued */ ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD); if (ret < 0) { dev_err(tup->uport.dev, "Failed to set baud rate\n"); return ret; } if (!tup->use_rx_pio) { tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR; tup->fcr_shadow |= UART_FCR_DMA_SELECT; tegra_uart_write(tup, tup->fcr_shadow, UART_FCR); } else { tegra_uart_write(tup, tup->fcr_shadow, UART_FCR); } tup->rx_in_progress = 1; /* * Enable IE_RXS for the receive status interrupts like line errros. * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd. * * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when * the DATA is sitting in the FIFO and couldn't be transferred to the * DMA as the DMA size alignment (4 bytes) is not met. EORD will be * triggered when there is a pause of the incomming data stream for 4 * characters long. * * For pauses in the data which is not aligned to 4 bytes, we get * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first * then the EORD. */ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI; /* * If using DMA mode, enable EORD interrupt to notify about RX * completion. 
*/ if (!tup->use_rx_pio) tup->ier_shadow |= TEGRA_UART_IER_EORD; tegra_uart_write(tup, tup->ier_shadow, UART_IER); return 0; } static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup, bool dma_to_memory) { if (dma_to_memory) { dmaengine_terminate_all(tup->rx_dma_chan); dma_release_channel(tup->rx_dma_chan); dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE, tup->rx_dma_buf_virt, tup->rx_dma_buf_phys); tup->rx_dma_chan = NULL; tup->rx_dma_buf_phys = 0; tup->rx_dma_buf_virt = NULL; } else { dmaengine_terminate_all(tup->tx_dma_chan); dma_release_channel(tup->tx_dma_chan); dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys, UART_XMIT_SIZE, DMA_TO_DEVICE); tup->tx_dma_chan = NULL; tup->tx_dma_buf_phys = 0; tup->tx_dma_buf_virt = NULL; } } static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup, bool dma_to_memory) { struct dma_chan *dma_chan; unsigned char *dma_buf; dma_addr_t dma_phys; int ret; struct dma_slave_config dma_sconfig; dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? 
"rx" : "tx"); if (IS_ERR(dma_chan)) { ret = PTR_ERR(dma_chan); dev_err(tup->uport.dev, "DMA channel alloc failed: %d\n", ret); return ret; } if (dma_to_memory) { dma_buf = dma_alloc_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE, &dma_phys, GFP_KERNEL); if (!dma_buf) { dev_err(tup->uport.dev, "Not able to allocate the dma buffer\n"); dma_release_channel(dma_chan); return -ENOMEM; } dma_sync_single_for_device(tup->uport.dev, dma_phys, TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE); dma_sconfig.src_addr = tup->uport.mapbase; dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes; tup->rx_dma_chan = dma_chan; tup->rx_dma_buf_virt = dma_buf; tup->rx_dma_buf_phys = dma_phys; } else { dma_phys = dma_map_single(tup->uport.dev, tup->uport.state->xmit.buf, UART_XMIT_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(tup->uport.dev, dma_phys)) { dev_err(tup->uport.dev, "dma_map_single tx failed\n"); dma_release_channel(dma_chan); return -ENOMEM; } dma_buf = tup->uport.state->xmit.buf; dma_sconfig.dst_addr = tup->uport.mapbase; dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_sconfig.dst_maxburst = 16; tup->tx_dma_chan = dma_chan; tup->tx_dma_buf_virt = dma_buf; tup->tx_dma_buf_phys = dma_phys; } ret = dmaengine_slave_config(dma_chan, &dma_sconfig); if (ret < 0) { dev_err(tup->uport.dev, "Dma slave config failed, err = %d\n", ret); tegra_uart_dma_channel_free(tup, dma_to_memory); return ret; } return 0; } static int tegra_uart_startup(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); int ret; if (!tup->use_tx_pio) { ret = tegra_uart_dma_channel_allocate(tup, false); if (ret < 0) { dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret); return ret; } } if (!tup->use_rx_pio) { ret = tegra_uart_dma_channel_allocate(tup, true); if (ret < 0) { dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret); goto fail_rx_dma; } } ret = tegra_uart_hw_init(tup); if (ret < 0) { 
dev_err(u->dev, "Uart HW init failed, err = %d\n", ret); goto fail_hw_init; } ret = request_irq(u->irq, tegra_uart_isr, 0, dev_name(u->dev), tup); if (ret < 0) { dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq); goto fail_hw_init; } return 0; fail_hw_init: if (!tup->use_rx_pio) tegra_uart_dma_channel_free(tup, true); fail_rx_dma: if (!tup->use_tx_pio) tegra_uart_dma_channel_free(tup, false); return ret; } /* * Flush any TX data submitted for DMA and PIO. Called when the * TX circular buffer is reset. */ static void tegra_uart_flush_buffer(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); tup->tx_bytes = 0; if (tup->tx_dma_chan) dmaengine_terminate_all(tup->tx_dma_chan); } static void tegra_uart_shutdown(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); tegra_uart_hw_deinit(tup); free_irq(u->irq, tup); } static void tegra_uart_enable_ms(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); if (tup->enable_modem_interrupt) { tup->ier_shadow |= UART_IER_MSI; tegra_uart_write(tup, tup->ier_shadow, UART_IER); } } static void tegra_uart_set_termios(struct uart_port *u, struct ktermios *termios, struct ktermios *oldtermios) { struct tegra_uart_port *tup = to_tegra_uport(u); unsigned int baud; unsigned long flags; unsigned int lcr; int symb_bit = 1; struct clk *parent_clk = clk_get_parent(tup->uart_clk); unsigned long parent_clk_rate = clk_get_rate(parent_clk); int max_divider = (tup->cdata->support_clk_src_div) ? 
0x7FFF : 0xFFFF; int ret; max_divider *= 16; spin_lock_irqsave(&u->lock, flags); /* Changing configuration, it is safe to stop any rx now */ if (tup->rts_active) set_rts(tup, false); /* Clear all interrupts as configuration is going to be changed */ tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER); tegra_uart_read(tup, UART_IER); tegra_uart_write(tup, 0, UART_IER); tegra_uart_read(tup, UART_IER); /* Parity */ lcr = tup->lcr_shadow; lcr &= ~UART_LCR_PARITY; /* CMSPAR isn't supported by this driver */ termios->c_cflag &= ~CMSPAR; if ((termios->c_cflag & PARENB) == PARENB) { symb_bit++; if (termios->c_cflag & PARODD) { lcr |= UART_LCR_PARITY; lcr &= ~UART_LCR_EPAR; lcr &= ~UART_LCR_SPAR; } else { lcr |= UART_LCR_PARITY; lcr |= UART_LCR_EPAR; lcr &= ~UART_LCR_SPAR; } } lcr &= ~UART_LCR_WLEN8; switch (termios->c_cflag & CSIZE) { case CS5: lcr |= UART_LCR_WLEN5; symb_bit += 5; break; case CS6: lcr |= UART_LCR_WLEN6; symb_bit += 6; break; case CS7: lcr |= UART_LCR_WLEN7; symb_bit += 7; break; default: lcr |= UART_LCR_WLEN8; symb_bit += 8; break; } /* Stop bits */ if (termios->c_cflag & CSTOPB) { lcr |= UART_LCR_STOP; symb_bit += 2; } else { lcr &= ~UART_LCR_STOP; symb_bit++; } tegra_uart_write(tup, lcr, UART_LCR); tup->lcr_shadow = lcr; tup->symb_bit = symb_bit; /* Baud rate. 
*/ baud = uart_get_baud_rate(u, termios, oldtermios, parent_clk_rate/max_divider, parent_clk_rate/16); spin_unlock_irqrestore(&u->lock, flags); ret = tegra_set_baudrate(tup, baud); if (ret < 0) { dev_err(tup->uport.dev, "Failed to set baud rate\n"); return; } if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, baud, baud); spin_lock_irqsave(&u->lock, flags); /* Flow control */ if (termios->c_cflag & CRTSCTS) { tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN; tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN; tegra_uart_write(tup, tup->mcr_shadow, UART_MCR); /* if top layer has asked to set rts active then do so here */ if (tup->rts_active) set_rts(tup, true); } else { tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN; tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN; tegra_uart_write(tup, tup->mcr_shadow, UART_MCR); } /* update the port timeout based on new settings */ uart_update_timeout(u, termios->c_cflag, baud); /* Make sure all writes have completed */ tegra_uart_read(tup, UART_IER); /* Re-enable interrupt */ tegra_uart_write(tup, tup->ier_shadow, UART_IER); tegra_uart_read(tup, UART_IER); tup->uport.ignore_status_mask = 0; /* Ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0) tup->uport.ignore_status_mask |= UART_LSR_DR; if (termios->c_iflag & IGNBRK) tup->uport.ignore_status_mask |= UART_LSR_BI; spin_unlock_irqrestore(&u->lock, flags); } static const char *tegra_uart_type(struct uart_port *u) { return TEGRA_UART_TYPE; } static const struct uart_ops tegra_uart_ops = { .tx_empty = tegra_uart_tx_empty, .set_mctrl = tegra_uart_set_mctrl, .get_mctrl = tegra_uart_get_mctrl, .stop_tx = tegra_uart_stop_tx, .start_tx = tegra_uart_start_tx, .stop_rx = tegra_uart_stop_rx, .flush_buffer = tegra_uart_flush_buffer, .enable_ms = tegra_uart_enable_ms, .break_ctl = tegra_uart_break_ctl, .startup = tegra_uart_startup, .shutdown = tegra_uart_shutdown, .set_termios = tegra_uart_set_termios, .type = tegra_uart_type, .request_port = tegra_uart_request_port, 
.release_port = tegra_uart_release_port, }; static struct uart_driver tegra_uart_driver = { .owner = THIS_MODULE, .driver_name = "tegra_hsuart", .dev_name = "ttyTHS", .cons = NULL, .nr = TEGRA_UART_MAXIMUM, }; static int tegra_uart_parse_dt(struct platform_device *pdev, struct tegra_uart_port *tup) { struct device_node *np = pdev->dev.of_node; int port; int ret; int index; u32 pval; int count; int n_entries; port = of_alias_get_id(np, "serial"); if (port < 0) { dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port); return port; } tup->uport.line = port; tup->enable_modem_interrupt = of_property_read_bool(np, "nvidia,enable-modem-interrupt"); index = of_property_match_string(np, "dma-names", "rx"); if (index < 0) { tup->use_rx_pio = true; dev_info(&pdev->dev, "RX in PIO mode\n"); } index = of_property_match_string(np, "dma-names", "tx"); if (index < 0) { tup->use_tx_pio = true; dev_info(&pdev->dev, "TX in PIO mode\n"); } n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates"); if (n_entries > 0) { tup->n_adjustable_baud_rates = n_entries / 3; tup->baud_tolerance = devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) * sizeof(*tup->baud_tolerance), GFP_KERNEL); if (!tup->baud_tolerance) return -ENOMEM; for (count = 0, index = 0; count < n_entries; count += 3, index++) { ret = of_property_read_u32_index(np, "nvidia,adjust-baud-rates", count, &pval); if (!ret) tup->baud_tolerance[index].lower_range_baud = pval; ret = of_property_read_u32_index(np, "nvidia,adjust-baud-rates", count + 1, &pval); if (!ret) tup->baud_tolerance[index].upper_range_baud = pval; ret = of_property_read_u32_index(np, "nvidia,adjust-baud-rates", count + 2, &pval); if (!ret) tup->baud_tolerance[index].tolerance = (s32)pval; } } else { tup->n_adjustable_baud_rates = 0; } return 0; } static struct tegra_uart_chip_data tegra20_uart_chip_data = { .tx_fifo_full_status = false, .allow_txfifo_reset_fifo_mode = true, .support_clk_src_div = false, .fifo_mode_enable_status = 
false, .uart_max_port = 5, .max_dma_burst_bytes = 4, .error_tolerance_low_range = 0, .error_tolerance_high_range = 4, }; static struct tegra_uart_chip_data tegra30_uart_chip_data = { .tx_fifo_full_status = true, .allow_txfifo_reset_fifo_mode = false, .support_clk_src_div = true, .fifo_mode_enable_status = false, .uart_max_port = 5, .max_dma_burst_bytes = 4, .error_tolerance_low_range = 0, .error_tolerance_high_range = 4, }; static struct tegra_uart_chip_data tegra186_uart_chip_data = { .tx_fifo_full_status = true, .allow_txfifo_reset_fifo_mode = false, .support_clk_src_div = true, .fifo_mode_enable_status = true, .uart_max_port = 8, .max_dma_burst_bytes = 8, .error_tolerance_low_range = 0, .error_tolerance_high_range = 4, }; static struct tegra_uart_chip_data tegra194_uart_chip_data = { .tx_fifo_full_status = true, .allow_txfifo_reset_fifo_mode = false, .support_clk_src_div = true, .fifo_mode_enable_status = true, .uart_max_port = 8, .max_dma_burst_bytes = 8, .error_tolerance_low_range = -2, .error_tolerance_high_range = 2, }; static const struct of_device_id tegra_uart_of_match[] = { { .compatible = "nvidia,tegra30-hsuart", .data = &tegra30_uart_chip_data, }, { .compatible = "nvidia,tegra20-hsuart", .data = &tegra20_uart_chip_data, }, { .compatible = "nvidia,tegra186-hsuart", .data = &tegra186_uart_chip_data, }, { .compatible = "nvidia,tegra194-hsuart", .data = &tegra194_uart_chip_data, }, { }, }; MODULE_DEVICE_TABLE(of, tegra_uart_of_match); static int tegra_uart_probe(struct platform_device *pdev) { struct tegra_uart_port *tup; struct uart_port *u; struct resource *resource; int ret; const struct tegra_uart_chip_data *cdata; const struct of_device_id *match; match = of_match_device(tegra_uart_of_match, &pdev->dev); if (!match) { dev_err(&pdev->dev, "Error: No device match found\n"); return -ENODEV; } cdata = match->data; tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL); if (!tup) { dev_err(&pdev->dev, "Failed to allocate memory for tup\n"); return 
-ENOMEM; } ret = tegra_uart_parse_dt(pdev, tup); if (ret < 0) return ret; u = &tup->uport; u->dev = &pdev->dev; u->ops = &tegra_uart_ops; u->type = PORT_TEGRA; u->fifosize = 32; tup->cdata = cdata; platform_set_drvdata(pdev, tup); resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!resource) { dev_err(&pdev->dev, "No IO memory resource\n"); return -ENODEV; } u->mapbase = resource->start; u->membase = devm_ioremap_resource(&pdev->dev, resource); if (IS_ERR(u->membase)) return PTR_ERR(u->membase); tup->uart_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(tup->uart_clk)) { dev_err(&pdev->dev, "Couldn't get the clock\n"); return PTR_ERR(tup->uart_clk); } tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial"); if (IS_ERR(tup->rst)) { dev_err(&pdev->dev, "Couldn't get the reset\n"); return PTR_ERR(tup->rst); } u->iotype = UPIO_MEM32; ret = platform_get_irq(pdev, 0); if (ret < 0) return ret; u->irq = ret; u->regshift = 2; ret = uart_add_one_port(&tegra_uart_driver, u); if (ret < 0) { dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret); return ret; } return ret; } static int tegra_uart_remove(struct platform_device *pdev) { struct tegra_uart_port *tup = platform_get_drvdata(pdev); struct uart_port *u = &tup->uport; uart_remove_one_port(&tegra_uart_driver, u); return 0; } #ifdef CONFIG_PM_SLEEP static int tegra_uart_suspend(struct device *dev) { struct tegra_uart_port *tup = dev_get_drvdata(dev); struct uart_port *u = &tup->uport; return uart_suspend_port(&tegra_uart_driver, u); } static int tegra_uart_resume(struct device *dev) { struct tegra_uart_port *tup = dev_get_drvdata(dev); struct uart_port *u = &tup->uport; return uart_resume_port(&tegra_uart_driver, u); } #endif static const struct dev_pm_ops tegra_uart_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume) }; static struct platform_driver tegra_uart_platform_driver = { .probe = tegra_uart_probe, .remove = tegra_uart_remove, .driver = { .name = "serial-tegra", 
.of_match_table = tegra_uart_of_match, .pm = &tegra_uart_pm_ops, }, }; static int __init tegra_uart_init(void) { int ret; struct device_node *node; const struct of_device_id *match = NULL; const struct tegra_uart_chip_data *cdata = NULL; node = of_find_matching_node(NULL, tegra_uart_of_match); if (node) match = of_match_node(tegra_uart_of_match, node); if (match) cdata = match->data; if (cdata) tegra_uart_driver.nr = cdata->uart_max_port; ret = uart_register_driver(&tegra_uart_driver); if (ret < 0) { pr_err("Could not register %s driver\n", tegra_uart_driver.driver_name); return ret; } ret = platform_driver_register(&tegra_uart_platform_driver); if (ret < 0) { pr_err("Uart platform driver register failed, e = %d\n", ret); uart_unregister_driver(&tegra_uart_driver); return ret; } return 0; } static void __exit tegra_uart_exit(void) { pr_info("Unloading tegra uart driver\n"); platform_driver_unregister(&tegra_uart_platform_driver); uart_unregister_driver(&tegra_uart_driver); } module_init(tegra_uart_init); module_exit(tegra_uart_exit); MODULE_ALIAS("platform:serial-tegra"); MODULE_DESCRIPTION("High speed UART driver for tegra chipset"); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
steev/luna-kernel
drivers/staging/rtl8821ae/btcoexist/rtl_btc.c
309
5957
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include <linux/vmalloc.h> #include <linux/module.h> #include "rtl_btc.h" #include "halbt_precomp.h" struct rtl_btc_ops rtl_btc_operation ={ .btc_init_variables = rtl_btc_init_variables, .btc_init_hal_vars = rtl_btc_init_hal_vars, .btc_init_hw_config = rtl_btc_init_hw_config, .btc_ips_notify = rtl_btc_ips_notify, .btc_scan_notify = rtl_btc_scan_notify, .btc_connect_notify = rtl_btc_connect_notify, .btc_mediastatus_notify = rtl_btc_mediastatus_notify, .btc_periodical = rtl_btc_periodical, .btc_halt_notify = rtl_btc_halt_notify, .btc_btinfo_notify = rtl_btc_btinfo_notify, .btc_is_limited_dig = rtl_btc_is_limited_dig, .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo, .btc_is_bt_disabled = rtl_btc_is_bt_disabled, }; void rtl_btc_init_variables(struct rtl_priv *rtlpriv) { exhalbtc_initlize_variables(rtlpriv); } void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv) { u8 ant_num; u8 bt_exist; u8 bt_type; ant_num = rtl_get_hwpg_ant_num(rtlpriv); RT_TRACE(COMP_INIT, DBG_DMESG, ("%s, antNum is %d\n", __func__, ant_num)); bt_exist = rtl_get_hwpg_bt_exist(rtlpriv); RT_TRACE(COMP_INIT, DBG_DMESG, ("%s, bt_exist is %d\n", __func__, bt_exist)); exhalbtc_set_bt_exist(bt_exist); bt_type = rtl_get_hwpg_bt_type(rtlpriv); RT_TRACE(COMP_INIT, DBG_DMESG, ("%s, bt_type is %d\n", __func__, bt_type)); exhalbtc_set_chip_type(bt_type); exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num); } void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv) { exhalbtc_init_hw_config(&gl_bt_coexist); exhalbtc_init_coex_dm(&gl_bt_coexist); } void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type) { exhalbtc_ips_notify(&gl_bt_coexist, type); } void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype) { exhalbtc_scan_notify(&gl_bt_coexist, scantype); } void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action) { exhalbtc_connect_notify(&gl_bt_coexist, action); } 
void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus) { exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus); } void rtl_btc_periodical(struct rtl_priv *rtlpriv) { // rtl_bt_dm_monitor(); exhalbtc_periodical(&gl_bt_coexist); } void rtl_btc_halt_notify(void) { exhalbtc_halt_notify(&gl_bt_coexist); } void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 * tmp_buf, u8 length) { exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length); } bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv) { return gl_bt_coexist.bt_info.limited_dig; } bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv) { bool bt_change_edca = false; u32 cur_edca_val; u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b; u32 edca_hs; u32 edca_addr = 0x504; cur_edca_val = rtl_read_dword(rtlpriv, edca_addr); if (halbtc_is_wifi_uplink(rtlpriv)){ if (cur_edca_val != edca_bt_hs_uplink){ edca_hs = edca_bt_hs_uplink; bt_change_edca = true; } }else{ if (cur_edca_val != edca_bt_hs_downlink){ edca_hs = edca_bt_hs_downlink; bt_change_edca = true; } } if(bt_change_edca) rtl_write_dword(rtlpriv, edca_addr, edca_hs); return true; } bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv) { if (gl_bt_coexist.bt_info.bt_disabled) return true; else return false; } struct rtl_btc_ops *rtl_btc_get_ops_pointer(void) { return &rtl_btc_operation; } //EXPORT_SYMBOL(rtl_btc_get_ops_pointer); u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) { u8 num; if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) num = 2; else num = 1; return num; } #if 0 enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); enum rt_media_status m_status = RT_MEDIA_DISCONNECT; u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 
1 : 0; if(bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) { m_status = RT_MEDIA_CONNECT; } return m_status; } #endif u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv) { return rtlpriv->btcoexist.btc_info.btcoexist; } u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) { return rtlpriv->btcoexist.btc_info.bt_type; } #if 0 MODULE_AUTHOR("Page He <page_he@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); static int __init rtl_btcoexist_module_init(void) { //printk("%s, rtlpriv->btc_ops.btc_init_variables addr is %p\n", __func__, rtlpriv->btc_ops.btc_init_variables); return 0; } static void __exit rtl_btcoexist_module_exit(void) { return; } module_init(rtl_btcoexist_module_init); module_exit(rtl_btcoexist_module_exit); #endif
gpl-2.0
rugalmop/mop-5.1.0-trinity
dep/acelite/ace/Atomic_Op_T.cpp
565
2122
// $Id: Atomic_Op_T.cpp 92052 2010-09-27 14:20:22Z vzykov $ #ifndef ACE_ATOMIC_OP_T_CPP #define ACE_ATOMIC_OP_T_CPP #include "ace/Atomic_Op_T.h" #ifdef ACE_HAS_DUMP # include "ace/Log_Msg.h" #endif /* ACE_HAS_DUMP */ #if !defined (ACE_LACKS_PRAGMA_ONCE) # pragma once #endif /* ACE_LACKS_PRAGMA_ONCE */ #if !defined (__ACE_INLINE__) #include "ace/Atomic_Op_T.inl" #endif /* __ACE_INLINE__ */ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_ALLOC_HOOK_DEFINE(ACE_Atomic_Op_Ex) ACE_ALLOC_HOOK_DEFINE(ACE_Atomic_Op) // ************************************************* template <class ACE_LOCK, class TYPE> ACE_LOCK & ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::mutex (void) { // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::mutex"); return this->mutex_; } template <class ACE_LOCK, class TYPE> void ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::dump (void) const { #if defined (ACE_HAS_DUMP) // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::dump"); ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this)); this->mutex_.dump (); ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP)); #endif /* ACE_HAS_DUMP */ } template <class ACE_LOCK, class TYPE> ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex (ACE_LOCK & mtx) : mutex_ (mtx) , value_ (0) { // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex"); } template <class ACE_LOCK, class TYPE> ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex ( ACE_LOCK & mtx, typename ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::arg_type c) : mutex_ (mtx) , value_ (c) { // ACE_TRACE ("ACE_Atomic_Op_Ex<ACE_LOCK, TYPE>::ACE_Atomic_Op_Ex"); } // **************************************************************** template <class ACE_LOCK, class TYPE> ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op (void) : impl_ (this->own_mutex_) { // ACE_TRACE ("ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op"); } template <class ACE_LOCK, class TYPE> ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op ( typename ACE_Atomic_Op<ACE_LOCK, TYPE>::arg_type c) : impl_ (own_mutex_, c) { // ACE_TRACE ("ACE_Atomic_Op<ACE_LOCK, TYPE>::ACE_Atomic_Op"); 
} ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_ATOMIC_OP_T_CPP */
gpl-2.0
Abhinav1997/kernel_sony_msm8930
drivers/staging/prima/CORE/SYS/legacy/src/pal/src/palTimer.c
821
7442
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /** Copyright (C) 2006 Airgo Networks, Incorporated This file contains function implementations for the Platform Abstration Layer. 
*/ #include <halTypes.h> #include <palTimer.h> #include <vos_timer.h> #include <vos_memory.h> typedef struct sPalTimer { palTimerCallback timerCallback; void *pContext; tHddHandle hHdd; // not really needed when mapping to vos timers tANI_U32 uTimerInterval; //meaningful only is fRestart is true tANI_BOOLEAN fRestart; vos_timer_t vosTimer; } tPalTimer, *tpPalTimer; v_VOID_t internalTimerCallback( v_PVOID_t userData ) { tPalTimer *pPalTimer = (tPalTimer *)userData; if ( pPalTimer ) { if ( pPalTimer->timerCallback ) { pPalTimer->timerCallback( pPalTimer->pContext ); } if ( pPalTimer->fRestart ) { palTimerStart( pPalTimer->hHdd, pPalTimer, pPalTimer->uTimerInterval, eANI_BOOLEAN_TRUE ); } } } #ifdef TIMER_MANAGER eHalStatus palTimerAlloc_debug( tHddHandle hHdd, tPalTimerHandle *phPalTimer, palTimerCallback pCallback, void *pContext, char* fileName, v_U32_t lineNum ) { eHalStatus halStatus = eHAL_STATUS_FAILURE; tPalTimer *pPalTimer = NULL; VOS_STATUS vosStatus; do { // allocate the internal timer structure. pPalTimer = vos_mem_malloc( sizeof( tPalTimer ) ); if ( NULL == pPalTimer ) break; // initialize the vos Timer that underlies the pal Timer. vosStatus = vos_timer_init_debug( &pPalTimer->vosTimer, VOS_TIMER_TYPE_SW, internalTimerCallback, pPalTimer, fileName, lineNum ); if ( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { // if fail to init the vos timer, free the memory and bail out. vos_mem_free( pPalTimer ); break; } // initialize the info in the internal palTimer struct so we can pPalTimer->timerCallback = pCallback; pPalTimer->pContext = pContext; pPalTimer->hHdd = hHdd; // return a 'handle' to the caller. *phPalTimer = pPalTimer; halStatus = eHAL_STATUS_SUCCESS; } while( 0 ); return( halStatus ); } #else eHalStatus palTimerAlloc( tHddHandle hHdd, tPalTimerHandle *phPalTimer, palTimerCallback pCallback, void *pContext ) { eHalStatus halStatus = eHAL_STATUS_FAILURE; tPalTimer *pPalTimer = NULL; VOS_STATUS vosStatus; do { // allocate the internal timer structure. 
pPalTimer = vos_mem_malloc( sizeof( tPalTimer ) ); if ( NULL == pPalTimer ) break; // initialize the vos Timer that underlies the pal Timer. vosStatus = vos_timer_init( &pPalTimer->vosTimer, VOS_TIMER_TYPE_SW, internalTimerCallback, pPalTimer ); if ( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { // if fail to init the vos timer, free the memory and bail out. vos_mem_free( pPalTimer ); break; } // initialize the info in the internal palTimer struct so we can pPalTimer->timerCallback = pCallback; pPalTimer->pContext = pContext; pPalTimer->hHdd = hHdd; // return a 'handle' to the caller. *phPalTimer = pPalTimer; halStatus = eHAL_STATUS_SUCCESS; } while( 0 ); return( halStatus ); } #endif eHalStatus palTimerFree( tHddHandle hHdd, tPalTimerHandle hPalTimer ) { eHalStatus status = eHAL_STATUS_INVALID_PARAMETER; VOS_STATUS vosStatus; tPalTimer *pPalTimer = (tPalTimer *)hPalTimer; do { if ( NULL == pPalTimer ) break; // Destroy the vos timer... vosStatus = vos_timer_destroy( &pPalTimer->vosTimer ); if ( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) break; // Free the memory for the intrnal timer struct... vos_mem_free( pPalTimer ); status = eHAL_STATUS_SUCCESS; } while( 0 ); return( status ); } eHalStatus palTimerStart(tHddHandle hHdd, tPalTimerHandle hPalTimer, tANI_U32 uExpireTime, tANI_BOOLEAN fRestart) { eHalStatus status = eHAL_STATUS_INVALID_PARAMETER; VOS_STATUS vosStatus; tANI_U32 expireTimeInMS = 0; tPalTimer *pPalTimer = (tPalTimer *)hPalTimer; do { if ( NULL == pPalTimer ) break; pPalTimer->fRestart = fRestart; pPalTimer->uTimerInterval = uExpireTime; // vos Timer takes expiration time in milliseconds. palTimerStart and // the uTimerIntervl in tPalTimer struct have expiration tiem in // microseconds. Make and adjustment from microseconds to milliseconds // before calling the vos_timer_start(). 
expireTimeInMS = uExpireTime / 1000; vosStatus = vos_timer_start( &pPalTimer->vosTimer, expireTimeInMS ); if ( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { status = eHAL_STATUS_FAILURE; break; } status = eHAL_STATUS_SUCCESS; } while( 0 ); return( status ); } eHalStatus palTimerStop(tHddHandle hHdd, tPalTimerHandle hPalTimer) { eHalStatus status = eHAL_STATUS_INVALID_PARAMETER; tPalTimer *pPalTimer = (tPalTimer *)hPalTimer; do { if ( NULL == pPalTimer ) break; vos_timer_stop( &pPalTimer->vosTimer ); // make sure the timer is not re-started. pPalTimer->fRestart = eANI_BOOLEAN_FALSE; status = eHAL_STATUS_SUCCESS; } while( 0 ); return( status ); }
gpl-2.0
akalongman/linux
sound/usb/line6/variax.c
1077
7917
/* * Line 6 Linux USB driver * * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/wait.h> #include <linux/module.h> #include <sound/core.h> #include "driver.h" #define VARIAX_STARTUP_DELAY1 1000 #define VARIAX_STARTUP_DELAY3 100 #define VARIAX_STARTUP_DELAY4 100 /* Stages of Variax startup procedure */ enum { VARIAX_STARTUP_INIT = 1, VARIAX_STARTUP_VERSIONREQ, VARIAX_STARTUP_WAIT, VARIAX_STARTUP_ACTIVATE, VARIAX_STARTUP_WORKQUEUE, VARIAX_STARTUP_SETUP, VARIAX_STARTUP_LAST = VARIAX_STARTUP_SETUP - 1 }; enum { LINE6_PODXTLIVE_VARIAX, LINE6_VARIAX }; struct usb_line6_variax { /* Generic Line 6 USB data */ struct usb_line6 line6; /* Buffer for activation code */ unsigned char *buffer_activate; /* Handler for device initialization */ struct work_struct startup_work; /* Timers for device initialization */ struct timer_list startup_timer1; struct timer_list startup_timer2; /* Current progress in startup procedure */ int startup_progress; }; #define VARIAX_OFFSET_ACTIVATE 7 /* This message is sent by the device during initialization and identifies the connected guitar version. */ static const char variax_init_version[] = { 0xf0, 0x7e, 0x7f, 0x06, 0x02, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x00, 0x00 }; /* This message is the last one sent by the device during initialization. 
*/ static const char variax_init_done[] = { 0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x6b }; static const char variax_activate[] = { 0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x2a, 0x01, 0xf7 }; /* forward declarations: */ static void variax_startup2(unsigned long data); static void variax_startup4(unsigned long data); static void variax_startup5(unsigned long data); static void variax_activate_async(struct usb_line6_variax *variax, int a) { variax->buffer_activate[VARIAX_OFFSET_ACTIVATE] = a; line6_send_raw_message_async(&variax->line6, variax->buffer_activate, sizeof(variax_activate)); } /* Variax startup procedure. This is a sequence of functions with special requirements (e.g., must not run immediately after initialization, must not run in interrupt context). After the last one has finished, the device is ready to use. */ static void variax_startup1(struct usb_line6_variax *variax) { CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_INIT); /* delay startup procedure: */ line6_start_timer(&variax->startup_timer1, VARIAX_STARTUP_DELAY1, variax_startup2, (unsigned long)variax); } static void variax_startup2(unsigned long data) { struct usb_line6_variax *variax = (struct usb_line6_variax *)data; struct usb_line6 *line6 = &variax->line6; /* schedule another startup procedure until startup is complete: */ if (variax->startup_progress >= VARIAX_STARTUP_LAST) return; variax->startup_progress = VARIAX_STARTUP_VERSIONREQ; line6_start_timer(&variax->startup_timer1, VARIAX_STARTUP_DELAY1, variax_startup2, (unsigned long)variax); /* request firmware version: */ line6_version_request_async(line6); } static void variax_startup3(struct usb_line6_variax *variax) { CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_WAIT); /* delay startup procedure: */ line6_start_timer(&variax->startup_timer2, VARIAX_STARTUP_DELAY3, variax_startup4, (unsigned long)variax); } static void variax_startup4(unsigned long data) { struct usb_line6_variax *variax = (struct 
usb_line6_variax *)data; CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_ACTIVATE); /* activate device: */ variax_activate_async(variax, 1); line6_start_timer(&variax->startup_timer2, VARIAX_STARTUP_DELAY4, variax_startup5, (unsigned long)variax); } static void variax_startup5(unsigned long data) { struct usb_line6_variax *variax = (struct usb_line6_variax *)data; CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_WORKQUEUE); /* schedule work for global work queue: */ schedule_work(&variax->startup_work); } static void variax_startup6(struct work_struct *work) { struct usb_line6_variax *variax = container_of(work, struct usb_line6_variax, startup_work); CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_SETUP); /* ALSA audio interface: */ snd_card_register(variax->line6.card); } /* Process a completely received message. */ static void line6_variax_process_message(struct usb_line6 *line6) { struct usb_line6_variax *variax = (struct usb_line6_variax *) line6; const unsigned char *buf = variax->line6.buffer_message; switch (buf[0]) { case LINE6_RESET: dev_info(variax->line6.ifcdev, "VARIAX reset\n"); break; case LINE6_SYSEX_BEGIN: if (memcmp(buf + 1, variax_init_version + 1, sizeof(variax_init_version) - 1) == 0) { variax_startup3(variax); } else if (memcmp(buf + 1, variax_init_done + 1, sizeof(variax_init_done) - 1) == 0) { /* notify of complete initialization: */ variax_startup4((unsigned long)variax); } break; } } /* Variax destructor. */ static void line6_variax_disconnect(struct usb_line6 *line6) { struct usb_line6_variax *variax = (struct usb_line6_variax *)line6; del_timer(&variax->startup_timer1); del_timer(&variax->startup_timer2); cancel_work_sync(&variax->startup_work); kfree(variax->buffer_activate); } /* Try to init workbench device. 
*/ static int variax_init(struct usb_line6 *line6, const struct usb_device_id *id) { struct usb_line6_variax *variax = (struct usb_line6_variax *) line6; int err; line6->process_message = line6_variax_process_message; line6->disconnect = line6_variax_disconnect; init_timer(&variax->startup_timer1); init_timer(&variax->startup_timer2); INIT_WORK(&variax->startup_work, variax_startup6); /* initialize USB buffers: */ variax->buffer_activate = kmemdup(variax_activate, sizeof(variax_activate), GFP_KERNEL); if (variax->buffer_activate == NULL) return -ENOMEM; /* initialize MIDI subsystem: */ err = line6_init_midi(&variax->line6); if (err < 0) return err; /* initiate startup procedure: */ variax_startup1(variax); return 0; } #define LINE6_DEVICE(prod) USB_DEVICE(0x0e41, prod) #define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n) /* table of devices that work with this driver */ static const struct usb_device_id variax_id_table[] = { { LINE6_IF_NUM(0x4650, 1), .driver_info = LINE6_PODXTLIVE_VARIAX }, { LINE6_DEVICE(0x534d), .driver_info = LINE6_VARIAX }, {} }; MODULE_DEVICE_TABLE(usb, variax_id_table); static const struct line6_properties variax_properties_table[] = { [LINE6_PODXTLIVE_VARIAX] = { .id = "PODxtLive", .name = "PODxt Live", .capabilities = LINE6_CAP_CONTROL, .altsetting = 1, .ep_ctrl_r = 0x86, .ep_ctrl_w = 0x05, .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, [LINE6_VARIAX] = { .id = "Variax", .name = "Variax Workbench", .capabilities = LINE6_CAP_CONTROL, .altsetting = 1, .ep_ctrl_r = 0x82, .ep_ctrl_w = 0x01, /* no audio channel */ } }; /* Probe USB device. 
*/ static int variax_probe(struct usb_interface *interface, const struct usb_device_id *id) { return line6_probe(interface, id, "Line6-Variax", &variax_properties_table[id->driver_info], variax_init, sizeof(struct usb_line6_variax)); } static struct usb_driver variax_driver = { .name = KBUILD_MODNAME, .probe = variax_probe, .disconnect = line6_disconnect, #ifdef CONFIG_PM .suspend = line6_suspend, .resume = line6_resume, .reset_resume = line6_resume, #endif .id_table = variax_id_table, }; module_usb_driver(variax_driver); MODULE_DESCRIPTION("Vairax Workbench USB driver"); MODULE_LICENSE("GPL");
gpl-2.0
kunato/s3-u6
drivers/staging/octeon/ethernet-xaui.c
3637
3605
/********************************************************************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2007 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information **********************************************************************/ #include <linux/kernel.h> #include <linux/netdevice.h> #include <net/dst.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "octeon-ethernet.h" #include "ethernet-util.h" #include "cvmx-helper.h" #include "cvmx-gmxx-defs.h" int cvm_oct_xaui_open(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); cvmx_helper_link_info_t link_info; gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 1; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); if (!octeon_is_simulation()) { link_info = cvmx_helper_link_get(priv->port); if (!link_info.s.link_up) netif_carrier_off(dev); } return 0; } int cvm_oct_xaui_stop(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 0; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); return 0; } static void cvm_oct_xaui_poll(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); cvmx_helper_link_info_t link_info; link_info = cvmx_helper_link_get(priv->port); if (link_info.u64 == priv->link_info) return; link_info = cvmx_helper_link_autoconf(priv->port); priv->link_info = link_info.u64; /* Tell Linux */ if (link_info.s.link_up) { if (!netif_carrier_ok(dev)) netif_carrier_on(dev); if (priv->queue != -1) DEBUGPRINT ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", dev->name, link_info.s.speed, (link_info.s.full_duplex) ? "Full" : "Half", priv->port, priv->queue); else DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n", dev->name, link_info.s.speed, (link_info.s.full_duplex) ? 
"Full" : "Half", priv->port); } else { if (netif_carrier_ok(dev)) netif_carrier_off(dev); DEBUGPRINT("%s: Link down\n", dev->name); } } int cvm_oct_xaui_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); cvm_oct_common_init(dev); dev->netdev_ops->ndo_stop(dev); if (!octeon_is_simulation() && priv->phydev == NULL) priv->poll = cvm_oct_xaui_poll; return 0; } void cvm_oct_xaui_uninit(struct net_device *dev) { cvm_oct_common_uninit(dev); }
gpl-2.0
aqua-project/Linux-Minimal-x86-Reimplementation
fs/hpfs/anode.c
4661
15737
/* * linux/fs/hpfs/anode.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * handling HPFS anode tree that contains file allocation info */ #include "hpfs_fn.h" /* Find a sector in allocation tree */ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, struct bplus_header *btree, unsigned sec, struct buffer_head *bh) { anode_secno a = -1; struct anode *anode; int i; int c1, c2 = 0; go_down: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; if (bp_internal(btree)) { for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { a = le32_to_cpu(btree->u.internal[i].down); brelse(bh); if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; btree = &anode->btree; goto go_down; } hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a); brelse(bh); return -1; } for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.external[i].file_secno) <= sec && le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) { a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) { brelse(bh); return -1; } if (inode) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno); hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno); hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length); } brelse(bh); return a; } hpfs_error(s, "sector %08x not found in external anode %08x", sec, a); brelse(bh); return -1; } /* Add a sector to tree */ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno) { struct bplus_header *btree; struct anode *anode = NULL, *ranode = NULL; struct fnode *fnode; anode_secno a, na = -1, ra, up = -1; secno se; struct buffer_head *bh, *bh1, *bh2; int n; unsigned fs; int 
c1, c2 = 0; if (fnod) { if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1; btree = &fnode->btree; } else { if (!(anode = hpfs_map_anode(s, node, &bh))) return -1; btree = &anode->btree; } a = node; go_down: if ((n = btree->n_used_nodes - 1) < -!!fnod) { hpfs_error(s, "anode %08x has no entries", a); brelse(bh); return -1; } if (bp_internal(btree)) { a = le32_to_cpu(btree->u.internal[n].down); btree->u.internal[n].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1; if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; btree = &anode->btree; goto go_down; } if (n >= 0) { if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) { hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x", le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno, fnod?'f':'a', node); brelse(bh); return -1; } if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { le32_add_cpu(&btree->u.external[n].length, 1); mark_buffer_dirty(bh); brelse(bh); return se; } } else { if (fsecno) { hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno); brelse(bh); return -1; } se = !fnod ? node : (node + 16384) & ~16383; } if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) { brelse(bh); return -1; } fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length); if (!btree->n_free_nodes) { up = a != node ? 
le32_to_cpu(anode->up) : -1; if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) { brelse(bh); hpfs_free_sectors(s, se, 1); return -1; } if (a == node && fnod) { anode->up = cpu_to_le32(node); anode->btree.flags |= BP_fnode_parent; anode->btree.n_used_nodes = btree->n_used_nodes; anode->btree.first_free = btree->first_free; anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes; memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12); btree->flags |= BP_internal; btree->n_free_nodes = 11; btree->n_used_nodes = 1; btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); btree->u.internal[0].file_secno = cpu_to_le32(-1); btree->u.internal[0].down = cpu_to_le32(na); mark_buffer_dirty(bh); } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) { brelse(bh); brelse(bh1); hpfs_free_sectors(s, se, 1); hpfs_free_sectors(s, na, 1); return -1; } brelse(bh); bh = bh1; btree = &anode->btree; } btree->n_free_nodes--; n = btree->n_used_nodes++; le16_add_cpu(&btree->first_free, 12); btree->u.external[n].disk_secno = cpu_to_le32(se); btree->u.external[n].file_secno = cpu_to_le32(fs); btree->u.external[n].length = cpu_to_le32(1); mark_buffer_dirty(bh); brelse(bh); if ((a == node && fnod) || na == -1) return se; c2 = 0; while (up != (anode_secno)-1) { struct anode *new_anode; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1; if (up != node || !fnod) { if (!(anode = hpfs_map_anode(s, up, &bh))) return -1; btree = &anode->btree; } else { if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1; btree = &fnode->btree; } if (btree->n_free_nodes) { btree->n_free_nodes--; n = btree->n_used_nodes++; le16_add_cpu(&btree->first_free, 8); btree->u.internal[n].file_secno = cpu_to_le32(-1); btree->u.internal[n].down = cpu_to_le32(na); btree->u.internal[n-1].file_secno = cpu_to_le32(fs); mark_buffer_dirty(bh); brelse(bh); brelse(bh2); hpfs_free_sectors(s, ra, 1); if ((anode = hpfs_map_anode(s, na, &bh))) { 
anode->up = cpu_to_le32(up); if (up == node && fnod) anode->btree.flags |= BP_fnode_parent; else anode->btree.flags &= ~BP_fnode_parent; mark_buffer_dirty(bh); brelse(bh); } return se; } up = up != node ? le32_to_cpu(anode->up) : -1; btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1); mark_buffer_dirty(bh); brelse(bh); a = na; if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { anode = new_anode; /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/ anode->btree.flags |= BP_internal; anode->btree.n_used_nodes = 1; anode->btree.n_free_nodes = 59; anode->btree.first_free = cpu_to_le16(16); anode->btree.u.internal[0].down = cpu_to_le32(a); anode->btree.u.internal[0].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); brelse(bh); if ((anode = hpfs_map_anode(s, a, &bh))) { anode->up = cpu_to_le32(na); mark_buffer_dirty(bh); brelse(bh); } } else na = a; } if ((anode = hpfs_map_anode(s, na, &bh))) { anode->up = cpu_to_le32(node); if (fnod) anode->btree.flags |= BP_fnode_parent; mark_buffer_dirty(bh); brelse(bh); } if (!fnod) { if (!(anode = hpfs_map_anode(s, node, &bh))) { brelse(bh2); return -1; } btree = &anode->btree; } else { if (!(fnode = hpfs_map_fnode(s, node, &bh))) { brelse(bh2); return -1; } btree = &fnode->btree; } ranode->up = cpu_to_le32(node); memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); if (fnod) ranode->btree.flags |= BP_fnode_parent; ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes; if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) { struct anode *unode; if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { unode->up = cpu_to_le32(ra); unode->btree.flags &= ~BP_fnode_parent; mark_buffer_dirty(bh1); brelse(bh1); } } btree->flags |= BP_internal; btree->n_free_nodes = fnod ? 
10 : 58; btree->n_used_nodes = 2; btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); btree->u.internal[0].file_secno = cpu_to_le32(fs); btree->u.internal[0].down = cpu_to_le32(ra); btree->u.internal[1].file_secno = cpu_to_le32(-1); btree->u.internal[1].down = cpu_to_le32(na); mark_buffer_dirty(bh); brelse(bh); mark_buffer_dirty(bh2); brelse(bh2); return se; } /* * Remove allocation tree. Recursion would look much nicer but * I want to avoid it because it can cause stack overflow. */ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) { struct bplus_header *btree1 = btree; struct anode *anode = NULL; anode_secno ano = 0, oano; struct buffer_head *bh; int level = 0; int pos = 0; int i; int c1, c2 = 0; int d1, d2; go_down: d2 = 0; while (bp_internal(btree1)) { ano = le32_to_cpu(btree1->u.internal[pos].down); if (level) brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1")) return; if (!(anode = hpfs_map_anode(s, ano, &bh))) return; btree1 = &anode->btree; level++; pos = 0; } for (i = 0; i < btree1->n_used_nodes; i++) hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length)); go_up: if (!level) return; brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return; hpfs_free_sectors(s, ano, 1); oano = ano; ano = le32_to_cpu(anode->up); if (--level) { if (!(anode = hpfs_map_anode(s, ano, &bh))) return; btree1 = &anode->btree; } else btree1 = btree; for (i = 0; i < btree1->n_used_nodes; i++) { if (le32_to_cpu(btree1->u.internal[i].down) == oano) { if ((pos = i + 1) < btree1->n_used_nodes) goto go_down; else goto go_up; } } hpfs_error(s, "reference to anode %08x not found in anode %08x " "(probably bad up pointer)", oano, level ? ano : -1); if (level) brelse(bh); } /* Just a wrapper around hpfs_bplus_lookup .. 
used for reading eas */ static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec) { struct anode *anode; struct buffer_head *bh; if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh); } int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos, unsigned len, char *buf) { struct buffer_head *bh; char *data; secno sec; unsigned l; while (len) { if (ano) { if ((sec = anode_lookup(s, a, pos >> 9)) == -1) return -1; } else sec = a + (pos >> 9); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1; if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9))) return -1; l = 0x200 - (pos & 0x1ff); if (l > len) l = len; memcpy(buf, data + (pos & 0x1ff), l); brelse(bh); buf += l; pos += l; len -= l; } return 0; } int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos, unsigned len, const char *buf) { struct buffer_head *bh; char *data; secno sec; unsigned l; while (len) { if (ano) { if ((sec = anode_lookup(s, a, pos >> 9)) == -1) return -1; } else sec = a + (pos >> 9); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1; if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9))) return -1; l = 0x200 - (pos & 0x1ff); if (l > len) l = len; memcpy(data + (pos & 0x1ff), buf, l); mark_buffer_dirty(bh); brelse(bh); buf += l; pos += l; len -= l; } return 0; } void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len) { struct anode *anode; struct buffer_head *bh; if (ano) { if (!(anode = hpfs_map_anode(s, a, &bh))) return; hpfs_remove_btree(s, &anode->btree); brelse(bh); hpfs_free_sectors(s, a, 1); } else hpfs_free_sectors(s, a, (len + 511) >> 9); } /* Truncate allocation tree. 
Doesn't join anodes - I hope it doesn't matter */ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) { struct fnode *fnode; struct anode *anode; struct buffer_head *bh; struct bplus_header *btree; anode_secno node = f; int i, j, nodes; int c1, c2 = 0; if (fno) { if (!(fnode = hpfs_map_fnode(s, f, &bh))) return; btree = &fnode->btree; } else { if (!(anode = hpfs_map_anode(s, f, &bh))) return; btree = &anode->btree; } if (!secs) { hpfs_remove_btree(s, btree); if (fno) { btree->n_free_nodes = 8; btree->n_used_nodes = 0; btree->first_free = cpu_to_le16(8); btree->flags &= ~BP_internal; mark_buffer_dirty(bh); } else hpfs_free_sectors(s, f, 1); brelse(bh); return; } while (bp_internal(btree)) { nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; brelse(bh); hpfs_error(s, "internal btree %08x doesn't end with -1", node); return; f: for (j = i + 1; j < btree->n_used_nodes; j++) hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0); btree->n_used_nodes = i + 1; btree->n_free_nodes = nodes - btree->n_used_nodes; btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes); mark_buffer_dirty(bh); if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) { brelse(bh); return; } node = le32_to_cpu(btree->u.internal[i].down); brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree")) return; if (!(anode = hpfs_map_anode(s, node, &bh))) return; btree = &anode->btree; } nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff; brelse(bh); return; ff: if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) { hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs); if (i) i--; } else if 
(le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) { hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs - le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length) - secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */ btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno)); } for (j = i + 1; j < btree->n_used_nodes; j++) hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length)); btree->n_used_nodes = i + 1; btree->n_free_nodes = nodes - btree->n_used_nodes; btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes); mark_buffer_dirty(bh); brelse(bh); } /* Remove file or directory and it's eas - note that directory must be empty when this is called. */ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno) { struct buffer_head *bh; struct fnode *fnode; struct extended_attribute *ea; struct extended_attribute *ea_end; if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree); else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (ea_indirect(ea)) hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea)); hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l)); brelse(bh); hpfs_free_sectors(s, fno, 1); }
gpl-2.0
suzuke/pdk7105-stm-kernel
arch/x86/kvm/timer.c
4917
1389
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * timer support * * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/hrtimer.h> #include <linux/atomic.h> #include "kvm_timer.h" enum hrtimer_restart kvm_timer_fn(struct hrtimer *data) { struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); struct kvm_vcpu *vcpu = ktimer->vcpu; wait_queue_head_t *q = &vcpu->wq; /* * There is a race window between reading and incrementing, but we do * not care about potentially losing timer events in the !reinject * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked * in vcpu_enter_guest. */ if (ktimer->reinject || !atomic_read(&ktimer->pending)) { atomic_inc(&ktimer->pending); /* FIXME: this code should not know anything about vcpus */ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); } if (waitqueue_active(q)) wake_up_interruptible(q); if (ktimer->t_ops->is_periodic(ktimer)) { hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); return HRTIMER_RESTART; } else return HRTIMER_NORESTART; }
gpl-2.0
V1sk/android_kernel_sony_msm8960
arch/mn10300/mm/tlb-smp.c
7221
5233
/* SMP TLB support routines. * * Copyright (C) 2006-2008 Panasonic Corporation * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/cpumask.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/profile.h> #include <linux/smp.h> #include <asm/tlbflush.h> #include <asm/bitops.h> #include <asm/processor.h> #include <asm/bug.h> #include <asm/exceptions.h> #include <asm/hardirq.h> #include <asm/fpu.h> #include <asm/mmu_context.h> #include <asm/thread_info.h> #include <asm/cpu-regs.h> #include <asm/intctl-regs.h> /* * For flush TLB */ #define FLUSH_ALL 0xffffffff static cpumask_t flush_cpumask; static struct mm_struct *flush_mm; static unsigned long flush_va; static DEFINE_SPINLOCK(tlbstate_lock); DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, unsigned long va); static void do_flush_tlb_all(void *info); /** * smp_flush_tlb - Callback to invalidate the TLB. * @unused: Callback context (ignored). 
*/ void smp_flush_tlb(void *unused) { unsigned long cpu_id; cpu_id = get_cpu(); if (!cpumask_test_cpu(cpu_id, &flush_cpumask)) /* This was a BUG() but until someone can quote me the line * from the intel manual that guarantees an IPI to multiple * CPUs is retried _only_ on the erroring CPUs its staying as a * return * * BUG(); */ goto out; if (flush_va == FLUSH_ALL) local_flush_tlb(); else local_flush_tlb_page(flush_mm, flush_va); smp_mb__before_clear_bit(); cpumask_clear_cpu(cpu_id, &flush_cpumask); smp_mb__after_clear_bit(); out: put_cpu(); } /** * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs * @cpumask: The list of CPUs to target. * @mm: The VM context to flush from (if va!=FLUSH_ALL). * @va: Virtual address to flush or FLUSH_ALL to flush everything. */ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, unsigned long va) { cpumask_t tmp; /* A couple of sanity checks (to be removed): * - mask must not be empty * - current CPU must not be in mask * - we do not send IPIs to as-yet unbooted CPUs. */ BUG_ON(!mm); BUG_ON(cpumask_empty(&cpumask)); BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask)); cpumask_and(&tmp, &cpumask, cpu_online_mask); BUG_ON(!cpumask_equal(&cpumask, &tmp)); /* I'm not happy about this global shared spinlock in the MM hot path, * but we'll see how contended it is. * * Temporarily this turns IRQs off, so that lockups are detected by the * NMI watchdog. */ spin_lock(&tlbstate_lock); flush_mm = mm; flush_va = va; #if NR_CPUS <= BITS_PER_LONG atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]); #else #error Not supported. #endif /* FIXME: if NR_CPUS>=3, change send_IPI_mask */ smp_call_function(smp_flush_tlb, NULL, 1); while (!cpumask_empty(&flush_cpumask)) /* Lockup detection does not belong here */ smp_mb(); flush_mm = NULL; flush_va = 0; spin_unlock(&tlbstate_lock); } /** * flush_tlb_mm - Invalidate TLB of specified VM context * @mm: The VM context to invalidate. 
*/ void flush_tlb_mm(struct mm_struct *mm) { cpumask_t cpu_mask; preempt_disable(); cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); local_flush_tlb(); if (!cpumask_empty(&cpu_mask)) flush_tlb_others(cpu_mask, mm, FLUSH_ALL); preempt_enable(); } /** * flush_tlb_current_task - Invalidate TLB of current task */ void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; cpumask_t cpu_mask; preempt_disable(); cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); local_flush_tlb(); if (!cpumask_empty(&cpu_mask)) flush_tlb_others(cpu_mask, mm, FLUSH_ALL); preempt_enable(); } /** * flush_tlb_page - Invalidate TLB of page * @vma: The VM context to invalidate the page for. * @va: The virtual address of the page to invalidate. */ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) { struct mm_struct *mm = vma->vm_mm; cpumask_t cpu_mask; preempt_disable(); cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); local_flush_tlb_page(mm, va); if (!cpumask_empty(&cpu_mask)) flush_tlb_others(cpu_mask, mm, va); preempt_enable(); } /** * do_flush_tlb_all - Callback to completely invalidate a TLB * @unused: Callback context (ignored). */ static void do_flush_tlb_all(void *unused) { local_flush_tlb_all(); } /** * flush_tlb_all - Completely invalidate TLBs on all CPUs */ void flush_tlb_all(void) { on_each_cpu(do_flush_tlb_all, 0, 1); }
gpl-2.0
AOSPA-L/android_kernel_oppo_msm8974
drivers/staging/rtl8712/usb_halinit.c
7989
12188
/****************************************************************************** * usb_halinit.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _HCI_HAL_INIT_C_ #include "osdep_service.h" #include "drv_types.h" #include "usb_ops.h" #include "usb_osintf.h" u8 r8712_usb_hal_bus_init(struct _adapter *padapter) { u8 val8 = 0; u8 ret = _SUCCESS; int PollingCnt = 20; struct registry_priv *pregistrypriv = &padapter->registrypriv; if (pregistrypriv->chip_version == RTL8712_FPGA) { val8 = 0x01; /* switch to 80M clock */ r8712_write8(padapter, SYS_CLKR, val8); val8 = r8712_read8(padapter, SPS1_CTRL); val8 = val8 | 0x01; /* enable VSPS12 LDO Macro block */ r8712_write8(padapter, SPS1_CTRL, val8); val8 = r8712_read8(padapter, AFE_MISC); val8 = val8 | 0x01; /* Enable AFE Macro Block's Bandgap */ r8712_write8(padapter, AFE_MISC, val8); val8 = r8712_read8(padapter, LDOA15_CTRL); val8 = val8 | 0x01; /* enable LDOA15 block */ r8712_write8(padapter, LDOA15_CTRL, val8); val8 = 
r8712_read8(padapter, SPS1_CTRL); val8 = val8 | 0x02; /* Enable VSPS12_SW Macro Block */ r8712_write8(padapter, SPS1_CTRL, val8); val8 = r8712_read8(padapter, AFE_MISC); val8 = val8 | 0x02; /* Enable AFE Macro Block's Mbias */ r8712_write8(padapter, AFE_MISC, val8); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); val8 = val8 | 0x08; /* isolate PCIe Analog 1.2V to PCIe 3.3V and PCIE Digital */ r8712_write8(padapter, SYS_ISO_CTRL + 1, val8); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); val8 = val8 & 0xEF; /* attatch AFE PLL to MACTOP/BB/PCIe Digital */ r8712_write8(padapter, SYS_ISO_CTRL + 1, val8); val8 = r8712_read8(padapter, AFE_XTAL_CTRL + 1); val8 = val8 & 0xFB; /* enable AFE clock */ r8712_write8(padapter, AFE_XTAL_CTRL + 1, val8); val8 = r8712_read8(padapter, AFE_PLL_CTRL); val8 = val8 | 0x01; /* Enable AFE PLL Macro Block */ r8712_write8(padapter, AFE_PLL_CTRL, val8); val8 = 0xEE; /* release isolation AFE PLL & MD */ r8712_write8(padapter, SYS_ISO_CTRL, val8); val8 = r8712_read8(padapter, SYS_CLKR + 1); val8 = val8 | 0x08; /* enable MAC clock */ r8712_write8(padapter, SYS_CLKR + 1, val8); val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); val8 = val8 | 0x08; /* enable Core digital and enable IOREG R/W */ r8712_write8(padapter, SYS_FUNC_EN + 1, val8); val8 = val8 | 0x80; /* enable REG_EN */ r8712_write8(padapter, SYS_FUNC_EN + 1, val8); val8 = r8712_read8(padapter, SYS_CLKR + 1); val8 = (val8 | 0x80) & 0xBF; /* switch the control path */ r8712_write8(padapter, SYS_CLKR + 1, val8); val8 = 0xFC; r8712_write8(padapter, CR, val8); val8 = 0x37; r8712_write8(padapter, CR + 1, val8); /* reduce EndPoint & init it */ r8712_write8(padapter, 0x102500ab, r8712_read8(padapter, 0x102500ab) | BIT(6) | BIT(7)); /* consideration of power consumption - init */ r8712_write8(padapter, 0x10250008, r8712_read8(padapter, 0x10250008) & 0xfffffffb); } else if (pregistrypriv->chip_version == RTL8712_1stCUT) { /* Initialization for power on sequence, */ r8712_write8(padapter, SPS0_CTRL + 
1, 0x53); r8712_write8(padapter, SPS0_CTRL, 0x57); /* Enable AFE Macro Block's Bandgap and Enable AFE Macro * Block's Mbias */ val8 = r8712_read8(padapter, AFE_MISC); r8712_write8(padapter, AFE_MISC, (val8 | AFE_MISC_BGEN | AFE_MISC_MBEN)); /* Enable LDOA15 block */ val8 = r8712_read8(padapter, LDOA15_CTRL); r8712_write8(padapter, LDOA15_CTRL, (val8 | LDA15_EN)); val8 = r8712_read8(padapter, SPS1_CTRL); r8712_write8(padapter, SPS1_CTRL, (val8 | SPS1_LDEN)); msleep(20); /* Enable Switch Regulator Block */ val8 = r8712_read8(padapter, SPS1_CTRL); r8712_write8(padapter, SPS1_CTRL, (val8 | SPS1_SWEN)); r8712_write32(padapter, SPS1_CTRL, 0x00a7b267); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); r8712_write8(padapter, SYS_ISO_CTRL + 1, (val8 | 0x08)); /* Engineer Packet CP test Enable */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x20)); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); r8712_write8(padapter, SYS_ISO_CTRL + 1, (val8 & 0x6F)); /* Enable AFE clock */ val8 = r8712_read8(padapter, AFE_XTAL_CTRL + 1); r8712_write8(padapter, AFE_XTAL_CTRL + 1, (val8 & 0xfb)); /* Enable AFE PLL Macro Block */ val8 = r8712_read8(padapter, AFE_PLL_CTRL); r8712_write8(padapter, AFE_PLL_CTRL, (val8 | 0x11)); /* Attatch AFE PLL to MACTOP/BB/PCIe Digital */ val8 = r8712_read8(padapter, SYS_ISO_CTRL); r8712_write8(padapter, SYS_ISO_CTRL, (val8 & 0xEE)); /* Switch to 40M clock */ val8 = r8712_read8(padapter, SYS_CLKR); r8712_write8(padapter, SYS_CLKR, val8 & (~SYS_CLKSEL)); /* SSC Disable */ val8 = r8712_read8(padapter, SYS_CLKR); /* Enable MAC clock */ val8 = r8712_read8(padapter, SYS_CLKR + 1); r8712_write8(padapter, SYS_CLKR + 1, (val8 | 0x18)); /* Revised POS, */ r8712_write8(padapter, PMC_FSM, 0x02); /* Enable Core digital and enable IOREG R/W */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x08)); /* Enable REG_EN */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); 
r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x80)); /* Switch the control path to FW */ val8 = r8712_read8(padapter, SYS_CLKR + 1); r8712_write8(padapter, SYS_CLKR + 1, (val8 | 0x80) & 0xBF); r8712_write8(padapter, CR, 0xFC); r8712_write8(padapter, CR + 1, 0x37); /* Fix the RX FIFO issue(usb error), */ val8 = r8712_read8(padapter, 0x1025FE5c); r8712_write8(padapter, 0x1025FE5c, (val8|BIT(7))); val8 = r8712_read8(padapter, 0x102500ab); r8712_write8(padapter, 0x102500ab, (val8|BIT(6)|BIT(7))); /* For power save, used this in the bit file after 970621 */ val8 = r8712_read8(padapter, SYS_CLKR); r8712_write8(padapter, SYS_CLKR, val8&(~CPU_CLKSEL)); } else if (pregistrypriv->chip_version == RTL8712_2ndCUT || pregistrypriv->chip_version == RTL8712_3rdCUT) { /* Initialization for power on sequence, * E-Fuse leakage prevention sequence */ r8712_write8(padapter, 0x37, 0xb0); msleep(20); r8712_write8(padapter, 0x37, 0x30); /* Set control path switch to HW control and reset Digital Core, * CPU Core and MAC I/O to solve FW download fail when system * from resume sate. 
*/ val8 = r8712_read8(padapter, SYS_CLKR + 1); if (val8 & 0x80) { val8 &= 0x3f; r8712_write8(padapter, SYS_CLKR + 1, val8); } val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); val8 &= 0x73; r8712_write8(padapter, SYS_FUNC_EN + 1, val8); msleep(20); /* Revised POS, */ /* Enable AFE Macro Block's Bandgap and Enable AFE Macro * Block's Mbias */ r8712_write8(padapter, SPS0_CTRL + 1, 0x53); r8712_write8(padapter, SPS0_CTRL, 0x57); val8 = r8712_read8(padapter, AFE_MISC); /*Bandgap*/ r8712_write8(padapter, AFE_MISC, (val8 | AFE_MISC_BGEN)); r8712_write8(padapter, AFE_MISC, (val8 | AFE_MISC_BGEN | AFE_MISC_MBEN | AFE_MISC_I32_EN)); /* Enable PLL Power (LDOA15V) */ val8 = r8712_read8(padapter, LDOA15_CTRL); r8712_write8(padapter, LDOA15_CTRL, (val8 | LDA15_EN)); /* Enable LDOV12D block */ val8 = r8712_read8(padapter, LDOV12D_CTRL); r8712_write8(padapter, LDOV12D_CTRL, (val8 | LDV12_EN)); val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); r8712_write8(padapter, SYS_ISO_CTRL + 1, (val8 | 0x08)); /* Engineer Packet CP test Enable */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x20)); /* Support 64k IMEM */ val8 = r8712_read8(padapter, SYS_ISO_CTRL + 1); r8712_write8(padapter, SYS_ISO_CTRL + 1, (val8 & 0x68)); /* Enable AFE clock */ val8 = r8712_read8(padapter, AFE_XTAL_CTRL + 1); r8712_write8(padapter, AFE_XTAL_CTRL + 1, (val8 & 0xfb)); /* Enable AFE PLL Macro Block */ val8 = r8712_read8(padapter, AFE_PLL_CTRL); r8712_write8(padapter, AFE_PLL_CTRL, (val8 | 0x11)); /* Some sample will download fw failure. 
The clock will be * stable with 500 us delay after reset the PLL * TODO: When usleep is added to kernel, change next 3 * udelay(500) to usleep(500) */ udelay(500); r8712_write8(padapter, AFE_PLL_CTRL, (val8 | 0x51)); udelay(500); r8712_write8(padapter, AFE_PLL_CTRL, (val8 | 0x11)); udelay(500); /* Attatch AFE PLL to MACTOP/BB/PCIe Digital */ val8 = r8712_read8(padapter, SYS_ISO_CTRL); r8712_write8(padapter, SYS_ISO_CTRL, (val8 & 0xEE)); /* Switch to 40M clock */ r8712_write8(padapter, SYS_CLKR, 0x00); /* CPU Clock and 80M Clock SSC Disable to overcome FW download * fail timing issue. */ val8 = r8712_read8(padapter, SYS_CLKR); r8712_write8(padapter, SYS_CLKR, (val8 | 0xa0)); /* Enable MAC clock */ val8 = r8712_read8(padapter, SYS_CLKR + 1); r8712_write8(padapter, SYS_CLKR + 1, (val8 | 0x18)); /* Revised POS, */ r8712_write8(padapter, PMC_FSM, 0x02); /* Enable Core digital and enable IOREG R/W */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x08)); /* Enable REG_EN */ val8 = r8712_read8(padapter, SYS_FUNC_EN + 1); r8712_write8(padapter, SYS_FUNC_EN + 1, (val8 | 0x80)); /* Switch the control path to FW */ val8 = r8712_read8(padapter, SYS_CLKR + 1); r8712_write8(padapter, SYS_CLKR + 1, (val8 | 0x80) & 0xBF); r8712_write8(padapter, CR, 0xFC); r8712_write8(padapter, CR + 1, 0x37); /* Fix the RX FIFO issue(usb error), 970410 */ val8 = r8712_read8(padapter, 0x1025FE5c); r8712_write8(padapter, 0x1025FE5c, (val8 | BIT(7))); /* For power save, used this in the bit file after 970621 */ val8 = r8712_read8(padapter, SYS_CLKR); r8712_write8(padapter, SYS_CLKR, val8 & (~CPU_CLKSEL)); /* Revised for 8051 ROM code wrong operation. */ r8712_write8(padapter, 0x1025fe1c, 0x80); /* To make sure that TxDMA can ready to download FW. * We should reset TxDMA if IMEM RPT was not ready. 
*/ do { val8 = r8712_read8(padapter, TCR); if ((val8 & _TXDMA_INIT_VALUE) == _TXDMA_INIT_VALUE) break; udelay(5); /* PlatformStallExecution(5); */ } while (PollingCnt--); /* Delay 1ms */ if (PollingCnt <= 0) { val8 = r8712_read8(padapter, CR); r8712_write8(padapter, CR, val8&(~_TXDMA_EN)); udelay(2); /* PlatformStallExecution(2); */ /* Reset TxDMA */ r8712_write8(padapter, CR, val8|_TXDMA_EN); } } else ret = _FAIL; return ret; } unsigned int r8712_usb_inirp_init(struct _adapter *padapter) { u8 i; struct recv_buf *precvbuf; struct intf_hdl *pintfhdl = &padapter->pio_queue->intf; struct recv_priv *precvpriv = &(padapter->recvpriv); precvpriv->ff_hwaddr = RTL8712_DMA_RX0FF; /* mapping rx fifo address */ /* issue Rx irp to receive data */ precvbuf = (struct recv_buf *)precvpriv->precv_buf; for (i = 0; i < NR_RECVBUFF; i++) { if (r8712_usb_read_port(pintfhdl, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) return _FAIL; precvbuf++; precvpriv->free_recv_buf_queue_cnt--; } return _SUCCESS; } unsigned int r8712_usb_inirp_deinit(struct _adapter *padapter) { r8712_usb_read_port_cancel(padapter); return _SUCCESS; }
gpl-2.0
yexihu/kernel-msm
arch/ia64/kernel/ia64_ksyms.c
8501
2506
/* * Architecture-specific kernel symbols * * Don't put any exports here unless it's defined in an assembler file. * All other exports should be put directly after the definition. */ #include <linux/module.h> #include <linux/string.h> EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(strlen); #include<asm/pgtable.h> EXPORT_SYMBOL_GPL(empty_zero_page); #include <asm/checksum.h> EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ EXPORT_SYMBOL(csum_ipv6_magic); #include <asm/page.h> EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); #ifdef CONFIG_VIRTUAL_MEM_MAP #include <linux/bootmem.h> EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ #endif #include <asm/processor.h> EXPORT_SYMBOL(ia64_cpu_info); #ifdef CONFIG_SMP EXPORT_SYMBOL(local_per_cpu_offset); #endif #include <asm/uaccess.h> EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__do_clear_user); EXPORT_SYMBOL(__strlen_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strnlen_user); /* from arch/ia64/lib */ extern void __divsi3(void); extern void __udivsi3(void); extern void __modsi3(void); extern void __umodsi3(void); extern void __divdi3(void); extern void __udivdi3(void); extern void __moddi3(void); extern void __umoddi3(void); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(__divdi3); EXPORT_SYMBOL(__udivdi3); EXPORT_SYMBOL(__moddi3); EXPORT_SYMBOL(__umoddi3); #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) extern void xor_ia64_2(void); extern void xor_ia64_3(void); extern void xor_ia64_4(void); extern void xor_ia64_5(void); EXPORT_SYMBOL(xor_ia64_2); EXPORT_SYMBOL(xor_ia64_3); EXPORT_SYMBOL(xor_ia64_4); EXPORT_SYMBOL(xor_ia64_5); #endif #include <asm/pal.h> EXPORT_SYMBOL(ia64_pal_call_phys_stacked); EXPORT_SYMBOL(ia64_pal_call_phys_static); EXPORT_SYMBOL(ia64_pal_call_stacked); 
EXPORT_SYMBOL(ia64_pal_call_static); EXPORT_SYMBOL(ia64_load_scratch_fpregs); EXPORT_SYMBOL(ia64_save_scratch_fpregs); #include <asm/unwind.h> EXPORT_SYMBOL(unw_init_running); #if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) extern void esi_call_phys (void); EXPORT_SYMBOL_GPL(esi_call_phys); #endif extern char ia64_ivt[]; EXPORT_SYMBOL(ia64_ivt); #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER /* mcount is defined in assembly */ EXPORT_SYMBOL(_mcount); #endif
gpl-2.0
v-superuser/android_kernel_htc_msm8974
arch/ia64/kernel/ia64_ksyms.c
8501
2506
/* * Architecture-specific kernel symbols * * Don't put any exports here unless it's defined in an assembler file. * All other exports should be put directly after the definition. */ #include <linux/module.h> #include <linux/string.h> EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(strlen); #include<asm/pgtable.h> EXPORT_SYMBOL_GPL(empty_zero_page); #include <asm/checksum.h> EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ EXPORT_SYMBOL(csum_ipv6_magic); #include <asm/page.h> EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); #ifdef CONFIG_VIRTUAL_MEM_MAP #include <linux/bootmem.h> EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ #endif #include <asm/processor.h> EXPORT_SYMBOL(ia64_cpu_info); #ifdef CONFIG_SMP EXPORT_SYMBOL(local_per_cpu_offset); #endif #include <asm/uaccess.h> EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__do_clear_user); EXPORT_SYMBOL(__strlen_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strnlen_user); /* from arch/ia64/lib */ extern void __divsi3(void); extern void __udivsi3(void); extern void __modsi3(void); extern void __umodsi3(void); extern void __divdi3(void); extern void __udivdi3(void); extern void __moddi3(void); extern void __umoddi3(void); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(__divdi3); EXPORT_SYMBOL(__udivdi3); EXPORT_SYMBOL(__moddi3); EXPORT_SYMBOL(__umoddi3); #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) extern void xor_ia64_2(void); extern void xor_ia64_3(void); extern void xor_ia64_4(void); extern void xor_ia64_5(void); EXPORT_SYMBOL(xor_ia64_2); EXPORT_SYMBOL(xor_ia64_3); EXPORT_SYMBOL(xor_ia64_4); EXPORT_SYMBOL(xor_ia64_5); #endif #include <asm/pal.h> EXPORT_SYMBOL(ia64_pal_call_phys_stacked); EXPORT_SYMBOL(ia64_pal_call_phys_static); EXPORT_SYMBOL(ia64_pal_call_stacked); 
EXPORT_SYMBOL(ia64_pal_call_static); EXPORT_SYMBOL(ia64_load_scratch_fpregs); EXPORT_SYMBOL(ia64_save_scratch_fpregs); #include <asm/unwind.h> EXPORT_SYMBOL(unw_init_running); #if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) extern void esi_call_phys (void); EXPORT_SYMBOL_GPL(esi_call_phys); #endif extern char ia64_ivt[]; EXPORT_SYMBOL(ia64_ivt); #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER /* mcount is defined in assembly */ EXPORT_SYMBOL(_mcount); #endif
gpl-2.0
KFire-Android/kernel_omap_otter-common
arch/ia64/kernel/ia64_ksyms.c
8501
2506
/* * Architecture-specific kernel symbols * * Don't put any exports here unless it's defined in an assembler file. * All other exports should be put directly after the definition. */ #include <linux/module.h> #include <linux/string.h> EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(strlen); #include<asm/pgtable.h> EXPORT_SYMBOL_GPL(empty_zero_page); #include <asm/checksum.h> EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ EXPORT_SYMBOL(csum_ipv6_magic); #include <asm/page.h> EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); #ifdef CONFIG_VIRTUAL_MEM_MAP #include <linux/bootmem.h> EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ #endif #include <asm/processor.h> EXPORT_SYMBOL(ia64_cpu_info); #ifdef CONFIG_SMP EXPORT_SYMBOL(local_per_cpu_offset); #endif #include <asm/uaccess.h> EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__do_clear_user); EXPORT_SYMBOL(__strlen_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strnlen_user); /* from arch/ia64/lib */ extern void __divsi3(void); extern void __udivsi3(void); extern void __modsi3(void); extern void __umodsi3(void); extern void __divdi3(void); extern void __udivdi3(void); extern void __moddi3(void); extern void __umoddi3(void); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(__divdi3); EXPORT_SYMBOL(__udivdi3); EXPORT_SYMBOL(__moddi3); EXPORT_SYMBOL(__umoddi3); #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) extern void xor_ia64_2(void); extern void xor_ia64_3(void); extern void xor_ia64_4(void); extern void xor_ia64_5(void); EXPORT_SYMBOL(xor_ia64_2); EXPORT_SYMBOL(xor_ia64_3); EXPORT_SYMBOL(xor_ia64_4); EXPORT_SYMBOL(xor_ia64_5); #endif #include <asm/pal.h> EXPORT_SYMBOL(ia64_pal_call_phys_stacked); EXPORT_SYMBOL(ia64_pal_call_phys_static); EXPORT_SYMBOL(ia64_pal_call_stacked); 
EXPORT_SYMBOL(ia64_pal_call_static); EXPORT_SYMBOL(ia64_load_scratch_fpregs); EXPORT_SYMBOL(ia64_save_scratch_fpregs); #include <asm/unwind.h> EXPORT_SYMBOL(unw_init_running); #if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) extern void esi_call_phys (void); EXPORT_SYMBOL_GPL(esi_call_phys); #endif extern char ia64_ivt[]; EXPORT_SYMBOL(ia64_ivt); #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER /* mcount is defined in assembly */ EXPORT_SYMBOL(_mcount); #endif
gpl-2.0
Beeko/android_kernel_samsung_espresso10
arch/tile/lib/strchr_32.c
10037
1971
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #undef strchr char *strchr(const char *s, int c) { int z, g; /* Get an aligned pointer. */ const uintptr_t s_int = (uintptr_t) s; const uint32_t *p = (const uint32_t *)(s_int & -4); /* Create four copies of the byte for which we are looking. */ const uint32_t goal = 0x01010101 * (uint8_t) c; /* Read the first aligned word, but force bytes before the string to * match neither zero nor goal (we make sure the high bit of each * byte is 1, and the low 7 bits are all the opposite of the goal * byte). * * Note that this shift count expression works because we know shift * counts are taken mod 32. */ const uint32_t before_mask = (1 << (s_int << 3)) - 1; uint32_t v = (*p | before_mask) ^ (goal & __insn_shrib(before_mask, 1)); uint32_t zero_matches, goal_matches; while (1) { /* Look for a terminating '\0'. */ zero_matches = __insn_seqb(v, 0); /* Look for the goal byte. */ goal_matches = __insn_seqb(v, goal); if (__builtin_expect(zero_matches | goal_matches, 0)) break; v = *++p; } z = __insn_ctz(zero_matches); g = __insn_ctz(goal_matches); /* If we found c before '\0' we got a match. Note that if c == '\0' * then g == z, and we correctly return the address of the '\0' * rather than NULL. */ return (g <= z) ? ((char *)p) + (g >> 3) : NULL; } EXPORT_SYMBOL(strchr);
gpl-2.0
DJNoXD/rockchip-kernel-rk2918
arch/tile/kernel/asm-offsets.c
10037
2662
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Generates definitions from c-type structures used by assembly sources.
 */

#include <linux/kbuild.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <hv/hypervisor.h>

/* Check for compatible compiler early in the build. */
#ifdef CONFIG_TILEGX
# ifndef __tilegx__
# error Can only build TILE-Gx configurations with tilegx compiler
# endif
# ifndef __LP64__
# error Must not specify -m32 when building the TILE-Gx kernel
# endif
#else
# ifdef __tilegx__
# error Can not build TILEPro/TILE64 configurations with tilegx compiler
# endif
#endif

/* Emit assembler-visible offset constants (via kbuild's DEFINE()) for the
 * structure members that TILE assembly code needs to access directly.
 * This file is compiled only to extract these values; it is never linked.
 */
void foo(void)
{
	/* single_step_state: per-thread single-stepping scratch area. */
	DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \
	       offsetof(struct single_step_state, buffer));
	DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \
	       offsetof(struct single_step_state, flags));
	DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \
	       offsetof(struct single_step_state, orig_pc));
	DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \
	       offsetof(struct single_step_state, next_pc));
	DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \
	       offsetof(struct single_step_state, branch_next_pc));
	DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \
	       offsetof(struct single_step_state, update_value));

	/* thread_info: fields read by entry/exit and context-switch asm. */
	DEFINE(THREAD_INFO_TASK_OFFSET, \
	       offsetof(struct thread_info, task));
	DEFINE(THREAD_INFO_FLAGS_OFFSET, \
	       offsetof(struct thread_info, flags));
	DEFINE(THREAD_INFO_STATUS_OFFSET, \
	       offsetof(struct thread_info, status));
	DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \
	       offsetof(struct thread_info, homecache_cpu));
	DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \
	       offsetof(struct thread_info, step_state));

	/* task_struct: saved stack pointer and pc for context switch. */
	DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
	       offsetof(struct task_struct, thread.ksp));
	DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
	       offsetof(struct task_struct, thread.pc));

	/* Hypervisor topology descriptor dimensions. */
	DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \
	       offsetof(HV_Topology, width));
	DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \
	       offsetof(HV_Topology, height));

	/* Per-cpu irq statistics: syscall counter. */
	DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \
	       offsetof(irq_cpustat_t, irq_syscall_count));
}
gpl-2.0
nocoast/android_kernel_lge_g2
drivers/tty/serial/8250/8250_boca.c
12341
1261
/* * Copyright (C) 2005 Russell King. * Data taken from include/asm-i386/serial.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/serial_8250.h> #define PORT(_base,_irq) \ { \ .iobase = _base, \ .irq = _irq, \ .uartclk = 1843200, \ .iotype = UPIO_PORT, \ .flags = UPF_BOOT_AUTOCONF, \ } static struct plat_serial8250_port boca_data[] = { PORT(0x100, 12), PORT(0x108, 12), PORT(0x110, 12), PORT(0x118, 12), PORT(0x120, 12), PORT(0x128, 12), PORT(0x130, 12), PORT(0x138, 12), PORT(0x140, 12), PORT(0x148, 12), PORT(0x150, 12), PORT(0x158, 12), PORT(0x160, 12), PORT(0x168, 12), PORT(0x170, 12), PORT(0x178, 12), { }, }; static struct platform_device boca_device = { .name = "serial8250", .id = PLAT8250_DEV_BOCA, .dev = { .platform_data = boca_data, }, }; static int __init boca_init(void) { return platform_device_register(&boca_device); } module_init(boca_init); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("8250 serial probe module for Boca cards"); MODULE_LICENSE("GPL");
gpl-2.0
FeyoMx/MDSdevKernel_a7010
arch/sh/boards/mach-x3proto/setup.c
12341
5933
/* * arch/sh/boards/mach-x3proto/setup.c * * Renesas SH-X3 Prototype Board Support. * * Copyright (C) 2007 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/smc91x.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/usb/r8a66597.h> #include <linux/usb/m66592.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <mach/ilsel.h> #include <mach/hardware.h> #include <asm/smp-ops.h> static struct resource heartbeat_resources[] = { [0] = { .start = 0xb8140020, .end = 0xb8140020, .flags = IORESOURCE_MEM, }, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .num_resources = ARRAY_SIZE(heartbeat_resources), .resource = heartbeat_resources, }; static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, }; static struct resource smc91x_resources[] = { [0] = { .start = 0x18000300, .end = 0x18000300 + 0x10 - 1, .flags = IORESOURCE_MEM, }, [1] = { /* Filled in by ilsel */ .flags = IORESOURCE_IRQ, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = -1, .resource = smc91x_resources, .num_resources = ARRAY_SIZE(smc91x_resources), .dev = { .platform_data = &smc91x_info, }, }; static struct r8a66597_platdata r8a66597_data = { .xtal = R8A66597_PLATDATA_XTAL_12MHZ, .vif = 1, }; static struct resource r8a66597_usb_host_resources[] = { [0] = { .start = 0x18040000, .end = 0x18080000 - 1, .flags = IORESOURCE_MEM, }, [1] = { /* Filled in by ilsel */ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device r8a66597_usb_host_device = { .name = "r8a66597_hcd", .id = -1, .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = 
&r8a66597_data, }, .num_resources = ARRAY_SIZE(r8a66597_usb_host_resources), .resource = r8a66597_usb_host_resources, }; static struct m66592_platdata usbf_platdata = { .xtal = M66592_PLATDATA_XTAL_24MHZ, .vif = 1, }; static struct resource m66592_usb_peripheral_resources[] = { [0] = { .name = "m66592_udc", .start = 0x18080000, .end = 0x180c0000 - 1, .flags = IORESOURCE_MEM, }, [1] = { .name = "m66592_udc", /* Filled in by ilsel */ .flags = IORESOURCE_IRQ, }, }; static struct platform_device m66592_usb_peripheral_device = { .name = "m66592_udc", .id = -1, .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(m66592_usb_peripheral_resources), .resource = m66592_usb_peripheral_resources, }; static struct gpio_keys_button baseboard_buttons[NR_BASEBOARD_GPIOS] = { { .desc = "key44", .code = KEY_POWER, .active_low = 1, .wakeup = 1, }, { .desc = "key43", .code = KEY_SUSPEND, .active_low = 1, .wakeup = 1, }, { .desc = "key42", .code = KEY_KATAKANAHIRAGANA, .active_low = 1, }, { .desc = "key41", .code = KEY_SWITCHVIDEOMODE, .active_low = 1, }, { .desc = "key34", .code = KEY_F12, .active_low = 1, }, { .desc = "key33", .code = KEY_F11, .active_low = 1, }, { .desc = "key32", .code = KEY_F10, .active_low = 1, }, { .desc = "key31", .code = KEY_F9, .active_low = 1, }, { .desc = "key24", .code = KEY_F8, .active_low = 1, }, { .desc = "key23", .code = KEY_F7, .active_low = 1, }, { .desc = "key22", .code = KEY_F6, .active_low = 1, }, { .desc = "key21", .code = KEY_F5, .active_low = 1, }, { .desc = "key14", .code = KEY_F4, .active_low = 1, }, { .desc = "key13", .code = KEY_F3, .active_low = 1, }, { .desc = "key12", .code = KEY_F2, .active_low = 1, }, { .desc = "key11", .code = KEY_F1, .active_low = 1, }, }; static struct gpio_keys_platform_data baseboard_buttons_data = { .buttons = baseboard_buttons, .nbuttons = ARRAY_SIZE(baseboard_buttons), }; static struct platform_device 
baseboard_buttons_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &baseboard_buttons_data, }, }; static struct platform_device *x3proto_devices[] __initdata = { &heartbeat_device, &smc91x_device, &r8a66597_usb_host_device, &m66592_usb_peripheral_device, &baseboard_buttons_device, }; static void __init x3proto_init_irq(void) { plat_irq_setup_pins(IRQ_MODE_IRL3210); /* Set ICR0.LVLMODE */ __raw_writel(__raw_readl(0xfe410000) | (1 << 21), 0xfe410000); } static int __init x3proto_devices_setup(void) { int ret, i; /* * IRLs are only needed for ILSEL mappings, so flip over the INTC * pins at a later point to enable the GPIOs to settle. */ x3proto_init_irq(); /* * Now that ILSELs are available, set up the baseboard GPIOs. */ ret = x3proto_gpio_setup(); if (unlikely(ret)) return ret; /* * Propagate dynamic GPIOs for the baseboard button device. */ for (i = 0; i < ARRAY_SIZE(baseboard_buttons); i++) baseboard_buttons[i].gpio = x3proto_gpio_chip.base + i; r8a66597_usb_host_resources[1].start = r8a66597_usb_host_resources[1].end = ilsel_enable(ILSEL_USBH_I); m66592_usb_peripheral_resources[1].start = m66592_usb_peripheral_resources[1].end = ilsel_enable(ILSEL_USBP_I); smc91x_resources[1].start = smc91x_resources[1].end = ilsel_enable(ILSEL_LAN); return platform_add_devices(x3proto_devices, ARRAY_SIZE(x3proto_devices)); } device_initcall(x3proto_devices_setup); static void __init x3proto_setup(char **cmdline_p) { register_smp_ops(&shx3_smp_ops); } static struct sh_machine_vector mv_x3proto __initmv = { .mv_name = "x3proto", .mv_setup = x3proto_setup, };
gpl-2.0
Kali-/tf101-kernel
arch/arm/mm/mmu.c
54
29962
/* * linux/arch/arm/mm/mmu.c * * Copyright (C) 1995-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/mman.h> #include <linux/nodemask.h> #include <linux/memblock.h> #include <linux/sort.h> #include <linux/fs.h> #include <asm/cputype.h> #include <asm/sections.h> #include <asm/cachetype.h> #include <asm/setup.h> #include <asm/sizes.h> #include <asm/smp_plat.h> #include <asm/tlb.h> #include <asm/highmem.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "mm.h" DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); /* * empty_zero_page is a special page that is used for * zero-initialized data and COW. */ struct page *empty_zero_page; EXPORT_SYMBOL(empty_zero_page); /* * The pmd table for the upper-most set of pages. 
*/ pmd_t *top_pmd; #define CPOLICY_UNCACHED 0 #define CPOLICY_BUFFERED 1 #define CPOLICY_WRITETHROUGH 2 #define CPOLICY_WRITEBACK 3 #define CPOLICY_WRITEALLOC 4 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK; static unsigned int ecc_mask __initdata = 0; pgprot_t pgprot_user; pgprot_t pgprot_kernel; EXPORT_SYMBOL(pgprot_user); EXPORT_SYMBOL(pgprot_kernel); struct cachepolicy { const char policy[16]; unsigned int cr_mask; unsigned int pmd; unsigned int pte; }; static struct cachepolicy cache_policies[] __initdata = { { .policy = "uncached", .cr_mask = CR_W|CR_C, .pmd = PMD_SECT_UNCACHED, .pte = L_PTE_MT_UNCACHED, }, { .policy = "buffered", .cr_mask = CR_C, .pmd = PMD_SECT_BUFFERED, .pte = L_PTE_MT_BUFFERABLE, }, { .policy = "writethrough", .cr_mask = 0, .pmd = PMD_SECT_WT, .pte = L_PTE_MT_WRITETHROUGH, }, { .policy = "writeback", .cr_mask = 0, .pmd = PMD_SECT_WB, .pte = L_PTE_MT_WRITEBACK, }, { .policy = "writealloc", .cr_mask = 0, .pmd = PMD_SECT_WBWA, .pte = L_PTE_MT_WRITEALLOC, } }; /* * These are useful for identifying cache coherency * problems by allowing the cache or the cache and * writebuffer to be turned off. (Note: the write * buffer should not be on and the cache off). */ static int __init early_cachepolicy(char *p) { int i; for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { int len = strlen(cache_policies[i].policy); if (memcmp(p, cache_policies[i].policy, len) == 0) { cachepolicy = i; cr_alignment &= ~cache_policies[i].cr_mask; cr_no_alignment &= ~cache_policies[i].cr_mask; break; } } if (i == ARRAY_SIZE(cache_policies)) printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n"); /* * This restriction is partly to do with the way we boot; it is * unpredictable to have memory mapped using two different sets of * memory attributes (shared, type, and cache attribs). We can not * change these attributes once the initial assembly has setup the * page tables. 
*/ if (cpu_architecture() >= CPU_ARCH_ARMv6) { printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n"); cachepolicy = CPOLICY_WRITEBACK; } flush_cache_all(); set_cr(cr_alignment); return 0; } early_param("cachepolicy", early_cachepolicy); static int __init early_nocache(char *__unused) { char *p = "buffered"; printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); early_cachepolicy(p); return 0; } early_param("nocache", early_nocache); static int __init early_nowrite(char *__unused) { char *p = "uncached"; printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); early_cachepolicy(p); return 0; } early_param("nowb", early_nowrite); static int __init early_ecc(char *p) { if (memcmp(p, "on", 2) == 0) ecc_mask = PMD_PROTECTION; else if (memcmp(p, "off", 3) == 0) ecc_mask = 0; return 0; } early_param("ecc", early_ecc); static int __init noalign_setup(char *__unused) { cr_alignment &= ~CR_A; cr_no_alignment &= ~CR_A; set_cr(cr_alignment); return 1; } __setup("noalign", noalign_setup); #ifndef CONFIG_SMP void adjust_cr(unsigned long mask, unsigned long set) { unsigned long flags; mask &= ~CR_A; set &= mask; local_irq_save(flags); cr_no_alignment = (cr_no_alignment & ~mask) | set; cr_alignment = (cr_alignment & ~mask) | set; set_cr((get_cr() & ~mask) | set); local_irq_restore(flags); } #endif #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE static struct mem_type mem_types[] = { [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | L_PTE_SHARED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, .domain = DOMAIN_IO, }, [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE, .domain = DOMAIN_IO, }, [MT_DEVICE_CACHED] = { /* ioremap_cached */ .prot_pte = 
PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB, .domain = DOMAIN_IO, }, [MT_DEVICE_WC] = { /* ioremap_wc */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE, .domain = DOMAIN_IO, }, [MT_UNCACHED] = { .prot_pte = PROT_PTE_DEVICE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_IO, }, [MT_CACHECLEAN] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, }, [MT_MINICLEAN] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, .domain = DOMAIN_KERNEL, }, [MT_LOW_VECTORS] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_USER, }, [MT_HIGH_VECTORS] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_USER | L_PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_USER, }, [MT_MEMORY] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, [MT_ROM] = { .prot_sect = PMD_TYPE_SECT, .domain = DOMAIN_KERNEL, }, [MT_MEMORY_NONCACHED] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, [MT_MEMORY_DTCM] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, }, [MT_MEMORY_ITCM] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_USER | L_PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_IO, }, }; const struct mem_type *get_mem_type(unsigned int type) { return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL; } EXPORT_SYMBOL(get_mem_type); /* * Adjust the PMD section entries according to the CPU in use. 
*/ static void __init build_mem_type_table(void) { struct cachepolicy *cp; unsigned int cr = get_cr(); unsigned int user_pgprot, kern_pgprot, vecs_pgprot; int cpu_arch = cpu_architecture(); int i; if (cpu_arch < CPU_ARCH_ARMv6) { #if defined(CONFIG_CPU_DCACHE_DISABLE) if (cachepolicy > CPOLICY_BUFFERED) cachepolicy = CPOLICY_BUFFERED; #elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH) if (cachepolicy > CPOLICY_WRITETHROUGH) cachepolicy = CPOLICY_WRITETHROUGH; #endif } if (cpu_arch < CPU_ARCH_ARMv5) { if (cachepolicy >= CPOLICY_WRITEALLOC) cachepolicy = CPOLICY_WRITEBACK; ecc_mask = 0; } #ifdef CONFIG_SMP cachepolicy = CPOLICY_WRITEALLOC; #endif /* * Strip out features not present on earlier architectures. * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those * without extended page tables don't have the 'Shared' bit. */ if (cpu_arch < CPU_ARCH_ARMv5) for (i = 0; i < ARRAY_SIZE(mem_types); i++) mem_types[i].prot_sect &= ~PMD_SECT_TEX(7); if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3()) for (i = 0; i < ARRAY_SIZE(mem_types); i++) mem_types[i].prot_sect &= ~PMD_SECT_S; /* * ARMv5 and lower, bit 4 must be set for page tables (was: cache * "update-able on write" bit on ARM610). However, Xscale and * Xscale3 require this bit to be cleared. */ if (cpu_is_xscale() || cpu_is_xsc3()) { for (i = 0; i < ARRAY_SIZE(mem_types); i++) { mem_types[i].prot_sect &= ~PMD_BIT4; mem_types[i].prot_l1 &= ~PMD_BIT4; } } else if (cpu_arch < CPU_ARCH_ARMv6) { for (i = 0; i < ARRAY_SIZE(mem_types); i++) { if (mem_types[i].prot_l1) mem_types[i].prot_l1 |= PMD_BIT4; if (mem_types[i].prot_sect) mem_types[i].prot_sect |= PMD_BIT4; } } /* * Mark the device areas according to the CPU/architecture. */ if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) { if (!cpu_is_xsc3()) { /* * Mark device regions on ARMv6+ as execute-never * to prevent speculative instruction fetches. 
*/ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN; } if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { /* * For ARMv7 with TEX remapping, * - shared device is SXCB=1100 * - nonshared device is SXCB=0100 * - write combine device mem is SXCB=0001 * (Uncached Normal memory) */ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1); mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1); mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE; } else if (cpu_is_xsc3()) { /* * For Xscale3, * - shared device is TEXCB=00101 * - nonshared device is TEXCB=01000 * - write combine device mem is TEXCB=00100 * (Inner/Outer Uncacheable in xsc3 parlance) */ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED; mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2); mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1); } else { /* * For ARMv6 and ARMv7 without TEX remapping, * - shared device is TEXCB=00001 * - nonshared device is TEXCB=01000 * - write combine device mem is TEXCB=00100 * (Uncached Normal in ARMv6 parlance). */ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED; mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2); mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1); } } else { /* * On others, write combining is "Uncached/Buffered" */ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE; } /* * Now deal with the memory-type mappings */ cp = &cache_policies[cachepolicy]; vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; #ifndef CONFIG_SMP /* * Only use write-through for non-SMP systems */ if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; #endif /* * Enable CPU-specific coherency if supported. * (Only available on XSC3 at the moment.) 
*/ if (arch_is_coherent() && cpu_is_xsc3()) { mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; } /* * ARMv6 and above have extended page tables. */ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { /* * Mark cache clean areas and XIP ROM read only * from SVC mode and no access from userspace. */ mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; #ifdef CONFIG_SMP /* * Mark memory with the "shared" attribute for SMP systems */ user_pgprot |= L_PTE_SHARED; kern_pgprot |= L_PTE_SHARED; vecs_pgprot |= L_PTE_SHARED; mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; #endif } /* * Non-cacheable Normal - intended for memory areas that must * not cause dirty cache line writebacks when used */ if (cpu_arch >= CPU_ARCH_ARMv6) { if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { /* Non-cacheable Normal is XCB = 001 */ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERED; } else { /* For both ARMv6 and non-TEX-remapping ARMv7 */ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_TEX(1); } } else { mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; } for (i = 0; i < 16; i++) { unsigned long v = pgprot_val(protection_map[i]); protection_map[i] = __pgprot(v | user_pgprot); } mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot; mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot; pgprot_user = 
__pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot); mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; mem_types[MT_MEMORY].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; mem_types[MT_ROM].prot_sect |= cp->pmd; switch (cp->pmd) { case PMD_SECT_WT: mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; break; case PMD_SECT_WB: case PMD_SECT_WBWA: mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB; break; } printk("Memory policy: ECC %sabled, Data cache %s\n", ecc_mask ? "en" : "dis", cp->policy); for (i = 0; i < ARRAY_SIZE(mem_types); i++) { struct mem_type *t = &mem_types[i]; if (t->prot_l1) t->prot_l1 |= PMD_DOMAIN(t->domain); if (t->prot_sect) t->prot_sect |= PMD_DOMAIN(t->domain); } } #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { if (!pfn_valid(pfn)) return pgprot_noncached(vma_prot); else if (file->f_flags & O_SYNC) return pgprot_writecombine(vma_prot); return vma_prot; } EXPORT_SYMBOL(phys_mem_access_prot); #endif #define vectors_base() (vectors_high() ? 
0xffff0000 : 0) static void __init *early_alloc(unsigned long sz) { void *ptr = __va(memblock_alloc(sz, sz)); memset(ptr, 0, sz); return ptr; } static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) { if (pmd_none(*pmd)) { pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); __pmd_populate(pmd, __pa(pte) | prot); } BUG_ON(pmd_bad(*pmd)); return pte_offset_kernel(pmd, addr); } static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, const struct mem_type *type) { pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); do { set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); } static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long phys, const struct mem_type *type) { pmd_t *pmd = pmd_offset(pgd, addr); /* * Try a section mapping - end, addr and phys must all be aligned * to a section boundary. Note that PMDs refer to the individual * L1 entries, whereas PGDs refer to a group of L1 entries making * up one logical pointer to an L2 table. */ if (((addr | end | phys) & ~SECTION_MASK) == 0) { pmd_t *p = pmd; if (addr & SECTION_SIZE) pmd++; do { *pmd = __pmd(phys | type->prot_sect); phys += SECTION_SIZE; } while (pmd++, addr += SECTION_SIZE, addr != end); flush_pmd_entry(p); } else { /* * No need to loop; pte's aren't interested in the * individual L1 entries. */ alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); } } static void __init create_36bit_mapping(struct map_desc *md, const struct mem_type *type) { unsigned long phys, addr, length, end; pgd_t *pgd; addr = md->virtual; phys = (unsigned long)__pfn_to_phys(md->pfn); length = PAGE_ALIGN(md->length); if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) { printk(KERN_ERR "MM: CPU does not support supersection " "mapping for 0x%08llx at 0x%08lx\n", __pfn_to_phys((u64)md->pfn), addr); return; } /* N.B. 
ARMv6 supersections are only defined to work with domain 0. * Since domain assignments can in fact be arbitrary, the * 'domain == 0' check below is required to insure that ARMv6 * supersections are only allocated for domain 0 regardless * of the actual domain assignments in use. */ if (type->domain) { printk(KERN_ERR "MM: invalid domain in supersection " "mapping for 0x%08llx at 0x%08lx\n", __pfn_to_phys((u64)md->pfn), addr); return; } if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { printk(KERN_ERR "MM: cannot create mapping for " "0x%08llx at 0x%08lx invalid alignment\n", __pfn_to_phys((u64)md->pfn), addr); return; } /* * Shift bits [35:32] of address into bits [23:20] of PMD * (See ARMv6 spec). */ phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); pgd = pgd_offset_k(addr); end = addr + length; do { pmd_t *pmd = pmd_offset(pgd, addr); int i; for (i = 0; i < 16; i++) *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER); addr += SUPERSECTION_SIZE; phys += SUPERSECTION_SIZE; pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT; } while (addr != end); } /* * Create the page directory entries and any necessary * page tables for the mapping specified by `md'. We * are able to cope here with varying sizes and address * offsets, and we take full advantage of sections and * supersections. 
*/ static void __init create_mapping(struct map_desc *md) { unsigned long phys, addr, length, end; const struct mem_type *type; pgd_t *pgd; if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { printk(KERN_WARNING "BUG: not creating mapping for " "0x%08llx at 0x%08lx in user region\n", __pfn_to_phys((u64)md->pfn), md->virtual); return; } if ((md->type == MT_DEVICE || md->type == MT_ROM) && md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx " "overlaps vmalloc space\n", __pfn_to_phys((u64)md->pfn), md->virtual); } type = &mem_types[md->type]; /* * Catch 36-bit addresses */ if (md->pfn >= 0x100000) { create_36bit_mapping(md, type); return; } addr = md->virtual & PAGE_MASK; phys = (unsigned long)__pfn_to_phys(md->pfn); length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " "be mapped using pages, ignoring.\n", __pfn_to_phys(md->pfn), addr); return; } pgd = pgd_offset_k(addr); end = addr + length; do { unsigned long next = pgd_addr_end(addr, end); alloc_init_section(pgd, addr, next, phys, type); phys += next - addr; addr = next; } while (pgd++, addr != end); } /* * Create the architecture specific mappings */ void __init iotable_init(struct map_desc *io_desc, int nr) { int i; for (i = 0; i < nr; i++) create_mapping(io_desc + i); } static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M); /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. This can be used to increase (or decrease) the vmalloc * area - the default is 128m. 
*/ static int __init early_vmalloc(char *arg) { unsigned long vmalloc_reserve = memparse(arg, NULL); if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; printk(KERN_WARNING "vmalloc area too small, limiting to %luMB\n", vmalloc_reserve >> 20); } if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); printk(KERN_WARNING "vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); return 0; } early_param("vmalloc", early_vmalloc); phys_addr_t lowmem_end_addr; static void __init sanity_check_meminfo(void) { int i, j, highmem = 0; lowmem_end_addr = __pa(vmalloc_min - 1) + 1; for (i = 0, j = 0; i < meminfo.nr_banks; i++) { struct membank *bank = &meminfo.bank[j]; *bank = meminfo.bank[i]; #ifdef CONFIG_HIGHMEM if (__va(bank->start) > vmalloc_min || __va(bank->start) < (void *)PAGE_OFFSET) highmem = 1; bank->highmem = highmem; /* * Split those memory banks which are partially overlapping * the vmalloc area greatly simplifying things later. */ if (__va(bank->start) < vmalloc_min && bank->size > vmalloc_min - __va(bank->start)) { if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " "ignoring high memory\n"); } else { memmove(bank + 1, bank, (meminfo.nr_banks - i) * sizeof(*bank)); meminfo.nr_banks++; i++; bank[1].size -= vmalloc_min - __va(bank->start); bank[1].start = __pa(vmalloc_min - 1) + 1; bank[1].highmem = highmem = 1; j++; } bank->size = vmalloc_min - __va(bank->start); } #else bank->highmem = highmem; /* * Check whether this memory bank would entirely overlap * the vmalloc area. */ if (__va(bank->start) >= vmalloc_min || __va(bank->start) < (void *)PAGE_OFFSET) { printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " "(vmalloc region overlap).\n", bank->start, bank->start + bank->size - 1); continue; } /* * Check whether this memory bank would partially overlap * the vmalloc area. 
*/ if (__va(bank->start + bank->size) > vmalloc_min || __va(bank->start + bank->size) < __va(bank->start)) { unsigned long newsize = vmalloc_min - __va(bank->start); printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " "to -%.8lx (vmalloc region overlap).\n", bank->start, bank->start + bank->size - 1, bank->start + newsize - 1); bank->size = newsize; } #endif j++; } #ifdef CONFIG_HIGHMEM if (highmem) { const char *reason = NULL; if (cache_is_vipt_aliasing()) { /* * Interactions between kmap and other mappings * make highmem support with aliasing VIPT caches * rather difficult. */ reason = "with VIPT aliasing cache"; #ifdef CONFIG_SMP } else if (tlb_ops_need_broadcast()) { /* * kmap_high needs to occasionally flush TLB entries, * however, if the TLB entries need to be broadcast * we may deadlock: * kmap_high(irqs off)->flush_all_zero_pkmaps-> * flush_tlb_kernel_range->smp_call_function_many * (must not be called with irqs off) */ reason = "without hardware TLB ops broadcasting"; #endif } if (reason) { printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", reason); while (j > 0 && meminfo.bank[j - 1].highmem) j--; } } #endif meminfo.nr_banks = j; } static inline void prepare_page_table(void) { unsigned long addr; phys_addr_t end; /* * Clear out all the mappings below the kernel image. */ for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); #ifdef CONFIG_XIP_KERNEL /* The XIP kernel is mapped in the module area -- skip over it */ addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK; #endif for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); /* * Find the end of the first block of lowmem. This is complicated * when we use memblock. */ end = memblock.memory.region[0].base + memblock.memory.region[0].size; if (end >= lowmem_end_addr) end = lowmem_end_addr; /* * Clear out all the kernel space mappings, except for the first * memory bank, up to the end of the vmalloc region. 
*/ for (addr = __phys_to_virt(end); addr < VMALLOC_END; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); } /* * Reserve the special regions of memory */ void __init arm_mm_memblock_reserve(void) { /* * Reserve the page tables. These are already in use, * and can only be in node 0. */ memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); #ifdef CONFIG_SA1111 /* * Because of the SA1111 DMA bug, we want to preserve our * precious DMA-able memory... */ memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); #endif } /* * Set up device the mappings. Since we clear out the page tables for all * mappings above VMALLOC_END, we will remove any debug device mappings. * This means you have to be careful how you debug this function, or any * called function. This means you can't use any function or debugging * method which may touch any device, otherwise the kernel _will_ crash. */ static void __init devicemaps_init(struct machine_desc *mdesc) { struct map_desc map; unsigned long addr; void *vectors; /* * Allocate the vector page early. */ vectors = early_alloc(PAGE_SIZE); for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); /* * Map the kernel if it is XIP. * It is always first in the modulearea. */ #ifdef CONFIG_XIP_KERNEL map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); map.virtual = MODULES_VADDR; map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; map.type = MT_ROM; create_mapping(&map); #endif /* * Map the cache flushing regions. */ #ifdef FLUSH_BASE map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); map.virtual = FLUSH_BASE; map.length = SZ_1M; map.type = MT_CACHECLEAN; create_mapping(&map); #endif #ifdef FLUSH_BASE_MINICACHE map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); map.virtual = FLUSH_BASE_MINICACHE; map.length = SZ_1M; map.type = MT_MINICLEAN; create_mapping(&map); #endif /* * Create a mapping for the machine vectors at the high-vectors * location (0xffff0000). 
If we aren't using high-vectors, also * create a mapping at the low-vectors virtual address. */ map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map.virtual = 0xffff0000; map.length = PAGE_SIZE; map.type = MT_HIGH_VECTORS; create_mapping(&map); if (!vectors_high()) { map.virtual = 0; map.type = MT_LOW_VECTORS; create_mapping(&map); } /* * Ask the machine support to map in the statically mapped devices. */ if (mdesc->map_io) mdesc->map_io(); /* * Finally flush the caches and tlb to ensure that we're in a * consistent state wrt the writebuffer. This also ensures that * any write-allocated cache lines in the vector page are written * back. After this point, we can start to touch devices again. */ local_flush_tlb_all(); flush_cache_all(); } static void __init kmap_init(void) { #ifdef CONFIG_HIGHMEM pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), PKMAP_BASE, _PAGE_KERNEL_TABLE); #endif } static void __init map_lowmem(void) { int i; /* Map all the lowmem memory banks. */ for (i = 0; i < memblock.memory.cnt; i++) { phys_addr_t start = memblock.memory.region[i].base; phys_addr_t end = start + memblock.memory.region[i].size; struct map_desc map; if (end >= lowmem_end_addr) end = lowmem_end_addr; if (start >= end) break; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); map.length = end - start; map.type = MT_MEMORY; create_mapping(&map); } } static int __init meminfo_cmp(const void *_a, const void *_b) { const struct membank *a = _a, *b = _b; long cmp = bank_pfn_start(a) - bank_pfn_start(b); return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; } /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. 
*/ void __init paging_init(struct machine_desc *mdesc) { void *zero_page; sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); build_mem_type_table(); sanity_check_meminfo(); prepare_page_table(); map_lowmem(); devicemaps_init(mdesc); kmap_init(); top_pmd = pmd_off_k(0xffff0000); /* allocate the zero page. */ zero_page = early_alloc(PAGE_SIZE); bootmem_init(); empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); } /* * In order to soft-boot, we need to insert a 1:1 mapping in place of * the user-mode pages. This will then ensure that we have predictable * results when turning the mmu off */ void setup_mm_for_reboot(char mode) { unsigned long base_pmdval; pgd_t *pgd; int i; /* * We need to access to user-mode page tables here. For kernel threads * we don't have any user-mode mappings so we use the context that we * "borrowed". */ pgd = current->active_mm->pgd; base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) base_pmdval |= PMD_BIT4; for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) { unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval; pmd_t *pmd; pmd = pmd_off(pgd, i << PGDIR_SHIFT); pmd[0] = __pmd(pmdval); pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1))); flush_pmd_entry(pmd); } local_flush_tlb_all(); }
gpl-2.0
ShanghaiTimes/Audacity2015
lib-src/portaudio-v19/examples/paex_saw.c
54
5015
/** @file paex_saw.c @ingroup examples_src @brief Play a simple (aliasing) sawtooth wave. @author Phil Burk http://www.softsynth.com */ /* * $Id: paex_saw.c 1752 2011-09-08 03:21:55Z philburk $ * * This program uses the PortAudio Portable Audio Library. * For more information see: http://www.portaudio.com * Copyright (c) 1999-2000 Ross Bencina and Phil Burk * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files * (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* * The text above constitutes the entire PortAudio license; however, * the PortAudio community also makes the following non-binding requests: * * Any person wishing to distribute modifications to the Software is * requested to send the modifications to the original developer so that * they can be incorporated into the canonical version. It is also * requested that these non-binding requests be included along with the * license above. 
*/ #include <stdio.h> #include <math.h> #include "portaudio.h" #define NUM_SECONDS (4) #define SAMPLE_RATE (44100) typedef struct { float left_phase; float right_phase; } paTestData; /* This routine will be called by the PortAudio engine when audio is needed. ** It may called at interrupt level on some machines so don't do anything ** that could mess up the system like calling malloc() or free(). */ static int patestCallback( const void *inputBuffer, void *outputBuffer, unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, void *userData ) { /* Cast data passed through stream to our structure. */ paTestData *data = (paTestData*)userData; float *out = (float*)outputBuffer; unsigned int i; (void) inputBuffer; /* Prevent unused variable warning. */ for( i=0; i<framesPerBuffer; i++ ) { *out++ = data->left_phase; /* left */ *out++ = data->right_phase; /* right */ /* Generate simple sawtooth phaser that ranges between -1.0 and 1.0. */ data->left_phase += 0.01f; /* When signal reaches top, drop back down. */ if( data->left_phase >= 1.0f ) data->left_phase -= 2.0f; /* higher pitch so we can distinguish left and right. */ data->right_phase += 0.03f; if( data->right_phase >= 1.0f ) data->right_phase -= 2.0f; } return 0; } /*******************************************************************/ static paTestData data; int main(void); int main(void) { PaStream *stream; PaError err; printf("PortAudio Test: output sawtooth wave.\n"); /* Initialize our data for use by callback. */ data.left_phase = data.right_phase = 0.0; /* Initialize library before making any other calls. */ err = Pa_Initialize(); if( err != paNoError ) goto error; /* Open an audio I/O stream. 
*/ err = Pa_OpenDefaultStream( &stream, 0, /* no input channels */ 2, /* stereo output */ paFloat32, /* 32 bit floating point output */ SAMPLE_RATE, 256, /* frames per buffer */ patestCallback, &data ); if( err != paNoError ) goto error; err = Pa_StartStream( stream ); if( err != paNoError ) goto error; /* Sleep for several seconds. */ Pa_Sleep(NUM_SECONDS*1000); err = Pa_StopStream( stream ); if( err != paNoError ) goto error; err = Pa_CloseStream( stream ); if( err != paNoError ) goto error; Pa_Terminate(); printf("Test finished.\n"); return err; error: Pa_Terminate(); fprintf( stderr, "An error occured while using the portaudio stream\n" ); fprintf( stderr, "Error number: %d\n", err ); fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); return err; }
gpl-2.0
acuicultor/android_kernel_oneplus_msm8974
kernel/watchdog.c
54
17032
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

/* Global on/off switch for both detectors; toggled via sysctl/boot params. */
int watchdog_enabled = 1;
/* Hard-lockup threshold in seconds; the soft threshold is twice this. */
int __read_mostly watchdog_thresh = 10;

/* Per-cpu timestamp of the last time the softlockup watchdog was touched. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Per-cpu high-priority kthread that keeps touching the timestamp. */
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
/* Per-cpu hrtimer that drives watchdog_timer_fn(). */
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
/* Incremented from the hrtimer; a stalled count indicates a hard lockup. */
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/* Per-cpu perf counter whose overflow NMI samples the hrtimer count. */
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

/*
 * nmi_watchdog= boot parameter: "panic"/"nopanic" select the hard-lockup
 * reaction, "0" disables the watchdog entirely.
 */
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

/* softlockup_panic= boot parameter: non-zero makes a soft lockup panic. */
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

/* "nowatchdog" boot parameter disables both detectors. */
static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated alias for "nowatchdog" */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

/* Period of the per-cpu watchdog hrtimer, in nanoseconds. */
static u64 get_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = raw_smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

/* Reset this cpu's softlockup timer; a 0 timestamp means "just touched". */
void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

/* Reset the softlockup timer on every online cpu. */
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlock check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* Suppress the next hard-lockup check on this cpu (and touch softlockup). */
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	__raw_get_cpu_var(watchdog_nmi_touch) = true;
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

/* Like touch_softlockup_watchdog(), but also requests a sched-clock resync. */
void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/* watchdog detector functions */
/*
 * Returns 1 when this cpu's hrtimer interrupt count has not advanced since
 * the previous check, i.e. the timer interrupt appears stuck.
 */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
/* Same stalled-hrtimer-count test as is_hardlockup(), but for another cpu. */
static int is_hardlockup_other_cpu(int cpu)
{
	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);

	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
		return 1;

	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
	return 0;
}

/*
 * Hard-lockup detection without a perf NMI: each cpu's hrtimer callback
 * checks the *next* online cpu's interrupt count.
 */
static void watchdog_check_hardlockup_other_cpu(void)
{
	int cpu;

	/*
	 * Test for hardlockups every 3 samples.  The sample period is
	 *  watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
	 *  watchdog_thresh (over by 20%).
	 */
	if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
		return;

	/* check for a hardlockup on the next cpu */
	cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	if (cpu == smp_processor_id())
		return;

	/* a touch request suppresses exactly one check */
	if (per_cpu(watchdog_nmi_touch, cpu) == true) {
		per_cpu(watchdog_nmi_touch, cpu) = false;
		return;
	}

	if (is_hardlockup_other_cpu(cpu)) {
		/* only warn once */
		if (per_cpu(hard_watchdog_warn, cpu) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", cpu);

		per_cpu(hard_watchdog_warn, cpu) = true;
	} else {
		per_cpu(hard_watchdog_warn, cpu) = false;
	}
}
#else
static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
#endif

/*
 * Returns the lockup duration in seconds when this cpu's touch timestamp
 * is older than the soft threshold, 0 otherwise.
 */
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/* Template attribute for the per-cpu cycle-counting perf event. */
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem (runs in NMI context) */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* Feed the hard-lockup detector: proof this cpu's timer interrupt runs. */
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
/*
 * Per-cpu hrtimer callback: bumps the hard-lockup heartbeat, wakes the
 * softlockup kthread, and reports a soft lockup when the kthread has not
 * touched the timestamp within the soft threshold.
 */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* test for hardlockups on the next cpu */
	watchdog_check_hardlockup_other_cpu();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		/* NOTE(review): duration is a signed int printed with %u */
		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}


/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
	struct sched_param param = { .sched_priority = 0 };
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* initialize timestamp */
	__touch_watchdog();

	/* kick off the timer for the hardlockup detector */
	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly (kicked by the hrtimer callback function) once every
	 * get_sample_period() seconds (4 seconds by default) to reset the
	 * softlockup timestamp. If this gets delayed for more than
	 * 2*watchdog_thresh seconds then the debug-printout triggers in
	 * watchdog_timer_fn().
	 */
	while (!kthread_should_stop()) {
		__touch_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		set_current_state(TASK_INTERRUPTIBLE);
	}
	/*
	 * Drop the policy/priority elevation during thread exit to avoid a
	 * scheduling latency spike.
	 */
	__set_current_state(TASK_RUNNING);
	sched_setscheduler(current, SCHED_NORMAL, &param);
	return 0;
}


#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/*
 * Create and enable the perf cycle counter whose overflow NMI drives the
 * hard-lockup check on @cpu.  Returns 0 on success (or when already set up),
 * otherwise the perf error code.
 */
static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
	if (!IS_ERR(event)) {
		pr_info("enabled, takes one hw-pmu counter.\n");
		goto out_save;
	}

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

/* Tear down @cpu's hard-lockup perf counter, if any. */
static void watchdog_nmi_disable(int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */

/* prepare/enable/disable routines */
/* Initialize @cpu's watchdog hrtimer (called from CPU_UP_PREPARE). */
static void watchdog_prepare_cpu(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	WARN_ON(per_cpu(softlockup_watchdog, cpu));
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
	/*
	 * The new cpu will be marked online before the first hrtimer interrupt
	 * runs on it.  If another cpu tests for a hardlockup on the new cpu
	 * before it has run its first hrtimer, it will get a false positive.
	 * Touch the watchdog on the new cpu to delay the first check for at
	 * least 3 sampling periods to guarantee one hrtimer has run on the new
	 * cpu.
	 */
	per_cpu(watchdog_nmi_touch, cpu) = true;
#endif
}

/*
 * Start both detectors on @cpu: the perf NMI event and the SCHED_FIFO
 * softlockup kthread.  Returns 0 when at least the kthread started.
 */
static int watchdog_enable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	int err = 0;

	/* enable the perf event */
	err = watchdog_nmi_enable(cpu);

	/* Regardless of err above, fall through and start softlockup */

	/* create the watchdog thread */
	if (!p) {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
		p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu);
		if (IS_ERR(p)) {
			pr_err("softlockup watchdog for %i failed\n", cpu);
			if (!err) {
				/* if hardlockup hasn't already set this */
				err = PTR_ERR(p);
				/* and disable the perf event */
				watchdog_nmi_disable(cpu);
			}
			goto out;
		}
		sched_setscheduler(p, SCHED_FIFO, &param);
		kthread_bind(p, cpu);
		per_cpu(watchdog_touch_ts, cpu) = 0;
		per_cpu(softlockup_watchdog, cpu) = p;
		wake_up_process(p);
	}

out:
	return err;
}

/* Stop the hrtimer, perf event and kthread on @cpu, in that order. */
static void watchdog_disable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/*
	 * cancel the timer first to stop incrementing the stats
	 * and waking up the kthread
	 */
	hrtimer_cancel(hrtimer);

	/* disable the perf event */
	watchdog_nmi_disable(cpu);

	/* stop the watchdog thread */
	if (p) {
		per_cpu(softlockup_watchdog, cpu) = NULL;
		kthread_stop(p);
	}
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
/* Enable the watchdog on every online cpu; partial success counts. */
static void watchdog_enable_all_cpus(void)
{
	int cpu;

	watchdog_enabled = 0;

	for_each_online_cpu(cpu)
		if (!watchdog_enable(cpu))
			/* if any cpu succeeds, watchdog is considered
			   enabled for the system */
			watchdog_enabled = 1;

	if (!watchdog_enabled)
		pr_err("failed to be enabled on some cpus\n");

}

/* Disable the watchdog on every online cpu. */
static void watchdog_disable_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

	/* if all watchdogs are disabled, then they are disabled for the system */
	watchdog_enabled = 0;
}


/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		goto out;

	/* re-apply the (possibly changed) settings on all cpus */
	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

out:
	return ret;
}
#endif /* CONFIG_SYSCTL */


/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		watchdog_prepare_cpu(hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (watchdog_enabled)
			watchdog_enable(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		watchdog_disable(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		watchdog_disable(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}

	/*
	 * hardlockup and softlockup are not important enough
	 * to block cpu bring up.  Just always succeed and
	 * rely on printk output to flag problems.
	 */
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

/* Boot-time init: start the watchdog on the boot cpu and hook cpu hotplug. */
void __init lockup_detector_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(notifier_to_errno(err));

	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	return;
}
gpl-2.0
serj/kernel_graph_2.6.32_rhel
arch/sh/kernel/cpu/sh4a/clock-sh7343.c
566
6574
/*
 * arch/sh/kernel/cpu/sh4a/clock-sh7343.c
 *
 * SH7343 clock framework support
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>

/* SH7343 registers (clock pulse generator register addresses) */
#define FRQCR		0xa4150000
#define VCLKCR		0xa4150004
#define SCLKACR		0xa4150008
#define SCLKBCR		0xa415000c
#define PLLCR		0xa4150024
#define MSTPCR0		0xa4150030
#define MSTPCR1		0xa4150034
#define MSTPCR2		0xa4150038
#define DLLFRQ		0xa4150050

/* Fixed 32 KHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
	.name		= "rclk",
	.id		= -1,
	.rate		= 32768,
};

/*
 * Default rate for the root input clock, reset this with clk_set_rate()
 * from the platform code.
 */
struct clk extal_clk = {
	.name		= "extal",
	.id		= -1,
	.rate		= 33333333,
};

/* The dll block multiplies the 32khz r_clk, may be used instead of extal */
static unsigned long dll_recalc(struct clk *clk)
{
	unsigned long mult;

	/* PLLCR bit 12 selects the DLL; DLLFRQ then holds the multiplier */
	if (__raw_readl(PLLCR) & 0x1000)
		mult = __raw_readl(DLLFRQ);
	else
		mult = 0;

	return clk->parent->rate * mult;
}

static struct clk_ops dll_clk_ops = {
	.recalc		= dll_recalc,
};

static struct clk dll_clk = {
	.name		= "dll_clk",
	.id		= -1,
	.ops		= &dll_clk_ops,
	.parent		= &r_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* PLL output: parent rate times the multiplier encoded in FRQCR[28:24] */
static unsigned long pll_recalc(struct clk *clk)
{
	unsigned long mult = 1;

	/* PLLCR bit 14: PLL enabled; otherwise pass the parent rate through */
	if (__raw_readl(PLLCR) & 0x4000)
		mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);

	return clk->parent->rate * mult;
}

static struct clk_ops pll_clk_ops = {
	.recalc		= pll_recalc,
};

static struct clk pll_clk = {
	.name		= "pll_clk",
	.id		= -1,
	.ops		= &pll_clk_ops,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* Root clocks registered unconditionally by arch_clk_init() */
struct clk *main_clks[] = {
	&r_clk,
	&extal_clk,
	&dll_clk,
	&pll_clk,
};

/* Multiplier/divisor pairs for the div4 clocks; index matches the
 * hardware divider field encoding. */
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };

static struct clk_div_mult_table div4_table = {
	.divisors = divisors,
	.nr_divisors = ARRAY_SIZE(divisors),
	.multipliers = multipliers,
	.nr_multipliers = ARRAY_SIZE(multipliers),
};

enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
       DIV4_SIUA, DIV4_SIUB, DIV4_NR };

/* All div4 clocks derive from the PLL output */
#define DIV4(_str, _reg, _bit, _mask, _flags) \
  SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)

struct clk div4_clks[DIV4_NR] = {
	[DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0),
	[DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0),
	[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
};

struct clk div6_clks[] = {
	SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
};

/* Module-stop (gate) clocks; id is always -1 */
#define MSTP(_str, _parent, _reg, _bit, _flags) \
  SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags)

static struct clk mstp_clks[] = {
	/* MSTPCR0 */
	MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
	MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
	MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
	MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
	MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
	MSTP("intc3", &div4_clks[DIV4_P], MSTPCR0, 23, 0),
	MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 0),
	MSTP("dmac0", &div4_clks[DIV4_P], MSTPCR0, 21, 0),
	MSTP("sh0", &div4_clks[DIV4_P], MSTPCR0, 20, 0),
	MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0),
	MSTP("ubc0", &div4_clks[DIV4_P], MSTPCR0, 17, 0),
	MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
	MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
	MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
	MSTP("mfi0", &div4_clks[DIV4_P], MSTPCR0, 11, 0),
	MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
	MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
	MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
	MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
	MSTP("scif3", &div4_clks[DIV4_P], MSTPCR0, 4, 0),
	MSTP("sio0", &div4_clks[DIV4_P], MSTPCR0, 3, 0),
	MSTP("siof0", &div4_clks[DIV4_P], MSTPCR0, 2, 0),
	MSTP("siof1", &div4_clks[DIV4_P], MSTPCR0, 1, 0),
	/* MSTPCR1 */
	MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0),
	MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0),
	/* MSTPCR2 */
	MSTP("tpu0", &div4_clks[DIV4_P], MSTPCR2, 25, 0),
	MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0),
	MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0),
	MSTP("mmcif0", &div4_clks[DIV4_P], MSTPCR2, 17, 0),
	MSTP("sim0", &div4_clks[DIV4_P], MSTPCR2, 16, 0),
	MSTP("keysc0", &r_clk, MSTPCR2, 14, 0),
	MSTP("tsif0", &div4_clks[DIV4_P], MSTPCR2, 13, 0),
	MSTP("s3d40", &div4_clks[DIV4_P], MSTPCR2, 12, 0),
	MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0),
	MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0),
	MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
	MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0),
	MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0),
	MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0),
	MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
	MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
	MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
};

/*
 * Register all SH7343 clocks with the SH clock framework.
 * Stops at the first registration error and returns it (0 on success).
 */
int __init arch_clk_init(void)
{
	int k, ret = 0;

	/* autodetect extal or dll configuration */
	if (__raw_readl(PLLCR) & 0x1000)
		pll_clk.parent = &dll_clk;
	else
		pll_clk.parent = &extal_clk;

	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));

	if (!ret)
		ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));

	return ret;
}
gpl-2.0
spica234/HP-TestBuild-Repo-upwords-Sr3R
drivers/staging/comedi/drivers/das1800.c
566
49823
/* comedi/drivers/das1800.c Driver for Keitley das1700/das1800 series boards Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: das1800 Description: Keithley Metrabyte DAS1800 (& compatibles) Author: Frank Mori Hess <fmhess@users.sourceforge.net> Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st), DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao), DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da), DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da), DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st), DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc), DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st), DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr), DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc), DAS-1802AO (das-1802ao) Status: works The waveform analog output on the 'ao' cards is not supported. If you need it, send me (Frank Hess) an email. 
Configuration options: [0] - I/O port base address [1] - IRQ (optional, required for timed or externally triggered conversions) [2] - DMA0 (optional, requires irq) [3] - DMA1 (optional, requires irq and dma0) */ /* This driver supports the following Keithley boards: das-1701st das-1701st-da das-1701ao das-1702st das-1702st-da das-1702hr das-1702hr-da das-1702ao das-1801st das-1801st-da das-1801hc das-1801ao das-1802st das-1802st-da das-1802hr das-1802hr-da das-1802hc das-1802ao Options: [0] - base io address [1] - irq (optional, required for timed or externally triggered conversions) [2] - dma0 (optional, requires irq) [3] - dma1 (optional, requires irq and dma0) irq can be omitted, although the cmd interface will not work without it. analog input cmd triggers supported: start_src: TRIG_NOW | TRIG_EXT scan_begin_src: TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT scan_end_src: TRIG_COUNT convert_src: TRIG_TIMER | TRIG_EXT (TRIG_EXT requires scan_begin_src == TRIG_FOLLOW) stop_src: TRIG_COUNT | TRIG_EXT | TRIG_NONE scan_begin_src triggers TRIG_TIMER and TRIG_EXT use the card's 'burst mode' which limits the valid conversion time to 64 microseconds (convert_arg <= 64000). This limitation does not apply if scan_begin_src is TRIG_FOLLOW. NOTES: Only the DAS-1801ST has been tested by me. Unipolar and bipolar ranges cannot be mixed in the channel/gain list. TODO: Make it automatically allocate irq and dma channels if they are not specified Add support for analog out on 'ao' cards read insn for analog out */ #include <linux/interrupt.h> #include "../comedidev.h" #include <linux/ioport.h> #include <asm/dma.h> #include "8253.h" #include "comedi_fc.h" /* misc. 
defines */ #define DAS1800_SIZE 16 /* uses 16 io addresses */ #define FIFO_SIZE 1024 /* 1024 sample fifo */ #define TIMER_BASE 200 /* 5 Mhz master clock */ #define UNIPOLAR 0x4 /* bit that determines whether input range is uni/bipolar */ #define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */ /* Registers for the das1800 */ #define DAS1800_FIFO 0x0 #define DAS1800_QRAM 0x0 #define DAS1800_DAC 0x0 #define DAS1800_SELECT 0x2 #define ADC 0x0 #define QRAM 0x1 #define DAC(a) (0x2 + a) #define DAS1800_DIGITAL 0x3 #define DAS1800_CONTROL_A 0x4 #define FFEN 0x1 #define CGEN 0x4 #define CGSL 0x8 #define TGEN 0x10 #define TGSL 0x20 #define ATEN 0x80 #define DAS1800_CONTROL_B 0x5 #define DMA_CH5 0x1 #define DMA_CH6 0x2 #define DMA_CH7 0x3 #define DMA_CH5_CH6 0x5 #define DMA_CH6_CH7 0x6 #define DMA_CH7_CH5 0x7 #define DMA_ENABLED 0x3 /* mask used to determine if dma is enabled */ #define DMA_DUAL 0x4 #define IRQ3 0x8 #define IRQ5 0x10 #define IRQ7 0x18 #define IRQ10 0x28 #define IRQ11 0x30 #define IRQ15 0x38 #define FIMD 0x40 #define DAS1800_CONTROL_C 0X6 #define IPCLK 0x1 #define XPCLK 0x3 #define BMDE 0x4 #define CMEN 0x8 #define UQEN 0x10 #define SD 0x40 #define UB 0x80 #define DAS1800_STATUS 0x7 /* bits that prevent interrupt status bits (and CVEN) from being cleared on write */ #define CLEAR_INTR_MASK (CVEN_MASK | 0x1f) #define INT 0x1 #define DMATC 0x2 #define CT0TC 0x8 #define OVF 0x10 #define FHF 0x20 #define FNE 0x40 #define CVEN_MASK 0x40 /* masks CVEN on write */ #define CVEN 0x80 #define DAS1800_BURST_LENGTH 0x8 #define DAS1800_BURST_RATE 0x9 #define DAS1800_QRAM_ADDRESS 0xa #define DAS1800_COUNTER 0xc #define IOBASE2 0x400 /* offset of additional ioports used on 'ao' cards */ enum { das1701st, das1701st_da, das1702st, das1702st_da, das1702hr, das1702hr_da, das1701ao, das1702ao, das1801st, das1801st_da, das1802st, das1802st_da, das1802hr, das1802hr_da, das1801hc, das1802hc, das1801ao, das1802ao }; static int das1800_attach(struct comedi_device *dev, struct 
comedi_devconfig *it); static int das1800_detach(struct comedi_device *dev); static int das1800_probe(struct comedi_device *dev); static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static irqreturn_t das1800_interrupt(int irq, void *d); static int das1800_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s); static void das1800_ai_handler(struct comedi_device *dev); static void das1800_handle_dma(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int status); static void das1800_flush_dma(struct comedi_device *dev, struct comedi_subdevice *s); static void das1800_flush_dma_channel(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int channel, uint16_t * buffer); static void das1800_handle_fifo_half_full(struct comedi_device *dev, struct comedi_subdevice *s); static void das1800_handle_fifo_not_empty(struct comedi_device *dev, struct comedi_subdevice *s); static int das1800_ai_do_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int das1800_ai_do_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int das1800_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das1800_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das1800_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das1800_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das1800_set_frequency(struct comedi_device *dev); static unsigned int burst_convert_arg(unsigned int convert_arg, int round_mode); static unsigned int suggest_transfer_size(struct comedi_cmd *cmd); /* analog input ranges */ static const struct comedi_lrange range_ai_das1801 = { 8, { RANGE(-5, 5), RANGE(-1, 1), RANGE(-0.1, 0.1), 
RANGE(-0.02, 0.02), RANGE(0, 5), RANGE(0, 1), RANGE(0, 0.1), RANGE(0, 0.02), } }; static const struct comedi_lrange range_ai_das1802 = { 8, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-2.5, 2.5), RANGE(-1.25, 1.25), RANGE(0, 10), RANGE(0, 5), RANGE(0, 2.5), RANGE(0, 1.25), } }; struct das1800_board { const char *name; int ai_speed; /* max conversion period in nanoseconds */ int resolution; /* bits of ai resolution */ int qram_len; /* length of card's channel / gain queue */ int common; /* supports AREF_COMMON flag */ int do_n_chan; /* number of digital output channels */ int ao_ability; /* 0 == no analog out, 1 == basic analog out, 2 == waveform analog out */ int ao_n_chan; /* number of analog out channels */ const struct comedi_lrange *range_ai; /* available input ranges */ }; /* Warning: the maximum conversion speeds listed below are * not always achievable depending on board setup (see * user manual.) */ static const struct das1800_board das1800_boards[] = { { .name = "das-1701st", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1801, }, { .name = "das-1701st-da", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 4, .range_ai = &range_ai_das1801, }, { .name = "das-1702st", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1802, }, { .name = "das-1702st-da", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 4, .range_ai = &range_ai_das1802, }, { .name = "das-1702hr", .ai_speed = 20000, .resolution = 16, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1802, }, { .name = "das-1702hr-da", .ai_speed = 20000, .resolution = 16, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 2, .range_ai = 
&range_ai_das1802, }, { .name = "das-1701ao", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 2, .ao_n_chan = 2, .range_ai = &range_ai_das1801, }, { .name = "das-1702ao", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 2, .ao_n_chan = 2, .range_ai = &range_ai_das1802, }, { .name = "das-1801st", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1801, }, { .name = "das-1801st-da", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 4, .range_ai = &range_ai_das1801, }, { .name = "das-1802st", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1802, }, { .name = "das-1802st-da", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 4, .range_ai = &range_ai_das1802, }, { .name = "das-1802hr", .ai_speed = 10000, .resolution = 16, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1802, }, { .name = "das-1802hr-da", .ai_speed = 10000, .resolution = 16, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 2, .range_ai = &range_ai_das1802, }, { .name = "das-1801hc", .ai_speed = 3000, .resolution = 12, .qram_len = 64, .common = 0, .do_n_chan = 8, .ao_ability = 1, .ao_n_chan = 2, .range_ai = &range_ai_das1801, }, { .name = "das-1802hc", .ai_speed = 3000, .resolution = 12, .qram_len = 64, .common = 0, .do_n_chan = 8, .ao_ability = 1, .ao_n_chan = 2, .range_ai = &range_ai_das1802, }, { .name = "das-1801ao", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 2, .ao_n_chan = 2, .range_ai = &range_ai_das1801, }, { .name = "das-1802ao", .ai_speed = 3000, .resolution = 12, 
.qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 2, .ao_n_chan = 2, .range_ai = &range_ai_das1802, }, }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct das1800_board *)dev->board_ptr) struct das1800_private { volatile unsigned int count; /* number of data points left to be taken */ unsigned int divisor1; /* value to load into board's counter 1 for timed conversions */ unsigned int divisor2; /* value to load into board's counter 2 for timed conversions */ int do_bits; /* digital output bits */ int irq_dma_bits; /* bits for control register b */ /* dma bits for control register b, stored so that dma can be * turned on and off */ int dma_bits; unsigned int dma0; /* dma channels used */ unsigned int dma1; volatile unsigned int dma_current; /* dma channel currently in use */ uint16_t *ai_buf0; /* pointers to dma buffers */ uint16_t *ai_buf1; uint16_t *dma_current_buf; /* pointer to dma buffer currently being used */ unsigned int dma_transfer_size; /* size of transfer currently used, in bytes */ unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */ short ao_update_bits; /* remembers the last write to the 'update' dac */ }; #define devpriv ((struct das1800_private *)dev->private) /* analog out range for boards with basic analog out */ static const struct comedi_lrange range_ao_1 = { 1, { RANGE(-10, 10), } }; /* analog out range for 'ao' boards */ /* static const struct comedi_lrange range_ao_2 = { 2, { RANGE(-10, 10), RANGE(-5, 5), } }; */ static struct comedi_driver driver_das1800 = { .driver_name = "das1800", .module = THIS_MODULE, .attach = das1800_attach, .detach = das1800_detach, .num_names = ARRAY_SIZE(das1800_boards), .board_name = &das1800_boards[0].name, .offset = sizeof(struct das1800_board), }; /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. 
*/ COMEDI_INITCLEANUP(driver_das1800); static int das1800_init_dma(struct comedi_device *dev, unsigned int dma0, unsigned int dma1) { unsigned long flags; /* need an irq to do dma */ if (dev->irq && dma0) { /* encode dma0 and dma1 into 2 digit hexadecimal for switch */ switch ((dma0 & 0x7) | (dma1 << 4)) { case 0x5: /* dma0 == 5 */ devpriv->dma_bits |= DMA_CH5; break; case 0x6: /* dma0 == 6 */ devpriv->dma_bits |= DMA_CH6; break; case 0x7: /* dma0 == 7 */ devpriv->dma_bits |= DMA_CH7; break; case 0x65: /* dma0 == 5, dma1 == 6 */ devpriv->dma_bits |= DMA_CH5_CH6; break; case 0x76: /* dma0 == 6, dma1 == 7 */ devpriv->dma_bits |= DMA_CH6_CH7; break; case 0x57: /* dma0 == 7, dma1 == 5 */ devpriv->dma_bits |= DMA_CH7_CH5; break; default: printk(" only supports dma channels 5 through 7\n" " Dual dma only allows the following combinations:\n" " dma 5,6 / 6,7 / or 7,5\n"); return -EINVAL; break; } if (request_dma(dma0, driver_das1800.driver_name)) { printk(" failed to allocate dma channel %i\n", dma0); return -EINVAL; } devpriv->dma0 = dma0; devpriv->dma_current = dma0; if (dma1) { if (request_dma(dma1, driver_das1800.driver_name)) { printk(" failed to allocate dma channel %i\n", dma1); return -EINVAL; } devpriv->dma1 = dma1; } devpriv->ai_buf0 = kmalloc(DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA); if (devpriv->ai_buf0 == NULL) return -ENOMEM; devpriv->dma_current_buf = devpriv->ai_buf0; if (dma1) { devpriv->ai_buf1 = kmalloc(DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA); if (devpriv->ai_buf1 == NULL) return -ENOMEM; } flags = claim_dma_lock(); disable_dma(devpriv->dma0); set_dma_mode(devpriv->dma0, DMA_MODE_READ); if (dma1) { disable_dma(devpriv->dma1); set_dma_mode(devpriv->dma1, DMA_MODE_READ); } release_dma_lock(flags); } return 0; } static int das1800_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase = it->options[0]; unsigned int irq = it->options[1]; unsigned int dma0 = it->options[2]; unsigned int dma1 = it->options[3]; 
unsigned long iobase2; int board; int retval; /* allocate and initialize dev->private */ if (alloc_private(dev, sizeof(struct das1800_private)) < 0) return -ENOMEM; printk("comedi%d: %s: io 0x%lx", dev->minor, driver_das1800.driver_name, iobase); if (irq) { printk(", irq %u", irq); if (dma0) { printk(", dma %u", dma0); if (dma1) printk(" and %u", dma1); } } printk("\n"); if (iobase == 0) { printk(" io base address required\n"); return -EINVAL; } /* check if io addresses are available */ if (!request_region(iobase, DAS1800_SIZE, driver_das1800.driver_name)) { printk (" I/O port conflict: failed to allocate ports 0x%lx to 0x%lx\n", iobase, iobase + DAS1800_SIZE - 1); return -EIO; } dev->iobase = iobase; board = das1800_probe(dev); if (board < 0) { printk(" unable to determine board type\n"); return -ENODEV; } dev->board_ptr = das1800_boards + board; dev->board_name = thisboard->name; /* if it is an 'ao' board with fancy analog out then we need extra io ports */ if (thisboard->ao_ability == 2) { iobase2 = iobase + IOBASE2; if (!request_region(iobase2, DAS1800_SIZE, driver_das1800.driver_name)) { printk (" I/O port conflict: failed to allocate ports 0x%lx to 0x%lx\n", iobase2, iobase2 + DAS1800_SIZE - 1); return -EIO; } devpriv->iobase2 = iobase2; } /* grab our IRQ */ if (irq) { if (request_irq(irq, das1800_interrupt, 0, driver_das1800.driver_name, dev)) { printk(" unable to allocate irq %u\n", irq); return -EINVAL; } } dev->irq = irq; /* set bits that tell card which irq to use */ switch (irq) { case 0: break; case 3: devpriv->irq_dma_bits |= 0x8; break; case 5: devpriv->irq_dma_bits |= 0x10; break; case 7: devpriv->irq_dma_bits |= 0x18; break; case 10: devpriv->irq_dma_bits |= 0x28; break; case 11: devpriv->irq_dma_bits |= 0x30; break; case 15: devpriv->irq_dma_bits |= 0x38; break; default: printk(" irq out of range\n"); return -EINVAL; break; } retval = das1800_init_dma(dev, dma0, dma1); if (retval < 0) return retval; if (devpriv->ai_buf0 == NULL) { devpriv->ai_buf0 
= kmalloc(FIFO_SIZE * sizeof(uint16_t), GFP_KERNEL); if (devpriv->ai_buf0 == NULL) return -ENOMEM; } if (alloc_subdevices(dev, 4) < 0) return -ENOMEM; /* analog input subdevice */ s = dev->subdevices + 0; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND | SDF_CMD_READ; if (thisboard->common) s->subdev_flags |= SDF_COMMON; s->n_chan = thisboard->qram_len; s->len_chanlist = thisboard->qram_len; s->maxdata = (1 << thisboard->resolution) - 1; s->range_table = thisboard->range_ai; s->do_cmd = das1800_ai_do_cmd; s->do_cmdtest = das1800_ai_do_cmdtest; s->insn_read = das1800_ai_rinsn; s->poll = das1800_ai_poll; s->cancel = das1800_cancel; /* analog out */ s = dev->subdevices + 1; if (thisboard->ao_ability == 1) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = thisboard->ao_n_chan; s->maxdata = (1 << thisboard->resolution) - 1; s->range_table = &range_ao_1; s->insn_write = das1800_ao_winsn; } else { s->type = COMEDI_SUBD_UNUSED; } /* di */ s = dev->subdevices + 2; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das1800_di_rbits; /* do */ s = dev->subdevices + 3; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = thisboard->do_n_chan; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das1800_do_wbits; das1800_cancel(dev, dev->read_subdev); /* initialize digital out channels */ outb(devpriv->do_bits, dev->iobase + DAS1800_DIGITAL); /* initialize analog out channels */ if (thisboard->ao_ability == 1) { /* select 'update' dac channel for baseAddress + 0x0 */ outb(DAC(thisboard->ao_n_chan - 1), dev->iobase + DAS1800_SELECT); outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC); } return 0; }; static int das1800_detach(struct comedi_device *dev) { /* only free stuff if it has been allocated by _attach */ if (dev->iobase) release_region(dev->iobase, DAS1800_SIZE); if 
(dev->irq) free_irq(dev->irq, dev); if (dev->private) { if (devpriv->iobase2) release_region(devpriv->iobase2, DAS1800_SIZE); if (devpriv->dma0) free_dma(devpriv->dma0); if (devpriv->dma1) free_dma(devpriv->dma1); if (devpriv->ai_buf0) kfree(devpriv->ai_buf0); if (devpriv->ai_buf1) kfree(devpriv->ai_buf1); } printk("comedi%d: %s: remove\n", dev->minor, driver_das1800.driver_name); return 0; }; /* probes and checks das-1800 series board type */ static int das1800_probe(struct comedi_device *dev) { int id; int board; id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf; /* get id bits */ board = ((struct das1800_board *)dev->board_ptr) - das1800_boards; switch (id) { case 0x3: if (board == das1801st_da || board == das1802st_da || board == das1701st_da || board == das1702st_da) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1800st-da series\n"); return das1801st; break; case 0x4: if (board == das1802hr_da || board == das1702hr_da) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1802hr-da\n"); return das1802hr; break; case 0x5: if (board == das1801ao || board == das1802ao || board == das1701ao || board == das1702ao) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1800ao series\n"); return das1801ao; break; case 0x6: if (board == das1802hr || board == das1702hr) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk(" Board model (probed, not recommended): das-1802hr\n"); return das1802hr; break; case 0x7: if (board == das1801st || board == das1802st || board == das1701st || board == das1702st) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1800st series\n"); return das1801st; break; case 0x8: if (board == das1801hc || 
board == das1802hc) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1800hc series\n"); return das1801hc; break; default: printk (" Board model: probe returned 0x%x (unknown, please report)\n", id); return board; break; } return -1; } static int das1800_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; /* prevent race with interrupt handler */ spin_lock_irqsave(&dev->spinlock, flags); das1800_ai_handler(dev); spin_unlock_irqrestore(&dev->spinlock, flags); return s->async->buf_write_count - s->async->buf_read_count; } static irqreturn_t das1800_interrupt(int irq, void *d) { struct comedi_device *dev = d; unsigned int status; if (dev->attached == 0) { comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; } /* Prevent race with das1800_ai_poll() on multi processor systems. * Also protects indirect addressing in das1800_ai_handler */ spin_lock(&dev->spinlock); status = inb(dev->iobase + DAS1800_STATUS); /* if interrupt was not caused by das-1800 */ if (!(status & INT)) { spin_unlock(&dev->spinlock); return IRQ_NONE; } /* clear the interrupt status bit INT */ outb(CLEAR_INTR_MASK & ~INT, dev->iobase + DAS1800_STATUS); /* handle interrupt */ das1800_ai_handler(dev); spin_unlock(&dev->spinlock); return IRQ_HANDLED; } /* the guts of the interrupt handler, that is shared with das1800_ai_poll */ static void das1800_ai_handler(struct comedi_device *dev) { struct comedi_subdevice *s = dev->subdevices + 0; /* analog input subdevice */ struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int status = inb(dev->iobase + DAS1800_STATUS); async->events = 0; /* select adc for base address + 0 */ outb(ADC, dev->iobase + DAS1800_SELECT); /* dma buffer full */ if (devpriv->irq_dma_bits & DMA_ENABLED) { /* look for data from dma transfer even if dma terminal count hasn't happened yet */ das1800_handle_dma(dev, s, status); } else if 
(status & FHF) { /* if fifo half full */ das1800_handle_fifo_half_full(dev, s); } else if (status & FNE) { /* if fifo not empty */ das1800_handle_fifo_not_empty(dev, s); } async->events |= COMEDI_CB_BLOCK; /* if the card's fifo has overflowed */ if (status & OVF) { /* clear OVF interrupt bit */ outb(CLEAR_INTR_MASK & ~OVF, dev->iobase + DAS1800_STATUS); comedi_error(dev, "DAS1800 FIFO overflow"); das1800_cancel(dev, s); async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; comedi_event(dev, s); return; } /* stop taking data if appropriate */ /* stop_src TRIG_EXT */ if (status & CT0TC) { /* clear CT0TC interrupt bit */ outb(CLEAR_INTR_MASK & ~CT0TC, dev->iobase + DAS1800_STATUS); /* make sure we get all remaining data from board before quitting */ if (devpriv->irq_dma_bits & DMA_ENABLED) das1800_flush_dma(dev, s); else das1800_handle_fifo_not_empty(dev, s); das1800_cancel(dev, s); /* disable hardware conversions */ async->events |= COMEDI_CB_EOA; } else if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0) { /* stop_src TRIG_COUNT */ das1800_cancel(dev, s); /* disable hardware conversions */ async->events |= COMEDI_CB_EOA; } comedi_event(dev, s); return; } static void das1800_handle_dma(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int status) { unsigned long flags; const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL; flags = claim_dma_lock(); das1800_flush_dma_channel(dev, s, devpriv->dma_current, devpriv->dma_current_buf); /* re-enable dma channel */ set_dma_addr(devpriv->dma_current, virt_to_bus(devpriv->dma_current_buf)); set_dma_count(devpriv->dma_current, devpriv->dma_transfer_size); enable_dma(devpriv->dma_current); release_dma_lock(flags); if (status & DMATC) { /* clear DMATC interrupt bit */ outb(CLEAR_INTR_MASK & ~DMATC, dev->iobase + DAS1800_STATUS); /* switch dma channels for next time, if appropriate */ if (dual_dma) { /* read data from the other channel next time */ if (devpriv->dma_current == devpriv->dma0) { devpriv->dma_current = 
devpriv->dma1; devpriv->dma_current_buf = devpriv->ai_buf1; } else { devpriv->dma_current = devpriv->dma0; devpriv->dma_current_buf = devpriv->ai_buf0; } } } return; } static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev, uint16_t sample) { sample += 1 << (thisboard->resolution - 1); return sample; } static void munge_data(struct comedi_device *dev, uint16_t * array, unsigned int num_elements) { unsigned int i; int unipolar; /* see if card is using a unipolar or bipolar range so we can munge data correctly */ unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB; /* convert to unsigned type if we are in a bipolar mode */ if (!unipolar) { for (i = 0; i < num_elements; i++) { array[i] = munge_bipolar_sample(dev, array[i]); } } } /* Utility function used by das1800_flush_dma() and das1800_handle_dma(). * Assumes dma lock is held */ static void das1800_flush_dma_channel(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int channel, uint16_t * buffer) { unsigned int num_bytes, num_samples; struct comedi_cmd *cmd = &s->async->cmd; disable_dma(channel); /* clear flip-flop to make sure 2-byte registers * get set correctly */ clear_dma_ff(channel); /* figure out how many points to read */ num_bytes = devpriv->dma_transfer_size - get_dma_residue(channel); num_samples = num_bytes / sizeof(short); /* if we only need some of the points */ if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_samples) num_samples = devpriv->count; munge_data(dev, buffer, num_samples); cfc_write_array_to_buffer(s, buffer, num_bytes); if (s->async->cmd.stop_src == TRIG_COUNT) devpriv->count -= num_samples; return; } /* flushes remaining data from board when external trigger has stopped aquisition * and we are using dma transfers */ static void das1800_flush_dma(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL; flags = claim_dma_lock(); das1800_flush_dma_channel(dev, s, 
devpriv->dma_current, devpriv->dma_current_buf); if (dual_dma) { /* switch to other channel and flush it */ if (devpriv->dma_current == devpriv->dma0) { devpriv->dma_current = devpriv->dma1; devpriv->dma_current_buf = devpriv->ai_buf1; } else { devpriv->dma_current = devpriv->dma0; devpriv->dma_current_buf = devpriv->ai_buf0; } das1800_flush_dma_channel(dev, s, devpriv->dma_current, devpriv->dma_current_buf); } release_dma_lock(flags); /* get any remaining samples in fifo */ das1800_handle_fifo_not_empty(dev, s); return; } static void das1800_handle_fifo_half_full(struct comedi_device *dev, struct comedi_subdevice *s) { int numPoints = 0; /* number of points to read */ struct comedi_cmd *cmd = &s->async->cmd; numPoints = FIFO_SIZE / 2; /* if we only need some of the points */ if (cmd->stop_src == TRIG_COUNT && devpriv->count < numPoints) numPoints = devpriv->count; insw(dev->iobase + DAS1800_FIFO, devpriv->ai_buf0, numPoints); munge_data(dev, devpriv->ai_buf0, numPoints); cfc_write_array_to_buffer(s, devpriv->ai_buf0, numPoints * sizeof(devpriv->ai_buf0[0])); if (cmd->stop_src == TRIG_COUNT) devpriv->count -= numPoints; return; } static void das1800_handle_fifo_not_empty(struct comedi_device *dev, struct comedi_subdevice *s) { short dpnt; int unipolar; struct comedi_cmd *cmd = &s->async->cmd; unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB; while (inb(dev->iobase + DAS1800_STATUS) & FNE) { if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0) break; dpnt = inw(dev->iobase + DAS1800_FIFO); /* convert to unsigned type if we are in a bipolar mode */ if (!unipolar) ; dpnt = munge_bipolar_sample(dev, dpnt); cfc_write_to_buffer(s, dpnt); if (cmd->stop_src == TRIG_COUNT) devpriv->count--; } return; } static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */ outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */ outb(0x0, dev->iobase + 
DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */ if (devpriv->dma0) disable_dma(devpriv->dma0); if (devpriv->dma1) disable_dma(devpriv->dma1); return 0; } /* test analog input cmd */ static int das1800_ai_do_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; unsigned int tmp_arg; int i; int unipolar; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_EXT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ /* uniqueness check */ if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT) err++; if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT) err++; /* compatibility check */ if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_TIMER) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } if (!cmd->chanlist_len) { cmd->chanlist_len = 1; err++; } if 
(cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } switch (cmd->stop_src) { case TRIG_COUNT: if (!cmd->stop_arg) { cmd->stop_arg = 1; err++; } break; case TRIG_NONE: if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } break; default: break; } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->convert_src == TRIG_TIMER) { /* if we are not in burst mode */ if (cmd->scan_begin_src == TRIG_FOLLOW) { tmp_arg = cmd->convert_arg; /* calculate counter values that give desired timing */ i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->convert_arg), cmd-> flags & TRIG_ROUND_MASK); if (tmp_arg != cmd->convert_arg) err++; } /* if we are in burst mode */ else { /* check that convert_arg is compatible */ tmp_arg = cmd->convert_arg; cmd->convert_arg = burst_convert_arg(cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); if (tmp_arg != cmd->convert_arg) err++; if (cmd->scan_begin_src == TRIG_TIMER) { /* if scans are timed faster than conversion rate allows */ if (cmd->convert_arg * cmd->chanlist_len > cmd->scan_begin_arg) { cmd->scan_begin_arg = cmd->convert_arg * cmd->chanlist_len; err++; } tmp_arg = cmd->scan_begin_arg; /* calculate counter values that give desired timing */ i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv-> divisor1), &(devpriv-> divisor2), &(cmd-> scan_begin_arg), cmd-> flags & TRIG_ROUND_MASK); if (tmp_arg != cmd->scan_begin_arg) err++; } } } if (err) return 4; /* make sure user is not trying to mix unipolar and bipolar ranges */ if (cmd->chanlist) { unipolar = CR_RANGE(cmd->chanlist[0]) & UNIPOLAR; for (i = 1; i < cmd->chanlist_len; i++) { if (unipolar != (CR_RANGE(cmd->chanlist[i]) & UNIPOLAR)) { comedi_error(dev, "unipolar and bipolar ranges cannot be mixed in the chanlist"); err++; break; } } } if (err) return 5; return 0; } /* analog input cmd interface */ /* first, some utility functions used in the main ai_do_cmd() */ /* returns appropriate bits for 
control register a, depending on command */ static int control_a_bits(struct comedi_cmd cmd) { int control_a; control_a = FFEN; /* enable fifo */ if (cmd.stop_src == TRIG_EXT) { control_a |= ATEN; } switch (cmd.start_src) { case TRIG_EXT: control_a |= TGEN | CGSL; break; case TRIG_NOW: control_a |= CGEN; break; default: break; } return control_a; } /* returns appropriate bits for control register c, depending on command */ static int control_c_bits(struct comedi_cmd cmd) { int control_c; int aref; /* set clock source to internal or external, select analog reference, * select unipolar / bipolar */ aref = CR_AREF(cmd.chanlist[0]); control_c = UQEN; /* enable upper qram addresses */ if (aref != AREF_DIFF) control_c |= SD; if (aref == AREF_COMMON) control_c |= CMEN; /* if a unipolar range was selected */ if (CR_RANGE(cmd.chanlist[0]) & UNIPOLAR) control_c |= UB; switch (cmd.scan_begin_src) { case TRIG_FOLLOW: /* not in burst mode */ switch (cmd.convert_src) { case TRIG_TIMER: /* trig on cascaded counters */ control_c |= IPCLK; break; case TRIG_EXT: /* trig on falling edge of external trigger */ control_c |= XPCLK; break; default: break; } break; case TRIG_TIMER: /* burst mode with internal pacer clock */ control_c |= BMDE | IPCLK; break; case TRIG_EXT: /* burst mode with external trigger */ control_c |= BMDE | XPCLK; break; default: break; } return control_c; } /* sets up counters */ static int setup_counters(struct comedi_device *dev, struct comedi_cmd cmd) { /* setup cascaded counters for conversion/scan frequency */ switch (cmd.scan_begin_src) { case TRIG_FOLLOW: /* not in burst mode */ if (cmd.convert_src == TRIG_TIMER) { /* set conversion frequency */ i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd.convert_arg), cmd. 
flags & TRIG_ROUND_MASK); if (das1800_set_frequency(dev) < 0) { return -1; } } break; case TRIG_TIMER: /* in burst mode */ /* set scan frequency */ i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd.scan_begin_arg), cmd.flags & TRIG_ROUND_MASK); if (das1800_set_frequency(dev) < 0) { return -1; } break; default: break; } /* setup counter 0 for 'about triggering' */ if (cmd.stop_src == TRIG_EXT) { /* load counter 0 in mode 0 */ i8254_load(dev->iobase + DAS1800_COUNTER, 0, 0, 1, 0); } return 0; } /* sets up dma */ static void setup_dma(struct comedi_device *dev, struct comedi_cmd cmd) { unsigned long lock_flags; const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL; if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0) return; /* determine a reasonable dma transfer size */ devpriv->dma_transfer_size = suggest_transfer_size(&cmd); lock_flags = claim_dma_lock(); disable_dma(devpriv->dma0); /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma0); set_dma_addr(devpriv->dma0, virt_to_bus(devpriv->ai_buf0)); /* set appropriate size of transfer */ set_dma_count(devpriv->dma0, devpriv->dma_transfer_size); devpriv->dma_current = devpriv->dma0; devpriv->dma_current_buf = devpriv->ai_buf0; enable_dma(devpriv->dma0); /* set up dual dma if appropriate */ if (dual_dma) { disable_dma(devpriv->dma1); /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma1); set_dma_addr(devpriv->dma1, virt_to_bus(devpriv->ai_buf1)); /* set appropriate size of transfer */ set_dma_count(devpriv->dma1, devpriv->dma_transfer_size); enable_dma(devpriv->dma1); } release_dma_lock(lock_flags); return; } /* programs channel/gain list into card */ static void program_chanlist(struct comedi_device *dev, struct comedi_cmd cmd) { int i, n, chan_range; unsigned long irq_flags; const int range_mask = 0x3; /* masks unipolar/bipolar bit off range */ 
const int range_bitshift = 8; n = cmd.chanlist_len; /* spinlock protects indirect addressing */ spin_lock_irqsave(&dev->spinlock, irq_flags); outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */ outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*set QRAM address start */ /* make channel / gain list */ for (i = 0; i < n; i++) { chan_range = CR_CHAN(cmd. chanlist[i]) | ((CR_RANGE(cmd.chanlist[i]) & range_mask) << range_bitshift); outw(chan_range, dev->iobase + DAS1800_QRAM); } outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */ spin_unlock_irqrestore(&dev->spinlock, irq_flags); return; } /* analog input do_cmd */ static int das1800_ai_do_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int ret; int control_a, control_c; struct comedi_async *async = s->async; struct comedi_cmd cmd = async->cmd; if (!dev->irq) { comedi_error(dev, "no irq assigned for das-1800, cannot do hardware conversions"); return -1; } /* disable dma on TRIG_WAKE_EOS, or TRIG_RT * (because dma in handler is unsafe at hard real-time priority) */ if (cmd.flags & (TRIG_WAKE_EOS | TRIG_RT)) { devpriv->irq_dma_bits &= ~DMA_ENABLED; } else { devpriv->irq_dma_bits |= devpriv->dma_bits; } /* interrupt on end of conversion for TRIG_WAKE_EOS */ if (cmd.flags & TRIG_WAKE_EOS) { /* interrupt fifo not empty */ devpriv->irq_dma_bits &= ~FIMD; } else { /* interrupt fifo half full */ devpriv->irq_dma_bits |= FIMD; } /* determine how many conversions we need */ if (cmd.stop_src == TRIG_COUNT) { devpriv->count = cmd.stop_arg * cmd.chanlist_len; } das1800_cancel(dev, s); /* determine proper bits for control registers */ control_a = control_a_bits(cmd); control_c = control_c_bits(cmd); /* setup card and start */ program_chanlist(dev, cmd); ret = setup_counters(dev, cmd); if (ret < 0) { comedi_error(dev, "Error setting up counters"); return ret; } setup_dma(dev, cmd); outb(control_c, dev->iobase + DAS1800_CONTROL_C); /* set conversion rate and length for 
burst mode */ if (control_c & BMDE) { /* program conversion period with number of microseconds minus 1 */ outb(cmd.convert_arg / 1000 - 1, dev->iobase + DAS1800_BURST_RATE); outb(cmd.chanlist_len - 1, dev->iobase + DAS1800_BURST_LENGTH); } outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B); /* enable irq/dma */ outb(control_a, dev->iobase + DAS1800_CONTROL_A); /* enable fifo and triggering */ outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */ return 0; } /* read analog input */ static int das1800_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, n; int chan, range, aref, chan_range; int timeout = 1000; short dpnt; int conv_flags = 0; unsigned long irq_flags; /* set up analog reference and unipolar / bipolar mode */ aref = CR_AREF(insn->chanspec); conv_flags |= UQEN; if (aref != AREF_DIFF) conv_flags |= SD; if (aref == AREF_COMMON) conv_flags |= CMEN; /* if a unipolar range was selected */ if (CR_RANGE(insn->chanspec) & UNIPOLAR) conv_flags |= UB; outb(conv_flags, dev->iobase + DAS1800_CONTROL_C); /* software conversion enabled */ outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */ outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* reset fifo */ outb(FFEN, dev->iobase + DAS1800_CONTROL_A); chan = CR_CHAN(insn->chanspec); /* mask of unipolar/bipolar bit from range */ range = CR_RANGE(insn->chanspec) & 0x3; chan_range = chan | (range << 8); spin_lock_irqsave(&dev->spinlock, irq_flags); outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */ outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /* set QRAM address start */ outw(chan_range, dev->iobase + DAS1800_QRAM); outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */ outb(ADC, dev->iobase + DAS1800_SELECT); /* select ADC for baseAddress + 0x0 */ for (n = 0; n < insn->n; n++) { /* trigger conversion */ outb(0, dev->iobase + DAS1800_FIFO); for (i = 0; i < timeout; i++) { if 
(inb(dev->iobase + DAS1800_STATUS) & FNE) break; } if (i == timeout) { comedi_error(dev, "timeout"); return -ETIME; } dpnt = inw(dev->iobase + DAS1800_FIFO); /* shift data to offset binary for bipolar ranges */ if ((conv_flags & UB) == 0) dpnt += 1 << (thisboard->resolution - 1); data[n] = dpnt; } spin_unlock_irqrestore(&dev->spinlock, irq_flags); return n; } /* writes to an analog output channel */ static int das1800_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec); /* int range = CR_RANGE(insn->chanspec); */ int update_chan = thisboard->ao_n_chan - 1; short output; unsigned long irq_flags; /* card expects two's complement data */ output = data[0] - (1 << (thisboard->resolution - 1)); /* if the write is to the 'update' channel, we need to remember its value */ if (chan == update_chan) devpriv->ao_update_bits = output; /* write to channel */ spin_lock_irqsave(&dev->spinlock, irq_flags); outb(DAC(chan), dev->iobase + DAS1800_SELECT); /* select dac channel for baseAddress + 0x0 */ outw(output, dev->iobase + DAS1800_DAC); /* now we need to write to 'update' channel to update all dac channels */ if (chan != update_chan) { outb(DAC(update_chan), dev->iobase + DAS1800_SELECT); /* select 'update' channel for baseAddress + 0x0 */ outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC); } spin_unlock_irqrestore(&dev->spinlock, irq_flags); return 1; } /* reads from digital input channels */ static int das1800_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = inb(dev->iobase + DAS1800_DIGITAL) & 0xf; data[0] = 0; return 2; } /* writes to digital output channels */ static int das1800_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int wbits; /* only set bits that have been masked */ data[0] &= (1 << s->n_chan) - 1; wbits = 
devpriv->do_bits; wbits &= ~data[0]; wbits |= data[0] & data[1]; devpriv->do_bits = wbits; outb(devpriv->do_bits, dev->iobase + DAS1800_DIGITAL); data[1] = devpriv->do_bits; return 2; } /* loads counters with divisor1, divisor2 from private structure */ static int das1800_set_frequency(struct comedi_device *dev) { int err = 0; /* counter 1, mode 2 */ if (i8254_load(dev->iobase + DAS1800_COUNTER, 0, 1, devpriv->divisor1, 2)) err++; /* counter 2, mode 2 */ if (i8254_load(dev->iobase + DAS1800_COUNTER, 0, 2, devpriv->divisor2, 2)) err++; if (err) return -1; return 0; } /* converts requested conversion timing to timing compatible with * hardware, used only when card is in 'burst mode' */ static unsigned int burst_convert_arg(unsigned int convert_arg, int round_mode) { unsigned int micro_sec; /* in burst mode, the maximum conversion time is 64 microseconds */ if (convert_arg > 64000) convert_arg = 64000; /* the conversion time must be an integral number of microseconds */ switch (round_mode) { case TRIG_ROUND_NEAREST: default: micro_sec = (convert_arg + 500) / 1000; break; case TRIG_ROUND_DOWN: micro_sec = convert_arg / 1000; break; case TRIG_ROUND_UP: micro_sec = (convert_arg - 1) / 1000 + 1; break; } /* return number of nanoseconds */ return micro_sec * 1000; } /* utility function that suggests a dma transfer size based on the conversion period 'ns' */ static unsigned int suggest_transfer_size(struct comedi_cmd *cmd) { unsigned int size = DMA_BUF_SIZE; static const int sample_size = 2; /* size in bytes of one sample from board */ unsigned int fill_time = 300000000; /* target time in nanoseconds for filling dma buffer */ unsigned int max_size; /* maximum size we will allow for a transfer */ /* make dma buffer fill in 0.3 seconds for timed modes */ switch (cmd->scan_begin_src) { case TRIG_FOLLOW: /* not in burst mode */ if (cmd->convert_src == TRIG_TIMER) size = (fill_time / cmd->convert_arg) * sample_size; break; case TRIG_TIMER: size = (fill_time / 
(cmd->scan_begin_arg * cmd->chanlist_len)) * sample_size; break; default: size = DMA_BUF_SIZE; break; } /* set a minimum and maximum size allowed */ max_size = DMA_BUF_SIZE; /* if we are taking limited number of conversions, limit transfer size to that */ if (cmd->stop_src == TRIG_COUNT && cmd->stop_arg * cmd->chanlist_len * sample_size < max_size) max_size = cmd->stop_arg * cmd->chanlist_len * sample_size; if (size > max_size) size = max_size; if (size < sample_size) size = sample_size; return size; }
gpl-2.0
ThiagoGarciaAlves/linux
drivers/rtc/rtc-palmas.c
566
10413
/* * rtc-palmas.c -- Palmas Real Time Clock driver. * RTC driver for TI Palma series devices like TPS65913, * TPS65914 power management IC. * * Copyright (c) 2012, NVIDIA Corporation. * * Author: Laxman Dewangan <ldewangan@nvidia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, * whether express or implied; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA */ #include <linux/bcd.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mfd/palmas.h> #include <linux/module.h> #include <linux/of.h> #include <linux/rtc.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/pm.h> struct palmas_rtc { struct rtc_device *rtc; struct device *dev; unsigned int irq; }; /* Total number of RTC registers needed to set time*/ #define PALMAS_NUM_TIME_REGS (PALMAS_YEARS_REG - PALMAS_SECONDS_REG + 1) static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned char rtc_data[PALMAS_NUM_TIME_REGS]; struct palmas *palmas = dev_get_drvdata(dev->parent); int ret; /* Copy RTC counting registers to static registers or latches */ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG, PALMAS_RTC_CTRL_REG_GET_TIME, PALMAS_RTC_CTRL_REG_GET_TIME); if (ret < 0) { dev_err(dev, "RTC CTRL reg update failed, err: %d\n", ret); return ret; } ret = palmas_bulk_read(palmas, PALMAS_RTC_BASE, PALMAS_SECONDS_REG, rtc_data, PALMAS_NUM_TIME_REGS); if (ret < 0) { 
dev_err(dev, "RTC_SECONDS reg read failed, err = %d\n", ret); return ret; } tm->tm_sec = bcd2bin(rtc_data[0]); tm->tm_min = bcd2bin(rtc_data[1]); tm->tm_hour = bcd2bin(rtc_data[2]); tm->tm_mday = bcd2bin(rtc_data[3]); tm->tm_mon = bcd2bin(rtc_data[4]) - 1; tm->tm_year = bcd2bin(rtc_data[5]) + 100; return ret; } static int palmas_rtc_set_time(struct device *dev, struct rtc_time *tm) { unsigned char rtc_data[PALMAS_NUM_TIME_REGS]; struct palmas *palmas = dev_get_drvdata(dev->parent); int ret; rtc_data[0] = bin2bcd(tm->tm_sec); rtc_data[1] = bin2bcd(tm->tm_min); rtc_data[2] = bin2bcd(tm->tm_hour); rtc_data[3] = bin2bcd(tm->tm_mday); rtc_data[4] = bin2bcd(tm->tm_mon + 1); rtc_data[5] = bin2bcd(tm->tm_year - 100); /* Stop RTC while updating the RTC time registers */ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG, PALMAS_RTC_CTRL_REG_STOP_RTC, 0); if (ret < 0) { dev_err(dev, "RTC stop failed, err = %d\n", ret); return ret; } ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE, PALMAS_SECONDS_REG, rtc_data, PALMAS_NUM_TIME_REGS); if (ret < 0) { dev_err(dev, "RTC_SECONDS reg write failed, err = %d\n", ret); return ret; } /* Start back RTC */ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG, PALMAS_RTC_CTRL_REG_STOP_RTC, PALMAS_RTC_CTRL_REG_STOP_RTC); if (ret < 0) dev_err(dev, "RTC start failed, err = %d\n", ret); return ret; } static int palmas_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) { struct palmas *palmas = dev_get_drvdata(dev->parent); u8 val; val = enabled ? 
PALMAS_RTC_INTERRUPTS_REG_IT_ALARM : 0; return palmas_write(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG, val); } static int palmas_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) { unsigned char alarm_data[PALMAS_NUM_TIME_REGS]; u32 int_val; struct palmas *palmas = dev_get_drvdata(dev->parent); int ret; ret = palmas_bulk_read(palmas, PALMAS_RTC_BASE, PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS); if (ret < 0) { dev_err(dev, "RTC_ALARM_SECONDS read failed, err = %d\n", ret); return ret; } alm->time.tm_sec = bcd2bin(alarm_data[0]); alm->time.tm_min = bcd2bin(alarm_data[1]); alm->time.tm_hour = bcd2bin(alarm_data[2]); alm->time.tm_mday = bcd2bin(alarm_data[3]); alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1; alm->time.tm_year = bcd2bin(alarm_data[5]) + 100; ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG, &int_val); if (ret < 0) { dev_err(dev, "RTC_INTERRUPTS reg read failed, err = %d\n", ret); return ret; } if (int_val & PALMAS_RTC_INTERRUPTS_REG_IT_ALARM) alm->enabled = 1; return ret; } static int palmas_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) { unsigned char alarm_data[PALMAS_NUM_TIME_REGS]; struct palmas *palmas = dev_get_drvdata(dev->parent); int ret; ret = palmas_rtc_alarm_irq_enable(dev, 0); if (ret < 0) { dev_err(dev, "Disable RTC alarm failed\n"); return ret; } alarm_data[0] = bin2bcd(alm->time.tm_sec); alarm_data[1] = bin2bcd(alm->time.tm_min); alarm_data[2] = bin2bcd(alm->time.tm_hour); alarm_data[3] = bin2bcd(alm->time.tm_mday); alarm_data[4] = bin2bcd(alm->time.tm_mon + 1); alarm_data[5] = bin2bcd(alm->time.tm_year - 100); ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE, PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS); if (ret < 0) { dev_err(dev, "ALARM_SECONDS_REG write failed, err = %d\n", ret); return ret; } if (alm->enabled) ret = palmas_rtc_alarm_irq_enable(dev, 1); return ret; } static int palmas_clear_interrupts(struct device *dev) { struct palmas *palmas = 
dev_get_drvdata(dev->parent); unsigned int rtc_reg; int ret; ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_STATUS_REG, &rtc_reg); if (ret < 0) { dev_err(dev, "RTC_STATUS read failed, err = %d\n", ret); return ret; } ret = palmas_write(palmas, PALMAS_RTC_BASE, PALMAS_RTC_STATUS_REG, rtc_reg); if (ret < 0) { dev_err(dev, "RTC_STATUS write failed, err = %d\n", ret); return ret; } return 0; } static irqreturn_t palmas_rtc_interrupt(int irq, void *context) { struct palmas_rtc *palmas_rtc = context; struct device *dev = palmas_rtc->dev; int ret; ret = palmas_clear_interrupts(dev); if (ret < 0) { dev_err(dev, "RTC interrupt clear failed, err = %d\n", ret); return IRQ_NONE; } rtc_update_irq(palmas_rtc->rtc, 1, RTC_IRQF | RTC_AF); return IRQ_HANDLED; } static struct rtc_class_ops palmas_rtc_ops = { .read_time = palmas_rtc_read_time, .set_time = palmas_rtc_set_time, .read_alarm = palmas_rtc_read_alarm, .set_alarm = palmas_rtc_set_alarm, .alarm_irq_enable = palmas_rtc_alarm_irq_enable, }; static int palmas_rtc_probe(struct platform_device *pdev) { struct palmas *palmas = dev_get_drvdata(pdev->dev.parent); struct palmas_rtc *palmas_rtc = NULL; int ret; bool enable_bb_charging = false; bool high_bb_charging = false; if (pdev->dev.of_node) { enable_bb_charging = of_property_read_bool(pdev->dev.of_node, "ti,backup-battery-chargeable"); high_bb_charging = of_property_read_bool(pdev->dev.of_node, "ti,backup-battery-charge-high-current"); } palmas_rtc = devm_kzalloc(&pdev->dev, sizeof(struct palmas_rtc), GFP_KERNEL); if (!palmas_rtc) return -ENOMEM; /* Clear pending interrupts */ ret = palmas_clear_interrupts(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "clear RTC int failed, err = %d\n", ret); return ret; } palmas_rtc->dev = &pdev->dev; platform_set_drvdata(pdev, palmas_rtc); if (enable_bb_charging) { unsigned reg = PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG; if (high_bb_charging) reg = 0; ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE, 
PALMAS_BACKUP_BATTERY_CTRL, PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG, reg); if (ret < 0) { dev_err(&pdev->dev, "BACKUP_BATTERY_CTRL update failed, %d\n", ret); return ret; } ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE, PALMAS_BACKUP_BATTERY_CTRL, PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN, PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN); if (ret < 0) { dev_err(&pdev->dev, "BACKUP_BATTERY_CTRL update failed, %d\n", ret); return ret; } } /* Start RTC */ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG, PALMAS_RTC_CTRL_REG_STOP_RTC, PALMAS_RTC_CTRL_REG_STOP_RTC); if (ret < 0) { dev_err(&pdev->dev, "RTC_CTRL write failed, err = %d\n", ret); return ret; } palmas_rtc->irq = platform_get_irq(pdev, 0); device_init_wakeup(&pdev->dev, 1); palmas_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &palmas_rtc_ops, THIS_MODULE); if (IS_ERR(palmas_rtc->rtc)) { ret = PTR_ERR(palmas_rtc->rtc); dev_err(&pdev->dev, "RTC register failed, err = %d\n", ret); return ret; } ret = devm_request_threaded_irq(&pdev->dev, palmas_rtc->irq, NULL, palmas_rtc_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_EARLY_RESUME, dev_name(&pdev->dev), palmas_rtc); if (ret < 0) { dev_err(&pdev->dev, "IRQ request failed, err = %d\n", ret); return ret; } return 0; } static int palmas_rtc_remove(struct platform_device *pdev) { palmas_rtc_alarm_irq_enable(&pdev->dev, 0); return 0; } #ifdef CONFIG_PM_SLEEP static int palmas_rtc_suspend(struct device *dev) { struct palmas_rtc *palmas_rtc = dev_get_drvdata(dev); if (device_may_wakeup(dev)) enable_irq_wake(palmas_rtc->irq); return 0; } static int palmas_rtc_resume(struct device *dev) { struct palmas_rtc *palmas_rtc = dev_get_drvdata(dev); if (device_may_wakeup(dev)) disable_irq_wake(palmas_rtc->irq); return 0; } #endif static SIMPLE_DEV_PM_OPS(palmas_rtc_pm_ops, palmas_rtc_suspend, palmas_rtc_resume); #ifdef CONFIG_OF static const struct of_device_id of_palmas_rtc_match[] = { { .compatible = "ti,palmas-rtc"}, { }, }; 
MODULE_DEVICE_TABLE(of, of_palmas_rtc_match); #endif static struct platform_driver palmas_rtc_driver = { .probe = palmas_rtc_probe, .remove = palmas_rtc_remove, .driver = { .name = "palmas-rtc", .pm = &palmas_rtc_pm_ops, .of_match_table = of_match_ptr(of_palmas_rtc_match), }, }; module_platform_driver(palmas_rtc_driver); MODULE_ALIAS("platform:palmas_rtc"); MODULE_DESCRIPTION("TI PALMAS series RTC driver"); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
mayli/unionfs-2.6.32.y
drivers/video/intelfb/intelfbhw.c
566
52213
/* * intelfb * * Linux framebuffer driver for Intel(R) 865G integrated graphics chips. * * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org> * 2004 Sylvain Meyer * * This driver consists of two parts. The first part (intelfbdrv.c) provides * the basic fbdev interfaces, is derived in part from the radeonfb and * vesafb drivers, and is covered by the GPL. The second part (intelfbhw.c) * provides the code to program the hardware. Most of it is derived from * the i810/i830 XFree86 driver. The HW-specific code is covered here * under a dual license (GPL and MIT/XFree86 license). * * Author: David Dawes * */ /* $DHD: intelfb/intelfbhw.c,v 1.9 2003/06/27 15:06:25 dawes Exp $ */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/interrupt.h> #include <asm/io.h> #include "intelfb.h" #include "intelfbhw.h" struct pll_min_max { int min_m, max_m, min_m1, max_m1; int min_m2, max_m2, min_n, max_n; int min_p, max_p, min_p1, max_p1; int min_vco, max_vco, p_transition_clk, ref_clk; int p_inc_lo, p_inc_hi; }; #define PLLS_I8xx 0 #define PLLS_I9xx 1 #define PLLS_MAX 2 static struct pll_min_max plls[PLLS_MAX] = { { 108, 140, 18, 26, 6, 16, 3, 16, 4, 128, 0, 31, 930000, 1400000, 165000, 48000, 4, 2 }, /* I8xx */ { 75, 120, 10, 20, 5, 9, 4, 7, 5, 80, 1, 8, 1400000, 2800000, 200000, 96000, 10, 5 } /* I9xx */ }; int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo) { u32 tmp; if (!pdev || !dinfo) return 1; switch (pdev->device) { case PCI_DEVICE_ID_INTEL_830M: dinfo->name = "Intel(R) 830M"; dinfo->chipset = INTEL_830M; dinfo->mobile = 1; dinfo->pll_index = PLLS_I8xx; return 0; case PCI_DEVICE_ID_INTEL_845G: dinfo->name = "Intel(R) 845G"; dinfo->chipset = INTEL_845G; dinfo->mobile 
= 0; dinfo->pll_index = PLLS_I8xx; return 0; case PCI_DEVICE_ID_INTEL_854: dinfo->mobile = 1; dinfo->name = "Intel(R) 854"; dinfo->chipset = INTEL_854; return 0; case PCI_DEVICE_ID_INTEL_85XGM: tmp = 0; dinfo->mobile = 1; dinfo->pll_index = PLLS_I8xx; pci_read_config_dword(pdev, INTEL_85X_CAPID, &tmp); switch ((tmp >> INTEL_85X_VARIANT_SHIFT) & INTEL_85X_VARIANT_MASK) { case INTEL_VAR_855GME: dinfo->name = "Intel(R) 855GME"; dinfo->chipset = INTEL_855GME; return 0; case INTEL_VAR_855GM: dinfo->name = "Intel(R) 855GM"; dinfo->chipset = INTEL_855GM; return 0; case INTEL_VAR_852GME: dinfo->name = "Intel(R) 852GME"; dinfo->chipset = INTEL_852GME; return 0; case INTEL_VAR_852GM: dinfo->name = "Intel(R) 852GM"; dinfo->chipset = INTEL_852GM; return 0; default: dinfo->name = "Intel(R) 852GM/855GM"; dinfo->chipset = INTEL_85XGM; return 0; } break; case PCI_DEVICE_ID_INTEL_865G: dinfo->name = "Intel(R) 865G"; dinfo->chipset = INTEL_865G; dinfo->mobile = 0; dinfo->pll_index = PLLS_I8xx; return 0; case PCI_DEVICE_ID_INTEL_915G: dinfo->name = "Intel(R) 915G"; dinfo->chipset = INTEL_915G; dinfo->mobile = 0; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_915GM: dinfo->name = "Intel(R) 915GM"; dinfo->chipset = INTEL_915GM; dinfo->mobile = 1; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_945G: dinfo->name = "Intel(R) 945G"; dinfo->chipset = INTEL_945G; dinfo->mobile = 0; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_945GM: dinfo->name = "Intel(R) 945GM"; dinfo->chipset = INTEL_945GM; dinfo->mobile = 1; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_945GME: dinfo->name = "Intel(R) 945GME"; dinfo->chipset = INTEL_945GME; dinfo->mobile = 1; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_965G: dinfo->name = "Intel(R) 965G"; dinfo->chipset = INTEL_965G; dinfo->mobile = 0; dinfo->pll_index = PLLS_I9xx; return 0; case PCI_DEVICE_ID_INTEL_965GM: dinfo->name = "Intel(R) 965GM"; dinfo->chipset = 
INTEL_965GM; dinfo->mobile = 1; dinfo->pll_index = PLLS_I9xx; return 0; default: return 1; } } int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size, int *stolen_size) { struct pci_dev *bridge_dev; u16 tmp; int stolen_overhead; if (!pdev || !aperture_size || !stolen_size) return 1; /* Find the bridge device. It is always 0:0.0 */ if (!(bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)))) { ERR_MSG("cannot find bridge device\n"); return 1; } /* Get the fb aperture size and "stolen" memory amount. */ tmp = 0; pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp); pci_dev_put(bridge_dev); switch (pdev->device) { case PCI_DEVICE_ID_INTEL_915G: case PCI_DEVICE_ID_INTEL_915GM: case PCI_DEVICE_ID_INTEL_945G: case PCI_DEVICE_ID_INTEL_945GM: case PCI_DEVICE_ID_INTEL_945GME: case PCI_DEVICE_ID_INTEL_965G: case PCI_DEVICE_ID_INTEL_965GM: /* 915, 945 and 965 chipsets support a 256MB aperture. Aperture size is determined by inspected the base address of the aperture. */ if (pci_resource_start(pdev, 2) & 0x08000000) *aperture_size = MB(128); else *aperture_size = MB(256); break; default: if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) *aperture_size = MB(64); else *aperture_size = MB(128); break; } /* Stolen memory size is reduced by the GTT and the popup. GTT is 1K per MB of aperture size, and popup is 4K. 
*/ stolen_overhead = (*aperture_size / MB(1)) + 4; switch(pdev->device) { case PCI_DEVICE_ID_INTEL_830M: case PCI_DEVICE_ID_INTEL_845G: switch (tmp & INTEL_830_GMCH_GMS_MASK) { case INTEL_830_GMCH_GMS_STOLEN_512: *stolen_size = KB(512) - KB(stolen_overhead); return 0; case INTEL_830_GMCH_GMS_STOLEN_1024: *stolen_size = MB(1) - KB(stolen_overhead); return 0; case INTEL_830_GMCH_GMS_STOLEN_8192: *stolen_size = MB(8) - KB(stolen_overhead); return 0; case INTEL_830_GMCH_GMS_LOCAL: ERR_MSG("only local memory found\n"); return 1; case INTEL_830_GMCH_GMS_DISABLED: ERR_MSG("video memory is disabled\n"); return 1; default: ERR_MSG("unexpected GMCH_GMS value: 0x%02x\n", tmp & INTEL_830_GMCH_GMS_MASK); return 1; } break; default: switch (tmp & INTEL_855_GMCH_GMS_MASK) { case INTEL_855_GMCH_GMS_STOLEN_1M: *stolen_size = MB(1) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_STOLEN_4M: *stolen_size = MB(4) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_STOLEN_8M: *stolen_size = MB(8) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_STOLEN_16M: *stolen_size = MB(16) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_STOLEN_32M: *stolen_size = MB(32) - KB(stolen_overhead); return 0; case INTEL_915G_GMCH_GMS_STOLEN_48M: *stolen_size = MB(48) - KB(stolen_overhead); return 0; case INTEL_915G_GMCH_GMS_STOLEN_64M: *stolen_size = MB(64) - KB(stolen_overhead); return 0; case INTEL_855_GMCH_GMS_DISABLED: ERR_MSG("video memory is disabled\n"); return 0; default: ERR_MSG("unexpected GMCH_GMS value: 0x%02x\n", tmp & INTEL_855_GMCH_GMS_MASK); return 1; } } } int intelfbhw_check_non_crt(struct intelfb_info *dinfo) { int dvo = 0; if (INREG(LVDS) & PORT_ENABLE) dvo |= LVDS_PORT; if (INREG(DVOA) & PORT_ENABLE) dvo |= DVOA_PORT; if (INREG(DVOB) & PORT_ENABLE) dvo |= DVOB_PORT; if (INREG(DVOC) & PORT_ENABLE) dvo |= DVOC_PORT; return dvo; } const char * intelfbhw_dvo_to_string(int dvo) { if (dvo & DVOA_PORT) return "DVO port A"; else if (dvo & DVOB_PORT) return "DVO 
port B"; else if (dvo & DVOC_PORT) return "DVO port C"; else if (dvo & LVDS_PORT) return "LVDS port"; else return NULL; } int intelfbhw_validate_mode(struct intelfb_info *dinfo, struct fb_var_screeninfo *var) { int bytes_per_pixel; int tmp; #if VERBOSE > 0 DBG_MSG("intelfbhw_validate_mode\n"); #endif bytes_per_pixel = var->bits_per_pixel / 8; if (bytes_per_pixel == 3) bytes_per_pixel = 4; /* Check if enough video memory. */ tmp = var->yres_virtual * var->xres_virtual * bytes_per_pixel; if (tmp > dinfo->fb.size) { WRN_MSG("Not enough video ram for mode " "(%d KByte vs %d KByte).\n", BtoKB(tmp), BtoKB(dinfo->fb.size)); return 1; } /* Check if x/y limits are OK. */ if (var->xres - 1 > HACTIVE_MASK) { WRN_MSG("X resolution too large (%d vs %d).\n", var->xres, HACTIVE_MASK + 1); return 1; } if (var->yres - 1 > VACTIVE_MASK) { WRN_MSG("Y resolution too large (%d vs %d).\n", var->yres, VACTIVE_MASK + 1); return 1; } if (var->xres < 4) { WRN_MSG("X resolution too small (%d vs 4).\n", var->xres); return 1; } if (var->yres < 4) { WRN_MSG("Y resolution too small (%d vs 4).\n", var->yres); return 1; } /* Check for doublescan modes. */ if (var->vmode & FB_VMODE_DOUBLE) { WRN_MSG("Mode is double-scan.\n"); return 1; } if ((var->vmode & FB_VMODE_INTERLACED) && (var->yres & 1)) { WRN_MSG("Odd number of lines in interlaced mode\n"); return 1; } /* Check if clock is OK. 
*/ tmp = 1000000000 / var->pixclock; if (tmp < MIN_CLOCK) { WRN_MSG("Pixel clock is too low (%d MHz vs %d MHz).\n", (tmp + 500) / 1000, MIN_CLOCK / 1000); return 1; } if (tmp > MAX_CLOCK) { WRN_MSG("Pixel clock is too high (%d MHz vs %d MHz).\n", (tmp + 500) / 1000, MAX_CLOCK / 1000); return 1; } return 0; } int intelfbhw_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct intelfb_info *dinfo = GET_DINFO(info); u32 offset, xoffset, yoffset; #if VERBOSE > 0 DBG_MSG("intelfbhw_pan_display\n"); #endif xoffset = ROUND_DOWN_TO(var->xoffset, 8); yoffset = var->yoffset; if ((xoffset + var->xres > var->xres_virtual) || (yoffset + var->yres > var->yres_virtual)) return -EINVAL; offset = (yoffset * dinfo->pitch) + (xoffset * var->bits_per_pixel) / 8; offset += dinfo->fb.offset << 12; dinfo->vsync.pan_offset = offset; if ((var->activate & FB_ACTIVATE_VBL) && !intelfbhw_enable_irq(dinfo)) dinfo->vsync.pan_display = 1; else { dinfo->vsync.pan_display = 0; OUTREG(DSPABASE, offset); } return 0; } /* Blank the screen. 
*/ void intelfbhw_do_blank(int blank, struct fb_info *info) { struct intelfb_info *dinfo = GET_DINFO(info); u32 tmp; #if VERBOSE > 0 DBG_MSG("intelfbhw_do_blank: blank is %d\n", blank); #endif /* Turn plane A on or off */ tmp = INREG(DSPACNTR); if (blank) tmp &= ~DISPPLANE_PLANE_ENABLE; else tmp |= DISPPLANE_PLANE_ENABLE; OUTREG(DSPACNTR, tmp); /* Flush */ tmp = INREG(DSPABASE); OUTREG(DSPABASE, tmp); /* Turn off/on the HW cursor */ #if VERBOSE > 0 DBG_MSG("cursor_on is %d\n", dinfo->cursor_on); #endif if (dinfo->cursor_on) { if (blank) intelfbhw_cursor_hide(dinfo); else intelfbhw_cursor_show(dinfo); dinfo->cursor_on = 1; } dinfo->cursor_blanked = blank; /* Set DPMS level */ tmp = INREG(ADPA) & ~ADPA_DPMS_CONTROL_MASK; switch (blank) { case FB_BLANK_UNBLANK: case FB_BLANK_NORMAL: tmp |= ADPA_DPMS_D0; break; case FB_BLANK_VSYNC_SUSPEND: tmp |= ADPA_DPMS_D1; break; case FB_BLANK_HSYNC_SUSPEND: tmp |= ADPA_DPMS_D2; break; case FB_BLANK_POWERDOWN: tmp |= ADPA_DPMS_D3; break; } OUTREG(ADPA, tmp); return; } void intelfbhw_setcolreg(struct intelfb_info *dinfo, unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp) { u32 palette_reg = (dinfo->pipe == PIPE_A) ? PALETTE_A : PALETTE_B; #if VERBOSE > 0 DBG_MSG("intelfbhw_setcolreg: %d: (%d, %d, %d)\n", regno, red, green, blue); #endif OUTREG(palette_reg + (regno << 2), (red << PALETTE_8_RED_SHIFT) | (green << PALETTE_8_GREEN_SHIFT) | (blue << PALETTE_8_BLUE_SHIFT)); } int intelfbhw_read_hw_state(struct intelfb_info *dinfo, struct intelfb_hwstate *hw, int flag) { int i; #if VERBOSE > 0 DBG_MSG("intelfbhw_read_hw_state\n"); #endif if (!hw || !dinfo) return -1; /* Read in as much of the HW state as possible. 
*/ hw->vga0_divisor = INREG(VGA0_DIVISOR); hw->vga1_divisor = INREG(VGA1_DIVISOR); hw->vga_pd = INREG(VGAPD); hw->dpll_a = INREG(DPLL_A); hw->dpll_b = INREG(DPLL_B); hw->fpa0 = INREG(FPA0); hw->fpa1 = INREG(FPA1); hw->fpb0 = INREG(FPB0); hw->fpb1 = INREG(FPB1); if (flag == 1) return flag; #if 0 /* This seems to be a problem with the 852GM/855GM */ for (i = 0; i < PALETTE_8_ENTRIES; i++) { hw->palette_a[i] = INREG(PALETTE_A + (i << 2)); hw->palette_b[i] = INREG(PALETTE_B + (i << 2)); } #endif if (flag == 2) return flag; hw->htotal_a = INREG(HTOTAL_A); hw->hblank_a = INREG(HBLANK_A); hw->hsync_a = INREG(HSYNC_A); hw->vtotal_a = INREG(VTOTAL_A); hw->vblank_a = INREG(VBLANK_A); hw->vsync_a = INREG(VSYNC_A); hw->src_size_a = INREG(SRC_SIZE_A); hw->bclrpat_a = INREG(BCLRPAT_A); hw->htotal_b = INREG(HTOTAL_B); hw->hblank_b = INREG(HBLANK_B); hw->hsync_b = INREG(HSYNC_B); hw->vtotal_b = INREG(VTOTAL_B); hw->vblank_b = INREG(VBLANK_B); hw->vsync_b = INREG(VSYNC_B); hw->src_size_b = INREG(SRC_SIZE_B); hw->bclrpat_b = INREG(BCLRPAT_B); if (flag == 3) return flag; hw->adpa = INREG(ADPA); hw->dvoa = INREG(DVOA); hw->dvob = INREG(DVOB); hw->dvoc = INREG(DVOC); hw->dvoa_srcdim = INREG(DVOA_SRCDIM); hw->dvob_srcdim = INREG(DVOB_SRCDIM); hw->dvoc_srcdim = INREG(DVOC_SRCDIM); hw->lvds = INREG(LVDS); if (flag == 4) return flag; hw->pipe_a_conf = INREG(PIPEACONF); hw->pipe_b_conf = INREG(PIPEBCONF); hw->disp_arb = INREG(DISPARB); if (flag == 5) return flag; hw->cursor_a_control = INREG(CURSOR_A_CONTROL); hw->cursor_b_control = INREG(CURSOR_B_CONTROL); hw->cursor_a_base = INREG(CURSOR_A_BASEADDR); hw->cursor_b_base = INREG(CURSOR_B_BASEADDR); if (flag == 6) return flag; for (i = 0; i < 4; i++) { hw->cursor_a_palette[i] = INREG(CURSOR_A_PALETTE0 + (i << 2)); hw->cursor_b_palette[i] = INREG(CURSOR_B_PALETTE0 + (i << 2)); } if (flag == 7) return flag; hw->cursor_size = INREG(CURSOR_SIZE); if (flag == 8) return flag; hw->disp_a_ctrl = INREG(DSPACNTR); hw->disp_b_ctrl = INREG(DSPBCNTR); 
hw->disp_a_base = INREG(DSPABASE); hw->disp_b_base = INREG(DSPBBASE); hw->disp_a_stride = INREG(DSPASTRIDE); hw->disp_b_stride = INREG(DSPBSTRIDE); if (flag == 9) return flag; hw->vgacntrl = INREG(VGACNTRL); if (flag == 10) return flag; hw->add_id = INREG(ADD_ID); if (flag == 11) return flag; for (i = 0; i < 7; i++) { hw->swf0x[i] = INREG(SWF00 + (i << 2)); hw->swf1x[i] = INREG(SWF10 + (i << 2)); if (i < 3) hw->swf3x[i] = INREG(SWF30 + (i << 2)); } for (i = 0; i < 8; i++) hw->fence[i] = INREG(FENCE + (i << 2)); hw->instpm = INREG(INSTPM); hw->mem_mode = INREG(MEM_MODE); hw->fw_blc_0 = INREG(FW_BLC_0); hw->fw_blc_1 = INREG(FW_BLC_1); hw->hwstam = INREG16(HWSTAM); hw->ier = INREG16(IER); hw->iir = INREG16(IIR); hw->imr = INREG16(IMR); return 0; } static int calc_vclock3(int index, int m, int n, int p) { if (p == 0 || n == 0) return 0; return plls[index].ref_clk * m / n / p; } static int calc_vclock(int index, int m1, int m2, int n, int p1, int p2, int lvds) { struct pll_min_max *pll = &plls[index]; u32 m, vco, p; m = (5 * (m1 + 2)) + (m2 + 2); n += 2; vco = pll->ref_clk * m / n; if (index == PLLS_I8xx) p = ((p1 + 2) * (1 << (p2 + 1))); else p = ((p1) * (p2 ? 5 : 10)); return vco / p; } #if REGDUMP static void intelfbhw_get_p1p2(struct intelfb_info *dinfo, int dpll, int *o_p1, int *o_p2) { int p1, p2; if (IS_I9XX(dinfo)) { if (dpll & DPLL_P1_FORCE_DIV2) p1 = 1; else p1 = (dpll >> DPLL_P1_SHIFT) & 0xff; p1 = ffs(p1); p2 = (dpll >> DPLL_I9XX_P2_SHIFT) & DPLL_P2_MASK; } else { if (dpll & DPLL_P1_FORCE_DIV2) p1 = 0; else p1 = (dpll >> DPLL_P1_SHIFT) & DPLL_P1_MASK; p2 = (dpll >> DPLL_P2_SHIFT) & DPLL_P2_MASK; } *o_p1 = p1; *o_p2 = p2; } #endif void intelfbhw_print_hw_state(struct intelfb_info *dinfo, struct intelfb_hwstate *hw) { #if REGDUMP int i, m1, m2, n, p1, p2; int index = dinfo->pll_index; DBG_MSG("intelfbhw_print_hw_state\n"); if (!hw) return; /* Read in as much of the HW state as possible. 
*/ printk("hw state dump start\n"); printk(" VGA0_DIVISOR: 0x%08x\n", hw->vga0_divisor); printk(" VGA1_DIVISOR: 0x%08x\n", hw->vga1_divisor); printk(" VGAPD: 0x%08x\n", hw->vga_pd); n = (hw->vga0_divisor >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m1 = (hw->vga0_divisor >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m2 = (hw->vga0_divisor >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK; intelfbhw_get_p1p2(dinfo, hw->vga_pd, &p1, &p2); printk(" VGA0: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n", m1, m2, n, p1, p2); printk(" VGA0: clock is %d\n", calc_vclock(index, m1, m2, n, p1, p2, 0)); n = (hw->vga1_divisor >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m1 = (hw->vga1_divisor >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m2 = (hw->vga1_divisor >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK; intelfbhw_get_p1p2(dinfo, hw->vga_pd, &p1, &p2); printk(" VGA1: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n", m1, m2, n, p1, p2); printk(" VGA1: clock is %d\n", calc_vclock(index, m1, m2, n, p1, p2, 0)); printk(" DPLL_A: 0x%08x\n", hw->dpll_a); printk(" DPLL_B: 0x%08x\n", hw->dpll_b); printk(" FPA0: 0x%08x\n", hw->fpa0); printk(" FPA1: 0x%08x\n", hw->fpa1); printk(" FPB0: 0x%08x\n", hw->fpb0); printk(" FPB1: 0x%08x\n", hw->fpb1); n = (hw->fpa0 >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m1 = (hw->fpa0 >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m2 = (hw->fpa0 >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK; intelfbhw_get_p1p2(dinfo, hw->dpll_a, &p1, &p2); printk(" PLLA0: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n", m1, m2, n, p1, p2); printk(" PLLA0: clock is %d\n", calc_vclock(index, m1, m2, n, p1, p2, 0)); n = (hw->fpa1 >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m1 = (hw->fpa1 >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK; m2 = (hw->fpa1 >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK; intelfbhw_get_p1p2(dinfo, hw->dpll_a, &p1, &p2); printk(" PLLA1: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n", m1, m2, n, p1, p2); printk(" PLLA1: clock is %d\n", calc_vclock(index, m1, m2, n, p1, p2, 0)); #if 0 printk(" 
PALETTE_A:\n"); for (i = 0; i < PALETTE_8_ENTRIES) printk(" %3d: 0x%08x\n", i, hw->palette_a[i]); printk(" PALETTE_B:\n"); for (i = 0; i < PALETTE_8_ENTRIES) printk(" %3d: 0x%08x\n", i, hw->palette_b[i]); #endif printk(" HTOTAL_A: 0x%08x\n", hw->htotal_a); printk(" HBLANK_A: 0x%08x\n", hw->hblank_a); printk(" HSYNC_A: 0x%08x\n", hw->hsync_a); printk(" VTOTAL_A: 0x%08x\n", hw->vtotal_a); printk(" VBLANK_A: 0x%08x\n", hw->vblank_a); printk(" VSYNC_A: 0x%08x\n", hw->vsync_a); printk(" SRC_SIZE_A: 0x%08x\n", hw->src_size_a); printk(" BCLRPAT_A: 0x%08x\n", hw->bclrpat_a); printk(" HTOTAL_B: 0x%08x\n", hw->htotal_b); printk(" HBLANK_B: 0x%08x\n", hw->hblank_b); printk(" HSYNC_B: 0x%08x\n", hw->hsync_b); printk(" VTOTAL_B: 0x%08x\n", hw->vtotal_b); printk(" VBLANK_B: 0x%08x\n", hw->vblank_b); printk(" VSYNC_B: 0x%08x\n", hw->vsync_b); printk(" SRC_SIZE_B: 0x%08x\n", hw->src_size_b); printk(" BCLRPAT_B: 0x%08x\n", hw->bclrpat_b); printk(" ADPA: 0x%08x\n", hw->adpa); printk(" DVOA: 0x%08x\n", hw->dvoa); printk(" DVOB: 0x%08x\n", hw->dvob); printk(" DVOC: 0x%08x\n", hw->dvoc); printk(" DVOA_SRCDIM: 0x%08x\n", hw->dvoa_srcdim); printk(" DVOB_SRCDIM: 0x%08x\n", hw->dvob_srcdim); printk(" DVOC_SRCDIM: 0x%08x\n", hw->dvoc_srcdim); printk(" LVDS: 0x%08x\n", hw->lvds); printk(" PIPEACONF: 0x%08x\n", hw->pipe_a_conf); printk(" PIPEBCONF: 0x%08x\n", hw->pipe_b_conf); printk(" DISPARB: 0x%08x\n", hw->disp_arb); printk(" CURSOR_A_CONTROL: 0x%08x\n", hw->cursor_a_control); printk(" CURSOR_B_CONTROL: 0x%08x\n", hw->cursor_b_control); printk(" CURSOR_A_BASEADDR: 0x%08x\n", hw->cursor_a_base); printk(" CURSOR_B_BASEADDR: 0x%08x\n", hw->cursor_b_base); printk(" CURSOR_A_PALETTE: "); for (i = 0; i < 4; i++) { printk("0x%08x", hw->cursor_a_palette[i]); if (i < 3) printk(", "); } printk("\n"); printk(" CURSOR_B_PALETTE: "); for (i = 0; i < 4; i++) { printk("0x%08x", hw->cursor_b_palette[i]); if (i < 3) printk(", "); } printk("\n"); printk(" CURSOR_SIZE: 0x%08x\n", hw->cursor_size); printk(" 
DSPACNTR: 0x%08x\n", hw->disp_a_ctrl); printk(" DSPBCNTR: 0x%08x\n", hw->disp_b_ctrl); printk(" DSPABASE: 0x%08x\n", hw->disp_a_base); printk(" DSPBBASE: 0x%08x\n", hw->disp_b_base); printk(" DSPASTRIDE: 0x%08x\n", hw->disp_a_stride); printk(" DSPBSTRIDE: 0x%08x\n", hw->disp_b_stride); printk(" VGACNTRL: 0x%08x\n", hw->vgacntrl); printk(" ADD_ID: 0x%08x\n", hw->add_id); for (i = 0; i < 7; i++) { printk(" SWF0%d 0x%08x\n", i, hw->swf0x[i]); } for (i = 0; i < 7; i++) { printk(" SWF1%d 0x%08x\n", i, hw->swf1x[i]); } for (i = 0; i < 3; i++) { printk(" SWF3%d 0x%08x\n", i, hw->swf3x[i]); } for (i = 0; i < 8; i++) printk(" FENCE%d 0x%08x\n", i, hw->fence[i]); printk(" INSTPM 0x%08x\n", hw->instpm); printk(" MEM_MODE 0x%08x\n", hw->mem_mode); printk(" FW_BLC_0 0x%08x\n", hw->fw_blc_0); printk(" FW_BLC_1 0x%08x\n", hw->fw_blc_1); printk(" HWSTAM 0x%04x\n", hw->hwstam); printk(" IER 0x%04x\n", hw->ier); printk(" IIR 0x%04x\n", hw->iir); printk(" IMR 0x%04x\n", hw->imr); printk("hw state dump end\n"); #endif } /* Split the M parameter into M1 and M2. */ static int splitm(int index, unsigned int m, unsigned int *retm1, unsigned int *retm2) { int m1, m2; int testm; struct pll_min_max *pll = &plls[index]; /* no point optimising too much - brute force m */ for (m1 = pll->min_m1; m1 < pll->max_m1 + 1; m1++) { for (m2 = pll->min_m2; m2 < pll->max_m2 + 1; m2++) { testm = (5 * (m1 + 2)) + (m2 + 2); if (testm == m) { *retm1 = (unsigned int)m1; *retm2 = (unsigned int)m2; return 0; } } } return 1; } /* Split the P parameter into P1 and P2. */ static int splitp(int index, unsigned int p, unsigned int *retp1, unsigned int *retp2) { int p1, p2; struct pll_min_max *pll = &plls[index]; if (index == PLLS_I9xx) { p2 = (p % 10) ? 1 : 0; p1 = p / (p2 ? 
5 : 10); *retp1 = (unsigned int)p1; *retp2 = (unsigned int)p2; return 0; } if (p % 4 == 0) p2 = 1; else p2 = 0; p1 = (p / (1 << (p2 + 1))) - 2; if (p % 4 == 0 && p1 < pll->min_p1) { p2 = 0; p1 = (p / (1 << (p2 + 1))) - 2; } if (p1 < pll->min_p1 || p1 > pll->max_p1 || (p1 + 2) * (1 << (p2 + 1)) != p) { return 1; } else { *retp1 = (unsigned int)p1; *retp2 = (unsigned int)p2; return 0; } } static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2, u32 *retn, u32 *retp1, u32 *retp2, u32 *retclock) { u32 m1, m2, n, p1, p2, n1, testm; u32 f_vco, p, p_best = 0, m, f_out = 0; u32 err_max, err_target, err_best = 10000000; u32 n_best = 0, m_best = 0, f_best, f_err; u32 p_min, p_max, p_inc, div_max; struct pll_min_max *pll = &plls[index]; /* Accept 0.5% difference, but aim for 0.1% */ err_max = 5 * clock / 1000; err_target = clock / 1000; DBG_MSG("Clock is %d\n", clock); div_max = pll->max_vco / clock; p_inc = (clock <= pll->p_transition_clk) ? pll->p_inc_lo : pll->p_inc_hi; p_min = p_inc; p_max = ROUND_DOWN_TO(div_max, p_inc); if (p_min < pll->min_p) p_min = pll->min_p; if (p_max > pll->max_p) p_max = pll->max_p; DBG_MSG("p range is %d-%d (%d)\n", p_min, p_max, p_inc); p = p_min; do { if (splitp(index, p, &p1, &p2)) { WRN_MSG("cannot split p = %d\n", p); p += p_inc; continue; } n = pll->min_n; f_vco = clock * p; do { m = ROUND_UP_TO(f_vco * n, pll->ref_clk) / pll->ref_clk; if (m < pll->min_m) m = pll->min_m + 1; if (m > pll->max_m) m = pll->max_m - 1; for (testm = m - 1; testm <= m; testm++) { f_out = calc_vclock3(index, testm, n, p); if (splitm(index, testm, &m1, &m2)) { WRN_MSG("cannot split m = %d\n", testm); continue; } if (clock > f_out) f_err = clock - f_out; else/* slightly bias the error for bigger clocks */ f_err = f_out - clock + 1; if (f_err < err_best) { m_best = testm; n_best = n; p_best = p; f_best = f_out; err_best = f_err; } } n++; } while ((n <= pll->max_n) && (f_out >= clock)); p += p_inc; } while ((p <= p_max)); if (!m_best) { WRN_MSG("cannot 
find parameters for clock %d\n", clock); return 1; } m = m_best; n = n_best; p = p_best; splitm(index, m, &m1, &m2); splitp(index, p, &p1, &p2); n1 = n - 2; DBG_MSG("m, n, p: %d (%d,%d), %d (%d), %d (%d,%d), " "f: %d (%d), VCO: %d\n", m, m1, m2, n, n1, p, p1, p2, calc_vclock3(index, m, n, p), calc_vclock(index, m1, m2, n1, p1, p2, 0), calc_vclock3(index, m, n, p) * p); *retm1 = m1; *retm2 = m2; *retn = n1; *retp1 = p1; *retp2 = p2; *retclock = calc_vclock(index, m1, m2, n1, p1, p2, 0); return 0; } static __inline__ int check_overflow(u32 value, u32 limit, const char *description) { if (value > limit) { WRN_MSG("%s value %d exceeds limit %d\n", description, value, limit); return 1; } return 0; } /* It is assumed that hw is filled in with the initial state information. */ int intelfbhw_mode_to_hw(struct intelfb_info *dinfo, struct intelfb_hwstate *hw, struct fb_var_screeninfo *var) { int pipe = PIPE_A; u32 *dpll, *fp0, *fp1; u32 m1, m2, n, p1, p2, clock_target, clock; u32 hsync_start, hsync_end, hblank_start, hblank_end, htotal, hactive; u32 vsync_start, vsync_end, vblank_start, vblank_end, vtotal, vactive; u32 vsync_pol, hsync_pol; u32 *vs, *vb, *vt, *hs, *hb, *ht, *ss, *pipe_conf; u32 stride_alignment; DBG_MSG("intelfbhw_mode_to_hw\n"); /* Disable VGA */ hw->vgacntrl |= VGA_DISABLE; /* Check whether pipe A or pipe B is enabled. */ if (hw->pipe_a_conf & PIPECONF_ENABLE) pipe = PIPE_A; else if (hw->pipe_b_conf & PIPECONF_ENABLE) pipe = PIPE_B; /* Set which pipe's registers will be set. 
*/ if (pipe == PIPE_B) { dpll = &hw->dpll_b; fp0 = &hw->fpb0; fp1 = &hw->fpb1; hs = &hw->hsync_b; hb = &hw->hblank_b; ht = &hw->htotal_b; vs = &hw->vsync_b; vb = &hw->vblank_b; vt = &hw->vtotal_b; ss = &hw->src_size_b; pipe_conf = &hw->pipe_b_conf; } else { dpll = &hw->dpll_a; fp0 = &hw->fpa0; fp1 = &hw->fpa1; hs = &hw->hsync_a; hb = &hw->hblank_a; ht = &hw->htotal_a; vs = &hw->vsync_a; vb = &hw->vblank_a; vt = &hw->vtotal_a; ss = &hw->src_size_a; pipe_conf = &hw->pipe_a_conf; } /* Use ADPA register for sync control. */ hw->adpa &= ~ADPA_USE_VGA_HVPOLARITY; /* sync polarity */ hsync_pol = (var->sync & FB_SYNC_HOR_HIGH_ACT) ? ADPA_SYNC_ACTIVE_HIGH : ADPA_SYNC_ACTIVE_LOW; vsync_pol = (var->sync & FB_SYNC_VERT_HIGH_ACT) ? ADPA_SYNC_ACTIVE_HIGH : ADPA_SYNC_ACTIVE_LOW; hw->adpa &= ~((ADPA_SYNC_ACTIVE_MASK << ADPA_VSYNC_ACTIVE_SHIFT) | (ADPA_SYNC_ACTIVE_MASK << ADPA_HSYNC_ACTIVE_SHIFT)); hw->adpa |= (hsync_pol << ADPA_HSYNC_ACTIVE_SHIFT) | (vsync_pol << ADPA_VSYNC_ACTIVE_SHIFT); /* Connect correct pipe to the analog port DAC */ hw->adpa &= ~(PIPE_MASK << ADPA_PIPE_SELECT_SHIFT); hw->adpa |= (pipe << ADPA_PIPE_SELECT_SHIFT); /* Set DPMS state to D0 (on) */ hw->adpa &= ~ADPA_DPMS_CONTROL_MASK; hw->adpa |= ADPA_DPMS_D0; hw->adpa |= ADPA_DAC_ENABLE; *dpll |= (DPLL_VCO_ENABLE | DPLL_VGA_MODE_DISABLE); *dpll &= ~(DPLL_RATE_SELECT_MASK | DPLL_REFERENCE_SELECT_MASK); *dpll |= (DPLL_REFERENCE_DEFAULT | DPLL_RATE_SELECT_FP0); /* Desired clock in kHz */ clock_target = 1000000000 / var->pixclock; if (calc_pll_params(dinfo->pll_index, clock_target, &m1, &m2, &n, &p1, &p2, &clock)) { WRN_MSG("calc_pll_params failed\n"); return 1; } /* Check for overflow. 
*/ if (check_overflow(p1, DPLL_P1_MASK, "PLL P1 parameter")) return 1; if (check_overflow(p2, DPLL_P2_MASK, "PLL P2 parameter")) return 1; if (check_overflow(m1, FP_DIVISOR_MASK, "PLL M1 parameter")) return 1; if (check_overflow(m2, FP_DIVISOR_MASK, "PLL M2 parameter")) return 1; if (check_overflow(n, FP_DIVISOR_MASK, "PLL N parameter")) return 1; *dpll &= ~DPLL_P1_FORCE_DIV2; *dpll &= ~((DPLL_P2_MASK << DPLL_P2_SHIFT) | (DPLL_P1_MASK << DPLL_P1_SHIFT)); if (IS_I9XX(dinfo)) { *dpll |= (p2 << DPLL_I9XX_P2_SHIFT); *dpll |= (1 << (p1 - 1)) << DPLL_P1_SHIFT; } else *dpll |= (p2 << DPLL_P2_SHIFT) | (p1 << DPLL_P1_SHIFT); *fp0 = (n << FP_N_DIVISOR_SHIFT) | (m1 << FP_M1_DIVISOR_SHIFT) | (m2 << FP_M2_DIVISOR_SHIFT); *fp1 = *fp0; hw->dvob &= ~PORT_ENABLE; hw->dvoc &= ~PORT_ENABLE; /* Use display plane A. */ hw->disp_a_ctrl |= DISPPLANE_PLANE_ENABLE; hw->disp_a_ctrl &= ~DISPPLANE_GAMMA_ENABLE; hw->disp_a_ctrl &= ~DISPPLANE_PIXFORMAT_MASK; switch (intelfb_var_to_depth(var)) { case 8: hw->disp_a_ctrl |= DISPPLANE_8BPP | DISPPLANE_GAMMA_ENABLE; break; case 15: hw->disp_a_ctrl |= DISPPLANE_15_16BPP; break; case 16: hw->disp_a_ctrl |= DISPPLANE_16BPP; break; case 24: hw->disp_a_ctrl |= DISPPLANE_32BPP_NO_ALPHA; break; } hw->disp_a_ctrl &= ~(PIPE_MASK << DISPPLANE_SEL_PIPE_SHIFT); hw->disp_a_ctrl |= (pipe << DISPPLANE_SEL_PIPE_SHIFT); /* Set CRTC registers. 
*/ hactive = var->xres; hsync_start = hactive + var->right_margin; hsync_end = hsync_start + var->hsync_len; htotal = hsync_end + var->left_margin; hblank_start = hactive; hblank_end = htotal; DBG_MSG("H: act %d, ss %d, se %d, tot %d bs %d, be %d\n", hactive, hsync_start, hsync_end, htotal, hblank_start, hblank_end); vactive = var->yres; if (var->vmode & FB_VMODE_INTERLACED) vactive--; /* the chip adds 2 halflines automatically */ vsync_start = vactive + var->lower_margin; vsync_end = vsync_start + var->vsync_len; vtotal = vsync_end + var->upper_margin; vblank_start = vactive; vblank_end = vtotal; vblank_end = vsync_end + 1; DBG_MSG("V: act %d, ss %d, se %d, tot %d bs %d, be %d\n", vactive, vsync_start, vsync_end, vtotal, vblank_start, vblank_end); /* Adjust for register values, and check for overflow. */ hactive--; if (check_overflow(hactive, HACTIVE_MASK, "CRTC hactive")) return 1; hsync_start--; if (check_overflow(hsync_start, HSYNCSTART_MASK, "CRTC hsync_start")) return 1; hsync_end--; if (check_overflow(hsync_end, HSYNCEND_MASK, "CRTC hsync_end")) return 1; htotal--; if (check_overflow(htotal, HTOTAL_MASK, "CRTC htotal")) return 1; hblank_start--; if (check_overflow(hblank_start, HBLANKSTART_MASK, "CRTC hblank_start")) return 1; hblank_end--; if (check_overflow(hblank_end, HBLANKEND_MASK, "CRTC hblank_end")) return 1; vactive--; if (check_overflow(vactive, VACTIVE_MASK, "CRTC vactive")) return 1; vsync_start--; if (check_overflow(vsync_start, VSYNCSTART_MASK, "CRTC vsync_start")) return 1; vsync_end--; if (check_overflow(vsync_end, VSYNCEND_MASK, "CRTC vsync_end")) return 1; vtotal--; if (check_overflow(vtotal, VTOTAL_MASK, "CRTC vtotal")) return 1; vblank_start--; if (check_overflow(vblank_start, VBLANKSTART_MASK, "CRTC vblank_start")) return 1; vblank_end--; if (check_overflow(vblank_end, VBLANKEND_MASK, "CRTC vblank_end")) return 1; *ht = (htotal << HTOTAL_SHIFT) | (hactive << HACTIVE_SHIFT); *hb = (hblank_start << HBLANKSTART_SHIFT) | (hblank_end << 
HSYNCEND_SHIFT); *hs = (hsync_start << HSYNCSTART_SHIFT) | (hsync_end << HSYNCEND_SHIFT); *vt = (vtotal << VTOTAL_SHIFT) | (vactive << VACTIVE_SHIFT); *vb = (vblank_start << VBLANKSTART_SHIFT) | (vblank_end << VSYNCEND_SHIFT); *vs = (vsync_start << VSYNCSTART_SHIFT) | (vsync_end << VSYNCEND_SHIFT); *ss = (hactive << SRC_SIZE_HORIZ_SHIFT) | (vactive << SRC_SIZE_VERT_SHIFT); hw->disp_a_stride = dinfo->pitch; DBG_MSG("pitch is %d\n", hw->disp_a_stride); hw->disp_a_base = hw->disp_a_stride * var->yoffset + var->xoffset * var->bits_per_pixel / 8; hw->disp_a_base += dinfo->fb.offset << 12; /* Check stride alignment. */ stride_alignment = IS_I9XX(dinfo) ? STRIDE_ALIGNMENT_I9XX : STRIDE_ALIGNMENT; if (hw->disp_a_stride % stride_alignment != 0) { WRN_MSG("display stride %d has bad alignment %d\n", hw->disp_a_stride, stride_alignment); return 1; } /* Set the palette to 8-bit mode. */ *pipe_conf &= ~PIPECONF_GAMMA; if (var->vmode & FB_VMODE_INTERLACED) *pipe_conf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else *pipe_conf &= ~PIPECONF_INTERLACE_MASK; return 0; } /* Program a (non-VGA) video mode. */ int intelfbhw_program_mode(struct intelfb_info *dinfo, const struct intelfb_hwstate *hw, int blank) { int pipe = PIPE_A; u32 tmp; const u32 *dpll, *fp0, *fp1, *pipe_conf; const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss; u32 dpll_reg, fp0_reg, fp1_reg, pipe_conf_reg, pipe_stat_reg; u32 hsync_reg, htotal_reg, hblank_reg; u32 vsync_reg, vtotal_reg, vblank_reg; u32 src_size_reg; u32 count, tmp_val[3]; /* Assume single pipe, display plane A, analog CRT. */ #if VERBOSE > 0 DBG_MSG("intelfbhw_program_mode\n"); #endif /* Disable VGA */ tmp = INREG(VGACNTRL); tmp |= VGA_DISABLE; OUTREG(VGACNTRL, tmp); /* Check whether pipe A or pipe B is enabled. 
*/ if (hw->pipe_a_conf & PIPECONF_ENABLE) pipe = PIPE_A; else if (hw->pipe_b_conf & PIPECONF_ENABLE) pipe = PIPE_B; dinfo->pipe = pipe; if (pipe == PIPE_B) { dpll = &hw->dpll_b; fp0 = &hw->fpb0; fp1 = &hw->fpb1; pipe_conf = &hw->pipe_b_conf; hs = &hw->hsync_b; hb = &hw->hblank_b; ht = &hw->htotal_b; vs = &hw->vsync_b; vb = &hw->vblank_b; vt = &hw->vtotal_b; ss = &hw->src_size_b; dpll_reg = DPLL_B; fp0_reg = FPB0; fp1_reg = FPB1; pipe_conf_reg = PIPEBCONF; pipe_stat_reg = PIPEBSTAT; hsync_reg = HSYNC_B; htotal_reg = HTOTAL_B; hblank_reg = HBLANK_B; vsync_reg = VSYNC_B; vtotal_reg = VTOTAL_B; vblank_reg = VBLANK_B; src_size_reg = SRC_SIZE_B; } else { dpll = &hw->dpll_a; fp0 = &hw->fpa0; fp1 = &hw->fpa1; pipe_conf = &hw->pipe_a_conf; hs = &hw->hsync_a; hb = &hw->hblank_a; ht = &hw->htotal_a; vs = &hw->vsync_a; vb = &hw->vblank_a; vt = &hw->vtotal_a; ss = &hw->src_size_a; dpll_reg = DPLL_A; fp0_reg = FPA0; fp1_reg = FPA1; pipe_conf_reg = PIPEACONF; pipe_stat_reg = PIPEASTAT; hsync_reg = HSYNC_A; htotal_reg = HTOTAL_A; hblank_reg = HBLANK_A; vsync_reg = VSYNC_A; vtotal_reg = VTOTAL_A; vblank_reg = VBLANK_A; src_size_reg = SRC_SIZE_A; } /* turn off pipe */ tmp = INREG(pipe_conf_reg); tmp &= ~PIPECONF_ENABLE; OUTREG(pipe_conf_reg, tmp); count = 0; do { tmp_val[count % 3] = INREG(PIPEA_DSL); if ((tmp_val[0] == tmp_val[1]) && (tmp_val[1] == tmp_val[2])) break; count++; udelay(1); if (count % 200 == 0) { tmp = INREG(pipe_conf_reg); tmp &= ~PIPECONF_ENABLE; OUTREG(pipe_conf_reg, tmp); } } while (count < 2000); OUTREG(ADPA, INREG(ADPA) & ~ADPA_DAC_ENABLE); /* Disable planes A and B. */ tmp = INREG(DSPACNTR); tmp &= ~DISPPLANE_PLANE_ENABLE; OUTREG(DSPACNTR, tmp); tmp = INREG(DSPBCNTR); tmp &= ~DISPPLANE_PLANE_ENABLE; OUTREG(DSPBCNTR, tmp); /* Wait for vblank. 
For now, just wait for a 50Hz cycle (20ms)) */ mdelay(20); OUTREG(DVOB, INREG(DVOB) & ~PORT_ENABLE); OUTREG(DVOC, INREG(DVOC) & ~PORT_ENABLE); OUTREG(ADPA, INREG(ADPA) & ~ADPA_DAC_ENABLE); /* Disable Sync */ tmp = INREG(ADPA); tmp &= ~ADPA_DPMS_CONTROL_MASK; tmp |= ADPA_DPMS_D3; OUTREG(ADPA, tmp); /* do some funky magic - xyzzy */ OUTREG(0x61204, 0xabcd0000); /* turn off PLL */ tmp = INREG(dpll_reg); tmp &= ~DPLL_VCO_ENABLE; OUTREG(dpll_reg, tmp); /* Set PLL parameters */ OUTREG(fp0_reg, *fp0); OUTREG(fp1_reg, *fp1); /* Enable PLL */ OUTREG(dpll_reg, *dpll); /* Set DVOs B/C */ OUTREG(DVOB, hw->dvob); OUTREG(DVOC, hw->dvoc); /* undo funky magic */ OUTREG(0x61204, 0x00000000); /* Set ADPA */ OUTREG(ADPA, INREG(ADPA) | ADPA_DAC_ENABLE); OUTREG(ADPA, (hw->adpa & ~(ADPA_DPMS_CONTROL_MASK)) | ADPA_DPMS_D3); /* Set pipe parameters */ OUTREG(hsync_reg, *hs); OUTREG(hblank_reg, *hb); OUTREG(htotal_reg, *ht); OUTREG(vsync_reg, *vs); OUTREG(vblank_reg, *vb); OUTREG(vtotal_reg, *vt); OUTREG(src_size_reg, *ss); switch (dinfo->info->var.vmode & (FB_VMODE_INTERLACED | FB_VMODE_ODD_FLD_FIRST)) { case FB_VMODE_INTERLACED | FB_VMODE_ODD_FLD_FIRST: OUTREG(pipe_stat_reg, 0xFFFF | PIPESTAT_FLD_EVT_ODD_EN); break; case FB_VMODE_INTERLACED: /* even lines first */ OUTREG(pipe_stat_reg, 0xFFFF | PIPESTAT_FLD_EVT_EVEN_EN); break; default: /* non-interlaced */ OUTREG(pipe_stat_reg, 0xFFFF); /* clear all status bits only */ } /* Enable pipe */ OUTREG(pipe_conf_reg, *pipe_conf | PIPECONF_ENABLE); /* Enable sync */ tmp = INREG(ADPA); tmp &= ~ADPA_DPMS_CONTROL_MASK; tmp |= ADPA_DPMS_D0; OUTREG(ADPA, tmp); /* setup display plane */ if (dinfo->pdev->device == PCI_DEVICE_ID_INTEL_830M) { /* * i830M errata: the display plane must be enabled * to allow writes to the other bits in the plane * control register. 
*/ tmp = INREG(DSPACNTR); if ((tmp & DISPPLANE_PLANE_ENABLE) != DISPPLANE_PLANE_ENABLE) { tmp |= DISPPLANE_PLANE_ENABLE; OUTREG(DSPACNTR, tmp); OUTREG(DSPACNTR, hw->disp_a_ctrl|DISPPLANE_PLANE_ENABLE); mdelay(1); } } OUTREG(DSPACNTR, hw->disp_a_ctrl & ~DISPPLANE_PLANE_ENABLE); OUTREG(DSPASTRIDE, hw->disp_a_stride); OUTREG(DSPABASE, hw->disp_a_base); /* Enable plane */ if (!blank) { tmp = INREG(DSPACNTR); tmp |= DISPPLANE_PLANE_ENABLE; OUTREG(DSPACNTR, tmp); OUTREG(DSPABASE, hw->disp_a_base); } return 0; } /* forward declarations */ static void refresh_ring(struct intelfb_info *dinfo); static void reset_state(struct intelfb_info *dinfo); static void do_flush(struct intelfb_info *dinfo); static u32 get_ring_space(struct intelfb_info *dinfo) { u32 ring_space; if (dinfo->ring_tail >= dinfo->ring_head) ring_space = dinfo->ring.size - (dinfo->ring_tail - dinfo->ring_head); else ring_space = dinfo->ring_head - dinfo->ring_tail; if (ring_space > RING_MIN_FREE) ring_space -= RING_MIN_FREE; else ring_space = 0; return ring_space; } static int wait_ring(struct intelfb_info *dinfo, int n) { int i = 0; unsigned long end; u32 last_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK; #if VERBOSE > 0 DBG_MSG("wait_ring: %d\n", n); #endif end = jiffies + (HZ * 3); while (dinfo->ring_space < n) { dinfo->ring_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK; dinfo->ring_space = get_ring_space(dinfo); if (dinfo->ring_head != last_head) { end = jiffies + (HZ * 3); last_head = dinfo->ring_head; } i++; if (time_before(end, jiffies)) { if (!i) { /* Try again */ reset_state(dinfo); refresh_ring(dinfo); do_flush(dinfo); end = jiffies + (HZ * 3); i = 1; } else { WRN_MSG("ring buffer : space: %d wanted %d\n", dinfo->ring_space, n); WRN_MSG("lockup - turning off hardware " "acceleration\n"); dinfo->ring_lockup = 1; break; } } udelay(1); } return i; } static void do_flush(struct intelfb_info *dinfo) { START_RING(2); OUT_RING(MI_FLUSH | MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE); OUT_RING(MI_NOOP); 
ADVANCE_RING(); } void intelfbhw_do_sync(struct intelfb_info *dinfo) { #if VERBOSE > 0 DBG_MSG("intelfbhw_do_sync\n"); #endif if (!dinfo->accel) return; /* * Send a flush, then wait until the ring is empty. This is what * the XFree86 driver does, and actually it doesn't seem a lot worse * than the recommended method (both have problems). */ do_flush(dinfo); wait_ring(dinfo, dinfo->ring.size - RING_MIN_FREE); dinfo->ring_space = dinfo->ring.size - RING_MIN_FREE; } static void refresh_ring(struct intelfb_info *dinfo) { #if VERBOSE > 0 DBG_MSG("refresh_ring\n"); #endif dinfo->ring_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK; dinfo->ring_tail = INREG(PRI_RING_TAIL) & RING_TAIL_MASK; dinfo->ring_space = get_ring_space(dinfo); } static void reset_state(struct intelfb_info *dinfo) { int i; u32 tmp; #if VERBOSE > 0 DBG_MSG("reset_state\n"); #endif for (i = 0; i < FENCE_NUM; i++) OUTREG(FENCE + (i << 2), 0); /* Flush the ring buffer if it's enabled. */ tmp = INREG(PRI_RING_LENGTH); if (tmp & RING_ENABLE) { #if VERBOSE > 0 DBG_MSG("reset_state: ring was enabled\n"); #endif refresh_ring(dinfo); intelfbhw_do_sync(dinfo); DO_RING_IDLE(); } OUTREG(PRI_RING_LENGTH, 0); OUTREG(PRI_RING_HEAD, 0); OUTREG(PRI_RING_TAIL, 0); OUTREG(PRI_RING_START, 0); } /* Stop the 2D engine, and turn off the ring buffer. */ void intelfbhw_2d_stop(struct intelfb_info *dinfo) { #if VERBOSE > 0 DBG_MSG("intelfbhw_2d_stop: accel: %d, ring_active: %d\n", dinfo->accel, dinfo->ring_active); #endif if (!dinfo->accel) return; dinfo->ring_active = 0; reset_state(dinfo); } /* * Enable the ring buffer, and initialise the 2D engine. * It is assumed that the graphics engine has been stopped by previously * calling intelfb_2d_stop(). */ void intelfbhw_2d_start(struct intelfb_info *dinfo) { #if VERBOSE > 0 DBG_MSG("intelfbhw_2d_start: accel: %d, ring_active: %d\n", dinfo->accel, dinfo->ring_active); #endif if (!dinfo->accel) return; /* Initialise the primary ring buffer. 
*/ OUTREG(PRI_RING_LENGTH, 0); OUTREG(PRI_RING_TAIL, 0); OUTREG(PRI_RING_HEAD, 0); OUTREG(PRI_RING_START, dinfo->ring.physical & RING_START_MASK); OUTREG(PRI_RING_LENGTH, ((dinfo->ring.size - GTT_PAGE_SIZE) & RING_LENGTH_MASK) | RING_NO_REPORT | RING_ENABLE); refresh_ring(dinfo); dinfo->ring_active = 1; } /* 2D fillrect (solid fill or invert) */ void intelfbhw_do_fillrect(struct intelfb_info *dinfo, u32 x, u32 y, u32 w, u32 h, u32 color, u32 pitch, u32 bpp, u32 rop) { u32 br00, br09, br13, br14, br16; #if VERBOSE > 0 DBG_MSG("intelfbhw_do_fillrect: (%d,%d) %dx%d, c 0x%06x, p %d bpp %d, " "rop 0x%02x\n", x, y, w, h, color, pitch, bpp, rop); #endif br00 = COLOR_BLT_CMD; br09 = dinfo->fb_start + (y * pitch + x * (bpp / 8)); br13 = (rop << ROP_SHIFT) | pitch; br14 = (h << HEIGHT_SHIFT) | ((w * (bpp / 8)) << WIDTH_SHIFT); br16 = color; switch (bpp) { case 8: br13 |= COLOR_DEPTH_8; break; case 16: br13 |= COLOR_DEPTH_16; break; case 32: br13 |= COLOR_DEPTH_32; br00 |= WRITE_ALPHA | WRITE_RGB; break; } START_RING(6); OUT_RING(br00); OUT_RING(br13); OUT_RING(br14); OUT_RING(br09); OUT_RING(br16); OUT_RING(MI_NOOP); ADVANCE_RING(); #if VERBOSE > 0 DBG_MSG("ring = 0x%08x, 0x%08x (%d)\n", dinfo->ring_head, dinfo->ring_tail, dinfo->ring_space); #endif } void intelfbhw_do_bitblt(struct intelfb_info *dinfo, u32 curx, u32 cury, u32 dstx, u32 dsty, u32 w, u32 h, u32 pitch, u32 bpp) { u32 br00, br09, br11, br12, br13, br22, br23, br26; #if VERBOSE > 0 DBG_MSG("intelfbhw_do_bitblt: (%d,%d)->(%d,%d) %dx%d, p %d bpp %d\n", curx, cury, dstx, dsty, w, h, pitch, bpp); #endif br00 = XY_SRC_COPY_BLT_CMD; br09 = dinfo->fb_start; br11 = (pitch << PITCH_SHIFT); br12 = dinfo->fb_start; br13 = (SRC_ROP_GXCOPY << ROP_SHIFT) | (pitch << PITCH_SHIFT); br22 = (dstx << WIDTH_SHIFT) | (dsty << HEIGHT_SHIFT); br23 = ((dstx + w) << WIDTH_SHIFT) | ((dsty + h) << HEIGHT_SHIFT); br26 = (curx << WIDTH_SHIFT) | (cury << HEIGHT_SHIFT); switch (bpp) { case 8: br13 |= COLOR_DEPTH_8; break; case 16: br13 |= 
COLOR_DEPTH_16; break; case 32: br13 |= COLOR_DEPTH_32; br00 |= WRITE_ALPHA | WRITE_RGB; break; } START_RING(8); OUT_RING(br00); OUT_RING(br13); OUT_RING(br22); OUT_RING(br23); OUT_RING(br09); OUT_RING(br26); OUT_RING(br11); OUT_RING(br12); ADVANCE_RING(); } int intelfbhw_do_drawglyph(struct intelfb_info *dinfo, u32 fg, u32 bg, u32 w, u32 h, const u8* cdat, u32 x, u32 y, u32 pitch, u32 bpp) { int nbytes, ndwords, pad, tmp; u32 br00, br09, br13, br18, br19, br22, br23; int dat, ix, iy, iw; int i, j; #if VERBOSE > 0 DBG_MSG("intelfbhw_do_drawglyph: (%d,%d) %dx%d\n", x, y, w, h); #endif /* size in bytes of a padded scanline */ nbytes = ROUND_UP_TO(w, 16) / 8; /* Total bytes of padded scanline data to write out. */ nbytes = nbytes * h; /* * Check if the glyph data exceeds the immediate mode limit. * It would take a large font (1K pixels) to hit this limit. */ if (nbytes > MAX_MONO_IMM_SIZE) return 0; /* Src data is packaged a dword (32-bit) at a time. */ ndwords = ROUND_UP_TO(nbytes, 4) / 4; /* * Ring has to be padded to a quad word. 
But because the command starts with 7 bytes, pad only if there is an even number of ndwords */ pad = !(ndwords % 2); tmp = (XY_MONO_SRC_IMM_BLT_CMD & DW_LENGTH_MASK) + ndwords; br00 = (XY_MONO_SRC_IMM_BLT_CMD & ~DW_LENGTH_MASK) | tmp; br09 = dinfo->fb_start; br13 = (SRC_ROP_GXCOPY << ROP_SHIFT) | (pitch << PITCH_SHIFT); br18 = bg; br19 = fg; br22 = (x << WIDTH_SHIFT) | (y << HEIGHT_SHIFT); br23 = ((x + w) << WIDTH_SHIFT) | ((y + h) << HEIGHT_SHIFT); switch (bpp) { case 8: br13 |= COLOR_DEPTH_8; break; case 16: br13 |= COLOR_DEPTH_16; break; case 32: br13 |= COLOR_DEPTH_32; br00 |= WRITE_ALPHA | WRITE_RGB; break; } START_RING(8 + ndwords); OUT_RING(br00); OUT_RING(br13); OUT_RING(br22); OUT_RING(br23); OUT_RING(br09); OUT_RING(br18); OUT_RING(br19); ix = iy = 0; iw = ROUND_UP_TO(w, 8) / 8; while (ndwords--) { dat = 0; for (j = 0; j < 2; ++j) { for (i = 0; i < 2; ++i) { if (ix != iw || i == 0) dat |= cdat[iy*iw + ix++] << (i+j*2)*8; } if (ix == iw && iy != (h-1)) { ix = 0; ++iy; } } OUT_RING(dat); } if (pad) OUT_RING(MI_NOOP); ADVANCE_RING(); return 1; } /* HW cursor functions. 
*/ void intelfbhw_cursor_init(struct intelfb_info *dinfo) { u32 tmp; #if VERBOSE > 0 DBG_MSG("intelfbhw_cursor_init\n"); #endif if (dinfo->mobile || IS_I9XX(dinfo)) { if (!dinfo->cursor.physical) return; tmp = INREG(CURSOR_A_CONTROL); tmp &= ~(CURSOR_MODE_MASK | CURSOR_MOBILE_GAMMA_ENABLE | CURSOR_MEM_TYPE_LOCAL | (1 << CURSOR_PIPE_SELECT_SHIFT)); tmp |= CURSOR_MODE_DISABLE; OUTREG(CURSOR_A_CONTROL, tmp); OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical); } else { tmp = INREG(CURSOR_CONTROL); tmp &= ~(CURSOR_FORMAT_MASK | CURSOR_GAMMA_ENABLE | CURSOR_ENABLE | CURSOR_STRIDE_MASK); tmp = CURSOR_FORMAT_3C; OUTREG(CURSOR_CONTROL, tmp); OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.offset << 12); tmp = (64 << CURSOR_SIZE_H_SHIFT) | (64 << CURSOR_SIZE_V_SHIFT); OUTREG(CURSOR_SIZE, tmp); } } void intelfbhw_cursor_hide(struct intelfb_info *dinfo) { u32 tmp; #if VERBOSE > 0 DBG_MSG("intelfbhw_cursor_hide\n"); #endif dinfo->cursor_on = 0; if (dinfo->mobile || IS_I9XX(dinfo)) { if (!dinfo->cursor.physical) return; tmp = INREG(CURSOR_A_CONTROL); tmp &= ~CURSOR_MODE_MASK; tmp |= CURSOR_MODE_DISABLE; OUTREG(CURSOR_A_CONTROL, tmp); /* Flush changes */ OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical); } else { tmp = INREG(CURSOR_CONTROL); tmp &= ~CURSOR_ENABLE; OUTREG(CURSOR_CONTROL, tmp); } } void intelfbhw_cursor_show(struct intelfb_info *dinfo) { u32 tmp; #if VERBOSE > 0 DBG_MSG("intelfbhw_cursor_show\n"); #endif dinfo->cursor_on = 1; if (dinfo->cursor_blanked) return; if (dinfo->mobile || IS_I9XX(dinfo)) { if (!dinfo->cursor.physical) return; tmp = INREG(CURSOR_A_CONTROL); tmp &= ~CURSOR_MODE_MASK; tmp |= CURSOR_MODE_64_4C_AX; OUTREG(CURSOR_A_CONTROL, tmp); /* Flush changes */ OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical); } else { tmp = INREG(CURSOR_CONTROL); tmp |= CURSOR_ENABLE; OUTREG(CURSOR_CONTROL, tmp); } } void intelfbhw_cursor_setpos(struct intelfb_info *dinfo, int x, int y) { u32 tmp; #if VERBOSE > 0 DBG_MSG("intelfbhw_cursor_setpos: (%d, %d)\n", x, y); #endif /* * 
Sets the position. The coordinates are assumed to already * have any offset adjusted. Assume that the cursor is never * completely off-screen, and that x, y are always >= 0. */ tmp = ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) | ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); OUTREG(CURSOR_A_POSITION, tmp); if (IS_I9XX(dinfo)) OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical); } void intelfbhw_cursor_setcolor(struct intelfb_info *dinfo, u32 bg, u32 fg) { #if VERBOSE > 0 DBG_MSG("intelfbhw_cursor_setcolor\n"); #endif OUTREG(CURSOR_A_PALETTE0, bg & CURSOR_PALETTE_MASK); OUTREG(CURSOR_A_PALETTE1, fg & CURSOR_PALETTE_MASK); OUTREG(CURSOR_A_PALETTE2, fg & CURSOR_PALETTE_MASK); OUTREG(CURSOR_A_PALETTE3, bg & CURSOR_PALETTE_MASK); } void intelfbhw_cursor_load(struct intelfb_info *dinfo, int width, int height, u8 *data) { u8 __iomem *addr = (u8 __iomem *)dinfo->cursor.virtual; int i, j, w = width / 8; int mod = width % 8, t_mask, d_mask; #if VERBOSE > 0 DBG_MSG("intelfbhw_cursor_load\n"); #endif if (!dinfo->cursor.virtual) return; t_mask = 0xff >> mod; d_mask = ~(0xff >> mod); for (i = height; i--; ) { for (j = 0; j < w; j++) { writeb(0x00, addr + j); writeb(*(data++), addr + j+8); } if (mod) { writeb(t_mask, addr + j); writeb(*(data++) & d_mask, addr + j+8); } addr += 16; } } void intelfbhw_cursor_reset(struct intelfb_info *dinfo) { u8 __iomem *addr = (u8 __iomem *)dinfo->cursor.virtual; int i, j; #if VERBOSE > 0 DBG_MSG("intelfbhw_cursor_reset\n"); #endif if (!dinfo->cursor.virtual) return; for (i = 64; i--; ) { for (j = 0; j < 8; j++) { writeb(0xff, addr + j+0); writeb(0x00, addr + j+8); } addr += 16; } } static irqreturn_t intelfbhw_irq(int irq, void *dev_id) { u16 tmp; struct intelfb_info *dinfo = dev_id; spin_lock(&dinfo->int_lock); tmp = INREG16(IIR); if (dinfo->info->var.vmode & FB_VMODE_INTERLACED) tmp &= PIPE_A_EVENT_INTERRUPT; else tmp &= VSYNC_PIPE_A_INTERRUPT; /* non-interlaced */ if (tmp == 0) { spin_unlock(&dinfo->int_lock); return IRQ_RETVAL(0); /* not us */ } /* 
clear status bits 0-15 ASAP and don't touch bits 16-31 */ OUTREG(PIPEASTAT, INREG(PIPEASTAT)); OUTREG16(IIR, tmp); if (dinfo->vsync.pan_display) { dinfo->vsync.pan_display = 0; OUTREG(DSPABASE, dinfo->vsync.pan_offset); } dinfo->vsync.count++; wake_up_interruptible(&dinfo->vsync.wait); spin_unlock(&dinfo->int_lock); return IRQ_RETVAL(1); } int intelfbhw_enable_irq(struct intelfb_info *dinfo) { u16 tmp; if (!test_and_set_bit(0, &dinfo->irq_flags)) { if (request_irq(dinfo->pdev->irq, intelfbhw_irq, IRQF_SHARED, "intelfb", dinfo)) { clear_bit(0, &dinfo->irq_flags); return -EINVAL; } spin_lock_irq(&dinfo->int_lock); OUTREG16(HWSTAM, 0xfffe); /* i830 DRM uses ffff */ OUTREG16(IMR, 0); } else spin_lock_irq(&dinfo->int_lock); if (dinfo->info->var.vmode & FB_VMODE_INTERLACED) tmp = PIPE_A_EVENT_INTERRUPT; else tmp = VSYNC_PIPE_A_INTERRUPT; /* non-interlaced */ if (tmp != INREG16(IER)) { DBG_MSG("changing IER to 0x%X\n", tmp); OUTREG16(IER, tmp); } spin_unlock_irq(&dinfo->int_lock); return 0; } void intelfbhw_disable_irq(struct intelfb_info *dinfo) { if (test_and_clear_bit(0, &dinfo->irq_flags)) { if (dinfo->vsync.pan_display) { dinfo->vsync.pan_display = 0; OUTREG(DSPABASE, dinfo->vsync.pan_offset); } spin_lock_irq(&dinfo->int_lock); OUTREG16(HWSTAM, 0xffff); OUTREG16(IMR, 0xffff); OUTREG16(IER, 0x0); OUTREG16(IIR, INREG16(IIR)); /* clear IRQ requests */ spin_unlock_irq(&dinfo->int_lock); free_irq(dinfo->pdev->irq, dinfo); } } int intelfbhw_wait_for_vsync(struct intelfb_info *dinfo, u32 pipe) { struct intelfb_vsync *vsync; unsigned int count; int ret; switch (pipe) { case 0: vsync = &dinfo->vsync; break; default: return -ENODEV; } ret = intelfbhw_enable_irq(dinfo); if (ret) return ret; count = vsync->count; ret = wait_event_interruptible_timeout(vsync->wait, count != vsync->count, HZ / 10); if (ret < 0) return ret; if (ret == 0) { DBG_MSG("wait_for_vsync timed out!\n"); return -ETIMEDOUT; } return 0; }
gpl-2.0
davidmueller13/flo-1
arch/arm/mach-msm/subsystem_map.c
822
12647
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/io.h> #include <linux/types.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/memory_alloc.h> #include <linux/module.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/msm_subsystem_map.h> struct msm_buffer_node { struct rb_node rb_node_all_buffer; struct rb_node rb_node_paddr; struct msm_mapped_buffer *buf; unsigned long length; unsigned int *subsystems; unsigned int nsubsys; unsigned int phys; }; static struct rb_root buffer_root; static struct rb_root phys_root; DEFINE_MUTEX(msm_buffer_mutex); static unsigned long subsystem_to_domain_tbl[] = { VIDEO_DOMAIN, VIDEO_DOMAIN, CAMERA_DOMAIN, DISPLAY_READ_DOMAIN, DISPLAY_WRITE_DOMAIN, ROTATOR_SRC_DOMAIN, ROTATOR_DST_DOMAIN, 0xFFFFFFFF }; static struct msm_buffer_node *find_buffer(void *key) { struct rb_root *root = &buffer_root; struct rb_node *p = root->rb_node; mutex_lock(&msm_buffer_mutex); while (p) { struct msm_buffer_node *node; node = rb_entry(p, struct msm_buffer_node, rb_node_all_buffer); if (node->buf->vaddr) { if (key < node->buf->vaddr) p = p->rb_left; else if (key > node->buf->vaddr) p = p->rb_right; else { mutex_unlock(&msm_buffer_mutex); return node; } } else { if (key < (void *)node->buf) p = p->rb_left; else if (key > (void *)node->buf) p = p->rb_right; else { mutex_unlock(&msm_buffer_mutex); return node; } } } mutex_unlock(&msm_buffer_mutex); return NULL; } static struct msm_buffer_node *find_buffer_phys(unsigned int phys) { struct 
rb_root *root = &phys_root; struct rb_node *p = root->rb_node; mutex_lock(&msm_buffer_mutex); while (p) { struct msm_buffer_node *node; node = rb_entry(p, struct msm_buffer_node, rb_node_paddr); if (phys < node->phys) p = p->rb_left; else if (phys > node->phys) p = p->rb_right; else { mutex_unlock(&msm_buffer_mutex); return node; } } mutex_unlock(&msm_buffer_mutex); return NULL; } static int add_buffer(struct msm_buffer_node *node) { struct rb_root *root = &buffer_root; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; void *key; if (node->buf->vaddr) key = node->buf->vaddr; else key = node->buf; mutex_lock(&msm_buffer_mutex); while (*p) { struct msm_buffer_node *tmp; parent = *p; tmp = rb_entry(parent, struct msm_buffer_node, rb_node_all_buffer); if (tmp->buf->vaddr) { if (key < tmp->buf->vaddr) p = &(*p)->rb_left; else if (key > tmp->buf->vaddr) p = &(*p)->rb_right; else { WARN(1, "tried to add buffer twice! buf = %p" " vaddr = %p iova = %p", tmp->buf, tmp->buf->vaddr, tmp->buf->iova); mutex_unlock(&msm_buffer_mutex); return -EINVAL; } } else { if (key < (void *)tmp->buf) p = &(*p)->rb_left; else if (key > (void *)tmp->buf) p = &(*p)->rb_right; else { WARN(1, "tried to add buffer twice! buf = %p" " vaddr = %p iova = %p", tmp->buf, tmp->buf->vaddr, tmp->buf->iova); mutex_unlock(&msm_buffer_mutex); return -EINVAL; } } } rb_link_node(&node->rb_node_all_buffer, parent, p); rb_insert_color(&node->rb_node_all_buffer, root); mutex_unlock(&msm_buffer_mutex); return 0; } static int add_buffer_phys(struct msm_buffer_node *node) { struct rb_root *root = &phys_root; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; mutex_lock(&msm_buffer_mutex); while (*p) { struct msm_buffer_node *tmp; parent = *p; tmp = rb_entry(parent, struct msm_buffer_node, rb_node_paddr); if (node->phys < tmp->phys) p = &(*p)->rb_left; else if (node->phys > tmp->phys) p = &(*p)->rb_right; else { WARN(1, "tried to add buffer twice! 
buf = %p" " vaddr = %p iova = %p", tmp->buf, tmp->buf->vaddr, tmp->buf->iova); mutex_unlock(&msm_buffer_mutex); return -EINVAL; } } rb_link_node(&node->rb_node_paddr, parent, p); rb_insert_color(&node->rb_node_paddr, root); mutex_unlock(&msm_buffer_mutex); return 0; } static int remove_buffer(struct msm_buffer_node *victim_node) { struct rb_root *root = &buffer_root; if (!victim_node) return -EINVAL; mutex_lock(&msm_buffer_mutex); rb_erase(&victim_node->rb_node_all_buffer, root); mutex_unlock(&msm_buffer_mutex); return 0; } static int remove_buffer_phys(struct msm_buffer_node *victim_node) { struct rb_root *root = &phys_root; if (!victim_node) return -EINVAL; mutex_lock(&msm_buffer_mutex); rb_erase(&victim_node->rb_node_paddr, root); mutex_unlock(&msm_buffer_mutex); return 0; } static unsigned long msm_subsystem_get_domain_no(int subsys_id) { if (subsys_id > INVALID_SUBSYS_ID && subsys_id <= MAX_SUBSYSTEM_ID && subsys_id < ARRAY_SIZE(subsystem_to_domain_tbl)) return subsystem_to_domain_tbl[subsys_id]; else return subsystem_to_domain_tbl[MAX_SUBSYSTEM_ID]; } static unsigned long msm_subsystem_get_partition_no(int subsys_id) { switch (subsys_id) { case MSM_SUBSYSTEM_VIDEO_FWARE: return VIDEO_FIRMWARE_POOL; case MSM_SUBSYSTEM_VIDEO: return VIDEO_MAIN_POOL; case MSM_SUBSYSTEM_CAMERA: case MSM_SUBSYSTEM_DISPLAY: case MSM_SUBSYSTEM_ROTATOR: return GEN_POOL; default: return 0xFFFFFFFF; } } phys_addr_t msm_subsystem_check_iova_mapping(int subsys_id, unsigned long iova) { struct iommu_domain *subsys_domain; if (!msm_use_iommu()) /* * If there is no iommu, Just return the iova in this case. 
*/ return iova; subsys_domain = msm_get_iommu_domain(msm_subsystem_get_domain_no (subsys_id)); if (!subsys_domain) return -EINVAL; return iommu_iova_to_phys(subsys_domain, iova); } EXPORT_SYMBOL(msm_subsystem_check_iova_mapping); struct msm_mapped_buffer *msm_subsystem_map_buffer(unsigned long phys, unsigned int length, unsigned int flags, int *subsys_ids, unsigned int nsubsys) { struct msm_mapped_buffer *buf, *err; struct msm_buffer_node *node; int i = 0, j = 0, ret; unsigned long iova_start = 0, temp_phys, temp_va = 0; struct iommu_domain *d = NULL; int map_size = length; if (!((flags & MSM_SUBSYSTEM_MAP_KADDR) || (flags & MSM_SUBSYSTEM_MAP_IOVA))) { pr_warn("%s: no mapping flag was specified. The caller" " should explicitly specify what to map in the" " flags.\n", __func__); err = ERR_PTR(-EINVAL); goto outret; } buf = kzalloc(sizeof(*buf), GFP_ATOMIC); if (!buf) { err = ERR_PTR(-ENOMEM); goto outret; } node = kzalloc(sizeof(*node), GFP_ATOMIC); if (!node) { err = ERR_PTR(-ENOMEM); goto outkfreebuf; } node->phys = phys; if (flags & MSM_SUBSYSTEM_MAP_KADDR) { struct msm_buffer_node *old_buffer; old_buffer = find_buffer_phys(phys); if (old_buffer) { WARN(1, "%s: Attempting to map %lx twice in the kernel" " virtual space. Don't do that!\n", __func__, phys); err = ERR_PTR(-EINVAL); goto outkfreenode; } if (flags & MSM_SUBSYSTEM_MAP_CACHED) buf->vaddr = ioremap(phys, length); else if (flags & MSM_SUBSYSTEM_MAP_KADDR) buf->vaddr = ioremap_nocache(phys, length); else { pr_warn("%s: no cachability flag was indicated. 
Caller" " must specify a cachability flag.\n", __func__); err = ERR_PTR(-EINVAL); goto outkfreenode; } if (!buf->vaddr) { pr_err("%s: could not ioremap\n", __func__); err = ERR_PTR(-EINVAL); goto outkfreenode; } if (add_buffer_phys(node)) { err = ERR_PTR(-EINVAL); goto outiounmap; } } if ((flags & MSM_SUBSYSTEM_MAP_IOVA) && subsys_ids) { int min_align; length = round_up(length, SZ_4K); if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X) map_size = 2 * length; else map_size = length; buf->iova = kzalloc(sizeof(unsigned long)*nsubsys, GFP_ATOMIC); if (!buf->iova) { err = ERR_PTR(-ENOMEM); goto outremovephys; } /* * The alignment must be specified as the exact value wanted * e.g. 8k alignment must pass (0x2000 | other flags) */ min_align = flags & ~(SZ_4K - 1); for (i = 0; i < nsubsys; i++) { unsigned int domain_no, partition_no; if (!msm_use_iommu()) { buf->iova[i] = phys; continue; } d = msm_get_iommu_domain( msm_subsystem_get_domain_no(subsys_ids[i])); if (!d) { pr_err("%s: could not get domain for subsystem" " %d\n", __func__, subsys_ids[i]); continue; } domain_no = msm_subsystem_get_domain_no(subsys_ids[i]); partition_no = msm_subsystem_get_partition_no( subsys_ids[i]); ret = msm_allocate_iova_address(domain_no, partition_no, map_size, max(min_align, SZ_4K), &iova_start); if (ret) { pr_err("%s: could not allocate iova address\n", __func__); continue; } temp_phys = phys; temp_va = iova_start; for (j = length; j > 0; j -= SZ_4K, temp_phys += SZ_4K, temp_va += SZ_4K) { ret = iommu_map(d, temp_va, temp_phys, SZ_4K, (IOMMU_READ | IOMMU_WRITE)); if (ret) { pr_err("%s: could not map iommu for" " domain %p, iova %lx," " phys %lx\n", __func__, d, temp_va, temp_phys); err = ERR_PTR(-EINVAL); goto outdomain; } } buf->iova[i] = iova_start; if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X) msm_iommu_map_extra (d, temp_va, length, SZ_4K, (IOMMU_READ | IOMMU_WRITE)); } } node->buf = buf; node->subsystems = subsys_ids; node->length = map_size; node->nsubsys = nsubsys; if (add_buffer(node)) { err = 
ERR_PTR(-EINVAL); goto outiova; } return buf; outiova: if (flags & MSM_SUBSYSTEM_MAP_IOVA) { if (d) iommu_unmap(d, temp_va, SZ_4K); } outdomain: if (flags & MSM_SUBSYSTEM_MAP_IOVA) { /* Unmap the rest of the current domain, i */ if (d) { for (j -= SZ_4K, temp_va -= SZ_4K; j > 0; temp_va -= SZ_4K, j -= SZ_4K) iommu_unmap(d, temp_va, SZ_4K); } /* Unmap all the other domains */ for (i--; i >= 0; i--) { unsigned int domain_no, partition_no; if (!msm_use_iommu()) continue; domain_no = msm_subsystem_get_domain_no(subsys_ids[i]); partition_no = msm_subsystem_get_partition_no( subsys_ids[i]); d = msm_get_iommu_domain(domain_no); if (d) { temp_va = buf->iova[i]; for (j = length; j > 0; j -= SZ_4K, temp_va += SZ_4K) iommu_unmap(d, temp_va, SZ_4K); } msm_free_iova_address(buf->iova[i], domain_no, partition_no, length); } kfree(buf->iova); } outremovephys: if (flags & MSM_SUBSYSTEM_MAP_KADDR) remove_buffer_phys(node); outiounmap: if (flags & MSM_SUBSYSTEM_MAP_KADDR) iounmap(buf->vaddr); outkfreenode: kfree(node); outkfreebuf: kfree(buf); outret: return err; } EXPORT_SYMBOL(msm_subsystem_map_buffer); int msm_subsystem_unmap_buffer(struct msm_mapped_buffer *buf) { struct msm_buffer_node *node; int i, j, ret; unsigned long temp_va; if (IS_ERR_OR_NULL(buf)) goto out; if (buf->vaddr) node = find_buffer(buf->vaddr); else node = find_buffer(buf); if (!node) goto out; if (node->buf != buf) { pr_err("%s: caller must pass in the same buffer structure" " returned from map_buffer when freeding\n", __func__); goto out; } if (buf->iova) { if (msm_use_iommu()) for (i = 0; i < node->nsubsys; i++) { struct iommu_domain *subsys_domain; unsigned int domain_no, partition_no; subsys_domain = msm_get_iommu_domain( msm_subsystem_get_domain_no( node->subsystems[i])); if (!subsys_domain) continue; domain_no = msm_subsystem_get_domain_no( node->subsystems[i]); partition_no = msm_subsystem_get_partition_no( node->subsystems[i]); temp_va = buf->iova[i]; for (j = node->length; j > 0; j -= SZ_4K, temp_va 
+= SZ_4K) { ret = iommu_unmap(subsys_domain, temp_va, SZ_4K); WARN(ret, "iommu_unmap returned a " " non-zero value.\n"); } msm_free_iova_address(buf->iova[i], domain_no, partition_no, node->length); } kfree(buf->iova); } if (buf->vaddr) { remove_buffer_phys(node); iounmap(buf->vaddr); } remove_buffer(node); kfree(node); kfree(buf); return 0; out: return -EINVAL; } EXPORT_SYMBOL(msm_subsystem_unmap_buffer);
gpl-2.0
msm8916-zte/android_kernel_zte_msm8916
net/llc/llc_proc.c
2358
6765
/* * proc_llc.c - proc interface for LLC * * Copyright (c) 2001 by Jay Schulist <jschlst@samba.org> * 2002-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/errno.h> #include <linux/seq_file.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/llc.h> #include <net/llc_c_ac.h> #include <net/llc_c_ev.h> #include <net/llc_c_st.h> #include <net/llc_conn.h> static void llc_ui_format_mac(struct seq_file *seq, u8 *addr) { seq_printf(seq, "%pM", addr); } static struct sock *llc_get_sk_idx(loff_t pos) { struct llc_sap *sap; struct sock *sk = NULL; int i; list_for_each_entry_rcu(sap, &llc_sap_list, node) { spin_lock_bh(&sap->sk_lock); for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++) { struct hlist_nulls_head *head = &sap->sk_laddr_hash[i]; struct hlist_nulls_node *node; sk_nulls_for_each(sk, node, head) { if (!pos) goto found; /* keep the lock */ --pos; } } spin_unlock_bh(&sap->sk_lock); } sk = NULL; found: return sk; } static void *llc_seq_start(struct seq_file *seq, loff_t *pos) { loff_t l = *pos; rcu_read_lock_bh(); return l ? 
llc_get_sk_idx(--l) : SEQ_START_TOKEN; } static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket) { struct hlist_nulls_node *node; struct sock *sk = NULL; while (++bucket < LLC_SK_LADDR_HASH_ENTRIES) sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket]) goto out; out: return sk; } static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock* sk, *next; struct llc_sock *llc; struct llc_sap *sap; ++*pos; if (v == SEQ_START_TOKEN) { sk = llc_get_sk_idx(0); goto out; } sk = v; next = sk_nulls_next(sk); if (next) { sk = next; goto out; } llc = llc_sk(sk); sap = llc->sap; sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr)); if (sk) goto out; spin_unlock_bh(&sap->sk_lock); list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) { spin_lock_bh(&sap->sk_lock); sk = laddr_hash_next(sap, -1); if (sk) break; /* keep the lock */ spin_unlock_bh(&sap->sk_lock); } out: return sk; } static void llc_seq_stop(struct seq_file *seq, void *v) { if (v && v != SEQ_START_TOKEN) { struct sock *sk = v; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; spin_unlock_bh(&sap->sk_lock); } rcu_read_unlock_bh(); } static int llc_seq_socket_show(struct seq_file *seq, void *v) { struct sock* sk; struct llc_sock *llc; if (v == SEQ_START_TOKEN) { seq_puts(seq, "SKt Mc local_mac_sap remote_mac_sap " " tx_queue rx_queue st uid link\n"); goto out; } sk = v; llc = llc_sk(sk); /* FIXME: check if the address is multicast */ seq_printf(seq, "%2X %2X ", sk->sk_type, 0); if (llc->dev) llc_ui_format_mac(seq, llc->dev->dev_addr); else { u8 addr[6] = {0,0,0,0,0,0}; llc_ui_format_mac(seq, addr); } seq_printf(seq, "@%02X ", llc->sap->laddr.lsap); llc_ui_format_mac(seq, llc->daddr.mac); seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap, sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk) - llc->copied_seq, sk->sk_state, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), llc->link); out: return 0; } static const char *const 
llc_conn_state_names[] = { [LLC_CONN_STATE_ADM] = "adm", [LLC_CONN_STATE_SETUP] = "setup", [LLC_CONN_STATE_NORMAL] = "normal", [LLC_CONN_STATE_BUSY] = "busy", [LLC_CONN_STATE_REJ] = "rej", [LLC_CONN_STATE_AWAIT] = "await", [LLC_CONN_STATE_AWAIT_BUSY] = "await_busy", [LLC_CONN_STATE_AWAIT_REJ] = "await_rej", [LLC_CONN_STATE_D_CONN] = "d_conn", [LLC_CONN_STATE_RESET] = "reset", [LLC_CONN_STATE_ERROR] = "error", [LLC_CONN_STATE_TEMP] = "temp", }; static int llc_seq_core_show(struct seq_file *seq, void *v) { struct sock* sk; struct llc_sock *llc; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Connection list:\n" "dsap state retr txw rxw pf ff sf df rs cs " "tack tpfc trs tbs blog busr\n"); goto out; } sk = v; llc = llc_sk(sk); seq_printf(seq, " %02X %-10s %3d %3d %3d %2d %2d %2d %2d %2d %2d " "%4d %4d %3d %3d %4d %4d\n", llc->daddr.lsap, llc_conn_state_names[llc->state], llc->retry_count, llc->k, llc->rw, llc->p_flag, llc->f_flag, llc->s_flag, llc->data_flag, llc->remote_busy_flag, llc->cause_flag, timer_pending(&llc->ack_timer.timer), timer_pending(&llc->pf_cycle_timer.timer), timer_pending(&llc->rej_sent_timer.timer), timer_pending(&llc->busy_state_timer.timer), !!sk->sk_backlog.tail, !!sock_owned_by_user(sk)); out: return 0; } static const struct seq_operations llc_seq_socket_ops = { .start = llc_seq_start, .next = llc_seq_next, .stop = llc_seq_stop, .show = llc_seq_socket_show, }; static const struct seq_operations llc_seq_core_ops = { .start = llc_seq_start, .next = llc_seq_next, .stop = llc_seq_stop, .show = llc_seq_core_show, }; static int llc_seq_socket_open(struct inode *inode, struct file *file) { return seq_open(file, &llc_seq_socket_ops); } static int llc_seq_core_open(struct inode *inode, struct file *file) { return seq_open(file, &llc_seq_core_ops); } static const struct file_operations llc_seq_socket_fops = { .owner = THIS_MODULE, .open = llc_seq_socket_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct 
file_operations llc_seq_core_fops = { .owner = THIS_MODULE, .open = llc_seq_core_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static struct proc_dir_entry *llc_proc_dir; int __init llc_proc_init(void) { int rc = -ENOMEM; struct proc_dir_entry *p; llc_proc_dir = proc_mkdir("llc", init_net.proc_net); if (!llc_proc_dir) goto out; p = proc_create("socket", S_IRUGO, llc_proc_dir, &llc_seq_socket_fops); if (!p) goto out_socket; p = proc_create("core", S_IRUGO, llc_proc_dir, &llc_seq_core_fops); if (!p) goto out_core; rc = 0; out: return rc; out_core: remove_proc_entry("socket", llc_proc_dir); out_socket: remove_proc_entry("llc", init_net.proc_net); goto out; } void llc_proc_exit(void) { remove_proc_entry("socket", llc_proc_dir); remove_proc_entry("core", llc_proc_dir); remove_proc_entry("llc", init_net.proc_net); }
gpl-2.0
conv8888/fkan-linux
arch/tile/lib/memchr_64.c
2358
1956
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #include "string-endian.h" void *memchr(const void *s, int c, size_t n) { const uint64_t *last_word_ptr; const uint64_t *p; const char *last_byte_ptr; uintptr_t s_int; uint64_t goal, before_mask, v, bits; char *ret; if (__builtin_expect(n == 0, 0)) { /* Don't dereference any memory if the array is empty. */ return NULL; } /* Get an aligned pointer. */ s_int = (uintptr_t) s; p = (const uint64_t *)(s_int & -8); /* Create eight copies of the byte for which we are looking. */ goal = copy_byte(c); /* Read the first word, but munge it so that bytes before the array * will not match goal. */ before_mask = MASK(s_int); v = (*p | before_mask) ^ (goal & before_mask); /* Compute the address of the last byte. */ last_byte_ptr = (const char *)s + n - 1; /* Compute the address of the word containing the last byte. */ last_word_ptr = (const uint64_t *)((uintptr_t) last_byte_ptr & -8); while ((bits = __insn_v1cmpeq(v, goal)) == 0) { if (__builtin_expect(p == last_word_ptr, 0)) { /* We already read the last word in the array, * so give up. */ return NULL; } v = *++p; } /* We found a match, but it might be in a byte past the end * of the array. */ ret = ((char *)p) + (CFZ(bits) >> 3); return (ret <= last_byte_ptr) ? ret : NULL; } EXPORT_SYMBOL(memchr);
gpl-2.0
shakalaca/ASUS_ZenFone_ZE551KL
kernel/drivers/isdn/i4l/isdn_common.c
2614
57932
/* $Id: isdn_common.c,v 1.1.2.3 2004/02/10 01:07:13 keil Exp $ * * Linux ISDN subsystem, common used functions (linklevel). * * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/isdn.h> #include <linux/mutex.h> #include "isdn_common.h" #include "isdn_tty.h" #include "isdn_net.h" #include "isdn_ppp.h" #ifdef CONFIG_ISDN_AUDIO #include "isdn_audio.h" #endif #ifdef CONFIG_ISDN_DIVERSION_MODULE #define CONFIG_ISDN_DIVERSION #endif #ifdef CONFIG_ISDN_DIVERSION #include <linux/isdn_divertif.h> #endif /* CONFIG_ISDN_DIVERSION */ #include "isdn_v110.h" /* Debugflags */ #undef ISDN_DEBUG_STATCALLB MODULE_DESCRIPTION("ISDN4Linux: link layer"); MODULE_AUTHOR("Fritz Elfert"); MODULE_LICENSE("GPL"); isdn_dev *dev; static DEFINE_MUTEX(isdn_mutex); static char *isdn_revision = "$Revision: 1.1.2.3 $"; extern char *isdn_net_revision; #ifdef CONFIG_ISDN_PPP extern char *isdn_ppp_revision; #else static char *isdn_ppp_revision = ": none $"; #endif #ifdef CONFIG_ISDN_AUDIO extern char *isdn_audio_revision; #else static char *isdn_audio_revision = ": none $"; #endif extern char *isdn_v110_revision; #ifdef CONFIG_ISDN_DIVERSION static isdn_divert_if *divert_if; /* = NULL */ #endif /* CONFIG_ISDN_DIVERSION */ static int isdn_writebuf_stub(int, int, const u_char __user *, int); static void set_global_features(void); static int isdn_wildmat(char *s, char *p); static int isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding); static inline void isdn_lock_driver(isdn_driver_t *drv) { try_module_get(drv->interface->owner); drv->locks++; } void 
isdn_lock_drivers(void) { int i; for (i = 0; i < ISDN_MAX_DRIVERS; i++) { if (!dev->drv[i]) continue; isdn_lock_driver(dev->drv[i]); } } static inline void isdn_unlock_driver(isdn_driver_t *drv) { if (drv->locks > 0) { drv->locks--; module_put(drv->interface->owner); } } void isdn_unlock_drivers(void) { int i; for (i = 0; i < ISDN_MAX_DRIVERS; i++) { if (!dev->drv[i]) continue; isdn_unlock_driver(dev->drv[i]); } } #if defined(ISDN_DEBUG_NET_DUMP) || defined(ISDN_DEBUG_MODEM_DUMP) void isdn_dumppkt(char *s, u_char *p, int len, int dumplen) { int dumpc; printk(KERN_DEBUG "%s(%d) ", s, len); for (dumpc = 0; (dumpc < dumplen) && (len); len--, dumpc++) printk(" %02x", *p++); printk("\n"); } #endif /* * I picked the pattern-matching-functions from an old GNU-tar version (1.10) * It was originally written and put to PD by rs@mirror.TMC.COM (Rich Salz) */ static int isdn_star(char *s, char *p) { while (isdn_wildmat(s, p)) { if (*++s == '\0') return (2); } return (0); } /* * Shell-type Pattern-matching for incoming caller-Ids * This function gets a string in s and checks, if it matches the pattern * given in p. * * Return: * 0 = match. * 1 = no match. * 2 = no match. Would eventually match, if s would be longer. * * Possible Patterns: * * '?' matches one character * '*' matches zero or more characters * [xyz] matches the set of characters in brackets. * [^xyz] matches any single character not in the set of characters */ static int isdn_wildmat(char *s, char *p) { register int last; register int matched; register int reverse; register int nostar = 1; if (!(*s) && !(*p)) return (1); for (; *p; s++, p++) switch (*p) { case '\\': /* * Literal match with following character, * fall through. */ p++; default: if (*s != *p) return (*s == '\0') ? 2 : 1; continue; case '?': /* Match anything. */ if (*s == '\0') return (2); continue; case '*': nostar = 0; /* Trailing star matches everything. */ return (*++p ? isdn_star(s, p) : 0); case '[': /* [^....] means inverse character class. 
*/ if ((reverse = (p[1] == '^'))) p++; for (last = 0, matched = 0; *++p && (*p != ']'); last = *p) /* This next line requires a good C compiler. */ if (*p == '-' ? *s <= *++p && *s >= last : *s == *p) matched = 1; if (matched == reverse) return (1); continue; } return (*s == '\0') ? 0 : nostar; } int isdn_msncmp(const char *msn1, const char *msn2) { char TmpMsn1[ISDN_MSNLEN]; char TmpMsn2[ISDN_MSNLEN]; char *p; for (p = TmpMsn1; *msn1 && *msn1 != ':';) // Strip off a SPID *p++ = *msn1++; *p = '\0'; for (p = TmpMsn2; *msn2 && *msn2 != ':';) // Strip off a SPID *p++ = *msn2++; *p = '\0'; return isdn_wildmat(TmpMsn1, TmpMsn2); } int isdn_dc2minor(int di, int ch) { int i; for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (dev->chanmap[i] == ch && dev->drvmap[i] == di) return i; return -1; } static int isdn_timer_cnt1 = 0; static int isdn_timer_cnt2 = 0; static int isdn_timer_cnt3 = 0; static void isdn_timer_funct(ulong dummy) { int tf = dev->tflags; if (tf & ISDN_TIMER_FAST) { if (tf & ISDN_TIMER_MODEMREAD) isdn_tty_readmodem(); if (tf & ISDN_TIMER_MODEMPLUS) isdn_tty_modem_escape(); if (tf & ISDN_TIMER_MODEMXMIT) isdn_tty_modem_xmit(); } if (tf & ISDN_TIMER_SLOW) { if (++isdn_timer_cnt1 >= ISDN_TIMER_02SEC) { isdn_timer_cnt1 = 0; if (tf & ISDN_TIMER_NETDIAL) isdn_net_dial(); } if (++isdn_timer_cnt2 >= ISDN_TIMER_1SEC) { isdn_timer_cnt2 = 0; if (tf & ISDN_TIMER_NETHANGUP) isdn_net_autohup(); if (++isdn_timer_cnt3 >= ISDN_TIMER_RINGING) { isdn_timer_cnt3 = 0; if (tf & ISDN_TIMER_MODEMRING) isdn_tty_modem_ring(); } if (tf & ISDN_TIMER_CARRIER) isdn_tty_carrier_timeout(); } } if (tf) mod_timer(&dev->timer, jiffies + ISDN_TIMER_RES); } void isdn_timer_ctrl(int tf, int onoff) { unsigned long flags; int old_tflags; spin_lock_irqsave(&dev->timerlock, flags); if ((tf & ISDN_TIMER_SLOW) && (!(dev->tflags & ISDN_TIMER_SLOW))) { /* If the slow-timer wasn't activated until now */ isdn_timer_cnt1 = 0; isdn_timer_cnt2 = 0; } old_tflags = dev->tflags; if (onoff) dev->tflags |= tf; else 
dev->tflags &= ~tf; if (dev->tflags && !old_tflags) mod_timer(&dev->timer, jiffies + ISDN_TIMER_RES); spin_unlock_irqrestore(&dev->timerlock, flags); } /* * Receive a packet from B-Channel. (Called from low-level-module) */ static void isdn_receive_skb_callback(int di, int channel, struct sk_buff *skb) { int i; if ((i = isdn_dc2minor(di, channel)) == -1) { dev_kfree_skb(skb); return; } /* Update statistics */ dev->ibytes[i] += skb->len; /* First, try to deliver data to network-device */ if (isdn_net_rcv_skb(i, skb)) return; /* V.110 handling * makes sense for async streams only, so it is * called after possible net-device delivery. */ if (dev->v110[i]) { atomic_inc(&dev->v110use[i]); skb = isdn_v110_decode(dev->v110[i], skb); atomic_dec(&dev->v110use[i]); if (!skb) return; } /* No network-device found, deliver to tty or raw-channel */ if (skb->len) { if (isdn_tty_rcv_skb(i, di, channel, skb)) return; wake_up_interruptible(&dev->drv[di]->rcv_waitq[channel]); } else dev_kfree_skb(skb); } /* * Intercept command from Linklevel to Lowlevel. * If layer 2 protocol is V.110 and this is not supported by current * lowlevel-driver, use driver's transparent mode and handle V.110 in * linklevel instead. 
*/ int isdn_command(isdn_ctrl *cmd) { if (cmd->driver == -1) { printk(KERN_WARNING "isdn_command command(%x) driver -1\n", cmd->command); return (1); } if (!dev->drv[cmd->driver]) { printk(KERN_WARNING "isdn_command command(%x) dev->drv[%d] NULL\n", cmd->command, cmd->driver); return (1); } if (!dev->drv[cmd->driver]->interface) { printk(KERN_WARNING "isdn_command command(%x) dev->drv[%d]->interface NULL\n", cmd->command, cmd->driver); return (1); } if (cmd->command == ISDN_CMD_SETL2) { int idx = isdn_dc2minor(cmd->driver, cmd->arg & 255); unsigned long l2prot = (cmd->arg >> 8) & 255; unsigned long features = (dev->drv[cmd->driver]->interface->features >> ISDN_FEATURE_L2_SHIFT) & ISDN_FEATURE_L2_MASK; unsigned long l2_feature = (1 << l2prot); switch (l2prot) { case ISDN_PROTO_L2_V11096: case ISDN_PROTO_L2_V11019: case ISDN_PROTO_L2_V11038: /* If V.110 requested, but not supported by * HL-driver, set emulator-flag and change * Layer-2 to transparent */ if (!(features & l2_feature)) { dev->v110emu[idx] = l2prot; cmd->arg = (cmd->arg & 255) | (ISDN_PROTO_L2_TRANS << 8); } else dev->v110emu[idx] = 0; } } return dev->drv[cmd->driver]->interface->command(cmd); } void isdn_all_eaz(int di, int ch) { isdn_ctrl cmd; if (di < 0) return; cmd.driver = di; cmd.arg = ch; cmd.command = ISDN_CMD_SETEAZ; cmd.parm.num[0] = '\0'; isdn_command(&cmd); } /* * Begin of a CAPI like LL<->HL interface, currently used only for * supplementary service (CAPI 2.0 part III) */ #include <linux/isdn/capicmd.h> static int isdn_capi_rec_hl_msg(capi_msg *cm) { switch (cm->Command) { case CAPI_FACILITY: /* in the moment only handled in tty */ return (isdn_tty_capi_facility(cm)); default: return (-1); } } static int isdn_status_callback(isdn_ctrl *c) { int di; u_long flags; int i; int r; int retval = 0; isdn_ctrl cmd; isdn_net_dev *p; di = c->driver; i = isdn_dc2minor(di, c->arg); switch (c->command) { case ISDN_STAT_BSENT: if (i < 0) return -1; if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; if 
(isdn_net_stat_callback(i, c)) return 0; if (isdn_v110_stat_callback(i, c)) return 0; if (isdn_tty_stat_callback(i, c)) return 0; wake_up_interruptible(&dev->drv[di]->snd_waitq[c->arg]); break; case ISDN_STAT_STAVAIL: dev->drv[di]->stavail += c->arg; wake_up_interruptible(&dev->drv[di]->st_waitq); break; case ISDN_STAT_RUN: dev->drv[di]->flags |= DRV_FLAG_RUNNING; for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (dev->drvmap[i] == di) isdn_all_eaz(di, dev->chanmap[i]); set_global_features(); break; case ISDN_STAT_STOP: dev->drv[di]->flags &= ~DRV_FLAG_RUNNING; break; case ISDN_STAT_ICALL: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "ICALL (net): %d %ld %s\n", di, c->arg, c->parm.num); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) { cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_HANGUP; isdn_command(&cmd); return 0; } /* Try to find a network-interface which will accept incoming call */ r = ((c->command == ISDN_STAT_ICALLW) ? 0 : isdn_net_find_icall(di, c->arg, i, &c->parm.setup)); switch (r) { case 0: /* No network-device replies. * Try ttyI's. * These return 0 on no match, 1 on match and * 3 on eventually match, if CID is longer. 
*/ if (c->command == ISDN_STAT_ICALL) if ((retval = isdn_tty_find_icall(di, c->arg, &c->parm.setup))) return (retval); #ifdef CONFIG_ISDN_DIVERSION if (divert_if) if ((retval = divert_if->stat_callback(c))) return (retval); /* processed */ #endif /* CONFIG_ISDN_DIVERSION */ if ((!retval) && (dev->drv[di]->flags & DRV_FLAG_REJBUS)) { /* No tty responding */ cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_HANGUP; isdn_command(&cmd); retval = 2; } break; case 1: /* Schedule connection-setup */ isdn_net_dial(); cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_ACCEPTD; for (p = dev->netdev; p; p = p->next) if (p->local->isdn_channel == cmd.arg) { strcpy(cmd.parm.setup.eazmsn, p->local->msn); isdn_command(&cmd); retval = 1; break; } break; case 2: /* For calling back, first reject incoming call ... */ case 3: /* Interface found, but down, reject call actively */ retval = 2; printk(KERN_INFO "isdn: Rejecting Call\n"); cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_HANGUP; isdn_command(&cmd); if (r == 3) break; /* Fall through */ case 4: /* ... then start callback. 
*/ isdn_net_dial(); break; case 5: /* Number would eventually match, if longer */ retval = 3; break; } #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "ICALL: ret=%d\n", retval); #endif return retval; break; case ISDN_STAT_CINF: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "CINF: %ld %s\n", c->arg, c->parm.num); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; if (strcmp(c->parm.num, "0")) isdn_net_stat_callback(i, c); isdn_tty_stat_callback(i, c); break; case ISDN_STAT_CAUSE: #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "CAUSE: %ld %s\n", c->arg, c->parm.num); #endif printk(KERN_INFO "isdn: %s,ch%ld cause: %s\n", dev->drvid[di], c->arg, c->parm.num); isdn_tty_stat_callback(i, c); #ifdef CONFIG_ISDN_DIVERSION if (divert_if) divert_if->stat_callback(c); #endif /* CONFIG_ISDN_DIVERSION */ break; case ISDN_STAT_DISPLAY: #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "DISPLAY: %ld %s\n", c->arg, c->parm.display); #endif isdn_tty_stat_callback(i, c); #ifdef CONFIG_ISDN_DIVERSION if (divert_if) divert_if->stat_callback(c); #endif /* CONFIG_ISDN_DIVERSION */ break; case ISDN_STAT_DCONN: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "DCONN: %ld\n", c->arg); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; /* Find any net-device, waiting for D-channel setup */ if (isdn_net_stat_callback(i, c)) break; isdn_v110_stat_callback(i, c); /* Find any ttyI, waiting for D-channel setup */ if (isdn_tty_stat_callback(i, c)) { cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_ACCEPTB; isdn_command(&cmd); break; } break; case ISDN_STAT_DHUP: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "DHUP: %ld\n", c->arg); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; dev->drv[di]->online &= ~(1 << (c->arg)); isdn_info_update(); /* Signal hangup to network-devices */ if (isdn_net_stat_callback(i, c)) break; isdn_v110_stat_callback(i, c); if (isdn_tty_stat_callback(i, c)) break; #ifdef 
CONFIG_ISDN_DIVERSION if (divert_if) divert_if->stat_callback(c); #endif /* CONFIG_ISDN_DIVERSION */ break; break; case ISDN_STAT_BCONN: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "BCONN: %ld\n", c->arg); #endif /* Signal B-channel-connect to network-devices */ if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; dev->drv[di]->online |= (1 << (c->arg)); isdn_info_update(); if (isdn_net_stat_callback(i, c)) break; isdn_v110_stat_callback(i, c); if (isdn_tty_stat_callback(i, c)) break; break; case ISDN_STAT_BHUP: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "BHUP: %ld\n", c->arg); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; dev->drv[di]->online &= ~(1 << (c->arg)); isdn_info_update(); #ifdef CONFIG_ISDN_X25 /* Signal hangup to network-devices */ if (isdn_net_stat_callback(i, c)) break; #endif isdn_v110_stat_callback(i, c); if (isdn_tty_stat_callback(i, c)) break; break; case ISDN_STAT_NODCH: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "NODCH: %ld\n", c->arg); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; if (isdn_net_stat_callback(i, c)) break; if (isdn_tty_stat_callback(i, c)) break; break; case ISDN_STAT_ADDCH: spin_lock_irqsave(&dev->lock, flags); if (isdn_add_channels(dev->drv[di], di, c->arg, 1)) { spin_unlock_irqrestore(&dev->lock, flags); return -1; } spin_unlock_irqrestore(&dev->lock, flags); isdn_info_update(); break; case ISDN_STAT_DISCH: spin_lock_irqsave(&dev->lock, flags); for (i = 0; i < ISDN_MAX_CHANNELS; i++) if ((dev->drvmap[i] == di) && (dev->chanmap[i] == c->arg)) { if (c->parm.num[0]) dev->usage[i] &= ~ISDN_USAGE_DISABLED; else if (USG_NONE(dev->usage[i])) { dev->usage[i] |= ISDN_USAGE_DISABLED; } else retval = -1; break; } spin_unlock_irqrestore(&dev->lock, flags); isdn_info_update(); break; case ISDN_STAT_UNLOAD: while (dev->drv[di]->locks > 0) { isdn_unlock_driver(dev->drv[di]); } spin_lock_irqsave(&dev->lock, flags); 
isdn_tty_stat_callback(i, c); for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (dev->drvmap[i] == di) { dev->drvmap[i] = -1; dev->chanmap[i] = -1; dev->usage[i] &= ~ISDN_USAGE_DISABLED; } dev->drivers--; dev->channels -= dev->drv[di]->channels; kfree(dev->drv[di]->rcverr); kfree(dev->drv[di]->rcvcount); for (i = 0; i < dev->drv[di]->channels; i++) skb_queue_purge(&dev->drv[di]->rpqueue[i]); kfree(dev->drv[di]->rpqueue); kfree(dev->drv[di]->rcv_waitq); kfree(dev->drv[di]); dev->drv[di] = NULL; dev->drvid[di][0] = '\0'; isdn_info_update(); set_global_features(); spin_unlock_irqrestore(&dev->lock, flags); return 0; case ISDN_STAT_L1ERR: break; case CAPI_PUT_MESSAGE: return (isdn_capi_rec_hl_msg(&c->parm.cmsg)); #ifdef CONFIG_ISDN_TTY_FAX case ISDN_STAT_FAXIND: isdn_tty_stat_callback(i, c); break; #endif #ifdef CONFIG_ISDN_AUDIO case ISDN_STAT_AUDIO: isdn_tty_stat_callback(i, c); break; #endif #ifdef CONFIG_ISDN_DIVERSION case ISDN_STAT_PROT: case ISDN_STAT_REDIR: if (divert_if) return (divert_if->stat_callback(c)); #endif /* CONFIG_ISDN_DIVERSION */ default: return -1; } return 0; } /* * Get integer from char-pointer, set pointer to end of number */ int isdn_getnum(char **p) { int v = -1; while (*p[0] >= '0' && *p[0] <= '9') v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p[0]++) - '0'); return v; } #define DLE 0x10 /* * isdn_readbchan() tries to get data from the read-queue. * It MUST be called with interrupts off. * * Be aware that this is not an atomic operation when sleep != 0, even though * interrupts are turned off! Well, like that we are currently only called * on behalf of a read system call on raw device files (which are documented * to be dangerous and for debugging purpose only). The inode semaphore * takes care that this is not called for the same minor device number while * we are sleeping, but access is not serialized against simultaneous read() * from the corresponding ttyI device. 
Can other ugly events, like changes * of the mapping (di,ch)<->minor, happen during the sleep? --he */ int isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue_head_t *sleep) { int count; int count_pull; int count_put; int dflag; struct sk_buff *skb; u_char *cp; if (!dev->drv[di]) return 0; if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) { if (sleep) interruptible_sleep_on(sleep); else return 0; } if (len > dev->drv[di]->rcvcount[channel]) len = dev->drv[di]->rcvcount[channel]; cp = buf; count = 0; while (len) { if (!(skb = skb_peek(&dev->drv[di]->rpqueue[channel]))) break; #ifdef CONFIG_ISDN_AUDIO if (ISDN_AUDIO_SKB_LOCK(skb)) break; ISDN_AUDIO_SKB_LOCK(skb) = 1; if ((ISDN_AUDIO_SKB_DLECOUNT(skb)) || (dev->drv[di]->DLEflag & (1 << channel))) { char *p = skb->data; unsigned long DLEmask = (1 << channel); dflag = 0; count_pull = count_put = 0; while ((count_pull < skb->len) && (len > 0)) { len--; if (dev->drv[di]->DLEflag & DLEmask) { *cp++ = DLE; dev->drv[di]->DLEflag &= ~DLEmask; } else { *cp++ = *p; if (*p == DLE) { dev->drv[di]->DLEflag |= DLEmask; (ISDN_AUDIO_SKB_DLECOUNT(skb))--; } p++; count_pull++; } count_put++; } if (count_pull >= skb->len) dflag = 1; } else { #endif /* No DLE's in buff, so simply copy it */ dflag = 1; if ((count_pull = skb->len) > len) { count_pull = len; dflag = 0; } count_put = count_pull; skb_copy_from_linear_data(skb, cp, count_put); cp += count_put; len -= count_put; #ifdef CONFIG_ISDN_AUDIO } #endif count += count_put; if (fp) { memset(fp, 0, count_put); fp += count_put; } if (dflag) { /* We got all the data in this buff. * Now we can dequeue it. */ if (fp) *(fp - 1) = 0xff; #ifdef CONFIG_ISDN_AUDIO ISDN_AUDIO_SKB_LOCK(skb) = 0; #endif skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]); dev_kfree_skb(skb); } else { /* Not yet emptied this buff, so it * must stay in the queue, for further calls * but we pull off the data we got until now. 
*/ skb_pull(skb, count_pull); #ifdef CONFIG_ISDN_AUDIO ISDN_AUDIO_SKB_LOCK(skb) = 0; #endif } dev->drv[di]->rcvcount[channel] -= count_put; } return count; } /* * isdn_readbchan_tty() tries to get data from the read-queue. * It MUST be called with interrupts off. * * Be aware that this is not an atomic operation when sleep != 0, even though * interrupts are turned off! Well, like that we are currently only called * on behalf of a read system call on raw device files (which are documented * to be dangerous and for debugging purpose only). The inode semaphore * takes care that this is not called for the same minor device number while * we are sleeping, but access is not serialized against simultaneous read() * from the corresponding ttyI device. Can other ugly events, like changes * of the mapping (di,ch)<->minor, happen during the sleep? --he */ int isdn_readbchan_tty(int di, int channel, struct tty_port *port, int cisco_hack) { int count; int count_pull; int count_put; int dflag; struct sk_buff *skb; char last = 0; int len; if (!dev->drv[di]) return 0; if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) return 0; len = tty_buffer_request_room(port, dev->drv[di]->rcvcount[channel]); if (len == 0) return len; count = 0; while (len) { if (!(skb = skb_peek(&dev->drv[di]->rpqueue[channel]))) break; #ifdef CONFIG_ISDN_AUDIO if (ISDN_AUDIO_SKB_LOCK(skb)) break; ISDN_AUDIO_SKB_LOCK(skb) = 1; if ((ISDN_AUDIO_SKB_DLECOUNT(skb)) || (dev->drv[di]->DLEflag & (1 << channel))) { char *p = skb->data; unsigned long DLEmask = (1 << channel); dflag = 0; count_pull = count_put = 0; while ((count_pull < skb->len) && (len > 0)) { /* push every character but the last to the tty buffer directly */ if (count_put) tty_insert_flip_char(port, last, TTY_NORMAL); len--; if (dev->drv[di]->DLEflag & DLEmask) { last = DLE; dev->drv[di]->DLEflag &= ~DLEmask; } else { last = *p; if (last == DLE) { dev->drv[di]->DLEflag |= DLEmask; (ISDN_AUDIO_SKB_DLECOUNT(skb))--; } p++; count_pull++; } 
count_put++; } if (count_pull >= skb->len) dflag = 1; } else { #endif /* No DLE's in buff, so simply copy it */ dflag = 1; if ((count_pull = skb->len) > len) { count_pull = len; dflag = 0; } count_put = count_pull; if (count_put > 1) tty_insert_flip_string(port, skb->data, count_put - 1); last = skb->data[count_put - 1]; len -= count_put; #ifdef CONFIG_ISDN_AUDIO } #endif count += count_put; if (dflag) { /* We got all the data in this buff. * Now we can dequeue it. */ if (cisco_hack) tty_insert_flip_char(port, last, 0xFF); else tty_insert_flip_char(port, last, TTY_NORMAL); #ifdef CONFIG_ISDN_AUDIO ISDN_AUDIO_SKB_LOCK(skb) = 0; #endif skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]); dev_kfree_skb(skb); } else { tty_insert_flip_char(port, last, TTY_NORMAL); /* Not yet emptied this buff, so it * must stay in the queue, for further calls * but we pull off the data we got until now. */ skb_pull(skb, count_pull); #ifdef CONFIG_ISDN_AUDIO ISDN_AUDIO_SKB_LOCK(skb) = 0; #endif } dev->drv[di]->rcvcount[channel] -= count_put; } return count; } static inline int isdn_minor2drv(int minor) { return (dev->drvmap[minor]); } static inline int isdn_minor2chan(int minor) { return (dev->chanmap[minor]); } static char * isdn_statstr(void) { static char istatbuf[2048]; char *p; int i; sprintf(istatbuf, "idmap:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%s ", (dev->drvmap[i] < 0) ? 
"-" : dev->drvid[dev->drvmap[i]]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\nchmap:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%d ", dev->chanmap[i]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\ndrmap:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%d ", dev->drvmap[i]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\nusage:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%d ", dev->usage[i]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\nflags:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_DRIVERS; i++) { if (dev->drv[i]) { sprintf(p, "%ld ", dev->drv[i]->online); p = istatbuf + strlen(istatbuf); } else { sprintf(p, "? "); p = istatbuf + strlen(istatbuf); } } sprintf(p, "\nphone:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%s ", dev->num[i]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\n"); return istatbuf; } /* Module interface-code */ void isdn_info_update(void) { infostruct *p = dev->infochain; while (p) { *(p->private) = 1; p = (infostruct *) p->next; } wake_up_interruptible(&(dev->info_waitq)); } static ssize_t isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off) { uint minor = iminor(file_inode(file)); int len = 0; int drvidx; int chidx; int retval; char *p; mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { if (!file->private_data) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto out; } interruptible_sleep_on(&(dev->info_waitq)); } p = isdn_statstr(); file->private_data = NULL; if ((len = strlen(p)) <= count) { if (copy_to_user(buf, p, len)) { retval = -EFAULT; goto out; } *off += len; retval = len; goto out; } retval = 0; goto out; } if (!dev->drivers) { retval = -ENODEV; goto out; } if (minor <= ISDN_MINOR_BMAX) { printk(KERN_WARNING "isdn_read minor %d obsolete!\n", minor); drvidx = 
isdn_minor2drv(minor); if (drvidx < 0) { retval = -ENODEV; goto out; } if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) { retval = -ENODEV; goto out; } chidx = isdn_minor2chan(minor); if (!(p = kmalloc(count, GFP_KERNEL))) { retval = -ENOMEM; goto out; } len = isdn_readbchan(drvidx, chidx, p, NULL, count, &dev->drv[drvidx]->rcv_waitq[chidx]); *off += len; if (copy_to_user(buf, p, len)) len = -EFAULT; kfree(p); retval = len; goto out; } if (minor <= ISDN_MINOR_CTRLMAX) { drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); if (drvidx < 0) { retval = -ENODEV; goto out; } if (!dev->drv[drvidx]->stavail) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto out; } interruptible_sleep_on(&(dev->drv[drvidx]->st_waitq)); } if (dev->drv[drvidx]->interface->readstat) { if (count > dev->drv[drvidx]->stavail) count = dev->drv[drvidx]->stavail; len = dev->drv[drvidx]->interface->readstat(buf, count, drvidx, isdn_minor2chan(minor - ISDN_MINOR_CTRL)); if (len < 0) { retval = len; goto out; } } else { len = 0; } if (len) dev->drv[drvidx]->stavail -= len; else dev->drv[drvidx]->stavail = 0; *off += len; retval = len; goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) { retval = isdn_ppp_read(minor - ISDN_MINOR_PPP, file, buf, count); goto out; } #endif retval = -ENODEV; out: mutex_unlock(&isdn_mutex); return retval; } static ssize_t isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { uint minor = iminor(file_inode(file)); int drvidx; int chidx; int retval; if (minor == ISDN_MINOR_STATUS) return -EPERM; if (!dev->drivers) return -ENODEV; mutex_lock(&isdn_mutex); if (minor <= ISDN_MINOR_BMAX) { printk(KERN_WARNING "isdn_write minor %d obsolete!\n", minor); drvidx = isdn_minor2drv(minor); if (drvidx < 0) { retval = -ENODEV; goto out; } if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) { retval = -ENODEV; goto out; } chidx = isdn_minor2chan(minor); while ((retval = isdn_writebuf_stub(drvidx, chidx, buf, count)) == 0) 
interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]); goto out; } if (minor <= ISDN_MINOR_CTRLMAX) { drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); if (drvidx < 0) { retval = -ENODEV; goto out; } /* * We want to use the isdnctrl device to load the firmware * if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) return -ENODEV; */ if (dev->drv[drvidx]->interface->writecmd) retval = dev->drv[drvidx]->interface-> writecmd(buf, count, drvidx, isdn_minor2chan(minor - ISDN_MINOR_CTRL)); else retval = count; goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) { retval = isdn_ppp_write(minor - ISDN_MINOR_PPP, file, buf, count); goto out; } #endif retval = -ENODEV; out: mutex_unlock(&isdn_mutex); return retval; } static unsigned int isdn_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; unsigned int minor = iminor(file_inode(file)); int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { poll_wait(file, &(dev->info_waitq), wait); /* mask = POLLOUT | POLLWRNORM; */ if (file->private_data) { mask |= POLLIN | POLLRDNORM; } goto out; } if (minor >= ISDN_MINOR_CTRL && minor <= ISDN_MINOR_CTRLMAX) { if (drvidx < 0) { /* driver deregistered while file open */ mask = POLLHUP; goto out; } poll_wait(file, &(dev->drv[drvidx]->st_waitq), wait); mask = POLLOUT | POLLWRNORM; if (dev->drv[drvidx]->stavail) { mask |= POLLIN | POLLRDNORM; } goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) { mask = isdn_ppp_poll(file, wait); goto out; } #endif mask = POLLERR; out: mutex_unlock(&isdn_mutex); return mask; } static int isdn_ioctl(struct file *file, uint cmd, ulong arg) { uint minor = iminor(file_inode(file)); isdn_ctrl c; int drvidx; int ret; int i; char __user *p; char *s; union iocpar { char name[10]; char bname[22]; isdn_ioctl_struct iocts; isdn_net_ioctl_phone phone; isdn_net_ioctl_cfg cfg; } iocpar; void __user *argp = (void __user *)arg; #define name iocpar.name #define bname 
iocpar.bname #define iocts iocpar.iocts #define phone iocpar.phone #define cfg iocpar.cfg if (minor == ISDN_MINOR_STATUS) { switch (cmd) { case IIOCGETDVR: return (TTY_DV + (NET_DV << 8) + (INF_DV << 16)); case IIOCGETCPS: if (arg) { ulong __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, p, sizeof(ulong) * ISDN_MAX_CHANNELS * 2)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { put_user(dev->ibytes[i], p++); put_user(dev->obytes[i], p++); } return 0; } else return -EINVAL; break; case IIOCNETGPN: /* Get peer phone number of a connected * isdn network interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; return isdn_net_getpeer(&phone, argp); } else return -EINVAL; default: return -EINVAL; } } if (!dev->drivers) return -ENODEV; if (minor <= ISDN_MINOR_BMAX) { drvidx = isdn_minor2drv(minor); if (drvidx < 0) return -ENODEV; if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) return -ENODEV; return 0; } if (minor <= ISDN_MINOR_CTRLMAX) { /* * isdn net devices manage lots of configuration variables as linked lists. * Those lists must only be manipulated from user space. Some of the ioctl's * service routines access user space and are not atomic. Therefore, ioctl's * manipulating the lists and ioctl's sleeping while accessing the lists * are serialized by means of a semaphore. 
*/ switch (cmd) { case IIOCNETDWRSET: printk(KERN_INFO "INFO: ISDN_DW_ABC_EXTENSION not enabled\n"); return (-EINVAL); case IIOCNETLCR: printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); return -ENODEV; case IIOCNETAIF: /* Add a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; s = name; } else { s = NULL; } ret = mutex_lock_interruptible(&dev->mtx); if (ret) return ret; if ((s = isdn_net_new(s, NULL))) { if (copy_to_user(argp, s, strlen(s) + 1)) { ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETASL: /* Add a slave to a network-interface */ if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); if (ret) return ret; if ((s = isdn_net_newslave(bname))) { if (copy_to_user(argp, s, strlen(s) + 1)) { ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETDIF: /* Delete a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if (ret) return ret; ret = isdn_net_rm(name); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETSCF: /* Set configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; return isdn_net_setcfg(&cfg); } else return -EINVAL; case IIOCNETGCF: /* Get configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; if (!(ret = isdn_net_getcfg(&cfg))) { if (copy_to_user(argp, &cfg, sizeof(cfg))) return -EFAULT; } return ret; } else return -EINVAL; case IIOCNETANM: /* Add a phone-number to a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if (ret) return ret; ret = isdn_net_addphone(&phone); 
mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETGNM: /* Get list of phone-numbers of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if (ret) return ret; ret = isdn_net_getphones(&phone, argp); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDNM: /* Delete a phone-number of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if (ret) return ret; ret = isdn_net_delphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDIL: /* Force dialing of a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_dial(name); } else return -EINVAL; #ifdef CONFIG_ISDN_PPP case IIOCNETALN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_dial_slave(name); case IIOCNETDLN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_hangup_slave(name); #endif case IIOCNETHUP: /* Force hangup of a network-interface */ if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_hangup(name); break; case IIOCSETVER: dev->net_verbose = arg; printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose); return 0; case IIOCSETGST: if (arg) dev->global_flags |= ISDN_GLOBAL_STOPPED; else dev->global_flags &= ~ISDN_GLOBAL_STOPPED; printk(KERN_INFO "isdn: Global Mode %s\n", (dev->global_flags & ISDN_GLOBAL_STOPPED) ? 
"stopped" : "running"); return 0; case IIOCSETBRJ: drvidx = -1; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid) - 1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } } if (drvidx == -1) return -ENODEV; if (iocts.arg) dev->drv[drvidx]->flags |= DRV_FLAG_REJBUS; else dev->drv[drvidx]->flags &= ~DRV_FLAG_REJBUS; return 0; case IIOCSIGPRF: dev->profd = current; return 0; break; case IIOCGETPRF: /* Get all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_to_user(p, dev->mdm.info[i].emu.profile, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_to_user(p, dev->mdm.info[i].emu.pmsn, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; if (copy_to_user(p, dev->mdm.info[i].emu.plmsn, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; } return (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS; } else return -EINVAL; break; case IIOCSETPRF: /* Set all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_READ, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_from_user(dev->mdm.info[i].emu.profile, p, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; } return 0; } else return -EINVAL; break; case IIOCSETMAP: case IIOCGETMAP: /* Set/Get MSN->EAZ-Mapping for a driver */ if (arg) { if (copy_from_user(&iocts, argp, 
sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid) - 1] = 0; if (strlen(iocts.drvid)) { drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (cmd == IIOCSETMAP) { int loop = 1; p = (char __user *) iocts.arg; i = 0; while (loop) { int j = 0; while (1) { if (!access_ok(VERIFY_READ, p, 1)) return -EFAULT; get_user(bname[j], p++); switch (bname[j]) { case '\0': loop = 0; /* Fall through */ case ',': bname[j] = '\0'; strcpy(dev->drv[drvidx]->msn2eaz[i], bname); j = ISDN_MSNLEN; break; default: j++; } if (j >= ISDN_MSNLEN) break; } if (++i > 9) break; } } else { p = (char __user *) iocts.arg; for (i = 0; i < 10; i++) { snprintf(bname, sizeof(bname), "%s%s", strlen(dev->drv[drvidx]->msn2eaz[i]) ? dev->drv[drvidx]->msn2eaz[i] : "_", (i < 9) ? "," : "\0"); if (copy_to_user(p, bname, strlen(bname) + 1)) return -EFAULT; p += strlen(bname); } } return 0; } else return -EINVAL; case IIOCDBGVAR: if (arg) { if (copy_to_user(argp, &dev, sizeof(ulong))) return -EFAULT; return 0; } else return -EINVAL; break; default: if ((cmd & IIOCDRVCTL) == IIOCDRVCTL) cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK; else return -EINVAL; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid) - 1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (!access_ok(VERIFY_WRITE, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; c.driver = drvidx; c.command = ISDN_CMD_IOCTL; c.arg = cmd; memcpy(c.parm.num, &iocts.arg, sizeof(ulong)); ret = isdn_command(&c); memcpy(&iocts.arg, c.parm.num, sizeof(ulong)); if (copy_to_user(argp, &iocts, sizeof(isdn_ioctl_struct))) return 
-EFAULT; return ret; } else return -EINVAL; } } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) return (isdn_ppp_ioctl(minor - ISDN_MINOR_PPP, file, cmd, arg)); #endif return -ENODEV; #undef name #undef bname #undef iocts #undef phone #undef cfg } static long isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&isdn_mutex); ret = isdn_ioctl(file, cmd, arg); mutex_unlock(&isdn_mutex); return ret; } /* * Open the device code. */ static int isdn_open(struct inode *ino, struct file *filep) { uint minor = iminor(ino); int drvidx; int chidx; int retval = -ENODEV; mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { infostruct *p; if ((p = kmalloc(sizeof(infostruct), GFP_KERNEL))) { p->next = (char *) dev->infochain; p->private = (char *) &(filep->private_data); dev->infochain = p; /* At opening we allow a single update */ filep->private_data = (char *) 1; retval = 0; goto out; } else { retval = -ENOMEM; goto out; } } if (!dev->channels) goto out; if (minor <= ISDN_MINOR_BMAX) { printk(KERN_WARNING "isdn_open minor %d obsolete!\n", minor); drvidx = isdn_minor2drv(minor); if (drvidx < 0) goto out; chidx = isdn_minor2chan(minor); if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) goto out; if (!(dev->drv[drvidx]->online & (1 << chidx))) goto out; isdn_lock_drivers(); retval = 0; goto out; } if (minor <= ISDN_MINOR_CTRLMAX) { drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); if (drvidx < 0) goto out; isdn_lock_drivers(); retval = 0; goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) { retval = isdn_ppp_open(minor - ISDN_MINOR_PPP, filep); if (retval == 0) isdn_lock_drivers(); goto out; } #endif out: nonseekable_open(ino, filep); mutex_unlock(&isdn_mutex); return retval; } static int isdn_close(struct inode *ino, struct file *filep) { uint minor = iminor(ino); mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { infostruct *p = dev->infochain; infostruct *q = NULL; while (p) { if (p->private == 
(char *) &(filep->private_data)) { if (q) q->next = p->next; else dev->infochain = (infostruct *) (p->next); kfree(p); goto out; } q = p; p = (infostruct *) (p->next); } printk(KERN_WARNING "isdn: No private data while closing isdnctrl\n"); goto out; } isdn_unlock_drivers(); if (minor <= ISDN_MINOR_BMAX) goto out; if (minor <= ISDN_MINOR_CTRLMAX) { if (dev->profd == current) dev->profd = NULL; goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) isdn_ppp_release(minor - ISDN_MINOR_PPP, filep); #endif out: mutex_unlock(&isdn_mutex); return 0; } static const struct file_operations isdn_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = isdn_read, .write = isdn_write, .poll = isdn_poll, .unlocked_ioctl = isdn_unlocked_ioctl, .open = isdn_open, .release = isdn_close, }; char * isdn_map_eaz2msn(char *msn, int di) { isdn_driver_t *this = dev->drv[di]; int i; if (strlen(msn) == 1) { i = msn[0] - '0'; if ((i >= 0) && (i <= 9)) if (strlen(this->msn2eaz[i])) return (this->msn2eaz[i]); } return (msn); } /* * Find an unused ISDN-channel, whose feature-flags match the * given L2- and L3-protocols. */ #define L2V (~(ISDN_FEATURE_L2_V11096 | ISDN_FEATURE_L2_V11019 | ISDN_FEATURE_L2_V11038)) /* * This function must be called with holding the dev->lock. */ int isdn_get_free_channel(int usage, int l2_proto, int l3_proto, int pre_dev , int pre_chan, char *msn) { int i; ulong features; ulong vfeatures; features = ((1 << l2_proto) | (0x10000 << l3_proto)); vfeatures = (((1 << l2_proto) | (0x10000 << l3_proto)) & ~(ISDN_FEATURE_L2_V11096 | ISDN_FEATURE_L2_V11019 | ISDN_FEATURE_L2_V11038)); /* If Layer-2 protocol is V.110, accept drivers with * transparent feature even if these don't support V.110 * because we can emulate this in linklevel. 
*/ for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (USG_NONE(dev->usage[i]) && (dev->drvmap[i] != -1)) { int d = dev->drvmap[i]; if ((dev->usage[i] & ISDN_USAGE_EXCLUSIVE) && ((pre_dev != d) || (pre_chan != dev->chanmap[i]))) continue; if (!strcmp(isdn_map_eaz2msn(msn, d), "-")) continue; if (dev->usage[i] & ISDN_USAGE_DISABLED) continue; /* usage not allowed */ if (dev->drv[d]->flags & DRV_FLAG_RUNNING) { if (((dev->drv[d]->interface->features & features) == features) || (((dev->drv[d]->interface->features & vfeatures) == vfeatures) && (dev->drv[d]->interface->features & ISDN_FEATURE_L2_TRANS))) { if ((pre_dev < 0) || (pre_chan < 0)) { dev->usage[i] &= ISDN_USAGE_EXCLUSIVE; dev->usage[i] |= usage; isdn_info_update(); return i; } else { if ((pre_dev == d) && (pre_chan == dev->chanmap[i])) { dev->usage[i] &= ISDN_USAGE_EXCLUSIVE; dev->usage[i] |= usage; isdn_info_update(); return i; } } } } } return -1; } /* * Set state of ISDN-channel to 'unused' */ void isdn_free_channel(int di, int ch, int usage) { int i; if ((di < 0) || (ch < 0)) { printk(KERN_WARNING "%s: called with invalid drv(%d) or channel(%d)\n", __func__, di, ch); return; } for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (((!usage) || ((dev->usage[i] & ISDN_USAGE_MASK) == usage)) && (dev->drvmap[i] == di) && (dev->chanmap[i] == ch)) { dev->usage[i] &= (ISDN_USAGE_NONE | ISDN_USAGE_EXCLUSIVE); strcpy(dev->num[i], "???"); dev->ibytes[i] = 0; dev->obytes[i] = 0; // 20.10.99 JIM, try to reinitialize v110 ! dev->v110emu[i] = 0; atomic_set(&(dev->v110use[i]), 0); isdn_v110_close(dev->v110[i]); dev->v110[i] = NULL; // 20.10.99 JIM, try to reinitialize v110 ! 
isdn_info_update(); if (dev->drv[di]) skb_queue_purge(&dev->drv[di]->rpqueue[ch]); } } /* * Cancel Exclusive-Flag for ISDN-channel */ void isdn_unexclusive_channel(int di, int ch) { int i; for (i = 0; i < ISDN_MAX_CHANNELS; i++) if ((dev->drvmap[i] == di) && (dev->chanmap[i] == ch)) { dev->usage[i] &= ~ISDN_USAGE_EXCLUSIVE; isdn_info_update(); return; } } /* * writebuf replacement for SKB_ABLE drivers */ static int isdn_writebuf_stub(int drvidx, int chan, const u_char __user *buf, int len) { int ret; int hl = dev->drv[drvidx]->interface->hl_hdrlen; struct sk_buff *skb = alloc_skb(hl + len, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_reserve(skb, hl); if (copy_from_user(skb_put(skb, len), buf, len)) { dev_kfree_skb(skb); return -EFAULT; } ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, 1, skb); if (ret <= 0) dev_kfree_skb(skb); if (ret > 0) dev->obytes[isdn_dc2minor(drvidx, chan)] += ret; return ret; } /* * Return: length of data on success, -ERRcode on failure. */ int isdn_writebuf_skb_stub(int drvidx, int chan, int ack, struct sk_buff *skb) { int ret; struct sk_buff *nskb = NULL; int v110_ret = skb->len; int idx = isdn_dc2minor(drvidx, chan); if (dev->v110[idx]) { atomic_inc(&dev->v110use[idx]); nskb = isdn_v110_encode(dev->v110[idx], skb); atomic_dec(&dev->v110use[idx]); if (!nskb) return 0; v110_ret = *((int *)nskb->data); skb_pull(nskb, sizeof(int)); if (!nskb->len) { dev_kfree_skb(nskb); return v110_ret; } /* V.110 must always be acknowledged */ ack = 1; ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, nskb); } else { int hl = dev->drv[drvidx]->interface->hl_hdrlen; if (skb_headroom(skb) < hl) { /* * This should only occur when new HL driver with * increased hl_hdrlen was loaded after netdevice * was created and connected to the new driver. 
* * The V.110 branch (re-allocates on its own) does * not need this */ struct sk_buff *skb_tmp; skb_tmp = skb_realloc_headroom(skb, hl); printk(KERN_DEBUG "isdn_writebuf_skb_stub: reallocating headroom%s\n", skb_tmp ? "" : " failed"); if (!skb_tmp) return -ENOMEM; /* 0 better? */ ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, skb_tmp); if (ret > 0) { dev_kfree_skb(skb); } else { dev_kfree_skb(skb_tmp); } } else { ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, skb); } } if (ret > 0) { dev->obytes[idx] += ret; if (dev->v110[idx]) { atomic_inc(&dev->v110use[idx]); dev->v110[idx]->skbuser++; atomic_dec(&dev->v110use[idx]); /* For V.110 return unencoded data length */ ret = v110_ret; /* if the complete frame was send we free the skb; if not upper function will requeue the skb */ if (ret == skb->len) dev_kfree_skb(skb); } } else if (dev->v110[idx]) dev_kfree_skb(nskb); return ret; } static int isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding) { int j, k, m; init_waitqueue_head(&d->st_waitq); if (d->flags & DRV_FLAG_RUNNING) return -1; if (n < 1) return 0; m = (adding) ? d->channels + n : n; if (dev->channels + n > ISDN_MAX_CHANNELS) { printk(KERN_WARNING "register_isdn: Max. 
%d channels supported\n", ISDN_MAX_CHANNELS); return -1; } if ((adding) && (d->rcverr)) kfree(d->rcverr); if (!(d->rcverr = kzalloc(sizeof(int) * m, GFP_ATOMIC))) { printk(KERN_WARNING "register_isdn: Could not alloc rcverr\n"); return -1; } if ((adding) && (d->rcvcount)) kfree(d->rcvcount); if (!(d->rcvcount = kzalloc(sizeof(int) * m, GFP_ATOMIC))) { printk(KERN_WARNING "register_isdn: Could not alloc rcvcount\n"); if (!adding) kfree(d->rcverr); return -1; } if ((adding) && (d->rpqueue)) { for (j = 0; j < d->channels; j++) skb_queue_purge(&d->rpqueue[j]); kfree(d->rpqueue); } if (!(d->rpqueue = kmalloc(sizeof(struct sk_buff_head) * m, GFP_ATOMIC))) { printk(KERN_WARNING "register_isdn: Could not alloc rpqueue\n"); if (!adding) { kfree(d->rcvcount); kfree(d->rcverr); } return -1; } for (j = 0; j < m; j++) { skb_queue_head_init(&d->rpqueue[j]); } if ((adding) && (d->rcv_waitq)) kfree(d->rcv_waitq); d->rcv_waitq = kmalloc(sizeof(wait_queue_head_t) * 2 * m, GFP_ATOMIC); if (!d->rcv_waitq) { printk(KERN_WARNING "register_isdn: Could not alloc rcv_waitq\n"); if (!adding) { kfree(d->rpqueue); kfree(d->rcvcount); kfree(d->rcverr); } return -1; } d->snd_waitq = d->rcv_waitq + m; for (j = 0; j < m; j++) { init_waitqueue_head(&d->rcv_waitq[j]); init_waitqueue_head(&d->snd_waitq[j]); } dev->channels += n; for (j = d->channels; j < m; j++) for (k = 0; k < ISDN_MAX_CHANNELS; k++) if (dev->chanmap[k] < 0) { dev->chanmap[k] = j; dev->drvmap[k] = drvidx; break; } d->channels = m; return 0; } /* * Low-level-driver registration */ static void set_global_features(void) { int drvidx; dev->global_features = 0; for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++) { if (!dev->drv[drvidx]) continue; if (dev->drv[drvidx]->interface) dev->global_features |= dev->drv[drvidx]->interface->features; } } #ifdef CONFIG_ISDN_DIVERSION static char *map_drvname(int di) { if ((di < 0) || (di >= ISDN_MAX_DRIVERS)) return (NULL); return (dev->drvid[di]); /* driver name */ } /* map_drvname */ static 
int map_namedrv(char *id) { int i; for (i = 0; i < ISDN_MAX_DRIVERS; i++) { if (!strcmp(dev->drvid[i], id)) return (i); } return (-1); } /* map_namedrv */ int DIVERT_REG_NAME(isdn_divert_if *i_div) { if (i_div->if_magic != DIVERT_IF_MAGIC) return (DIVERT_VER_ERR); switch (i_div->cmd) { case DIVERT_CMD_REL: if (divert_if != i_div) return (DIVERT_REL_ERR); divert_if = NULL; /* free interface */ return (DIVERT_NO_ERR); case DIVERT_CMD_REG: if (divert_if) return (DIVERT_REG_ERR); i_div->ll_cmd = isdn_command; /* set command function */ i_div->drv_to_name = map_drvname; i_div->name_to_drv = map_namedrv; divert_if = i_div; /* remember interface */ return (DIVERT_NO_ERR); default: return (DIVERT_CMD_ERR); } } /* DIVERT_REG_NAME */ EXPORT_SYMBOL(DIVERT_REG_NAME); #endif /* CONFIG_ISDN_DIVERSION */ EXPORT_SYMBOL(register_isdn); #ifdef CONFIG_ISDN_PPP EXPORT_SYMBOL(isdn_ppp_register_compressor); EXPORT_SYMBOL(isdn_ppp_unregister_compressor); #endif int register_isdn(isdn_if *i) { isdn_driver_t *d; int j; ulong flags; int drvidx; if (dev->drivers >= ISDN_MAX_DRIVERS) { printk(KERN_WARNING "register_isdn: Max. 
%d drivers supported\n", ISDN_MAX_DRIVERS); return 0; } if (!i->writebuf_skb) { printk(KERN_WARNING "register_isdn: No write routine given.\n"); return 0; } if (!(d = kzalloc(sizeof(isdn_driver_t), GFP_KERNEL))) { printk(KERN_WARNING "register_isdn: Could not alloc driver-struct\n"); return 0; } d->maxbufsize = i->maxbufsize; d->pktcount = 0; d->stavail = 0; d->flags = DRV_FLAG_LOADED; d->online = 0; d->interface = i; d->channels = 0; spin_lock_irqsave(&dev->lock, flags); for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++) if (!dev->drv[drvidx]) break; if (isdn_add_channels(d, drvidx, i->channels, 0)) { spin_unlock_irqrestore(&dev->lock, flags); kfree(d); return 0; } i->channels = drvidx; i->rcvcallb_skb = isdn_receive_skb_callback; i->statcallb = isdn_status_callback; if (!strlen(i->id)) sprintf(i->id, "line%d", drvidx); for (j = 0; j < drvidx; j++) if (!strcmp(i->id, dev->drvid[j])) sprintf(i->id, "line%d", drvidx); dev->drv[drvidx] = d; strcpy(dev->drvid[drvidx], i->id); isdn_info_update(); dev->drivers++; set_global_features(); spin_unlock_irqrestore(&dev->lock, flags); return 1; } /* ***************************************************************************** * And now the modules code. 
***************************************************************************** */ static char * isdn_getrev(const char *revision) { char *rev; char *p; if ((p = strchr(revision, ':'))) { rev = p + 2; p = strchr(rev, '$'); *--p = 0; } else rev = "???"; return rev; } /* * Allocate and initialize all data, register modem-devices */ static int __init isdn_init(void) { int i; char tmprev[50]; dev = vzalloc(sizeof(isdn_dev)); if (!dev) { printk(KERN_WARNING "isdn: Could not allocate device-struct.\n"); return -EIO; } init_timer(&dev->timer); dev->timer.function = isdn_timer_funct; spin_lock_init(&dev->lock); spin_lock_init(&dev->timerlock); #ifdef MODULE dev->owner = THIS_MODULE; #endif mutex_init(&dev->mtx); init_waitqueue_head(&dev->info_waitq); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { dev->drvmap[i] = -1; dev->chanmap[i] = -1; dev->m_idx[i] = -1; strcpy(dev->num[i], "???"); } if (register_chrdev(ISDN_MAJOR, "isdn", &isdn_fops)) { printk(KERN_WARNING "isdn: Could not register control devices\n"); vfree(dev); return -EIO; } if ((isdn_tty_modem_init()) < 0) { printk(KERN_WARNING "isdn: Could not register tty devices\n"); vfree(dev); unregister_chrdev(ISDN_MAJOR, "isdn"); return -EIO; } #ifdef CONFIG_ISDN_PPP if (isdn_ppp_init() < 0) { printk(KERN_WARNING "isdn: Could not create PPP-device-structs\n"); isdn_tty_exit(); unregister_chrdev(ISDN_MAJOR, "isdn"); vfree(dev); return -EIO; } #endif /* CONFIG_ISDN_PPP */ strcpy(tmprev, isdn_revision); printk(KERN_NOTICE "ISDN subsystem Rev: %s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_net_revision); printk("%s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_ppp_revision); printk("%s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_audio_revision); printk("%s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_v110_revision); printk("%s", isdn_getrev(tmprev)); #ifdef MODULE printk(" loaded\n"); #else printk("\n"); #endif isdn_info_update(); return 0; } /* * Unload module */ static void __exit isdn_exit(void) { #ifdef CONFIG_ISDN_PPP 
isdn_ppp_cleanup(); #endif if (isdn_net_rmall() < 0) { printk(KERN_WARNING "isdn: net-device busy, remove cancelled\n"); return; } isdn_tty_exit(); unregister_chrdev(ISDN_MAJOR, "isdn"); del_timer(&dev->timer); /* call vfree with interrupts enabled, else it will hang */ vfree(dev); printk(KERN_NOTICE "ISDN-subsystem unloaded\n"); } module_init(isdn_init); module_exit(isdn_exit);
gpl-2.0
HurryNwait/kernel-crespo-jellybean
arch/m68k/mm/memory.c
3894
7649
/* * linux/arch/m68k/mm/memory.c * * Copyright (C) 1995 Hamish Macdonald */ #include <linux/module.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/gfp.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/system.h> #include <asm/traps.h> #include <asm/machdep.h> /* ++andreas: {get,free}_pointer_table rewritten to use unused fields from struct page instead of separately kmalloced struct. Stolen from arch/sparc/mm/srmmu.c ... */ typedef struct list_head ptable_desc; static LIST_HEAD(ptable_list); #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) #define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index) #define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t)) void __init init_pointer_table(unsigned long ptable) { ptable_desc *dp; unsigned long page = ptable & PAGE_MASK; unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE); dp = PD_PTABLE(page); if (!(PD_MARKBITS(dp) & mask)) { PD_MARKBITS(dp) = 0xff; list_add(dp, &ptable_list); } PD_MARKBITS(dp) &= ~mask; #ifdef DEBUG printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp)); #endif /* unreserve the page so it's possible to free that page */ PD_PAGE(dp)->flags &= ~(1 << PG_reserved); init_page_count(PD_PAGE(dp)); return; } pmd_t *get_pointer_table (void) { ptable_desc *dp = ptable_list.next; unsigned char mask = PD_MARKBITS (dp); unsigned char tmp; unsigned int off; /* * For a pointer table for a user process address space, a * table is taken from a page allocated for the purpose. Each * page can hold 8 pointer tables. The page is remapped in * virtual address space to be noncacheable. 
*/ if (mask == 0) { void *page; ptable_desc *new; if (!(page = (void *)get_zeroed_page(GFP_KERNEL))) return NULL; flush_tlb_kernel_page(page); nocache_page(page); new = PD_PTABLE(page); PD_MARKBITS(new) = 0xfe; list_add_tail(new, dp); return (pmd_t *)page; } for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE) ; PD_MARKBITS(dp) = mask & ~tmp; if (!PD_MARKBITS(dp)) { /* move to end of list */ list_move_tail(dp, &ptable_list); } return (pmd_t *) (page_address(PD_PAGE(dp)) + off); } int free_pointer_table (pmd_t *ptable) { ptable_desc *dp; unsigned long page = (unsigned long)ptable & PAGE_MASK; unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE); dp = PD_PTABLE(page); if (PD_MARKBITS (dp) & mask) panic ("table already free!"); PD_MARKBITS (dp) |= mask; if (PD_MARKBITS(dp) == 0xff) { /* all tables in page are free, free page */ list_del(dp); cache_page((void *)page); free_page (page); return 1; } else if (ptable_list.next != dp) { /* * move this descriptor to the front of the list, since * it has one or more free tables. 
*/ list_move(dp, &ptable_list); } return 0; } /* invalidate page in both caches */ static inline void clear040(unsigned long paddr) { asm volatile ( "nop\n\t" ".chip 68040\n\t" "cinvp %%bc,(%0)\n\t" ".chip 68k" : : "a" (paddr)); } /* invalidate page in i-cache */ static inline void cleari040(unsigned long paddr) { asm volatile ( "nop\n\t" ".chip 68040\n\t" "cinvp %%ic,(%0)\n\t" ".chip 68k" : : "a" (paddr)); } /* push page in both caches */ /* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */ static inline void push040(unsigned long paddr) { asm volatile ( "nop\n\t" ".chip 68040\n\t" "cpushp %%bc,(%0)\n\t" ".chip 68k" : : "a" (paddr)); } /* push and invalidate page in both caches, must disable ints * to avoid invalidating valid data */ static inline void pushcl040(unsigned long paddr) { unsigned long flags; local_irq_save(flags); push040(paddr); if (CPU_IS_060) clear040(paddr); local_irq_restore(flags); } /* * 040: Hit every page containing an address in the range paddr..paddr+len-1. * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s). * Hit every page until there is a page or less to go. Hit the next page, * and the one after that if the range hits it. */ /* ++roman: A little bit more care is required here: The CINVP instruction * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning * and the end of the region must be treated differently if they are not * exactly at the beginning or end of a page boundary. Else, maybe too much * data becomes invalidated and thus lost forever. CPUSHP does what we need: * it invalidates the page after pushing dirty data to memory. (Thanks to Jes * for discovering the problem!) */ /* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set * the DPI bit in the CACR; would it cause problems with temporarily changing * this?). So we have to push first and then additionally to invalidate. 
*/ /* * cache_clear() semantics: Clear any cache entries for the area in question, * without writing back dirty entries first. This is useful if the data will * be overwritten anyway, e.g. by DMA to memory. The range is defined by a * _physical_ address. */ void cache_clear (unsigned long paddr, int len) { if (CPU_IS_040_OR_060) { int tmp; /* * We need special treatment for the first page, in case it * is not page-aligned. Page align the addresses to work * around bug I17 in the 68060. */ if ((tmp = -paddr & (PAGE_SIZE - 1))) { pushcl040(paddr & PAGE_MASK); if ((len -= tmp) <= 0) return; paddr += tmp; } tmp = PAGE_SIZE; paddr &= PAGE_MASK; while ((len -= tmp) >= 0) { clear040(paddr); paddr += tmp; } if ((len += tmp)) /* a page boundary gets crossed at the end */ pushcl040(paddr); } else /* 68030 or 68020 */ asm volatile ("movec %/cacr,%/d0\n\t" "oriw %0,%/d0\n\t" "movec %/d0,%/cacr" : : "i" (FLUSH_I_AND_D) : "d0"); #ifdef CONFIG_M68K_L2_CACHE if(mach_l2_flush) mach_l2_flush(0); #endif } EXPORT_SYMBOL(cache_clear); /* * cache_push() semantics: Write back any dirty cache data in the given area, * and invalidate the range in the instruction cache. It needs not (but may) * invalidate those entries also in the data cache. The range is defined by a * _physical_ address. */ void cache_push (unsigned long paddr, int len) { if (CPU_IS_040_OR_060) { int tmp = PAGE_SIZE; /* * on 68040 or 68060, push cache lines for pages in the range; * on the '040 this also invalidates the pushed lines, but not on * the '060! */ len += paddr & (PAGE_SIZE - 1); /* * Work around bug I17 in the 68060 affecting some instruction * lines not being invalidated properly. */ paddr &= PAGE_MASK; do { push040(paddr); paddr += tmp; } while ((len -= tmp) > 0); } /* * 68030/68020 have no writeback cache. On the other hand, * cache_push is actually a superset of cache_clear (the lines * get written back and invalidated), so we should make sure * to perform the corresponding actions. 
After all, this is getting * called in places where we've just loaded code, or whatever, so * flushing the icache is appropriate; flushing the dcache shouldn't * be required. */ else /* 68030 or 68020 */ asm volatile ("movec %/cacr,%/d0\n\t" "oriw %0,%/d0\n\t" "movec %/d0,%/cacr" : : "i" (FLUSH_I) : "d0"); #ifdef CONFIG_M68K_L2_CACHE if(mach_l2_flush) mach_l2_flush(1); #endif } EXPORT_SYMBOL(cache_push);
gpl-2.0
junkTzu/kernel-MB860
arch/arm/mach-nuc93x/nuc932.c
3894
1255
/* * linux/arch/arm/mach-nuc93x/nuc932.c * * Copyright (c) 2009 Nuvoton corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * NUC932 cpu support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include "cpu.h" #include "clock.h" /* define specific CPU platform device */ static struct platform_device *nuc932_dev[] __initdata = { }; /* define specific CPU platform io map */ static struct map_desc nuc932evb_iodesc[] __initdata = { }; /*Init NUC932 evb io*/ void __init nuc932_map_io(void) { nuc93x_map_io(nuc932evb_iodesc, ARRAY_SIZE(nuc932evb_iodesc)); } /*Init NUC932 clock*/ void __init nuc932_init_clocks(void) { nuc93x_init_clocks(); } /*enable NUC932 uart clock*/ void __init nuc932_init_uartclk(void) { struct clk *ck_uart = clk_get(NULL, "uart"); BUG_ON(IS_ERR(ck_uart)); clk_enable(ck_uart); } /*Init NUC932 board info*/ void __init nuc932_board_init(void) { nuc93x_board_init(nuc932_dev, ARRAY_SIZE(nuc932_dev)); }
gpl-2.0
mikeNG/android_kernel_oneplus_msm8974
arch/powerpc/mm/hugetlbpage.c
4406
22352
/* * PPC Huge TLB Page Support for Kernel. * * Copyright (C) 2003 David Gibson, IBM Corporation. * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor * * Based on the IA-32 version: * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> */ #include <linux/mm.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/hugetlb.h> #include <linux/export.h> #include <linux/of_fdt.h> #include <linux/memblock.h> #include <linux/bootmem.h> #include <linux/moduleparam.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/setup.h> #define PAGE_SHIFT_64K 16 #define PAGE_SHIFT_16M 24 #define PAGE_SHIFT_16G 34 unsigned int HPAGE_SHIFT; /* * Tracks gpages after the device tree is scanned and before the * huge_boot_pages list is ready. On non-Freescale implementations, this is * just used to track 16G pages and so is a single array. FSL-based * implementations may have more than one gpage size, so we need multiple * arrays */ #ifdef CONFIG_PPC_FSL_BOOK3E #define MAX_NUMBER_GPAGES 128 struct psize_gpages { u64 gpage_list[MAX_NUMBER_GPAGES]; unsigned int nr_gpages; }; static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT]; #else #define MAX_NUMBER_GPAGES 1024 static u64 gpage_freearray[MAX_NUMBER_GPAGES]; static unsigned nr_gpages; #endif static inline int shift_to_mmu_psize(unsigned int shift) { int psize; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) if (mmu_psize_defs[psize].shift == shift) return psize; return -1; } static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) { if (mmu_psize_defs[mmu_psize].shift) return mmu_psize_defs[mmu_psize].shift; BUG(); } #define hugepd_none(hpd) ((hpd).pd == 0) pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift) { pgd_t *pg; pud_t *pu; pmd_t *pm; hugepd_t *hpdp = NULL; unsigned pdshift = PGDIR_SHIFT; if (shift) *shift = 0; pg = pgdir + pgd_index(ea); if (is_hugepd(pg)) { hpdp = (hugepd_t *)pg; } else if (!pgd_none(*pg)) { pdshift = PUD_SHIFT; pu 
= pud_offset(pg, ea); if (is_hugepd(pu)) hpdp = (hugepd_t *)pu; else if (!pud_none(*pu)) { pdshift = PMD_SHIFT; pm = pmd_offset(pu, ea); if (is_hugepd(pm)) hpdp = (hugepd_t *)pm; else if (!pmd_none(*pm)) { return pte_offset_kernel(pm, ea); } } } if (!hpdp) return NULL; if (shift) *shift = hugepd_shift(*hpdp); return hugepte_offset(hpdp, ea, pdshift); } EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { return find_linux_pte_or_hugepte(mm->pgd, addr, NULL); } static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, unsigned long address, unsigned pdshift, unsigned pshift) { struct kmem_cache *cachep; pte_t *new; #ifdef CONFIG_PPC_FSL_BOOK3E int i; int num_hugepd = 1 << (pshift - pdshift); cachep = hugepte_cache; #else cachep = PGT_CACHE(pdshift - pshift); #endif new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); BUG_ON(pshift > HUGEPD_SHIFT_MASK); BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); if (! new) return -ENOMEM; spin_lock(&mm->page_table_lock); #ifdef CONFIG_PPC_FSL_BOOK3E /* * We have multiple higher-level entries that point to the same * actual pte location. Fill in each as we go and backtrack on error. * We need all of these so the DTLB pgtable walk code can find the * right higher-level entry without knowing if it's a hugepage or not. */ for (i = 0; i < num_hugepd; i++, hpdp++) { if (unlikely(!hugepd_none(*hpdp))) break; else hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; } /* If we bailed from the for loop early, an error occurred, clean up */ if (i < num_hugepd) { for (i = i - 1 ; i >= 0; i--, hpdp--) hpdp->pd = 0; kmem_cache_free(cachep, new); } #else if (!hugepd_none(*hpdp)) kmem_cache_free(cachep, new); else hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; #endif spin_unlock(&mm->page_table_lock); return 0; } /* * These macros define how to determine which level of the page table holds * the hpdp. 
*/ #ifdef CONFIG_PPC_FSL_BOOK3E #define HUGEPD_PGD_SHIFT PGDIR_SHIFT #define HUGEPD_PUD_SHIFT PUD_SHIFT #else #define HUGEPD_PGD_SHIFT PUD_SHIFT #define HUGEPD_PUD_SHIFT PMD_SHIFT #endif pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pg; pud_t *pu; pmd_t *pm; hugepd_t *hpdp = NULL; unsigned pshift = __ffs(sz); unsigned pdshift = PGDIR_SHIFT; addr &= ~(sz-1); pg = pgd_offset(mm, addr); if (pshift >= HUGEPD_PGD_SHIFT) { hpdp = (hugepd_t *)pg; } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, pg, addr); if (pshift >= HUGEPD_PUD_SHIFT) { hpdp = (hugepd_t *)pu; } else { pdshift = PMD_SHIFT; pm = pmd_alloc(mm, pu, addr); hpdp = (hugepd_t *)pm; } } if (!hpdp) return NULL; BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) return NULL; return hugepte_offset(hpdp, addr, pdshift); } #ifdef CONFIG_PPC_FSL_BOOK3E /* Build list of addresses of gigantic pages. This function is used in early * boot before the buddy or bootmem allocator is setup. */ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) { unsigned int idx = shift_to_mmu_psize(__ffs(page_size)); int i; if (addr == 0) return; gpage_freearray[idx].nr_gpages = number_of_pages; for (i = 0; i < number_of_pages; i++) { gpage_freearray[idx].gpage_list[i] = addr; addr += page_size; } } /* * Moves the gigantic page addresses from the temporary list to the * huge_boot_pages list. 
*/ int alloc_bootmem_huge_page(struct hstate *hstate) { struct huge_bootmem_page *m; int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT); int nr_gpages = gpage_freearray[idx].nr_gpages; if (nr_gpages == 0) return 0; #ifdef CONFIG_HIGHMEM /* * If gpages can be in highmem we can't use the trick of storing the * data structure in the page; allocate space for this */ m = alloc_bootmem(sizeof(struct huge_bootmem_page)); m->phys = gpage_freearray[idx].gpage_list[--nr_gpages]; #else m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]); #endif list_add(&m->list, &huge_boot_pages); gpage_freearray[idx].nr_gpages = nr_gpages; gpage_freearray[idx].gpage_list[nr_gpages] = 0; m->hstate = hstate; return 1; } /* * Scan the command line hugepagesz= options for gigantic pages; store those in * a list that we use to allocate the memory once all options are parsed. */ unsigned long gpage_npages[MMU_PAGE_COUNT]; static int __init do_gpage_early_setup(char *param, char *val) { static phys_addr_t size; unsigned long npages; /* * The hugepagesz and hugepages cmdline options are interleaved. We * use the size variable to keep track of whether or not this was done * properly and skip over instances where it is incorrect. Other * command-line parsing code will issue warnings, so we don't need to. * */ if ((strcmp(param, "default_hugepagesz") == 0) || (strcmp(param, "hugepagesz") == 0)) { size = memparse(val, NULL); } else if (strcmp(param, "hugepages") == 0) { if (size != 0) { if (sscanf(val, "%lu", &npages) <= 0) npages = 0; gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; size = 0; } } return 0; } /* * This function allocates physical space for pages that are larger than the * buddy allocator can handle. We want to allocate these in highmem because * the amount of lowmem is limited. This means that this function MUST be * called before lowmem_end_addr is set up in MMU_init() in order for the lmb * allocate to grab highmem. 
*/ void __init reserve_hugetlb_gpages(void) { static __initdata char cmdline[COMMAND_LINE_SIZE]; phys_addr_t size, base; int i; strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE); parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0, &do_gpage_early_setup); /* * Walk gpage list in reverse, allocating larger page sizes first. * Skip over unsupported sizes, or sizes that have 0 gpages allocated. * When we reach the point in the list where pages are no longer * considered gpages, we're done. */ for (i = MMU_PAGE_COUNT-1; i >= 0; i--) { if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0) continue; else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT)) break; size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i)); base = memblock_alloc_base(size * gpage_npages[i], size, MEMBLOCK_ALLOC_ANYWHERE); add_gpage(base, size, gpage_npages[i]); } } #else /* !PPC_FSL_BOOK3E */ /* Build list of addresses of gigantic pages. This function is used in early * boot before the buddy or bootmem allocator is setup. */ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) { if (!addr) return; while (number_of_pages > 0) { gpage_freearray[nr_gpages] = addr; nr_gpages++; number_of_pages--; addr += page_size; } } /* Moves the gigantic page addresses from the temporary list to the * huge_boot_pages list. 
*/ int alloc_bootmem_huge_page(struct hstate *hstate) { struct huge_bootmem_page *m; if (nr_gpages == 0) return 0; m = phys_to_virt(gpage_freearray[--nr_gpages]); gpage_freearray[nr_gpages] = 0; list_add(&m->list, &huge_boot_pages); m->hstate = hstate; return 1; } #endif int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) { return 0; } #ifdef CONFIG_PPC_FSL_BOOK3E #define HUGEPD_FREELIST_SIZE \ ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) struct hugepd_freelist { struct rcu_head rcu; unsigned int index; void *ptes[0]; }; static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur); static void hugepd_free_rcu_callback(struct rcu_head *head) { struct hugepd_freelist *batch = container_of(head, struct hugepd_freelist, rcu); unsigned int i; for (i = 0; i < batch->index; i++) kmem_cache_free(hugepte_cache, batch->ptes[i]); free_page((unsigned long)batch); } static void hugepd_free(struct mmu_gather *tlb, void *hugepte) { struct hugepd_freelist **batchp; batchp = &__get_cpu_var(hugepd_freelist_cur); if (atomic_read(&tlb->mm->mm_users) < 2 || cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) { kmem_cache_free(hugepte_cache, hugepte); return; } if (*batchp == NULL) { *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC); (*batchp)->index = 0; } (*batchp)->ptes[(*batchp)->index++] = hugepte; if ((*batchp)->index == HUGEPD_FREELIST_SIZE) { call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback); *batchp = NULL; } } #endif static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, unsigned long start, unsigned long end, unsigned long floor, unsigned long ceiling) { pte_t *hugepte = hugepd_page(*hpdp); int i; unsigned long pdmask = ~((1UL << pdshift) - 1); unsigned int num_hugepd = 1; #ifdef CONFIG_PPC_FSL_BOOK3E /* Note: On fsl the hpdp may be the first of several */ num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); #else unsigned int shift = hugepd_shift(*hpdp); 
#endif start &= pdmask; if (start < floor) return; if (ceiling) { ceiling &= pdmask; if (! ceiling) return; } if (end - 1 > ceiling - 1) return; for (i = 0; i < num_hugepd; i++, hpdp++) hpdp->pd = 0; tlb->need_flush = 1; #ifdef CONFIG_PPC_FSL_BOOK3E hugepd_free(tlb, hugepte); #else pgtable_free_tlb(tlb, hugepte, pdshift - shift); #endif } static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pmd_t *pmd; unsigned long next; unsigned long start; start = addr; do { pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); if (pmd_none(*pmd)) continue; #ifdef CONFIG_PPC_FSL_BOOK3E /* * Increment next by the size of the huge mapping since * there may be more than one entry at this level for a * single hugepage, but all of them point to * the same kmem cache that holds the hugepte. */ next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); #endif free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, addr, next, floor, ceiling); } while (addr = next, addr != end); start &= PUD_MASK; if (start < floor) return; if (ceiling) { ceiling &= PUD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pmd = pmd_offset(pud, start); pud_clear(pud); pmd_free_tlb(tlb, pmd, start); } static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pud_t *pud; unsigned long next; unsigned long start; start = addr; do { pud = pud_offset(pgd, addr); next = pud_addr_end(addr, end); if (!is_hugepd(pud)) { if (pud_none_or_clear_bad(pud)) continue; hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); } else { #ifdef CONFIG_PPC_FSL_BOOK3E /* * Increment next by the size of the huge mapping since * there may be more than one entry at this level for a * single hugepage, but all of them point to * the same kmem cache that holds the hugepte. 
*/ next = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); #endif free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, addr, next, floor, ceiling); } } while (addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) return; if (ceiling) { ceiling &= PGDIR_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pud = pud_offset(pgd, start); pgd_clear(pgd); pud_free_tlb(tlb, pud, start); } /* * This function frees user-level page tables of a process. * * Must be called with pagetable lock held. */ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgd_t *pgd; unsigned long next; /* * Because there are a number of different possible pagetable * layouts for hugepage ranges, we limit knowledge of how * things should be laid out to the allocation path * (huge_pte_alloc(), above). Everything else works out the * structure as it goes from information in the hugepd * pointers. That means that we can't here use the * optimization used in the normal page free_pgd_range(), of * checking whether we're actually covering a large enough * range to have to do anything at the top level of the walk * instead of at the bottom. * * To make sense of this, you should probably go read the big * block comment at the top of the normal free_pgd_range(), * too. */ do { next = pgd_addr_end(addr, end); pgd = pgd_offset(tlb->mm, addr); if (!is_hugepd(pgd)) { if (pgd_none_or_clear_bad(pgd)) continue; hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); } else { #ifdef CONFIG_PPC_FSL_BOOK3E /* * Increment next by the size of the huge mapping since * there may be more than one entry at the pgd level * for a single hugepage, but all of them point to the * same kmem cache that holds the hugepte. 
*/ next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); #endif free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT, addr, next, floor, ceiling); } } while (addr = next, addr != end); } struct page * follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { pte_t *ptep; struct page *page; unsigned shift; unsigned long mask; ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); /* Verify it is a huge page else bail. */ if (!ptep || !shift) return ERR_PTR(-EINVAL); mask = (1UL << shift) - 1; page = pte_page(*ptep); if (page) page += (address & mask) / PAGE_SIZE; return page; } int pmd_huge(pmd_t pmd) { return 0; } int pud_huge(pud_t pud) { return 0; } struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { BUG(); return NULL; } static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long mask; unsigned long pte_end; struct page *head, *page, *tail; pte_t pte; int refs; pte_end = (addr + sz) & ~(sz-1); if (pte_end < end) end = pte_end; pte = *ptep; mask = _PAGE_PRESENT | _PAGE_USER; if (write) mask |= _PAGE_RW; if ((pte_val(pte) & mask) != mask) return 0; /* hugepages are never "special" */ VM_BUG_ON(!pfn_valid(pte_pfn(pte))); refs = 0; head = pte_page(pte); page = head + ((addr & (sz-1)) >> PAGE_SHIFT); tail = page; do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pte_val(pte) != pte_val(*ptep))) { /* Could be optimized better */ *nr -= refs; while (refs--) put_page(head); return 0; } /* * Any tail page need their mapcount reference taken before we * return. 
*/ while (refs--) { if (PageTail(tail)) get_huge_page_tail(tail); tail++; } return 1; } static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, unsigned long sz) { unsigned long __boundary = (addr + sz) & ~(sz-1); return (__boundary - 1 < end - 1) ? __boundary : end; } int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { pte_t *ptep; unsigned long sz = 1UL << hugepd_shift(*hugepd); unsigned long next; ptep = hugepte_offset(hugepd, addr, pdshift); do { next = hugepte_addr_end(addr, end, sz); if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr)) return 0; } while (ptep++, addr = next, addr != end); return 1; } #ifdef CONFIG_PPC_MM_SLICES unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct hstate *hstate = hstate_file(file); int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); } #endif unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { #ifdef CONFIG_PPC_MM_SLICES unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); return 1UL << mmu_psize_to_shift(psize); #else if (!is_vm_hugetlb_page(vma)) return PAGE_SIZE; return huge_page_size(hstate_vma(vma)); #endif } static inline bool is_power_of_4(unsigned long x) { if (is_power_of_2(x)) return (__ilog2(x) % 2) ? false : true; return false; } static int __init add_huge_page_size(unsigned long long size) { int shift = __ffs(size); int mmu_psize; /* Check that it is a page size supported by the hardware and * that it fits within pagetable and slice limits. 
*/ #ifdef CONFIG_PPC_FSL_BOOK3E if ((size < PAGE_SIZE) || !is_power_of_4(size)) return -EINVAL; #else if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT)) return -EINVAL; #endif if ((mmu_psize = shift_to_mmu_psize(shift)) < 0) return -EINVAL; #ifdef CONFIG_SPU_FS_64K_LS /* Disable support for 64K huge pages when 64K SPU local store * support is enabled as the current implementation conflicts. */ if (shift == PAGE_SHIFT_64K) return -EINVAL; #endif /* CONFIG_SPU_FS_64K_LS */ BUG_ON(mmu_psize_defs[mmu_psize].shift != shift); /* Return if huge page size has already been setup */ if (size_to_hstate(size)) return 0; hugetlb_add_hstate(shift - PAGE_SHIFT); return 0; } static int __init hugepage_setup_sz(char *str) { unsigned long long size; size = memparse(str, &str); if (add_huge_page_size(size) != 0) printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size); return 1; } __setup("hugepagesz=", hugepage_setup_sz); #ifdef CONFIG_PPC_FSL_BOOK3E struct kmem_cache *hugepte_cache; static int __init hugetlbpage_init(void) { int psize; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { unsigned shift; if (!mmu_psize_defs[psize].shift) continue; shift = mmu_psize_to_shift(psize); /* Don't treat normal page sizes as huge... */ if (shift != PAGE_SHIFT) if (add_huge_page_size(1ULL << shift) < 0) continue; } /* * Create a kmem cache for hugeptes. 
The bottom bits in the pte have * size information encoded in them, so align them to allow this */ hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t), HUGEPD_SHIFT_MASK + 1, 0, NULL); if (hugepte_cache == NULL) panic("%s: Unable to create kmem cache for hugeptes\n", __func__); /* Default hpage size = 4M */ if (mmu_psize_defs[MMU_PAGE_4M].shift) HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift; else panic("%s: Unable to set default huge page size\n", __func__); return 0; } #else static int __init hugetlbpage_init(void) { int psize; if (!mmu_has_feature(MMU_FTR_16M_PAGE)) return -ENODEV; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { unsigned shift; unsigned pdshift; if (!mmu_psize_defs[psize].shift) continue; shift = mmu_psize_to_shift(psize); if (add_huge_page_size(1ULL << shift) < 0) continue; if (shift < PMD_SHIFT) pdshift = PMD_SHIFT; else if (shift < PUD_SHIFT) pdshift = PUD_SHIFT; else pdshift = PGDIR_SHIFT; pgtable_cache_add(pdshift - shift, NULL); if (!PGT_CACHE(pdshift - shift)) panic("hugetlbpage_init(): could not create " "pgtable cache for %d bit pagesize\n", shift); } /* Set default large page size. Currently, we pick 16M or 1M * depending on what is available */ if (mmu_psize_defs[MMU_PAGE_16M].shift) HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift; else if (mmu_psize_defs[MMU_PAGE_1M].shift) HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift; return 0; } #endif module_init(hugetlbpage_init); void flush_dcache_icache_hugepage(struct page *page) { int i; void *start; BUG_ON(!PageCompound(page)); for (i = 0; i < (1UL << compound_order(page)); i++) { if (!PageHighMem(page)) { __flush_dcache_icache(page_address(page+i)); } else { start = kmap_atomic(page+i); __flush_dcache_icache(start); kunmap_atomic(start); } } }
gpl-2.0
vwmofo/android_kernel_htc_msm8960
arch/microblaze/kernel/process.c
4406
6932
/* * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/pm.h> #include <linux/tick.h> #include <linux/bitops.h> #include <asm/pgalloc.h> #include <asm/uaccess.h> /* for USER_DS macros */ #include <asm/cacheflush.h> void show_regs(struct pt_regs *regs) { printk(KERN_INFO " Registers dump: mode=%X\r\n", regs->pt_mode); printk(KERN_INFO " r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n", regs->r1, regs->r2, regs->r3, regs->r4); printk(KERN_INFO " r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n", regs->r5, regs->r6, regs->r7, regs->r8); printk(KERN_INFO " r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n", regs->r9, regs->r10, regs->r11, regs->r12); printk(KERN_INFO " r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n", regs->r13, regs->r14, regs->r15, regs->r16); printk(KERN_INFO " r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n", regs->r17, regs->r18, regs->r19, regs->r20); printk(KERN_INFO " r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n", regs->r21, regs->r22, regs->r23, regs->r24); printk(KERN_INFO " r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n", regs->r25, regs->r26, regs->r27, regs->r28); printk(KERN_INFO " r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n", regs->r29, regs->r30, regs->r31, regs->pc); printk(KERN_INFO " msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n", regs->msr, regs->ear, regs->esr, regs->fsr); } void (*pm_idle)(void); void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); static int hlt_counter = 1; void disable_hlt(void) { hlt_counter++; } EXPORT_SYMBOL(disable_hlt); void enable_hlt(void) { hlt_counter--; } EXPORT_SYMBOL(enable_hlt); static int __init nohlt_setup(char *__unused) { hlt_counter = 1; return 1; } __setup("nohlt", nohlt_setup); static int 
__init hlt_setup(char *__unused) { hlt_counter = 0; return 1; } __setup("hlt", hlt_setup); void default_idle(void) { if (likely(hlt_counter)) { local_irq_disable(); stop_critical_timings(); cpu_relax(); start_critical_timings(); local_irq_enable(); } else { clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb__after_clear_bit(); local_irq_disable(); while (!need_resched()) cpu_sleep(); local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); } } void cpu_idle(void) { set_thread_flag(TIF_POLLING_NRFLAG); /* endless idle loop with no priority at all */ while (1) { void (*idle)(void) = pm_idle; if (!idle) idle = default_idle; tick_nohz_idle_enter(); rcu_idle_enter(); while (!need_resched()) idle(); rcu_idle_exit(); tick_nohz_idle_exit(); schedule_preempt_disabled(); check_pgt_cache(); } } void flush_thread(void) { } int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs = task_pt_regs(p); struct thread_info *ti = task_thread_info(p); *childregs = *regs; if (user_mode(regs)) childregs->r1 = usp; else childregs->r1 = ((unsigned long) ti) + THREAD_SIZE; #ifndef CONFIG_MMU memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); ti->cpu_context.r1 = (unsigned long)childregs; ti->cpu_context.msr = (unsigned long)childregs->msr; #else /* if creating a kernel thread then update the current reg (we don't * want to use the parent's value when restoring by POP_STATE) */ if (kernel_mode(regs)) /* save new current on stack to use POP_STATE */ childregs->CURRENT_TASK = (unsigned long)p; /* if returning to user then use the parent's value of this register */ /* if we're creating a new kernel thread then just zeroing all * the registers. That's OK for a brand new thread.*/ /* Pls. 
note that some of them will be restored in POP_STATE */ if (kernel_mode(regs)) memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); /* if this thread is created for fork/vfork/clone, then we want to * restore all the parent's context */ /* in addition to the registers which will be restored by POP_STATE */ else { ti->cpu_context = *(struct cpu_context *)regs; childregs->msr |= MSR_UMS; } /* FIXME STATE_SAVE_PT_OFFSET; */ ti->cpu_context.r1 = (unsigned long)childregs; /* we should consider the fact that childregs is a copy of the parent * regs which were saved immediately after entering the kernel state * before enabling VM. This MSR will be restored in switch_to and * RETURN() and we want to have the right machine state there * specifically this state must have INTs disabled before and enabled * after performing rtbd * compose the right MSR for RETURN(). It will work for switch_to also * excepting for VM and UMS * don't touch UMS , CARRY and cache bits * right now MSR is a copy of parent one */ childregs->msr |= MSR_BIP; childregs->msr &= ~MSR_EIP; childregs->msr |= MSR_IE; childregs->msr &= ~MSR_VM; childregs->msr |= MSR_VMS; childregs->msr |= MSR_EE; /* exceptions will be enabled*/ ti->cpu_context.msr = (childregs->msr|MSR_VM); ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */ ti->cpu_context.msr &= ~MSR_IE; #endif ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; if (clone_flags & CLONE_SETTLS) ; return 0; } #ifndef CONFIG_MMU /* * Return saved PC of a blocked thread. 
*/ unsigned long thread_saved_pc(struct task_struct *tsk) { struct cpu_context *ctx = &(((struct thread_info *)(tsk->stack))->cpu_context); /* Check whether the thread is blocked in resume() */ if (in_sched_functions(ctx->r15)) return (unsigned long)ctx->r15; else return ctx->r14; } #endif static void kernel_thread_helper(int (*fn)(void *), void *arg) { fn(arg); do_exit(-1); } int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); /* store them in non-volatile registers */ regs.r5 = (unsigned long)fn; regs.r6 = (unsigned long)arg; local_save_flags(regs.msr); regs.pc = (unsigned long)kernel_thread_helper; regs.pt_mode = 1; return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } EXPORT_SYMBOL_GPL(kernel_thread); unsigned long get_wchan(struct task_struct *p) { /* TBD (used by procfs) */ return 0; } /* Set up a thread for executing a new program */ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) { regs->pc = pc; regs->r1 = usp; regs->pt_mode = 0; #ifdef CONFIG_MMU regs->msr |= MSR_UMS; #endif } #ifdef CONFIG_MMU #include <linux/elfcore.h> /* * Set up a thread for executing a new program */ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs) { return 0; /* MicroBlaze has no separate FPU registers */ } #endif /* CONFIG_MMU */
gpl-2.0
ausdim/GE-Edition-I9505-jfltexx
drivers/acpi/acpica/dsargs.c
4918
12110
/****************************************************************************** * * Module Name: dsargs - Support for execution of dynamic arguments for static * objects (regions, fields, buffer fields, etc.) * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "amlcode.h" #include "acdispat.h" #include "acnamesp.h" #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsargs") /* Local prototypes */ static acpi_status acpi_ds_execute_arguments(struct acpi_namespace_node *node, struct acpi_namespace_node *scope_node, u32 aml_length, u8 *aml_start); /******************************************************************************* * * FUNCTION: acpi_ds_execute_arguments * * PARAMETERS: Node - Object NS node * scope_node - Parent NS node * aml_length - Length of executable AML * aml_start - Pointer to the AML * * RETURN: Status. 
* * DESCRIPTION: Late (deferred) execution of region or field arguments * ******************************************************************************/ static acpi_status acpi_ds_execute_arguments(struct acpi_namespace_node *node, struct acpi_namespace_node *scope_node, u32 aml_length, u8 *aml_start) { acpi_status status; union acpi_parse_object *op; struct acpi_walk_state *walk_state; ACPI_FUNCTION_TRACE(ds_execute_arguments); /* Allocate a new parser op to be the root of the parsed tree */ op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP); if (!op) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Save the Node for use in acpi_ps_parse_aml */ op->common.node = scope_node; /* Create and initialize a new parser state */ walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL); if (!walk_state) { status = AE_NO_MEMORY; goto cleanup; } status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start, aml_length, NULL, ACPI_IMODE_LOAD_PASS1); if (ACPI_FAILURE(status)) { acpi_ds_delete_walk_state(walk_state); goto cleanup; } /* Mark this parse as a deferred opcode */ walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP; walk_state->deferred_node = node; /* Pass1: Parse the entire declaration */ status = acpi_ps_parse_aml(walk_state); if (ACPI_FAILURE(status)) { goto cleanup; } /* Get and init the Op created above */ op->common.node = node; acpi_ps_delete_parse_tree(op); /* Evaluate the deferred arguments */ op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP); if (!op) { return_ACPI_STATUS(AE_NO_MEMORY); } op->common.node = scope_node; /* Create and initialize a new parser state */ walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL); if (!walk_state) { status = AE_NO_MEMORY; goto cleanup; } /* Execute the opcode and arguments */ status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start, aml_length, NULL, ACPI_IMODE_EXECUTE); if (ACPI_FAILURE(status)) { acpi_ds_delete_walk_state(walk_state); goto cleanup; } /* Mark this execution as a deferred opcode */ 
walk_state->deferred_node = node; status = acpi_ps_parse_aml(walk_state); cleanup: acpi_ps_delete_parse_tree(op); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ds_get_buffer_field_arguments * * PARAMETERS: obj_desc - A valid buffer_field object * * RETURN: Status. * * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late * evaluation of these field attributes. * ******************************************************************************/ acpi_status acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc) { union acpi_operand_object *extra_desc; struct acpi_namespace_node *node; acpi_status status; ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc); if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { return_ACPI_STATUS(AE_OK); } /* Get the AML pointer (method object) and buffer_field node */ extra_desc = acpi_ns_get_secondary_object(obj_desc); node = obj_desc->buffer_field.node; ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_BUFFER_FIELD, node, NULL)); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n", acpi_ut_get_node_name(node))); /* Execute the AML code for the term_arg arguments */ status = acpi_ds_execute_arguments(node, node->parent, extra_desc->extra.aml_length, extra_desc->extra.aml_start); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ds_get_bank_field_arguments * * PARAMETERS: obj_desc - A valid bank_field object * * RETURN: Status. * * DESCRIPTION: Get bank_field bank_value. This implements the late * evaluation of these field attributes. 
* ******************************************************************************/ acpi_status acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc) { union acpi_operand_object *extra_desc; struct acpi_namespace_node *node; acpi_status status; ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc); if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { return_ACPI_STATUS(AE_OK); } /* Get the AML pointer (method object) and bank_field node */ extra_desc = acpi_ns_get_secondary_object(obj_desc); node = obj_desc->bank_field.node; ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL)); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n", acpi_ut_get_node_name(node))); /* Execute the AML code for the term_arg arguments */ status = acpi_ds_execute_arguments(node, node->parent, extra_desc->extra.aml_length, extra_desc->extra.aml_start); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_ut_add_address_range(obj_desc->region.space_id, obj_desc->region.address, obj_desc->region.length, node); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ds_get_buffer_arguments * * PARAMETERS: obj_desc - A valid Buffer object * * RETURN: Status. * * DESCRIPTION: Get Buffer length and initializer byte list. This implements * the late evaluation of these attributes. 
* ******************************************************************************/ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc) { struct acpi_namespace_node *node; acpi_status status; ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc); if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { return_ACPI_STATUS(AE_OK); } /* Get the Buffer node */ node = obj_desc->buffer.node; if (!node) { ACPI_ERROR((AE_INFO, "No pointer back to namespace node in buffer object %p", obj_desc)); return_ACPI_STATUS(AE_AML_INTERNAL); } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n")); /* Execute the AML code for the term_arg arguments */ status = acpi_ds_execute_arguments(node, node, obj_desc->buffer.aml_length, obj_desc->buffer.aml_start); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ds_get_package_arguments * * PARAMETERS: obj_desc - A valid Package object * * RETURN: Status. * * DESCRIPTION: Get Package length and initializer byte list. This implements * the late evaluation of these attributes. 
* ******************************************************************************/ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc) { struct acpi_namespace_node *node; acpi_status status; ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc); if (obj_desc->common.flags & AOPOBJ_DATA_VALID) { return_ACPI_STATUS(AE_OK); } /* Get the Package node */ node = obj_desc->package.node; if (!node) { ACPI_ERROR((AE_INFO, "No pointer back to namespace node in package %p", obj_desc)); return_ACPI_STATUS(AE_AML_INTERNAL); } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n")); /* Execute the AML code for the term_arg arguments */ status = acpi_ds_execute_arguments(node, node, obj_desc->package.aml_length, obj_desc->package.aml_start); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ds_get_region_arguments * * PARAMETERS: obj_desc - A valid region object * * RETURN: Status. * * DESCRIPTION: Get region address and length. This implements the late * evaluation of these region attributes. 
* ******************************************************************************/ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc) { struct acpi_namespace_node *node; acpi_status status; union acpi_operand_object *extra_desc; ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc); if (obj_desc->region.flags & AOPOBJ_DATA_VALID) { return_ACPI_STATUS(AE_OK); } extra_desc = acpi_ns_get_secondary_object(obj_desc); if (!extra_desc) { return_ACPI_STATUS(AE_NOT_EXIST); } /* Get the Region node */ node = obj_desc->region.node; ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_REGION, node, NULL)); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n", acpi_ut_get_node_name(node), extra_desc->extra.aml_start)); /* Execute the argument AML */ status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node, extra_desc->extra.aml_length, extra_desc->extra.aml_start); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_ut_add_address_range(obj_desc->region.space_id, obj_desc->region.address, obj_desc->region.length, node); return_ACPI_STATUS(status); }
gpl-2.0
Mirsaid02/android_kernel_samsung_ms013g-2
arch/arm/mach-imx/iomux-imx31.c
5174
4502
/* * Copyright 2004-2006 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de> * Copyright (C) 2009 by Valentin Longchamp <valentin.longchamp@epfl.ch> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/gpio.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/kernel.h> #include <mach/hardware.h> #include <mach/iomux-mx3.h> /* * IOMUX register (base) addresses */ #define IOMUX_BASE MX31_IO_ADDRESS(MX31_IOMUXC_BASE_ADDR) #define IOMUXINT_OBS1 (IOMUX_BASE + 0x000) #define IOMUXINT_OBS2 (IOMUX_BASE + 0x004) #define IOMUXGPR (IOMUX_BASE + 0x008) #define IOMUXSW_MUX_CTL (IOMUX_BASE + 0x00C) #define IOMUXSW_PAD_CTL (IOMUX_BASE + 0x154) static DEFINE_SPINLOCK(gpio_mux_lock); #define IOMUX_REG_MASK (IOMUX_PADNUM_MASK & ~0x3) unsigned long mxc_pin_alloc_map[NB_PORTS * 32 / BITS_PER_LONG]; /* * set the mode for a IOMUX pin. 
*/ int mxc_iomux_mode(unsigned int pin_mode) { u32 field, l, mode, ret = 0; void __iomem *reg; reg = IOMUXSW_MUX_CTL + (pin_mode & IOMUX_REG_MASK); field = pin_mode & 0x3; mode = (pin_mode & IOMUX_MODE_MASK) >> IOMUX_MODE_SHIFT; spin_lock(&gpio_mux_lock); l = __raw_readl(reg); l &= ~(0xff << (field * 8)); l |= mode << (field * 8); __raw_writel(l, reg); spin_unlock(&gpio_mux_lock); return ret; } EXPORT_SYMBOL(mxc_iomux_mode); /* * This function configures the pad value for a IOMUX pin. */ void mxc_iomux_set_pad(enum iomux_pins pin, u32 config) { u32 field, l; void __iomem *reg; pin &= IOMUX_PADNUM_MASK; reg = IOMUXSW_PAD_CTL + (pin + 2) / 3 * 4; field = (pin + 2) % 3; pr_debug("%s: reg offset = 0x%x, field = %d\n", __func__, (pin + 2) / 3, field); spin_lock(&gpio_mux_lock); l = __raw_readl(reg); l &= ~(0x1ff << (field * 10)); l |= config << (field * 10); __raw_writel(l, reg); spin_unlock(&gpio_mux_lock); } EXPORT_SYMBOL(mxc_iomux_set_pad); /* * allocs a single pin: * - reserves the pin so that it is not claimed by another driver * - setups the iomux according to the configuration */ int mxc_iomux_alloc_pin(unsigned int pin, const char *label) { unsigned pad = pin & IOMUX_PADNUM_MASK; if (pad >= (PIN_MAX + 1)) { printk(KERN_ERR "mxc_iomux: Attempt to request nonexistant pin %u for \"%s\"\n", pad, label ? label : "?"); return -EINVAL; } if (test_and_set_bit(pad, mxc_pin_alloc_map)) { printk(KERN_ERR "mxc_iomux: pin %u already used. Allocation for \"%s\" failed\n", pad, label ? 
label : "?"); return -EBUSY; } mxc_iomux_mode(pin); return 0; } EXPORT_SYMBOL(mxc_iomux_alloc_pin); int mxc_iomux_setup_multiple_pins(const unsigned int *pin_list, unsigned count, const char *label) { const unsigned int *p = pin_list; int i; int ret = -EINVAL; for (i = 0; i < count; i++) { ret = mxc_iomux_alloc_pin(*p, label); if (ret) goto setup_error; p++; } return 0; setup_error: mxc_iomux_release_multiple_pins(pin_list, i); return ret; } EXPORT_SYMBOL(mxc_iomux_setup_multiple_pins); void mxc_iomux_release_pin(unsigned int pin) { unsigned pad = pin & IOMUX_PADNUM_MASK; if (pad < (PIN_MAX + 1)) clear_bit(pad, mxc_pin_alloc_map); } EXPORT_SYMBOL(mxc_iomux_release_pin); void mxc_iomux_release_multiple_pins(const unsigned int *pin_list, int count) { const unsigned int *p = pin_list; int i; for (i = 0; i < count; i++) { mxc_iomux_release_pin(*p); p++; } } EXPORT_SYMBOL(mxc_iomux_release_multiple_pins); /* * This function enables/disables the general purpose function for a particular * signal. */ void mxc_iomux_set_gpr(enum iomux_gp_func gp, bool en) { u32 l; spin_lock(&gpio_mux_lock); l = __raw_readl(IOMUXGPR); if (en) l |= gp; else l &= ~gp; __raw_writel(l, IOMUXGPR); spin_unlock(&gpio_mux_lock); } EXPORT_SYMBOL(mxc_iomux_set_gpr);
gpl-2.0
nasty007/kernel_msm
arch/mips/sgi-ip27/ip27-klnuma.c
7734
3697
/* * Ported from IRIX to Linux by Kanoj Sarcar, 06/08/00. * Copyright 2000 - 2001 Silicon Graphics, Inc. * Copyright 2000 - 2001 Kanoj Sarcar (kanoj@sgi.com) */ #include <linux/init.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/kernel.h> #include <linux/nodemask.h> #include <linux/string.h> #include <asm/page.h> #include <asm/sections.h> #include <asm/sn/types.h> #include <asm/sn/arch.h> #include <asm/sn/gda.h> #include <asm/sn/hub.h> #include <asm/sn/mapped_kernel.h> #include <asm/sn/sn_private.h> static cpumask_t ktext_repmask; /* * XXX - This needs to be much smarter about where it puts copies of the * kernel. For example, we should never put a copy on a headless node, * and we should respect the topology of the machine. */ void __init setup_replication_mask(void) { /* Set only the master cnode's bit. The master cnode is always 0. */ cpus_clear(ktext_repmask); cpu_set(0, ktext_repmask); #ifdef CONFIG_REPLICATE_KTEXT #ifndef CONFIG_MAPPED_KERNEL #error Kernel replication works with mapped kernel support. No calias support. #endif { cnodeid_t cnode; for_each_online_node(cnode) { if (cnode == 0) continue; /* Advertise that we have a copy of the kernel */ cpu_set(cnode, ktext_repmask); } } #endif /* Set up a GDA pointer to the replication mask. */ GDA->g_ktext_repmask = &ktext_repmask; } static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) { kern_vars_t *kvp; kvp = &hub_data(client_nasid)->kern_vars; KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; kvp->kv_magic = KV_MAGIC; kvp->kv_ro_nasid = server_nasid; kvp->kv_rw_nasid = master_nasid; kvp->kv_ro_baseaddr = NODE_CAC_BASE(server_nasid); kvp->kv_rw_baseaddr = NODE_CAC_BASE(master_nasid); printk("REPLICATION: ON nasid %d, ktext from nasid %d, kdata from nasid %d\n", client_nasid, server_nasid, master_nasid); } /* XXX - When the BTE works, we should use it instead of this. 
*/ static __init void copy_kernel(nasid_t dest_nasid) { unsigned long dest_kern_start, source_start, source_end, kern_size; source_start = (unsigned long) _stext; source_end = (unsigned long) _etext; kern_size = source_end - source_start; dest_kern_start = CHANGE_ADDR_NASID(MAPPED_KERN_RO_TO_K0(source_start), dest_nasid); memcpy((void *)dest_kern_start, (void *)source_start, kern_size); } void __init replicate_kernel_text() { cnodeid_t cnode; nasid_t client_nasid; nasid_t server_nasid; server_nasid = master_nasid; /* Record where the master node should get its kernel text */ set_ktext_source(master_nasid, master_nasid); for_each_online_node(cnode) { if (cnode == 0) continue; client_nasid = COMPACT_TO_NASID_NODEID(cnode); /* Check if this node should get a copy of the kernel */ if (cpu_isset(cnode, ktext_repmask)) { server_nasid = client_nasid; copy_kernel(server_nasid); } /* Record where this node should get its kernel text */ set_ktext_source(client_nasid, server_nasid); } } /* * Return pfn of first free page of memory on a node. PROM may allocate * data structures on the first couple of pages of the first slot of each * node. If this is the case, getfirstfree(node) > getslotstart(node, 0). */ pfn_t node_getfirstfree(cnodeid_t cnode) { unsigned long loadbase = REP_BASE; nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); unsigned long offset; #ifdef CONFIG_MAPPED_KERNEL loadbase += 16777216; #endif offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase; if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask))) return (TO_NODE(nasid, offset) >> PAGE_SHIFT); else return (KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT); }
gpl-2.0
PsychoGame/omnirom_kernel_lge_msm8974-old
net/irda/ircomm/ircomm_tty_ioctl.c
7990
11665
/********************************************************************* * * Filename: ircomm_tty_ioctl.c * Version: * Description: * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Thu Jun 10 14:39:09 1999 * Modified at: Wed Jan 5 14:45:43 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/init.h> #include <linux/fs.h> #include <linux/termios.h> #include <linux/tty.h> #include <linux/serial.h> #include <asm/uaccess.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> #include <net/irda/ircomm_core.h> #include <net/irda/ircomm_param.h> #include <net/irda/ircomm_tty_attach.h> #include <net/irda/ircomm_tty.h> #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) /* * Function ircomm_tty_change_speed (driver) * * Change speed of the driver. 
If the remote device is a DCE, then this * should make it change the speed of its serial port */ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) { unsigned cflag, cval; int baud; IRDA_DEBUG(2, "%s()\n", __func__ ); if (!self->tty || !self->tty->termios || !self->ircomm) return; cflag = self->tty->termios->c_cflag; /* byte size and parity */ switch (cflag & CSIZE) { case CS5: cval = IRCOMM_WSIZE_5; break; case CS6: cval = IRCOMM_WSIZE_6; break; case CS7: cval = IRCOMM_WSIZE_7; break; case CS8: cval = IRCOMM_WSIZE_8; break; default: cval = IRCOMM_WSIZE_5; break; } if (cflag & CSTOPB) cval |= IRCOMM_2_STOP_BIT; if (cflag & PARENB) cval |= IRCOMM_PARITY_ENABLE; if (!(cflag & PARODD)) cval |= IRCOMM_PARITY_EVEN; /* Determine divisor based on baud rate */ baud = tty_get_baud_rate(self->tty); if (!baud) baud = 9600; /* B0 transition handled in rs_set_termios */ self->settings.data_rate = baud; ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE); /* CTS flow control flag and modem status interrupts */ if (cflag & CRTSCTS) { self->flags |= ASYNC_CTS_FLOW; self->settings.flow_control |= IRCOMM_RTS_CTS_IN; /* This got me. Bummer. Jean II */ if (self->service_type == IRCOMM_3_WIRE_RAW) IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __func__); } else { self->flags &= ~ASYNC_CTS_FLOW; self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; } if (cflag & CLOCAL) self->flags &= ~ASYNC_CHECK_CD; else self->flags |= ASYNC_CHECK_CD; #if 0 /* * Set up parity check flag */ if (I_INPCK(self->tty)) driver->read_status_mask |= LSR_FE | LSR_PE; if (I_BRKINT(driver->tty) || I_PARMRK(driver->tty)) driver->read_status_mask |= LSR_BI; /* * Characters to ignore */ driver->ignore_status_mask = 0; if (I_IGNPAR(driver->tty)) driver->ignore_status_mask |= LSR_PE | LSR_FE; if (I_IGNBRK(self->tty)) { self->ignore_status_mask |= LSR_BI; /* * If we're ignore parity and break indicators, ignore * overruns too. (For real raw support). 
*/ if (I_IGNPAR(self->tty)) self->ignore_status_mask |= LSR_OE; } #endif self->settings.data_format = cval; ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE); ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE); } /* * Function ircomm_tty_set_termios (tty, old_termios) * * This routine allows the tty driver to be notified when device's * termios settings have changed. Note that a well-designed tty driver * should be prepared to accept the case where old == NULL, and try to * do something rational. */ void ircomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned int cflag = tty->termios->c_cflag; IRDA_DEBUG(2, "%s()\n", __func__ ); if ((cflag == old_termios->c_cflag) && (RELEVANT_IFLAG(tty->termios->c_iflag) == RELEVANT_IFLAG(old_termios->c_iflag))) { return; } ircomm_tty_change_speed(self); /* Handle transition to B0 status */ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) { self->settings.dte &= ~(IRCOMM_DTR|IRCOMM_RTS); ircomm_param_request(self, IRCOMM_DTE, TRUE); } /* Handle transition away from B0 status */ if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) { self->settings.dte |= IRCOMM_DTR; if (!(tty->termios->c_cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags)) { self->settings.dte |= IRCOMM_RTS; } ircomm_param_request(self, IRCOMM_DTE, TRUE); } /* Handle turning off CRTSCTS */ if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios->c_cflag & CRTSCTS)) { tty->hw_stopped = 0; ircomm_tty_start(tty); } } /* * Function ircomm_tty_tiocmget (tty) * * * */ int ircomm_tty_tiocmget(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned int result; IRDA_DEBUG(2, "%s()\n", __func__ ); if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; result = ((self->settings.dte & IRCOMM_RTS) ? TIOCM_RTS : 0) | ((self->settings.dte & IRCOMM_DTR) ? TIOCM_DTR : 0) | ((self->settings.dce & IRCOMM_CD) ? 
TIOCM_CAR : 0) | ((self->settings.dce & IRCOMM_RI) ? TIOCM_RNG : 0) | ((self->settings.dce & IRCOMM_DSR) ? TIOCM_DSR : 0) | ((self->settings.dce & IRCOMM_CTS) ? TIOCM_CTS : 0); return result; } /* * Function ircomm_tty_tiocmset (tty, set, clear) * * * */ int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_DEBUG(2, "%s()\n", __func__ ); if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); if (set & TIOCM_RTS) self->settings.dte |= IRCOMM_RTS; if (set & TIOCM_DTR) self->settings.dte |= IRCOMM_DTR; if (clear & TIOCM_RTS) self->settings.dte &= ~IRCOMM_RTS; if (clear & TIOCM_DTR) self->settings.dte &= ~IRCOMM_DTR; if ((set|clear) & TIOCM_RTS) self->settings.dte |= IRCOMM_DELTA_RTS; if ((set|clear) & TIOCM_DTR) self->settings.dte |= IRCOMM_DELTA_DTR; ircomm_param_request(self, IRCOMM_DTE, TRUE); return 0; } /* * Function get_serial_info (driver, retinfo) * * * */ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self, struct serial_struct __user *retinfo) { struct serial_struct info; if (!retinfo) return -EFAULT; IRDA_DEBUG(2, "%s()\n", __func__ ); memset(&info, 0, sizeof(info)); info.line = self->line; info.flags = self->flags; info.baud_base = self->settings.data_rate; info.close_delay = self->close_delay; info.closing_wait = self->closing_wait; /* For compatibility */ info.type = PORT_16550A; info.port = 0; info.irq = 0; info.xmit_fifo_size = 0; info.hub6 = 0; info.custom_divisor = 0; if (copy_to_user(retinfo, &info, sizeof(*retinfo))) return -EFAULT; return 0; } /* * Function set_serial_info (driver, new_info) * * * */ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self, struct serial_struct __user *new_info) { #if 0 struct serial_struct new_serial; struct ircomm_tty_cb old_state, *state; IRDA_DEBUG(0, "%s()\n", __func__ ); if 
(copy_from_user(&new_serial,new_info,sizeof(new_serial))) return -EFAULT; state = self old_state = *self; if (!capable(CAP_SYS_ADMIN)) { if ((new_serial.baud_base != state->settings.data_rate) || (new_serial.close_delay != state->close_delay) || ((new_serial.flags & ~ASYNC_USR_MASK) != (self->flags & ~ASYNC_USR_MASK))) return -EPERM; state->flags = ((state->flags & ~ASYNC_USR_MASK) | (new_serial.flags & ASYNC_USR_MASK)); self->flags = ((self->flags & ~ASYNC_USR_MASK) | (new_serial.flags & ASYNC_USR_MASK)); /* self->custom_divisor = new_serial.custom_divisor; */ goto check_and_exit; } /* * OK, past this point, all the error checking has been done. * At this point, we start making changes..... */ if (self->settings.data_rate != new_serial.baud_base) { self->settings.data_rate = new_serial.baud_base; ircomm_param_request(self, IRCOMM_DATA_RATE, TRUE); } self->close_delay = new_serial.close_delay * HZ/100; self->closing_wait = new_serial.closing_wait * HZ/100; /* self->custom_divisor = new_serial.custom_divisor; */ self->flags = ((self->flags & ~ASYNC_FLAGS) | (new_serial.flags & ASYNC_FLAGS)); self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 
1 : 0; check_and_exit: if (self->flags & ASYNC_INITIALIZED) { if (((old_state.flags & ASYNC_SPD_MASK) != (self->flags & ASYNC_SPD_MASK)) || (old_driver.custom_divisor != driver->custom_divisor)) { if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) driver->tty->alt_speed = 57600; if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) driver->tty->alt_speed = 115200; if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) driver->tty->alt_speed = 230400; if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) driver->tty->alt_speed = 460800; ircomm_tty_change_speed(driver); } } #endif return 0; } /* * Function ircomm_tty_ioctl (tty, cmd, arg) * * * */ int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; int ret = 0; IRDA_DEBUG(2, "%s()\n", __func__ ); if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; } switch (cmd) { case TIOCGSERIAL: ret = ircomm_tty_get_serial_info(self, (struct serial_struct __user *) arg); break; case TIOCSSERIAL: ret = ircomm_tty_set_serial_info(self, (struct serial_struct __user *) arg); break; case TIOCMIWAIT: IRDA_DEBUG(0, "(), TIOCMIWAIT, not impl!\n"); break; case TIOCGICOUNT: IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __func__ ); #if 0 save_flags(flags); cli(); cnow = driver->icount; restore_flags(flags); p_cuser = (struct serial_icounter_struct __user *) arg; if (put_user(cnow.cts, &p_cuser->cts) || put_user(cnow.dsr, &p_cuser->dsr) || put_user(cnow.rng, &p_cuser->rng) || put_user(cnow.dcd, &p_cuser->dcd) || put_user(cnow.rx, &p_cuser->rx) || put_user(cnow.tx, &p_cuser->tx) || put_user(cnow.frame, &p_cuser->frame) || put_user(cnow.overrun, &p_cuser->overrun) || put_user(cnow.parity, &p_cuser->parity) || put_user(cnow.brk, &p_cuser->brk) || put_user(cnow.buf_overrun, 
&p_cuser->buf_overrun)) return -EFAULT; #endif return 0; default: ret = -ENOIOCTLCMD; /* ioctls which we must ignore */ } return ret; }
gpl-2.0
ubuntustudio-kernel/ubuntu-precise-lowlatency
arch/mips/bcm63xx/dev-pcmcia.c
9014
3049
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/bootinfo.h> #include <linux/platform_device.h> #include <bcm63xx_cs.h> #include <bcm63xx_cpu.h> #include <bcm63xx_dev_pcmcia.h> #include <bcm63xx_io.h> #include <bcm63xx_regs.h> static struct resource pcmcia_resources[] = { /* pcmcia registers */ { /* start & end filled at runtime */ .flags = IORESOURCE_MEM, }, /* pcmcia memory zone resources */ { .start = BCM_PCMCIA_COMMON_BASE_PA, .end = BCM_PCMCIA_COMMON_END_PA, .flags = IORESOURCE_MEM, }, { .start = BCM_PCMCIA_ATTR_BASE_PA, .end = BCM_PCMCIA_ATTR_END_PA, .flags = IORESOURCE_MEM, }, { .start = BCM_PCMCIA_IO_BASE_PA, .end = BCM_PCMCIA_IO_END_PA, .flags = IORESOURCE_MEM, }, /* PCMCIA irq */ { /* start filled at runtime */ .flags = IORESOURCE_IRQ, }, /* declare PCMCIA IO resource also */ { .start = BCM_PCMCIA_IO_BASE_PA, .end = BCM_PCMCIA_IO_END_PA, .flags = IORESOURCE_IO, }, }; static struct bcm63xx_pcmcia_platform_data pd; static struct platform_device bcm63xx_pcmcia_device = { .name = "bcm63xx_pcmcia", .id = 0, .num_resources = ARRAY_SIZE(pcmcia_resources), .resource = pcmcia_resources, .dev = { .platform_data = &pd, }, }; static int __init config_pcmcia_cs(unsigned int cs, u32 base, unsigned int size) { int ret; ret = bcm63xx_set_cs_status(cs, 0); if (!ret) ret = bcm63xx_set_cs_base(cs, base, size); if (!ret) ret = bcm63xx_set_cs_status(cs, 1); return ret; } static const __initdata struct { unsigned int cs; unsigned int base; unsigned int size; } pcmcia_cs[3] = { { .cs = MPI_CS_PCMCIA_COMMON, .base = BCM_PCMCIA_COMMON_BASE_PA, .size = BCM_PCMCIA_COMMON_SIZE }, { .cs = MPI_CS_PCMCIA_ATTR, .base = BCM_PCMCIA_ATTR_BASE_PA, .size = BCM_PCMCIA_ATTR_SIZE }, { .cs = MPI_CS_PCMCIA_IO, .base = BCM_PCMCIA_IO_BASE_PA, .size = 
BCM_PCMCIA_IO_SIZE }, }; int __init bcm63xx_pcmcia_register(void) { int ret, i; if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358()) return 0; /* use correct pcmcia ready gpio depending on processor */ switch (bcm63xx_get_cpu_id()) { case BCM6348_CPU_ID: pd.ready_gpio = 22; break; case BCM6358_CPU_ID: pd.ready_gpio = 18; break; default: return -ENODEV; } pcmcia_resources[0].start = bcm63xx_regset_address(RSET_PCMCIA); pcmcia_resources[0].end = pcmcia_resources[0].start + RSET_PCMCIA_SIZE - 1; pcmcia_resources[4].start = bcm63xx_get_irq_number(IRQ_PCMCIA); /* configure pcmcia chip selects */ for (i = 0; i < 3; i++) { ret = config_pcmcia_cs(pcmcia_cs[i].cs, pcmcia_cs[i].base, pcmcia_cs[i].size); if (ret) goto out_err; } return platform_device_register(&bcm63xx_pcmcia_device); out_err: printk(KERN_ERR "unable to set pcmcia chip select\n"); return ret; }
gpl-2.0
NXT-F1V3/kernel_dev
arch/sh/drivers/pci/ops-sh7786.c
12342
4765
/* * Generic SH7786 PCI-Express operations. * * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License v2. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/spinlock.h> #include "pcie-sh7786.h" enum { PCI_ACCESS_READ, PCI_ACCESS_WRITE, }; static int sh7786_pcie_config_access(unsigned char access_type, struct pci_bus *bus, unsigned int devfn, int where, u32 *data) { struct pci_channel *chan = bus->sysdata; int dev, func, type, reg; dev = PCI_SLOT(devfn); func = PCI_FUNC(devfn); type = !!bus->parent; reg = where & ~3; if (bus->number > 255 || dev > 31 || func > 7) return PCIBIOS_FUNC_NOT_SUPPORTED; /* * While each channel has its own memory-mapped extended config * space, it's generally only accessible when in endpoint mode. * When in root complex mode, the controller is unable to target * itself with either type 0 or type 1 accesses, and indeed, any * controller initiated target transfer to its own config space * result in a completer abort. * * Each channel effectively only supports a single device, but as * the same channel <-> device access works for any PCI_SLOT() * value, we cheat a bit here and bind the controller's config * space to devfn 0 in order to enable self-enumeration. In this * case the regular PAR/PDR path is sidelined and the mangled * config access itself is initiated as a SuperHyway transaction. 
*/ if (pci_is_root_bus(bus)) { if (dev == 0) { if (access_type == PCI_ACCESS_READ) *data = pci_read_reg(chan, PCI_REG(reg)); else pci_write_reg(chan, *data, PCI_REG(reg)); return PCIBIOS_SUCCESSFUL; } else if (dev > 1) return PCIBIOS_DEVICE_NOT_FOUND; } /* Clear errors */ pci_write_reg(chan, pci_read_reg(chan, SH4A_PCIEERRFR), SH4A_PCIEERRFR); /* Set the PIO address */ pci_write_reg(chan, (bus->number << 24) | (dev << 19) | (func << 16) | reg, SH4A_PCIEPAR); /* Enable the configuration access */ pci_write_reg(chan, (1 << 31) | (type << 8), SH4A_PCIEPCTLR); /* Check for errors */ if (pci_read_reg(chan, SH4A_PCIEERRFR) & 0x10) return PCIBIOS_DEVICE_NOT_FOUND; /* Check for master and target aborts */ if (pci_read_reg(chan, SH4A_PCIEPCICONF1) & ((1 << 29) | (1 << 28))) return PCIBIOS_DEVICE_NOT_FOUND; if (access_type == PCI_ACCESS_READ) *data = pci_read_reg(chan, SH4A_PCIEPDR); else pci_write_reg(chan, *data, SH4A_PCIEPDR); /* Disable the configuration access */ pci_write_reg(chan, 0, SH4A_PCIEPCTLR); return PCIBIOS_SUCCESSFUL; } static int sh7786_pcie_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { unsigned long flags; int ret; u32 data; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; raw_spin_lock_irqsave(&pci_config_lock, flags); ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus, devfn, where, &data); if (ret != PCIBIOS_SUCCESSFUL) { *val = 0xffffffff; goto out; } if (size == 1) *val = (data >> ((where & 3) << 3)) & 0xff; else if (size == 2) *val = (data >> ((where & 2) << 3)) & 0xffff; else *val = data; dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x " "where=0x%04x size=%d val=0x%08lx\n", bus->number, devfn, where, size, (unsigned long)*val); out: raw_spin_unlock_irqrestore(&pci_config_lock, flags); return ret; } static int sh7786_pcie_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { unsigned long 
flags; int shift, ret; u32 data; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; raw_spin_lock_irqsave(&pci_config_lock, flags); ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus, devfn, where, &data); if (ret != PCIBIOS_SUCCESSFUL) goto out; dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x " "where=0x%04x size=%d val=%08lx\n", bus->number, devfn, where, size, (unsigned long)val); if (size == 1) { shift = (where & 3) << 3; data &= ~(0xff << shift); data |= ((val & 0xff) << shift); } else if (size == 2) { shift = (where & 2) << 3; data &= ~(0xffff << shift); data |= ((val & 0xffff) << shift); } else data = val; ret = sh7786_pcie_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data); out: raw_spin_unlock_irqrestore(&pci_config_lock, flags); return ret; } struct pci_ops sh7786_pci_ops = { .read = sh7786_pcie_read, .write = sh7786_pcie_write, };
gpl-2.0
Californication/lge-kernel-msm7x27-ICS-JB
fs/ntfs/quota.c
14390
3724
/* * quota.c - NTFS kernel quota ($Quota) handling. Part of the Linux-NTFS * project. * * Copyright (c) 2004 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef NTFS_RW #include "index.h" #include "quota.h" #include "debug.h" #include "ntfs.h" /** * ntfs_mark_quotas_out_of_date - mark the quotas out of date on an ntfs volume * @vol: ntfs volume on which to mark the quotas out of date * * Mark the quotas out of date on the ntfs volume @vol and return 'true' on * success and 'false' on error. 
*/ bool ntfs_mark_quotas_out_of_date(ntfs_volume *vol) { ntfs_index_context *ictx; QUOTA_CONTROL_ENTRY *qce; const le32 qid = QUOTA_DEFAULTS_ID; int err; ntfs_debug("Entering."); if (NVolQuotaOutOfDate(vol)) goto done; if (!vol->quota_ino || !vol->quota_q_ino) { ntfs_error(vol->sb, "Quota inodes are not open."); return false; } mutex_lock(&vol->quota_q_ino->i_mutex); ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino)); if (!ictx) { ntfs_error(vol->sb, "Failed to get index context."); goto err_out; } err = ntfs_index_lookup(&qid, sizeof(qid), ictx); if (err) { if (err == -ENOENT) ntfs_error(vol->sb, "Quota defaults entry is not " "present."); else ntfs_error(vol->sb, "Lookup of quota defaults entry " "failed."); goto err_out; } if (ictx->data_len < offsetof(QUOTA_CONTROL_ENTRY, sid)) { ntfs_error(vol->sb, "Quota defaults entry size is invalid. " "Run chkdsk."); goto err_out; } qce = (QUOTA_CONTROL_ENTRY*)ictx->data; if (le32_to_cpu(qce->version) != QUOTA_VERSION) { ntfs_error(vol->sb, "Quota defaults entry version 0x%x is not " "supported.", le32_to_cpu(qce->version)); goto err_out; } ntfs_debug("Quota defaults flags = 0x%x.", le32_to_cpu(qce->flags)); /* If quotas are already marked out of date, no need to do anything. */ if (qce->flags & QUOTA_FLAG_OUT_OF_DATE) goto set_done; /* * If quota tracking is neither requested, nor enabled and there are no * pending deletes, no need to mark the quotas out of date. */ if (!(qce->flags & (QUOTA_FLAG_TRACKING_ENABLED | QUOTA_FLAG_TRACKING_REQUESTED | QUOTA_FLAG_PENDING_DELETES))) goto set_done; /* * Set the QUOTA_FLAG_OUT_OF_DATE bit thus marking quotas out of date. * This is verified on WinXP to be sufficient to cause windows to * rescan the volume on boot and update all quota entries. */ qce->flags |= QUOTA_FLAG_OUT_OF_DATE; /* Ensure the modified flags are written to disk. 
*/ ntfs_index_entry_flush_dcache_page(ictx); ntfs_index_entry_mark_dirty(ictx); set_done: ntfs_index_ctx_put(ictx); mutex_unlock(&vol->quota_q_ino->i_mutex); /* * We set the flag so we do not try to mark the quotas out of date * again on remount. */ NVolSetQuotaOutOfDate(vol); done: ntfs_debug("Done."); return true; err_out: if (ictx) ntfs_index_ctx_put(ictx); mutex_unlock(&vol->quota_q_ino->i_mutex); return false; } #endif /* NTFS_RW */
gpl-2.0
rminnich/linux
sound/drivers/mpu401/mpu401.c
55
7615
/* * Driver for generic MPU-401 boards (UART mode only) * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Copyright (c) 2004 by Castet Matthieu <castet.matthieu@free.fr> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/pnp.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/module.h> #include <sound/core.h> #include <sound/mpu401.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("MPU-401 UART"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -2}; /* exclude the first card */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ #ifdef CONFIG_PNP static bool pnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; #endif static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* MPU-401 port number */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* MPU-401 IRQ */ static bool uart_enter[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = 1}; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for MPU-401 device."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for MPU-401 device."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable MPU-401 device."); #ifdef CONFIG_PNP module_param_array(pnp, bool, NULL, 0444); MODULE_PARM_DESC(pnp, "PnP detection for MPU-401 device."); #endif module_param_hw_array(port, long, ioport, NULL, 0444); MODULE_PARM_DESC(port, "Port # for MPU-401 device."); module_param_hw_array(irq, int, irq, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for MPU-401 device."); module_param_array(uart_enter, bool, NULL, 0444); MODULE_PARM_DESC(uart_enter, "Issue UART_ENTER command at open."); static struct platform_device *platform_devices[SNDRV_CARDS]; static int pnp_registered; static unsigned int snd_mpu401_devices; static int snd_mpu401_create(struct device *devptr, int dev, struct snd_card **rcard) { struct snd_card *card; int err; if (!uart_enter[dev]) snd_printk(KERN_ERR "the uart_enter option is obsolete; remove it\n"); *rcard = NULL; err = snd_card_new(devptr, index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; strcpy(card->driver, "MPU-401 UART"); strcpy(card->shortname, card->driver); sprintf(card->longname, "%s at %#lx, ", card->shortname, port[dev]); if (irq[dev] >= 0) { sprintf(card->longname + strlen(card->longname), "irq %d", irq[dev]); } else { strcat(card->longname, "polled"); } err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, port[dev], 0, irq[dev], NULL); if (err < 0) { printk(KERN_ERR "MPU401 not detected at 0x%lx\n", port[dev]); goto _err; } *rcard = card; return 0; _err: snd_card_free(card); return err; } static int snd_mpu401_probe(struct platform_device *devptr) { int dev = devptr->id; int err; struct snd_card *card; if (port[dev] == SNDRV_AUTO_PORT) { snd_printk(KERN_ERR "specify port\n"); return -EINVAL; } if (irq[dev] == SNDRV_AUTO_IRQ) { 
snd_printk(KERN_ERR "specify or disable IRQ\n"); return -EINVAL; } err = snd_mpu401_create(&devptr->dev, dev, &card); if (err < 0) return err; if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } platform_set_drvdata(devptr, card); return 0; } static int snd_mpu401_remove(struct platform_device *devptr) { snd_card_free(platform_get_drvdata(devptr)); return 0; } #define SND_MPU401_DRIVER "snd_mpu401" static struct platform_driver snd_mpu401_driver = { .probe = snd_mpu401_probe, .remove = snd_mpu401_remove, .driver = { .name = SND_MPU401_DRIVER, }, }; #ifdef CONFIG_PNP #define IO_EXTENT 2 static struct pnp_device_id snd_mpu401_pnpids[] = { { .id = "PNPb006" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, snd_mpu401_pnpids); static int snd_mpu401_pnp(int dev, struct pnp_dev *device, const struct pnp_device_id *id) { if (!pnp_port_valid(device, 0) || pnp_port_flags(device, 0) & IORESOURCE_DISABLED) { snd_printk(KERN_ERR "no PnP port\n"); return -ENODEV; } if (pnp_port_len(device, 0) < IO_EXTENT) { snd_printk(KERN_ERR "PnP port length is %llu, expected %d\n", (unsigned long long)pnp_port_len(device, 0), IO_EXTENT); return -ENODEV; } port[dev] = pnp_port_start(device, 0); if (!pnp_irq_valid(device, 0) || pnp_irq_flags(device, 0) & IORESOURCE_DISABLED) { snd_printk(KERN_WARNING "no PnP irq, using polling\n"); irq[dev] = -1; } else { irq[dev] = pnp_irq(device, 0); } return 0; } static int snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) { static int dev; struct snd_card *card; int err; for ( ; dev < SNDRV_CARDS; ++dev) { if (!enable[dev] || !pnp[dev]) continue; err = snd_mpu401_pnp(dev, pnp_dev, id); if (err < 0) return err; err = snd_mpu401_create(&pnp_dev->dev, dev, &card); if (err < 0) return err; if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pnp_set_drvdata(pnp_dev, card); snd_mpu401_devices++; ++dev; return 0; } return -ENODEV; } static void snd_mpu401_pnp_remove(struct pnp_dev *dev) { 
struct snd_card *card = (struct snd_card *) pnp_get_drvdata(dev); snd_card_disconnect(card); snd_card_free_when_closed(card); } static struct pnp_driver snd_mpu401_pnp_driver = { .name = "mpu401", .id_table = snd_mpu401_pnpids, .probe = snd_mpu401_pnp_probe, .remove = snd_mpu401_pnp_remove, }; #else static struct pnp_driver snd_mpu401_pnp_driver; #endif static void snd_mpu401_unregister_all(void) { int i; if (pnp_registered) pnp_unregister_driver(&snd_mpu401_pnp_driver); for (i = 0; i < ARRAY_SIZE(platform_devices); ++i) platform_device_unregister(platform_devices[i]); platform_driver_unregister(&snd_mpu401_driver); } static int __init alsa_card_mpu401_init(void) { int i, err; if ((err = platform_driver_register(&snd_mpu401_driver)) < 0) return err; for (i = 0; i < SNDRV_CARDS; i++) { struct platform_device *device; if (! enable[i]) continue; #ifdef CONFIG_PNP if (pnp[i]) continue; #endif device = platform_device_register_simple(SND_MPU401_DRIVER, i, NULL, 0); if (IS_ERR(device)) continue; if (!platform_get_drvdata(device)) { platform_device_unregister(device); continue; } platform_devices[i] = device; snd_mpu401_devices++; } err = pnp_register_driver(&snd_mpu401_pnp_driver); if (!err) pnp_registered = 1; if (!snd_mpu401_devices) { #ifdef MODULE printk(KERN_ERR "MPU-401 device not found or device busy\n"); #endif snd_mpu401_unregister_all(); return -ENODEV; } return 0; } static void __exit alsa_card_mpu401_exit(void) { snd_mpu401_unregister_all(); } module_init(alsa_card_mpu401_init) module_exit(alsa_card_mpu401_exit)
gpl-2.0
codeaurora-unoffical/linux-msm
drivers/mtd/spi-nor/winbond.c
55
5484
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2005, Intec Automation Inc. * Copyright (C) 2014, Freescale Semiconductor, Inc. */ #include <linux/mtd/spi-nor.h> #include "core.h" static int w25q256_post_bfpt_fixups(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, const struct sfdp_bfpt *bfpt, struct spi_nor_flash_parameter *params) { /* * W25Q256JV supports 4B opcodes but W25Q256FV does not. * Unfortunately, Winbond has re-used the same JEDEC ID for both * variants which prevents us from defining a new entry in the parts * table. * To differentiate between W25Q256JV and W25Q256FV check SFDP header * version: only JV has JESD216A compliant structure (version 5). */ if (bfpt_header->major == SFDP_JESD216_MAJOR && bfpt_header->minor == SFDP_JESD216A_MINOR) nor->flags |= SNOR_F_4B_OPCODES; return 0; } static struct spi_nor_fixups w25q256_fixups = { .post_bfpt = w25q256_post_bfpt_fixups, }; static const struct flash_info winbond_parts[] = { /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) }, { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) }, { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | 
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) }, { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) .fixups = &w25q256_fixups }, { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) }, }; 
/** * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes. * @nor: pointer to 'struct spi_nor'. * @enable: true to enter the 4-byte address mode, false to exit the 4-byte * address mode. * * Return: 0 on success, -errno otherwise. */ static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable) { int ret; ret = spi_nor_set_4byte_addr_mode(nor, enable); if (ret || enable) return ret; /* * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address * Register to be set to 1, so all 3-byte-address reads come from the * second 16M. We must clear the register to enable normal behavior. */ ret = spi_nor_write_enable(nor); if (ret) return ret; ret = spi_nor_write_ear(nor, 0); if (ret) return ret; return spi_nor_write_disable(nor); } static void winbond_default_init(struct spi_nor *nor) { nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode; } static const struct spi_nor_fixups winbond_fixups = { .default_init = winbond_default_init, }; const struct spi_nor_manufacturer spi_nor_winbond = { .name = "winbond", .parts = winbond_parts, .nparts = ARRAY_SIZE(winbond_parts), .fixups = &winbond_fixups, };
gpl-2.0
srfarias/srfarias_kernel_msm8916
net/netfilter/nf_conntrack_proto_generic.c
1335
5826
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack_l4proto.h> static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; static bool nf_generic_should_process(u8 proto) { switch (proto) { #ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE case IPPROTO_SCTP: return false; #endif #ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE case IPPROTO_DCCP: return false; #endif #ifdef CONFIG_NF_CT_PROTO_GRE_MODULE case IPPROTO_GRE: return false; #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE case IPPROTO_UDPLITE: return false; #endif default: return true; } } static inline struct nf_generic_net *generic_pernet(struct net *net) { return &net->ct.nf_ct_proto.generic; } static bool generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct nf_conntrack_tuple *tuple) { tuple->src.u.all = 0; tuple->dst.u.all = 0; return true; } static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { tuple->src.u.all = 0; tuple->dst.u.all = 0; return true; } /* Print out the per-protocol part of the tuple. */ static int generic_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { return 0; } static unsigned int *generic_get_timeouts(struct net *net) { return &(generic_pernet(net)->timeout); } /* Returns verdict for packet, or -1 for invalid. */ static int generic_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeout) { nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); return NF_ACCEPT; } /* Called when a new connection for this protocol found. 
*/ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { return nf_generic_should_process(nf_ct_protonum(ct)); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { unsigned int *timeout = data; struct nf_generic_net *gn = generic_pernet(net); if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT]) *timeout = ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ; else { /* Set default generic timeout. */ *timeout = gn->timeout; } return 0; } static int generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ))) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table generic_sysctl_table[] = { { .procname = "nf_conntrack_generic_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT static struct ctl_table generic_compat_sysctl_table[] = { { .procname = "ip_conntrack_generic_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ #endif /* CONFIG_SYSCTL */ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn, struct nf_generic_net *gn) { #ifdef CONFIG_SYSCTL pn->ctl_table = kmemdup(generic_sysctl_table, sizeof(generic_sysctl_table), GFP_KERNEL); if (!pn->ctl_table) return -ENOMEM; pn->ctl_table[0].data = &gn->timeout; #endif return 0; } static int generic_kmemdup_compat_sysctl_table(struct 
nf_proto_net *pn, struct nf_generic_net *gn) { #ifdef CONFIG_SYSCTL #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT pn->ctl_compat_table = kmemdup(generic_compat_sysctl_table, sizeof(generic_compat_sysctl_table), GFP_KERNEL); if (!pn->ctl_compat_table) return -ENOMEM; pn->ctl_compat_table[0].data = &gn->timeout; #endif #endif return 0; } static int generic_init_net(struct net *net, u_int16_t proto) { int ret; struct nf_generic_net *gn = generic_pernet(net); struct nf_proto_net *pn = &gn->pn; gn->timeout = nf_ct_generic_timeout; ret = generic_kmemdup_compat_sysctl_table(pn, gn); if (ret < 0) return ret; ret = generic_kmemdup_sysctl_table(pn, gn); if (ret < 0) nf_ct_kfree_compat_sysctl_table(pn); return ret; } static struct nf_proto_net *generic_get_net_proto(struct net *net) { return &net->ct.nf_ct_proto.generic.pn; } struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly = { .l3proto = PF_UNSPEC, .l4proto = 255, .name = "unknown", .pkt_to_tuple = generic_pkt_to_tuple, .invert_tuple = generic_invert_tuple, .print_tuple = generic_print_tuple, .packet = generic_packet, .get_timeouts = generic_get_timeouts, .new = generic_new, #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = generic_timeout_nlattr_to_obj, .obj_to_nlattr = generic_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_GENERIC_MAX, .obj_size = sizeof(unsigned int), .nla_policy = generic_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .init_net = generic_init_net, .get_net_proto = generic_get_net_proto, };
gpl-2.0
phenyl-sphinx/linux
drivers/net/wireless/zd1201.c
1591
46790
/* * Driver for ZyDAS zd1201 based wireless USB devices. * * Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * Parts of this driver have been derived from a wlan-ng version * modified by ZyDAS. They also made documentation available, thanks! * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. */ #include <linux/module.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/wireless.h> #include <net/cfg80211.h> #include <net/iw_handler.h> #include <linux/string.h> #include <linux/if_arp.h> #include <linux/firmware.h> #include "zd1201.h" static struct usb_device_id zd1201_table[] = { {USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */ {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */ {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */ {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */ {} }; static int ap; /* Are we an AP or a normal station? 
*/ #define ZD1201_VERSION "0.15" MODULE_AUTHOR("Jeroen Vreeken <pe1rxq@amsat.org>"); MODULE_DESCRIPTION("Driver for ZyDAS ZD1201 based USB Wireless adapters"); MODULE_VERSION(ZD1201_VERSION); MODULE_LICENSE("GPL"); module_param(ap, int, 0); MODULE_PARM_DESC(ap, "If non-zero Access Point firmware will be loaded"); MODULE_DEVICE_TABLE(usb, zd1201_table); static int zd1201_fw_upload(struct usb_device *dev, int apfw) { const struct firmware *fw_entry; const char *data; unsigned long len; int err; unsigned char ret; char *buf; char *fwfile; if (apfw) fwfile = "zd1201-ap.fw"; else fwfile = "zd1201.fw"; err = request_firmware(&fw_entry, fwfile, &dev->dev); if (err) { dev_err(&dev->dev, "Failed to load %s firmware file!\n", fwfile); dev_err(&dev->dev, "Make sure the hotplug firmware loader is installed.\n"); dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info.\n"); return err; } data = fw_entry->data; len = fw_entry->size; buf = kmalloc(1024, GFP_ATOMIC); if (!buf) { err = -ENOMEM; goto exit; } while (len > 0) { int translen = (len > 1024) ? 
1024 : len; memcpy(buf, data, translen); err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0, USB_DIR_OUT | 0x40, 0, 0, buf, translen, ZD1201_FW_TIMEOUT); if (err < 0) goto exit; len -= translen; data += translen; } err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x2, USB_DIR_OUT | 0x40, 0, 0, NULL, 0, ZD1201_FW_TIMEOUT); if (err < 0) goto exit; err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT); if (err < 0) goto exit; memcpy(&ret, buf, sizeof(ret)); if (ret & 0x80) { err = -EIO; goto exit; } err = 0; exit: kfree(buf); release_firmware(fw_entry); return err; } MODULE_FIRMWARE("zd1201-ap.fw"); MODULE_FIRMWARE("zd1201.fw"); static void zd1201_usbfree(struct urb *urb) { struct zd1201 *zd = urb->context; switch(urb->status) { case -EILSEQ: case -ENODEV: case -ETIME: case -ENOENT: case -EPIPE: case -EOVERFLOW: case -ESHUTDOWN: dev_warn(&zd->usb->dev, "%s: urb failed: %d\n", zd->dev->name, urb->status); } kfree(urb->transfer_buffer); usb_free_urb(urb); } /* cmdreq message: u32 type u16 cmd u16 parm0 u16 parm1 u16 parm2 u8 pad[4] total: 4 + 2 + 2 + 2 + 2 + 4 = 16 */ static int zd1201_docmd(struct zd1201 *zd, int cmd, int parm0, int parm1, int parm2) { unsigned char *command; int ret; struct urb *urb; command = kmalloc(16, GFP_ATOMIC); if (!command) return -ENOMEM; *((__le32*)command) = cpu_to_le32(ZD1201_USB_CMDREQ); *((__le16*)&command[4]) = cpu_to_le16(cmd); *((__le16*)&command[6]) = cpu_to_le16(parm0); *((__le16*)&command[8]) = cpu_to_le16(parm1); *((__le16*)&command[10])= cpu_to_le16(parm2); urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { kfree(command); return -ENOMEM; } usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2), command, 16, zd1201_usbfree, zd); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { kfree(command); usb_free_urb(urb); } return ret; } /* Callback after sending out a packet */ static void zd1201_usbtx(struct urb *urb) { struct zd1201 *zd = urb->context; 
	netif_wake_queue(zd->dev);
}

/*
 * Bulk-in completion handler: demultiplexes everything the device sends
 * us — event/resource replies for the synchronous config path (woken via
 * zd->rxdataq), info frames (link status, association, auth requests,
 * inquiry results) and actual 802.11 data frames (including monitor-mode
 * capture and fragment reassembly).  Resubmits the URB when done; on a
 * fatal USB error it wakes any config waiter and frees the buffer.
 */
static void zd1201_usbrx(struct urb *urb)
{
	struct zd1201 *zd = urb->context;
	int free = 0;
	unsigned char *data = urb->transfer_buffer;
	struct sk_buff *skb;
	unsigned char type;

	if (!zd)
		return;

	switch(urb->status) {
		case -EILSEQ:
		case -ENODEV:
		case -ETIME:
		case -ENOENT:
		case -EPIPE:
		case -EOVERFLOW:
		case -ESHUTDOWN:
			dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n",
			    zd->dev->name, urb->status);
			free = 1;
			goto exit;
	}

	if (urb->status != 0 || urb->actual_length == 0)
		goto resubmit;

	type = data[0];
	/* Reply for a synchronous getconfig/setconfig caller: stash the
	 * raw packet and wake the waiter. */
	if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) {
		memcpy(zd->rxdata, data, urb->actual_length);
		zd->rxlen = urb->actual_length;
		zd->rxdatas = 1;
		wake_up(&zd->rxdataq);
	}
	/* Info frame */
	if (type == ZD1201_PACKET_INQUIRE) {
		int i = 0;
		unsigned short infotype, framelen, copylen;
		framelen = le16_to_cpu(*(__le16*)&data[4]);
		infotype = le16_to_cpu(*(__le16*)&data[6]);

		if (infotype == ZD1201_INF_LINKSTATUS) {
			short linkstatus;

			linkstatus = le16_to_cpu(*(__le16*)&data[8]);
			/* 1/4 = connected, 2/3 = disconnected; other
			 * values treated as carrier loss. */
			switch(linkstatus) {
				case 1:
					netif_carrier_on(zd->dev);
					break;
				case 2:
					netif_carrier_off(zd->dev);
					break;
				case 3:
					netif_carrier_off(zd->dev);
					break;
				case 4:
					netif_carrier_on(zd->dev);
					break;
				default:
					netif_carrier_off(zd->dev);
			}
			goto resubmit;
		}
		if (infotype == ZD1201_INF_ASSOCSTATUS) {
			short status = le16_to_cpu(*(__le16*)(data+8));
			int event;
			union iwreq_data wrqu;

			switch (status) {
				case ZD1201_ASSOCSTATUS_STAASSOC:
				case ZD1201_ASSOCSTATUS_REASSOC:
					event = IWEVREGISTERED;
					break;
				case ZD1201_ASSOCSTATUS_DISASSOC:
				case ZD1201_ASSOCSTATUS_ASSOCFAIL:
				case ZD1201_ASSOCSTATUS_AUTHFAIL:
				default:
					event = IWEVEXPIRED;
			}
			memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;

			/* Send event to user space */
			wireless_send_event(zd->dev, event, &wrqu, NULL);
			goto resubmit;
		}
		if (infotype == ZD1201_INF_AUTHREQ) {
			union iwreq_data wrqu;

			memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;
			/* There isn't an event that truly fits this request.
			   We assume that userspace will be smart enough to
			   see a new station being expired and send back an
			   authstation ioctl to authorize it. */
			wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL);
			goto resubmit;
		}
		/* Other infotypes are handled outside this handler:
		 * reassemble the 64-byte chunks into zd->rxdata and wake
		 * the waiter (e.g. zd1201_get_scan). */
		zd->rxlen = 0;
		while (i < urb->actual_length) {
			copylen = le16_to_cpu(*(__le16*)&data[i+2]);
			/* Sanity check, sometimes we get junk */
			if (copylen+zd->rxlen > sizeof(zd->rxdata))
				break;
			memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen);
			zd->rxlen += copylen;
			i += 64;
		}
		if (i >= urb->actual_length) {
			zd->rxdatas = 1;
			wake_up(&zd->rxdataq);
		}
		goto resubmit;
	}
	/* Actual data */
	if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
		int datalen = urb->actual_length-1;
		unsigned short len, fc, seq;

		/* Trailer layout (offsets from end of packet): payload
		 * length, frame control, sequence, addresses — parsed
		 * below with negative offsets from datalen. */
		len = ntohs(*(__be16 *)&data[datalen-2]);
		if (len>datalen)
			len=datalen;
		fc = le16_to_cpu(*(__le16 *)&data[datalen-16]);
		seq = le16_to_cpu(*(__le16 *)&data[datalen-24]);

		if (zd->monitor) {
			/* Monitor mode: rebuild a raw 802.11-ish header
			 * from the trailer fields and deliver as-is. */
			if (datalen < 24)
				goto resubmit;
			if (!(skb = dev_alloc_skb(datalen+24)))
				goto resubmit;

			memcpy(skb_put(skb, 2), &data[datalen-16], 2);
			memcpy(skb_put(skb, 2), &data[datalen-2], 2);
			memcpy(skb_put(skb, 6), &data[datalen-14], 6);
			memcpy(skb_put(skb, 6), &data[datalen-22], 6);
			memcpy(skb_put(skb, 6), &data[datalen-8], 6);
			memcpy(skb_put(skb, 2), &data[datalen-24], 2);
			memcpy(skb_put(skb, len), data, len);
			skb->protocol = eth_type_trans(skb, zd->dev);
			zd->dev->stats.rx_packets++;
			zd->dev->stats.rx_bytes += skb->len;
			netif_rx(skb);
			goto resubmit;
		}

		/* Fragmented frame: collect pieces on zd->fraglist keyed
		 * by sequence number until the last fragment arrives. */
		if ((seq & IEEE80211_SCTL_FRAG) ||
		    (fc & IEEE80211_FCTL_MOREFRAGS)) {
			struct zd1201_frag *frag = NULL;
			char *ptr;

			if (datalen<14)
				goto resubmit;
			if ((seq & IEEE80211_SCTL_FRAG) == 0) {
				/* First fragment: allocate a maximal skb
				 * so later fragments can be appended. */
				frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
				if (!frag)
					goto resubmit;
				skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2);
				if (!skb) {
					kfree(frag);
					goto resubmit;
				}
				frag->skb = skb;
				frag->seq = seq & IEEE80211_SCTL_SEQ;
				skb_reserve(skb, 2);
				memcpy(skb_put(skb, 12), &data[datalen-14], 12);
				memcpy(skb_put(skb, 2), &data[6], 2);
				memcpy(skb_put(skb, len), data+8, len);
				hlist_add_head(&frag->fnode, &zd->fraglist);
				goto resubmit;
			}
			hlist_for_each_entry(frag, &zd->fraglist, fnode)
				if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
					break;
			if (!frag)
				goto resubmit;
			skb = frag->skb;
			ptr = skb_put(skb, len);
			if (ptr)
				memcpy(ptr, data+8, len);
			if (fc & IEEE80211_FCTL_MOREFRAGS)
				goto resubmit;
			/* Last fragment received: frame complete. */
			hlist_del_init(&frag->fnode);
			kfree(frag);
		} else {
			/* Unfragmented frame: 12 bytes of MAC addresses
			 * from the trailer + type field + payload. */
			if (datalen<14)
				goto resubmit;
			skb = dev_alloc_skb(len + 14 + 2);
			if (!skb)
				goto resubmit;
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, 12), &data[datalen-14], 12);
			memcpy(skb_put(skb, 2), &data[6], 2);
			memcpy(skb_put(skb, len), data+8, len);
		}
		skb->protocol = eth_type_trans(skb, zd->dev);
		zd->dev->stats.rx_packets++;
		zd->dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
	}
resubmit:
	memset(data, 0, ZD1201_RXSIZE);

	urb->status = 0;
	urb->dev = zd->usb;
	if(usb_submit_urb(urb, GFP_ATOMIC))
		free = 1;
exit:
	if (free) {
		/* RX is dead: wake any config waiter with rxlen == 0 so it
		 * fails with -EIO instead of blocking forever. */
		zd->rxlen = 0;
		zd->rxdatas = 1;
		wake_up(&zd->rxdataq);
		kfree(urb->transfer_buffer);
	}
}

/*
 * Synchronously read a RID (config record) from the device into
 * riddata/riddatalen.  Issues an ACCESS command, waits for the reply on
 * zd->rxdataq, then pulls the record in with a SETRXRID command and
 * strips the 64-byte resource-packet framing.
 * NOTE(review): wait_event_interruptible() return value is ignored; a
 * signal would make us read stale rxdata — presumably acceptable here,
 * but worth confirming.
 */
static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
	unsigned int riddatalen)
{
	int err;
	int i = 0;
	int code;
	int rid_fid;
	int length;
	unsigned char *pdata;

	zd->rxdatas = 0;
	err = zd1201_docmd(zd, ZD1201_CMDCODE_ACCESS, rid, 0, 0);
	if (err)
		return err;

	wait_event_interruptible(zd->rxdataq, zd->rxdatas);
	if (!zd->rxlen)
		return -EIO;

	code = le16_to_cpu(*(__le16*)(&zd->rxdata[4]));
	rid_fid = le16_to_cpu(*(__le16*)(&zd->rxdata[6]));
	length = le16_to_cpu(*(__le16*)(&zd->rxdata[8]));
	if (length > zd->rxlen)
		length = zd->rxlen-6;

	/* If access bit is not on, then error */
	if ((code & ZD1201_ACCESSBIT) != ZD1201_ACCESSBIT || rid_fid != rid )
		return -EINVAL;

	/* Not enough buffer for allocating data */
	if (riddatalen != (length - 4)) {
		dev_dbg(&zd->usb->dev, "riddatalen mismatches, expected=%u, (packet=%u) length=%u, rid=0x%04X, rid_fid=0x%04X\n",
		    riddatalen, zd->rxlen, length, rid, rid_fid);
		return -ENODATA;
	}

	zd->rxdatas = 0;
	/* Issue SetRxRid commnd */
	err = zd1201_docmd(zd, ZD1201_CMDCODE_SETRXRID, rid, 0, length);
	if (err)
		return err;

	/* Receive RID record from resource packets */
	wait_event_interruptible(zd->rxdataq, zd->rxdatas);
	if (!zd->rxlen)
		return -EIO;

	if (zd->rxdata[zd->rxlen - 1] != ZD1201_PACKET_RESOURCE) {
		dev_dbg(&zd->usb->dev, "Packet type mismatch: 0x%x not 0x3\n",
		    zd->rxdata[zd->rxlen-1]);
		return -EINVAL;
	}

	/* Set the data pointer and received data length */
	pdata = zd->rxdata;
	length = zd->rxlen;

	/* The record arrives as a sequence of 64-byte resource packets;
	 * each carries a type byte, and the first also carries a 4-byte
	 * RID header that must be skipped. */
	do {
		int actual_length;

		actual_length = (length > 64) ? 64 : length;

		if (pdata[0] != 0x3) {
			dev_dbg(&zd->usb->dev, "Rx Resource packet type error: %02X\n",
			    pdata[0]);
			return -EINVAL;
		}

		if (actual_length != 64) {
			/* Trim the last packet type byte */
			actual_length--;
		}

		/* Skip the 4 bytes header (RID length and RID) */
		if (i == 0) {
			pdata += 8;
			actual_length -= 8;
		} else {
			pdata += 4;
			actual_length -= 4;
		}

		memcpy(riddata, pdata, actual_length);
		riddata += actual_length;
		pdata += actual_length;
		length -= 64;
		i++;
	} while (length > 0);

	return 0;
}

/*
 *      resreq:
 *              byte    type
 *              byte    sequence
 *              u16     reserved
 *              byte    data[12]
 *      total: 16
 */
/*
 * Synchronously write a RID (config record) to the device.  The payload
 * is split into 16-byte resource-request packets (12 data bytes each,
 * the first carrying a 4-byte length+RID header), followed by an ACCESS
 * command that commits the write.  If 'wait' is set, blocks until the
 * device acknowledges.  Submitted request buffers/URBs are freed by the
 * zd1201_usbfree completion; the err path only frees the one that
 * failed to submit.
 */
static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int wait)
{
	int err;
	unsigned char *request;
	int reqlen;
	char seq=0;
	struct urb *urb;
	gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;

	len += 4;			/* first 4 are for header */

	zd->rxdatas = 0;
	zd->rxlen = 0;
	for (seq=0; len > 0; seq++) {
		request = kmalloc(16, gfp_mask);
		if (!request)
			return -ENOMEM;
		urb = usb_alloc_urb(0, gfp_mask);
		if (!urb) {
			kfree(request);
			return -ENOMEM;
		}
		memset(request, 0, 16);
		reqlen = len>12 ? 12 : len;
		request[0] = ZD1201_USB_RESREQ;
		request[1] = seq;
		request[2] = 0;
		request[3] = 0;
		if (request[1] == 0) {
			/* add header */
			*(__le16*)&request[4] = cpu_to_le16((len-2+1)/2);
			*(__le16*)&request[6] = cpu_to_le16(rid);
			memcpy(request+8, buf, reqlen-4);
			buf += reqlen-4;
		} else {
			memcpy(request+4, buf, reqlen);
			buf += reqlen;
		}

		len -= reqlen;

		usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb,
		    zd->endp_out2), request, 16, zd1201_usbfree, zd);
		err = usb_submit_urb(urb, gfp_mask);
		if (err)
			goto err;
	}

	/* Commit the write with an ACCESS|WRITE command. */
	request = kmalloc(16, gfp_mask);
	if (!request)
		return -ENOMEM;
	urb = usb_alloc_urb(0, gfp_mask);
	if (!urb) {
		kfree(request);
		return -ENOMEM;
	}
	*((__le32*)request) = cpu_to_le32(ZD1201_USB_CMDREQ);
	*((__le16*)&request[4]) =
	    cpu_to_le16(ZD1201_CMDCODE_ACCESS|ZD1201_ACCESSBIT);
	*((__le16*)&request[6]) = cpu_to_le16(rid);
	*((__le16*)&request[8]) = cpu_to_le16(0);
	*((__le16*)&request[10]) = cpu_to_le16(0);
	usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2),
	     request, 16, zd1201_usbfree, zd);
	err = usb_submit_urb(urb, gfp_mask);
	if (err)
		goto err;

	if (wait) {
		wait_event_interruptible(zd->rxdataq, zd->rxdatas);
		if (!zd->rxlen ||
		    le16_to_cpu(*(__le16*)&zd->rxdata[6]) != rid) {
			dev_dbg(&zd->usb->dev, "wrong or no RID received\n");
		}
	}

	return 0;
err:
	kfree(request);
	usb_free_urb(urb);
	return err;
}

/* Read a single 16-bit RID, converting from device (little-endian)
 * byte order. */
static inline int zd1201_getconfig16(struct zd1201 *zd, int rid, short *val)
{
	int err;
	__le16 zdval;

	err = zd1201_getconfig(zd, rid, &zdval, sizeof(__le16));
	if (err)
		return err;
	*val = le16_to_cpu(zdval);
	return 0;
}

/* Write a single 16-bit RID, converting to device (little-endian)
 * byte order; always waits for completion. */
static inline int zd1201_setconfig16(struct zd1201 *zd, int rid, short val)
{
	__le16 zdval = cpu_to_le16(val);
	return (zd1201_setconfig(zd, rid, &zdval, sizeof(__le16), 1));
}

/*
 * Bring the device's firmware interface up: start the RX URB, send the
 * INIT command, then pre-allocate the device-side TX buffers (one ALLOC
 * per available buffer, 1514 bytes each).
 */
static int zd1201_drvr_start(struct zd1201 *zd)
{
	int err, i;
	short max;
	__le16 zdmax;
	unsigned char *buffer;

	buffer = kzalloc(ZD1201_RXSIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	usb_fill_bulk_urb(zd->rx_urb, zd->usb,
	    usb_rcvbulkpipe(zd->usb, zd->endp_in), buffer,
	    ZD1201_RXSIZE, zd1201_usbrx, zd);

	err = usb_submit_urb(zd->rx_urb, GFP_KERNEL);
	if (err)
		goto err_buffer;
	err = zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
	if (err)
		goto err_urb;

	err = zd1201_getconfig(zd, ZD1201_RID_CNFMAXTXBUFFERNUMBER, &zdmax,
	    sizeof(__le16));
	if (err)
		goto err_urb;

	max = le16_to_cpu(zdmax);
	for (i=0; i<max; i++) {
		err = zd1201_docmd(zd, ZD1201_CMDCODE_ALLOC, 1514, 0, 0);
		if (err)
			goto err_urb;
	}

	return 0;

err_urb:
	/* Killing the RX URB runs zd1201_usbrx with an error status,
	 * which frees the transfer buffer for us. */
	usb_kill_urb(zd->rx_urb);
	return err;
err_buffer:
	kfree(buffer);
	return err;
}

/*      Magic alert: The firmware doesn't seem to like the MAC state being
 *      toggled in promisc (aka monitor) mode.
 *      (It works a number of times, but will halt eventually)
 *      So we turn it of before disabling and on after enabling if needed.
 */
/* Enable the MAC; re-enables promiscuous mode afterwards when in
 * monitor mode (see magic alert above). */
static int zd1201_enable(struct zd1201 *zd)
{
	int err;

	if (zd->mac_enabled)
		return 0;

	err = zd1201_docmd(zd, ZD1201_CMDCODE_ENABLE, 0, 0, 0);
	if (!err)
		zd->mac_enabled = 1;

	if (zd->monitor)
		err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 1);

	return err;
}

/* Disable the MAC; drops promiscuous mode first when in monitor mode
 * (see magic alert above). */
static int zd1201_disable(struct zd1201 *zd)
{
	int err;

	if (!zd->mac_enabled)
		return 0;
	if (zd->monitor) {
		err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0);
		if (err)
			return err;
	}

	err = zd1201_docmd(zd, ZD1201_CMDCODE_DISABLE, 0, 0, 0);
	if (!err)
		zd->mac_enabled = 0;
	return err;
}

/* Bounce the MAC so configuration changes take effect.  No-op when the
 * MAC is not currently enabled. */
static int zd1201_mac_reset(struct zd1201 *zd)
{
	if (!zd->mac_enabled)
		return 0;
	zd1201_disable(zd);
	return zd1201_enable(zd);
}

/*
 * (Re)join a network: disable the MAC, program authentication and the
 * desired (station) or own (AP) SSID, restore the MAC address, then
 * re-enable.  The trailing msleep gives the firmware time to settle.
 */
static int zd1201_join(struct zd1201 *zd, char *essid, int essidlen)
{
	int err, val;
	char buf[IW_ESSID_MAX_SIZE+2];

	err = zd1201_disable(zd);
	if (err)
		return err;

	val = ZD1201_CNFAUTHENTICATION_OPENSYSTEM;
	val |= ZD1201_CNFAUTHENTICATION_SHAREDKEY;
	err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, val);
	if (err)
		return err;

	*(__le16 *)buf = cpu_to_le16(essidlen);
	memcpy(buf+2, essid, essidlen);
	if (!zd->ap) {	/* Normal station */
		err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf,
		    IW_ESSID_MAX_SIZE+2, 1);
		if (err)
			return err;
	} else {	/* AP */
		err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNSSID, buf,
		    IW_ESSID_MAX_SIZE+2, 1);
		if (err)
			return err;
	}

	err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR,
	    zd->dev->dev_addr, zd->dev->addr_len, 1);
	if (err)
		return err;

	err = zd1201_enable(zd);
	if (err)
		return err;

	msleep(100);
	return 0;
}

/* net_device open: join with whatever essid is configured (wildcard if
 * none was set) and start the TX queue. */
static int zd1201_net_open(struct net_device *dev)
{
	struct zd1201 *zd = netdev_priv(dev);

	/* Start MAC with wildcard if no essid set */
	if (!zd->mac_enabled)
		zd1201_join(zd, zd->essid, zd->essidlen);
	netif_start_queue(dev);

	return 0;
}

/* net_device stop: just halt the TX queue; the MAC stays as-is. */
static int zd1201_net_stop(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

/*      RFC 1042 encapsulates Ethernet frames in 802.11 frames
 *      by prefixing them with 0xaa, 0xaa, 0x03) followed by a SNAP OID of 0
 *      (0x00, 0x00, 0x00). Zd requires an additional padding, copy
 *      of ethernet addresses, length of the standard RFC 1042 packet
 *      and a command byte (which is nul for tx).
 *
 *      tx frame (from Wlan NG):
 *      RFC 1042:
 *              llc             0xAA 0xAA 0x03 (802.2 LLC)
 *              snap            0x00 0x00 0x00 (Ethernet encapsulated)
 *              type            2 bytes, Ethernet type field
 *              payload         (minus eth header)
 *      Zydas specific:
 *              padding         1B if (skb->len+8+1)%64==0
 *              Eth MAC addr    12 bytes, Ethernet MAC addresses
 *              length          2 bytes, RFC 1042 packet length
 *                              (llc+snap+type+payload)
 *              zd              1 null byte, zd1201 packet type
 */
static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
						struct net_device *dev)
{
	struct zd1201 *zd = netdev_priv(dev);
	unsigned char *txbuf = zd->txdata;
	int txbuflen, pad = 0, err;
	struct urb *urb = zd->tx_urb;

	if (!zd->mac_enabled || zd->monitor) {
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	netif_stop_queue(dev);

	txbuflen = skb->len + 8 + 1;
	/* Avoid a transfer that is an exact multiple of 64 bytes (USB
	 * bulk packet size) by inserting one pad byte. */
	if (txbuflen%64 == 0) {
		pad = 1;
		txbuflen++;
	}
	txbuf[0] = 0xAA;
	txbuf[1] = 0xAA;
	txbuf[2] = 0x03;
	txbuf[3] = 0x00;	/* rfc1042 */
	txbuf[4] = 0x00;
	txbuf[5] = 0x00;

	skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12);
	if (pad)
		txbuf[skb->len-12+6]=0;
	skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12);
	*(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6);
	txbuf[txbuflen-1] = 0;

	usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out),
	    txbuf, txbuflen, zd1201_usbtx, zd);

	err = usb_submit_urb(zd->tx_urb, GFP_ATOMIC);
	if (err) {
		dev->stats.tx_errors++;
		netif_start_queue(dev);
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
	}
	kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* Watchdog timeout: unlink the stuck TX URB and reset the timer. */
static void zd1201_tx_timeout(struct net_device *dev)
{
	struct zd1201 *zd = netdev_priv(dev);

	if (!zd)
		return;
	dev_warn(&zd->usb->dev, "%s: TX timeout, shooting down urb\n",
	    dev->name);
	usb_unlink_urb(zd->tx_urb);
	dev->stats.tx_errors++;
	/* Restart the timeout to quiet the watchdog: */
	dev->trans_start = jiffies; /* prevent tx timeout */
}

/* Program a new MAC address into the device, then reset the MAC so it
 * takes effect. */
static int zd1201_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct zd1201 *zd = netdev_priv(dev);
	int err;

	if (!zd)
		return -ENODEV;

	err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR,
	    addr->sa_data, dev->addr_len, 1);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return zd1201_mac_reset(zd);
}

/* Return the cached wireless stats (level is refreshed opportunistically
 * by zd1201_get_wap). */
static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
{
	struct zd1201 *zd = netdev_priv(dev);

	return &zd->iwstats;
}

/* Push the multicast filter list to the device; silently ignored when
 * the list exceeds the hardware limit. */
static void zd1201_set_multicast(struct net_device *dev)
{
	struct zd1201 *zd = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
	int i;

	if (netdev_mc_count(dev) > ZD1201_MAXMULTI)
		return;

	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
	zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
	    netdev_mc_count(dev) * ETH_ALEN, 0);
}

/* SIOCSIWCOMMIT: apply pending configuration via a MAC reset. */
static int zd1201_config_commit(struct net_device *dev,
    struct iw_request_info *info, struct iw_point *data, char *essid)
{
	struct zd1201 *zd = netdev_priv(dev);

	return zd1201_mac_reset(zd);
}

/* SIOCGIWNAME: protocol name. */
static int zd1201_get_name(struct net_device *dev,
    struct iw_request_info *info, char *name, char *extra)
{
	strcpy(name, "IEEE 802.11b");
	return 0;
}

/* SIOCSIWFREQ: set the channel (either given directly or derived from
 * a frequency) and reset the MAC. */
static int zd1201_set_freq(struct net_device *dev,
    struct iw_request_info *info, struct iw_freq *freq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short channel = 0;
	int err;

	if (freq->e == 0)
		channel = freq->m;
	else
		channel = ieee80211_frequency_to_channel(freq->m);

	err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel);
	if (err)
		return err;

	zd1201_mac_reset(zd);

	return 0;
}

/* SIOCGIWFREQ: report the current channel number (not a frequency). */
static int zd1201_get_freq(struct net_device *dev,
    struct iw_request_info *info, struct iw_freq *freq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short channel;
	int err;

	err = zd1201_getconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, &channel);
	if (err)
		return err;
	freq->e = 0;
	freq->m = channel;

	return 0;
}

/* SIOCSIWMODE: switch between monitor/ad-hoc/infrastructure (and the
 * non-standard pseudo-IBSS mode 8).  AP firmware only accepts MASTER. */
static int zd1201_set_mode(struct net_device *dev,
    struct iw_request_info *info, __u32 *mode, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short porttype, monitor = 0;
	unsigned char buffer[IW_ESSID_MAX_SIZE+2];
	int err;

	if (zd->ap) {
		if (*mode != IW_MODE_MASTER)
			return -EINVAL;
		return 0;
	}

	err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0);
	if (err)
		return err;
	zd->dev->type = ARPHRD_ETHER;
	switch(*mode) {
		case IW_MODE_MONITOR:
			monitor = 1;
			zd->dev->type = ARPHRD_IEEE80211;
			/* Make sure we are no longer associated with by
			   setting an 'impossible' essid.
			   (otherwise we mess up firmware)
*/ break; case ZD1201_PORTTYPE_AP: *mode = IW_MODE_MASTER; break; default: dev_dbg(&zd->usb->dev, "Unknown porttype: %d\n", porttype); *mode = IW_MODE_AUTO; } if (zd->monitor) *mode = IW_MODE_MONITOR; return 0; } static int zd1201_get_range(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct iw_range *range = (struct iw_range *)extra; wrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(struct iw_range)); range->we_version_compiled = WIRELESS_EXT; range->we_version_source = WIRELESS_EXT; range->max_qual.qual = 128; range->max_qual.level = 128; range->max_qual.noise = 128; range->max_qual.updated = 7; range->encoding_size[0] = 5; range->encoding_size[1] = 13; range->num_encoding_sizes = 2; range->max_encoding_tokens = ZD1201_NUMKEYS; range->num_bitrates = 4; range->bitrate[0] = 1000000; range->bitrate[1] = 2000000; range->bitrate[2] = 5500000; range->bitrate[3] = 11000000; range->min_rts = 0; range->min_frag = ZD1201_FRAGMIN; range->max_rts = ZD1201_RTSMAX; range->min_frag = ZD1201_FRAGMAX; return 0; } /* Little bit of magic here: we only get the quality if we poll * for it, and we never get an actual request to trigger such * a poll. Therefore we 'assume' that the user will soon ask for * the stats after asking the bssid. */ static int zd1201_get_wap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct zd1201 *zd = netdev_priv(dev); unsigned char buffer[6]; if (!zd1201_getconfig(zd, ZD1201_RID_COMMSQUALITY, buffer, 6)) { /* Unfortunately the quality and noise reported is useless. 
		   they seem to be accumulators that increase until you
		   read them, unless we poll on a fixed interval we can't
		   use them
		*/
		/*zd->iwstats.qual.qual = le16_to_cpu(((__le16 *)buffer)[0]);*/
		zd->iwstats.qual.level = le16_to_cpu(((__le16 *)buffer)[1]);
		/*zd->iwstats.qual.noise = le16_to_cpu(((__le16 *)buffer)[2]);*/
		zd->iwstats.qual.updated = 2;
	}

	return zd1201_getconfig(zd, ZD1201_RID_CURRENTBSSID,
	    ap_addr->sa_data, 6);
}

/* SIOCSIWSCAN: nothing to trigger here, the actual inquiry happens in
 * zd1201_get_scan. */
static int zd1201_set_scan(struct net_device *dev,
    struct iw_request_info *info, struct iw_point *srq, char *extra)
{
	/* We do everything in get_scan */
	return 0;
}

/*
 * SIOCGIWSCAN: issue an INQUIRE(SCANRESULTS) command, wait for the
 * reassembled reply in zd->rxdata, and translate each 62-byte result
 * record (BSSID, ESSID, mode, channel, rates, encryption flag, quality)
 * into wireless-extensions events.
 */
static int zd1201_get_scan(struct net_device *dev,
    struct iw_request_info *info, struct iw_point *srq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	int err, i, j, enabled_save;
	struct iw_event iwe;
	char *cev = extra;
	char *end_buf = extra + IW_SCAN_MAX_DATA;

	/* No scanning in AP mode */
	if (zd->ap)
		return -EOPNOTSUPP;

	/* Scan doesn't seem to work if disabled */
	enabled_save = zd->mac_enabled;
	zd1201_enable(zd);

	zd->rxdatas = 0;
	err = zd1201_docmd(zd, ZD1201_CMDCODE_INQUIRE,
	     ZD1201_INQ_SCANRESULTS, 0, 0);
	if (err)
		return err;

	wait_event_interruptible(zd->rxdataq, zd->rxdatas);
	if (!zd->rxlen)
		return -EIO;

	if (le16_to_cpu(*(__le16*)&zd->rxdata[2]) != ZD1201_INQ_SCANRESULTS)
		return -EIO;

	for(i=8; i<zd->rxlen; i+=62) {
		iwe.cmd = SIOCGIWAP;
		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
		memcpy(iwe.u.ap_addr.sa_data, zd->rxdata+i+6, 6);
		cev = iwe_stream_add_event(info, cev, end_buf,
					   &iwe, IW_EV_ADDR_LEN);

		iwe.cmd = SIOCGIWESSID;
		iwe.u.data.length = zd->rxdata[i+16];
		iwe.u.data.flags = 1;
		cev = iwe_stream_add_point(info, cev, end_buf,
					   &iwe, zd->rxdata+i+18);

		iwe.cmd = SIOCGIWMODE;
		if (zd->rxdata[i+14]&0x01)
			iwe.u.mode = IW_MODE_MASTER;
		else
			iwe.u.mode = IW_MODE_ADHOC;
		cev = iwe_stream_add_event(info, cev, end_buf,
					   &iwe, IW_EV_UINT_LEN);

		iwe.cmd = SIOCGIWFREQ;
		iwe.u.freq.m = zd->rxdata[i+0];
		iwe.u.freq.e = 0;
		cev = iwe_stream_add_event(info, cev, end_buf,
					   &iwe, IW_EV_FREQ_LEN);
		case 11000000:
		default:
			rate = ZD1201_RATEB11;
			break;
	}
	if (!rrq->fixed) { /* Also enable all lower bitrates */
		rate |= rate-1;
	}

	err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL, rate);
	if (err)
		return err;

	return zd1201_mac_reset(zd);
}

/* SIOCGIWRATE: translate the device's current TX rate code back to
 * bits per second. */
static int zd1201_get_rate(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short rate;
	int err;

	err = zd1201_getconfig16(zd, ZD1201_RID_CURRENTTXRATE, &rate);
	if (err)
		return err;

	switch(rate) {
		case 1:
			rrq->value = 1000000;
			break;
		case 2:
			rrq->value = 2000000;
			break;
		case 5:
			rrq->value = 5500000;
			break;
		case 11:
			rrq->value = 11000000;
			break;
		default:
			rrq->value = 0;
	}
	rrq->fixed = 0;
	rrq->disabled = 0;

	return 0;
}

/* SIOCSIWRTS: set the RTS threshold; 'disabled' maps to the maximum
 * value.  Takes effect after a MAC reset. */
static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
    struct iw_param *rts, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	int err;
	short val = rts->value;

	if (rts->disabled || !rts->fixed)
		val = ZD1201_RTSMAX;
	if (val > ZD1201_RTSMAX)
		return -EINVAL;
	if (val < 0)
		return -EINVAL;

	err = zd1201_setconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, val);
	if (err)
		return err;

	return zd1201_mac_reset(zd);
}

/* SIOCGIWRTS: report the RTS threshold; the max value means disabled. */
static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
    struct iw_param *rts, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short rtst;
	int err;

	err = zd1201_getconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, &rtst);
	if (err)
		return err;
	rts->value = rtst;
	rts->disabled = (rts->value == ZD1201_RTSMAX);
	rts->fixed = 1;

	return 0;
}

/* SIOCSIWFRAG: set the fragmentation threshold (must be even and within
 * device limits); 'disabled' maps to the maximum.  Needs a MAC reset. */
static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
    struct iw_param *frag, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	int err;
	short val = frag->value;

	if (frag->disabled || !frag->fixed)
		val = ZD1201_FRAGMAX;
	if (val > ZD1201_FRAGMAX)
		return -EINVAL;
	if (val < ZD1201_FRAGMIN)
		return -EINVAL;
	if (val & 1)
		return -EINVAL;

	err = zd1201_setconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, val);
	if (err)
		return err;

	return zd1201_mac_reset(zd);
}

/* SIOCGIWFRAG: report the fragmentation threshold; the max value means
 * disabled. */
static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info,
    struct iw_param *frag, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short fragt;
	int err;

	err = zd1201_getconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, &fragt);
	if (err)
		return err;
	frag->value = fragt;
	frag->disabled = (frag->value == ZD1201_FRAGMAX);
	frag->fixed = 1;

	return 0;
}

/* SIOCSIWRETRY: retry limits are not configurable on this hardware;
 * accept and ignore. */
static int zd1201_set_retry(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
	return 0;
}

/* SIOCGIWRETRY: nothing to report (see zd1201_set_retry). */
static int zd1201_get_retry(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
	return 0;
}

/*
 * SIOCSIWENCODE: program a WEP key into the selected key slot, cache it
 * locally for get_encode, then update the WEP flags and authentication
 * mode accordingly and reset the MAC.
 * NOTE(review): the expressions `flags & IW_ENCODE_DISABLED &
 * IW_ENCODE_MODE` (and the RESTRICTED variant) AND the flag with the
 * mode mask rather than testing flag-within-mask; they appear to work
 * because each flag lies inside IW_ENCODE_MODE — confirm against the
 * wireless extensions headers before touching.
 */
static int zd1201_set_encode(struct net_device *dev,
    struct iw_request_info *info, struct iw_point *erq, char *key)
{
	struct zd1201 *zd = netdev_priv(dev);
	short i;
	int err, rid;

	if (erq->length > ZD1201_MAXKEYLEN)
		return -EINVAL;

	/* Key index 0 means "current default key". */
	i = (erq->flags & IW_ENCODE_INDEX)-1;
	if (i == -1) {
		err = zd1201_getconfig16(zd,ZD1201_RID_CNFDEFAULTKEYID,&i);
		if (err)
			return err;
	} else {
		err = zd1201_setconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, i);
		if (err)
			return err;
	}

	if (i < 0 || i >= ZD1201_NUMKEYS)
		return -EINVAL;

	rid = ZD1201_RID_CNFDEFAULTKEY0 + i;
	err = zd1201_setconfig(zd, rid, key, erq->length, 1);
	if (err)
		return err;
	zd->encode_keylen[i] = erq->length;
	memcpy(zd->encode_keys[i], key, erq->length);

	i=0;
	if (!(erq->flags & IW_ENCODE_DISABLED & IW_ENCODE_MODE)) {
		i |= 0x01;
		zd->encode_enabled = 1;
	} else
		zd->encode_enabled = 0;
	if (erq->flags & IW_ENCODE_RESTRICTED & IW_ENCODE_MODE) {
		i |= 0x02;
		zd->encode_restricted = 1;
	} else
		zd->encode_restricted = 0;
	err = zd1201_setconfig16(zd, ZD1201_RID_CNFWEBFLAGS, i);
	if (err)
		return err;

	if (zd->encode_enabled)
		i = ZD1201_CNFAUTHENTICATION_SHAREDKEY;
	else
		i = ZD1201_CNFAUTHENTICATION_OPENSYSTEM;
	err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, i);
	if (err)
		return err;

	return zd1201_mac_reset(zd);
}

/* SIOCGIWENCODE: report WEP state and the cached key for the requested
 * (or default) key index. */
static int zd1201_get_encode(struct net_device *dev,
    struct iw_request_info
    *info, struct iw_point *erq, char *key)
{
	struct zd1201 *zd = netdev_priv(dev);
	short i;
	int err;

	if (zd->encode_enabled)
		erq->flags = IW_ENCODE_ENABLED;
	else
		erq->flags = IW_ENCODE_DISABLED;
	if (zd->encode_restricted)
		erq->flags |= IW_ENCODE_RESTRICTED;
	else
		erq->flags |= IW_ENCODE_OPEN;

	/* Key index 0 means "current default key". */
	i = (erq->flags & IW_ENCODE_INDEX) -1;
	if (i == -1) {
		err = zd1201_getconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, &i);
		if (err)
			return err;
	}
	if (i<0 || i>= ZD1201_NUMKEYS)
		return -EINVAL;

	erq->flags |= i+1;

	erq->length = zd->encode_keylen[i];
	memcpy(key, zd->encode_keys[i], erq->length);

	return 0;
}

/*
 * SIOCSIWPOWER: configure power management.  A PERIOD request programs
 * the sleep duration directly; a TIMEOUT request is converted to one of
 * the device's five PS levels relative to the current sleep duration.
 */
static int zd1201_set_power(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *vwrq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short enabled, duration, level;
	int err;

	enabled = vwrq->disabled ? 0 : 1;
	if (enabled) {
		if (vwrq->flags & IW_POWER_PERIOD) {
			duration = vwrq->value;
			err = zd1201_setconfig16(zd,
			    ZD1201_RID_CNFMAXSLEEPDURATION, duration);
			if (err)
				return err;
			goto out;
		}
		if (vwrq->flags & IW_POWER_TIMEOUT) {
			err = zd1201_getconfig16(zd,
			    ZD1201_RID_CNFMAXSLEEPDURATION, &duration);
			if (err)
				return err;
			level = vwrq->value * 4 / duration;
			if (level > 4)
				level = 4;
			if (level < 0)
				level = 0;
			err = zd1201_setconfig16(zd, ZD1201_RID_CNFPMEPS,
			    level);
			if (err)
				return err;
			goto out;
		}
		return -EINVAL;
	}
out:
	return zd1201_setconfig16(zd, ZD1201_RID_CNFPMENABLED, enabled);
}

/* SIOCGIWPOWER: report power management state, mirroring the mapping
 * used by zd1201_set_power. */
static int zd1201_get_power(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *vwrq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short enabled, level, duration;
	int err;

	err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMENABLED, &enabled);
	if (err)
		return err;
	err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMEPS, &level);
	if (err)
		return err;
	err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, &duration);
	if (err)
		return err;
	vwrq->disabled = enabled ? 0 : 1;
	if (vwrq->flags & IW_POWER_TYPE) {
		if (vwrq->flags & IW_POWER_PERIOD) {
			vwrq->value = duration;
			vwrq->flags = IW_POWER_PERIOD;
		} else {
			vwrq->value = duration * level / 4;
			vwrq->flags = IW_POWER_TIMEOUT;
		}
	}
	if (vwrq->flags & IW_POWER_MODE) {
		if (enabled && level)
			vwrq->flags = IW_POWER_UNICAST_R;
		else
			vwrq->flags = IW_POWER_ALL_R;
	}

	return 0;
}

/* Standard wireless-extensions handler table, indexed by ioctl number
 * relative to SIOCSIWCOMMIT. */
static const iw_handler zd1201_iw_handler[] =
{
	(iw_handler) zd1201_config_commit,	/* SIOCSIWCOMMIT */
	(iw_handler) zd1201_get_name,    	/* SIOCGIWNAME */
	(iw_handler) NULL,                      /* SIOCSIWNWID */
	(iw_handler) NULL,                      /* SIOCGIWNWID */
	(iw_handler) zd1201_set_freq,		/* SIOCSIWFREQ */
	(iw_handler) zd1201_get_freq,		/* SIOCGIWFREQ */
	(iw_handler) zd1201_set_mode,		/* SIOCSIWMODE */
	(iw_handler) zd1201_get_mode,		/* SIOCGIWMODE */
	(iw_handler) NULL,                      /* SIOCSIWSENS */
	(iw_handler) NULL,                      /* SIOCGIWSENS */
	(iw_handler) NULL,                      /* SIOCSIWRANGE */
	(iw_handler) zd1201_get_range,          /* SIOCGIWRANGE */
	(iw_handler) NULL,                      /* SIOCSIWPRIV */
	(iw_handler) NULL,                      /* SIOCGIWPRIV */
	(iw_handler) NULL,                      /* SIOCSIWSTATS */
	(iw_handler) NULL,                      /* SIOCGIWSTATS */
	(iw_handler) NULL,                      /* SIOCSIWSPY */
	(iw_handler) NULL,                      /* SIOCGIWSPY */
	(iw_handler) NULL,                      /* -- hole -- */
	(iw_handler) NULL,                      /* -- hole -- */
	(iw_handler) NULL/*zd1201_set_wap*/,	/* SIOCSIWAP */
	(iw_handler) zd1201_get_wap,		/* SIOCGIWAP */
	(iw_handler) NULL,                      /* -- hole -- */
	(iw_handler) NULL,                      /* SIOCGIWAPLIST */
	(iw_handler) zd1201_set_scan,		/* SIOCSIWSCAN */
	(iw_handler) zd1201_get_scan,		/* SIOCGIWSCAN */
	(iw_handler) zd1201_set_essid,		/* SIOCSIWESSID */
	(iw_handler) zd1201_get_essid,		/* SIOCGIWESSID */
	(iw_handler) NULL,             		/* SIOCSIWNICKN */
	(iw_handler) zd1201_get_nick, 		/* SIOCGIWNICKN */
	(iw_handler) NULL,                      /* -- hole -- */
	(iw_handler) NULL,                      /* -- hole -- */
	(iw_handler) zd1201_set_rate,		/* SIOCSIWRATE */
	(iw_handler) zd1201_get_rate,		/* SIOCGIWRATE */
	(iw_handler) zd1201_set_rts,		/* SIOCSIWRTS */
	(iw_handler) zd1201_get_rts,		/* SIOCGIWRTS */
	(iw_handler) zd1201_set_frag,		/* SIOCSIWFRAG */
	(iw_handler) zd1201_get_frag,		/* SIOCGIWFRAG */
	(iw_handler) NULL,                      /* SIOCSIWTXPOW */
	(iw_handler) NULL,                      /* SIOCGIWTXPOW */
	(iw_handler) zd1201_set_retry,		/* SIOCSIWRETRY */
	(iw_handler) zd1201_get_retry,		/* SIOCGIWRETRY */
	(iw_handler) zd1201_set_encode,		/* SIOCSIWENCODE */
	(iw_handler) zd1201_get_encode,		/* SIOCGIWENCODE */
	(iw_handler) zd1201_set_power,		/* SIOCSIWPOWER */
	(iw_handler) zd1201_get_power,		/* SIOCGIWPOWER */
};

/* Private ioctl: enable/disable host-based authentication (AP only). */
static int zd1201_set_hostauth(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);

	if (!zd->ap)
		return -EOPNOTSUPP;

	return zd1201_setconfig16(zd, ZD1201_RID_CNFHOSTAUTH, rrq->value);
}

/* Private ioctl: query host-based authentication setting (AP only). */
static int zd1201_get_hostauth(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short hostauth;
	int err;

	if (!zd->ap)
		return -EOPNOTSUPP;

	err = zd1201_getconfig16(zd, ZD1201_RID_CNFHOSTAUTH, &hostauth);
	if (err)
		return err;
	rrq->value = hostauth;
	rrq->fixed = 1;

	return 0;
}

/* Private ioctl: authorize a station by MAC address (AP only), used
 * together with host authentication (see IWEVEXPIRED in the RX path). */
static int zd1201_auth_sta(struct net_device *dev,
    struct iw_request_info *info, struct sockaddr *sta, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	unsigned char buffer[10];

	if (!zd->ap)
		return -EOPNOTSUPP;

	memcpy(buffer, sta->sa_data, ETH_ALEN);
	*(short*)(buffer+6) = 0;	/* 0==success, 1==failure */
	*(short*)(buffer+8) = 0;

	return zd1201_setconfig(zd, ZD1201_RID_AUTHENTICATESTA, buffer, 10, 1);
}

/* Private ioctl: set the maximum number of associated stations (AP
 * only). */
static int zd1201_set_maxassoc(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	int err;

	if (!zd->ap)
		return -EOPNOTSUPP;

	err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value);
	if (err)
		return err;

	return 0;
}

/* Private ioctl: query the maximum number of associated stations (AP
 * only). */
static int zd1201_get_maxassoc(struct net_device *dev,
    struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
	struct zd1201 *zd = netdev_priv(dev);
	short maxassoc;
	int err;

	if (!zd->ap)
		return -EOPNOTSUPP;

	err =
	    zd1201_getconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, &maxassoc);
	if (err)
		return err;
	rrq->value = maxassoc;
	rrq->fixed = 1;

	return 0;
}

/* Private (driver-specific) ioctl handlers, paired with
 * zd1201_private_args below. */
static const iw_handler zd1201_private_handler[] = {
	(iw_handler) zd1201_set_hostauth,	/* ZD1201SIWHOSTAUTH */
	(iw_handler) zd1201_get_hostauth,	/* ZD1201GIWHOSTAUTH */
	(iw_handler) zd1201_auth_sta,		/* ZD1201SIWAUTHSTA */
	(iw_handler) NULL,			/* nothing to get */
	(iw_handler) zd1201_set_maxassoc,	/* ZD1201SIMAXASSOC */
	(iw_handler) zd1201_get_maxassoc,	/* ZD1201GIMAXASSOC */
};

/* Argument descriptions for the private ioctls (iwpriv). */
static const struct iw_priv_args zd1201_private_args[] = {
	{ ZD1201SIWHOSTAUTH, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	    IW_PRIV_TYPE_NONE, "sethostauth" },
	{ ZD1201GIWHOSTAUTH, IW_PRIV_TYPE_NONE,
	    IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostauth" },
	{ ZD1201SIWAUTHSTA, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1,
	    IW_PRIV_TYPE_NONE, "authstation" },
	{ ZD1201SIWMAXASSOC, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	    IW_PRIV_TYPE_NONE, "setmaxassoc" },
	{ ZD1201GIWMAXASSOC, IW_PRIV_TYPE_NONE,
	    IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmaxassoc" },
};

static const struct iw_handler_def zd1201_iw_handlers = {
	.num_standard 		= ARRAY_SIZE(zd1201_iw_handler),
	.num_private 		= ARRAY_SIZE(zd1201_private_handler),
	.num_private_args 	= ARRAY_SIZE(zd1201_private_args),
	.standard 		= (iw_handler *)zd1201_iw_handler,
	.private 		= (iw_handler *)zd1201_private_handler,
	.private_args 		= (struct iw_priv_args *) zd1201_private_args,
	.get_wireless_stats	= zd1201_get_wireless_stats,
};

static const struct net_device_ops zd1201_netdev_ops = {
	.ndo_open		= zd1201_net_open,
	.ndo_stop		= zd1201_net_stop,
	.ndo_start_xmit		= zd1201_hard_start_xmit,
	.ndo_tx_timeout		= zd1201_tx_timeout,
	.ndo_set_rx_mode	= zd1201_set_multicast,
	.ndo_set_mac_address	= zd1201_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * USB probe: allocate the netdev, upload the (station or AP, per the
 * module-level 'ap' parameter) firmware, start the driver core, apply
 * initial configuration and register the network interface.
 */
static int zd1201_probe(struct usb_interface *interface,
			const struct usb_device_id *id)
{
	struct zd1201 *zd;
	struct net_device *dev;
	struct usb_device *usb;
	int err;
	short porttype;
	char buf[IW_ESSID_MAX_SIZE+2];

	usb = interface_to_usbdev(interface);

	dev = alloc_etherdev(sizeof(*zd));
	if (!dev)
		return -ENOMEM;
	zd = netdev_priv(dev);
	zd->dev = dev;

	zd->ap = ap;
	zd->usb = usb;
	zd->removed = 0;
	init_waitqueue_head(&zd->rxdataq);
	INIT_HLIST_HEAD(&zd->fraglist);

	err = zd1201_fw_upload(usb, zd->ap);
	if (err) {
		dev_err(&usb->dev, "zd1201 firmware upload failed: %d\n",
		    err);
		goto err_zd;
	}

	/* Endpoint numbers are fixed by the hardware. */
	zd->endp_in = 1;
	zd->endp_out = 1;
	zd->endp_out2 = 2;
	zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!zd->rx_urb || !zd->tx_urb) {
		err = -ENOMEM;
		goto err_zd;
	}

	/* Give the freshly-uploaded firmware a moment before talking
	 * to it. */
	mdelay(100);
	err = zd1201_drvr_start(zd);
	if (err)
		goto err_zd;

	err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXDATALEN, 2312);
	if (err)
		goto err_start;

	err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL,
	    ZD1201_RATEB1 | ZD1201_RATEB2 | ZD1201_RATEB5 | ZD1201_RATEB11);
	if (err)
		goto err_start;

	dev->netdev_ops = &zd1201_netdev_ops;
	dev->wireless_handlers = &zd1201_iw_handlers;
	dev->watchdog_timeo = ZD1201_TX_TIMEOUT;
	strcpy(dev->name, "wlan%d");

	err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR,
	    dev->dev_addr, dev->addr_len);
	if (err)
		goto err_start;

	/* Set wildcard essid to match zd->essid */
	*(__le16 *)buf = cpu_to_le16(0);
	err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf,
	    IW_ESSID_MAX_SIZE+2, 1);
	if (err)
		goto err_start;

	if (zd->ap)
		porttype = ZD1201_PORTTYPE_AP;
	else
		porttype = ZD1201_PORTTYPE_BSS;
	err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype);
	if (err)
		goto err_start;

	SET_NETDEV_DEV(dev, &usb->dev);

	err = register_netdev(dev);
	if (err)
		goto err_start;
	dev_info(&usb->dev, "%s: ZD1201 USB Wireless interface\n",
	    dev->name);

	usb_set_intfdata(interface, zd);
	zd1201_enable(zd);	/* zd1201 likes to startup enabled, */
	zd1201_disable(zd);	/* interfering with all the wifis in range */
	return 0;

err_start:
	/* Leave the device in reset state */
	zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
err_zd:
	/* URB pointers are NULL (netdev_priv is zeroed) if allocation
	 * never happened; usb_free_urb(NULL) is a no-op. */
	usb_free_urb(zd->tx_urb);
	usb_free_urb(zd->rx_urb);
	free_netdev(dev);
	return err;
}

/* USB disconnect: drop pending fragments, kill and free both URBs and
 * unregister the netdev. */
static void zd1201_disconnect(struct usb_interface *interface)
{
	struct zd1201 *zd = usb_get_intfdata(interface);
	struct hlist_node *node2;
	struct zd1201_frag *frag;

	if (!zd)
		return;
	usb_set_intfdata(interface, NULL);

	hlist_for_each_entry_safe(frag, node2, &zd->fraglist, fnode) {
		hlist_del_init(&frag->fnode);
		kfree_skb(frag->skb);
		kfree(frag);
	}

	if (zd->tx_urb) {
		usb_kill_urb(zd->tx_urb);
		usb_free_urb(zd->tx_urb);
	}
	if (zd->rx_urb) {
		usb_kill_urb(zd->rx_urb);
		usb_free_urb(zd->rx_urb);
	}

	if (zd->dev) {
		unregister_netdev(zd->dev);
		free_netdev(zd->dev);
	}
}

#ifdef CONFIG_PM

/* Suspend: detach the netdev and disable the MAC, remembering whether
 * it was enabled so resume can restore it. */
static int zd1201_suspend(struct usb_interface *interface,
			   pm_message_t message)
{
	struct zd1201 *zd = usb_get_intfdata(interface);

	netif_device_detach(zd->dev);

	zd->was_enabled = zd->mac_enabled;

	if (zd->was_enabled)
		return zd1201_disable(zd);
	else
		return 0;
}

/* Resume: reattach the netdev and re-enable the MAC if it was enabled
 * before suspend. */
static int zd1201_resume(struct usb_interface *interface)
{
	struct zd1201 *zd = usb_get_intfdata(interface);

	if (!zd || !zd->dev)
		return -ENODEV;

	netif_device_attach(zd->dev);

	if (zd->was_enabled)
		return zd1201_enable(zd);
	else
		return 0;
}

#else

#define zd1201_suspend NULL
#define zd1201_resume  NULL

#endif

static struct usb_driver zd1201_usb = {
	.name = "zd1201",
	.probe = zd1201_probe,
	.disconnect = zd1201_disconnect,
	.id_table = zd1201_table,
	.suspend = zd1201_suspend,
	.resume = zd1201_resume,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(zd1201_usb);
gpl-2.0
kn8wolf/sandy_oneplus2_msm8994
arch/sparc/kernel/vio.c
2615
10613
/* vio.c: Virtual I/O channel devices probing infrastructure. * * Copyright (c) 2003-2005 IBM Corp. * Dave Engebretsen engebret@us.ibm.com * Santiago Leon santil@us.ibm.com * Hollis Blanchard <hollisb@us.ibm.com> * Stephen Rothwell * * Adapted to sparc64 by David S. Miller davem@davemloft.net */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/export.h> #include <linux/init.h> #include <asm/mdesc.h> #include <asm/vio.h> static const struct vio_device_id *vio_match_device( const struct vio_device_id *matches, const struct vio_dev *dev) { const char *type, *compat; int len; type = dev->type; compat = dev->compat; len = dev->compat_len; while (matches->type[0] || matches->compat[0]) { int match = 1; if (matches->type[0]) match &= !strcmp(matches->type, type); if (matches->compat[0]) { match &= len && of_find_in_proplist(compat, matches->compat, len); } if (match) return matches; matches++; } return NULL; } static int vio_bus_match(struct device *dev, struct device_driver *drv) { struct vio_dev *vio_dev = to_vio_dev(dev); struct vio_driver *vio_drv = to_vio_driver(drv); const struct vio_device_id *matches = vio_drv->id_table; if (!matches) return 0; return vio_match_device(matches, vio_dev) != NULL; } static int vio_device_probe(struct device *dev) { struct vio_dev *vdev = to_vio_dev(dev); struct vio_driver *drv = to_vio_driver(dev->driver); const struct vio_device_id *id; int error = -ENODEV; if (drv->probe) { id = vio_match_device(drv->id_table, vdev); if (id) error = drv->probe(vdev, id); } return error; } static int vio_device_remove(struct device *dev) { struct vio_dev *vdev = to_vio_dev(dev); struct vio_driver *drv = to_vio_driver(dev->driver); if (drv->remove) return drv->remove(vdev); return 1; } static ssize_t devspec_show(struct device *dev, struct device_attribute *attr, char *buf) { struct vio_dev *vdev = to_vio_dev(dev); const char *str = "none"; if (!strcmp(vdev->type, "vnet-port")) str = "vnet"; else if 
(!strcmp(vdev->type, "vdc-port")) str = "vdisk"; return sprintf(buf, "%s\n", str); } static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct vio_dev *vdev = to_vio_dev(dev); return sprintf(buf, "%s\n", vdev->type); } static struct device_attribute vio_dev_attrs[] = { __ATTR_RO(devspec), __ATTR_RO(type), __ATTR_NULL }; static struct bus_type vio_bus_type = { .name = "vio", .dev_attrs = vio_dev_attrs, .match = vio_bus_match, .probe = vio_device_probe, .remove = vio_device_remove, }; int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, const char *mod_name) { viodrv->driver.bus = &vio_bus_type; viodrv->driver.name = viodrv->name; viodrv->driver.owner = owner; viodrv->driver.mod_name = mod_name; return driver_register(&viodrv->driver); } EXPORT_SYMBOL(__vio_register_driver); void vio_unregister_driver(struct vio_driver *viodrv) { driver_unregister(&viodrv->driver); } EXPORT_SYMBOL(vio_unregister_driver); static void vio_dev_release(struct device *dev) { kfree(to_vio_dev(dev)); } static ssize_t show_pciobppath_attr(struct device *dev, struct device_attribute *attr, char *buf) { struct vio_dev *vdev; struct device_node *dp; vdev = to_vio_dev(dev); dp = vdev->dp; return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name); } static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); static struct device_node *cdev_node; static struct vio_dev *root_vdev; static u64 cdev_cfg_handle; static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp, struct vio_dev *vdev) { u64 a; mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { const u64 *chan_id; const u64 *irq; u64 target; target = mdesc_arc_target(hp, a); irq = mdesc_get_property(hp, target, "tx-ino", NULL); if (irq) vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq); irq = mdesc_get_property(hp, target, "rx-ino", NULL); if (irq) vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq); chan_id = mdesc_get_property(hp, target, "id", 
NULL); if (chan_id) vdev->channel_id = *chan_id; } } static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, struct device *parent) { const char *type, *compat, *bus_id_name; struct device_node *dp; struct vio_dev *vdev; int err, tlen, clen; const u64 *id, *cfg_handle; u64 a; type = mdesc_get_property(hp, mp, "device-type", &tlen); if (!type) { type = mdesc_get_property(hp, mp, "name", &tlen); if (!type) { type = mdesc_node_name(hp, mp); tlen = strlen(type) + 1; } } if (tlen > VIO_MAX_TYPE_LEN) { printk(KERN_ERR "VIO: Type string [%s] is too long.\n", type); return NULL; } id = mdesc_get_property(hp, mp, "id", NULL); cfg_handle = NULL; mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { u64 target; target = mdesc_arc_target(hp, a); cfg_handle = mdesc_get_property(hp, target, "cfg-handle", NULL); if (cfg_handle) break; } bus_id_name = type; if (!strcmp(type, "domain-services-port")) bus_id_name = "ds"; /* * 20 char is the old driver-core name size limit, which is no more. * This check can probably be removed after review and possible * adaption of the vio users name length handling. 
*/ if (strlen(bus_id_name) >= 20 - 4) { printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n", bus_id_name); return NULL; } compat = mdesc_get_property(hp, mp, "device-type", &clen); if (!compat) { clen = 0; } else if (clen > VIO_MAX_COMPAT_LEN) { printk(KERN_ERR "VIO: Compat len %d for [%s] is too long.\n", clen, type); return NULL; } vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); if (!vdev) { printk(KERN_ERR "VIO: Could not allocate vio_dev\n"); return NULL; } vdev->mp = mp; memcpy(vdev->type, type, tlen); if (compat) memcpy(vdev->compat, compat, clen); else memset(vdev->compat, 0, sizeof(vdev->compat)); vdev->compat_len = clen; vdev->channel_id = ~0UL; vdev->tx_irq = ~0; vdev->rx_irq = ~0; vio_fill_channel_info(hp, mp, vdev); if (!id) { dev_set_name(&vdev->dev, "%s", bus_id_name); vdev->dev_no = ~(u64)0; } else if (!cfg_handle) { dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); vdev->dev_no = *id; } else { dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, *cfg_handle, *id); vdev->dev_no = *cfg_handle; } vdev->dev.parent = parent; vdev->dev.bus = &vio_bus_type; vdev->dev.release = vio_dev_release; if (parent == NULL) { dp = cdev_node; } else if (to_vio_dev(parent) == root_vdev) { dp = of_get_next_child(cdev_node, NULL); while (dp) { if (!strcmp(dp->type, type)) break; dp = of_get_next_child(cdev_node, dp); } } else { dp = to_vio_dev(parent)->dp; } vdev->dp = dp; printk(KERN_INFO "VIO: Adding device %s\n", dev_name(&vdev->dev)); err = device_register(&vdev->dev); if (err) { printk(KERN_ERR "VIO: Could not register device %s, err=%d\n", dev_name(&vdev->dev), err); kfree(vdev); return NULL; } if (vdev->dp) err = sysfs_create_file(&vdev->dev.kobj, &dev_attr_obppath.attr); return vdev; } static void vio_add(struct mdesc_handle *hp, u64 node) { (void) vio_create_one(hp, node, &root_vdev->dev); } static int vio_md_node_match(struct device *dev, void *arg) { struct vio_dev *vdev = to_vio_dev(dev); if (vdev->mp == (u64) arg) return 1; return 0; } static void 
vio_remove(struct mdesc_handle *hp, u64 node) { struct device *dev; dev = device_find_child(&root_vdev->dev, (void *) node, vio_md_node_match); if (dev) { printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); device_unregister(dev); put_device(dev); } } static struct mdesc_notifier_client vio_device_notifier = { .add = vio_add, .remove = vio_remove, .node_name = "virtual-device-port", }; /* We are only interested in domain service ports under the * "domain-services" node. On control nodes there is another port * under "openboot" that we should not mess with as aparently that is * reserved exclusively for OBP use. */ static void vio_add_ds(struct mdesc_handle *hp, u64 node) { int found; u64 a; found = 0; mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { u64 target = mdesc_arc_target(hp, a); const char *name = mdesc_node_name(hp, target); if (!strcmp(name, "domain-services")) { found = 1; break; } } if (found) (void) vio_create_one(hp, node, &root_vdev->dev); } static struct mdesc_notifier_client vio_ds_notifier = { .add = vio_add_ds, .remove = vio_remove, .node_name = "domain-services-port", }; static const char *channel_devices_node = "channel-devices"; static const char *channel_devices_compat = "SUNW,sun4v-channel-devices"; static const char *cfg_handle_prop = "cfg-handle"; static int __init vio_init(void) { struct mdesc_handle *hp; const char *compat; const u64 *cfg_handle; int err, len; u64 root; err = bus_register(&vio_bus_type); if (err) { printk(KERN_ERR "VIO: Could not register bus type err=%d\n", err); return err; } hp = mdesc_grab(); if (!hp) return 0; root = mdesc_node_by_name(hp, MDESC_NODE_NULL, channel_devices_node); if (root == MDESC_NODE_NULL) { printk(KERN_INFO "VIO: No channel-devices MDESC node.\n"); mdesc_release(hp); return 0; } cdev_node = of_find_node_by_name(NULL, "channel-devices"); err = -ENODEV; if (!cdev_node) { printk(KERN_INFO "VIO: No channel-devices OBP node.\n"); goto out_release; } compat = mdesc_get_property(hp, root, 
"compatible", &len); if (!compat) { printk(KERN_ERR "VIO: Channel devices lacks compatible " "property\n"); goto out_release; } if (!of_find_in_proplist(compat, channel_devices_compat, len)) { printk(KERN_ERR "VIO: Channel devices node lacks (%s) " "compat entry.\n", channel_devices_compat); goto out_release; } cfg_handle = mdesc_get_property(hp, root, cfg_handle_prop, NULL); if (!cfg_handle) { printk(KERN_ERR "VIO: Channel devices lacks %s property\n", cfg_handle_prop); goto out_release; } cdev_cfg_handle = *cfg_handle; root_vdev = vio_create_one(hp, root, NULL); err = -ENODEV; if (!root_vdev) { printk(KERN_ERR "VIO: Could not create root device.\n"); goto out_release; } mdesc_register_notifier(&vio_device_notifier); mdesc_register_notifier(&vio_ds_notifier); mdesc_release(hp); return err; out_release: mdesc_release(hp); return err; } postcore_initcall(vio_init);
gpl-2.0
playfulgod/lge-kernel-iproj
arch/x86/kvm/i8259.c
2871
12625
/* * 8259 interrupt controller emulation * * Copyright (c) 2003-2004 Fabrice Bellard * Copyright (c) 2007 Intel Corporation * Copyright 2009 Red Hat, Inc. and/or its affiliates. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * Authors: * Yaozu (Eddie) Dong <Eddie.dong@intel.com> * Port from Qemu. 
*/ #include <linux/mm.h> #include <linux/slab.h> #include <linux/bitops.h> #include "irq.h" #include <linux/kvm_host.h> #include "trace.h" static void pic_irq_request(struct kvm *kvm, int level); static void pic_lock(struct kvm_pic *s) __acquires(&s->lock) { spin_lock(&s->lock); } static void pic_unlock(struct kvm_pic *s) __releases(&s->lock) { bool wakeup = s->wakeup_needed; struct kvm_vcpu *vcpu, *found = NULL; int i; s->wakeup_needed = false; spin_unlock(&s->lock); if (wakeup) { kvm_for_each_vcpu(i, vcpu, s->kvm) { if (kvm_apic_accept_pic_intr(vcpu)) { found = vcpu; break; } } if (!found) return; kvm_make_request(KVM_REQ_EVENT, found); kvm_vcpu_kick(found); } } static void pic_clear_isr(struct kvm_kpic_state *s, int irq) { s->isr &= ~(1 << irq); if (s != &s->pics_state->pics[0]) irq += 8; /* * We are dropping lock while calling ack notifiers since ack * notifier callbacks for assigned devices call into PIC recursively. * Other interrupt may be delivered to PIC while lock is dropped but * it should be safe since PIC state is already updated at this stage. */ pic_unlock(s->pics_state); kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq); pic_lock(s->pics_state); } /* * set irq level. If an edge is detected, then the IRR is set to 1 */ static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) { int mask, ret = 1; mask = 1 << irq; if (s->elcr & mask) /* level triggered */ if (level) { ret = !(s->irr & mask); s->irr |= mask; s->last_irr |= mask; } else { s->irr &= ~mask; s->last_irr &= ~mask; } else /* edge triggered */ if (level) { if ((s->last_irr & mask) == 0) { ret = !(s->irr & mask); s->irr |= mask; } s->last_irr |= mask; } else s->last_irr &= ~mask; return (s->imr & mask) ? -1 : ret; } /* * return the highest priority found in mask (highest = smallest * number). 
Return 8 if no irq */ static inline int get_priority(struct kvm_kpic_state *s, int mask) { int priority; if (mask == 0) return 8; priority = 0; while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0) priority++; return priority; } /* * return the pic wanted interrupt. return -1 if none */ static int pic_get_irq(struct kvm_kpic_state *s) { int mask, cur_priority, priority; mask = s->irr & ~s->imr; priority = get_priority(s, mask); if (priority == 8) return -1; /* * compute current priority. If special fully nested mode on the * master, the IRQ coming from the slave is not taken into account * for the priority computation. */ mask = s->isr; if (s->special_fully_nested_mode && s == &s->pics_state->pics[0]) mask &= ~(1 << 2); cur_priority = get_priority(s, mask); if (priority < cur_priority) /* * higher priority found: an irq should be generated */ return (priority + s->priority_add) & 7; else return -1; } /* * raise irq to CPU if necessary. must be called every time the active * irq may change */ static void pic_update_irq(struct kvm_pic *s) { int irq2, irq; irq2 = pic_get_irq(&s->pics[1]); if (irq2 >= 0) { /* * if irq request by slave pic, signal master PIC */ pic_set_irq1(&s->pics[0], 2, 1); pic_set_irq1(&s->pics[0], 2, 0); } irq = pic_get_irq(&s->pics[0]); pic_irq_request(s->kvm, irq >= 0); } void kvm_pic_update_irq(struct kvm_pic *s) { pic_lock(s); pic_update_irq(s); pic_unlock(s); } int kvm_pic_set_irq(void *opaque, int irq, int level) { struct kvm_pic *s = opaque; int ret = -1; pic_lock(s); if (irq >= 0 && irq < PIC_NUM_PINS) { ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); pic_update_irq(s); trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr, s->pics[irq >> 3].imr, ret == 0); } pic_unlock(s); return ret; } /* * acknowledge interrupt 'irq' */ static inline void pic_intack(struct kvm_kpic_state *s, int irq) { s->isr |= 1 << irq; /* * We don't clear a level sensitive interrupt here */ if (!(s->elcr & (1 << irq))) s->irr &= ~(1 << 
irq); if (s->auto_eoi) { if (s->rotate_on_auto_eoi) s->priority_add = (irq + 1) & 7; pic_clear_isr(s, irq); } } int kvm_pic_read_irq(struct kvm *kvm) { int irq, irq2, intno; struct kvm_pic *s = pic_irqchip(kvm); pic_lock(s); irq = pic_get_irq(&s->pics[0]); if (irq >= 0) { pic_intack(&s->pics[0], irq); if (irq == 2) { irq2 = pic_get_irq(&s->pics[1]); if (irq2 >= 0) pic_intack(&s->pics[1], irq2); else /* * spurious IRQ on slave controller */ irq2 = 7; intno = s->pics[1].irq_base + irq2; irq = irq2 + 8; } else intno = s->pics[0].irq_base + irq; } else { /* * spurious IRQ on host controller */ irq = 7; intno = s->pics[0].irq_base + irq; } pic_update_irq(s); pic_unlock(s); return intno; } void kvm_pic_reset(struct kvm_kpic_state *s) { int irq; struct kvm_vcpu *vcpu0 = s->pics_state->kvm->bsp_vcpu; u8 irr = s->irr, isr = s->imr; s->last_irr = 0; s->irr = 0; s->imr = 0; s->isr = 0; s->priority_add = 0; s->irq_base = 0; s->read_reg_select = 0; s->poll = 0; s->special_mask = 0; s->init_state = 0; s->auto_eoi = 0; s->rotate_on_auto_eoi = 0; s->special_fully_nested_mode = 0; s->init4 = 0; for (irq = 0; irq < PIC_NUM_PINS/2; irq++) { if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0)) if (irr & (1 << irq) || isr & (1 << irq)) { pic_clear_isr(s, irq); } } } static void pic_ioport_write(void *opaque, u32 addr, u32 val) { struct kvm_kpic_state *s = opaque; int priority, cmd, irq; addr &= 1; if (addr == 0) { if (val & 0x10) { s->init4 = val & 1; s->last_irr = 0; s->imr = 0; s->priority_add = 0; s->special_mask = 0; s->read_reg_select = 0; if (!s->init4) { s->special_fully_nested_mode = 0; s->auto_eoi = 0; } s->init_state = 1; if (val & 0x02) printk(KERN_ERR "single mode not supported"); if (val & 0x08) printk(KERN_ERR "level sensitive irq not supported"); } else if (val & 0x08) { if (val & 0x04) s->poll = 1; if (val & 0x02) s->read_reg_select = val & 1; if (val & 0x40) s->special_mask = (val >> 5) & 1; } else { cmd = val >> 5; switch (cmd) { case 0: case 4: s->rotate_on_auto_eoi = cmd 
>> 2; break; case 1: /* end of interrupt */ case 5: priority = get_priority(s, s->isr); if (priority != 8) { irq = (priority + s->priority_add) & 7; if (cmd == 5) s->priority_add = (irq + 1) & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); } break; case 3: irq = val & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); break; case 6: s->priority_add = (val + 1) & 7; pic_update_irq(s->pics_state); break; case 7: irq = val & 7; s->priority_add = (irq + 1) & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); break; default: break; /* no operation */ } } } else switch (s->init_state) { case 0: { /* normal mode */ u8 imr_diff = s->imr ^ val, off = (s == &s->pics_state->pics[0]) ? 0 : 8; s->imr = val; for (irq = 0; irq < PIC_NUM_PINS/2; irq++) if (imr_diff & (1 << irq)) kvm_fire_mask_notifiers( s->pics_state->kvm, SELECT_PIC(irq + off), irq + off, !!(s->imr & (1 << irq))); pic_update_irq(s->pics_state); break; } case 1: s->irq_base = val & 0xf8; s->init_state = 2; break; case 2: if (s->init4) s->init_state = 3; else s->init_state = 0; break; case 3: s->special_fully_nested_mode = (val >> 4) & 1; s->auto_eoi = (val >> 1) & 1; s->init_state = 0; break; } } static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1) { int ret; ret = pic_get_irq(s); if (ret >= 0) { if (addr1 >> 7) { s->pics_state->pics[0].isr &= ~(1 << 2); s->pics_state->pics[0].irr &= ~(1 << 2); } s->irr &= ~(1 << ret); pic_clear_isr(s, ret); if (addr1 >> 7 || ret != 2) pic_update_irq(s->pics_state); } else { ret = 0x07; pic_update_irq(s->pics_state); } return ret; } static u32 pic_ioport_read(void *opaque, u32 addr1) { struct kvm_kpic_state *s = opaque; unsigned int addr; int ret; addr = addr1; addr &= 1; if (s->poll) { ret = pic_poll_read(s, addr1); s->poll = 0; } else if (addr == 0) if (s->read_reg_select) ret = s->isr; else ret = s->irr; else ret = s->imr; return ret; } static void elcr_ioport_write(void *opaque, u32 addr, u32 val) { struct kvm_kpic_state *s = opaque; s->elcr = 
val & s->elcr_mask; } static u32 elcr_ioport_read(void *opaque, u32 addr1) { struct kvm_kpic_state *s = opaque; return s->elcr; } static int picdev_in_range(gpa_t addr) { switch (addr) { case 0x20: case 0x21: case 0xa0: case 0xa1: case 0x4d0: case 0x4d1: return 1; default: return 0; } } static inline struct kvm_pic *to_pic(struct kvm_io_device *dev) { return container_of(dev, struct kvm_pic, dev); } static int picdev_write(struct kvm_io_device *this, gpa_t addr, int len, const void *val) { struct kvm_pic *s = to_pic(this); unsigned char data = *(unsigned char *)val; if (!picdev_in_range(addr)) return -EOPNOTSUPP; if (len != 1) { if (printk_ratelimit()) printk(KERN_ERR "PIC: non byte write\n"); return 0; } pic_lock(s); switch (addr) { case 0x20: case 0x21: case 0xa0: case 0xa1: pic_ioport_write(&s->pics[addr >> 7], addr, data); break; case 0x4d0: case 0x4d1: elcr_ioport_write(&s->pics[addr & 1], addr, data); break; } pic_unlock(s); return 0; } static int picdev_read(struct kvm_io_device *this, gpa_t addr, int len, void *val) { struct kvm_pic *s = to_pic(this); unsigned char data = 0; if (!picdev_in_range(addr)) return -EOPNOTSUPP; if (len != 1) { if (printk_ratelimit()) printk(KERN_ERR "PIC: non byte read\n"); return 0; } pic_lock(s); switch (addr) { case 0x20: case 0x21: case 0xa0: case 0xa1: data = pic_ioport_read(&s->pics[addr >> 7], addr); break; case 0x4d0: case 0x4d1: data = elcr_ioport_read(&s->pics[addr & 1], addr); break; } *(unsigned char *)val = data; pic_unlock(s); return 0; } /* * callback when PIC0 irq status changed */ static void pic_irq_request(struct kvm *kvm, int level) { struct kvm_pic *s = pic_irqchip(kvm); if (!s->output) s->wakeup_needed = true; s->output = level; } static const struct kvm_io_device_ops picdev_ops = { .read = picdev_read, .write = picdev_write, }; struct kvm_pic *kvm_create_pic(struct kvm *kvm) { struct kvm_pic *s; int ret; s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); if (!s) return NULL; spin_lock_init(&s->lock); s->kvm = 
kvm; s->pics[0].elcr_mask = 0xf8; s->pics[1].elcr_mask = 0xde; s->pics[0].pics_state = s; s->pics[1].pics_state = s; /* * Initialize PIO device */ kvm_iodevice_init(&s->dev, &picdev_ops); mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev); mutex_unlock(&kvm->slots_lock); if (ret < 0) { kfree(s); return NULL; } return s; } void kvm_destroy_pic(struct kvm *kvm) { struct kvm_pic *vpic = kvm->arch.vpic; if (vpic) { kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev); kvm->arch.vpic = NULL; kfree(vpic); } }
gpl-2.0
imoseyon/leanKernel-i500-gingerbread
drivers/media/video/pvrusb2/pvrusb2-main.c
3639
4459
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/videodev2.h> #include "pvrusb2-hdw.h" #include "pvrusb2-devattr.h" #include "pvrusb2-context.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS #include "pvrusb2-sysfs.h" #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ #define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>" #define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner" #define DRIVER_VERSION "V4L in-tree version" #define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \ PVR2_TRACE_INFO| \ PVR2_TRACE_STD| \ PVR2_TRACE_TOLERANCE| \ PVR2_TRACE_TRAP| \ 0) int pvrusb2_debug = DEFAULT_DEBUG_MASK; module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug, "Debug trace mask"); #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS static struct pvr2_sysfs_class *class_ptr = NULL; #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ static void pvr_setup_attach(struct pvr2_context *pvr) { /* Create association with v4l layer */ pvr2_v4l2_create(pvr); #ifdef CONFIG_VIDEO_PVRUSB2_DVB /* Create association with dvb layer */ pvr2_dvb_create(pvr); #endif #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS pvr2_sysfs_create(pvr,class_ptr); 
#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ } static int pvr_probe(struct usb_interface *intf, const struct usb_device_id *devid) { struct pvr2_context *pvr; /* Create underlying hardware interface */ pvr = pvr2_context_create(intf,devid,pvr_setup_attach); if (!pvr) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Failed to create hdw handler"); return -ENOMEM; } pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr); usb_set_intfdata(intf, pvr); return 0; } /* * pvr_disconnect() * */ static void pvr_disconnect(struct usb_interface *intf) { struct pvr2_context *pvr = usb_get_intfdata(intf); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr); usb_set_intfdata (intf, NULL); pvr2_context_disconnect(pvr); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr); } static struct usb_driver pvr_driver = { .name = "pvrusb2", .id_table = pvr2_device_table, .probe = pvr_probe, .disconnect = pvr_disconnect }; /* * pvr_init() / pvr_exit() * * This code is run to initialize/exit the driver. * */ static int __init pvr_init(void) { int ret; pvr2_trace(PVR2_TRACE_INIT,"pvr_init"); ret = pvr2_context_global_init(); if (ret != 0) { pvr2_trace(PVR2_TRACE_INIT,"pvr_init failure code=%d",ret); return ret; } #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS class_ptr = pvr2_sysfs_class_create(); #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ ret = usb_register(&pvr_driver); if (ret == 0) printk(KERN_INFO "pvrusb2: " DRIVER_VERSION ":" DRIVER_DESC "\n"); if (pvrusb2_debug) printk(KERN_INFO "pvrusb2: Debug mask is %d (0x%x)\n", pvrusb2_debug,pvrusb2_debug); pvr2_trace(PVR2_TRACE_INIT,"pvr_init complete"); return ret; } static void __exit pvr_exit(void) { pvr2_trace(PVR2_TRACE_INIT,"pvr_exit"); usb_deregister(&pvr_driver); pvr2_context_global_done(); #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS pvr2_sysfs_class_destroy(class_ptr); #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ pvr2_trace(PVR2_TRACE_INIT,"pvr_exit complete"); } module_init(pvr_init); module_exit(pvr_exit); MODULE_AUTHOR(DRIVER_AUTHOR); 
MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 70 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
pocketbook-free/kernel_622
drivers/net/wireless/orinoco/mic.c
4151
2110
/* Orinoco MIC helpers * * See copyright notice in main.c */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/if_ether.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include "orinoco.h" #include "mic.h" /********************************************************************/ /* Michael MIC crypto setup */ /********************************************************************/ int orinoco_mic_init(struct orinoco_private *priv) { priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0); if (IS_ERR(priv->tx_tfm_mic)) { printk(KERN_DEBUG "orinoco_mic_init: could not allocate " "crypto API michael_mic\n"); priv->tx_tfm_mic = NULL; return -ENOMEM; } priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0); if (IS_ERR(priv->rx_tfm_mic)) { printk(KERN_DEBUG "orinoco_mic_init: could not allocate " "crypto API michael_mic\n"); priv->rx_tfm_mic = NULL; return -ENOMEM; } return 0; } void orinoco_mic_free(struct orinoco_private *priv) { if (priv->tx_tfm_mic) crypto_free_hash(priv->tx_tfm_mic); if (priv->rx_tfm_mic) crypto_free_hash(priv->rx_tfm_mic); } int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic) { struct hash_desc desc; struct scatterlist sg[2]; u8 hdr[ETH_HLEN + 2]; /* size of header + padding */ if (tfm_michael == NULL) { printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n"); return -1; } /* Copy header into buffer. We need the padding on the end zeroed */ memcpy(&hdr[0], da, ETH_ALEN); memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN); hdr[ETH_ALEN*2] = priority; hdr[ETH_ALEN*2+1] = 0; hdr[ETH_ALEN*2+2] = 0; hdr[ETH_ALEN*2+3] = 0; /* Use scatter gather to MIC header and data in one go */ sg_init_table(sg, 2); sg_set_buf(&sg[0], hdr, sizeof(hdr)); sg_set_buf(&sg[1], data, data_len); if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN)) return -1; desc.tfm = tfm_michael; desc.flags = 0; return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr), mic); }
gpl-2.0
iamroot11c/kernel_source
drivers/pcmcia/sa1100_cerf.c
4407
2012
/* * drivers/pcmcia/sa1100_cerf.c * * PCMCIA implementation routines for CerfBoard * Based off the Assabet. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/gpio.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <mach/cerf.h> #include "sa1100_generic.h" #define CERF_SOCKET 1 static int cerf_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { int ret; ret = gpio_request_one(CERF_GPIO_CF_RESET, GPIOF_OUT_INIT_LOW, "CF_RESET"); if (ret) return ret; skt->stat[SOC_STAT_CD].gpio = CERF_GPIO_CF_CD; skt->stat[SOC_STAT_CD].name = "CF_CD"; skt->stat[SOC_STAT_BVD1].gpio = CERF_GPIO_CF_BVD1; skt->stat[SOC_STAT_BVD1].name = "CF_BVD1"; skt->stat[SOC_STAT_BVD2].gpio = CERF_GPIO_CF_BVD2; skt->stat[SOC_STAT_BVD2].name = "CF_BVD2"; skt->stat[SOC_STAT_RDY].gpio = CERF_GPIO_CF_IRQ; skt->stat[SOC_STAT_RDY].name = "CF_IRQ"; return 0; } static void cerf_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { gpio_free(CERF_GPIO_CF_RESET); } static void cerf_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { state->vs_3v = 1; state->vs_Xv = 0; } static int cerf_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { switch (state->Vcc) { case 0: case 50: case 33: break; default: printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__, state->Vcc); return -1; } gpio_set_value(CERF_GPIO_CF_RESET, !!(state->flags & SS_RESET)); return 0; } static struct pcmcia_low_level cerf_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = cerf_pcmcia_hw_init, .hw_shutdown = cerf_pcmcia_hw_shutdown, .socket_state = cerf_pcmcia_socket_state, .configure_socket = cerf_pcmcia_configure_socket, }; int pcmcia_cerf_init(struct device *dev) { int ret = -ENODEV; if (machine_is_cerf()) ret = sa11xx_drv_pcmcia_probe(dev, &cerf_pcmcia_ops, CERF_SOCKET, 1); return ret; }
gpl-2.0
KylinUI/android_kernel_motorola_msm8960dt-common
drivers/acpi/acpica/pstree.c
4919
8025
/****************************************************************************** * * Module Name: pstree - Parser op tree manipulation/traversal/search * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "amlcode.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("pstree") /* Local prototypes */ #ifdef ACPI_OBSOLETE_FUNCTIONS union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op); #endif /******************************************************************************* * * FUNCTION: acpi_ps_get_arg * * PARAMETERS: Op - Get an argument for this op * Argn - Nth argument to get * * RETURN: The argument (as an Op object). NULL if argument does not exist * * DESCRIPTION: Get the specified op's argument. 
* ******************************************************************************/ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn) { union acpi_parse_object *arg = NULL; const struct acpi_opcode_info *op_info; ACPI_FUNCTION_ENTRY(); /* if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP) { return (Op->Common.Value.Arg); } */ /* Get the info structure for this opcode */ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); if (op_info->class == AML_CLASS_UNKNOWN) { /* Invalid opcode or ASCII character */ return (NULL); } /* Check if this opcode requires argument sub-objects */ if (!(op_info->flags & AML_HAS_ARGS)) { /* Has no linked argument objects */ return (NULL); } /* Get the requested argument object */ arg = op->common.value.arg; while (arg && argn) { argn--; arg = arg->common.next; } return (arg); } /******************************************************************************* * * FUNCTION: acpi_ps_append_arg * * PARAMETERS: Op - Append an argument to this Op. * Arg - Argument Op to append * * RETURN: None. 
* * DESCRIPTION: Append an argument to an op's argument list (a NULL arg is OK) * ******************************************************************************/ void acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg) { union acpi_parse_object *prev_arg; const struct acpi_opcode_info *op_info; ACPI_FUNCTION_ENTRY(); if (!op) { return; } /* Get the info structure for this opcode */ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); if (op_info->class == AML_CLASS_UNKNOWN) { /* Invalid opcode */ ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X", op->common.aml_opcode)); return; } /* Check if this opcode requires argument sub-objects */ if (!(op_info->flags & AML_HAS_ARGS)) { /* Has no linked argument objects */ return; } /* Append the argument to the linked argument list */ if (op->common.value.arg) { /* Append to existing argument list */ prev_arg = op->common.value.arg; while (prev_arg->common.next) { prev_arg = prev_arg->common.next; } prev_arg->common.next = arg; } else { /* No argument list, this will be the first argument */ op->common.value.arg = arg; } /* Set the parent in this arg and any args linked after it */ while (arg) { arg->common.parent = op; arg = arg->common.next; op->common.arg_list_length++; } } #ifdef ACPI_FUTURE_USAGE /******************************************************************************* * * FUNCTION: acpi_ps_get_depth_next * * PARAMETERS: Origin - Root of subtree to search * Op - Last (previous) Op that was found * * RETURN: Next Op found in the search. 
* * DESCRIPTION: Get next op in tree (walking the tree in depth-first order) * Return NULL when reaching "origin" or when walking up from root * ******************************************************************************/ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, union acpi_parse_object *op) { union acpi_parse_object *next = NULL; union acpi_parse_object *parent; union acpi_parse_object *arg; ACPI_FUNCTION_ENTRY(); if (!op) { return (NULL); } /* Look for an argument or child */ next = acpi_ps_get_arg(op, 0); if (next) { return (next); } /* Look for a sibling */ next = op->common.next; if (next) { return (next); } /* Look for a sibling of parent */ parent = op->common.parent; while (parent) { arg = acpi_ps_get_arg(parent, 0); while (arg && (arg != origin) && (arg != op)) { arg = arg->common.next; } if (arg == origin) { /* Reached parent of origin, end search */ return (NULL); } if (parent->common.next) { /* Found sibling of parent */ return (parent->common.next); } op = parent; parent = parent->common.parent; } return (next); } #ifdef ACPI_OBSOLETE_FUNCTIONS /******************************************************************************* * * FUNCTION: acpi_ps_get_child * * PARAMETERS: Op - Get the child of this Op * * RETURN: Child Op, Null if none is found. 
* * DESCRIPTION: Get op's children or NULL if none * ******************************************************************************/ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op) { union acpi_parse_object *child = NULL; ACPI_FUNCTION_ENTRY(); switch (op->common.aml_opcode) { case AML_SCOPE_OP: case AML_ELSE_OP: case AML_DEVICE_OP: case AML_THERMAL_ZONE_OP: case AML_INT_METHODCALL_OP: child = acpi_ps_get_arg(op, 0); break; case AML_BUFFER_OP: case AML_PACKAGE_OP: case AML_METHOD_OP: case AML_IF_OP: case AML_WHILE_OP: case AML_FIELD_OP: child = acpi_ps_get_arg(op, 1); break; case AML_POWER_RES_OP: case AML_INDEX_FIELD_OP: child = acpi_ps_get_arg(op, 2); break; case AML_PROCESSOR_OP: case AML_BANK_FIELD_OP: child = acpi_ps_get_arg(op, 3); break; default: /* All others have no children */ break; } return (child); } #endif #endif /* ACPI_FUTURE_USAGE */
gpl-2.0
cm-maya/android_kernel_hp_maya
drivers/acpi/acpica/nsnames.c
4919
8030
/******************************************************************************* * * Module Name: nsnames - Name manipulation and search * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "amlcode.h" #include "acnamesp.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsnames") /******************************************************************************* * * FUNCTION: acpi_ns_build_external_path * * PARAMETERS: Node - NS node whose pathname is needed * Size - Size of the pathname * *name_buffer - Where to return the pathname * * RETURN: Status * Places the pathname into the name_buffer, in external format * (name segments separated by path separators) * * DESCRIPTION: Generate a full pathaname * ******************************************************************************/ acpi_status acpi_ns_build_external_path(struct acpi_namespace_node *node, acpi_size size, char *name_buffer) { acpi_size index; struct acpi_namespace_node *parent_node; ACPI_FUNCTION_ENTRY(); /* Special case for root */ index = size - 1; if (index < ACPI_NAME_SIZE) { name_buffer[0] = AML_ROOT_PREFIX; name_buffer[1] = 0; return (AE_OK); } /* Store terminator byte, then build name backwards */ parent_node = node; name_buffer[index] = 0; while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) { index -= ACPI_NAME_SIZE; /* Put the name into the buffer */ ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name); parent_node = parent_node->parent; /* Prefix name with the path separator */ index--; name_buffer[index] = ACPI_PATH_SEPARATOR; } /* Overwrite final separator with the root prefix character */ 
name_buffer[index] = AML_ROOT_PREFIX; if (index != 0) { ACPI_ERROR((AE_INFO, "Could not construct external pathname; index=%u, size=%u, Path=%s", (u32) index, (u32) size, &name_buffer[size])); return (AE_BAD_PARAMETER); } return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_get_external_pathname * * PARAMETERS: Node - Namespace node whose pathname is needed * * RETURN: Pointer to storage containing the fully qualified name of * the node, In external format (name segments separated by path * separators.) * * DESCRIPTION: Used for debug printing in acpi_ns_search_table(). * ******************************************************************************/ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) { acpi_status status; char *name_buffer; acpi_size size; ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node); /* Calculate required buffer size based on depth below root */ size = acpi_ns_get_pathname_length(node); if (!size) { return_PTR(NULL); } /* Allocate a buffer to be returned to caller */ name_buffer = ACPI_ALLOCATE_ZEROED(size); if (!name_buffer) { ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size)); return_PTR(NULL); } /* Build the path in the allocated buffer */ status = acpi_ns_build_external_path(node, size, name_buffer); if (ACPI_FAILURE(status)) { ACPI_FREE(name_buffer); return_PTR(NULL); } return_PTR(name_buffer); } /******************************************************************************* * * FUNCTION: acpi_ns_get_pathname_length * * PARAMETERS: Node - Namespace node * * RETURN: Length of path, including prefix * * DESCRIPTION: Get the length of the pathname string for this node * ******************************************************************************/ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) { acpi_size size; struct acpi_namespace_node *next_node; ACPI_FUNCTION_ENTRY(); /* * Compute length of pathname as 5 * 
number of name segments. * Go back up the parent tree to the root */ size = 0; next_node = node; while (next_node && (next_node != acpi_gbl_root_node)) { if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) { ACPI_ERROR((AE_INFO, "Invalid Namespace Node (%p) while traversing namespace", next_node)); return 0; } size += ACPI_PATH_SEGMENT_LENGTH; next_node = next_node->parent; } if (!size) { size = 1; /* Root node case */ } return (size + 1); /* +1 for null string terminator */ } /******************************************************************************* * * FUNCTION: acpi_ns_handle_to_pathname * * PARAMETERS: target_handle - Handle of named object whose name is * to be found * Buffer - Where the pathname is returned * * RETURN: Status, Buffer is filled with pathname if status is AE_OK * * DESCRIPTION: Build and return a full namespace pathname * ******************************************************************************/ acpi_status acpi_ns_handle_to_pathname(acpi_handle target_handle, struct acpi_buffer * buffer) { acpi_status status; struct acpi_namespace_node *node; acpi_size required_size; ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle); node = acpi_ns_validate_handle(target_handle); if (!node) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Determine size required for the caller buffer */ required_size = acpi_ns_get_pathname_length(node); if (!required_size) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer(buffer, required_size); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Build the path in the caller buffer */ status = acpi_ns_build_external_path(node, required_size, buffer->pointer); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n", (char *)buffer->pointer, (u32) required_size)); return_ACPI_STATUS(AE_OK); }
gpl-2.0
marlontoe/android_kernel_sony_msm8974
drivers/acpi/acpica/nsnames.c
4919
8030
/******************************************************************************* * * Module Name: nsnames - Name manipulation and search * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "amlcode.h" #include "acnamesp.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsnames") /******************************************************************************* * * FUNCTION: acpi_ns_build_external_path * * PARAMETERS: Node - NS node whose pathname is needed * Size - Size of the pathname * *name_buffer - Where to return the pathname * * RETURN: Status * Places the pathname into the name_buffer, in external format * (name segments separated by path separators) * * DESCRIPTION: Generate a full pathaname * ******************************************************************************/ acpi_status acpi_ns_build_external_path(struct acpi_namespace_node *node, acpi_size size, char *name_buffer) { acpi_size index; struct acpi_namespace_node *parent_node; ACPI_FUNCTION_ENTRY(); /* Special case for root */ index = size - 1; if (index < ACPI_NAME_SIZE) { name_buffer[0] = AML_ROOT_PREFIX; name_buffer[1] = 0; return (AE_OK); } /* Store terminator byte, then build name backwards */ parent_node = node; name_buffer[index] = 0; while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) { index -= ACPI_NAME_SIZE; /* Put the name into the buffer */ ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name); parent_node = parent_node->parent; /* Prefix name with the path separator */ index--; name_buffer[index] = ACPI_PATH_SEPARATOR; } /* Overwrite final separator with the root prefix character */ 
name_buffer[index] = AML_ROOT_PREFIX; if (index != 0) { ACPI_ERROR((AE_INFO, "Could not construct external pathname; index=%u, size=%u, Path=%s", (u32) index, (u32) size, &name_buffer[size])); return (AE_BAD_PARAMETER); } return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_get_external_pathname * * PARAMETERS: Node - Namespace node whose pathname is needed * * RETURN: Pointer to storage containing the fully qualified name of * the node, In external format (name segments separated by path * separators.) * * DESCRIPTION: Used for debug printing in acpi_ns_search_table(). * ******************************************************************************/ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) { acpi_status status; char *name_buffer; acpi_size size; ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node); /* Calculate required buffer size based on depth below root */ size = acpi_ns_get_pathname_length(node); if (!size) { return_PTR(NULL); } /* Allocate a buffer to be returned to caller */ name_buffer = ACPI_ALLOCATE_ZEROED(size); if (!name_buffer) { ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size)); return_PTR(NULL); } /* Build the path in the allocated buffer */ status = acpi_ns_build_external_path(node, size, name_buffer); if (ACPI_FAILURE(status)) { ACPI_FREE(name_buffer); return_PTR(NULL); } return_PTR(name_buffer); } /******************************************************************************* * * FUNCTION: acpi_ns_get_pathname_length * * PARAMETERS: Node - Namespace node * * RETURN: Length of path, including prefix * * DESCRIPTION: Get the length of the pathname string for this node * ******************************************************************************/ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) { acpi_size size; struct acpi_namespace_node *next_node; ACPI_FUNCTION_ENTRY(); /* * Compute length of pathname as 5 * 
number of name segments. * Go back up the parent tree to the root */ size = 0; next_node = node; while (next_node && (next_node != acpi_gbl_root_node)) { if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) { ACPI_ERROR((AE_INFO, "Invalid Namespace Node (%p) while traversing namespace", next_node)); return 0; } size += ACPI_PATH_SEGMENT_LENGTH; next_node = next_node->parent; } if (!size) { size = 1; /* Root node case */ } return (size + 1); /* +1 for null string terminator */ } /******************************************************************************* * * FUNCTION: acpi_ns_handle_to_pathname * * PARAMETERS: target_handle - Handle of named object whose name is * to be found * Buffer - Where the pathname is returned * * RETURN: Status, Buffer is filled with pathname if status is AE_OK * * DESCRIPTION: Build and return a full namespace pathname * ******************************************************************************/ acpi_status acpi_ns_handle_to_pathname(acpi_handle target_handle, struct acpi_buffer * buffer) { acpi_status status; struct acpi_namespace_node *node; acpi_size required_size; ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle); node = acpi_ns_validate_handle(target_handle); if (!node) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Determine size required for the caller buffer */ required_size = acpi_ns_get_pathname_length(node); if (!required_size) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer(buffer, required_size); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Build the path in the caller buffer */ status = acpi_ns_build_external_path(node, required_size, buffer->pointer); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n", (char *)buffer->pointer, (u32) required_size)); return_ACPI_STATUS(AE_OK); }
gpl-2.0
aatjitra/OPO
drivers/acpi/acpica/exdebug.c
4919
7540
/****************************************************************************** * * Module Name: exdebug - Support for stores to the AML Debug Object * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exdebug") #ifndef ACPI_NO_ERROR_MESSAGES /******************************************************************************* * * FUNCTION: acpi_ex_do_debug_object * * PARAMETERS: source_desc - Object to be output to "Debug Object" * Level - Indentation level (used for packages) * Index - Current package element, zero if not pkg * * RETURN: None * * DESCRIPTION: Handles stores to the AML Debug Object. For example: * Store(INT1, Debug) * * This function is not compiled if ACPI_NO_ERROR_MESSAGES is set. * * This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or * if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal * operational case, stores to the debug object are ignored but can be easily * enabled if necessary. 
* ******************************************************************************/ void acpi_ex_do_debug_object(union acpi_operand_object *source_desc, u32 level, u32 index) { u32 i; ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc); /* Output must be enabled via the debug_object global or the dbg_level */ if (!acpi_gbl_enable_aml_debug_object && !(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) { return_VOID; } /* * Print line header as long as we are not in the middle of an * object display */ if (!((level > 0) && index == 0)) { acpi_os_printf("[ACPI Debug] %*s", level, " "); } /* Display the index for package output only */ if (index > 0) { acpi_os_printf("(%.2u) ", index - 1); } if (!source_desc) { acpi_os_printf("[Null Object]\n"); return_VOID; } if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) { acpi_os_printf("%s ", acpi_ut_get_object_type_name(source_desc)); if (!acpi_ut_valid_internal_object(source_desc)) { acpi_os_printf("%p, Invalid Internal Object!\n", source_desc); return_VOID; } } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) { acpi_os_printf("%s: %p\n", acpi_ut_get_type_name(((struct acpi_namespace_node *) source_desc)->type), source_desc); return_VOID; } else { return_VOID; } /* source_desc is of type ACPI_DESC_TYPE_OPERAND */ switch (source_desc->common.type) { case ACPI_TYPE_INTEGER: /* Output correct integer width */ if (acpi_gbl_integer_byte_width == 4) { acpi_os_printf("0x%8.8X\n", (u32)source_desc->integer.value); } else { acpi_os_printf("0x%8.8X%8.8X\n", ACPI_FORMAT_UINT64(source_desc->integer. value)); } break; case ACPI_TYPE_BUFFER: acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length); acpi_ut_dump_buffer2(source_desc->buffer.pointer, (source_desc->buffer.length < 256) ? 
source_desc->buffer.length : 256, DB_BYTE_DISPLAY); break; case ACPI_TYPE_STRING: acpi_os_printf("[0x%.2X] \"%s\"\n", source_desc->string.length, source_desc->string.pointer); break; case ACPI_TYPE_PACKAGE: acpi_os_printf("[Contains 0x%.2X Elements]\n", source_desc->package.count); /* Output the entire contents of the package */ for (i = 0; i < source_desc->package.count; i++) { acpi_ex_do_debug_object(source_desc->package. elements[i], level + 4, i + 1); } break; case ACPI_TYPE_LOCAL_REFERENCE: acpi_os_printf("[%s] ", acpi_ut_get_reference_name(source_desc)); /* Decode the reference */ switch (source_desc->reference.class) { case ACPI_REFCLASS_INDEX: acpi_os_printf("0x%X\n", source_desc->reference.value); break; case ACPI_REFCLASS_TABLE: /* Case for ddb_handle */ acpi_os_printf("Table Index 0x%X\n", source_desc->reference.value); return; default: break; } acpi_os_printf(" "); /* Check for valid node first, then valid object */ if (source_desc->reference.node) { if (ACPI_GET_DESCRIPTOR_TYPE (source_desc->reference.node) != ACPI_DESC_TYPE_NAMED) { acpi_os_printf (" %p - Not a valid namespace node\n", source_desc->reference.node); } else { acpi_os_printf("Node %p [%4.4s] ", source_desc->reference.node, (source_desc->reference.node)-> name.ascii); switch ((source_desc->reference.node)->type) { /* These types have no attached object */ case ACPI_TYPE_DEVICE: acpi_os_printf("Device\n"); break; case ACPI_TYPE_THERMAL: acpi_os_printf("Thermal Zone\n"); break; default: acpi_ex_do_debug_object((source_desc-> reference. node)->object, level + 4, 0); break; } } } else if (source_desc->reference.object) { if (ACPI_GET_DESCRIPTOR_TYPE (source_desc->reference.object) == ACPI_DESC_TYPE_NAMED) { acpi_ex_do_debug_object(((struct acpi_namespace_node *) source_desc->reference. object)->object, level + 4, 0); } else { acpi_ex_do_debug_object(source_desc->reference. 
object, level + 4, 0); } } break; default: acpi_os_printf("%p\n", source_desc); break; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n")); return_VOID; } #endif
gpl-2.0
AOKP/kernel_motorola_msm8960dt
drivers/acpi/acpica/utmisc.c
4919
26816
/******************************************************************************* * * Module Name: utmisc - common utility procedures * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <linux/module.h> #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utmisc") /******************************************************************************* * * FUNCTION: acpi_ut_validate_exception * * PARAMETERS: Status - The acpi_status code to be formatted * * RETURN: A string containing the exception text. NULL if exception is * not valid. * * DESCRIPTION: This function validates and translates an ACPI exception into * an ASCII string. 
* ******************************************************************************/ const char *acpi_ut_validate_exception(acpi_status status) { u32 sub_status; const char *exception = NULL; ACPI_FUNCTION_ENTRY(); /* * Status is composed of two parts, a "type" and an actual code */ sub_status = (status & ~AE_CODE_MASK); switch (status & AE_CODE_MASK) { case AE_CODE_ENVIRONMENTAL: if (sub_status <= AE_CODE_ENV_MAX) { exception = acpi_gbl_exception_names_env[sub_status]; } break; case AE_CODE_PROGRAMMER: if (sub_status <= AE_CODE_PGM_MAX) { exception = acpi_gbl_exception_names_pgm[sub_status]; } break; case AE_CODE_ACPI_TABLES: if (sub_status <= AE_CODE_TBL_MAX) { exception = acpi_gbl_exception_names_tbl[sub_status]; } break; case AE_CODE_AML: if (sub_status <= AE_CODE_AML_MAX) { exception = acpi_gbl_exception_names_aml[sub_status]; } break; case AE_CODE_CONTROL: if (sub_status <= AE_CODE_CTRL_MAX) { exception = acpi_gbl_exception_names_ctrl[sub_status]; } break; default: break; } return (ACPI_CAST_PTR(const char, exception)); } /******************************************************************************* * * FUNCTION: acpi_ut_is_pci_root_bridge * * PARAMETERS: Id - The HID/CID in string format * * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge * * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID. * ******************************************************************************/ u8 acpi_ut_is_pci_root_bridge(char *id) { /* * Check if this is a PCI root bridge. * ACPI 3.0+: check for a PCI Express root also. */ if (!(ACPI_STRCMP(id, PCI_ROOT_HID_STRING)) || !(ACPI_STRCMP(id, PCI_EXPRESS_ROOT_HID_STRING))) { return (TRUE); } return (FALSE); } /******************************************************************************* * * FUNCTION: acpi_ut_is_aml_table * * PARAMETERS: Table - An ACPI table * * RETURN: TRUE if table contains executable AML; FALSE otherwise * * DESCRIPTION: Check ACPI Signature for a table that contains AML code. 
* Currently, these are DSDT,SSDT,PSDT. All other table types are * data tables that do not contain AML code. * ******************************************************************************/ u8 acpi_ut_is_aml_table(struct acpi_table_header *table) { /* These are the only tables that contain executable AML */ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) || ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) || ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) { return (TRUE); } return (FALSE); } /******************************************************************************* * * FUNCTION: acpi_ut_allocate_owner_id * * PARAMETERS: owner_id - Where the new owner ID is returned * * RETURN: Status * * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to * track objects created by the table or method, to be deleted * when the method exits or the table is unloaded. * ******************************************************************************/ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) { u32 i; u32 j; u32 k; acpi_status status; ACPI_FUNCTION_TRACE(ut_allocate_owner_id); /* Guard against multiple allocations of ID to the same location */ if (*owner_id) { ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists", *owner_id)); return_ACPI_STATUS(AE_ALREADY_EXISTS); } /* Mutex for the global ID mask */ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Find a free owner ID, cycle through all possible IDs on repeated * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have * to be scanned twice. 
*/ for (i = 0, j = acpi_gbl_last_owner_id_index; i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) { if (j >= ACPI_NUM_OWNERID_MASKS) { j = 0; /* Wraparound to start of mask array */ } for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) { if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) { /* There are no free IDs in this mask */ break; } if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) { /* * Found a free ID. The actual ID is the bit index plus one, * making zero an invalid Owner ID. Save this as the last ID * allocated and update the global ID mask. */ acpi_gbl_owner_id_mask[j] |= (1 << k); acpi_gbl_last_owner_id_index = (u8) j; acpi_gbl_next_owner_id_offset = (u8) (k + 1); /* * Construct encoded ID from the index and bit position * * Note: Last [j].k (bit 255) is never used and is marked * permanently allocated (prevents +1 overflow) */ *owner_id = (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j)); ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "Allocated OwnerId: %2.2X\n", (unsigned int)*owner_id)); goto exit; } } acpi_gbl_next_owner_id_offset = 0; } /* * All owner_ids have been allocated. This typically should * not happen since the IDs are reused after deallocation. The IDs are * allocated upon table load (one per table) and method execution, and * they are released when a table is unloaded or a method completes * execution. * * If this error happens, there may be very deep nesting of invoked control * methods, or there may be a bug where the IDs are not released. */ status = AE_OWNER_ID_LIMIT; ACPI_ERROR((AE_INFO, "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT")); exit: (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_release_owner_id * * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_iD * * RETURN: None. No error is returned because we are either exiting a * control method or unloading a table. 
Either way, we would * ignore any error anyway. * * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 * ******************************************************************************/ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr) { acpi_owner_id owner_id = *owner_id_ptr; acpi_status status; u32 index; u32 bit; ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id); /* Always clear the input owner_id (zero is an invalid ID) */ *owner_id_ptr = 0; /* Zero is not a valid owner_iD */ if (owner_id == 0) { ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id)); return_VOID; } /* Mutex for the global ID mask */ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); if (ACPI_FAILURE(status)) { return_VOID; } /* Normalize the ID to zero */ owner_id--; /* Decode ID to index/offset pair */ index = ACPI_DIV_32(owner_id); bit = 1 << ACPI_MOD_32(owner_id); /* Free the owner ID only if it is valid */ if (acpi_gbl_owner_id_mask[index] & bit) { acpi_gbl_owner_id_mask[index] ^= bit; } else { ACPI_ERROR((AE_INFO, "Release of non-allocated OwnerId: 0x%2.2X", owner_id + 1)); } (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ut_strupr (strupr) * * PARAMETERS: src_string - The source string to convert * * RETURN: None * * DESCRIPTION: Convert string to uppercase * * NOTE: This is not a POSIX function, so it appears here, not in utclib.c * ******************************************************************************/ void acpi_ut_strupr(char *src_string) { char *string; ACPI_FUNCTION_ENTRY(); if (!src_string) { return; } /* Walk entire string, uppercasing the letters */ for (string = src_string; *string; string++) { *string = (char)ACPI_TOUPPER(*string); } return; } /******************************************************************************* * * FUNCTION: acpi_ut_print_string * * PARAMETERS: String - Null terminated ASCII string * 
max_length - Maximum output length * * RETURN: None * * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape * sequences. * ******************************************************************************/ void acpi_ut_print_string(char *string, u8 max_length) { u32 i; if (!string) { acpi_os_printf("<\"NULL STRING PTR\">"); return; } acpi_os_printf("\""); for (i = 0; string[i] && (i < max_length); i++) { /* Escape sequences */ switch (string[i]) { case 0x07: acpi_os_printf("\\a"); /* BELL */ break; case 0x08: acpi_os_printf("\\b"); /* BACKSPACE */ break; case 0x0C: acpi_os_printf("\\f"); /* FORMFEED */ break; case 0x0A: acpi_os_printf("\\n"); /* LINEFEED */ break; case 0x0D: acpi_os_printf("\\r"); /* CARRIAGE RETURN */ break; case 0x09: acpi_os_printf("\\t"); /* HORIZONTAL TAB */ break; case 0x0B: acpi_os_printf("\\v"); /* VERTICAL TAB */ break; case '\'': /* Single Quote */ case '\"': /* Double Quote */ case '\\': /* Backslash */ acpi_os_printf("\\%c", (int)string[i]); break; default: /* Check for printable character or hex escape */ if (ACPI_IS_PRINT(string[i])) { /* This is a normal character */ acpi_os_printf("%c", (int)string[i]); } else { /* All others will be Hex escapes */ acpi_os_printf("\\x%2.2X", (s32) string[i]); } break; } } acpi_os_printf("\""); if (i == max_length && string[i]) { acpi_os_printf("..."); } } /******************************************************************************* * * FUNCTION: acpi_ut_dword_byte_swap * * PARAMETERS: Value - Value to be converted * * RETURN: u32 integer with bytes swapped * * DESCRIPTION: Convert a 32-bit value to big-endian (swap the bytes) * ******************************************************************************/ u32 acpi_ut_dword_byte_swap(u32 value) { union { u32 value; u8 bytes[4]; } out; union { u32 value; u8 bytes[4]; } in; ACPI_FUNCTION_ENTRY(); in.value = value; out.bytes[0] = in.bytes[3]; out.bytes[1] = in.bytes[2]; out.bytes[2] = in.bytes[1]; out.bytes[3] = in.bytes[0]; return 
(out.value); } /******************************************************************************* * * FUNCTION: acpi_ut_set_integer_width * * PARAMETERS: Revision From DSDT header * * RETURN: None * * DESCRIPTION: Set the global integer bit width based upon the revision * of the DSDT. For Revision 1 and 0, Integers are 32 bits. * For Revision 2 and above, Integers are 64 bits. Yes, this * makes a difference. * ******************************************************************************/ void acpi_ut_set_integer_width(u8 revision) { if (revision < 2) { /* 32-bit case */ acpi_gbl_integer_bit_width = 32; acpi_gbl_integer_nybble_width = 8; acpi_gbl_integer_byte_width = 4; } else { /* 64-bit case (ACPI 2.0+) */ acpi_gbl_integer_bit_width = 64; acpi_gbl_integer_nybble_width = 16; acpi_gbl_integer_byte_width = 8; } } #ifdef ACPI_DEBUG_OUTPUT /******************************************************************************* * * FUNCTION: acpi_ut_display_init_pathname * * PARAMETERS: Type - Object type of the node * obj_handle - Handle whose pathname will be displayed * Path - Additional path string to be appended. 
* (NULL if no extra path) * * RETURN: acpi_status * * DESCRIPTION: Display full pathname of an object, DEBUG ONLY * ******************************************************************************/ void acpi_ut_display_init_pathname(u8 type, struct acpi_namespace_node *obj_handle, char *path) { acpi_status status; struct acpi_buffer buffer; ACPI_FUNCTION_ENTRY(); /* Only print the path if the appropriate debug level is enabled */ if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) { return; } /* Get the full pathname to the node */ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; status = acpi_ns_handle_to_pathname(obj_handle, &buffer); if (ACPI_FAILURE(status)) { return; } /* Print what we're doing */ switch (type) { case ACPI_TYPE_METHOD: acpi_os_printf("Executing "); break; default: acpi_os_printf("Initializing "); break; } /* Print the object type and pathname */ acpi_os_printf("%-12s %s", acpi_ut_get_type_name(type), (char *)buffer.pointer); /* Extra path is used to append names like _STA, _INI, etc. */ if (path) { acpi_os_printf(".%s", path); } acpi_os_printf("\n"); ACPI_FREE(buffer.pointer); } #endif /******************************************************************************* * * FUNCTION: acpi_ut_valid_acpi_char * * PARAMETERS: Char - The character to be examined * Position - Byte position (0-3) * * RETURN: TRUE if the character is valid, FALSE otherwise * * DESCRIPTION: Check for a valid ACPI character. Must be one of: * 1) Upper case alpha * 2) numeric * 3) underscore * * We allow a '!' as the last character because of the ASF! table * ******************************************************************************/ u8 acpi_ut_valid_acpi_char(char character, u32 position) { if (!((character >= 'A' && character <= 'Z') || (character >= '0' && character <= '9') || (character == '_'))) { /* Allow a '!' in the last position */ if (character == '!' 
&& position == 3) { return (TRUE); } return (FALSE); } return (TRUE); } /******************************************************************************* * * FUNCTION: acpi_ut_valid_acpi_name * * PARAMETERS: Name - The name to be examined * * RETURN: TRUE if the name is valid, FALSE otherwise * * DESCRIPTION: Check for a valid ACPI name. Each character must be one of: * 1) Upper case alpha * 2) numeric * 3) underscore * ******************************************************************************/ u8 acpi_ut_valid_acpi_name(u32 name) { u32 i; ACPI_FUNCTION_ENTRY(); for (i = 0; i < ACPI_NAME_SIZE; i++) { if (!acpi_ut_valid_acpi_char ((ACPI_CAST_PTR(char, &name))[i], i)) { return (FALSE); } } return (TRUE); } /******************************************************************************* * * FUNCTION: acpi_ut_repair_name * * PARAMETERS: Name - The ACPI name to be repaired * * RETURN: Repaired version of the name * * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and * return the new name. * ******************************************************************************/ acpi_name acpi_ut_repair_name(char *name) { u32 i; char new_name[ACPI_NAME_SIZE]; for (i = 0; i < ACPI_NAME_SIZE; i++) { new_name[i] = name[i]; /* * Replace a bad character with something printable, yet technically * still invalid. This prevents any collisions with existing "good" * names in the namespace. */ if (!acpi_ut_valid_acpi_char(name[i], i)) { new_name[i] = '*'; } } return (*(u32 *) new_name); } /******************************************************************************* * * FUNCTION: acpi_ut_strtoul64 * * PARAMETERS: String - Null terminated string * Base - Radix of the string: 16 or ACPI_ANY_BASE; * ACPI_ANY_BASE means 'in behalf of to_integer' * ret_integer - Where the converted integer is returned * * RETURN: Status and Converted value * * DESCRIPTION: Convert a string into an unsigned value. 
Performs either a * 32-bit or 64-bit conversion, depending on the current mode * of the interpreter. * NOTE: Does not support Octal strings, not needed. * ******************************************************************************/ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) { u32 this_digit = 0; u64 return_value = 0; u64 quotient; u64 dividend; u32 to_integer_op = (base == ACPI_ANY_BASE); u32 mode32 = (acpi_gbl_integer_byte_width == 4); u8 valid_digits = 0; u8 sign_of0x = 0; u8 term = 0; ACPI_FUNCTION_TRACE_STR(ut_stroul64, string); switch (base) { case ACPI_ANY_BASE: case 16: break; default: /* Invalid Base */ return_ACPI_STATUS(AE_BAD_PARAMETER); } if (!string) { goto error_exit; } /* Skip over any white space in the buffer */ while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) { string++; } if (to_integer_op) { /* * Base equal to ACPI_ANY_BASE means 'to_integer operation case'. * We need to determine if it is decimal or hexadecimal. */ if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) { sign_of0x = 1; base = 16; /* Skip over the leading '0x' */ string += 2; } else { base = 10; } } /* Any string left? Check that '0x' is not followed by white space. */ if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') { if (to_integer_op) { goto error_exit; } else { goto all_done; } } /* * Perform a 32-bit or 64-bit conversion, depending upon the current * execution mode of the interpreter */ dividend = (mode32) ? 
ACPI_UINT32_MAX : ACPI_UINT64_MAX; /* Main loop: convert the string to a 32- or 64-bit integer */ while (*string) { if (ACPI_IS_DIGIT(*string)) { /* Convert ASCII 0-9 to Decimal value */ this_digit = ((u8) * string) - '0'; } else if (base == 10) { /* Digit is out of range; possible in to_integer case only */ term = 1; } else { this_digit = (u8) ACPI_TOUPPER(*string); if (ACPI_IS_XDIGIT((char)this_digit)) { /* Convert ASCII Hex char to value */ this_digit = this_digit - 'A' + 10; } else { term = 1; } } if (term) { if (to_integer_op) { goto error_exit; } else { break; } } else if ((valid_digits == 0) && (this_digit == 0) && !sign_of0x) { /* Skip zeros */ string++; continue; } valid_digits++; if (sign_of0x && ((valid_digits > 16) || ((valid_digits > 8) && mode32))) { /* * This is to_integer operation case. * No any restrictions for string-to-integer conversion, * see ACPI spec. */ goto error_exit; } /* Divide the digit into the correct position */ (void)acpi_ut_short_divide((dividend - (u64) this_digit), base, &quotient, NULL); if (return_value > quotient) { if (to_integer_op) { goto error_exit; } else { break; } } return_value *= base; return_value += this_digit; string++; } /* All done, normal exit */ all_done: ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n", ACPI_FORMAT_UINT64(return_value))); *ret_integer = return_value; return_ACPI_STATUS(AE_OK); error_exit: /* Base was set/validated above */ if (base == 10) { return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT); } else { return_ACPI_STATUS(AE_BAD_HEX_CONSTANT); } } /******************************************************************************* * * FUNCTION: acpi_ut_create_update_state_and_push * * PARAMETERS: Object - Object to be added to the new state * Action - Increment/Decrement * state_list - List the state will be added to * * RETURN: Status * * DESCRIPTION: Create a new state and push it * ******************************************************************************/ acpi_status 
acpi_ut_create_update_state_and_push(union acpi_operand_object *object, u16 action, union acpi_generic_state **state_list) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); /* Ignore null objects; these are expected */ if (!object) { return (AE_OK); } state = acpi_ut_create_update_state(object, action); if (!state) { return (AE_NO_MEMORY); } acpi_ut_push_generic_state(state_list, state); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_walk_package_tree * * PARAMETERS: source_object - The package to walk * target_object - Target object (if package is being copied) * walk_callback - Called once for each package element * Context - Passed to the callback function * * RETURN: Status * * DESCRIPTION: Walk through a package * ******************************************************************************/ acpi_status acpi_ut_walk_package_tree(union acpi_operand_object * source_object, void *target_object, acpi_pkg_callback walk_callback, void *context) { acpi_status status = AE_OK; union acpi_generic_state *state_list = NULL; union acpi_generic_state *state; u32 this_index; union acpi_operand_object *this_source_obj; ACPI_FUNCTION_TRACE(ut_walk_package_tree); state = acpi_ut_create_pkg_state(source_object, target_object, 0); if (!state) { return_ACPI_STATUS(AE_NO_MEMORY); } while (state) { /* Get one element of the package */ this_index = state->pkg.index; this_source_obj = (union acpi_operand_object *) state->pkg.source_object->package.elements[this_index]; /* * Check for: * 1) An uninitialized package element. It is completely * legal to declare a package and leave it uninitialized * 2) Not an internal object - can be a namespace node instead * 3) Any type other than a package. Packages are handled in else * case below. 
*/ if ((!this_source_obj) || (ACPI_GET_DESCRIPTOR_TYPE(this_source_obj) != ACPI_DESC_TYPE_OPERAND) || (this_source_obj->common.type != ACPI_TYPE_PACKAGE)) { status = walk_callback(ACPI_COPY_TYPE_SIMPLE, this_source_obj, state, context); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } state->pkg.index++; while (state->pkg.index >= state->pkg.source_object->package.count) { /* * We've handled all of the objects at this level, This means * that we have just completed a package. That package may * have contained one or more packages itself. * * Delete this state and pop the previous state (package). */ acpi_ut_delete_generic_state(state); state = acpi_ut_pop_generic_state(&state_list); /* Finished when there are no more states */ if (!state) { /* * We have handled all of the objects in the top level * package just add the length of the package objects * and exit */ return_ACPI_STATUS(AE_OK); } /* * Go back up a level and move the index past the just * completed package object. */ state->pkg.index++; } } else { /* This is a subobject of type package */ status = walk_callback(ACPI_COPY_TYPE_PACKAGE, this_source_obj, state, context); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Push the current state and create a new one * The callback above returned a new target package object. */ acpi_ut_push_generic_state(&state_list, state); state = acpi_ut_create_pkg_state(this_source_obj, state->pkg. this_target_obj, 0); if (!state) { /* Free any stacked Update State objects */ while (state_list) { state = acpi_ut_pop_generic_state (&state_list); acpi_ut_delete_generic_state(state); } return_ACPI_STATUS(AE_NO_MEMORY); } } } /* We should never get here */ return_ACPI_STATUS(AE_AML_INTERNAL); }
gpl-2.0